diff --git a/packages/base/any/kernels/3.18.25/configs/arm64-all/.gitignore b/packages/base/any/kernels/3.18.25/configs/arm64-all/.gitignore new file mode 100644 index 00000000..02cf40ca --- /dev/null +++ b/packages/base/any/kernels/3.18.25/configs/arm64-all/.gitignore @@ -0,0 +1 @@ +kernel-3.18.25-arm64-all diff --git a/packages/base/any/kernels/3.18.25/configs/arm64-all/Makefile b/packages/base/any/kernels/3.18.25/configs/arm64-all/Makefile new file mode 100644 index 00000000..52b52ce7 --- /dev/null +++ b/packages/base/any/kernels/3.18.25/configs/arm64-all/Makefile @@ -0,0 +1,25 @@ +############################################################ +# +# Default 3.18.25 configuration for arm64 platforms. +# +############################################################ +THIS_DIR := $(abspath $(dir $(lastword $(MAKEFILE_LIST)))) +include $(ONL)/make/config.mk + +ifndef K_TARGET_DIR +K_TARGET_DIR := $(THIS_DIR) +endif + +include ../../kconfig.mk +K_CONFIG := arm64-all.config +K_BUILD_TARGET := Image Image.gz arm64-nxp-ls2080ardb-r0.dtb +K_COPY_SRC := arch/arm64/boot/Image +K_COPY_GZIP := 1 +ifndef K_COPY_DST +K_COPY_DST := kernel-3.18.25-arm64-all.bin.gz +endif + +export ARCH=arm64 +DTS_LIST := arm64-nxp-ls2080ardb-r0 + +include $(ONL)/make/kbuild.mk diff --git a/packages/base/any/kernels/3.18.25/configs/arm64-all/arm64-all.config b/packages/base/any/kernels/3.18.25/configs/arm64-all/arm64-all.config new file mode 100644 index 00000000..bfee8e37 --- /dev/null +++ b/packages/base/any/kernels/3.18.25/configs/arm64-all/arm64-all.config @@ -0,0 +1,2976 @@ +# +# Automatically generated file; DO NOT EDIT. +# Linux/arm64 3.18.25 Kernel Configuration +# +CONFIG_ARM64=y +CONFIG_64BIT=y +CONFIG_ARCH_PHYS_ADDR_T_64BIT=y +CONFIG_MMU=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_RWSEM_XCHGADD_ALGORITHM=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_GENERIC_CSUM=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_ZONE_DMA=y +CONFIG_HAVE_GENERIC_RCU_GUP=y +CONFIG_ARCH_DMA_ADDR_T_64BIT=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_NEED_SG_DMA_LENGTH=y +CONFIG_SWIOTLB=y +CONFIG_IOMMU_HELPER=y +CONFIG_KERNEL_MODE_NEON=y +CONFIG_FIX_EARLYCON_MEM=y +CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" +CONFIG_IRQ_WORK=y +CONFIG_BUILDTIME_EXTABLE_SORT=y + +# +# General setup +# +CONFIG_INIT_ENV_ARG_LIMIT=32 +CONFIG_CROSS_COMPILE="aarch64-linux-gnu-" +# CONFIG_COMPILE_TEST is not set +CONFIG_LOCALVERSION="" +CONFIG_LOCALVERSION_AUTO=y +CONFIG_DEFAULT_HOSTNAME="(none)" +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y +CONFIG_CROSS_MEMORY_ATTACH=y +# CONFIG_FHANDLE is not set +CONFIG_USELIB=y +CONFIG_AUDIT=y +CONFIG_HAVE_ARCH_AUDITSYSCALL=y +# CONFIG_AUDITSYSCALL is not set + +# +# IRQ subsystem +# +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_HARDIRQS_SW_RESEND=y +CONFIG_IRQ_DOMAIN=y +CONFIG_IRQ_DOMAIN_HIERARCHY=y +CONFIG_GENERIC_MSI_IRQ=y +CONFIG_GENERIC_MSI_IRQ_DOMAIN=y +CONFIG_HANDLE_DOMAIN_IRQ=y +# CONFIG_IRQ_DOMAIN_DEBUG is not set +CONFIG_SPARSE_IRQ=y +CONFIG_GENERIC_TIME_VSYSCALL=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_GENERIC_CLOCKEVENTS_BUILD=y +CONFIG_ARCH_HAS_TICK_BROADCAST=y +CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y + +# +# Timers subsystem +# +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ_COMMON=y +# CONFIG_HZ_PERIODIC is not set +CONFIG_NO_HZ_IDLE=y +# CONFIG_NO_HZ_FULL is not set +# CONFIG_NO_HZ is not set +CONFIG_HIGH_RES_TIMERS=y + +# +# CPU/Task time and 
stats accounting +# +CONFIG_TICK_CPU_ACCOUNTING=y +# CONFIG_VIRT_CPU_ACCOUNTING_GEN is not set +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y + +# +# RCU Subsystem +# +CONFIG_TREE_PREEMPT_RCU=y +CONFIG_PREEMPT_RCU=y +# CONFIG_TASKS_RCU is not set +CONFIG_RCU_STALL_COMMON=y +# CONFIG_RCU_USER_QS is not set +CONFIG_RCU_FANOUT=64 +CONFIG_RCU_FANOUT_LEAF=16 +# CONFIG_RCU_FANOUT_EXACT is not set +# CONFIG_RCU_FAST_NO_HZ is not set +# CONFIG_TREE_RCU_TRACE is not set +# CONFIG_RCU_BOOST is not set +# CONFIG_RCU_NOCB_CPU is not set +CONFIG_BUILD_BIN2C=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=14 +CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 +CONFIG_GENERIC_SCHED_CLOCK=y +CONFIG_CGROUPS=y +# CONFIG_CGROUP_DEBUG is not set +# CONFIG_CGROUP_FREEZER is not set +# CONFIG_CGROUP_DEVICE is not set +# CONFIG_CPUSETS is not set +# CONFIG_CGROUP_CPUACCT is not set +CONFIG_RESOURCE_COUNTERS=y +CONFIG_MEMCG=y +CONFIG_MEMCG_SWAP=y +CONFIG_MEMCG_SWAP_ENABLED=y +CONFIG_MEMCG_KMEM=y +CONFIG_CGROUP_HUGETLB=y +# CONFIG_CGROUP_PERF is not set +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +# CONFIG_CFS_BANDWIDTH is not set +# CONFIG_RT_GROUP_SCHED is not set +# CONFIG_BLK_CGROUP is not set +# CONFIG_CHECKPOINT_RESTORE is not set +CONFIG_NAMESPACES=y +# CONFIG_UTS_NS is not set +# CONFIG_IPC_NS is not set +# CONFIG_USER_NS is not set +# CONFIG_PID_NS is not set +CONFIG_NET_NS=y +CONFIG_SCHED_AUTOGROUP=y +# CONFIG_SYSFS_DEPRECATED is not set +# CONFIG_RELAY is not set +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +CONFIG_RD_BZIP2=y +CONFIG_RD_LZMA=y +CONFIG_RD_XZ=y +CONFIG_RD_LZO=y +CONFIG_RD_LZ4=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_SYSCTL=y +CONFIG_ANON_INODES=y +CONFIG_HAVE_UID16=y +CONFIG_SYSCTL_EXCEPTION_TRACE=y +CONFIG_BPF=y +# CONFIG_EXPERT is not set +CONFIG_UID16=y +# CONFIG_SGETMASK_SYSCALL is not set +CONFIG_SYSFS_SYSCALL=y +# CONFIG_SYSCTL_SYSCALL is not set +CONFIG_KALLSYMS=y +CONFIG_KALLSYMS_ALL=y +CONFIG_PRINTK=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +# CONFIG_BPF_SYSCALL is not set +CONFIG_SHMEM=y +CONFIG_AIO=y +CONFIG_ADVISE_SYSCALLS=y +CONFIG_PCI_QUIRKS=y +# CONFIG_EMBEDDED is not set +CONFIG_HAVE_PERF_EVENTS=y +CONFIG_PERF_USE_VMALLOC=y + +# +# Kernel Performance Events And Counters +# +CONFIG_PERF_EVENTS=y +# CONFIG_DEBUG_PERF_USE_VMALLOC is not set +CONFIG_VM_EVENT_COUNTERS=y +# CONFIG_COMPAT_BRK is not set +CONFIG_SLAB=y +# CONFIG_SLUB is not set +# CONFIG_SYSTEM_TRUSTED_KEYRING is not set +CONFIG_PROFILING=y +CONFIG_JUMP_LABEL=y +# CONFIG_UPROBES is not set +# CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set +CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y +CONFIG_HAVE_ARCH_TRACEHOOK=y +CONFIG_HAVE_DMA_ATTRS=y +CONFIG_HAVE_DMA_CONTIGUOUS=y +CONFIG_GENERIC_SMP_IDLE_THREAD=y +CONFIG_HAVE_CLK=y +CONFIG_HAVE_DMA_API_DEBUG=y +CONFIG_HAVE_HW_BREAKPOINT=y +CONFIG_HAVE_PERF_REGS=y +CONFIG_HAVE_PERF_USER_STACK_DUMP=y +CONFIG_HAVE_ARCH_JUMP_LABEL=y +CONFIG_HAVE_RCU_TABLE_FREE=y +CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y +CONFIG_HAVE_CC_STACKPROTECTOR=y +# CONFIG_CC_STACKPROTECTOR is not set +CONFIG_CC_STACKPROTECTOR_NONE=y +# CONFIG_CC_STACKPROTECTOR_REGULAR is not set +# CONFIG_CC_STACKPROTECTOR_STRONG is not set +CONFIG_HAVE_CONTEXT_TRACKING=y +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y +CONFIG_MODULES_USE_ELF_RELA=y 
+CONFIG_CLONE_BACKWARDS=y +CONFIG_OLD_SIGSUSPEND3=y +CONFIG_COMPAT_OLD_SIGACTION=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +CONFIG_HAVE_GENERIC_DMA_COHERENT=y +CONFIG_SLABINFO=y +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULES=y +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +# CONFIG_MODULE_FORCE_UNLOAD is not set +CONFIG_MODVERSIONS=y +# CONFIG_MODULE_SRCVERSION_ALL is not set +# CONFIG_MODULE_SIG is not set +# CONFIG_MODULE_COMPRESS is not set +CONFIG_STOP_MACHINE=y +CONFIG_BLOCK=y +# CONFIG_BLK_DEV_BSG is not set +# CONFIG_BLK_DEV_BSGLIB is not set +# CONFIG_BLK_DEV_INTEGRITY is not set +# CONFIG_BLK_CMDLINE_PARSER is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +# CONFIG_AIX_PARTITION is not set +# CONFIG_OSF_PARTITION is not set +# CONFIG_AMIGA_PARTITION is not set +# CONFIG_ATARI_PARTITION is not set +# CONFIG_MAC_PARTITION is not set +CONFIG_MSDOS_PARTITION=y +# CONFIG_BSD_DISKLABEL is not set +# CONFIG_MINIX_SUBPARTITION is not set +# CONFIG_SOLARIS_X86_PARTITION is not set +# CONFIG_UNIXWARE_DISKLABEL is not set +# CONFIG_LDM_PARTITION is not set +# CONFIG_SGI_PARTITION is not set +# CONFIG_ULTRIX_PARTITION is not set +# CONFIG_SUN_PARTITION is not set +# CONFIG_KARMA_PARTITION is not set +CONFIG_EFI_PARTITION=y +# CONFIG_SYSV68_PARTITION is not set +# CONFIG_CMDLINE_PARTITION is not set +CONFIG_BLOCK_COMPAT=y + +# +# IO Schedulers +# +CONFIG_IOSCHED_NOOP=y +# CONFIG_IOSCHED_DEADLINE is not set +CONFIG_IOSCHED_CFQ=y +CONFIG_DEFAULT_CFQ=y +# CONFIG_DEFAULT_NOOP is not set +CONFIG_DEFAULT_IOSCHED="cfq" +CONFIG_PREEMPT_NOTIFIERS=y +CONFIG_UNINLINE_SPIN_UNLOCK=y +CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y +CONFIG_MUTEX_SPIN_ON_OWNER=y +CONFIG_RWSEM_SPIN_ON_OWNER=y +CONFIG_FREEZER=y + +# +# Platform selection +# +CONFIG_ARCH_THUNDER=y +CONFIG_ARCH_VEXPRESS=y +CONFIG_ARCH_XGENE=y +CONFIG_ARCH_LAYERSCAPE=y + +# +# Bus support +# +CONFIG_ARM_AMBA=y +CONFIG_PCI=y +CONFIG_PCI_DOMAINS=y +CONFIG_PCI_DOMAINS_GENERIC=y +CONFIG_PCI_SYSCALL=y +CONFIG_PCI_MSI=y +CONFIG_PCI_MSI_IRQ_DOMAIN=y +# CONFIG_PCI_DEBUG is not set +# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set +# CONFIG_PCI_STUB is not set +# CONFIG_PCI_IOV is not set +# CONFIG_PCI_PRI is not set +# CONFIG_PCI_PASID is not set + +# +# PCI host controller drivers +# +CONFIG_PCIE_DW=y +CONFIG_PCI_XGENE=y +CONFIG_PCI_XGENE_MSI=y +CONFIG_PCI_LAYERSCAPE=y +CONFIG_PCIEPORTBUS=y +CONFIG_PCIEAER=y +# CONFIG_PCIE_ECRC is not set +# CONFIG_PCIEAER_INJECT is not set +CONFIG_PCIEASPM=y +# CONFIG_PCIEASPM_DEBUG is not set +CONFIG_PCIEASPM_DEFAULT=y +# CONFIG_PCIEASPM_POWERSAVE is not set +# CONFIG_PCIEASPM_PERFORMANCE is not set +# CONFIG_HOTPLUG_PCI is not set + +# +# Kernel Features +# + +# +# ARM errata workarounds via the alternatives framework +# +CONFIG_ARM64_ERRATUM_826319=y +CONFIG_ARM64_ERRATUM_827319=y +CONFIG_ARM64_ERRATUM_824069=y +CONFIG_ARM64_ERRATUM_819472=y +CONFIG_ARM64_ERRATUM_832075=y +CONFIG_ARM64_ERRATUM_845719=y +CONFIG_ARM64_4K_PAGES=y +# CONFIG_ARM64_64K_PAGES is not set +# CONFIG_ARM64_VA_BITS_39 is not set +CONFIG_ARM64_VA_BITS_48=y +CONFIG_ARM64_VA_BITS=48 +CONFIG_ARM64_PGTABLE_LEVELS=4 +# CONFIG_CPU_BIG_ENDIAN is not set +CONFIG_SMP=y +# CONFIG_SCHED_MC is not set +# CONFIG_SCHED_SMT is not set +CONFIG_NR_CPUS=64 +CONFIG_HOTPLUG_CPU=y +# CONFIG_PREEMPT_NONE is not set +# CONFIG_PREEMPT_VOLUNTARY is not set +CONFIG_PREEMPT=y +CONFIG_PREEMPT_COUNT=y +CONFIG_HZ=100 +CONFIG_ARCH_HAS_HOLES_MEMORYMODEL=y +CONFIG_ARCH_SPARSEMEM_ENABLE=y 
+CONFIG_ARCH_SPARSEMEM_DEFAULT=y +CONFIG_ARCH_SELECT_MEMORY_MODEL=y +CONFIG_HAVE_ARCH_PFN_VALID=y +CONFIG_HW_PERF_EVENTS=y +CONFIG_SYS_SUPPORTS_HUGETLBFS=y +CONFIG_ARCH_WANT_GENERAL_HUGETLB=y +CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y +CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_SPARSEMEM_MANUAL=y +CONFIG_SPARSEMEM=y +CONFIG_HAVE_MEMORY_PRESENT=y +CONFIG_SPARSEMEM_EXTREME=y +CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y +CONFIG_SPARSEMEM_VMEMMAP=y +CONFIG_HAVE_MEMBLOCK=y +CONFIG_NO_BOOTMEM=y +CONFIG_MEMORY_ISOLATION=y +# CONFIG_HAVE_BOOTMEM_INFO_NODE is not set +CONFIG_PAGEFLAGS_EXTENDED=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_MEMORY_BALLOON=y +CONFIG_BALLOON_COMPACTION=y +CONFIG_COMPACTION=y +CONFIG_MIGRATION=y +CONFIG_PHYS_ADDR_T_64BIT=y +CONFIG_ZONE_DMA_FLAG=1 +CONFIG_BOUNCE=y +CONFIG_MMU_NOTIFIER=y +CONFIG_KSM=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y +# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set +# CONFIG_CLEANCACHE is not set +# CONFIG_FRONTSWAP is not set +CONFIG_CMA=y +# CONFIG_CMA_DEBUG is not set +CONFIG_CMA_AREAS=7 +# CONFIG_ZPOOL is not set +# CONFIG_ZBUD is not set +# CONFIG_ZSMALLOC is not set +CONFIG_GENERIC_EARLY_IOREMAP=y +# CONFIG_XEN is not set +CONFIG_FORCE_MAX_ZONEORDER=11 + +# +# Boot options +# +CONFIG_CMDLINE="console=ttyAMA0" +# CONFIG_CMDLINE_FORCE is not set +CONFIG_EFI_STUB=y +CONFIG_EFI=y + +# +# Userspace binary formats +# +CONFIG_BINFMT_ELF=y +CONFIG_COMPAT_BINFMT_ELF=y +CONFIG_ARCH_BINFMT_ELF_RANDOMIZE_PIE=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_BINFMT_SCRIPT=y +# CONFIG_HAVE_AOUT is not set +# CONFIG_BINFMT_MISC is not set +CONFIG_COREDUMP=y +CONFIG_COMPAT=y +CONFIG_SYSVIPC_COMPAT=y + +# +# Power management options +# +CONFIG_SUSPEND=y +CONFIG_SUSPEND_FREEZER=y +CONFIG_PM_SLEEP=y +CONFIG_PM_SLEEP_SMP=y +# CONFIG_PM_AUTOSLEEP is not set +# CONFIG_PM_WAKELOCKS is not set +# CONFIG_PM_RUNTIME is not set +CONFIG_PM=y +# CONFIG_PM_DEBUG is not set +CONFIG_PM_CLK=y +# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set +CONFIG_CPU_PM=y +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_ARM64_CPU_SUSPEND=y + +# +# CPU Power Management +# + +# +# CPU Idle +# +# CONFIG_CPU_IDLE is not set +# CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED is not set + +# +# CPU Frequency scaling +# +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_GOV_COMMON=y +CONFIG_CPU_FREQ_STAT=y +# CONFIG_CPU_FREQ_STAT_DETAILS is not set +CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +# CONFIG_CPUFREQ_DT is not set + +# +# ARM CPU frequency scaling drivers +# +# CONFIG_ARM_KIRKWOOD_CPUFREQ is not set +CONFIG_ARM64_ERRATUM_843419=y +CONFIG_NET=y + +# +# Networking options +# +CONFIG_PACKET=y +# CONFIG_PACKET_DIAG is not set +CONFIG_UNIX=y +# CONFIG_UNIX_DIAG is not set +CONFIG_XFRM=y +CONFIG_XFRM_ALGO=y +# CONFIG_XFRM_USER is not set +# CONFIG_XFRM_SUB_POLICY is not set +# CONFIG_XFRM_MIGRATE is not set +# CONFIG_XFRM_STATISTICS is not set +CONFIG_XFRM_IPCOMP=y +# CONFIG_NET_KEY is not set +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_FIB_TRIE_STATS=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +# CONFIG_IP_ROUTE_VERBOSE is not set +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y 
+CONFIG_IP_PNP_BOOTP=y +# CONFIG_IP_PNP_RARP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE_DEMUX is not set +CONFIG_NET_IP_TUNNEL=y +CONFIG_IP_MROUTE=y +# CONFIG_IP_MROUTE_MULTIPLE_TABLES is not set +# CONFIG_IP_PIMSM_V1 is not set +CONFIG_IP_PIMSM_V2=y +# CONFIG_SYN_COOKIES is not set +# CONFIG_NET_IPVTI is not set +# CONFIG_NET_UDP_TUNNEL is not set +# CONFIG_NET_FOU is not set +# CONFIG_GENEVE is not set +# CONFIG_INET_AH is not set +# CONFIG_INET_ESP is not set +# CONFIG_INET_IPCOMP is not set +# CONFIG_INET_XFRM_TUNNEL is not set +CONFIG_INET_TUNNEL=y +CONFIG_INET_XFRM_MODE_TRANSPORT=y +CONFIG_INET_XFRM_MODE_TUNNEL=y +CONFIG_INET_XFRM_MODE_BEET=y +# CONFIG_INET_LRO is not set +CONFIG_INET_DIAG=y +CONFIG_INET_TCP_DIAG=y +# CONFIG_INET_UDP_DIAG is not set +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_CONG_BIC=y +CONFIG_TCP_CONG_CUBIC=y +CONFIG_TCP_CONG_WESTWOOD=y +CONFIG_TCP_CONG_HTCP=y +# CONFIG_TCP_CONG_HSTCP is not set +# CONFIG_TCP_CONG_HYBLA is not set +# CONFIG_TCP_CONG_VEGAS is not set +# CONFIG_TCP_CONG_SCALABLE is not set +# CONFIG_TCP_CONG_LP is not set +# CONFIG_TCP_CONG_VENO is not set +# CONFIG_TCP_CONG_YEAH is not set +# CONFIG_TCP_CONG_ILLINOIS is not set +# CONFIG_TCP_CONG_DCTCP is not set +# CONFIG_DEFAULT_BIC is not set +CONFIG_DEFAULT_CUBIC=y +# CONFIG_DEFAULT_HTCP is not set +# CONFIG_DEFAULT_WESTWOOD is not set +# CONFIG_DEFAULT_RENO is not set +CONFIG_DEFAULT_TCP_CONG="cubic" +# CONFIG_TCP_MD5SIG is not set +CONFIG_IPV6=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=y +CONFIG_INET6_ESP=y +CONFIG_INET6_IPCOMP=y +CONFIG_IPV6_MIP6=y +CONFIG_INET6_XFRM_TUNNEL=y +CONFIG_INET6_TUNNEL=y +CONFIG_INET6_XFRM_MODE_TRANSPORT=y +CONFIG_INET6_XFRM_MODE_TUNNEL=y +CONFIG_INET6_XFRM_MODE_BEET=y +CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=y +# CONFIG_IPV6_VTI is not set +CONFIG_IPV6_SIT=y +# CONFIG_IPV6_SIT_6RD is not set +CONFIG_IPV6_NDISC_NODETYPE=y +CONFIG_IPV6_TUNNEL=y +# CONFIG_IPV6_GRE is not set +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_IPV6_SUBTREES=y +# CONFIG_IPV6_MROUTE is not set +# CONFIG_NETLABEL is not set +# CONFIG_NETWORK_SECMARK is not set +CONFIG_NET_PTP_CLASSIFY=y +# CONFIG_NETWORK_PHY_TIMESTAMPING is not set +# CONFIG_NETFILTER is not set +# CONFIG_IP_DCCP is not set +# CONFIG_IP_SCTP is not set +# CONFIG_RDS is not set +# CONFIG_TIPC is not set +# CONFIG_ATM is not set +# CONFIG_L2TP is not set +CONFIG_STP=m +CONFIG_BRIDGE=m +CONFIG_BRIDGE_IGMP_SNOOPING=y +# CONFIG_BRIDGE_VLAN_FILTERING is not set +CONFIG_HAVE_NET_DSA=y +CONFIG_VLAN_8021Q=y +# CONFIG_VLAN_8021Q_GVRP is not set +# CONFIG_VLAN_8021Q_MVRP is not set +# CONFIG_DECNET is not set +CONFIG_LLC=m +# CONFIG_LLC2 is not set +# CONFIG_IPX is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_PHONET is not set +# CONFIG_6LOWPAN is not set +# CONFIG_IEEE802154 is not set +CONFIG_NET_SCHED=y + +# +# Queueing/Scheduling +# +# CONFIG_NET_SCH_CBQ is not set +# CONFIG_NET_SCH_HTB is not set +# CONFIG_NET_SCH_HFSC is not set +# CONFIG_NET_SCH_PRIO is not set +# CONFIG_NET_SCH_MULTIQ is not set +# CONFIG_NET_SCH_RED is not set +# CONFIG_NET_SCH_SFB is not set +# CONFIG_NET_SCH_SFQ is not set +# CONFIG_NET_SCH_TEQL is not set +# CONFIG_NET_SCH_TBF is not set +# CONFIG_NET_SCH_GRED is not set +# CONFIG_NET_SCH_DSMARK is not set +# CONFIG_NET_SCH_NETEM is not set +# CONFIG_NET_SCH_DRR is not set +# CONFIG_NET_SCH_MQPRIO is not set +# CONFIG_NET_SCH_CHOKE is not set +# CONFIG_NET_SCH_QFQ is not set +# 
CONFIG_NET_SCH_CODEL is not set +# CONFIG_NET_SCH_FQ_CODEL is not set +# CONFIG_NET_SCH_FQ is not set +# CONFIG_NET_SCH_HHF is not set +# CONFIG_NET_SCH_PIE is not set +# CONFIG_NET_SCH_PLUG is not set + +# +# Classification +# +# CONFIG_NET_CLS_BASIC is not set +# CONFIG_NET_CLS_TCINDEX is not set +# CONFIG_NET_CLS_ROUTE4 is not set +# CONFIG_NET_CLS_FW is not set +# CONFIG_NET_CLS_U32 is not set +# CONFIG_NET_CLS_RSVP is not set +# CONFIG_NET_CLS_RSVP6 is not set +# CONFIG_NET_CLS_FLOW is not set +# CONFIG_NET_CLS_CGROUP is not set +# CONFIG_NET_CLS_BPF is not set +# CONFIG_NET_EMATCH is not set +# CONFIG_NET_CLS_ACT is not set +CONFIG_NET_SCH_FIFO=y +CONFIG_DCB=y +CONFIG_DNS_RESOLVER=y +# CONFIG_BATMAN_ADV is not set +# CONFIG_OPENVSWITCH is not set +# CONFIG_VSOCKETS is not set +# CONFIG_NETLINK_MMAP is not set +# CONFIG_NETLINK_DIAG is not set +# CONFIG_NET_MPLS_GSO is not set +# CONFIG_HSR is not set +CONFIG_RPS=y +CONFIG_RFS_ACCEL=y +CONFIG_XPS=y +# CONFIG_CGROUP_NET_PRIO is not set +# CONFIG_CGROUP_NET_CLASSID is not set +CONFIG_NET_RX_BUSY_POLL=y +CONFIG_BQL=y +CONFIG_BPF_JIT=y +CONFIG_NET_FLOW_LIMIT=y + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_HAMRADIO is not set +# CONFIG_CAN is not set +# CONFIG_IRDA is not set +# CONFIG_BT is not set +# CONFIG_AF_RXRPC is not set +CONFIG_FIB_RULES=y +# CONFIG_WIRELESS is not set +# CONFIG_WIMAX is not set +# CONFIG_RFKILL is not set +# CONFIG_RFKILL_REGULATOR is not set +CONFIG_NET_9P=y +CONFIG_NET_9P_VIRTIO=y +# CONFIG_NET_9P_DEBUG is not set +# CONFIG_CAIF is not set +# CONFIG_CEPH_LIB is not set +# CONFIG_NFC is not set +CONFIG_HAVE_BPF_JIT=y + +# +# Device Drivers +# + +# +# Generic Driver Options +# +CONFIG_UEVENT_HELPER=y +CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y +CONFIG_FW_LOADER=y +CONFIG_FIRMWARE_IN_KERNEL=y +CONFIG_EXTRA_FIRMWARE="" +# CONFIG_FW_LOADER_USER_HELPER_FALLBACK is not set +CONFIG_ALLOW_DEV_COREDUMP=y +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_SYS_HYPERVISOR is not set +# CONFIG_GENERIC_CPU_DEVICES is not set +CONFIG_GENERIC_CPU_AUTOPROBE=y +CONFIG_REGMAP=y +CONFIG_REGMAP_MMIO=y +# CONFIG_DMA_SHARED_BUFFER is not set +CONFIG_DMA_CMA=y + +# +# Default contiguous memory area size: +# +CONFIG_CMA_SIZE_MBYTES=16 +CONFIG_CMA_SIZE_SEL_MBYTES=y +# CONFIG_CMA_SIZE_SEL_PERCENTAGE is not set +# CONFIG_CMA_SIZE_SEL_MIN is not set +# CONFIG_CMA_SIZE_SEL_MAX is not set +CONFIG_CMA_ALIGNMENT=8 + +# +# Bus devices +# +# CONFIG_ARM_CCN is not set +CONFIG_VEXPRESS_CONFIG=y +# CONFIG_CONNECTOR is not set +CONFIG_MTD=y +# CONFIG_MTD_TESTS is not set +# CONFIG_MTD_REDBOOT_PARTS is not set +CONFIG_MTD_CMDLINE_PARTS=y +CONFIG_MTD_OF_PARTS=y +# CONFIG_MTD_AR7_PARTS is not set + +# +# User Modules And Translation Layers +# +CONFIG_MTD_BLKDEVS=y +CONFIG_MTD_BLOCK=y +CONFIG_FTL=y +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +# CONFIG_RFD_FTL is not set +# CONFIG_SSFDC is not set +# CONFIG_SM_FTL is not set +# CONFIG_MTD_OOPS is not set +# CONFIG_MTD_SWAP is not set + +# +# RAM/ROM/Flash chip drivers +# +CONFIG_MTD_CFI=y +# CONFIG_MTD_JEDECPROBE is not set +CONFIG_MTD_GEN_PROBE=y +CONFIG_MTD_CFI_ADV_OPTIONS=y +CONFIG_MTD_CFI_NOSWAP=y +# CONFIG_MTD_CFI_BE_BYTE_SWAP is not set +# CONFIG_MTD_CFI_LE_BYTE_SWAP is not set +# CONFIG_MTD_CFI_GEOMETRY is not set +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +# CONFIG_MTD_MAP_BANK_WIDTH_8 is not 
set +# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +# CONFIG_MTD_CFI_I4 is not set +# CONFIG_MTD_CFI_I8 is not set +# CONFIG_MTD_OTP is not set +CONFIG_MTD_CFI_INTELEXT=y +CONFIG_MTD_CFI_AMDSTD=y +CONFIG_MTD_CFI_STAA=y +CONFIG_MTD_CFI_UTIL=y +CONFIG_MTD_RAM=y +# CONFIG_MTD_ROM is not set +# CONFIG_MTD_ABSENT is not set + +# +# Mapping drivers for chip access +# +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +CONFIG_MTD_PHYSMAP=y +# CONFIG_MTD_PHYSMAP_COMPAT is not set +CONFIG_MTD_PHYSMAP_OF=y +# CONFIG_MTD_INTEL_VR_NOR is not set +CONFIG_MTD_PLATRAM=y + +# +# Self-contained MTD device drivers +# +# CONFIG_MTD_PMC551 is not set +# CONFIG_MTD_DATAFLASH is not set +CONFIG_MTD_M25P80=y +# CONFIG_MTD_SST25L is not set +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLOCK2MTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOCG3 is not set +CONFIG_MTD_NAND_ECC=y +# CONFIG_MTD_NAND_ECC_SMC is not set +CONFIG_MTD_NAND=y +# CONFIG_MTD_NAND_ECC_BCH is not set +# CONFIG_MTD_SM_COMMON is not set +# CONFIG_MTD_NAND_DENALI is not set +CONFIG_MTD_NAND_GPIO=y +# CONFIG_MTD_NAND_OMAP_BCH_BUILD is not set +CONFIG_MTD_NAND_IDS=y +# CONFIG_MTD_NAND_RICOH is not set +# CONFIG_MTD_NAND_DISKONCHIP is not set +# CONFIG_MTD_NAND_DOCG4 is not set +# CONFIG_MTD_NAND_CAFE is not set +# CONFIG_MTD_NAND_NANDSIM is not set +# CONFIG_MTD_NAND_PLATFORM is not set +# CONFIG_MTD_ONENAND is not set + +# +# LPDDR & LPDDR2 PCM memory drivers +# +# CONFIG_MTD_LPDDR is not set +CONFIG_MTD_SPI_NOR=y +CONFIG_MTD_SPI_NOR_USE_4K_SECTORS=y +# CONFIG_MTD_UBI is not set +CONFIG_DTC=y +CONFIG_OF=y + +# +# Device Tree and Open Firmware support +# +# CONFIG_OF_SELFTEST is not set +CONFIG_OF_FLATTREE=y +CONFIG_OF_EARLY_FLATTREE=y +CONFIG_OF_ADDRESS=y +CONFIG_OF_ADDRESS_PCI=y +CONFIG_OF_IRQ=y +CONFIG_OF_NET=y +CONFIG_OF_MDIO=y +CONFIG_OF_PCI=y +CONFIG_OF_PCI_IRQ=y +CONFIG_OF_MTD=y +CONFIG_OF_RESERVED_MEM=y +# CONFIG_PARPORT is not set +CONFIG_BLK_DEV=y +# CONFIG_BLK_DEV_NULL_BLK is not set +# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set +# CONFIG_BLK_CPQ_CISS_DA is not set +# CONFIG_BLK_DEV_DAC960 is not set +# CONFIG_BLK_DEV_UMEM is not set +# CONFIG_BLK_DEV_COW_COMMON is not set +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_LOOP_MIN_COUNT=8 +# CONFIG_BLK_DEV_CRYPTOLOOP is not set +# CONFIG_BLK_DEV_DRBD is not set +# CONFIG_BLK_DEV_NBD is not set +# CONFIG_BLK_DEV_NVME is not set +# CONFIG_BLK_DEV_SKD is not set +# CONFIG_BLK_DEV_SX8 is not set +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_COUNT=16 +CONFIG_BLK_DEV_RAM_SIZE=262144 +# CONFIG_BLK_DEV_XIP is not set +# CONFIG_CDROM_PKTCDVD is not set +# CONFIG_ATA_OVER_ETH is not set +CONFIG_VIRTIO_BLK=y +# CONFIG_BLK_DEV_RBD is not set +# CONFIG_BLK_DEV_RSXX is not set + +# +# Misc devices +# +# CONFIG_SENSORS_LIS3LV02D is not set +# CONFIG_AD525X_DPOT is not set +# CONFIG_DUMMY_IRQ is not set +# CONFIG_PHANTOM is not set +# CONFIG_SGI_IOC4 is not set +# CONFIG_TIFM_CORE is not set +# CONFIG_ICS932S401 is not set +# CONFIG_ENCLOSURE_SERVICES is not set +# CONFIG_HP_ILO is not set +# CONFIG_APDS9802ALS is not set +# CONFIG_ISL29003 is not set +# CONFIG_ISL29020 is not set +# CONFIG_SENSORS_TSL2550 is not set +# CONFIG_SENSORS_BH1780 is not set +# CONFIG_SENSORS_BH1770 is not set +# CONFIG_SENSORS_APDS990X is not set +# CONFIG_HMC6352 is not set +# CONFIG_DS1682 is not set +# CONFIG_TI_DAC7512 is not set +# CONFIG_BMP085_I2C is not set +# CONFIG_BMP085_SPI 
is not set +# CONFIG_USB_SWITCH_FSA9480 is not set +# CONFIG_LATTICE_ECP3_CONFIG is not set +# CONFIG_SRAM is not set +CONFIG_VEXPRESS_SYSCFG=y +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +CONFIG_EEPROM_AT24=y +CONFIG_EEPROM_AT25=y +# CONFIG_EEPROM_LEGACY is not set +# CONFIG_EEPROM_MAX6875 is not set +# CONFIG_EEPROM_93CX6 is not set +# CONFIG_EEPROM_93XX46 is not set +# CONFIG_CB710_CORE is not set + +# +# Texas Instruments shared transport line discipline +# +# CONFIG_TI_ST is not set +# CONFIG_SENSORS_LIS3_SPI is not set +# CONFIG_SENSORS_LIS3_I2C is not set + +# +# Altera FPGA firmware download module +# +# CONFIG_ALTERA_STAPL is not set + +# +# Intel MIC Bus Driver +# + +# +# Intel MIC Host Driver +# + +# +# Intel MIC Card Driver +# +# CONFIG_GENWQE is not set +# CONFIG_ECHO is not set +# CONFIG_CXL_BASE is not set + +# +# SCSI device support +# +CONFIG_SCSI_MOD=y +# CONFIG_RAID_ATTRS is not set +CONFIG_SCSI=y +CONFIG_SCSI_DMA=y +# CONFIG_SCSI_NETLINK is not set +# CONFIG_SCSI_MQ_DEFAULT is not set +CONFIG_SCSI_PROC_FS=y + +# +# SCSI support type (disk, tape, CD-ROM) +# +CONFIG_BLK_DEV_SD=y +# CONFIG_CHR_DEV_ST is not set +# CONFIG_CHR_DEV_OSST is not set +# CONFIG_BLK_DEV_SR is not set +# CONFIG_CHR_DEV_SG is not set +# CONFIG_CHR_DEV_SCH is not set +# CONFIG_SCSI_CONSTANTS is not set +# CONFIG_SCSI_LOGGING is not set +# CONFIG_SCSI_SCAN_ASYNC is not set + +# +# SCSI Transports +# +# CONFIG_SCSI_SPI_ATTRS is not set +# CONFIG_SCSI_FC_ATTRS is not set +# CONFIG_SCSI_ISCSI_ATTRS is not set +# CONFIG_SCSI_SAS_ATTRS is not set +# CONFIG_SCSI_SAS_LIBSAS is not set +# CONFIG_SCSI_SRP_ATTRS is not set +# CONFIG_SCSI_LOWLEVEL is not set +# CONFIG_SCSI_LOWLEVEL_PCMCIA is not set +# CONFIG_SCSI_DH is not set +# CONFIG_SCSI_OSD_INITIATOR is not set +CONFIG_HAVE_PATA_PLATFORM=y +CONFIG_ATA=y +# CONFIG_ATA_NONSTANDARD is not set +CONFIG_ATA_VERBOSE_ERROR=y +CONFIG_SATA_PMP=y + +# +# Controllers with non-SFF native interface +# +CONFIG_SATA_AHCI=y +CONFIG_SATA_AHCI_PLATFORM=y +CONFIG_AHCI_XGENE=y +# CONFIG_SATA_INIC162X is not set +# CONFIG_SATA_ACARD_AHCI is not set +# CONFIG_SATA_SIL24 is not set +CONFIG_ATA_SFF=y + +# +# SFF controllers with custom DMA interface +# +# CONFIG_PDC_ADMA is not set +# CONFIG_SATA_QSTOR is not set +# CONFIG_SATA_SX4 is not set +CONFIG_ATA_BMDMA=y + +# +# SATA SFF controllers with BMDMA +# +# CONFIG_ATA_PIIX is not set +# CONFIG_SATA_MV is not set +# CONFIG_SATA_NV is not set +# CONFIG_SATA_PROMISE is not set +# CONFIG_SATA_SIL is not set +# CONFIG_SATA_SIS is not set +# CONFIG_SATA_SVW is not set +# CONFIG_SATA_ULI is not set +# CONFIG_SATA_VIA is not set +# CONFIG_SATA_VITESSE is not set + +# +# PATA SFF controllers with BMDMA +# +# CONFIG_PATA_ALI is not set +# CONFIG_PATA_AMD is not set +# CONFIG_PATA_ARTOP is not set +# CONFIG_PATA_ATIIXP is not set +# CONFIG_PATA_ATP867X is not set +# CONFIG_PATA_CMD64X is not set +# CONFIG_PATA_CYPRESS is not set +# CONFIG_PATA_EFAR is not set +# CONFIG_PATA_HPT366 is not set +# CONFIG_PATA_HPT37X is not set +# CONFIG_PATA_HPT3X2N is not set +# CONFIG_PATA_HPT3X3 is not set +# CONFIG_PATA_IT8213 is not set +# CONFIG_PATA_IT821X is not set +# CONFIG_PATA_JMICRON is not set +# CONFIG_PATA_MARVELL is not set +# CONFIG_PATA_NETCELL is not set +# CONFIG_PATA_NINJA32 is not set +# CONFIG_PATA_NS87415 is not set +# CONFIG_PATA_OLDPIIX is not set +# CONFIG_PATA_OPTIDMA is not set +# CONFIG_PATA_PDC2027X is not set +# CONFIG_PATA_PDC_OLD is not set +# CONFIG_PATA_RADISYS is not set +# CONFIG_PATA_RDC is not set +# 
CONFIG_PATA_SCH is not set +# CONFIG_PATA_SERVERWORKS is not set +# CONFIG_PATA_SIL680 is not set +# CONFIG_PATA_SIS is not set +# CONFIG_PATA_TOSHIBA is not set +# CONFIG_PATA_TRIFLEX is not set +# CONFIG_PATA_VIA is not set +# CONFIG_PATA_WINBOND is not set + +# +# PIO-only SFF controllers +# +# CONFIG_PATA_CMD640_PCI is not set +# CONFIG_PATA_MPIIX is not set +# CONFIG_PATA_NS87410 is not set +# CONFIG_PATA_OPTI is not set +# CONFIG_PATA_PLATFORM is not set +# CONFIG_PATA_RZ1000 is not set + +# +# Generic fallback / legacy drivers +# +# CONFIG_ATA_GENERIC is not set +# CONFIG_PATA_LEGACY is not set +# CONFIG_MD is not set +# CONFIG_TARGET_CORE is not set +# CONFIG_FUSION is not set + +# +# IEEE 1394 (FireWire) support +# +# CONFIG_FIREWIRE is not set +# CONFIG_FIREWIRE_NOSY is not set +# CONFIG_I2O is not set +CONFIG_NETDEVICES=y +CONFIG_MII=y +CONFIG_NET_CORE=y +# CONFIG_BONDING is not set +# CONFIG_DUMMY is not set +# CONFIG_EQUALIZER is not set +# CONFIG_NET_FC is not set +# CONFIG_NET_TEAM is not set +CONFIG_MACVLAN=y +# CONFIG_MACVTAP is not set +# CONFIG_VXLAN is not set +# CONFIG_NETCONSOLE is not set +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set +CONFIG_TUN=y +# CONFIG_VETH is not set +CONFIG_VIRTIO_NET=y +# CONFIG_NLMON is not set +# CONFIG_ARCNET is not set + +# +# CAIF transport drivers +# + +# +# Distributed Switch Architecture drivers +# +# CONFIG_NET_DSA_MV88E6XXX is not set +# CONFIG_NET_DSA_MV88E6060 is not set +# CONFIG_NET_DSA_MV88E6XXX_NEED_PPU is not set +# CONFIG_NET_DSA_MV88E6131 is not set +# CONFIG_NET_DSA_MV88E6123_61_65 is not set +# CONFIG_NET_DSA_MV88E6171 is not set +# CONFIG_NET_DSA_BCM_SF2 is not set +CONFIG_ETHERNET=y +CONFIG_NET_VENDOR_3COM=y +# CONFIG_VORTEX is not set +# CONFIG_TYPHOON is not set +CONFIG_NET_VENDOR_ADAPTEC=y +# CONFIG_ADAPTEC_STARFIRE is not set +CONFIG_NET_VENDOR_AGERE=y +# CONFIG_ET131X is not set +CONFIG_NET_VENDOR_ALTEON=y +# CONFIG_ACENIC is not set +# CONFIG_ALTERA_TSE is not set +CONFIG_NET_VENDOR_AMD=y +# CONFIG_AMD8111_ETH is not set +# CONFIG_PCNET32 is not set +# CONFIG_AMD_XGBE is not set +CONFIG_NET_XGENE=y +CONFIG_NET_VENDOR_ARC=y +# CONFIG_ARC_EMAC is not set +# CONFIG_EMAC_ROCKCHIP is not set +CONFIG_NET_VENDOR_ATHEROS=y +# CONFIG_ATL2 is not set +# CONFIG_ATL1 is not set +# CONFIG_ATL1E is not set +# CONFIG_ATL1C is not set +# CONFIG_ALX is not set +CONFIG_NET_VENDOR_BROADCOM=y +# CONFIG_B44 is not set +# CONFIG_BCMGENET is not set +# CONFIG_BNX2 is not set +# CONFIG_CNIC is not set +# CONFIG_TIGON3 is not set +# CONFIG_BNX2X is not set +# CONFIG_SYSTEMPORT is not set +CONFIG_NET_VENDOR_BROCADE=y +# CONFIG_BNA is not set +CONFIG_NET_VENDOR_CHELSIO=y +# CONFIG_CHELSIO_T1 is not set +# CONFIG_CHELSIO_T3 is not set +# CONFIG_CHELSIO_T4 is not set +# CONFIG_CHELSIO_T4VF is not set +CONFIG_NET_VENDOR_CISCO=y +# CONFIG_ENIC is not set +# CONFIG_DNET is not set +CONFIG_NET_VENDOR_DEC=y +# CONFIG_NET_TULIP is not set +CONFIG_NET_VENDOR_DLINK=y +# CONFIG_DL2K is not set +# CONFIG_SUNDANCE is not set +CONFIG_NET_VENDOR_EMULEX=y +# CONFIG_BE2NET is not set +CONFIG_NET_VENDOR_EXAR=y +# CONFIG_S2IO is not set +# CONFIG_VXGE is not set +CONFIG_NET_VENDOR_HP=y +# CONFIG_HP100 is not set +CONFIG_NET_VENDOR_INTEL=y +# CONFIG_E100 is not set +CONFIG_E1000=y +CONFIG_E1000E=y +# CONFIG_IGB is not set +# CONFIG_IGBVF is not set +# CONFIG_IXGB is not set +# CONFIG_IXGBE is not set +# CONFIG_IXGBEVF is not set +# CONFIG_I40E is not set +# CONFIG_I40EVF is not set +# CONFIG_FM10K is not set +CONFIG_NET_VENDOR_I825XX=y 
+# CONFIG_IP1000 is not set +# CONFIG_JME is not set +CONFIG_NET_VENDOR_MARVELL=y +# CONFIG_MVMDIO is not set +# CONFIG_SKGE is not set +# CONFIG_SKY2 is not set +CONFIG_NET_VENDOR_MELLANOX=y +# CONFIG_MLX4_EN is not set +# CONFIG_MLX4_CORE is not set +# CONFIG_MLX5_CORE is not set +CONFIG_NET_VENDOR_MICREL=y +# CONFIG_KS8842 is not set +# CONFIG_KS8851 is not set +# CONFIG_KS8851_MLL is not set +# CONFIG_KSZ884X_PCI is not set +CONFIG_NET_VENDOR_MICROCHIP=y +# CONFIG_ENC28J60 is not set +CONFIG_NET_VENDOR_MYRI=y +# CONFIG_MYRI10GE is not set +# CONFIG_FEALNX is not set +CONFIG_NET_VENDOR_NATSEMI=y +# CONFIG_NATSEMI is not set +# CONFIG_NS83820 is not set +CONFIG_NET_VENDOR_8390=y +# CONFIG_NE2K_PCI is not set +CONFIG_NET_VENDOR_NVIDIA=y +# CONFIG_FORCEDETH is not set +CONFIG_NET_VENDOR_OKI=y +# CONFIG_ETHOC is not set +CONFIG_NET_PACKET_ENGINE=y +# CONFIG_HAMACHI is not set +# CONFIG_YELLOWFIN is not set +CONFIG_NET_VENDOR_QLOGIC=y +# CONFIG_QLA3XXX is not set +# CONFIG_QLCNIC is not set +# CONFIG_QLGE is not set +# CONFIG_NETXEN_NIC is not set +CONFIG_NET_VENDOR_QUALCOMM=y +# CONFIG_QCA7000 is not set +CONFIG_NET_VENDOR_REALTEK=y +# CONFIG_8139CP is not set +# CONFIG_8139TOO is not set +# CONFIG_R8169 is not set +CONFIG_NET_VENDOR_RDC=y +# CONFIG_R6040 is not set +CONFIG_NET_VENDOR_SAMSUNG=y +# CONFIG_SXGBE_ETH is not set +CONFIG_NET_VENDOR_SEEQ=y +CONFIG_NET_VENDOR_SILAN=y +# CONFIG_SC92031 is not set +CONFIG_NET_VENDOR_SIS=y +# CONFIG_SIS900 is not set +# CONFIG_SIS190 is not set +# CONFIG_SFC is not set +CONFIG_NET_VENDOR_SMSC=y +CONFIG_SMC91X=y +# CONFIG_EPIC100 is not set +CONFIG_SMSC911X=y +# CONFIG_SMSC911X_ARCH_HOOKS is not set +# CONFIG_SMSC9420 is not set +CONFIG_NET_VENDOR_STMICRO=y +# CONFIG_STMMAC_ETH is not set +CONFIG_NET_VENDOR_SUN=y +# CONFIG_HAPPYMEAL is not set +# CONFIG_SUNGEM is not set +# CONFIG_CASSINI is not set +# CONFIG_NIU is not set +CONFIG_NET_VENDOR_TEHUTI=y +# CONFIG_TEHUTI is not set +CONFIG_NET_VENDOR_TI=y +# CONFIG_TLAN is not set +CONFIG_NET_VENDOR_VIA=y +# CONFIG_VIA_RHINE is not set +# CONFIG_VIA_VELOCITY is not set +CONFIG_NET_VENDOR_WIZNET=y +# CONFIG_WIZNET_W5100 is not set +# CONFIG_WIZNET_W5300 is not set +# CONFIG_FDDI is not set +# CONFIG_HIPPI is not set +CONFIG_PHYLIB=y + +# +# MII PHY device drivers +# +# CONFIG_AT803X_PHY is not set +# CONFIG_AMD_PHY is not set +# CONFIG_AMD_XGBE_PHY is not set +# CONFIG_MARVELL_PHY is not set +# CONFIG_DAVICOM_PHY is not set +# CONFIG_QSEMI_PHY is not set +# CONFIG_LXT_PHY is not set +# CONFIG_CICADA_PHY is not set +CONFIG_VITESSE_PHY=y +CONFIG_SMSC_PHY=y +CONFIG_BROADCOM_PHY=y +# CONFIG_BCM7XXX_PHY is not set +# CONFIG_BCM87XX_PHY is not set +# CONFIG_ICPLUS_PHY is not set +CONFIG_REALTEK_PHY=y +# CONFIG_NATIONAL_PHY is not set +# CONFIG_STE10XP is not set +# CONFIG_LSI_ET1011C_PHY is not set +# CONFIG_MICREL_PHY is not set +CONFIG_FIXED_PHY=y +# CONFIG_MDIO_BITBANG is not set +CONFIG_MDIO_BUS_MUX=y +# CONFIG_MDIO_BUS_MUX_GPIO is not set +CONFIG_MDIO_BUS_MUX_MMIOREG=y +# CONFIG_MDIO_BCM_UNIMAC is not set +# CONFIG_MICREL_KS8995MA is not set +# CONFIG_PPP is not set +# CONFIG_SLIP is not set +CONFIG_USB_NET_DRIVERS=y +# CONFIG_USB_CATC is not set +# CONFIG_USB_KAWETH is not set +# CONFIG_USB_PEGASUS is not set +# CONFIG_USB_RTL8150 is not set +# CONFIG_USB_RTL8152 is not set +# CONFIG_USB_USBNET is not set +# CONFIG_USB_IPHETH is not set +# CONFIG_WLAN is not set + +# +# Enable WiMAX (Networking options) to see the WiMAX drivers +# +# CONFIG_WAN is not set +# CONFIG_VMXNET3 is not set +# CONFIG_ISDN is not 
set + +# +# Input device support +# +CONFIG_INPUT=y +# CONFIG_INPUT_FF_MEMLESS is not set +# CONFIG_INPUT_POLLDEV is not set +# CONFIG_INPUT_SPARSEKMAP is not set +# CONFIG_INPUT_MATRIXKMAP is not set + +# +# Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_MOUSEDEV_PSAUX=y +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +# CONFIG_INPUT_JOYDEV is not set +CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +CONFIG_INPUT_KEYBOARD=y +# CONFIG_KEYBOARD_ADP5588 is not set +# CONFIG_KEYBOARD_ADP5589 is not set +CONFIG_KEYBOARD_ATKBD=y +# CONFIG_KEYBOARD_QT1070 is not set +# CONFIG_KEYBOARD_QT2160 is not set +# CONFIG_KEYBOARD_LKKBD is not set +# CONFIG_KEYBOARD_GPIO is not set +# CONFIG_KEYBOARD_GPIO_POLLED is not set +# CONFIG_KEYBOARD_TCA6416 is not set +# CONFIG_KEYBOARD_TCA8418 is not set +# CONFIG_KEYBOARD_MATRIX is not set +# CONFIG_KEYBOARD_LM8333 is not set +# CONFIG_KEYBOARD_MAX7359 is not set +# CONFIG_KEYBOARD_MCS is not set +# CONFIG_KEYBOARD_MPR121 is not set +# CONFIG_KEYBOARD_NEWTON is not set +# CONFIG_KEYBOARD_OPENCORES is not set +# CONFIG_KEYBOARD_SAMSUNG is not set +# CONFIG_KEYBOARD_STOWAWAY is not set +# CONFIG_KEYBOARD_SUNKBD is not set +# CONFIG_KEYBOARD_OMAP4 is not set +# CONFIG_KEYBOARD_XTKBD is not set +# CONFIG_KEYBOARD_CAP1106 is not set +CONFIG_INPUT_MOUSE=y +CONFIG_MOUSE_PS2=y +CONFIG_MOUSE_PS2_ALPS=y +CONFIG_MOUSE_PS2_LOGIPS2PP=y +CONFIG_MOUSE_PS2_SYNAPTICS=y +CONFIG_MOUSE_PS2_CYPRESS=y +CONFIG_MOUSE_PS2_TRACKPOINT=y +# CONFIG_MOUSE_PS2_ELANTECH is not set +# CONFIG_MOUSE_PS2_SENTELIC is not set +# CONFIG_MOUSE_PS2_TOUCHKIT is not set +# CONFIG_MOUSE_SERIAL is not set +# CONFIG_MOUSE_APPLETOUCH is not set +# CONFIG_MOUSE_BCM5974 is not set +# CONFIG_MOUSE_CYAPA is not set +# CONFIG_MOUSE_VSXXXAA is not set +# CONFIG_MOUSE_GPIO is not set +# CONFIG_MOUSE_SYNAPTICS_I2C is not set +# CONFIG_MOUSE_SYNAPTICS_USB is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TABLET is not set +# CONFIG_INPUT_TOUCHSCREEN is not set +# CONFIG_INPUT_MISC is not set + +# +# Hardware I/O ports +# +CONFIG_SERIO=y +# CONFIG_SERIO_SERPORT is not set +CONFIG_SERIO_AMBAKMI=y +# CONFIG_SERIO_PCIPS2 is not set +CONFIG_SERIO_LIBPS2=y +# CONFIG_SERIO_RAW is not set +# CONFIG_SERIO_ALTERA_PS2 is not set +# CONFIG_SERIO_PS2MULT is not set +# CONFIG_SERIO_ARC_PS2 is not set +# CONFIG_SERIO_APBPS2 is not set +# CONFIG_GAMEPORT is not set + +# +# Character devices +# +CONFIG_TTY=y +CONFIG_VT=y +CONFIG_CONSOLE_TRANSLATIONS=y +CONFIG_VT_CONSOLE=y +CONFIG_VT_CONSOLE_SLEEP=y +CONFIG_HW_CONSOLE=y +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_UNIX98_PTYS=y +# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set +CONFIG_LEGACY_PTYS=y +CONFIG_LEGACY_PTY_COUNT=16 +# CONFIG_SERIAL_NONSTANDARD is not set +# CONFIG_NOZOMI is not set +# CONFIG_N_GSM is not set +# CONFIG_TRACE_SINK is not set +CONFIG_DEVKMEM=y + +# +# Serial drivers +# +CONFIG_SERIAL_EARLYCON=y +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_DEPRECATED_OPTIONS=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_DMA=y +CONFIG_SERIAL_8250_PCI=y +CONFIG_SERIAL_8250_NR_UARTS=4 +CONFIG_SERIAL_8250_RUNTIME_UARTS=4 +# CONFIG_SERIAL_8250_EXTENDED is not set +# CONFIG_SERIAL_8250_DW is not set + +# +# Non-8250 serial port support +# +# CONFIG_SERIAL_AMBA_PL010 is not set +CONFIG_SERIAL_AMBA_PL011=y +CONFIG_SERIAL_AMBA_PL011_CONSOLE=y +# CONFIG_SERIAL_EARLYCON_ARM_SEMIHOST is not set +# CONFIG_SERIAL_MAX3100 is not set +# CONFIG_SERIAL_MAX310X is not set +# CONFIG_SERIAL_MFD_HSU is not set 
+CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +# CONFIG_SERIAL_JSM is not set +CONFIG_SERIAL_OF_PLATFORM=y +# CONFIG_SERIAL_SCCNXP is not set +# CONFIG_SERIAL_SC16IS7XX is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set +# CONFIG_SERIAL_IFX6X60 is not set +# CONFIG_SERIAL_XILINX_PS_UART is not set +# CONFIG_SERIAL_ARC is not set +# CONFIG_SERIAL_RP2 is not set +# CONFIG_SERIAL_FSL_LPUART is not set +CONFIG_HVC_DRIVER=y +CONFIG_VIRTIO_CONSOLE=y +# CONFIG_IPMI_HANDLER is not set +# CONFIG_HW_RANDOM is not set +# CONFIG_R3964 is not set +# CONFIG_APPLICOM is not set + +# +# PCMCIA character devices +# +# CONFIG_RAW_DRIVER is not set +# CONFIG_TCG_TPM is not set +CONFIG_DEVPORT=y +# CONFIG_XILLYBUS is not set + +# +# I2C support +# +CONFIG_I2C=y +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_MUX=y + +# +# Multiplexer I2C Chip support +# +# CONFIG_I2C_ARB_GPIO_CHALLENGE is not set +# CONFIG_I2C_MUX_GPIO is not set +# CONFIG_I2C_MUX_PCA9541 is not set +CONFIG_I2C_MUX_PCA954x=y +CONFIG_I2C_HELPER_AUTO=y + +# +# I2C Hardware Bus support +# + +# +# PC SMBus host controller drivers +# +# CONFIG_I2C_ALI1535 is not set +# CONFIG_I2C_ALI1563 is not set +# CONFIG_I2C_ALI15X3 is not set +# CONFIG_I2C_AMD756 is not set +# CONFIG_I2C_AMD8111 is not set +# CONFIG_I2C_I801 is not set +# CONFIG_I2C_ISCH is not set +# CONFIG_I2C_PIIX4 is not set +# CONFIG_I2C_NFORCE2 is not set +# CONFIG_I2C_SIS5595 is not set +# CONFIG_I2C_SIS630 is not set +# CONFIG_I2C_SIS96X is not set +# CONFIG_I2C_VIA is not set +# CONFIG_I2C_VIAPRO is not set + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_CBUS_GPIO is not set +# CONFIG_I2C_DESIGNWARE_PLATFORM is not set +# CONFIG_I2C_DESIGNWARE_PCI is not set +# CONFIG_I2C_GPIO is not set +# CONFIG_I2C_NOMADIK is not set +# CONFIG_I2C_OCORES is not set +# CONFIG_I2C_PCA_PLATFORM is not set +# CONFIG_I2C_PXA_PCI is not set +# CONFIG_I2C_RK3X is not set +# CONFIG_I2C_SIMTEC is not set +# CONFIG_I2C_VERSATILE is not set +# CONFIG_I2C_XILINX is not set + +# +# External I2C/SMBus adapter drivers +# +# CONFIG_I2C_DIOLAN_U2C is not set +# CONFIG_I2C_PARPORT_LIGHT is not set +# CONFIG_I2C_ROBOTFUZZ_OSIF is not set +# CONFIG_I2C_TAOS_EVM is not set +# CONFIG_I2C_TINY_USB is not set + +# +# Other I2C/SMBus bus drivers +# +# CONFIG_I2C_STUB is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +CONFIG_SPI=y +# CONFIG_SPI_DEBUG is not set +CONFIG_SPI_MASTER=y + +# +# SPI Master Controller Drivers +# +# CONFIG_SPI_ALTERA is not set +# CONFIG_SPI_BITBANG is not set +# CONFIG_SPI_GPIO is not set +# CONFIG_SPI_FSL_SPI is not set +# CONFIG_SPI_OC_TINY is not set +CONFIG_SPI_PL022=y +# CONFIG_SPI_PXA2XX is not set +# CONFIG_SPI_PXA2XX_PCI is not set +# CONFIG_SPI_ROCKCHIP is not set +# CONFIG_SPI_SC18IS602 is not set +# CONFIG_SPI_XCOMM is not set +# CONFIG_SPI_XILINX is not set +# CONFIG_SPI_DESIGNWARE is not set + +# +# SPI Protocol Masters +# +# CONFIG_SPI_SPIDEV is not set +# CONFIG_SPI_TLE62X0 is not set +# CONFIG_SPMI is not set +# CONFIG_HSI is not set + +# +# PPS support +# +CONFIG_PPS=y +# CONFIG_PPS_DEBUG is not set +# CONFIG_NTP_PPS is not set + +# +# PPS clients support +# +# CONFIG_PPS_CLIENT_KTIMER is not set +# CONFIG_PPS_CLIENT_LDISC is not set +# CONFIG_PPS_CLIENT_GPIO is not set + +# +# PPS generators support +# + +# +# PTP clock support +# +CONFIG_PTP_1588_CLOCK=y + +# +# Enable PHYLIB and 
NETWORK_PHY_TIMESTAMPING to see the additional clocks. +# +CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y +CONFIG_ARCH_REQUIRE_GPIOLIB=y +CONFIG_GPIOLIB=y +CONFIG_GPIO_DEVRES=y +CONFIG_OF_GPIO=y +CONFIG_GPIOLIB_IRQCHIP=y +# CONFIG_DEBUG_GPIO is not set +# CONFIG_GPIO_SYSFS is not set +CONFIG_GPIO_GENERIC=y + +# +# Memory mapped GPIO drivers: +# +CONFIG_GPIO_GENERIC_PLATFORM=y +# CONFIG_GPIO_DWAPB is not set +CONFIG_GPIO_PL061=y +# CONFIG_GPIO_SCH311X is not set +# CONFIG_GPIO_SYSCON is not set +CONFIG_GPIO_XGENE=y +# CONFIG_GPIO_VX855 is not set +# CONFIG_GPIO_GRGPIO is not set + +# +# I2C GPIO expanders: +# +# CONFIG_GPIO_MAX7300 is not set +# CONFIG_GPIO_MAX732X is not set +# CONFIG_GPIO_PCA953X is not set +# CONFIG_GPIO_PCF857X is not set +# CONFIG_GPIO_SX150X is not set +# CONFIG_GPIO_ADP5588 is not set +# CONFIG_GPIO_ADNP is not set + +# +# PCI GPIO expanders: +# +# CONFIG_GPIO_BT8XX is not set +# CONFIG_GPIO_AMD8111 is not set +# CONFIG_GPIO_ML_IOH is not set +# CONFIG_GPIO_RDC321X is not set + +# +# SPI GPIO expanders: +# +# CONFIG_GPIO_MAX7301 is not set +# CONFIG_GPIO_MCP23S08 is not set +# CONFIG_GPIO_MC33880 is not set +# CONFIG_GPIO_74X164 is not set + +# +# AC97 GPIO expanders: +# + +# +# LPC GPIO expanders: +# + +# +# MODULbus GPIO expanders: +# + +# +# USB GPIO expanders: +# +# CONFIG_W1 is not set +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +# CONFIG_PDA_POWER is not set +# CONFIG_TEST_POWER is not set +# CONFIG_BATTERY_DS2780 is not set +# CONFIG_BATTERY_DS2781 is not set +# CONFIG_BATTERY_DS2782 is not set +# CONFIG_BATTERY_SBS is not set +# CONFIG_BATTERY_BQ27x00 is not set +# CONFIG_BATTERY_MAX17040 is not set +# CONFIG_BATTERY_MAX17042 is not set +# CONFIG_CHARGER_MAX8903 is not set +# CONFIG_CHARGER_LP8727 is not set +# CONFIG_CHARGER_GPIO is not set +# CONFIG_CHARGER_MANAGER is not set +# CONFIG_CHARGER_BQ2415X is not set +# CONFIG_CHARGER_BQ24190 is not set +# CONFIG_CHARGER_BQ24735 is not set +# CONFIG_CHARGER_SMB347 is not set +CONFIG_POWER_RESET=y +CONFIG_POWER_RESET_LAYERSCAPE=y +# CONFIG_POWER_RESET_GPIO is not set +# CONFIG_POWER_RESET_GPIO_RESTART is not set +# CONFIG_POWER_RESET_LTC2952 is not set +CONFIG_POWER_RESET_VEXPRESS=y +# CONFIG_POWER_RESET_XGENE is not set +# CONFIG_POWER_RESET_SYSCON is not set +# CONFIG_POWER_AVS is not set +# CONFIG_HWMON is not set +# CONFIG_THERMAL is not set +# CONFIG_WATCHDOG is not set +CONFIG_SSB_POSSIBLE=y + +# +# Sonics Silicon Backplane +# +# CONFIG_SSB is not set +CONFIG_BCMA_POSSIBLE=y + +# +# Broadcom specific AMBA +# +# CONFIG_BCMA is not set + +# +# Multifunction device drivers +# +CONFIG_MFD_CORE=y +# CONFIG_MFD_AS3711 is not set +# CONFIG_MFD_AS3722 is not set +# CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_AAT2870_CORE is not set +# CONFIG_MFD_BCM590XX is not set +# CONFIG_MFD_AXP20X is not set +# CONFIG_MFD_CROS_EC is not set +# CONFIG_PMIC_DA903X is not set +# CONFIG_MFD_DA9052_SPI is not set +# CONFIG_MFD_DA9052_I2C is not set +# CONFIG_MFD_DA9055 is not set +# CONFIG_MFD_DA9063 is not set +# CONFIG_MFD_MC13XXX_SPI is not set +# CONFIG_MFD_MC13XXX_I2C is not set +# CONFIG_MFD_HI6421_PMIC is not set +# CONFIG_HTC_PASIC3 is not set +# CONFIG_HTC_I2CPLD is not set +# CONFIG_LPC_ICH is not set +# CONFIG_LPC_SCH is not set +# CONFIG_INTEL_SOC_PMIC is not set +# CONFIG_MFD_JANZ_CMODIO is not set +# CONFIG_MFD_KEMPLD is not set +# CONFIG_MFD_88PM800 is not set +# CONFIG_MFD_88PM805 is not set +# CONFIG_MFD_88PM860X is not set +# CONFIG_MFD_MAX14577 is not set +# CONFIG_MFD_MAX77686 is not set +# 
CONFIG_MFD_MAX77693 is not set +# CONFIG_MFD_MAX8907 is not set +# CONFIG_MFD_MAX8925 is not set +# CONFIG_MFD_MAX8997 is not set +# CONFIG_MFD_MAX8998 is not set +# CONFIG_MFD_MENF21BMC is not set +# CONFIG_EZX_PCAP is not set +# CONFIG_MFD_VIPERBOARD is not set +# CONFIG_MFD_RETU is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_RDC321X is not set +# CONFIG_MFD_RTSX_PCI is not set +# CONFIG_MFD_RTSX_USB is not set +# CONFIG_MFD_RC5T583 is not set +# CONFIG_MFD_RK808 is not set +# CONFIG_MFD_RN5T618 is not set +# CONFIG_MFD_SEC_CORE is not set +# CONFIG_MFD_SI476X_CORE is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_MFD_SMSC is not set +# CONFIG_ABX500_CORE is not set +# CONFIG_MFD_STMPE is not set +CONFIG_MFD_SYSCON=y +# CONFIG_MFD_TI_AM335X_TSCADC is not set +# CONFIG_MFD_LP3943 is not set +# CONFIG_MFD_LP8788 is not set +# CONFIG_MFD_PALMAS is not set +# CONFIG_TPS6105X is not set +# CONFIG_TPS65010 is not set +# CONFIG_TPS6507X is not set +# CONFIG_MFD_TPS65090 is not set +# CONFIG_MFD_TPS65217 is not set +# CONFIG_MFD_TPS65218 is not set +# CONFIG_MFD_TPS6586X is not set +# CONFIG_MFD_TPS65910 is not set +# CONFIG_MFD_TPS65912 is not set +# CONFIG_MFD_TPS65912_I2C is not set +# CONFIG_MFD_TPS65912_SPI is not set +# CONFIG_MFD_TPS80031 is not set +# CONFIG_TWL4030_CORE is not set +# CONFIG_TWL6040_CORE is not set +# CONFIG_MFD_WL1273_CORE is not set +# CONFIG_MFD_LM3533 is not set +# CONFIG_MFD_TC3589X is not set +# CONFIG_MFD_TMIO is not set +# CONFIG_MFD_VX855 is not set +# CONFIG_MFD_ARIZONA_I2C is not set +# CONFIG_MFD_ARIZONA_SPI is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X_I2C is not set +# CONFIG_MFD_WM831X_SPI is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_WM8994 is not set +CONFIG_MFD_VEXPRESS_SYSREG=y +CONFIG_REGULATOR=y +# CONFIG_REGULATOR_DEBUG is not set +CONFIG_REGULATOR_FIXED_VOLTAGE=y +# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set +# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set +# CONFIG_REGULATOR_ACT8865 is not set +# CONFIG_REGULATOR_AD5398 is not set +# CONFIG_REGULATOR_ANATOP is not set +# CONFIG_REGULATOR_DA9210 is not set +# CONFIG_REGULATOR_DA9211 is not set +# CONFIG_REGULATOR_FAN53555 is not set +# CONFIG_REGULATOR_GPIO is not set +# CONFIG_REGULATOR_ISL9305 is not set +# CONFIG_REGULATOR_ISL6271A is not set +# CONFIG_REGULATOR_LP3971 is not set +# CONFIG_REGULATOR_LP3972 is not set +# CONFIG_REGULATOR_LP872X is not set +# CONFIG_REGULATOR_LP8755 is not set +# CONFIG_REGULATOR_LTC3589 is not set +# CONFIG_REGULATOR_MAX1586 is not set +# CONFIG_REGULATOR_MAX8649 is not set +# CONFIG_REGULATOR_MAX8660 is not set +# CONFIG_REGULATOR_MAX8952 is not set +# CONFIG_REGULATOR_MAX8973 is not set +# CONFIG_REGULATOR_PFUZE100 is not set +# CONFIG_REGULATOR_TPS51632 is not set +# CONFIG_REGULATOR_TPS62360 is not set +# CONFIG_REGULATOR_TPS65023 is not set +# CONFIG_REGULATOR_TPS6507X is not set +# CONFIG_REGULATOR_TPS6524X is not set +# CONFIG_REGULATOR_VEXPRESS is not set +# CONFIG_MEDIA_SUPPORT is not set + +# +# Graphics support +# +CONFIG_VGA_ARB=y +CONFIG_VGA_ARB_MAX_GPUS=16 + +# +# Direct Rendering Manager +# +# CONFIG_DRM is not set + +# +# Frame buffer Devices +# +CONFIG_FB=y +# CONFIG_FIRMWARE_EDID is not set +CONFIG_FB_CMDLINE=y +# CONFIG_FB_DDC is not set +# CONFIG_FB_BOOT_VESA_SUPPORT is not set +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set +# CONFIG_FB_SYS_FILLRECT is not set +# CONFIG_FB_SYS_COPYAREA is not set +# 
CONFIG_FB_SYS_IMAGEBLIT is not set +# CONFIG_FB_FOREIGN_ENDIAN is not set +# CONFIG_FB_SYS_FOPS is not set +# CONFIG_FB_SVGALIB is not set +# CONFIG_FB_MACMODES is not set +# CONFIG_FB_BACKLIGHT is not set +CONFIG_FB_MODE_HELPERS=y +# CONFIG_FB_TILEBLITTING is not set + +# +# Frame buffer hardware drivers +# +# CONFIG_FB_CIRRUS is not set +# CONFIG_FB_PM2 is not set +CONFIG_FB_ARMCLCD=y +# CONFIG_FB_CYBER2000 is not set +# CONFIG_FB_ASILIANT is not set +# CONFIG_FB_IMSTT is not set +# CONFIG_FB_OPENCORES is not set +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_NVIDIA is not set +# CONFIG_FB_RIVA is not set +# CONFIG_FB_I740 is not set +# CONFIG_FB_MATROX is not set +# CONFIG_FB_RADEON is not set +# CONFIG_FB_ATY128 is not set +# CONFIG_FB_ATY is not set +# CONFIG_FB_S3 is not set +# CONFIG_FB_SAVAGE is not set +# CONFIG_FB_SIS is not set +# CONFIG_FB_NEOMAGIC is not set +# CONFIG_FB_KYRO is not set +# CONFIG_FB_3DFX is not set +# CONFIG_FB_VOODOO1 is not set +# CONFIG_FB_VT8623 is not set +# CONFIG_FB_TRIDENT is not set +# CONFIG_FB_ARK is not set +# CONFIG_FB_PM3 is not set +# CONFIG_FB_CARMINE is not set +# CONFIG_FB_SMSCUFX is not set +# CONFIG_FB_UDL is not set +# CONFIG_FB_VIRTUAL is not set +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +# CONFIG_FB_BROADSHEET is not set +# CONFIG_FB_AUO_K190X is not set +# CONFIG_FB_SIMPLE is not set +# CONFIG_FB_SSD1307 is not set +# CONFIG_BACKLIGHT_LCD_SUPPORT is not set +# CONFIG_VGASTATE is not set +CONFIG_VIDEOMODE_HELPERS=y + +# +# Console display driver support +# +CONFIG_DUMMY_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE=y +# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set +# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set +CONFIG_LOGO=y +# CONFIG_LOGO_LINUX_MONO is not set +# CONFIG_LOGO_LINUX_VGA16 is not set +CONFIG_LOGO_LINUX_CLUT224=y +# CONFIG_SOUND is not set + +# +# HID support +# +CONFIG_HID=y +# CONFIG_HID_BATTERY_STRENGTH is not set +# CONFIG_HIDRAW is not set +# CONFIG_UHID is not set +CONFIG_HID_GENERIC=y + +# +# Special HID drivers +# +CONFIG_HID_A4TECH=y +# CONFIG_HID_ACRUX is not set +CONFIG_HID_APPLE=y +# CONFIG_HID_APPLEIR is not set +# CONFIG_HID_AUREAL is not set +CONFIG_HID_BELKIN=y +CONFIG_HID_CHERRY=y +CONFIG_HID_CHICONY=y +# CONFIG_HID_CP2112 is not set +CONFIG_HID_CYPRESS=y +# CONFIG_HID_DRAGONRISE is not set +# CONFIG_HID_EMS_FF is not set +# CONFIG_HID_ELECOM is not set +# CONFIG_HID_ELO is not set +CONFIG_HID_EZKEY=y +# CONFIG_HID_HOLTEK is not set +# CONFIG_HID_HUION is not set +# CONFIG_HID_KEYTOUCH is not set +# CONFIG_HID_KYE is not set +# CONFIG_HID_UCLOGIC is not set +# CONFIG_HID_WALTOP is not set +# CONFIG_HID_GYRATION is not set +# CONFIG_HID_ICADE is not set +# CONFIG_HID_TWINHAN is not set +CONFIG_HID_KENSINGTON=y +# CONFIG_HID_LCPOWER is not set +# CONFIG_HID_LENOVO is not set +CONFIG_HID_LOGITECH=y +# CONFIG_HID_LOGITECH_HIDPP is not set +# CONFIG_LOGITECH_FF is not set +# CONFIG_LOGIRUMBLEPAD2_FF is not set +# CONFIG_LOGIG940_FF is not set +# CONFIG_LOGIWHEELS_FF is not set +# CONFIG_HID_MAGICMOUSE is not set +CONFIG_HID_MICROSOFT=y +CONFIG_HID_MONTEREY=y +# CONFIG_HID_MULTITOUCH is not set +# CONFIG_HID_NTRIG is not set +# CONFIG_HID_ORTEK is not set +# CONFIG_HID_PANTHERLORD is not set +# CONFIG_HID_PENMOUNT is not set +# CONFIG_HID_PETALYNX is not set +# CONFIG_HID_PICOLCD is not set +# CONFIG_HID_PRIMAX is not set +# CONFIG_HID_ROCCAT is not set +# CONFIG_HID_SAITEK is not set +# CONFIG_HID_SAMSUNG is not set +# CONFIG_HID_SPEEDLINK is not set +# CONFIG_HID_STEELSERIES is not set 
+# CONFIG_HID_SUNPLUS is not set +# CONFIG_HID_RMI is not set +# CONFIG_HID_GREENASIA is not set +# CONFIG_HID_SMARTJOYPLUS is not set +# CONFIG_HID_TIVO is not set +# CONFIG_HID_TOPSEED is not set +# CONFIG_HID_THRUSTMASTER is not set +# CONFIG_HID_WACOM is not set +# CONFIG_HID_XINMO is not set +# CONFIG_HID_ZEROPLUS is not set +# CONFIG_HID_ZYDACRON is not set +# CONFIG_HID_SENSOR_HUB is not set + +# +# USB HID support +# +CONFIG_USB_HID=y +# CONFIG_HID_PID is not set +# CONFIG_USB_HIDDEV is not set + +# +# I2C HID support +# +# CONFIG_I2C_HID is not set +CONFIG_USB_OHCI_LITTLE_ENDIAN=y +CONFIG_USB_SUPPORT=y +CONFIG_USB_COMMON=y +CONFIG_USB_ARCH_HAS_HCD=y +CONFIG_USB=y +# CONFIG_USB_ANNOUNCE_NEW_DEVICES is not set + +# +# Miscellaneous USB options +# +CONFIG_USB_DEFAULT_PERSIST=y +# CONFIG_USB_DYNAMIC_MINORS is not set +# CONFIG_USB_OTG_WHITELIST is not set +# CONFIG_USB_OTG_FSM is not set +# CONFIG_USB_MON is not set +# CONFIG_USB_WUSB_CBAF is not set + +# +# USB Host Controller Drivers +# +# CONFIG_USB_C67X00_HCD is not set +CONFIG_USB_XHCI_HCD=y +CONFIG_USB_XHCI_PCI=y +CONFIG_USB_XHCI_PLATFORM=y +CONFIG_USB_EHCI_HCD=y +# CONFIG_USB_EHCI_ROOT_HUB_TT is not set +CONFIG_USB_EHCI_TT_NEWSCHED=y +CONFIG_USB_EHCI_PCI=y +CONFIG_USB_EHCI_HCD_PLATFORM=y +# CONFIG_USB_OXU210HP_HCD is not set +# CONFIG_USB_ISP116X_HCD is not set +CONFIG_USB_ISP1760_HCD=y +# CONFIG_USB_ISP1362_HCD is not set +# CONFIG_USB_FUSBH200_HCD is not set +# CONFIG_USB_FOTG210_HCD is not set +# CONFIG_USB_MAX3421_HCD is not set +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_OHCI_HCD_PCI=y +CONFIG_USB_OHCI_HCD_PLATFORM=y +# CONFIG_USB_UHCI_HCD is not set +# CONFIG_USB_SL811_HCD is not set +# CONFIG_USB_R8A66597_HCD is not set +# CONFIG_USB_HCD_TEST_MODE is not set + +# +# USB Device Class drivers +# +# CONFIG_USB_ACM is not set +# CONFIG_USB_PRINTER is not set +# CONFIG_USB_WDM is not set +# CONFIG_USB_TMC is not set + +# +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may +# + +# +# also be needed; see USB_STORAGE Help for more info +# +CONFIG_USB_STORAGE=y +# CONFIG_USB_STORAGE_DEBUG is not set +# CONFIG_USB_STORAGE_REALTEK is not set +# CONFIG_USB_STORAGE_DATAFAB is not set +# CONFIG_USB_STORAGE_FREECOM is not set +# CONFIG_USB_STORAGE_ISD200 is not set +# CONFIG_USB_STORAGE_USBAT is not set +# CONFIG_USB_STORAGE_SDDR09 is not set +# CONFIG_USB_STORAGE_SDDR55 is not set +# CONFIG_USB_STORAGE_JUMPSHOT is not set +# CONFIG_USB_STORAGE_ALAUDA is not set +# CONFIG_USB_STORAGE_ONETOUCH is not set +# CONFIG_USB_STORAGE_KARMA is not set +# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set +# CONFIG_USB_STORAGE_ENE_UB6250 is not set +# CONFIG_USB_UAS is not set + +# +# USB Imaging devices +# +# CONFIG_USB_MDC800 is not set +# CONFIG_USB_MICROTEK is not set +# CONFIG_USBIP_CORE is not set +# CONFIG_USB_MUSB_HDRC is not set +CONFIG_USB_DWC3=y +CONFIG_USB_DWC3_HOST=y + +# +# Platform Glue Driver Support +# +CONFIG_USB_DWC3_PCI=y + +# +# Debugging features +# +# CONFIG_USB_DWC3_DEBUG is not set +# CONFIG_DWC3_HOST_USB3_LPM_ENABLE is not set +# CONFIG_USB_DWC2 is not set +# CONFIG_USB_CHIPIDEA is not set + +# +# USB port drivers +# +# CONFIG_USB_SERIAL is not set + +# +# USB Miscellaneous drivers +# +# CONFIG_USB_EMI62 is not set +# CONFIG_USB_EMI26 is not set +# CONFIG_USB_ADUTUX is not set +# CONFIG_USB_SEVSEG is not set +# CONFIG_USB_RIO500 is not set +# CONFIG_USB_LEGOTOWER is not set +# CONFIG_USB_LCD is not set +# CONFIG_USB_LED is not set +# CONFIG_USB_CYPRESS_CY7C63 is not set +# CONFIG_USB_CYTHERM is not set +# CONFIG_USB_IDMOUSE is not 
set +# CONFIG_USB_FTDI_ELAN is not set +# CONFIG_USB_APPLEDISPLAY is not set +# CONFIG_USB_SISUSBVGA is not set +# CONFIG_USB_LD is not set +# CONFIG_USB_TRANCEVIBRATOR is not set +# CONFIG_USB_IOWARRIOR is not set +# CONFIG_USB_TEST is not set +# CONFIG_USB_EHSET_TEST_FIXTURE is not set +# CONFIG_USB_ISIGHTFW is not set +# CONFIG_USB_YUREX is not set +# CONFIG_USB_EZUSB_FX2 is not set +# CONFIG_USB_HSIC_USB3503 is not set +# CONFIG_USB_LINK_LAYER_TEST is not set + +# +# USB Physical Layer drivers +# +# CONFIG_USB_PHY is not set +# CONFIG_NOP_USB_XCEIV is not set +# CONFIG_USB_GPIO_VBUS is not set +# CONFIG_USB_ISP1301 is not set +CONFIG_USB_ULPI=y +# CONFIG_USB_GADGET is not set +# CONFIG_UWB is not set +CONFIG_MMC=y +# CONFIG_MMC_DEBUG is not set +# CONFIG_MMC_CLKGATE is not set + +# +# MMC/SD/SDIO Card Drivers +# +CONFIG_MMC_BLOCK=y +CONFIG_MMC_BLOCK_MINORS=8 +CONFIG_MMC_BLOCK_BOUNCE=y +# CONFIG_SDIO_UART is not set +# CONFIG_MMC_TEST is not set + +# +# MMC/SD/SDIO Host Controller Drivers +# +CONFIG_MMC_ARMMMCI=y +CONFIG_MMC_SDHCI=y +# CONFIG_MMC_SDHCI_PCI is not set +CONFIG_MMC_SDHCI_PLTFM=y +# CONFIG_MMC_SDHCI_OF_ARASAN is not set +# CONFIG_MMC_SDHCI_PXAV3 is not set +# CONFIG_MMC_SDHCI_PXAV2 is not set +# CONFIG_MMC_TIFM_SD is not set +CONFIG_MMC_SPI=y +# CONFIG_MMC_CB710 is not set +# CONFIG_MMC_VIA_SDMMC is not set +# CONFIG_MMC_VUB300 is not set +# CONFIG_MMC_USHC is not set +# CONFIG_MMC_USDHI6ROL0 is not set +# CONFIG_MEMSTICK is not set +# CONFIG_NEW_LEDS is not set +# CONFIG_ACCESSIBILITY is not set +# CONFIG_INFINIBAND is not set +CONFIG_RTC_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_SYSTOHC=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +# CONFIG_RTC_DEBUG is not set + +# +# RTC interfaces +# +CONFIG_RTC_INTF_SYSFS=y +CONFIG_RTC_INTF_PROC=y +CONFIG_RTC_INTF_DEV=y +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +# CONFIG_RTC_DRV_DS1307 is not set +# CONFIG_RTC_DRV_DS1374 is not set +# CONFIG_RTC_DRV_DS1672 is not set +# CONFIG_RTC_DRV_DS3232 is not set +# CONFIG_RTC_DRV_HYM8563 is not set +# CONFIG_RTC_DRV_MAX6900 is not set +# CONFIG_RTC_DRV_RS5C372 is not set +# CONFIG_RTC_DRV_ISL1208 is not set +# CONFIG_RTC_DRV_ISL12022 is not set +# CONFIG_RTC_DRV_ISL12057 is not set +# CONFIG_RTC_DRV_X1205 is not set +# CONFIG_RTC_DRV_PCF2127 is not set +# CONFIG_RTC_DRV_PCF8523 is not set +# CONFIG_RTC_DRV_PCF8563 is not set +# CONFIG_RTC_DRV_PCF85063 is not set +# CONFIG_RTC_DRV_PCF8583 is not set +# CONFIG_RTC_DRV_M41T80 is not set +# CONFIG_RTC_DRV_BQ32K is not set +# CONFIG_RTC_DRV_S35390A is not set +# CONFIG_RTC_DRV_FM3130 is not set +# CONFIG_RTC_DRV_RX8581 is not set +# CONFIG_RTC_DRV_RX8025 is not set +# CONFIG_RTC_DRV_EM3027 is not set +# CONFIG_RTC_DRV_RV3029C2 is not set + +# +# SPI RTC drivers +# +# CONFIG_RTC_DRV_M41T93 is not set +# CONFIG_RTC_DRV_M41T94 is not set +# CONFIG_RTC_DRV_DS1305 is not set +# CONFIG_RTC_DRV_DS1343 is not set +# CONFIG_RTC_DRV_DS1347 is not set +# CONFIG_RTC_DRV_DS1390 is not set +# CONFIG_RTC_DRV_MAX6902 is not set +# CONFIG_RTC_DRV_R9701 is not set +# CONFIG_RTC_DRV_RS5C348 is not set +# CONFIG_RTC_DRV_DS3234 is not set +# CONFIG_RTC_DRV_PCF2123 is not set +# CONFIG_RTC_DRV_RX4581 is not set +# CONFIG_RTC_DRV_MCP795 is not set + +# +# Platform RTC drivers +# +# CONFIG_RTC_DRV_DS1286 is not set +# CONFIG_RTC_DRV_DS1511 is not set +# CONFIG_RTC_DRV_DS1553 is not set +# CONFIG_RTC_DRV_DS1742 is not set +# CONFIG_RTC_DRV_DS2404 is not set +CONFIG_RTC_DRV_EFI=y +# 
CONFIG_RTC_DRV_STK17TA8 is not set +# CONFIG_RTC_DRV_M48T86 is not set +# CONFIG_RTC_DRV_M48T35 is not set +# CONFIG_RTC_DRV_M48T59 is not set +# CONFIG_RTC_DRV_MSM6242 is not set +# CONFIG_RTC_DRV_BQ4802 is not set +# CONFIG_RTC_DRV_RP5C01 is not set +# CONFIG_RTC_DRV_V3020 is not set + +# +# on-CPU RTC drivers +# +# CONFIG_RTC_DRV_PL030 is not set +# CONFIG_RTC_DRV_PL031 is not set +# CONFIG_RTC_DRV_SNVS is not set +CONFIG_RTC_DRV_XGENE=y + +# +# HID Sensor RTC drivers +# +# CONFIG_RTC_DRV_HID_SENSOR_TIME is not set +CONFIG_DMADEVICES=y +# CONFIG_DMADEVICES_DEBUG is not set + +# +# DMA Devices +# +# CONFIG_AMBA_PL08X is not set +# CONFIG_DW_DMAC_CORE is not set +# CONFIG_DW_DMAC is not set +# CONFIG_DW_DMAC_PCI is not set +# CONFIG_PL330_DMA is not set +# CONFIG_FSL_EDMA is not set +CONFIG_DMA_ENGINE=y +CONFIG_DMA_OF=y + +# +# DMA Clients +# +# CONFIG_ASYNC_TX_DMA is not set +# CONFIG_DMATEST is not set +# CONFIG_AUXDISPLAY is not set +# CONFIG_UIO is not set +# CONFIG_VFIO is not set +# CONFIG_VIRT_DRIVERS is not set +CONFIG_VIRTIO=y + +# +# Virtio drivers +# +# CONFIG_VIRTIO_PCI is not set +CONFIG_VIRTIO_BALLOON=y +CONFIG_VIRTIO_MMIO=y +# CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES is not set + +# +# Microsoft Hyper-V guest support +# +# CONFIG_STAGING is not set + +# +# SOC (System On Chip) specific Drivers +# +# CONFIG_SOC_TI is not set +CONFIG_CLKDEV_LOOKUP=y +CONFIG_HAVE_CLK_PREPARE=y +CONFIG_COMMON_CLK=y + +# +# Common Clock Framework +# +CONFIG_COMMON_CLK_VERSATILE=y +CONFIG_CLK_SP810=y +CONFIG_CLK_VEXPRESS_OSC=y +# CONFIG_COMMON_CLK_SI5351 is not set +# CONFIG_COMMON_CLK_SI570 is not set +CONFIG_COMMON_CLK_XGENE=y +# CONFIG_COMMON_CLK_PXA is not set +# CONFIG_COMMON_CLK_QCOM is not set + +# +# Hardware Spinlock drivers +# + +# +# Clock Source drivers +# +CONFIG_CLKSRC_OF=y +CONFIG_CLKSRC_MMIO=y +CONFIG_ARM_ARCH_TIMER=y +CONFIG_ARM_ARCH_TIMER_EVTSTREAM=y +# CONFIG_ATMEL_PIT is not set +# CONFIG_SH_TIMER_CMT is not set +# CONFIG_SH_TIMER_MTU2 is not set +# CONFIG_SH_TIMER_TMU is not set +# CONFIG_EM_TIMER_STI is not set +CONFIG_CLKSRC_VERSATILE=y +# CONFIG_MAILBOX is not set +CONFIG_IOMMU_API=y +CONFIG_IOMMU_SUPPORT=y + +# +# Generic IOMMU Pagetable Support +# +CONFIG_IOMMU_IO_PGTABLE=y +CONFIG_IOMMU_IO_PGTABLE_LPAE=y +# CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST is not set +CONFIG_OF_IOMMU=y +CONFIG_ARM_SMMU=y + +# +# Remoteproc drivers +# +# CONFIG_STE_MODEM_RPROC is not set + +# +# Rpmsg drivers +# + +# +# SOC (System On Chip) specific Drivers +# +# CONFIG_PM_DEVFREQ is not set +# CONFIG_EXTCON is not set +CONFIG_MEMORY=y +# CONFIG_IIO is not set +# CONFIG_VME_BUS is not set +# CONFIG_PWM is not set +CONFIG_IRQCHIP=y +CONFIG_ARM_GIC=y +CONFIG_ARM_GIC_V3=y +CONFIG_ARM_GIC_V3_ITS=y +# CONFIG_IPACK_BUS is not set +# CONFIG_RESET_CONTROLLER is not set +# CONFIG_FMC is not set + +# +# PHY Subsystem +# +CONFIG_GENERIC_PHY=y +# CONFIG_BCM_KONA_USB2_PHY is not set +CONFIG_PHY_XGENE=y +# CONFIG_POWERCAP is not set +# CONFIG_MCB is not set +CONFIG_RAS=y +# CONFIG_THUNDERBOLT is not set + +# +# Firmware Drivers +# +# CONFIG_FIRMWARE_MEMMAP is not set + +# +# EFI (Extensible Firmware Interface) Support +# +# CONFIG_EFI_VARS is not set +CONFIG_EFI_PARAMS_FROM_FDT=y +CONFIG_EFI_RUNTIME_WRAPPERS=y +CONFIG_EFI_ARMSTUB=y + +# +# File systems +# +CONFIG_DCACHE_WORD_ACCESS=y +CONFIG_EXT2_FS=y +# CONFIG_EXT2_FS_XATTR is not set +# CONFIG_EXT2_FS_XIP is not set +CONFIG_EXT3_FS=y +# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set +# CONFIG_EXT3_FS_XATTR is not set +CONFIG_EXT4_FS=y +# CONFIG_EXT4_FS_POSIX_ACL is not 
set +# CONFIG_EXT4_FS_SECURITY is not set +# CONFIG_EXT4_DEBUG is not set +CONFIG_JBD=y +# CONFIG_JBD_DEBUG is not set +CONFIG_JBD2=y +# CONFIG_JBD2_DEBUG is not set +CONFIG_FS_MBCACHE=y +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +# CONFIG_XFS_FS is not set +# CONFIG_GFS2_FS is not set +# CONFIG_BTRFS_FS is not set +# CONFIG_NILFS2_FS is not set +# CONFIG_FS_POSIX_ACL is not set +CONFIG_FILE_LOCKING=y +CONFIG_FSNOTIFY=y +CONFIG_DNOTIFY=y +CONFIG_INOTIFY_USER=y +CONFIG_FANOTIFY=y +CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y +# CONFIG_QUOTA is not set +# CONFIG_QUOTACTL is not set +# CONFIG_AUTOFS4_FS is not set +CONFIG_FUSE_FS=y +CONFIG_CUSE=y +CONFIG_OVERLAY_FS=y + +# +# Caches +# +# CONFIG_FSCACHE is not set + +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +CONFIG_FAT_FS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +# CONFIG_PROC_KCORE is not set +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +CONFIG_KERNFS=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +# CONFIG_TMPFS_POSIX_ACL is not set +CONFIG_TMPFS_XATTR=y +CONFIG_HUGETLBFS=y +CONFIG_HUGETLB_PAGE=y +# CONFIG_CONFIGFS_FS is not set +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_ECRYPT_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +CONFIG_JFFS2_FS=y +CONFIG_JFFS2_FS_DEBUG=0 +CONFIG_JFFS2_FS_WRITEBUFFER=y +# CONFIG_JFFS2_FS_WBUF_VERIFY is not set +CONFIG_JFFS2_SUMMARY=y +# CONFIG_JFFS2_FS_XATTR is not set +# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set +CONFIG_JFFS2_ZLIB=y +# CONFIG_JFFS2_LZO is not set +CONFIG_JFFS2_RTIME=y +# CONFIG_JFFS2_RUBIN is not set +# CONFIG_LOGFS is not set +# CONFIG_CRAMFS is not set +CONFIG_SQUASHFS=y +CONFIG_SQUASHFS_FILE_CACHE=y +# CONFIG_SQUASHFS_FILE_DIRECT is not set +CONFIG_SQUASHFS_DECOMP_SINGLE=y +# CONFIG_SQUASHFS_DECOMP_MULTI is not set +# CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU is not set +CONFIG_SQUASHFS_XATTR=y +CONFIG_SQUASHFS_ZLIB=y +CONFIG_SQUASHFS_LZO=y +CONFIG_SQUASHFS_XZ=y +# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set +# CONFIG_SQUASHFS_EMBEDDED is not set +CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 +# CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_QNX6FS_FS is not set +# CONFIG_ROMFS_FS is not set +# CONFIG_PSTORE is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set +# CONFIG_F2FS_FS is not set +# CONFIG_EFIVAR_FS is not set +# CONFIG_AUFS_FS is not set +CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_NFS_FS=y +CONFIG_NFS_V2=y +CONFIG_NFS_V3=y +# CONFIG_NFS_V3_ACL is not set +CONFIG_NFS_V4=y +# CONFIG_NFS_SWAP is not set +# CONFIG_NFS_V4_1 is not set +CONFIG_ROOT_NFS=y +# CONFIG_NFS_USE_LEGACY_DNS is not set +CONFIG_NFS_USE_KERNEL_DNS=y +# CONFIG_NFSD is not set +CONFIG_GRACE_PERIOD=y +CONFIG_LOCKD=y +CONFIG_LOCKD_V4=y +CONFIG_NFS_COMMON=y +CONFIG_SUNRPC=y +CONFIG_SUNRPC_GSS=y +# CONFIG_SUNRPC_DEBUG is not set +# CONFIG_CEPH_FS is not set +# CONFIG_CIFS is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set +CONFIG_9P_FS=y +# CONFIG_9P_FS_POSIX_ACL is not set +# CONFIG_9P_FS_SECURITY is not set +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="iso8859-1" +CONFIG_NLS_CODEPAGE_437=y +# 
CONFIG_NLS_CODEPAGE_737 is not set +# CONFIG_NLS_CODEPAGE_775 is not set +# CONFIG_NLS_CODEPAGE_850 is not set +# CONFIG_NLS_CODEPAGE_852 is not set +# CONFIG_NLS_CODEPAGE_855 is not set +# CONFIG_NLS_CODEPAGE_857 is not set +# CONFIG_NLS_CODEPAGE_860 is not set +# CONFIG_NLS_CODEPAGE_861 is not set +# CONFIG_NLS_CODEPAGE_862 is not set +# CONFIG_NLS_CODEPAGE_863 is not set +# CONFIG_NLS_CODEPAGE_864 is not set +# CONFIG_NLS_CODEPAGE_865 is not set +# CONFIG_NLS_CODEPAGE_866 is not set +# CONFIG_NLS_CODEPAGE_869 is not set +# CONFIG_NLS_CODEPAGE_936 is not set +# CONFIG_NLS_CODEPAGE_950 is not set +# CONFIG_NLS_CODEPAGE_932 is not set +# CONFIG_NLS_CODEPAGE_949 is not set +# CONFIG_NLS_CODEPAGE_874 is not set +# CONFIG_NLS_ISO8859_8 is not set +# CONFIG_NLS_CODEPAGE_1250 is not set +# CONFIG_NLS_CODEPAGE_1251 is not set +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=y +# CONFIG_NLS_ISO8859_2 is not set +# CONFIG_NLS_ISO8859_3 is not set +# CONFIG_NLS_ISO8859_4 is not set +# CONFIG_NLS_ISO8859_5 is not set +# CONFIG_NLS_ISO8859_6 is not set +# CONFIG_NLS_ISO8859_7 is not set +# CONFIG_NLS_ISO8859_9 is not set +# CONFIG_NLS_ISO8859_13 is not set +# CONFIG_NLS_ISO8859_14 is not set +# CONFIG_NLS_ISO8859_15 is not set +# CONFIG_NLS_KOI8_R is not set +# CONFIG_NLS_KOI8_U is not set +# CONFIG_NLS_MAC_ROMAN is not set +# CONFIG_NLS_MAC_CELTIC is not set +# CONFIG_NLS_MAC_CENTEURO is not set +# CONFIG_NLS_MAC_CROATIAN is not set +# CONFIG_NLS_MAC_CYRILLIC is not set +# CONFIG_NLS_MAC_GAELIC is not set +# CONFIG_NLS_MAC_GREEK is not set +# CONFIG_NLS_MAC_ICELAND is not set +# CONFIG_NLS_MAC_INUIT is not set +# CONFIG_NLS_MAC_ROMANIAN is not set +# CONFIG_NLS_MAC_TURKISH is not set +CONFIG_NLS_UTF8=y +CONFIG_HAVE_KVM_IRQCHIP=y +CONFIG_KVM_MMIO=y +CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=y +CONFIG_KVM_ARM_HOST=y +CONFIG_KVM_ARM_MAX_VCPUS=8 +CONFIG_KVM_ARM_VGIC=y +CONFIG_KVM_ARM_TIMER=y + +# +# Kernel hacking +# + +# +# printk and dmesg options +# +CONFIG_PRINTK_TIME=y +CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 +# CONFIG_BOOT_PRINTK_DELAY is not set +# CONFIG_DYNAMIC_DEBUG is not set + +# +# Compile-time checks and compiler options +# +CONFIG_DEBUG_INFO=y +# CONFIG_DEBUG_INFO_REDUCED is not set +# CONFIG_DEBUG_INFO_SPLIT is not set +# CONFIG_DEBUG_INFO_DWARF4 is not set +CONFIG_ENABLE_WARN_DEPRECATED=y +CONFIG_ENABLE_MUST_CHECK=y +CONFIG_FRAME_WARN=2048 +# CONFIG_STRIP_ASM_SYMS is not set +# CONFIG_READABLE_ASM is not set +# CONFIG_UNUSED_SYMBOLS is not set +CONFIG_DEBUG_FS=y +# CONFIG_HEADERS_CHECK is not set +# CONFIG_DEBUG_SECTION_MISMATCH is not set +CONFIG_ARCH_WANT_FRAME_POINTERS=y +CONFIG_FRAME_POINTER=y +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +CONFIG_MAGIC_SYSRQ=y +CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 +CONFIG_DEBUG_KERNEL=y + +# +# Memory Debugging +# +# CONFIG_DEBUG_PAGEALLOC is not set +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_DEBUG_SLAB is not set +CONFIG_HAVE_DEBUG_KMEMLEAK=y +# CONFIG_DEBUG_KMEMLEAK is not set +# CONFIG_DEBUG_STACK_USAGE is not set +# CONFIG_DEBUG_VM is not set +CONFIG_DEBUG_MEMORY_INIT=y +# CONFIG_DEBUG_PER_CPU_MAPS is not set +# CONFIG_DEBUG_SHIRQ is not set + +# +# Debug Lockups and Hangs +# +CONFIG_LOCKUP_DETECTOR=y +# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set +CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 +CONFIG_DETECT_HUNG_TASK=y +CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 +# CONFIG_PANIC_ON_OOPS is not set +CONFIG_PANIC_ON_OOPS_VALUE=0 
+CONFIG_PANIC_TIMEOUT=0 +CONFIG_SCHED_DEBUG=y +# CONFIG_SCHEDSTATS is not set +# CONFIG_SCHED_STACK_END_CHECK is not set +# CONFIG_TIMER_STATS is not set +CONFIG_DEBUG_PREEMPT=y + +# +# Lock Debugging (spinlocks, mutexes, etc...) +# +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_ATOMIC_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +# CONFIG_LOCK_TORTURE_TEST is not set +# CONFIG_STACKTRACE is not set +# CONFIG_DEBUG_KOBJECT is not set +CONFIG_HAVE_DEBUG_BUGVERBOSE=y +CONFIG_DEBUG_BUGVERBOSE=y +# CONFIG_DEBUG_LIST is not set +# CONFIG_DEBUG_PI_LIST is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_CREDENTIALS is not set + +# +# RCU Debugging +# +# CONFIG_SPARSE_RCU_POINTER is not set +# CONFIG_TORTURE_TEST is not set +# CONFIG_RCU_TORTURE_TEST is not set +CONFIG_RCU_CPU_STALL_TIMEOUT=21 +CONFIG_RCU_CPU_STALL_VERBOSE=y +# CONFIG_RCU_CPU_STALL_INFO is not set +# CONFIG_RCU_TRACE is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_NOTIFIER_ERROR_INJECTION is not set +# CONFIG_FAULT_INJECTION is not set +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_TRACING_SUPPORT=y +# CONFIG_FTRACE is not set + +# +# Runtime Testing +# +# CONFIG_LKDTM is not set +# CONFIG_TEST_LIST_SORT is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_RBTREE_TEST is not set +# CONFIG_INTERVAL_TREE_TEST is not set +# CONFIG_PERCPU_TEST is not set +# CONFIG_ATOMIC64_SELFTEST is not set +# CONFIG_TEST_STRING_HELPERS is not set +# CONFIG_TEST_KSTRTOX is not set +# CONFIG_TEST_RHASHTABLE is not set +# CONFIG_DMA_API_DEBUG is not set +# CONFIG_TEST_LKM is not set +# CONFIG_TEST_USER_COPY is not set +# CONFIG_TEST_BPF is not set +# CONFIG_TEST_FIRMWARE is not set +# CONFIG_TEST_UDELAY is not set +# CONFIG_SAMPLES is not set +CONFIG_HAVE_ARCH_KGDB=y +# CONFIG_KGDB is not set +# CONFIG_STRICT_DEVMEM is not set +CONFIG_PID_IN_CONTEXTIDR=y +# CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET is not set +# CONFIG_DEBUG_SET_MODULE_RONX is not set + +# +# Security options +# +CONFIG_KEYS=y +# CONFIG_PERSISTENT_KEYRINGS is not set +# CONFIG_BIG_KEYS is not set +# CONFIG_ENCRYPTED_KEYS is not set +# CONFIG_KEYS_DEBUG_PROC_KEYS is not set +# CONFIG_SECURITY_DMESG_RESTRICT is not set +CONFIG_SECURITY=y +# CONFIG_SECURITYFS is not set +# CONFIG_SECURITY_NETWORK is not set +# CONFIG_SECURITY_PATH is not set +# CONFIG_SECURITY_SMACK is not set +# CONFIG_SECURITY_TOMOYO is not set +# CONFIG_SECURITY_APPARMOR is not set +# CONFIG_SECURITY_YAMA is not set +CONFIG_INTEGRITY=y +# CONFIG_INTEGRITY_SIGNATURE is not set +CONFIG_INTEGRITY_AUDIT=y +# CONFIG_IMA is not set +# CONFIG_EVM is not set +CONFIG_DEFAULT_SECURITY_DAC=y +CONFIG_DEFAULT_SECURITY="" +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_BLKCIPHER=y +CONFIG_CRYPTO_BLKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_PCOMP2=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +# CONFIG_CRYPTO_USER is not set +CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y +# CONFIG_CRYPTO_GF128MUL 
is not set +# CONFIG_CRYPTO_NULL is not set +# CONFIG_CRYPTO_PCRYPT is not set +CONFIG_CRYPTO_WORKQUEUE=y +CONFIG_CRYPTO_CRYPTD=y +# CONFIG_CRYPTO_MCRYPTD is not set +CONFIG_CRYPTO_AUTHENC=y +# CONFIG_CRYPTO_TEST is not set +CONFIG_CRYPTO_ABLK_HELPER=y + +# +# Authenticated Encryption with Associated Data +# +# CONFIG_CRYPTO_CCM is not set +# CONFIG_CRYPTO_GCM is not set +# CONFIG_CRYPTO_SEQIV is not set + +# +# Block modes +# +CONFIG_CRYPTO_CBC=y +# CONFIG_CRYPTO_CTR is not set +# CONFIG_CRYPTO_CTS is not set +# CONFIG_CRYPTO_ECB is not set +# CONFIG_CRYPTO_LRW is not set +# CONFIG_CRYPTO_PCBC is not set +# CONFIG_CRYPTO_XTS is not set + +# +# Hash modes +# +# CONFIG_CRYPTO_CMAC is not set +CONFIG_CRYPTO_HMAC=y +# CONFIG_CRYPTO_XCBC is not set +# CONFIG_CRYPTO_VMAC is not set + +# +# Digest +# +CONFIG_CRYPTO_CRC32C=y +# CONFIG_CRYPTO_CRC32 is not set +# CONFIG_CRYPTO_CRCT10DIF is not set +# CONFIG_CRYPTO_GHASH is not set +# CONFIG_CRYPTO_MD4 is not set +CONFIG_CRYPTO_MD5=y +# CONFIG_CRYPTO_MICHAEL_MIC is not set +# CONFIG_CRYPTO_RMD128 is not set +# CONFIG_CRYPTO_RMD160 is not set +# CONFIG_CRYPTO_RMD256 is not set +# CONFIG_CRYPTO_RMD320 is not set +CONFIG_CRYPTO_SHA1=y +# CONFIG_CRYPTO_SHA256 is not set +# CONFIG_CRYPTO_SHA512 is not set +# CONFIG_CRYPTO_TGR192 is not set +# CONFIG_CRYPTO_WP512 is not set + +# +# Ciphers +# +CONFIG_CRYPTO_AES=y +# CONFIG_CRYPTO_ANUBIS is not set +# CONFIG_CRYPTO_ARC4 is not set +# CONFIG_CRYPTO_BLOWFISH is not set +# CONFIG_CRYPTO_CAMELLIA is not set +# CONFIG_CRYPTO_CAST5 is not set +# CONFIG_CRYPTO_CAST6 is not set +CONFIG_CRYPTO_DES=y +# CONFIG_CRYPTO_FCRYPT is not set +# CONFIG_CRYPTO_KHAZAD is not set +# CONFIG_CRYPTO_SALSA20 is not set +# CONFIG_CRYPTO_SEED is not set +# CONFIG_CRYPTO_SERPENT is not set +# CONFIG_CRYPTO_TEA is not set +# CONFIG_CRYPTO_TWOFISH is not set + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=y +# CONFIG_CRYPTO_ZLIB is not set +# CONFIG_CRYPTO_LZO is not set +# CONFIG_CRYPTO_LZ4 is not set +# CONFIG_CRYPTO_LZ4HC is not set + +# +# Random Number Generation +# +CONFIG_CRYPTO_ANSI_CPRNG=y +# CONFIG_CRYPTO_DRBG_MENU is not set +# CONFIG_CRYPTO_USER_API_HASH is not set +# CONFIG_CRYPTO_USER_API_SKCIPHER is not set +CONFIG_CRYPTO_HW=y +# CONFIG_CRYPTO_DEV_CCP is not set +# CONFIG_ASYMMETRIC_KEY_TYPE is not set +CONFIG_ARM64_CRYPTO=y +CONFIG_CRYPTO_SHA1_ARM64_CE=y +CONFIG_CRYPTO_SHA2_ARM64_CE=y +CONFIG_CRYPTO_GHASH_ARM64_CE=y +CONFIG_CRYPTO_AES_ARM64_CE=y +CONFIG_CRYPTO_AES_ARM64_CE_CCM=y +CONFIG_CRYPTO_AES_ARM64_CE_BLK=y +CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y +# CONFIG_BINARY_PRINTF is not set + +# +# Library routines +# +CONFIG_BITREVERSE=y +CONFIG_GENERIC_STRNCPY_FROM_USER=y +CONFIG_GENERIC_STRNLEN_USER=y +CONFIG_GENERIC_NET_UTILS=y +CONFIG_GENERIC_PCI_IOMAP=y +CONFIG_GENERIC_IOMAP=y +CONFIG_GENERIC_IO=y +CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y +# CONFIG_CRC_CCITT is not set +CONFIG_CRC16=y +# CONFIG_CRC_T10DIF is not set +CONFIG_CRC_ITU_T=y +CONFIG_CRC32=y +# CONFIG_CRC32_SELFTEST is not set +CONFIG_CRC32_SLICEBY8=y +# CONFIG_CRC32_SLICEBY4 is not set +# CONFIG_CRC32_SARWATE is not set +# CONFIG_CRC32_BIT is not set +CONFIG_CRC7=y +# CONFIG_LIBCRC32C is not set +# CONFIG_CRC8 is not set +CONFIG_AUDIT_GENERIC=y +CONFIG_AUDIT_ARCH_COMPAT_GENERIC=y +CONFIG_AUDIT_COMPAT_GENERIC=y +# CONFIG_RANDOM32_SELFTEST is not set +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +CONFIG_LZ4_DECOMPRESS=y +CONFIG_XZ_DEC=y +CONFIG_XZ_DEC_X86=y +CONFIG_XZ_DEC_POWERPC=y +CONFIG_XZ_DEC_IA64=y +CONFIG_XZ_DEC_ARM=y 
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_XZ_DEC_BCJ=y
+# CONFIG_XZ_DEC_TEST is not set
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_DECOMPRESS_BZIP2=y
+CONFIG_DECOMPRESS_LZMA=y
+CONFIG_DECOMPRESS_XZ=y
+CONFIG_DECOMPRESS_LZO=y
+CONFIG_DECOMPRESS_LZ4=y
+CONFIG_GENERIC_ALLOCATOR=y
+CONFIG_ASSOCIATIVE_ARRAY=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT_MAP=y
+CONFIG_HAS_DMA=y
+CONFIG_CPU_RMAP=y
+CONFIG_DQL=y
+CONFIG_GLOB=y
+# CONFIG_GLOB_SELFTEST is not set
+CONFIG_NLATTR=y
+CONFIG_ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE=y
+CONFIG_AVERAGE=y
+# CONFIG_CORDIC is not set
+# CONFIG_DDR is not set
+CONFIG_LIBFDT=y
+CONFIG_OID_REGISTRY=y
+CONFIG_UCS2_STRING=y
+CONFIG_FONT_SUPPORT=y
+# CONFIG_FONTS is not set
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+CONFIG_ARCH_HAS_SG_CHAIN=y
diff --git a/packages/base/any/kernels/3.18.25/patches/add-kernel-patches-for-nxp-arm64-ls2080ardb-based-on.patch b/packages/base/any/kernels/3.18.25/patches/add-kernel-patches-for-nxp-arm64-ls2080ardb-based-on.patch
new file mode 100644
index 00000000..7942b14d
--- /dev/null
+++ b/packages/base/any/kernels/3.18.25/patches/add-kernel-patches-for-nxp-arm64-ls2080ardb-based-on.patch
@@ -0,0 +1,17982 @@
+From f64b882ce6cd659cc725a4097c39e5d97441127f Mon Sep 17 00:00:00 2001
+From: Shengzhou Liu
+Date: Mon, 1 Aug 2016 12:57:39 +0800
+Subject: [PATCH] Add kernel patches for nxp arm64 ls2080ardb based on 3.18.25
+
+This patch integrates a large set of patches to support miscellaneous
+functionality (USB, PCIe, IOMMU, GIC, reboot, etc.) on the arm64
+LS2080ARDB platform.
+---
+ Documentation/IRQ-domain.txt | 71 +
+ Documentation/devicetree/bindings/arm/fsl.txt | 15 +
+ .../devicetree/bindings/pci/designware-pcie.txt | 3 +-
+ .../devicetree/bindings/powerpc/fsl/board.txt | 14 +-
+ Documentation/devicetree/bindings/usb/dwc3.txt | 3 +-
+ MAINTAINERS | 19 +
+ arch/arm/Kconfig | 3 +
+ arch/arm/Makefile | 8 +-
+ arch/arm/boot/dts/Makefile | 12 +-
+ arch/arm/include/asm/dma-mapping.h | 10 +-
+ arch/arm/include/asm/mach/pci.h | 12 +-
+ arch/arm/include/asm/pci.h | 7 -
+ arch/arm/kernel/bios32.c | 39 +-
+ arch/arm/mach-iop13xx/msi.c | 10 +-
+ arch/arm64/Kconfig | 7 +-
+ arch/arm64/Makefile | 11 +-
+ arch/arm64/boot/dts/Makefile | 1 +
+ arch/arm64/boot/dts/arm64-nxp-ls2080ardb-r0.dts | 249 +++
+ arch/arm64/boot/dts/fsl-ls2080a.dtsi | 729 +++++++++
+ arch/arm64/boot/dts/include/dt-bindings | 1 +
+ arch/arm64/configs/defconfig | 1 +
+ arch/arm64/include/asm/mmu_context.h | 43 +
+ arch/arm64/include/asm/page.h | 6 +-
+ arch/arm64/include/asm/pgtable-hwdef.h | 7 +-
+ arch/arm64/kernel/head.S | 37 +
+ arch/arm64/kernel/smp.c | 1 +
+ arch/arm64/mm/mmu.c | 7 +-
+ arch/arm64/mm/proc-macros.S | 10 +
+ arch/arm64/mm/proc.S | 3 +
+ arch/ia64/kernel/msi_ia64.c | 8 +-
+ arch/ia64/sn/kernel/msi_sn.c | 8 +-
+ arch/mips/pci/msi-octeon.c | 2 +-
+ arch/mips/pci/msi-xlp.c | 12 +-
+ arch/mips/pci/pci-xlr.c | 2 +-
+ arch/powerpc/platforms/512x/mpc5121_ads_cpld.c | 3 +-
+ arch/powerpc/platforms/cell/axon_msi.c | 8 +-
+ arch/powerpc/platforms/cell/interrupt.c | 3 +-
+ arch/powerpc/platforms/embedded6xx/flipper-pic.c | 3 +-
+ arch/powerpc/platforms/powermac/pic.c | 3 +-
+ arch/powerpc/platforms/powernv/pci.c | 2 +-
+ arch/powerpc/platforms/ps3/interrupt.c | 3 +-
+ arch/powerpc/platforms/pseries/msi.c | 2 +-
+ arch/powerpc/sysdev/ehv_pic.c | 3 +-
+ arch/powerpc/sysdev/fsl_msi.c | 6 +-
+ arch/powerpc/sysdev/i8259.c | 3 +-
+ arch/powerpc/sysdev/ipic.c | 3 +-
+ arch/powerpc/sysdev/mpic.c | 3 +-
+ arch/powerpc/sysdev/mpic_pasemi_msi.c | 6 +-
+ arch/powerpc/sysdev/mpic_u3msi.c | 6 +- +
arch/powerpc/sysdev/ppc4xx_hsta_msi.c | 2 +- + arch/powerpc/sysdev/ppc4xx_msi.c | 2 +- + arch/powerpc/sysdev/qe_lib/qe_ic.c | 3 +- + arch/powerpc/sysdev/xics/ics-opal.c | 2 +- + arch/powerpc/sysdev/xics/ics-rtas.c | 2 +- + arch/powerpc/sysdev/xics/xics-common.c | 3 +- + arch/s390/pci/pci.c | 10 +- + arch/sparc/kernel/pci_msi.c | 10 +- + arch/tile/kernel/pci_gx.c | 8 +- + arch/x86/include/asm/x86_init.h | 3 - + arch/x86/kernel/apic/io_apic.c | 8 +- + arch/x86/kernel/x86_init.c | 10 - + arch/x86/pci/bus_numa.c | 4 +- + arch/x86/pci/xen.c | 19 +- + drivers/acpi/acpi_lpss.c | 8 +- + drivers/acpi/acpi_platform.c | 4 +- + drivers/acpi/resource.c | 17 +- + drivers/base/core.c | 3 + + drivers/base/platform.c | 1 + + drivers/dma/acpi-dma.c | 10 +- + drivers/iommu/Kconfig | 34 +- + drivers/iommu/Makefile | 2 + + drivers/iommu/amd_iommu.c | 6 +- + drivers/iommu/arm-smmu.c | 1382 ++++++++--------- + drivers/iommu/exynos-iommu.c | 2 +- + drivers/iommu/fsl_pamu.c | 1 - + drivers/iommu/intel-iommu.c | 1 + + drivers/iommu/io-pgtable-arm.c | 986 ++++++++++++ + drivers/iommu/io-pgtable.c | 82 + + drivers/iommu/io-pgtable.h | 143 ++ + drivers/iommu/iommu.c | 111 +- + drivers/iommu/ipmmu-vmsa.c | 2 +- + drivers/iommu/irq_remapping.c | 8 - + drivers/iommu/msm_iommu.c | 1 + + drivers/iommu/of_iommu.c | 95 ++ + drivers/iommu/omap-iommu.c | 1 + + drivers/iommu/shmobile-iommu.c | 1 + + drivers/iommu/shmobile-ipmmu.c | 1 - + drivers/iommu/tegra-gart.c | 1 - + drivers/iommu/tegra-smmu.c | 2 +- + drivers/irqchip/Kconfig | 4 + + drivers/irqchip/Makefile | 1 + + drivers/irqchip/irq-armada-370-xp.c | 16 +- + drivers/irqchip/irq-atmel-aic.c | 40 +- + drivers/irqchip/irq-atmel-aic5.c | 65 +- + drivers/irqchip/irq-gic-v3-its.c | 1628 ++++++++++++++++++++ + drivers/irqchip/irq-gic-v3.c | 114 +- + drivers/irqchip/irq-sunxi-nmi.c | 4 +- + drivers/irqchip/irq-tb10x.c | 4 +- + drivers/of/device.c | 84 + + drivers/of/irq.c | 21 + + drivers/of/of_pci.c | 34 +- + drivers/of/platform.c | 139 +- + drivers/pci/Kconfig | 6 + + drivers/pci/bus.c | 18 +- + drivers/pci/host-bridge.c | 22 +- + drivers/pci/host/Kconfig | 17 + + drivers/pci/host/Makefile | 3 + + drivers/pci/host/pci-dra7xx.c | 8 +- + drivers/pci/host/pci-exynos.c | 5 +- + drivers/pci/host/pci-host-generic.c | 229 +-- + drivers/pci/host/pci-keystone-dw.c | 37 +- + drivers/pci/host/pci-keystone.h | 4 +- + drivers/pci/host/pci-layerscape.c | 669 ++++++++ + drivers/pci/host/pci-layerscape.h | 13 + + drivers/pci/host/pci-mvebu.c | 17 +- + drivers/pci/host/pci-tegra.c | 22 +- + drivers/pci/host/pci-xgene-msi.c | 595 +++++++ + drivers/pci/host/pci-xgene.c | 25 +- + drivers/pci/host/pcie-designware.c | 657 +++----- + drivers/pci/host/pcie-designware.h | 23 +- + drivers/pci/host/pcie-rcar.c | 22 +- + drivers/pci/host/pcie-xilinx.c | 64 +- + drivers/pci/msi.c | 528 +++++-- + drivers/pci/pci.h | 21 + + drivers/pci/probe.c | 28 +- + drivers/pci/quirks.c | 10 +- + drivers/pci/search.c | 5 +- + drivers/pci/xen-pcifront.c | 2 +- + drivers/power/reset/Kconfig | 6 + + drivers/power/reset/Makefile | 1 + + drivers/power/reset/ls-reboot.c | 93 ++ + drivers/usb/core/config.c | 3 +- + drivers/usb/core/driver.c | 6 +- + drivers/usb/core/hcd-pci.c | 9 + + drivers/usb/core/hub.c | 66 +- + drivers/usb/core/quirks.c | 6 + + drivers/usb/dwc3/core.c | 76 +- + drivers/usb/dwc3/core.h | 8 + + drivers/usb/dwc3/host.c | 6 + + drivers/usb/host/xhci-pci.c | 114 +- + drivers/usb/host/xhci-ring.c | 6 +- + drivers/usb/host/xhci.c | 28 +- + drivers/usb/host/xhci.h | 3 + + drivers/vfio/pci/vfio_pci_intrs.c | 2 +- 
+ include/asm-generic/msi.h | 32 +
+ include/asm-generic/vmlinux.lds.h | 2 +
+ include/linux/acpi.h | 6 +-
+ include/linux/device.h | 24 +
+ include/linux/dma-mapping.h | 13 +-
+ include/linux/fsl/guts.h | 192 +++
+ include/linux/iommu.h | 75 +-
+ include/linux/iopoll.h | 144 ++
+ include/linux/irq.h | 67 +-
+ include/linux/irqchip/arm-gic-v3.h | 153 ++
+ include/linux/irqdomain.h | 126 +-
+ include/linux/irqhandler.h | 14 +
+ include/linux/msi.h | 199 ++-
+ include/linux/of_device.h | 3 +
+ include/linux/of_iommu.h | 25 +
+ include/linux/of_irq.h | 1 +
+ include/linux/of_pci.h | 15 +-
+ include/linux/of_platform.h | 6 +
+ include/linux/pci.h | 21 +-
+ include/linux/resource_ext.h | 77 +
+ include/linux/usb/quirks.h | 3 +
+ include/trace/events/iommu.h | 31 +-
+ kernel/irq/Kconfig | 15 +
+ kernel/irq/Makefile | 1 +
+ kernel/irq/chip.c | 105 ++
+ kernel/irq/generic-chip.c | 36 +-
+ kernel/irq/irqdomain.c | 585 ++++++-
+ kernel/irq/manage.c | 2 +
+ kernel/irq/msi.c | 347 +++++
+ kernel/resource.c | 25 +
+ scripts/Kbuild.include | 6 +
+ scripts/Makefile.lib | 12 -
+ 176 files changed, 10196 insertions(+), 2223 deletions(-)
+ create mode 100644 arch/arm64/boot/dts/arm64-nxp-ls2080ardb-r0.dts
+ create mode 100644 arch/arm64/boot/dts/fsl-ls2080a.dtsi
+ create mode 120000 arch/arm64/boot/dts/include/dt-bindings
+ create mode 100644 drivers/iommu/io-pgtable-arm.c
+ create mode 100644 drivers/iommu/io-pgtable.c
+ create mode 100644 drivers/iommu/io-pgtable.h
+ create mode 100644 drivers/irqchip/irq-gic-v3-its.c
+ create mode 100644 drivers/pci/host/pci-layerscape.c
+ create mode 100644 drivers/pci/host/pci-layerscape.h
+ create mode 100644 drivers/pci/host/pci-xgene-msi.c
+ create mode 100644 drivers/power/reset/ls-reboot.c
+ create mode 100644 include/asm-generic/msi.h
+ create mode 100644 include/linux/fsl/guts.h
+ create mode 100644 include/linux/iopoll.h
+ create mode 100644 include/linux/irqhandler.h
+ create mode 100644 include/linux/resource_ext.h
+ create mode 100644 kernel/irq/msi.c
+
+diff --git a/Documentation/IRQ-domain.txt b/Documentation/IRQ-domain.txt
+index 8a8b82c..39cfa72 100644
+--- a/Documentation/IRQ-domain.txt
++++ b/Documentation/IRQ-domain.txt
+@@ -151,3 +151,74 @@ used and no descriptor gets allocated it is very important to make sure
+ that the driver using the simple domain call irq_create_mapping()
+ before any irq_find_mapping() since the latter will actually work
+ for the static IRQ assignment case.
++
++==== Hierarchy IRQ domain ====
++On some architectures, there may be multiple interrupt controllers
++involved in delivering an interrupt from the device to the target CPU.
++Let's look at a typical interrupt delivery path on x86 platforms:
++
++Device -> IOAPIC -> Interrupt remapping Controller -> Local APIC -> CPU
++
++There are three interrupt controllers involved:
++1) IOAPIC controller
++2) Interrupt remapping controller
++3) Local APIC controller
++
++To support such a hardware topology, and to make the software
++architecture match the hardware architecture, an irq_domain data
++structure is built for each interrupt controller, and those irq_domains
++are organized into a hierarchy. When building the irq_domain hierarchy,
++the irq_domain nearest the device is the child and the irq_domain
++nearest the CPU is the parent. So for the example above, a hierarchy
++structure like the following will be built:
++ CPU Vector irq_domain (root irq_domain to manage CPU vectors)
++      ^
++      |
++ Interrupt Remapping irq_domain (manage irq_remapping entries)
++      ^
++      |
++ IOAPIC irq_domain (manage IOAPIC delivery entries/pins)
++
++There are four major interfaces to use hierarchy irq_domain:
++1) irq_domain_alloc_irqs(): allocate IRQ descriptors and interrupt
++ controller related resources to deliver these interrupts.
++2) irq_domain_free_irqs(): free IRQ descriptors and interrupt controller
++ related resources associated with these interrupts.
++3) irq_domain_activate_irq(): activate interrupt controller hardware to
++ deliver the interrupt.
++4) irq_domain_deactivate_irq(): deactivate interrupt controller hardware
++ to stop delivering the interrupt.
++
++The following changes are needed to support hierarchy irq_domain:
++1) a new field 'parent' is added to struct irq_domain; it is used to
++ maintain irq_domain hierarchy information.
++2) a new field 'parent_data' is added to struct irq_data; it is used to
++ build hierarchy irq_data to match the hierarchy irq_domains. The
++ irq_data stores the irq_domain pointer and the hardware irq number.
++3) new callbacks are added to struct irq_domain_ops to support hierarchy
++ irq_domain operations.
++
++With hierarchy irq_domain and hierarchy irq_data support in place, an
++irq_domain structure is built for each interrupt controller, and an
++irq_data structure is allocated for each irq_domain associated with an
++IRQ. Now we can go one step further and support stacked (hierarchy)
++irq_chips. That is, an irq_chip is associated with each irq_data along
++the hierarchy. A child irq_chip may implement a required action by
++itself or by cooperating with its parent irq_chip.
++
++With stacked irq_chips, an interrupt controller driver only needs to
++deal with the hardware managed by itself and may ask for services from
++its parent irq_chip when needed. This yields a much cleaner software
++architecture.
++
++For an interrupt controller driver to support hierarchy irq_domain, it
++needs to:
++1) Implement irq_domain_ops.alloc and irq_domain_ops.free.
++2) Optionally implement irq_domain_ops.activate and
++ irq_domain_ops.deactivate.
++3) Optionally implement an irq_chip to manage the interrupt controller
++ hardware.
++4) There is no need to implement irq_domain_ops.map and
++ irq_domain_ops.unmap; they are unused with hierarchy irq_domain.
++
++Hierarchy irq_domain may also be used to support other architectures,
++such as ARM and ARM64.
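As a concrete illustration of the recipe in the documentation above, here is a minimal sketch of the driver-side callbacks for a child domain in an irq_domain hierarchy. All foo_* names are hypothetical; the helpers used (irq_chip_mask_parent(), irq_chip_unmask_parent(), irq_chip_eoi_parent(), irq_domain_set_hwirq_and_chip(), irq_domain_alloc_irqs_parent(), irq_domain_free_irqs_common()) are the kernel/irq additions this patch backports:

#include <linux/irq.h>
#include <linux/irqdomain.h>

static struct irq_chip foo_irq_chip = {
	.name		= "FOO",
	/* Stacked irq_chip: forward the basic operations to the parent chip. */
	.irq_mask	= irq_chip_mask_parent,
	.irq_unmask	= irq_chip_unmask_parent,
	.irq_eoi	= irq_chip_eoi_parent,
};

/* irq_domain_ops.alloc: set up this level, then allocate in the parent. */
static int foo_domain_alloc(struct irq_domain *domain, unsigned int virq,
			    unsigned int nr_irqs, void *arg)
{
	/* Hypothetical: a real driver decodes 'arg' to find its hwirq base. */
	irq_hw_number_t hwirq = 0;
	unsigned int i;

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
					      &foo_irq_chip, NULL);

	/* Recurse: let the parent domain (e.g. the GIC) do its part. */
	return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
}

/* irq_domain_ops.free: reset this level's irq_data, then free the parent's. */
static void foo_domain_free(struct irq_domain *domain, unsigned int virq,
			    unsigned int nr_irqs)
{
	irq_domain_free_irqs_common(domain, virq, nr_irqs);
}

static const struct irq_domain_ops foo_domain_ops = {
	.alloc	= foo_domain_alloc,
	.free	= foo_domain_free,
};

Such a domain would be registered beneath its parent with irq_domain_add_hierarchy(parent, 0, size, node, &foo_domain_ops, priv); the irq-gic-v3-its.c driver added later in this patch is a full-scale example of the same interfaces.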
+diff --git a/Documentation/devicetree/bindings/arm/fsl.txt b/Documentation/devicetree/bindings/arm/fsl.txt +index e935d7d..5c9f338 100644 +--- a/Documentation/devicetree/bindings/arm/fsl.txt ++++ b/Documentation/devicetree/bindings/arm/fsl.txt +@@ -74,3 +74,18 @@ Required root node properties: + i.MX6q generic board + Required root node properties: + - compatible = "fsl,imx6q"; ++ +++Freescale ARMv8 based Layerscape SoC family Device Tree Bindings +++---------------------------------------------------------------- ++ ++LS2080A ARMv8 based Simulator model ++Required root node properties: ++ - compatible = "fsl,ls2080a-simu", "fsl,ls2080a"; ++ ++LS2080A ARMv8 based QDS Board ++Required root node properties: ++ - compatible = "fsl,ls2080a-qds", "fsl,ls2080a"; ++ ++LS2080A ARMv8 based RDB Board ++Required root node properties: ++ - compatible = "fsl,ls2080a-rdb", "fsl,ls2080a"; +diff --git a/Documentation/devicetree/bindings/pci/designware-pcie.txt b/Documentation/devicetree/bindings/pci/designware-pcie.txt +index 9f4faa8..0036ab3 100644 +--- a/Documentation/devicetree/bindings/pci/designware-pcie.txt ++++ b/Documentation/devicetree/bindings/pci/designware-pcie.txt +@@ -14,7 +14,6 @@ Required properties: + - interrupt-map-mask and interrupt-map: standard PCI properties + to define the mapping of the PCIe interface to interrupt + numbers. +-- num-lanes: number of lanes to use + - clocks: Must contain an entry for each entry in clock-names. + See ../clocks/clock-bindings.txt for details. + - clock-names: Must include the following entries: +@@ -22,6 +21,8 @@ Required properties: + - "pcie_bus" + + Optional properties: ++- num-lanes: number of lanes to use (this property should be specified unless ++ the link is brought already up in BIOS) + - reset-gpio: gpio pin number of power good signal + - bus-range: PCI bus numbers covered (it is recommended for new devicetrees to + specify this property, to keep backwards compatibility a range of 0x00-0xff +diff --git a/Documentation/devicetree/bindings/powerpc/fsl/board.txt b/Documentation/devicetree/bindings/powerpc/fsl/board.txt +index cff38bd..89c90f4 100644 +--- a/Documentation/devicetree/bindings/powerpc/fsl/board.txt ++++ b/Documentation/devicetree/bindings/powerpc/fsl/board.txt +@@ -21,11 +21,14 @@ Example: + + This is the memory-mapped registers for on board FPGA. + +-Required properities: ++Required properties: + - compatible: should be a board-specific string followed by a string + indicating the type of FPGA. Example: +- "fsl,-fpga", "fsl,fpga-pixis" ++ "fsl,-fpga", "fsl,fpga-pixis" or ++ "fsl,-fpga", "fsl,fpga-qixis" + - reg: should contain the address and the length of the FPGA register set. ++ ++Optional properties: + - interrupt-parent: should specify phandle for the interrupt controller. + - interrupts: should specify event (wakeup) IRQ. + +@@ -38,6 +41,13 @@ Example (P1022DS): + interrupts = <8 8 0 0>; + }; + ++Example (LS2080A-RDB): ++ ++ cpld@3,0 { ++ compatible = "fsl,ls2080ardb-fpga", "fsl,fpga-qixis"; ++ reg = <0x3 0 0x10000>; ++ }; ++ + * Freescale BCSR GPIO banks + + Some BCSR registers act as simple GPIO controllers, each such +diff --git a/Documentation/devicetree/bindings/usb/dwc3.txt b/Documentation/devicetree/bindings/usb/dwc3.txt +index 471366d..1f9900c 100644 +--- a/Documentation/devicetree/bindings/usb/dwc3.txt ++++ b/Documentation/devicetree/bindings/usb/dwc3.txt +@@ -1,6 +1,7 @@ + synopsys DWC3 CORE + +-DWC3- USB3 CONTROLLER ++DWC3- USB3 CONTROLLER. 
Complies to the generic USB binding properties ++ as described in 'usb/generic.txt' + + Required properties: + - compatible: must be "snps,dwc3" +diff --git a/MAINTAINERS b/MAINTAINERS +index c721042..1ae7362 100644 +--- a/MAINTAINERS ++++ b/MAINTAINERS +@@ -1562,6 +1562,7 @@ M: Will Deacon + L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) + S: Maintained + F: drivers/iommu/arm-smmu.c ++F: drivers/iommu/io-pgtable-arm.c + + ARM64 PORT (AARCH64 ARCHITECTURE) + M: Catalin Marinas +@@ -7047,6 +7048,16 @@ S: Maintained + F: Documentation/devicetree/bindings/pci/xgene-pci.txt + F: drivers/pci/host/pci-xgene.c + ++PCI DRIVER FOR FREESCALE LAYERSCAPE ++M: Minghuan Lian ++M: Mingkai Hu ++M: Roy Zang ++L: linuxppc-dev@lists.ozlabs.org ++L: linux-pci@vger.kernel.org ++L: linux-arm-kernel@lists.infradead.org ++S: Maintained ++F: drivers/pci/host/*layerscape* ++ + PCI DRIVER FOR IMX6 + M: Richard Zhu + M: Lucas Stach +@@ -7122,6 +7133,14 @@ L: linux-pci@vger.kernel.org + S: Maintained + F: drivers/pci/host/*spear* + ++PCI MSI DRIVER FOR APPLIEDMICRO XGENE ++M: Duc Dang ++L: linux-pci@vger.kernel.org ++L: linux-arm-kernel@lists.infradead.org ++S: Maintained ++F: Documentation/devicetree/bindings/pci/xgene-pci-msi.txt ++F: drivers/pci/host/pci-xgene-msi.c ++ + PCMCIA SUBSYSTEM + P: Linux PCMCIA Team + L: linux-pcmcia@lists.infradead.org +diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig +index 89c4b5c..29544f0 100644 +--- a/arch/arm/Kconfig ++++ b/arch/arm/Kconfig +@@ -1292,6 +1292,9 @@ config PCI_DOMAINS + bool + depends on PCI + ++config PCI_DOMAINS_GENERIC ++ def_bool PCI_DOMAINS ++ + config PCI_NANOENGINE + bool "BSE nanoEngine PCI support" + depends on SA1100_NANOENGINE +diff --git a/arch/arm/Makefile b/arch/arm/Makefile +index b5d7988..93a30a2 100644 +--- a/arch/arm/Makefile ++++ b/arch/arm/Makefile +@@ -320,8 +320,12 @@ $(INSTALL_TARGETS): + $(Q)$(MAKE) $(build)=$(boot)/dts MACHINE=$(MACHINE) $(boot)/dts/$@ + + PHONY += dtbs dtbs_install +-dtbs dtbs_install: prepare scripts +- $(Q)$(MAKE) $(build)=$(boot)/dts MACHINE=$(MACHINE) $@ ++ ++dtbs: prepare scripts ++ $(Q)$(MAKE) $(build)=$(boot)/dts MACHINE=$(MACHINE) ++ ++dtbs_install: ++ $(Q)$(MAKE) $(dtbinst)=$(boot)/dts MACHINE=$(MACHINE) + + # We use MRPROPER_FILES and CLEAN_FILES now + archclean: +diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile +index 38c89ca..6e784fa 100644 +--- a/arch/arm/boot/dts/Makefile ++++ b/arch/arm/boot/dts/Makefile +@@ -517,15 +517,7 @@ dtb-$(CONFIG_MACH_DOVE) += dove-cm-a510.dtb \ + dove-dove-db.dtb + dtb-$(CONFIG_ARCH_MEDIATEK) += mt6589-aquaris5.dtb + +-targets += dtbs dtbs_install +-targets += $(dtb-y) + endif + +-# *.dtb used to be generated in the directory above. Clean out the +-# old build results so people don't accidentally use them. 
+-dtbs: $(addprefix $(obj)/, $(dtb-y)) +- $(Q)rm -f $(obj)/../*.dtb +- +-clean-files := *.dtb +- +-dtbs_install: $(addsuffix _dtbinst_, $(dtb-y)) ++always := $(dtb-y) ++clean-files := *.dtb +diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h +index 85738b2..f3c0d95 100644 +--- a/arch/arm/include/asm/dma-mapping.h ++++ b/arch/arm/include/asm/dma-mapping.h +@@ -121,12 +121,14 @@ static inline unsigned long dma_max_pfn(struct device *dev) + } + #define dma_max_pfn(dev) dma_max_pfn(dev) + +-static inline int set_arch_dma_coherent_ops(struct device *dev) ++static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base, ++ u64 size, struct iommu_ops *iommu, ++ bool coherent) + { +- set_dma_ops(dev, &arm_coherent_dma_ops); +- return 0; ++ if (coherent) ++ set_dma_ops(dev, &arm_coherent_dma_ops); + } +-#define set_arch_dma_coherent_ops(dev) set_arch_dma_coherent_ops(dev) ++#define arch_setup_dma_ops arch_setup_dma_ops + + static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) + { +diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h +index 7fc4278..c074e7a 100644 +--- a/arch/arm/include/asm/mach/pci.h ++++ b/arch/arm/include/asm/mach/pci.h +@@ -19,9 +19,7 @@ struct pci_bus; + struct device; + + struct hw_pci { +-#ifdef CONFIG_PCI_DOMAINS +- int domain; +-#endif ++ struct msi_controller *msi_ctrl; + struct pci_ops *ops; + int nr_controllers; + void **private_data; +@@ -36,16 +34,14 @@ struct hw_pci { + resource_size_t start, + resource_size_t size, + resource_size_t align); +- void (*add_bus)(struct pci_bus *bus); +- void (*remove_bus)(struct pci_bus *bus); + }; + + /* + * Per-controller structure + */ + struct pci_sys_data { +-#ifdef CONFIG_PCI_DOMAINS +- int domain; ++#ifdef CONFIG_PCI_MSI ++ struct msi_controller *msi_ctrl; + #endif + struct list_head node; + int busnr; /* primary bus number */ +@@ -65,8 +61,6 @@ struct pci_sys_data { + resource_size_t start, + resource_size_t size, + resource_size_t align); +- void (*add_bus)(struct pci_bus *bus); +- void (*remove_bus)(struct pci_bus *bus); + void *private_data; /* platform controller private data */ + }; + +diff --git a/arch/arm/include/asm/pci.h b/arch/arm/include/asm/pci.h +index 7e95d85..585dc33 100644 +--- a/arch/arm/include/asm/pci.h ++++ b/arch/arm/include/asm/pci.h +@@ -18,13 +18,6 @@ static inline int pcibios_assign_all_busses(void) + } + + #ifdef CONFIG_PCI_DOMAINS +-static inline int pci_domain_nr(struct pci_bus *bus) +-{ +- struct pci_sys_data *root = bus->sysdata; +- +- return root->domain; +-} +- + static inline int pci_proc_domain(struct pci_bus *bus) + { + return pci_domain_nr(bus); +diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c +index 17a26c1..a5cd259 100644 +--- a/arch/arm/kernel/bios32.c ++++ b/arch/arm/kernel/bios32.c +@@ -18,6 +18,15 @@ + + static int debug_pci; + ++#ifdef CONFIG_PCI_MSI ++struct msi_controller *pcibios_msi_controller(struct pci_dev *dev) ++{ ++ struct pci_sys_data *sysdata = dev->bus->sysdata; ++ ++ return sysdata->msi_ctrl; ++} ++#endif ++ + /* + * We can't use pci_get_device() here since we are + * called from interrupt context. 
+@@ -360,20 +369,6 @@ void pcibios_fixup_bus(struct pci_bus *bus) + } + EXPORT_SYMBOL(pcibios_fixup_bus); + +-void pcibios_add_bus(struct pci_bus *bus) +-{ +- struct pci_sys_data *sys = bus->sysdata; +- if (sys->add_bus) +- sys->add_bus(bus); +-} +- +-void pcibios_remove_bus(struct pci_bus *bus) +-{ +- struct pci_sys_data *sys = bus->sysdata; +- if (sys->remove_bus) +- sys->remove_bus(bus); +-} +- + /* + * Swizzle the device pin each time we cross a bridge. If a platform does + * not provide a swizzle function, we perform the standard PCI swizzling. +@@ -427,17 +422,16 @@ static int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) + static int pcibios_init_resources(int busnr, struct pci_sys_data *sys) + { + int ret; +- struct pci_host_bridge_window *window; ++ struct resource_entry *window; + + if (list_empty(&sys->resources)) { + pci_add_resource_offset(&sys->resources, + &iomem_resource, sys->mem_offset); + } + +- list_for_each_entry(window, &sys->resources, list) { ++ resource_list_for_each_entry(window, &sys->resources) + if (resource_type(window->res) == IORESOURCE_IO) + return 0; +- } + + sys->io_res.start = (busnr * SZ_64K) ? : pcibios_min_io; + sys->io_res.end = (busnr + 1) * SZ_64K - 1; +@@ -468,15 +462,13 @@ static void pcibios_init_hw(struct device *parent, struct hw_pci *hw, + if (!sys) + panic("PCI: unable to allocate sys data!"); + +-#ifdef CONFIG_PCI_DOMAINS +- sys->domain = hw->domain; ++#ifdef CONFIG_PCI_MSI ++ sys->msi_ctrl = hw->msi_ctrl; + #endif + sys->busnr = busnr; + sys->swizzle = hw->swizzle; + sys->map_irq = hw->map_irq; + sys->align_resource = hw->align_resource; +- sys->add_bus = hw->add_bus; +- sys->remove_bus = hw->remove_bus; + INIT_LIST_HEAD(&sys->resources); + + if (hw->private_data) +@@ -494,8 +486,9 @@ static void pcibios_init_hw(struct device *parent, struct hw_pci *hw, + if (hw->scan) + sys->bus = hw->scan(nr, sys); + else +- sys->bus = pci_scan_root_bus(parent, sys->busnr, +- hw->ops, sys, &sys->resources); ++ sys->bus = pci_scan_root_bus_msi(parent, ++ sys->busnr, hw->ops, sys, ++ &sys->resources, hw->msi_ctrl); + + if (!sys->bus) + panic("PCI: unable to scan bus!"); +diff --git a/arch/arm/mach-iop13xx/msi.c b/arch/arm/mach-iop13xx/msi.c +index e7730cf..9f89e76 100644 +--- a/arch/arm/mach-iop13xx/msi.c ++++ b/arch/arm/mach-iop13xx/msi.c +@@ -126,10 +126,10 @@ static void iop13xx_msi_nop(struct irq_data *d) + static struct irq_chip iop13xx_msi_chip = { + .name = "PCI-MSI", + .irq_ack = iop13xx_msi_nop, +- .irq_enable = unmask_msi_irq, +- .irq_disable = mask_msi_irq, +- .irq_mask = mask_msi_irq, +- .irq_unmask = unmask_msi_irq, ++ .irq_enable = pci_msi_unmask_irq, ++ .irq_disable = pci_msi_mask_irq, ++ .irq_mask = pci_msi_mask_irq, ++ .irq_unmask = pci_msi_unmask_irq, + }; + + int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc) +@@ -153,7 +153,7 @@ int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc) + id = iop13xx_cpu_id(); + msg.data = (id << IOP13XX_MU_MIMR_CORE_SELECT) | (irq & 0x7f); + +- write_msi_msg(irq, &msg); ++ pci_write_msi_msg(irq, &msg); + irq_set_chip_and_handler(irq, &iop13xx_msi_chip, handle_simple_irq); + + return 0; +diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig +index 00b9c48..08e1287 100644 +--- a/arch/arm64/Kconfig ++++ b/arch/arm64/Kconfig +@@ -14,6 +14,7 @@ config ARM64 + select ARM_GIC + select AUDIT_ARCH_COMPAT_GENERIC + select ARM_GIC_V3 ++ select ARM_GIC_V3_ITS if PCI_MSI + select BUILDTIME_EXTABLE_SORT + select CLONE_BACKWARDS + select COMMON_CLK +@@ -166,6 +167,11 @@ config 
ARCH_XGENE + help + This enables support for AppliedMicro X-Gene SOC Family + ++config ARCH_LAYERSCAPE ++ bool "ARMv8 based Freescale Layerscape SoC family" ++ help ++ This enables support for the Freescale Layerscape SoC family. ++ + endmenu + + menu "Bus support" +@@ -366,7 +372,6 @@ config ARM64_VA_BITS_42 + + config ARM64_VA_BITS_48 + bool "48-bit" +- depends on !ARM_SMMU + + endchoice + +diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile +index 2d54c55..7cf8a29 100644 +--- a/arch/arm64/Makefile ++++ b/arch/arm64/Makefile +@@ -74,8 +74,13 @@ zinstall install: vmlinux + %.dtb: scripts + $(Q)$(MAKE) $(build)=$(boot)/dts $(boot)/dts/$@ + +-dtbs: scripts +- $(Q)$(MAKE) $(build)=$(boot)/dts dtbs ++PHONY += dtbs dtbs_install ++ ++dtbs: prepare scripts ++ $(Q)$(MAKE) $(build)=$(boot)/dts ++ ++dtbs_install: ++ $(Q)$(MAKE) $(dtbinst)=$(boot)/dts + + PHONY += vdso_install + vdso_install: +@@ -84,11 +89,13 @@ vdso_install: + # We use MRPROPER_FILES and CLEAN_FILES now + archclean: + $(Q)$(MAKE) $(clean)=$(boot) ++ $(Q)$(MAKE) $(clean)=$(boot)/dts + + define archhelp + echo '* Image.gz - Compressed kernel image (arch/$(ARCH)/boot/Image.gz)' + echo ' Image - Uncompressed kernel image (arch/$(ARCH)/boot/Image)' + echo '* dtbs - Build device tree blobs for enabled boards' ++ echo ' dtbs_install - Install dtbs to $(INSTALL_DTBS_PATH)' + echo ' install - Install uncompressed kernel' + echo ' zinstall - Install compressed kernel' + echo ' Install using (your) ~/bin/installkernel or' +diff --git a/arch/arm64/boot/dts/Makefile b/arch/arm64/boot/dts/Makefile +index f8001a6..93f2fc3 100644 +--- a/arch/arm64/boot/dts/Makefile ++++ b/arch/arm64/boot/dts/Makefile +@@ -1,6 +1,7 @@ + dtb-$(CONFIG_ARCH_THUNDER) += thunder-88xx.dtb + dtb-$(CONFIG_ARCH_VEXPRESS) += rtsm_ve-aemv8a.dtb foundation-v8.dtb + dtb-$(CONFIG_ARCH_XGENE) += apm-mustang.dtb ++dtb-$(CONFIG_ARCH_LAYERSCAPE) += arm64-nxp-ls2080ardb-r0.dtb + + targets += dtbs + targets += $(dtb-y) +diff --git a/arch/arm64/boot/dts/arm64-nxp-ls2080ardb-r0.dts b/arch/arm64/boot/dts/arm64-nxp-ls2080ardb-r0.dts +new file mode 100644 +index 0000000..5da2834 +--- /dev/null ++++ b/arch/arm64/boot/dts/arm64-nxp-ls2080ardb-r0.dts +@@ -0,0 +1,249 @@ ++/* ++ * Device Tree file for NXP LS2080a RDB board ++ * ++ */ ++ ++/dts-v1/; ++ ++#include "fsl-ls2080a.dtsi" ++ ++/ { ++ model = "arm64-nxp-ls2080ardb-r0"; ++ compatible = "fsl,ls2080a-rdb", "fsl,ls2080a"; ++}; ++ ++&esdhc { ++ status = "okay"; ++}; ++ ++&ifc { ++ status = "okay"; ++ #address-cells = <2>; ++ #size-cells = <1>; ++ ranges = <0x0 0x0 0x5 0x80000000 0x08000000 ++ 0x2 0x0 0x5 0x30000000 0x00010000 ++ 0x3 0x0 0x5 0x20000000 0x00010000>; ++ ++ nor@0,0 { ++ #address-cells = <1>; ++ #size-cells = <1>; ++ compatible = "cfi-flash"; ++ reg = <0x0 0x0 0x8000000>; ++ bank-width = <2>; ++ device-width = <1>; ++ ++ partition@0 { ++ /* SoC RCW, this location must not be altered */ ++ reg = <0x0 0x100000>; ++ label = "rcw (RO)"; ++ read-only; ++ }; ++ ++ partition@1 { ++ /* U-Boot image */ ++ reg = <0x100000 0x100000>; ++ label = "uboot"; ++ }; ++ ++ partition@2 { ++ /* U-Boot environment varialbes, 1MB */ ++ reg = <0x200000 0x100000>; ++ label = "uboot-env"; ++ env_size = <0x20000>; ++ }; ++ ++ partition@3 { ++ /* MC firmware, 4MB*/ ++ reg = <0x300000 0x400000>; ++ label = "mc_firmware"; ++ }; ++ ++ partition@4 { ++ /* MC DPL Blob, 1MB */ ++ reg = <0x700000 0x100000>; ++ label = "mc_dpl_blob"; ++ }; ++ ++ partition@5 { ++ /* MC DPC Blob, 1MB */ ++ reg = <0x800000 0x100000>; ++ label = "mc_dpc_blob"; ++ }; ++ ++ 
partition@6 { ++ /* AIOP FW, 4MB */ ++ reg = <0x900000 0x400000>; ++ label = "aiop_fw"; ++ }; ++ ++ partition@7 { ++ /* DebugServerFW, 2MB */ ++ reg = <0xd00000 0x200000>; ++ label = "DebugServer_fw"; ++ }; ++ }; ++ ++ nand@2,0 { ++ #address-cells = <1>; ++ #size-cells = <1>; ++ compatible = "fsl,ifc-nand"; ++ reg = <0x2 0x0 0x10000>; ++ }; ++ ++ cpld@3,0 { ++ reg = <0x3 0x0 0x10000>; ++ compatible = "fsl,ls2080a-rdb-qixis", "fsl,fpga-qixis"; ++ }; ++ ++}; ++ ++&i2c0 { ++ status = "okay"; ++ pca9547@75 { ++ compatible = "nxp,pca9547"; ++ reg = <0x75>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ i2c-mux-never-disable; ++ i2c@1 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x01>; ++ rtc@68 { ++ compatible = "dallas,ds3232"; ++ reg = <0x68>; ++ }; ++ }; ++ ++ i2c@3 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x3>; ++ ++ adt7481@4c { ++ compatible = "adi,adt7461"; ++ reg = <0x4c>; ++ }; ++ }; ++ }; ++}; ++ ++&i2c1 { ++ status = "disabled"; ++}; ++ ++&i2c2 { ++ status = "disabled"; ++}; ++ ++&i2c3 { ++ status = "disabled"; ++}; ++ ++&dspi { ++ status = "okay"; ++ dflash0: n25q512a { ++ #address-cells = <1>; ++ #size-cells = <1>; ++ compatible = "st,m25p80"; ++ spi-max-frequency = <3000000>; ++ reg = <0>; ++ }; ++}; ++ ++&qspi { ++ status = "disabled"; ++}; ++ ++&sata0 { ++ status = "okay"; ++}; ++ ++&sata1 { ++ status = "okay"; ++}; ++ ++&usb0 { ++ status = "okay"; ++}; ++ ++&usb1 { ++ status = "okay"; ++}; ++ ++&emdio1 { ++ status = "disabled"; ++ /* CS4340 PHYs */ ++ mdio1_phy1: emdio1_phy@1 { ++ reg = <0x10>; ++ phy-connection-type = "xfi"; ++ }; ++ mdio1_phy2: emdio1_phy@2 { ++ reg = <0x11>; ++ phy-connection-type = "xfi"; ++ }; ++ mdio1_phy3: emdio1_phy@3 { ++ reg = <0x12>; ++ phy-connection-type = "xfi"; ++ }; ++ mdio1_phy4: emdio1_phy@4 { ++ reg = <0x13>; ++ phy-connection-type = "xfi"; ++ }; ++}; ++ ++&emdio2 { ++ /* AQR405 PHYs */ ++ mdio2_phy1: emdio2_phy@1 { ++ compatible = "ethernet-phy-ieee802.3-c45"; ++ interrupts = <0 1 0x4>; /* Level high type */ ++ reg = <0x0>; ++ phy-connection-type = "xfi"; ++ }; ++ mdio2_phy2: emdio2_phy@2 { ++ compatible = "ethernet-phy-ieee802.3-c45"; ++ interrupts = <0 2 0x4>; /* Level high type */ ++ reg = <0x1>; ++ phy-connection-type = "xfi"; ++ }; ++ mdio2_phy3: emdio2_phy@3 { ++ compatible = "ethernet-phy-ieee802.3-c45"; ++ interrupts = <0 4 0x4>; /* Level high type */ ++ reg = <0x2>; ++ phy-connection-type = "xfi"; ++ }; ++ mdio2_phy4: emdio2_phy@4 { ++ compatible = "ethernet-phy-ieee802.3-c45"; ++ interrupts = <0 5 0x4>; /* Level high type */ ++ reg = <0x3>; ++ phy-connection-type = "xfi"; ++ }; ++}; ++ ++/* Update DPMAC connections to external PHYs, under the assumption of ++ * SerDes 0x2a_0x41. This is currently the only SerDes supported on the board. ++ */ ++&dpmac1 { ++ phy-handle = <&mdio1_phy1>; ++}; ++&dpmac2 { ++ phy-handle = <&mdio1_phy2>; ++}; ++&dpmac3 { ++ phy-handle = <&mdio1_phy3>; ++}; ++&dpmac4 { ++ phy-handle = <&mdio1_phy4>; ++}; ++&dpmac5 { ++ phy-handle = <&mdio2_phy1>; ++}; ++&dpmac6 { ++ phy-handle = <&mdio2_phy2>; ++}; ++&dpmac7 { ++ phy-handle = <&mdio2_phy3>; ++}; ++&dpmac8 { ++ phy-handle = <&mdio2_phy4>; ++}; +diff --git a/arch/arm64/boot/dts/fsl-ls2080a.dtsi b/arch/arm64/boot/dts/fsl-ls2080a.dtsi +new file mode 100644 +index 0000000..5e53b04 +--- /dev/null ++++ b/arch/arm64/boot/dts/fsl-ls2080a.dtsi +@@ -0,0 +1,729 @@ ++/* ++ * Device Tree Include file for Freescale Layerscape-2080A family SoC. 
++ * ++ * Copyright (C) 2014-2015, Freescale Semiconductor ++ * ++ * Bhupesh Sharma ++ * Harninder Rai ++ * ++ * This file is licensed under the terms of the GNU General Public ++ * License version 2. This program is licensed "as is" without any ++ * warranty of any kind, whether express or implied. ++ */ ++ ++#include ++ ++/memreserve/ 0x80000000 0x00010000; ++ ++/ { ++ compatible = "fsl,ls2080a"; ++ interrupt-parent = <&gic>; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ ++ cpus { ++ #address-cells = <2>; ++ #size-cells = <0>; ++ ++ /* We have 4 clusters having 2 Cortex-A57 cores each */ ++ cpu0: cpu@0 { ++ device_type = "cpu"; ++ compatible = "arm,cortex-a57"; ++ reg = <0x0 0x0>; ++ clocks = <&clockgen 1 0>; ++ #cooling-cells = <2>; ++ }; ++ ++ cpu1: cpu@1 { ++ device_type = "cpu"; ++ compatible = "arm,cortex-a57"; ++ reg = <0x0 0x1>; ++ clocks = <&clockgen 1 0>; ++ }; ++ ++ cpu2: cpu@100 { ++ device_type = "cpu"; ++ compatible = "arm,cortex-a57"; ++ reg = <0x0 0x100>; ++ clocks = <&clockgen 1 1>; ++ #cooling-cells = <2>; ++ }; ++ ++ cpu3: cpu@101 { ++ device_type = "cpu"; ++ compatible = "arm,cortex-a57"; ++ reg = <0x0 0x101>; ++ clocks = <&clockgen 1 1>; ++ }; ++ ++ cpu4: cpu@200 { ++ device_type = "cpu"; ++ compatible = "arm,cortex-a57"; ++ reg = <0x0 0x200>; ++ clocks = <&clockgen 1 2>; ++ #cooling-cells = <2>; ++ }; ++ ++ cpu5: cpu@201 { ++ device_type = "cpu"; ++ compatible = "arm,cortex-a57"; ++ reg = <0x0 0x201>; ++ clocks = <&clockgen 1 2>; ++ }; ++ ++ cpu6: cpu@300 { ++ device_type = "cpu"; ++ compatible = "arm,cortex-a57"; ++ reg = <0x0 0x300>; ++ clocks = <&clockgen 1 3>; ++ #cooling-cells = <2>; ++ }; ++ ++ cpu7: cpu@301 { ++ device_type = "cpu"; ++ compatible = "arm,cortex-a57"; ++ reg = <0x0 0x301>; ++ clocks = <&clockgen 1 3>; ++ }; ++ }; ++ ++ pmu { ++ compatible = "arm,armv8-pmuv3"; ++ interrupts = <1 7 0x8>; /* PMU PPI, Level low type */ ++ }; ++ ++ gic: interrupt-controller@6000000 { ++ compatible = "arm,gic-v3"; ++ reg = <0x0 0x06000000 0 0x10000>, /* GIC Dist */ ++ <0x0 0x06100000 0 0x100000>, /* GICR (RD_base + SGI_base) */ ++ <0x0 0x0c0c0000 0 0x2000>, /* GICC */ ++ <0x0 0x0c0d0000 0 0x1000>, /* GICH */ ++ <0x0 0x0c0e0000 0 0x20000>; /* GICV */ ++ #interrupt-cells = <3>; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ ranges; ++ interrupt-controller; ++ interrupts = <1 9 0x4>; ++ ++ its: gic-its@6020000 { ++ compatible = "arm,gic-v3-its"; ++ msi-controller; ++ reg = <0x0 0x6020000 0 0x20000>; ++ }; ++ }; ++ ++ sysclk: sysclk { ++ compatible = "fixed-clock"; ++ #clock-cells = <0>; ++ clock-frequency = <100000000>; ++ clock-output-names = "sysclk"; ++ }; ++ ++ clockgen: clocking@1300000 { ++ compatible = "fsl,ls2080a-clockgen"; ++ reg = <0 0x1300000 0 0xa0000>; ++ #clock-cells = <2>; ++ clocks = <&sysclk>; ++ }; ++ ++ tmu: tmu@1f80000 { ++ compatible = "fsl,qoriq-tmu", "fsl,ls2080a-tmu"; ++ reg = <0x0 0x1f80000 0x0 0x10000>; ++ interrupts = <0 23 0x4>; ++ fsl,tmu-range = <0xb0000 0x9002a 0x6004c 0x30062>; ++ fsl,tmu-calibration = <0x00000000 0x00000026 ++ 0x00000001 0x0000002d ++ 0x00000002 0x00000032 ++ 0x00000003 0x00000039 ++ 0x00000004 0x0000003f ++ 0x00000005 0x00000046 ++ 0x00000006 0x0000004d ++ 0x00000007 0x00000054 ++ 0x00000008 0x0000005a ++ 0x00000009 0x00000061 ++ 0x0000000a 0x0000006a ++ 0x0000000b 0x00000071 ++ ++ 0x00010000 0x00000025 ++ 0x00010001 0x0000002c ++ 0x00010002 0x00000035 ++ 0x00010003 0x0000003d ++ 0x00010004 0x00000045 ++ 0x00010005 0x0000004e ++ 0x00010006 0x00000057 ++ 0x00010007 0x00000061 ++ 0x00010008 0x0000006b ++ 0x00010009 
0x00000076 ++ ++ 0x00020000 0x00000029 ++ 0x00020001 0x00000033 ++ 0x00020002 0x0000003d ++ 0x00020003 0x00000049 ++ 0x00020004 0x00000056 ++ 0x00020005 0x00000061 ++ 0x00020006 0x0000006d ++ ++ 0x00030000 0x00000021 ++ 0x00030001 0x0000002a ++ 0x00030002 0x0000003c ++ 0x00030003 0x0000004e>; ++ little-endian; ++ #thermal-sensor-cells = <1>; ++ }; ++ ++ thermal-zones { ++ cpu_thermal: cpu-thermal { ++ polling-delay-passive = <1000>; ++ polling-delay = <5000>; ++ ++ thermal-sensors = <&tmu 4>; ++ ++ trips { ++ cpu_alert: cpu-alert { ++ temperature = <75000>; ++ hysteresis = <2000>; ++ type = "passive"; ++ }; ++ cpu_crit: cpu-crit { ++ temperature = <85000>; ++ hysteresis = <2000>; ++ type = "critical"; ++ }; ++ }; ++ ++ cooling-maps { ++ map0 { ++ trip = <&cpu_alert>; ++ cooling-device = ++ <&cpu0 THERMAL_NO_LIMIT ++ THERMAL_NO_LIMIT>; ++ }; ++ map1 { ++ trip = <&cpu_alert>; ++ cooling-device = ++ <&cpu2 THERMAL_NO_LIMIT ++ THERMAL_NO_LIMIT>; ++ }; ++ map2 { ++ trip = <&cpu_alert>; ++ cooling-device = ++ <&cpu4 THERMAL_NO_LIMIT ++ THERMAL_NO_LIMIT>; ++ }; ++ map3 { ++ trip = <&cpu_alert>; ++ cooling-device = ++ <&cpu6 THERMAL_NO_LIMIT ++ THERMAL_NO_LIMIT>; ++ }; ++ }; ++ }; ++ }; ++ ++ serial0: serial@21c0500 { ++ device_type = "serial"; ++ compatible = "fsl,ns16550", "ns16550a"; ++ reg = <0x0 0x21c0500 0x0 0x100>; ++ clocks = <&clockgen 4 3>; ++ interrupts = <0 32 0x4>; /* Level high type */ ++ }; ++ ++ serial1: serial@21c0600 { ++ device_type = "serial"; ++ compatible = "fsl,ns16550", "ns16550a"; ++ reg = <0x0 0x21c0600 0x0 0x100>; ++ clocks = <&clockgen 4 3>; ++ interrupts = <0 32 0x4>; /* Level high type */ ++ }; ++ ++ gpio0: gpio@2300000 { ++ compatible = "fsl,qoriq-gpio"; ++ reg = <0x0 0x2300000 0x0 0x10000>; ++ interrupts = <0 36 0x4>; /* Level high type */ ++ gpio-controller; ++ little-endian; ++ #gpio-cells = <2>; ++ interrupt-controller; ++ #interrupt-cells = <2>; ++ }; ++ ++ gpio1: gpio@2310000 { ++ compatible = "fsl,qoriq-gpio"; ++ reg = <0x0 0x2310000 0x0 0x10000>; ++ interrupts = <0 36 0x4>; /* Level high type */ ++ gpio-controller; ++ little-endian; ++ #gpio-cells = <2>; ++ interrupt-controller; ++ #interrupt-cells = <2>; ++ }; ++ ++ gpio2: gpio@2320000 { ++ compatible = "fsl,qoriq-gpio"; ++ reg = <0x0 0x2320000 0x0 0x10000>; ++ interrupts = <0 37 0x4>; /* Level high type */ ++ gpio-controller; ++ little-endian; ++ #gpio-cells = <2>; ++ interrupt-controller; ++ #interrupt-cells = <2>; ++ }; ++ ++ gpio3: gpio@2330000 { ++ compatible = "fsl,qoriq-gpio"; ++ reg = <0x0 0x2330000 0x0 0x10000>; ++ interrupts = <0 37 0x4>; /* Level high type */ ++ gpio-controller; ++ little-endian; ++ #gpio-cells = <2>; ++ interrupt-controller; ++ #interrupt-cells = <2>; ++ }; ++ ++ /* TODO: WRIOP (CCSR?) */ ++ emdio1: mdio@0x8B96000 { /* WRIOP0: 0x8B8_0000, E-MDIO1: 0x1_6000 */ ++ compatible = "fsl,fman-memac-mdio"; ++ reg = <0x0 0x8B96000 0x0 0x1000>; ++ device_type = "mdio"; /* TODO: is this necessary? */ ++ little-endian; /* force the driver in LE mode */ ++ ++ /* Not necessary on the QDS, but needed on the RDB */ ++ #address-cells = <1>; ++ #size-cells = <0>; ++ }; ++ ++ emdio2: mdio@0x8B97000 { /* WRIOP0: 0x8B8_0000, E-MDIO2: 0x1_7000 */ ++ compatible = "fsl,fman-memac-mdio"; ++ reg = <0x0 0x8B97000 0x0 0x1000>; ++ device_type = "mdio"; /* TODO: is this necessary? 
*/ ++ little-endian; /* force the driver in LE mode */ ++ ++ #address-cells = <1>; ++ #size-cells = <0>; ++ }; ++ ++ ifc: ifc@2240000 { ++ compatible = "fsl,ifc", "simple-bus"; ++ reg = <0x0 0x2240000 0x0 0x20000>; ++ interrupts = <0 21 0x4>; /* Level high type */ ++ little-endian; ++ #address-cells = <2>; ++ #size-cells = <1>; ++ ++ ranges = <0 0 0x5 0x80000000 0x08000000 ++ 2 0 0x5 0x30000000 0x00010000 ++ 3 0 0x5 0x20000000 0x00010000>; ++ }; ++ ++ esdhc: esdhc@2140000 { ++ compatible = "fsl,ls2080a-esdhc", "fsl,esdhc"; ++ reg = <0x0 0x2140000 0x0 0x10000>; ++ interrupts = <0 28 0x4>; /* Level high type */ ++ clock-frequency = <0>; ++ voltage-ranges = <1800 1800 3300 3300>; ++ sdhci,auto-cmd12; ++ little-endian; ++ bus-width = <4>; ++ }; ++ ++ ftm0: ftm0@2800000 { ++ compatible = "fsl,ftm-alarm"; ++ reg = <0x0 0x2800000 0x0 0x10000>; ++ interrupts = <0 44 4>; ++ }; ++ ++ reset: reset@1E60000 { ++ compatible = "fsl,ls-reset"; ++ reg = <0x0 0x1E60000 0x0 0x10000>; ++ }; ++ ++ dspi: dspi@2100000 { ++ compatible = "fsl,ls2085a-dspi", "fsl,ls2080a-dspi"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x2100000 0x0 0x10000>; ++ interrupts = <0 26 0x4>; /* Level high type */ ++ clocks = <&clockgen 4 3>; ++ clock-names = "dspi"; ++ spi-num-chipselects = <5>; ++ bus-num = <0>; ++ }; ++ ++ i2c0: i2c@2000000 { ++ compatible = "fsl,vf610-i2c"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x2000000 0x0 0x10000>; ++ interrupts = <0 34 0x4>; /* Level high type */ ++ clock-names = "i2c"; ++ clocks = <&clockgen 4 3>; ++ }; ++ ++ i2c1: i2c@2010000 { ++ compatible = "fsl,vf610-i2c"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x2010000 0x0 0x10000>; ++ interrupts = <0 34 0x4>; /* Level high type */ ++ clock-names = "i2c"; ++ clocks = <&clockgen 4 3>; ++ }; ++ ++ i2c2: i2c@2020000 { ++ compatible = "fsl,vf610-i2c"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x2020000 0x0 0x10000>; ++ interrupts = <0 35 0x4>; /* Level high type */ ++ clock-names = "i2c"; ++ clocks = <&clockgen 4 3>; ++ }; ++ ++ i2c3: i2c@2030000 { ++ compatible = "fsl,vf610-i2c"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x2030000 0x0 0x10000>; ++ interrupts = <0 35 0x4>; /* Level high type */ ++ clock-names = "i2c"; ++ clocks = <&clockgen 4 3>; ++ }; ++ ++ qspi: quadspi@20c0000 { ++ compatible = "fsl,ls2080a-qspi"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x20c0000 0x0 0x10000>, ++ <0x0 0x20000000 0x0 0x10000000>; ++ reg-names = "QuadSPI", "QuadSPI-memory"; ++ interrupts = <0 25 0x4>; /* Level high type */ ++ clocks = <&clockgen 4 3>, <&clockgen 4 3>; ++ clock-names = "qspi_en", "qspi"; ++ }; ++ ++ pcie@3400000 { ++ compatible = "fsl,ls2080a-pcie", "fsl,ls2085a-pcie", ++ "snps,dw-pcie"; ++ reg = <0x00 0x03400000 0x0 0x00100000 /* controller registers */ ++ 0x10 0x00000000 0x0 0x00001000>; /* configuration space */ ++ reg-names = "regs", "config"; ++ interrupts = <0 108 0x4>; /* Level high type */ ++ interrupt-names = "intr"; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ device_type = "pci"; ++ num-lanes = <4>; ++ bus-range = <0x0 0xff>; ++ ranges = <0x81000000 0x0 0x00000000 0x10 0x00010000 0x0 0x00010000 /* downstream I/O */ ++ 0x82000000 0x0 0x40000000 0x10 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ ++ msi-parent = <&its>; ++ #interrupt-cells = <1>; ++ interrupt-map-mask = <0 0 0 7>; ++ interrupt-map = <0000 0 0 1 &gic 0 0 0 109 4>, ++ <0000 0 0 2 &gic 0 0 0 110 4>, ++ <0000 0 0 3 &gic 0 0 0 111 4>, ++ <0000 0 0 4 
&gic 0 0 0 112 4>; ++ }; ++ ++ pcie@3500000 { ++ compatible = "fsl,ls2080a-pcie", "fsl,ls2085a-pcie", ++ "snps,dw-pcie"; ++ reg = <0x00 0x03500000 0x0 0x00100000 /* controller registers */ ++ 0x12 0x00000000 0x0 0x00001000>; /* configuration space */ ++ reg-names = "regs", "config"; ++ interrupts = <0 113 0x4>; /* Level high type */ ++ interrupt-names = "intr"; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ device_type = "pci"; ++ num-lanes = <4>; ++ bus-range = <0x0 0xff>; ++ ranges = <0x81000000 0x0 0x00000000 0x12 0x00010000 0x0 0x00010000 /* downstream I/O */ ++ 0x82000000 0x0 0x40000000 0x12 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ ++ msi-parent = <&its>; ++ #interrupt-cells = <1>; ++ interrupt-map-mask = <0 0 0 7>; ++ interrupt-map = <0000 0 0 1 &gic 0 0 0 114 4>, ++ <0000 0 0 2 &gic 0 0 0 115 4>, ++ <0000 0 0 3 &gic 0 0 0 116 4>, ++ <0000 0 0 4 &gic 0 0 0 117 4>; ++ }; ++ ++ pcie@3600000 { ++ compatible = "fsl,ls2080a-pcie", "fsl,ls2085a-pcie", ++ "snps,dw-pcie"; ++ reg = <0x00 0x03600000 0x0 0x00100000 /* controller registers */ ++ 0x14 0x00000000 0x0 0x00001000>; /* configuration space */ ++ reg-names = "regs", "config"; ++ interrupts = <0 118 0x4>; /* Level high type */ ++ interrupt-names = "intr"; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ device_type = "pci"; ++ num-lanes = <8>; ++ bus-range = <0x0 0xff>; ++ ranges = <0x81000000 0x0 0x00000000 0x14 0x00010000 0x0 0x00010000 /* downstream I/O */ ++ 0x82000000 0x0 0x40000000 0x14 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ ++ msi-parent = <&its>; ++ #interrupt-cells = <1>; ++ interrupt-map-mask = <0 0 0 7>; ++ interrupt-map = <0000 0 0 1 &gic 0 0 0 119 4>, ++ <0000 0 0 2 &gic 0 0 0 120 4>, ++ <0000 0 0 3 &gic 0 0 0 121 4>, ++ <0000 0 0 4 &gic 0 0 0 122 4>; ++ }; ++ ++ pcie@3700000 { ++ compatible = "fsl,ls2080a-pcie", "fsl,ls2085a-pcie", ++ "snps,dw-pcie"; ++ reg = <0x00 0x03700000 0x0 0x00100000 /* controller registers */ ++ 0x16 0x00000000 0x0 0x00001000>; /* configuration space */ ++ reg-names = "regs", "config"; ++ interrupts = <0 123 0x4>; /* Level high type */ ++ interrupt-names = "intr"; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ device_type = "pci"; ++ num-lanes = <4>; ++ bus-range = <0x0 0xff>; ++ ranges = <0x81000000 0x0 0x00000000 0x16 0x00010000 0x0 0x00010000 /* downstream I/O */ ++ 0x82000000 0x0 0x40000000 0x16 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ ++ msi-parent = <&its>; ++ #interrupt-cells = <1>; ++ interrupt-map-mask = <0 0 0 7>; ++ interrupt-map = <0000 0 0 1 &gic 0 0 0 124 4>, ++ <0000 0 0 2 &gic 0 0 0 125 4>, ++ <0000 0 0 3 &gic 0 0 0 126 4>, ++ <0000 0 0 4 &gic 0 0 0 127 4>; ++ }; ++ ++ sata0: sata@3200000 { ++ compatible = "fsl,ls2080a-ahci", "fsl,ls1021a-ahci"; ++ reg = <0x0 0x3200000 0x0 0x10000>; ++ interrupts = <0 133 0x4>; /* Level high type */ ++ clocks = <&clockgen 4 3>; ++ }; ++ ++ sata1: sata@3210000 { ++ compatible = "fsl,ls2080a-ahci", "fsl,ls1021a-ahci"; ++ reg = <0x0 0x3210000 0x0 0x10000>; ++ interrupts = <0 136 0x4>; /* Level high type */ ++ clocks = <&clockgen 4 3>; ++ }; ++ ++ usb0: usb3@3100000 { ++ compatible = "snps,dwc3"; ++ reg = <0x0 0x3100000 0x0 0x10000>; ++ interrupts = <0 80 0x4>; /* Level high type */ ++ dr_mode = "host"; ++ configure-gfladj; ++ }; ++ ++ usb1: usb3@3110000 { ++ compatible = "snps,dwc3"; ++ reg = <0x0 0x3110000 0x0 0x10000>; ++ interrupts = <0 81 0x4>; /* Level high type */ ++ dr_mode = "host"; ++ configure-gfladj; ++ }; ++ ++ smmu: iommu@5000000 { ++ compatible = "arm,mmu-500"; ++ reg = <0 
0x5000000 0 0x800000>; ++ #global-interrupts = <12>; ++ interrupts = <0 13 4>, /* global secure fault */ ++ <0 14 4>, /* combined secure interrupt */ ++ <0 15 4>, /* global non-secure fault */ ++ <0 16 4>, /* combined non-secure interrupt */ ++ /* performance counter interrupts 0-7 */ ++ <0 211 4>, ++ <0 212 4>, ++ <0 213 4>, ++ <0 214 4>, ++ <0 215 4>, ++ <0 216 4>, ++ <0 217 4>, ++ <0 218 4>, ++ /* per context interrupt, 64 interrupts */ ++ <0 146 4>, ++ <0 147 4>, ++ <0 148 4>, ++ <0 149 4>, ++ <0 150 4>, ++ <0 151 4>, ++ <0 152 4>, ++ <0 153 4>, ++ <0 154 4>, ++ <0 155 4>, ++ <0 156 4>, ++ <0 157 4>, ++ <0 158 4>, ++ <0 159 4>, ++ <0 160 4>, ++ <0 161 4>, ++ <0 162 4>, ++ <0 163 4>, ++ <0 164 4>, ++ <0 165 4>, ++ <0 166 4>, ++ <0 167 4>, ++ <0 168 4>, ++ <0 169 4>, ++ <0 170 4>, ++ <0 171 4>, ++ <0 172 4>, ++ <0 173 4>, ++ <0 174 4>, ++ <0 175 4>, ++ <0 176 4>, ++ <0 177 4>, ++ <0 178 4>, ++ <0 179 4>, ++ <0 180 4>, ++ <0 181 4>, ++ <0 182 4>, ++ <0 183 4>, ++ <0 184 4>, ++ <0 185 4>, ++ <0 186 4>, ++ <0 187 4>, ++ <0 188 4>, ++ <0 189 4>, ++ <0 190 4>, ++ <0 191 4>, ++ <0 192 4>, ++ <0 193 4>, ++ <0 194 4>, ++ <0 195 4>, ++ <0 196 4>, ++ <0 197 4>, ++ <0 198 4>, ++ <0 199 4>, ++ <0 200 4>, ++ <0 201 4>, ++ <0 202 4>, ++ <0 203 4>, ++ <0 204 4>, ++ <0 205 4>, ++ <0 206 4>, ++ <0 207 4>, ++ <0 208 4>, ++ <0 209 4>; ++ mmu-masters = <&fsl_mc 0x300 0>; ++ }; ++ ++ timer { ++ compatible = "arm,armv8-timer"; ++ interrupts = <1 13 0x1>, /* Physical Secure PPI, edge triggered */ ++ <1 14 0x1>, /* Physical Non-Secure PPI, edge triggered */ ++ <1 11 0x1>, /* Virtual PPI, edge triggered */ ++ <1 10 0x1>; /* Hypervisor PPI, edge triggered */ ++ arm,reread-timer; ++ }; ++ ++ fsl_mc: fsl-mc@80c000000 { ++ compatible = "fsl,qoriq-mc"; ++ #stream-id-cells = <2>; ++ reg = <0x00000008 0x0c000000 0 0x40>, /* MC portal base */ ++ <0x00000000 0x08340000 0 0x40000>; /* MC control reg */ ++ msi-parent = <&its>; ++ #address-cells = <3>; ++ #size-cells = <1>; ++ ++ /* ++ * Region type 0x0 - MC portals ++ * Region type 0x1 - QBMAN portals ++ */ ++ ranges = <0x0 0x0 0x0 0x8 0x0c000000 0x4000000 ++ 0x1 0x0 0x0 0x8 0x18000000 0x8000000>; ++ ++ /* ++ * Define the maximum number of MACs present on the SoC. ++ * They won't necessarily be all probed, since the ++ * Data Path Layout file and the MC firmware can put fewer ++ * actual DPMAC objects on the MC bus. 
++ */ ++ dpmacs { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ dpmac1: dpmac@1 { ++ compatible = "fsl,qoriq-mc-dpmac"; ++ reg = <1>; ++ }; ++ dpmac2: dpmac@2 { ++ compatible = "fsl,qoriq-mc-dpmac"; ++ reg = <2>; ++ }; ++ dpmac3: dpmac@3 { ++ compatible = "fsl,qoriq-mc-dpmac"; ++ reg = <3>; ++ }; ++ dpmac4: dpmac@4 { ++ compatible = "fsl,qoriq-mc-dpmac"; ++ reg = <4>; ++ }; ++ dpmac5: dpmac@5 { ++ compatible = "fsl,qoriq-mc-dpmac"; ++ reg = <5>; ++ }; ++ dpmac6: dpmac@6 { ++ compatible = "fsl,qoriq-mc-dpmac"; ++ reg = <6>; ++ }; ++ dpmac7: dpmac@7 { ++ compatible = "fsl,qoriq-mc-dpmac"; ++ reg = <7>; ++ }; ++ dpmac8: dpmac@8 { ++ compatible = "fsl,qoriq-mc-dpmac"; ++ reg = <8>; ++ }; ++ dpmac9: dpmac@9 { ++ compatible = "fsl,qoriq-mc-dpmac"; ++ reg = <9>; ++ }; ++ dpmac10: dpmac@10 { ++ compatible = "fsl,qoriq-mc-dpmac"; ++ reg = <0xa>; ++ }; ++ dpmac11: dpmac@11 { ++ compatible = "fsl,qoriq-mc-dpmac"; ++ reg = <0xb>; ++ }; ++ dpmac12: dpmac@12 { ++ compatible = "fsl,qoriq-mc-dpmac"; ++ reg = <0xc>; ++ }; ++ dpmac13: dpmac@13 { ++ compatible = "fsl,qoriq-mc-dpmac"; ++ reg = <0xd>; ++ }; ++ dpmac14: dpmac@14 { ++ compatible = "fsl,qoriq-mc-dpmac"; ++ reg = <0xe>; ++ }; ++ dpmac15: dpmac@15 { ++ compatible = "fsl,qoriq-mc-dpmac"; ++ reg = <0xf>; ++ }; ++ dpmac16: dpmac@16 { ++ compatible = "fsl,qoriq-mc-dpmac"; ++ reg = <0x10>; ++ }; ++ }; ++ }; ++ ++ ccn@4000000 { ++ compatible = "arm,ccn-504"; ++ reg = <0x0 0x04000000 0x0 0x01000000>; ++ interrupts = <0 12 4>; ++ }; ++ ++ memory@80000000 { ++ device_type = "memory"; ++ reg = <0x00000000 0x80000000 0 0x80000000>; ++ /* DRAM space 1 - 2 GB DRAM */ ++ }; ++}; +diff --git a/arch/arm64/boot/dts/include/dt-bindings b/arch/arm64/boot/dts/include/dt-bindings +new file mode 120000 +index 0000000..08c00e4 +--- /dev/null ++++ b/arch/arm64/boot/dts/include/dt-bindings +@@ -0,0 +1 @@ ++../../../../../include/dt-bindings +\ No newline at end of file +diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig +index dd301be..3852a77 100644 +--- a/arch/arm64/configs/defconfig ++++ b/arch/arm64/configs/defconfig +@@ -32,6 +32,7 @@ CONFIG_MODULES=y + CONFIG_MODULE_UNLOAD=y + # CONFIG_BLK_DEV_BSG is not set + # CONFIG_IOSCHED_DEADLINE is not set ++CONFIG_ARCH_LAYERSCAPE=y + CONFIG_ARCH_THUNDER=y + CONFIG_ARCH_VEXPRESS=y + CONFIG_ARCH_XGENE=y +diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h +index 101a42b..8ec41e5 100644 +--- a/arch/arm64/include/asm/mmu_context.h ++++ b/arch/arm64/include/asm/mmu_context.h +@@ -64,6 +64,49 @@ static inline void cpu_set_reserved_ttbr0(void) + : "r" (ttbr)); + } + ++/* ++ * TCR.T0SZ value to use when the ID map is active. Usually equals ++ * TCR_T0SZ(VA_BITS), unless system RAM is positioned very high in ++ * physical memory, in which case it will be smaller. ++ */ ++extern u64 idmap_t0sz; ++ ++static inline bool __cpu_uses_extended_idmap(void) ++{ ++ return (!IS_ENABLED(CONFIG_ARM64_VA_BITS_48) && ++ unlikely(idmap_t0sz != TCR_T0SZ(VA_BITS))); ++} ++ ++static inline void __cpu_set_tcr_t0sz(u64 t0sz) ++{ ++ unsigned long tcr; ++ ++ if (__cpu_uses_extended_idmap()) ++ asm volatile ( ++ " mrs %0, tcr_el1 ;" ++ " bfi %0, %1, %2, %3 ;" ++ " msr tcr_el1, %0 ;" ++ " isb" ++ : "=&r" (tcr) ++ : "r"(t0sz), "I"(TCR_T0SZ_OFFSET), "I"(TCR_TxSZ_WIDTH)); ++} ++ ++/* ++ * Set TCR.T0SZ to the value appropriate for activating the identity map. 
++ */ ++static inline void cpu_set_idmap_tcr_t0sz(void) ++{ ++ __cpu_set_tcr_t0sz(idmap_t0sz); ++} ++ ++/* ++ * Set TCR.T0SZ to its default value (based on VA_BITS) ++ */ ++static inline void cpu_set_default_tcr_t0sz(void) ++{ ++ __cpu_set_tcr_t0sz(TCR_T0SZ(VA_BITS)); ++} ++ + static inline void switch_new_context(struct mm_struct *mm) + { + unsigned long flags; +diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h +index 22b1623..3d02b18 100644 +--- a/arch/arm64/include/asm/page.h ++++ b/arch/arm64/include/asm/page.h +@@ -33,7 +33,9 @@ + * image. Both require pgd, pud (4 levels only) and pmd tables to (section) + * map the kernel. With the 64K page configuration, swapper and idmap need to + * map to pte level. The swapper also maps the FDT (see __create_page_tables +- * for more information). ++ * for more information). Note that the number of ID map translation levels ++ * could be increased on the fly if system RAM is out of reach for the default ++ * VA range, so 3 pages are reserved in all cases. + */ + #ifdef CONFIG_ARM64_64K_PAGES + #define SWAPPER_PGTABLE_LEVELS (CONFIG_ARM64_PGTABLE_LEVELS) +@@ -42,7 +44,7 @@ + #endif + + #define SWAPPER_DIR_SIZE (SWAPPER_PGTABLE_LEVELS * PAGE_SIZE) +-#define IDMAP_DIR_SIZE (SWAPPER_DIR_SIZE) ++#define IDMAP_DIR_SIZE (3 * PAGE_SIZE) + + #ifndef __ASSEMBLY__ + +diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h +index 88174e0..500b74e 100644 +--- a/arch/arm64/include/asm/pgtable-hwdef.h ++++ b/arch/arm64/include/asm/pgtable-hwdef.h +@@ -142,7 +142,12 @@ + /* + * TCR flags. + */ +-#define TCR_TxSZ(x) (((UL(64) - (x)) << 16) | ((UL(64) - (x)) << 0)) ++#define TCR_T0SZ_OFFSET 0 ++#define TCR_T1SZ_OFFSET 16 ++#define TCR_T0SZ(x) ((UL(64) - (x)) << TCR_T0SZ_OFFSET) ++#define TCR_T1SZ(x) ((UL(64) - (x)) << TCR_T1SZ_OFFSET) ++#define TCR_TxSZ(x) (TCR_T0SZ(x) | TCR_T1SZ(x)) ++#define TCR_TxSZ_WIDTH 6 + #define TCR_IRGN_NC ((UL(0) << 8) | (UL(0) << 24)) + #define TCR_IRGN_WBWA ((UL(1) << 8) | (UL(1) << 24)) + #define TCR_IRGN_WT ((UL(2) << 8) | (UL(2) << 24)) +diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S +index 2877dd8..ca02239 100644 +--- a/arch/arm64/kernel/head.S ++++ b/arch/arm64/kernel/head.S +@@ -592,6 +592,43 @@ __create_page_tables: + mov x0, x25 // idmap_pg_dir + ldr x3, =KERNEL_START + add x3, x3, x28 // __pa(KERNEL_START) ++ ++#ifndef CONFIG_ARM64_VA_BITS_48 ++#define EXTRA_SHIFT (PGDIR_SHIFT + PAGE_SHIFT - 3) ++#define EXTRA_PTRS (1 << (48 - EXTRA_SHIFT)) ++ ++ /* ++ * If VA_BITS < 48, it may be too small to allow for an ID mapping to be ++ * created that covers system RAM if that is located sufficiently high ++ * in the physical address space. So for the ID map, use an extended ++ * virtual range in that case, by configuring an additional translation ++ * level. ++ * First, we have to verify our assumption that the current value of ++ * VA_BITS was chosen such that all translation levels are fully ++ * utilised, and that lowering T0SZ will always result in an additional ++ * translation level to be configured. ++ */ ++#if VA_BITS != EXTRA_SHIFT ++#error "Mismatch between VA_BITS and page size/number of translation levels" ++#endif ++ ++ /* ++ * Calculate the maximum allowed value for TCR_EL1.T0SZ so that the ++ * entire kernel image can be ID mapped. As T0SZ == (64 - #bits used), ++ * this number conveniently equals the number of leading zeroes in ++ * the physical address of KERNEL_END. 
++ */ ++ adrp x5, KERNEL_END ++ clz x5, x5 ++ cmp x5, TCR_T0SZ(VA_BITS) // default T0SZ small enough? ++ b.ge 1f // .. then skip additional level ++ ++ str_l x5, idmap_t0sz, x6 ++ ++ create_table_entry x0, x3, EXTRA_SHIFT, EXTRA_PTRS, x5, x6 ++1: ++#endif ++ + create_pgd_entry x0, x3, x5, x6 + ldr x6, =KERNEL_END + mov x5, x3 // __pa(KERNEL_START) +diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c +index 0ef8789..5657692 100644 +--- a/arch/arm64/kernel/smp.c ++++ b/arch/arm64/kernel/smp.c +@@ -152,6 +152,7 @@ asmlinkage void secondary_start_kernel(void) + */ + cpu_set_reserved_ttbr0(); + flush_tlb_all(); ++ cpu_set_default_tcr_t0sz(); + + preempt_disable(); + trace_hardirqs_off(); +diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c +index f4f8b50..53bbff9 100644 +--- a/arch/arm64/mm/mmu.c ++++ b/arch/arm64/mm/mmu.c +@@ -37,6 +37,8 @@ + + #include "mm.h" + ++u64 idmap_t0sz = TCR_T0SZ(VA_BITS); ++ + /* + * Empty_zero_page is a special page that is used for zero-initialized data + * and COW. +@@ -369,6 +371,7 @@ void __init paging_init(void) + */ + cpu_set_reserved_ttbr0(); + flush_tlb_all(); ++ cpu_set_default_tcr_t0sz(); + } + + /* +@@ -376,8 +379,10 @@ void __init paging_init(void) + */ + void setup_mm_for_reboot(void) + { +- cpu_switch_mm(idmap_pg_dir, &init_mm); ++ cpu_set_reserved_ttbr0(); + flush_tlb_all(); ++ cpu_set_idmap_tcr_t0sz(); ++ cpu_switch_mm(idmap_pg_dir, &init_mm); + } + + /* +diff --git a/arch/arm64/mm/proc-macros.S b/arch/arm64/mm/proc-macros.S +index 005d29e..4c4d93c 100644 +--- a/arch/arm64/mm/proc-macros.S ++++ b/arch/arm64/mm/proc-macros.S +@@ -52,3 +52,13 @@ + mov \reg, #4 // bytes per word + lsl \reg, \reg, \tmp // actual cache line size + .endm ++ ++/* ++ * tcr_set_idmap_t0sz - update TCR.T0SZ so that we can load the ID map ++ */ ++ .macro tcr_set_idmap_t0sz, valreg, tmpreg ++#ifndef CONFIG_ARM64_VA_BITS_48 ++ ldr_l \tmpreg, idmap_t0sz ++ bfi \valreg, \tmpreg, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH ++#endif ++ .endm +diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S +index 4e778b1..cbea872 100644 +--- a/arch/arm64/mm/proc.S ++++ b/arch/arm64/mm/proc.S +@@ -156,6 +156,7 @@ ENTRY(cpu_do_resume) + msr cpacr_el1, x6 + msr ttbr0_el1, x1 + msr ttbr1_el1, x7 ++ tcr_set_idmap_t0sz x8, x7 + msr tcr_el1, x8 + msr vbar_el1, x9 + msr mdscr_el1, x10 +@@ -233,6 +234,8 @@ ENTRY(__cpu_setup) + */ + ldr x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \ + TCR_TG_FLAGS | TCR_ASID16 | TCR_TBI0 ++ tcr_set_idmap_t0sz x10, x9 ++ + /* + * Read the PARange bits from ID_AA64MMFR0_EL1 and set the IPS bits in + * TCR_EL1. 
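The arm64 hunks above (head.S, mmu.c, proc-macros.S, proc.S) cooperate to widen the ID map when system RAM sits above what VA_BITS can reach: head.S compares the leading zeroes of __pa(KERNEL_END) against the default T0SZ, records a smaller idmap_t0sz if needed and splices in one extra translation level, and the tcr_set_idmap_t0sz macro then replays that T0SZ whenever TCR_EL1 is reprogrammed. The sketch below models only the head.S decision as user-space C; VA_BITS=39 and the physical address are illustrative assumptions, not values taken from the patch.

    /*
     * Hedged sketch of the __create_page_tables check above.
     * T0SZ = 64 - (number of VA bits); the largest T0SZ that still
     * covers the kernel image equals clz(__pa(KERNEL_END)).
     */
    #include <stdint.h>
    #include <stdio.h>

    #define VA_BITS 39                     /* assumed: 4K pages, 3 levels */
    #define TCR_T0SZ(va) (64u - (va))

    static unsigned clz64(uint64_t x)
    {
            return x ? (unsigned)__builtin_clzll(x) : 64u;
    }

    int main(void)
    {
            /* hypothetical kernel end, placed above the 2^39 (512 GB) limit */
            uint64_t kernel_end_pa = 0x8080000000ULL;
            unsigned t0sz = clz64(kernel_end_pa);

            if (t0sz >= TCR_T0SZ(VA_BITS))
                    printf("default T0SZ=%u already covers the image\n",
                           TCR_T0SZ(VA_BITS));
            else
                    /* corresponds to: str_l x5, idmap_t0sz; create_table_entry */
                    printf("extended idmap: idmap_t0sz=%u, extra level needed\n",
                           t0sz);
            return 0;
    }

With these inputs the sketch reports idmap_t0sz=24, i.e. a 40-bit ID map, which is exactly the case the extra EXTRA_SHIFT/EXTRA_PTRS table entry in head.S handles.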
+diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c +index 8c3730c..8ae36ea 100644 +--- a/arch/ia64/kernel/msi_ia64.c ++++ b/arch/ia64/kernel/msi_ia64.c +@@ -35,7 +35,7 @@ static int ia64_set_msi_irq_affinity(struct irq_data *idata, + data |= MSI_DATA_VECTOR(irq_to_vector(irq)); + msg.data = data; + +- write_msi_msg(irq, &msg); ++ pci_write_msi_msg(irq, &msg); + cpumask_copy(idata->affinity, cpumask_of(cpu)); + + return 0; +@@ -71,7 +71,7 @@ int ia64_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc) + MSI_DATA_DELIVERY_FIXED | + MSI_DATA_VECTOR(vector); + +- write_msi_msg(irq, &msg); ++ pci_write_msi_msg(irq, &msg); + irq_set_chip_and_handler(irq, &ia64_msi_chip, handle_edge_irq); + + return 0; +@@ -102,8 +102,8 @@ static int ia64_msi_retrigger_irq(struct irq_data *data) + */ + static struct irq_chip ia64_msi_chip = { + .name = "PCI-MSI", +- .irq_mask = mask_msi_irq, +- .irq_unmask = unmask_msi_irq, ++ .irq_mask = pci_msi_mask_irq, ++ .irq_unmask = pci_msi_unmask_irq, + .irq_ack = ia64_ack_msi_irq, + #ifdef CONFIG_SMP + .irq_set_affinity = ia64_set_msi_irq_affinity, +diff --git a/arch/ia64/sn/kernel/msi_sn.c b/arch/ia64/sn/kernel/msi_sn.c +index 446e779..a0eb27b 100644 +--- a/arch/ia64/sn/kernel/msi_sn.c ++++ b/arch/ia64/sn/kernel/msi_sn.c +@@ -145,7 +145,7 @@ int sn_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *entry) + msg.data = 0x100 + irq; + + irq_set_msi_desc(irq, entry); +- write_msi_msg(irq, &msg); ++ pci_write_msi_msg(irq, &msg); + irq_set_chip_and_handler(irq, &sn_msi_chip, handle_edge_irq); + + return 0; +@@ -205,7 +205,7 @@ static int sn_set_msi_irq_affinity(struct irq_data *data, + msg.address_hi = (u32)(bus_addr >> 32); + msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff); + +- write_msi_msg(irq, &msg); ++ pci_write_msi_msg(irq, &msg); + cpumask_copy(data->affinity, cpu_mask); + + return 0; +@@ -228,8 +228,8 @@ static int sn_msi_retrigger_irq(struct irq_data *data) + + static struct irq_chip sn_msi_chip = { + .name = "PCI-MSI", +- .irq_mask = mask_msi_irq, +- .irq_unmask = unmask_msi_irq, ++ .irq_mask = pci_msi_mask_irq, ++ .irq_unmask = pci_msi_unmask_irq, + .irq_ack = sn_ack_msi_irq, + #ifdef CONFIG_SMP + .irq_set_affinity = sn_set_msi_irq_affinity, +diff --git a/arch/mips/pci/msi-octeon.c b/arch/mips/pci/msi-octeon.c +index 63bbe07..cffaaf4 100644 +--- a/arch/mips/pci/msi-octeon.c ++++ b/arch/mips/pci/msi-octeon.c +@@ -178,7 +178,7 @@ msi_irq_allocated: + pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control); + + irq_set_msi_desc(irq, desc); +- write_msi_msg(irq, &msg); ++ pci_write_msi_msg(irq, &msg); + return 0; + } + +diff --git a/arch/mips/pci/msi-xlp.c b/arch/mips/pci/msi-xlp.c +index f7ac3ed..6a40f24 100644 +--- a/arch/mips/pci/msi-xlp.c ++++ b/arch/mips/pci/msi-xlp.c +@@ -217,7 +217,7 @@ static void xlp_msix_mask_ack(struct irq_data *d) + + msixvec = nlm_irq_msixvec(d->irq); + link = nlm_irq_msixlink(msixvec); +- mask_msi_irq(d); ++ pci_msi_mask_irq(d); + md = irq_data_get_irq_handler_data(d); + + /* Ack MSI on bridge */ +@@ -239,10 +239,10 @@ static void xlp_msix_mask_ack(struct irq_data *d) + + static struct irq_chip xlp_msix_chip = { + .name = "XLP-MSIX", +- .irq_enable = unmask_msi_irq, +- .irq_disable = mask_msi_irq, ++ .irq_enable = pci_msi_unmask_irq, ++ .irq_disable = pci_msi_mask_irq, + .irq_mask_ack = xlp_msix_mask_ack, +- .irq_unmask = unmask_msi_irq, ++ .irq_unmask = pci_msi_unmask_irq, + }; + + void arch_teardown_msi_irq(unsigned int irq) +@@ -345,7 +345,7 @@ static int xlp_setup_msi(uint64_t lnkbase, 
int node, int link, + if (ret < 0) + return ret; + +- write_msi_msg(xirq, &msg); ++ pci_write_msi_msg(xirq, &msg); + return 0; + } + +@@ -446,7 +446,7 @@ static int xlp_setup_msix(uint64_t lnkbase, int node, int link, + if (ret < 0) + return ret; + +- write_msi_msg(xirq, &msg); ++ pci_write_msi_msg(xirq, &msg); + return 0; + } + +diff --git a/arch/mips/pci/pci-xlr.c b/arch/mips/pci/pci-xlr.c +index 0dde803..26d2dab 100644 +--- a/arch/mips/pci/pci-xlr.c ++++ b/arch/mips/pci/pci-xlr.c +@@ -260,7 +260,7 @@ int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc) + if (ret < 0) + return ret; + +- write_msi_msg(irq, &msg); ++ pci_write_msi_msg(irq, &msg); + return 0; + } + #endif +diff --git a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c +index ca3a062..11090ab 100644 +--- a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c ++++ b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c +@@ -123,7 +123,8 @@ cpld_pic_cascade(unsigned int irq, struct irq_desc *desc) + } + + static int +-cpld_pic_host_match(struct irq_domain *h, struct device_node *node) ++cpld_pic_host_match(struct irq_domain *h, struct device_node *node, ++ enum irq_domain_bus_token bus_token) + { + return cpld_pic_node == node; + } +diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c +index 862b327..0883994 100644 +--- a/arch/powerpc/platforms/cell/axon_msi.c ++++ b/arch/powerpc/platforms/cell/axon_msi.c +@@ -279,7 +279,7 @@ static int axon_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) + + irq_set_msi_desc(virq, entry); + msg.data = virq; +- write_msi_msg(virq, &msg); ++ pci_write_msi_msg(virq, &msg); + } + + return 0; +@@ -301,9 +301,9 @@ static void axon_msi_teardown_msi_irqs(struct pci_dev *dev) + } + + static struct irq_chip msic_irq_chip = { +- .irq_mask = mask_msi_irq, +- .irq_unmask = unmask_msi_irq, +- .irq_shutdown = mask_msi_irq, ++ .irq_mask = pci_msi_mask_irq, ++ .irq_unmask = pci_msi_unmask_irq, ++ .irq_shutdown = pci_msi_mask_irq, + .name = "AXON-MSI", + }; + +diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c +index 28e558d..109d236 100644 +--- a/arch/powerpc/platforms/cell/interrupt.c ++++ b/arch/powerpc/platforms/cell/interrupt.c +@@ -222,7 +222,8 @@ void iic_request_IPIs(void) + #endif /* CONFIG_SMP */ + + +-static int iic_host_match(struct irq_domain *h, struct device_node *node) ++static int iic_host_match(struct irq_domain *h, struct device_node *node, ++ enum irq_domain_bus_token bus_token) + { + return of_device_is_compatible(node, + "IBM,CBEA-Internal-Interrupt-Controller"); +diff --git a/arch/powerpc/platforms/embedded6xx/flipper-pic.c b/arch/powerpc/platforms/embedded6xx/flipper-pic.c +index 4cde8e7..b7866e0 100644 +--- a/arch/powerpc/platforms/embedded6xx/flipper-pic.c ++++ b/arch/powerpc/platforms/embedded6xx/flipper-pic.c +@@ -108,7 +108,8 @@ static int flipper_pic_map(struct irq_domain *h, unsigned int virq, + return 0; + } + +-static int flipper_pic_match(struct irq_domain *h, struct device_node *np) ++static int flipper_pic_match(struct irq_domain *h, struct device_node *np, ++ enum irq_domain_bus_token bus_token) + { + return 1; + } +diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c +index 4c24bf6..246cab4 100644 +--- a/arch/powerpc/platforms/powermac/pic.c ++++ b/arch/powerpc/platforms/powermac/pic.c +@@ -268,7 +268,8 @@ static struct irqaction gatwick_cascade_action = { + .name = "cascade", + }; + +-static 
int pmac_pic_host_match(struct irq_domain *h, struct device_node *node) ++static int pmac_pic_host_match(struct irq_domain *h, struct device_node *node, ++ enum irq_domain_bus_token bus_token) + { + /* We match all, we don't always have a node anyway */ + return 1; +diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c +index 9ff55d5..019991d 100644 +--- a/arch/powerpc/platforms/powernv/pci.c ++++ b/arch/powerpc/platforms/powernv/pci.c +@@ -90,7 +90,7 @@ static int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) + return rc; + } + irq_set_msi_desc(virq, entry); +- write_msi_msg(virq, &msg); ++ pci_write_msi_msg(virq, &msg); + } + return 0; + } +diff --git a/arch/powerpc/platforms/ps3/interrupt.c b/arch/powerpc/platforms/ps3/interrupt.c +index 5f3b232..df0c086 100644 +--- a/arch/powerpc/platforms/ps3/interrupt.c ++++ b/arch/powerpc/platforms/ps3/interrupt.c +@@ -678,7 +678,8 @@ static int ps3_host_map(struct irq_domain *h, unsigned int virq, + return 0; + } + +-static int ps3_host_match(struct irq_domain *h, struct device_node *np) ++static int ps3_host_match(struct irq_domain *h, struct device_node *np, ++ enum irq_domain_bus_token bus_token) + { + /* Match all */ + return 1; +diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c +index 8b909e9..691a154 100644 +--- a/arch/powerpc/platforms/pseries/msi.c ++++ b/arch/powerpc/platforms/pseries/msi.c +@@ -476,7 +476,7 @@ again: + irq_set_msi_desc(virq, entry); + + /* Read config space back so we can restore after reset */ +- __read_msi_msg(entry, &msg); ++ __pci_read_msi_msg(entry, &msg); + entry->msg = msg; + } + +diff --git a/arch/powerpc/sysdev/ehv_pic.c b/arch/powerpc/sysdev/ehv_pic.c +index 2d20f10..eca0b00 100644 +--- a/arch/powerpc/sysdev/ehv_pic.c ++++ b/arch/powerpc/sysdev/ehv_pic.c +@@ -177,7 +177,8 @@ unsigned int ehv_pic_get_irq(void) + return irq_linear_revmap(global_ehv_pic->irqhost, irq); + } + +-static int ehv_pic_host_match(struct irq_domain *h, struct device_node *node) ++static int ehv_pic_host_match(struct irq_domain *h, struct device_node *node, ++ enum irq_domain_bus_token bus_token) + { + /* Exact match, unless ehv_pic node is NULL */ + return h->of_node == NULL || h->of_node == node; +diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c +index ea6b3a1..f13282c 100644 +--- a/arch/powerpc/sysdev/fsl_msi.c ++++ b/arch/powerpc/sysdev/fsl_msi.c +@@ -82,8 +82,8 @@ static void fsl_msi_print_chip(struct irq_data *irqd, struct seq_file *p) + + + static struct irq_chip fsl_msi_chip = { +- .irq_mask = mask_msi_irq, +- .irq_unmask = unmask_msi_irq, ++ .irq_mask = pci_msi_mask_irq, ++ .irq_unmask = pci_msi_unmask_irq, + .irq_ack = fsl_msi_end_irq, + .irq_print_chip = fsl_msi_print_chip, + }; +@@ -243,7 +243,7 @@ static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) + irq_set_msi_desc(virq, entry); + + fsl_compose_msi_msg(pdev, hwirq, &msg, msi_data); +- write_msi_msg(virq, &msg); ++ pci_write_msi_msg(virq, &msg); + } + return 0; + +diff --git a/arch/powerpc/sysdev/i8259.c b/arch/powerpc/sysdev/i8259.c +index 45598da..8c3756c 100644 +--- a/arch/powerpc/sysdev/i8259.c ++++ b/arch/powerpc/sysdev/i8259.c +@@ -162,7 +162,8 @@ static struct resource pic_edgectrl_iores = { + .flags = IORESOURCE_BUSY, + }; + +-static int i8259_host_match(struct irq_domain *h, struct device_node *node) ++static int i8259_host_match(struct irq_domain *h, struct device_node *node, ++ enum irq_domain_bus_token bus_token) + { + return 
h->of_node == NULL || h->of_node == node; + } +diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c +index b50f978..1b9b00f 100644 +--- a/arch/powerpc/sysdev/ipic.c ++++ b/arch/powerpc/sysdev/ipic.c +@@ -672,7 +672,8 @@ static struct irq_chip ipic_edge_irq_chip = { + .irq_set_type = ipic_set_irq_type, + }; + +-static int ipic_host_match(struct irq_domain *h, struct device_node *node) ++static int ipic_host_match(struct irq_domain *h, struct device_node *node, ++ enum irq_domain_bus_token bus_token) + { + /* Exact match, unless ipic node is NULL */ + return h->of_node == NULL || h->of_node == node; +diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c +index 89cec0e..bf6f77e 100644 +--- a/arch/powerpc/sysdev/mpic.c ++++ b/arch/powerpc/sysdev/mpic.c +@@ -1009,7 +1009,8 @@ static struct irq_chip mpic_irq_ht_chip = { + #endif /* CONFIG_MPIC_U3_HT_IRQS */ + + +-static int mpic_host_match(struct irq_domain *h, struct device_node *node) ++static int mpic_host_match(struct irq_domain *h, struct device_node *node, ++ enum irq_domain_bus_token bus_token) + { + /* Exact match, unless mpic node is NULL */ + return h->of_node == NULL || h->of_node == node; +diff --git a/arch/powerpc/sysdev/mpic_pasemi_msi.c b/arch/powerpc/sysdev/mpic_pasemi_msi.c +index a6add4a..5a4c474 100644 +--- a/arch/powerpc/sysdev/mpic_pasemi_msi.c ++++ b/arch/powerpc/sysdev/mpic_pasemi_msi.c +@@ -42,7 +42,7 @@ static struct mpic *msi_mpic; + static void mpic_pasemi_msi_mask_irq(struct irq_data *data) + { + pr_debug("mpic_pasemi_msi_mask_irq %d\n", data->irq); +- mask_msi_irq(data); ++ pci_msi_mask_irq(data); + mpic_mask_irq(data); + } + +@@ -50,7 +50,7 @@ static void mpic_pasemi_msi_unmask_irq(struct irq_data *data) + { + pr_debug("mpic_pasemi_msi_unmask_irq %d\n", data->irq); + mpic_unmask_irq(data); +- unmask_msi_irq(data); ++ pci_msi_unmask_irq(data); + } + + static struct irq_chip mpic_pasemi_msi_chip = { +@@ -138,7 +138,7 @@ static int pasemi_msi_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) + * register to generate MSI [512...1023] + */ + msg.data = hwirq-0x200; +- write_msi_msg(virq, &msg); ++ pci_write_msi_msg(virq, &msg); + } + + return 0; +diff --git a/arch/powerpc/sysdev/mpic_u3msi.c b/arch/powerpc/sysdev/mpic_u3msi.c +index db35a40..65880cc 100644 +--- a/arch/powerpc/sysdev/mpic_u3msi.c ++++ b/arch/powerpc/sysdev/mpic_u3msi.c +@@ -25,14 +25,14 @@ static struct mpic *msi_mpic; + + static void mpic_u3msi_mask_irq(struct irq_data *data) + { +- mask_msi_irq(data); ++ pci_msi_mask_irq(data); + mpic_mask_irq(data); + } + + static void mpic_u3msi_unmask_irq(struct irq_data *data) + { + mpic_unmask_irq(data); +- unmask_msi_irq(data); ++ pci_msi_unmask_irq(data); + } + + static struct irq_chip mpic_u3msi_chip = { +@@ -172,7 +172,7 @@ static int u3msi_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) + printk("u3msi: allocated virq 0x%x (hw 0x%x) addr 0x%lx\n", + virq, hwirq, (unsigned long)addr); + msg.data = hwirq; +- write_msi_msg(virq, &msg); ++ pci_write_msi_msg(virq, &msg); + + hwirq++; + } +diff --git a/arch/powerpc/sysdev/ppc4xx_hsta_msi.c b/arch/powerpc/sysdev/ppc4xx_hsta_msi.c +index a6a4dbd..908105f 100644 +--- a/arch/powerpc/sysdev/ppc4xx_hsta_msi.c ++++ b/arch/powerpc/sysdev/ppc4xx_hsta_msi.c +@@ -85,7 +85,7 @@ static int hsta_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) + msi_bitmap_free_hwirqs(&ppc4xx_hsta_msi.bmp, irq, 1); + return -EINVAL; + } +- write_msi_msg(hwirq, &msg); ++ pci_write_msi_msg(hwirq, &msg); + } + + return 0; +diff --git 
a/arch/powerpc/sysdev/ppc4xx_msi.c b/arch/powerpc/sysdev/ppc4xx_msi.c +index 85d9c18..c6df3e2 100644 +--- a/arch/powerpc/sysdev/ppc4xx_msi.c ++++ b/arch/powerpc/sysdev/ppc4xx_msi.c +@@ -116,7 +116,7 @@ static int ppc4xx_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) + + irq_set_msi_desc(virq, entry); + msg.data = int_no; +- write_msi_msg(virq, &msg); ++ pci_write_msi_msg(virq, &msg); + } + return 0; + } +diff --git a/arch/powerpc/sysdev/qe_lib/qe_ic.c b/arch/powerpc/sysdev/qe_lib/qe_ic.c +index b2b87c3..a433b3d 100644 +--- a/arch/powerpc/sysdev/qe_lib/qe_ic.c ++++ b/arch/powerpc/sysdev/qe_lib/qe_ic.c +@@ -245,7 +245,8 @@ static struct irq_chip qe_ic_irq_chip = { + .irq_mask_ack = qe_ic_mask_irq, + }; + +-static int qe_ic_host_match(struct irq_domain *h, struct device_node *node) ++static int qe_ic_host_match(struct irq_domain *h, struct device_node *node, ++ enum irq_domain_bus_token bus_token) + { + /* Exact match, unless qe_ic node is NULL */ + return h->of_node == NULL || h->of_node == node; +diff --git a/arch/powerpc/sysdev/xics/ics-opal.c b/arch/powerpc/sysdev/xics/ics-opal.c +index 3c6ee1b..4ba554e 100644 +--- a/arch/powerpc/sysdev/xics/ics-opal.c ++++ b/arch/powerpc/sysdev/xics/ics-opal.c +@@ -73,7 +73,7 @@ static unsigned int ics_opal_startup(struct irq_data *d) + * at that level, so we do it here by hand. + */ + if (d->msi_desc) +- unmask_msi_irq(d); ++ pci_msi_unmask_irq(d); + #endif + + /* unmask it */ +diff --git a/arch/powerpc/sysdev/xics/ics-rtas.c b/arch/powerpc/sysdev/xics/ics-rtas.c +index 936575d..bc81335 100644 +--- a/arch/powerpc/sysdev/xics/ics-rtas.c ++++ b/arch/powerpc/sysdev/xics/ics-rtas.c +@@ -76,7 +76,7 @@ static unsigned int ics_rtas_startup(struct irq_data *d) + * at that level, so we do it here by hand. + */ + if (d->msi_desc) +- unmask_msi_irq(d); ++ pci_msi_unmask_irq(d); + #endif + /* unmask it */ + ics_rtas_unmask_irq(d); +diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c +index fe0cca4..13ab716 100644 +--- a/arch/powerpc/sysdev/xics/xics-common.c ++++ b/arch/powerpc/sysdev/xics/xics-common.c +@@ -300,7 +300,8 @@ int xics_get_irq_server(unsigned int virq, const struct cpumask *cpumask, + } + #endif /* CONFIG_SMP */ + +-static int xics_host_match(struct irq_domain *h, struct device_node *node) ++static int xics_host_match(struct irq_domain *h, struct device_node *node, ++ enum irq_domain_bus_token bus_token) + { + struct ics *ics; + +diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c +index 2fa7b14..d59c825 100644 +--- a/arch/s390/pci/pci.c ++++ b/arch/s390/pci/pci.c +@@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(zpci_list_lock); + + static struct irq_chip zpci_irq_chip = { + .name = "zPCI", +- .irq_unmask = unmask_msi_irq, +- .irq_mask = mask_msi_irq, ++ .irq_unmask = pci_msi_unmask_irq, ++ .irq_mask = pci_msi_mask_irq, + }; + + static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES); +@@ -403,7 +403,7 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) + msg.data = hwirq; + msg.address_lo = zdev->msi_addr & 0xffffffff; + msg.address_hi = zdev->msi_addr >> 32; +- write_msi_msg(irq, &msg); ++ pci_write_msi_msg(irq, &msg); + airq_iv_set_data(zdev->aibv, hwirq, irq); + hwirq++; + } +@@ -448,9 +448,9 @@ void arch_teardown_msi_irqs(struct pci_dev *pdev) + /* Release MSI interrupts */ + list_for_each_entry(msi, &pdev->msi_list, list) { + if (msi->msi_attrib.is_msix) +- default_msix_mask_irq(msi, 1); ++ __pci_msix_desc_mask_irq(msi, 1); + else +- default_msi_mask_irq(msi, 1, 1); ++ 
__pci_msi_desc_mask_irq(msi, 1, 1); + irq_set_msi_desc(msi->irq, NULL); + irq_free_desc(msi->irq); + msi->msg.address_lo = 0; +diff --git a/arch/sparc/kernel/pci_msi.c b/arch/sparc/kernel/pci_msi.c +index 580651a..84e16d8 100644 +--- a/arch/sparc/kernel/pci_msi.c ++++ b/arch/sparc/kernel/pci_msi.c +@@ -111,10 +111,10 @@ static void free_msi(struct pci_pbm_info *pbm, int msi_num) + + static struct irq_chip msi_irq = { + .name = "PCI-MSI", +- .irq_mask = mask_msi_irq, +- .irq_unmask = unmask_msi_irq, +- .irq_enable = unmask_msi_irq, +- .irq_disable = mask_msi_irq, ++ .irq_mask = pci_msi_mask_irq, ++ .irq_unmask = pci_msi_unmask_irq, ++ .irq_enable = pci_msi_unmask_irq, ++ .irq_disable = pci_msi_mask_irq, + /* XXX affinity XXX */ + }; + +@@ -161,7 +161,7 @@ static int sparc64_setup_msi_irq(unsigned int *irq_p, + msg.data = msi; + + irq_set_msi_desc(*irq_p, entry); +- write_msi_msg(*irq_p, &msg); ++ pci_write_msi_msg(*irq_p, &msg); + + return 0; + +diff --git a/arch/tile/kernel/pci_gx.c b/arch/tile/kernel/pci_gx.c +index e39f9c5..e717af2 100644 +--- a/arch/tile/kernel/pci_gx.c ++++ b/arch/tile/kernel/pci_gx.c +@@ -1453,7 +1453,7 @@ static struct pci_ops tile_cfg_ops = { + static unsigned int tilegx_msi_startup(struct irq_data *d) + { + if (d->msi_desc) +- unmask_msi_irq(d); ++ pci_msi_unmask_irq(d); + + return 0; + } +@@ -1465,14 +1465,14 @@ static void tilegx_msi_ack(struct irq_data *d) + + static void tilegx_msi_mask(struct irq_data *d) + { +- mask_msi_irq(d); ++ pci_msi_mask_irq(d); + __insn_mtspr(SPR_IPI_MASK_SET_K, 1UL << d->irq); + } + + static void tilegx_msi_unmask(struct irq_data *d) + { + __insn_mtspr(SPR_IPI_MASK_RESET_K, 1UL << d->irq); +- unmask_msi_irq(d); ++ pci_msi_unmask_irq(d); + } + + static struct irq_chip tilegx_msi_chip = { +@@ -1590,7 +1590,7 @@ int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc) + msg.address_hi = msi_addr >> 32; + msg.address_lo = msi_addr & 0xffffffff; + +- write_msi_msg(irq, &msg); ++ pci_write_msi_msg(irq, &msg); + irq_set_chip_and_handler(irq, &tilegx_msi_chip, handle_level_irq); + irq_set_handler_data(irq, controller); + +diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h +index e45e4da..f58a9c7 100644 +--- a/arch/x86/include/asm/x86_init.h ++++ b/arch/x86/include/asm/x86_init.h +@@ -172,7 +172,6 @@ struct x86_platform_ops { + + struct pci_dev; + struct msi_msg; +-struct msi_desc; + + struct x86_msi_ops { + int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type); +@@ -183,8 +182,6 @@ struct x86_msi_ops { + void (*teardown_msi_irqs)(struct pci_dev *dev); + void (*restore_msi_irqs)(struct pci_dev *dev); + int (*setup_hpet_msi)(unsigned int irq, unsigned int id); +- u32 (*msi_mask_irq)(struct msi_desc *desc, u32 mask, u32 flag); +- u32 (*msix_mask_irq)(struct msi_desc *desc, u32 flag); + }; + + struct IO_APIC_route_entry; +diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c +index 1183d54..7ffe0a2 100644 +--- a/arch/x86/kernel/apic/io_apic.c ++++ b/arch/x86/kernel/apic/io_apic.c +@@ -3158,7 +3158,7 @@ msi_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) + msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; + msg.address_lo |= MSI_ADDR_DEST_ID(dest); + +- __write_msi_msg(data->msi_desc, &msg); ++ __pci_write_msi_msg(data->msi_desc, &msg); + + return IRQ_SET_MASK_OK_NOCOPY; + } +@@ -3169,8 +3169,8 @@ msi_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) + */ + static struct irq_chip msi_chip = { + .name = "PCI-MSI", +- .irq_unmask = 
unmask_msi_irq, +- .irq_mask = mask_msi_irq, ++ .irq_unmask = pci_msi_unmask_irq, ++ .irq_mask = pci_msi_mask_irq, + .irq_ack = ack_apic_edge, + .irq_set_affinity = msi_set_affinity, + .irq_retrigger = ioapic_retrigger_irq, +@@ -3196,7 +3196,7 @@ int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, + * MSI message denotes a contiguous group of IRQs, written for 0th IRQ. + */ + if (!irq_offset) +- write_msi_msg(irq, &msg); ++ pci_write_msi_msg(irq, &msg); + + setup_remapped_irq(irq, irq_cfg(irq), chip); + +diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c +index e48b674..234b072 100644 +--- a/arch/x86/kernel/x86_init.c ++++ b/arch/x86/kernel/x86_init.c +@@ -116,8 +116,6 @@ struct x86_msi_ops x86_msi = { + .teardown_msi_irqs = default_teardown_msi_irqs, + .restore_msi_irqs = default_restore_msi_irqs, + .setup_hpet_msi = default_setup_hpet_msi, +- .msi_mask_irq = default_msi_mask_irq, +- .msix_mask_irq = default_msix_mask_irq, + }; + + /* MSI arch specific hooks */ +@@ -140,14 +138,6 @@ void arch_restore_msi_irqs(struct pci_dev *dev) + { + x86_msi.restore_msi_irqs(dev); + } +-u32 arch_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) +-{ +- return x86_msi.msi_mask_irq(desc, mask, flag); +-} +-u32 arch_msix_mask_irq(struct msi_desc *desc, u32 flag) +-{ +- return x86_msi.msix_mask_irq(desc, flag); +-} + #endif + + struct x86_io_apic_ops x86_io_apic_ops = { +diff --git a/arch/x86/pci/bus_numa.c b/arch/x86/pci/bus_numa.c +index f3a2cfc..7bcf06a 100644 +--- a/arch/x86/pci/bus_numa.c ++++ b/arch/x86/pci/bus_numa.c +@@ -31,7 +31,7 @@ void x86_pci_root_bus_resources(int bus, struct list_head *resources) + { + struct pci_root_info *info = x86_find_pci_root_info(bus); + struct pci_root_res *root_res; +- struct pci_host_bridge_window *window; ++ struct resource_entry *window; + bool found = false; + + if (!info) +@@ -41,7 +41,7 @@ void x86_pci_root_bus_resources(int bus, struct list_head *resources) + bus); + + /* already added by acpi ? 
*/ +- list_for_each_entry(window, resources, list) ++ resource_list_for_each_entry(window, resources) + if (window->res->flags & IORESOURCE_BUS) { + found = true; + break; +diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c +index 6b3cf7c..4f6844b 100644 +--- a/arch/x86/pci/xen.c ++++ b/arch/x86/pci/xen.c +@@ -229,7 +229,7 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) + return 1; + + list_for_each_entry(msidesc, &dev->msi_list, list) { +- __read_msi_msg(msidesc, &msg); ++ __pci_read_msi_msg(msidesc, &msg); + pirq = MSI_ADDR_EXT_DEST_ID(msg.address_hi) | + ((msg.address_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xff); + if (msg.data != XEN_PIRQ_MSI_DATA || +@@ -240,7 +240,7 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) + goto error; + } + xen_msi_compose_msg(dev, pirq, &msg); +- __write_msi_msg(msidesc, &msg); ++ __pci_write_msi_msg(msidesc, &msg); + dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq); + } else { + dev_dbg(&dev->dev, +@@ -394,14 +394,7 @@ static void xen_teardown_msi_irq(unsigned int irq) + { + xen_destroy_irq(irq); + } +-static u32 xen_nop_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) +-{ +- return 0; +-} +-static u32 xen_nop_msix_mask_irq(struct msi_desc *desc, u32 flag) +-{ +- return 0; +-} ++ + #endif + + int __init pci_xen_init(void) +@@ -425,8 +418,7 @@ int __init pci_xen_init(void) + x86_msi.setup_msi_irqs = xen_setup_msi_irqs; + x86_msi.teardown_msi_irq = xen_teardown_msi_irq; + x86_msi.teardown_msi_irqs = xen_teardown_msi_irqs; +- x86_msi.msi_mask_irq = xen_nop_msi_mask_irq; +- x86_msi.msix_mask_irq = xen_nop_msix_mask_irq; ++ pci_msi_ignore_mask = 1; + #endif + return 0; + } +@@ -460,8 +452,7 @@ int __init pci_xen_initial_domain(void) + x86_msi.setup_msi_irqs = xen_initdom_setup_msi_irqs; + x86_msi.teardown_msi_irq = xen_teardown_msi_irq; + x86_msi.restore_msi_irqs = xen_initdom_restore_msi_irqs; +- x86_msi.msi_mask_irq = xen_nop_msi_mask_irq; +- x86_msi.msix_mask_irq = xen_nop_msix_mask_irq; ++ pci_msi_ignore_mask = 1; + #endif + __acpi_register_gsi = acpi_register_gsi_xen; + /* Pre-allocate legacy irqs */ +diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c +index fdb5701..0ad0ce6 100644 +--- a/drivers/acpi/acpi_lpss.c ++++ b/drivers/acpi/acpi_lpss.c +@@ -325,7 +325,7 @@ static int acpi_lpss_create_device(struct acpi_device *adev, + { + struct lpss_device_desc *dev_desc; + struct lpss_private_data *pdata; +- struct resource_list_entry *rentry; ++ struct resource_entry *rentry; + struct list_head resource_list; + struct platform_device *pdev; + int ret; +@@ -345,12 +345,12 @@ static int acpi_lpss_create_device(struct acpi_device *adev, + goto err_out; + + list_for_each_entry(rentry, &resource_list, node) +- if (resource_type(&rentry->res) == IORESOURCE_MEM) { ++ if (resource_type(rentry->res) == IORESOURCE_MEM) { + if (dev_desc->prv_size_override) + pdata->mmio_size = dev_desc->prv_size_override; + else +- pdata->mmio_size = resource_size(&rentry->res); +- pdata->mmio_base = ioremap(rentry->res.start, ++ pdata->mmio_size = resource_size(rentry->res); ++ pdata->mmio_base = ioremap(rentry->res->start, + pdata->mmio_size); + break; + } +diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c +index 6ba8beb..1284138 100644 +--- a/drivers/acpi/acpi_platform.c ++++ b/drivers/acpi/acpi_platform.c +@@ -45,7 +45,7 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev) + struct platform_device *pdev = NULL; + struct acpi_device *acpi_parent; + 
struct platform_device_info pdevinfo; +- struct resource_list_entry *rentry; ++ struct resource_entry *rentry; + struct list_head resource_list; + struct resource *resources = NULL; + int count; +@@ -71,7 +71,7 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev) + } + count = 0; + list_for_each_entry(rentry, &resource_list, node) +- resources[count++] = rentry->res; ++ resources[count++] = *rentry->res; + + acpi_dev_free_resource_list(&resource_list); + } +diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c +index 2ba8f02..e7f4aa0 100644 +--- a/drivers/acpi/resource.c ++++ b/drivers/acpi/resource.c +@@ -415,12 +415,7 @@ EXPORT_SYMBOL_GPL(acpi_dev_resource_interrupt); + */ + void acpi_dev_free_resource_list(struct list_head *list) + { +- struct resource_list_entry *rentry, *re; +- +- list_for_each_entry_safe(rentry, re, list, node) { +- list_del(&rentry->node); +- kfree(rentry); +- } ++ resource_list_free(list); + } + EXPORT_SYMBOL_GPL(acpi_dev_free_resource_list); + +@@ -435,15 +430,15 @@ struct res_proc_context { + static acpi_status acpi_dev_new_resource_entry(struct resource *r, + struct res_proc_context *c) + { +- struct resource_list_entry *rentry; ++ struct resource_entry *rentry; + +- rentry = kmalloc(sizeof(*rentry), GFP_KERNEL); ++ rentry = resource_list_create_entry(NULL, 0); + if (!rentry) { + c->error = -ENOMEM; + return AE_NO_MEMORY; + } +- rentry->res = *r; +- list_add_tail(&rentry->node, c->list); ++ *rentry->res = *r; ++ resource_list_add_tail(rentry, c->list); + c->count++; + return AE_OK; + } +@@ -503,7 +498,7 @@ static acpi_status acpi_dev_process_resource(struct acpi_resource *ares, + * returned as the final error code. + * + * The resultant struct resource objects are put on the list pointed to by +- * @list, that must be empty initially, as members of struct resource_list_entry ++ * @list, that must be empty initially, as members of struct resource_entry + * objects. Callers of this routine should use %acpi_dev_free_resource_list() to + * free that list. 
+ * +diff --git a/drivers/base/core.c b/drivers/base/core.c +index 842d047..4c7a18f 100644 +--- a/drivers/base/core.c ++++ b/drivers/base/core.c +@@ -661,6 +661,9 @@ void device_initialize(struct device *dev) + INIT_LIST_HEAD(&dev->devres_head); + device_pm_init(dev); + set_dev_node(dev, -1); ++#ifdef CONFIG_GENERIC_MSI_IRQ ++ INIT_LIST_HEAD(&dev->msi_list); ++#endif + } + EXPORT_SYMBOL_GPL(device_initialize); + +diff --git a/drivers/base/platform.c b/drivers/base/platform.c +index 317e0e4..b387fb9 100644 +--- a/drivers/base/platform.c ++++ b/drivers/base/platform.c +@@ -1011,6 +1011,7 @@ int __init platform_bus_init(void) + error = bus_register(&platform_bus_type); + if (error) + device_unregister(&platform_bus); ++ of_platform_register_reconfig_notifier(); + return error; + } + +diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c +index de361a1..5a63564 100644 +--- a/drivers/dma/acpi-dma.c ++++ b/drivers/dma/acpi-dma.c +@@ -43,7 +43,7 @@ static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp, + { + const struct acpi_csrt_shared_info *si; + struct list_head resource_list; +- struct resource_list_entry *rentry; ++ struct resource_entry *rentry; + resource_size_t mem = 0, irq = 0; + int ret; + +@@ -56,10 +56,10 @@ static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp, + return 0; + + list_for_each_entry(rentry, &resource_list, node) { +- if (resource_type(&rentry->res) == IORESOURCE_MEM) +- mem = rentry->res.start; +- else if (resource_type(&rentry->res) == IORESOURCE_IRQ) +- irq = rentry->res.start; ++ if (resource_type(rentry->res) == IORESOURCE_MEM) ++ mem = rentry->res->start; ++ else if (resource_type(rentry->res) == IORESOURCE_IRQ) ++ irq = rentry->res->start; + } + + acpi_dev_free_resource_list(&resource_list); +diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig +index dd51122..2cdcc76 100644 +--- a/drivers/iommu/Kconfig ++++ b/drivers/iommu/Kconfig +@@ -13,9 +13,35 @@ menuconfig IOMMU_SUPPORT + + if IOMMU_SUPPORT + ++menu "Generic IOMMU Pagetable Support" ++ ++# Selected by the actual pagetable implementations ++config IOMMU_IO_PGTABLE ++ bool ++ ++config IOMMU_IO_PGTABLE_LPAE ++ bool "ARMv7/v8 Long Descriptor Format" ++ select IOMMU_IO_PGTABLE ++ help ++ Enable support for the ARM long descriptor pagetable format. ++ This allocator supports 4K/2M/1G, 16K/32M and 64K/512M page ++ sizes at both stage-1 and stage-2, as well as address spaces ++ up to 48-bits in size. ++ ++config IOMMU_IO_PGTABLE_LPAE_SELFTEST ++ bool "LPAE selftests" ++ depends on IOMMU_IO_PGTABLE_LPAE ++ help ++ Enable self-tests for LPAE page table allocator. This performs ++ a series of page-table consistency checks during boot. ++ ++ If unsure, say N here. ++ ++endmenu ++ + config OF_IOMMU + def_bool y +- depends on OF ++ depends on OF && IOMMU_API + + config FSL_PAMU + bool "Freescale IOMMU support" +@@ -291,13 +317,13 @@ config SPAPR_TCE_IOMMU + + config ARM_SMMU + bool "ARM Ltd. System MMU (SMMU) Support" +- depends on ARM64 || (ARM_LPAE && OF) ++ depends on ARM64 || ARM + select IOMMU_API ++ select IOMMU_IO_PGTABLE_LPAE + select ARM_DMA_USE_IOMMU if ARM + help + Support for implementations of the ARM System MMU architecture +- versions 1 and 2. The driver supports both v7l and v8l table +- formats with 4k and 64k page sizes. ++ versions 1 and 2. + + Say Y here if your SoC includes an IOMMU device implementing + the ARM SMMU architecture. 
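The Kconfig changes above carve the page-table code out of the SMMU driver into a generic "io-pgtable" layer, which the reworked arm-smmu.c below consumes instead of rolling its own ARM_SMMU_PTE_* definitions. The following is a sketch of how a driver is expected to use that layer, based on the upstream io-pgtable API this series introduces; the callback and field names follow the upstream header and may differ slightly in this backport.

    /* Hedged sketch: allocating and exercising an LPAE stage-1 table. */
    #include <linux/iommu.h>
    #include <linux/sizes.h>
    #include "io-pgtable.h"

    /* TLB maintenance callbacks the allocator invokes on our behalf. */
    static void ex_tlb_flush_all(void *cookie) { /* invalidate whole TLB */ }
    static void ex_tlb_add_flush(unsigned long iova, size_t size, bool leaf,
                                 void *cookie) { /* queue range invalidation */ }
    static void ex_tlb_sync(void *cookie) { /* wait for invalidations */ }

    static const struct iommu_gather_ops ex_gather_ops = {
            .tlb_flush_all = ex_tlb_flush_all,
            .tlb_add_flush = ex_tlb_add_flush,
            .tlb_sync      = ex_tlb_sync,
    };

    static int ex_init_pgtable(void *cookie)
    {
            struct io_pgtable_ops *ops;
            struct io_pgtable_cfg cfg = {
                    .pgsize_bitmap = SZ_4K | SZ_2M | SZ_1G, /* 4K granule */
                    .ias           = 48,  /* input (IOVA) address bits */
                    .oas           = 48,  /* output (PA) address bits */
                    .tlb           = &ex_gather_ops,
            };

            ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, cookie);
            if (!ops)
                    return -ENOMEM;

            /* Map one 4K page read/write, then unmap and free the table. */
            ops->map(ops, 0x1000, 0x80001000, SZ_4K,
                     IOMMU_READ | IOMMU_WRITE);
            ops->unmap(ops, 0x1000, SZ_4K);
            free_io_pgtable_ops(ops);
            return 0;
    }

This indirection is also what permits the "depends on ARM64 || ARM" relaxation above: the driver can now support any format the io-pgtable layer implements, rather than hard-coding v8 long-descriptor tables.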
+diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile +index 16edef7..269cdd8 100644 +--- a/drivers/iommu/Makefile ++++ b/drivers/iommu/Makefile +@@ -1,6 +1,8 @@ + obj-$(CONFIG_IOMMU_API) += iommu.o + obj-$(CONFIG_IOMMU_API) += iommu-traces.o + obj-$(CONFIG_IOMMU_API) += iommu-sysfs.o ++obj-$(CONFIG_IOMMU_IO_PGTABLE) += io-pgtable.o ++obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += io-pgtable-arm.o + obj-$(CONFIG_OF_IOMMU) += of_iommu.o + obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o + obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o +diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c +index af3daf8..f7131fa 100644 +--- a/drivers/iommu/amd_iommu.c ++++ b/drivers/iommu/amd_iommu.c +@@ -343,8 +343,9 @@ static u16 get_alias(struct device *dev) + */ + if (pci_alias == devid && + PCI_BUS_NUM(ivrs_alias) == pdev->bus->number) { +- pdev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN; +- pdev->dma_alias_devfn = ivrs_alias & 0xff; ++ pdev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVID; ++ pdev->dma_alias_devid = PCI_DEVID(pdev->bus->number, ++ ivrs_alias & 0xff); + pr_info("AMD-Vi: Added PCI DMA alias %02x.%d for %s\n", + PCI_SLOT(ivrs_alias), PCI_FUNC(ivrs_alias), + dev_name(dev)); +@@ -3432,6 +3433,7 @@ static const struct iommu_ops amd_iommu_ops = { + .detach_dev = amd_iommu_detach_device, + .map = amd_iommu_map, + .unmap = amd_iommu_unmap, ++ .map_sg = default_iommu_map_sg, + .iova_to_phys = amd_iommu_iova_to_phys, + .pgsize_bitmap = AMD_IOMMU_PGSIZES, + }; +diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c +index 60558f7..10e584b 100644 +--- a/drivers/iommu/arm-smmu.c ++++ b/drivers/iommu/arm-smmu.c +@@ -23,8 +23,6 @@ + * - Stream-matching and stream-indexing + * - v7/v8 long-descriptor format + * - Non-secure access to the SMMU +- * - 4k and 64k pages, with contiguous pte hints. +- * - Up to 48-bit addressing (dependent on VA_BITS) + * - Context fault reporting + */ + +@@ -36,7 +34,7 @@ + #include + #include + #include +-#include ++#include + #include + #include + #include +@@ -46,6 +44,16 @@ + + #include + ++#include "io-pgtable.h" ++ ++#ifdef CONFIG_FSL_MC_BUS ++#include <../drivers/staging/fsl-mc/include/mc.h> ++#endif ++ ++#ifdef CONFIG_PCI_LAYERSCAPE ++#include <../drivers/pci/host/pci-layerscape.h> ++#endif ++ + #include + + /* Maximum number of stream IDs assigned to a single device */ +@@ -71,40 +79,6 @@ + ((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS) \ + ? 
0x400 : 0)) + +-/* Page table bits */ +-#define ARM_SMMU_PTE_XN (((pteval_t)3) << 53) +-#define ARM_SMMU_PTE_CONT (((pteval_t)1) << 52) +-#define ARM_SMMU_PTE_AF (((pteval_t)1) << 10) +-#define ARM_SMMU_PTE_SH_NS (((pteval_t)0) << 8) +-#define ARM_SMMU_PTE_SH_OS (((pteval_t)2) << 8) +-#define ARM_SMMU_PTE_SH_IS (((pteval_t)3) << 8) +-#define ARM_SMMU_PTE_PAGE (((pteval_t)3) << 0) +- +-#if PAGE_SIZE == SZ_4K +-#define ARM_SMMU_PTE_CONT_ENTRIES 16 +-#elif PAGE_SIZE == SZ_64K +-#define ARM_SMMU_PTE_CONT_ENTRIES 32 +-#else +-#define ARM_SMMU_PTE_CONT_ENTRIES 1 +-#endif +- +-#define ARM_SMMU_PTE_CONT_SIZE (PAGE_SIZE * ARM_SMMU_PTE_CONT_ENTRIES) +-#define ARM_SMMU_PTE_CONT_MASK (~(ARM_SMMU_PTE_CONT_SIZE - 1)) +- +-/* Stage-1 PTE */ +-#define ARM_SMMU_PTE_AP_UNPRIV (((pteval_t)1) << 6) +-#define ARM_SMMU_PTE_AP_RDONLY (((pteval_t)2) << 6) +-#define ARM_SMMU_PTE_ATTRINDX_SHIFT 2 +-#define ARM_SMMU_PTE_nG (((pteval_t)1) << 11) +- +-/* Stage-2 PTE */ +-#define ARM_SMMU_PTE_HAP_FAULT (((pteval_t)0) << 6) +-#define ARM_SMMU_PTE_HAP_READ (((pteval_t)1) << 6) +-#define ARM_SMMU_PTE_HAP_WRITE (((pteval_t)2) << 6) +-#define ARM_SMMU_PTE_MEMATTR_OIWB (((pteval_t)0xf) << 2) +-#define ARM_SMMU_PTE_MEMATTR_NC (((pteval_t)0x5) << 2) +-#define ARM_SMMU_PTE_MEMATTR_DEV (((pteval_t)0x1) << 2) +- + /* Configuration registers */ + #define ARM_SMMU_GR0_sCR0 0x0 + #define sCR0_CLIENTPD (1 << 0) +@@ -132,17 +106,12 @@ + #define ARM_SMMU_GR0_sGFSYNR0 0x50 + #define ARM_SMMU_GR0_sGFSYNR1 0x54 + #define ARM_SMMU_GR0_sGFSYNR2 0x58 +-#define ARM_SMMU_GR0_PIDR0 0xfe0 +-#define ARM_SMMU_GR0_PIDR1 0xfe4 +-#define ARM_SMMU_GR0_PIDR2 0xfe8 + + #define ID0_S1TS (1 << 30) + #define ID0_S2TS (1 << 29) + #define ID0_NTS (1 << 28) + #define ID0_SMS (1 << 27) +-#define ID0_PTFS_SHIFT 24 +-#define ID0_PTFS_MASK 0x2 +-#define ID0_PTFS_V8_ONLY 0x2 ++#define ID0_ATOSNS (1 << 26) + #define ID0_CTTW (1 << 14) + #define ID0_NUMIRPT_SHIFT 16 + #define ID0_NUMIRPT_MASK 0xff +@@ -169,11 +138,7 @@ + #define ID2_PTFS_16K (1 << 13) + #define ID2_PTFS_64K (1 << 14) + +-#define PIDR2_ARCH_SHIFT 4 +-#define PIDR2_ARCH_MASK 0xf +- + /* Global TLB invalidation */ +-#define ARM_SMMU_GR0_STLBIALL 0x60 + #define ARM_SMMU_GR0_TLBIVMID 0x64 + #define ARM_SMMU_GR0_TLBIALLNSNH 0x68 + #define ARM_SMMU_GR0_TLBIALLH 0x6c +@@ -231,13 +196,25 @@ + #define ARM_SMMU_CB_TTBCR2 0x10 + #define ARM_SMMU_CB_TTBR0_LO 0x20 + #define ARM_SMMU_CB_TTBR0_HI 0x24 ++#define ARM_SMMU_CB_TTBR1_LO 0x28 ++#define ARM_SMMU_CB_TTBR1_HI 0x2c + #define ARM_SMMU_CB_TTBCR 0x30 + #define ARM_SMMU_CB_S1_MAIR0 0x38 ++#define ARM_SMMU_CB_S1_MAIR1 0x3c ++#define ARM_SMMU_CB_PAR_LO 0x50 ++#define ARM_SMMU_CB_PAR_HI 0x54 + #define ARM_SMMU_CB_FSR 0x58 + #define ARM_SMMU_CB_FAR_LO 0x60 + #define ARM_SMMU_CB_FAR_HI 0x64 + #define ARM_SMMU_CB_FSYNR0 0x68 ++#define ARM_SMMU_CB_S1_TLBIVA 0x600 + #define ARM_SMMU_CB_S1_TLBIASID 0x610 ++#define ARM_SMMU_CB_S1_TLBIVAL 0x620 ++#define ARM_SMMU_CB_S2_TLBIIPAS2 0x630 ++#define ARM_SMMU_CB_S2_TLBIIPAS2L 0x638 ++#define ARM_SMMU_CB_ATS1PR_LO 0x800 ++#define ARM_SMMU_CB_ATS1PR_HI 0x804 ++#define ARM_SMMU_CB_ATSR 0x8f0 + + #define SCTLR_S1_ASIDPNE (1 << 12) + #define SCTLR_CFCFG (1 << 7) +@@ -249,64 +226,17 @@ + #define SCTLR_M (1 << 0) + #define SCTLR_EAE_SBOP (SCTLR_AFE | SCTLR_TRE) + +-#define RESUME_RETRY (0 << 0) +-#define RESUME_TERMINATE (1 << 0) +- +-#define TTBCR_EAE (1 << 31) ++#define CB_PAR_F (1 << 0) + +-#define TTBCR_PASIZE_SHIFT 16 +-#define TTBCR_PASIZE_MASK 0x7 ++#define ATSR_ACTIVE (1 << 0) + +-#define TTBCR_TG0_4K (0 << 14) +-#define 
TTBCR_TG0_64K (1 << 14) +- +-#define TTBCR_SH0_SHIFT 12 +-#define TTBCR_SH0_MASK 0x3 +-#define TTBCR_SH_NS 0 +-#define TTBCR_SH_OS 2 +-#define TTBCR_SH_IS 3 +- +-#define TTBCR_ORGN0_SHIFT 10 +-#define TTBCR_IRGN0_SHIFT 8 +-#define TTBCR_RGN_MASK 0x3 +-#define TTBCR_RGN_NC 0 +-#define TTBCR_RGN_WBWA 1 +-#define TTBCR_RGN_WT 2 +-#define TTBCR_RGN_WB 3 +- +-#define TTBCR_SL0_SHIFT 6 +-#define TTBCR_SL0_MASK 0x3 +-#define TTBCR_SL0_LVL_2 0 +-#define TTBCR_SL0_LVL_1 1 +- +-#define TTBCR_T1SZ_SHIFT 16 +-#define TTBCR_T0SZ_SHIFT 0 +-#define TTBCR_SZ_MASK 0xf ++#define RESUME_RETRY (0 << 0) ++#define RESUME_TERMINATE (1 << 0) + + #define TTBCR2_SEP_SHIFT 15 +-#define TTBCR2_SEP_MASK 0x7 +- +-#define TTBCR2_PASIZE_SHIFT 0 +-#define TTBCR2_PASIZE_MASK 0x7 +- +-/* Common definitions for PASize and SEP fields */ +-#define TTBCR2_ADDR_32 0 +-#define TTBCR2_ADDR_36 1 +-#define TTBCR2_ADDR_40 2 +-#define TTBCR2_ADDR_42 3 +-#define TTBCR2_ADDR_44 4 +-#define TTBCR2_ADDR_48 5 +- +-#define TTBRn_HI_ASID_SHIFT 16 +- +-#define MAIR_ATTR_SHIFT(n) ((n) << 3) +-#define MAIR_ATTR_MASK 0xff +-#define MAIR_ATTR_DEVICE 0x04 +-#define MAIR_ATTR_NC 0x44 +-#define MAIR_ATTR_WBRWA 0xff +-#define MAIR_ATTR_IDX_NC 0 +-#define MAIR_ATTR_IDX_CACHE 1 +-#define MAIR_ATTR_IDX_DEV 2 ++#define TTBCR2_SEP_UPSTREAM (0x7 << TTBCR2_SEP_SHIFT) ++ ++#define TTBRn_HI_ASID_SHIFT 16 + + #define FSR_MULTI (1 << 31) + #define FSR_SS (1 << 30) +@@ -345,6 +275,7 @@ struct arm_smmu_smr { + struct arm_smmu_master_cfg { + int num_streamids; + u16 streamids[MAX_MASTER_STREAMIDS]; ++ u16 mask; + struct arm_smmu_smr *smrs; + }; + +@@ -366,6 +297,7 @@ struct arm_smmu_device { + #define ARM_SMMU_FEAT_TRANS_S1 (1 << 2) + #define ARM_SMMU_FEAT_TRANS_S2 (1 << 3) + #define ARM_SMMU_FEAT_TRANS_NESTED (1 << 4) ++#define ARM_SMMU_FEAT_TRANS_OPS (1 << 5) + u32 features; + + #define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0) +@@ -380,10 +312,9 @@ struct arm_smmu_device { + u32 num_mapping_groups; + DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS); + +- unsigned long s1_input_size; +- unsigned long s1_output_size; +- unsigned long s2_input_size; +- unsigned long s2_output_size; ++ unsigned long va_size; ++ unsigned long ipa_size; ++ unsigned long pa_size; + + u32 num_global_irqs; + u32 num_context_irqs; +@@ -397,19 +328,33 @@ struct arm_smmu_cfg { + u8 cbndx; + u8 irptndx; + u32 cbar; +- pgd_t *pgd; + }; + #define INVALID_IRPTNDX 0xff + + #define ARM_SMMU_CB_ASID(cfg) ((cfg)->cbndx) + #define ARM_SMMU_CB_VMID(cfg) ((cfg)->cbndx + 1) + ++enum arm_smmu_domain_stage { ++ ARM_SMMU_DOMAIN_S1 = 0, ++ ARM_SMMU_DOMAIN_S2, ++ ARM_SMMU_DOMAIN_NESTED, ++}; ++ + struct arm_smmu_domain { + struct arm_smmu_device *smmu; ++ struct io_pgtable_ops *pgtbl_ops; ++ spinlock_t pgtbl_lock; + struct arm_smmu_cfg cfg; +- spinlock_t lock; ++ enum arm_smmu_domain_stage stage; ++ struct mutex init_mutex; /* Protects smmu pointer */ ++ struct iommu_domain domain; + }; + ++static struct iommu_ops arm_smmu_ops; ++#ifdef CONFIG_FSL_MC_BUS ++static struct iommu_ops arm_fsl_mc_smmu_ops; ++#endif ++ + static DEFINE_SPINLOCK(arm_smmu_devices_lock); + static LIST_HEAD(arm_smmu_devices); + +@@ -422,6 +367,43 @@ static struct arm_smmu_option_prop arm_smmu_options[] = { + { ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" }, + { 0, NULL}, + }; ++#define CONFIG_AIOP_ERRATA ++#ifdef CONFIG_AIOP_ERRATA ++/* ++ * PL = 1, BMT = 1, VA = 1 ++ */ ++#define AIOP_SMR_VALUE 0x380 ++/* ++ * Following should be set: ++ * SHCFG: 0x3 ++ * MTCFG: 0x1 ++ * MemAttr: 0xf ++ * Type: 0x1 ++ * RACFG: 0x2 ++ * 
WACFG: 0x2 ++ */ ++#define AIOP_S2CR_VALUE 0xA1FB00 ++ ++static void arm_smmu_aiop_attr_trans(struct arm_smmu_device *smmu) ++{ ++ void __iomem *gr0_base = ARM_SMMU_GR0(smmu); ++ u16 mask = 0x7c7f; ++ int index; ++ u32 reg; ++ /* reserve one smr group for AIOP */ ++ index = --smmu->num_mapping_groups; ++ ++ reg = SMR_VALID | AIOP_SMR_VALUE << SMR_ID_SHIFT | ++ mask << SMR_MASK_SHIFT; ++ writel(reg, gr0_base + ARM_SMMU_GR0_SMR(index)); ++ writel(AIOP_S2CR_VALUE, gr0_base + ARM_SMMU_GR0_S2CR(index)); ++} ++#endif ++ ++static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom) ++{ ++ return container_of(dom, struct arm_smmu_domain, domain); ++} + + static void parse_driver_options(struct arm_smmu_device *smmu) + { +@@ -447,6 +429,16 @@ static struct device_node *dev_get_dev_node(struct device *dev) + return bus->bridge->parent->of_node; + } + ++#ifdef CONFIG_FSL_MC_BUS ++ if (dev->bus == &fsl_mc_bus_type) { ++ /* ++ * Get to the MC device tree node. ++ */ ++ while (dev->bus == &fsl_mc_bus_type) ++ dev = dev->parent; ++ } ++#endif ++ + return dev->of_node; + } + +@@ -590,7 +582,7 @@ static void __arm_smmu_free_bitmap(unsigned long *map, int idx) + } + + /* Wait for any pending TLB invalidations to complete */ +-static void arm_smmu_tlb_sync(struct arm_smmu_device *smmu) ++static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu) + { + int count = 0; + void __iomem *gr0_base = ARM_SMMU_GR0(smmu); +@@ -608,12 +600,19 @@ static void arm_smmu_tlb_sync(struct arm_smmu_device *smmu) + } + } + +-static void arm_smmu_tlb_inv_context(struct arm_smmu_domain *smmu_domain) ++static void arm_smmu_tlb_sync(void *cookie) + { ++ struct arm_smmu_domain *smmu_domain = cookie; ++ __arm_smmu_tlb_sync(smmu_domain->smmu); ++} ++ ++static void arm_smmu_tlb_inv_context(void *cookie) ++{ ++ struct arm_smmu_domain *smmu_domain = cookie; + struct arm_smmu_cfg *cfg = &smmu_domain->cfg; + struct arm_smmu_device *smmu = smmu_domain->smmu; +- void __iomem *base = ARM_SMMU_GR0(smmu); + bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS; ++ void __iomem *base; + + if (stage1) { + base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); +@@ -625,16 +624,83 @@ static void arm_smmu_tlb_inv_context(struct arm_smmu_domain *smmu_domain) + base + ARM_SMMU_GR0_TLBIVMID); + } + +- arm_smmu_tlb_sync(smmu); ++ __arm_smmu_tlb_sync(smmu); ++} ++ ++static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size, ++ bool leaf, void *cookie) ++{ ++ struct arm_smmu_domain *smmu_domain = cookie; ++ struct arm_smmu_cfg *cfg = &smmu_domain->cfg; ++ struct arm_smmu_device *smmu = smmu_domain->smmu; ++ bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS; ++ void __iomem *reg; ++ ++ if (stage1) { ++ reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); ++ reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA; ++ ++ if (!IS_ENABLED(CONFIG_64BIT) || smmu->version == ARM_SMMU_V1) { ++ iova &= ~12UL; ++ iova |= ARM_SMMU_CB_ASID(cfg); ++ writel_relaxed(iova, reg); ++#ifdef CONFIG_64BIT ++ } else { ++ iova >>= 12; ++ iova |= (u64)ARM_SMMU_CB_ASID(cfg) << 48; ++ writeq_relaxed(iova, reg); ++#endif ++ } ++#ifdef CONFIG_64BIT ++ } else if (smmu->version == ARM_SMMU_V2) { ++ reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); ++ reg += leaf ? 
ARM_SMMU_CB_S2_TLBIIPAS2L : ++ ARM_SMMU_CB_S2_TLBIIPAS2; ++ writeq_relaxed(iova >> 12, reg); ++#endif ++ } else { ++ reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID; ++ writel_relaxed(ARM_SMMU_CB_VMID(cfg), reg); ++ } ++} ++ ++static void arm_smmu_flush_pgtable(void *addr, size_t size, void *cookie) ++{ ++ struct arm_smmu_domain *smmu_domain = cookie; ++ struct arm_smmu_device *smmu = smmu_domain->smmu; ++ unsigned long offset = (unsigned long)addr & ~PAGE_MASK; ++ ++ ++ /* Ensure new page tables are visible to the hardware walker */ ++ if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) { ++ dsb(ishst); ++ } else { ++ /* ++ * If the SMMU can't walk tables in the CPU caches, treat them ++ * like non-coherent DMA since we need to flush the new entries ++ * all the way out to memory. There's no possibility of ++ * recursion here as the SMMU table walker will not be wired ++ * through another SMMU. ++ */ ++ dma_map_page(smmu->dev, virt_to_page(addr), offset, size, ++ DMA_TO_DEVICE); ++ } + } + ++static struct iommu_gather_ops arm_smmu_gather_ops = { ++ .tlb_flush_all = arm_smmu_tlb_inv_context, ++ .tlb_add_flush = arm_smmu_tlb_inv_range_nosync, ++ .tlb_sync = arm_smmu_tlb_sync, ++ .flush_pgtable = arm_smmu_flush_pgtable, ++}; ++ + static irqreturn_t arm_smmu_context_fault(int irq, void *dev) + { + int flags, ret; + u32 fsr, far, fsynr, resume; + unsigned long iova; + struct iommu_domain *domain = dev; +- struct arm_smmu_domain *smmu_domain = domain->priv; ++ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); + struct arm_smmu_cfg *cfg = &smmu_domain->cfg; + struct arm_smmu_device *smmu = smmu_domain->smmu; + void __iomem *cb_base; +@@ -705,29 +771,8 @@ static irqreturn_t arm_smmu_global_fault(int irq, void *dev) + return IRQ_HANDLED; + } + +-static void arm_smmu_flush_pgtable(struct arm_smmu_device *smmu, void *addr, +- size_t size) +-{ +- unsigned long offset = (unsigned long)addr & ~PAGE_MASK; +- +- +- /* Ensure new page tables are visible to the hardware walker */ +- if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) { +- dsb(ishst); +- } else { +- /* +- * If the SMMU can't walk tables in the CPU caches, treat them +- * like non-coherent DMA since we need to flush the new entries +- * all the way out to memory. There's no possibility of +- * recursion here as the SMMU table walker will not be wired +- * through another SMMU. +- */ +- dma_map_page(smmu->dev, virt_to_page(addr), offset, size, +- DMA_TO_DEVICE); +- } +-} +- +-static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain) ++static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain, ++ struct io_pgtable_cfg *pgtbl_cfg) + { + u32 reg; + bool stage1; +@@ -740,6 +785,20 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain) + stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS; + cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); + ++ if (smmu->version > ARM_SMMU_V1) { ++ /* ++ * CBA2R. ++ * *Must* be initialised before CBAR thanks to VMID16 ++ * architectural oversight affected some implementations. 
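++ * (SMMUv2 moved 16-bit VMIDs into CBA2R, so the affected
++ * implementations appear to sample the CBAR VMID correctly only
++ * once CBA2R has been programmed; hence the ordering.)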
++ */ ++#ifdef CONFIG_64BIT ++ reg = CBA2R_RW64_64BIT; ++#else ++ reg = CBA2R_RW64_32BIT; ++#endif ++ writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx)); ++ } ++ + /* CBAR */ + reg = cfg->cbar; + if (smmu->version == ARM_SMMU_V1) +@@ -757,135 +816,51 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain) + } + writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx)); + +- if (smmu->version > ARM_SMMU_V1) { +- /* CBA2R */ +-#ifdef CONFIG_64BIT +- reg = CBA2R_RW64_64BIT; +-#else +- reg = CBA2R_RW64_32BIT; +-#endif +- writel_relaxed(reg, +- gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx)); +- +- /* TTBCR2 */ +- switch (smmu->s1_input_size) { +- case 32: +- reg = (TTBCR2_ADDR_32 << TTBCR2_SEP_SHIFT); +- break; +- case 36: +- reg = (TTBCR2_ADDR_36 << TTBCR2_SEP_SHIFT); +- break; +- case 39: +- case 40: +- reg = (TTBCR2_ADDR_40 << TTBCR2_SEP_SHIFT); +- break; +- case 42: +- reg = (TTBCR2_ADDR_42 << TTBCR2_SEP_SHIFT); +- break; +- case 44: +- reg = (TTBCR2_ADDR_44 << TTBCR2_SEP_SHIFT); +- break; +- case 48: +- reg = (TTBCR2_ADDR_48 << TTBCR2_SEP_SHIFT); +- break; +- } +- +- switch (smmu->s1_output_size) { +- case 32: +- reg |= (TTBCR2_ADDR_32 << TTBCR2_PASIZE_SHIFT); +- break; +- case 36: +- reg |= (TTBCR2_ADDR_36 << TTBCR2_PASIZE_SHIFT); +- break; +- case 39: +- case 40: +- reg |= (TTBCR2_ADDR_40 << TTBCR2_PASIZE_SHIFT); +- break; +- case 42: +- reg |= (TTBCR2_ADDR_42 << TTBCR2_PASIZE_SHIFT); +- break; +- case 44: +- reg |= (TTBCR2_ADDR_44 << TTBCR2_PASIZE_SHIFT); +- break; +- case 48: +- reg |= (TTBCR2_ADDR_48 << TTBCR2_PASIZE_SHIFT); +- break; +- } +- +- if (stage1) +- writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2); +- } ++ /* TTBRs */ ++ if (stage1) { ++ reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0]; ++ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO); ++ reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0] >> 32; ++ reg |= ARM_SMMU_CB_ASID(cfg) << TTBRn_HI_ASID_SHIFT; ++ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI); + +- /* TTBR0 */ +- arm_smmu_flush_pgtable(smmu, cfg->pgd, +- PTRS_PER_PGD * sizeof(pgd_t)); +- reg = __pa(cfg->pgd); +- writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO); +- reg = (phys_addr_t)__pa(cfg->pgd) >> 32; +- if (stage1) ++ reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1]; ++ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1_LO); ++ reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1] >> 32; + reg |= ARM_SMMU_CB_ASID(cfg) << TTBRn_HI_ASID_SHIFT; +- writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI); ++ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1_HI); ++ } else { ++ reg = pgtbl_cfg->arm_lpae_s2_cfg.vttbr; ++ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO); ++ reg = pgtbl_cfg->arm_lpae_s2_cfg.vttbr >> 32; ++ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI); ++ } + +- /* +- * TTBCR +- * We use long descriptor, with inner-shareable WBWA tables in TTBR0. 
+- */ +- if (smmu->version > ARM_SMMU_V1) { +- if (PAGE_SIZE == SZ_4K) +- reg = TTBCR_TG0_4K; +- else +- reg = TTBCR_TG0_64K; +- +- if (!stage1) { +- reg |= (64 - smmu->s2_input_size) << TTBCR_T0SZ_SHIFT; +- +- switch (smmu->s2_output_size) { +- case 32: +- reg |= (TTBCR2_ADDR_32 << TTBCR_PASIZE_SHIFT); +- break; +- case 36: +- reg |= (TTBCR2_ADDR_36 << TTBCR_PASIZE_SHIFT); +- break; +- case 40: +- reg |= (TTBCR2_ADDR_40 << TTBCR_PASIZE_SHIFT); +- break; +- case 42: +- reg |= (TTBCR2_ADDR_42 << TTBCR_PASIZE_SHIFT); +- break; +- case 44: +- reg |= (TTBCR2_ADDR_44 << TTBCR_PASIZE_SHIFT); +- break; +- case 48: +- reg |= (TTBCR2_ADDR_48 << TTBCR_PASIZE_SHIFT); +- break; +- } +- } else { +- reg |= (64 - smmu->s1_input_size) << TTBCR_T0SZ_SHIFT; ++ /* TTBCR */ ++ if (stage1) { ++ reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr; ++ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR); ++ if (smmu->version > ARM_SMMU_V1) { ++ reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32; ++ reg |= TTBCR2_SEP_UPSTREAM; ++ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2); + } + } else { +- reg = 0; ++ reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr; ++ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR); + } + +- reg |= TTBCR_EAE | +- (TTBCR_SH_IS << TTBCR_SH0_SHIFT) | +- (TTBCR_RGN_WBWA << TTBCR_ORGN0_SHIFT) | +- (TTBCR_RGN_WBWA << TTBCR_IRGN0_SHIFT); +- +- if (!stage1) +- reg |= (TTBCR_SL0_LVL_1 << TTBCR_SL0_SHIFT); +- +- writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR); +- +- /* MAIR0 (stage-1 only) */ ++ /* MAIRs (stage-1 only) */ + if (stage1) { +- reg = (MAIR_ATTR_NC << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_NC)) | +- (MAIR_ATTR_WBRWA << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_CACHE)) | +- (MAIR_ATTR_DEVICE << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_DEV)); ++ reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0]; + writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0); ++ reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[1]; ++ writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR1); + } + + /* SCTLR */ +- reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP; ++ /* Disable stall mode */ ++ reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP; + if (stage1) + reg |= SCTLR_S1_ASIDPNE; + #ifdef __BIG_ENDIAN +@@ -898,27 +873,69 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, + struct arm_smmu_device *smmu) + { + int irq, start, ret = 0; +- unsigned long flags; +- struct arm_smmu_domain *smmu_domain = domain->priv; ++ unsigned long ias, oas; ++ struct io_pgtable_ops *pgtbl_ops; ++ struct io_pgtable_cfg pgtbl_cfg; ++ enum io_pgtable_fmt fmt; ++ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); + struct arm_smmu_cfg *cfg = &smmu_domain->cfg; + +- spin_lock_irqsave(&smmu_domain->lock, flags); ++ mutex_lock(&smmu_domain->init_mutex); + if (smmu_domain->smmu) + goto out_unlock; + +- if (smmu->features & ARM_SMMU_FEAT_TRANS_NESTED) { ++ /* ++ * Mapping the requested stage onto what we support is surprisingly ++ * complicated, mainly because the spec allows S1+S2 SMMUs without ++ * support for nested translation. That means we end up with the ++ * following table: ++ * ++ * Requested Supported Actual ++ * S1 N S1 ++ * S1 S1+S2 S1 ++ * S1 S2 S2 ++ * S1 S1 S1 ++ * N N N ++ * N S1+S2 S2 ++ * N S2 S2 ++ * N S1 S1 ++ * ++ * Note that you can't actually request stage-2 mappings. 
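++ * (The only external knob is DOMAIN_ATTR_NESTING, which switches
++ * the domain between ARM_SMMU_DOMAIN_S1 and ARM_SMMU_DOMAIN_NESTED;
++ * the nested case below currently falls through to the stage-2
++ * setup.)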
++ */ ++ if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1)) ++ smmu_domain->stage = ARM_SMMU_DOMAIN_S2; ++ if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2)) ++ smmu_domain->stage = ARM_SMMU_DOMAIN_S1; ++ ++ switch (smmu_domain->stage) { ++ case ARM_SMMU_DOMAIN_S1: ++ cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS; ++ start = smmu->num_s2_context_banks; ++ ias = smmu->va_size; ++ oas = smmu->ipa_size; ++ if (IS_ENABLED(CONFIG_64BIT)) ++ fmt = ARM_64_LPAE_S1; ++ else ++ fmt = ARM_32_LPAE_S1; ++ break; ++ case ARM_SMMU_DOMAIN_NESTED: + /* + * We will likely want to change this if/when KVM gets + * involved. + */ +- cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS; +- start = smmu->num_s2_context_banks; +- } else if (smmu->features & ARM_SMMU_FEAT_TRANS_S1) { +- cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS; +- start = smmu->num_s2_context_banks; +- } else { ++ case ARM_SMMU_DOMAIN_S2: + cfg->cbar = CBAR_TYPE_S2_TRANS; + start = 0; ++ ias = smmu->ipa_size; ++ oas = smmu->pa_size; ++ if (IS_ENABLED(CONFIG_64BIT)) ++ fmt = ARM_64_LPAE_S2; ++ else ++ fmt = ARM_32_LPAE_S2; ++ break; ++ default: ++ ret = -EINVAL; ++ goto out_unlock; + } + + ret = __arm_smmu_alloc_bitmap(smmu->context_map, start, +@@ -934,10 +951,33 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, + cfg->irptndx = cfg->cbndx; + } + +- ACCESS_ONCE(smmu_domain->smmu) = smmu; +- arm_smmu_init_context_bank(smmu_domain); +- spin_unlock_irqrestore(&smmu_domain->lock, flags); ++ pgtbl_cfg = (struct io_pgtable_cfg) { ++ .pgsize_bitmap = arm_smmu_ops.pgsize_bitmap, ++ .ias = ias, ++ .oas = oas, ++ .tlb = &arm_smmu_gather_ops, ++ }; ++ ++ smmu_domain->smmu = smmu; ++ pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain); ++ if (!pgtbl_ops) { ++ ret = -ENOMEM; ++ goto out_clear_smmu; ++ } ++ ++ /* Update our support page sizes to reflect the page table format */ ++ arm_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap; ++#ifdef CONFIG_FSL_MC_BUS ++ arm_fsl_mc_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap; ++#endif ++ ++ /* Initialise the context bank with our page table cfg */ ++ arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg); + ++ /* ++ * Request context fault interrupt. Do this last to avoid the ++ * handler seeing a half-initialised domain state. ++ */ + irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx]; + ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED, + "arm-smmu-context-fault", domain); +@@ -947,16 +987,22 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, + cfg->irptndx = INVALID_IRPTNDX; + } + ++ mutex_unlock(&smmu_domain->init_mutex); ++ ++ /* Publish page table ops for map/unmap */ ++ smmu_domain->pgtbl_ops = pgtbl_ops; + return 0; + ++out_clear_smmu: ++ smmu_domain->smmu = NULL; + out_unlock: +- spin_unlock_irqrestore(&smmu_domain->lock, flags); ++ mutex_unlock(&smmu_domain->init_mutex); + return ret; + } + + static void arm_smmu_destroy_domain_context(struct iommu_domain *domain) + { +- struct arm_smmu_domain *smmu_domain = domain->priv; ++ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); + struct arm_smmu_device *smmu = smmu_domain->smmu; + struct arm_smmu_cfg *cfg = &smmu_domain->cfg; + void __iomem *cb_base; +@@ -965,24 +1011,30 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain) + if (!smmu) + return; + +- /* Disable the context bank and nuke the TLB before freeing it. */ ++ /* ++ * Disable the context bank and free the page tables before freeing ++ * it. 
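++ * (Zeroing SCTLR below stops the context bank from issuing new
++ * table walks, so the io-pgtable teardown further down cannot race
++ * with the hardware walker.)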
++ */ + cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); + writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR); +- arm_smmu_tlb_inv_context(smmu_domain); + + if (cfg->irptndx != INVALID_IRPTNDX) { + irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx]; + free_irq(irq, domain); + } + ++ if (smmu_domain->pgtbl_ops) ++ free_io_pgtable_ops(smmu_domain->pgtbl_ops); ++ + __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx); + } + +-static int arm_smmu_domain_init(struct iommu_domain *domain) ++static struct iommu_domain *arm_smmu_domain_alloc(unsigned type) + { + struct arm_smmu_domain *smmu_domain; +- pgd_t *pgd; + ++ if (type != IOMMU_DOMAIN_UNMANAGED) ++ return NULL; + /* + * Allocate the domain and initialise some of its data structures. + * We can't really do anything meaningful until we've added a +@@ -990,95 +1042,23 @@ static int arm_smmu_domain_init(struct iommu_domain *domain) + */ + smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL); + if (!smmu_domain) +- return -ENOMEM; ++ return NULL; + +- pgd = kcalloc(PTRS_PER_PGD, sizeof(pgd_t), GFP_KERNEL); +- if (!pgd) +- goto out_free_domain; +- smmu_domain->cfg.pgd = pgd; ++ mutex_init(&smmu_domain->init_mutex); ++ spin_lock_init(&smmu_domain->pgtbl_lock); + +- spin_lock_init(&smmu_domain->lock); +- domain->priv = smmu_domain; +- return 0; +- +-out_free_domain: +- kfree(smmu_domain); +- return -ENOMEM; ++ return &smmu_domain->domain; + } + +-static void arm_smmu_free_ptes(pmd_t *pmd) ++static void arm_smmu_domain_free(struct iommu_domain *domain) + { +- pgtable_t table = pmd_pgtable(*pmd); +- +- __free_page(table); +-} +- +-static void arm_smmu_free_pmds(pud_t *pud) +-{ +- int i; +- pmd_t *pmd, *pmd_base = pmd_offset(pud, 0); +- +- pmd = pmd_base; +- for (i = 0; i < PTRS_PER_PMD; ++i) { +- if (pmd_none(*pmd)) +- continue; +- +- arm_smmu_free_ptes(pmd); +- pmd++; +- } +- +- pmd_free(NULL, pmd_base); +-} +- +-static void arm_smmu_free_puds(pgd_t *pgd) +-{ +- int i; +- pud_t *pud, *pud_base = pud_offset(pgd, 0); +- +- pud = pud_base; +- for (i = 0; i < PTRS_PER_PUD; ++i) { +- if (pud_none(*pud)) +- continue; +- +- arm_smmu_free_pmds(pud); +- pud++; +- } +- +- pud_free(NULL, pud_base); +-} +- +-static void arm_smmu_free_pgtables(struct arm_smmu_domain *smmu_domain) +-{ +- int i; +- struct arm_smmu_cfg *cfg = &smmu_domain->cfg; +- pgd_t *pgd, *pgd_base = cfg->pgd; +- +- /* +- * Recursively free the page tables for this domain. We don't +- * care about speculative TLB filling because the tables should +- * not be active in any context bank at this point (SCTLR.M is 0). +- */ +- pgd = pgd_base; +- for (i = 0; i < PTRS_PER_PGD; ++i) { +- if (pgd_none(*pgd)) +- continue; +- arm_smmu_free_puds(pgd); +- pgd++; +- } +- +- kfree(pgd_base); +-} +- +-static void arm_smmu_domain_destroy(struct iommu_domain *domain) +-{ +- struct arm_smmu_domain *smmu_domain = domain->priv; ++ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); + + /* + * Free the domain resources. We assume that all devices have + * already been detached. 
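++ * (If no master was ever attached, the domain was never finalised
++ * and arm_smmu_destroy_domain_context() returns early.)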
+ */ + arm_smmu_destroy_domain_context(domain); +- arm_smmu_free_pgtables(smmu_domain); + kfree(smmu_domain); + } + +@@ -1113,7 +1093,7 @@ static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu, + + smrs[i] = (struct arm_smmu_smr) { + .idx = idx, +- .mask = 0, /* We don't currently share SMRs */ ++ .mask = cfg->mask, + .id = cfg->streamids[i], + }; + } +@@ -1209,8 +1189,8 @@ static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain, + static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) + { + int ret; +- struct arm_smmu_domain *smmu_domain = domain->priv; +- struct arm_smmu_device *smmu, *dom_smmu; ++ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); ++ struct arm_smmu_device *smmu; + struct arm_smmu_master_cfg *cfg; + + smmu = find_smmu_for_device(dev); +@@ -1224,21 +1204,16 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) + return -EEXIST; + } + ++ /* Ensure that the domain is finalised */ ++ ret = arm_smmu_init_domain_context(domain, smmu); ++ if (IS_ERR_VALUE(ret)) ++ return ret; ++ + /* + * Sanity check the domain. We don't support domains across + * different SMMUs. + */ +- dom_smmu = ACCESS_ONCE(smmu_domain->smmu); +- if (!dom_smmu) { +- /* Now that we have a master, we can finalise the domain */ +- ret = arm_smmu_init_domain_context(domain, smmu); +- if (IS_ERR_VALUE(ret)) +- return ret; +- +- dom_smmu = smmu_domain->smmu; +- } +- +- if (dom_smmu != smmu) { ++ if (smmu_domain->smmu != smmu) { + dev_err(dev, + "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n", + dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev)); +@@ -1258,7 +1233,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) + + static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev) + { +- struct arm_smmu_domain *smmu_domain = domain->priv; ++ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); + struct arm_smmu_master_cfg *cfg; + + cfg = find_smmu_master_cfg(dev); +@@ -1269,292 +1244,106 @@ static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev) + arm_smmu_domain_remove_master(smmu_domain, cfg); + } + +-static bool arm_smmu_pte_is_contiguous_range(unsigned long addr, +- unsigned long end) +-{ +- return !(addr & ~ARM_SMMU_PTE_CONT_MASK) && +- (addr + ARM_SMMU_PTE_CONT_SIZE <= end); +-} +- +-static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd, +- unsigned long addr, unsigned long end, +- unsigned long pfn, int prot, int stage) +-{ +- pte_t *pte, *start; +- pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF | ARM_SMMU_PTE_XN; +- +- if (pmd_none(*pmd)) { +- /* Allocate a new set of tables */ +- pgtable_t table = alloc_page(GFP_ATOMIC|__GFP_ZERO); +- +- if (!table) +- return -ENOMEM; +- +- arm_smmu_flush_pgtable(smmu, page_address(table), PAGE_SIZE); +- pmd_populate(NULL, pmd, table); +- arm_smmu_flush_pgtable(smmu, pmd, sizeof(*pmd)); +- } +- +- if (stage == 1) { +- pteval |= ARM_SMMU_PTE_AP_UNPRIV | ARM_SMMU_PTE_nG; +- if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ)) +- pteval |= ARM_SMMU_PTE_AP_RDONLY; +- +- if (prot & IOMMU_CACHE) +- pteval |= (MAIR_ATTR_IDX_CACHE << +- ARM_SMMU_PTE_ATTRINDX_SHIFT); +- } else { +- pteval |= ARM_SMMU_PTE_HAP_FAULT; +- if (prot & IOMMU_READ) +- pteval |= ARM_SMMU_PTE_HAP_READ; +- if (prot & IOMMU_WRITE) +- pteval |= ARM_SMMU_PTE_HAP_WRITE; +- if (prot & IOMMU_CACHE) +- pteval |= ARM_SMMU_PTE_MEMATTR_OIWB; +- else +- pteval |= 
ARM_SMMU_PTE_MEMATTR_NC; +- } +- +- /* If no access, create a faulting entry to avoid TLB fills */ +- if (prot & IOMMU_EXEC) +- pteval &= ~ARM_SMMU_PTE_XN; +- else if (!(prot & (IOMMU_READ | IOMMU_WRITE))) +- pteval &= ~ARM_SMMU_PTE_PAGE; +- +- pteval |= ARM_SMMU_PTE_SH_IS; +- start = pmd_page_vaddr(*pmd) + pte_index(addr); +- pte = start; +- +- /* +- * Install the page table entries. This is fairly complicated +- * since we attempt to make use of the contiguous hint in the +- * ptes where possible. The contiguous hint indicates a series +- * of ARM_SMMU_PTE_CONT_ENTRIES ptes mapping a physically +- * contiguous region with the following constraints: +- * +- * - The region start is aligned to ARM_SMMU_PTE_CONT_SIZE +- * - Each pte in the region has the contiguous hint bit set +- * +- * This complicates unmapping (also handled by this code, when +- * neither IOMMU_READ or IOMMU_WRITE are set) because it is +- * possible, yet highly unlikely, that a client may unmap only +- * part of a contiguous range. This requires clearing of the +- * contiguous hint bits in the range before installing the new +- * faulting entries. +- * +- * Note that re-mapping an address range without first unmapping +- * it is not supported, so TLB invalidation is not required here +- * and is instead performed at unmap and domain-init time. +- */ +- do { +- int i = 1; +- +- pteval &= ~ARM_SMMU_PTE_CONT; +- +- if (arm_smmu_pte_is_contiguous_range(addr, end)) { +- i = ARM_SMMU_PTE_CONT_ENTRIES; +- pteval |= ARM_SMMU_PTE_CONT; +- } else if (pte_val(*pte) & +- (ARM_SMMU_PTE_CONT | ARM_SMMU_PTE_PAGE)) { +- int j; +- pte_t *cont_start; +- unsigned long idx = pte_index(addr); +- +- idx &= ~(ARM_SMMU_PTE_CONT_ENTRIES - 1); +- cont_start = pmd_page_vaddr(*pmd) + idx; +- for (j = 0; j < ARM_SMMU_PTE_CONT_ENTRIES; ++j) +- pte_val(*(cont_start + j)) &= +- ~ARM_SMMU_PTE_CONT; +- +- arm_smmu_flush_pgtable(smmu, cont_start, +- sizeof(*pte) * +- ARM_SMMU_PTE_CONT_ENTRIES); +- } +- +- do { +- *pte = pfn_pte(pfn, __pgprot(pteval)); +- } while (pte++, pfn++, addr += PAGE_SIZE, --i); +- } while (addr != end); +- +- arm_smmu_flush_pgtable(smmu, start, sizeof(*pte) * (pte - start)); +- return 0; +-} +- +-static int arm_smmu_alloc_init_pmd(struct arm_smmu_device *smmu, pud_t *pud, +- unsigned long addr, unsigned long end, +- phys_addr_t phys, int prot, int stage) ++static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova, ++ phys_addr_t paddr, size_t size, int prot) + { + int ret; +- pmd_t *pmd; +- unsigned long next, pfn = __phys_to_pfn(phys); +- +-#ifndef __PAGETABLE_PMD_FOLDED +- if (pud_none(*pud)) { +- pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC); +- if (!pmd) +- return -ENOMEM; +- +- arm_smmu_flush_pgtable(smmu, pmd, PAGE_SIZE); +- pud_populate(NULL, pud, pmd); +- arm_smmu_flush_pgtable(smmu, pud, sizeof(*pud)); +- +- pmd += pmd_index(addr); +- } else +-#endif +- pmd = pmd_offset(pud, addr); ++ unsigned long flags; ++ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); ++ struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops; + +- do { +- next = pmd_addr_end(addr, end); +- ret = arm_smmu_alloc_init_pte(smmu, pmd, addr, next, pfn, +- prot, stage); +- phys += next - addr; +- pfn = __phys_to_pfn(phys); +- } while (pmd++, addr = next, addr < end); ++ if (!ops) ++ return -ENODEV; + ++ spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags); ++ ret = ops->map(ops, iova, paddr, size, prot); ++ spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags); + return ret; + } + +-static int arm_smmu_alloc_init_pud(struct 
arm_smmu_device *smmu, pgd_t *pgd, +- unsigned long addr, unsigned long end, +- phys_addr_t phys, int prot, int stage) ++static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, ++ size_t size) + { +- int ret = 0; +- pud_t *pud; +- unsigned long next; +- +-#ifndef __PAGETABLE_PUD_FOLDED +- if (pgd_none(*pgd)) { +- pud = (pud_t *)get_zeroed_page(GFP_ATOMIC); +- if (!pud) +- return -ENOMEM; +- +- arm_smmu_flush_pgtable(smmu, pud, PAGE_SIZE); +- pgd_populate(NULL, pgd, pud); +- arm_smmu_flush_pgtable(smmu, pgd, sizeof(*pgd)); +- +- pud += pud_index(addr); +- } else +-#endif +- pud = pud_offset(pgd, addr); ++ size_t ret; ++ unsigned long flags; ++ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); ++ struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops; + +- do { +- next = pud_addr_end(addr, end); +- ret = arm_smmu_alloc_init_pmd(smmu, pud, addr, next, phys, +- prot, stage); +- phys += next - addr; +- } while (pud++, addr = next, addr < end); ++ if (!ops) ++ return 0; + ++ spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags); ++ ret = ops->unmap(ops, iova, size); ++ spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags); + return ret; + } + +-static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain, +- unsigned long iova, phys_addr_t paddr, +- size_t size, int prot) ++static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain, ++ dma_addr_t iova) + { +- int ret, stage; +- unsigned long end; +- phys_addr_t input_mask, output_mask; ++ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); + struct arm_smmu_device *smmu = smmu_domain->smmu; + struct arm_smmu_cfg *cfg = &smmu_domain->cfg; +- pgd_t *pgd = cfg->pgd; +- unsigned long flags; ++ struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops; ++ struct device *dev = smmu->dev; ++ void __iomem *cb_base; ++ u32 tmp; ++ u64 phys; ++ ++ cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); + +- if (cfg->cbar == CBAR_TYPE_S2_TRANS) { +- stage = 2; +- input_mask = (1ULL << smmu->s2_input_size) - 1; +- output_mask = (1ULL << smmu->s2_output_size) - 1; ++ if (smmu->version == 1) { ++ u32 reg = iova & ~0xfff; ++ writel_relaxed(reg, cb_base + ARM_SMMU_CB_ATS1PR_LO); + } else { +- stage = 1; +- input_mask = (1ULL << smmu->s1_input_size) - 1; +- output_mask = (1ULL << smmu->s1_output_size) - 1; ++ u32 reg = iova & ~0xfff; ++ writel_relaxed(reg, cb_base + ARM_SMMU_CB_ATS1PR_LO); ++ reg = ((u64)iova & ~0xfff) >> 32; ++ writel_relaxed(reg, cb_base + ARM_SMMU_CB_ATS1PR_HI); + } + +- if (!pgd) +- return -EINVAL; +- +- if (size & ~PAGE_MASK) +- return -EINVAL; +- +- if ((phys_addr_t)iova & ~input_mask) +- return -ERANGE; +- +- if (paddr & ~output_mask) +- return -ERANGE; +- +- spin_lock_irqsave(&smmu_domain->lock, flags); +- pgd += pgd_index(iova); +- end = iova + size; +- do { +- unsigned long next = pgd_addr_end(iova, end); +- +- ret = arm_smmu_alloc_init_pud(smmu, pgd, iova, next, paddr, +- prot, stage); +- if (ret) +- goto out_unlock; +- +- paddr += next - iova; +- iova = next; +- } while (pgd++, iova != end); +- +-out_unlock: +- spin_unlock_irqrestore(&smmu_domain->lock, flags); +- +- return ret; +-} +- +-static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova, +- phys_addr_t paddr, size_t size, int prot) +-{ +- struct arm_smmu_domain *smmu_domain = domain->priv; +- +- if (!smmu_domain) +- return -ENODEV; ++ if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp, ++ !(tmp & ATSR_ACTIVE), 5, 50)) { ++ dev_err(dev, ++ "iova to phys timed out on 0x%pad. 
Falling back to software table walk.\n", ++ &iova); ++ return ops->iova_to_phys(ops, iova); ++ } + +- return arm_smmu_handle_mapping(smmu_domain, iova, paddr, size, prot); +-} ++ phys = readl_relaxed(cb_base + ARM_SMMU_CB_PAR_LO); ++ phys |= ((u64)readl_relaxed(cb_base + ARM_SMMU_CB_PAR_HI)) << 32; + +-static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, +- size_t size) +-{ +- int ret; +- struct arm_smmu_domain *smmu_domain = domain->priv; ++ if (phys & CB_PAR_F) { ++ dev_err(dev, "translation fault!\n"); ++ dev_err(dev, "PAR = 0x%llx\n", phys); ++ return 0; ++ } + +- ret = arm_smmu_handle_mapping(smmu_domain, iova, 0, size, 0); +- arm_smmu_tlb_inv_context(smmu_domain); +- return ret ? 0 : size; ++ return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff); + } + + static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain, +- dma_addr_t iova) ++ dma_addr_t iova) + { +- pgd_t *pgdp, pgd; +- pud_t pud; +- pmd_t pmd; +- pte_t pte; +- struct arm_smmu_domain *smmu_domain = domain->priv; +- struct arm_smmu_cfg *cfg = &smmu_domain->cfg; +- +- pgdp = cfg->pgd; +- if (!pgdp) +- return 0; +- +- pgd = *(pgdp + pgd_index(iova)); +- if (pgd_none(pgd)) +- return 0; ++ phys_addr_t ret; ++ unsigned long flags; ++ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); ++ struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops; + +- pud = *pud_offset(&pgd, iova); +- if (pud_none(pud)) ++ if (!ops) + return 0; + +- pmd = *pmd_offset(&pud, iova); +- if (pmd_none(pmd)) +- return 0; ++ spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags); ++ if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS && ++ smmu_domain->stage == ARM_SMMU_DOMAIN_S1) { ++ ret = arm_smmu_iova_to_phys_hard(domain, iova); ++ } else { ++ ret = ops->iova_to_phys(ops, iova); ++ } + +- pte = *(pmd_page_vaddr(pmd) + pte_index(iova)); +- if (pte_none(pte)) +- return 0; ++ spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags); + +- return __pfn_to_phys(pte_pfn(pte)) | (iova & ~PAGE_MASK); ++ return ret; + } + + static bool arm_smmu_capable(enum iommu_cap cap) +@@ -1568,6 +1357,8 @@ static bool arm_smmu_capable(enum iommu_cap cap) + return true; + case IOMMU_CAP_INTR_REMAP: + return true; /* MSIs are just memory writes */ ++ case IOMMU_CAP_NOEXEC: ++ return true; + default: + return false; + } +@@ -1584,81 +1375,248 @@ static void __arm_smmu_release_pci_iommudata(void *data) + kfree(data); + } + +-static int arm_smmu_add_device(struct device *dev) ++static int arm_smmu_add_pci_device(struct pci_dev *pdev) + { +- struct arm_smmu_device *smmu; ++ int i, ret; ++ u16 sid; ++ struct iommu_group *group; + struct arm_smmu_master_cfg *cfg; ++#ifdef CONFIG_PCI_LAYERSCAPE ++ u32 streamid; ++#endif ++ ++ group = iommu_group_get_for_dev(&pdev->dev); ++ if (IS_ERR(group)) ++ return PTR_ERR(group); ++ ++ cfg = iommu_group_get_iommudata(group); ++ if (!cfg) { ++ cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); ++ if (!cfg) { ++ ret = -ENOMEM; ++ goto out_put_group; ++ } ++ ++ iommu_group_set_iommudata(group, cfg, ++ __arm_smmu_release_pci_iommudata); ++ } ++ ++ if (cfg->num_streamids >= MAX_MASTER_STREAMIDS) { ++ ret = -ENOSPC; ++ goto out_put_group; ++ } ++ ++ /* ++ * Assume Stream ID == Requester ID for now. ++ * We need a way to describe the ID mappings in FDT. 
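++ * (Mainline later gained the generic "iommu-map" device-tree
++ * property for describing Requester-ID to Stream-ID mappings.)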
++ */ ++ pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid); ++ for (i = 0; i < cfg->num_streamids; ++i) ++ if (cfg->streamids[i] == sid) ++ break; ++ ++ /* Avoid duplicate SIDs, as this can lead to SMR conflicts */ ++ if (i == cfg->num_streamids) ++ cfg->streamids[cfg->num_streamids++] = sid; ++ ++#ifdef CONFIG_PCI_LAYERSCAPE ++ streamid = set_pcie_streamid_translation(pdev, sid); ++ if (~streamid == 0) { ++ ret = -ENODEV; ++ goto out_put_group; ++ } ++ cfg->streamids[0] = streamid; ++ cfg->mask = 0x7c00; ++ ++ pdev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVID; ++ pdev->dma_alias_devid = streamid; ++#endif ++ ++ return 0; ++out_put_group: ++ iommu_group_put(group); ++ return ret; ++} ++ ++static int arm_smmu_add_platform_device(struct device *dev) ++{ + struct iommu_group *group; +- void (*releasefn)(void *) = NULL; +- int ret; ++ struct arm_smmu_master *master; ++ struct arm_smmu_device *smmu = find_smmu_for_device(dev); + +- smmu = find_smmu_for_device(dev); + if (!smmu) + return -ENODEV; + ++ master = find_smmu_master(smmu, dev->of_node); ++ if (!master) ++ return -ENODEV; ++ ++ /* No automatic group creation for platform devices */ + group = iommu_group_alloc(); +- if (IS_ERR(group)) { +- dev_err(dev, "Failed to allocate IOMMU group\n"); ++ if (IS_ERR(group)) + return PTR_ERR(group); ++ ++ iommu_group_set_iommudata(group, &master->cfg, NULL); ++ return iommu_group_add_device(group, dev); ++} ++ ++static int arm_smmu_add_device(struct device *dev) ++{ ++ if (dev_is_pci(dev)) ++ return arm_smmu_add_pci_device(to_pci_dev(dev)); ++ ++ return arm_smmu_add_platform_device(dev); ++} ++ ++static void arm_smmu_remove_device(struct device *dev) ++{ ++ iommu_group_remove_device(dev); ++} ++ ++static int arm_smmu_domain_get_attr(struct iommu_domain *domain, ++ enum iommu_attr attr, void *data) ++{ ++ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); ++ ++ switch (attr) { ++ case DOMAIN_ATTR_NESTING: ++ *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED); ++ return 0; ++ default: ++ return -ENODEV; + } ++} + +- if (dev_is_pci(dev)) { +- struct pci_dev *pdev = to_pci_dev(dev); ++static int arm_smmu_domain_set_attr(struct iommu_domain *domain, ++ enum iommu_attr attr, void *data) ++{ ++ int ret = 0; ++ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); + +- cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); +- if (!cfg) { +- ret = -ENOMEM; +- goto out_put_group; ++ mutex_lock(&smmu_domain->init_mutex); ++ ++ switch (attr) { ++ case DOMAIN_ATTR_NESTING: ++ if (smmu_domain->smmu) { ++ ret = -EPERM; ++ goto out_unlock; + } + +- cfg->num_streamids = 1; ++ if (*(int *)data) ++ smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED; ++ else ++ smmu_domain->stage = ARM_SMMU_DOMAIN_S1; ++ ++ break; ++ default: ++ ret = -ENODEV; ++ } ++ ++out_unlock: ++ mutex_unlock(&smmu_domain->init_mutex); ++ return ret; ++} ++ ++static struct iommu_ops arm_smmu_ops = { ++ .capable = arm_smmu_capable, ++ .domain_alloc = arm_smmu_domain_alloc, ++ .domain_free = arm_smmu_domain_free, ++ .attach_dev = arm_smmu_attach_dev, ++ .detach_dev = arm_smmu_detach_dev, ++ .map = arm_smmu_map, ++ .unmap = arm_smmu_unmap, ++ .iova_to_phys = arm_smmu_iova_to_phys, ++ .add_device = arm_smmu_add_device, ++ .remove_device = arm_smmu_remove_device, ++ .domain_get_attr = arm_smmu_domain_get_attr, ++ .domain_set_attr = arm_smmu_domain_set_attr, ++ .pgsize_bitmap = -1UL, /* Restricted during device attach */ ++}; ++ ++#ifdef CONFIG_FSL_MC_BUS ++ ++static void arm_smmu_release_fsl_mc_iommudata(void *data) ++{ ++ kfree(data); 
++} ++ ++/* ++ * IOMMU group creation and stream ID programming for ++ * the LS devices ++ * ++ */ ++static int arm_fsl_mc_smmu_add_device(struct device *dev) ++{ ++ struct device *cont_dev; ++ struct fsl_mc_device *mc_dev; ++ struct iommu_group *group; ++ struct arm_smmu_master_cfg *cfg; ++ int ret = 0; ++ ++ mc_dev = to_fsl_mc_device(dev); ++ if (mc_dev->flags & FSL_MC_IS_DPRC) ++ cont_dev = dev; ++ else ++ cont_dev = mc_dev->dev.parent; ++ ++ get_device(cont_dev); ++ group = iommu_group_get(cont_dev); ++ put_device(cont_dev); ++ if (!group) { ++ void (*releasefn)(void *) = NULL; ++ ++ group = iommu_group_alloc(); ++ if (IS_ERR(group)) ++ return PTR_ERR(group); + /* +- * Assume Stream ID == Requester ID for now. +- * We need a way to describe the ID mappings in FDT. ++ * allocate the cfg for the container and associate it with ++ * the iommu group. In the find cfg function we get the cfg ++ * from the iommu group. + */ +- pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, +- &cfg->streamids[0]); +- releasefn = __arm_smmu_release_pci_iommudata; +- } else { +- struct arm_smmu_master *master; +- +- master = find_smmu_master(smmu, dev->of_node); +- if (!master) { +- ret = -ENODEV; +- goto out_put_group; +- } ++ cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); ++ if (!cfg) ++ return -ENOMEM; + +- cfg = &master->cfg; ++ mc_dev = to_fsl_mc_device(cont_dev); ++ cfg->num_streamids = 1; ++ cfg->streamids[0] = mc_dev->icid; ++ cfg->mask = 0x7c00; ++ releasefn = arm_smmu_release_fsl_mc_iommudata; ++ iommu_group_set_iommudata(group, cfg, releasefn); ++ ret = iommu_group_add_device(group, cont_dev); + } + +- iommu_group_set_iommudata(group, cfg, releasefn); +- ret = iommu_group_add_device(group, dev); ++ if (!ret && cont_dev != dev) ++ ret = iommu_group_add_device(group, dev); + +-out_put_group: + iommu_group_put(group); ++ + return ret; + } + +-static void arm_smmu_remove_device(struct device *dev) ++static void arm_fsl_mc_smmu_remove_device(struct device *dev) + { + iommu_group_remove_device(dev); ++ + } + +-static const struct iommu_ops arm_smmu_ops = { +- .capable = arm_smmu_capable, +- .domain_init = arm_smmu_domain_init, +- .domain_destroy = arm_smmu_domain_destroy, +- .attach_dev = arm_smmu_attach_dev, +- .detach_dev = arm_smmu_detach_dev, +- .map = arm_smmu_map, +- .unmap = arm_smmu_unmap, +- .iova_to_phys = arm_smmu_iova_to_phys, +- .add_device = arm_smmu_add_device, +- .remove_device = arm_smmu_remove_device, +- .pgsize_bitmap = (SECTION_SIZE | +- ARM_SMMU_PTE_CONT_SIZE | +- PAGE_SIZE), ++static struct iommu_ops arm_fsl_mc_smmu_ops = { ++ .capable = arm_smmu_capable, ++ .domain_alloc = arm_smmu_domain_alloc, ++ .domain_free = arm_smmu_domain_free, ++ .attach_dev = arm_smmu_attach_dev, ++ .detach_dev = arm_smmu_detach_dev, ++ .map = arm_smmu_map, ++ .unmap = arm_smmu_unmap, ++ .map_sg = default_iommu_map_sg, ++ .iova_to_phys = arm_smmu_iova_to_phys, ++ .add_device = arm_fsl_mc_smmu_add_device, ++ .remove_device = arm_fsl_mc_smmu_remove_device, ++ .domain_get_attr = arm_smmu_domain_get_attr, ++ .domain_set_attr = arm_smmu_domain_set_attr, ++ .pgsize_bitmap = -1UL, /* Restricted during device attach */ + }; ++#endif + + static void arm_smmu_device_reset(struct arm_smmu_device *smmu) + { +@@ -1686,7 +1644,6 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu) + } + + /* Invalidate the TLB, just in case */ +- writel_relaxed(0, gr0_base + ARM_SMMU_GR0_STLBIALL); + writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH); + writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH); + +@@ -1708,7 
+1665,7 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu) + reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT); + + /* Push the button */ +- arm_smmu_tlb_sync(smmu); ++ __arm_smmu_tlb_sync(smmu); + writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0); + } + +@@ -1742,12 +1699,6 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) + + /* ID0 */ + id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0); +-#ifndef CONFIG_64BIT +- if (((id >> ID0_PTFS_SHIFT) & ID0_PTFS_MASK) == ID0_PTFS_V8_ONLY) { +- dev_err(smmu->dev, "\tno v7 descriptor support!\n"); +- return -ENODEV; +- } +-#endif + + /* Restrict available stages based on module parameter */ + if (force_stage == 1) +@@ -1776,6 +1727,11 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) + return -ENODEV; + } + ++ if ((id & ID0_S1TS) && ((smmu->version == 1) || !(id & ID0_ATOSNS))) { ++ smmu->features |= ARM_SMMU_FEAT_TRANS_OPS; ++ dev_notice(smmu->dev, "\taddress translation ops\n"); ++ } ++ + if (id & ID0_CTTW) { + smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK; + dev_notice(smmu->dev, "\tcoherent table walk\n"); +@@ -1820,16 +1776,14 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) + smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12; + + /* Check for size mismatch of SMMU address space from mapped region */ +- size = 1 << +- (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1); ++ size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1); + size *= 2 << smmu->pgshift; + if (smmu->size != size) + dev_warn(smmu->dev, + "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n", + size, smmu->size); + +- smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & +- ID1_NUMS2CB_MASK; ++ smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK; + smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK; + if (smmu->num_s2_context_banks > smmu->num_context_banks) { + dev_err(smmu->dev, "impossible number of S2 context banks!\n"); +@@ -1841,46 +1795,49 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) + /* ID2 */ + id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2); + size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK); +- smmu->s1_output_size = min_t(unsigned long, PHYS_MASK_SHIFT, size); +- +- /* Stage-2 input size limited due to pgd allocation (PTRS_PER_PGD) */ +-#ifdef CONFIG_64BIT +- smmu->s2_input_size = min_t(unsigned long, VA_BITS, size); +-#else +- smmu->s2_input_size = min(32UL, size); +-#endif ++ smmu->ipa_size = size; + +- /* The stage-2 output mask is also applied for bypass */ ++ /* The output mask is also applied for bypass */ + size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK); +- smmu->s2_output_size = min_t(unsigned long, PHYS_MASK_SHIFT, size); ++ smmu->pa_size = size; ++ ++ /* ++ * What the page table walker can address actually depends on which ++ * descriptor format is in use, but since a) we don't know that yet, ++ * and b) it can vary per context bank, this will have to do... 
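++ * (The mask is sized from ID2.OAS, the widest output address the
++ * SMMU can emit, so it is a safe upper bound for walker accesses.)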
++ */ ++ if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size))) ++ dev_warn(smmu->dev, ++ "failed to set DMA mask for table walker\n"); + + if (smmu->version == ARM_SMMU_V1) { +- smmu->s1_input_size = 32; ++ smmu->va_size = smmu->ipa_size; ++ size = SZ_4K | SZ_2M | SZ_1G; + } else { +-#ifdef CONFIG_64BIT + size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK; +- size = min(VA_BITS, arm_smmu_id_size_to_bits(size)); +-#else +- size = 32; ++ smmu->va_size = arm_smmu_id_size_to_bits(size); ++#ifndef CONFIG_64BIT ++ smmu->va_size = min(32UL, smmu->va_size); + #endif +- smmu->s1_input_size = size; +- +- if ((PAGE_SIZE == SZ_4K && !(id & ID2_PTFS_4K)) || +- (PAGE_SIZE == SZ_64K && !(id & ID2_PTFS_64K)) || +- (PAGE_SIZE != SZ_4K && PAGE_SIZE != SZ_64K)) { +- dev_err(smmu->dev, "CPU page size 0x%lx unsupported\n", +- PAGE_SIZE); +- return -ENODEV; +- } ++ size = 0; ++ if (id & ID2_PTFS_4K) ++ size |= SZ_4K | SZ_2M | SZ_1G; ++ if (id & ID2_PTFS_16K) ++ size |= SZ_16K | SZ_32M; ++ if (id & ID2_PTFS_64K) ++ size |= SZ_64K | SZ_512M; + } + ++ arm_smmu_ops.pgsize_bitmap &= size; ++ dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n", size); ++ + if (smmu->features & ARM_SMMU_FEAT_TRANS_S1) + dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n", +- smmu->s1_input_size, smmu->s1_output_size); ++ smmu->va_size, smmu->ipa_size); + + if (smmu->features & ARM_SMMU_FEAT_TRANS_S2) + dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n", +- smmu->s2_input_size, smmu->s2_output_size); ++ smmu->ipa_size, smmu->pa_size); + + return 0; + } +@@ -2007,6 +1964,10 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev) + spin_unlock(&arm_smmu_devices_lock); + + arm_smmu_device_reset(smmu); ++ /* AIOP Rev1 errata work around */ ++#ifdef CONFIG_AIOP_ERRATA ++ arm_smmu_aiop_attr_trans(smmu); ++#endif + return 0; + + out_free_irqs: +@@ -2062,7 +2023,6 @@ static int arm_smmu_device_remove(struct platform_device *pdev) + + static struct platform_driver arm_smmu_driver = { + .driver = { +- .owner = THIS_MODULE, + .name = "arm-smmu", + .of_match_table = of_match_ptr(arm_smmu_of_match), + }, +@@ -2072,8 +2032,20 @@ static struct platform_driver arm_smmu_driver = { + + static int __init arm_smmu_init(void) + { ++ struct device_node *np; + int ret; + ++ /* ++ * Play nice with systems that don't have an ARM SMMU by checking that ++ * an ARM SMMU exists in the system before proceeding with the driver ++ * and IOMMU bus operation registration. 
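++ * (bus_set_iommu() applies to every device on a bus type, so
++ * registering the ops on an SMMU-less system would pointlessly
++ * route all DMA setup through IOMMU code.)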
++ */
++ np = of_find_matching_node(NULL, arm_smmu_of_match);
++ if (!np)
++ return 0;
++
++ of_node_put(np);
++
+ ret = platform_driver_register(&arm_smmu_driver);
+ if (ret)
+ return ret;
+@@ -2092,6 +2064,10 @@ static int __init arm_smmu_init(void)
+ bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
+ #endif
+
++#ifdef CONFIG_FSL_MC_BUS
++ if (!iommu_present(&fsl_mc_bus_type))
++ bus_set_iommu(&fsl_mc_bus_type, &arm_fsl_mc_smmu_ops);
++#endif
+ return 0;
+ }
+
+diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
+index 7423318..7ce5273 100644
+--- a/drivers/iommu/exynos-iommu.c
++++ b/drivers/iommu/exynos-iommu.c
+@@ -684,7 +684,6 @@ static const struct of_device_id sysmmu_of_match[] __initconst = {
+ static struct platform_driver exynos_sysmmu_driver __refdata = {
+ .probe = exynos_sysmmu_probe,
+ .driver = {
+- .owner = THIS_MODULE,
+ .name = "exynos-sysmmu",
+ .of_match_table = sysmmu_of_match,
+ }
+@@ -1178,6 +1177,7 @@ static const struct iommu_ops exynos_iommu_ops = {
+ .detach_dev = exynos_iommu_detach_device,
+ .map = exynos_iommu_map,
+ .unmap = exynos_iommu_unmap,
++ .map_sg = default_iommu_map_sg,
+ .iova_to_phys = exynos_iommu_iova_to_phys,
+ .add_device = exynos_iommu_add_device,
+ .remove_device = exynos_iommu_remove_device,
+diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c
+index 2b6ce93..80ac68d 100644
+--- a/drivers/iommu/fsl_pamu.c
++++ b/drivers/iommu/fsl_pamu.c
+@@ -1227,7 +1227,6 @@ static const struct of_device_id fsl_of_pamu_ids[] = {
+ static struct platform_driver fsl_of_pamu_driver = {
+ .driver = {
+ .name = "fsl-of-pamu",
+- .owner = THIS_MODULE,
+ },
+ .probe = fsl_pamu_probe,
+ };
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index 3d1fc73..9e97328 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -4474,6 +4474,7 @@ static const struct iommu_ops intel_iommu_ops = {
+ .detach_dev = intel_iommu_detach_device,
+ .map = intel_iommu_map,
+ .unmap = intel_iommu_unmap,
++ .map_sg = default_iommu_map_sg,
+ .iova_to_phys = intel_iommu_iova_to_phys,
+ .add_device = intel_iommu_add_device,
+ .remove_device = intel_iommu_remove_device,
+diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
+new file mode 100644
+index 0000000..5a500ed
+--- /dev/null
++++ b/drivers/iommu/io-pgtable-arm.c
+@@ -0,0 +1,986 @@
++/*
++ * CPU-agnostic ARM page table allocator.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ *
++ * Copyright (C) 2014 ARM Limited
++ *
++ * Author: Will Deacon <will.deacon@arm.com>
++ */
++
++#define pr_fmt(fmt) "arm-lpae io-pgtable: " fmt
++
++#include <linux/iommu.h>
++#include <linux/kernel.h>
++#include <linux/sizes.h>
++#include <linux/slab.h>
++#include <linux/types.h>
++
++#include "io-pgtable.h"
++
++#define ARM_LPAE_MAX_ADDR_BITS 48
++#define ARM_LPAE_S2_MAX_CONCAT_PAGES 16
++#define ARM_LPAE_MAX_LEVELS 4
++
++/* Struct accessors */
++#define io_pgtable_to_data(x) \
++ container_of((x), struct arm_lpae_io_pgtable, iop)
++
++#define io_pgtable_ops_to_pgtable(x) \
++ container_of((x), struct io_pgtable, ops)
++
++#define io_pgtable_ops_to_data(x) \
++ io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))
++
++/*
++ * For consistency with the architecture, we always consider
++ * ARM_LPAE_MAX_LEVELS levels, with the walk starting at level n >=0
++ */
++#define ARM_LPAE_START_LVL(d) (ARM_LPAE_MAX_LEVELS - (d)->levels)
++
++/*
++ * Calculate the right shift amount to get to the portion describing level l
++ * in a virtual address mapped by the pagetable in d.
++ */
++#define ARM_LPAE_LVL_SHIFT(l,d) \
++ ((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1)) \
++ * (d)->bits_per_level) + (d)->pg_shift)
++
++#define ARM_LPAE_PAGES_PER_PGD(d) ((d)->pgd_size >> (d)->pg_shift)
++
++/*
++ * Calculate the index at level l used to map virtual address a using the
++ * pagetable in d.
++ */
++#define ARM_LPAE_PGD_IDX(l,d) \
++ ((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0)
++
++#define ARM_LPAE_LVL_IDX(a,l,d) \
++ (((a) >> ARM_LPAE_LVL_SHIFT(l,d)) & \
++ ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))
++
++/* Calculate the block/page mapping size at level l for pagetable in d. */
++#define ARM_LPAE_BLOCK_SIZE(l,d) \
++ (1 << (ilog2(sizeof(arm_lpae_iopte)) + \
++ ((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level)))
++
++/* Page table bits */
++#define ARM_LPAE_PTE_TYPE_SHIFT 0
++#define ARM_LPAE_PTE_TYPE_MASK 0x3
++
++#define ARM_LPAE_PTE_TYPE_BLOCK 1
++#define ARM_LPAE_PTE_TYPE_TABLE 3
++#define ARM_LPAE_PTE_TYPE_PAGE 3
++
++#define ARM_LPAE_PTE_NSTABLE (((arm_lpae_iopte)1) << 63)
++#define ARM_LPAE_PTE_XN (((arm_lpae_iopte)3) << 53)
++#define ARM_LPAE_PTE_AF (((arm_lpae_iopte)1) << 10)
++#define ARM_LPAE_PTE_SH_NS (((arm_lpae_iopte)0) << 8)
++#define ARM_LPAE_PTE_SH_OS (((arm_lpae_iopte)2) << 8)
++#define ARM_LPAE_PTE_SH_IS (((arm_lpae_iopte)3) << 8)
++#define ARM_LPAE_PTE_NS (((arm_lpae_iopte)1) << 5)
++#define ARM_LPAE_PTE_VALID (((arm_lpae_iopte)1) << 0)
++
++#define ARM_LPAE_PTE_ATTR_LO_MASK (((arm_lpae_iopte)0x3ff) << 2)
++/* Ignore the contiguous bit for block splitting */
++#define ARM_LPAE_PTE_ATTR_HI_MASK (((arm_lpae_iopte)6) << 52)
++#define ARM_LPAE_PTE_ATTR_MASK (ARM_LPAE_PTE_ATTR_LO_MASK | \
++ ARM_LPAE_PTE_ATTR_HI_MASK)
++
++/* Stage-1 PTE */
++#define ARM_LPAE_PTE_AP_UNPRIV (((arm_lpae_iopte)1) << 6)
++#define ARM_LPAE_PTE_AP_RDONLY (((arm_lpae_iopte)2) << 6)
++#define ARM_LPAE_PTE_ATTRINDX_SHIFT 2
++#define ARM_LPAE_PTE_nG (((arm_lpae_iopte)1) << 11)
++
++/* Stage-2 PTE */
++#define ARM_LPAE_PTE_HAP_FAULT (((arm_lpae_iopte)0) << 6)
++#define ARM_LPAE_PTE_HAP_READ (((arm_lpae_iopte)1) << 6)
++#define ARM_LPAE_PTE_HAP_WRITE (((arm_lpae_iopte)2) << 6)
++#define ARM_LPAE_PTE_MEMATTR_OIWB (((arm_lpae_iopte)0xf) << 2)
++#define ARM_LPAE_PTE_MEMATTR_NC (((arm_lpae_iopte)0x5) << 2)
++#define ARM_LPAE_PTE_MEMATTR_DEV (((arm_lpae_iopte)0x1) << 2)
++
++/* Register bits */
++#define ARM_32_LPAE_TCR_EAE (1 << 31)
++#define ARM_64_LPAE_S2_TCR_RES1 (1 << 31)
++
++#define ARM_LPAE_TCR_TG0_4K (0 << 14)
++#define ARM_LPAE_TCR_TG0_64K (1 << 14)
++#define
ARM_LPAE_TCR_TG0_16K (2 << 14) ++ ++#define ARM_LPAE_TCR_SH0_SHIFT 12 ++#define ARM_LPAE_TCR_SH0_MASK 0x3 ++#define ARM_LPAE_TCR_SH_NS 0 ++#define ARM_LPAE_TCR_SH_OS 2 ++#define ARM_LPAE_TCR_SH_IS 3 ++ ++#define ARM_LPAE_TCR_ORGN0_SHIFT 10 ++#define ARM_LPAE_TCR_IRGN0_SHIFT 8 ++#define ARM_LPAE_TCR_RGN_MASK 0x3 ++#define ARM_LPAE_TCR_RGN_NC 0 ++#define ARM_LPAE_TCR_RGN_WBWA 1 ++#define ARM_LPAE_TCR_RGN_WT 2 ++#define ARM_LPAE_TCR_RGN_WB 3 ++ ++#define ARM_LPAE_TCR_SL0_SHIFT 6 ++#define ARM_LPAE_TCR_SL0_MASK 0x3 ++ ++#define ARM_LPAE_TCR_T0SZ_SHIFT 0 ++#define ARM_LPAE_TCR_SZ_MASK 0xf ++ ++#define ARM_LPAE_TCR_PS_SHIFT 16 ++#define ARM_LPAE_TCR_PS_MASK 0x7 ++ ++#define ARM_LPAE_TCR_IPS_SHIFT 32 ++#define ARM_LPAE_TCR_IPS_MASK 0x7 ++ ++#define ARM_LPAE_TCR_PS_32_BIT 0x0ULL ++#define ARM_LPAE_TCR_PS_36_BIT 0x1ULL ++#define ARM_LPAE_TCR_PS_40_BIT 0x2ULL ++#define ARM_LPAE_TCR_PS_42_BIT 0x3ULL ++#define ARM_LPAE_TCR_PS_44_BIT 0x4ULL ++#define ARM_LPAE_TCR_PS_48_BIT 0x5ULL ++ ++#define ARM_LPAE_MAIR_ATTR_SHIFT(n) ((n) << 3) ++#define ARM_LPAE_MAIR_ATTR_MASK 0xff ++#define ARM_LPAE_MAIR_ATTR_DEVICE 0x04 ++#define ARM_LPAE_MAIR_ATTR_NC 0x44 ++#define ARM_LPAE_MAIR_ATTR_WBRWA 0xff ++#define ARM_LPAE_MAIR_ATTR_IDX_NC 0 ++#define ARM_LPAE_MAIR_ATTR_IDX_CACHE 1 ++#define ARM_LPAE_MAIR_ATTR_IDX_DEV 2 ++ ++/* IOPTE accessors */ ++#define iopte_deref(pte,d) \ ++ (__va((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1) \ ++ & ~((1ULL << (d)->pg_shift) - 1))) ++ ++#define iopte_type(pte,l) \ ++ (((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK) ++ ++#define iopte_prot(pte) ((pte) & ARM_LPAE_PTE_ATTR_MASK) ++ ++#define iopte_leaf(pte,l) \ ++ (l == (ARM_LPAE_MAX_LEVELS - 1) ? \ ++ (iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_PAGE) : \ ++ (iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_BLOCK)) ++ ++#define iopte_to_pfn(pte,d) \ ++ (((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)) >> (d)->pg_shift) ++ ++#define pfn_to_iopte(pfn,d) \ ++ (((pfn) << (d)->pg_shift) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)) ++ ++struct arm_lpae_io_pgtable { ++ struct io_pgtable iop; ++ ++ int levels; ++ size_t pgd_size; ++ unsigned long pg_shift; ++ unsigned long bits_per_level; ++ ++ void *pgd; ++}; ++ ++typedef u64 arm_lpae_iopte; ++ ++static bool selftest_running = false; ++ ++static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data, ++ unsigned long iova, phys_addr_t paddr, ++ arm_lpae_iopte prot, int lvl, ++ arm_lpae_iopte *ptep) ++{ ++ arm_lpae_iopte pte = prot; ++ ++ /* We require an unmap first */ ++ if (iopte_leaf(*ptep, lvl)) { ++ WARN_ON(!selftest_running); ++ return -EEXIST; ++ } ++ ++ if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS) ++ pte |= ARM_LPAE_PTE_NS; ++ ++ if (lvl == ARM_LPAE_MAX_LEVELS - 1) ++ pte |= ARM_LPAE_PTE_TYPE_PAGE; ++ else ++ pte |= ARM_LPAE_PTE_TYPE_BLOCK; ++ ++ pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS; ++ pte |= pfn_to_iopte(paddr >> data->pg_shift, data); ++ ++ *ptep = pte; ++ data->iop.cfg.tlb->flush_pgtable(ptep, sizeof(*ptep), data->iop.cookie); ++ return 0; ++} ++ ++static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova, ++ phys_addr_t paddr, size_t size, arm_lpae_iopte prot, ++ int lvl, arm_lpae_iopte *ptep) ++{ ++ arm_lpae_iopte *cptep, pte; ++ void *cookie = data->iop.cookie; ++ size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data); ++ ++ /* Find our entry at the current level */ ++ ptep += ARM_LPAE_LVL_IDX(iova, lvl, data); ++ ++ /* If we can install a leaf entry at this level, then do so */ ++ if (size == block_size && (size & data->iop.cfg.pgsize_bitmap)) ++ 
return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep); ++ ++ /* We can't allocate tables at the final level */ ++ if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1)) ++ return -EINVAL; ++ ++ /* Grab a pointer to the next level */ ++ pte = *ptep; ++ if (!pte) { ++ cptep = alloc_pages_exact(1UL << data->pg_shift, ++ GFP_ATOMIC | __GFP_ZERO); ++ if (!cptep) ++ return -ENOMEM; ++ ++ data->iop.cfg.tlb->flush_pgtable(cptep, 1UL << data->pg_shift, ++ cookie); ++ pte = __pa(cptep) | ARM_LPAE_PTE_TYPE_TABLE; ++ if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS) ++ pte |= ARM_LPAE_PTE_NSTABLE; ++ *ptep = pte; ++ data->iop.cfg.tlb->flush_pgtable(ptep, sizeof(*ptep), cookie); ++ } else { ++ cptep = iopte_deref(pte, data); ++ } ++ ++ /* Rinse, repeat */ ++ return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep); ++} ++ ++static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data, ++ int prot) ++{ ++ arm_lpae_iopte pte; ++ ++ if (data->iop.fmt == ARM_64_LPAE_S1 || ++ data->iop.fmt == ARM_32_LPAE_S1) { ++ pte = ARM_LPAE_PTE_AP_UNPRIV | ARM_LPAE_PTE_nG; ++ ++ if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ)) ++ pte |= ARM_LPAE_PTE_AP_RDONLY; ++ ++ if (prot & IOMMU_CACHE) ++ pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE ++ << ARM_LPAE_PTE_ATTRINDX_SHIFT); ++ } else { ++ pte = ARM_LPAE_PTE_HAP_FAULT; ++ if (prot & IOMMU_READ) ++ pte |= ARM_LPAE_PTE_HAP_READ; ++ if (prot & IOMMU_WRITE) ++ pte |= ARM_LPAE_PTE_HAP_WRITE; ++ if (prot & IOMMU_CACHE) ++ pte |= ARM_LPAE_PTE_MEMATTR_OIWB; ++ else ++ pte |= ARM_LPAE_PTE_MEMATTR_NC; ++ } ++ ++ if (prot & IOMMU_NOEXEC) ++ pte |= ARM_LPAE_PTE_XN; ++ ++ return pte; ++} ++ ++static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova, ++ phys_addr_t paddr, size_t size, int iommu_prot) ++{ ++ struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); ++ arm_lpae_iopte *ptep = data->pgd; ++ int lvl = ARM_LPAE_START_LVL(data); ++ arm_lpae_iopte prot; ++ ++ /* If no access, then nothing to do */ ++ if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE))) ++ return 0; ++ ++ prot = arm_lpae_prot_to_pte(data, iommu_prot); ++ return __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep); ++} ++ ++static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl, ++ arm_lpae_iopte *ptep) ++{ ++ arm_lpae_iopte *start, *end; ++ unsigned long table_size; ++ ++ /* Only leaf entries at the last level */ ++ if (lvl == ARM_LPAE_MAX_LEVELS - 1) ++ return; ++ ++ if (lvl == ARM_LPAE_START_LVL(data)) ++ table_size = data->pgd_size; ++ else ++ table_size = 1UL << data->pg_shift; ++ ++ start = ptep; ++ end = (void *)ptep + table_size; ++ ++ while (ptep != end) { ++ arm_lpae_iopte pte = *ptep++; ++ ++ if (!pte || iopte_leaf(pte, lvl)) ++ continue; ++ ++ __arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data)); ++ } ++ ++ free_pages_exact(start, table_size); ++} ++ ++static void arm_lpae_free_pgtable(struct io_pgtable *iop) ++{ ++ struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop); ++ ++ __arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd); ++ kfree(data); ++} ++ ++static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data, ++ unsigned long iova, size_t size, ++ arm_lpae_iopte prot, int lvl, ++ arm_lpae_iopte *ptep, size_t blk_size) ++{ ++ unsigned long blk_start, blk_end; ++ phys_addr_t blk_paddr; ++ arm_lpae_iopte table = 0; ++ void *cookie = data->iop.cookie; ++ const struct iommu_gather_ops *tlb = data->iop.cfg.tlb; ++ ++ blk_start = iova & ~(blk_size - 1); ++ blk_end = blk_start + blk_size; ++ blk_paddr = 
iopte_to_pfn(*ptep, data) << data->pg_shift; ++ ++ for (; blk_start < blk_end; blk_start += size, blk_paddr += size) { ++ arm_lpae_iopte *tablep; ++ ++ /* Unmap! */ ++ if (blk_start == iova) ++ continue; ++ ++ /* __arm_lpae_map expects a pointer to the start of the table */ ++ tablep = &table - ARM_LPAE_LVL_IDX(blk_start, lvl, data); ++ if (__arm_lpae_map(data, blk_start, blk_paddr, size, prot, lvl, ++ tablep) < 0) { ++ if (table) { ++ /* Free the table we allocated */ ++ tablep = iopte_deref(table, data); ++ __arm_lpae_free_pgtable(data, lvl + 1, tablep); ++ } ++ return 0; /* Bytes unmapped */ ++ } ++ } ++ ++ *ptep = table; ++ tlb->flush_pgtable(ptep, sizeof(*ptep), cookie); ++ iova &= ~(blk_size - 1); ++ tlb->tlb_add_flush(iova, blk_size, true, cookie); ++ return size; ++} ++ ++static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data, ++ unsigned long iova, size_t size, int lvl, ++ arm_lpae_iopte *ptep) ++{ ++ arm_lpae_iopte pte; ++ const struct iommu_gather_ops *tlb = data->iop.cfg.tlb; ++ void *cookie = data->iop.cookie; ++ size_t blk_size = ARM_LPAE_BLOCK_SIZE(lvl, data); ++ ++ ptep += ARM_LPAE_LVL_IDX(iova, lvl, data); ++ pte = *ptep; ++ ++ /* Something went horribly wrong and we ran out of page table */ ++ if (WARN_ON(!pte || (lvl == ARM_LPAE_MAX_LEVELS))) ++ return 0; ++ ++ /* If the size matches this level, we're in the right place */ ++ if (size == blk_size) { ++ *ptep = 0; ++ tlb->flush_pgtable(ptep, sizeof(*ptep), cookie); ++ ++ if (!iopte_leaf(pte, lvl)) { ++ /* Also flush any partial walks */ ++ tlb->tlb_add_flush(iova, size, false, cookie); ++ tlb->tlb_sync(data->iop.cookie); ++ ptep = iopte_deref(pte, data); ++ __arm_lpae_free_pgtable(data, lvl + 1, ptep); ++ } else { ++ tlb->tlb_add_flush(iova, size, true, cookie); ++ } ++ ++ return size; ++ } else if (iopte_leaf(pte, lvl)) { ++ /* ++ * Insert a table at the next level to map the old region, ++ * minus the part we want to unmap ++ */ ++ return arm_lpae_split_blk_unmap(data, iova, size, ++ iopte_prot(pte), lvl, ptep, ++ blk_size); ++ } ++ ++ /* Keep on walkin' */ ++ ptep = iopte_deref(pte, data); ++ return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep); ++} ++ ++static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova, ++ size_t size) ++{ ++ size_t unmapped; ++ struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); ++ struct io_pgtable *iop = &data->iop; ++ arm_lpae_iopte *ptep = data->pgd; ++ int lvl = ARM_LPAE_START_LVL(data); ++ ++ unmapped = __arm_lpae_unmap(data, iova, size, lvl, ptep); ++ if (unmapped) ++ iop->cfg.tlb->tlb_sync(iop->cookie); ++ ++ return unmapped; ++} ++ ++static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops, ++ unsigned long iova) ++{ ++ struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); ++ arm_lpae_iopte pte, *ptep = data->pgd; ++ int lvl = ARM_LPAE_START_LVL(data); ++ ++ do { ++ /* Valid IOPTE pointer? */ ++ if (!ptep) ++ return 0; ++ ++ /* Grab the IOPTE we're interested in */ ++ pte = *(ptep + ARM_LPAE_LVL_IDX(iova, lvl, data)); ++ ++ /* Valid entry? */ ++ if (!pte) ++ return 0; ++ ++ /* Leaf entry? 
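++ * A block descriptor, or a page descriptor at the final level,
++ * terminates the walk; iopte_leaf() checks the descriptor type
++ * against the current level.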
*/ ++ if (iopte_leaf(pte,lvl)) ++ goto found_translation; ++ ++ /* Take it to the next level */ ++ ptep = iopte_deref(pte, data); ++ } while (++lvl < ARM_LPAE_MAX_LEVELS); ++ ++ /* Ran out of page tables to walk */ ++ return 0; ++ ++found_translation: ++ iova &= ((1 << data->pg_shift) - 1); ++ return ((phys_addr_t)iopte_to_pfn(pte,data) << data->pg_shift) | iova; ++} ++ ++static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg) ++{ ++ unsigned long granule; ++ ++ /* ++ * We need to restrict the supported page sizes to match the ++ * translation regime for a particular granule. Aim to match ++ * the CPU page size if possible, otherwise prefer smaller sizes. ++ * While we're at it, restrict the block sizes to match the ++ * chosen granule. ++ */ ++ if (cfg->pgsize_bitmap & PAGE_SIZE) ++ granule = PAGE_SIZE; ++ else if (cfg->pgsize_bitmap & ~PAGE_MASK) ++ granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK); ++ else if (cfg->pgsize_bitmap & PAGE_MASK) ++ granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK); ++ else ++ granule = 0; ++ ++ switch (granule) { ++ case SZ_4K: ++ cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G); ++ break; ++ case SZ_16K: ++ cfg->pgsize_bitmap &= (SZ_16K | SZ_32M); ++ break; ++ case SZ_64K: ++ cfg->pgsize_bitmap &= (SZ_64K | SZ_512M); ++ break; ++ default: ++ cfg->pgsize_bitmap = 0; ++ } ++} ++ ++static struct arm_lpae_io_pgtable * ++arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg) ++{ ++ unsigned long va_bits, pgd_bits; ++ struct arm_lpae_io_pgtable *data; ++ ++ arm_lpae_restrict_pgsizes(cfg); ++ ++ if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K))) ++ return NULL; ++ ++ if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS) ++ return NULL; ++ ++ if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS) ++ return NULL; ++ ++ data = kmalloc(sizeof(*data), GFP_KERNEL); ++ if (!data) ++ return NULL; ++ ++ data->pg_shift = __ffs(cfg->pgsize_bitmap); ++ data->bits_per_level = data->pg_shift - ilog2(sizeof(arm_lpae_iopte)); ++ ++ va_bits = cfg->ias - data->pg_shift; ++ data->levels = DIV_ROUND_UP(va_bits, data->bits_per_level); ++ ++ /* Calculate the actual size of our pgd (without concatenation) */ ++ pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1)); ++ data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte))); ++ ++ data->iop.ops = (struct io_pgtable_ops) { ++ .map = arm_lpae_map, ++ .unmap = arm_lpae_unmap, ++ .iova_to_phys = arm_lpae_iova_to_phys, ++ }; ++ ++ return data; ++} ++ ++static struct io_pgtable * ++arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie) ++{ ++ u64 reg; ++ struct arm_lpae_io_pgtable *data = arm_lpae_alloc_pgtable(cfg); ++ ++ if (!data) ++ return NULL; ++ ++ /* TCR */ ++ reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) | ++ (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) | ++ (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT); ++ ++ switch (1 << data->pg_shift) { ++ case SZ_4K: ++ reg |= ARM_LPAE_TCR_TG0_4K; ++ break; ++ case SZ_16K: ++ reg |= ARM_LPAE_TCR_TG0_16K; ++ break; ++ case SZ_64K: ++ reg |= ARM_LPAE_TCR_TG0_64K; ++ break; ++ } ++ ++ switch (cfg->oas) { ++ case 32: ++ reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_IPS_SHIFT); ++ break; ++ case 36: ++ reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_IPS_SHIFT); ++ break; ++ case 40: ++ reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_IPS_SHIFT); ++ break; ++ case 42: ++ reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_IPS_SHIFT); ++ break; ++ case 44: ++ reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_IPS_SHIFT); ++ break; ++ case 48: ++ reg |= 
(ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_IPS_SHIFT); ++ break; ++ default: ++ goto out_free_data; ++ } ++ ++ reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT; ++ cfg->arm_lpae_s1_cfg.tcr = reg; ++ ++ /* MAIRs */ ++ reg = (ARM_LPAE_MAIR_ATTR_NC ++ << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) | ++ (ARM_LPAE_MAIR_ATTR_WBRWA ++ << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) | ++ (ARM_LPAE_MAIR_ATTR_DEVICE ++ << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV)); ++ ++ cfg->arm_lpae_s1_cfg.mair[0] = reg; ++ cfg->arm_lpae_s1_cfg.mair[1] = 0; ++ ++ /* Looking good; allocate a pgd */ ++ data->pgd = alloc_pages_exact(data->pgd_size, GFP_KERNEL | __GFP_ZERO); ++ if (!data->pgd) ++ goto out_free_data; ++ ++ cfg->tlb->flush_pgtable(data->pgd, data->pgd_size, cookie); ++ ++ /* TTBRs */ ++ cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd); ++ cfg->arm_lpae_s1_cfg.ttbr[1] = 0; ++ return &data->iop; ++ ++out_free_data: ++ kfree(data); ++ return NULL; ++} ++ ++static struct io_pgtable * ++arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie) ++{ ++ u64 reg, sl; ++ struct arm_lpae_io_pgtable *data = arm_lpae_alloc_pgtable(cfg); ++ ++ if (!data) ++ return NULL; ++ ++ /* ++ * Concatenate PGDs at level 1 if possible in order to reduce ++ * the depth of the stage-2 walk. ++ */ ++ if (data->levels == ARM_LPAE_MAX_LEVELS) { ++ unsigned long pgd_pages; ++ ++ pgd_pages = data->pgd_size >> ilog2(sizeof(arm_lpae_iopte)); ++ if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) { ++ data->pgd_size = pgd_pages << data->pg_shift; ++ data->levels--; ++ } ++ } ++ ++ /* VTCR */ ++ reg = ARM_64_LPAE_S2_TCR_RES1 | ++ (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) | ++ (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) | ++ (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT); ++ ++ sl = ARM_LPAE_START_LVL(data); ++ ++ switch (1 << data->pg_shift) { ++ case SZ_4K: ++ reg |= ARM_LPAE_TCR_TG0_4K; ++ sl++; /* SL0 format is different for 4K granule size */ ++ break; ++ case SZ_16K: ++ reg |= ARM_LPAE_TCR_TG0_16K; ++ break; ++ case SZ_64K: ++ reg |= ARM_LPAE_TCR_TG0_64K; ++ break; ++ } ++ ++ switch (cfg->oas) { ++ case 32: ++ reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_PS_SHIFT); ++ break; ++ case 36: ++ reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_PS_SHIFT); ++ break; ++ case 40: ++ reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_PS_SHIFT); ++ break; ++ case 42: ++ reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_PS_SHIFT); ++ break; ++ case 44: ++ reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_PS_SHIFT); ++ break; ++ case 48: ++ reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_PS_SHIFT); ++ break; ++ default: ++ goto out_free_data; ++ } ++ ++ reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT; ++ reg |= (~sl & ARM_LPAE_TCR_SL0_MASK) << ARM_LPAE_TCR_SL0_SHIFT; ++ cfg->arm_lpae_s2_cfg.vtcr = reg; ++ ++ /* Allocate pgd pages */ ++ data->pgd = alloc_pages_exact(data->pgd_size, GFP_KERNEL | __GFP_ZERO); ++ if (!data->pgd) ++ goto out_free_data; ++ ++ cfg->tlb->flush_pgtable(data->pgd, data->pgd_size, cookie); ++ ++ /* VTTBR */ ++ cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd); ++ return &data->iop; ++ ++out_free_data: ++ kfree(data); ++ return NULL; ++} ++ ++static struct io_pgtable * ++arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie) ++{ ++ struct io_pgtable *iop; ++ ++ if (cfg->ias > 32 || cfg->oas > 40) ++ return NULL; ++ ++ cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G); ++ iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie); ++ if (iop) { ++ 
cfg->arm_lpae_s1_cfg.tcr |= ARM_32_LPAE_TCR_EAE; ++ cfg->arm_lpae_s1_cfg.tcr &= 0xffffffff; ++ } ++ ++ return iop; ++} ++ ++static struct io_pgtable * ++arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie) ++{ ++ struct io_pgtable *iop; ++ ++ if (cfg->ias > 40 || cfg->oas > 40) ++ return NULL; ++ ++ cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G); ++ iop = arm_64_lpae_alloc_pgtable_s2(cfg, cookie); ++ if (iop) ++ cfg->arm_lpae_s2_cfg.vtcr &= 0xffffffff; ++ ++ return iop; ++} ++ ++struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = { ++ .alloc = arm_64_lpae_alloc_pgtable_s1, ++ .free = arm_lpae_free_pgtable, ++}; ++ ++struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = { ++ .alloc = arm_64_lpae_alloc_pgtable_s2, ++ .free = arm_lpae_free_pgtable, ++}; ++ ++struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = { ++ .alloc = arm_32_lpae_alloc_pgtable_s1, ++ .free = arm_lpae_free_pgtable, ++}; ++ ++struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = { ++ .alloc = arm_32_lpae_alloc_pgtable_s2, ++ .free = arm_lpae_free_pgtable, ++}; ++ ++#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST ++ ++static struct io_pgtable_cfg *cfg_cookie; ++ ++static void dummy_tlb_flush_all(void *cookie) ++{ ++ WARN_ON(cookie != cfg_cookie); ++} ++ ++static void dummy_tlb_add_flush(unsigned long iova, size_t size, bool leaf, ++ void *cookie) ++{ ++ WARN_ON(cookie != cfg_cookie); ++ WARN_ON(!(size & cfg_cookie->pgsize_bitmap)); ++} ++ ++static void dummy_tlb_sync(void *cookie) ++{ ++ WARN_ON(cookie != cfg_cookie); ++} ++ ++static void dummy_flush_pgtable(void *ptr, size_t size, void *cookie) ++{ ++ WARN_ON(cookie != cfg_cookie); ++} ++ ++static struct iommu_gather_ops dummy_tlb_ops __initdata = { ++ .tlb_flush_all = dummy_tlb_flush_all, ++ .tlb_add_flush = dummy_tlb_add_flush, ++ .tlb_sync = dummy_tlb_sync, ++ .flush_pgtable = dummy_flush_pgtable, ++}; ++ ++static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops) ++{ ++ struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); ++ struct io_pgtable_cfg *cfg = &data->iop.cfg; ++ ++ pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n", ++ cfg->pgsize_bitmap, cfg->ias); ++ pr_err("data: %d levels, 0x%zx pgd_size, %lu pg_shift, %lu bits_per_level, pgd @ %p\n", ++ data->levels, data->pgd_size, data->pg_shift, ++ data->bits_per_level, data->pgd); ++} ++ ++#define __FAIL(ops, i) ({ \ ++ WARN(1, "selftest: test failed for fmt idx %d\n", (i)); \ ++ arm_lpae_dump_ops(ops); \ ++ selftest_running = false; \ ++ -EFAULT; \ ++}) ++ ++static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg) ++{ ++ static const enum io_pgtable_fmt fmts[] = { ++ ARM_64_LPAE_S1, ++ ARM_64_LPAE_S2, ++ }; ++ ++ int i, j; ++ unsigned long iova; ++ size_t size; ++ struct io_pgtable_ops *ops; ++ ++ selftest_running = true; ++ ++ for (i = 0; i < ARRAY_SIZE(fmts); ++i) { ++ cfg_cookie = cfg; ++ ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg); ++ if (!ops) { ++ pr_err("selftest: failed to allocate io pgtable ops\n"); ++ return -ENOMEM; ++ } ++ ++ /* ++ * Initial sanity checks. ++ * Empty page tables shouldn't provide any translations. ++ */ ++ if (ops->iova_to_phys(ops, 42)) ++ return __FAIL(ops, i); ++ ++ if (ops->iova_to_phys(ops, SZ_1G + 42)) ++ return __FAIL(ops, i); ++ ++ if (ops->iova_to_phys(ops, SZ_2G + 42)) ++ return __FAIL(ops, i); ++ ++ /* ++ * Distinct mappings of different granule sizes. 
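++ * For each size in pgsize_bitmap, install an identity (iova == paddr)
++ * mapping 1GB beyond the previous one so the regions never collide,
++ * check that a second, overlapping map is rejected, and verify the
++ * translation through iova_to_phys.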
++ */ ++ iova = 0; ++ j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG); ++ while (j != BITS_PER_LONG) { ++ size = 1UL << j; ++ ++ if (ops->map(ops, iova, iova, size, IOMMU_READ | ++ IOMMU_WRITE | ++ IOMMU_NOEXEC | ++ IOMMU_CACHE)) ++ return __FAIL(ops, i); ++ ++ /* Overlapping mappings */ ++ if (!ops->map(ops, iova, iova + size, size, ++ IOMMU_READ | IOMMU_NOEXEC)) ++ return __FAIL(ops, i); ++ ++ if (ops->iova_to_phys(ops, iova + 42) != (iova + 42)) ++ return __FAIL(ops, i); ++ ++ iova += SZ_1G; ++ j++; ++ j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j); ++ } ++ ++ /* Partial unmap */ ++ size = 1UL << __ffs(cfg->pgsize_bitmap); ++ if (ops->unmap(ops, SZ_1G + size, size) != size) ++ return __FAIL(ops, i); ++ ++ /* Remap of partial unmap */ ++ if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ)) ++ return __FAIL(ops, i); ++ ++ if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42)) ++ return __FAIL(ops, i); ++ ++ /* Full unmap */ ++ iova = 0; ++ j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG); ++ while (j != BITS_PER_LONG) { ++ size = 1UL << j; ++ ++ if (ops->unmap(ops, iova, size) != size) ++ return __FAIL(ops, i); ++ ++ if (ops->iova_to_phys(ops, iova + 42)) ++ return __FAIL(ops, i); ++ ++ /* Remap full block */ ++ if (ops->map(ops, iova, iova, size, IOMMU_WRITE)) ++ return __FAIL(ops, i); ++ ++ if (ops->iova_to_phys(ops, iova + 42) != (iova + 42)) ++ return __FAIL(ops, i); ++ ++ iova += SZ_1G; ++ j++; ++ j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j); ++ } ++ ++ free_io_pgtable_ops(ops); ++ } ++ ++ selftest_running = false; ++ return 0; ++} ++ ++static int __init arm_lpae_do_selftests(void) ++{ ++ static const unsigned long pgsize[] = { ++ SZ_4K | SZ_2M | SZ_1G, ++ SZ_16K | SZ_32M, ++ SZ_64K | SZ_512M, ++ }; ++ ++ static const unsigned int ias[] = { ++ 32, 36, 40, 42, 44, 48, ++ }; ++ ++ int i, j, pass = 0, fail = 0; ++ struct io_pgtable_cfg cfg = { ++ .tlb = &dummy_tlb_ops, ++ .oas = 48, ++ }; ++ ++ for (i = 0; i < ARRAY_SIZE(pgsize); ++i) { ++ for (j = 0; j < ARRAY_SIZE(ias); ++j) { ++ cfg.pgsize_bitmap = pgsize[i]; ++ cfg.ias = ias[j]; ++ pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n", ++ pgsize[i], ias[j]); ++ if (arm_lpae_run_tests(&cfg)) ++ fail++; ++ else ++ pass++; ++ } ++ } ++ ++ pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail); ++ return fail ? -EFAULT : 0; ++} ++subsys_initcall(arm_lpae_do_selftests); ++#endif +diff --git a/drivers/iommu/io-pgtable.c b/drivers/iommu/io-pgtable.c +new file mode 100644 +index 0000000..6436fe2 +--- /dev/null ++++ b/drivers/iommu/io-pgtable.c +@@ -0,0 +1,82 @@ ++/* ++ * Generic page table allocator for IOMMUs. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program. If not, see . 
++ * ++ * Copyright (C) 2014 ARM Limited ++ * ++ * Author: Will Deacon <will.deacon@arm.com> ++ */ ++ ++#include <linux/bug.h> ++#include <linux/kernel.h> ++#include <linux/types.h> ++ ++#include "io-pgtable.h" ++ ++extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns; ++extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns; ++extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns; ++extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns; ++ ++static const struct io_pgtable_init_fns * ++io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] = ++{ ++#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE ++ [ARM_32_LPAE_S1] = &io_pgtable_arm_32_lpae_s1_init_fns, ++ [ARM_32_LPAE_S2] = &io_pgtable_arm_32_lpae_s2_init_fns, ++ [ARM_64_LPAE_S1] = &io_pgtable_arm_64_lpae_s1_init_fns, ++ [ARM_64_LPAE_S2] = &io_pgtable_arm_64_lpae_s2_init_fns, ++#endif ++}; ++ ++struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt, ++ struct io_pgtable_cfg *cfg, ++ void *cookie) ++{ ++ struct io_pgtable *iop; ++ const struct io_pgtable_init_fns *fns; ++ ++ if (fmt >= IO_PGTABLE_NUM_FMTS) ++ return NULL; ++ ++ fns = io_pgtable_init_table[fmt]; ++ if (!fns) ++ return NULL; ++ ++ iop = fns->alloc(cfg, cookie); ++ if (!iop) ++ return NULL; ++ ++ iop->fmt = fmt; ++ iop->cookie = cookie; ++ iop->cfg = *cfg; ++ ++ return &iop->ops; ++} ++ ++/* ++ * It is the IOMMU driver's responsibility to ensure that the page table ++ * is no longer accessible to the walker by this point. ++ */ ++void free_io_pgtable_ops(struct io_pgtable_ops *ops) ++{ ++ struct io_pgtable *iop; ++ ++ if (!ops) ++ return; ++ ++ iop = container_of(ops, struct io_pgtable, ops); ++ iop->cfg.tlb->tlb_flush_all(iop->cookie); ++ io_pgtable_init_table[iop->fmt]->free(iop); ++} +diff --git a/drivers/iommu/io-pgtable.h b/drivers/iommu/io-pgtable.h +new file mode 100644 +index 0000000..10e32f6 +--- /dev/null ++++ b/drivers/iommu/io-pgtable.h +@@ -0,0 +1,143 @@ ++#ifndef __IO_PGTABLE_H ++#define __IO_PGTABLE_H ++ ++/* ++ * Public API for use by IOMMU drivers ++ */ ++enum io_pgtable_fmt { ++ ARM_32_LPAE_S1, ++ ARM_32_LPAE_S2, ++ ARM_64_LPAE_S1, ++ ARM_64_LPAE_S2, ++ IO_PGTABLE_NUM_FMTS, ++}; ++ ++/** ++ * struct iommu_gather_ops - IOMMU callbacks for TLB and page table management. ++ * ++ * @tlb_flush_all: Synchronously invalidate the entire TLB context. ++ * @tlb_add_flush: Queue up a TLB invalidation for a virtual address range. ++ * @tlb_sync: Ensure any queued TLB invalidation has taken effect. ++ * @flush_pgtable: Ensure page table updates are visible to the IOMMU. ++ * ++ * Note that these can all be called in atomic context and must therefore ++ * not block. ++ */ ++struct iommu_gather_ops { ++ void (*tlb_flush_all)(void *cookie); ++ void (*tlb_add_flush)(unsigned long iova, size_t size, bool leaf, ++ void *cookie); ++ void (*tlb_sync)(void *cookie); ++ void (*flush_pgtable)(void *ptr, size_t size, void *cookie); ++}; ++ ++/** ++ * struct io_pgtable_cfg - Configuration data for a set of page tables. ++ * ++ * @quirks: A bitmap of hardware quirks that require some special ++ * action by the low-level page table allocator. ++ * @pgsize_bitmap: A bitmap of page sizes supported by this set of page ++ * tables. ++ * @ias: Input address (iova) size, in bits. ++ * @oas: Output address (paddr) size, in bits. ++ * @tlb: TLB management callbacks for this set of tables. 
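++ *
++ * As a rough usage sketch (hypothetical driver code; my_tlb_ops,
++ * my_cookie, iova and paddr are illustrative, not part of this API),
++ * a driver fills one of these in and asks for a set of ops:
++ *
++ *	struct io_pgtable_cfg cfg = {
++ *		.pgsize_bitmap	= SZ_4K | SZ_2M | SZ_1G,
++ *		.ias		= 48,
++ *		.oas		= 48,
++ *		.tlb		= &my_tlb_ops,
++ *	};
++ *	struct io_pgtable_ops *ops;
++ *
++ *	ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, my_cookie);
++ *	if (ops)
++ *		ops->map(ops, iova, paddr, SZ_4K, IOMMU_READ | IOMMU_WRITE);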
++ */ ++struct io_pgtable_cfg { ++ #define IO_PGTABLE_QUIRK_ARM_NS (1 << 0) /* Set NS bit in PTEs */ ++ int quirks; ++ unsigned long pgsize_bitmap; ++ unsigned int ias; ++ unsigned int oas; ++ const struct iommu_gather_ops *tlb; ++ ++ /* Low-level data specific to the table format */ ++ union { ++ struct { ++ u64 ttbr[2]; ++ u64 tcr; ++ u64 mair[2]; ++ } arm_lpae_s1_cfg; ++ ++ struct { ++ u64 vttbr; ++ u64 vtcr; ++ } arm_lpae_s2_cfg; ++ }; ++}; ++ ++/** ++ * struct io_pgtable_ops - Page table manipulation API for IOMMU drivers. ++ * ++ * @map: Map a physically contiguous memory region. ++ * @unmap: Unmap a physically contiguous memory region. ++ * @iova_to_phys: Translate iova to physical address. ++ * ++ * These functions map directly onto the iommu_ops member functions with ++ * the same names. ++ */ ++struct io_pgtable_ops { ++ int (*map)(struct io_pgtable_ops *ops, unsigned long iova, ++ phys_addr_t paddr, size_t size, int prot); ++ int (*unmap)(struct io_pgtable_ops *ops, unsigned long iova, ++ size_t size); ++ phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops, ++ unsigned long iova); ++}; ++ ++/** ++ * alloc_io_pgtable_ops() - Allocate a page table allocator for use by an IOMMU. ++ * ++ * @fmt: The page table format. ++ * @cfg: The page table configuration. This will be modified to represent ++ * the configuration actually provided by the allocator (e.g. the ++ * pgsize_bitmap may be restricted). ++ * @cookie: An opaque token provided by the IOMMU driver and passed back to ++ * the callback routines in cfg->tlb. ++ */ ++struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt, ++ struct io_pgtable_cfg *cfg, ++ void *cookie); ++ ++/** ++ * free_io_pgtable_ops() - Free an io_pgtable_ops structure. The caller ++ * *must* ensure that the page table is no longer ++ * live, but the TLB can be dirty. ++ * ++ * @ops: The ops returned from alloc_io_pgtable_ops. ++ */ ++void free_io_pgtable_ops(struct io_pgtable_ops *ops); ++ ++ ++/* ++ * Internal structures for page table allocator implementations. ++ */ ++ ++/** ++ * struct io_pgtable - Internal structure describing a set of page tables. ++ * ++ * @fmt: The page table format. ++ * @cookie: An opaque token provided by the IOMMU driver and passed back to ++ * any callback routines. ++ * @cfg: A copy of the page table configuration. ++ * @ops: The page table operations in use for this set of page tables. ++ */ ++struct io_pgtable { ++ enum io_pgtable_fmt fmt; ++ void *cookie; ++ struct io_pgtable_cfg cfg; ++ struct io_pgtable_ops ops; ++}; ++ ++/** ++ * struct io_pgtable_init_fns - Alloc/free a set of page tables for a ++ * particular format. ++ * ++ * @alloc: Allocate a set of page tables described by cfg. ++ * @free: Free the page tables associated with iop. 
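++ *
++ * One of these is registered per supported format in the
++ * io_pgtable_init_table array in io-pgtable.c, through which
++ * alloc_io_pgtable_ops() and free_io_pgtable_ops() dispatch.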
++ */ ++struct io_pgtable_init_fns { ++ struct io_pgtable *(*alloc)(struct io_pgtable_cfg *cfg, void *cookie); ++ void (*free)(struct io_pgtable *iop); ++}; ++ ++#endif /* __IO_PGTABLE_H */ +diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c +index ed8b048..8d8e5a7 100644 +--- a/drivers/iommu/iommu.c ++++ b/drivers/iommu/iommu.c +@@ -591,10 +591,10 @@ static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev, + continue; + + /* We alias them or they alias us */ +- if (((pdev->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVFN) && +- pdev->dma_alias_devfn == tmp->devfn) || +- ((tmp->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVFN) && +- tmp->dma_alias_devfn == pdev->devfn)) { ++ if (((pdev->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVID) && ++ (pdev->dma_alias_devid & 0xff) == tmp->devfn) || ++ ((tmp->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVID) && ++ (tmp->dma_alias_devid & 0xff) == pdev->devfn)) { + + group = get_pci_alias_group(tmp, devfns); + if (group) { +@@ -737,7 +737,7 @@ static int add_iommu_group(struct device *dev, void *data) + const struct iommu_ops *ops = cb->ops; + + if (!ops->add_device) +- return -ENODEV; ++ return 0; + + WARN_ON(dev->iommu_group); + +@@ -818,7 +818,15 @@ static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops) + kfree(nb); + return err; + } +- return bus_for_each_dev(bus, NULL, &cb, add_iommu_group); ++ ++ err = bus_for_each_dev(bus, NULL, &cb, add_iommu_group); ++ if (err) { ++ bus_unregister_notifier(bus, nb); ++ kfree(nb); ++ return err; ++ } ++ ++ return 0; + } + + /** +@@ -836,13 +844,19 @@ static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops) + */ + int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops) + { ++ int err; ++ + if (bus->iommu_ops != NULL) + return -EBUSY; + + bus->iommu_ops = ops; + + /* Do IOMMU specific setup for this bus-type */ +- return iommu_bus_init(bus, ops); ++ err = iommu_bus_init(bus, ops); ++ if (err) ++ bus->iommu_ops = NULL; ++ ++ return err; + } + EXPORT_SYMBOL_GPL(bus_set_iommu); + +@@ -887,36 +901,24 @@ EXPORT_SYMBOL_GPL(iommu_set_fault_handler); + struct iommu_domain *iommu_domain_alloc(struct bus_type *bus) + { + struct iommu_domain *domain; +- int ret; + + if (bus == NULL || bus->iommu_ops == NULL) + return NULL; + +- domain = kzalloc(sizeof(*domain), GFP_KERNEL); ++ domain = bus->iommu_ops->domain_alloc(IOMMU_DOMAIN_UNMANAGED); + if (!domain) + return NULL; + +- domain->ops = bus->iommu_ops; +- +- ret = domain->ops->domain_init(domain); +- if (ret) +- goto out_free; ++ domain->ops = bus->iommu_ops; ++ domain->type = IOMMU_DOMAIN_UNMANAGED; + + return domain; +- +-out_free: +- kfree(domain); +- +- return NULL; + } + EXPORT_SYMBOL_GPL(iommu_domain_alloc); + + void iommu_domain_free(struct iommu_domain *domain) + { +- if (likely(domain->ops->domain_destroy != NULL)) +- domain->ops->domain_destroy(domain); +- +- kfree(domain); ++ domain->ops->domain_free(domain); + } + EXPORT_SYMBOL_GPL(iommu_domain_free); + +@@ -943,6 +945,16 @@ void iommu_detach_device(struct iommu_domain *domain, struct device *dev) + } + EXPORT_SYMBOL_GPL(iommu_detach_device); + ++struct iommu_domain *iommu_get_dev_domain(struct device *dev) ++{ ++ const struct iommu_ops *ops = dev->bus->iommu_ops; ++ ++ if (unlikely(ops == NULL || ops->get_dev_iommu_domain == NULL)) ++ return NULL; ++ ++ return ops->get_dev_iommu_domain(dev); ++} ++EXPORT_SYMBOL_GPL(iommu_get_dev_domain); + /* + * IOMMU groups are really the natrual working unit of the IOMMU, but + * the IOMMU API works on domains and devices. 
Bridge that gap by +@@ -1035,6 +1047,9 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova, + domain->ops->pgsize_bitmap == 0UL)) + return -ENODEV; + ++ if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING))) ++ return -EINVAL; ++ + /* find out the minimum page size supported */ + min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap); + +@@ -1070,7 +1085,7 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova, + if (ret) + iommu_unmap(domain, orig_iova, orig_size - size); + else +- trace_map(iova, paddr, size); ++ trace_map(orig_iova, paddr, orig_size); + + return ret; + } +@@ -1080,11 +1095,15 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size) + { + size_t unmapped_page, unmapped = 0; + unsigned int min_pagesz; ++ unsigned long orig_iova = iova; + + if (unlikely(domain->ops->unmap == NULL || + domain->ops->pgsize_bitmap == 0UL)) + return -ENODEV; + ++ if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING))) ++ return -EINVAL; ++ + /* find out the minimum page size supported */ + min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap); + +@@ -1119,11 +1138,53 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size) + unmapped += unmapped_page; + } + +- trace_unmap(iova, 0, size); ++ trace_unmap(orig_iova, size, unmapped); + return unmapped; + } + EXPORT_SYMBOL_GPL(iommu_unmap); + ++size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova, ++ struct scatterlist *sg, unsigned int nents, int prot) ++{ ++ struct scatterlist *s; ++ size_t mapped = 0; ++ unsigned int i, min_pagesz; ++ int ret; ++ ++ if (unlikely(domain->ops->pgsize_bitmap == 0UL)) ++ return 0; ++ ++ min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap); ++ ++ for_each_sg(sg, s, nents, i) { ++ phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset; ++ ++ /* ++ * We are mapping on IOMMU page boundaries, so offset within ++ * the page must be 0. However, the IOMMU may support pages ++ * smaller than PAGE_SIZE, so s->offset may still represent ++ * an offset of that boundary within the CPU page. 
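++ * For example, with 64K CPU pages and a min_pagesz of 4K, an
++ * s->offset of 0x1000 is still acceptable; only offsets that are
++ * not aligned to the 4K IOMMU page size are rejected below.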
++ */ ++ if (!IS_ALIGNED(s->offset, min_pagesz)) ++ goto out_err; ++ ++ ret = iommu_map(domain, iova + mapped, phys, s->length, prot); ++ if (ret) ++ goto out_err; ++ ++ mapped += s->length; ++ } ++ ++ return mapped; ++ ++out_err: ++ /* undo mappings already done */ ++ iommu_unmap(domain, iova, mapped); ++ ++ return 0; ++ ++} ++EXPORT_SYMBOL_GPL(default_iommu_map_sg); + + int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr, + phys_addr_t paddr, u64 size, int prot) +diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c +index 7dab5cb..f3c5ab6 100644 +--- a/drivers/iommu/ipmmu-vmsa.c ++++ b/drivers/iommu/ipmmu-vmsa.c +@@ -1127,6 +1127,7 @@ static const struct iommu_ops ipmmu_ops = { + .detach_dev = ipmmu_detach_device, + .map = ipmmu_map, + .unmap = ipmmu_unmap, ++ .map_sg = default_iommu_map_sg, + .iova_to_phys = ipmmu_iova_to_phys, + .add_device = ipmmu_add_device, + .remove_device = ipmmu_remove_device, +@@ -1221,7 +1222,6 @@ static int ipmmu_remove(struct platform_device *pdev) + + static struct platform_driver ipmmu_driver = { + .driver = { +- .owner = THIS_MODULE, + .name = "ipmmu-vmsa", + }, + .probe = ipmmu_probe, +diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c +index 74a1767..2c3f5ad 100644 +--- a/drivers/iommu/irq_remapping.c ++++ b/drivers/iommu/irq_remapping.c +@@ -56,19 +56,13 @@ static int do_setup_msi_irqs(struct pci_dev *dev, int nvec) + unsigned int irq; + struct msi_desc *msidesc; + +- WARN_ON(!list_is_singular(&dev->msi_list)); + msidesc = list_entry(dev->msi_list.next, struct msi_desc, list); +- WARN_ON(msidesc->irq); +- WARN_ON(msidesc->msi_attrib.multiple); +- WARN_ON(msidesc->nvec_used); + + irq = irq_alloc_hwirqs(nvec, dev_to_node(&dev->dev)); + if (irq == 0) + return -ENOSPC; + + nvec_pow2 = __roundup_pow_of_two(nvec); +- msidesc->nvec_used = nvec; +- msidesc->msi_attrib.multiple = ilog2(nvec_pow2); + for (sub_handle = 0; sub_handle < nvec; sub_handle++) { + if (!sub_handle) { + index = msi_alloc_remapped_irq(dev, irq, nvec_pow2); +@@ -96,8 +90,6 @@ error: + * IRQs from tearing down again in default_teardown_msi_irqs() + */ + msidesc->irq = 0; +- msidesc->nvec_used = 0; +- msidesc->msi_attrib.multiple = 0; + + return ret; + } +diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c +index 6e3dcc2..1c7b78e 100644 +--- a/drivers/iommu/msm_iommu.c ++++ b/drivers/iommu/msm_iommu.c +@@ -681,6 +681,7 @@ static const struct iommu_ops msm_iommu_ops = { + .detach_dev = msm_iommu_detach_dev, + .map = msm_iommu_map, + .unmap = msm_iommu_unmap, ++ .map_sg = default_iommu_map_sg, + .iova_to_phys = msm_iommu_iova_to_phys, + .pgsize_bitmap = MSM_IOMMU_PGSIZES, + }; +diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c +index e550ccb..43429ab 100644 +--- a/drivers/iommu/of_iommu.c ++++ b/drivers/iommu/of_iommu.c +@@ -18,9 +18,14 @@ + */ + + #include ++#include + #include + #include + #include ++#include ++ ++static const struct of_device_id __iommu_of_table_sentinel ++ __used __section(__iommu_of_table_end); + + /** + * of_get_dma_window - Parse *dma-window property and returns 0 if found. 
+@@ -89,3 +94,93 @@ int of_get_dma_window(struct device_node *dn, const char *prefix, int index, + return 0; + } + EXPORT_SYMBOL_GPL(of_get_dma_window); ++ ++struct of_iommu_node { ++ struct list_head list; ++ struct device_node *np; ++ struct iommu_ops *ops; ++}; ++static LIST_HEAD(of_iommu_list); ++static DEFINE_SPINLOCK(of_iommu_lock); ++ ++void of_iommu_set_ops(struct device_node *np, struct iommu_ops *ops) ++{ ++ struct of_iommu_node *iommu = kzalloc(sizeof(*iommu), GFP_KERNEL); ++ ++ if (WARN_ON(!iommu)) ++ return; ++ ++ INIT_LIST_HEAD(&iommu->list); ++ iommu->np = np; ++ iommu->ops = ops; ++ spin_lock(&of_iommu_lock); ++ list_add_tail(&iommu->list, &of_iommu_list); ++ spin_unlock(&of_iommu_lock); ++} ++ ++struct iommu_ops *of_iommu_get_ops(struct device_node *np) ++{ ++ struct of_iommu_node *node; ++ struct iommu_ops *ops = NULL; ++ ++ spin_lock(&of_iommu_lock); ++ list_for_each_entry(node, &of_iommu_list, list) ++ if (node->np == np) { ++ ops = node->ops; ++ break; ++ } ++ spin_unlock(&of_iommu_lock); ++ return ops; ++} ++ ++struct iommu_ops *of_iommu_configure(struct device *dev, ++ struct device_node *master_np) ++{ ++ struct of_phandle_args iommu_spec; ++ struct device_node *np; ++ struct iommu_ops *ops = NULL; ++ int idx = 0; ++ ++ if (dev_is_pci(dev)) { ++ dev_err(dev, "IOMMU is currently not supported for PCI\n"); ++ return NULL; ++ } ++ ++ /* ++ * We don't currently walk up the tree looking for a parent IOMMU. ++ * See the `Notes:' section of ++ * Documentation/devicetree/bindings/iommu/iommu.txt ++ */ ++ while (!of_parse_phandle_with_args(master_np, "iommus", ++ "#iommu-cells", idx, ++ &iommu_spec)) { ++ np = iommu_spec.np; ++ ops = of_iommu_get_ops(np); ++ ++ if (!ops || !ops->of_xlate || ops->of_xlate(dev, &iommu_spec)) ++ goto err_put_node; ++ ++ of_node_put(np); ++ idx++; ++ } ++ ++ return ops; ++ ++err_put_node: ++ of_node_put(np); ++ return NULL; ++} ++ ++void __init of_iommu_init(void) ++{ ++ struct device_node *np; ++ const struct of_device_id *match, *matches = &__iommu_of_table; ++ ++ for_each_matching_node_and_match(np, matches, &match) { ++ const of_iommu_init_fn init_fn = match->data; ++ ++ if (init_fn(np)) ++ pr_err("Failed to initialise IOMMU %s\n", ++ of_node_full_name(np)); ++ } ++} +diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c +index 3627887..18003c0 100644 +--- a/drivers/iommu/omap-iommu.c ++++ b/drivers/iommu/omap-iommu.c +@@ -1288,6 +1288,7 @@ static const struct iommu_ops omap_iommu_ops = { + .detach_dev = omap_iommu_detach_dev, + .map = omap_iommu_map, + .unmap = omap_iommu_unmap, ++ .map_sg = default_iommu_map_sg, + .iova_to_phys = omap_iommu_iova_to_phys, + .add_device = omap_iommu_add_device, + .remove_device = omap_iommu_remove_device, +diff --git a/drivers/iommu/shmobile-iommu.c b/drivers/iommu/shmobile-iommu.c +index 1333e6f..f1b0077 100644 +--- a/drivers/iommu/shmobile-iommu.c ++++ b/drivers/iommu/shmobile-iommu.c +@@ -361,6 +361,7 @@ static const struct iommu_ops shmobile_iommu_ops = { + .detach_dev = shmobile_iommu_detach_device, + .map = shmobile_iommu_map, + .unmap = shmobile_iommu_unmap, ++ .map_sg = default_iommu_map_sg, + .iova_to_phys = shmobile_iommu_iova_to_phys, + .add_device = shmobile_iommu_add_device, + .pgsize_bitmap = SZ_1M | SZ_64K | SZ_4K, +diff --git a/drivers/iommu/shmobile-ipmmu.c b/drivers/iommu/shmobile-ipmmu.c +index bd97ade..951651a 100644 +--- a/drivers/iommu/shmobile-ipmmu.c ++++ b/drivers/iommu/shmobile-ipmmu.c +@@ -118,7 +118,6 @@ static int ipmmu_probe(struct platform_device *pdev) + static 
struct platform_driver ipmmu_driver = { + .probe = ipmmu_probe, + .driver = { +- .owner = THIS_MODULE, + .name = "ipmmu", + }, + }; +diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c +index a6d76ab..f722a0c 100644 +--- a/drivers/iommu/tegra-gart.c ++++ b/drivers/iommu/tegra-gart.c +@@ -425,7 +425,6 @@ static struct platform_driver tegra_gart_driver = { + .probe = tegra_gart_probe, + .remove = tegra_gart_remove, + .driver = { +- .owner = THIS_MODULE, + .name = "tegra-gart", + .pm = &tegra_gart_pm_ops, + .of_match_table = tegra_gart_of_match, +diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c +index 3afdf43..cb0c9bf 100644 +--- a/drivers/iommu/tegra-smmu.c ++++ b/drivers/iommu/tegra-smmu.c +@@ -955,6 +955,7 @@ static const struct iommu_ops smmu_iommu_ops = { + .detach_dev = smmu_iommu_detach_dev, + .map = smmu_iommu_map, + .unmap = smmu_iommu_unmap, ++ .map_sg = default_iommu_map_sg, + .iova_to_phys = smmu_iommu_iova_to_phys, + .pgsize_bitmap = SMMU_IOMMU_PGSIZES, + }; +@@ -1269,7 +1270,6 @@ static struct platform_driver tegra_smmu_driver = { + .probe = tegra_smmu_probe, + .remove = tegra_smmu_remove, + .driver = { +- .owner = THIS_MODULE, + .name = "tegra-smmu", + .pm = &tegra_smmu_pm_ops, + .of_match_table = tegra_smmu_of_match, +diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig +index b21f12f..caf590c 100644 +--- a/drivers/irqchip/Kconfig ++++ b/drivers/irqchip/Kconfig +@@ -15,6 +15,10 @@ config ARM_GIC_V3 + select IRQ_DOMAIN + select MULTI_IRQ_HANDLER + ++config ARM_GIC_V3_ITS ++ bool ++ select PCI_MSI_IRQ_DOMAIN ++ + config ARM_NVIC + bool + select IRQ_DOMAIN +diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile +index 173bb5f..ec3621d 100644 +--- a/drivers/irqchip/Makefile ++++ b/drivers/irqchip/Makefile +@@ -20,6 +20,7 @@ obj-$(CONFIG_ARCH_SUNXI) += irq-sunxi-nmi.o + obj-$(CONFIG_ARCH_SPEAR3XX) += spear-shirq.o + obj-$(CONFIG_ARM_GIC) += irq-gic.o irq-gic-common.o + obj-$(CONFIG_ARM_GIC_V3) += irq-gic-v3.o irq-gic-common.o ++obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o + obj-$(CONFIG_ARM_NVIC) += irq-nvic.o + obj-$(CONFIG_ARM_VIC) += irq-vic.o + obj-$(CONFIG_ATMEL_AIC_IRQ) += irq-atmel-aic-common.o irq-atmel-aic.o +diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c +index 41ac85a..615075d 100644 +--- a/drivers/irqchip/irq-armada-370-xp.c ++++ b/drivers/irqchip/irq-armada-370-xp.c +@@ -131,7 +131,7 @@ static void armada_370_xp_free_msi(int hwirq) + mutex_unlock(&msi_used_lock); + } + +-static int armada_370_xp_setup_msi_irq(struct msi_chip *chip, ++static int armada_370_xp_setup_msi_irq(struct msi_controller *chip, + struct pci_dev *pdev, + struct msi_desc *desc) + { +@@ -158,11 +158,11 @@ static int armada_370_xp_setup_msi_irq(struct msi_chip *chip, + msg.address_hi = 0; + msg.data = 0xf00 | (hwirq + 16); + +- write_msi_msg(virq, &msg); ++ pci_write_msi_msg(virq, &msg); + return 0; + } + +-static void armada_370_xp_teardown_msi_irq(struct msi_chip *chip, ++static void armada_370_xp_teardown_msi_irq(struct msi_controller *chip, + unsigned int irq) + { + struct irq_data *d = irq_get_irq_data(irq); +@@ -174,10 +174,10 @@ static void armada_370_xp_teardown_msi_irq(struct msi_chip *chip, + + static struct irq_chip armada_370_xp_msi_irq_chip = { + .name = "armada_370_xp_msi_irq", +- .irq_enable = unmask_msi_irq, +- .irq_disable = mask_msi_irq, +- .irq_mask = mask_msi_irq, +- .irq_unmask = unmask_msi_irq, ++ .irq_enable = pci_msi_unmask_irq, ++ .irq_disable = pci_msi_mask_irq, ++ .irq_mask 
= pci_msi_mask_irq, ++ .irq_unmask = pci_msi_unmask_irq, + }; + + static int armada_370_xp_msi_map(struct irq_domain *domain, unsigned int virq, +@@ -197,7 +197,7 @@ static const struct irq_domain_ops armada_370_xp_msi_irq_ops = { + static int armada_370_xp_msi_init(struct device_node *node, + phys_addr_t main_int_phys_base) + { +- struct msi_chip *msi_chip; ++ struct msi_controller *msi_chip; + u32 reg; + int ret; + +diff --git a/drivers/irqchip/irq-atmel-aic.c b/drivers/irqchip/irq-atmel-aic.c +index 9a2cf3c..27fdd8c 100644 +--- a/drivers/irqchip/irq-atmel-aic.c ++++ b/drivers/irqchip/irq-atmel-aic.c +@@ -65,11 +65,11 @@ aic_handle(struct pt_regs *regs) + u32 irqnr; + u32 irqstat; + +- irqnr = irq_reg_readl(gc->reg_base + AT91_AIC_IVR); +- irqstat = irq_reg_readl(gc->reg_base + AT91_AIC_ISR); ++ irqnr = irq_reg_readl(gc, AT91_AIC_IVR); ++ irqstat = irq_reg_readl(gc, AT91_AIC_ISR); + + if (!irqstat) +- irq_reg_writel(0, gc->reg_base + AT91_AIC_EOICR); ++ irq_reg_writel(gc, 0, AT91_AIC_EOICR); + else + handle_domain_irq(aic_domain, irqnr, regs); + } +@@ -80,7 +80,7 @@ static int aic_retrigger(struct irq_data *d) + + /* Enable interrupt on AIC5 */ + irq_gc_lock(gc); +- irq_reg_writel(d->mask, gc->reg_base + AT91_AIC_ISCR); ++ irq_reg_writel(gc, d->mask, AT91_AIC_ISCR); + irq_gc_unlock(gc); + + return 0; +@@ -92,12 +92,12 @@ static int aic_set_type(struct irq_data *d, unsigned type) + unsigned int smr; + int ret; + +- smr = irq_reg_readl(gc->reg_base + AT91_AIC_SMR(d->hwirq)); ++ smr = irq_reg_readl(gc, AT91_AIC_SMR(d->hwirq)); + ret = aic_common_set_type(d, type, &smr); + if (ret) + return ret; + +- irq_reg_writel(smr, gc->reg_base + AT91_AIC_SMR(d->hwirq)); ++ irq_reg_writel(gc, smr, AT91_AIC_SMR(d->hwirq)); + + return 0; + } +@@ -108,8 +108,8 @@ static void aic_suspend(struct irq_data *d) + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); + + irq_gc_lock(gc); +- irq_reg_writel(gc->mask_cache, gc->reg_base + AT91_AIC_IDCR); +- irq_reg_writel(gc->wake_active, gc->reg_base + AT91_AIC_IECR); ++ irq_reg_writel(gc, gc->mask_cache, AT91_AIC_IDCR); ++ irq_reg_writel(gc, gc->wake_active, AT91_AIC_IECR); + irq_gc_unlock(gc); + } + +@@ -118,8 +118,8 @@ static void aic_resume(struct irq_data *d) + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); + + irq_gc_lock(gc); +- irq_reg_writel(gc->wake_active, gc->reg_base + AT91_AIC_IDCR); +- irq_reg_writel(gc->mask_cache, gc->reg_base + AT91_AIC_IECR); ++ irq_reg_writel(gc, gc->wake_active, AT91_AIC_IDCR); ++ irq_reg_writel(gc, gc->mask_cache, AT91_AIC_IECR); + irq_gc_unlock(gc); + } + +@@ -128,8 +128,8 @@ static void aic_pm_shutdown(struct irq_data *d) + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); + + irq_gc_lock(gc); +- irq_reg_writel(0xffffffff, gc->reg_base + AT91_AIC_IDCR); +- irq_reg_writel(0xffffffff, gc->reg_base + AT91_AIC_ICCR); ++ irq_reg_writel(gc, 0xffffffff, AT91_AIC_IDCR); ++ irq_reg_writel(gc, 0xffffffff, AT91_AIC_ICCR); + irq_gc_unlock(gc); + } + #else +@@ -148,24 +148,24 @@ static void __init aic_hw_init(struct irq_domain *domain) + * will not Lock out nIRQ + */ + for (i = 0; i < 8; i++) +- irq_reg_writel(0, gc->reg_base + AT91_AIC_EOICR); ++ irq_reg_writel(gc, 0, AT91_AIC_EOICR); + + /* + * Spurious Interrupt ID in Spurious Vector Register. 
+ * When there is no current interrupt, the IRQ Vector Register + * reads the value stored in AIC_SPU + */ +- irq_reg_writel(0xffffffff, gc->reg_base + AT91_AIC_SPU); ++ irq_reg_writel(gc, 0xffffffff, AT91_AIC_SPU); + + /* No debugging in AIC: Debug (Protect) Control Register */ +- irq_reg_writel(0, gc->reg_base + AT91_AIC_DCR); ++ irq_reg_writel(gc, 0, AT91_AIC_DCR); + + /* Disable and clear all interrupts initially */ +- irq_reg_writel(0xffffffff, gc->reg_base + AT91_AIC_IDCR); +- irq_reg_writel(0xffffffff, gc->reg_base + AT91_AIC_ICCR); ++ irq_reg_writel(gc, 0xffffffff, AT91_AIC_IDCR); ++ irq_reg_writel(gc, 0xffffffff, AT91_AIC_ICCR); + + for (i = 0; i < 32; i++) +- irq_reg_writel(i, gc->reg_base + AT91_AIC_SVR(i)); ++ irq_reg_writel(gc, i, AT91_AIC_SVR(i)); + } + + static int aic_irq_domain_xlate(struct irq_domain *d, +@@ -195,10 +195,10 @@ static int aic_irq_domain_xlate(struct irq_domain *d, + gc = dgc->gc[idx]; + + irq_gc_lock(gc); +- smr = irq_reg_readl(gc->reg_base + AT91_AIC_SMR(*out_hwirq)); ++ smr = irq_reg_readl(gc, AT91_AIC_SMR(*out_hwirq)); + ret = aic_common_set_priority(intspec[2], &smr); + if (!ret) +- irq_reg_writel(smr, gc->reg_base + AT91_AIC_SMR(*out_hwirq)); ++ irq_reg_writel(gc, smr, AT91_AIC_SMR(*out_hwirq)); + irq_gc_unlock(gc); + + return ret; +diff --git a/drivers/irqchip/irq-atmel-aic5.c b/drivers/irqchip/irq-atmel-aic5.c +index a11aae8..a2e8c3f 100644 +--- a/drivers/irqchip/irq-atmel-aic5.c ++++ b/drivers/irqchip/irq-atmel-aic5.c +@@ -75,11 +75,11 @@ aic5_handle(struct pt_regs *regs) + u32 irqnr; + u32 irqstat; + +- irqnr = irq_reg_readl(gc->reg_base + AT91_AIC5_IVR); +- irqstat = irq_reg_readl(gc->reg_base + AT91_AIC5_ISR); ++ irqnr = irq_reg_readl(gc, AT91_AIC5_IVR); ++ irqstat = irq_reg_readl(gc, AT91_AIC5_ISR); + + if (!irqstat) +- irq_reg_writel(0, gc->reg_base + AT91_AIC5_EOICR); ++ irq_reg_writel(gc, 0, AT91_AIC5_EOICR); + else + handle_domain_irq(aic5_domain, irqnr, regs); + } +@@ -92,8 +92,8 @@ static void aic5_mask(struct irq_data *d) + + /* Disable interrupt on AIC5 */ + irq_gc_lock(gc); +- irq_reg_writel(d->hwirq, gc->reg_base + AT91_AIC5_SSR); +- irq_reg_writel(1, gc->reg_base + AT91_AIC5_IDCR); ++ irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR); ++ irq_reg_writel(gc, 1, AT91_AIC5_IDCR); + gc->mask_cache &= ~d->mask; + irq_gc_unlock(gc); + } +@@ -106,8 +106,8 @@ static void aic5_unmask(struct irq_data *d) + + /* Enable interrupt on AIC5 */ + irq_gc_lock(gc); +- irq_reg_writel(d->hwirq, gc->reg_base + AT91_AIC5_SSR); +- irq_reg_writel(1, gc->reg_base + AT91_AIC5_IECR); ++ irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR); ++ irq_reg_writel(gc, 1, AT91_AIC5_IECR); + gc->mask_cache |= d->mask; + irq_gc_unlock(gc); + } +@@ -120,8 +120,8 @@ static int aic5_retrigger(struct irq_data *d) + + /* Enable interrupt on AIC5 */ + irq_gc_lock(gc); +- irq_reg_writel(d->hwirq, gc->reg_base + AT91_AIC5_SSR); +- irq_reg_writel(1, gc->reg_base + AT91_AIC5_ISCR); ++ irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR); ++ irq_reg_writel(gc, 1, AT91_AIC5_ISCR); + irq_gc_unlock(gc); + + return 0; +@@ -136,11 +136,11 @@ static int aic5_set_type(struct irq_data *d, unsigned type) + int ret; + + irq_gc_lock(gc); +- irq_reg_writel(d->hwirq, gc->reg_base + AT91_AIC5_SSR); +- smr = irq_reg_readl(gc->reg_base + AT91_AIC5_SMR); ++ irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR); ++ smr = irq_reg_readl(gc, AT91_AIC5_SMR); + ret = aic_common_set_type(d, type, &smr); + if (!ret) +- irq_reg_writel(smr, gc->reg_base + AT91_AIC5_SMR); ++ irq_reg_writel(gc, smr, AT91_AIC5_SMR); + irq_gc_unlock(gc); + + 
return ret; +@@ -162,12 +162,11 @@ static void aic5_suspend(struct irq_data *d) + if ((mask & gc->mask_cache) == (mask & gc->wake_active)) + continue; + +- irq_reg_writel(i + gc->irq_base, +- bgc->reg_base + AT91_AIC5_SSR); ++ irq_reg_writel(bgc, i + gc->irq_base, AT91_AIC5_SSR); + if (mask & gc->wake_active) +- irq_reg_writel(1, bgc->reg_base + AT91_AIC5_IECR); ++ irq_reg_writel(bgc, 1, AT91_AIC5_IECR); + else +- irq_reg_writel(1, bgc->reg_base + AT91_AIC5_IDCR); ++ irq_reg_writel(bgc, 1, AT91_AIC5_IDCR); + } + irq_gc_unlock(bgc); + } +@@ -187,12 +186,11 @@ static void aic5_resume(struct irq_data *d) + if ((mask & gc->mask_cache) == (mask & gc->wake_active)) + continue; + +- irq_reg_writel(i + gc->irq_base, +- bgc->reg_base + AT91_AIC5_SSR); ++ irq_reg_writel(bgc, i + gc->irq_base, AT91_AIC5_SSR); + if (mask & gc->mask_cache) +- irq_reg_writel(1, bgc->reg_base + AT91_AIC5_IECR); ++ irq_reg_writel(bgc, 1, AT91_AIC5_IECR); + else +- irq_reg_writel(1, bgc->reg_base + AT91_AIC5_IDCR); ++ irq_reg_writel(bgc, 1, AT91_AIC5_IDCR); + } + irq_gc_unlock(bgc); + } +@@ -207,10 +205,9 @@ static void aic5_pm_shutdown(struct irq_data *d) + + irq_gc_lock(bgc); + for (i = 0; i < dgc->irqs_per_chip; i++) { +- irq_reg_writel(i + gc->irq_base, +- bgc->reg_base + AT91_AIC5_SSR); +- irq_reg_writel(1, bgc->reg_base + AT91_AIC5_IDCR); +- irq_reg_writel(1, bgc->reg_base + AT91_AIC5_ICCR); ++ irq_reg_writel(bgc, i + gc->irq_base, AT91_AIC5_SSR); ++ irq_reg_writel(bgc, 1, AT91_AIC5_IDCR); ++ irq_reg_writel(bgc, 1, AT91_AIC5_ICCR); + } + irq_gc_unlock(bgc); + } +@@ -230,24 +227,24 @@ static void __init aic5_hw_init(struct irq_domain *domain) + * will not Lock out nIRQ + */ + for (i = 0; i < 8; i++) +- irq_reg_writel(0, gc->reg_base + AT91_AIC5_EOICR); ++ irq_reg_writel(gc, 0, AT91_AIC5_EOICR); + + /* + * Spurious Interrupt ID in Spurious Vector Register. + * When there is no current interrupt, the IRQ Vector Register + * reads the value stored in AIC_SPU + */ +- irq_reg_writel(0xffffffff, gc->reg_base + AT91_AIC5_SPU); ++ irq_reg_writel(gc, 0xffffffff, AT91_AIC5_SPU); + + /* No debugging in AIC: Debug (Protect) Control Register */ +- irq_reg_writel(0, gc->reg_base + AT91_AIC5_DCR); ++ irq_reg_writel(gc, 0, AT91_AIC5_DCR); + + /* Disable and clear all interrupts initially */ + for (i = 0; i < domain->revmap_size; i++) { +- irq_reg_writel(i, gc->reg_base + AT91_AIC5_SSR); +- irq_reg_writel(i, gc->reg_base + AT91_AIC5_SVR); +- irq_reg_writel(1, gc->reg_base + AT91_AIC5_IDCR); +- irq_reg_writel(1, gc->reg_base + AT91_AIC5_ICCR); ++ irq_reg_writel(gc, i, AT91_AIC5_SSR); ++ irq_reg_writel(gc, i, AT91_AIC5_SVR); ++ irq_reg_writel(gc, 1, AT91_AIC5_IDCR); ++ irq_reg_writel(gc, 1, AT91_AIC5_ICCR); + } + } + +@@ -273,11 +270,11 @@ static int aic5_irq_domain_xlate(struct irq_domain *d, + gc = dgc->gc[0]; + + irq_gc_lock(gc); +- irq_reg_writel(*out_hwirq, gc->reg_base + AT91_AIC5_SSR); +- smr = irq_reg_readl(gc->reg_base + AT91_AIC5_SMR); ++ irq_reg_writel(gc, *out_hwirq, AT91_AIC5_SSR); ++ smr = irq_reg_readl(gc, AT91_AIC5_SMR); + ret = aic_common_set_priority(intspec[2], &smr); + if (!ret) +- irq_reg_writel(intspec[2] | smr, gc->reg_base + AT91_AIC5_SMR); ++ irq_reg_writel(gc, intspec[2] | smr, AT91_AIC5_SMR); + irq_gc_unlock(gc); + + return ret; +diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c +new file mode 100644 +index 0000000..43c50ed +--- /dev/null ++++ b/drivers/irqchip/irq-gic-v3-its.c +@@ -0,0 +1,1628 @@ ++/* ++ * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved. 
++ * Author: Marc Zyngier ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program. If not, see . ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++ ++#include ++#include ++#include ++ ++#include "irqchip.h" ++ ++#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1 << 0) ++ ++#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0) ++ ++/* ++ * Collection structure - just an ID, and a redistributor address to ++ * ping. We use one per CPU as a bag of interrupts assigned to this ++ * CPU. ++ */ ++struct its_collection { ++ u64 target_address; ++ u16 col_id; ++}; ++ ++/* ++ * The ITS structure - contains most of the infrastructure, with the ++ * msi_controller, the command queue, the collections, and the list of ++ * devices writing to it. ++ */ ++struct its_node { ++ raw_spinlock_t lock; ++ struct list_head entry; ++ struct msi_controller msi_chip; ++ struct irq_domain *domain; ++ void __iomem *base; ++ unsigned long phys_base; ++ struct its_cmd_block *cmd_base; ++ struct its_cmd_block *cmd_write; ++ void *tables[GITS_BASER_NR_REGS]; ++ struct its_collection *collections; ++ struct list_head its_device_list; ++ u64 flags; ++ u32 ite_size; ++}; ++ ++#define ITS_ITT_ALIGN SZ_256 ++ ++struct event_lpi_map { ++ unsigned long *lpi_map; ++ u16 *col_map; ++ irq_hw_number_t lpi_base; ++ int nr_lpis; ++}; ++ ++/* ++ * The ITS view of a device - belongs to an ITS, a collection, owns an ++ * interrupt translation table, and a list of interrupts. ++ */ ++struct its_device { ++ struct list_head entry; ++ struct its_node *its; ++ struct event_lpi_map event_map; ++ void *itt; ++ u32 nr_ites; ++ u32 device_id; ++}; ++ ++static LIST_HEAD(its_nodes); ++static DEFINE_SPINLOCK(its_lock); ++static struct device_node *gic_root_node; ++static struct rdists *gic_rdists; ++ ++#define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist)) ++#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base) ++ ++static struct its_collection *dev_event_to_col(struct its_device *its_dev, ++ u32 event) ++{ ++ struct its_node *its = its_dev->its; ++ ++ return its->collections + its_dev->event_map.col_map[event]; ++} ++ ++/* ++ * ITS command descriptors - parameters to be encoded in a command ++ * block. 
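++ * Each its_build_*_cmd() helper below encodes one of these into a
++ * 32-byte its_cmd_block (four u64s) using the its_encode_*()
++ * accessors; MAPD, for instance, packs the device ID, the ITT
++ * address and the ITT size.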
++ */ ++struct its_cmd_desc { ++ union { ++ struct { ++ struct its_device *dev; ++ u32 event_id; ++ } its_inv_cmd; ++ ++ struct { ++ struct its_device *dev; ++ u32 event_id; ++ } its_int_cmd; ++ ++ struct { ++ struct its_device *dev; ++ int valid; ++ } its_mapd_cmd; ++ ++ struct { ++ struct its_collection *col; ++ int valid; ++ } its_mapc_cmd; ++ ++ struct { ++ struct its_device *dev; ++ u32 phys_id; ++ u32 event_id; ++ } its_mapvi_cmd; ++ ++ struct { ++ struct its_device *dev; ++ struct its_collection *col; ++ u32 event_id; ++ } its_movi_cmd; ++ ++ struct { ++ struct its_device *dev; ++ u32 event_id; ++ } its_discard_cmd; ++ ++ struct { ++ struct its_collection *col; ++ } its_invall_cmd; ++ }; ++}; ++ ++/* ++ * The ITS command block, which is what the ITS actually parses. ++ */ ++struct its_cmd_block { ++ u64 raw_cmd[4]; ++}; ++ ++#define ITS_CMD_QUEUE_SZ SZ_64K ++#define ITS_CMD_QUEUE_NR_ENTRIES (ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block)) ++ ++typedef struct its_collection *(*its_cmd_builder_t)(struct its_cmd_block *, ++ struct its_cmd_desc *); ++ ++static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr) ++{ ++ cmd->raw_cmd[0] &= ~0xffUL; ++ cmd->raw_cmd[0] |= cmd_nr; ++} ++ ++static void its_encode_devid(struct its_cmd_block *cmd, u32 devid) ++{ ++ cmd->raw_cmd[0] &= BIT_ULL(32) - 1; ++ cmd->raw_cmd[0] |= ((u64)devid) << 32; ++} ++ ++static void its_encode_event_id(struct its_cmd_block *cmd, u32 id) ++{ ++ cmd->raw_cmd[1] &= ~0xffffffffUL; ++ cmd->raw_cmd[1] |= id; ++} ++ ++static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id) ++{ ++ cmd->raw_cmd[1] &= 0xffffffffUL; ++ cmd->raw_cmd[1] |= ((u64)phys_id) << 32; ++} ++ ++static void its_encode_size(struct its_cmd_block *cmd, u8 size) ++{ ++ cmd->raw_cmd[1] &= ~0x1fUL; ++ cmd->raw_cmd[1] |= size & 0x1f; ++} ++ ++static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr) ++{ ++ cmd->raw_cmd[2] &= ~0xffffffffffffUL; ++ cmd->raw_cmd[2] |= itt_addr & 0xffffffffff00UL; ++} ++ ++static void its_encode_valid(struct its_cmd_block *cmd, int valid) ++{ ++ cmd->raw_cmd[2] &= ~(1UL << 63); ++ cmd->raw_cmd[2] |= ((u64)!!valid) << 63; ++} ++ ++static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr) ++{ ++ cmd->raw_cmd[2] &= ~(0xffffffffUL << 16); ++ cmd->raw_cmd[2] |= (target_addr & (0xffffffffUL << 16)); ++} ++ ++static void its_encode_collection(struct its_cmd_block *cmd, u16 col) ++{ ++ cmd->raw_cmd[2] &= ~0xffffUL; ++ cmd->raw_cmd[2] |= col; ++} ++ ++static inline void its_fixup_cmd(struct its_cmd_block *cmd) ++{ ++ /* Let's fixup BE commands */ ++ cmd->raw_cmd[0] = cpu_to_le64(cmd->raw_cmd[0]); ++ cmd->raw_cmd[1] = cpu_to_le64(cmd->raw_cmd[1]); ++ cmd->raw_cmd[2] = cpu_to_le64(cmd->raw_cmd[2]); ++ cmd->raw_cmd[3] = cpu_to_le64(cmd->raw_cmd[3]); ++} ++ ++static struct its_collection *its_build_mapd_cmd(struct its_cmd_block *cmd, ++ struct its_cmd_desc *desc) ++{ ++ unsigned long itt_addr; ++ u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites); ++ ++ itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt); ++ itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN); ++ ++ its_encode_cmd(cmd, GITS_CMD_MAPD); ++ its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id); ++ its_encode_size(cmd, size - 1); ++ its_encode_itt(cmd, itt_addr); ++ its_encode_valid(cmd, desc->its_mapd_cmd.valid); ++ ++ its_fixup_cmd(cmd); ++ ++ return NULL; ++} ++ ++static struct its_collection *its_build_mapc_cmd(struct its_cmd_block *cmd, ++ struct its_cmd_desc *desc) ++{ ++ its_encode_cmd(cmd, GITS_CMD_MAPC); ++ its_encode_collection(cmd, 
desc->its_mapc_cmd.col->col_id); ++ its_encode_target(cmd, desc->its_mapc_cmd.col->target_address); ++ its_encode_valid(cmd, desc->its_mapc_cmd.valid); ++ ++ its_fixup_cmd(cmd); ++ ++ return desc->its_mapc_cmd.col; ++} ++ ++static struct its_collection *its_build_mapvi_cmd(struct its_cmd_block *cmd, ++ struct its_cmd_desc *desc) ++{ ++ struct its_collection *col; ++ ++ col = dev_event_to_col(desc->its_mapvi_cmd.dev, ++ desc->its_mapvi_cmd.event_id); ++ ++ its_encode_cmd(cmd, GITS_CMD_MAPVI); ++ its_encode_devid(cmd, desc->its_mapvi_cmd.dev->device_id); ++ its_encode_event_id(cmd, desc->its_mapvi_cmd.event_id); ++ its_encode_phys_id(cmd, desc->its_mapvi_cmd.phys_id); ++ its_encode_collection(cmd, col->col_id); ++ ++ its_fixup_cmd(cmd); ++ ++ return col; ++} ++ ++static struct its_collection *its_build_movi_cmd(struct its_cmd_block *cmd, ++ struct its_cmd_desc *desc) ++{ ++ struct its_collection *col; ++ ++ col = dev_event_to_col(desc->its_movi_cmd.dev, ++ desc->its_movi_cmd.event_id); ++ ++ its_encode_cmd(cmd, GITS_CMD_MOVI); ++ its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id); ++ its_encode_event_id(cmd, desc->its_movi_cmd.event_id); ++ its_encode_collection(cmd, desc->its_movi_cmd.col->col_id); ++ ++ its_fixup_cmd(cmd); ++ ++ return col; ++} ++ ++static struct its_collection *its_build_discard_cmd(struct its_cmd_block *cmd, ++ struct its_cmd_desc *desc) ++{ ++ struct its_collection *col; ++ ++ col = dev_event_to_col(desc->its_discard_cmd.dev, ++ desc->its_discard_cmd.event_id); ++ ++ its_encode_cmd(cmd, GITS_CMD_DISCARD); ++ its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id); ++ its_encode_event_id(cmd, desc->its_discard_cmd.event_id); ++ ++ its_fixup_cmd(cmd); ++ ++ return col; ++} ++ ++static struct its_collection *its_build_inv_cmd(struct its_cmd_block *cmd, ++ struct its_cmd_desc *desc) ++{ ++ struct its_collection *col; ++ ++ col = dev_event_to_col(desc->its_inv_cmd.dev, ++ desc->its_inv_cmd.event_id); ++ ++ its_encode_cmd(cmd, GITS_CMD_INV); ++ its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id); ++ its_encode_event_id(cmd, desc->its_inv_cmd.event_id); ++ ++ its_fixup_cmd(cmd); ++ ++ return col; ++} ++ ++static struct its_collection *its_build_invall_cmd(struct its_cmd_block *cmd, ++ struct its_cmd_desc *desc) ++{ ++ its_encode_cmd(cmd, GITS_CMD_INVALL); ++ its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id); ++ ++ its_fixup_cmd(cmd); ++ ++ return NULL; ++} ++ ++static u64 its_cmd_ptr_to_offset(struct its_node *its, ++ struct its_cmd_block *ptr) ++{ ++ return (ptr - its->cmd_base) * sizeof(*ptr); ++} ++ ++static int its_queue_full(struct its_node *its) ++{ ++ int widx; ++ int ridx; ++ ++ widx = its->cmd_write - its->cmd_base; ++ ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block); ++ ++ /* This is incredibly unlikely to happen, unless the ITS locks up. */ ++ if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx) ++ return 1; ++ ++ return 0; ++} ++ ++static struct its_cmd_block *its_allocate_entry(struct its_node *its) ++{ ++ struct its_cmd_block *cmd; ++ u32 count = 1000000; /* 1s! 
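++				   (i.e. a budget of one million polls,
++				   one microsecond apart, before we
++				   declare the queue stuck)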
*/
++
++	while (its_queue_full(its)) {
++		count--;
++		if (!count) {
++			pr_err_ratelimited("ITS queue not draining\n");
++			return NULL;
++		}
++		cpu_relax();
++		udelay(1);
++	}
++
++	cmd = its->cmd_write++;
++
++	/* Handle queue wrapping */
++	if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES))
++		its->cmd_write = its->cmd_base;
++
++	return cmd;
++}
++
++static struct its_cmd_block *its_post_commands(struct its_node *its)
++{
++	u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write);
++
++	writel_relaxed(wr, its->base + GITS_CWRITER);
++
++	return its->cmd_write;
++}
++
++static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
++{
++	/*
++	 * Make sure the commands written to memory are observable by
++	 * the ITS.
++	 */
++	if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING)
++		__flush_dcache_area(cmd, sizeof(*cmd));
++	else
++		dsb(ishst);
++}
++
++static void its_wait_for_range_completion(struct its_node *its,
++					  struct its_cmd_block *from,
++					  struct its_cmd_block *to)
++{
++	u64 rd_idx, from_idx, to_idx;
++	u32 count = 1000000;	/* 1s! */
++
++	from_idx = its_cmd_ptr_to_offset(its, from);
++	to_idx = its_cmd_ptr_to_offset(its, to);
++
++	while (1) {
++		rd_idx = readl_relaxed(its->base + GITS_CREADR);
++		if (rd_idx >= to_idx || rd_idx < from_idx)
++			break;
++
++		count--;
++		if (!count) {
++			pr_err_ratelimited("ITS queue timeout\n");
++			return;
++		}
++		cpu_relax();
++		udelay(1);
++	}
++}
++
++static void its_send_single_command(struct its_node *its,
++				    its_cmd_builder_t builder,
++				    struct its_cmd_desc *desc)
++{
++	struct its_cmd_block *cmd, *sync_cmd, *next_cmd;
++	struct its_collection *sync_col;
++	unsigned long flags;
++
++	raw_spin_lock_irqsave(&its->lock, flags);
++
++	cmd = its_allocate_entry(its);
++	if (!cmd) {		/* We're soooooo screwed... 
*/ ++ pr_err_ratelimited("ITS can't allocate, dropping command\n"); ++ raw_spin_unlock_irqrestore(&its->lock, flags); ++ return; ++ } ++ sync_col = builder(cmd, desc); ++ its_flush_cmd(its, cmd); ++ ++ if (sync_col) { ++ sync_cmd = its_allocate_entry(its); ++ if (!sync_cmd) { ++ pr_err_ratelimited("ITS can't SYNC, skipping\n"); ++ goto post; ++ } ++ its_encode_cmd(sync_cmd, GITS_CMD_SYNC); ++ its_encode_target(sync_cmd, sync_col->target_address); ++ its_fixup_cmd(sync_cmd); ++ its_flush_cmd(its, sync_cmd); ++ } ++ ++post: ++ next_cmd = its_post_commands(its); ++ raw_spin_unlock_irqrestore(&its->lock, flags); ++ ++ its_wait_for_range_completion(its, cmd, next_cmd); ++} ++ ++static void its_send_inv(struct its_device *dev, u32 event_id) ++{ ++ struct its_cmd_desc desc; ++ ++ desc.its_inv_cmd.dev = dev; ++ desc.its_inv_cmd.event_id = event_id; ++ ++ its_send_single_command(dev->its, its_build_inv_cmd, &desc); ++} ++ ++static void its_send_mapd(struct its_device *dev, int valid) ++{ ++ struct its_cmd_desc desc; ++ ++ desc.its_mapd_cmd.dev = dev; ++ desc.its_mapd_cmd.valid = !!valid; ++ ++ its_send_single_command(dev->its, its_build_mapd_cmd, &desc); ++} ++ ++static void its_send_mapc(struct its_node *its, struct its_collection *col, ++ int valid) ++{ ++ struct its_cmd_desc desc; ++ ++ desc.its_mapc_cmd.col = col; ++ desc.its_mapc_cmd.valid = !!valid; ++ ++ its_send_single_command(its, its_build_mapc_cmd, &desc); ++} ++ ++static void its_send_mapvi(struct its_device *dev, u32 irq_id, u32 id) ++{ ++ struct its_cmd_desc desc; ++ ++ desc.its_mapvi_cmd.dev = dev; ++ desc.its_mapvi_cmd.phys_id = irq_id; ++ desc.its_mapvi_cmd.event_id = id; ++ ++ its_send_single_command(dev->its, its_build_mapvi_cmd, &desc); ++} ++ ++static void its_send_movi(struct its_device *dev, ++ struct its_collection *col, u32 id) ++{ ++ struct its_cmd_desc desc; ++ ++ desc.its_movi_cmd.dev = dev; ++ desc.its_movi_cmd.col = col; ++ desc.its_movi_cmd.event_id = id; ++ ++ its_send_single_command(dev->its, its_build_movi_cmd, &desc); ++} ++ ++static void its_send_discard(struct its_device *dev, u32 id) ++{ ++ struct its_cmd_desc desc; ++ ++ desc.its_discard_cmd.dev = dev; ++ desc.its_discard_cmd.event_id = id; ++ ++ its_send_single_command(dev->its, its_build_discard_cmd, &desc); ++} ++ ++static void its_send_invall(struct its_node *its, struct its_collection *col) ++{ ++ struct its_cmd_desc desc; ++ ++ desc.its_invall_cmd.col = col; ++ ++ its_send_single_command(its, its_build_invall_cmd, &desc); ++} ++ ++/* ++ * irqchip functions - assumes MSI, mostly. ++ */ ++ ++static inline u32 its_get_event_id(struct irq_data *d) ++{ ++ struct its_device *its_dev = irq_data_get_irq_chip_data(d); ++ return d->hwirq - its_dev->event_map.lpi_base; ++} ++ ++static void lpi_set_config(struct irq_data *d, bool enable) ++{ ++ struct its_device *its_dev = irq_data_get_irq_chip_data(d); ++ irq_hw_number_t hwirq = d->hwirq; ++ u32 id = its_get_event_id(d); ++ u8 *cfg = page_address(gic_rdists->prop_page) + hwirq - 8192; ++ ++ if (enable) ++ *cfg |= LPI_PROP_ENABLED; ++ else ++ *cfg &= ~LPI_PROP_ENABLED; ++ ++ /* ++ * Make the above write visible to the redistributors. ++ * And yes, we're flushing exactly: One. Single. Byte. ++ * Humpf... 
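++	 * (The property table holds one configuration byte per LPI,
++	 * so LPI N lives at offset N - 8192; the INV command sent
++	 * below makes the ITS reload that single entry.)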
++ */ ++ if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING) ++ __flush_dcache_area(cfg, sizeof(*cfg)); ++ else ++ dsb(ishst); ++ its_send_inv(its_dev, id); ++} ++ ++static void its_mask_irq(struct irq_data *d) ++{ ++ lpi_set_config(d, false); ++} ++ ++static void its_unmask_irq(struct irq_data *d) ++{ ++ lpi_set_config(d, true); ++} ++ ++static void its_eoi_irq(struct irq_data *d) ++{ ++ gic_write_eoir(d->hwirq); ++} ++ ++static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val, ++ bool force) ++{ ++ unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask); ++ struct its_device *its_dev = irq_data_get_irq_chip_data(d); ++ struct its_collection *target_col; ++ u32 id = its_get_event_id(d); ++ ++ if (cpu >= nr_cpu_ids) ++ return -EINVAL; ++ ++ target_col = &its_dev->its->collections[cpu]; ++ its_send_movi(its_dev, target_col, id); ++ its_dev->event_map.col_map[id] = cpu; ++ ++ return IRQ_SET_MASK_OK_DONE; ++} ++ ++static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg) ++{ ++ struct its_device *its_dev = irq_data_get_irq_chip_data(d); ++ struct its_node *its; ++ u64 addr; ++ ++ its = its_dev->its; ++ addr = its->phys_base + GITS_TRANSLATER; ++ ++ msg->address_lo = addr & ((1UL << 32) - 1); ++ msg->address_hi = addr >> 32; ++ msg->data = its_get_event_id(d); ++} ++ ++static struct irq_chip its_irq_chip = { ++ .name = "ITS", ++ .irq_mask = its_mask_irq, ++ .irq_unmask = its_unmask_irq, ++ .irq_eoi = its_eoi_irq, ++ .irq_set_affinity = its_set_affinity, ++ .irq_compose_msi_msg = its_irq_compose_msi_msg, ++}; ++ ++static void its_mask_msi_irq(struct irq_data *d) ++{ ++ pci_msi_mask_irq(d); ++ irq_chip_mask_parent(d); ++} ++ ++static void its_unmask_msi_irq(struct irq_data *d) ++{ ++ pci_msi_unmask_irq(d); ++ irq_chip_unmask_parent(d); ++} ++ ++static struct irq_chip its_msi_irq_chip = { ++ .name = "ITS-MSI", ++ .irq_unmask = its_unmask_msi_irq, ++ .irq_mask = its_mask_msi_irq, ++ .irq_eoi = irq_chip_eoi_parent, ++ .irq_write_msi_msg = pci_msi_domain_write_msg, ++}; ++ ++/* ++ * How we allocate LPIs: ++ * ++ * The GIC has id_bits bits for interrupt identifiers. From there, we ++ * must subtract 8192 which are reserved for SGIs/PPIs/SPIs. Then, as ++ * we allocate LPIs by chunks of 32, we can shift the whole thing by 5 ++ * bits to the right. ++ * ++ * This gives us (((1UL << id_bits) - 8192) >> 5) possible allocations. 
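++ *
++ * For example, with id_bits = 16 (a value assumed here just for
++ * illustration), that is (65536 - 8192) >> 5 = 1792 chunks of
++ * 32 LPIs each.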
++ */ ++#define IRQS_PER_CHUNK_SHIFT 5 ++#define IRQS_PER_CHUNK (1 << IRQS_PER_CHUNK_SHIFT) ++ ++static unsigned long *lpi_bitmap; ++static u32 lpi_chunks; ++static DEFINE_SPINLOCK(lpi_lock); ++ ++static int its_lpi_to_chunk(int lpi) ++{ ++ return (lpi - 8192) >> IRQS_PER_CHUNK_SHIFT; ++} ++ ++static int its_chunk_to_lpi(int chunk) ++{ ++ return (chunk << IRQS_PER_CHUNK_SHIFT) + 8192; ++} ++ ++static int its_lpi_init(u32 id_bits) ++{ ++ lpi_chunks = its_lpi_to_chunk(1UL << id_bits); ++ ++ lpi_bitmap = kzalloc(BITS_TO_LONGS(lpi_chunks) * sizeof(long), ++ GFP_KERNEL); ++ if (!lpi_bitmap) { ++ lpi_chunks = 0; ++ return -ENOMEM; ++ } ++ ++ pr_info("ITS: Allocated %d chunks for LPIs\n", (int)lpi_chunks); ++ return 0; ++} ++ ++static unsigned long *its_lpi_alloc_chunks(int nr_irqs, int *base, int *nr_ids) ++{ ++ unsigned long *bitmap = NULL; ++ int chunk_id; ++ int nr_chunks; ++ int i; ++ ++ nr_chunks = DIV_ROUND_UP(nr_irqs, IRQS_PER_CHUNK); ++ ++ spin_lock(&lpi_lock); ++ ++ do { ++ chunk_id = bitmap_find_next_zero_area(lpi_bitmap, lpi_chunks, ++ 0, nr_chunks, 0); ++ if (chunk_id < lpi_chunks) ++ break; ++ ++ nr_chunks--; ++ } while (nr_chunks > 0); ++ ++ if (!nr_chunks) ++ goto out; ++ ++ bitmap = kzalloc(BITS_TO_LONGS(nr_chunks * IRQS_PER_CHUNK) * sizeof (long), ++ GFP_ATOMIC); ++ if (!bitmap) ++ goto out; ++ ++ for (i = 0; i < nr_chunks; i++) ++ set_bit(chunk_id + i, lpi_bitmap); ++ ++ *base = its_chunk_to_lpi(chunk_id); ++ *nr_ids = nr_chunks * IRQS_PER_CHUNK; ++ ++out: ++ spin_unlock(&lpi_lock); ++ ++ if (!bitmap) ++ *base = *nr_ids = 0; ++ ++ return bitmap; ++} ++ ++static void its_lpi_free(struct event_lpi_map *map) ++{ ++ int base = map->lpi_base; ++ int nr_ids = map->nr_lpis; ++ int lpi; ++ ++ spin_lock(&lpi_lock); ++ ++ for (lpi = base; lpi < (base + nr_ids); lpi += IRQS_PER_CHUNK) { ++ int chunk = its_lpi_to_chunk(lpi); ++ BUG_ON(chunk > lpi_chunks); ++ if (test_bit(chunk, lpi_bitmap)) { ++ clear_bit(chunk, lpi_bitmap); ++ } else { ++ pr_err("Bad LPI chunk %d\n", chunk); ++ } ++ } ++ ++ spin_unlock(&lpi_lock); ++ ++ kfree(map->lpi_map); ++ kfree(map->col_map); ++} ++ ++/* ++ * We allocate 64kB for PROPBASE. That gives us at most 64K LPIs to ++ * deal with (one configuration byte per interrupt). PENDBASE has to ++ * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI). ++ */ ++#define LPI_PROPBASE_SZ SZ_64K ++#define LPI_PENDBASE_SZ (LPI_PROPBASE_SZ / 8 + SZ_1K) ++ ++/* ++ * This is how many bits of ID we need, including the useless ones. 
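++ * With the 64kB PROPBASE above, this is ilog2(64K + 8K) =
++ * ilog2(73728) = 16 bits of ID: 65536 IDs in total, of which the
++ * first 8192 (SGI/PPI/SPI) never get a property byte, leaving
++ * 57344 LPI configuration bytes, comfortably within the 64kB table.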
++ */
++#define LPI_NRBITS		ilog2(LPI_PROPBASE_SZ + SZ_8K)
++
++#define LPI_PROP_DEFAULT_PRIO	0xa0
++
++static int __init its_alloc_lpi_tables(void)
++{
++	phys_addr_t paddr;
++
++	gic_rdists->prop_page = alloc_pages(GFP_NOWAIT,
++					   get_order(LPI_PROPBASE_SZ));
++	if (!gic_rdists->prop_page) {
++		pr_err("Failed to allocate PROPBASE\n");
++		return -ENOMEM;
++	}
++
++	paddr = page_to_phys(gic_rdists->prop_page);
++	pr_info("GIC: using LPI property table @%pa\n", &paddr);
++
++	/* Priority 0xa0, Group-1, disabled */
++	memset(page_address(gic_rdists->prop_page),
++	       LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1,
++	       LPI_PROPBASE_SZ);
++
++	/* Make sure the GIC will observe the written configuration */
++	__flush_dcache_area(page_address(gic_rdists->prop_page), LPI_PROPBASE_SZ);
++
++	return 0;
++}
++
++static const char *its_base_type_string[] = {
++	[GITS_BASER_TYPE_DEVICE]	= "Devices",
++	[GITS_BASER_TYPE_VCPU]		= "Virtual CPUs",
++	[GITS_BASER_TYPE_CPU]		= "Physical CPUs",
++	[GITS_BASER_TYPE_COLLECTION]	= "Interrupt Collections",
++	[GITS_BASER_TYPE_RESERVED5]	= "Reserved (5)",
++	[GITS_BASER_TYPE_RESERVED6]	= "Reserved (6)",
++	[GITS_BASER_TYPE_RESERVED7]	= "Reserved (7)",
++};
++
++static void its_free_tables(struct its_node *its)
++{
++	int i;
++
++	for (i = 0; i < GITS_BASER_NR_REGS; i++) {
++		if (its->tables[i]) {
++			free_page((unsigned long)its->tables[i]);
++			its->tables[i] = NULL;
++		}
++	}
++}
++
++static int its_alloc_tables(struct its_node *its)
++{
++	int err;
++	int i;
++	int psz = SZ_64K;
++	u64 shr = GITS_BASER_InnerShareable;
++	u64 cache = GITS_BASER_WaWb;
++
++	for (i = 0; i < GITS_BASER_NR_REGS; i++) {
++		u64 val = readq_relaxed(its->base + GITS_BASER + i * 8);
++		u64 type = GITS_BASER_TYPE(val);
++		u64 entry_size = GITS_BASER_ENTRY_SIZE(val);
++		int order = get_order(psz);
++		int alloc_size;
++		u64 tmp;
++		void *base;
++
++		if (type == GITS_BASER_TYPE_NONE)
++			continue;
++
++		/*
++		 * Allocate as many entries as required to fit the
++		 * range of device IDs that the ITS can grok... The ID
++		 * space being incredibly sparse, this results in a
++		 * massive waste of memory.
++		 *
++		 * For other tables, only allocate a single page.
++		 */
++		if (type == GITS_BASER_TYPE_DEVICE) {
++			u64 typer = readq_relaxed(its->base + GITS_TYPER);
++			u32 ids = GITS_TYPER_DEVBITS(typer);
++
++			/*
++			 * 'order' was initialized earlier to the default page
++			 * granule of the ITS. We can't have an allocation
++			 * smaller than that. If the requested allocation
++			 * is smaller, round up to the default page granule. 
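++			 *
++			 * As a worked example (all numbers assumed purely
++			 * for illustration): with entry_size = 8 and a
++			 * 20-bit DeviceID space, the table wants
++			 * (1 << 20) * 8 = 8MB; with 4kB pages and the
++			 * common MAX_ORDER of 11, that order is clamped,
++			 * the allocation shrinks to 4MB and the warning
++			 * below fires.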
++ */ ++ order = max(get_order((1UL << ids) * entry_size), ++ order); ++ if (order >= MAX_ORDER) { ++ order = MAX_ORDER - 1; ++ pr_warn("%s: Device Table too large, reduce its page order to %u\n", ++ its->msi_chip.of_node->full_name, order); ++ } ++ } ++ ++ alloc_size = (1 << order) * PAGE_SIZE; ++ base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order); ++ if (!base) { ++ err = -ENOMEM; ++ goto out_free; ++ } ++ ++ its->tables[i] = base; ++ ++retry_baser: ++ val = (virt_to_phys(base) | ++ (type << GITS_BASER_TYPE_SHIFT) | ++ ((entry_size - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) | ++ cache | ++ shr | ++ GITS_BASER_VALID); ++ ++ switch (psz) { ++ case SZ_4K: ++ val |= GITS_BASER_PAGE_SIZE_4K; ++ break; ++ case SZ_16K: ++ val |= GITS_BASER_PAGE_SIZE_16K; ++ break; ++ case SZ_64K: ++ val |= GITS_BASER_PAGE_SIZE_64K; ++ break; ++ } ++ ++ val |= (alloc_size / psz) - 1; ++ ++ writeq_relaxed(val, its->base + GITS_BASER + i * 8); ++ tmp = readq_relaxed(its->base + GITS_BASER + i * 8); ++ ++ if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) { ++ /* ++ * Shareability didn't stick. Just use ++ * whatever the read reported, which is likely ++ * to be the only thing this redistributor ++ * supports. If that's zero, make it ++ * non-cacheable as well. ++ */ ++ shr = tmp & GITS_BASER_SHAREABILITY_MASK; ++ if (!shr) { ++ cache = GITS_BASER_nC; ++ __flush_dcache_area(base, alloc_size); ++ } ++ goto retry_baser; ++ } ++ ++ if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) { ++ /* ++ * Page size didn't stick. Let's try a smaller ++ * size and retry. If we reach 4K, then ++ * something is horribly wrong... ++ */ ++ switch (psz) { ++ case SZ_16K: ++ psz = SZ_4K; ++ goto retry_baser; ++ case SZ_64K: ++ psz = SZ_16K; ++ goto retry_baser; ++ } ++ } ++ ++ if (val != tmp) { ++ pr_err("ITS: %s: GITS_BASER%d doesn't stick: %lx %lx\n", ++ its->msi_chip.of_node->full_name, i, ++ (unsigned long) val, (unsigned long) tmp); ++ err = -ENXIO; ++ goto out_free; ++ } ++ ++ pr_info("ITS: allocated %d %s @%lx (psz %dK, shr %d)\n", ++ (int)(alloc_size / entry_size), ++ its_base_type_string[type], ++ (unsigned long)virt_to_phys(base), ++ psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT); ++ } ++ ++ return 0; ++ ++out_free: ++ its_free_tables(its); ++ ++ return err; ++} ++ ++static int its_alloc_collections(struct its_node *its) ++{ ++ its->collections = kzalloc(nr_cpu_ids * sizeof(*its->collections), ++ GFP_KERNEL); ++ if (!its->collections) ++ return -ENOMEM; ++ ++ return 0; ++} ++ ++static void its_cpu_init_lpis(void) ++{ ++ void __iomem *rbase = gic_data_rdist_rd_base(); ++ struct page *pend_page; ++ u64 val, tmp; ++ ++ /* If we didn't allocate the pending table yet, do it now */ ++ pend_page = gic_data_rdist()->pend_page; ++ if (!pend_page) { ++ phys_addr_t paddr; ++ /* ++ * The pending pages have to be at least 64kB aligned, ++ * hence the 'max(LPI_PENDBASE_SZ, SZ_64K)' below. 
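++		 * (LPI_PENDBASE_SZ itself is only 64K/8 + 1K = 9kB of
++		 * pending bits, but the table must be 64kB aligned, so
++		 * a full 64kB block is allocated anyway.)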
++ */ ++ pend_page = alloc_pages(GFP_NOWAIT | __GFP_ZERO, ++ get_order(max(LPI_PENDBASE_SZ, SZ_64K))); ++ if (!pend_page) { ++ pr_err("Failed to allocate PENDBASE for CPU%d\n", ++ smp_processor_id()); ++ return; ++ } ++ ++ /* Make sure the GIC will observe the zero-ed page */ ++ __flush_dcache_area(page_address(pend_page), LPI_PENDBASE_SZ); ++ ++ paddr = page_to_phys(pend_page); ++ pr_info("CPU%d: using LPI pending table @%pa\n", ++ smp_processor_id(), &paddr); ++ gic_data_rdist()->pend_page = pend_page; ++ } ++ ++ /* Disable LPIs */ ++ val = readl_relaxed(rbase + GICR_CTLR); ++ val &= ~GICR_CTLR_ENABLE_LPIS; ++ writel_relaxed(val, rbase + GICR_CTLR); ++ ++ /* ++ * Make sure any change to the table is observable by the GIC. ++ */ ++ dsb(sy); ++ ++ /* set PROPBASE */ ++ val = (page_to_phys(gic_rdists->prop_page) | ++ GICR_PROPBASER_InnerShareable | ++ GICR_PROPBASER_WaWb | ++ ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK)); ++ ++ writeq_relaxed(val, rbase + GICR_PROPBASER); ++ tmp = readq_relaxed(rbase + GICR_PROPBASER); ++ ++ if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) { ++ if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) { ++ /* ++ * The HW reports non-shareable, we must ++ * remove the cacheability attributes as ++ * well. ++ */ ++ val &= ~(GICR_PROPBASER_SHAREABILITY_MASK | ++ GICR_PROPBASER_CACHEABILITY_MASK); ++ val |= GICR_PROPBASER_nC; ++ writeq_relaxed(val, rbase + GICR_PROPBASER); ++ } ++ pr_info_once("GIC: using cache flushing for LPI property table\n"); ++ gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING; ++ } ++ ++ /* set PENDBASE */ ++ val = (page_to_phys(pend_page) | ++ GICR_PENDBASER_InnerShareable | ++ GICR_PENDBASER_WaWb); ++ ++ writeq_relaxed(val, rbase + GICR_PENDBASER); ++ tmp = readq_relaxed(rbase + GICR_PENDBASER); ++ ++ if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) { ++ /* ++ * The HW reports non-shareable, we must remove the ++ * cacheability attributes as well. ++ */ ++ val &= ~(GICR_PENDBASER_SHAREABILITY_MASK | ++ GICR_PENDBASER_CACHEABILITY_MASK); ++ val |= GICR_PENDBASER_nC; ++ writeq_relaxed(val, rbase + GICR_PENDBASER); ++ } ++ ++ /* Enable LPIs */ ++ val = readl_relaxed(rbase + GICR_CTLR); ++ val |= GICR_CTLR_ENABLE_LPIS; ++ writel_relaxed(val, rbase + GICR_CTLR); ++ ++ /* Make sure the GIC has seen the above */ ++ dsb(sy); ++} ++ ++static void its_cpu_init_collection(void) ++{ ++ struct its_node *its; ++ int cpu; ++ ++ spin_lock(&its_lock); ++ cpu = smp_processor_id(); ++ ++ list_for_each_entry(its, &its_nodes, entry) { ++ u64 target; ++ ++ /* ++ * We now have to bind each collection to its target ++ * redistributor. ++ */ ++ if (readq_relaxed(its->base + GITS_TYPER) & GITS_TYPER_PTA) { ++ /* ++ * This ITS wants the physical address of the ++ * redistributor. ++ */ ++ target = gic_data_rdist()->phys_base; ++ } else { ++ /* ++ * This ITS wants a linear CPU number. 
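++			 * The target is then the 16-bit processor number
++			 * from GICR_TYPER, shifted left by 16 so that it
++			 * lands in the command's RDbase field (e.g.
++			 * processor 3 encodes as 0x30000).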
++ */ ++ target = readq_relaxed(gic_data_rdist_rd_base() + GICR_TYPER); ++ target = GICR_TYPER_CPU_NUMBER(target) << 16; ++ } ++ ++ /* Perform collection mapping */ ++ its->collections[cpu].target_address = target; ++ its->collections[cpu].col_id = cpu; ++ ++ its_send_mapc(its, &its->collections[cpu], 1); ++ its_send_invall(its, &its->collections[cpu]); ++ } ++ ++ spin_unlock(&its_lock); ++} ++ ++static struct its_device *its_find_device(struct its_node *its, u32 dev_id) ++{ ++ struct its_device *its_dev = NULL, *tmp; ++ unsigned long flags; ++ ++ raw_spin_lock_irqsave(&its->lock, flags); ++ ++ list_for_each_entry(tmp, &its->its_device_list, entry) { ++ if (tmp->device_id == dev_id) { ++ its_dev = tmp; ++ break; ++ } ++ } ++ ++ raw_spin_unlock_irqrestore(&its->lock, flags); ++ ++ return its_dev; ++} ++ ++static struct its_device *its_create_device(struct its_node *its, u32 dev_id, ++ int nvecs) ++{ ++ struct its_device *dev; ++ unsigned long *lpi_map; ++ unsigned long flags; ++ u16 *col_map = NULL; ++ void *itt; ++ int lpi_base; ++ int nr_lpis; ++ int nr_ites; ++ int sz; ++ ++ dev = kzalloc(sizeof(*dev), GFP_KERNEL); ++ /* ++ * At least one bit of EventID is being used, hence a minimum ++ * of two entries. No, the architecture doesn't let you ++ * express an ITT with a single entry. ++ */ ++ nr_ites = max(2UL, roundup_pow_of_two(nvecs)); ++ sz = nr_ites * its->ite_size; ++ sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; ++ itt = kzalloc(sz, GFP_KERNEL); ++ lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis); ++ if (lpi_map) ++ col_map = kzalloc(sizeof(*col_map) * nr_lpis, GFP_KERNEL); ++ ++ if (!dev || !itt || !lpi_map || !col_map) { ++ kfree(dev); ++ kfree(itt); ++ kfree(lpi_map); ++ kfree(col_map); ++ return NULL; ++ } ++ ++ __flush_dcache_area(itt, sz); ++ ++ dev->its = its; ++ dev->itt = itt; ++ dev->nr_ites = nr_ites; ++ dev->event_map.lpi_map = lpi_map; ++ dev->event_map.col_map = col_map; ++ dev->event_map.lpi_base = lpi_base; ++ dev->event_map.nr_lpis = nr_lpis; ++ dev->device_id = dev_id; ++ INIT_LIST_HEAD(&dev->entry); ++ ++ raw_spin_lock_irqsave(&its->lock, flags); ++ list_add(&dev->entry, &its->its_device_list); ++ raw_spin_unlock_irqrestore(&its->lock, flags); ++ ++ /* Map device to its ITT */ ++ its_send_mapd(dev, 1); ++ ++ return dev; ++} ++ ++static void its_free_device(struct its_device *its_dev) ++{ ++ unsigned long flags; ++ ++ raw_spin_lock_irqsave(&its_dev->its->lock, flags); ++ list_del(&its_dev->entry); ++ raw_spin_unlock_irqrestore(&its_dev->its->lock, flags); ++ kfree(its_dev->itt); ++ kfree(its_dev); ++} ++ ++static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq) ++{ ++ int idx; ++ ++ idx = find_first_zero_bit(dev->event_map.lpi_map, ++ dev->event_map.nr_lpis); ++ if (idx == dev->event_map.nr_lpis) ++ return -ENOSPC; ++ ++ *hwirq = dev->event_map.lpi_base + idx; ++ set_bit(idx, dev->event_map.lpi_map); ++ ++ return 0; ++} ++ ++struct its_pci_alias { ++ struct pci_dev *pdev; ++ u32 dev_id; ++ u32 count; ++}; ++ ++static int its_pci_msi_vec_count(struct pci_dev *pdev) ++{ ++ int msi, msix; ++ ++ msi = max(pci_msi_vec_count(pdev), 0); ++ msix = max(pci_msix_vec_count(pdev), 0); ++ ++ return max(msi, msix); ++} ++ ++static int its_get_pci_alias(struct pci_dev *pdev, u16 alias, void *data) ++{ ++ struct its_pci_alias *dev_alias = data; ++ ++ dev_alias->dev_id = alias; ++ if (pdev != dev_alias->pdev) ++ dev_alias->count += its_pci_msi_vec_count(dev_alias->pdev); ++ ++ return 0; ++} ++ ++int __its_msi_prepare(struct irq_domain *domain, 
u32 dev_id, ++ struct device *dev, int nvec, msi_alloc_info_t *info) ++{ ++ struct its_node *its; ++ struct its_device *its_dev; ++ ++ its = domain->parent->host_data; ++ ++ its_dev = its_find_device(its, dev_id); ++ if (its_dev) { ++ /* ++ * We already have seen this ID, probably through ++ * another alias (PCI bridge of some sort). No need to ++ * create the device. ++ */ ++ dev_dbg(dev, "Reusing ITT for devID %x\n", dev_id); ++ goto out; ++ } ++ ++ its_dev = its_create_device(its, dev_id, nvec); ++ if (!its_dev) ++ return -ENOMEM; ++ ++ dev_dbg(dev, "ITT %d entries, %d bits\n", ++ nvec, ilog2(nvec)); ++out: ++ info->scratchpad[0].ptr = its_dev; ++ info->scratchpad[1].ptr = dev; ++ ++ return 0; ++} ++ ++static int its_msi_prepare(struct irq_domain *domain, struct device *dev, ++ int nvec, msi_alloc_info_t *info) ++{ ++ struct pci_dev *pdev; ++ struct its_pci_alias dev_alias; ++ u32 dev_id; ++ ++ if (!dev_is_pci(dev)) ++ return -EINVAL; ++ ++ pdev = to_pci_dev(dev); ++ dev_alias.pdev = pdev; ++ dev_alias.count = nvec; ++ ++ pci_for_each_dma_alias(pdev, its_get_pci_alias, &dev_alias); ++ ++ dev_dbg(dev, "ITT %d entries, %d bits\n", nvec, ilog2(nvec)); ++ dev_id = PCI_DEVID(pdev->bus->number, pdev->devfn); ++ return __its_msi_prepare(domain->parent, dev_alias.dev_id, dev, dev_alias.count, info); ++} ++ ++static struct msi_domain_ops its_pci_msi_ops = { ++ .msi_prepare = its_msi_prepare, ++}; ++ ++static struct msi_domain_info its_pci_msi_domain_info = { ++ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | ++ MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX), ++ .ops = &its_pci_msi_ops, ++ .chip = &its_msi_irq_chip, ++}; ++ ++static int its_irq_gic_domain_alloc(struct irq_domain *domain, ++ unsigned int virq, ++ irq_hw_number_t hwirq) ++{ ++ struct of_phandle_args args; ++ ++ args.np = domain->parent->of_node; ++ args.args_count = 3; ++ args.args[0] = GIC_IRQ_TYPE_LPI; ++ args.args[1] = hwirq; ++ args.args[2] = IRQ_TYPE_EDGE_RISING; ++ ++ return irq_domain_alloc_irqs_parent(domain, virq, 1, &args); ++} ++ ++static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, ++ unsigned int nr_irqs, void *args) ++{ ++ msi_alloc_info_t *info = args; ++ struct its_device *its_dev = info->scratchpad[0].ptr; ++ irq_hw_number_t hwirq; ++ int err; ++ int i; ++ ++ for (i = 0; i < nr_irqs; i++) { ++ err = its_alloc_device_irq(its_dev, &hwirq); ++ if (err) ++ return err; ++ ++ err = its_irq_gic_domain_alloc(domain, virq + i, hwirq); ++ if (err) ++ return err; ++ ++ irq_domain_set_hwirq_and_chip(domain, virq + i, ++ hwirq, &its_irq_chip, its_dev); ++ dev_dbg(info->scratchpad[1].ptr, "ID:%d pID:%d vID:%d\n", ++ (int)(hwirq - its_dev->event_map.lpi_base), ++ (int)hwirq, virq + i); ++ } ++ ++ return 0; ++} ++ ++static void its_irq_domain_activate(struct irq_domain *domain, ++ struct irq_data *d) ++{ ++ struct its_device *its_dev = irq_data_get_irq_chip_data(d); ++ u32 event = its_get_event_id(d); ++ ++ /* Bind the LPI to the first possible CPU */ ++ its_dev->event_map.col_map[event] = cpumask_first(cpu_online_mask); ++ ++ /* Map the GIC IRQ and event to the device */ ++ its_send_mapvi(its_dev, d->hwirq, event); ++} ++ ++static void its_irq_domain_deactivate(struct irq_domain *domain, ++ struct irq_data *d) ++{ ++ struct its_device *its_dev = irq_data_get_irq_chip_data(d); ++ u32 event = its_get_event_id(d); ++ ++ /* Stop the delivery of interrupts */ ++ its_send_discard(its_dev, event); ++} ++ ++static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq, ++ unsigned 
int nr_irqs) ++{ ++ struct irq_data *d = irq_domain_get_irq_data(domain, virq); ++ struct its_device *its_dev = irq_data_get_irq_chip_data(d); ++ int i; ++ ++ for (i = 0; i < nr_irqs; i++) { ++ struct irq_data *data = irq_domain_get_irq_data(domain, ++ virq + i); ++ u32 event = its_get_event_id(data); ++ ++ /* Mark interrupt index as unused */ ++ clear_bit(event, its_dev->event_map.lpi_map); ++ ++ /* Nuke the entry in the domain */ ++ irq_domain_reset_irq_data(data); ++ } ++ ++ /* If all interrupts have been freed, start mopping the floor */ ++ if (bitmap_empty(its_dev->event_map.lpi_map, ++ its_dev->event_map.nr_lpis)) { ++ its_lpi_free(&its_dev->event_map); ++ ++ /* Unmap device/itt */ ++ its_send_mapd(its_dev, 0); ++ its_free_device(its_dev); ++ } ++ ++ irq_domain_free_irqs_parent(domain, virq, nr_irqs); ++} ++ ++static const struct irq_domain_ops its_domain_ops = { ++ .alloc = its_irq_domain_alloc, ++ .free = its_irq_domain_free, ++ .activate = its_irq_domain_activate, ++ .deactivate = its_irq_domain_deactivate, ++}; ++ ++static int its_force_quiescent(void __iomem *base) ++{ ++ u32 count = 1000000; /* 1s */ ++ u32 val; ++ ++ val = readl_relaxed(base + GITS_CTLR); ++ if (val & GITS_CTLR_QUIESCENT) ++ return 0; ++ ++ /* Disable the generation of all interrupts to this ITS */ ++ val &= ~GITS_CTLR_ENABLE; ++ writel_relaxed(val, base + GITS_CTLR); ++ ++ /* Poll GITS_CTLR and wait until ITS becomes quiescent */ ++ while (1) { ++ val = readl_relaxed(base + GITS_CTLR); ++ if (val & GITS_CTLR_QUIESCENT) ++ return 0; ++ ++ count--; ++ if (!count) ++ return -EBUSY; ++ ++ cpu_relax(); ++ udelay(1); ++ } ++} ++ ++static int its_probe(struct device_node *node, struct irq_domain *parent) ++{ ++ struct resource res; ++ struct its_node *its; ++ void __iomem *its_base; ++ u32 val; ++ u64 baser, tmp; ++ int err; ++ ++ err = of_address_to_resource(node, 0, &res); ++ if (err) { ++ pr_warn("%s: no regs?\n", node->full_name); ++ return -ENXIO; ++ } ++ ++ its_base = ioremap(res.start, resource_size(&res)); ++ if (!its_base) { ++ pr_warn("%s: unable to map registers\n", node->full_name); ++ return -ENOMEM; ++ } ++ ++ val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK; ++ if (val != 0x30 && val != 0x40) { ++ pr_warn("%s: no ITS detected, giving up\n", node->full_name); ++ err = -ENODEV; ++ goto out_unmap; ++ } ++ ++ err = its_force_quiescent(its_base); ++ if (err) { ++ pr_warn("%s: failed to quiesce, giving up\n", ++ node->full_name); ++ goto out_unmap; ++ } ++ ++ pr_info("ITS: %s\n", node->full_name); ++ ++ its = kzalloc(sizeof(*its), GFP_KERNEL); ++ if (!its) { ++ err = -ENOMEM; ++ goto out_unmap; ++ } ++ ++ raw_spin_lock_init(&its->lock); ++ INIT_LIST_HEAD(&its->entry); ++ INIT_LIST_HEAD(&its->its_device_list); ++ its->base = its_base; ++ its->phys_base = res.start; ++ its->msi_chip.of_node = node; ++ its->ite_size = ((readl_relaxed(its_base + GITS_TYPER) >> 4) & 0xf) + 1; ++ ++ its->cmd_base = kzalloc(ITS_CMD_QUEUE_SZ, GFP_KERNEL); ++ if (!its->cmd_base) { ++ err = -ENOMEM; ++ goto out_free_its; ++ } ++ its->cmd_write = its->cmd_base; ++ ++ err = its_alloc_tables(its); ++ if (err) ++ goto out_free_cmd; ++ ++ err = its_alloc_collections(its); ++ if (err) ++ goto out_free_tables; ++ ++ baser = (virt_to_phys(its->cmd_base) | ++ GITS_CBASER_WaWb | ++ GITS_CBASER_InnerShareable | ++ (ITS_CMD_QUEUE_SZ / SZ_4K - 1) | ++ GITS_CBASER_VALID); ++ ++ writeq_relaxed(baser, its->base + GITS_CBASER); ++ tmp = readq_relaxed(its->base + GITS_CBASER); ++ ++ if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) { 
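++		/*
++		 * The ITS is allowed to downgrade the shareability we
++		 * asked for, so compare what we wrote with what it
++		 * actually kept before picking a fallback.
++		 */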
++ if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) { ++ /* ++ * The HW reports non-shareable, we must ++ * remove the cacheability attributes as ++ * well. ++ */ ++ baser &= ~(GITS_CBASER_SHAREABILITY_MASK | ++ GITS_CBASER_CACHEABILITY_MASK); ++ baser |= GITS_CBASER_nC; ++ writeq_relaxed(baser, its->base + GITS_CBASER); ++ } ++ pr_info("ITS: using cache flushing for cmd queue\n"); ++ its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING; ++ } ++ ++ writeq_relaxed(0, its->base + GITS_CWRITER); ++ writel_relaxed(GITS_CTLR_ENABLE, its->base + GITS_CTLR); ++ ++ if (of_property_read_bool(its->msi_chip.of_node, "msi-controller")) { ++ its->domain = irq_domain_add_tree(NULL, &its_domain_ops, its); ++ if (!its->domain) { ++ err = -ENOMEM; ++ goto out_free_tables; ++ } ++ ++ its->domain->parent = parent; ++ ++ its->msi_chip.domain = pci_msi_create_irq_domain(node, ++ &its_pci_msi_domain_info, ++ its->domain); ++ if (!its->msi_chip.domain) { ++ err = -ENOMEM; ++ goto out_free_domains; ++ } ++ ++ err = of_pci_msi_chip_add(&its->msi_chip); ++ if (err) ++ goto out_free_domains; ++ } ++ ++ spin_lock(&its_lock); ++ list_add(&its->entry, &its_nodes); ++ spin_unlock(&its_lock); ++ ++ return 0; ++ ++out_free_domains: ++ if (its->msi_chip.domain) ++ irq_domain_remove(its->msi_chip.domain); ++ if (its->domain) ++ irq_domain_remove(its->domain); ++out_free_tables: ++ its_free_tables(its); ++out_free_cmd: ++ kfree(its->cmd_base); ++out_free_its: ++ kfree(its); ++out_unmap: ++ iounmap(its_base); ++ pr_err("ITS: failed probing %s (%d)\n", node->full_name, err); ++ return err; ++} ++ ++static bool gic_rdists_supports_plpis(void) ++{ ++ return !!(readl_relaxed(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS); ++} ++ ++int its_cpu_init(void) ++{ ++ if (!list_empty(&its_nodes)) { ++ if (!gic_rdists_supports_plpis()) { ++ pr_info("CPU%d: LPIs not supported\n", smp_processor_id()); ++ return -ENXIO; ++ } ++ its_cpu_init_lpis(); ++ its_cpu_init_collection(); ++ } ++ ++ return 0; ++} ++ ++static struct of_device_id its_device_id[] = { ++ { .compatible = "arm,gic-v3-its", }, ++ {}, ++}; ++ ++int its_init(struct device_node *node, struct rdists *rdists, ++ struct irq_domain *parent_domain) ++{ ++ struct device_node *np; ++ ++ for (np = of_find_matching_node(node, its_device_id); np; ++ np = of_find_matching_node(np, its_device_id)) { ++ its_probe(np, parent_domain); ++ } ++ ++ if (list_empty(&its_nodes)) { ++ pr_warn("ITS: No ITS available, not enabling LPIs\n"); ++ return -ENXIO; ++ } ++ ++ gic_rdists = rdists; ++ gic_root_node = node; ++ ++ its_alloc_lpi_tables(); ++ its_lpi_init(rdists->id_bits); ++ ++ return 0; ++} +diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c +index aa17ae8..34feda3 100644 +--- a/drivers/irqchip/irq-gic-v3.c ++++ b/drivers/irqchip/irq-gic-v3.c +@@ -34,20 +34,25 @@ + #include "irq-gic-common.h" + #include "irqchip.h" + ++struct redist_region { ++ void __iomem *redist_base; ++ phys_addr_t phys_base; ++}; ++ + struct gic_chip_data { + void __iomem *dist_base; +- void __iomem **redist_base; +- void __iomem * __percpu *rdist; ++ struct redist_region *redist_regions; ++ struct rdists rdists; + struct irq_domain *domain; + u64 redist_stride; +- u32 redist_regions; ++ u32 nr_redist_regions; + unsigned int irq_nr; + }; + + static struct gic_chip_data gic_data __read_mostly; + +-#define gic_data_rdist() (this_cpu_ptr(gic_data.rdist)) +-#define gic_data_rdist_rd_base() (*gic_data_rdist()) ++#define gic_data_rdist() (this_cpu_ptr(gic_data.rdists.rdist)) ++#define gic_data_rdist_rd_base() 
(gic_data_rdist()->rd_base) + #define gic_data_rdist_sgi_base() (gic_data_rdist_rd_base() + SZ_64K) + + /* Our default, arbitrary priority value. Linux only uses one anyway. */ +@@ -71,9 +76,6 @@ static inline void __iomem *gic_dist_base(struct irq_data *d) + if (d->hwirq <= 1023) /* SPI -> dist_base */ + return gic_data.dist_base; + +- if (d->hwirq >= 8192) +- BUG(); /* LPI Detected!!! */ +- + return NULL; + } + +@@ -271,11 +273,11 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs + do { + irqnr = gic_read_iar(); + +- if (likely(irqnr > 15 && irqnr < 1020)) { ++ if (likely(irqnr > 15 && irqnr < 1020) || irqnr >= 8192) { + int err; + err = handle_domain_irq(gic_data.domain, irqnr, regs); + if (err) { +- WARN_ONCE(true, "Unexpected SPI received!\n"); ++ WARN_ONCE(true, "Unexpected interrupt received!\n"); + gic_write_eoir(irqnr); + } + continue; +@@ -333,8 +335,8 @@ static int gic_populate_rdist(void) + MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 | + MPIDR_AFFINITY_LEVEL(mpidr, 0)); + +- for (i = 0; i < gic_data.redist_regions; i++) { +- void __iomem *ptr = gic_data.redist_base[i]; ++ for (i = 0; i < gic_data.nr_redist_regions; i++) { ++ void __iomem *ptr = gic_data.redist_regions[i].redist_base; + u32 reg; + + reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK; +@@ -347,10 +349,13 @@ static int gic_populate_rdist(void) + do { + typer = readq_relaxed(ptr + GICR_TYPER); + if ((typer >> 32) == aff) { ++ u64 offset = ptr - gic_data.redist_regions[i].redist_base; + gic_data_rdist_rd_base() = ptr; +- pr_info("CPU%d: found redistributor %llx @%p\n", ++ gic_data_rdist()->phys_base = gic_data.redist_regions[i].phys_base + offset; ++ pr_info("CPU%d: found redistributor %llx region %d:%pa\n", + smp_processor_id(), +- (unsigned long long)mpidr, ptr); ++ (unsigned long long)mpidr, ++ i, &gic_data_rdist()->phys_base); + return 0; + } + +@@ -385,6 +390,11 @@ static void gic_cpu_sys_reg_init(void) + gic_write_grpen1(1); + } + ++static int gic_dist_supports_lpis(void) ++{ ++ return !!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS); ++} ++ + static void gic_cpu_init(void) + { + void __iomem *rbase; +@@ -399,6 +409,10 @@ static void gic_cpu_init(void) + + gic_cpu_config(rbase, gic_redist_wait_for_rwp); + ++ /* Give LPIs a spin */ ++ if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis()) ++ its_cpu_init(); ++ + /* initialise system registers */ + gic_cpu_sys_reg_init(); + } +@@ -585,12 +599,21 @@ static struct irq_chip gic_chip = { + .irq_set_affinity = gic_set_affinity, + }; + ++#define GIC_ID_NR (1U << gic_data.rdists.id_bits) ++ + static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, + irq_hw_number_t hw) + { + /* SGIs are private to the core kernel */ + if (hw < 16) + return -EPERM; ++ /* Nothing here */ ++ if (hw >= gic_data.irq_nr && hw < 8192) ++ return -EPERM; ++ /* Off limits */ ++ if (hw >= GIC_ID_NR) ++ return -EPERM; ++ + /* PPIs */ + if (hw < 32) { + irq_set_percpu_devid(irq); +@@ -604,7 +627,15 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, + handle_fasteoi_irq); + set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); + } +- irq_set_chip_data(irq, d->host_data); ++ /* LPIs */ ++ if (hw >= 8192 && hw < GIC_ID_NR) { ++ if (!gic_dist_supports_lpis()) ++ return -EPERM; ++ irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data, ++ handle_fasteoi_irq, NULL, NULL); ++ set_irq_flags(irq, IRQF_VALID); ++ } ++ + return 0; + } + +@@ -625,6 +656,9 @@ static int gic_irq_domain_xlate(struct irq_domain *d, + 
case 1: /* PPI */ + *out_hwirq = intspec[1] + 16; + break; ++ case GIC_IRQ_TYPE_LPI: /* LPI */ ++ *out_hwirq = intspec[1]; ++ break; + default: + return -EINVAL; + } +@@ -641,9 +675,10 @@ static const struct irq_domain_ops gic_irq_domain_ops = { + static int __init gic_of_init(struct device_node *node, struct device_node *parent) + { + void __iomem *dist_base; +- void __iomem **redist_base; ++ struct redist_region *rdist_regs; + u64 redist_stride; +- u32 redist_regions; ++ u32 nr_redist_regions; ++ u32 typer; + u32 reg; + int gic_irqs; + int err; +@@ -664,54 +699,63 @@ static int __init gic_of_init(struct device_node *node, struct device_node *pare + goto out_unmap_dist; + } + +- if (of_property_read_u32(node, "#redistributor-regions", &redist_regions)) +- redist_regions = 1; ++ if (of_property_read_u32(node, "#redistributor-regions", &nr_redist_regions)) ++ nr_redist_regions = 1; + +- redist_base = kzalloc(sizeof(*redist_base) * redist_regions, GFP_KERNEL); +- if (!redist_base) { ++ rdist_regs = kzalloc(sizeof(*rdist_regs) * nr_redist_regions, GFP_KERNEL); ++ if (!rdist_regs) { + err = -ENOMEM; + goto out_unmap_dist; + } + +- for (i = 0; i < redist_regions; i++) { +- redist_base[i] = of_iomap(node, 1 + i); +- if (!redist_base[i]) { ++ for (i = 0; i < nr_redist_regions; i++) { ++ struct resource res; ++ int ret; ++ ++ ret = of_address_to_resource(node, 1 + i, &res); ++ rdist_regs[i].redist_base = of_iomap(node, 1 + i); ++ if (ret || !rdist_regs[i].redist_base) { + pr_err("%s: couldn't map region %d\n", + node->full_name, i); + err = -ENODEV; + goto out_unmap_rdist; + } ++ rdist_regs[i].phys_base = res.start; + } + + if (of_property_read_u64(node, "redistributor-stride", &redist_stride)) + redist_stride = 0; + + gic_data.dist_base = dist_base; +- gic_data.redist_base = redist_base; +- gic_data.redist_regions = redist_regions; ++ gic_data.redist_regions = rdist_regs; ++ gic_data.nr_redist_regions = nr_redist_regions; + gic_data.redist_stride = redist_stride; + + /* + * Find out how many interrupts are supported. 
+ * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI) + */ +- gic_irqs = readl_relaxed(gic_data.dist_base + GICD_TYPER) & 0x1f; +- gic_irqs = (gic_irqs + 1) * 32; ++ typer = readl_relaxed(gic_data.dist_base + GICD_TYPER); ++ gic_data.rdists.id_bits = GICD_TYPER_ID_BITS(typer); ++ gic_irqs = GICD_TYPER_IRQS(typer); + if (gic_irqs > 1020) + gic_irqs = 1020; + gic_data.irq_nr = gic_irqs; + + gic_data.domain = irq_domain_add_tree(node, &gic_irq_domain_ops, + &gic_data); +- gic_data.rdist = alloc_percpu(typeof(*gic_data.rdist)); ++ gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist)); + +- if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdist)) { ++ if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) { + err = -ENOMEM; + goto out_free; + } + + set_handle_irq(gic_handle_irq); + ++ if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis()) ++ its_init(node, &gic_data.rdists, gic_data.domain); ++ + gic_smp_init(); + gic_dist_init(); + gic_cpu_init(); +@@ -722,12 +766,12 @@ static int __init gic_of_init(struct device_node *node, struct device_node *pare + out_free: + if (gic_data.domain) + irq_domain_remove(gic_data.domain); +- free_percpu(gic_data.rdist); ++ free_percpu(gic_data.rdists.rdist); + out_unmap_rdist: +- for (i = 0; i < redist_regions; i++) +- if (redist_base[i]) +- iounmap(redist_base[i]); +- kfree(redist_base); ++ for (i = 0; i < nr_redist_regions; i++) ++ if (rdist_regs[i].redist_base) ++ iounmap(rdist_regs[i].redist_base); ++ kfree(rdist_regs); + out_unmap_dist: + iounmap(dist_base); + return err; +diff --git a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c +index eb9b59e..6b2b582 100644 +--- a/drivers/irqchip/irq-sunxi-nmi.c ++++ b/drivers/irqchip/irq-sunxi-nmi.c +@@ -50,12 +50,12 @@ static struct sunxi_sc_nmi_reg_offs sun6i_reg_offs = { + static inline void sunxi_sc_nmi_write(struct irq_chip_generic *gc, u32 off, + u32 val) + { +- irq_reg_writel(val, gc->reg_base + off); ++ irq_reg_writel(gc, val, off); + } + + static inline u32 sunxi_sc_nmi_read(struct irq_chip_generic *gc, u32 off) + { +- return irq_reg_readl(gc->reg_base + off); ++ return irq_reg_readl(gc, off); + } + + static void sunxi_sc_nmi_handle_irq(unsigned int irq, struct irq_desc *desc) +diff --git a/drivers/irqchip/irq-tb10x.c b/drivers/irqchip/irq-tb10x.c +index 7c44c99..accc200 100644 +--- a/drivers/irqchip/irq-tb10x.c ++++ b/drivers/irqchip/irq-tb10x.c +@@ -43,12 +43,12 @@ + static inline void ab_irqctl_writereg(struct irq_chip_generic *gc, u32 reg, + u32 val) + { +- irq_reg_writel(val, gc->reg_base + reg); ++ irq_reg_writel(gc, val, reg); + } + + static inline u32 ab_irqctl_readreg(struct irq_chip_generic *gc, u32 reg) + { +- return irq_reg_readl(gc->reg_base + reg); ++ return irq_reg_readl(gc, reg); + } + + static int tb10x_irq_set_type(struct irq_data *data, unsigned int flow_type) +diff --git a/drivers/of/device.c b/drivers/of/device.c +index 46d6c75..20c1332 100644 +--- a/drivers/of/device.c ++++ b/drivers/of/device.c +@@ -2,6 +2,9 @@ + #include + #include + #include ++#include ++#include ++#include + #include + #include + #include +@@ -66,6 +69,87 @@ int of_device_add(struct platform_device *ofdev) + return device_add(&ofdev->dev); + } + ++/** ++ * of_dma_configure - Setup DMA configuration ++ * @dev: Device to apply DMA configuration ++ * @np: Pointer to OF node having DMA configuration ++ * ++ * Try to get devices's DMA configuration from DT and update it ++ * accordingly. 
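++ *
++ * For instance (an illustrative binding, not part of this change),
++ * a parent bus node carrying
++ *	dma-ranges = <0x0 0x80000000 0x10000000>;
++ * says that device address 0x0 maps to CPU address 0x80000000 for
++ * 256MB, so with 4kB pages dev->dma_pfn_offset becomes
++ * PFN_DOWN(0x80000000) = 0x80000.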
++ * ++ * If platform code needs to use its own special DMA configuration, it ++ * can use a platform bus notifier and handle BUS_NOTIFY_ADD_DEVICE events ++ * to fix up DMA configuration. ++ */ ++void of_dma_configure(struct device *dev, struct device_node *np) ++{ ++ u64 dma_addr, paddr, size; ++ int ret; ++ bool coherent; ++ unsigned long offset; ++ struct iommu_ops *iommu; ++ ++ /* ++ * Set default coherent_dma_mask to 32 bit. Drivers are expected to ++ * setup the correct supported mask. ++ */ ++ if (!dev->coherent_dma_mask) ++ dev->coherent_dma_mask = DMA_BIT_MASK(32); ++ ++ /* ++ * Set it to coherent_dma_mask by default if the architecture ++ * code has not set it. ++ */ ++ if (!dev->dma_mask) ++ dev->dma_mask = &dev->coherent_dma_mask; ++ ++ ret = of_dma_get_range(np, &dma_addr, &paddr, &size); ++ if (ret < 0) { ++ dma_addr = offset = 0; ++ size = dev->coherent_dma_mask + 1; ++ } else { ++ offset = PFN_DOWN(paddr - dma_addr); ++ ++ /* ++ * Add a work around to treat the size as mask + 1 in case ++ * it is defined in DT as a mask. ++ */ ++ if (size & 1) { ++ dev_warn(dev, "Invalid size 0x%llx for dma-range\n", ++ size); ++ size = size + 1; ++ } ++ ++ if (!size) { ++ dev_err(dev, "Adjusted size 0x%llx invalid\n", size); ++ return; ++ } ++ dev_dbg(dev, "dma_pfn_offset(%#08lx)\n", offset); ++ } ++ ++ dev->dma_pfn_offset = offset; ++ ++ /* ++ * Limit coherent and dma mask based on size and default mask ++ * set by the driver. ++ */ ++ dev->coherent_dma_mask = min(dev->coherent_dma_mask, ++ DMA_BIT_MASK(ilog2(dma_addr + size))); ++ *dev->dma_mask = min((*dev->dma_mask), ++ DMA_BIT_MASK(ilog2(dma_addr + size))); ++ ++ coherent = of_dma_is_coherent(np); ++ dev_dbg(dev, "device is%sdma coherent\n", ++ coherent ? " " : " not "); ++ ++ iommu = of_iommu_configure(dev, np); ++ dev_dbg(dev, "device is%sbehind an iommu\n", ++ iommu ? " " : " not "); ++ ++ arch_setup_dma_ops(dev, dma_addr, size, iommu, coherent); ++} ++EXPORT_SYMBOL_GPL(of_dma_configure); ++ + int of_device_register(struct platform_device *pdev) + { + device_initialize(&pdev->dev); +diff --git a/drivers/of/irq.c b/drivers/of/irq.c +index b97363a..4419e62 100644 +--- a/drivers/of/irq.c ++++ b/drivers/of/irq.c +@@ -18,6 +18,7 @@ + * driver. + */ + ++#include + #include + #include + #include +@@ -576,3 +577,23 @@ err: + kfree(desc); + } + } ++ ++/** ++ * of_msi_configure - Set the msi_domain field of a device ++ * @dev: device structure to associate with an MSI irq domain ++ * @np: device node for that device ++ */ ++void of_msi_configure(struct device *dev, struct device_node *np) ++{ ++ struct device_node *msi_np; ++ struct irq_domain *d; ++ ++ msi_np = of_parse_phandle(np, "msi-parent", 0); ++ if (!msi_np) ++ return; ++ ++ d = irq_find_matching_host(msi_np, DOMAIN_BUS_PLATFORM_MSI); ++ if (!d) ++ d = irq_find_host(msi_np); ++ dev_set_msi_domain(dev, d); ++} +diff --git a/drivers/of/of_pci.c b/drivers/of/of_pci.c +index ecc5fa5..5751dc5 100644 +--- a/drivers/of/of_pci.c ++++ b/drivers/of/of_pci.c +@@ -2,6 +2,7 @@ + #include + #include + #include ++#include + #include + #include + +@@ -116,6 +117,26 @@ int of_get_pci_domain_nr(struct device_node *node) + } + EXPORT_SYMBOL_GPL(of_get_pci_domain_nr); + ++/** ++ * of_pci_dma_configure - Setup DMA configuration ++ * @dev: ptr to pci_dev struct of the PCI device ++ * ++ * Function to update PCI devices's DMA configuration using the same ++ * info from the OF node of host bridge's parent (if any). 
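++ *
++ * PCI devices usually have no DT node of their own, so the
++ * dma-ranges/dma-coherent properties are looked up on the node of
++ * the device hosting the bridge (typically the SoC interconnect).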
++ */ ++void of_pci_dma_configure(struct pci_dev *pci_dev) ++{ ++ struct device *dev = &pci_dev->dev; ++ struct device *bridge = pci_get_host_bridge_device(pci_dev); ++ ++ if (!bridge->parent) ++ return; ++ ++ of_dma_configure(dev, bridge->parent->of_node); ++ pci_put_host_bridge_device(bridge); ++} ++EXPORT_SYMBOL_GPL(of_pci_dma_configure); ++ + #if defined(CONFIG_OF_ADDRESS) + /** + * of_pci_get_host_bridge_resources - Parse PCI host bridge resources from DT +@@ -140,7 +161,7 @@ int of_pci_get_host_bridge_resources(struct device_node *dev, + unsigned char busno, unsigned char bus_max, + struct list_head *resources, resource_size_t *io_base) + { +- struct pci_host_bridge_window *window; ++ struct resource_entry *window; + struct resource *res; + struct resource *bus_range; + struct of_pci_range range; +@@ -226,10 +247,9 @@ int of_pci_get_host_bridge_resources(struct device_node *dev, + conversion_failed: + kfree(res); + parse_failed: +- list_for_each_entry(window, resources, list) ++ resource_list_for_each_entry(window, resources) + kfree(window->res); + pci_free_resource_list(resources); +- kfree(bus_range); + return err; + } + EXPORT_SYMBOL_GPL(of_pci_get_host_bridge_resources); +@@ -240,7 +260,7 @@ EXPORT_SYMBOL_GPL(of_pci_get_host_bridge_resources); + static LIST_HEAD(of_pci_msi_chip_list); + static DEFINE_MUTEX(of_pci_msi_chip_mutex); + +-int of_pci_msi_chip_add(struct msi_chip *chip) ++int of_pci_msi_chip_add(struct msi_controller *chip) + { + if (!of_property_read_bool(chip->of_node, "msi-controller")) + return -EINVAL; +@@ -253,7 +273,7 @@ int of_pci_msi_chip_add(struct msi_chip *chip) + } + EXPORT_SYMBOL_GPL(of_pci_msi_chip_add); + +-void of_pci_msi_chip_remove(struct msi_chip *chip) ++void of_pci_msi_chip_remove(struct msi_controller *chip) + { + mutex_lock(&of_pci_msi_chip_mutex); + list_del(&chip->list); +@@ -261,9 +281,9 @@ void of_pci_msi_chip_remove(struct msi_chip *chip) + } + EXPORT_SYMBOL_GPL(of_pci_msi_chip_remove); + +-struct msi_chip *of_pci_find_msi_chip_by_node(struct device_node *of_node) ++struct msi_controller *of_pci_find_msi_chip_by_node(struct device_node *of_node) + { +- struct msi_chip *c; ++ struct msi_controller *c; + + mutex_lock(&of_pci_msi_chip_mutex); + list_for_each_entry(c, &of_pci_msi_chip_list, list) { +diff --git a/drivers/of/platform.c b/drivers/of/platform.c +index 3b64d0b..8a002d6 100644 +--- a/drivers/of/platform.c ++++ b/drivers/of/platform.c +@@ -25,6 +25,7 @@ + + const struct of_device_id of_default_bus_match_table[] = { + { .compatible = "simple-bus", }, ++ { .compatible = "simple-mfd", }, + #ifdef CONFIG_ARM_AMBA + { .compatible = "arm,amba-bus", }, + #endif /* CONFIG_ARM_AMBA */ +@@ -138,7 +139,7 @@ struct platform_device *of_device_alloc(struct device_node *np, + } + + dev->dev.of_node = of_node_get(np); +- dev->dev.parent = parent; ++ dev->dev.parent = parent ? : &platform_bus; + + if (bus_id) + dev_set_name(&dev->dev, "%s", bus_id); +@@ -149,57 +150,9 @@ struct platform_device *of_device_alloc(struct device_node *np, + } + EXPORT_SYMBOL(of_device_alloc); + +-/** +- * of_dma_configure - Setup DMA configuration +- * @dev: Device to apply DMA configuration +- * +- * Try to get devices's DMA configuration from DT and update it +- * accordingly. +- * +- * In case if platform code need to use own special DMA configuration,it +- * can use Platform bus notifier and handle BUS_NOTIFY_ADD_DEVICE event +- * to fix up DMA configuration. 
+- */ +-static void of_dma_configure(struct device *dev) ++static void of_dma_deconfigure(struct device *dev) + { +- u64 dma_addr, paddr, size; +- int ret; +- +- /* +- * Set default dma-mask to 32 bit. Drivers are expected to setup +- * the correct supported dma_mask. +- */ +- dev->coherent_dma_mask = DMA_BIT_MASK(32); +- +- /* +- * Set it to coherent_dma_mask by default if the architecture +- * code has not set it. +- */ +- if (!dev->dma_mask) +- dev->dma_mask = &dev->coherent_dma_mask; +- +- /* +- * if dma-coherent property exist, call arch hook to setup +- * dma coherent operations. +- */ +- if (of_dma_is_coherent(dev->of_node)) { +- set_arch_dma_coherent_ops(dev); +- dev_dbg(dev, "device is dma coherent\n"); +- } +- +- /* +- * if dma-ranges property doesn't exist - just return else +- * setup the dma offset +- */ +- ret = of_dma_get_range(dev->of_node, &dma_addr, &paddr, &size); +- if (ret < 0) { +- dev_dbg(dev, "no dma range information to setup\n"); +- return; +- } +- +- /* DMA ranges found. Calculate and set dma_pfn_offset */ +- dev->dma_pfn_offset = PFN_DOWN(paddr - dma_addr); +- dev_dbg(dev, "dma_pfn_offset(%#08lx)\n", dev->dma_pfn_offset); ++ arch_teardown_dma_ops(dev); + } + + /** +@@ -228,16 +181,13 @@ static struct platform_device *of_platform_device_create_pdata( + if (!dev) + goto err_clear_flag; + +- of_dma_configure(&dev->dev); + dev->dev.bus = &platform_bus_type; + dev->dev.platform_data = platform_data; +- +- /* We do not fill the DMA ops for platform devices by default. +- * This is currently the responsibility of the platform code +- * to do such, possibly using a device notifier +- */ ++ of_dma_configure(&dev->dev, dev->dev.of_node); ++ of_msi_configure(&dev->dev, dev->dev.of_node); + + if (of_device_add(dev) != 0) { ++ of_dma_deconfigure(&dev->dev); + platform_device_put(dev); + goto err_clear_flag; + } +@@ -291,13 +241,13 @@ static struct amba_device *of_amba_device_create(struct device_node *node, + + /* setup generic device info */ + dev->dev.of_node = of_node_get(node); +- dev->dev.parent = parent; ++ dev->dev.parent = parent ? 
: &platform_bus; + dev->dev.platform_data = platform_data; + if (bus_id) + dev_set_name(&dev->dev, "%s", bus_id); + else + of_device_make_bus_id(&dev->dev); +- of_dma_configure(&dev->dev); ++ of_dma_configure(&dev->dev, dev->dev.of_node); + + /* Allow the HW Peripheral ID to be overridden */ + prop = of_get_property(node, "arm,primecell-periphid", NULL); +@@ -500,6 +450,7 @@ int of_platform_populate(struct device_node *root, + if (rc) + break; + } ++ of_node_set_flag(root, OF_POPULATED_BUS); + + of_node_put(root); + return rc; +@@ -523,6 +474,7 @@ static int of_platform_device_destroy(struct device *dev, void *data) + amba_device_unregister(to_amba_device(dev)); + #endif + ++ of_dma_deconfigure(dev); + of_node_clear_flag(dev->of_node, OF_POPULATED); + of_node_clear_flag(dev->of_node, OF_POPULATED_BUS); + return 0; +@@ -542,8 +494,75 @@ static int of_platform_device_destroy(struct device *dev, void *data) + */ + void of_platform_depopulate(struct device *parent) + { +- device_for_each_child(parent, NULL, of_platform_device_destroy); ++ if (parent->of_node && of_node_check_flag(parent->of_node, OF_POPULATED_BUS)) { ++ device_for_each_child(parent, NULL, of_platform_device_destroy); ++ of_node_clear_flag(parent->of_node, OF_POPULATED_BUS); ++ } + } + EXPORT_SYMBOL_GPL(of_platform_depopulate); + ++#ifdef CONFIG_OF_DYNAMIC ++static int of_platform_notify(struct notifier_block *nb, ++ unsigned long action, void *arg) ++{ ++ struct of_reconfig_data *rd = arg; ++ struct platform_device *pdev_parent, *pdev; ++ bool children_left; ++ ++ switch (of_reconfig_get_state_change(action, rd)) { ++ case OF_RECONFIG_CHANGE_ADD: ++ /* verify that the parent is a bus */ ++ if (!of_node_check_flag(rd->dn->parent, OF_POPULATED_BUS)) ++ return NOTIFY_OK; /* not for us */ ++ ++ /* already populated? (driver using of_populate manually) */ ++ if (of_node_check_flag(rd->dn, OF_POPULATED)) ++ return NOTIFY_OK; ++ ++ /* pdev_parent may be NULL when no bus platform device */ ++ pdev_parent = of_find_device_by_node(rd->dn->parent); ++ pdev = of_platform_device_create(rd->dn, NULL, ++ pdev_parent ? &pdev_parent->dev : NULL); ++ of_dev_put(pdev_parent); ++ ++ if (pdev == NULL) { ++ pr_err("%s: failed to create for '%s'\n", ++ __func__, rd->dn->full_name); ++ /* of_platform_device_create tosses the error code */ ++ return notifier_from_errno(-EINVAL); ++ } ++ break; ++ ++ case OF_RECONFIG_CHANGE_REMOVE: ++ ++ /* already depopulated? */ ++ if (!of_node_check_flag(rd->dn, OF_POPULATED)) ++ return NOTIFY_OK; ++ ++ /* find our device by node */ ++ pdev = of_find_device_by_node(rd->dn); ++ if (pdev == NULL) ++ return NOTIFY_OK; /* no? not meant for us */ ++ ++ /* unregister takes one ref away */ ++ of_platform_device_destroy(&pdev->dev, &children_left); ++ ++ /* and put the reference of the find */ ++ of_dev_put(pdev); ++ break; ++ } ++ ++ return NOTIFY_OK; ++} ++ ++static struct notifier_block platform_of_notifier = { ++ .notifier_call = of_platform_notify, ++}; ++ ++void of_platform_register_reconfig_notifier(void) ++{ ++ WARN_ON(of_reconfig_notifier_register(&platform_of_notifier)); ++} ++#endif /* CONFIG_OF_DYNAMIC */ ++ + #endif /* CONFIG_OF_ADDRESS */ +diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig +index 893503f..cced842 100644 +--- a/drivers/pci/Kconfig ++++ b/drivers/pci/Kconfig +@@ -4,6 +4,7 @@ + config PCI_MSI + bool "Message Signaled Interrupts (MSI and MSI-X)" + depends on PCI ++ select GENERIC_MSI_IRQ + help + This allows device drivers to enable MSI (Message Signaled + Interrupts). 
Message Signaled Interrupts enable a device to +@@ -16,6 +17,11 @@ config PCI_MSI + + If you don't know what to do here, say Y. + ++config PCI_MSI_IRQ_DOMAIN ++ bool ++ depends on PCI_MSI ++ select GENERIC_MSI_IRQ_DOMAIN ++ + config PCI_DEBUG + bool "PCI Debugging" + depends on PCI && DEBUG_KERNEL +diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c +index 8fb1618..90fa3a7 100644 +--- a/drivers/pci/bus.c ++++ b/drivers/pci/bus.c +@@ -20,17 +20,16 @@ + void pci_add_resource_offset(struct list_head *resources, struct resource *res, + resource_size_t offset) + { +- struct pci_host_bridge_window *window; ++ struct resource_entry *entry; + +- window = kzalloc(sizeof(struct pci_host_bridge_window), GFP_KERNEL); +- if (!window) { ++ entry = resource_list_create_entry(res, 0); ++ if (!entry) { + printk(KERN_ERR "PCI: can't add host bridge window %pR\n", res); + return; + } + +- window->res = res; +- window->offset = offset; +- list_add_tail(&window->list, resources); ++ entry->offset = offset; ++ resource_list_add_tail(entry, resources); + } + EXPORT_SYMBOL(pci_add_resource_offset); + +@@ -42,12 +41,7 @@ EXPORT_SYMBOL(pci_add_resource); + + void pci_free_resource_list(struct list_head *resources) + { +- struct pci_host_bridge_window *window, *tmp; +- +- list_for_each_entry_safe(window, tmp, resources, list) { +- list_del(&window->list); +- kfree(window); +- } ++ resource_list_free(resources); + } + EXPORT_SYMBOL(pci_free_resource_list); + +diff --git a/drivers/pci/host-bridge.c b/drivers/pci/host-bridge.c +index 0e5f3c9..3e5bbf9 100644 +--- a/drivers/pci/host-bridge.c ++++ b/drivers/pci/host-bridge.c +@@ -23,6 +23,20 @@ static struct pci_host_bridge *find_pci_host_bridge(struct pci_bus *bus) + return to_pci_host_bridge(root_bus->bridge); + } + ++struct device *pci_get_host_bridge_device(struct pci_dev *dev) ++{ ++ struct pci_bus *root_bus = find_pci_root_bus(dev->bus); ++ struct device *bridge = root_bus->bridge; ++ ++ kobject_get(&bridge->kobj); ++ return bridge; ++} ++ ++void pci_put_host_bridge_device(struct device *dev) ++{ ++ kobject_put(&dev->kobj); ++} ++ + void pci_set_host_bridge_release(struct pci_host_bridge *bridge, + void (*release_fn)(struct pci_host_bridge *), + void *release_data) +@@ -35,10 +49,10 @@ void pcibios_resource_to_bus(struct pci_bus *bus, struct pci_bus_region *region, + struct resource *res) + { + struct pci_host_bridge *bridge = find_pci_host_bridge(bus); +- struct pci_host_bridge_window *window; ++ struct resource_entry *window; + resource_size_t offset = 0; + +- list_for_each_entry(window, &bridge->windows, list) { ++ resource_list_for_each_entry(window, &bridge->windows) { + if (resource_contains(window->res, res)) { + offset = window->offset; + break; +@@ -60,10 +74,10 @@ void pcibios_bus_to_resource(struct pci_bus *bus, struct resource *res, + struct pci_bus_region *region) + { + struct pci_host_bridge *bridge = find_pci_host_bridge(bus); +- struct pci_host_bridge_window *window; ++ struct resource_entry *window; + resource_size_t offset = 0; + +- list_for_each_entry(window, &bridge->windows, list) { ++ resource_list_for_each_entry(window, &bridge->windows) { + struct pci_bus_region bus_region; + + if (resource_type(res) != resource_type(window->res)) +diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig +index 3dc25fa..96586b1 100644 +--- a/drivers/pci/host/Kconfig ++++ b/drivers/pci/host/Kconfig +@@ -86,9 +86,26 @@ config PCI_XGENE + depends on ARCH_XGENE + depends on OF + select PCIEPORTBUS ++ select PCI_MSI_IRQ_DOMAIN if PCI_MSI + help + Say Y here 
if you want internal PCI support on APM X-Gene SoC. + There are 5 internal PCIe ports available. Each port is GEN3 capable + and have varied lanes from x1 to x8. + ++config PCI_XGENE_MSI ++ bool "X-Gene v1 PCIe MSI feature" ++ depends on PCI_XGENE && PCI_MSI ++ default y ++ help ++ Say Y here if you want PCIe MSI support for the APM X-Gene v1 SoC. ++ This MSI driver supports 5 PCIe ports on the APM X-Gene v1 SoC. ++ ++config PCI_LAYERSCAPE ++ bool "Freescale Layerscape PCIe controller" ++ depends on OF && (ARM || ARCH_LAYERSCAPE) ++ select PCIE_DW ++ select MFD_SYSCON ++ help ++ Say Y here if you want PCIe controller support on Layerscape SoCs. ++ + endmenu +diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile +index 26b3461..029685e 100644 +--- a/drivers/pci/host/Makefile ++++ b/drivers/pci/host/Makefile +@@ -1,3 +1,4 @@ ++obj-$(CONFIG_PCIE_DW_BASE) += pcie-designware-base.o + obj-$(CONFIG_PCIE_DW) += pcie-designware.o + obj-$(CONFIG_PCI_DRA7XX) += pci-dra7xx.o + obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o +@@ -11,3 +12,5 @@ obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o + obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone-dw.o pci-keystone.o + obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx.o + obj-$(CONFIG_PCI_XGENE) += pci-xgene.o ++obj-$(CONFIG_PCI_XGENE_MSI) += pci-xgene-msi.o ++obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o +diff --git a/drivers/pci/host/pci-dra7xx.c b/drivers/pci/host/pci-dra7xx.c +index 52b34fe..84a45cf 100644 +--- a/drivers/pci/host/pci-dra7xx.c ++++ b/drivers/pci/host/pci-dra7xx.c +@@ -61,6 +61,7 @@ + + #define PCIECTRL_DRA7XX_CONF_PHY_CS 0x010C + #define LINK_UP BIT(16) ++#define DRA7XX_CPU_TO_BUS_ADDR 0x0FFFFFFF + + struct dra7xx_pcie { + void __iomem *base; +@@ -144,6 +145,12 @@ static void dra7xx_pcie_enable_interrupts(struct pcie_port *pp) + static void dra7xx_pcie_host_init(struct pcie_port *pp) + { + dw_pcie_setup_rc(pp); ++ ++ pp->io_base &= DRA7XX_CPU_TO_BUS_ADDR; ++ pp->mem_base &= DRA7XX_CPU_TO_BUS_ADDR; ++ pp->cfg0_base &= DRA7XX_CPU_TO_BUS_ADDR; ++ pp->cfg1_base &= DRA7XX_CPU_TO_BUS_ADDR; ++ + dra7xx_pcie_establish_link(pp); + if (IS_ENABLED(CONFIG_PCI_MSI)) + dw_pcie_msi_init(pp); +@@ -160,7 +167,6 @@ static int dra7xx_pcie_intx_map(struct irq_domain *domain, unsigned int irq, + { + irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq); + irq_set_chip_data(irq, domain->host_data); +- set_irq_flags(irq, IRQF_VALID); + + return 0; + } +diff --git a/drivers/pci/host/pci-exynos.c b/drivers/pci/host/pci-exynos.c +index c5d0ca3..2fd6b4e 100644 +--- a/drivers/pci/host/pci-exynos.c ++++ b/drivers/pci/host/pci-exynos.c +@@ -466,7 +466,7 @@ static int exynos_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, + int ret; + + exynos_pcie_sideband_dbi_r_mode(pp, true); +- ret = dw_pcie_cfg_read(pp->dbi_base + (where & ~0x3), where, size, val); ++ ret = dw_pcie_cfg_read(pp->dbi_base + where, size, val); + exynos_pcie_sideband_dbi_r_mode(pp, false); + return ret; + } +@@ -477,8 +477,7 @@ static int exynos_pcie_wr_own_conf(struct pcie_port *pp, int where, int size, + int ret; + + exynos_pcie_sideband_dbi_w_mode(pp, true); +- ret = dw_pcie_cfg_write(pp->dbi_base + (where & ~0x3), +- where, size, val); ++ ret = dw_pcie_cfg_write(pp->dbi_base + where, size, val); + exynos_pcie_sideband_dbi_w_mode(pp, false); + return ret; + } +diff --git a/drivers/pci/host/pci-host-generic.c b/drivers/pci/host/pci-host-generic.c +index 3d2076f..83fb705 100644 +--- a/drivers/pci/host/pci-host-generic.c ++++ b/drivers/pci/host/pci-host-generic.c +@@ -32,13 +32,22 @@ 
struct gen_pci_cfg_bus_ops { + + struct gen_pci_cfg_windows { + struct resource res; +- struct resource bus_range; ++ struct resource *bus_range; + void __iomem **win; + + const struct gen_pci_cfg_bus_ops *ops; + }; + ++/* ++ * ARM pcibios functions expect the ARM struct pci_sys_data as the PCI ++ * sysdata. Add pci_sys_data as the first element in struct gen_pci so ++ * that when we use a gen_pci pointer as sysdata, it is also a pointer to ++ * a struct pci_sys_data. ++ */ + struct gen_pci { ++#ifdef CONFIG_ARM ++ struct pci_sys_data sys; ++#endif + struct pci_host_bridge host; + struct gen_pci_cfg_windows cfg; + struct list_head resources; +@@ -48,9 +57,8 @@ static void __iomem *gen_pci_map_cfg_bus_cam(struct pci_bus *bus, + unsigned int devfn, + int where) + { +- struct pci_sys_data *sys = bus->sysdata; +- struct gen_pci *pci = sys->private_data; +- resource_size_t idx = bus->number - pci->cfg.bus_range.start; ++ struct gen_pci *pci = bus->sysdata; ++ resource_size_t idx = bus->number - pci->cfg.bus_range->start; + + return pci->cfg.win[idx] + ((devfn << 8) | where); + } +@@ -64,9 +72,8 @@ static void __iomem *gen_pci_map_cfg_bus_ecam(struct pci_bus *bus, + unsigned int devfn, + int where) + { +- struct pci_sys_data *sys = bus->sysdata; +- struct gen_pci *pci = sys->private_data; +- resource_size_t idx = bus->number - pci->cfg.bus_range.start; ++ struct gen_pci *pci = bus->sysdata; ++ resource_size_t idx = bus->number - pci->cfg.bus_range->start; + + return pci->cfg.win[idx] + ((devfn << 12) | where); + } +@@ -76,55 +83,9 @@ static struct gen_pci_cfg_bus_ops gen_pci_cfg_ecam_bus_ops = { + .map_bus = gen_pci_map_cfg_bus_ecam, + }; + +-static int gen_pci_config_read(struct pci_bus *bus, unsigned int devfn, +- int where, int size, u32 *val) +-{ +- void __iomem *addr; +- struct pci_sys_data *sys = bus->sysdata; +- struct gen_pci *pci = sys->private_data; +- +- addr = pci->cfg.ops->map_bus(bus, devfn, where); +- +- switch (size) { +- case 1: +- *val = readb(addr); +- break; +- case 2: +- *val = readw(addr); +- break; +- default: +- *val = readl(addr); +- } +- +- return PCIBIOS_SUCCESSFUL; +-} +- +-static int gen_pci_config_write(struct pci_bus *bus, unsigned int devfn, +- int where, int size, u32 val) +-{ +- void __iomem *addr; +- struct pci_sys_data *sys = bus->sysdata; +- struct gen_pci *pci = sys->private_data; +- +- addr = pci->cfg.ops->map_bus(bus, devfn, where); +- +- switch (size) { +- case 1: +- writeb(val, addr); +- break; +- case 2: +- writew(val, addr); +- break; +- default: +- writel(val, addr); +- } +- +- return PCIBIOS_SUCCESSFUL; +-} +- + static struct pci_ops gen_pci_ops = { +- .read = gen_pci_config_read, +- .write = gen_pci_config_write, ++ .read = pci_generic_config_read, ++ .write = pci_generic_config_write, + }; + + static const struct of_device_id gen_pci_of_match[] = { +@@ -138,106 +99,50 @@ static const struct of_device_id gen_pci_of_match[] = { + }; + MODULE_DEVICE_TABLE(of, gen_pci_of_match); + +-static int gen_pci_calc_io_offset(struct device *dev, +- struct of_pci_range *range, +- struct resource *res, +- resource_size_t *offset) +-{ +- static atomic_t wins = ATOMIC_INIT(0); +- int err, idx, max_win; +- unsigned int window; +- +- if (!PAGE_ALIGNED(range->cpu_addr)) +- return -EINVAL; +- +- max_win = (IO_SPACE_LIMIT + 1) / SZ_64K; +- idx = atomic_inc_return(&wins); +- if (idx > max_win) +- return -ENOSPC; +- +- window = (idx - 1) * SZ_64K; +- err = pci_ioremap_io(window, range->cpu_addr); +- if (err) +- return err; +- +- of_pci_range_to_resource(range, 
dev->of_node, res); +- res->start = window; +- res->end = res->start + range->size - 1; +- *offset = window - range->pci_addr; +- return 0; +-} +- +-static int gen_pci_calc_mem_offset(struct device *dev, +- struct of_pci_range *range, +- struct resource *res, +- resource_size_t *offset) +-{ +- of_pci_range_to_resource(range, dev->of_node, res); +- *offset = range->cpu_addr - range->pci_addr; +- return 0; +-} +- + static void gen_pci_release_of_pci_ranges(struct gen_pci *pci) + { +- struct pci_host_bridge_window *win; +- +- list_for_each_entry(win, &pci->resources, list) +- release_resource(win->res); +- + pci_free_resource_list(&pci->resources); + } + + static int gen_pci_parse_request_of_pci_ranges(struct gen_pci *pci) + { +- struct of_pci_range range; +- struct of_pci_range_parser parser; + int err, res_valid = 0; + struct device *dev = pci->host.dev.parent; + struct device_node *np = dev->of_node; ++ resource_size_t iobase; ++ struct resource_entry *win; + +- if (of_pci_range_parser_init(&parser, np)) { +- dev_err(dev, "missing \"ranges\" property\n"); +- return -EINVAL; +- } +- +- for_each_of_pci_range(&parser, &range) { +- struct resource *parent, *res; +- resource_size_t offset; +- u32 restype = range.flags & IORESOURCE_TYPE_BITS; ++ err = of_pci_get_host_bridge_resources(np, 0, 0xff, &pci->resources, ++ &iobase); ++ if (err) ++ return err; + +- res = devm_kmalloc(dev, sizeof(*res), GFP_KERNEL); +- if (!res) { +- err = -ENOMEM; +- goto out_release_res; +- } ++ resource_list_for_each_entry(win, &pci->resources) { ++ struct resource *parent, *res = win->res; + +- switch (restype) { ++ switch (resource_type(res)) { + case IORESOURCE_IO: + parent = &ioport_resource; +- err = gen_pci_calc_io_offset(dev, &range, res, &offset); ++ err = pci_remap_iospace(res, iobase); ++ if (err) { ++ dev_warn(dev, "error %d: failed to map resource %pR\n", ++ err, res); ++ continue; ++ } + break; + case IORESOURCE_MEM: + parent = &iomem_resource; +- err = gen_pci_calc_mem_offset(dev, &range, res, &offset); +- res_valid |= !(res->flags & IORESOURCE_PREFETCH || err); ++ res_valid |= !(res->flags & IORESOURCE_PREFETCH); + break; ++ case IORESOURCE_BUS: ++ pci->cfg.bus_range = res; + default: +- err = -EINVAL; + continue; + } + +- if (err) { +- dev_warn(dev, +- "error %d: failed to add resource [type 0x%x, %lld bytes]\n", +- err, restype, range.size); +- continue; +- } +- +- err = request_resource(parent, res); ++ err = devm_request_resource(dev, parent, res); + if (err) + goto out_release_res; +- +- pci_add_resource_offset(&pci->resources, res, offset); + } + + if (!res_valid) { +@@ -262,38 +167,30 @@ static int gen_pci_parse_map_cfg_windows(struct gen_pci *pci) + struct device *dev = pci->host.dev.parent; + struct device_node *np = dev->of_node; + +- if (of_pci_parse_bus_range(np, &pci->cfg.bus_range)) +- pci->cfg.bus_range = (struct resource) { +- .name = np->name, +- .start = 0, +- .end = 0xff, +- .flags = IORESOURCE_BUS, +- }; +- + err = of_address_to_resource(np, 0, &pci->cfg.res); + if (err) { + dev_err(dev, "missing \"reg\" property\n"); + return err; + } + +- pci->cfg.win = devm_kcalloc(dev, resource_size(&pci->cfg.bus_range), ++ /* Limit the bus-range to fit within reg */ ++ bus_max = pci->cfg.bus_range->start + ++ (resource_size(&pci->cfg.res) >> pci->cfg.ops->bus_shift) - 1; ++ pci->cfg.bus_range->end = min_t(resource_size_t, ++ pci->cfg.bus_range->end, bus_max); ++ ++ pci->cfg.win = devm_kcalloc(dev, resource_size(pci->cfg.bus_range), + sizeof(*pci->cfg.win), GFP_KERNEL); + if (!pci->cfg.win) + 
return -ENOMEM; + +- /* Limit the bus-range to fit within reg */ +- bus_max = pci->cfg.bus_range.start + +- (resource_size(&pci->cfg.res) >> pci->cfg.ops->bus_shift) - 1; +- pci->cfg.bus_range.end = min_t(resource_size_t, pci->cfg.bus_range.end, +- bus_max); +- + /* Map our Configuration Space windows */ + if (!devm_request_mem_region(dev, pci->cfg.res.start, + resource_size(&pci->cfg.res), + "Configuration Space")) + return -ENOMEM; + +- bus_range = &pci->cfg.bus_range; ++ bus_range = pci->cfg.bus_range; + for (busn = bus_range->start; busn <= bus_range->end; ++busn) { + u32 idx = busn - bus_range->start; + u32 sz = 1 << pci->cfg.ops->bus_shift; +@@ -305,18 +202,9 @@ static int gen_pci_parse_map_cfg_windows(struct gen_pci *pci) + return -ENOMEM; + } + +- /* Register bus resource */ +- pci_add_resource(&pci->resources, bus_range); + return 0; + } + +-static int gen_pci_setup(int nr, struct pci_sys_data *sys) +-{ +- struct gen_pci *pci = sys->private_data; +- list_splice_init(&pci->resources, &sys->resources); +- return 1; +-} +- + static int gen_pci_probe(struct platform_device *pdev) + { + int err; +@@ -326,13 +214,7 @@ static int gen_pci_probe(struct platform_device *pdev) + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + struct gen_pci *pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); +- struct hw_pci hw = { +- .nr_controllers = 1, +- .private_data = (void **)&pci, +- .setup = gen_pci_setup, +- .map_irq = of_irq_parse_and_map_pci, +- .ops = &gen_pci_ops, +- }; ++ struct pci_bus *bus, *child; + + if (!pci) + return -ENOMEM; +@@ -353,6 +235,7 @@ static int gen_pci_probe(struct platform_device *pdev) + + of_id = of_match_node(gen_pci_of_match, np); + pci->cfg.ops = of_id->data; ++ gen_pci_ops.map_bus = pci->cfg.ops->map_bus; + pci->host.dev.parent = dev; + INIT_LIST_HEAD(&pci->host.windows); + INIT_LIST_HEAD(&pci->resources); +@@ -369,7 +252,27 @@ static int gen_pci_probe(struct platform_device *pdev) + return err; + } + +- pci_common_init_dev(dev, &hw); ++ /* Do not reassign resources if probe only */ ++ if (!pci_has_flag(PCI_PROBE_ONLY)) ++ pci_add_flags(PCI_REASSIGN_ALL_RSRC | PCI_REASSIGN_ALL_BUS); ++ ++ bus = pci_scan_root_bus(dev, 0, &gen_pci_ops, pci, &pci->resources); ++ if (!bus) { ++ dev_err(dev, "Scanning rootbus failed"); ++ return -ENODEV; ++ } ++ ++ pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci); ++ ++ if (!pci_has_flag(PCI_PROBE_ONLY)) { ++ pci_bus_size_bridges(bus); ++ pci_bus_assign_resources(bus); ++ ++ list_for_each_entry(child, &bus->children, node) ++ pcie_bus_configure_settings(child); ++ } ++ ++ pci_bus_add_devices(bus); + return 0; + } + +diff --git a/drivers/pci/host/pci-keystone-dw.c b/drivers/pci/host/pci-keystone-dw.c +index 34086ce..c1b5980 100644 +--- a/drivers/pci/host/pci-keystone-dw.c ++++ b/drivers/pci/host/pci-keystone-dw.c +@@ -70,7 +70,7 @@ static inline void update_reg_offset_bit_pos(u32 offset, u32 *reg_offset, + *bit_pos = offset >> 3; + } + +-u32 ks_dw_pcie_get_msi_addr(struct pcie_port *pp) ++phys_addr_t ks_dw_pcie_get_msi_addr(struct pcie_port *pp) + { + struct keystone_pcie *ks_pcie = to_keystone_pcie(pp); + +@@ -104,14 +104,13 @@ static void ks_dw_pcie_msi_irq_ack(struct irq_data *d) + { + u32 offset, reg_offset, bit_pos; + struct keystone_pcie *ks_pcie; +- unsigned int irq = d->irq; + struct msi_desc *msi; + struct pcie_port *pp; + +- msi = irq_get_msi_desc(irq); +- pp = sys_to_pcie(msi->dev->bus->sysdata); ++ msi = irq_data_get_msi_desc(d); ++ pp = sys_to_pcie(msi_desc_to_pci_sysdata(msi)); + ks_pcie 
= to_keystone_pcie(pp); +- offset = irq - irq_linear_revmap(pp->irq_domain, 0); ++ offset = d->irq - irq_linear_revmap(pp->irq_domain, 0); + update_reg_offset_bit_pos(offset, ®_offset, &bit_pos); + + writel(BIT(bit_pos), +@@ -142,20 +141,19 @@ void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq) + static void ks_dw_pcie_msi_irq_mask(struct irq_data *d) + { + struct keystone_pcie *ks_pcie; +- unsigned int irq = d->irq; + struct msi_desc *msi; + struct pcie_port *pp; + u32 offset; + +- msi = irq_get_msi_desc(irq); +- pp = sys_to_pcie(msi->dev->bus->sysdata); ++ msi = irq_data_get_msi_desc(d); ++ pp = sys_to_pcie(msi_desc_to_pci_sysdata(msi)); + ks_pcie = to_keystone_pcie(pp); +- offset = irq - irq_linear_revmap(pp->irq_domain, 0); ++ offset = d->irq - irq_linear_revmap(pp->irq_domain, 0); + + /* Mask the end point if PVM implemented */ + if (IS_ENABLED(CONFIG_PCI_MSI)) { + if (msi->msi_attrib.maskbit) +- mask_msi_irq(d); ++ pci_msi_mask_irq(d); + } + + ks_dw_pcie_msi_clear_irq(pp, offset); +@@ -164,20 +162,19 @@ static void ks_dw_pcie_msi_irq_mask(struct irq_data *d) + static void ks_dw_pcie_msi_irq_unmask(struct irq_data *d) + { + struct keystone_pcie *ks_pcie; +- unsigned int irq = d->irq; + struct msi_desc *msi; + struct pcie_port *pp; + u32 offset; + +- msi = irq_get_msi_desc(irq); +- pp = sys_to_pcie(msi->dev->bus->sysdata); ++ msi = irq_data_get_msi_desc(d); ++ pp = sys_to_pcie(msi_desc_to_pci_sysdata(msi)); + ks_pcie = to_keystone_pcie(pp); +- offset = irq - irq_linear_revmap(pp->irq_domain, 0); ++ offset = d->irq - irq_linear_revmap(pp->irq_domain, 0); + + /* Mask the end point if PVM implemented */ + if (IS_ENABLED(CONFIG_PCI_MSI)) { + if (msi->msi_attrib.maskbit) +- unmask_msi_irq(d); ++ pci_msi_unmask_irq(d); + } + + ks_dw_pcie_msi_set_irq(pp, offset); +@@ -196,7 +193,6 @@ static int ks_dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq, + irq_set_chip_and_handler(irq, &ks_dw_pcie_msi_irq_chip, + handle_level_irq); + irq_set_chip_data(irq, domain->host_data); +- set_irq_flags(irq, IRQF_VALID); + + return 0; + } +@@ -205,7 +201,7 @@ const struct irq_domain_ops ks_dw_pcie_msi_domain_ops = { + .map = ks_dw_pcie_msi_map, + }; + +-int ks_dw_pcie_msi_host_init(struct pcie_port *pp, struct msi_chip *chip) ++int ks_dw_pcie_msi_host_init(struct pcie_port *pp, struct msi_controller *chip) + { + struct keystone_pcie *ks_pcie = to_keystone_pcie(pp); + int i; +@@ -277,7 +273,6 @@ static int ks_dw_pcie_init_legacy_irq_map(struct irq_domain *d, + irq_set_chip_and_handler(irq, &ks_dw_pcie_legacy_irq_chip, + handle_level_irq); + irq_set_chip_data(irq, d->host_data); +- set_irq_flags(irq, IRQF_VALID); + + return 0; + } +@@ -327,7 +322,7 @@ static void ks_dw_pcie_clear_dbi_mode(void __iomem *reg_virt) + void ks_dw_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie) + { + struct pcie_port *pp = &ks_pcie->pp; +- u32 start = pp->mem.start, end = pp->mem.end; ++ u32 start = pp->mem->start, end = pp->mem->end; + int i, tr_size; + + /* Disable BARs for inbound access */ +@@ -403,7 +398,7 @@ int ks_dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus, + + addr = ks_pcie_cfg_setup(ks_pcie, bus_num, devfn); + +- return dw_pcie_cfg_read(addr + (where & ~0x3), where, size, val); ++ return dw_pcie_cfg_read(addr + where, size, val); + } + + int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus, +@@ -415,7 +410,7 @@ int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus, + + addr = ks_pcie_cfg_setup(ks_pcie, bus_num, devfn); + +- return 
dw_pcie_cfg_write(addr + (where & ~0x3), where, size, val); ++ return dw_pcie_cfg_write(addr + where, size, val); + } + + /** +diff --git a/drivers/pci/host/pci-keystone.h b/drivers/pci/host/pci-keystone.h +index 1fc1fce..f0944e8 100644 +--- a/drivers/pci/host/pci-keystone.h ++++ b/drivers/pci/host/pci-keystone.h +@@ -37,7 +37,7 @@ struct keystone_pcie { + + /* Keystone DW specific MSI controller APIs/definitions */ + void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset); +-u32 ks_dw_pcie_get_msi_addr(struct pcie_port *pp); ++phys_addr_t ks_dw_pcie_get_msi_addr(struct pcie_port *pp); + + /* Keystone specific PCI controller APIs */ + void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie); +@@ -55,4 +55,4 @@ void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq); + void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq); + void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp); + int ks_dw_pcie_msi_host_init(struct pcie_port *pp, +- struct msi_chip *chip); ++ struct msi_controller *chip); +diff --git a/drivers/pci/host/pci-layerscape.c b/drivers/pci/host/pci-layerscape.c +new file mode 100644 +index 0000000..d491b0f +--- /dev/null ++++ b/drivers/pci/host/pci-layerscape.c +@@ -0,0 +1,669 @@ ++/* ++ * PCIe host controller driver for Freescale Layerscape SoCs ++ * ++ * Copyright (C) 2014 Freescale Semiconductor. ++ * ++ * Author: Minghuan Lian ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "pcie-designware.h" ++ ++/* PEX1/2 Misc Ports Status Register */ ++#define SCFG_PEXMSCPORTSR(pex_idx) (0x94 + (pex_idx) * 4) ++#define SCFG_PEXPMWRCR(pex_idx) (0x5c + (pex_idx) * 0x64) ++#define LTSSM_STATE_SHIFT 20 ++#define LTSSM_STATE_MASK 0x3f ++#define LTSSM_PCIE_L0 0x11 /* L0 state */ ++#define LTSSM_PCIE_L2_IDLE 0x15 /* L2 idle state */ ++ ++/* PEX Internal Configuration Registers */ ++#define PCIE_STRFMR1 0x71c /* Symbol Timer & Filter Mask Register1 */ ++#define PCIE_DBI_RO_WR_EN 0x8bc /* DBI Read-Only Write Enable Register */ ++ ++/* PEX LUT registers */ ++#define PCIE_LUT_DBG 0x7FC /* PEX LUT Debug Register */ ++#define PCIE_LUT_UDR(n) (0x800 + (n) * 8) ++#define PCIE_LUT_LDR(n) (0x804 + (n) * 8) ++#define PCIE_LUT_MASK_ALL 0xffff ++#define PCIE_LUT_DR_NUM 32 ++#define PCIE_LUT_ENABLE (1 << 31) ++ ++#define PCIE_PM_SCR 0x44 ++#define PCIE_PM_SCR_PMEEN 0x10 ++#define PCIE_PM_SCR_PMEPS_D0 0xfffc ++#define PCIE_PM_SCR_PMEPS_D3 0x3 ++#define PCIE_PM_SCR_PME_STATE 0x8000 ++ ++#define PCIE_PEX_DCR 0x78 ++#define PCIE_PEX_DCR_AUXPOWEREN 0x0400 ++ ++#define PCIE_PEX_SSR 0x8a ++#define PCIE_PEX_SSR_PDS 0x40 ++ ++#define PCIE_PEX_RCR 0x8c ++#define PCIE_PEX_RCR_PMEIE 0x0008 ++ ++#define PCIE_PEX_RSR 0x90 ++#define PCIE_PEX_PMES 0x00010000 ++ ++#define QIXIS_RST_FORCE_3 0x45 ++#define QIXIS_RST_FORCE_3_PCIESLOT 0xe0 ++ ++#define CPLD_RST_PCIE_SLOT 0x14 ++#define CPLD_RST_PCIESLOT 0x3 ++ ++struct ls_pcie; ++ ++struct ls_pcie_pm_data { ++ void __iomem *fpga; ++ void __iomem *cpld; ++}; ++ ++struct ls_pcie_pm_ops { ++ u32 (*get_link_state)(struct ls_pcie *pcie); ++ int (*send_turn_off_message)(struct ls_pcie *pcie); ++ void (*clear_turn_off_message)(struct ls_pcie *pcie); ++ void (*reset_slot)(struct ls_pcie *pcie, ++ struct ls_pcie_pm_data 
*pm_data); ++}; ++ ++struct ls_pcie_drvdata { ++ u32 lut_offset; ++ u32 ltssm_shift; ++ struct pcie_host_ops *ops; ++ struct ls_pcie_pm_ops *pm; ++}; ++ ++struct ls_pcie { ++ struct list_head list_node; ++ void __iomem *dbi; ++ void __iomem *lut; ++ struct regmap *scfg; ++ struct pcie_port pp; ++ const struct ls_pcie_drvdata *drvdata; ++ struct ls_pcie_pm_data pm_data; ++ int index; ++ const u32 *avail_streamids; ++ int streamid_index; ++ int pme_irq; ++ bool in_slot; ++}; ++ ++#define to_ls_pcie(x) container_of(x, struct ls_pcie, pp) ++ ++u32 set_pcie_streamid_translation(struct pci_dev *pdev, u32 devid) ++{ ++ u32 index, streamid; ++ struct pcie_port *pp = pdev->bus->sysdata; ++ struct ls_pcie *pcie = to_ls_pcie(pp); ++ ++ if (!pcie->avail_streamids || !pcie->streamid_index) ++ return ~(u32)0; ++ ++ index = --pcie->streamid_index; ++ /* mask is set as all zeroes, want to match all bits */ ++ iowrite32((devid << 16), pcie->lut + PCIE_LUT_UDR(index)); ++ streamid = be32_to_cpup(&pcie->avail_streamids[index]); ++ iowrite32(streamid | PCIE_LUT_ENABLE, pcie->lut + PCIE_LUT_LDR(index)); ++ ++ return streamid; ++} ++ ++LIST_HEAD(hose_list); ++ ++static bool ls_pcie_is_bridge(struct ls_pcie *pcie) ++{ ++ u32 header_type; ++ ++ header_type = ioread8(pcie->dbi + PCI_HEADER_TYPE); ++ header_type &= 0x7f; ++ ++ return header_type == PCI_HEADER_TYPE_BRIDGE; ++} ++ ++/* Clear multi-function bit */ ++static void ls_pcie_clear_multifunction(struct ls_pcie *pcie) ++{ ++ iowrite8(PCI_HEADER_TYPE_BRIDGE, pcie->dbi + PCI_HEADER_TYPE); ++} ++ ++/* Fix class value */ ++static void ls_pcie_fix_class(struct ls_pcie *pcie) ++{ ++ iowrite16(PCI_CLASS_BRIDGE_PCI, pcie->dbi + PCI_CLASS_DEVICE); ++} ++ ++/* Drop MSG TLP except for Vendor MSG */ ++static void ls_pcie_drop_msg_tlp(struct ls_pcie *pcie) ++{ ++ u32 val; ++ ++ val = ioread32(pcie->dbi + PCIE_STRFMR1); ++ val &= 0xDFFFFFFF; ++ iowrite32(val, pcie->dbi + PCIE_STRFMR1); ++} ++ ++static int ls1021_pcie_link_up(struct pcie_port *pp) ++{ ++ u32 state; ++ struct ls_pcie *pcie = to_ls_pcie(pp); ++ ++ if (!pcie->scfg) ++ return 0; ++ ++ regmap_read(pcie->scfg, SCFG_PEXMSCPORTSR(pcie->index), &state); ++ state = (state >> LTSSM_STATE_SHIFT) & LTSSM_STATE_MASK; ++ ++ if (state < LTSSM_PCIE_L0) ++ return 0; ++ ++ return 1; ++} ++ ++static u32 ls1021_pcie_get_link_state(struct ls_pcie *pcie) ++{ ++ u32 state; ++ ++ if (!pcie->scfg) ++ return 0; ++ ++ regmap_read(pcie->scfg, SCFG_PEXMSCPORTSR(pcie->index), &state); ++ state = (state >> LTSSM_STATE_SHIFT) & LTSSM_STATE_MASK; ++ ++ return state; ++} ++ ++static int ls1021_pcie_send_turn_off_message(struct ls_pcie *pcie) ++{ ++ u32 val; ++ ++ if (!pcie->scfg) ++ return -EINVAL; ++ ++ /* Send Turn_off message */ ++ regmap_read(pcie->scfg, SCFG_PEXPMWRCR(pcie->index), &val); ++ val |= 0x80000000; ++ regmap_write(pcie->scfg, SCFG_PEXPMWRCR(pcie->index), val); ++ ++ return 0; ++} ++ ++static void ls1021_pcie_clear_turn_off_message(struct ls_pcie *pcie) ++{ ++ u32 val; ++ ++ if (!pcie->scfg) ++ return; ++ ++ /* Clear Turn_off message */ ++ regmap_read(pcie->scfg, SCFG_PEXPMWRCR(pcie->index), &val); ++ val &= 0x00000000; ++ regmap_write(pcie->scfg, SCFG_PEXPMWRCR(pcie->index), val); ++} ++ ++static void ls1021_pcie_reset_slot(struct ls_pcie *pcie, ++ struct ls_pcie_pm_data *pm_data) ++{ ++ u8 val; ++ ++ /* Try to reset PCIe slot to relink EP */ ++ if (pm_data->fpga) { ++ /* PULL DOWN PCIe RST# */ ++ val = ioread8(pm_data->fpga + QIXIS_RST_FORCE_3); ++ val |= QIXIS_RST_FORCE_3_PCIESLOT; ++ iowrite8(val, pm_data->fpga + 
QIXIS_RST_FORCE_3); ++ ++ /* PULL ON PCIe RST# */ ++ val = ioread8(pm_data->fpga + QIXIS_RST_FORCE_3); ++ val &= 0x0; ++ iowrite8(val, pm_data->fpga + QIXIS_RST_FORCE_3); ++ } ++ ++ if (pm_data->cpld) { ++ /* PULL DOWN PCIe RST# */ ++ val = ioread8(pm_data->cpld + CPLD_RST_PCIE_SLOT); ++ val &= 0x0; ++ iowrite8(val, pm_data->cpld + CPLD_RST_PCIE_SLOT); ++ ++ /* PULL ON PCIe RST# */ ++ val = ioread8(pm_data->cpld + CPLD_RST_PCIE_SLOT); ++ val |= CPLD_RST_PCIESLOT; ++ iowrite8(val, pm_data->cpld + CPLD_RST_PCIE_SLOT); ++ } ++} ++ ++static void ls1021_pcie_host_init(struct pcie_port *pp) ++{ ++ struct ls_pcie *pcie = to_ls_pcie(pp); ++ u32 index[2]; ++ ++ pcie->scfg = syscon_regmap_lookup_by_phandle(pp->dev->of_node, ++ "fsl,pcie-scfg"); ++ if (IS_ERR(pcie->scfg)) { ++ dev_err(pp->dev, "No syscfg phandle specified\n"); ++ pcie->scfg = NULL; ++ return; ++ } ++ ++ if (of_property_read_u32_array(pp->dev->of_node, ++ "fsl,pcie-scfg", index, 2)) { ++ pcie->scfg = NULL; ++ return; ++ } ++ pcie->index = index[1]; ++ ++ dw_pcie_setup_rc(pp); ++ ++ ls_pcie_drop_msg_tlp(pcie); ++} ++ ++static int ls_pcie_link_up(struct pcie_port *pp) ++{ ++ struct ls_pcie *pcie = to_ls_pcie(pp); ++ u32 state; ++ ++ state = (ioread32(pcie->lut + PCIE_LUT_DBG) >> ++ pcie->drvdata->ltssm_shift) & ++ LTSSM_STATE_MASK; ++ ++ if (state < LTSSM_PCIE_L0) ++ return 0; ++ ++ return 1; ++} ++ ++static u32 ls_pcie_get_link_state(struct ls_pcie *pcie) ++{ ++ return (ioread32(pcie->lut + PCIE_LUT_DBG) >> ++ pcie->drvdata->ltssm_shift) & ++ LTSSM_STATE_MASK; ++} ++ ++static void ls_pcie_host_init(struct pcie_port *pp) ++{ ++ struct ls_pcie *pcie = to_ls_pcie(pp); ++ ++ iowrite32(1, pcie->dbi + PCIE_DBI_RO_WR_EN); ++ ls_pcie_fix_class(pcie); ++ ls_pcie_clear_multifunction(pcie); ++ ls_pcie_drop_msg_tlp(pcie); ++ iowrite32(0, pcie->dbi + PCIE_DBI_RO_WR_EN); ++} ++ ++static int ls_pcie_msi_host_init(struct pcie_port *pp, ++ struct msi_controller *chip) ++{ ++ struct device_node *msi_node; ++ struct device_node *np = pp->dev->of_node; ++ ++ /* ++ * The MSI domain is set by the generic of_msi_configure(). This ++ * .msi_host_init() function keeps us from doing the default MSI ++ * domain setup in dw_pcie_host_init() and also enforces the ++ * requirement that "msi-parent" exists. 
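++ * Returning an error from here makes dw_pcie_host_init() bail out,
++ * so a missing "msi-parent" fails the probe rather than leaving the
++ * port with silently broken MSI.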
++ */ ++ msi_node = of_parse_phandle(np, "msi-parent", 0); ++ if (!msi_node) { ++ dev_err(pp->dev, "failed to find msi-parent\n"); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++static struct pcie_host_ops ls1021_pcie_host_ops = { ++ .link_up = ls1021_pcie_link_up, ++ .host_init = ls1021_pcie_host_init, ++ .msi_host_init = ls_pcie_msi_host_init, ++}; ++ ++static struct ls_pcie_pm_ops ls1021_pcie_host_pm_ops = { ++ .get_link_state = &ls1021_pcie_get_link_state, ++ .send_turn_off_message = &ls1021_pcie_send_turn_off_message, ++ .clear_turn_off_message = &ls1021_pcie_clear_turn_off_message, ++ .reset_slot = &ls1021_pcie_reset_slot, ++}; ++ ++static struct pcie_host_ops ls_pcie_host_ops = { ++ .link_up = ls_pcie_link_up, ++ .host_init = ls_pcie_host_init, ++ .msi_host_init = ls_pcie_msi_host_init, ++}; ++ ++static struct ls_pcie_pm_ops ls_pcie_host_pm_ops = { ++ .get_link_state = &ls_pcie_get_link_state, ++}; ++ ++static struct ls_pcie_drvdata ls1021_drvdata = { ++ .ops = &ls1021_pcie_host_ops, ++ .pm = &ls1021_pcie_host_pm_ops, ++}; ++ ++static struct ls_pcie_drvdata ls1043_drvdata = { ++ .lut_offset = 0x10000, ++ .ltssm_shift = 24, ++ .ops = &ls_pcie_host_ops, ++ .pm = &ls_pcie_host_pm_ops, ++}; ++ ++static struct ls_pcie_drvdata ls2080_drvdata = { ++ .lut_offset = 0x80000, ++ .ltssm_shift = 0, ++ .ops = &ls_pcie_host_ops, ++ .pm = &ls_pcie_host_pm_ops, ++}; ++ ++static const struct of_device_id ls_pcie_of_match[] = { ++ { .compatible = "fsl,ls1021a-pcie", .data = &ls1021_drvdata }, ++ { .compatible = "fsl,ls1043a-pcie", .data = &ls1043_drvdata }, ++ { .compatible = "fsl,ls2080a-pcie", .data = &ls2080_drvdata }, ++ { .compatible = "fsl,ls2085a-pcie", .data = &ls2080_drvdata }, ++ { }, ++}; ++MODULE_DEVICE_TABLE(of, ls_pcie_of_match); ++ ++static void ls_pcie_host_hack_pm_init(struct ls_pcie *pcie) ++{ ++ struct device_node *np; ++ struct ls_pcie_pm_data *pm_data = &pcie->pm_data; ++ ++ np = of_find_compatible_node(NULL, NULL, "fsl,ls1021aqds-fpga"); ++ if (np) ++ pm_data->fpga = of_iomap(np, 0); ++ ++ of_node_put(np); ++ ++ np = of_find_compatible_node(NULL, NULL, "fsl,ls1021atwr-cpld"); ++ if (np) ++ pm_data->cpld = of_iomap(np, 0); ++ ++ of_node_put(np); ++} ++ ++static irqreturn_t ls_pcie_pme_irq_handler(int irq, void *data) ++{ ++ struct pcie_port *pp = data; ++ struct ls_pcie *pcie = to_ls_pcie(pp); ++ u32 val; ++ ++ if (pcie->drvdata->pm->clear_turn_off_message) ++ pcie->drvdata->pm->clear_turn_off_message(pcie); ++ ++ /* Clear Host root PME_STATE bit */ ++ val = ioread32(pcie->dbi + PCIE_PEX_RSR); ++ val |= PCIE_PEX_PMES; ++ iowrite32(val, pcie->dbi + PCIE_PEX_RSR); ++ ++ return IRQ_HANDLED; ++} ++ ++static int ls_pcie_host_pme_init(struct ls_pcie *pcie, ++ struct platform_device *pdev) ++{ ++ struct pcie_port *pp; ++ int ret; ++ u16 val; ++ ++ pp = &pcie->pp; ++ ++ pcie->pme_irq = platform_get_irq_byname(pdev, "pme"); ++ if (pcie->pme_irq < 0) { ++ dev_err(&pdev->dev, ++ "failed to get PME IRQ: %d\n", pcie->pme_irq); ++ return pcie->pme_irq; ++ } ++ ++ ret = devm_request_irq(pp->dev, pcie->pme_irq, ls_pcie_pme_irq_handler, ++ IRQF_SHARED, "ls-pcie-pme", pp); ++ if (ret) { ++ dev_err(pp->dev, "Failed to request pme irq\n"); ++ return ret; ++ } ++ ++ ls_pcie_host_hack_pm_init(pcie); ++ ++ /* AUX Power PM Enable */ ++ val = ioread16(pcie->dbi + PCIE_PEX_DCR); ++ val |= PCIE_PEX_DCR_AUXPOWEREN; ++ iowrite16(val, pcie->dbi + PCIE_PEX_DCR); ++ ++ /* Enable PME message */ ++ val = ioread16(pcie->dbi + PCIE_PM_SCR); ++ val |= PCIE_PM_SCR_PMEEN; ++ iowrite16(val, pcie->dbi + PCIE_PM_SCR); 
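++
++ /*
++ * PME_STATE (PME_Status) below is a write-1-to-clear bit: OR-ing it
++ * in and writing the register back clears any stale PME indication.
++ */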
++ ++ /* Clear Host PME_STATE bit */ ++ val = ioread16(pcie->dbi + PCIE_PM_SCR); ++ val |= PCIE_PM_SCR_PME_STATE; ++ iowrite16(val, pcie->dbi + PCIE_PM_SCR); ++ ++ /* Enable Host %d interrupt */ ++ val = ioread16(pcie->dbi + PCIE_PEX_RCR); ++ val |= PCIE_PEX_RCR_PMEIE; ++ iowrite16(val, pcie->dbi + PCIE_PEX_RCR); ++ ++ if (dw_pcie_link_up(&pcie->pp)) ++ pcie->in_slot = true; ++ else ++ pcie->in_slot = false; ++ ++ return 0; ++} ++ ++static int __init ls_add_pcie_port(struct pcie_port *pp, ++ struct platform_device *pdev) ++{ ++ int ret; ++ struct ls_pcie *pcie = to_ls_pcie(pp); ++ ++ pp->dev = &pdev->dev; ++ pp->dbi_base = pcie->dbi; ++ pp->ops = pcie->drvdata->ops; ++ ++ ret = dw_pcie_host_init(pp); ++ if (ret) { ++ dev_err(pp->dev, "failed to initialize host\n"); ++ return ret; ++ } ++ ++ ret = ls_pcie_host_pme_init(pcie, pdev); ++ if (ret) ++ dev_warn(pp->dev, "failed to initialize PME\n"); ++ ++ return 0; ++} ++ ++static int ls_pcie_probe(struct platform_device *pdev) ++{ ++ const struct of_device_id *match; ++ struct ls_pcie *pcie; ++ struct resource *dbi_base; ++ int ret; ++ ++ match = of_match_device(ls_pcie_of_match, &pdev->dev); ++ if (!match) ++ return -ENODEV; ++ ++ pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL); ++ if (!pcie) ++ return -ENOMEM; ++ ++ dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); ++ pcie->dbi = devm_ioremap_resource(&pdev->dev, dbi_base); ++ if (IS_ERR(pcie->dbi)) { ++ dev_err(&pdev->dev, "missing *regs* space\n"); ++ return PTR_ERR(pcie->dbi); ++ } ++ ++ pcie->drvdata = match->data; ++ pcie->lut = pcie->dbi + pcie->drvdata->lut_offset; ++ /* Disable LDR zero */ ++ iowrite32(0, pcie->lut + PCIE_LUT_LDR(0)); ++ ++ if (!ls_pcie_is_bridge(pcie)) ++ return -ENODEV; ++ ++ if (of_device_is_compatible(pdev->dev.of_node, "fsl,ls2085a-pcie") || ++ of_device_is_compatible(pdev->dev.of_node, "fsl,ls2080a-pcie")) { ++ int len; ++ const u32 *prop; ++ struct device_node *np; ++ ++ np = pdev->dev.of_node; ++ prop = (u32 *)of_get_property(np, "available-stream-ids", &len); ++ if (prop) { ++ pcie->avail_streamids = prop; ++ pcie->streamid_index = len/sizeof(u32); ++ } else ++ dev_err(&pdev->dev, "PCIe endpoint partitioning not possible\n"); ++ } ++ ++ ret = ls_add_pcie_port(&pcie->pp, pdev); ++ if (ret < 0) ++ return ret; ++ ++ list_add_tail(&pcie->list_node, &hose_list); ++ ++ platform_set_drvdata(pdev, pcie); ++ ++ return 0; ++} ++ ++#ifdef CONFIG_PM_SLEEP ++static int ls_pcie_pm_do_suspend(struct ls_pcie *pcie) ++{ ++ u32 state; ++ int i = 0; ++ int ret; ++ u16 val; ++ ++ if (!pcie->in_slot) ++ return 0; ++ ++ if (!pcie->drvdata->pm->send_turn_off_message) ++ return 0; ++ ++ ret = pcie->drvdata->pm->send_turn_off_message(pcie); ++ if (ret) ++ return -EINVAL; ++ ++ while (i < 100) { ++ state = pcie->drvdata->pm->get_link_state(pcie); ++ if (state == LTSSM_PCIE_L2_IDLE) ++ break; ++ i++; ++ mdelay(1); ++ } ++ ++ /* Put RC in D3 */ ++ val = ioread16(pcie->dbi + PCIE_PM_SCR); ++ val |= PCIE_PM_SCR_PMEPS_D3; ++ iowrite16(val, pcie->dbi + PCIE_PM_SCR); ++ ++ mdelay(10); ++ ++ return 0; ++} ++ ++static int ls_pcie_pm_do_resume(struct ls_pcie *pcie) ++{ ++ u32 state; ++ int i = 0; ++ u16 val; ++ ++ ls_pcie_host_init(&pcie->pp); ++ ++ if (!pcie->in_slot) ++ return 0; ++ ++ /* Put RC in D0 */ ++ val = ioread16(pcie->dbi + PCIE_PM_SCR); ++ val &= PCIE_PM_SCR_PMEPS_D0; ++ iowrite16(val, pcie->dbi + PCIE_PM_SCR); ++ ++ mdelay(10); ++ ++ state = pcie->drvdata->pm->get_link_state(pcie); ++ if (state == LTSSM_PCIE_L0) ++ return 0; ++ ++ if 
(!pcie->drvdata->pm->reset_slot) ++ return -EINVAL; ++ ++ pcie->drvdata->pm->reset_slot(pcie, &pcie->pm_data); ++ ++ while (i < 100) { ++ state = pcie->drvdata->pm->get_link_state(pcie); ++ if (state == LTSSM_PCIE_L0) ++ return 0; ++ i++; ++ mdelay(1); ++ } ++ ++ return -EINVAL; ++} ++ ++static int ls_pcie_pm_suspend(void) ++{ ++ struct ls_pcie *hose, *tmp; ++ ++ list_for_each_entry_safe(hose, tmp, &hose_list, list_node) ++ ls_pcie_pm_do_suspend(hose); ++ ++ return 0; ++} ++ ++static void ls_pcie_pm_resume(void) ++{ ++ struct ls_pcie *hose, *tmp; ++ ++ list_for_each_entry_safe(hose, tmp, &hose_list, list_node) ++ ls_pcie_pm_do_resume(hose); ++} ++ ++static struct syscore_ops ls_pcie_syscore_pm_ops = { ++ .suspend = ls_pcie_pm_suspend, ++ .resume = ls_pcie_pm_resume, ++}; ++#endif /* CONFIG_PM_SLEEP */ ++ ++static struct platform_driver ls_pcie_driver = { ++ .probe = ls_pcie_probe, ++ .driver = { ++ .name = "layerscape-pcie", ++ .of_match_table = ls_pcie_of_match, ++ }, ++}; ++ ++static int __init fsl_pci_init(void) ++{ ++#ifdef CONFIG_PM_SLEEP ++ register_syscore_ops(&ls_pcie_syscore_pm_ops); ++#endif ++ return platform_driver_register(&ls_pcie_driver); ++} ++module_init(fsl_pci_init); ++ ++MODULE_AUTHOR("Minghuan Lian "); ++MODULE_DESCRIPTION("Freescale Layerscape PCIe host controller driver"); ++MODULE_LICENSE("GPL v2"); +diff --git a/drivers/pci/host/pci-layerscape.h b/drivers/pci/host/pci-layerscape.h +new file mode 100644 +index 0000000..e90e114 +--- /dev/null ++++ b/drivers/pci/host/pci-layerscape.h +@@ -0,0 +1,13 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++ ++#ifndef _PCI_LAYERSCAPE_H ++#define _PCI_LAYERSCAPE_H ++ ++/* function for setting up stream id to device id translation */ ++u32 set_pcie_streamid_translation(struct pci_dev *pdev, u32 devid); ++ ++#endif /* _PCI_LAYERSCAPE_H */ +diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c +index b1315e1..94b42d1 100644 +--- a/drivers/pci/host/pci-mvebu.c ++++ b/drivers/pci/host/pci-mvebu.c +@@ -99,11 +99,9 @@ struct mvebu_pcie_port; + struct mvebu_pcie { + struct platform_device *pdev; + struct mvebu_pcie_port *ports; +- struct msi_chip *msi; ++ struct msi_controller *msi; + struct resource io; +- char io_name[30]; + struct resource realio; +- char mem_name[30]; + struct resource mem; + struct resource busn; + int nports; +@@ -722,18 +720,9 @@ static int mvebu_pcie_setup(int nr, struct pci_sys_data *sys) + { + struct mvebu_pcie *pcie = sys_to_pcie(sys); + int i; +- int domain = 0; + +-#ifdef CONFIG_PCI_DOMAINS +- domain = sys->domain; +-#endif +- +- snprintf(pcie->mem_name, sizeof(pcie->mem_name), "PCI MEM %04x", +- domain); +- pcie->mem.name = pcie->mem_name; +- +- snprintf(pcie->io_name, sizeof(pcie->io_name), "PCI I/O %04x", domain); +- pcie->realio.name = pcie->io_name; ++ pcie->mem.name = "PCI MEM"; ++ pcie->realio.name = "PCI I/O"; + + if (request_resource(&iomem_resource, &pcie->mem)) + return 0; +diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c +index 19bb19c..971d8d7 100644 +--- a/drivers/pci/host/pci-tegra.c ++++ b/drivers/pci/host/pci-tegra.c +@@ -238,7 +238,7 @@ + ) + + struct tegra_msi { +- struct msi_chip chip; ++ struct msi_controller chip; + DECLARE_BITMAP(used, INT_PCI_MSI_NR); + struct irq_domain *domain; + unsigned long pages; +@@ -259,7 +259,7 @@ struct tegra_pcie_soc_data { + bool has_gen2; + }; + +-static 
inline struct tegra_msi *to_tegra_msi(struct msi_chip *chip) ++static inline struct tegra_msi *to_tegra_msi(struct msi_controller *chip) + { + return container_of(chip, struct tegra_msi, chip); + } +@@ -1280,8 +1280,8 @@ static irqreturn_t tegra_pcie_msi_irq(int irq, void *data) + return processed > 0 ? IRQ_HANDLED : IRQ_NONE; + } + +-static int tegra_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev, +- struct msi_desc *desc) ++static int tegra_msi_setup_irq(struct msi_controller *chip, ++ struct pci_dev *pdev, struct msi_desc *desc) + { + struct tegra_msi *msi = to_tegra_msi(chip); + struct msi_msg msg; +@@ -1305,12 +1305,13 @@ static int tegra_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev, + msg.address_hi = 0; + msg.data = hwirq; + +- write_msi_msg(irq, &msg); ++ pci_write_msi_msg(irq, &msg); + + return 0; + } + +-static void tegra_msi_teardown_irq(struct msi_chip *chip, unsigned int irq) ++static void tegra_msi_teardown_irq(struct msi_controller *chip, ++ unsigned int irq) + { + struct tegra_msi *msi = to_tegra_msi(chip); + struct irq_data *d = irq_get_irq_data(irq); +@@ -1322,10 +1323,10 @@ static void tegra_msi_teardown_irq(struct msi_chip *chip, unsigned int irq) + + static struct irq_chip tegra_msi_irq_chip = { + .name = "Tegra PCIe MSI", +- .irq_enable = unmask_msi_irq, +- .irq_disable = mask_msi_irq, +- .irq_mask = mask_msi_irq, +- .irq_unmask = unmask_msi_irq, ++ .irq_enable = pci_msi_unmask_irq, ++ .irq_disable = pci_msi_mask_irq, ++ .irq_mask = pci_msi_mask_irq, ++ .irq_unmask = pci_msi_unmask_irq, + }; + + static int tegra_msi_map(struct irq_domain *domain, unsigned int irq, +@@ -1333,7 +1334,6 @@ static int tegra_msi_map(struct irq_domain *domain, unsigned int irq, + { + irq_set_chip_and_handler(irq, &tegra_msi_irq_chip, handle_simple_irq); + irq_set_chip_data(irq, domain->host_data); +- set_irq_flags(irq, IRQF_VALID); + + tegra_cpuidle_pcie_irqs_in_use(); + +diff --git a/drivers/pci/host/pci-xgene-msi.c b/drivers/pci/host/pci-xgene-msi.c +new file mode 100644 +index 0000000..8e559d1 +--- /dev/null ++++ b/drivers/pci/host/pci-xgene-msi.c +@@ -0,0 +1,595 @@ ++/* ++ * APM X-Gene MSI Driver ++ * ++ * Copyright (c) 2014, Applied Micro Circuits Corporation ++ * Author: Tanmay Inamdar ++ * Duc Dang ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2 of the License, or (at your ++ * option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define MSI_IR0 0x000000 ++#define MSI_INT0 0x800000 ++#define IDX_PER_GROUP 8 ++#define IRQS_PER_IDX 16 ++#define NR_HW_IRQS 16 ++#define NR_MSI_VEC (IDX_PER_GROUP * IRQS_PER_IDX * NR_HW_IRQS) ++ ++struct xgene_msi_group { ++ struct xgene_msi *msi; ++ int gic_irq; ++ u32 msi_grp; ++}; ++ ++struct xgene_msi { ++ struct device_node *node; ++ struct msi_controller mchip; ++ struct irq_domain *domain; ++ u64 msi_addr; ++ void __iomem *msi_regs; ++ unsigned long *bitmap; ++ struct mutex bitmap_lock; ++ struct xgene_msi_group *msi_groups; ++ int num_cpus; ++}; ++ ++/* Global data */ ++static struct xgene_msi xgene_msi_ctrl; ++ ++static struct irq_chip xgene_msi_top_irq_chip = { ++ .name = "X-Gene1 MSI", ++ .irq_enable = pci_msi_unmask_irq, ++ .irq_disable = pci_msi_mask_irq, ++ .irq_mask = pci_msi_mask_irq, ++ .irq_unmask = pci_msi_unmask_irq, ++}; ++ ++static struct msi_domain_info xgene_msi_domain_info = { ++ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | ++ MSI_FLAG_PCI_MSIX), ++ .chip = &xgene_msi_top_irq_chip, ++}; ++ ++/* ++ * X-Gene v1 has 16 groups of MSI termination registers MSInIRx, where ++ * n is group number (0..F), x is index of registers in each group (0..7) ++ * The register layout is as follows: ++ * MSI0IR0 base_addr ++ * MSI0IR1 base_addr + 0x10000 ++ * ... ... ++ * MSI0IR6 base_addr + 0x60000 ++ * MSI0IR7 base_addr + 0x70000 ++ * MSI1IR0 base_addr + 0x80000 ++ * MSI1IR1 base_addr + 0x90000 ++ * ... ... ++ * MSI1IR7 base_addr + 0xF0000 ++ * MSI2IR0 base_addr + 0x100000 ++ * ... ... ++ * MSIFIR0 base_addr + 0x780000 ++ * MSIFIR1 base_addr + 0x790000 ++ * ... ... ++ * MSIFIR7 base_addr + 0x7F0000 ++ * MSIINT0 base_addr + 0x800000 ++ * MSIINT1 base_addr + 0x810000 ++ * ... ... ++ * MSIINTF base_addr + 0x8F0000 ++ * ++ * Each index register supports 16 MSI vectors (0..15) to generate interrupt. ++ * There are total 16 GIC IRQs assigned for these 16 groups of MSI termination ++ * registers. ++ * ++ * Each MSI termination group has 1 MSIINTn register (n is 0..15) to indicate ++ * the MSI pending status caused by 1 of its 8 index registers. ++ */ ++ ++/* MSInIRx read helper */ ++static u32 xgene_msi_ir_read(struct xgene_msi *msi, ++ u32 msi_grp, u32 msir_idx) ++{ ++ return readl_relaxed(msi->msi_regs + MSI_IR0 + ++ (msi_grp << 19) + (msir_idx << 16)); ++} ++ ++/* MSIINTn read helper */ ++static u32 xgene_msi_int_read(struct xgene_msi *msi, u32 msi_grp) ++{ ++ return readl_relaxed(msi->msi_regs + MSI_INT0 + (msi_grp << 16)); ++} ++ ++/* ++ * With 2048 MSI vectors supported, the MSI message can be constructed using ++ * following scheme: ++ * - Divide into 8 256-vector groups ++ * Group 0: 0-255 ++ * Group 1: 256-511 ++ * Group 2: 512-767 ++ * ... ++ * Group 7: 1792-2047 ++ * - Each 256-vector group is divided into 16 16-vector groups ++ * As an example: 16 16-vector groups for 256-vector group 0-255 is ++ * Group 0: 0-15 ++ * Group 1: 16-32 ++ * ... 
++ * Group 15: 240-255 ++ * - The termination address of MSI vector in 256-vector group n and 16-vector ++ * group x is the address of MSIxIRn ++ * - The data for MSI vector in 16-vector group x is x ++ */ ++static u32 hwirq_to_reg_set(unsigned long hwirq) ++{ ++ return (hwirq / (NR_HW_IRQS * IRQS_PER_IDX)); ++} ++ ++static u32 hwirq_to_group(unsigned long hwirq) ++{ ++ return (hwirq % NR_HW_IRQS); ++} ++ ++static u32 hwirq_to_msi_data(unsigned long hwirq) ++{ ++ return ((hwirq / NR_HW_IRQS) % IRQS_PER_IDX); ++} ++ ++static void xgene_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) ++{ ++ struct xgene_msi *msi = irq_data_get_irq_chip_data(data); ++ u32 reg_set = hwirq_to_reg_set(data->hwirq); ++ u32 group = hwirq_to_group(data->hwirq); ++ u64 target_addr = msi->msi_addr + (((8 * group) + reg_set) << 16); ++ ++ msg->address_hi = upper_32_bits(target_addr); ++ msg->address_lo = lower_32_bits(target_addr); ++ msg->data = hwirq_to_msi_data(data->hwirq); ++} ++ ++/* ++ * X-Gene v1 only has 16 MSI GIC IRQs for 2048 MSI vectors. To maintain ++ * the expected behaviour of .set_affinity for each MSI interrupt, the 16 ++ * MSI GIC IRQs are statically allocated to 8 X-Gene v1 cores (2 GIC IRQs ++ * for each core). The MSI vector is moved fom 1 MSI GIC IRQ to another ++ * MSI GIC IRQ to steer its MSI interrupt to correct X-Gene v1 core. As a ++ * consequence, the total MSI vectors that X-Gene v1 supports will be ++ * reduced to 256 (2048/8) vectors. ++ */ ++static int hwirq_to_cpu(unsigned long hwirq) ++{ ++ return (hwirq % xgene_msi_ctrl.num_cpus); ++} ++ ++static unsigned long hwirq_to_canonical_hwirq(unsigned long hwirq) ++{ ++ return (hwirq - hwirq_to_cpu(hwirq)); ++} ++ ++static int xgene_msi_set_affinity(struct irq_data *irqdata, ++ const struct cpumask *mask, bool force) ++{ ++ int target_cpu = cpumask_first(mask); ++ int curr_cpu; ++ ++ curr_cpu = hwirq_to_cpu(irqdata->hwirq); ++ if (curr_cpu == target_cpu) ++ return IRQ_SET_MASK_OK_DONE; ++ ++ /* Update MSI number to target the new CPU */ ++ irqdata->hwirq = hwirq_to_canonical_hwirq(irqdata->hwirq) + target_cpu; ++ ++ return IRQ_SET_MASK_OK; ++} ++ ++static struct irq_chip xgene_msi_bottom_irq_chip = { ++ .name = "MSI", ++ .irq_set_affinity = xgene_msi_set_affinity, ++ .irq_compose_msi_msg = xgene_compose_msi_msg, ++}; ++ ++static int xgene_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, ++ unsigned int nr_irqs, void *args) ++{ ++ struct xgene_msi *msi = domain->host_data; ++ int msi_irq; ++ ++ mutex_lock(&msi->bitmap_lock); ++ ++ msi_irq = bitmap_find_next_zero_area(msi->bitmap, NR_MSI_VEC, 0, ++ msi->num_cpus, 0); ++ if (msi_irq < NR_MSI_VEC) ++ bitmap_set(msi->bitmap, msi_irq, msi->num_cpus); ++ else ++ msi_irq = -ENOSPC; ++ ++ mutex_unlock(&msi->bitmap_lock); ++ ++ if (msi_irq < 0) ++ return msi_irq; ++ ++ irq_domain_set_info(domain, virq, msi_irq, ++ &xgene_msi_bottom_irq_chip, domain->host_data, ++ handle_simple_irq, NULL, NULL); ++ ++ return 0; ++} ++ ++static void xgene_irq_domain_free(struct irq_domain *domain, ++ unsigned int virq, unsigned int nr_irqs) ++{ ++ struct irq_data *d = irq_domain_get_irq_data(domain, virq); ++ struct xgene_msi *msi = irq_data_get_irq_chip_data(d); ++ u32 hwirq; ++ ++ mutex_lock(&msi->bitmap_lock); ++ ++ hwirq = hwirq_to_canonical_hwirq(d->hwirq); ++ bitmap_clear(msi->bitmap, hwirq, msi->num_cpus); ++ ++ mutex_unlock(&msi->bitmap_lock); ++ ++ irq_domain_free_irqs_parent(domain, virq, nr_irqs); ++} ++ ++static const struct irq_domain_ops msi_domain_ops = { ++ .alloc = 
xgene_irq_domain_alloc, ++ .free = xgene_irq_domain_free, ++}; ++ ++static int xgene_allocate_domains(struct xgene_msi *msi) ++{ ++ msi->domain = irq_domain_add_linear(NULL, NR_MSI_VEC, ++ &msi_domain_ops, msi); ++ if (!msi->domain) ++ return -ENOMEM; ++ ++ msi->mchip.domain = pci_msi_create_irq_domain(msi->mchip.of_node, ++ &xgene_msi_domain_info, ++ msi->domain); ++ ++ if (!msi->mchip.domain) { ++ irq_domain_remove(msi->domain); ++ return -ENOMEM; ++ } ++ ++ return 0; ++} ++ ++static void xgene_free_domains(struct xgene_msi *msi) ++{ ++ if (msi->mchip.domain) ++ irq_domain_remove(msi->mchip.domain); ++ if (msi->domain) ++ irq_domain_remove(msi->domain); ++} ++ ++static int xgene_msi_init_allocator(struct xgene_msi *xgene_msi) ++{ ++ int size = BITS_TO_LONGS(NR_MSI_VEC) * sizeof(long); ++ ++ xgene_msi->bitmap = kzalloc(size, GFP_KERNEL); ++ if (!xgene_msi->bitmap) ++ return -ENOMEM; ++ ++ mutex_init(&xgene_msi->bitmap_lock); ++ ++ xgene_msi->msi_groups = kcalloc(NR_HW_IRQS, ++ sizeof(struct xgene_msi_group), ++ GFP_KERNEL); ++ if (!xgene_msi->msi_groups) ++ return -ENOMEM; ++ ++ return 0; ++} ++ ++static void xgene_msi_isr(unsigned int irq, struct irq_desc *desc) ++{ ++ struct irq_chip *chip = irq_desc_get_chip(desc); ++ struct xgene_msi_group *msi_groups; ++ struct xgene_msi *xgene_msi; ++ unsigned int virq; ++ int msir_index, msir_val, hw_irq; ++ u32 intr_index, grp_select, msi_grp; ++ ++ chained_irq_enter(chip, desc); ++ ++ msi_groups = irq_desc_get_handler_data(desc); ++ xgene_msi = msi_groups->msi; ++ msi_grp = msi_groups->msi_grp; ++ ++ /* ++ * MSIINTn (n is 0..F) indicates if there is a pending MSI interrupt ++ * If bit x of this register is set (x is 0..7), one or more interupts ++ * corresponding to MSInIRx is set. ++ */ ++ grp_select = xgene_msi_int_read(xgene_msi, msi_grp); ++ while (grp_select) { ++ msir_index = ffs(grp_select) - 1; ++ /* ++ * Calculate MSInIRx address to read to check for interrupts ++ * (refer to termination address and data assignment ++ * described in xgene_compose_msi_msg() ) ++ */ ++ msir_val = xgene_msi_ir_read(xgene_msi, msi_grp, msir_index); ++ while (msir_val) { ++ intr_index = ffs(msir_val) - 1; ++ /* ++ * Calculate MSI vector number (refer to the termination ++ * address and data assignment described in ++ * xgene_compose_msi_msg function) ++ */ ++ hw_irq = (((msir_index * IRQS_PER_IDX) + intr_index) * ++ NR_HW_IRQS) + msi_grp; ++ /* ++ * As we have multiple hw_irq that maps to single MSI, ++ * always look up the virq using the hw_irq as seen from ++ * CPU0 ++ */ ++ hw_irq = hwirq_to_canonical_hwirq(hw_irq); ++ virq = irq_find_mapping(xgene_msi->domain, hw_irq); ++ WARN_ON(!virq); ++ if (virq != 0) ++ generic_handle_irq(virq); ++ msir_val &= ~(1 << intr_index); ++ } ++ grp_select &= ~(1 << msir_index); ++ ++ if (!grp_select) { ++ /* ++ * We handled all interrupts happened in this group, ++ * resample this group MSI_INTx register in case ++ * something else has been made pending in the meantime ++ */ ++ grp_select = xgene_msi_int_read(xgene_msi, msi_grp); ++ } ++ } ++ ++ chained_irq_exit(chip, desc); ++} ++ ++static int xgene_msi_remove(struct platform_device *pdev) ++{ ++ int virq, i; ++ struct xgene_msi *msi = platform_get_drvdata(pdev); ++ ++ for (i = 0; i < NR_HW_IRQS; i++) { ++ virq = msi->msi_groups[i].gic_irq; ++ if (virq != 0) { ++ irq_set_chained_handler(virq, NULL); ++ irq_set_handler_data(virq, NULL); ++ } ++ } ++ kfree(msi->msi_groups); ++ ++ kfree(msi->bitmap); ++ msi->bitmap = NULL; ++ ++ xgene_free_domains(msi); ++ ++ return 0; ++} ++ 
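++/*
++ * Runs once per online CPU (and again from the hotplug notifier):
++ * installs the chained xgene_msi_isr handler on the GIC IRQs that are
++ * statically assigned to this CPU and pins their affinity to it. This
++ * static CPU-to-GIC-IRQ assignment is what xgene_msi_set_affinity()
++ * relies on when it re-targets a vector by renumbering its hwirq.
++ */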
++static int xgene_msi_hwirq_alloc(unsigned int cpu) ++{ ++ struct xgene_msi *msi = &xgene_msi_ctrl; ++ struct xgene_msi_group *msi_group; ++ cpumask_var_t mask; ++ int i; ++ int err; ++ ++ for (i = cpu; i < NR_HW_IRQS; i += msi->num_cpus) { ++ msi_group = &msi->msi_groups[i]; ++ if (!msi_group->gic_irq) ++ continue; ++ ++ irq_set_chained_handler(msi_group->gic_irq, ++ xgene_msi_isr); ++ err = irq_set_handler_data(msi_group->gic_irq, msi_group); ++ if (err) { ++ pr_err("failed to register GIC IRQ handler\n"); ++ return -EINVAL; ++ } ++ /* ++ * Statically allocate MSI GIC IRQs to each CPU core. ++ * With 8-core X-Gene v1, 2 MSI GIC IRQs are allocated ++ * to each core. ++ */ ++ if (alloc_cpumask_var(&mask, GFP_KERNEL)) { ++ cpumask_clear(mask); ++ cpumask_set_cpu(cpu, mask); ++ err = irq_set_affinity(msi_group->gic_irq, mask); ++ if (err) ++ pr_err("failed to set affinity for GIC IRQ"); ++ free_cpumask_var(mask); ++ } else { ++ pr_err("failed to alloc CPU mask for affinity\n"); ++ err = -EINVAL; ++ } ++ ++ if (err) { ++ irq_set_chained_handler(msi_group->gic_irq, NULL); ++ irq_set_handler_data(msi_group->gic_irq, NULL); ++ return err; ++ } ++ } ++ ++ return 0; ++} ++ ++static void xgene_msi_hwirq_free(unsigned int cpu) ++{ ++ struct xgene_msi *msi = &xgene_msi_ctrl; ++ struct xgene_msi_group *msi_group; ++ int i; ++ ++ for (i = cpu; i < NR_HW_IRQS; i += msi->num_cpus) { ++ msi_group = &msi->msi_groups[i]; ++ if (!msi_group->gic_irq) ++ continue; ++ ++ irq_set_chained_handler(msi_group->gic_irq, NULL); ++ irq_set_handler_data(msi_group->gic_irq, NULL); ++ } ++} ++ ++static int xgene_msi_cpu_callback(struct notifier_block *nfb, ++ unsigned long action, void *hcpu) ++{ ++ unsigned cpu = (unsigned long)hcpu; ++ ++ switch (action) { ++ case CPU_ONLINE: ++ case CPU_ONLINE_FROZEN: ++ xgene_msi_hwirq_alloc(cpu); ++ break; ++ case CPU_DEAD: ++ case CPU_DEAD_FROZEN: ++ xgene_msi_hwirq_free(cpu); ++ break; ++ default: ++ break; ++ } ++ ++ return NOTIFY_OK; ++} ++ ++static struct notifier_block xgene_msi_cpu_notifier = { ++ .notifier_call = xgene_msi_cpu_callback, ++}; ++ ++static const struct of_device_id xgene_msi_match_table[] = { ++ {.compatible = "apm,xgene1-msi"}, ++ {}, ++}; ++ ++static int xgene_msi_probe(struct platform_device *pdev) ++{ ++ struct resource *res; ++ int rc, irq_index; ++ struct xgene_msi *xgene_msi; ++ unsigned int cpu; ++ int virt_msir; ++ u32 msi_val, msi_idx; ++ ++ xgene_msi = &xgene_msi_ctrl; ++ ++ platform_set_drvdata(pdev, xgene_msi); ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ xgene_msi->msi_regs = devm_ioremap_resource(&pdev->dev, res); ++ if (IS_ERR(xgene_msi->msi_regs)) { ++ dev_err(&pdev->dev, "no reg space\n"); ++ rc = -EINVAL; ++ goto error; ++ } ++ xgene_msi->msi_addr = res->start; ++ ++ xgene_msi->num_cpus = num_possible_cpus(); ++ ++ rc = xgene_msi_init_allocator(xgene_msi); ++ if (rc) { ++ dev_err(&pdev->dev, "Error allocating MSI bitmap\n"); ++ goto error; ++ } ++ ++ rc = xgene_allocate_domains(xgene_msi); ++ if (rc) { ++ dev_err(&pdev->dev, "Failed to allocate MSI domain\n"); ++ goto error; ++ } ++ ++ for (irq_index = 0; irq_index < NR_HW_IRQS; irq_index++) { ++ virt_msir = platform_get_irq(pdev, irq_index); ++ if (virt_msir < 0) { ++ dev_err(&pdev->dev, "Cannot translate IRQ index %d\n", ++ irq_index); ++ rc = -EINVAL; ++ goto error; ++ } ++ xgene_msi->msi_groups[irq_index].gic_irq = virt_msir; ++ xgene_msi->msi_groups[irq_index].msi_grp = irq_index; ++ xgene_msi->msi_groups[irq_index].msi = xgene_msi; ++ } ++ ++ /* ++ * MSInIRx registers 
are read-to-clear; before registering ++ * interrupt handlers, read all of them to clear spurious ++ * interrupts that may occur before the driver is probed. ++ */ ++ for (irq_index = 0; irq_index < NR_HW_IRQS; irq_index++) { ++ for (msi_idx = 0; msi_idx < IDX_PER_GROUP; msi_idx++) ++ msi_val = xgene_msi_ir_read(xgene_msi, irq_index, ++ msi_idx); ++ /* Read MSIINTn to confirm */ ++ msi_val = xgene_msi_int_read(xgene_msi, irq_index); ++ if (msi_val) { ++ dev_err(&pdev->dev, "Failed to clear spurious IRQ\n"); ++ rc = -EINVAL; ++ goto error; ++ } ++ } ++ ++ cpu_notifier_register_begin(); ++ ++ for_each_online_cpu(cpu) ++ if (xgene_msi_hwirq_alloc(cpu)) { ++ dev_err(&pdev->dev, "failed to register MSI handlers\n"); ++ cpu_notifier_register_done(); ++ goto error; ++ } ++ ++ rc = __register_hotcpu_notifier(&xgene_msi_cpu_notifier); ++ if (rc) { ++ dev_err(&pdev->dev, "failed to add CPU MSI notifier\n"); ++ cpu_notifier_register_done(); ++ goto error; ++ } ++ ++ cpu_notifier_register_done(); ++ ++ xgene_msi->mchip.of_node = pdev->dev.of_node; ++ rc = of_pci_msi_chip_add(&xgene_msi->mchip); ++ if (rc) { ++ dev_err(&pdev->dev, "failed to add MSI controller chip\n"); ++ goto error_notifier; ++ } ++ ++ dev_info(&pdev->dev, "APM X-Gene PCIe MSI driver loaded\n"); ++ ++ return 0; ++ ++error_notifier: ++ unregister_hotcpu_notifier(&xgene_msi_cpu_notifier); ++error: ++ xgene_msi_remove(pdev); ++ return rc; ++} ++ ++static struct platform_driver xgene_msi_driver = { ++ .driver = { ++ .name = "xgene-msi", ++ .owner = THIS_MODULE, ++ .of_match_table = xgene_msi_match_table, ++ }, ++ .probe = xgene_msi_probe, ++ .remove = xgene_msi_remove, ++}; ++ ++static int __init xgene_pcie_msi_init(void) ++{ ++ return platform_driver_register(&xgene_msi_driver); ++} ++subsys_initcall(xgene_pcie_msi_init); +diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c +index 2988fe1..0dac1fb 100644 +--- a/drivers/pci/host/pci-xgene.c ++++ b/drivers/pci/host/pci-xgene.c +@@ -401,11 +401,11 @@ static int xgene_pcie_map_ranges(struct xgene_pcie_port *port, + struct list_head *res, + resource_size_t io_base) + { +- struct pci_host_bridge_window *window; ++ struct resource_entry *window; + struct device *dev = port->dev; + int ret; + +- list_for_each_entry(window, res, list) { ++ resource_list_for_each_entry(window, res) { + struct resource *res = window->res; + u64 restype = resource_type(res); + +@@ -600,6 +600,23 @@ static int xgene_pcie_setup(struct xgene_pcie_port *port, + return 0; + } + ++static int xgene_pcie_msi_enable(struct pci_bus *bus) ++{ ++ struct device_node *msi_node; ++ ++ msi_node = of_parse_phandle(bus->dev.of_node, ++ "msi-parent", 0); ++ if (!msi_node) ++ return -ENODEV; ++ ++ bus->msi = of_pci_find_msi_chip_by_node(msi_node); ++ if (!bus->msi) ++ return -ENODEV; ++ ++ bus->msi->dev = &bus->dev; ++ return 0; ++} ++ + static int xgene_pcie_probe_bridge(struct platform_device *pdev) + { + struct device_node *dn = pdev->dev.of_node; +@@ -636,6 +653,10 @@ static int xgene_pcie_probe_bridge(struct platform_device *pdev) + if (!bus) + return -ENOMEM; + ++ if (IS_ENABLED(CONFIG_PCI_MSI)) ++ if (xgene_pcie_msi_enable(bus)) ++ dev_info(port->dev, "failed to enable MSI\n"); ++ + pci_scan_child_bus(bus); + pci_assign_unassigned_bus_resources(bus); + pci_bus_add_devices(bus); +diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c +index f69b0d0..8a9241b 100644 +--- a/drivers/pci/host/pcie-designware.c ++++ b/drivers/pci/host/pcie-designware.c +@@ -15,7 +15,6 @@ + #include + 
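/*
 * Illustrative aside (editor's sketch, not part of the vendored patch):
 * the X-Gene wiring above assumes the standard "msi-parent" binding.
 * xgene_msi_probe() registers the controller with of_pci_msi_chip_add(),
 * and xgene_pcie_msi_enable() resolves the phandle from the root-complex
 * node via of_parse_phandle()/of_pci_find_msi_chip_by_node(). A minimal
 * device-tree sketch, with hypothetical node names and addresses:
 *
 *	msi: msi@79000000 {
 *		compatible = "apm,xgene1-msi";
 *	};
 *
 *	pcie0: pcie@1f2b0000 {
 *		msi-parent = <&msi>;
 *	};
 */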
#include + #include +-#include + #include + #include + #include +@@ -31,6 +30,7 @@ + #define PORT_LINK_MODE_1_LANES (0x1 << 16) + #define PORT_LINK_MODE_2_LANES (0x3 << 16) + #define PORT_LINK_MODE_4_LANES (0x7 << 16) ++#define PORT_LINK_MODE_8_LANES (0xf << 16) + + #define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C + #define PORT_LOGIC_SPEED_CHANGE (0x1 << 17) +@@ -38,12 +38,7 @@ + #define PORT_LOGIC_LINK_WIDTH_1_LANES (0x1 << 8) + #define PORT_LOGIC_LINK_WIDTH_2_LANES (0x2 << 8) + #define PORT_LOGIC_LINK_WIDTH_4_LANES (0x4 << 8) +- +-#define PCIE_MSI_ADDR_LO 0x820 +-#define PCIE_MSI_ADDR_HI 0x824 +-#define PCIE_MSI_INTR0_ENABLE 0x828 +-#define PCIE_MSI_INTR0_MASK 0x82C +-#define PCIE_MSI_INTR0_STATUS 0x830 ++#define PORT_LOGIC_LINK_WIDTH_8_LANES (0x8 << 8) + + #define PCIE_ATU_VIEWPORT 0x900 + #define PCIE_ATU_REGION_INBOUND (0x1 << 31) +@@ -67,39 +62,40 @@ + #define PCIE_ATU_FUNC(x) (((x) & 0x7) << 16) + #define PCIE_ATU_UPPER_TARGET 0x91C + +-static struct hw_pci dw_pci; +- +-static unsigned long global_io_offset; +- +-static inline struct pcie_port *sys_to_pcie(struct pci_sys_data *sys) +-{ +- BUG_ON(!sys->private_data); +- +- return sys->private_data; +-} ++static struct pci_ops dw_pcie_ops; + +-int dw_pcie_cfg_read(void __iomem *addr, int where, int size, u32 *val) ++int dw_pcie_cfg_read(void __iomem *addr, int size, u32 *val) + { +- *val = readl(addr); ++ if ((uintptr_t)addr & (size - 1)) { ++ *val = 0; ++ return PCIBIOS_BAD_REGISTER_NUMBER; ++ } + +- if (size == 1) +- *val = (*val >> (8 * (where & 3))) & 0xff; ++ if (size == 4) ++ *val = readl(addr); + else if (size == 2) +- *val = (*val >> (8 * (where & 3))) & 0xffff; +- else if (size != 4) ++ *val = readw(addr); ++ else if (size == 1) ++ *val = readb(addr); ++ else { ++ *val = 0; + return PCIBIOS_BAD_REGISTER_NUMBER; ++ } + + return PCIBIOS_SUCCESSFUL; + } + +-int dw_pcie_cfg_write(void __iomem *addr, int where, int size, u32 val) ++int dw_pcie_cfg_write(void __iomem *addr, int size, u32 val) + { ++ if ((uintptr_t)addr & (size - 1)) ++ return PCIBIOS_BAD_REGISTER_NUMBER; ++ + if (size == 4) + writel(val, addr); + else if (size == 2) +- writew(val, addr + (where & 2)); ++ writew(val, addr); + else if (size == 1) +- writeb(val, addr + (where & 3)); ++ writeb(val, addr); + else + return PCIBIOS_BAD_REGISTER_NUMBER; + +@@ -130,8 +126,7 @@ static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, + if (pp->ops->rd_own_conf) + ret = pp->ops->rd_own_conf(pp, where, size, val); + else +- ret = dw_pcie_cfg_read(pp->dbi_base + (where & ~0x3), where, +- size, val); ++ ret = dw_pcie_cfg_read(pp->dbi_base + where, size, val); + + return ret; + } +@@ -144,182 +139,26 @@ static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size, + if (pp->ops->wr_own_conf) + ret = pp->ops->wr_own_conf(pp, where, size, val); + else +- ret = dw_pcie_cfg_write(pp->dbi_base + (where & ~0x3), where, +- size, val); ++ ret = dw_pcie_cfg_write(pp->dbi_base + where, size, val); + + return ret; + } + +-static struct irq_chip dw_msi_irq_chip = { +- .name = "PCI-MSI", +- .irq_enable = unmask_msi_irq, +- .irq_disable = mask_msi_irq, +- .irq_mask = mask_msi_irq, +- .irq_unmask = unmask_msi_irq, +-}; +- +-/* MSI int handler */ +-irqreturn_t dw_handle_msi_irq(struct pcie_port *pp) ++static void dw_pcie_prog_outbound_atu(struct pcie_port *pp, int index, ++ int type, u64 cpu_addr, u64 pci_addr, u32 size) + { +- unsigned long val; +- int i, pos, irq; +- irqreturn_t ret = IRQ_NONE; +- +- for (i = 0; i < MAX_MSI_CTRLS; i++) { +- dw_pcie_rd_own_conf(pp, 
PCIE_MSI_INTR0_STATUS + i * 12, 4, +- (u32 *)&val); +- if (val) { +- ret = IRQ_HANDLED; +- pos = 0; +- while ((pos = find_next_bit(&val, 32, pos)) != 32) { +- irq = irq_find_mapping(pp->irq_domain, +- i * 32 + pos); +- dw_pcie_wr_own_conf(pp, +- PCIE_MSI_INTR0_STATUS + i * 12, +- 4, 1 << pos); +- generic_handle_irq(irq); +- pos++; +- } +- } +- } +- +- return ret; +-} +- +-void dw_pcie_msi_init(struct pcie_port *pp) +-{ +- pp->msi_data = __get_free_pages(GFP_KERNEL, 0); +- +- /* program the msi_data */ +- dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4, +- virt_to_phys((void *)pp->msi_data)); +- dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4, 0); +-} +- +-static void dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq) +-{ +- unsigned int res, bit, val; +- +- res = (irq / 32) * 12; +- bit = irq % 32; +- dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val); +- val &= ~(1 << bit); +- dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val); +-} +- +-static void clear_irq_range(struct pcie_port *pp, unsigned int irq_base, +- unsigned int nvec, unsigned int pos) +-{ +- unsigned int i; +- +- for (i = 0; i < nvec; i++) { +- irq_set_msi_desc_off(irq_base, i, NULL); +- /* Disable corresponding interrupt on MSI controller */ +- if (pp->ops->msi_clear_irq) +- pp->ops->msi_clear_irq(pp, pos + i); +- else +- dw_pcie_msi_clear_irq(pp, pos + i); +- } +- +- bitmap_release_region(pp->msi_irq_in_use, pos, order_base_2(nvec)); +-} +- +-static void dw_pcie_msi_set_irq(struct pcie_port *pp, int irq) +-{ +- unsigned int res, bit, val; +- +- res = (irq / 32) * 12; +- bit = irq % 32; +- dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val); +- val |= 1 << bit; +- dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val); +-} +- +-static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos) +-{ +- int irq, pos0, i; +- struct pcie_port *pp = sys_to_pcie(desc->dev->bus->sysdata); +- +- pos0 = bitmap_find_free_region(pp->msi_irq_in_use, MAX_MSI_IRQS, +- order_base_2(no_irqs)); +- if (pos0 < 0) +- goto no_valid_irq; +- +- irq = irq_find_mapping(pp->irq_domain, pos0); +- if (!irq) +- goto no_valid_irq; +- +- /* +- * irq_create_mapping (called from dw_pcie_host_init) pre-allocates +- * descs so there is no need to allocate descs here. We can therefore +- * assume that if irq_find_mapping above returns non-zero, then the +- * descs are also successfully allocated. 
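/*
 * Illustrative aside (editor's sketch, not part of the vendored patch):
 * the rewritten dw_pcie_cfg_read()/dw_pcie_cfg_write() above take an
 * address already offset by "where" and insist on naturally aligned
 * accesses instead of shifting a 32-bit read. For example, a 2-byte read
 * of PCI_STATUS (offset 0x06, 2-byte aligned):
 *
 *	u32 val;
 *	int ret = dw_pcie_cfg_read(pp->dbi_base + PCI_STATUS, 2, &val);
 *	// ret == PCIBIOS_SUCCESSFUL; val holds the 16-bit status
 *
 * whereas the same call at an odd offset (say 0x05) now fails with
 * PCIBIOS_BAD_REGISTER_NUMBER and returns *val == 0.
 */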
+- */ +- +- for (i = 0; i < no_irqs; i++) { +- if (irq_set_msi_desc_off(irq, i, desc) != 0) { +- clear_irq_range(pp, irq, i, pos0); +- goto no_valid_irq; +- } +- /*Enable corresponding interrupt in MSI interrupt controller */ +- if (pp->ops->msi_set_irq) +- pp->ops->msi_set_irq(pp, pos0 + i); +- else +- dw_pcie_msi_set_irq(pp, pos0 + i); +- } +- +- *pos = pos0; +- return irq; +- +-no_valid_irq: +- *pos = pos0; +- return -ENOSPC; +-} +- +-static int dw_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev, +- struct msi_desc *desc) +-{ +- int irq, pos; +- struct msi_msg msg; +- struct pcie_port *pp = sys_to_pcie(pdev->bus->sysdata); +- +- if (desc->msi_attrib.is_msix) +- return -EINVAL; +- +- irq = assign_irq(1, desc, &pos); +- if (irq < 0) +- return irq; +- +- if (pp->ops->get_msi_addr) +- msg.address_lo = pp->ops->get_msi_addr(pp); +- else +- msg.address_lo = virt_to_phys((void *)pp->msi_data); +- msg.address_hi = 0x0; +- +- if (pp->ops->get_msi_data) +- msg.data = pp->ops->get_msi_data(pp, pos); +- else +- msg.data = pos; +- +- write_msi_msg(irq, &msg); +- +- return 0; +-} +- +-static void dw_msi_teardown_irq(struct msi_chip *chip, unsigned int irq) +-{ +- struct irq_data *data = irq_get_irq_data(irq); +- struct msi_desc *msi = irq_data_get_msi(data); +- struct pcie_port *pp = sys_to_pcie(msi->dev->bus->sysdata); +- +- clear_irq_range(pp, irq, 1, data->hwirq); ++ dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | index, ++ PCIE_ATU_VIEWPORT); ++ dw_pcie_writel_rc(pp, lower_32_bits(cpu_addr), PCIE_ATU_LOWER_BASE); ++ dw_pcie_writel_rc(pp, upper_32_bits(cpu_addr), PCIE_ATU_UPPER_BASE); ++ dw_pcie_writel_rc(pp, lower_32_bits(cpu_addr + size - 1), ++ PCIE_ATU_LIMIT); ++ dw_pcie_writel_rc(pp, lower_32_bits(pci_addr), PCIE_ATU_LOWER_TARGET); ++ dw_pcie_writel_rc(pp, upper_32_bits(pci_addr), PCIE_ATU_UPPER_TARGET); ++ dw_pcie_writel_rc(pp, type, PCIE_ATU_CR1); ++ dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2); + } + +-static struct msi_chip dw_pcie_msi_chip = { +- .setup_irq = dw_msi_setup_irq, +- .teardown_irq = dw_msi_teardown_irq, +-}; +- + int dw_pcie_link_up(struct pcie_port *pp) + { + if (pp->ops->link_up) +@@ -328,36 +167,42 @@ int dw_pcie_link_up(struct pcie_port *pp) + return 0; + } + +-static int dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq, +- irq_hw_number_t hwirq) ++static int dw_pcie_msi_ctrl_init(struct pcie_port *pp) + { +- irq_set_chip_and_handler(irq, &dw_msi_irq_chip, handle_simple_irq); +- irq_set_chip_data(irq, domain->host_data); +- set_irq_flags(irq, IRQF_VALID); ++ struct device_node *msi_node; ++ ++ if (!IS_ENABLED(CONFIG_PCI_MSI)) { ++ pp->msi = NULL; ++ return 0; ++ } ++ ++ if (pp->msi) ++ return 0; ++ ++ msi_node = of_parse_phandle(pp->dev->of_node, "msi-parent", 0); ++ if (msi_node) { ++ pp->msi = of_pci_find_msi_chip_by_node(msi_node); ++ if (!pp->msi) { ++ dev_err(pp->dev, "Cannot find msi chip of %s\n", ++ msi_node->full_name); ++ return -ENODEV; ++ } else ++ return 0; ++ } + + return 0; + } + +-static const struct irq_domain_ops msi_domain_ops = { +- .map = dw_pcie_msi_map, +-}; +- + int dw_pcie_host_init(struct pcie_port *pp) + { + struct device_node *np = pp->dev->of_node; + struct platform_device *pdev = to_platform_device(pp->dev); +- struct of_pci_range range; +- struct of_pci_range_parser parser; ++ struct pci_bus *bus, *child; + struct resource *cfg_res; +- u32 val, na, ns; +- const __be32 *addrp; +- int i, index, ret; +- +- /* Find the address cell size and the number of cells in order to get +- * the untranslated address. 
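/*
 * Illustrative aside (editor's sketch, not part of the vendored patch):
 * the four per-purpose viewport programmers removed below collapse into
 * the single dw_pcie_prog_outbound_atu() helper added above; one ATU
 * index is simply retargeted on demand. This is the exact sequence
 * dw_pcie_rd_other_conf() uses further down for a CFG0 cycle:
 *
 *	dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
 *				  PCIE_ATU_TYPE_CFG0, pp->cfg0_base,
 *				  busdev, pp->cfg0_size);
 *	ret = dw_pcie_cfg_read(pp->va_cfg0_base + where, size, val);
 *	dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
 *				  PCIE_ATU_TYPE_IO, pp->io_base,
 *				  pp->io_bus_addr, pp->io_size);
 */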
+- */ +- of_property_read_u32(np, "#address-cells", &na); +- ns = of_n_size_cells(np); ++ u32 val; ++ int ret; ++ LIST_HEAD(res); ++ struct resource_entry *win; + + cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config"); + if (cfg_res) { +@@ -365,87 +210,61 @@ int dw_pcie_host_init(struct pcie_port *pp) + pp->cfg1_size = resource_size(cfg_res)/2; + pp->cfg0_base = cfg_res->start; + pp->cfg1_base = cfg_res->start + pp->cfg0_size; +- +- /* Find the untranslated configuration space address */ +- index = of_property_match_string(np, "reg-names", "config"); +- addrp = of_get_address(np, index, NULL, NULL); +- pp->cfg0_mod_base = of_read_number(addrp, ns); +- pp->cfg1_mod_base = pp->cfg0_mod_base + pp->cfg0_size; +- } else { ++ } else if (!pp->va_cfg0_base) { + dev_err(pp->dev, "missing *config* reg space\n"); + } + +- if (of_pci_range_parser_init(&parser, np)) { +- dev_err(pp->dev, "missing ranges property\n"); +- return -EINVAL; +- } ++ ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &res, &pp->io_base); ++ if (ret) ++ return ret; + + /* Get the I/O and memory ranges from DT */ +- for_each_of_pci_range(&parser, &range) { +- unsigned long restype = range.flags & IORESOURCE_TYPE_BITS; +- if (restype == IORESOURCE_IO) { +- of_pci_range_to_resource(&range, np, &pp->io); +- pp->io.name = "I/O"; +- pp->io.start = max_t(resource_size_t, +- PCIBIOS_MIN_IO, +- range.pci_addr + global_io_offset); +- pp->io.end = min_t(resource_size_t, +- IO_SPACE_LIMIT, +- range.pci_addr + range.size +- + global_io_offset - 1); +- pp->io_size = resource_size(&pp->io); +- pp->io_bus_addr = range.pci_addr; +- pp->io_base = range.cpu_addr; +- +- /* Find the untranslated IO space address */ +- pp->io_mod_base = of_read_number(parser.range - +- parser.np + na, ns); +- } +- if (restype == IORESOURCE_MEM) { +- of_pci_range_to_resource(&range, np, &pp->mem); +- pp->mem.name = "MEM"; +- pp->mem_size = resource_size(&pp->mem); +- pp->mem_bus_addr = range.pci_addr; +- +- /* Find the untranslated MEM space address */ +- pp->mem_mod_base = of_read_number(parser.range - +- parser.np + na, ns); +- } +- if (restype == 0) { +- of_pci_range_to_resource(&range, np, &pp->cfg); +- pp->cfg0_size = resource_size(&pp->cfg)/2; +- pp->cfg1_size = resource_size(&pp->cfg)/2; +- pp->cfg0_base = pp->cfg.start; +- pp->cfg1_base = pp->cfg.start + pp->cfg0_size; +- +- /* Find the untranslated configuration space address */ +- pp->cfg0_mod_base = of_read_number(parser.range - +- parser.np + na, ns); +- pp->cfg1_mod_base = pp->cfg0_mod_base + +- pp->cfg0_size; ++ resource_list_for_each_entry(win, &res) { ++ switch (resource_type(win->res)) { ++ case IORESOURCE_IO: ++ pp->io = win->res; ++ pp->io->name = "I/O"; ++ pp->io_size = resource_size(pp->io); ++ pp->io_bus_addr = pp->io->start - win->offset; ++ ret = pci_remap_iospace(pp->io, pp->io_base); ++ if (ret) { ++ dev_warn(pp->dev, "error %d: failed to map resource %pR\n", ++ ret, pp->io); ++ continue; ++ } ++ pp->io_base = pp->io->start; ++ break; ++ case IORESOURCE_MEM: ++ pp->mem = win->res; ++ pp->mem->name = "MEM"; ++ pp->mem_size = resource_size(pp->mem); ++ pp->mem_bus_addr = pp->mem->start - win->offset; ++ break; ++ case 0: ++ pp->cfg = win->res; ++ pp->cfg0_size = resource_size(pp->cfg)/2; ++ pp->cfg1_size = resource_size(pp->cfg)/2; ++ pp->cfg0_base = pp->cfg->start; ++ pp->cfg1_base = pp->cfg->start + pp->cfg0_size; ++ break; ++ case IORESOURCE_BUS: ++ pp->busn = win->res; ++ break; ++ default: ++ continue; + } + } + +- ret = of_pci_parse_bus_range(np, &pp->busn); +- if 
(ret < 0) { +- pp->busn.name = np->name; +- pp->busn.start = 0; +- pp->busn.end = 0xff; +- pp->busn.flags = IORESOURCE_BUS; +- dev_dbg(pp->dev, "failed to parse bus-range property: %d, using default %pR\n", +- ret, &pp->busn); +- } +- + if (!pp->dbi_base) { +- pp->dbi_base = devm_ioremap(pp->dev, pp->cfg.start, +- resource_size(&pp->cfg)); ++ pp->dbi_base = devm_ioremap(pp->dev, pp->cfg->start, ++ resource_size(pp->cfg)); + if (!pp->dbi_base) { + dev_err(pp->dev, "error with ioremap\n"); + return -ENOMEM; + } + } + +- pp->mem_base = pp->mem.start; ++ pp->mem_base = pp->mem->start; + + if (!pp->va_cfg0_base) { + pp->va_cfg0_base = devm_ioremap(pp->dev, pp->cfg0_base, +@@ -465,33 +284,18 @@ int dw_pcie_host_init(struct pcie_port *pp) + } + } + +- if (of_property_read_u32(np, "num-lanes", &pp->lanes)) { +- dev_err(pp->dev, "Failed to parse the number of lanes\n"); +- return -EINVAL; +- } +- +- if (IS_ENABLED(CONFIG_PCI_MSI)) { +- if (!pp->ops->msi_host_init) { +- pp->irq_domain = irq_domain_add_linear(pp->dev->of_node, +- MAX_MSI_IRQS, &msi_domain_ops, +- &dw_pcie_msi_chip); +- if (!pp->irq_domain) { +- dev_err(pp->dev, "irq domain init failed\n"); +- return -ENXIO; +- } +- +- for (i = 0; i < MAX_MSI_IRQS; i++) +- irq_create_mapping(pp->irq_domain, i); +- } else { +- ret = pp->ops->msi_host_init(pp, &dw_pcie_msi_chip); +- if (ret < 0) +- return ret; +- } +- } ++ ret = of_property_read_u32(np, "num-lanes", &pp->lanes); ++ if (ret) ++ pp->lanes = 0; + + if (pp->ops->host_init) + pp->ops->host_init(pp); + ++ if (!pp->ops->rd_other_conf) ++ dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1, ++ PCIE_ATU_TYPE_MEM, pp->mem_base, ++ pp->mem_bus_addr, pp->mem_size); ++ + dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0); + + /* program correct class for RC */ +@@ -501,126 +305,113 @@ int dw_pcie_host_init(struct pcie_port *pp) + val |= PORT_LOGIC_SPEED_CHANGE; + dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val); + +- dw_pci.nr_controllers = 1; +- dw_pci.private_data = (void **)&pp; ++ pp->root_bus_nr = pp->busn->start; ++#if 0 ++ bus = pci_scan_root_bus(pp->dev, pp->root_bus_nr, &dw_pcie_ops, ++ pp, &res); ++ if (!bus) ++ return -ENOMEM; ++#else ++ bus = pci_create_root_bus(pp->dev, pp->root_bus_nr, &dw_pcie_ops, ++ pp, &res); ++ if (!bus) ++ return -ENODEV; ++ ++ ret = dw_pcie_msi_ctrl_init(pp); ++ if (ret) ++ return ret; + +- pci_common_init_dev(pp->dev, &dw_pci); +-#ifdef CONFIG_PCI_DOMAINS +- dw_pci.domain++; ++ bus->msi = pp->msi; ++ ++ pci_scan_child_bus(bus); + #endif + +- return 0; +-} ++ if (pp->ops->scan_bus) ++ pp->ops->scan_bus(pp); + +-static void dw_pcie_prog_viewport_cfg0(struct pcie_port *pp, u32 busdev) +-{ +- /* Program viewport 0 : OUTBOUND : CFG0 */ +- dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0, +- PCIE_ATU_VIEWPORT); +- dw_pcie_writel_rc(pp, pp->cfg0_mod_base, PCIE_ATU_LOWER_BASE); +- dw_pcie_writel_rc(pp, (pp->cfg0_mod_base >> 32), PCIE_ATU_UPPER_BASE); +- dw_pcie_writel_rc(pp, pp->cfg0_mod_base + pp->cfg0_size - 1, +- PCIE_ATU_LIMIT); +- dw_pcie_writel_rc(pp, busdev, PCIE_ATU_LOWER_TARGET); +- dw_pcie_writel_rc(pp, 0, PCIE_ATU_UPPER_TARGET); +- dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_CFG0, PCIE_ATU_CR1); +- dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2); +-} ++#ifdef CONFIG_ARM ++ /* support old dtbs that incorrectly describe IRQs */ ++ pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci); ++#endif + +-static void dw_pcie_prog_viewport_cfg1(struct pcie_port *pp, u32 busdev) +-{ +- /* Program viewport 1 : OUTBOUND : CFG1 */ 
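/*
 * Illustrative aside (editor's guess at the rationale, not stated in the
 * vendored patch): the #if 0 above keeps the pci_create_root_bus() +
 * pci_scan_child_bus() split rather than the one-shot pci_scan_root_bus().
 * That ordering lets dw_pcie_msi_ctrl_init() resolve "msi-parent" and
 * attach the controller before any child device is scanned, so child
 * buses inherit bus->msi and MSI setup can find a chip:
 *
 *	bus = pci_create_root_bus(pp->dev, pp->root_bus_nr, &dw_pcie_ops,
 *				  pp, &res);
 *	ret = dw_pcie_msi_ctrl_init(pp);	// resolve "msi-parent"
 *	bus->msi = pp->msi;			// attach before scanning
 *	pci_scan_child_bus(bus);
 */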
+- dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1, +- PCIE_ATU_VIEWPORT); +- dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_CFG1, PCIE_ATU_CR1); +- dw_pcie_writel_rc(pp, pp->cfg1_mod_base, PCIE_ATU_LOWER_BASE); +- dw_pcie_writel_rc(pp, (pp->cfg1_mod_base >> 32), PCIE_ATU_UPPER_BASE); +- dw_pcie_writel_rc(pp, pp->cfg1_mod_base + pp->cfg1_size - 1, +- PCIE_ATU_LIMIT); +- dw_pcie_writel_rc(pp, busdev, PCIE_ATU_LOWER_TARGET); +- dw_pcie_writel_rc(pp, 0, PCIE_ATU_UPPER_TARGET); +- dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2); +-} ++ if (!pci_has_flag(PCI_PROBE_ONLY)) { ++ pci_bus_size_bridges(bus); ++ pci_bus_assign_resources(bus); + +-static void dw_pcie_prog_viewport_mem_outbound(struct pcie_port *pp) +-{ +- /* Program viewport 0 : OUTBOUND : MEM */ +- dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0, +- PCIE_ATU_VIEWPORT); +- dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_MEM, PCIE_ATU_CR1); +- dw_pcie_writel_rc(pp, pp->mem_mod_base, PCIE_ATU_LOWER_BASE); +- dw_pcie_writel_rc(pp, (pp->mem_mod_base >> 32), PCIE_ATU_UPPER_BASE); +- dw_pcie_writel_rc(pp, pp->mem_mod_base + pp->mem_size - 1, +- PCIE_ATU_LIMIT); +- dw_pcie_writel_rc(pp, pp->mem_bus_addr, PCIE_ATU_LOWER_TARGET); +- dw_pcie_writel_rc(pp, upper_32_bits(pp->mem_bus_addr), +- PCIE_ATU_UPPER_TARGET); +- dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2); +-} ++ list_for_each_entry(child, &bus->children, node) ++ pcie_bus_configure_settings(child); ++ } + +-static void dw_pcie_prog_viewport_io_outbound(struct pcie_port *pp) +-{ +- /* Program viewport 1 : OUTBOUND : IO */ +- dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1, +- PCIE_ATU_VIEWPORT); +- dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_IO, PCIE_ATU_CR1); +- dw_pcie_writel_rc(pp, pp->io_mod_base, PCIE_ATU_LOWER_BASE); +- dw_pcie_writel_rc(pp, (pp->io_mod_base >> 32), PCIE_ATU_UPPER_BASE); +- dw_pcie_writel_rc(pp, pp->io_mod_base + pp->io_size - 1, +- PCIE_ATU_LIMIT); +- dw_pcie_writel_rc(pp, pp->io_bus_addr, PCIE_ATU_LOWER_TARGET); +- dw_pcie_writel_rc(pp, upper_32_bits(pp->io_bus_addr), +- PCIE_ATU_UPPER_TARGET); +- dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2); ++ pci_bus_add_devices(bus); ++ ++ return 0; + } + + static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus, + u32 devfn, int where, int size, u32 *val) + { +- int ret = PCIBIOS_SUCCESSFUL; +- u32 address, busdev; ++ int ret, type; ++ u32 busdev, cfg_size; ++ u64 cpu_addr; ++ void __iomem *va_cfg_base; + + busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) | + PCIE_ATU_FUNC(PCI_FUNC(devfn)); +- address = where & ~0x3; + + if (bus->parent->number == pp->root_bus_nr) { +- dw_pcie_prog_viewport_cfg0(pp, busdev); +- ret = dw_pcie_cfg_read(pp->va_cfg0_base + address, where, size, +- val); +- dw_pcie_prog_viewport_mem_outbound(pp); ++ type = PCIE_ATU_TYPE_CFG0; ++ cpu_addr = pp->cfg0_base; ++ cfg_size = pp->cfg0_size; ++ va_cfg_base = pp->va_cfg0_base; + } else { +- dw_pcie_prog_viewport_cfg1(pp, busdev); +- ret = dw_pcie_cfg_read(pp->va_cfg1_base + address, where, size, +- val); +- dw_pcie_prog_viewport_io_outbound(pp); ++ type = PCIE_ATU_TYPE_CFG1; ++ cpu_addr = pp->cfg1_base; ++ cfg_size = pp->cfg1_size; ++ va_cfg_base = pp->va_cfg1_base; + } + ++ dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0, ++ type, cpu_addr, ++ busdev, cfg_size); ++ ret = dw_pcie_cfg_read(va_cfg_base + where, size, val); ++ dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0, ++ PCIE_ATU_TYPE_IO, pp->io_base, ++ pp->io_bus_addr, pp->io_size); ++ + return 
ret; + } + + static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus, + u32 devfn, int where, int size, u32 val) + { +- int ret = PCIBIOS_SUCCESSFUL; +- u32 address, busdev; ++ int ret, type; ++ u32 busdev, cfg_size; ++ u64 cpu_addr; ++ void __iomem *va_cfg_base; + + busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) | + PCIE_ATU_FUNC(PCI_FUNC(devfn)); +- address = where & ~0x3; + + if (bus->parent->number == pp->root_bus_nr) { +- dw_pcie_prog_viewport_cfg0(pp, busdev); +- ret = dw_pcie_cfg_write(pp->va_cfg0_base + address, where, size, +- val); +- dw_pcie_prog_viewport_mem_outbound(pp); ++ type = PCIE_ATU_TYPE_CFG0; ++ cpu_addr = pp->cfg0_base; ++ cfg_size = pp->cfg0_size; ++ va_cfg_base = pp->va_cfg0_base; + } else { +- dw_pcie_prog_viewport_cfg1(pp, busdev); +- ret = dw_pcie_cfg_write(pp->va_cfg1_base + address, where, size, +- val); +- dw_pcie_prog_viewport_io_outbound(pp); ++ type = PCIE_ATU_TYPE_CFG1; ++ cpu_addr = pp->cfg1_base; ++ cfg_size = pp->cfg1_size; ++ va_cfg_base = pp->va_cfg1_base; + } + ++ dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0, ++ type, cpu_addr, ++ busdev, cfg_size); ++ ret = dw_pcie_cfg_write(va_cfg_base + where, size, val); ++ dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0, ++ PCIE_ATU_TYPE_IO, pp->io_base, ++ pp->io_bus_addr, pp->io_size); ++ + return ret; + } + +@@ -650,7 +441,7 @@ static int dw_pcie_valid_config(struct pcie_port *pp, + static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where, + int size, u32 *val) + { +- struct pcie_port *pp = sys_to_pcie(bus->sysdata); ++ struct pcie_port *pp = bus->sysdata; + int ret; + + if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0) { +@@ -674,7 +465,7 @@ static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where, + static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn, + int where, int size, u32 val) + { +- struct pcie_port *pp = sys_to_pcie(bus->sysdata); ++ struct pcie_port *pp = bus->sysdata; + int ret; + + if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0) +@@ -698,75 +489,6 @@ static struct pci_ops dw_pcie_ops = { + .write = dw_pcie_wr_conf, + }; + +-static int dw_pcie_setup(int nr, struct pci_sys_data *sys) +-{ +- struct pcie_port *pp; +- +- pp = sys_to_pcie(sys); +- +- if (global_io_offset < SZ_1M && pp->io_size > 0) { +- sys->io_offset = global_io_offset - pp->io_bus_addr; +- pci_ioremap_io(global_io_offset, pp->io_base); +- global_io_offset += SZ_64K; +- pci_add_resource_offset(&sys->resources, &pp->io, +- sys->io_offset); +- } +- +- sys->mem_offset = pp->mem.start - pp->mem_bus_addr; +- pci_add_resource_offset(&sys->resources, &pp->mem, sys->mem_offset); +- pci_add_resource(&sys->resources, &pp->busn); +- +- return 1; +-} +- +-static struct pci_bus *dw_pcie_scan_bus(int nr, struct pci_sys_data *sys) +-{ +- struct pci_bus *bus; +- struct pcie_port *pp = sys_to_pcie(sys); +- +- pp->root_bus_nr = sys->busnr; +- bus = pci_create_root_bus(pp->dev, sys->busnr, +- &dw_pcie_ops, sys, &sys->resources); +- if (!bus) +- return NULL; +- +- pci_scan_child_bus(bus); +- +- if (bus && pp->ops->scan_bus) +- pp->ops->scan_bus(pp); +- +- return bus; +-} +- +-static int dw_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +-{ +- struct pcie_port *pp = sys_to_pcie(dev->bus->sysdata); +- int irq; +- +- irq = of_irq_parse_and_map_pci(dev, slot, pin); +- if (!irq) +- irq = pp->irq; +- +- return irq; +-} +- +-static void dw_pcie_add_bus(struct pci_bus *bus) +-{ +- if (IS_ENABLED(CONFIG_PCI_MSI)) { +- struct pcie_port *pp = 
sys_to_pcie(bus->sysdata); +- +- dw_pcie_msi_chip.dev = pp->dev; +- bus->msi = &dw_pcie_msi_chip; +- } +-} +- +-static struct hw_pci dw_pci = { +- .setup = dw_pcie_setup, +- .scan = dw_pcie_scan_bus, +- .map_irq = dw_pcie_map_irq, +- .add_bus = dw_pcie_add_bus, +-}; +- + void dw_pcie_setup_rc(struct pcie_port *pp) + { + u32 val; +@@ -786,6 +508,12 @@ void dw_pcie_setup_rc(struct pcie_port *pp) + case 4: + val |= PORT_LINK_MODE_4_LANES; + break; ++ case 8: ++ val |= PORT_LINK_MODE_8_LANES; ++ break; ++ default: ++ dev_err(pp->dev, "num-lanes %u: invalid value\n", pp->lanes); ++ return; + } + dw_pcie_writel_rc(pp, val, PCIE_PORT_LINK_CONTROL); + +@@ -802,6 +530,9 @@ void dw_pcie_setup_rc(struct pcie_port *pp) + case 4: + val |= PORT_LOGIC_LINK_WIDTH_4_LANES; + break; ++ case 8: ++ val |= PORT_LOGIC_LINK_WIDTH_8_LANES; ++ break; + } + dw_pcie_writel_rc(pp, val, PCIE_LINK_WIDTH_SPEED_CONTROL); + +diff --git a/drivers/pci/host/pcie-designware.h b/drivers/pci/host/pcie-designware.h +index c625675..2f01284 100644 +--- a/drivers/pci/host/pcie-designware.h ++++ b/drivers/pci/host/pcie-designware.h +@@ -27,28 +27,25 @@ struct pcie_port { + u8 root_bus_nr; + void __iomem *dbi_base; + u64 cfg0_base; +- u64 cfg0_mod_base; + void __iomem *va_cfg0_base; + u32 cfg0_size; + u64 cfg1_base; +- u64 cfg1_mod_base; + void __iomem *va_cfg1_base; + u32 cfg1_size; +- u64 io_base; +- u64 io_mod_base; ++ resource_size_t io_base; + phys_addr_t io_bus_addr; + u32 io_size; + u64 mem_base; +- u64 mem_mod_base; + phys_addr_t mem_bus_addr; + u32 mem_size; +- struct resource cfg; +- struct resource io; +- struct resource mem; +- struct resource busn; ++ struct resource *cfg; ++ struct resource *io; ++ struct resource *mem; ++ struct resource *busn; + int irq; + u32 lanes; + struct pcie_host_ops *ops; ++ struct msi_controller *msi; + int msi_irq; + struct irq_domain *irq_domain; + unsigned long msi_data; +@@ -70,14 +67,14 @@ struct pcie_host_ops { + void (*host_init)(struct pcie_port *pp); + void (*msi_set_irq)(struct pcie_port *pp, int irq); + void (*msi_clear_irq)(struct pcie_port *pp, int irq); +- u32 (*get_msi_addr)(struct pcie_port *pp); ++ phys_addr_t (*get_msi_addr)(struct pcie_port *pp); + u32 (*get_msi_data)(struct pcie_port *pp, int pos); + void (*scan_bus)(struct pcie_port *pp); +- int (*msi_host_init)(struct pcie_port *pp, struct msi_chip *chip); ++ int (*msi_host_init)(struct pcie_port *pp, struct msi_controller *chip); + }; + +-int dw_pcie_cfg_read(void __iomem *addr, int where, int size, u32 *val); +-int dw_pcie_cfg_write(void __iomem *addr, int where, int size, u32 val); ++int dw_pcie_cfg_read(void __iomem *addr, int size, u32 *val); ++int dw_pcie_cfg_write(void __iomem *addr, int size, u32 val); + irqreturn_t dw_handle_msi_irq(struct pcie_port *pp); + void dw_pcie_msi_init(struct pcie_port *pp); + int dw_pcie_link_up(struct pcie_port *pp); +diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c +index 61158e0..f8ec96d 100644 +--- a/drivers/pci/host/pcie-rcar.c ++++ b/drivers/pci/host/pcie-rcar.c +@@ -111,14 +111,14 @@ + struct rcar_msi { + DECLARE_BITMAP(used, INT_PCI_MSI_NR); + struct irq_domain *domain; +- struct msi_chip chip; ++ struct msi_controller chip; + unsigned long pages; + struct mutex lock; + int irq1; + int irq2; + }; + +-static inline struct rcar_msi *to_rcar_msi(struct msi_chip *chip) ++static inline struct rcar_msi *to_rcar_msi(struct msi_controller *chip) + { + return container_of(chip, struct rcar_msi, chip); + } +@@ -404,9 +404,6 @@ static void rcar_pcie_enable(struct 
rcar_pcie *pcie) + rcar_pci.private_data = (void **)&pcie; + + pci_common_init_dev(&pdev->dev, &rcar_pci); +-#ifdef CONFIG_PCI_DOMAINS +- rcar_pci.domain++; +-#endif + } + + static int phy_wait_for_ack(struct rcar_pcie *pcie) +@@ -622,7 +619,7 @@ static irqreturn_t rcar_pcie_msi_irq(int irq, void *data) + return IRQ_HANDLED; + } + +-static int rcar_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev, ++static int rcar_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev, + struct msi_desc *desc) + { + struct rcar_msi *msi = to_rcar_msi(chip); +@@ -647,12 +644,12 @@ static int rcar_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev, + msg.address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR); + msg.data = hwirq; + +- write_msi_msg(irq, &msg); ++ pci_write_msi_msg(irq, &msg); + + return 0; + } + +-static void rcar_msi_teardown_irq(struct msi_chip *chip, unsigned int irq) ++static void rcar_msi_teardown_irq(struct msi_controller *chip, unsigned int irq) + { + struct rcar_msi *msi = to_rcar_msi(chip); + struct irq_data *d = irq_get_irq_data(irq); +@@ -662,10 +659,10 @@ static void rcar_msi_teardown_irq(struct msi_chip *chip, unsigned int irq) + + static struct irq_chip rcar_msi_irq_chip = { + .name = "R-Car PCIe MSI", +- .irq_enable = unmask_msi_irq, +- .irq_disable = mask_msi_irq, +- .irq_mask = mask_msi_irq, +- .irq_unmask = unmask_msi_irq, ++ .irq_enable = pci_msi_unmask_irq, ++ .irq_disable = pci_msi_mask_irq, ++ .irq_mask = pci_msi_mask_irq, ++ .irq_unmask = pci_msi_unmask_irq, + }; + + static int rcar_msi_map(struct irq_domain *domain, unsigned int irq, +@@ -673,7 +670,6 @@ static int rcar_msi_map(struct irq_domain *domain, unsigned int irq, + { + irq_set_chip_and_handler(irq, &rcar_msi_irq_chip, handle_simple_irq); + irq_set_chip_data(irq, domain->host_data); +- set_irq_flags(irq, IRQF_VALID); + + return 0; + } +diff --git a/drivers/pci/host/pcie-xilinx.c b/drivers/pci/host/pcie-xilinx.c +index ccc496b..eef849c 100644 +--- a/drivers/pci/host/pcie-xilinx.c ++++ b/drivers/pci/host/pcie-xilinx.c +@@ -297,18 +297,16 @@ static struct pci_ops xilinx_pcie_ops = { + */ + static void xilinx_pcie_destroy_msi(unsigned int irq) + { +- struct irq_desc *desc; + struct msi_desc *msi; + struct xilinx_pcie_port *port; + +- desc = irq_to_desc(irq); +- msi = irq_desc_get_msi_desc(desc); +- port = sys_to_pcie(msi->dev->bus->sysdata); +- +- if (!test_bit(irq, msi_irq_in_use)) ++ if (!test_bit(irq, msi_irq_in_use)) { ++ msi = irq_get_msi_desc(irq); ++ port = sys_to_pcie(msi_desc_to_pci_sys_data(msi)); + dev_err(port->dev, "Trying to free unused MSI#%d\n", irq); +- else ++ } else { + clear_bit(irq, msi_irq_in_use); ++ } + } + + /** +@@ -335,7 +333,8 @@ static int xilinx_pcie_assign_msi(struct xilinx_pcie_port *port) + * @chip: MSI Chip descriptor + * @irq: MSI IRQ to destroy + */ +-static void xilinx_msi_teardown_irq(struct msi_chip *chip, unsigned int irq) ++static void xilinx_msi_teardown_irq(struct msi_controller *chip, ++ unsigned int irq) + { + xilinx_pcie_destroy_msi(irq); + } +@@ -348,7 +347,7 @@ static void xilinx_msi_teardown_irq(struct msi_chip *chip, unsigned int irq) + * + * Return: '0' on success and error value on failure + */ +-static int xilinx_pcie_msi_setup_irq(struct msi_chip *chip, ++static int xilinx_pcie_msi_setup_irq(struct msi_controller *chip, + struct pci_dev *pdev, + struct msi_desc *desc) + { +@@ -374,13 +373,13 @@ static int xilinx_pcie_msi_setup_irq(struct msi_chip *chip, + msg.address_lo = msg_addr; + msg.data = irq; + +- write_msi_msg(irq, &msg); ++ 
pci_write_msi_msg(irq, &msg); + + return 0; + } + + /* MSI Chip Descriptor */ +-static struct msi_chip xilinx_pcie_msi_chip = { ++static struct msi_controller xilinx_pcie_msi_chip = { + .setup_irq = xilinx_pcie_msi_setup_irq, + .teardown_irq = xilinx_msi_teardown_irq, + }; +@@ -388,10 +387,10 @@ static struct msi_chip xilinx_pcie_msi_chip = { + /* HW Interrupt Chip Descriptor */ + static struct irq_chip xilinx_msi_irq_chip = { + .name = "Xilinx PCIe MSI", +- .irq_enable = unmask_msi_irq, +- .irq_disable = mask_msi_irq, +- .irq_mask = mask_msi_irq, +- .irq_unmask = unmask_msi_irq, ++ .irq_enable = pci_msi_unmask_irq, ++ .irq_disable = pci_msi_mask_irq, ++ .irq_mask = pci_msi_mask_irq, ++ .irq_unmask = pci_msi_unmask_irq, + }; + + /** +@@ -407,7 +406,6 @@ static int xilinx_pcie_msi_map(struct irq_domain *domain, unsigned int irq, + { + irq_set_chip_and_handler(irq, &xilinx_msi_irq_chip, handle_simple_irq); + irq_set_chip_data(irq, domain->host_data); +- set_irq_flags(irq, IRQF_VALID); + + return 0; + } +@@ -431,20 +429,6 @@ static void xilinx_pcie_enable_msi(struct xilinx_pcie_port *port) + pcie_write(port, msg_addr, XILINX_PCIE_REG_MSIBASE2); + } + +-/** +- * xilinx_pcie_add_bus - Add MSI chip info to PCIe bus +- * @bus: PCIe bus +- */ +-static void xilinx_pcie_add_bus(struct pci_bus *bus) +-{ +- if (IS_ENABLED(CONFIG_PCI_MSI)) { +- struct xilinx_pcie_port *port = sys_to_pcie(bus->sysdata); +- +- xilinx_pcie_msi_chip.dev = port->dev; +- bus->msi = &xilinx_pcie_msi_chip; +- } +-} +- + /* INTx Functions */ + + /** +@@ -460,7 +444,6 @@ static int xilinx_pcie_intx_map(struct irq_domain *domain, unsigned int irq, + { + irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq); + irq_set_chip_data(irq, domain->host_data); +- set_irq_flags(irq, IRQF_VALID); + + return 0; + } +@@ -730,9 +713,15 @@ static struct pci_bus *xilinx_pcie_scan_bus(int nr, struct pci_sys_data *sys) + struct pci_bus *bus; + + port->root_busno = sys->busnr; +- bus = pci_scan_root_bus(port->dev, sys->busnr, &xilinx_pcie_ops, +- sys, &sys->resources); + ++ if (IS_ENABLED(CONFIG_PCI_MSI)) ++ bus = pci_scan_root_bus_msi(port->dev, sys->busnr, ++ &xilinx_pcie_ops, sys, ++ &sys->resources, ++ &xilinx_pcie_msi_chip); ++ else ++ bus = pci_scan_root_bus(port->dev, sys->busnr, ++ &xilinx_pcie_ops, sys, &sys->resources); + return bus; + } + +@@ -750,7 +739,7 @@ static int xilinx_pcie_parse_and_add_res(struct xilinx_pcie_port *port) + resource_size_t offset; + struct of_pci_range_parser parser; + struct of_pci_range range; +- struct pci_host_bridge_window *win; ++ struct resource_entry *win; + int err = 0, mem_resno = 0; + + /* Get the ranges */ +@@ -820,7 +809,7 @@ static int xilinx_pcie_parse_and_add_res(struct xilinx_pcie_port *port) + + free_resources: + release_child_resources(&iomem_resource); +- list_for_each_entry(win, &port->resources, list) ++ resource_list_for_each_entry(win, &port->resources) + devm_kfree(dev, win->res); + pci_free_resource_list(&port->resources); + +@@ -924,10 +913,13 @@ static int xilinx_pcie_probe(struct platform_device *pdev) + .private_data = (void **)&port, + .setup = xilinx_pcie_setup, + .map_irq = of_irq_parse_and_map_pci, +- .add_bus = xilinx_pcie_add_bus, + .scan = xilinx_pcie_scan_bus, + .ops = &xilinx_pcie_ops, + }; ++ ++#ifdef CONFIG_PCI_MSI ++ xilinx_pcie_msi_chip.dev = port->dev; ++#endif + pci_common_init_dev(dev, &hw); + + return 0; +diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c +index 084587d..5dd4c96 100644 +--- a/drivers/pci/msi.c ++++ b/drivers/pci/msi.c +@@ -19,19 +19,81 @@ 
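/*
 * Illustrative aside (editor's sketch, not part of the vendored patch):
 * the host-driver hunks above are largely a mechanical rename that the
 * core hunks below formalise: struct msi_chip becomes msi_controller,
 * and the irq helpers gain a pci_ prefix (mask_msi_irq ->
 * pci_msi_mask_irq, unmask_msi_irq -> pci_msi_unmask_irq, write_msi_msg
 * -> pci_write_msi_msg). A host driver's MSI irq_chip afterwards, with a
 * hypothetical name:
 *
 *	static struct irq_chip foo_msi_irq_chip = {
 *		.name		= "foo PCIe MSI",
 *		.irq_mask	= pci_msi_mask_irq,
 *		.irq_unmask	= pci_msi_unmask_irq,
 *	};
 */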
+ #include + #include + #include ++#include + + #include "pci.h" + + static int pci_msi_enable = 1; ++int pci_msi_ignore_mask; + + #define msix_table_size(flags) ((flags & PCI_MSIX_FLAGS_QSIZE) + 1) + ++#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN ++static struct irq_domain *pci_msi_default_domain; ++static DEFINE_MUTEX(pci_msi_domain_lock); ++ ++struct irq_domain * __weak arch_get_pci_msi_domain(struct pci_dev *dev) ++{ ++ return pci_msi_default_domain; ++} ++ ++static struct irq_domain *pci_msi_get_domain(struct pci_dev *dev) ++{ ++ struct irq_domain *domain; ++ ++ domain = dev_get_msi_domain(&dev->dev); ++ if (domain) ++ return domain; ++ ++ return arch_get_pci_msi_domain(dev); ++} ++ ++static int pci_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) ++{ ++ struct irq_domain *domain; ++ ++ domain = pci_msi_get_domain(dev); ++ if (domain) ++ return pci_msi_domain_alloc_irqs(domain, dev, nvec, type); ++ ++ return arch_setup_msi_irqs(dev, nvec, type); ++} ++ ++static void pci_msi_teardown_msi_irqs(struct pci_dev *dev) ++{ ++ struct irq_domain *domain; ++ ++ domain = pci_msi_get_domain(dev); ++ if (domain) ++ pci_msi_domain_free_irqs(domain, dev); ++ else ++ arch_teardown_msi_irqs(dev); ++} ++#else ++#define pci_msi_setup_msi_irqs arch_setup_msi_irqs ++#define pci_msi_teardown_msi_irqs arch_teardown_msi_irqs ++#endif + + /* Arch hooks */ + ++struct msi_controller * __weak pcibios_msi_controller(struct pci_dev *dev) ++{ ++ return NULL; ++} ++ ++static struct msi_controller *pci_msi_controller(struct pci_dev *dev) ++{ ++ struct msi_controller *msi_ctrl = dev->bus->msi; ++ ++ if (msi_ctrl) ++ return msi_ctrl; ++ ++ return pcibios_msi_controller(dev); ++} ++ + int __weak arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc) + { +- struct msi_chip *chip = dev->bus->msi; ++ struct msi_controller *chip = pci_msi_controller(dev); + int err; + + if (!chip || !chip->setup_irq) +@@ -48,7 +110,7 @@ int __weak arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc) + + void __weak arch_teardown_msi_irq(unsigned int irq) + { +- struct msi_chip *chip = irq_get_chip_data(irq); ++ struct msi_controller *chip = irq_get_chip_data(irq); + + if (!chip || !chip->teardown_irq) + return; +@@ -58,9 +120,12 @@ void __weak arch_teardown_msi_irq(unsigned int irq) + + int __weak arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) + { ++ struct msi_controller *chip = dev->bus->msi; + struct msi_desc *entry; + int ret; + ++ if (chip && chip->setup_irqs) ++ return chip->setup_irqs(chip, dev, nvec, type); + /* + * If an architecture wants to support multiple MSI, it needs to + * override arch_setup_msi_irqs() +@@ -68,7 +133,7 @@ int __weak arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) + if (type == PCI_CAP_ID_MSI && nvec > 1) + return 1; + +- list_for_each_entry(entry, &dev->msi_list, list) { ++ for_each_pci_msi_entry(entry, dev) { + ret = arch_setup_msi_irq(dev, entry); + if (ret < 0) + return ret; +@@ -85,19 +150,13 @@ int __weak arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) + */ + void default_teardown_msi_irqs(struct pci_dev *dev) + { ++ int i; + struct msi_desc *entry; + +- list_for_each_entry(entry, &dev->msi_list, list) { +- int i, nvec; +- if (entry->irq == 0) +- continue; +- if (entry->nvec_used) +- nvec = entry->nvec_used; +- else +- nvec = 1 << entry->msi_attrib.multiple; +- for (i = 0; i < nvec; i++) +- arch_teardown_msi_irq(entry->irq + i); +- } ++ for_each_pci_msi_entry(entry, dev) ++ if (entry->irq) ++ for (i = 0; i < entry->nvec_used; i++) ++ 
arch_teardown_msi_irq(entry->irq + i); + } + + void __weak arch_teardown_msi_irqs(struct pci_dev *dev) +@@ -111,7 +170,7 @@ static void default_restore_msi_irq(struct pci_dev *dev, int irq) + + entry = NULL; + if (dev->msix_enabled) { +- list_for_each_entry(entry, &dev->msi_list, list) { ++ for_each_pci_msi_entry(entry, dev) { + if (irq == entry->irq) + break; + } +@@ -120,7 +179,7 @@ static void default_restore_msi_irq(struct pci_dev *dev, int irq) + } + + if (entry) +- __write_msi_msg(entry, &entry->msg); ++ __pci_write_msi_msg(entry, &entry->msg); + } + + void __weak arch_restore_msi_irqs(struct pci_dev *dev) +@@ -128,27 +187,6 @@ void __weak arch_restore_msi_irqs(struct pci_dev *dev) + return default_restore_msi_irqs(dev); + } + +-static void msi_set_enable(struct pci_dev *dev, int enable) +-{ +- u16 control; +- +- pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control); +- control &= ~PCI_MSI_FLAGS_ENABLE; +- if (enable) +- control |= PCI_MSI_FLAGS_ENABLE; +- pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control); +-} +- +-static void msix_clear_and_set_ctrl(struct pci_dev *dev, u16 clear, u16 set) +-{ +- u16 ctrl; +- +- pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &ctrl); +- ctrl &= ~clear; +- ctrl |= set; +- pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, ctrl); +-} +- + static inline __attribute_const__ u32 msi_mask(unsigned x) + { + /* Don't shift by >= width of type */ +@@ -163,28 +201,24 @@ static inline __attribute_const__ u32 msi_mask(unsigned x) + * reliably as devices without an INTx disable bit will then generate a + * level IRQ which will never be cleared. + */ +-u32 default_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) ++u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) + { + u32 mask_bits = desc->masked; + +- if (!desc->msi_attrib.maskbit) ++ if (pci_msi_ignore_mask || !desc->msi_attrib.maskbit) + return 0; + + mask_bits &= ~mask; + mask_bits |= flag; +- pci_write_config_dword(desc->dev, desc->mask_pos, mask_bits); ++ pci_write_config_dword(msi_desc_to_pci_dev(desc), desc->mask_pos, ++ mask_bits); + + return mask_bits; + } + +-__weak u32 arch_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) +-{ +- return default_msi_mask_irq(desc, mask, flag); +-} +- + static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) + { +- desc->masked = arch_msi_mask_irq(desc, mask, flag); ++ desc->masked = __pci_msi_desc_mask_irq(desc, mask, flag); + } + + /* +@@ -194,11 +228,15 @@ static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) + * file. This saves a few milliseconds when initialising devices with lots + * of MSI-X interrupts. 
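/*
 * Illustrative aside (editor's sketch, not part of the vendored patch):
 * each MSI-X table entry is 16 bytes (PCI_MSIX_ENTRY_SIZE) with the
 * Vector Control dword at byte 12 (PCI_MSIX_ENTRY_VECTOR_CTRL), so for
 * msi_attrib.entry_nr == 3 the function below masks the vector at:
 *
 *	offset = 3 * 16 + 12;			// 0x3c into the table
 *	writel(mask_bits, desc->mask_base + offset);
 */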
+ */ +-u32 default_msix_mask_irq(struct msi_desc *desc, u32 flag) ++u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag) + { + u32 mask_bits = desc->masked; + unsigned offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE + + PCI_MSIX_ENTRY_VECTOR_CTRL; ++ ++ if (pci_msi_ignore_mask) ++ return 0; ++ + mask_bits &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT; + if (flag) + mask_bits |= PCI_MSIX_ENTRY_CTRL_MASKBIT; +@@ -207,19 +245,14 @@ u32 default_msix_mask_irq(struct msi_desc *desc, u32 flag) + return mask_bits; + } + +-__weak u32 arch_msix_mask_irq(struct msi_desc *desc, u32 flag) +-{ +- return default_msix_mask_irq(desc, flag); +-} +- + static void msix_mask_irq(struct msi_desc *desc, u32 flag) + { +- desc->masked = arch_msix_mask_irq(desc, flag); ++ desc->masked = __pci_msix_desc_mask_irq(desc, flag); + } + + static void msi_set_mask_bit(struct irq_data *data, u32 flag) + { +- struct msi_desc *desc = irq_data_get_msi(data); ++ struct msi_desc *desc = irq_data_get_msi_desc(data); + + if (desc->msi_attrib.is_msix) { + msix_mask_irq(desc, flag); +@@ -230,12 +263,20 @@ static void msi_set_mask_bit(struct irq_data *data, u32 flag) + } + } + +-void mask_msi_irq(struct irq_data *data) ++/** ++ * pci_msi_mask_irq - Generic irq chip callback to mask PCI/MSI interrupts ++ * @data: pointer to irqdata associated to that interrupt ++ */ ++void pci_msi_mask_irq(struct irq_data *data) + { + msi_set_mask_bit(data, 1); + } + +-void unmask_msi_irq(struct irq_data *data) ++/** ++ * pci_msi_unmask_irq - Generic irq chip callback to unmask PCI/MSI interrupts ++ * @data: pointer to irqdata associated to that interrupt ++ */ ++void pci_msi_unmask_irq(struct irq_data *data) + { + msi_set_mask_bit(data, 0); + } +@@ -244,14 +285,15 @@ void default_restore_msi_irqs(struct pci_dev *dev) + { + struct msi_desc *entry; + +- list_for_each_entry(entry, &dev->msi_list, list) { ++ for_each_pci_msi_entry(entry, dev) + default_restore_msi_irq(dev, entry->irq); +- } + } + +-void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg) ++void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg) + { +- BUG_ON(entry->dev->current_state != PCI_D0); ++ struct pci_dev *dev = msi_desc_to_pci_dev(entry); ++ ++ BUG_ON(dev->current_state != PCI_D0); + + if (entry->msi_attrib.is_msix) { + void __iomem *base = entry->mask_base + +@@ -261,7 +303,6 @@ void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg) + msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR); + msg->data = readl(base + PCI_MSIX_ENTRY_DATA); + } else { +- struct pci_dev *dev = entry->dev; + int pos = dev->msi_cap; + u16 data; + +@@ -279,34 +320,11 @@ void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg) + } + } + +-void read_msi_msg(unsigned int irq, struct msi_msg *msg) +-{ +- struct msi_desc *entry = irq_get_msi_desc(irq); +- +- __read_msi_msg(entry, msg); +-} +- +-void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg) +-{ +- /* Assert that the cache is valid, assuming that +- * valid messages are not all-zeroes. 
*/ +- BUG_ON(!(entry->msg.address_hi | entry->msg.address_lo | +- entry->msg.data)); +- +- *msg = entry->msg; +-} +- +-void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg) ++void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg) + { +- struct msi_desc *entry = irq_get_msi_desc(irq); +- +- __get_cached_msi_msg(entry, msg); +-} +-EXPORT_SYMBOL_GPL(get_cached_msi_msg); ++ struct pci_dev *dev = msi_desc_to_pci_dev(entry); + +-void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg) +-{ +- if (entry->dev->current_state != PCI_D0) { ++ if (dev->current_state != PCI_D0) { + /* Don't touch the hardware now */ + } else if (entry->msi_attrib.is_msix) { + void __iomem *base; +@@ -317,7 +335,6 @@ void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg) + writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR); + writel(msg->data, base + PCI_MSIX_ENTRY_DATA); + } else { +- struct pci_dev *dev = entry->dev; + int pos = dev->msi_cap; + u16 msgctl; + +@@ -341,38 +358,32 @@ void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg) + entry->msg = *msg; + } + +-void write_msi_msg(unsigned int irq, struct msi_msg *msg) ++void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg) + { + struct msi_desc *entry = irq_get_msi_desc(irq); + +- __write_msi_msg(entry, msg); ++ __pci_write_msi_msg(entry, msg); + } +-EXPORT_SYMBOL_GPL(write_msi_msg); ++EXPORT_SYMBOL_GPL(pci_write_msi_msg); + + static void free_msi_irqs(struct pci_dev *dev) + { ++ struct list_head *msi_list = dev_to_msi_list(&dev->dev); + struct msi_desc *entry, *tmp; + struct attribute **msi_attrs; + struct device_attribute *dev_attr; +- int count = 0; ++ int i, count = 0; + +- list_for_each_entry(entry, &dev->msi_list, list) { +- int i, nvec; +- if (!entry->irq) +- continue; +- if (entry->nvec_used) +- nvec = entry->nvec_used; +- else +- nvec = 1 << entry->msi_attrib.multiple; +- for (i = 0; i < nvec; i++) +- BUG_ON(irq_has_action(entry->irq + i)); +- } ++ for_each_pci_msi_entry(entry, dev) ++ if (entry->irq) ++ for (i = 0; i < entry->nvec_used; i++) ++ BUG_ON(irq_has_action(entry->irq + i)); + +- arch_teardown_msi_irqs(dev); ++ pci_msi_teardown_msi_irqs(dev); + +- list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) { ++ list_for_each_entry_safe(entry, tmp, msi_list, list) { + if (entry->msi_attrib.is_msix) { +- if (list_is_last(&entry->list, &dev->msi_list)) ++ if (list_is_last(&entry->list, msi_list)) + iounmap(entry->mask_base); + } + +@@ -397,18 +408,6 @@ static void free_msi_irqs(struct pci_dev *dev) + } + } + +-static struct msi_desc *alloc_msi_entry(struct pci_dev *dev) +-{ +- struct msi_desc *desc = kzalloc(sizeof(*desc), GFP_KERNEL); +- if (!desc) +- return NULL; +- +- INIT_LIST_HEAD(&desc->list); +- desc->dev = dev; +- +- return desc; +-} +- + static void pci_intx_for_msi(struct pci_dev *dev, int enable) + { + if (!(dev->dev_flags & PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG)) +@@ -426,7 +425,7 @@ static void __pci_restore_msi_state(struct pci_dev *dev) + entry = irq_get_msi_desc(dev->irq); + + pci_intx_for_msi(dev, 0); +- msi_set_enable(dev, 0); ++ pci_msi_set_enable(dev, 0); + arch_restore_msi_irqs(dev); + + pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control); +@@ -443,19 +442,18 @@ static void __pci_restore_msix_state(struct pci_dev *dev) + + if (!dev->msix_enabled) + return; +- BUG_ON(list_empty(&dev->msi_list)); ++ BUG_ON(list_empty(dev_to_msi_list(&dev->dev))); + + /* route the table */ + pci_intx_for_msi(dev, 0); +- msix_clear_and_set_ctrl(dev, 0, ++ 
pci_msix_clear_and_set_ctrl(dev, 0, + PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL); + + arch_restore_msi_irqs(dev); +- list_for_each_entry(entry, &dev->msi_list, list) { ++ for_each_pci_msi_entry(entry, dev) + msix_mask_irq(entry, entry->masked); +- } + +- msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0); ++ pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0); + } + + void pci_restore_msi_state(struct pci_dev *dev) +@@ -497,9 +495,8 @@ static int populate_msi_sysfs(struct pci_dev *pdev) + int count = 0; + + /* Determine how many msi entries we have */ +- list_for_each_entry(entry, &pdev->msi_list, list) { ++ for_each_pci_msi_entry(entry, pdev) + ++num_msi; +- } + if (!num_msi) + return 0; + +@@ -507,7 +504,7 @@ static int populate_msi_sysfs(struct pci_dev *pdev) + msi_attrs = kzalloc(sizeof(void *) * (num_msi + 1), GFP_KERNEL); + if (!msi_attrs) + return -ENOMEM; +- list_for_each_entry(entry, &pdev->msi_list, list) { ++ for_each_pci_msi_entry(entry, pdev) { + msi_dev_attr = kzalloc(sizeof(*msi_dev_attr), GFP_KERNEL); + if (!msi_dev_attr) + goto error_attrs; +@@ -559,13 +556,13 @@ error_attrs: + return ret; + } + +-static struct msi_desc *msi_setup_entry(struct pci_dev *dev) ++static struct msi_desc *msi_setup_entry(struct pci_dev *dev, int nvec) + { + u16 control; + struct msi_desc *entry; + + /* MSI Entry Initialization */ +- entry = alloc_msi_entry(dev); ++ entry = alloc_msi_entry(&dev->dev); + if (!entry) + return NULL; + +@@ -577,6 +574,8 @@ static struct msi_desc *msi_setup_entry(struct pci_dev *dev) + entry->msi_attrib.maskbit = !!(control & PCI_MSI_FLAGS_MASKBIT); + entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */ + entry->msi_attrib.multi_cap = (control & PCI_MSI_FLAGS_QMASK) >> 1; ++ entry->msi_attrib.multiple = ilog2(__roundup_pow_of_two(nvec)); ++ entry->nvec_used = nvec; + + if (control & PCI_MSI_FLAGS_64BIT) + entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_64; +@@ -594,7 +593,7 @@ static int msi_verify_entries(struct pci_dev *dev) + { + struct msi_desc *entry; + +- list_for_each_entry(entry, &dev->msi_list, list) { ++ for_each_pci_msi_entry(entry, dev) { + if (!dev->no_64bit_msi || !entry->msg.address_hi) + continue; + dev_err(&dev->dev, "Device has broken 64-bit MSI but arch" +@@ -621,9 +620,9 @@ static int msi_capability_init(struct pci_dev *dev, int nvec) + int ret; + unsigned mask; + +- msi_set_enable(dev, 0); /* Disable MSI during set up */ ++ pci_msi_set_enable(dev, 0); /* Disable MSI during set up */ + +- entry = msi_setup_entry(dev); ++ entry = msi_setup_entry(dev, nvec); + if (!entry) + return -ENOMEM; + +@@ -631,10 +630,10 @@ static int msi_capability_init(struct pci_dev *dev, int nvec) + mask = msi_mask(entry->msi_attrib.multi_cap); + msi_mask_irq(entry, mask, mask); + +- list_add_tail(&entry->list, &dev->msi_list); ++ list_add_tail(&entry->list, dev_to_msi_list(&dev->dev)); + + /* Configure MSI capability structure */ +- ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI); ++ ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI); + if (ret) { + msi_mask_irq(entry, mask, ~mask); + free_msi_irqs(dev); +@@ -657,7 +656,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec) + + /* Set MSI enabled bits */ + pci_intx_for_msi(dev, 0); +- msi_set_enable(dev, 1); ++ pci_msi_set_enable(dev, 1); + dev->msi_enabled = 1; + + dev->irq = entry->irq; +@@ -686,7 +685,7 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base, + int i; + + for (i = 0; i < nvec; i++) { +- entry = alloc_msi_entry(dev); ++ entry = 
alloc_msi_entry(&dev->dev); + if (!entry) { + if (!i) + iounmap(base); +@@ -701,8 +700,9 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base, + entry->msi_attrib.entry_nr = entries[i].entry; + entry->msi_attrib.default_irq = dev->irq; + entry->mask_base = base; ++ entry->nvec_used = 1; + +- list_add_tail(&entry->list, &dev->msi_list); ++ list_add_tail(&entry->list, dev_to_msi_list(&dev->dev)); + } + + return 0; +@@ -714,12 +714,11 @@ static void msix_program_entries(struct pci_dev *dev, + struct msi_desc *entry; + int i = 0; + +- list_for_each_entry(entry, &dev->msi_list, list) { ++ for_each_pci_msi_entry(entry, dev) { + int offset = entries[i].entry * PCI_MSIX_ENTRY_SIZE + + PCI_MSIX_ENTRY_VECTOR_CTRL; + + entries[i].vector = entry->irq; +- irq_set_msi_desc(entry->irq, entry); + entry->masked = readl(entry->mask_base + offset); + msix_mask_irq(entry, 1); + i++; +@@ -744,7 +743,7 @@ static int msix_capability_init(struct pci_dev *dev, + void __iomem *base; + + /* Ensure MSI-X is disabled while it is set up */ +- msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0); ++ pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0); + + pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control); + /* Request & Map MSI-X table region */ +@@ -756,7 +755,7 @@ static int msix_capability_init(struct pci_dev *dev, + if (ret) + return ret; + +- ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX); ++ ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX); + if (ret) + goto out_avail; + +@@ -770,7 +769,7 @@ static int msix_capability_init(struct pci_dev *dev, + * MSI-X registers. We need to mask all the vectors to prevent + * interrupts coming in before they're fully set up. + */ +- msix_clear_and_set_ctrl(dev, 0, ++ pci_msix_clear_and_set_ctrl(dev, 0, + PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE); + + msix_program_entries(dev, entries); +@@ -783,7 +782,7 @@ static int msix_capability_init(struct pci_dev *dev, + pci_intx_for_msi(dev, 0); + dev->msix_enabled = 1; + +- msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0); ++ pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0); + + return 0; + +@@ -796,7 +795,7 @@ out_avail: + struct msi_desc *entry; + int avail = 0; + +- list_for_each_entry(entry, &dev->msi_list, list) { ++ for_each_pci_msi_entry(entry, dev) { + if (entry->irq != 0) + avail++; + } +@@ -885,17 +884,17 @@ void pci_msi_shutdown(struct pci_dev *dev) + if (!pci_msi_enable || !dev || !dev->msi_enabled) + return; + +- BUG_ON(list_empty(&dev->msi_list)); +- desc = list_first_entry(&dev->msi_list, struct msi_desc, list); ++ BUG_ON(list_empty(dev_to_msi_list(&dev->dev))); ++ desc = first_pci_msi_entry(dev); + +- msi_set_enable(dev, 0); ++ pci_msi_set_enable(dev, 0); + pci_intx_for_msi(dev, 1); + dev->msi_enabled = 0; + + /* Return the device with MSI unmasked as initial states */ + mask = msi_mask(desc->msi_attrib.multi_cap); + /* Keep cached state to be restored */ +- arch_msi_mask_irq(desc, mask, ~mask); ++ __pci_msi_desc_mask_irq(desc, mask, ~mask); + + /* Restore dev->irq to its default pin-assertion irq */ + dev->irq = desc->msi_attrib.default_irq; +@@ -991,12 +990,12 @@ void pci_msix_shutdown(struct pci_dev *dev) + return; + + /* Return the device with MSI-X masked as initial states */ +- list_for_each_entry(entry, &dev->msi_list, list) { ++ for_each_pci_msi_entry(entry, dev) { + /* Keep cached states to be restored */ +- arch_msix_mask_irq(entry, 1); ++ __pci_msix_desc_mask_irq(entry, 1); + } + +- msix_clear_and_set_ctrl(dev, 
PCI_MSIX_FLAGS_ENABLE, 0); ++ pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0); + pci_intx_for_msi(dev, 1); + dev->msix_enabled = 0; + } +@@ -1030,19 +1029,6 @@ EXPORT_SYMBOL(pci_msi_enabled); + + void pci_msi_init_pci_dev(struct pci_dev *dev) + { +- INIT_LIST_HEAD(&dev->msi_list); +- +- /* Disable the msi hardware to avoid screaming interrupts +- * during boot. This is the power on reset default so +- * usually this should be a noop. +- */ +- dev->msi_cap = pci_find_capability(dev, PCI_CAP_ID_MSI); +- if (dev->msi_cap) +- msi_set_enable(dev, 0); +- +- dev->msix_cap = pci_find_capability(dev, PCI_CAP_ID_MSIX); +- if (dev->msix_cap) +- msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0); + } + + /** +@@ -1138,3 +1124,217 @@ int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, + return nvec; + } + EXPORT_SYMBOL(pci_enable_msix_range); ++ ++struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc) ++{ ++ return to_pci_dev(desc->dev); ++} ++ ++void *msi_desc_to_pci_sysdata(struct msi_desc *desc) ++{ ++ struct pci_dev *dev = msi_desc_to_pci_dev(desc); ++ ++ return dev->bus->sysdata; ++} ++EXPORT_SYMBOL_GPL(msi_desc_to_pci_sysdata); ++ ++#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN ++/** ++ * pci_msi_domain_write_msg - Helper to write MSI message to PCI config space ++ * @irq_data: Pointer to interrupt data of the MSI interrupt ++ * @msg: Pointer to the message ++ */ ++void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg) ++{ ++ struct msi_desc *desc = irq_data->msi_desc; ++ ++ /* ++ * For MSI-X desc->irq is always equal to irq_data->irq. For ++ * MSI only the first interrupt of MULTI MSI passes the test. ++ */ ++ if (desc->irq == irq_data->irq) ++ __pci_write_msi_msg(desc, msg); ++} ++ ++/** ++ * pci_msi_domain_calc_hwirq - Generate a unique ID for an MSI source ++ * @dev: Pointer to the PCI device ++ * @desc: Pointer to the msi descriptor ++ * ++ * The ID number is only used within the irqdomain. 
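++ *
++ * As a worked example (illustrative values, not from this patch): a
++ * device at bus 2, devfn 0x10 in PCI domain 0 using MSI-X entry 3
++ * yields 3 | (PCI_DEVID(2, 0x10) << 11) | (0 << 27) = 0x108003.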
++ */
++irq_hw_number_t pci_msi_domain_calc_hwirq(struct pci_dev *dev,
++					  struct msi_desc *desc)
++{
++	return (irq_hw_number_t)desc->msi_attrib.entry_nr |
++		PCI_DEVID(dev->bus->number, dev->devfn) << 11 |
++		(pci_domain_nr(dev->bus) & 0xFFFFFFFF) << 27;
++}
++
++static inline bool pci_msi_desc_is_multi_msi(struct msi_desc *desc)
++{
++	return !desc->msi_attrib.is_msix && desc->nvec_used > 1;
++}
++
++/**
++ * pci_msi_domain_check_cap - Verify that @domain supports the capabilities for @dev
++ * @domain:	The interrupt domain to check
++ * @info:	The domain info for verification
++ * @dev:	The device to check
++ *
++ * Returns:
++ *  0 if the functionality is supported
++ *  1 if Multi MSI is requested, but the domain does not support it
++ *  -ENOTSUPP otherwise
++ */
++int pci_msi_domain_check_cap(struct irq_domain *domain,
++			     struct msi_domain_info *info, struct device *dev)
++{
++	struct msi_desc *desc = first_pci_msi_entry(to_pci_dev(dev));
++
++	/* Special handling to support pci_enable_msi_range() */
++	if (pci_msi_desc_is_multi_msi(desc) &&
++	    !(info->flags & MSI_FLAG_MULTI_PCI_MSI))
++		return 1;
++	else if (desc->msi_attrib.is_msix && !(info->flags & MSI_FLAG_PCI_MSIX))
++		return -ENOTSUPP;
++
++	return 0;
++}
++
++static int pci_msi_domain_handle_error(struct irq_domain *domain,
++				       struct msi_desc *desc, int error)
++{
++	/* Special handling to support pci_enable_msi_range() */
++	if (pci_msi_desc_is_multi_msi(desc) && error == -ENOSPC)
++		return 1;
++
++	return error;
++}
++
++#ifdef GENERIC_MSI_DOMAIN_OPS
++static void pci_msi_domain_set_desc(msi_alloc_info_t *arg,
++				    struct msi_desc *desc)
++{
++	arg->desc = desc;
++	arg->hwirq = pci_msi_domain_calc_hwirq(msi_desc_to_pci_dev(desc),
++					       desc);
++}
++#else
++#define pci_msi_domain_set_desc	NULL
++#endif
++
++static struct msi_domain_ops pci_msi_domain_ops_default = {
++	.set_desc	= pci_msi_domain_set_desc,
++	.msi_check	= pci_msi_domain_check_cap,
++	.handle_error	= pci_msi_domain_handle_error,
++};
++
++static void pci_msi_domain_update_dom_ops(struct msi_domain_info *info)
++{
++	struct msi_domain_ops *ops = info->ops;
++
++	if (ops == NULL) {
++		info->ops = &pci_msi_domain_ops_default;
++	} else {
++		if (ops->set_desc == NULL)
++			ops->set_desc = pci_msi_domain_set_desc;
++		if (ops->msi_check == NULL)
++			ops->msi_check = pci_msi_domain_check_cap;
++		if (ops->handle_error == NULL)
++			ops->handle_error = pci_msi_domain_handle_error;
++	}
++}
++
++static void pci_msi_domain_update_chip_ops(struct msi_domain_info *info)
++{
++	struct irq_chip *chip = info->chip;
++
++	BUG_ON(!chip);
++	if (!chip->irq_write_msi_msg)
++		chip->irq_write_msi_msg = pci_msi_domain_write_msg;
++}
++
++/**
++ * pci_msi_create_irq_domain - Create an MSI interrupt domain
++ * @node:	Optional device-tree node of the interrupt controller
++ * @info:	MSI domain info
++ * @parent:	Parent irq domain
++ *
++ * Updates the domain and chip ops and creates an MSI interrupt domain.
++ *
++ * Returns:
++ * A domain pointer or NULL in case of failure.
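++ *
++ * A typical use (sketch only, not part of this patch) is an irqchip
++ * driver stacking the PCI/MSI domain on top of its own domain:
++ *
++ *	info->flags |= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS;
++ *	domain = pci_msi_create_irq_domain(node, info, parent);
++ *	if (!domain)
++ *		return -ENOMEM;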
++ */ ++struct irq_domain *pci_msi_create_irq_domain(struct device_node *node, ++ struct msi_domain_info *info, ++ struct irq_domain *parent) ++{ ++ struct irq_domain *domain; ++ ++ if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS) ++ pci_msi_domain_update_dom_ops(info); ++ if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS) ++ pci_msi_domain_update_chip_ops(info); ++ ++ domain = msi_create_irq_domain(node, info, parent); ++ if (!domain) ++ return NULL; ++ ++ domain->bus_token = DOMAIN_BUS_PCI_MSI; ++ return domain; ++} ++ ++/** ++ * pci_msi_domain_alloc_irqs - Allocate interrupts for @dev in @domain ++ * @domain: The interrupt domain to allocate from ++ * @dev: The device for which to allocate ++ * @nvec: The number of interrupts to allocate ++ * @type: Unused to allow simpler migration from the arch_XXX interfaces ++ * ++ * Returns: ++ * A virtual interrupt number or an error code in case of failure ++ */ ++int pci_msi_domain_alloc_irqs(struct irq_domain *domain, struct pci_dev *dev, ++ int nvec, int type) ++{ ++ return msi_domain_alloc_irqs(domain, &dev->dev, nvec); ++} ++ ++/** ++ * pci_msi_domain_free_irqs - Free interrupts for @dev in @domain ++ * @domain: The interrupt domain ++ * @dev: The device for which to free interrupts ++ */ ++void pci_msi_domain_free_irqs(struct irq_domain *domain, struct pci_dev *dev) ++{ ++ msi_domain_free_irqs(domain, &dev->dev); ++} ++ ++/** ++ * pci_msi_create_default_irq_domain - Create a default MSI interrupt domain ++ * @node: Optional device-tree node of the interrupt controller ++ * @info: MSI domain info ++ * @parent: Parent irq domain ++ * ++ * Returns: A domain pointer or NULL in case of failure. If successful ++ * the default PCI/MSI irqdomain pointer is updated. ++ */ ++struct irq_domain *pci_msi_create_default_irq_domain(struct device_node *node, ++ struct msi_domain_info *info, struct irq_domain *parent) ++{ ++ struct irq_domain *domain; ++ ++ mutex_lock(&pci_msi_domain_lock); ++ if (pci_msi_default_domain) { ++ pr_err("PCI: default irq domain for PCI MSI has already been created.\n"); ++ domain = NULL; ++ } else { ++ domain = pci_msi_create_irq_domain(node, info, parent); ++ pci_msi_default_domain = domain; ++ } ++ mutex_unlock(&pci_msi_domain_lock); ++ ++ return domain; ++} ++#endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */ +diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h +index b5defca..df2169e 100644 +--- a/drivers/pci/pci.h ++++ b/drivers/pci/pci.h +@@ -140,6 +140,27 @@ static inline void pci_no_msi(void) { } + static inline void pci_msi_init_pci_dev(struct pci_dev *dev) { } + #endif + ++static inline void pci_msi_set_enable(struct pci_dev *dev, int enable) ++{ ++ u16 control; ++ ++ pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control); ++ control &= ~PCI_MSI_FLAGS_ENABLE; ++ if (enable) ++ control |= PCI_MSI_FLAGS_ENABLE; ++ pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control); ++} ++ ++static inline void pci_msix_clear_and_set_ctrl(struct pci_dev *dev, u16 clear, u16 set) ++{ ++ u16 ctrl; ++ ++ pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &ctrl); ++ ctrl &= ~clear; ++ ctrl |= set; ++ pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, ctrl); ++} ++ + void pci_realloc_get_opt(char *); + + static inline int pci_no_d1d2(struct pci_dev *dev) +diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c +index 3010ffc..6bdeb75 100644 +--- a/drivers/pci/probe.c ++++ b/drivers/pci/probe.c +@@ -1097,6 +1097,22 @@ int pci_cfg_space_size(struct pci_dev *dev) + + #define LEGACY_IO_RESOURCE (IORESOURCE_IO | 
IORESOURCE_PCI_FIXED) + ++static void pci_msi_setup_pci_dev(struct pci_dev *dev) ++{ ++ /* ++ * Disable the MSI hardware to avoid screaming interrupts ++ * during boot. This is the power on reset default so ++ * usually this should be a noop. ++ */ ++ dev->msi_cap = pci_find_capability(dev, PCI_CAP_ID_MSI); ++ if (dev->msi_cap) ++ pci_msi_set_enable(dev, 0); ++ ++ dev->msix_cap = pci_find_capability(dev, PCI_CAP_ID_MSIX); ++ if (dev->msix_cap) ++ pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0); ++} ++ + /** + * pci_setup_device - fill in class and map information of a device + * @dev: the device structure to fill +@@ -1152,6 +1168,8 @@ int pci_setup_device(struct pci_dev *dev) + /* "Unknown power state" */ + dev->current_state = PCI_UNKNOWN; + ++ pci_msi_setup_pci_dev(dev); ++ + /* Early fixups, before probing the BARs */ + pci_fixup_device(pci_fixup_early, dev); + /* device class may be changed after fixup */ +@@ -1908,7 +1926,7 @@ struct pci_bus *pci_create_root_bus(struct device *parent, int bus, + int error; + struct pci_host_bridge *bridge; + struct pci_bus *b, *b2; +- struct pci_host_bridge_window *window, *n; ++ struct resource_entry *window, *n; + struct resource *res; + resource_size_t offset; + char bus_addr[64]; +@@ -1972,8 +1990,8 @@ struct pci_bus *pci_create_root_bus(struct device *parent, int bus, + printk(KERN_INFO "PCI host bridge to bus %s\n", dev_name(&b->dev)); + + /* Add initial resources to the bus */ +- list_for_each_entry_safe(window, n, resources, list) { +- list_move_tail(&window->list, &bridge->windows); ++ resource_list_for_each_entry_safe(window, n, resources) { ++ list_move_tail(&window->node, &bridge->windows); + res = window->res; + offset = window->offset; + if (res->flags & IORESOURCE_BUS) +@@ -2073,12 +2091,12 @@ void pci_bus_release_busn_res(struct pci_bus *b) + struct pci_bus *pci_scan_root_bus(struct device *parent, int bus, + struct pci_ops *ops, void *sysdata, struct list_head *resources) + { +- struct pci_host_bridge_window *window; ++ struct resource_entry *window; + bool found = false; + struct pci_bus *b; + int max; + +- list_for_each_entry(window, resources, list) ++ resource_list_for_each_entry(window, resources) + if (window->res->flags & IORESOURCE_BUS) { + found = true; + break; +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c +index b6d646a..f3681e2 100644 +--- a/drivers/pci/quirks.c ++++ b/drivers/pci/quirks.c +@@ -3516,8 +3516,9 @@ int pci_dev_specific_reset(struct pci_dev *dev, int probe) + static void quirk_dma_func0_alias(struct pci_dev *dev) + { + if (PCI_FUNC(dev->devfn) != 0) { +- dev->dma_alias_devfn = PCI_DEVFN(PCI_SLOT(dev->devfn), 0); +- dev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN; ++ dev->dma_alias_devid = PCI_DEVID(dev->bus->number, ++ PCI_DEVFN(PCI_SLOT(dev->devfn), 0)); ++ dev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVID; + } + } + +@@ -3532,8 +3533,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_RICOH, 0xe476, quirk_dma_func0_alias); + static void quirk_dma_func1_alias(struct pci_dev *dev) + { + if (PCI_FUNC(dev->devfn) != 1) { +- dev->dma_alias_devfn = PCI_DEVFN(PCI_SLOT(dev->devfn), 1); +- dev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN; ++ dev->dma_alias_devid = PCI_DEVID(dev->bus->number, ++ PCI_DEVFN(PCI_SLOT(dev->devfn), 1)); ++ dev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVID; + } + } + +diff --git a/drivers/pci/search.c b/drivers/pci/search.c +index a81f413..a00924f 100644 +--- a/drivers/pci/search.c ++++ b/drivers/pci/search.c +@@ -40,9 +40,8 @@ int pci_for_each_dma_alias(struct pci_dev *pdev, + * 
If the device is broken and uses an alias requester ID for
+	 * DMA, iterate over that too.
+	 */
+-	if (unlikely(pdev->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVFN)) {
+-		ret = fn(pdev, PCI_DEVID(pdev->bus->number,
+-					 pdev->dma_alias_devfn), data);
++	if (unlikely(pdev->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVID)) {
++		ret = fn(pdev, pdev->dma_alias_devid, data);
+ 		if (ret)
+ 			return ret;
+ 	}
+diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
+index 116ca37..37d4218 100644
+--- a/drivers/pci/xen-pcifront.c
++++ b/drivers/pci/xen-pcifront.c
+@@ -267,7 +267,7 @@ static int pci_frontend_enable_msix(struct pci_dev *dev,
+ 	}
+
+ 	i = 0;
+-	list_for_each_entry(entry, &dev->msi_list, list) {
++	for_each_pci_msi_entry(entry, dev) {
+ 		op.msix_entries[i].entry = entry->msi_attrib.entry_nr;
+ 		/* Vector is useless at this point. */
+ 		op.msix_entries[i].vector = -1;
+diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
+index f65ff49..b56b084 100644
+--- a/drivers/power/reset/Kconfig
++++ b/drivers/power/reset/Kconfig
+@@ -150,5 +150,11 @@ config POWER_RESET_SYSCON
+ 	help
+ 	  Reboot support for generic SYSCON mapped register reset.
+
++config POWER_RESET_LAYERSCAPE
++	bool "Freescale LayerScape reset driver"
++	depends on ARCH_LAYERSCAPE
++	help
++	  Reboot support for the Freescale LayerScape SoCs.
++
+ endif
+
+diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile
+index 76ce1c5..d924bdb 100644
+--- a/drivers/power/reset/Makefile
++++ b/drivers/power/reset/Makefile
+@@ -17,3 +17,4 @@ obj-$(CONFIG_POWER_RESET_VEXPRESS) += vexpress-poweroff.o
+ obj-$(CONFIG_POWER_RESET_XGENE) += xgene-reboot.o
+ obj-$(CONFIG_POWER_RESET_KEYSTONE) += keystone-reset.o
+ obj-$(CONFIG_POWER_RESET_SYSCON) += syscon-reboot.o
++obj-$(CONFIG_POWER_RESET_LAYERSCAPE) += ls-reboot.o
+diff --git a/drivers/power/reset/ls-reboot.c b/drivers/power/reset/ls-reboot.c
+new file mode 100644
+index 0000000..fa1152c
+--- /dev/null
++++ b/drivers/power/reset/ls-reboot.c
+@@ -0,0 +1,93 @@
++/*
++ * Freescale LayerScape reboot driver
++ *
++ * Copyright (c) 2015, Freescale Semiconductor.
++ * Author: Pankaj Chauhan
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
++ */
++
++#include <linux/io.h>
++#include <linux/jiffies.h>
++#include <linux/of_address.h>
++#include <linux/platform_device.h>
++#include <linux/reboot.h>
++#include <linux/slab.h>
++#include <asm/system_misc.h>	/* for arm_pm_restart; header names reconstructed from usage */
++
++struct ls_reboot_priv {
++	struct device *dev;
++	u32 *rstcr;
++};
++
++static struct ls_reboot_priv *ls_reboot_priv;
++
++static void ls_reboot(enum reboot_mode reboot_mode, const char *cmd)
++{
++	struct ls_reboot_priv *priv = ls_reboot_priv;
++	u32 val;
++	unsigned long timeout;
++
++	if (ls_reboot_priv) {
++		val = readl(priv->rstcr);
++		val |= 0x02;
++		writel(val, priv->rstcr);
++	}
++
++	timeout = jiffies + HZ;
++	while (time_before(jiffies, timeout))
++		cpu_relax();
++
++}
++
++static int ls_reboot_probe(struct platform_device *pdev)
++{
++	ls_reboot_priv = devm_kzalloc(&pdev->dev,
++				      sizeof(*ls_reboot_priv), GFP_KERNEL);
++	if (!ls_reboot_priv) {
++		dev_err(&pdev->dev, "out of memory for context\n");
++		return -ENOMEM;
++	}
++
++	ls_reboot_priv->rstcr = of_iomap(pdev->dev.of_node, 0);
++	if (!ls_reboot_priv->rstcr) {
++		devm_kfree(&pdev->dev, ls_reboot_priv);
++		dev_err(&pdev->dev, "cannot map reset control register\n");
++		return -ENODEV;
++	}
++
++	ls_reboot_priv->dev = &pdev->dev;
++
++	arm_pm_restart = ls_reboot;
++
++	return 0;
++}
++
++static const struct of_device_id ls_reboot_of_match[] = {
++	{ .compatible = "fsl,ls-reset" },
++	{}
++};
++
++static struct platform_driver ls_reboot_driver = {
++	.probe = ls_reboot_probe,
++	.driver = {
++		.name = "ls-reset",
++		.of_match_table = ls_reboot_of_match,
++	},
++};
++
++static int __init ls_reboot_init(void)
++{
++	return platform_driver_register(&ls_reboot_driver);
++}
++device_initcall(ls_reboot_init);
+diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
+index b9ddf0c..894894f 100644
+--- a/drivers/usb/core/config.c
++++ b/drivers/usb/core/config.c
+@@ -115,7 +115,8 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
+ 	    USB_SS_MULT(desc->bmAttributes) > 3) {
+ 		dev_warn(ddev, "Isoc endpoint has Mult of %d in "
+ 				"config %d interface %d altsetting %d ep %d: "
+-				"setting to 3\n", desc->bmAttributes + 1,
++				"setting to 3\n",
++				USB_SS_MULT(desc->bmAttributes),
+ 				cfgno, inum, asnum, ep->desc.bEndpointAddress);
+ 		ep->ss_ep_comp.bmAttributes = 2;
+ 	}
+diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
+index d7a6d8b..66be3b4 100644
+--- a/drivers/usb/core/driver.c
++++ b/drivers/usb/core/driver.c
+@@ -499,11 +499,15 @@ static int usb_unbind_interface(struct device *dev)
+ int usb_driver_claim_interface(struct usb_driver *driver,
+ 				struct usb_interface *iface, void *priv)
+ {
+-	struct device *dev = &iface->dev;
++	struct device *dev;
+ 	struct usb_device *udev;
+ 	int retval = 0;
+ 	int lpm_disable_error;
+
++	if (!iface)
++		return -ENODEV;
++
++	dev = &iface->dev;
+ 	if (dev->driver)
+ 		return -EBUSY;
+
+diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
+index efc9531..a4c0b85 100644
+--- a/drivers/usb/core/hcd-pci.c
++++ b/drivers/usb/core/hcd-pci.c
+@@ -74,6 +74,15 @@ static void for_each_companion(struct pci_dev *pdev, struct usb_hcd *hcd,
+ 		if (companion->bus != pdev->bus ||
+ 		    PCI_SLOT(companion->devfn) != slot)
+ 			continue;
++
++		/*
++		 * Companion device should be either UHCI, OHCI or EHCI host
++		 * controller, otherwise skip.
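++		 * (CL_UHCI, CL_OHCI and CL_EHCI are the PCI class words
++		 * defined in <linux/usb/hcd.h>: 0x0c0300, 0x0c0310 and
++		 * 0x0c0320 respectively.)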
++ */ ++ if (companion->class != CL_UHCI && companion->class != CL_OHCI && ++ companion->class != CL_EHCI) ++ continue; ++ + companion_hcd = pci_get_drvdata(companion); + if (!companion_hcd || !companion_hcd->self.root_hub) + continue; +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c +index 2222899..d8e1d5c 100644 +--- a/drivers/usb/core/hub.c ++++ b/drivers/usb/core/hub.c +@@ -124,6 +124,10 @@ struct usb_hub *usb_hub_to_struct_hub(struct usb_device *hdev) + + static int usb_device_supports_lpm(struct usb_device *udev) + { ++ /* Some devices have trouble with LPM */ ++ if (udev->quirks & USB_QUIRK_NO_LPM) ++ return 0; ++ + /* USB 2.1 (and greater) devices indicate LPM support through + * their USB 2.0 Extended Capabilities BOS descriptor. + */ +@@ -1030,10 +1034,20 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) + unsigned delay; + + /* Continue a partial initialization */ +- if (type == HUB_INIT2) +- goto init2; +- if (type == HUB_INIT3) ++ if (type == HUB_INIT2 || type == HUB_INIT3) { ++ device_lock(hub->intfdev); ++ ++ /* Was the hub disconnected while we were waiting? */ ++ if (hub->disconnected) { ++ device_unlock(hub->intfdev); ++ kref_put(&hub->kref, hub_release); ++ return; ++ } ++ if (type == HUB_INIT2) ++ goto init2; + goto init3; ++ } ++ kref_get(&hub->kref); + + /* The superspeed hub except for root hub has to use Hub Depth + * value as an offset into the route string to locate the bits +@@ -1231,6 +1245,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) + queue_delayed_work(system_power_efficient_wq, + &hub->init_work, + msecs_to_jiffies(delay)); ++ device_unlock(hub->intfdev); + return; /* Continues at init3: below */ + } else { + msleep(delay); +@@ -1252,6 +1267,11 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) + /* Allow autosuspend if it was suppressed */ + if (type <= HUB_INIT3) + usb_autopm_put_interface_async(to_usb_interface(hub->intfdev)); ++ ++ if (type == HUB_INIT2 || type == HUB_INIT3) ++ device_unlock(hub->intfdev); ++ ++ kref_put(&hub->kref, hub_release); + } + + /* Implement the continuations for the delays above */ +@@ -4222,7 +4242,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1, + { + struct usb_device *hdev = hub->hdev; + struct usb_hcd *hcd = bus_to_hcd(hdev->bus); +- int i, j, retval; ++ int retries, operations, retval, i; + unsigned delay = HUB_SHORT_RESET_TIME; + enum usb_device_speed oldspeed = udev->speed; + const char *speed; +@@ -4324,7 +4344,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1, + * first 8 bytes of the device descriptor to get the ep0 maxpacket + * value. + */ +- for (i = 0; i < GET_DESCRIPTOR_TRIES; (++i, msleep(100))) { ++ for (retries = 0; retries < GET_DESCRIPTOR_TRIES; (++retries, msleep(100))) { + bool did_new_scheme = false; + + if (use_new_scheme(udev, retry_counter)) { +@@ -4351,7 +4371,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1, + * 255 is for WUSB devices, we actually need to use + * 512 (WUSB1.0[4.8.1]). + */ +- for (j = 0; j < 3; ++j) { ++ for (operations = 0; operations < 3; ++operations) { + buf->bMaxPacketSize0 = 0; + r = usb_control_msg(udev, usb_rcvaddr0pipe(), + USB_REQ_GET_DESCRIPTOR, USB_DIR_IN, +@@ -4371,7 +4391,13 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1, + r = -EPROTO; + break; + } +- if (r == 0) ++ /* ++ * Some devices time out if they are powered on ++ * when already connected. 
They need a second ++ * reset. But only on the first attempt, ++ * lest we get into a time out/reset loop ++ */ ++ if (r == 0 || (r == -ETIMEDOUT && retries == 0)) + break; + } + udev->descriptor.bMaxPacketSize0 = +@@ -4403,7 +4429,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1, + * authorization will assign the final address. + */ + if (udev->wusb == 0) { +- for (j = 0; j < SET_ADDRESS_TRIES; ++j) { ++ for (operations = 0; operations < SET_ADDRESS_TRIES; ++operations) { + retval = hub_set_address(udev, devnum); + if (retval >= 0) + break; +@@ -4498,6 +4524,8 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1, + goto fail; + } + ++ usb_detect_quirks(udev); ++ + if (udev->wusb == 0 && le16_to_cpu(udev->descriptor.bcdUSB) >= 0x0201) { + retval = usb_get_bos_descriptor(udev); + if (!retval) { +@@ -4692,7 +4720,6 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus, + if (status < 0) + goto loop; + +- usb_detect_quirks(udev); + if (udev->quirks & USB_QUIRK_DELAY_INIT) + msleep(1000); + +@@ -5324,9 +5351,6 @@ static int usb_reset_and_verify_device(struct usb_device *udev) + if (udev->usb2_hw_lpm_enabled == 1) + usb_set_usb2_hardware_lpm(udev, 0); + +- bos = udev->bos; +- udev->bos = NULL; +- + /* Disable LPM and LTM while we reset the device and reinstall the alt + * settings. Device-initiated LPM settings, and system exit latency + * settings are cleared when the device is reset, so we have to set +@@ -5335,15 +5359,17 @@ static int usb_reset_and_verify_device(struct usb_device *udev) + ret = usb_unlocked_disable_lpm(udev); + if (ret) { + dev_err(&udev->dev, "%s Failed to disable LPM\n.", __func__); +- goto re_enumerate; ++ goto re_enumerate_no_bos; + } + ret = usb_disable_ltm(udev); + if (ret) { + dev_err(&udev->dev, "%s Failed to disable LTM\n.", + __func__); +- goto re_enumerate; ++ goto re_enumerate_no_bos; + } + ++ bos = udev->bos; ++ + for (i = 0; i < SET_CONFIG_TRIES; ++i) { + + /* ep0 maxpacket size may change; let the HCD know about it. +@@ -5435,15 +5461,19 @@ done: + usb_set_usb2_hardware_lpm(udev, 1); + usb_unlocked_enable_lpm(udev); + usb_enable_ltm(udev); +- usb_release_bos_descriptor(udev); +- udev->bos = bos; ++ /* release the new BOS descriptor allocated by hub_port_init() */ ++ if (udev->bos != bos) { ++ usb_release_bos_descriptor(udev); ++ udev->bos = bos; ++ } + return 0; + + re_enumerate: +- /* LPM state doesn't matter when we're about to destroy the device. */ +- hub_port_logical_disconnect(parent_hub, port1); + usb_release_bos_descriptor(udev); + udev->bos = bos; ++re_enumerate_no_bos: ++ /* LPM state doesn't matter when we're about to destroy the device. 
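(hub_port_logical_disconnect() below kicks hub_wq, which re-enumerates the port from scratch.)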
*/ ++ hub_port_logical_disconnect(parent_hub, port1); + return -ENODEV; + } + +diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c +index 8a77a41..6b53fc3 100644 +--- a/drivers/usb/core/quirks.c ++++ b/drivers/usb/core/quirks.c +@@ -196,6 +196,12 @@ static const struct usb_device_id usb_quirk_list[] = { + { USB_DEVICE(0x1a0a, 0x0200), .driver_info = + USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL }, + ++ /* Blackmagic Design Intensity Shuttle */ ++ { USB_DEVICE(0x1edb, 0xbd3b), .driver_info = USB_QUIRK_NO_LPM }, ++ ++ /* Blackmagic Design UltraStudio SDI */ ++ { USB_DEVICE(0x1edb, 0xbd4f), .driver_info = USB_QUIRK_NO_LPM }, ++ + { } /* terminating entry must be last */ + }; + +diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c +index b0f4d52..17eeab8 100644 +--- a/drivers/usb/dwc3/core.c ++++ b/drivers/usb/dwc3/core.c +@@ -673,22 +673,20 @@ static int dwc3_probe(struct platform_device *pdev) + * since it will be requested by the xhci-plat driver. + */ + regs = devm_ioremap_resource(dev, res); +- if (IS_ERR(regs)) +- return PTR_ERR(regs); ++ if (IS_ERR(regs)) { ++ ret = PTR_ERR(regs); ++ goto err0; ++ } + + dwc->regs = regs; + dwc->regs_size = resource_size(res); +- /* +- * restore res->start back to its original value so that, +- * in case the probe is deferred, we don't end up getting error in +- * request the memory region the next time probe is called. +- */ +- res->start -= DWC3_GLOBALS_REGS_START; + + if (node) { + dwc->maximum_speed = of_usb_get_maximum_speed(node); + + dwc->needs_fifo_resize = of_property_read_bool(node, "tx-fifo-resize"); ++ dwc->configure_gfladj = ++ of_property_read_bool(node, "configure-gfladj"); + dwc->dr_mode = of_usb_get_dr_mode(node); + } else if (pdata) { + dwc->maximum_speed = pdata->maximum_speed; +@@ -703,7 +701,7 @@ static int dwc3_probe(struct platform_device *pdev) + + ret = dwc3_core_get_phy(dwc); + if (ret) +- return ret; ++ goto err0; + + spin_lock_init(&dwc->lock); + platform_set_drvdata(pdev, dwc); +@@ -722,7 +720,25 @@ static int dwc3_probe(struct platform_device *pdev) + if (ret) { + dev_err(dwc->dev, "failed to allocate event buffers\n"); + ret = -ENOMEM; +- goto err0; ++ goto err1; ++ } ++ ++ /* Adjust Frame Length */ ++ if (dwc->configure_gfladj) ++ dwc3_writel(dwc->regs, DWC3_GFLADJ, GFLADJ_30MHZ_REG_SEL | ++ GFLADJ_30MHZ(GFLADJ_30MHZ_DEFAULT)); ++ ++ /* Change burst beat and outstanding pipelined transfers requests */ ++ dwc3_writel(dwc->regs, DWC3_GSBUSCFG0, ++ (dwc3_readl(dwc->regs, DWC3_GSBUSCFG0) & ~0xff) | 0xf); ++ dwc3_writel(dwc->regs, DWC3_GSBUSCFG1, ++ dwc3_readl(dwc->regs, DWC3_GSBUSCFG1) | 0xf00); ++ ++ /* Enable Snooping */ ++ if (node && of_dma_is_coherent(node)) { ++ dwc3_writel(dwc->regs, DWC3_GSBUSCFG0, ++ dwc3_readl(dwc->regs, DWC3_GSBUSCFG0) | 0x22220000); ++ dev_dbg(dev, "enabled snooping for usb\n"); + } + + if (IS_ENABLED(CONFIG_USB_DWC3_HOST)) +@@ -736,65 +752,81 @@ static int dwc3_probe(struct platform_device *pdev) + ret = dwc3_core_init(dwc); + if (ret) { + dev_err(dev, "failed to initialize core\n"); +- goto err0; ++ goto err1; + } + + usb_phy_set_suspend(dwc->usb2_phy, 0); + usb_phy_set_suspend(dwc->usb3_phy, 0); + ret = phy_power_on(dwc->usb2_generic_phy); + if (ret < 0) +- goto err1; ++ goto err2; + + ret = phy_power_on(dwc->usb3_generic_phy); + if (ret < 0) +- goto err_usb2phy_power; ++ goto err3; + + ret = dwc3_event_buffers_setup(dwc); + if (ret) { + dev_err(dwc->dev, "failed to setup event buffers\n"); +- goto err_usb3phy_power; ++ goto err4; + } + + ret = dwc3_core_init_mode(dwc); + if 
(ret) +- goto err2; ++ goto err5; + + ret = dwc3_debugfs_init(dwc); + if (ret) { + dev_err(dev, "failed to initialize debugfs\n"); +- goto err3; ++ goto err6; + } + + pm_runtime_allow(dev); + + return 0; + +-err3: ++err6: + dwc3_core_exit_mode(dwc); + +-err2: ++err5: + dwc3_event_buffers_cleanup(dwc); + +-err_usb3phy_power: ++err4: + phy_power_off(dwc->usb3_generic_phy); + +-err_usb2phy_power: ++err3: + phy_power_off(dwc->usb2_generic_phy); + +-err1: ++err2: + usb_phy_set_suspend(dwc->usb2_phy, 1); + usb_phy_set_suspend(dwc->usb3_phy, 1); + dwc3_core_exit(dwc); + +-err0: ++err1: + dwc3_free_event_buffers(dwc); + ++err0: ++ /* ++ * restore res->start back to its original value so that, in case the ++ * probe is deferred, we don't end up getting error in request the ++ * memory region the next time probe is called. ++ */ ++ res->start -= DWC3_GLOBALS_REGS_START; ++ + return ret; + } + + static int dwc3_remove(struct platform_device *pdev) + { + struct dwc3 *dwc = platform_get_drvdata(pdev); ++ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ ++ /* ++ * restore res->start back to its original value so that, in case the ++ * probe is deferred, we don't end up getting error in request the ++ * memory region the next time probe is called. ++ */ ++ res->start -= DWC3_GLOBALS_REGS_START; + + dwc3_debugfs_exit(dwc); + dwc3_core_exit_mode(dwc); +diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h +index 66f6256..aec8953 100644 +--- a/drivers/usb/dwc3/core.h ++++ b/drivers/usb/dwc3/core.h +@@ -26,6 +26,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -123,6 +124,7 @@ + #define DWC3_GEVNTCOUNT(n) (0xc40c + (n * 0x10)) + + #define DWC3_GHWPARAMS8 0xc600 ++#define DWC3_GFLADJ 0xc630 + + /* Device Registers */ + #define DWC3_DCFG 0xc700 +@@ -210,6 +212,11 @@ + #define DWC3_GHWPARAMS4_HIBER_SCRATCHBUFS(n) (((n) & (0x0f << 13)) >> 13) + #define DWC3_MAX_HIBER_SCRATCHBUFS 15 + ++/* Global Frame Length Adjustment Register */ ++#define GFLADJ_30MHZ_REG_SEL (1 << 7) ++#define GFLADJ_30MHZ(n) ((n) & 0x3f) ++#define GFLADJ_30MHZ_DEFAULT 0x20 ++ + /* Device Configuration Register */ + #define DWC3_DCFG_DEVADDR(addr) ((addr) << 3) + #define DWC3_DCFG_DEVADDR_MASK DWC3_DCFG_DEVADDR(0x7f) +@@ -766,6 +773,7 @@ struct dwc3 { + unsigned has_hibernation:1; + unsigned is_selfpowered:1; + unsigned needs_fifo_resize:1; ++ unsigned configure_gfladj:1; + unsigned pullups_connected:1; + unsigned resize_fifos:1; + unsigned setup_packet_pending:1; +diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c +index dcb8ca0..c41d46c 100644 +--- a/drivers/usb/dwc3/host.c ++++ b/drivers/usb/dwc3/host.c +@@ -39,6 +39,12 @@ int dwc3_host_init(struct dwc3 *dwc) + xhci->dev.dma_mask = dwc->dev->dma_mask; + xhci->dev.dma_parms = dwc->dev->dma_parms; + ++ /* set DMA operations */ ++ if (dwc->dev->of_node && of_dma_is_coherent(dwc->dev->of_node)) { ++ xhci->dev.archdata.dma_ops = dwc->dev->archdata.dma_ops; ++ dev_dbg(dwc->dev, "set dma_ops for usb\n"); ++ } ++ + dwc->xhci = xhci; + + ret = platform_device_add_resources(xhci, dwc->xhci_resources, +diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c +index 7e5c90e..c6027ac 100644 +--- a/drivers/usb/host/xhci-pci.c ++++ b/drivers/usb/host/xhci-pci.c +@@ -23,10 +23,17 @@ + #include + #include + #include ++#include + + #include "xhci.h" + #include "xhci-trace.h" + ++#define SSIC_PORT_NUM 2 ++#define SSIC_PORT_CFG2 0x880c ++#define SSIC_PORT_CFG2_OFFSET 0x30 ++#define PROG_DONE (1 << 30) ++#define 
SSIC_PORT_UNUSED	(1 << 31)
++
+ /* Device for a quirk */
+ #define PCI_VENDOR_ID_FRESCO_LOGIC	0x1b73
+ #define PCI_DEVICE_ID_FRESCO_LOGIC_PDK	0x1000
+@@ -40,6 +47,8 @@
+ #define PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI		0x22b5
+ #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI		0xa12f
+ #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI	0x9d2f
++#define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI		0x0aa8
++#define PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI		0x1aa8
+
+ static const char hcd_name[] = "xhci_hcd";
+
+@@ -140,9 +149,15 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ 	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+ 		(pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
+ 		 pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
+-		 pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI)) {
++		 pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
++		 pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI ||
++		 pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI)) {
+ 		xhci->quirks |= XHCI_PME_STUCK_QUIRK;
+ 	}
++	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
++		 pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) {
++		xhci->quirks |= XHCI_SSIC_PORT_UNUSED;
++	}
+ 	if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
+ 			pdev->device == PCI_DEVICE_ID_EJ168) {
+ 		xhci->quirks |= XHCI_RESET_ON_RESUME;
+@@ -169,20 +184,18 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ 				"QUIRK: Resetting on resume");
+ }
+
+-/*
+- * Make sure PME works on some Intel xHCI controllers by writing 1 to clear
+- * the Internal PME flag bit in vendor specific PMCTRL register at offset 0x80a4
+- */
+-static void xhci_pme_quirk(struct xhci_hcd *xhci)
++#ifdef CONFIG_ACPI
++static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev)
+ {
+-	u32 val;
+-	void __iomem *reg;
+-
+-	reg = (void __iomem *) xhci->cap_regs + 0x80a4;
+-	val = readl(reg);
+-	writel(val | BIT(28), reg);
+-	readl(reg);
++	static const u8 intel_dsm_uuid[] = {
++		0xb7, 0x0c, 0x34, 0xac, 0x01, 0xe9, 0xbf, 0x45,
++		0xb7, 0xe6, 0x2b, 0x34, 0xec, 0x93, 0x1e, 0x23,
++	};
++	acpi_evaluate_dsm(ACPI_HANDLE(&dev->dev), intel_dsm_uuid, 3, 1, NULL);
+ }
++#else
++	static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev) { }
++#endif /* CONFIG_ACPI */
+
+ /* called during probe() after chip reset completes */
+ static int xhci_pci_setup(struct usb_hcd *hcd)
+@@ -263,6 +276,9 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ 			HCC_MAX_PSA(xhci->hcc_params) >= 4)
+ 		xhci->shared_hcd->can_do_streams = 1;
+
++	if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
++		xhci_pme_acpi_rtd3_enable(dev);
++
+ 	/* USB-2 and USB-3 roothubs initialized, allow runtime pm suspend */
+ 	pm_runtime_put_noidle(&dev->dev);
+
+@@ -282,6 +298,7 @@ static void xhci_pci_remove(struct pci_dev *dev)
+ 	struct xhci_hcd *xhci;
+
+ 	xhci = hcd_to_xhci(pci_get_drvdata(dev));
++	xhci->xhc_state |= XHCI_STATE_REMOVING;
+ 	if (xhci->shared_hcd) {
+ 		usb_remove_hcd(xhci->shared_hcd);
+ 		usb_put_hcd(xhci->shared_hcd);
+@@ -296,10 +313,65 @@ static void xhci_pci_remove(struct pci_dev *dev)
+ }
+
+ #ifdef CONFIG_PM
++/*
++ * On some Intel xHCI controllers, getting D3 to work requires the SSIC
++ * ports to be marked as "unused" through a vendor-specific SSIC CONFIG
++ * register at offset 0x883c before putting the xHC into D3; after D3
++ * exit they must be marked as "used" again. Without this change, the
++ * xHC might not enter D3 state.
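++ * (Offset arithmetic, for reference: port 1's register is
++ * SSIC_PORT_CFG2 + 1 * SSIC_PORT_CFG2_OFFSET = 0x880c + 0x30 = 0x883c,
++ * the offset quoted above.)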
++ */ ++static void xhci_ssic_port_unused_quirk(struct usb_hcd *hcd, bool suspend) ++{ ++ struct xhci_hcd *xhci = hcd_to_xhci(hcd); ++ u32 val; ++ void __iomem *reg; ++ int i; ++ ++ for (i = 0; i < SSIC_PORT_NUM; i++) { ++ reg = (void __iomem *) xhci->cap_regs + ++ SSIC_PORT_CFG2 + ++ i * SSIC_PORT_CFG2_OFFSET; ++ ++ /* Notify SSIC that SSIC profile programming is not done. */ ++ val = readl(reg) & ~PROG_DONE; ++ writel(val, reg); ++ ++ /* Mark SSIC port as unused(suspend) or used(resume) */ ++ val = readl(reg); ++ if (suspend) ++ val |= SSIC_PORT_UNUSED; ++ else ++ val &= ~SSIC_PORT_UNUSED; ++ writel(val, reg); ++ ++ /* Notify SSIC that SSIC profile programming is done */ ++ val = readl(reg) | PROG_DONE; ++ writel(val, reg); ++ readl(reg); ++ } ++} ++ ++/* ++ * Make sure PME works on some Intel xHCI controllers by writing 1 to clear ++ * the Internal PME flag bit in vendor specific PMCTRL register at offset 0x80a4 ++ */ ++static void xhci_pme_quirk(struct usb_hcd *hcd) ++{ ++ struct xhci_hcd *xhci = hcd_to_xhci(hcd); ++ void __iomem *reg; ++ u32 val; ++ ++ reg = (void __iomem *) xhci->cap_regs + 0x80a4; ++ val = readl(reg); ++ writel(val | BIT(28), reg); ++ readl(reg); ++} ++ + static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup) + { + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + struct pci_dev *pdev = to_pci_dev(hcd->self.controller); ++ int ret; + + /* + * Systems with the TI redriver that loses port status change events +@@ -309,9 +381,16 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup) + pdev->no_d3cold = true; + + if (xhci->quirks & XHCI_PME_STUCK_QUIRK) +- xhci_pme_quirk(xhci); ++ xhci_pme_quirk(hcd); ++ ++ if (xhci->quirks & XHCI_SSIC_PORT_UNUSED) ++ xhci_ssic_port_unused_quirk(hcd, true); + +- return xhci_suspend(xhci, do_wakeup); ++ ret = xhci_suspend(xhci, do_wakeup); ++ if (ret && (xhci->quirks & XHCI_SSIC_PORT_UNUSED)) ++ xhci_ssic_port_unused_quirk(hcd, false); ++ ++ return ret; + } + + static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated) +@@ -341,8 +420,11 @@ static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated) + if (pdev->vendor == PCI_VENDOR_ID_INTEL) + usb_enable_intel_xhci_ports(pdev); + ++ if (xhci->quirks & XHCI_SSIC_PORT_UNUSED) ++ xhci_ssic_port_unused_quirk(hcd, false); ++ + if (xhci->quirks & XHCI_PME_STUCK_QUIRK) +- xhci_pme_quirk(xhci); ++ xhci_pme_quirk(hcd); + + retval = xhci_resume(xhci, hibernated); + return retval; +diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c +index 1e5fb8c..04e7525 100644 +--- a/drivers/usb/host/xhci-ring.c ++++ b/drivers/usb/host/xhci-ring.c +@@ -3840,8 +3840,12 @@ static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd, + { + int reserved_trbs = xhci->cmd_ring_reserved_trbs; + int ret; +- if (xhci->xhc_state & XHCI_STATE_DYING) ++ ++ if ((xhci->xhc_state & XHCI_STATE_DYING) || ++ (xhci->xhc_state & XHCI_STATE_HALTED)) { ++ xhci_dbg(xhci, "xHCI dying or halted, can't queue_command\n"); + return -ESHUTDOWN; ++ } + + if (!command_must_succeed) + reserved_trbs++; +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c +index 98380fa..f951b75 100644 +--- a/drivers/usb/host/xhci.c ++++ b/drivers/usb/host/xhci.c +@@ -147,7 +147,8 @@ static int xhci_start(struct xhci_hcd *xhci) + "waited %u microseconds.\n", + XHCI_MAX_HALT_USEC); + if (!ret) +- xhci->xhc_state &= ~(XHCI_STATE_HALTED | XHCI_STATE_DYING); ++ /* clear state flags. 
Including dying, halted or removing */ ++ xhci->xhc_state = 0; + + return ret; + } +@@ -1102,8 +1103,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) + /* Resume root hubs only when have pending events. */ + status = readl(&xhci->op_regs->status); + if (status & STS_EINT) { +- usb_hcd_resume_root_hub(hcd); + usb_hcd_resume_root_hub(xhci->shared_hcd); ++ usb_hcd_resume_root_hub(hcd); + } + } + +@@ -1118,10 +1119,10 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) + + /* Re-enable port polling. */ + xhci_dbg(xhci, "%s: starting port polling.\n", __func__); +- set_bit(HCD_FLAG_POLL_RH, &hcd->flags); +- usb_hcd_poll_rh_status(hcd); + set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags); + usb_hcd_poll_rh_status(xhci->shared_hcd); ++ set_bit(HCD_FLAG_POLL_RH, &hcd->flags); ++ usb_hcd_poll_rh_status(hcd); + + return retval; + } +@@ -1548,7 +1549,9 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) + xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, + "HW died, freeing TD."); + urb_priv = urb->hcpriv; +- for (i = urb_priv->td_cnt; i < urb_priv->length; i++) { ++ for (i = urb_priv->td_cnt; ++ i < urb_priv->length && xhci->devs[urb->dev->slot_id]; ++ i++) { + td = urb_priv->td[i]; + if (!list_empty(&td->td_list)) + list_del_init(&td->td_list); +@@ -2751,7 +2754,8 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) + if (ret <= 0) + return ret; + xhci = hcd_to_xhci(hcd); +- if (xhci->xhc_state & XHCI_STATE_DYING) ++ if ((xhci->xhc_state & XHCI_STATE_DYING) || ++ (xhci->xhc_state & XHCI_STATE_REMOVING)) + return -ENODEV; + + xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); +@@ -3793,7 +3797,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, + u64 temp_64; + struct xhci_command *command; + +- if (xhci->xhc_state) /* dying or halted */ ++ if (xhci->xhc_state) /* dying, removing or halted */ + return -EINVAL; + + if (!udev->slot_id) { +@@ -4912,6 +4916,16 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) + goto error; + xhci_dbg(xhci, "Reset complete\n"); + ++ /* ++ * On some xHCI controllers (e.g. R-Car SoCs), the AC64 bit (bit 0) ++ * of HCCPARAMS1 is set to 1. However, the xHCs don't support 64-bit ++ * address memory pointers actually. So, this driver clears the AC64 ++ * bit of xhci->hcc_params to call dma_set_coherent_mask(dev, ++ * DMA_BIT_MASK(32)) in this xhci_gen_setup(). 
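++	 * (HCC_64BIT_ADDR() tests this same AC64 bit, so clearing it
++	 * steers the setup below into the 32-bit DMA mask path.)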
++	 */
++	if (xhci->quirks & XHCI_NO_64BIT_SUPPORT)
++		xhci->hcc_params &= ~BIT(0);
++
+ 	/* Set dma_mask and coherent_dma_mask to 64-bits,
+ 	 * if xHC supports 64-bit addressing */
+ 	if (HCC_64BIT_ADDR(xhci->hcc_params) &&
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index 54f386f..3850cb2 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -1531,6 +1531,7 @@ struct xhci_hcd {
+ 	 */
+ #define XHCI_STATE_DYING	(1 << 0)
+ #define XHCI_STATE_HALTED	(1 << 1)
++#define XHCI_STATE_REMOVING	(1 << 2)
+ 	/* Statistics */
+ 	int			error_bitmask;
+ 	unsigned int		quirks;
+@@ -1565,6 +1566,8 @@ struct xhci_hcd {
+ /* For controllers with a broken beyond repair streams implementation */
+ #define XHCI_BROKEN_STREAMS	(1 << 19)
+ #define XHCI_PME_STUCK_QUIRK	(1 << 20)
++#define XHCI_SSIC_PORT_UNUSED	(1 << 22)
++#define XHCI_NO_64BIT_SUPPORT	(1 << 23)
+ 	unsigned int		num_active_eps;
+ 	unsigned int		limit_active_eps;
+ 	/* There are two roothubs to keep track of bus suspend info for */
+diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
+index 553212f..e8d695b 100644
+--- a/drivers/vfio/pci/vfio_pci_intrs.c
++++ b/drivers/vfio/pci/vfio_pci_intrs.c
+@@ -560,7 +560,7 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
+ 		struct msi_msg msg;
+
+ 		get_cached_msi_msg(irq, &msg);
+-		write_msi_msg(irq, &msg);
++		pci_write_msi_msg(irq, &msg);
+ 	}
+
+ 	ret = request_irq(irq, vfio_msihandler, 0,
+diff --git a/include/asm-generic/msi.h b/include/asm-generic/msi.h
+new file mode 100644
+index 0000000..61c58d8
+--- /dev/null
++++ b/include/asm-generic/msi.h
+@@ -0,0 +1,32 @@
++#ifndef __ASM_GENERIC_MSI_H
++#define __ASM_GENERIC_MSI_H
++
++#include <linux/types.h>
++
++#ifndef NUM_MSI_ALLOC_SCRATCHPAD_REGS
++# define NUM_MSI_ALLOC_SCRATCHPAD_REGS	2
++#endif
++
++struct msi_desc;
++
++/**
++ * struct msi_alloc_info - Default structure for MSI interrupt allocation.
++ * @desc:	Pointer to msi descriptor
++ * @hwirq:	Associated hw interrupt number in the domain
++ * @scratchpad:	Storage for implementation specific scratch data
++ *
++ * Architectures can provide their own implementation by not including
++ * asm-generic/msi.h into their arch specific header file.
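++ * NUM_MSI_ALLOC_SCRATCHPAD_REGS can be overridden by such an
++ * architecture before this header is pulled in; it defaults to 2 above.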
++ */ ++typedef struct msi_alloc_info { ++ struct msi_desc *desc; ++ irq_hw_number_t hwirq; ++ union { ++ unsigned long ul; ++ void *ptr; ++ } scratchpad[NUM_MSI_ALLOC_SCRATCHPAD_REGS]; ++} msi_alloc_info_t; ++ ++#define GENERIC_MSI_DOMAIN_OPS 1 ++ ++#endif +diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h +index aa70cbd..bee5d68 100644 +--- a/include/asm-generic/vmlinux.lds.h ++++ b/include/asm-generic/vmlinux.lds.h +@@ -164,6 +164,7 @@ + #define CLKSRC_OF_TABLES() OF_TABLE(CONFIG_CLKSRC_OF, clksrc) + #define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip) + #define CLK_OF_TABLES() OF_TABLE(CONFIG_COMMON_CLK, clk) ++#define IOMMU_OF_TABLES() OF_TABLE(CONFIG_OF_IOMMU, iommu) + #define RESERVEDMEM_OF_TABLES() OF_TABLE(CONFIG_OF_RESERVED_MEM, reservedmem) + #define CPU_METHOD_OF_TABLES() OF_TABLE(CONFIG_SMP, cpu_method) + #define EARLYCON_OF_TABLES() OF_TABLE(CONFIG_SERIAL_EARLYCON, earlycon) +@@ -497,6 +498,7 @@ + CLK_OF_TABLES() \ + RESERVEDMEM_OF_TABLES() \ + CLKSRC_OF_TABLES() \ ++ IOMMU_OF_TABLES() \ + CPU_METHOD_OF_TABLES() \ + KERNEL_DTB() \ + IRQCHIP_OF_MATCH_TABLE() \ +diff --git a/include/linux/acpi.h b/include/linux/acpi.h +index 1c7eaa7..d017dbf 100644 +--- a/include/linux/acpi.h ++++ b/include/linux/acpi.h +@@ -27,6 +27,7 @@ + + #include + #include /* for struct resource */ ++#include + #include + #include + +@@ -290,11 +291,6 @@ unsigned long acpi_dev_irq_flags(u8 triggering, u8 polarity, u8 shareable); + bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index, + struct resource *res); + +-struct resource_list_entry { +- struct list_head node; +- struct resource res; +-}; +- + void acpi_dev_free_resource_list(struct list_head *list); + int acpi_dev_get_resources(struct acpi_device *adev, struct list_head *list, + int (*preproc)(struct acpi_resource *, void *), +diff --git a/include/linux/device.h b/include/linux/device.h +index ce1f216..941d97b 100644 +--- a/include/linux/device.h ++++ b/include/linux/device.h +@@ -690,6 +690,8 @@ struct acpi_dev_node { + * along with subsystem-level and driver-level callbacks. + * @pins: For device pin management. + * See Documentation/pinctrl.txt for details. ++ * @msi_list: Hosts MSI descriptors ++ * @msi_domain: The generic MSI domain this device is using. + * @numa_node: NUMA node this device is close to. + * @dma_mask: Dma mask (if dma'ble device). 
+ * @coherent_dma_mask: Like dma_mask, but for alloc_coherent mapping as not all
+@@ -750,9 +752,15 @@ struct device {
+ 	struct dev_pm_info	power;
+ 	struct dev_pm_domain	*pm_domain;
+
++#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
++	struct irq_domain	*msi_domain;
++#endif
+ #ifdef CONFIG_PINCTRL
+ 	struct dev_pin_info	*pins;
+ #endif
++#ifdef CONFIG_GENERIC_MSI_IRQ
++	struct list_head	msi_list;
++#endif
+
+ #ifdef CONFIG_NUMA
+ 	int		numa_node;	/* NUMA node this device is close to */
+@@ -837,6 +845,22 @@ static inline void set_dev_node(struct device *dev, int node)
+ }
+ #endif
+
++static inline struct irq_domain *dev_get_msi_domain(const struct device *dev)
++{
++#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
++	return dev->msi_domain;
++#else
++	return NULL;
++#endif
++}
++
++static inline void dev_set_msi_domain(struct device *dev, struct irq_domain *d)
++{
++#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
++	dev->msi_domain = d;
++#endif
++}
++
+ static inline void *dev_get_drvdata(const struct device *dev)
+ {
+ 	return dev->driver_data;
+diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
+index d5d3881..c3007cb 100644
+--- a/include/linux/dma-mapping.h
++++ b/include/linux/dma-mapping.h
+@@ -129,11 +129,14 @@ static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
+
+ extern u64 dma_get_required_mask(struct device *dev);
+
+-#ifndef set_arch_dma_coherent_ops
+-static inline int set_arch_dma_coherent_ops(struct device *dev)
+-{
+-	return 0;
+-}
++#ifndef arch_setup_dma_ops
++static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
++				      u64 size, struct iommu_ops *iommu,
++				      bool coherent) { }
++#endif
++
++#ifndef arch_teardown_dma_ops
++static inline void arch_teardown_dma_ops(struct device *dev) { }
+ #endif
+
+ static inline unsigned int dma_get_max_seg_size(struct device *dev)
+diff --git a/include/linux/fsl/guts.h b/include/linux/fsl/guts.h
+new file mode 100644
+index 0000000..84d971f
+--- /dev/null
++++ b/include/linux/fsl/guts.h
+@@ -0,0 +1,192 @@
++/**
++ * Freescale 85xx and 86xx Global Utilities register set
++ *
++ * Authors: Jeff Brown
++ *          Timur Tabi
++ *
++ * Copyright 2004,2007,2012 Freescale Semiconductor, Inc
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation; either version 2 of the License, or (at your
++ * option) any later version.
++ */
++
++#ifndef __FSL_GUTS_H__
++#define __FSL_GUTS_H__
++
++#include <linux/types.h>
++
++/**
++ * Global Utility Registers.
++ *
++ * Not all registers defined in this structure are available on all chips, so
++ * you are expected to know whether a given register actually exists on your
++ * chip before you access it.
++ *
++ * Also, some registers are similar on different chips but have slightly
++ * different names.  In these cases, one name is chosen to avoid extraneous
++ * #ifdefs.
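++ * For example, the register known as 'elbcvselcr' on 86xx parts
++ * appears below under its 85xx name, 'iovselsr'.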
++ */ ++struct ccsr_guts { ++ __be32 porpllsr; /* 0x.0000 - POR PLL Ratio Status Register */ ++ __be32 porbmsr; /* 0x.0004 - POR Boot Mode Status Register */ ++ __be32 porimpscr; /* 0x.0008 - POR I/O Impedance Status and Control Register */ ++ __be32 pordevsr; /* 0x.000c - POR I/O Device Status Register */ ++ __be32 pordbgmsr; /* 0x.0010 - POR Debug Mode Status Register */ ++ __be32 pordevsr2; /* 0x.0014 - POR device status register 2 */ ++ u8 res018[0x20 - 0x18]; ++ __be32 porcir; /* 0x.0020 - POR Configuration Information Register */ ++ u8 res024[0x30 - 0x24]; ++ __be32 gpiocr; /* 0x.0030 - GPIO Control Register */ ++ u8 res034[0x40 - 0x34]; ++ __be32 gpoutdr; /* 0x.0040 - General-Purpose Output Data Register */ ++ u8 res044[0x50 - 0x44]; ++ __be32 gpindr; /* 0x.0050 - General-Purpose Input Data Register */ ++ u8 res054[0x60 - 0x54]; ++ __be32 pmuxcr; /* 0x.0060 - Alternate Function Signal Multiplex Control */ ++ __be32 pmuxcr2; /* 0x.0064 - Alternate function signal multiplex control 2 */ ++ __be32 dmuxcr; /* 0x.0068 - DMA Mux Control Register */ ++ u8 res06c[0x70 - 0x6c]; ++ __be32 devdisr; /* 0x.0070 - Device Disable Control */ ++#define CCSR_GUTS_DEVDISR_TB1 0x00001000 ++#define CCSR_GUTS_DEVDISR_TB0 0x00004000 ++ __be32 devdisr2; /* 0x.0074 - Device Disable Control 2 */ ++ u8 res078[0x7c - 0x78]; ++ __be32 pmjcr; /* 0x.007c - 4 Power Management Jog Control Register */ ++ __be32 powmgtcsr; /* 0x.0080 - Power Management Status and Control Register */ ++ __be32 pmrccr; /* 0x.0084 - Power Management Reset Counter Configuration Register */ ++ __be32 pmpdccr; /* 0x.0088 - Power Management Power Down Counter Configuration Register */ ++ __be32 pmcdr; /* 0x.008c - 4Power management clock disable register */ ++ __be32 mcpsumr; /* 0x.0090 - Machine Check Summary Register */ ++ __be32 rstrscr; /* 0x.0094 - Reset Request Status and Control Register */ ++ __be32 ectrstcr; /* 0x.0098 - Exception reset control register */ ++ __be32 autorstsr; /* 0x.009c - Automatic reset status register */ ++ __be32 pvr; /* 0x.00a0 - Processor Version Register */ ++ __be32 svr; /* 0x.00a4 - System Version Register */ ++ u8 res0a8[0xb0 - 0xa8]; ++ __be32 rstcr; /* 0x.00b0 - Reset Control Register */ ++ u8 res0b4[0xc0 - 0xb4]; ++ __be32 iovselsr; /* 0x.00c0 - I/O voltage select status register ++ Called 'elbcvselcr' on 86xx SOCs */ ++ u8 res0c4[0x100 - 0xc4]; ++ __be32 rcwsr[16]; /* 0x.0100 - Reset Control Word Status registers ++ There are 16 registers */ ++ u8 res140[0x224 - 0x140]; ++ __be32 iodelay1; /* 0x.0224 - IO delay control register 1 */ ++ __be32 iodelay2; /* 0x.0228 - IO delay control register 2 */ ++ u8 res22c[0x604 - 0x22c]; ++ __be32 pamubypenr; /* 0x.604 - PAMU bypass enable register */ ++ u8 res608[0x800 - 0x608]; ++ __be32 clkdvdr; /* 0x.0800 - Clock Divide Register */ ++ u8 res804[0x900 - 0x804]; ++ __be32 ircr; /* 0x.0900 - Infrared Control Register */ ++ u8 res904[0x908 - 0x904]; ++ __be32 dmacr; /* 0x.0908 - DMA Control Register */ ++ u8 res90c[0x914 - 0x90c]; ++ __be32 elbccr; /* 0x.0914 - eLBC Control Register */ ++ u8 res918[0xb20 - 0x918]; ++ __be32 ddr1clkdr; /* 0x.0b20 - DDR1 Clock Disable Register */ ++ __be32 ddr2clkdr; /* 0x.0b24 - DDR2 Clock Disable Register */ ++ __be32 ddrclkdr; /* 0x.0b28 - DDR Clock Disable Register */ ++ u8 resb2c[0xe00 - 0xb2c]; ++ __be32 clkocr; /* 0x.0e00 - Clock Out Select Register */ ++ u8 rese04[0xe10 - 0xe04]; ++ __be32 ddrdllcr; /* 0x.0e10 - DDR DLL Control Register */ ++ u8 rese14[0xe20 - 0xe14]; ++ __be32 lbcdllcr; /* 0x.0e20 - LBC DLL Control Register 
*/ ++ __be32 cpfor; /* 0x.0e24 - L2 charge pump fuse override register */ ++ u8 rese28[0xf04 - 0xe28]; ++ __be32 srds1cr0; /* 0x.0f04 - SerDes1 Control Register 0 */ ++ __be32 srds1cr1; /* 0x.0f08 - SerDes1 Control Register 0 */ ++ u8 resf0c[0xf2c - 0xf0c]; ++ __be32 itcr; /* 0x.0f2c - Internal transaction control register */ ++ u8 resf30[0xf40 - 0xf30]; ++ __be32 srds2cr0; /* 0x.0f40 - SerDes2 Control Register 0 */ ++ __be32 srds2cr1; /* 0x.0f44 - SerDes2 Control Register 0 */ ++} __attribute__ ((packed)); ++ ++ ++/* Alternate function signal multiplex control */ ++#define MPC85xx_PMUXCR_QE(x) (0x8000 >> (x)) ++ ++#ifdef CONFIG_PPC_86xx ++ ++#define CCSR_GUTS_DMACR_DEV_SSI 0 /* DMA controller/channel set to SSI */ ++#define CCSR_GUTS_DMACR_DEV_IR 1 /* DMA controller/channel set to IR */ ++ ++/* ++ * Set the DMACR register in the GUTS ++ * ++ * The DMACR register determines the source of initiated transfers for each ++ * channel on each DMA controller. Rather than have a bunch of repetitive ++ * macros for the bit patterns, we just have a function that calculates ++ * them. ++ * ++ * guts: Pointer to GUTS structure ++ * co: The DMA controller (0 or 1) ++ * ch: The channel on the DMA controller (0, 1, 2, or 3) ++ * device: The device to set as the source (CCSR_GUTS_DMACR_DEV_xx) ++ */ ++static inline void guts_set_dmacr(struct ccsr_guts __iomem *guts, ++ unsigned int co, unsigned int ch, unsigned int device) ++{ ++ unsigned int shift = 16 + (8 * (1 - co) + 2 * (3 - ch)); ++ ++ clrsetbits_be32(&guts->dmacr, 3 << shift, device << shift); ++} ++ ++#define CCSR_GUTS_PMUXCR_LDPSEL 0x00010000 ++#define CCSR_GUTS_PMUXCR_SSI1_MASK 0x0000C000 /* Bitmask for SSI1 */ ++#define CCSR_GUTS_PMUXCR_SSI1_LA 0x00000000 /* Latched address */ ++#define CCSR_GUTS_PMUXCR_SSI1_HI 0x00004000 /* High impedance */ ++#define CCSR_GUTS_PMUXCR_SSI1_SSI 0x00008000 /* Used for SSI1 */ ++#define CCSR_GUTS_PMUXCR_SSI2_MASK 0x00003000 /* Bitmask for SSI2 */ ++#define CCSR_GUTS_PMUXCR_SSI2_LA 0x00000000 /* Latched address */ ++#define CCSR_GUTS_PMUXCR_SSI2_HI 0x00001000 /* High impedance */ ++#define CCSR_GUTS_PMUXCR_SSI2_SSI 0x00002000 /* Used for SSI2 */ ++#define CCSR_GUTS_PMUXCR_LA_22_25_LA 0x00000000 /* Latched Address */ ++#define CCSR_GUTS_PMUXCR_LA_22_25_HI 0x00000400 /* High impedance */ ++#define CCSR_GUTS_PMUXCR_DBGDRV 0x00000200 /* Signals not driven */ ++#define CCSR_GUTS_PMUXCR_DMA2_0 0x00000008 ++#define CCSR_GUTS_PMUXCR_DMA2_3 0x00000004 ++#define CCSR_GUTS_PMUXCR_DMA1_0 0x00000002 ++#define CCSR_GUTS_PMUXCR_DMA1_3 0x00000001 ++ ++/* ++ * Set the DMA external control bits in the GUTS ++ * ++ * The DMA external control bits in the PMUXCR are only meaningful for ++ * channels 0 and 3. Any other channels are ignored. 
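++ * As a worked example (for illustration): co = 0, ch = 3 gives
++ * shift = 2 * (0 + 1) - (3 & 1) - 1 = 0, i.e. CCSR_GUTS_PMUXCR_DMA1_3,
++ * while co = 0, ch = 0 gives shift = 1, i.e. CCSR_GUTS_PMUXCR_DMA1_0.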
++ *
++ * guts: Pointer to GUTS structure
++ * co:   The DMA controller (0 or 1)
++ * ch:   The channel on the DMA controller (0, 1, 2, or 3)
++ * value: the new value for the bit (0 or 1)
++ */
++static inline void guts_set_pmuxcr_dma(struct ccsr_guts __iomem *guts,
++	unsigned int co, unsigned int ch, unsigned int value)
++{
++	if ((ch == 0) || (ch == 3)) {
++		unsigned int shift = 2 * (co + 1) - (ch & 1) - 1;
++
++		clrsetbits_be32(&guts->pmuxcr, 1 << shift, value << shift);
++	}
++}
++
++#define CCSR_GUTS_CLKDVDR_PXCKEN	0x80000000
++#define CCSR_GUTS_CLKDVDR_SSICKEN	0x20000000
++#define CCSR_GUTS_CLKDVDR_PXCKINV	0x10000000
++#define CCSR_GUTS_CLKDVDR_PXCKDLY_SHIFT	25
++#define CCSR_GUTS_CLKDVDR_PXCKDLY_MASK	0x06000000
++#define CCSR_GUTS_CLKDVDR_PXCKDLY(x) \
++	(((x) & 3) << CCSR_GUTS_CLKDVDR_PXCKDLY_SHIFT)
++#define CCSR_GUTS_CLKDVDR_PXCLK_SHIFT	16
++#define CCSR_GUTS_CLKDVDR_PXCLK_MASK	0x001F0000
++#define CCSR_GUTS_CLKDVDR_PXCLK(x) (((x) & 31) << CCSR_GUTS_CLKDVDR_PXCLK_SHIFT)
++#define CCSR_GUTS_CLKDVDR_SSICLK_MASK	0x000000FF
++#define CCSR_GUTS_CLKDVDR_SSICLK(x) ((x) & CCSR_GUTS_CLKDVDR_SSICLK_MASK)
++
++#endif
++
++#endif
+diff --git a/include/linux/iommu.h b/include/linux/iommu.h
+index e6a7c9f..04229cb 100644
+--- a/include/linux/iommu.h
++++ b/include/linux/iommu.h
+@@ -21,13 +21,15 @@
+
+ #include <linux/errno.h>
+ #include <linux/err.h>
++#include <linux/of.h>
+ #include <linux/types.h>
++#include <linux/scatterlist.h>
+ #include <trace/events/iommu.h>
+
+ #define IOMMU_READ	(1 << 0)
+ #define IOMMU_WRITE	(1 << 1)
+ #define IOMMU_CACHE	(1 << 2) /* DMA cache coherency */
+-#define IOMMU_EXEC	(1 << 3)
++#define IOMMU_NOEXEC	(1 << 3)
+
+ struct iommu_ops;
+ struct iommu_group;
+@@ -49,9 +51,33 @@ struct iommu_domain_geometry {
+ 	bool force_aperture;       /* DMA only allowed in mappable range? */
+ };
+
++/* Domain feature flags */
++#define __IOMMU_DOMAIN_PAGING	(1U << 0)  /* Support for iommu_map/unmap */
++#define __IOMMU_DOMAIN_DMA_API	(1U << 1)  /* Domain for use in DMA-API
++					      implementation              */
++#define __IOMMU_DOMAIN_PT	(1U << 2)  /* Domain is identity mapped   */
++
++/*
++ * These are the possible domain types
++ *
++ *	IOMMU_DOMAIN_BLOCKED	- All DMA is blocked, can be used to isolate
++ *				  devices
++ *	IOMMU_DOMAIN_IDENTITY	- DMA addresses are system physical addresses
++ *	IOMMU_DOMAIN_UNMANAGED	- DMA mappings managed by IOMMU-API user, used
++ *				  for VMs
++ *	IOMMU_DOMAIN_DMA	- Internally used for DMA-API implementations.
++ * This flag allows IOMMU drivers to implement ++ * certain optimizations for these domains ++ */ ++#define IOMMU_DOMAIN_BLOCKED (0U) ++#define IOMMU_DOMAIN_IDENTITY (__IOMMU_DOMAIN_PT) ++#define IOMMU_DOMAIN_UNMANAGED (__IOMMU_DOMAIN_PAGING) ++#define IOMMU_DOMAIN_DMA (__IOMMU_DOMAIN_PAGING | \ ++ __IOMMU_DOMAIN_DMA_API) ++ + struct iommu_domain { ++ unsigned type; + const struct iommu_ops *ops; +- void *priv; + iommu_fault_handler_t handler; + void *handler_token; + struct iommu_domain_geometry geometry; +@@ -61,6 +87,7 @@ enum iommu_cap { + IOMMU_CAP_CACHE_COHERENCY, /* IOMMU can enforce cache coherent DMA + transactions */ + IOMMU_CAP_INTR_REMAP, /* IOMMU supports interrupt isolation */ ++ IOMMU_CAP_NOEXEC, /* IOMMU_NOEXEC flag */ + }; + + /* +@@ -97,23 +124,32 @@ enum iommu_attr { + * @detach_dev: detach device from an iommu domain + * @map: map a physically contiguous memory region to an iommu domain + * @unmap: unmap a physically contiguous memory region from an iommu domain ++ * @map_sg: map a scatter-gather list of physically contiguous memory chunks ++ * to an iommu domain + * @iova_to_phys: translate iova to physical address + * @add_device: add device to iommu grouping + * @remove_device: remove device from iommu grouping + * @domain_get_attr: Query domain attributes + * @domain_set_attr: Change domain attributes ++ * @of_xlate: add OF master IDs to iommu grouping + * @pgsize_bitmap: bitmap of supported page sizes ++ * @priv: per-instance data private to the iommu driver + */ + struct iommu_ops { + bool (*capable)(enum iommu_cap); +- int (*domain_init)(struct iommu_domain *domain); +- void (*domain_destroy)(struct iommu_domain *domain); ++ ++ /* Domain allocation and freeing by the iommu driver */ ++ struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type); ++ void (*domain_free)(struct iommu_domain *); ++ + int (*attach_dev)(struct iommu_domain *domain, struct device *dev); + void (*detach_dev)(struct iommu_domain *domain, struct device *dev); + int (*map)(struct iommu_domain *domain, unsigned long iova, + phys_addr_t paddr, size_t size, int prot); + size_t (*unmap)(struct iommu_domain *domain, unsigned long iova, + size_t size); ++ size_t (*map_sg)(struct iommu_domain *domain, unsigned long iova, ++ struct scatterlist *sg, unsigned int nents, int prot); + phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova); + int (*add_device)(struct device *dev); + void (*remove_device)(struct device *dev); +@@ -131,8 +167,14 @@ struct iommu_ops { + int (*domain_set_windows)(struct iommu_domain *domain, u32 w_count); + /* Get the numer of window per domain */ + u32 (*domain_get_windows)(struct iommu_domain *domain); ++ struct iommu_domain *(*get_dev_iommu_domain)(struct device *dev); ++ ++#ifdef CONFIG_OF_IOMMU ++ int (*of_xlate)(struct device *dev, struct of_phandle_args *args); ++#endif + + unsigned long pgsize_bitmap; ++ void *priv; + }; + + #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */ +@@ -156,6 +198,9 @@ extern int iommu_map(struct iommu_domain *domain, unsigned long iova, + phys_addr_t paddr, size_t size, int prot); + extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, + size_t size); ++extern size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova, ++ struct scatterlist *sg,unsigned int nents, ++ int prot); + extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova); + extern void iommu_set_fault_handler(struct iommu_domain *domain, + iommu_fault_handler_t handler, void 
*token); +@@ -200,6 +245,9 @@ extern int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr, + phys_addr_t offset, u64 size, + int prot); + extern void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr); ++ ++extern struct iommu_domain *iommu_get_dev_domain(struct device *dev); ++ + /** + * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework + * @domain: the iommu domain where the fault has happened +@@ -241,6 +289,13 @@ static inline int report_iommu_fault(struct iommu_domain *domain, + return ret; + } + ++static inline size_t iommu_map_sg(struct iommu_domain *domain, ++ unsigned long iova, struct scatterlist *sg, ++ unsigned int nents, int prot) ++{ ++ return domain->ops->map_sg(domain, iova, sg, nents, prot); ++} ++ + #else /* CONFIG_IOMMU_API */ + + struct iommu_ops {}; +@@ -293,6 +348,13 @@ static inline int iommu_unmap(struct iommu_domain *domain, unsigned long iova, + return -ENODEV; + } + ++static inline size_t iommu_map_sg(struct iommu_domain *domain, ++ unsigned long iova, struct scatterlist *sg, ++ unsigned int nents, int prot) ++{ ++ return -ENODEV; ++} ++ + static inline int iommu_domain_window_enable(struct iommu_domain *domain, + u32 wnd_nr, phys_addr_t paddr, + u64 size, int prot) +@@ -424,6 +486,11 @@ static inline void iommu_device_unlink(struct device *dev, struct device *link) + { + } + ++static inline struct iommu_domain *iommu_get_dev_domain(struct device *dev) ++{ ++ return NULL; ++} ++ + #endif /* CONFIG_IOMMU_API */ + + #endif /* __LINUX_IOMMU_H */ +diff --git a/include/linux/iopoll.h b/include/linux/iopoll.h +new file mode 100644 +index 0000000..1c30014 +--- /dev/null ++++ b/include/linux/iopoll.h +@@ -0,0 +1,144 @@ ++/* ++ * Copyright (c) 2012-2014 The Linux Foundation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 and ++ * only version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ */ ++ ++#ifndef _LINUX_IOPOLL_H ++#define _LINUX_IOPOLL_H ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++/** ++ * readx_poll_timeout - Periodically poll an address until a condition is met or a timeout occurs ++ * @op: accessor function (takes @addr as its only argument) ++ * @addr: Address to poll ++ * @val: Variable to read the value into ++ * @cond: Break condition (usually involving @val) ++ * @sleep_us: Maximum time to sleep between reads in us (0 ++ * tight-loops). Should be less than ~20ms since usleep_range ++ * is used (see Documentation/timers/timers-howto.txt). ++ * @timeout_us: Timeout in us, 0 means never timeout ++ * ++ * Returns 0 on success and -ETIMEDOUT upon a timeout. In either ++ * case, the last read value at @addr is stored in @val. Must not ++ * be called from atomic context if sleep_us or timeout_us are used. ++ * ++ * When available, you'll probably want to use one of the specialized ++ * macros defined below rather than this macro directly. 
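As a usage sketch only (foo_dev, foo->base, FOO_STATUS and FOO_STATUS_READY are made-up names, not part of this patch), a driver would normally wrap the poll in one of the specialized helpers defined below rather than invoke readx_poll_timeout directly:

    static int foo_wait_ready(struct foo_dev *foo)
    {
            u32 val;

            /* Sleep roughly 10us between reads, give up after 100ms */
            return readl_poll_timeout(foo->base + FOO_STATUS, val,
                                      val & FOO_STATUS_READY, 10, 100000);
    }

On success this returns 0 with the final register value left in val; on timeout it returns -ETIMEDOUT after one last read of the register.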
++ */ ++#define readx_poll_timeout(op, addr, val, cond, sleep_us, timeout_us) \ ++({ \ ++ ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); \ ++ might_sleep_if(sleep_us); \ ++ for (;;) { \ ++ (val) = op(addr); \ ++ if (cond) \ ++ break; \ ++ if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \ ++ (val) = op(addr); \ ++ break; \ ++ } \ ++ if (sleep_us) \ ++ usleep_range((sleep_us >> 2) + 1, sleep_us); \ ++ } \ ++ (cond) ? 0 : -ETIMEDOUT; \ ++}) ++ ++/** ++ * readx_poll_timeout_atomic - Periodically poll an address until a condition is met or a timeout occurs ++ * @op: accessor function (takes @addr as its only argument) ++ * @addr: Address to poll ++ * @val: Variable to read the value into ++ * @cond: Break condition (usually involving @val) ++ * @delay_us: Time to udelay between reads in us (0 tight-loops). Should ++ * be less than ~10us since udelay is used (see ++ * Documentation/timers/timers-howto.txt). ++ * @timeout_us: Timeout in us, 0 means never timeout ++ * ++ * Returns 0 on success and -ETIMEDOUT upon a timeout. In either ++ * case, the last read value at @addr is stored in @val. ++ * ++ * When available, you'll probably want to use one of the specialized ++ * macros defined below rather than this macro directly. ++ */ ++#define readx_poll_timeout_atomic(op, addr, val, cond, delay_us, timeout_us) \ ++({ \ ++ ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); \ ++ for (;;) { \ ++ (val) = op(addr); \ ++ if (cond) \ ++ break; \ ++ if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \ ++ (val) = op(addr); \ ++ break; \ ++ } \ ++ if (delay_us) \ ++ udelay(delay_us); \ ++ } \ ++ (cond) ? 0 : -ETIMEDOUT; \ ++}) ++ ++ ++#define readb_poll_timeout(addr, val, cond, delay_us, timeout_us) \ ++ readx_poll_timeout(readb, addr, val, cond, delay_us, timeout_us) ++ ++#define readb_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \ ++ readx_poll_timeout_atomic(readb, addr, val, cond, delay_us, timeout_us) ++ ++#define readw_poll_timeout(addr, val, cond, delay_us, timeout_us) \ ++ readx_poll_timeout(readw, addr, val, cond, delay_us, timeout_us) ++ ++#define readw_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \ ++ readx_poll_timeout_atomic(readw, addr, val, cond, delay_us, timeout_us) ++ ++#define readl_poll_timeout(addr, val, cond, delay_us, timeout_us) \ ++ readx_poll_timeout(readl, addr, val, cond, delay_us, timeout_us) ++ ++#define readl_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \ ++ readx_poll_timeout_atomic(readl, addr, val, cond, delay_us, timeout_us) ++ ++#define readq_poll_timeout(addr, val, cond, delay_us, timeout_us) \ ++ readx_poll_timeout(readq, addr, val, cond, delay_us, timeout_us) ++ ++#define readq_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \ ++ readx_poll_timeout_atomic(readq, addr, val, cond, delay_us, timeout_us) ++ ++#define readb_relaxed_poll_timeout(addr, val, cond, delay_us, timeout_us) \ ++ readx_poll_timeout(readb_relaxed, addr, val, cond, delay_us, timeout_us) ++ ++#define readb_relaxed_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \ ++ readx_poll_timeout_atomic(readb_relaxed, addr, val, cond, delay_us, timeout_us) ++ ++#define readw_relaxed_poll_timeout(addr, val, cond, delay_us, timeout_us) \ ++ readx_poll_timeout(readw_relaxed, addr, val, cond, delay_us, timeout_us) ++ ++#define readw_relaxed_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \ ++ readx_poll_timeout_atomic(readw_relaxed, addr, val, cond, delay_us, timeout_us) ++ ++#define 
readl_relaxed_poll_timeout(addr, val, cond, delay_us, timeout_us) \ ++ readx_poll_timeout(readl_relaxed, addr, val, cond, delay_us, timeout_us) ++ ++#define readl_relaxed_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \ ++ readx_poll_timeout_atomic(readl_relaxed, addr, val, cond, delay_us, timeout_us) ++ ++#define readq_relaxed_poll_timeout(addr, val, cond, delay_us, timeout_us) \ ++ readx_poll_timeout(readq_relaxed, addr, val, cond, delay_us, timeout_us) ++ ++#define readq_relaxed_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \ ++ readx_poll_timeout_atomic(readq_relaxed, addr, val, cond, delay_us, timeout_us) ++ ++#endif /* _LINUX_IOPOLL_H */ +diff --git a/include/linux/irq.h b/include/linux/irq.h +index 03f48d9..9ba173b 100644 +--- a/include/linux/irq.h ++++ b/include/linux/irq.h +@@ -15,11 +15,13 @@ + #include + #include + #include ++#include + #include + #include + #include + #include + #include ++#include + + #include + #include +@@ -27,11 +29,7 @@ + + struct seq_file; + struct module; +-struct irq_desc; +-struct irq_data; +-typedef void (*irq_flow_handler_t)(unsigned int irq, +- struct irq_desc *desc); +-typedef void (*irq_preflow_handler_t)(struct irq_data *data); ++struct msi_msg; + + /* + * IRQ line status. +@@ -113,10 +111,14 @@ enum { + * + * IRQ_SET_MASK_OK - OK, core updates irq_data.affinity + * IRQ_SET_MASK_NOCPY - OK, chip did update irq_data.affinity ++ * IRQ_SET_MASK_OK_DONE - Same as IRQ_SET_MASK_OK for core. Special code to ++ * support stacked irqchips, which indicates skipping ++ * all descendant irqchips. + */ + enum { + IRQ_SET_MASK_OK = 0, + IRQ_SET_MASK_OK_NOCOPY, ++ IRQ_SET_MASK_OK_DONE, + }; + + struct msi_desc; +@@ -133,6 +135,8 @@ struct irq_domain; + * @chip: low level interrupt hardware access + * @domain: Interrupt translation domain; responsible for mapping + * between hwirq number and linux irq number.
++ * @parent_data: pointer to parent struct irq_data to support hierarchy ++ * irq_domain + * @handler_data: per-IRQ data for the irq_chip methods + * @chip_data: platform-specific per-chip private data for the chip + * methods, to allow shared chip implementations +@@ -151,6 +155,9 @@ struct irq_data { + unsigned int state_use_accessors; + struct irq_chip *chip; + struct irq_domain *domain; ++#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY ++ struct irq_data *parent_data; ++#endif + void *handler_data; + void *chip_data; + struct msi_desc *msi_desc; +@@ -315,6 +322,8 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) + * any other callback related to this irq + * @irq_release_resources: optional to release resources acquired with + * irq_request_resources ++ * @irq_compose_msi_msg: optional to compose message content for MSI ++ * @irq_write_msi_msg: optional to write message content for MSI + * @flags: chip specific flags + */ + struct irq_chip { +@@ -351,6 +360,9 @@ struct irq_chip { + int (*irq_request_resources)(struct irq_data *data); + void (*irq_release_resources)(struct irq_data *data); + ++ void (*irq_compose_msi_msg)(struct irq_data *data, struct msi_msg *msg); ++ void (*irq_write_msi_msg)(struct irq_data *data, struct msi_msg *msg); ++ + unsigned long flags; + }; + +@@ -438,6 +450,18 @@ extern void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc); + extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc); + extern void handle_nested_irq(unsigned int irq); + ++extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg); ++#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY ++extern void irq_chip_ack_parent(struct irq_data *data); ++extern int irq_chip_retrigger_hierarchy(struct irq_data *data); ++extern void irq_chip_mask_parent(struct irq_data *data); ++extern void irq_chip_unmask_parent(struct irq_data *data); ++extern void irq_chip_eoi_parent(struct irq_data *data); ++extern int irq_chip_set_affinity_parent(struct irq_data *data, ++ const struct cpumask *dest, ++ bool force); ++#endif ++ + /* Handling of unhandled and spurious interrupts: */ + extern void note_interrupt(unsigned int irq, struct irq_desc *desc, + irqreturn_t action_ret); +@@ -582,7 +606,7 @@ static inline struct msi_desc *irq_get_msi_desc(unsigned int irq) + return d ? 
d->msi_desc : NULL; + } + +-static inline struct msi_desc *irq_data_get_msi(struct irq_data *d) ++static inline struct msi_desc *irq_data_get_msi_desc(struct irq_data *d) + { + return d->msi_desc; + } +@@ -639,13 +663,6 @@ void arch_teardown_hwirq(unsigned int irq); + void irq_init_desc(unsigned int irq); + #endif + +-#ifndef irq_reg_writel +-# define irq_reg_writel(val, addr) writel(val, addr) +-#endif +-#ifndef irq_reg_readl +-# define irq_reg_readl(addr) readl(addr) +-#endif +- + /** + * struct irq_chip_regs - register offsets for struct irq_gci + * @enable: Enable register offset to reg_base +@@ -692,6 +709,8 @@ struct irq_chip_type { + * struct irq_chip_generic - Generic irq chip data structure + * @lock: Lock to protect register and cache data access + * @reg_base: Register base address (virtual) ++ * @reg_readl: Alternate I/O accessor (defaults to readl if NULL) ++ * @reg_writel: Alternate I/O accessor (defaults to writel if NULL) + * @irq_base: Interrupt base nr for this chip + * @irq_cnt: Number of interrupts handled by this chip + * @mask_cache: Cached mask register shared between all chip types +@@ -716,6 +735,8 @@ struct irq_chip_type { + struct irq_chip_generic { + raw_spinlock_t lock; + void __iomem *reg_base; ++ u32 (*reg_readl)(void __iomem *addr); ++ void (*reg_writel)(u32 val, void __iomem *addr); + unsigned int irq_base; + unsigned int irq_cnt; + u32 mask_cache; +@@ -740,12 +761,14 @@ struct irq_chip_generic { + * the parent irq. Usually GPIO implementations + * @IRQ_GC_MASK_CACHE_PER_TYPE: Mask cache is chip type private + * @IRQ_GC_NO_MASK: Do not calculate irq_data->mask ++ * @IRQ_GC_BE_IO: Use big-endian register accesses (default: LE) + */ + enum irq_gc_flags { + IRQ_GC_INIT_MASK_CACHE = 1 << 0, + IRQ_GC_INIT_NESTED_LOCK = 1 << 1, + IRQ_GC_MASK_CACHE_PER_TYPE = 1 << 2, + IRQ_GC_NO_MASK = 1 << 3, ++ IRQ_GC_BE_IO = 1 << 4, + }; + + /* +@@ -821,4 +844,22 @@ static inline void irq_gc_lock(struct irq_chip_generic *gc) { } + static inline void irq_gc_unlock(struct irq_chip_generic *gc) { } + #endif + ++static inline void irq_reg_writel(struct irq_chip_generic *gc, ++ u32 val, int reg_offset) ++{ ++ if (gc->reg_writel) ++ gc->reg_writel(val, gc->reg_base + reg_offset); ++ else ++ writel(val, gc->reg_base + reg_offset); ++} ++ ++static inline u32 irq_reg_readl(struct irq_chip_generic *gc, ++ int reg_offset) ++{ ++ if (gc->reg_readl) ++ return gc->reg_readl(gc->reg_base + reg_offset); ++ else ++ return readl(gc->reg_base + reg_offset); ++} ++ + #endif /* _LINUX_IRQ_H */ +diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h +index 03a4ea3..da1aa15 100644 +--- a/include/linux/irqchip/arm-gic-v3.h ++++ b/include/linux/irqchip/arm-gic-v3.h +@@ -49,6 +49,10 @@ + #define GICD_CTLR_ENABLE_G1A (1U << 1) + #define GICD_CTLR_ENABLE_G1 (1U << 0) + ++#define GICD_TYPER_ID_BITS(typer) ((((typer) >> 19) & 0x1f) + 1) ++#define GICD_TYPER_IRQS(typer) ((((typer) & 0x1f) + 1) * 32) ++#define GICD_TYPER_LPIS (1U << 17) ++ + #define GICD_IROUTER_SPI_MODE_ONE (0U << 31) + #define GICD_IROUTER_SPI_MODE_ANY (1U << 31) + +@@ -76,9 +80,42 @@ + #define GICR_MOVALLR 0x0110 + #define GICR_PIDR2 GICD_PIDR2 + ++#define GICR_CTLR_ENABLE_LPIS (1UL << 0) ++ ++#define GICR_TYPER_CPU_NUMBER(r) (((r) >> 8) & 0xffff) ++ + #define GICR_WAKER_ProcessorSleep (1U << 1) + #define GICR_WAKER_ChildrenAsleep (1U << 2) + ++#define GICR_PROPBASER_NonShareable (0U << 10) ++#define GICR_PROPBASER_InnerShareable (1U << 10) ++#define GICR_PROPBASER_OuterShareable (2U << 10) ++#define 
GICR_PROPBASER_SHAREABILITY_MASK (3UL << 10) ++#define GICR_PROPBASER_nCnB (0U << 7) ++#define GICR_PROPBASER_nC (1U << 7) ++#define GICR_PROPBASER_RaWt (2U << 7) ++#define GICR_PROPBASER_RaWb (3U << 7) ++#define GICR_PROPBASER_WaWt (4U << 7) ++#define GICR_PROPBASER_WaWb (5U << 7) ++#define GICR_PROPBASER_RaWaWt (6U << 7) ++#define GICR_PROPBASER_RaWaWb (7U << 7) ++#define GICR_PROPBASER_CACHEABILITY_MASK (7U << 7) ++#define GICR_PROPBASER_IDBITS_MASK (0x1f) ++ ++#define GICR_PENDBASER_NonShareable (0U << 10) ++#define GICR_PENDBASER_InnerShareable (1U << 10) ++#define GICR_PENDBASER_OuterShareable (2U << 10) ++#define GICR_PENDBASER_SHAREABILITY_MASK (3UL << 10) ++#define GICR_PENDBASER_nCnB (0U << 7) ++#define GICR_PENDBASER_nC (1U << 7) ++#define GICR_PENDBASER_RaWt (2U << 7) ++#define GICR_PENDBASER_RaWb (3U << 7) ++#define GICR_PENDBASER_WaWt (4U << 7) ++#define GICR_PENDBASER_WaWb (5U << 7) ++#define GICR_PENDBASER_RaWaWt (6U << 7) ++#define GICR_PENDBASER_RaWaWb (7U << 7) ++#define GICR_PENDBASER_CACHEABILITY_MASK (7U << 7) ++ + /* + * Re-Distributor registers, offsets from SGI_base + */ +@@ -91,9 +128,100 @@ + #define GICR_IPRIORITYR0 GICD_IPRIORITYR + #define GICR_ICFGR0 GICD_ICFGR + ++#define GICR_TYPER_PLPIS (1U << 0) + #define GICR_TYPER_VLPIS (1U << 1) + #define GICR_TYPER_LAST (1U << 4) + ++#define LPI_PROP_GROUP1 (1 << 1) ++#define LPI_PROP_ENABLED (1 << 0) ++ ++/* ++ * ITS registers, offsets from ITS_base ++ */ ++#define GITS_CTLR 0x0000 ++#define GITS_IIDR 0x0004 ++#define GITS_TYPER 0x0008 ++#define GITS_CBASER 0x0080 ++#define GITS_CWRITER 0x0088 ++#define GITS_CREADR 0x0090 ++#define GITS_BASER 0x0100 ++#define GITS_PIDR2 GICR_PIDR2 ++ ++#define GITS_TRANSLATER 0x10040 ++ ++#define GITS_CTLR_ENABLE (1U << 0) ++#define GITS_CTLR_QUIESCENT (1U << 31) ++ ++#define GITS_TYPER_DEVBITS_SHIFT 13 ++#define GITS_TYPER_DEVBITS(r) ((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1) ++#define GITS_TYPER_PTA (1UL << 19) ++ ++#define GITS_CBASER_VALID (1UL << 63) ++#define GITS_CBASER_nCnB (0UL << 59) ++#define GITS_CBASER_nC (1UL << 59) ++#define GITS_CBASER_RaWt (2UL << 59) ++#define GITS_CBASER_RaWb (3UL << 59) ++#define GITS_CBASER_WaWt (4UL << 59) ++#define GITS_CBASER_WaWb (5UL << 59) ++#define GITS_CBASER_RaWaWt (6UL << 59) ++#define GITS_CBASER_RaWaWb (7UL << 59) ++#define GITS_CBASER_CACHEABILITY_MASK (7UL << 59) ++#define GITS_CBASER_NonShareable (0UL << 10) ++#define GITS_CBASER_InnerShareable (1UL << 10) ++#define GITS_CBASER_OuterShareable (2UL << 10) ++#define GITS_CBASER_SHAREABILITY_MASK (3UL << 10) ++ ++#define GITS_BASER_NR_REGS 8 ++ ++#define GITS_BASER_VALID (1UL << 63) ++#define GITS_BASER_nCnB (0UL << 59) ++#define GITS_BASER_nC (1UL << 59) ++#define GITS_BASER_RaWt (2UL << 59) ++#define GITS_BASER_RaWb (3UL << 59) ++#define GITS_BASER_WaWt (4UL << 59) ++#define GITS_BASER_WaWb (5UL << 59) ++#define GITS_BASER_RaWaWt (6UL << 59) ++#define GITS_BASER_RaWaWb (7UL << 59) ++#define GITS_BASER_CACHEABILITY_MASK (7UL << 59) ++#define GITS_BASER_TYPE_SHIFT (56) ++#define GITS_BASER_TYPE(r) (((r) >> GITS_BASER_TYPE_SHIFT) & 7) ++#define GITS_BASER_ENTRY_SIZE_SHIFT (48) ++#define GITS_BASER_ENTRY_SIZE(r) ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0xff) + 1) ++#define GITS_BASER_NonShareable (0UL << 10) ++#define GITS_BASER_InnerShareable (1UL << 10) ++#define GITS_BASER_OuterShareable (2UL << 10) ++#define GITS_BASER_SHAREABILITY_SHIFT (10) ++#define GITS_BASER_SHAREABILITY_MASK (3UL << GITS_BASER_SHAREABILITY_SHIFT) ++#define GITS_BASER_PAGE_SIZE_SHIFT (8) ++#define 
GITS_BASER_PAGE_SIZE_4K (0UL << GITS_BASER_PAGE_SIZE_SHIFT) ++#define GITS_BASER_PAGE_SIZE_16K (1UL << GITS_BASER_PAGE_SIZE_SHIFT) ++#define GITS_BASER_PAGE_SIZE_64K (2UL << GITS_BASER_PAGE_SIZE_SHIFT) ++#define GITS_BASER_PAGE_SIZE_MASK (3UL << GITS_BASER_PAGE_SIZE_SHIFT) ++ ++#define GITS_BASER_TYPE_NONE 0 ++#define GITS_BASER_TYPE_DEVICE 1 ++#define GITS_BASER_TYPE_VCPU 2 ++#define GITS_BASER_TYPE_CPU 3 ++#define GITS_BASER_TYPE_COLLECTION 4 ++#define GITS_BASER_TYPE_RESERVED5 5 ++#define GITS_BASER_TYPE_RESERVED6 6 ++#define GITS_BASER_TYPE_RESERVED7 7 ++ ++/* ++ * ITS commands ++ */ ++#define GITS_CMD_MAPD 0x08 ++#define GITS_CMD_MAPC 0x09 ++#define GITS_CMD_MAPVI 0x0a ++#define GITS_CMD_MOVI 0x01 ++#define GITS_CMD_DISCARD 0x0f ++#define GITS_CMD_INV 0x0c ++#define GITS_CMD_MOVALL 0x0e ++#define GITS_CMD_INVALL 0x0d ++#define GITS_CMD_INT 0x03 ++#define GITS_CMD_CLEAR 0x04 ++#define GITS_CMD_SYNC 0x05 ++ + /* + * CPU interface registers + */ +@@ -188,6 +316,24 @@ + #ifndef __ASSEMBLY__ + + #include ++#include ++ ++/* ++ * We need a value to serve as a irq-type for LPIs. Choose one that will ++ * hopefully pique the interest of the reviewer. ++ */ ++#define GIC_IRQ_TYPE_LPI 0xa110c8ed ++ ++struct rdists { ++ struct { ++ void __iomem *rd_base; ++ struct page *pend_page; ++ phys_addr_t phys_base; ++ } __percpu *rdist; ++ struct page *prop_page; ++ int id_bits; ++ u64 flags; ++}; + + static inline void gic_write_eoir(u64 irq) + { +@@ -195,6 +341,13 @@ static inline void gic_write_eoir(u64 irq) + isb(); + } + ++struct irq_domain; ++int its_cpu_init(void); ++int its_init(struct device_node *node, struct rdists *rdists, ++ struct irq_domain *domain); ++int __its_msi_prepare(struct irq_domain *domain, u32 dev_id, ++ struct device *dev, int nvec, msi_alloc_info_t *info); ++ + #endif + + #endif +diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h +index b0f9d16..ebace05 100644 +--- a/include/linux/irqdomain.h ++++ b/include/linux/irqdomain.h +@@ -33,15 +33,31 @@ + #define _LINUX_IRQDOMAIN_H + + #include ++#include + #include + + struct device_node; + struct irq_domain; + struct of_device_id; ++struct irq_chip; ++struct irq_data; + + /* Number of irqs reserved for a legacy isa controller */ + #define NUM_ISA_INTERRUPTS 16 + ++/* ++ * Should several domains have the same device node, but serve ++ * different purposes (for example one domain is for PCI/MSI, and the ++ * other for wired IRQs), they can be distinguished using a ++ * bus-specific token. Most domains are expected to only carry ++ * DOMAIN_BUS_ANY. ++ */ ++enum irq_domain_bus_token { ++ DOMAIN_BUS_ANY = 0, ++ DOMAIN_BUS_PCI_MSI, ++ DOMAIN_BUS_PLATFORM_MSI, ++}; ++ + /** + * struct irq_domain_ops - Methods for irq_domain objects + * @match: Match an interrupt controller device node to a host, returns +@@ -58,12 +74,23 @@ struct of_device_id; + * to setup the irq_desc when returning from map(). 
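As a sketch of the irq_domain_ops contract just described (all myintc_* names are hypothetical and assumed, not from this patch), a simple non-hierarchical controller with a linear domain usually only needs a map callback plus one of the stock xlate helpers:

    static int myintc_map(struct irq_domain *d, unsigned int virq,
                          irq_hw_number_t hw)
    {
            /* Called on first mapping of hwirq 'hw': set up the new virq */
            /* myintc_chip is a driver-defined struct irq_chip (hypothetical) */
            irq_set_chip_and_handler(virq, &myintc_chip, handle_level_irq);
            irq_set_chip_data(virq, d->host_data);
            return 0;
    }

    static const struct irq_domain_ops myintc_ops = {
            .map   = myintc_map,
            .xlate = irq_domain_xlate_onecell,  /* one-cell DT specifiers */
    };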
+ */ + struct irq_domain_ops { +- int (*match)(struct irq_domain *d, struct device_node *node); ++ int (*match)(struct irq_domain *d, struct device_node *node, ++ enum irq_domain_bus_token bus_token); + int (*map)(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw); + void (*unmap)(struct irq_domain *d, unsigned int virq); + int (*xlate)(struct irq_domain *d, struct device_node *node, + const u32 *intspec, unsigned int intsize, + unsigned long *out_hwirq, unsigned int *out_type); ++ ++#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY ++ /* extended V2 interfaces to support hierarchy irq_domains */ ++ int (*alloc)(struct irq_domain *d, unsigned int virq, ++ unsigned int nr_irqs, void *arg); ++ void (*free)(struct irq_domain *d, unsigned int virq, ++ unsigned int nr_irqs); ++ void (*activate)(struct irq_domain *d, struct irq_data *irq_data); ++ void (*deactivate)(struct irq_domain *d, struct irq_data *irq_data); ++#endif + }; + + extern struct irq_domain_ops irq_generic_chip_ops; +@@ -77,6 +104,7 @@ struct irq_domain_chip_generic; + * @ops: pointer to irq_domain methods + * @host_data: private data pointer for use by owner. Not touched by irq_domain + * core code. ++ * @flags: host per irq_domain flags + * + * Optional elements + * @of_node: Pointer to device tree nodes associated with the irq_domain. Used +@@ -84,6 +112,7 @@ struct irq_domain_chip_generic; + * @gc: Pointer to a list of generic chips. There is a helper function for + * setting up one or more generic chips for interrupt controllers + * drivers using the generic chip library which uses this pointer. ++ * @parent: Pointer to parent irq_domain to support hierarchy irq_domains + * + * Revmap data, used internally by irq_domain + * @revmap_direct_max_irq: The largest hwirq that can be set for controllers that +@@ -97,10 +126,15 @@ struct irq_domain { + const char *name; + const struct irq_domain_ops *ops; + void *host_data; ++ unsigned int flags; + + /* Optional data */ + struct device_node *of_node; ++ enum irq_domain_bus_token bus_token; + struct irq_domain_chip_generic *gc; ++#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY ++ struct irq_domain *parent; ++#endif + + /* reverse map data. The linear map gets appended to the irq_domain */ + irq_hw_number_t hwirq_max; +@@ -110,6 +144,22 @@ struct irq_domain { + unsigned int linear_revmap[]; + }; + ++/* Irq domain flags */ ++enum { ++ /* Irq domain is hierarchical */ ++ IRQ_DOMAIN_FLAG_HIERARCHY = (1 << 0), ++ ++ /* Core calls alloc/free recursively through the domain hierarchy. */ ++ IRQ_DOMAIN_FLAG_AUTO_RECURSIVE = (1 << 1), ++ ++ /* ++ * Flags starting from IRQ_DOMAIN_FLAG_NONCORE are reserved ++ * for implementation specific purposes and ignored by the ++ * core code.
++ */ ++ IRQ_DOMAIN_FLAG_NONCORE = (1 << 16), ++}; ++ + #ifdef CONFIG_IRQ_DOMAIN + struct irq_domain *__irq_domain_add(struct device_node *of_node, int size, + irq_hw_number_t hwirq_max, int direct_max, +@@ -126,9 +176,15 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node, + irq_hw_number_t first_hwirq, + const struct irq_domain_ops *ops, + void *host_data); +-extern struct irq_domain *irq_find_host(struct device_node *node); ++extern struct irq_domain *irq_find_matching_host(struct device_node *node, ++ enum irq_domain_bus_token bus_token); + extern void irq_set_default_host(struct irq_domain *host); + ++static inline struct irq_domain *irq_find_host(struct device_node *node) ++{ ++ return irq_find_matching_host(node, DOMAIN_BUS_ANY); ++} ++ + /** + * irq_domain_add_linear() - Allocate and register a linear revmap irq_domain. + * @of_node: pointer to interrupt controller's device tree node. +@@ -220,8 +276,74 @@ int irq_domain_xlate_onetwocell(struct irq_domain *d, struct device_node *ctrlr, + const u32 *intspec, unsigned int intsize, + irq_hw_number_t *out_hwirq, unsigned int *out_type); + ++/* V2 interfaces to support hierarchy IRQ domains. */ ++extern struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain, ++ unsigned int virq); ++#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY ++extern struct irq_domain *irq_domain_add_hierarchy(struct irq_domain *parent, ++ unsigned int flags, unsigned int size, ++ struct device_node *node, ++ const struct irq_domain_ops *ops, void *host_data); ++extern int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base, ++ unsigned int nr_irqs, int node, void *arg, ++ bool realloc); ++extern void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs); ++extern void irq_domain_activate_irq(struct irq_data *irq_data); ++extern void irq_domain_deactivate_irq(struct irq_data *irq_data); ++ ++static inline int irq_domain_alloc_irqs(struct irq_domain *domain, ++ unsigned int nr_irqs, int node, void *arg) ++{ ++ return __irq_domain_alloc_irqs(domain, -1, nr_irqs, node, arg, false); ++} ++ ++extern int irq_domain_set_hwirq_and_chip(struct irq_domain *domain, ++ unsigned int virq, ++ irq_hw_number_t hwirq, ++ struct irq_chip *chip, ++ void *chip_data); ++extern void irq_domain_set_info(struct irq_domain *domain, unsigned int virq, ++ irq_hw_number_t hwirq, struct irq_chip *chip, ++ void *chip_data, irq_flow_handler_t handler, ++ void *handler_data, const char *handler_name); ++extern void irq_domain_reset_irq_data(struct irq_data *irq_data); ++extern void irq_domain_free_irqs_common(struct irq_domain *domain, ++ unsigned int virq, ++ unsigned int nr_irqs); ++extern void irq_domain_free_irqs_top(struct irq_domain *domain, ++ unsigned int virq, unsigned int nr_irqs); ++ ++extern int irq_domain_alloc_irqs_parent(struct irq_domain *domain, ++ unsigned int irq_base, ++ unsigned int nr_irqs, void *arg); ++ ++extern void irq_domain_free_irqs_parent(struct irq_domain *domain, ++ unsigned int irq_base, ++ unsigned int nr_irqs); ++ ++static inline bool irq_domain_is_hierarchy(struct irq_domain *domain) ++{ ++ return domain->flags & IRQ_DOMAIN_FLAG_HIERARCHY; ++} ++#else /* CONFIG_IRQ_DOMAIN_HIERARCHY */ ++static inline void irq_domain_activate_irq(struct irq_data *data) { } ++static inline void irq_domain_deactivate_irq(struct irq_data *data) { } ++static inline int irq_domain_alloc_irqs(struct irq_domain *domain, ++ unsigned int nr_irqs, int node, void *arg) ++{ ++ return -1; ++} ++ ++static inline bool irq_domain_is_hierarchy(struct 
irq_domain *domain) ++{ ++ return false; ++} ++#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */ ++ + #else /* CONFIG_IRQ_DOMAIN */ + static inline void irq_dispose_mapping(unsigned int virq) { } ++static inline void irq_domain_activate_irq(struct irq_data *data) { } ++static inline void irq_domain_deactivate_irq(struct irq_data *data) { } + #endif /* !CONFIG_IRQ_DOMAIN */ + + #endif /* _LINUX_IRQDOMAIN_H */ +diff --git a/include/linux/irqhandler.h b/include/linux/irqhandler.h +new file mode 100644 +index 0000000..62d5430 +--- /dev/null ++++ b/include/linux/irqhandler.h +@@ -0,0 +1,14 @@ ++#ifndef _LINUX_IRQHANDLER_H ++#define _LINUX_IRQHANDLER_H ++ ++/* ++ * Interrupt flow handler typedefs are defined here to avoid circular ++ * include dependencies. ++ */ ++ ++struct irq_desc; ++struct irq_data; ++typedef void (*irq_flow_handler_t)(unsigned int irq, struct irq_desc *desc); ++typedef void (*irq_preflow_handler_t)(struct irq_data *data); ++ ++#endif +diff --git a/include/linux/msi.h b/include/linux/msi.h +index 44f4746..788d65b 100644 +--- a/include/linux/msi.h ++++ b/include/linux/msi.h +@@ -10,17 +10,13 @@ struct msi_msg { + u32 data; /* 16 bits of msi message data */ + }; + ++extern int pci_msi_ignore_mask; + /* Helper functions */ + struct irq_data; + struct msi_desc; +-void mask_msi_irq(struct irq_data *data); +-void unmask_msi_irq(struct irq_data *data); +-void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg); ++struct pci_dev; + void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg); +-void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg); +-void read_msi_msg(unsigned int irq, struct msi_msg *msg); + void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg); +-void write_msi_msg(unsigned int irq, struct msi_msg *msg); + + struct msi_desc { + struct { +@@ -42,12 +38,63 @@ struct msi_desc { + void __iomem *mask_base; + u8 mask_pos; + }; +- struct pci_dev *dev; ++ struct device *dev; + + /* Last set MSI message */ + struct msi_msg msg; + }; + ++/* Helpers to hide struct msi_desc implementation details */ ++#define msi_desc_to_dev(desc) ((desc)->dev) ++#define dev_to_msi_list(dev) (&(dev)->msi_list) ++#define first_msi_entry(dev) \ ++ list_first_entry(dev_to_msi_list((dev)), struct msi_desc, list) ++#define for_each_msi_entry(desc, dev) \ ++ list_for_each_entry((desc), dev_to_msi_list((dev)), list) ++ ++#ifdef CONFIG_PCI_MSI ++#define first_pci_msi_entry(pdev) first_msi_entry(&(pdev)->dev) ++#define for_each_pci_msi_entry(desc, pdev) \ ++ for_each_msi_entry((desc), &(pdev)->dev) ++ ++struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc); ++void *msi_desc_to_pci_sysdata(struct msi_desc *desc); ++#else /* CONFIG_PCI_MSI */ ++static inline void *msi_desc_to_pci_sysdata(struct msi_desc *desc) ++{ ++ return NULL; ++} ++#endif /* CONFIG_PCI_MSI */ ++ ++struct msi_desc *alloc_msi_entry(struct device *dev); ++void free_msi_entry(struct msi_desc *entry); ++void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg); ++void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg); ++void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg); ++ ++u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag); ++u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag); ++void pci_msi_mask_irq(struct irq_data *data); ++void pci_msi_unmask_irq(struct irq_data *data); ++ ++/* Conversion helpers. 
Should be removed after merging */ ++static inline void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg) ++{ ++ __pci_write_msi_msg(entry, msg); ++} ++static inline void write_msi_msg(int irq, struct msi_msg *msg) ++{ ++ pci_write_msi_msg(irq, msg); ++} ++static inline void mask_msi_irq(struct irq_data *data) ++{ ++ pci_msi_mask_irq(data); ++} ++static inline void unmask_msi_irq(struct irq_data *data) ++{ ++ pci_msi_unmask_irq(data); ++} ++ + /* + * The arch hooks to setup up msi irqs. Those functions are + * implemented as weak symbols so that they /can/ be overriden by +@@ -61,18 +108,146 @@ void arch_restore_msi_irqs(struct pci_dev *dev); + + void default_teardown_msi_irqs(struct pci_dev *dev); + void default_restore_msi_irqs(struct pci_dev *dev); +-u32 default_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag); +-u32 default_msix_mask_irq(struct msi_desc *desc, u32 flag); ++#define default_msi_mask_irq __msi_mask_irq ++#define default_msix_mask_irq __msix_mask_irq + +-struct msi_chip { ++struct msi_controller { + struct module *owner; + struct device *dev; + struct device_node *of_node; + struct list_head list; ++#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN ++ struct irq_domain *domain; ++#endif + +- int (*setup_irq)(struct msi_chip *chip, struct pci_dev *dev, ++ int (*setup_irq)(struct msi_controller *chip, struct pci_dev *dev, + struct msi_desc *desc); +- void (*teardown_irq)(struct msi_chip *chip, unsigned int irq); ++ int (*setup_irqs)(struct msi_controller *chip, struct pci_dev *dev, ++ int nvec, int type); ++ void (*teardown_irq)(struct msi_controller *chip, unsigned int irq); + }; + ++#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN ++ ++#include ++#include ++ ++struct irq_domain; ++struct irq_chip; ++struct device_node; ++struct msi_domain_info; ++ ++/** ++ * struct msi_domain_ops - MSI interrupt domain callbacks ++ * @get_hwirq: Retrieve the resulting hw irq number ++ * @msi_init: Domain specific init function for MSI interrupts ++ * @msi_free: Domain specific function to free an MSI interrupt ++ * @msi_check: Callback for verification of the domain/info/dev data ++ * @msi_prepare: Prepare the allocation of the interrupts in the domain ++ * @msi_finish: Optional callback to finalize the allocation ++ * @set_desc: Set the msi descriptor for an interrupt ++ * @handle_error: Optional error handler if the allocation fails ++ * ++ * @get_hwirq, @msi_init and @msi_free are callbacks used by ++ * msi_create_irq_domain() and related interfaces ++ * ++ * @msi_check, @msi_prepare, @msi_finish, @set_desc and @handle_error ++ * are callbacks used by msi_irq_domain_alloc_irqs() and related ++ * interfaces which are based on msi_desc.
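To see how these callbacks are consumed, here is a hedged sketch of an irqchip driver publishing an MSI domain on top of a parent hierarchy domain. The my_msi_* names and the chip are illustrative assumptions, and the flags are the msi_domain_info flags added further down in this header:

    static struct msi_domain_ops my_msi_ops = {
            /* Unset callbacks are filled in by MSI_FLAG_USE_DEF_DOM_OPS */
            .msi_prepare = my_msi_prepare,          /* hypothetical */
    };

    static struct msi_domain_info my_msi_info = {
            .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS,
            .ops   = &my_msi_ops,
            .chip  = &my_msi_chip,                  /* hypothetical irq_chip */
    };

    static int my_msi_init(struct device_node *node, struct irq_domain *parent)
    {
            struct irq_domain *d;

            /* node: controller DT node; parent: underlying hierarchy domain */
            d = msi_create_irq_domain(node, &my_msi_info, parent);
            return d ? 0 : -ENOMEM;
    }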
++ */ ++struct msi_domain_ops { ++ irq_hw_number_t (*get_hwirq)(struct msi_domain_info *info, ++ msi_alloc_info_t *arg); ++ int (*msi_init)(struct irq_domain *domain, ++ struct msi_domain_info *info, ++ unsigned int virq, irq_hw_number_t hwirq, ++ msi_alloc_info_t *arg); ++ void (*msi_free)(struct irq_domain *domain, ++ struct msi_domain_info *info, ++ unsigned int virq); ++ int (*msi_check)(struct irq_domain *domain, ++ struct msi_domain_info *info, ++ struct device *dev); ++ int (*msi_prepare)(struct irq_domain *domain, ++ struct device *dev, int nvec, ++ msi_alloc_info_t *arg); ++ void (*msi_finish)(msi_alloc_info_t *arg, int retval); ++ void (*set_desc)(msi_alloc_info_t *arg, ++ struct msi_desc *desc); ++ int (*handle_error)(struct irq_domain *domain, ++ struct msi_desc *desc, int error); ++}; ++ ++/** ++ * struct msi_domain_info - MSI interrupt domain data ++ * @flags: Flags to describe features and capabilities ++ * @ops: The callback data structure ++ * @chip: Optional: associated interrupt chip ++ * @chip_data: Optional: associated interrupt chip data ++ * @handler: Optional: associated interrupt flow handler ++ * @handler_data: Optional: associated interrupt flow handler data ++ * @handler_name: Optional: associated interrupt flow handler name ++ * @data: Optional: domain specific data ++ */ ++struct msi_domain_info { ++ u32 flags; ++ struct msi_domain_ops *ops; ++ struct irq_chip *chip; ++ void *chip_data; ++ irq_flow_handler_t handler; ++ void *handler_data; ++ const char *handler_name; ++ void *data; ++}; ++ ++/* Flags for msi_domain_info */ ++enum { ++ /* ++ * Init non-implemented ops callbacks with default MSI domain ++ * callbacks. ++ */ ++ MSI_FLAG_USE_DEF_DOM_OPS = (1 << 0), ++ /* ++ * Init non-implemented chip callbacks with default MSI chip ++ * callbacks.
++ */ ++ MSI_FLAG_USE_DEF_CHIP_OPS = (1 << 1), ++ /* Build identity map between hwirq and irq */ ++ MSI_FLAG_IDENTITY_MAP = (1 << 2), ++ /* Support multiple PCI MSI interrupts */ ++ MSI_FLAG_MULTI_PCI_MSI = (1 << 3), ++ /* Support PCI MSIX interrupts */ ++ MSI_FLAG_PCI_MSIX = (1 << 4), ++}; ++ ++int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask, ++ bool force); ++ ++struct irq_domain *msi_create_irq_domain(struct device_node *of_node, ++ struct msi_domain_info *info, ++ struct irq_domain *parent); ++int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, ++ int nvec); ++void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev); ++struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain); ++ ++#endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */ ++ ++#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN ++void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg); ++struct irq_domain *pci_msi_create_irq_domain(struct device_node *node, ++ struct msi_domain_info *info, ++ struct irq_domain *parent); ++int pci_msi_domain_alloc_irqs(struct irq_domain *domain, struct pci_dev *dev, ++ int nvec, int type); ++void pci_msi_domain_free_irqs(struct irq_domain *domain, struct pci_dev *dev); ++struct irq_domain *pci_msi_create_default_irq_domain(struct device_node *node, ++ struct msi_domain_info *info, struct irq_domain *parent); ++ ++irq_hw_number_t pci_msi_domain_calc_hwirq(struct pci_dev *dev, ++ struct msi_desc *desc); ++int pci_msi_domain_check_cap(struct irq_domain *domain, ++ struct msi_domain_info *info, struct device *dev); ++#endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */ ++ + #endif /* LINUX_MSI_H */ +diff --git a/include/linux/of_device.h b/include/linux/of_device.h +index ef37021..22801b1 100644 +--- a/include/linux/of_device.h ++++ b/include/linux/of_device.h +@@ -53,6 +53,7 @@ static inline struct device_node *of_cpu_device_node_get(int cpu) + return of_node_get(cpu_dev->of_node); + } + ++void of_dma_configure(struct device *dev, struct device_node *np); + #else /* CONFIG_OF */ + + static inline int of_driver_match_device(struct device *dev, +@@ -90,6 +91,8 @@ static inline struct device_node *of_cpu_device_node_get(int cpu) + { + return NULL; + } ++static inline void of_dma_configure(struct device *dev, struct device_node *np) ++{} + #endif /* CONFIG_OF */ + + #endif /* _LINUX_OF_DEVICE_H */ +diff --git a/include/linux/of_iommu.h b/include/linux/of_iommu.h +index 51a560f..ffbe470 100644 +--- a/include/linux/of_iommu.h ++++ b/include/linux/of_iommu.h +@@ -1,12 +1,20 @@ + #ifndef __OF_IOMMU_H + #define __OF_IOMMU_H + ++#include ++#include ++#include ++ + #ifdef CONFIG_OF_IOMMU + + extern int of_get_dma_window(struct device_node *dn, const char *prefix, + int index, unsigned long *busno, dma_addr_t *addr, + size_t *size); + ++extern void of_iommu_init(void); ++extern struct iommu_ops *of_iommu_configure(struct device *dev, ++ struct device_node *master_np); ++ + #else + + static inline int of_get_dma_window(struct device_node *dn, const char *prefix, +@@ -16,6 +24,23 @@ static inline int of_get_dma_window(struct device_node *dn, const char *prefix, + return -EINVAL; + } + ++static inline void of_iommu_init(void) { } ++static inline struct iommu_ops *of_iommu_configure(struct device *dev, ++ struct device_node *master_np) ++{ ++ return NULL; ++} ++ + #endif /* CONFIG_OF_IOMMU */ + ++void of_iommu_set_ops(struct device_node *np, struct iommu_ops *ops); ++struct iommu_ops *of_iommu_get_ops(struct device_node *np); ++ ++extern struct 
of_device_id __iommu_of_table; ++ ++typedef int (*of_iommu_init_fn)(struct device_node *); ++ ++#define IOMMU_OF_DECLARE(name, compat, fn) \ ++ _OF_DECLARE(iommu, name, compat, fn, of_iommu_init_fn) ++ + #endif /* __OF_IOMMU_H */ +diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h +index bfec136..563ad28 100644 +--- a/include/linux/of_irq.h ++++ b/include/linux/of_irq.h +@@ -69,6 +69,7 @@ static inline int of_irq_get_byname(struct device_node *dev, const char *name) + */ + extern unsigned int irq_of_parse_and_map(struct device_node *node, int index); + extern struct device_node *of_irq_find_parent(struct device_node *child); ++extern void of_msi_configure(struct device *dev, struct device_node *np); + + #else /* !CONFIG_OF */ + static inline unsigned int irq_of_parse_and_map(struct device_node *dev, +diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h +index 1fd207e..29fd3fe 100644 +--- a/include/linux/of_pci.h ++++ b/include/linux/of_pci.h +@@ -16,6 +16,7 @@ int of_pci_get_devfn(struct device_node *np); + int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin); + int of_pci_parse_bus_range(struct device_node *node, struct resource *res); + int of_get_pci_domain_nr(struct device_node *node); ++void of_pci_dma_configure(struct pci_dev *pci_dev); + #else + static inline int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq) + { +@@ -50,6 +51,8 @@ of_get_pci_domain_nr(struct device_node *node) + { + return -1; + } ++ ++static inline void of_pci_dma_configure(struct pci_dev *pci_dev) { } + #endif + + #if defined(CONFIG_OF_ADDRESS) +@@ -59,13 +62,13 @@ int of_pci_get_host_bridge_resources(struct device_node *dev, + #endif + + #if defined(CONFIG_OF) && defined(CONFIG_PCI_MSI) +-int of_pci_msi_chip_add(struct msi_chip *chip); +-void of_pci_msi_chip_remove(struct msi_chip *chip); +-struct msi_chip *of_pci_find_msi_chip_by_node(struct device_node *of_node); ++int of_pci_msi_chip_add(struct msi_controller *chip); ++void of_pci_msi_chip_remove(struct msi_controller *chip); ++struct msi_controller *of_pci_find_msi_chip_by_node(struct device_node *of_node); + #else +-static inline int of_pci_msi_chip_add(struct msi_chip *chip) { return -EINVAL; } +-static inline void of_pci_msi_chip_remove(struct msi_chip *chip) { } +-static inline struct msi_chip * ++static inline int of_pci_msi_chip_add(struct msi_controller *chip) { return -EINVAL; } ++static inline void of_pci_msi_chip_remove(struct msi_controller *chip) { } ++static inline struct msi_controller * + of_pci_find_msi_chip_by_node(struct device_node *of_node) { return NULL; } + #endif + +diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h +index c2b0627..8a860f0 100644 +--- a/include/linux/of_platform.h ++++ b/include/linux/of_platform.h +@@ -84,4 +84,10 @@ static inline int of_platform_populate(struct device_node *root, + static inline void of_platform_depopulate(struct device *parent) { } + #endif + ++#ifdef CONFIG_OF_DYNAMIC ++extern void of_platform_register_reconfig_notifier(void); ++#else ++static inline void of_platform_register_reconfig_notifier(void) { } ++#endif ++ + #endif /* _LINUX_OF_PLATFORM_H */ +diff --git a/include/linux/pci.h b/include/linux/pci.h +index 7a34844..a99f301 100644 +--- a/include/linux/pci.h ++++ b/include/linux/pci.h +@@ -29,6 +29,7 @@ + #include + #include + #include ++#include + #include + + #include +@@ -171,8 +172,8 @@ enum pci_dev_flags { + PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) (1 << 2), + /* Flag for quirk use to store 
if quirk-specific ACS is enabled */ + PCI_DEV_FLAGS_ACS_ENABLED_QUIRK = (__force pci_dev_flags_t) (1 << 3), +- /* Flag to indicate the device uses dma_alias_devfn */ +- PCI_DEV_FLAGS_DMA_ALIAS_DEVFN = (__force pci_dev_flags_t) (1 << 4), ++ /* Flag to indicate the device uses dma_alias_devid */ ++ PCI_DEV_FLAGS_DMA_ALIAS_DEVID = (__force pci_dev_flags_t) (1 << 4), + /* Use a PCIe-to-PCI bridge alias even if !pci_is_pcie */ + PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS = (__force pci_dev_flags_t) (1 << 5), + /* Do not use bus resets for device */ +@@ -278,7 +279,7 @@ struct pci_dev { + u8 rom_base_reg; /* which config register controls the ROM */ + u8 pin; /* which interrupt pin this device uses */ + u16 pcie_flags_reg; /* cached PCIe Capabilities Register */ +- u8 dma_alias_devfn;/* devfn of DMA alias, if any */ ++ u32 dma_alias_devid;/* devid of DMA alias */ + + struct pci_driver *driver; /* which driver has allocated this device */ + u64 dma_mask; /* Mask of the bits of bus address this +@@ -365,7 +366,6 @@ struct pci_dev { + struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */ + struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */ + #ifdef CONFIG_PCI_MSI +- struct list_head msi_list; + const struct attribute_group **msi_irq_groups; + #endif + struct pci_vpd *vpd; +@@ -400,16 +400,10 @@ static inline int pci_channel_offline(struct pci_dev *pdev) + return (pdev->error_state != pci_channel_io_normal); + } + +-struct pci_host_bridge_window { +- struct list_head list; +- struct resource *res; /* host bridge aperture (CPU address) */ +- resource_size_t offset; /* bus address + offset = CPU address */ +-}; +- + struct pci_host_bridge { + struct device dev; + struct pci_bus *bus; /* root bus */ +- struct list_head windows; /* pci_host_bridge_windows */ ++ struct list_head windows; /* resource_entry */ + void (*release_fn)(struct pci_host_bridge *); + void *release_data; + }; +@@ -456,7 +450,7 @@ struct pci_bus { + struct resource busn_res; /* bus numbers routed to this bus */ + + struct pci_ops *ops; /* configuration access functions */ +- struct msi_chip *msi; /* MSI controller */ ++ struct msi_controller *msi; /* MSI controller */ + void *sysdata; /* hook for sys-specific extension */ + struct proc_dir_entry *procdir; /* directory entry in /proc/bus/pci */ + +@@ -516,6 +510,9 @@ static inline struct pci_dev *pci_upstream_bridge(struct pci_dev *dev) + return dev->bus->self; + } + ++struct device *pci_get_host_bridge_device(struct pci_dev *dev); ++void pci_put_host_bridge_device(struct device *dev); ++ + #ifdef CONFIG_PCI_MSI + static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) + { +diff --git a/include/linux/resource_ext.h b/include/linux/resource_ext.h +new file mode 100644 +index 0000000..e2bf63d +--- /dev/null ++++ b/include/linux/resource_ext.h +@@ -0,0 +1,77 @@ ++/* ++ * Copyright (C) 2015, Intel Corporation ++ * Author: Jiang Liu ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms and conditions of the GNU General Public License, ++ * version 2, as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ * more details. 
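For orientation, a consumer of the resource list helpers declared just below (for example PCI host bridge code walking its windows, now that pci_host_bridge stores resource_entry items) might look like this sketch; the function name is hypothetical:

    static void dump_bridge_windows(struct pci_host_bridge *bridge)
    {
            struct resource_entry *entry;

            /* Walk each host bridge aperture and its bus-address offset */
            resource_list_for_each_entry(entry, &bridge->windows)
                    dev_info(&bridge->dev, "window %pR (offset %#llx)\n",
                             entry->res, (unsigned long long)entry->offset);
    }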
++ */ ++#ifndef _LINUX_RESOURCE_EXT_H ++#define _LINUX_RESOURCE_EXT_H ++#include ++#include ++#include ++#include ++ ++/* Represent resource window for bridge devices */ ++struct resource_win { ++ struct resource res; /* In master (CPU) address space */ ++ resource_size_t offset; /* Translation offset for bridge */ ++}; ++ ++/* ++ * Common resource list management data structure and interfaces to support ++ * ACPI, PNP and PCI host bridge etc. ++ */ ++struct resource_entry { ++ struct list_head node; ++ struct resource *res; /* In master (CPU) address space */ ++ resource_size_t offset; /* Translation offset for bridge */ ++ struct resource __res; /* Default storage for res */ ++}; ++ ++extern struct resource_entry * ++resource_list_create_entry(struct resource *res, size_t extra_size); ++extern void resource_list_free(struct list_head *head); ++ ++static inline void resource_list_add(struct resource_entry *entry, ++ struct list_head *head) ++{ ++ list_add(&entry->node, head); ++} ++ ++static inline void resource_list_add_tail(struct resource_entry *entry, ++ struct list_head *head) ++{ ++ list_add_tail(&entry->node, head); ++} ++ ++static inline void resource_list_del(struct resource_entry *entry) ++{ ++ list_del(&entry->node); ++} ++ ++static inline void resource_list_free_entry(struct resource_entry *entry) ++{ ++ kfree(entry); ++} ++ ++static inline void ++resource_list_destroy_entry(struct resource_entry *entry) ++{ ++ resource_list_del(entry); ++ resource_list_free_entry(entry); ++} ++ ++#define resource_list_for_each_entry(entry, list) \ ++ list_for_each_entry((entry), (list), node) ++ ++#define resource_list_for_each_entry_safe(entry, tmp, list) \ ++ list_for_each_entry_safe((entry), (tmp), (list), node) ++ ++#endif /* _LINUX_RESOURCE_EXT_H */ +diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h +index 9948c87..1d0043d 100644 +--- a/include/linux/usb/quirks.h ++++ b/include/linux/usb/quirks.h +@@ -47,4 +47,7 @@ + /* device generates spurious wakeup, ignore remote wakeup capability */ + #define USB_QUIRK_IGNORE_REMOTE_WAKEUP BIT(9) + ++/* device can't handle Link Power Management */ ++#define USB_QUIRK_NO_LPM BIT(10) ++ + #endif /* __LINUX_USB_QUIRKS_H */ +diff --git a/include/trace/events/iommu.h b/include/trace/events/iommu.h +index a8f5c32..2c7befb 100644 +--- a/include/trace/events/iommu.h ++++ b/include/trace/events/iommu.h +@@ -83,7 +83,7 @@ DEFINE_EVENT(iommu_device_event, detach_device_from_domain, + TP_ARGS(dev) + ); + +-DECLARE_EVENT_CLASS(iommu_map_unmap, ++TRACE_EVENT(map, + + TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size), + +@@ -92,7 +92,7 @@ DECLARE_EVENT_CLASS(iommu_map_unmap, + TP_STRUCT__entry( + __field(u64, iova) + __field(u64, paddr) +- __field(int, size) ++ __field(size_t, size) + ), + + TP_fast_assign( +@@ -101,26 +101,31 @@ DECLARE_EVENT_CLASS(iommu_map_unmap, + __entry->size = size; + ), + +- TP_printk("IOMMU: iova=0x%016llx paddr=0x%016llx size=0x%x", ++ TP_printk("IOMMU: iova=0x%016llx paddr=0x%016llx size=%zu", + __entry->iova, __entry->paddr, __entry->size + ) + ); + +-DEFINE_EVENT(iommu_map_unmap, map, ++TRACE_EVENT(unmap, + +- TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size), +- +- TP_ARGS(iova, paddr, size) +-); ++ TP_PROTO(unsigned long iova, size_t size, size_t unmapped_size), + +-DEFINE_EVENT_PRINT(iommu_map_unmap, unmap, ++ TP_ARGS(iova, size, unmapped_size), + +- TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size), ++ TP_STRUCT__entry( ++ __field(u64, iova) ++ __field(size_t, size) ++ 
__field(size_t, unmapped_size) ++ ), + +- TP_ARGS(iova, paddr, size), ++ TP_fast_assign( ++ __entry->iova = iova; ++ __entry->size = size; ++ __entry->unmapped_size = unmapped_size; ++ ), + +- TP_printk("IOMMU: iova=0x%016llx size=0x%x", +- __entry->iova, __entry->size ++ TP_printk("IOMMU: iova=0x%016llx size=%zu unmapped_size=%zu", ++ __entry->iova, __entry->size, __entry->unmapped_size + ) + ); + +diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig +index 225086b..9a76e3b 100644 +--- a/kernel/irq/Kconfig ++++ b/kernel/irq/Kconfig +@@ -55,6 +55,21 @@ config GENERIC_IRQ_CHIP + config IRQ_DOMAIN + bool + ++# Support for hierarchical irq domains ++config IRQ_DOMAIN_HIERARCHY ++ bool ++ select IRQ_DOMAIN ++ ++# Generic MSI interrupt support ++config GENERIC_MSI_IRQ ++ bool ++ ++# Generic MSI hierarchical interrupt domain support ++config GENERIC_MSI_IRQ_DOMAIN ++ bool ++ select IRQ_DOMAIN_HIERARCHY ++ select GENERIC_MSI_IRQ ++ + config HANDLE_DOMAIN_IRQ + bool + +diff --git a/kernel/irq/Makefile b/kernel/irq/Makefile +index fff1738..d121235 100644 +--- a/kernel/irq/Makefile ++++ b/kernel/irq/Makefile +@@ -6,3 +6,4 @@ obj-$(CONFIG_IRQ_DOMAIN) += irqdomain.o + obj-$(CONFIG_PROC_FS) += proc.o + obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o + obj-$(CONFIG_PM_SLEEP) += pm.o ++obj-$(CONFIG_GENERIC_MSI_IRQ) += msi.o +diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c +index e5202f0..63c16d1 100644 +--- a/kernel/irq/chip.c ++++ b/kernel/irq/chip.c +@@ -15,6 +15,7 @@ + #include + #include + #include ++#include + + #include + +@@ -178,6 +179,7 @@ int irq_startup(struct irq_desc *desc, bool resend) + irq_state_clr_disabled(desc); + desc->depth = 0; + ++ irq_domain_activate_irq(&desc->irq_data); + if (desc->irq_data.chip->irq_startup) { + ret = desc->irq_data.chip->irq_startup(&desc->irq_data); + irq_state_clr_masked(desc); +@@ -199,6 +201,7 @@ void irq_shutdown(struct irq_desc *desc) + desc->irq_data.chip->irq_disable(&desc->irq_data); + else + desc->irq_data.chip->irq_mask(&desc->irq_data); ++ irq_domain_deactivate_irq(&desc->irq_data); + irq_state_set_masked(desc); + } + +@@ -847,3 +850,105 @@ void irq_cpu_offline(void) + raw_spin_unlock_irqrestore(&desc->lock, flags); + } + } ++ ++#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY ++/** ++ * irq_chip_ack_parent - Acknowledge the parent interrupt ++ * @data: Pointer to interrupt specific data ++ */ ++void irq_chip_ack_parent(struct irq_data *data) ++{ ++ data = data->parent_data; ++ data->chip->irq_ack(data); ++} ++ ++/** ++ * irq_chip_mask_parent - Mask the parent interrupt ++ * @data: Pointer to interrupt specific data ++ */ ++void irq_chip_mask_parent(struct irq_data *data) ++{ ++ data = data->parent_data; ++ data->chip->irq_mask(data); ++} ++ ++/** ++ * irq_chip_unmask_parent - Unmask the parent interrupt ++ * @data: Pointer to interrupt specific data ++ */ ++void irq_chip_unmask_parent(struct irq_data *data) ++{ ++ data = data->parent_data; ++ data->chip->irq_unmask(data); ++} ++ ++/** ++ * irq_chip_eoi_parent - Invoke EOI on the parent interrupt ++ * @data: Pointer to interrupt specific data ++ */ ++void irq_chip_eoi_parent(struct irq_data *data) ++{ ++ data = data->parent_data; ++ data->chip->irq_eoi(data); ++} ++ ++/** ++ * irq_chip_set_affinity_parent - Set affinity on the parent interrupt ++ * @data: Pointer to interrupt specific data ++ * @dest: The affinity mask to set ++ * @force: Flag to enforce setting (disable online checks) ++ * ++ * Conditional, as the underlying parent chip might not implement it.
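Taken together, these helpers let an intermediate chip in a stacked setup delegate straight to its parent. A typical chip definition in such a driver (name hypothetical) reduces to:

    static struct irq_chip my_stacked_chip = {
            .name             = "my-stacked",       /* hypothetical */
            .irq_mask         = irq_chip_mask_parent,
            .irq_unmask       = irq_chip_unmask_parent,
            .irq_eoi          = irq_chip_eoi_parent,
            .irq_set_affinity = irq_chip_set_affinity_parent,
    };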
++ */ ++int irq_chip_set_affinity_parent(struct irq_data *data, ++ const struct cpumask *dest, bool force) ++{ ++ data = data->parent_data; ++ if (data->chip->irq_set_affinity) ++ return data->chip->irq_set_affinity(data, dest, force); ++ ++ return -ENOSYS; ++} ++ ++/** ++ * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware ++ * @data: Pointer to interrupt specific data ++ * ++ * Iterate through the domain hierarchy of the interrupt and check ++ * whether a hw retrigger function exists. If yes, invoke it. ++ */ ++int irq_chip_retrigger_hierarchy(struct irq_data *data) ++{ ++ for (data = data->parent_data; data; data = data->parent_data) ++ if (data->chip && data->chip->irq_retrigger) ++ return data->chip->irq_retrigger(data); ++ ++ return -ENOSYS; ++} ++#endif ++ ++/** ++ * irq_chip_compose_msi_msg - Compose msi message for an irq chip ++ * @data: Pointer to interrupt specific data ++ * @msg: Pointer to the MSI message ++ * ++ * For hierarchical domains we find the first chip in the hierarchy ++ * which implements the irq_compose_msi_msg callback. For non ++ * hierarchical we use the top level chip. ++ */ ++int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) ++{ ++ struct irq_data *pos = NULL; ++ ++#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY ++ for (; data; data = data->parent_data) ++#endif ++ if (data->chip && data->chip->irq_compose_msi_msg) ++ pos = data; ++ if (!pos) ++ return -ENOSYS; ++ ++ pos->chip->irq_compose_msi_msg(pos, msg); ++ ++ return 0; ++} +diff --git a/kernel/irq/generic-chip.c b/kernel/irq/generic-chip.c +index cf80e7b..61024e8 100644 +--- a/kernel/irq/generic-chip.c ++++ b/kernel/irq/generic-chip.c +@@ -39,7 +39,7 @@ void irq_gc_mask_disable_reg(struct irq_data *d) + u32 mask = d->mask; + + irq_gc_lock(gc); +- irq_reg_writel(mask, gc->reg_base + ct->regs.disable); ++ irq_reg_writel(gc, mask, ct->regs.disable); + *ct->mask_cache &= ~mask; + irq_gc_unlock(gc); + } +@@ -59,7 +59,7 @@ void irq_gc_mask_set_bit(struct irq_data *d) + + irq_gc_lock(gc); + *ct->mask_cache |= mask; +- irq_reg_writel(*ct->mask_cache, gc->reg_base + ct->regs.mask); ++ irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask); + irq_gc_unlock(gc); + } + EXPORT_SYMBOL_GPL(irq_gc_mask_set_bit); +@@ -79,7 +79,7 @@ void irq_gc_mask_clr_bit(struct irq_data *d) + u32 mask = d->mask; + + irq_gc_lock(gc); + *ct->mask_cache &= ~mask; +- irq_reg_writel(*ct->mask_cache, gc->reg_base + ct->regs.mask); ++ irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask); + irq_gc_unlock(gc); + } + EXPORT_SYMBOL_GPL(irq_gc_mask_clr_bit); +@@ -98,7 +98,7 @@ void irq_gc_unmask_enable_reg(struct irq_data *d) + u32 mask = d->mask; + + irq_gc_lock(gc); +- irq_reg_writel(mask, gc->reg_base + ct->regs.enable); ++ irq_reg_writel(gc, mask, ct->regs.enable); + *ct->mask_cache |= mask; + irq_gc_unlock(gc); + } +@@ -114,7 +114,7 @@ void irq_gc_ack_set_bit(struct irq_data *d) + u32 mask = d->mask; + + irq_gc_lock(gc); +- irq_reg_writel(mask, gc->reg_base + ct->regs.ack); ++ irq_reg_writel(gc, mask, ct->regs.ack); + irq_gc_unlock(gc); + } + EXPORT_SYMBOL_GPL(irq_gc_ack_set_bit); +@@ -130,7 +130,7 @@ void irq_gc_ack_clr_bit(struct irq_data *d) + u32 mask = ~d->mask; + + irq_gc_lock(gc); +- irq_reg_writel(mask, gc->reg_base + ct->regs.ack); ++ irq_reg_writel(gc, mask, ct->regs.ack); + irq_gc_unlock(gc); + } + +@@ -145,8 +145,8 @@ void irq_gc_mask_disable_reg_and_ack(struct irq_data *d) + u32 mask = d->mask; + + irq_gc_lock(gc); +- irq_reg_writel(mask, gc->reg_base + ct->regs.mask); +- irq_reg_writel(mask, gc->reg_base + ct->regs.ack); ++
irq_reg_writel(gc, mask, ct->regs.mask); ++ irq_reg_writel(gc, mask, ct->regs.ack); + irq_gc_unlock(gc); + } + +@@ -161,7 +161,7 @@ void irq_gc_eoi(struct irq_data *d) + u32 mask = d->mask; + + irq_gc_lock(gc); +- irq_reg_writel(mask, gc->reg_base + ct->regs.eoi); ++ irq_reg_writel(gc, mask, ct->regs.eoi); + irq_gc_unlock(gc); + } + +@@ -191,6 +191,16 @@ int irq_gc_set_wake(struct irq_data *d, unsigned int on) + return 0; + } + ++static u32 irq_readl_be(void __iomem *addr) ++{ ++ return ioread32be(addr); ++} ++ ++static void irq_writel_be(u32 val, void __iomem *addr) ++{ ++ iowrite32be(val, addr); ++} ++ + static void + irq_init_generic_chip(struct irq_chip_generic *gc, const char *name, + int num_ct, unsigned int irq_base, +@@ -245,7 +255,7 @@ irq_gc_init_mask_cache(struct irq_chip_generic *gc, enum irq_gc_flags flags) + } + ct[i].mask_cache = mskptr; + if (flags & IRQ_GC_INIT_MASK_CACHE) +- *mskptr = irq_reg_readl(gc->reg_base + mskreg); ++ *mskptr = irq_reg_readl(gc, mskreg); + } + } + +@@ -300,7 +310,13 @@ int irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip, + dgc->gc[i] = gc = tmp; + irq_init_generic_chip(gc, name, num_ct, i * irqs_per_chip, + NULL, handler); ++ + gc->domain = d; ++ if (gcflags & IRQ_GC_BE_IO) { ++ gc->reg_readl = &irq_readl_be; ++ gc->reg_writel = &irq_writel_be; ++ } ++ + raw_spin_lock_irqsave(&gc_lock, flags); + list_add_tail(&gc->list, &gc_list); + raw_spin_unlock_irqrestore(&gc_lock, flags); +diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c +index 6534ff6..021f823 100644 +--- a/kernel/irq/irqdomain.c ++++ b/kernel/irq/irqdomain.c +@@ -23,6 +23,10 @@ static DEFINE_MUTEX(irq_domain_mutex); + static DEFINE_MUTEX(revmap_trees_mutex); + static struct irq_domain *irq_default_domain; + ++static int irq_domain_alloc_descs(int virq, unsigned int nr_irqs, ++ irq_hw_number_t hwirq, int node); ++static void irq_domain_check_hierarchy(struct irq_domain *domain); ++ + /** + * __irq_domain_add() - Allocate a new irq_domain data structure + * @of_node: optional device-tree node of the interrupt controller +@@ -30,7 +34,7 @@ static struct irq_domain *irq_default_domain; + * @hwirq_max: Maximum number of interrupts supported by controller + * @direct_max: Maximum value of direct maps; Use ~0 for no limit; 0 for no + * direct mapping +- * @ops: map/unmap domain callbacks ++ * @ops: domain callbacks + * @host_data: Controller private data pointer + * + * Allocates and initialize and irq_domain structure. +@@ -56,6 +60,7 @@ struct irq_domain *__irq_domain_add(struct device_node *of_node, int size, + domain->hwirq_max = hwirq_max; + domain->revmap_size = size; + domain->revmap_direct_max_irq = direct_max; ++ irq_domain_check_hierarchy(domain); + + mutex_lock(&irq_domain_mutex); + list_add(&domain->link, &irq_domain_list); +@@ -109,7 +114,7 @@ EXPORT_SYMBOL_GPL(irq_domain_remove); + * @first_irq: first number of irq block assigned to the domain, + * pass zero to assign irqs on-the-fly. If first_irq is non-zero, then + * pre-map all of the irqs in the domain to virqs starting at first_irq. 
+- * @ops: map/unmap domain callbacks ++ * @ops: domain callbacks + * @host_data: Controller private data pointer + * + * Allocates an irq_domain, and optionally if first_irq is positive then also +@@ -174,20 +179,20 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node, + + domain = __irq_domain_add(of_node, first_hwirq + size, + first_hwirq + size, 0, ops, host_data); +- if (!domain) +- return NULL; +- +- irq_domain_associate_many(domain, first_irq, first_hwirq, size); ++ if (domain) ++ irq_domain_associate_many(domain, first_irq, first_hwirq, size); + + return domain; + } + EXPORT_SYMBOL_GPL(irq_domain_add_legacy); + + /** +- * irq_find_host() - Locates a domain for a given device node ++ * irq_find_matching_host() - Locates a domain for a given device node + * @node: device-tree node of the interrupt controller ++ * @bus_token: domain-specific data + */ +-struct irq_domain *irq_find_host(struct device_node *node) ++struct irq_domain *irq_find_matching_host(struct device_node *node, ++ enum irq_domain_bus_token bus_token) + { + struct irq_domain *h, *found = NULL; + int rc; +@@ -196,13 +201,19 @@ struct irq_domain *irq_find_host(struct device_node *node) + * it might potentially be set to match all interrupts in + * the absence of a device node. This isn't a problem so far + * yet though... ++ * ++ * bus_token == DOMAIN_BUS_ANY matches any domain, any other ++ * values must generate an exact match for the domain to be ++ * selected. + */ + mutex_lock(&irq_domain_mutex); + list_for_each_entry(h, &irq_domain_list, link) { + if (h->ops->match) +- rc = h->ops->match(h, node); ++ rc = h->ops->match(h, node, bus_token); + else +- rc = (h->of_node != NULL) && (h->of_node == node); ++ rc = ((h->of_node != NULL) && (h->of_node == node) && ++ ((bus_token == DOMAIN_BUS_ANY) || ++ (h->bus_token == bus_token))); + + if (rc) { + found = h; +@@ -212,7 +223,7 @@ struct irq_domain *irq_find_host(struct device_node *node) + mutex_unlock(&irq_domain_mutex); + return found; + } +-EXPORT_SYMBOL_GPL(irq_find_host); ++EXPORT_SYMBOL_GPL(irq_find_matching_host); + + /** + * irq_set_default_host() - Set a "default" irq domain +@@ -388,7 +399,6 @@ EXPORT_SYMBOL_GPL(irq_create_direct_mapping); + unsigned int irq_create_mapping(struct irq_domain *domain, + irq_hw_number_t hwirq) + { +- unsigned int hint; + int virq; + + pr_debug("irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq); +@@ -410,12 +420,8 @@ unsigned int irq_create_mapping(struct irq_domain *domain, + } + + /* Allocate a virtual interrupt number */ +- hint = hwirq % nr_irqs; +- if (hint == 0) +- hint++; +- virq = irq_alloc_desc_from(hint, of_node_to_nid(domain->of_node)); +- if (virq <= 0) +- virq = irq_alloc_desc_from(1, of_node_to_nid(domain->of_node)); ++ virq = irq_domain_alloc_descs(-1, 1, hwirq, ++ of_node_to_nid(domain->of_node)); + if (virq <= 0) { + pr_debug("-> virq allocation failed\n"); + return 0; +@@ -471,7 +477,7 @@ unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data) + struct irq_domain *domain; + irq_hw_number_t hwirq; + unsigned int type = IRQ_TYPE_NONE; +- unsigned int virq; ++ int virq; + + domain = irq_data->np ? 
irq_find_host(irq_data->np) : irq_default_domain; + if (!domain) { +@@ -489,10 +495,24 @@ unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data) + return 0; + } + +- /* Create mapping */ +- virq = irq_create_mapping(domain, hwirq); +- if (!virq) +- return virq; ++ if (irq_domain_is_hierarchy(domain)) { ++ /* ++ * If we've already configured this interrupt, ++ * don't do it again, or hell will break loose. ++ */ ++ virq = irq_find_mapping(domain, hwirq); ++ if (virq) ++ return virq; ++ ++ virq = irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, irq_data); ++ if (virq <= 0) ++ return 0; ++ } else { ++ /* Create mapping */ ++ virq = irq_create_mapping(domain, hwirq); ++ if (!virq) ++ return virq; ++ } + + /* Set type if specified and different than the current one */ + if (type != IRQ_TYPE_NONE && +@@ -540,8 +560,8 @@ unsigned int irq_find_mapping(struct irq_domain *domain, + return 0; + + if (hwirq < domain->revmap_direct_max_irq) { +- data = irq_get_irq_data(hwirq); +- if (data && (data->domain == domain) && (data->hwirq == hwirq)) ++ data = irq_domain_get_irq_data(domain, hwirq); ++ if (data && data->hwirq == hwirq) + return hwirq; + } + +@@ -709,3 +729,518 @@ const struct irq_domain_ops irq_domain_simple_ops = { + .xlate = irq_domain_xlate_onetwocell, + }; + EXPORT_SYMBOL_GPL(irq_domain_simple_ops); ++ ++static int irq_domain_alloc_descs(int virq, unsigned int cnt, ++ irq_hw_number_t hwirq, int node) ++{ ++ unsigned int hint; ++ ++ if (virq >= 0) { ++ virq = irq_alloc_descs(virq, virq, cnt, node); ++ } else { ++ hint = hwirq % nr_irqs; ++ if (hint == 0) ++ hint++; ++ virq = irq_alloc_descs_from(hint, cnt, node); ++ if (virq <= 0 && hint > 1) ++ virq = irq_alloc_descs_from(1, cnt, node); ++ } ++ ++ return virq; ++} ++ ++#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY ++/** ++ * irq_domain_add_hierarchy - Add an irqdomain into the hierarchy ++ * @parent: Parent irq domain to associate with the new domain ++ * @flags: Irq domain flags associated to the domain ++ * @size: Size of the domain. See below ++ * @node: Optional device-tree node of the interrupt controller ++ * @ops: Pointer to the interrupt domain callbacks ++ * @host_data: Controller private data pointer ++ * ++ * If @size is 0 a tree domain is created, otherwise a linear domain. ++ * ++ * If successful the parent is associated to the new domain and the ++ * domain flags are set. ++ * Returns pointer to IRQ domain, or NULL on failure. 
++ */ ++struct irq_domain *irq_domain_add_hierarchy(struct irq_domain *parent, ++ unsigned int flags, ++ unsigned int size, ++ struct device_node *node, ++ const struct irq_domain_ops *ops, ++ void *host_data) ++{ ++ struct irq_domain *domain; ++ ++ if (size) ++ domain = irq_domain_add_linear(node, size, ops, host_data); ++ else ++ domain = irq_domain_add_tree(node, ops, host_data); ++ if (domain) { ++ domain->parent = parent; ++ domain->flags |= flags; ++ } ++ ++ return domain; ++} ++ ++static void irq_domain_insert_irq(int virq) ++{ ++ struct irq_data *data; ++ ++ for (data = irq_get_irq_data(virq); data; data = data->parent_data) { ++ struct irq_domain *domain = data->domain; ++ irq_hw_number_t hwirq = data->hwirq; ++ ++ if (hwirq < domain->revmap_size) { ++ domain->linear_revmap[hwirq] = virq; ++ } else { ++ mutex_lock(&revmap_trees_mutex); ++ radix_tree_insert(&domain->revmap_tree, hwirq, data); ++ mutex_unlock(&revmap_trees_mutex); ++ } ++ ++ /* If not already assigned, give the domain the chip's name */ ++ if (!domain->name && data->chip) ++ domain->name = data->chip->name; ++ } ++ ++ irq_clear_status_flags(virq, IRQ_NOREQUEST); ++} ++ ++static void irq_domain_remove_irq(int virq) ++{ ++ struct irq_data *data; ++ ++ irq_set_status_flags(virq, IRQ_NOREQUEST); ++ irq_set_chip_and_handler(virq, NULL, NULL); ++ synchronize_irq(virq); ++ smp_mb(); ++ ++ for (data = irq_get_irq_data(virq); data; data = data->parent_data) { ++ struct irq_domain *domain = data->domain; ++ irq_hw_number_t hwirq = data->hwirq; ++ ++ if (hwirq < domain->revmap_size) { ++ domain->linear_revmap[hwirq] = 0; ++ } else { ++ mutex_lock(&revmap_trees_mutex); ++ radix_tree_delete(&domain->revmap_tree, hwirq); ++ mutex_unlock(&revmap_trees_mutex); ++ } ++ } ++} ++ ++static struct irq_data *irq_domain_insert_irq_data(struct irq_domain *domain, ++ struct irq_data *child) ++{ ++ struct irq_data *irq_data; ++ ++ irq_data = kzalloc_node(sizeof(*irq_data), GFP_KERNEL, child->node); ++ if (irq_data) { ++ child->parent_data = irq_data; ++ irq_data->irq = child->irq; ++ irq_data->node = child->node; ++ irq_data->domain = domain; ++ } ++ ++ return irq_data; ++} ++ ++static void irq_domain_free_irq_data(unsigned int virq, unsigned int nr_irqs) ++{ ++ struct irq_data *irq_data, *tmp; ++ int i; ++ ++ for (i = 0; i < nr_irqs; i++) { ++ irq_data = irq_get_irq_data(virq + i); ++ tmp = irq_data->parent_data; ++ irq_data->parent_data = NULL; ++ irq_data->domain = NULL; ++ ++ while (tmp) { ++ irq_data = tmp; ++ tmp = tmp->parent_data; ++ kfree(irq_data); ++ } ++ } ++} ++ ++static int irq_domain_alloc_irq_data(struct irq_domain *domain, ++ unsigned int virq, unsigned int nr_irqs) ++{ ++ struct irq_data *irq_data; ++ struct irq_domain *parent; ++ int i; ++ ++ /* The outermost irq_data is embedded in struct irq_desc */ ++ for (i = 0; i < nr_irqs; i++) { ++ irq_data = irq_get_irq_data(virq + i); ++ irq_data->domain = domain; ++ ++ for (parent = domain->parent; parent; parent = parent->parent) { ++ irq_data = irq_domain_insert_irq_data(parent, irq_data); ++ if (!irq_data) { ++ irq_domain_free_irq_data(virq, i + 1); ++ return -ENOMEM; ++ } ++ } ++ } ++ ++ return 0; ++} ++ ++/** ++ * irq_domain_get_irq_data - Get irq_data associated with @virq and @domain ++ * @domain: domain to match ++ * @virq: IRQ number to get irq_data ++ */ ++struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain, ++ unsigned int virq) ++{ ++ struct irq_data *irq_data; ++ ++ for (irq_data = irq_get_irq_data(virq); irq_data; ++ irq_data = irq_data->parent_data) 
++ if (irq_data->domain == domain) ++ return irq_data; ++ ++ return NULL; ++} ++ ++/** ++ * irq_domain_set_hwirq_and_chip - Set hwirq and irqchip of @virq at @domain ++ * @domain: Interrupt domain to match ++ * @virq: IRQ number ++ * @hwirq: The hwirq number ++ * @chip: The associated interrupt chip ++ * @chip_data: The associated chip data ++ */ ++int irq_domain_set_hwirq_and_chip(struct irq_domain *domain, unsigned int virq, ++ irq_hw_number_t hwirq, struct irq_chip *chip, ++ void *chip_data) ++{ ++ struct irq_data *irq_data = irq_domain_get_irq_data(domain, virq); ++ ++ if (!irq_data) ++ return -ENOENT; ++ ++ irq_data->hwirq = hwirq; ++ irq_data->chip = chip ? chip : &no_irq_chip; ++ irq_data->chip_data = chip_data; ++ ++ return 0; ++} ++ ++/** ++ * irq_domain_set_info - Set the complete data for a @virq in @domain ++ * @domain: Interrupt domain to match ++ * @virq: IRQ number ++ * @hwirq: The hardware interrupt number ++ * @chip: The associated interrupt chip ++ * @chip_data: The associated interrupt chip data ++ * @handler: The interrupt flow handler ++ * @handler_data: The interrupt flow handler data ++ * @handler_name: The interrupt handler name ++ */ ++void irq_domain_set_info(struct irq_domain *domain, unsigned int virq, ++ irq_hw_number_t hwirq, struct irq_chip *chip, ++ void *chip_data, irq_flow_handler_t handler, ++ void *handler_data, const char *handler_name) ++{ ++ irq_domain_set_hwirq_and_chip(domain, virq, hwirq, chip, chip_data); ++ __irq_set_handler(virq, handler, 0, handler_name); ++ irq_set_handler_data(virq, handler_data); ++} ++ ++/** ++ * irq_domain_reset_irq_data - Clear hwirq, chip and chip_data in @irq_data ++ * @irq_data: The pointer to irq_data ++ */ ++void irq_domain_reset_irq_data(struct irq_data *irq_data) ++{ ++ irq_data->hwirq = 0; ++ irq_data->chip = &no_irq_chip; ++ irq_data->chip_data = NULL; ++} ++ ++/** ++ * irq_domain_free_irqs_common - Clear irq_data and free the parent ++ * @domain: Interrupt domain to match ++ * @virq: IRQ number to start with ++ * @nr_irqs: The number of irqs to free ++ */ ++void irq_domain_free_irqs_common(struct irq_domain *domain, unsigned int virq, ++ unsigned int nr_irqs) ++{ ++ struct irq_data *irq_data; ++ int i; ++ ++ for (i = 0; i < nr_irqs; i++) { ++ irq_data = irq_domain_get_irq_data(domain, virq + i); ++ if (irq_data) ++ irq_domain_reset_irq_data(irq_data); ++ } ++ irq_domain_free_irqs_parent(domain, virq, nr_irqs); ++} ++ ++/** ++ * irq_domain_free_irqs_top - Clear handler and handler data, clear irqdata and free parent ++ * @domain: Interrupt domain to match ++ * @virq: IRQ number to start with ++ * @nr_irqs: The number of irqs to free ++ */ ++void irq_domain_free_irqs_top(struct irq_domain *domain, unsigned int virq, ++ unsigned int nr_irqs) ++{ ++ int i; ++ ++ for (i = 0; i < nr_irqs; i++) { ++ irq_set_handler_data(virq + i, NULL); ++ irq_set_handler(virq + i, NULL); ++ } ++ irq_domain_free_irqs_common(domain, virq, nr_irqs); ++} ++ ++static bool irq_domain_is_auto_recursive(struct irq_domain *domain) ++{ ++ return domain->flags & IRQ_DOMAIN_FLAG_AUTO_RECURSIVE; ++} ++ ++static void irq_domain_free_irqs_recursive(struct irq_domain *domain, ++ unsigned int irq_base, ++ unsigned int nr_irqs) ++{ ++ domain->ops->free(domain, irq_base, nr_irqs); ++ if (irq_domain_is_auto_recursive(domain)) { ++ BUG_ON(!domain->parent); ++ irq_domain_free_irqs_recursive(domain->parent, irq_base, ++ nr_irqs); ++ } ++} ++ ++static int irq_domain_alloc_irqs_recursive(struct irq_domain *domain, ++ unsigned int irq_base, ++ unsigned int 
nr_irqs, void *arg) ++{ ++ int ret = 0; ++ struct irq_domain *parent = domain->parent; ++ bool recursive = irq_domain_is_auto_recursive(domain); ++ ++ BUG_ON(recursive && !parent); ++ if (recursive) ++ ret = irq_domain_alloc_irqs_recursive(parent, irq_base, ++ nr_irqs, arg); ++ if (ret >= 0) ++ ret = domain->ops->alloc(domain, irq_base, nr_irqs, arg); ++ if (ret < 0 && recursive) ++ irq_domain_free_irqs_recursive(parent, irq_base, nr_irqs); ++ ++ return ret; ++} ++ ++/** ++ * __irq_domain_alloc_irqs - Allocate IRQs from domain ++ * @domain: domain to allocate from ++ * @irq_base: allocate specified IRQ number if irq_base >= 0 ++ * @nr_irqs: number of IRQs to allocate ++ * @node: NUMA node id for memory allocation ++ * @arg: domain specific argument ++ * @realloc: IRQ descriptors have already been allocated if true ++ * ++ * Allocate IRQ numbers and initialize all data structures to support ++ * hierarchy IRQ domains. ++ * Parameter @realloc is mainly to support legacy IRQs. ++ * Returns error code or allocated IRQ number ++ * ++ * The whole process to setup an IRQ has been split into two steps. ++ * The first step, __irq_domain_alloc_irqs(), is to allocate IRQ ++ * descriptors and required hardware resources. The second step, ++ * irq_domain_activate_irq(), is to program the hardware with preallocated ++ * resources. In this way, it's easier to roll back when failing to ++ * allocate resources. ++ */ ++int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base, ++ unsigned int nr_irqs, int node, void *arg, ++ bool realloc) ++{ ++ int i, ret, virq; ++ ++ if (domain == NULL) { ++ domain = irq_default_domain; ++ if (WARN(!domain, "domain is NULL; cannot allocate IRQ\n")) ++ return -EINVAL; ++ } ++ ++ if (!domain->ops->alloc) { ++ pr_debug("domain->ops->alloc() is NULL\n"); ++ return -ENOSYS; ++ } ++ ++ if (realloc && irq_base >= 0) { ++ virq = irq_base; ++ } else { ++ virq = irq_domain_alloc_descs(irq_base, nr_irqs, 0, node); ++ if (virq < 0) { ++ pr_debug("cannot allocate IRQ(base %d, count %d)\n", ++ irq_base, nr_irqs); ++ return virq; ++ } ++ } ++ ++ if (irq_domain_alloc_irq_data(domain, virq, nr_irqs)) { ++ pr_debug("cannot allocate memory for IRQ%d\n", virq); ++ ret = -ENOMEM; ++ goto out_free_desc; ++ } ++ ++ mutex_lock(&irq_domain_mutex); ++ ret = irq_domain_alloc_irqs_recursive(domain, virq, nr_irqs, arg); ++ if (ret < 0) { ++ mutex_unlock(&irq_domain_mutex); ++ goto out_free_irq_data; ++ } ++ for (i = 0; i < nr_irqs; i++) ++ irq_domain_insert_irq(virq + i); ++ mutex_unlock(&irq_domain_mutex); ++ ++ return virq; ++ ++out_free_irq_data: ++ irq_domain_free_irq_data(virq, nr_irqs); ++out_free_desc: ++ irq_free_descs(virq, nr_irqs); ++ return ret; ++} ++ ++/** ++ * irq_domain_free_irqs - Free IRQ number and associated data structures ++ * @virq: base IRQ number ++ * @nr_irqs: number of IRQs to free ++ */ ++void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs) ++{ ++ struct irq_data *data = irq_get_irq_data(virq); ++ int i; ++ ++ if (WARN(!data || !data->domain || !data->domain->ops->free, ++ "NULL pointer, cannot free irq\n")) ++ return; ++ ++ mutex_lock(&irq_domain_mutex); ++ for (i = 0; i < nr_irqs; i++) ++ irq_domain_remove_irq(virq + i); ++ irq_domain_free_irqs_recursive(data->domain, virq, nr_irqs); ++ mutex_unlock(&irq_domain_mutex); ++ ++ irq_domain_free_irq_data(virq, nr_irqs); ++ irq_free_descs(virq, nr_irqs); ++} ++ ++/** ++ * irq_domain_alloc_irqs_parent - Allocate interrupts from parent domain ++ * @irq_base: Base IRQ number ++ * @nr_irqs: Number of IRQs to 
allocate ++ * @arg: Allocation data (arch/domain specific) ++ * ++ * Check whether the domain has been set up recursively. If not, allocate ++ * through the parent domain. ++ */ ++int irq_domain_alloc_irqs_parent(struct irq_domain *domain, ++ unsigned int irq_base, unsigned int nr_irqs, ++ void *arg) ++{ ++ /* irq_domain_alloc_irqs_recursive() has called parent's alloc() */ ++ if (irq_domain_is_auto_recursive(domain)) ++ return 0; ++ ++ domain = domain->parent; ++ if (domain) ++ return irq_domain_alloc_irqs_recursive(domain, irq_base, ++ nr_irqs, arg); ++ return -ENOSYS; ++} ++ ++/** ++ * irq_domain_free_irqs_parent - Free interrupts from parent domain ++ * @irq_base: Base IRQ number ++ * @nr_irqs: Number of IRQs to free ++ * ++ * Check whether the domain has been set up recursively. If not, free ++ * through the parent domain. ++ */ ++void irq_domain_free_irqs_parent(struct irq_domain *domain, ++ unsigned int irq_base, unsigned int nr_irqs) ++{ ++ /* irq_domain_free_irqs_recursive() will call parent's free */ ++ if (!irq_domain_is_auto_recursive(domain) && domain->parent) ++ irq_domain_free_irqs_recursive(domain->parent, irq_base, ++ nr_irqs); ++} ++ ++/** ++ * irq_domain_activate_irq - Call domain_ops->activate recursively to activate ++ * interrupt ++ * @irq_data: outermost irq_data associated with interrupt ++ * ++ * This is the second step to call domain_ops->activate to program interrupt ++ * controllers, so the interrupt could actually get delivered. ++ */ ++void irq_domain_activate_irq(struct irq_data *irq_data) ++{ ++ if (irq_data && irq_data->domain) { ++ struct irq_domain *domain = irq_data->domain; ++ ++ if (irq_data->parent_data) ++ irq_domain_activate_irq(irq_data->parent_data); ++ if (domain->ops->activate) ++ domain->ops->activate(domain, irq_data); ++ } ++} ++ ++/** ++ * irq_domain_deactivate_irq - Call domain_ops->deactivate recursively to ++ * deactivate interrupt ++ * @irq_data: outermost irq_data associated with interrupt ++ * ++ * It calls domain_ops->deactivate to program interrupt controllers to disable ++ * interrupt delivery. ++ */ ++void irq_domain_deactivate_irq(struct irq_data *irq_data) ++{ ++ if (irq_data && irq_data->domain) { ++ struct irq_domain *domain = irq_data->domain; ++ ++ if (domain->ops->deactivate) ++ domain->ops->deactivate(domain, irq_data); ++ if (irq_data->parent_data) ++ irq_domain_deactivate_irq(irq_data->parent_data); ++ } ++} ++ ++static void irq_domain_check_hierarchy(struct irq_domain *domain) ++{ ++ /* Hierarchy irq_domains must implement callback alloc() */ ++ if (domain->ops->alloc) ++ domain->flags |= IRQ_DOMAIN_FLAG_HIERARCHY; ++} ++#else /* CONFIG_IRQ_DOMAIN_HIERARCHY */ ++/** ++ * irq_domain_get_irq_data - Get irq_data associated with @virq and @domain ++ * @domain: domain to match ++ * @virq: IRQ number to get irq_data ++ */ ++struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain, ++ unsigned int virq) ++{ ++ struct irq_data *irq_data = irq_get_irq_data(virq); ++ ++ return (irq_data && irq_data->domain == domain) ? 
irq_data : NULL; ++} ++ ++static void irq_domain_check_hierarchy(struct irq_domain *domain) ++{ ++} ++#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */ +diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c +index 0a9104b..8069237 100644 +--- a/kernel/irq/manage.c ++++ b/kernel/irq/manage.c +@@ -183,6 +183,7 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask, + ret = chip->irq_set_affinity(data, mask, force); + switch (ret) { + case IRQ_SET_MASK_OK: ++ case IRQ_SET_MASK_OK_DONE: + cpumask_copy(data->affinity, mask); + case IRQ_SET_MASK_OK_NOCOPY: + irq_set_thread_affinity(desc); +@@ -600,6 +601,7 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, + + switch (ret) { + case IRQ_SET_MASK_OK: ++ case IRQ_SET_MASK_OK_DONE: + irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK); + irqd_set(&desc->irq_data, flags); + +diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c +new file mode 100644 +index 0000000..2495ed0 +--- /dev/null ++++ b/kernel/irq/msi.c +@@ -0,0 +1,347 @@ ++/* ++ * linux/kernel/irq/msi.c ++ * ++ * Copyright (C) 2014 Intel Corp. ++ * Author: Jiang Liu <jiang.liu@linux.intel.com> ++ * ++ * This file is licensed under GPLv2. ++ * ++ * This file contains common code to support Message Signalled Interrupt for ++ * PCI compatible and non PCI compatible devices. ++ */ ++#include <linux/types.h> ++#include <linux/device.h> ++#include <linux/irq.h> ++#include <linux/irqdomain.h> ++#include <linux/msi.h> ++ ++/* Temporary solution for building, will be removed later */ ++#include <linux/pci.h> ++ ++struct msi_desc *alloc_msi_entry(struct device *dev) ++{ ++ struct msi_desc *desc = kzalloc(sizeof(*desc), GFP_KERNEL); ++ if (!desc) ++ return NULL; ++ ++ INIT_LIST_HEAD(&desc->list); ++ desc->dev = dev; ++ ++ return desc; ++} ++ ++void free_msi_entry(struct msi_desc *entry) ++{ ++ kfree(entry); ++} ++ ++void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg) ++{ ++ *msg = entry->msg; ++} ++ ++void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg) ++{ ++ struct msi_desc *entry = irq_get_msi_desc(irq); ++ ++ __get_cached_msi_msg(entry, msg); ++} ++EXPORT_SYMBOL_GPL(get_cached_msi_msg); ++ ++#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN ++static inline void irq_chip_write_msi_msg(struct irq_data *data, ++ struct msi_msg *msg) ++{ ++ data->chip->irq_write_msi_msg(data, msg); ++} ++ ++/** ++ * msi_domain_set_affinity - Generic affinity setter function for MSI domains ++ * @irq_data: The irq data associated with the interrupt ++ * @mask: The affinity mask to set ++ * @force: Flag to enforce setting (disable online checks) ++ * ++ * Intended to be used by MSI interrupt controllers which are ++ * implemented with hierarchical domains. 
++ */ ++int msi_domain_set_affinity(struct irq_data *irq_data, ++ const struct cpumask *mask, bool force) ++{ ++ struct irq_data *parent = irq_data->parent_data; ++ struct msi_msg msg; ++ int ret; ++ ++ ret = parent->chip->irq_set_affinity(parent, mask, force); ++ if (ret >= 0 && ret != IRQ_SET_MASK_OK_DONE) { ++ BUG_ON(irq_chip_compose_msi_msg(irq_data, &msg)); ++ irq_chip_write_msi_msg(irq_data, &msg); ++ } ++ ++ return ret; ++} ++ ++static void msi_domain_activate(struct irq_domain *domain, ++ struct irq_data *irq_data) ++{ ++ struct msi_msg msg; ++ ++ BUG_ON(irq_chip_compose_msi_msg(irq_data, &msg)); ++ irq_chip_write_msi_msg(irq_data, &msg); ++} ++ ++static void msi_domain_deactivate(struct irq_domain *domain, ++ struct irq_data *irq_data) ++{ ++ struct msi_msg msg; ++ ++ memset(&msg, 0, sizeof(msg)); ++ irq_chip_write_msi_msg(irq_data, &msg); ++} ++ ++static int msi_domain_alloc(struct irq_domain *domain, unsigned int virq, ++ unsigned int nr_irqs, void *arg) ++{ ++ struct msi_domain_info *info = domain->host_data; ++ struct msi_domain_ops *ops = info->ops; ++ irq_hw_number_t hwirq = ops->get_hwirq(info, arg); ++ int i, ret; ++ ++ if (irq_find_mapping(domain, hwirq) > 0) ++ return -EEXIST; ++ ++ ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg); ++ if (ret < 0) ++ return ret; ++ ++ for (i = 0; i < nr_irqs; i++) { ++ ret = ops->msi_init(domain, info, virq + i, hwirq + i, arg); ++ if (ret < 0) { ++ if (ops->msi_free) { ++ for (i--; i >= 0; i--) ++ ops->msi_free(domain, info, virq + i); ++ } ++ irq_domain_free_irqs_top(domain, virq, nr_irqs); ++ return ret; ++ } ++ } ++ ++ return 0; ++} ++ ++static void msi_domain_free(struct irq_domain *domain, unsigned int virq, ++ unsigned int nr_irqs) ++{ ++ struct msi_domain_info *info = domain->host_data; ++ int i; ++ ++ if (info->ops->msi_free) { ++ for (i = 0; i < nr_irqs; i++) ++ info->ops->msi_free(domain, info, virq + i); ++ } ++ irq_domain_free_irqs_top(domain, virq, nr_irqs); ++} ++ ++static struct irq_domain_ops msi_domain_ops = { ++ .alloc = msi_domain_alloc, ++ .free = msi_domain_free, ++ .activate = msi_domain_activate, ++ .deactivate = msi_domain_deactivate, ++}; ++ ++#ifdef GENERIC_MSI_DOMAIN_OPS ++static irq_hw_number_t msi_domain_ops_get_hwirq(struct msi_domain_info *info, ++ msi_alloc_info_t *arg) ++{ ++ return arg->hwirq; ++} ++ ++static int msi_domain_ops_prepare(struct irq_domain *domain, struct device *dev, ++ int nvec, msi_alloc_info_t *arg) ++{ ++ memset(arg, 0, sizeof(*arg)); ++ return 0; ++} ++ ++static void msi_domain_ops_set_desc(msi_alloc_info_t *arg, ++ struct msi_desc *desc) ++{ ++ arg->desc = desc; ++} ++#else ++#define msi_domain_ops_get_hwirq NULL ++#define msi_domain_ops_prepare NULL ++#define msi_domain_ops_set_desc NULL ++#endif /* !GENERIC_MSI_DOMAIN_OPS */ ++ ++static int msi_domain_ops_init(struct irq_domain *domain, ++ struct msi_domain_info *info, ++ unsigned int virq, irq_hw_number_t hwirq, ++ msi_alloc_info_t *arg) ++{ ++ irq_domain_set_hwirq_and_chip(domain, virq, hwirq, info->chip, ++ info->chip_data); ++ if (info->handler && info->handler_name) { ++ __irq_set_handler(virq, info->handler, 0, info->handler_name); ++ if (info->handler_data) ++ irq_set_handler_data(virq, info->handler_data); ++ } ++ return 0; ++} ++ ++static int msi_domain_ops_check(struct irq_domain *domain, ++ struct msi_domain_info *info, ++ struct device *dev) ++{ ++ return 0; ++} ++ ++static struct msi_domain_ops msi_domain_ops_default = { ++ .get_hwirq = msi_domain_ops_get_hwirq, ++ .msi_init = msi_domain_ops_init, ++ 
.msi_check = msi_domain_ops_check, ++ .msi_prepare = msi_domain_ops_prepare, ++ .set_desc = msi_domain_ops_set_desc, ++}; ++ ++static void msi_domain_update_dom_ops(struct msi_domain_info *info) ++{ ++ struct msi_domain_ops *ops = info->ops; ++ ++ if (ops == NULL) { ++ info->ops = &msi_domain_ops_default; ++ return; ++ } ++ ++ if (ops->get_hwirq == NULL) ++ ops->get_hwirq = msi_domain_ops_default.get_hwirq; ++ if (ops->msi_init == NULL) ++ ops->msi_init = msi_domain_ops_default.msi_init; ++ if (ops->msi_check == NULL) ++ ops->msi_check = msi_domain_ops_default.msi_check; ++ if (ops->msi_prepare == NULL) ++ ops->msi_prepare = msi_domain_ops_default.msi_prepare; ++ if (ops->set_desc == NULL) ++ ops->set_desc = msi_domain_ops_default.set_desc; ++} ++ ++static void msi_domain_update_chip_ops(struct msi_domain_info *info) ++{ ++ struct irq_chip *chip = info->chip; ++ ++ BUG_ON(!chip); ++ if (!chip->irq_mask) ++ chip->irq_mask = pci_msi_mask_irq; ++ if (!chip->irq_unmask) ++ chip->irq_unmask = pci_msi_unmask_irq; ++ if (!chip->irq_set_affinity) ++ chip->irq_set_affinity = msi_domain_set_affinity; ++} ++ ++/** ++ * msi_create_irq_domain - Create a MSI interrupt domain ++ * @node: Optional device-tree node of the interrupt controller ++ * @info: MSI domain info ++ * @parent: Parent irq domain ++ */ ++struct irq_domain *msi_create_irq_domain(struct device_node *node, ++ struct msi_domain_info *info, ++ struct irq_domain *parent) ++{ ++ if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS) ++ msi_domain_update_dom_ops(info); ++ if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS) ++ msi_domain_update_chip_ops(info); ++ ++ return irq_domain_add_hierarchy(parent, 0, 0, node, &msi_domain_ops, ++ info); ++} ++ ++/** ++ * msi_domain_alloc_irqs - Allocate interrupts from a MSI interrupt domain ++ * @domain: The domain to allocate from ++ * @dev: Pointer to device struct of the device for which the interrupts ++ * are allocated ++ * @nvec: The number of interrupts to allocate ++ * ++ * Returns 0 on success or an error code. 
++ */ ++int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, ++ int nvec) ++{ ++ struct msi_domain_info *info = domain->host_data; ++ struct msi_domain_ops *ops = info->ops; ++ msi_alloc_info_t arg; ++ struct msi_desc *desc; ++ int i, ret, virq = -1; ++ ++ ret = ops->msi_check(domain, info, dev); ++ if (ret == 0) ++ ret = ops->msi_prepare(domain, dev, nvec, &arg); ++ if (ret) ++ return ret; ++ ++ for_each_msi_entry(desc, dev) { ++ ops->set_desc(&arg, desc); ++ if (info->flags & MSI_FLAG_IDENTITY_MAP) ++ virq = (int)ops->get_hwirq(info, &arg); ++ else ++ virq = -1; ++ ++ virq = __irq_domain_alloc_irqs(domain, virq, desc->nvec_used, ++ dev_to_node(dev), &arg, false); ++ if (virq < 0) { ++ ret = -ENOSPC; ++ if (ops->handle_error) ++ ret = ops->handle_error(domain, desc, ret); ++ if (ops->msi_finish) ++ ops->msi_finish(&arg, ret); ++ return ret; ++ } ++ ++ for (i = 0; i < desc->nvec_used; i++) ++ irq_set_msi_desc_off(virq, i, desc); ++ } ++ ++ if (ops->msi_finish) ++ ops->msi_finish(&arg, 0); ++ ++ for_each_msi_entry(desc, dev) { ++ if (desc->nvec_used == 1) ++ dev_dbg(dev, "irq %d for MSI\n", virq); ++ else ++ dev_dbg(dev, "irq [%d-%d] for MSI\n", ++ virq, virq + desc->nvec_used - 1); ++ } ++ ++ return 0; ++} ++ ++/** ++ * msi_domain_free_irqs - Free interrupts from a MSI interrupt @domain associated to @dev ++ * @domain: The domain managing the interrupts ++ * @dev: Pointer to device struct of the device for which the interrupts ++ * are freed ++ */ ++void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev) ++{ ++ struct msi_desc *desc; ++ ++ for_each_msi_entry(desc, dev) { ++ irq_domain_free_irqs(desc->irq, desc->nvec_used); ++ desc->irq = 0; ++ } ++} ++ ++/** ++ * msi_get_domain_info - Get the MSI interrupt domain info for @domain ++ * @domain: The interrupt domain to retrieve data from ++ * ++ * Returns the pointer to the msi_domain_info stored in ++ * @domain->host_data. ++ */ ++struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain) ++{ ++ return (struct msi_domain_info *)domain->host_data; ++} ++ ++#endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */ +diff --git a/kernel/resource.c b/kernel/resource.c +index 0bcebff..19f2357 100644 +--- a/kernel/resource.c ++++ b/kernel/resource.c +@@ -22,6 +22,7 @@ + #include <linux/device.h> + #include <linux/pfn.h> + #include <linux/mm.h> ++#include <linux/resource_ext.h> + #include <asm/io.h> + + +@@ -1529,6 +1530,30 @@ int iomem_is_exclusive(u64 addr) + return err; + } + ++struct resource_entry *resource_list_create_entry(struct resource *res, ++ size_t extra_size) ++{ ++ struct resource_entry *entry; ++ ++ entry = kzalloc(sizeof(*entry) + extra_size, GFP_KERNEL); ++ if (entry) { ++ INIT_LIST_HEAD(&entry->node); ++ entry->res = res ? 
res : &entry->__res; ++ } ++ ++ return entry; ++} ++EXPORT_SYMBOL(resource_list_create_entry); ++ ++void resource_list_free(struct list_head *head) ++{ ++ struct resource_entry *entry, *tmp; ++ ++ list_for_each_entry_safe(entry, tmp, head, node) ++ resource_list_destroy_entry(entry); ++} ++EXPORT_SYMBOL(resource_list_free); ++ + static int __init strict_iomem(char *str) + { + if (strstr(str, "relaxed")) +diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include +index 65e7b08..5374b1b 100644 +--- a/scripts/Kbuild.include ++++ b/scripts/Kbuild.include +@@ -179,6 +179,12 @@ build := -f $(srctree)/scripts/Makefile.build obj + # $(Q)$(MAKE) $(modbuiltin)=dir + modbuiltin := -f $(srctree)/scripts/Makefile.modbuiltin obj + ++### ++# Shorthand for $(Q)$(MAKE) -f scripts/Makefile.dtbinst obj= ++# Usage: ++# $(Q)$(MAKE) $(dtbinst)=dir ++dtbinst := -f $(if $(KBUILD_SRC),$(srctree)/)scripts/Makefile.dtbinst obj ++ + # Prefix -I with $(srctree) if it is not an absolute path. + # skip if -I has no parameter + addtree = $(if $(patsubst -I%,%,$(1)), \ +diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib +index 54be19a..5117552 100644 +--- a/scripts/Makefile.lib ++++ b/scripts/Makefile.lib +@@ -283,18 +283,6 @@ $(obj)/%.dtb: $(src)/%.dts FORCE + + dtc-tmp = $(subst $(comma),_,$(dot-target).dts.tmp) + +-# Helper targets for Installing DTBs into the boot directory +-quiet_cmd_dtb_install = INSTALL $< +- cmd_dtb_install = cp $< $(2) +- +-_dtbinst_pre_: +- $(Q)if [ -d $(INSTALL_DTBS_PATH).old ]; then rm -rf $(INSTALL_DTBS_PATH).old; fi +- $(Q)if [ -d $(INSTALL_DTBS_PATH) ]; then mv $(INSTALL_DTBS_PATH) $(INSTALL_DTBS_PATH).old; fi +- $(Q)mkdir -p $(INSTALL_DTBS_PATH) +- +-%.dtb_dtbinst_: $(obj)/%.dtb _dtbinst_pre_ +- $(call cmd,dtb_install,$(INSTALL_DTBS_PATH)) +- + # Bzip2 + # --------------------------------------------------------------------------- + +-- +2.1.0.27.g96db324 + diff --git a/packages/base/any/kernels/3.18.25/patches/series b/packages/base/any/kernels/3.18.25/patches/series index 661d5eac..7954e123 100644 --- a/packages/base/any/kernels/3.18.25/patches/series +++ b/packages/base/any/kernels/3.18.25/patches/series @@ -1,2 +1,3 @@ aufs.patch driver-support-intel-igb-bcm54616-phy.patch +add-kernel-patches-for-nxp-arm64-ls2080ardb-based-on.patch diff --git a/packages/base/arm64/kernels/Makefile b/packages/base/arm64/kernels/Makefile new file mode 100644 index 00000000..003238cf --- /dev/null +++ b/packages/base/arm64/kernels/Makefile @@ -0,0 +1 @@ +include $(ONL)/make/pkg.mk \ No newline at end of file diff --git a/packages/base/arm64/kernels/kernel-3.18.25-arm64-all/Makefile b/packages/base/arm64/kernels/kernel-3.18.25-arm64-all/Makefile new file mode 100644 index 00000000..003238cf --- /dev/null +++ b/packages/base/arm64/kernels/kernel-3.18.25-arm64-all/Makefile @@ -0,0 +1 @@ +include $(ONL)/make/pkg.mk \ No newline at end of file diff --git a/packages/base/arm64/kernels/kernel-3.18.25-arm64-all/PKG.yml b/packages/base/arm64/kernels/kernel-3.18.25-arm64-all/PKG.yml new file mode 100644 index 00000000..19302944 --- /dev/null +++ b/packages/base/arm64/kernels/kernel-3.18.25-arm64-all/PKG.yml @@ -0,0 +1,18 @@ + +common: + arch: arm64 + version: 1.0.0 + copyright: Copyright 2013, 2014, 2015 Big Switch Networks + maintainer: support@bigswitch.com + +packages: + - name: onl-kernel-3.18.25-arm64-all + version: 1.0.0 + summary: Open Network Linux Kernel 3.18.25 for ARM64 Integrated Processor Platforms. 
+ + files: + builds/kernel-3.18.25-arm64-all.bin.gz : $$PKG_INSTALL/ + builds/linux-3.18.25-mbuild : $$PKG_INSTALL/mbuilds + builds/linux-3.18.25-dtbs : $$PKG_INSTALL/dtbs + + changelog: Change changes changes. diff --git a/packages/base/arm64/kernels/kernel-3.18.25-arm64-all/builds/Makefile b/packages/base/arm64/kernels/kernel-3.18.25-arm64-all/builds/Makefile new file mode 100644 index 00000000..deb5823c --- /dev/null +++ b/packages/base/arm64/kernels/kernel-3.18.25-arm64-all/builds/Makefile @@ -0,0 +1,10 @@ +# -*- Makefile -*- +THIS_DIR := $(abspath $(dir $(lastword $(MAKEFILE_LIST)))) + +include $(ONL)/make/config.mk + +kernel: + $(MAKE) -C $(ONL)/packages/base/any/kernels/3.18.25/configs/arm64-all K_TARGET_DIR=$(THIS_DIR) $(ONL_MAKE_PARALLEL) + +clean: + rm -rf linux-3.18.25*
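For orientation, here is a minimal sketch (not part of the patch series above) of how a stacked interrupt-controller driver would consume the hierarchical irqdomain API that the kernel/irq/irqdomain.c and kernel/irq/chip.c hunks backport. Everything prefixed foo_/FOO_ is a hypothetical placeholder; only the irq_domain_* and irq_chip_* helpers come from the patch itself, and a real driver would also supply mask/unmask callbacks and decode the hwirq from its allocation argument.

/* Hypothetical stacked irqchip built on the backported hierarchy API. */
#include <linux/irq.h>
#include <linux/irqdomain.h>

#define FOO_NR_IRQS 32 /* assumed controller size, for illustration only */

static struct irq_chip foo_irq_chip = {
	.name			= "FOO",
	.irq_set_affinity	= irq_chip_set_affinity_parent,	/* from chip.c above */
	.irq_retrigger		= irq_chip_retrigger_hierarchy,	/* from chip.c above */
};

static int foo_domain_alloc(struct irq_domain *domain, unsigned int virq,
			    unsigned int nr_irqs, void *arg)
{
	irq_hw_number_t hwirq = 0;	/* normally decoded from @arg (e.g. DT args) */
	int i, ret;

	/* Let the parent domain (e.g. the GIC) allocate its resources first. */
	ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
	if (ret < 0)
		return ret;

	/* Then attach this level's hwirq and chip to each irq_data. */
	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
					      &foo_irq_chip, NULL);
	return 0;
}

static const struct irq_domain_ops foo_domain_ops = {
	.alloc	= foo_domain_alloc,
	.free	= irq_domain_free_irqs_common,	/* resets irq_data, frees parent */
};

/* Because .alloc is set, irq_domain_check_hierarchy() flags the domain
 * IRQ_DOMAIN_FLAG_HIERARCHY automatically at registration time. */
static struct irq_domain *foo_init(struct device_node *node,
				   struct irq_domain *parent)
{
	return irq_domain_add_hierarchy(parent, 0, FOO_NR_IRQS, node,
					&foo_domain_ops, NULL);
}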
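Likewise, a hedged sketch of how the new kernel/irq/msi.c layer is consumed. The foo_msi_* names and the register write are invented for illustration; with MSI_FLAG_USE_DEF_DOM_OPS and MSI_FLAG_USE_DEF_CHIP_OPS, msi_create_irq_domain() fills in the default msi_domain_ops and the default chip callbacks (pci_msi_mask_irq/pci_msi_unmask_irq and msi_domain_set_affinity), exactly as msi_domain_update_dom_ops() and msi_domain_update_chip_ops() do above.

/* Hypothetical MSI controller wiring on top of kernel/irq/msi.c above. */
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>

/* Assumed hardware writer: would program msg->address_hi/address_lo and
 * msg->data into the controller's MSI capture registers. */
static void foo_write_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	/* controller-specific register writes go here */
}

static struct irq_chip foo_msi_chip = {
	.name			= "FOO-MSI",
	.irq_write_msi_msg	= foo_write_msi_msg,
	/* .irq_mask/.irq_unmask/.irq_set_affinity are filled in by
	 * msi_domain_update_chip_ops() via MSI_FLAG_USE_DEF_CHIP_OPS. */
};

static struct msi_domain_info foo_msi_info = {
	.flags	= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS,
	.chip	= &foo_msi_chip,
};

static struct irq_domain *foo_msi_init(struct device_node *node,
				       struct irq_domain *parent)
{
	/* Creates a hierarchy domain whose ops are msi_domain_ops; client
	 * devices then allocate with msi_domain_alloc_irqs(domain, dev, nvec). */
	return msi_create_irq_domain(node, &foo_msi_info, parent);
}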