From 26e63c4450b1e4dcef3f9256663f36253e6b62d2 Mon Sep 17 00:00:00 2001
From: Etienne Carriere
Date: Wed, 8 Nov 2017 13:48:40 +0100
Subject: [PATCH 01/18] ARMv7 target is driven by ARM_ARCH_MAJOR==7

The external build environment shall set the directive ARM_ARCH_MAJOR
to 7 to specify a target ARMv7-A core. As ARM-TF expects ARCH to be
set, ARM_ARCH_MAJOR==7 mandates ARCH=aarch32.

The toolchain target architecture/cpu selection is deferred until the
platform configuration is parsed. The platform shall define its target
core through ARM_CORTEX_A<x>=yes, <x> being 5, 7, 9, 12, 15 and/or 17.

A platform can bypass the ARM_CORTEX_A<x>=yes directive and directly
provide the toolchain target directive through MARCH32_DIRECTIVE.

Signed-off-by: Etienne Carriere
---
 Makefile                     | 20 ++++++++++++++++----
 docs/firmware-design.rst     | 29 +++++++++++++++++++++++++++++
 docs/user-guide.rst          |  3 ++-
 maintainers.rst              |  6 ++++++
 make_helpers/armv7-a-cpus.mk | 33 +++++++++++++++++++++++++++++++++
 5 files changed, 86 insertions(+), 5 deletions(-)
 create mode 100644 make_helpers/armv7-a-cpus.mk

diff --git a/Makefile b/Makefile
index a648d8c0b3..39b6afd515 100644
--- a/Makefile
+++ b/Makefile
@@ -126,20 +126,28 @@ OD			:=	${CROSS_COMPILE}objdump
 NM			:=	${CROSS_COMPILE}nm
 PP			:=	${CROSS_COMPILE}gcc -E
 
+ifeq (${ARM_ARCH_MAJOR},7)
+target32-directive	= 	-target arm-none-eabi
+# Will set march32-directive from platform configuration
+else
+target32-directive	= 	-target armv8a-none-eabi
+march32-directive	= 	-march armv8-a
+endif
+
 ifeq ($(notdir $(CC)),armclang)
-TF_CFLAGS_aarch32	=	-target arm-arm-none-eabi -march=armv8-a
+TF_CFLAGS_aarch32	=	-target arm-arm-none-eabi $(march32-directive)
 TF_CFLAGS_aarch64	=	-target aarch64-arm-none-eabi -march=armv8-a
 else ifneq ($(findstring clang,$(notdir $(CC))),)
-TF_CFLAGS_aarch32	=	-target armv8a-none-eabi
+TF_CFLAGS_aarch32	=	$(target32-directive)
 TF_CFLAGS_aarch64	=	-target aarch64-elf
 else
-TF_CFLAGS_aarch32	=	-march=armv8-a
+TF_CFLAGS_aarch32	=	$(march32-directive)
 TF_CFLAGS_aarch64	=	-march=armv8-a
 endif
 
 TF_CFLAGS_aarch64	+=	-mgeneral-regs-only -mstrict-align
 
-ASFLAGS_aarch32		=	-march=armv8-a
+ASFLAGS_aarch32		=	$(march32-directive)
 ASFLAGS_aarch64		=	-march=armv8-a
 
 CPPFLAGS		=	${DEFINES} ${INCLUDES} -nostdinc \
@@ -262,6 +270,10 @@ include ${PLAT_MAKEFILE_FULL}
 
 $(eval $(call MAKE_PREREQ_DIR,${BUILD_PLAT}))
 
+ifeq (${ARM_ARCH_MAJOR},7)
+include make_helpers/armv7-a-cpus.mk
+endif
+
 # Platform compatibility is not supported in AArch32
 ifneq (${ARCH},aarch32)
 # If the platform has not defined ENABLE_PLAT_COMPAT, then enable it by default
diff --git a/docs/firmware-design.rst b/docs/firmware-design.rst
index 7cc197096b..93c13d250c 100644
--- a/docs/firmware-design.rst
+++ b/docs/firmware-design.rst
@@ -2520,6 +2520,35 @@ This Architecture Extension is targeted when ``ARM_ARCH_MAJOR`` == 8 and
    table entries for a given stage of translation for a particular
    translation regime.
 
+ARMv7
+~~~~~
+
+This Architecture Extension is targeted when ``ARM_ARCH_MAJOR`` == 7.
+
+There are several ARMv7 extensions available. The TrustZone extension
+is mandatory to support the ARM Trusted Firmware bootloader and
+runtime services.
+
+A platform implementing an ARMv7 system can define its target
+Cortex-A architecture through ``ARM_CORTEX_A<x>=yes`` in its
+``platform.mk`` script. For example, ``ARM_CORTEX_A15=yes`` selects a
+Cortex-A15 target.
+
+A platform can also set ``ARM_WITH_NEON=yes`` to enable NEON support.
+Note that using NEON at runtime has constraints on the non-secure world context.
+The trusted firmware does not yet provide VFP context management.
+
+The directives ``ARM_CORTEX_A<x>`` and ``ARM_WITH_NEON`` are used to set
+the toolchain target architecture directive.
+
+A platform may instead bypass these directives and directly provide the
+toolchain target architecture directive through ``MARCH32_DIRECTIVE``.
+For example:
+
+::
+
+    MARCH32_DIRECTIVE := -march=armv7-a
+
 Code Structure
 --------------
 
diff --git a/docs/user-guide.rst b/docs/user-guide.rst
index 4df75908c9..d175ebd115 100644
--- a/docs/user-guide.rst
+++ b/docs/user-guide.rst
@@ -219,7 +219,8 @@ Common build options
 
 -  ``ARM_ARCH_MAJOR``: The major version of ARM Architecture to target when
    compiling ARM Trusted Firmware. Its value must be numeric, and defaults to
-   8 . See also, *ARMv8 Architecture Extensions* in `Firmware Design`_.
+   8 . See also, *ARMv8 Architecture Extensions* and
+   *ARMv7 Architecture Extensions* in `Firmware Design`_.
 
 -  ``ARM_ARCH_MINOR``: The minor version of ARM Architecture to target when
    compiling ARM Trusted Firmware. Its value must be a numeric, and defaults
diff --git a/maintainers.rst b/maintainers.rst
index 388073e435..701ea17ec4 100644
--- a/maintainers.rst
+++ b/maintainers.rst
@@ -91,6 +91,11 @@ Files:
 -  docs/plat/xilinx-zynqmp.md
 -  plat/xilinx/\*
 
+ARMv7 architecture sub-maintainer
+---------------------------------
+
+Etienne Carriere (etienne.carriere@linaro.org, `etienne-lms`_)
+
 .. _danh-arm: https://github.com/danh-arm
 .. _davidcunado-arm: https://github.com/davidcunado-arm
 .. _jenswi-linaro: https://github.com/jenswi-linaro
@@ -100,3 +105,4 @@ Files:
 .. _TonyXie06: https://github.com/TonyXie06
 .. _rkchrome: https://github.com/rkchrome
 .. _sorenb-xlnx: https://github.com/sorenb-xlnx
+.. _etienne-lms: https://github.com/etienne-lms
diff --git a/make_helpers/armv7-a-cpus.mk b/make_helpers/armv7-a-cpus.mk
new file mode 100644
index 0000000000..5a1c75ce5f
--- /dev/null
+++ b/make_helpers/armv7-a-cpus.mk
@@ -0,0 +1,33 @@
+#
+# Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+ifneq (${ARCH},aarch32)
+$(error ARM_ARCH_MAJOR=7 mandates ARCH=aarch32)
+endif
+
+# For ARMv7, set march32 from the platform directives ARM_CORTEX_A<x>=yes
+# and ARM_WITH_NEON=yes/no.
+#
+# GCC and Clang require -march=armv7-a for C-A9 and -march=armv7ve for C-A15.
+# armClang requires -march=armv7-a for all ARMv7 Cortex-A. To comply with
+# all, just drop -march and supply only -mcpu.
+
+# Platform can override march32-directive through MARCH32_DIRECTIVE
+ifdef MARCH32_DIRECTIVE
+march32-directive		:= $(MARCH32_DIRECTIVE)
+else
+march32-set-${ARM_CORTEX_A5}	:= -mcpu=cortex-a5
+march32-set-${ARM_CORTEX_A7}	:= -mcpu=cortex-a7
+march32-set-${ARM_CORTEX_A9}	:= -mcpu=cortex-a9
+march32-set-${ARM_CORTEX_A12}	:= -mcpu=cortex-a12
+march32-set-${ARM_CORTEX_A15}	:= -mcpu=cortex-a15
+march32-set-${ARM_CORTEX_A17}	:= -mcpu=cortex-a17
+march32-neon-$(ARM_WITH_NEON)	:= -mfpu=neon
+
+# default to -march=armv7-a as target directive
+march32-set-yes			?= -march=armv7-a
+march32-directive		:= ${march32-set-yes} ${march32-neon-yes}
+endif

From 70896274bac4b2c420f115fb71a677e516048797 Mon Sep 17 00:00:00 2001
From: Etienne Carriere
Date: Wed, 8 Nov 2017 13:49:12 +0100
Subject: [PATCH 02/18] ARMv7 requires clearing the exclusive access at monitor entry

Clear the exclusive monitor on SMC and FIQ entry for ARMv7 cores.
Signed-off-by: Etienne Carriere --- bl32/sp_min/aarch32/entrypoint.S | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/bl32/sp_min/aarch32/entrypoint.S b/bl32/sp_min/aarch32/entrypoint.S index cd9fe5cb74..b2b7953f82 100644 --- a/bl32/sp_min/aarch32/entrypoint.S +++ b/bl32/sp_min/aarch32/entrypoint.S @@ -30,6 +30,16 @@ stcopr \reg, SCR .endm + .macro clrex_on_monitor_entry +#if (ARM_ARCH_MAJOR == 7) + /* + * ARMv7 architectures need to clear the exclusive access when + * entering Monitor mode. + */ + clrex +#endif + .endm + vector_base sp_min_vector_table b sp_min_entrypoint b plat_panic_handler /* Undef */ @@ -147,6 +157,8 @@ func handle_smc smcc_save_gp_mode_regs + clrex_on_monitor_entry + /* * `sp` still points to `smc_ctx_t`. Save it to a register * and restore the C runtime stack pointer to `sp`. @@ -203,11 +215,7 @@ func handle_fiq smcc_save_gp_mode_regs - /* - * AArch32 architectures need to clear the exclusive access when - * entering Monitor mode. - */ - clrex + clrex_on_monitor_entry /* load run-time stack */ mov r2, sp From 0147bef523e27e26c0240fef4b47deca6720566c Mon Sep 17 00:00:00 2001 From: Etienne Carriere Date: Sun, 5 Nov 2017 22:55:47 +0100 Subject: [PATCH 03/18] ARMv7 does not support STL instruction Also need to add a SEV instruction in ARMv7 spin_unlock which is implicit in ARMv8. Signed-off-by: Etienne Carriere --- include/common/aarch32/asm_macros.S | 9 +++++++++ lib/locks/exclusive/aarch32/spinlock.S | 12 ++++++++++++ 2 files changed, 21 insertions(+) diff --git a/include/common/aarch32/asm_macros.S b/include/common/aarch32/asm_macros.S index f5737449e8..0d1a37d1ec 100644 --- a/include/common/aarch32/asm_macros.S +++ b/include/common/aarch32/asm_macros.S @@ -79,6 +79,15 @@ ldr r0, =(\_name + \_size) .endm +#if (ARM_ARCH_MAJOR == 7) + /* ARMv7 does not support stl instruction */ + .macro stl _reg, _write_lock + dmb + str \_reg, \_write_lock + dsb + .endm +#endif + /* * Helper macro to generate the best mov/movw/movt combinations * according to the value to be moved. diff --git a/lib/locks/exclusive/aarch32/spinlock.S b/lib/locks/exclusive/aarch32/spinlock.S index bc77bc9c4d..9492cc0813 100644 --- a/lib/locks/exclusive/aarch32/spinlock.S +++ b/lib/locks/exclusive/aarch32/spinlock.S @@ -9,6 +9,17 @@ .globl spin_lock .globl spin_unlock +#if ARM_ARCH_AT_LEAST(8, 0) +/* + * According to the ARMv8-A Architecture Reference Manual, "when the global + * monitor for a PE changes from Exclusive Access state to Open Access state, + * an event is generated.". This applies to both AArch32 and AArch64 modes of + * ARMv8-A. As a result, no explicit SEV with unlock is required. 
+ */ +#define COND_SEV() +#else +#define COND_SEV() sev +#endif func spin_lock mov r2, #1 @@ -27,5 +38,6 @@ endfunc spin_lock func spin_unlock mov r1, #0 stl r1, [r0] + COND_SEV() bx lr endfunc spin_unlock From 908cf7059795eee96e1120ecbedc13eae3d607aa Mon Sep 17 00:00:00 2001 From: Etienne Carriere Date: Sun, 5 Nov 2017 22:55:55 +0100 Subject: [PATCH 04/18] ARMv7 does not support SDCR Signed-off-by: Etienne Carriere --- include/common/aarch32/el3_common_macros.S | 2 ++ 1 file changed, 2 insertions(+) diff --git a/include/common/aarch32/el3_common_macros.S b/include/common/aarch32/el3_common_macros.S index 6fc00dd228..59e99f89aa 100644 --- a/include/common/aarch32/el3_common_macros.S +++ b/include/common/aarch32/el3_common_macros.S @@ -107,6 +107,7 @@ vmsr FPEXC, r0 isb +#if (ARM_ARCH_MAJOR > 7) /* --------------------------------------------------------------------- * Initialise SDCR, setting all the fields rather than relying on hw. * @@ -116,6 +117,7 @@ */ ldr r0, =(SDCR_RESET_VAL | SDCR_SPD(SDCR_SPD_DISABLE)) stcopr r0, SDCR +#endif .endm From 94f4700017bbcabc46d76c2d4fba24b9be73fa6c Mon Sep 17 00:00:00 2001 From: Etienne Carriere Date: Sun, 5 Nov 2017 22:56:03 +0100 Subject: [PATCH 05/18] ARMv7 architecture have specific system registers Signed-off-by: Etienne Carriere --- include/lib/aarch32/arch.h | 16 ++++++++++++++-- include/lib/aarch32/arch_helpers.h | 8 ++++++++ 2 files changed, 22 insertions(+), 2 deletions(-) diff --git a/include/lib/aarch32/arch.h b/include/lib/aarch32/arch.h index 3846bec4d5..c8940303c6 100644 --- a/include/lib/aarch32/arch.h +++ b/include/lib/aarch32/arch.h @@ -87,15 +87,21 @@ #define ID_PFR1_GIC_MASK 0xf /* SCTLR definitions */ -#define SCTLR_RES1 ((1 << 23) | (1 << 22) | (1 << 11) | (1 << 4) | \ - (1 << 3)) +#define SCTLR_RES1_DEF ((1 << 23) | (1 << 22) | (1 << 4) | (1 << 3)) +#if ARM_ARCH_MAJOR == 7 +#define SCTLR_RES1 SCTLR_RES1_DEF +#else +#define SCTLR_RES1 (SCTLR_RES1_DEF | (1 << 11)) +#endif #define SCTLR_M_BIT (1 << 0) #define SCTLR_A_BIT (1 << 1) #define SCTLR_C_BIT (1 << 2) #define SCTLR_CP15BEN_BIT (1 << 5) #define SCTLR_ITD_BIT (1 << 7) +#define SCTLR_Z_BIT (1 << 11) #define SCTLR_I_BIT (1 << 12) #define SCTLR_V_BIT (1 << 13) +#define SCTLR_RR_BIT (1 << 14) #define SCTLR_NTWI_BIT (1 << 16) #define SCTLR_NTWE_BIT (1 << 18) #define SCTLR_WXN_BIT (1 << 19) @@ -385,6 +391,7 @@ /* System register defines The format is: coproc, opt1, CRn, CRm, opt2 */ #define SCR p15, 0, c1, c1, 0 #define SCTLR p15, 0, c1, c0, 0 +#define ACTLR p15, 0, c1, c0, 1 #define SDCR p15, 0, c1, c3, 1 #define MPIDR p15, 0, c0, c0, 5 #define MIDR p15, 0, c0, c0, 0 @@ -431,6 +438,11 @@ #define PMCR p15, 0, c9, c12, 0 #define CNTHP_CTL p15, 4, c14, c2, 1 +/* AArch32 coproc registers for 32bit MMU descriptor support */ +#define PRRR p15, 0, c10, c2, 0 +#define NMRR p15, 0, c10, c2, 1 +#define DACR p15, 0, c3, c0, 0 + /* GICv3 CPU Interface system register defines. 
The format is: coproc, opt1, CRn, CRm, opt2 */ #define ICC_IAR1 p15, 0, c12, c12, 0 #define ICC_IAR0 p15, 0, c12, c8, 0 diff --git a/include/lib/aarch32/arch_helpers.h b/include/lib/aarch32/arch_helpers.h index 469e9b0d05..ede6d2ed31 100644 --- a/include/lib/aarch32/arch_helpers.h +++ b/include/lib/aarch32/arch_helpers.h @@ -235,6 +235,7 @@ DEFINE_COPROCR_READ_FUNC_64(cntpct, CNTPCT_64) DEFINE_COPROCR_RW_FUNCS(scr, SCR) DEFINE_COPROCR_RW_FUNCS(ctr, CTR) DEFINE_COPROCR_RW_FUNCS(sctlr, SCTLR) +DEFINE_COPROCR_RW_FUNCS(actlr, ACTLR) DEFINE_COPROCR_RW_FUNCS(hsctlr, HSCTLR) DEFINE_COPROCR_RW_FUNCS(hcr, HCR) DEFINE_COPROCR_RW_FUNCS(hcptr, HCPTR) @@ -273,6 +274,13 @@ DEFINE_COPROCR_RW_FUNCS(hdcr, HDCR) DEFINE_COPROCR_RW_FUNCS(cnthp_ctl, CNTHP_CTL) DEFINE_COPROCR_READ_FUNC(pmcr, PMCR) +DEFINE_COPROCR_RW_FUNCS(nsacr, NSACR) + +/* AArch32 coproc registers for 32bit MMU descriptor support */ +DEFINE_COPROCR_RW_FUNCS(prrr, PRRR) +DEFINE_COPROCR_RW_FUNCS(nmrr, NMRR) +DEFINE_COPROCR_RW_FUNCS(dacr, DACR) + /* * TLBI operation prototypes */ From 10922e7ade06906bc762e4a7e171bc2a910a9ecc Mon Sep 17 00:00:00 2001 From: Etienne Carriere Date: Sun, 5 Nov 2017 22:56:10 +0100 Subject: [PATCH 06/18] ARMv7: introduce Cortex-A15 Signed-off-by: Etienne Carriere --- include/lib/cpus/aarch32/cortex_a15.h | 20 +++++++ lib/cpus/aarch32/cortex_a15.S | 81 +++++++++++++++++++++++++++ 2 files changed, 101 insertions(+) create mode 100644 include/lib/cpus/aarch32/cortex_a15.h create mode 100644 lib/cpus/aarch32/cortex_a15.S diff --git a/include/lib/cpus/aarch32/cortex_a15.h b/include/lib/cpus/aarch32/cortex_a15.h new file mode 100644 index 0000000000..905c139dab --- /dev/null +++ b/include/lib/cpus/aarch32/cortex_a15.h @@ -0,0 +1,20 @@ +/* + * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef __CORTEX_A15_H__ +#define __CORTEX_A15_H__ + +/******************************************************************************* + * Cortex-A15 midr with version/revision set to 0 + ******************************************************************************/ +#define CORTEX_A15_MIDR 0x410FC0F0 + +/******************************************************************************* + * CPU Auxiliary Control register specific definitions. + ******************************************************************************/ +#define CORTEX_A15_ACTLR_SMP_BIT (1 << 6) + +#endif /* __CORTEX_A15_H__ */ diff --git a/lib/cpus/aarch32/cortex_a15.S b/lib/cpus/aarch32/cortex_a15.S new file mode 100644 index 0000000000..0d5a116582 --- /dev/null +++ b/lib/cpus/aarch32/cortex_a15.S @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include +#include +#include +#include +#include + +/* + * Cortex-A15 support LPAE and Virtualization Extensions. + * Don't care if confiugration uses or not LPAE and VE. 
+ * Therefore, where we don't check ARCH_IS_ARMV7_WITH_LPAE/VE + */ + + .macro assert_cache_enabled +#if ENABLE_ASSERTIONS + ldcopr r0, SCTLR + tst r0, #SCTLR_C_BIT + ASM_ASSERT(eq) +#endif + .endm + +func cortex_a15_disable_smp + ldcopr r0, ACTLR + bic r0, #CORTEX_A15_ACTLR_SMP_BIT + stcopr r0, ACTLR + isb + dsb sy + bx lr +endfunc cortex_a15_disable_smp + +func cortex_a15_enable_smp + ldcopr r0, ACTLR + orr r0, #CORTEX_A15_ACTLR_SMP_BIT + stcopr r0, ACTLR + isb + bx lr +endfunc cortex_a15_enable_smp + +func cortex_a15_reset_func + b cortex_a15_enable_smp +endfunc cortex_a15_reset_func + +func cortex_a15_core_pwr_dwn + push {r12, lr} + + assert_cache_enabled + + /* Flush L1 cache */ + mov r0, #DC_OP_CISW + bl dcsw_op_level1 + + /* Exit cluster coherency */ + pop {r12, lr} + b cortex_a15_disable_smp +endfunc cortex_a15_core_pwr_dwn + +func cortex_a15_cluster_pwr_dwn + push {r12, lr} + + assert_cache_enabled + + /* Flush L1 caches */ + mov r0, #DC_OP_CISW + bl dcsw_op_level1 + + bl plat_disable_acp + + /* Exit cluster coherency */ + pop {r12, lr} + b cortex_a15_disable_smp +endfunc cortex_a15_cluster_pwr_dwn + +declare_cpu_ops cortex_a15, CORTEX_A15_MIDR, \ + cortex_a15_reset_func, \ + cortex_a15_core_pwr_dwn, \ + cortex_a15_cluster_pwr_dwn From e3148c2b5339da8201033bcc2e88e27cf46fd48b Mon Sep 17 00:00:00 2001 From: Etienne Carriere Date: Sun, 5 Nov 2017 22:56:19 +0100 Subject: [PATCH 07/18] ARMv7: introduce Cortex-A9 As Cortex-A9 needs to manually enable program flow prediction, do not reset SCTLR[Z] at entry. Platform should enable it only once MMU is enabled. Change-Id: I34e1ee2da73221903f7767f23bc6fc10ad01e3de Signed-off-by: Etienne Carriere --- include/lib/cpus/aarch32/cortex_a9.h | 31 ++++++++++++ lib/cpus/aarch32/cortex_a9.S | 75 ++++++++++++++++++++++++++++ 2 files changed, 106 insertions(+) create mode 100644 include/lib/cpus/aarch32/cortex_a9.h create mode 100644 lib/cpus/aarch32/cortex_a9.S diff --git a/include/lib/cpus/aarch32/cortex_a9.h b/include/lib/cpus/aarch32/cortex_a9.h new file mode 100644 index 0000000000..be85f9beaf --- /dev/null +++ b/include/lib/cpus/aarch32/cortex_a9.h @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef __CORTEX_A9_H__ +#define __CORTEX_A9_H__ + +/******************************************************************************* + * Cortex-A9 midr with version/revision set to 0 + ******************************************************************************/ +#define CORTEX_A9_MIDR 0x410FC090 + +/******************************************************************************* + * CPU Auxiliary Control register specific definitions. + ******************************************************************************/ +#define CORTEX_A9_ACTLR_SMP_BIT (1 << 6) +#define CORTEX_A9_ACTLR_FLZW_BIT (1 << 3) + +/******************************************************************************* + * CPU Power Control Register + ******************************************************************************/ +#define PCR p15, 0, c15, c0, 0 + +#ifndef __ASSEMBLY__ +#include +DEFINE_COPROCR_RW_FUNCS(pcr, PCR) +#endif + +#endif /* __CORTEX_A9_H__ */ diff --git a/lib/cpus/aarch32/cortex_a9.S b/lib/cpus/aarch32/cortex_a9.S new file mode 100644 index 0000000000..4f30f84a94 --- /dev/null +++ b/lib/cpus/aarch32/cortex_a9.S @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include +#include +#include +#include +#include + + .macro assert_cache_enabled +#if ENABLE_ASSERTIONS + ldcopr r0, SCTLR + tst r0, #SCTLR_C_BIT + ASM_ASSERT(eq) +#endif + .endm + +func cortex_a9_disable_smp + ldcopr r0, ACTLR + bic r0, #CORTEX_A9_ACTLR_SMP_BIT + stcopr r0, ACTLR + isb + dsb sy + bx lr +endfunc cortex_a9_disable_smp + +func cortex_a9_enable_smp + ldcopr r0, ACTLR + orr r0, #CORTEX_A9_ACTLR_SMP_BIT + stcopr r0, ACTLR + isb + bx lr +endfunc cortex_a9_enable_smp + +func cortex_a9_reset_func + b cortex_a9_enable_smp +endfunc cortex_a9_reset_func + +func cortex_a9_core_pwr_dwn + push {r12, lr} + + assert_cache_enabled + + /* Flush L1 cache */ + mov r0, #DC_OP_CISW + bl dcsw_op_level1 + + /* Exit cluster coherency */ + pop {r12, lr} + b cortex_a9_disable_smp +endfunc cortex_a9_core_pwr_dwn + +func cortex_a9_cluster_pwr_dwn + push {r12, lr} + + assert_cache_enabled + + /* Flush L1 caches */ + mov r0, #DC_OP_CISW + bl dcsw_op_level1 + + bl plat_disable_acp + + /* Exit cluster coherency */ + pop {r12, lr} + b cortex_a9_disable_smp +endfunc cortex_a9_cluster_pwr_dwn + +declare_cpu_ops cortex_a9, CORTEX_A9_MIDR, \ + cortex_a9_reset_func, \ + cortex_a9_core_pwr_dwn, \ + cortex_a9_cluster_pwr_dwn From d56a846121136ceacca1eeabfbf440059e99acba Mon Sep 17 00:00:00 2001 From: Etienne Carriere Date: Sun, 5 Nov 2017 22:56:26 +0100 Subject: [PATCH 08/18] ARMv7: introduce Cortex-A5 Signed-off-by: Etienne Carriere --- include/lib/cpus/aarch32/cortex_a5.h | 20 ++++++++ lib/cpus/aarch32/cortex_a5.S | 75 ++++++++++++++++++++++++++++ 2 files changed, 95 insertions(+) create mode 100644 include/lib/cpus/aarch32/cortex_a5.h create mode 100644 lib/cpus/aarch32/cortex_a5.S diff --git a/include/lib/cpus/aarch32/cortex_a5.h b/include/lib/cpus/aarch32/cortex_a5.h new file mode 100644 index 0000000000..0a0b7ffa52 --- /dev/null +++ b/include/lib/cpus/aarch32/cortex_a5.h @@ -0,0 +1,20 @@ +/* + * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef __CORTEX_A5_H__ +#define __CORTEX_A5_H__ + +/******************************************************************************* + * Cortex-A8 midr with version/revision set to 0 + ******************************************************************************/ +#define CORTEX_A5_MIDR 0x410FC050 + +/******************************************************************************* + * CPU Auxiliary Control register specific definitions. + ******************************************************************************/ +#define CORTEX_A5_ACTLR_SMP_BIT (1 << 6) + +#endif /* __CORTEX_A5_H__ */ diff --git a/lib/cpus/aarch32/cortex_a5.S b/lib/cpus/aarch32/cortex_a5.S new file mode 100644 index 0000000000..c07c13ea1d --- /dev/null +++ b/lib/cpus/aarch32/cortex_a5.S @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include +#include +#include +#include +#include + + .macro assert_cache_enabled +#if ENABLE_ASSERTIONS + ldcopr r0, SCTLR + tst r0, #SCTLR_C_BIT + ASM_ASSERT(eq) +#endif + .endm + +func cortex_a5_disable_smp + ldcopr r0, ACTLR + bic r0, #CORTEX_A5_ACTLR_SMP_BIT + stcopr r0, ACTLR + isb + dsb sy + bx lr +endfunc cortex_a5_disable_smp + +func cortex_a5_enable_smp + ldcopr r0, ACTLR + orr r0, #CORTEX_A5_ACTLR_SMP_BIT + stcopr r0, ACTLR + isb + bx lr +endfunc cortex_a5_enable_smp + +func cortex_a5_reset_func + b cortex_a5_enable_smp +endfunc cortex_a5_reset_func + +func cortex_a5_core_pwr_dwn + push {r12, lr} + + assert_cache_enabled + + /* Flush L1 cache */ + mov r0, #DC_OP_CISW + bl dcsw_op_level1 + + /* Exit cluster coherency */ + pop {r12, lr} + b cortex_a5_disable_smp +endfunc cortex_a5_core_pwr_dwn + +func cortex_a5_cluster_pwr_dwn + push {r12, lr} + + assert_cache_enabled + + /* Flush L1 caches */ + mov r0, #DC_OP_CISW + bl dcsw_op_level1 + + bl plat_disable_acp + + /* Exit cluster coherency */ + pop {r12, lr} + b cortex_a5_disable_smp +endfunc cortex_a5_cluster_pwr_dwn + +declare_cpu_ops cortex_a5, CORTEX_A5_MIDR, \ + cortex_a5_reset_func, \ + cortex_a5_core_pwr_dwn, \ + cortex_a5_cluster_pwr_dwn From 6ff43c26395bb74a07e0572a61f618868837bc22 Mon Sep 17 00:00:00 2001 From: Etienne Carriere Date: Sun, 5 Nov 2017 22:56:34 +0100 Subject: [PATCH 09/18] ARMv7: introduce Cortex-A7 Signed-off-by: Etienne Carriere --- include/lib/cpus/aarch32/cortex_a7.h | 20 ++++++++ lib/cpus/aarch32/cortex_a7.S | 75 ++++++++++++++++++++++++++++ 2 files changed, 95 insertions(+) create mode 100644 include/lib/cpus/aarch32/cortex_a7.h create mode 100644 lib/cpus/aarch32/cortex_a7.S diff --git a/include/lib/cpus/aarch32/cortex_a7.h b/include/lib/cpus/aarch32/cortex_a7.h new file mode 100644 index 0000000000..61b0d0085c --- /dev/null +++ b/include/lib/cpus/aarch32/cortex_a7.h @@ -0,0 +1,20 @@ +/* + * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef __CORTEX_A7_H__ +#define __CORTEX_A7_H__ + +/******************************************************************************* + * Cortex-A7 midr with version/revision set to 0 + ******************************************************************************/ +#define CORTEX_A7_MIDR 0x410FC070 + +/******************************************************************************* + * CPU Auxiliary Control register specific definitions. + ******************************************************************************/ +#define CORTEX_A7_ACTLR_SMP_BIT (1 << 6) + +#endif /* __CORTEX_A7_H__ */ diff --git a/lib/cpus/aarch32/cortex_a7.S b/lib/cpus/aarch32/cortex_a7.S new file mode 100644 index 0000000000..0278d1fdaa --- /dev/null +++ b/lib/cpus/aarch32/cortex_a7.S @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include +#include +#include +#include +#include + + .macro assert_cache_enabled +#if ENABLE_ASSERTIONS + ldcopr r0, SCTLR + tst r0, #SCTLR_C_BIT + ASM_ASSERT(eq) +#endif + .endm + +func cortex_a7_disable_smp + ldcopr r0, ACTLR + bic r0, #CORTEX_A7_ACTLR_SMP_BIT + stcopr r0, ACTLR + isb + dsb sy + bx lr +endfunc cortex_a7_disable_smp + +func cortex_a7_enable_smp + ldcopr r0, ACTLR + orr r0, #CORTEX_A7_ACTLR_SMP_BIT + stcopr r0, ACTLR + isb + bx lr +endfunc cortex_a7_enable_smp + +func cortex_a7_reset_func + b cortex_a7_enable_smp +endfunc cortex_a7_reset_func + +func cortex_a7_core_pwr_dwn + push {r12, lr} + + assert_cache_enabled + + /* Flush L1 cache */ + mov r0, #DC_OP_CISW + bl dcsw_op_level1 + + /* Exit cluster coherency */ + pop {r12, lr} + b cortex_a7_disable_smp +endfunc cortex_a7_core_pwr_dwn + +func cortex_a7_cluster_pwr_dwn + push {r12, lr} + + assert_cache_enabled + + /* Flush L1 caches */ + mov r0, #DC_OP_CISW + bl dcsw_op_level1 + + bl plat_disable_acp + + /* Exit cluster coherency */ + pop {r12, lr} + b cortex_a7_disable_smp +endfunc cortex_a7_cluster_pwr_dwn + +declare_cpu_ops cortex_a7, CORTEX_A7_MIDR, \ + cortex_a7_reset_func, \ + cortex_a7_core_pwr_dwn, \ + cortex_a7_cluster_pwr_dwn From 778e411dc9b98897aec3ad4989f4fa4e8e0c4fdf Mon Sep 17 00:00:00 2001 From: Etienne Carriere Date: Sun, 5 Nov 2017 22:56:41 +0100 Subject: [PATCH 10/18] ARMv7: introduce Cortex-A17 Signed-off-by: Etienne Carriere --- include/lib/cpus/aarch32/cortex_a17.h | 20 +++++++ lib/cpus/aarch32/cortex_a17.S | 75 +++++++++++++++++++++++++++ 2 files changed, 95 insertions(+) create mode 100644 include/lib/cpus/aarch32/cortex_a17.h create mode 100644 lib/cpus/aarch32/cortex_a17.S diff --git a/include/lib/cpus/aarch32/cortex_a17.h b/include/lib/cpus/aarch32/cortex_a17.h new file mode 100644 index 0000000000..d2ca91c410 --- /dev/null +++ b/include/lib/cpus/aarch32/cortex_a17.h @@ -0,0 +1,20 @@ +/* + * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef __CORTEX_A17_H__ +#define __CORTEX_A17_H__ + +/******************************************************************************* + * Cortex-A17 midr with version/revision set to 0 + ******************************************************************************/ +#define CORTEX_A17_MIDR 0x410FC0E0 + +/******************************************************************************* + * CPU Auxiliary Control register specific definitions. + ******************************************************************************/ +#define CORTEX_A17_ACTLR_SMP_BIT (1 << 6) + +#endif /* __CORTEX_A17_H__ */ diff --git a/lib/cpus/aarch32/cortex_a17.S b/lib/cpus/aarch32/cortex_a17.S new file mode 100644 index 0000000000..316d4f053c --- /dev/null +++ b/lib/cpus/aarch32/cortex_a17.S @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include +#include +#include +#include +#include + + .macro assert_cache_enabled +#if ENABLE_ASSERTIONS + ldcopr r0, SCTLR + tst r0, #SCTLR_C_BIT + ASM_ASSERT(eq) +#endif + .endm + +func cortex_a17_disable_smp + ldcopr r0, ACTLR + bic r0, #CORTEX_A17_ACTLR_SMP_BIT + stcopr r0, ACTLR + isb + dsb sy + bx lr +endfunc cortex_a17_disable_smp + +func cortex_a17_enable_smp + ldcopr r0, ACTLR + orr r0, #CORTEX_A17_ACTLR_SMP_BIT + stcopr r0, ACTLR + isb + bx lr +endfunc cortex_a17_enable_smp + +func cortex_a17_reset_func + b cortex_a17_enable_smp +endfunc cortex_a17_reset_func + +func cortex_a17_core_pwr_dwn + push {r12, lr} + + assert_cache_enabled + + /* Flush L1 cache */ + mov r0, #DC_OP_CISW + bl dcsw_op_level1 + + /* Exit cluster coherency */ + pop {r12, lr} + b cortex_a17_disable_smp +endfunc cortex_a17_core_pwr_dwn + +func cortex_a17_cluster_pwr_dwn + push {r12, lr} + + assert_cache_enabled + + /* Flush L1 caches */ + mov r0, #DC_OP_CISW + bl dcsw_op_level1 + + bl plat_disable_acp + + /* Exit cluster coherency */ + pop {r12, lr} + b cortex_a17_disable_smp +endfunc cortex_a17_cluster_pwr_dwn + +declare_cpu_ops cortex_a17, CORTEX_A17_MIDR, \ + cortex_a17_reset_func, \ + cortex_a17_core_pwr_dwn, \ + cortex_a17_cluster_pwr_dwn From 1ca8d023161bd94b96a766a2e3dd31bd41fbb245 Mon Sep 17 00:00:00 2001 From: Etienne Carriere Date: Sun, 5 Nov 2017 22:56:50 +0100 Subject: [PATCH 11/18] ARMv7: introduce Cortex-A12 Signed-off-by: Etienne Carriere --- include/lib/cpus/aarch32/cortex_a12.h | 20 +++++++ lib/cpus/aarch32/cortex_a12.S | 75 +++++++++++++++++++++++++++ 2 files changed, 95 insertions(+) create mode 100644 include/lib/cpus/aarch32/cortex_a12.h create mode 100644 lib/cpus/aarch32/cortex_a12.S diff --git a/include/lib/cpus/aarch32/cortex_a12.h b/include/lib/cpus/aarch32/cortex_a12.h new file mode 100644 index 0000000000..3068a41fb5 --- /dev/null +++ b/include/lib/cpus/aarch32/cortex_a12.h @@ -0,0 +1,20 @@ +/* + * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef __CORTEX_A12_H__ +#define __CORTEX_A12_H__ + +/******************************************************************************* + * Cortex-A12 midr with version/revision set to 0 + ******************************************************************************/ +#define CORTEX_A12_MIDR 0x410FC0C0 + +/******************************************************************************* + * CPU Auxiliary Control register specific definitions. + ******************************************************************************/ +#define CORTEX_A12_ACTLR_SMP_BIT (1 << 6) + +#endif /* __CORTEX_A12_H__ */ diff --git a/lib/cpus/aarch32/cortex_a12.S b/lib/cpus/aarch32/cortex_a12.S new file mode 100644 index 0000000000..73c9750722 --- /dev/null +++ b/lib/cpus/aarch32/cortex_a12.S @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include +#include +#include +#include +#include + + .macro assert_cache_enabled +#if ENABLE_ASSERTIONS + ldcopr r0, SCTLR + tst r0, #SCTLR_C_BIT + ASM_ASSERT(eq) +#endif + .endm + +func cortex_a12_disable_smp + ldcopr r0, ACTLR + bic r0, #CORTEX_A12_ACTLR_SMP_BIT + stcopr r0, ACTLR + isb + dsb sy + bx lr +endfunc cortex_a12_disable_smp + +func cortex_a12_enable_smp + ldcopr r0, ACTLR + orr r0, #CORTEX_A12_ACTLR_SMP_BIT + stcopr r0, ACTLR + isb + bx lr +endfunc cortex_a12_enable_smp + +func cortex_a12_reset_func + b cortex_a12_enable_smp +endfunc cortex_a12_reset_func + +func cortex_a12_core_pwr_dwn + push {r12, lr} + + assert_cache_enabled + + /* Flush L1 cache */ + mov r0, #DC_OP_CISW + bl dcsw_op_level1 + + /* Exit cluster coherency */ + pop {r12, lr} + b cortex_a12_disable_smp +endfunc cortex_a12_core_pwr_dwn + +func cortex_a12_cluster_pwr_dwn + push {r12, lr} + + assert_cache_enabled + + /* Flush L1 caches */ + mov r0, #DC_OP_CISW + bl dcsw_op_level1 + + bl plat_disable_acp + + /* Exit cluster coherency */ + pop {r12, lr} + b cortex_a12_disable_smp +endfunc cortex_a12_cluster_pwr_dwn + +declare_cpu_ops cortex_a12, CORTEX_A12_MIDR, \ + cortex_a12_reset_func, \ + cortex_a12_core_pwr_dwn, \ + cortex_a12_cluster_pwr_dwn From 51b992ecec92b9dcca410a2c3716f45daca5afd1 Mon Sep 17 00:00:00 2001 From: Etienne Carriere Date: Wed, 8 Nov 2017 13:53:47 +0100 Subject: [PATCH 12/18] ARMv7 may not support large page addressing ARCH_SUPPORTS_LARGE_PAGE_ADDRESSING allows build environment to handle specific case when target ARMv7 core only supports 32bit MMU descriptor mode. If ARMv7 based platform does not set ARM_CORTEX_Ax=yes, platform shall define ARMV7_SUPPORTS_LARGE_PAGE_ADDRESSING to enable large page addressing support. 
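As an illustration only (the platform itself is hypothetical and not part
of this series), a platform built around one of the listed Cortex-A cores
simply selects the core in its platform.mk:

  ARM_CORTEX_A15	:=	yes
  ARM_WITH_NEON		:=	yes

and invokes the build with ARM_ARCH_MAJOR=7 and ARCH=aarch32. For the
LPAE-capable cores listed in make_helpers/armv7-a-cpus.mk, the helper then
adds the ARMV7_SUPPORTS_LARGE_PAGE_ADDRESSING define on the platform's
behalf, so no explicit define is needed.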
Signed-off-by: Etienne Carriere --- lib/xlat_tables/aarch32/xlat_tables.c | 4 ++++ lib/xlat_tables_v2/aarch32/xlat_tables_arch.c | 4 ++++ make_helpers/armv7-a-cpus.mk | 9 +++++++++ 3 files changed, 17 insertions(+) diff --git a/lib/xlat_tables/aarch32/xlat_tables.c b/lib/xlat_tables/aarch32/xlat_tables.c index c7e34f20f1..720d4461d5 100644 --- a/lib/xlat_tables/aarch32/xlat_tables.c +++ b/lib/xlat_tables/aarch32/xlat_tables.c @@ -13,6 +13,10 @@ #include #include "../xlat_tables_private.h" +#if ARM_ARCH_MAJOR == 7 && !defined(ARMV7_SUPPORTS_LARGE_PAGE_ADDRESSING) +#error ARMv7 target does not support LPAE MMU descriptors +#endif + #define XLAT_TABLE_LEVEL_BASE \ GET_XLAT_TABLE_LEVEL_BASE(PLAT_VIRT_ADDR_SPACE_SIZE) diff --git a/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c b/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c index 642f799a4d..fc7ca46af2 100644 --- a/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c +++ b/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c @@ -14,6 +14,10 @@ #include #include "../xlat_tables_private.h" +#if ARM_ARCH_MAJOR == 7 && !defined(ARMV7_SUPPORTS_LARGE_PAGE_ADDRESSING) +#error ARMv7 target does not support LPAE MMU descriptors +#endif + #if ENABLE_ASSERTIONS unsigned long long xlat_arch_get_max_supported_pa(void) { diff --git a/make_helpers/armv7-a-cpus.mk b/make_helpers/armv7-a-cpus.mk index 5a1c75ce5f..c6491aa8c1 100644 --- a/make_helpers/armv7-a-cpus.mk +++ b/make_helpers/armv7-a-cpus.mk @@ -31,3 +31,12 @@ march32-neon-$(ARM_WITH_NEON) := -mfpu=neon march32-set-yes ?= -march=armv7-a march32-directive := ${march32-set-yes} ${march32-neon-yes} endif + +# Platform may override these extension support directives: +# +# ARMV7_SUPPORTS_LARGE_PAGE_ADDRESSING +# Defined if core supports the Large Page Addressing extension. + +ifeq ($(filter yes,$(ARM_CORTEX_A7) $(ARM_CORTEX_A12) $(ARM_CORTEX_A15) $(ARM_CORTEX_A17)),yes) +$(eval $(call add_define,ARMV7_SUPPORTS_LARGE_PAGE_ADDRESSING)) +endif From 64cc6e91e819ccbf5fe3bf8a5c177b8fa8012d8c Mon Sep 17 00:00:00 2001 From: Etienne Carriere Date: Wed, 8 Nov 2017 14:38:33 +0100 Subject: [PATCH 13/18] ARMv7 may not support Virtualization Extensions ARMv7-A Virtualization extensions brings new instructions and resources that were supported by later architectures. Reference ARM ARM Issue C.c [DDI0406C_C]. ERET and extended MSR/MRS instructions, as specified in [DDI0406C_C] in ID_PFR1 description of bits[15:12] (Virtualization Extensions): A value of 0b0001 implies implementation of the HVC, ERET, MRS (Banked register), and MSR (Banked register) instructions. The ID_ISARs do not identify whether these instructions are implemented. UDIV/SDIV were introduced with the Virtualization extensions, even if not strictly related to the virtualization extensions. If ARMv7 based platform does not set ARM_CORTEX_Ax=yes, platform shall define ARMV7_SUPPORTS_VIRTUALIZATION to enable virtualization extension related resources. 
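To illustrate the rule above with a hedged, hypothetical platform.mk
fragment (the -mcpu/-mfpu values are assumptions for illustration only):
a platform that bypasses ARM_CORTEX_A<x>=yes, for instance to pass its own
cpu tuning options, declares the extensions its core implements itself:

  MARCH32_DIRECTIVE	:=	-mcpu=cortex-a15 -mfpu=neon
  $(eval $(call add_define,ARMV7_SUPPORTS_VIRTUALIZATION))

Cores selected through ARM_CORTEX_A7/A12/A15/A17=yes get this define from
make_helpers/armv7-a-cpus.mk and do not need the explicit add_define call.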
Signed-off-by: Etienne Carriere --- common/aarch32/debug.S | 16 ++++++ include/common/aarch32/asm_macros.S | 10 ++++ include/lib/aarch32/smcc_macros.S | 80 ++++++++++++++++++++++++++++- make_helpers/armv7-a-cpus.mk | 4 ++ 4 files changed, 109 insertions(+), 1 deletion(-) diff --git a/common/aarch32/debug.S b/common/aarch32/debug.S index 583ee4a520..f506356912 100644 --- a/common/aarch32/debug.S +++ b/common/aarch32/debug.S @@ -71,7 +71,15 @@ endfunc report_exception assert_msg1: .asciz "ASSERT: File " assert_msg2: +#if ARM_ARCH_MAJOR == 7 && !defined(ARMV7_SUPPORTS_VIRTUALIZATION) + /****************************************************************** + * Virtualization comes with the UDIV/SDIV instructions. If missing + * write file line number in hexadecimal format. + ******************************************************************/ + .asciz " Line 0x" +#else .asciz " Line " +#endif /* --------------------------------------------------------------------------- * Assertion support in assembly. @@ -113,6 +121,13 @@ func asm_assert bne 1f mov r4, r6 +#if ARM_ARCH_MAJOR == 7 && !defined(ARMV7_SUPPORTS_VIRTUALIZATION) + /****************************************************************** + * Virtualization comes with the UDIV/SDIV instructions. If missing + * write file line number in hexadecimal format. + ******************************************************************/ + bl asm_print_hex +#else /* Print line number in decimal */ mov r6, #10 /* Divide by 10 after every loop iteration */ ldr r5, =MAX_DEC_DIVISOR @@ -124,6 +139,7 @@ dec_print_loop: udiv r5, r5, r6 /* Reduce divisor */ cmp r5, #0 bne dec_print_loop +#endif bl plat_crash_console_flush diff --git a/include/common/aarch32/asm_macros.S b/include/common/aarch32/asm_macros.S index 0d1a37d1ec..74322228e7 100644 --- a/include/common/aarch32/asm_macros.S +++ b/include/common/aarch32/asm_macros.S @@ -79,6 +79,16 @@ ldr r0, =(\_name + \_size) .endm +#if ARM_ARCH_MAJOR == 7 && !defined(ARMV7_SUPPORTS_VIRTUALIZATION) + /* + * ARMv7 cores without Virtualization extension do not support the + * eret instruction. 
+ */ + .macro eret + movs pc, lr + .endm +#endif + #if (ARM_ARCH_MAJOR == 7) /* ARMv7 does not support stl instruction */ .macro stl _reg, _write_lock diff --git a/include/lib/aarch32/smcc_macros.S b/include/lib/aarch32/smcc_macros.S index cf26175d63..93f211f7a9 100644 --- a/include/lib/aarch32/smcc_macros.S +++ b/include/lib/aarch32/smcc_macros.S @@ -22,6 +22,44 @@ mov r0, sp add r0, r0, #SMC_CTX_SP_USR +#if ARM_ARCH_MAJOR == 7 && !defined(ARMV7_SUPPORTS_VIRTUALIZATION) + /* Must be in secure state to restore Monitor mode */ + ldcopr r4, SCR + bic r2, r4, #SCR_NS_BIT + stcopr r2, SCR + isb + + cps #MODE32_sys + stm r0!, {sp, lr} + + cps #MODE32_irq + mrs r2, spsr + stm r0!, {r2, sp, lr} + + cps #MODE32_fiq + mrs r2, spsr + stm r0!, {r2, sp, lr} + + cps #MODE32_svc + mrs r2, spsr + stm r0!, {r2, sp, lr} + + cps #MODE32_abt + mrs r2, spsr + stm r0!, {r2, sp, lr} + + cps #MODE32_und + mrs r2, spsr + stm r0!, {r2, sp, lr} + + /* lr_mon is already saved by caller */ + cps #MODE32_mon + mrs r2, spsr + stm r0!, {r2} + + stcopr r4, SCR + isb +#else /* Save the banked registers including the current SPSR and LR */ mrs r4, sp_usr mrs r5, lr_usr @@ -44,9 +82,10 @@ mrs r11, lr_und mrs r12, spsr stm r0!, {r4-r12} - /* lr_mon is already saved by caller */ + ldcopr r4, SCR +#endif str r4, [sp, #SMC_CTX_SCR] ldcopr r4, PMCR str r4, [sp, #SMC_CTX_PMCR] @@ -82,6 +121,44 @@ /* Restore the banked registers including the current SPSR */ add r1, r0, #SMC_CTX_SP_USR + +#if ARM_ARCH_MAJOR == 7 && !defined(ARMV7_SUPPORTS_VIRTUALIZATION) + /* Must be in secure state to restore Monitor mode */ + ldcopr r4, SCR + bic r2, r4, #SCR_NS_BIT + stcopr r2, SCR + isb + + cps #MODE32_sys + ldm r1!, {sp, lr} + + cps #MODE32_irq + ldm r1!, {r2, sp, lr} + msr spsr_fsxc, r2 + + cps #MODE32_fiq + ldm r1!, {r2, sp, lr} + msr spsr_fsxc, r2 + + cps #MODE32_svc + ldm r1!, {r2, sp, lr} + msr spsr_fsxc, r2 + + cps #MODE32_abt + ldm r1!, {r2, sp, lr} + msr spsr_fsxc, r2 + + cps #MODE32_und + ldm r1!, {r2, sp, lr} + msr spsr_fsxc, r2 + + cps #MODE32_mon + ldm r1!, {r2} + msr spsr_fsxc, r2 + + stcopr r4, SCR + isb +#else ldm r1!, {r4-r12} msr sp_usr, r4 msr lr_usr, r5 @@ -109,6 +186,7 @@ * f->[31:24] and c->[7:0] bits of SPSR. */ msr spsr_fsxc, r12 +#endif /* Restore the LR */ ldr lr, [r0, #SMC_CTX_LR_MON] diff --git a/make_helpers/armv7-a-cpus.mk b/make_helpers/armv7-a-cpus.mk index c6491aa8c1..120b36c7b9 100644 --- a/make_helpers/armv7-a-cpus.mk +++ b/make_helpers/armv7-a-cpus.mk @@ -36,7 +36,11 @@ endif # # ARMV7_SUPPORTS_LARGE_PAGE_ADDRESSING # Defined if core supports the Large Page Addressing extension. +# +# ARMV7_SUPPORTS_VIRTUALIZATION +# Defined if ARMv7 core supports the Virtualization extension. ifeq ($(filter yes,$(ARM_CORTEX_A7) $(ARM_CORTEX_A12) $(ARM_CORTEX_A15) $(ARM_CORTEX_A17)),yes) $(eval $(call add_define,ARMV7_SUPPORTS_LARGE_PAGE_ADDRESSING)) +$(eval $(call add_define,ARMV7_SUPPORTS_VIRTUALIZATION)) endif From 86e2683597ec145cb2b679be0fff6f8962ba9bfd Mon Sep 17 00:00:00 2001 From: Etienne Carriere Date: Wed, 8 Nov 2017 14:41:47 +0100 Subject: [PATCH 14/18] ARMv7 may not support Generic Timer Extension If ARMv7 based platform does not set ARM_CORTEX_Ax=yes, platform shall define ARMV7_SUPPORTS_GENERIC_TIMER to enable generic timer support. 
Signed-off-by: Etienne Carriere --- lib/psci/psci_setup.c | 2 ++ make_helpers/armv7-a-cpus.mk | 4 ++++ 2 files changed, 6 insertions(+) diff --git a/lib/psci/psci_setup.c b/lib/psci/psci_setup.c index a841ddab9c..c00bd94ac4 100644 --- a/lib/psci/psci_setup.c +++ b/lib/psci/psci_setup.c @@ -266,8 +266,10 @@ int psci_setup(const psci_lib_args_t *lib_args) ******************************************************************************/ void psci_arch_setup(void) { +#if ARM_ARCH_MAJOR > 7 || defined(ARMV7_SUPPORTS_GENERIC_TIMER) /* Program the counter frequency */ write_cntfrq_el0(plat_get_syscnt_freq2()); +#endif /* Initialize the cpu_ops pointer. */ init_cpu_ops(); diff --git a/make_helpers/armv7-a-cpus.mk b/make_helpers/armv7-a-cpus.mk index 120b36c7b9..20e7ec533e 100644 --- a/make_helpers/armv7-a-cpus.mk +++ b/make_helpers/armv7-a-cpus.mk @@ -39,8 +39,12 @@ endif # # ARMV7_SUPPORTS_VIRTUALIZATION # Defined if ARMv7 core supports the Virtualization extension. +# +# ARMV7_SUPPORTS_GENERIC_TIMER +# Defined if ARMv7 core supports the Generic Timer extension. ifeq ($(filter yes,$(ARM_CORTEX_A7) $(ARM_CORTEX_A12) $(ARM_CORTEX_A15) $(ARM_CORTEX_A17)),yes) $(eval $(call add_define,ARMV7_SUPPORTS_LARGE_PAGE_ADDRESSING)) $(eval $(call add_define,ARMV7_SUPPORTS_VIRTUALIZATION)) +$(eval $(call add_define,ARMV7_SUPPORTS_GENERIC_TIMER)) endif From ceada2b57e9706cca0a6faedbcc0f000200eb6f6 Mon Sep 17 00:00:00 2001 From: Etienne Carriere Date: Sun, 5 Nov 2017 22:57:20 +0100 Subject: [PATCH 15/18] aarch32: add few missing weak platform specific function Adds weak functions for plat_report_exception, bl1_plat_prepare_exit and plat_error_handler in AArch32 mode. Signed-off-by: Etienne Carriere --- plat/common/aarch32/platform_helpers.S | 31 ++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/plat/common/aarch32/platform_helpers.S b/plat/common/aarch32/platform_helpers.S index 61d21ab36e..0a0e92714a 100644 --- a/plat/common/aarch32/platform_helpers.S +++ b/plat/common/aarch32/platform_helpers.S @@ -7,16 +7,28 @@ #include #include + .weak plat_report_exception .weak plat_crash_console_init .weak plat_crash_console_putc .weak plat_crash_console_flush .weak plat_reset_handler .weak plat_disable_acp + .weak bl1_plat_prepare_exit .weak platform_mem_init + .weak plat_error_handler .weak plat_panic_handler .weak bl2_plat_preload_setup .weak plat_try_next_boot_source + /* ----------------------------------------------------- + * Placeholder function which should be redefined by + * each platform. + * ----------------------------------------------------- + */ +func plat_report_exception + bx lr +endfunc plat_report_exception + /* ----------------------------------------------------- * Placeholder function which should be redefined by * each platform. @@ -73,6 +85,25 @@ func platform_mem_init bx lr endfunc platform_mem_init + /* ----------------------------------------------------- + * void bl1_plat_prepare_exit(entry_point_info_t *ep_info); + * Called before exiting BL1. Default: do nothing + * ----------------------------------------------------- + */ +func bl1_plat_prepare_exit + bx lr +endfunc bl1_plat_prepare_exit + + /* ----------------------------------------------------- + * void plat_error_handler(int err) __dead2; + * Endless loop by default. 
+ * ----------------------------------------------------- + */ +func plat_error_handler + wfi + b plat_error_handler +endfunc plat_error_handler + /* ----------------------------------------------------- * void plat_panic_handler(void) __dead2; * Endless loop by default. From 634e4d2be6a15231cb132d4d15770c321d4d84f3 Mon Sep 17 00:00:00 2001 From: Etienne Carriere Date: Sun, 5 Nov 2017 22:57:29 +0100 Subject: [PATCH 16/18] aarch32: add missing dmb() macro Signed-off-by: Etienne Carriere --- include/lib/aarch32/arch_helpers.h | 1 + 1 file changed, 1 insertion(+) diff --git a/include/lib/aarch32/arch_helpers.h b/include/lib/aarch32/arch_helpers.h index ede6d2ed31..42309d5505 100644 --- a/include/lib/aarch32/arch_helpers.h +++ b/include/lib/aarch32/arch_helpers.h @@ -304,6 +304,7 @@ DEFINE_DCOP_PARAM_FUNC(cvac, DCCMVAC) /* Previously defined accessor functions with incomplete register names */ #define dsb() dsbsy() +#define dmb() dmbsy() #define IS_IN_SECURE() \ (GET_NS_BIT(read_scr()) == 0) From 64deed19e4f78abd7f68393096dacb58f8be1fa3 Mon Sep 17 00:00:00 2001 From: Etienne Carriere Date: Sun, 5 Nov 2017 22:57:38 +0100 Subject: [PATCH 17/18] ARMv7: GICv2 driver can manage GICv1 with security extension Some SoCs integrate a GIC in version 1 that is currently not supported by the trusted firmware. This change hijacks GICv2 driver to handle the GICv1 as GICv1 is compatible enough with GICv2 as far as the platform does not attempt to play with virtualization support or some GICv2 specific power features. Note that current trusted firmware does not use these GICv2 features that are not available in GICv1 Security Extension. Change-Id: Ic2cb3055f1319a83455571d6d918661da583f179 Signed-off-by: Etienne Carriere --- drivers/arm/gic/v2/gicv2_main.c | 14 +++++++++++++- include/drivers/arm/gic_common.h | 2 ++ 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/drivers/arm/gic/v2/gicv2_main.c b/drivers/arm/gic/v2/gicv2_main.c index 25296a63e4..1d963baedf 100644 --- a/drivers/arm/gic/v2/gicv2_main.c +++ b/drivers/arm/gic/v2/gicv2_main.c @@ -167,7 +167,19 @@ void gicv2_driver_init(const gicv2_driver_data_t *plat_driver_data) gic_version = gicd_read_pidr2(plat_driver_data->gicd_base); gic_version = (gic_version >> PIDR2_ARCH_REV_SHIFT) & PIDR2_ARCH_REV_MASK; - assert(gic_version == ARCH_REV_GICV2); + + /* + * GICv1 with security extension complies with trusted firmware + * GICv2 driver as far as virtualization and few tricky power + * features are not used. GICv2 features that are not supported + * by GICv1 with Security Extensions are: + * - virtual interrupt support. + * - wake up events. + * - writeable GIC state register (for power sequences) + * - interrupt priority drop. + * - interrupt signal bypass. 
+ */ + assert(gic_version == ARCH_REV_GICV2 || gic_version == ARCH_REV_GICV1); driver_data = plat_driver_data; diff --git a/include/drivers/arm/gic_common.h b/include/drivers/arm/gic_common.h index 9e126a854b..67d4a28bad 100644 --- a/include/drivers/arm/gic_common.h +++ b/include/drivers/arm/gic_common.h @@ -72,6 +72,8 @@ #define ARCH_REV_GICV3 0x3 /* GICv2 revision as reported by the PIDR2 register */ #define ARCH_REV_GICV2 0x2 +/* GICv1 revision as reported by the PIDR2 register */ +#define ARCH_REV_GICV1 0x1 #define IGROUPR_SHIFT 5 #define ISENABLER_SHIFT 5 From 1d791530d0f3a4a02e285a38f35fecac4feec70c Mon Sep 17 00:00:00 2001 From: Etienne Carriere Date: Sun, 5 Nov 2017 22:57:56 +0100 Subject: [PATCH 18/18] ARMv7: division support for missing __aeabi_*divmod ARMv7-A architectures that do not support the Virtualization extensions do not support instructions for the 32bit division. This change provides a software implementation for 32bit division. The division implementation is dumped from the OP-TEE project http://github.com/OP-TEE/optee_os. The code was slightly modified to pass trusted firmware checkpatch requirements and copyright is given to the ARM trusted firmware initiative and its contributors. Change-Id: Idae0c7b80a0d75eac9bd41ae121921d4c5af3fa3 Signed-off-by: Etienne Carriere --- lib/aarch32/arm32_aeabi_divmod.c | 203 +++++++++++++++++++++++++++ lib/aarch32/arm32_aeabi_divmod_a32.S | 30 ++++ 2 files changed, 233 insertions(+) create mode 100644 lib/aarch32/arm32_aeabi_divmod.c create mode 100644 lib/aarch32/arm32_aeabi_divmod_a32.S diff --git a/lib/aarch32/arm32_aeabi_divmod.c b/lib/aarch32/arm32_aeabi_divmod.c new file mode 100644 index 0000000000..a8f2e74200 --- /dev/null +++ b/lib/aarch32/arm32_aeabi_divmod.c @@ -0,0 +1,203 @@ +/* + * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +/* + * Form ABI specifications: + * int __aeabi_idiv(int numerator, int denominator); + * unsigned __aeabi_uidiv(unsigned numerator, unsigned denominator); + * + * typedef struct { int quot; int rem; } idiv_return; + * typedef struct { unsigned quot; unsigned rem; } uidiv_return; + * + * __value_in_regs idiv_return __aeabi_idivmod(int numerator, + * int *denominator); + * __value_in_regs uidiv_return __aeabi_uidivmod(unsigned *numerator, + * unsigned denominator); + */ + +/* struct qr - stores qutient/remainder to handle divmod EABI interfaces. */ +struct qr { + unsigned int q; /* computed quotient */ + unsigned int r; /* computed remainder */ + unsigned int q_n; /* specficies if quotient shall be negative */ + unsigned int r_n; /* specficies if remainder shall be negative */ +}; + +static void uint_div_qr(unsigned int numerator, unsigned int denominator, + struct qr *qr); + +/* returns in R0 and R1 by tail calling an asm function */ +unsigned int __aeabi_uidivmod(unsigned int numerator, unsigned int denominator); + +unsigned int __aeabi_uidiv(unsigned int numerator, unsigned int denominator); +unsigned int __aeabi_uimod(unsigned int numerator, unsigned int denominator); + +/* returns in R0 and R1 by tail calling an asm function */ +signed int __aeabi_idivmod(signed int numerator, signed int denominator); + +signed int __aeabi_idiv(signed int numerator, signed int denominator); +signed int __aeabi_imod(signed int numerator, signed int denominator); + +/* + * __ste_idivmod_ret_t __aeabi_idivmod(signed numerator, signed denominator) + * Numerator and Denominator are received in R0 and R1. 
+ * Where __ste_idivmod_ret_t is returned in R0 and R1. + * + * __ste_uidivmod_ret_t __aeabi_uidivmod(unsigned numerator, + * unsigned denominator) + * Numerator and Denominator are received in R0 and R1. + * Where __ste_uidivmod_ret_t is returned in R0 and R1. + */ +#ifdef __GNUC__ +signed int ret_idivmod_values(signed int quotient, signed int remainder); +unsigned int ret_uidivmod_values(unsigned int quotient, unsigned int remainder); +#else +#error "Compiler not supported" +#endif + +static void division_qr(unsigned int n, unsigned int p, struct qr *qr) +{ + unsigned int i = 1, q = 0; + + if (p == 0) { + qr->r = 0xFFFFFFFF; /* division by 0 */ + return; + } + + while ((p >> 31) == 0) { + i = i << 1; /* count the max division steps */ + p = p << 1; /* increase p until it has maximum size*/ + } + + while (i > 0) { + q = q << 1; /* write bit in q at index (size-1) */ + if (n >= p) { + n -= p; + q++; + } + p = p >> 1; /* decrease p */ + i = i >> 1; /* decrease remaining size in q */ + } + qr->r = n; + qr->q = q; +} + +static void uint_div_qr(unsigned int numerator, unsigned int denominator, + struct qr *qr) +{ + division_qr(numerator, denominator, qr); + + /* negate quotient and/or remainder according to requester */ + if (qr->q_n) + qr->q = -qr->q; + if (qr->r_n) + qr->r = -qr->r; +} + +unsigned int __aeabi_uidiv(unsigned int numerator, unsigned int denominator) +{ + struct qr qr = { .q_n = 0, .r_n = 0 }; + + uint_div_qr(numerator, denominator, &qr); + + return qr.q; +} + +unsigned int __aeabi_uimod(unsigned int numerator, unsigned int denominator) +{ + struct qr qr = { .q_n = 0, .r_n = 0 }; + + uint_div_qr(numerator, denominator, &qr); + + return qr.r; +} + +unsigned int __aeabi_uidivmod(unsigned int numerator, unsigned int denominator) +{ + struct qr qr = { .q_n = 0, .r_n = 0 }; + + uint_div_qr(numerator, denominator, &qr); + + return ret_uidivmod_values(qr.q, qr.r); +} + +signed int __aeabi_idiv(signed int numerator, signed int denominator) +{ + struct qr qr = { .q_n = 0, .r_n = 0 }; + + if (((numerator < 0) && (denominator > 0)) || + ((numerator > 0) && (denominator < 0))) + qr.q_n = 1; /* quotient shall be negate */ + + if (numerator < 0) { + numerator = -numerator; + qr.r_n = 1; /* remainder shall be negate */ + } + + if (denominator < 0) + denominator = -denominator; + + uint_div_qr(numerator, denominator, &qr); + + return qr.q; +} + +signed int __aeabi_imod(signed int numerator, signed int denominator) +{ + signed int s; + signed int i; + signed int j; + signed int h; + struct qr qr = { .q_n = 0, .r_n = 0 }; + + /* in case modulo of a power of 2 */ + for (i = 0, j = 0, h = 0, s = denominator; (s != 0) || (h > 1); i++) { + if (s & 1) { + j = i; + h++; + } + s = s >> 1; + } + if (h == 1) + return numerator >> j; + + if (((numerator < 0) && (denominator > 0)) || + ((numerator > 0) && (denominator < 0))) + qr.q_n = 1; /* quotient shall be negate */ + + if (numerator < 0) { + numerator = -numerator; + qr.r_n = 1; /* remainder shall be negate */ + } + + if (denominator < 0) + denominator = -denominator; + + uint_div_qr(numerator, denominator, &qr); + + return qr.r; +} + +signed int __aeabi_idivmod(signed int numerator, signed int denominator) +{ + struct qr qr = { .q_n = 0, .r_n = 0 }; + + if (((numerator < 0) && (denominator > 0)) || + ((numerator > 0) && (denominator < 0))) + qr.q_n = 1; /* quotient shall be negate */ + + if (numerator < 0) { + numerator = -numerator; + qr.r_n = 1; /* remainder shall be negate */ + } + + if (denominator < 0) + denominator = -denominator; + + 
uint_div_qr(numerator, denominator, &qr);
+
+	return ret_idivmod_values(qr.q, qr.r);
+}
diff --git a/lib/aarch32/arm32_aeabi_divmod_a32.S b/lib/aarch32/arm32_aeabi_divmod_a32.S
new file mode 100644
index 0000000000..6915dcd882
--- /dev/null
+++ b/lib/aarch32/arm32_aeabi_divmod_a32.S
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+
+/*
+ * EABI wrappers for the udivmod and idivmod functions
+ */
+
+	.globl	ret_uidivmod_values
+	.globl	ret_idivmod_values
+
+/*
+ * signed ret_idivmod_values(signed quot, signed rem);
+ * return quotient and remainder the EABI way (regs r0,r1)
+ */
+func ret_idivmod_values
+	bx	lr
+endfunc ret_idivmod_values
+
+/*
+ * unsigned ret_uidivmod_values(unsigned quot, unsigned rem);
+ * return quotient and remainder the EABI way (regs r0,r1)
+ */
+func ret_uidivmod_values
+	bx	lr
+endfunc ret_uidivmod_values
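Note that this patch adds the helper sources but does not wire them into
any build by itself. A hedged, hypothetical platform.mk fragment follows
(the variable and image choice are illustrative; an ARMv7 platform may
equally add the files to its other BL images) showing how an AArch32
SP_MIN build could pull them in:

  # Hypothetical example, not part of this series
  BL32_SOURCES		+=	lib/aarch32/arm32_aeabi_divmod.c	\
				lib/aarch32/arm32_aeabi_divmod_a32.S

The compiler's implicit calls to __aeabi_uidiv/__aeabi_uidivmod and their
signed variants, emitted for 32-bit '/' and '%' operations on cores
without the UDIV/SDIV instructions, then resolve to these implementations.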