From f62ad322695d16178db464dc062fe0af592c6780 Mon Sep 17 00:00:00 2001 From: Dimitris Papastamos Date: Thu, 30 Nov 2017 14:53:53 +0000 Subject: [PATCH 1/3] Workaround for CVE-2017-5715 on Cortex A57 and A72 Invalidate the Branch Target Buffer (BTB) on entry to EL3 by disabling and enabling the MMU. To achieve this without performing any branch instruction, a per-cpu vbar is installed which executes the workaround and then branches off to the corresponding vector entry in the main vector table. A side effect of this change is that the main vbar is configured before any reset handling. This is to allow the per-cpu reset function to override the vbar setting. This workaround is enabled by default on the affected CPUs. Change-Id: I97788d38463a5840a410e3cea85ed297a1678265 Signed-off-by: Dimitris Papastamos --- bl31/aarch64/runtime_exceptions.S | 20 +++ bl31/bl31.mk | 4 + docs/cpu-specific-build-macros.rst | 10 ++ include/common/aarch64/el3_common_macros.S | 20 +-- lib/cpus/aarch64/cortex_a57.S | 5 + lib/cpus/aarch64/cortex_a72.S | 6 + .../aarch64/workaround_cve_2017_5715_mmu.S | 114 ++++++++++++++++++ lib/cpus/cpu-ops.mk | 5 + 8 files changed, 174 insertions(+), 10 deletions(-) create mode 100644 lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S diff --git a/bl31/aarch64/runtime_exceptions.S b/bl31/aarch64/runtime_exceptions.S index d8fbb9b218..9b7735f1e4 100644 --- a/bl31/aarch64/runtime_exceptions.S +++ b/bl31/aarch64/runtime_exceptions.S @@ -14,6 +14,26 @@ .globl runtime_exceptions + .globl sync_exception_sp_el0 + .globl irq_sp_el0 + .globl fiq_sp_el0 + .globl serror_sp_el0 + + .globl sync_exception_sp_elx + .globl irq_sp_elx + .globl fiq_sp_elx + .globl serror_sp_elx + + .globl sync_exception_aarch64 + .globl irq_aarch64 + .globl fiq_aarch64 + .globl serror_aarch64 + + .globl sync_exception_aarch32 + .globl irq_aarch32 + .globl fiq_aarch32 + .globl serror_aarch32 + /* --------------------------------------------------------------------- * This macro handles Synchronous exceptions. * Only SMC exceptions are supported. diff --git a/bl31/bl31.mk b/bl31/bl31.mk index fdcc93139a..0732e05215 100644 --- a/bl31/bl31.mk +++ b/bl31/bl31.mk @@ -58,6 +58,10 @@ ifeq (${ENABLE_SVE_FOR_NS},1) BL31_SOURCES += lib/extensions/sve/sve.c endif +ifeq (${WORKAROUND_CVE_2017_5715},1) +BL31_SOURCES += lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S +endif + BL31_LINKERFILE := bl31/bl31.ld.S # Flag used to indicate if Crash reporting via console should be included diff --git a/docs/cpu-specific-build-macros.rst b/docs/cpu-specific-build-macros.rst index f74b45933e..014817d398 100644 --- a/docs/cpu-specific-build-macros.rst +++ b/docs/cpu-specific-build-macros.rst @@ -11,6 +11,15 @@ This document describes the various build options present in the CPU specific operations framework to enable errata workarounds and to enable optimizations for a specific CPU on a platform. +Security Vulnerability Workarounds +---------------------------------- + +ARM Trusted Firmware exports a series of build flags which control which +security vulnerability workarounds should be applied at runtime. + +- ``WORKAROUND_CVE_2017_5715``: Enables the security workaround for + `CVE-2017-5715`_. Defaults to 1. + CPU Errata Workarounds ---------------------- @@ -142,6 +151,7 @@ architecture that can be enabled by the platform as desired. *Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.* +.. _CVE-2017-5715: http://www.cve.mitre.org/cgi-bin/cvename.cgi?name=2017-5715 .. 
_Cortex-A53 MPCore Software Developers Errata Notice: http://infocenter.arm.com/help/topic/com.arm.doc.epm048406/Cortex_A53_MPCore_Software_Developers_Errata_Notice.pdf .. _Cortex-A57 MPCore Software Developers Errata Notice: http://infocenter.arm.com/help/topic/com.arm.doc.epm049219/cortex_a57_mpcore_software_developers_errata_notice.pdf .. _Cortex-A72 MPCore Software Developers Errata Notice: http://infocenter.arm.com/help/topic/com.arm.doc.epm012079/index.html diff --git a/include/common/aarch64/el3_common_macros.S b/include/common/aarch64/el3_common_macros.S index 63a0fa770f..defd4a24e3 100644 --- a/include/common/aarch64/el3_common_macros.S +++ b/include/common/aarch64/el3_common_macros.S @@ -13,7 +13,7 @@ /* * Helper macro to initialise EL3 registers we care about. */ - .macro el3_arch_init_common _exception_vectors + .macro el3_arch_init_common /* --------------------------------------------------------------------- * SCTLR_EL3 has already been initialised - read current value before * modifying. @@ -49,14 +49,6 @@ bl init_cpu_data_ptr #endif /* IMAGE_BL31 */ - /* --------------------------------------------------------------------- - * Set the exception vectors. - * --------------------------------------------------------------------- - */ - adr x0, \_exception_vectors - msr vbar_el3, x0 - isb - /* --------------------------------------------------------------------- * Initialise SCR_EL3, setting all fields rather than relying on hw. * All fields are architecturally UNKNOWN on reset. The following fields @@ -220,6 +212,14 @@ do_cold_boot: .endif /* _warm_boot_mailbox */ + /* --------------------------------------------------------------------- + * Set the exception vectors. + * --------------------------------------------------------------------- + */ + adr x0, \_exception_vectors + msr vbar_el3, x0 + isb + /* --------------------------------------------------------------------- * It is a cold boot. * Perform any processor specific actions upon reset e.g. cache, TLB @@ -228,7 +228,7 @@ */ bl reset_handler - el3_arch_init_common \_exception_vectors + el3_arch_init_common .if \_secondary_cold_boot /* ------------------------------------------------------------- diff --git a/lib/cpus/aarch64/cortex_a57.S b/lib/cpus/aarch64/cortex_a57.S index a720e984ae..683be47e52 100644 --- a/lib/cpus/aarch64/cortex_a57.S +++ b/lib/cpus/aarch64/cortex_a57.S @@ -383,6 +383,11 @@ func cortex_a57_reset_func bl errata_a57_859972_wa #endif +#if IMAGE_BL31 && WORKAROUND_CVE_2017_5715 + adr x0, workaround_mmu_runtime_exceptions + msr vbar_el3, x0 +#endif + /* --------------------------------------------- * Enable the SMP bit. * --------------------------------------------- diff --git a/lib/cpus/aarch64/cortex_a72.S b/lib/cpus/aarch64/cortex_a72.S index b034125656..93821b7493 100644 --- a/lib/cpus/aarch64/cortex_a72.S +++ b/lib/cpus/aarch64/cortex_a72.S @@ -110,6 +110,12 @@ func cortex_a72_reset_func mov x0, x18 bl errata_a72_859971_wa #endif + +#if IMAGE_BL31 && WORKAROUND_CVE_2017_5715 + adr x0, workaround_mmu_runtime_exceptions + msr vbar_el3, x0 +#endif + /* --------------------------------------------- * Enable the SMP bit. * --------------------------------------------- diff --git a/lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S b/lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S new file mode 100644 index 0000000000..f4781484c9 --- /dev/null +++ b/lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S @@ -0,0 +1,114 @@ +/* + * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved. 
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <context.h>
+
+ .globl workaround_mmu_runtime_exceptions
+
+vector_base workaround_mmu_runtime_exceptions
+
+ .macro apply_workaround
+ stp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+ mrs x0, sctlr_el3
+ /* Disable MMU */
+ bic x1, x0, #SCTLR_M_BIT
+ msr sctlr_el3, x1
+ isb
+ /* Restore MMU config */
+ msr sctlr_el3, x0
+ isb
+ ldp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+ .endm
+
+ /* ---------------------------------------------------------------------
+ * Current EL with SP_EL0 : 0x0 - 0x200
+ * ---------------------------------------------------------------------
+ */
+vector_entry workaround_mmu_sync_exception_sp_el0
+ b sync_exception_sp_el0
+ check_vector_size workaround_mmu_sync_exception_sp_el0
+
+vector_entry workaround_mmu_irq_sp_el0
+ b irq_sp_el0
+ check_vector_size workaround_mmu_irq_sp_el0
+
+vector_entry workaround_mmu_fiq_sp_el0
+ b fiq_sp_el0
+ check_vector_size workaround_mmu_fiq_sp_el0
+
+vector_entry workaround_mmu_serror_sp_el0
+ b serror_sp_el0
+ check_vector_size workaround_mmu_serror_sp_el0
+
+ /* ---------------------------------------------------------------------
+ * Current EL with SP_ELx: 0x200 - 0x400
+ * ---------------------------------------------------------------------
+ */
+vector_entry workaround_mmu_sync_exception_sp_elx
+ b sync_exception_sp_elx
+ check_vector_size workaround_mmu_sync_exception_sp_elx
+
+vector_entry workaround_mmu_irq_sp_elx
+ b irq_sp_elx
+ check_vector_size workaround_mmu_irq_sp_elx
+
+vector_entry workaround_mmu_fiq_sp_elx
+ b fiq_sp_elx
+ check_vector_size workaround_mmu_fiq_sp_elx
+
+vector_entry workaround_mmu_serror_sp_elx
+ b serror_sp_elx
+ check_vector_size workaround_mmu_serror_sp_elx
+
+ /* ---------------------------------------------------------------------
+ * Lower EL using AArch64 : 0x400 - 0x600
+ * ---------------------------------------------------------------------
+ */
+vector_entry workaround_mmu_sync_exception_aarch64
+ apply_workaround
+ b sync_exception_aarch64
+ check_vector_size workaround_mmu_sync_exception_aarch64
+
+vector_entry workaround_mmu_irq_aarch64
+ apply_workaround
+ b irq_aarch64
+ check_vector_size workaround_mmu_irq_aarch64
+
+vector_entry workaround_mmu_fiq_aarch64
+ apply_workaround
+ b fiq_aarch64
+ check_vector_size workaround_mmu_fiq_aarch64
+
+vector_entry workaround_mmu_serror_aarch64
+ apply_workaround
+ b serror_aarch64
+ check_vector_size workaround_mmu_serror_aarch64
+
+ /* ---------------------------------------------------------------------
+ * Lower EL using AArch32 : 0x600 - 0x800
+ * ---------------------------------------------------------------------
+ */
+vector_entry workaround_mmu_sync_exception_aarch32
+ apply_workaround
+ b sync_exception_aarch32
+ check_vector_size workaround_mmu_sync_exception_aarch32
+
+vector_entry workaround_mmu_irq_aarch32
+ apply_workaround
+ b irq_aarch32
+ check_vector_size workaround_mmu_irq_aarch32
+
+vector_entry workaround_mmu_fiq_aarch32
+ apply_workaround
+ b fiq_aarch32
+ check_vector_size workaround_mmu_fiq_aarch32
+
+vector_entry workaround_mmu_serror_aarch32
+ apply_workaround
+ b serror_aarch32
+ check_vector_size workaround_mmu_serror_aarch32
diff --git a/lib/cpus/cpu-ops.mk b/lib/cpus/cpu-ops.mk
index 31adfb4284..3ba8c1fcca 100644
--- a/lib/cpus/cpu-ops.mk
+++ b/lib/cpus/cpu-ops.mk
@@ -16,6 +16,8 @@ A53_DISABLE_NON_TEMPORAL_HINT ?=1
 # It is enabled by default.
A57_DISABLE_NON_TEMPORAL_HINT ?=1 +WORKAROUND_CVE_2017_5715 ?=1 + # Process SKIP_A57_L1_FLUSH_PWR_DWN flag $(eval $(call assert_boolean,SKIP_A57_L1_FLUSH_PWR_DWN)) $(eval $(call add_define,SKIP_A57_L1_FLUSH_PWR_DWN)) @@ -28,6 +30,9 @@ $(eval $(call add_define,A53_DISABLE_NON_TEMPORAL_HINT)) $(eval $(call assert_boolean,A57_DISABLE_NON_TEMPORAL_HINT)) $(eval $(call add_define,A57_DISABLE_NON_TEMPORAL_HINT)) +# Process WORKAROUND_CVE_2017_5715 flag +$(eval $(call assert_boolean,WORKAROUND_CVE_2017_5715)) +$(eval $(call add_define,WORKAROUND_CVE_2017_5715)) # CPU Errata Build flags. # These should be enabled by the platform if the erratum workaround needs to be From a1781a211a53df6a24345d774c0f2eaa5b675ca8 Mon Sep 17 00:00:00 2001 From: Dimitris Papastamos Date: Mon, 18 Dec 2017 13:46:21 +0000 Subject: [PATCH 2/3] Workaround for CVE-2017-5715 on Cortex A73 and A75 Invalidate the Branch Target Buffer (BTB) on entry to EL3 by temporarily dropping into AArch32 Secure-EL1 and executing the `BPIALL` instruction. This is achieved by using 3 vector tables. There is the runtime vector table which is used to handle exceptions and 2 additional tables which are required to implement this workaround. The additional tables are `vbar0` and `vbar1`. The sequence of events for handling a single exception is as follows: 1) Install vector table `vbar0` which saves the CPU context on entry to EL3 and sets up the Secure-EL1 context to execute in AArch32 mode with the MMU disabled and I$ enabled. This is the default vector table. 2) Before doing an ERET into Secure-EL1, switch vbar to point to another vector table `vbar1`. This is required to restore EL3 state when returning from the workaround, before proceeding with normal EL3 exception handling. 3) While in Secure-EL1, the `BPIALL` instruction is executed and an SMC call back to EL3 is performed. 4) On entry to EL3 from Secure-EL1, the saved context from step 1) is restored. The vbar is switched to point to `vbar0` in preparation to handle further exceptions. Finally a branch to the runtime vector table entry is taken to complete the handling of the original exception. This workaround is enabled by default on the affected CPUs. NOTE ==== There are 4 different stubs in Secure-EL1. Each stub corresponds to an exception type such as Sync/IRQ/FIQ/SError. Each stub will move a different value in `R0` before doing an SMC call back into EL3. Without this piece of information it would not be possible to know what the original exception type was as we cannot use `ESR_EL3` to distinguish between IRQs and FIQs. 
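For reference, the three instructions in each stub are emitted as raw AArch32 opcodes by the EMIT_* macros in the new workaround_cve_2017_5715_bpiall.S file; decoded by hand they correspond to the sequence below (Sync stub shown, the other stubs only change the immediate moved into R0):

  mcr p15, 0, r0, c7, c5, 6  @ BPIALL - invalidate all branch predictors
  mov r0, #1                 @ record the original exception type (1=Sync, 2=IRQ, 4=FIQ, 8=SError)
  smc #0                     @ trap back into EL3 so the saved context can be restored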
Change-Id: I90b32d14a3735290b48685d43c70c99daaa4b434 Signed-off-by: Dimitris Papastamos --- bl31/bl31.mk | 3 +- include/lib/aarch64/arch.h | 5 + include/lib/el3_runtime/aarch64/context.h | 26 +- lib/cpus/aarch64/cortex_a73.S | 5 + lib/cpus/aarch64/cortex_a75.S | 5 + .../aarch64/workaround_cve_2017_5715_bpiall.S | 372 ++++++++++++++++++ 6 files changed, 414 insertions(+), 2 deletions(-) create mode 100644 lib/cpus/aarch64/workaround_cve_2017_5715_bpiall.S diff --git a/bl31/bl31.mk b/bl31/bl31.mk index 0732e05215..8ff8f89bb8 100644 --- a/bl31/bl31.mk +++ b/bl31/bl31.mk @@ -59,7 +59,8 @@ BL31_SOURCES += lib/extensions/sve/sve.c endif ifeq (${WORKAROUND_CVE_2017_5715},1) -BL31_SOURCES += lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S +BL31_SOURCES += lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S \ + lib/cpus/aarch64/workaround_cve_2017_5715_bpiall.S endif BL31_LINKERFILE := bl31/bl31.ld.S diff --git a/include/lib/aarch64/arch.h b/include/lib/aarch64/arch.h index 96e2d5fe22..13a8c79774 100644 --- a/include/lib/aarch64/arch.h +++ b/include/lib/aarch64/arch.h @@ -337,6 +337,11 @@ #define SPSR_T_ARM U(0x0) #define SPSR_T_THUMB U(0x1) +#define SPSR_M_SHIFT U(4) +#define SPSR_M_MASK U(0x1) +#define SPSR_M_AARCH64 U(0x0) +#define SPSR_M_AARCH32 U(0x1) + #define DISABLE_ALL_EXCEPTIONS \ (DAIF_FIQ_BIT | DAIF_IRQ_BIT | DAIF_ABT_BIT | DAIF_DBG_BIT) diff --git a/include/lib/el3_runtime/aarch64/context.h b/include/lib/el3_runtime/aarch64/context.h index 58899049b8..5e212ec3fe 100644 --- a/include/lib/el3_runtime/aarch64/context.h +++ b/include/lib/el3_runtime/aarch64/context.h @@ -46,12 +46,26 @@ #define CTX_GPREG_SP_EL0 U(0xf8) #define CTX_GPREGS_END U(0x100) +#if WORKAROUND_CVE_2017_5715 +#define CTX_CVE_2017_5715_OFFSET (CTX_GPREGS_OFFSET + CTX_GPREGS_END) +#define CTX_CVE_2017_5715_QUAD0 U(0x0) +#define CTX_CVE_2017_5715_QUAD1 U(0x8) +#define CTX_CVE_2017_5715_QUAD2 U(0x10) +#define CTX_CVE_2017_5715_QUAD3 U(0x18) +#define CTX_CVE_2017_5715_QUAD4 U(0x20) +#define CTX_CVE_2017_5715_QUAD5 U(0x28) +#define CTX_CVE_2017_5715_END U(0x30) +#else +#define CTX_CVE_2017_5715_OFFSET CTX_GPREGS_OFFSET +#define CTX_CVE_2017_5715_END CTX_GPREGS_END +#endif + /******************************************************************************* * Constants that allow assembler code to access members of and the 'el3_state' * structure at their correct offsets. 
Note that some of the registers are only * 32-bits wide but are stored as 64-bit values for convenience ******************************************************************************/ -#define CTX_EL3STATE_OFFSET (CTX_GPREGS_OFFSET + CTX_GPREGS_END) +#define CTX_EL3STATE_OFFSET (CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_END) #define CTX_SCR_EL3 U(0x0) #define CTX_RUNTIME_SP U(0x8) #define CTX_SPSR_EL3 U(0x10) @@ -186,6 +200,9 @@ /* Constants to determine the size of individual context structures */ #define CTX_GPREG_ALL (CTX_GPREGS_END >> DWORD_SHIFT) +#if WORKAROUND_CVE_2017_5715 +#define CTX_CVE_2017_5715_ALL (CTX_CVE_2017_5715_END >> DWORD_SHIFT) +#endif #define CTX_SYSREG_ALL (CTX_SYSREGS_END >> DWORD_SHIFT) #if CTX_INCLUDE_FPREGS #define CTX_FPREG_ALL (CTX_FPREGS_END >> DWORD_SHIFT) @@ -201,6 +218,10 @@ */ DEFINE_REG_STRUCT(gp_regs, CTX_GPREG_ALL); +#if WORKAROUND_CVE_2017_5715 +DEFINE_REG_STRUCT(cve_2017_5715_regs, CTX_CVE_2017_5715_ALL); +#endif + /* * AArch64 EL1 system register context structure for preserving the * architectural state during switches from one security state to @@ -242,6 +263,9 @@ DEFINE_REG_STRUCT(el3_state, CTX_EL3STATE_ALL); */ typedef struct cpu_context { gp_regs_t gpregs_ctx; +#if WORKAROUND_CVE_2017_5715 + cve_2017_5715_regs_t cve_2017_5715_regs_ctx; +#endif el3_state_t el3state_ctx; el1_sys_regs_t sysregs_ctx; #if CTX_INCLUDE_FPREGS diff --git a/lib/cpus/aarch64/cortex_a73.S b/lib/cpus/aarch64/cortex_a73.S index f642816ef0..c43f07ec15 100644 --- a/lib/cpus/aarch64/cortex_a73.S +++ b/lib/cpus/aarch64/cortex_a73.S @@ -36,6 +36,11 @@ func cortex_a73_disable_smp endfunc cortex_a73_disable_smp func cortex_a73_reset_func +#if IMAGE_BL31 && WORKAROUND_CVE_2017_5715 + adr x0, workaround_bpiall_vbar0_runtime_exceptions + msr vbar_el3, x0 +#endif + /* --------------------------------------------- * Enable the SMP bit. * Clobbers : x0 diff --git a/lib/cpus/aarch64/cortex_a75.S b/lib/cpus/aarch64/cortex_a75.S index 4cab9e4fcc..1a9d904912 100644 --- a/lib/cpus/aarch64/cortex_a75.S +++ b/lib/cpus/aarch64/cortex_a75.S @@ -12,6 +12,11 @@ #include func cortex_a75_reset_func +#if IMAGE_BL31 && WORKAROUND_CVE_2017_5715 + adr x0, workaround_bpiall_vbar0_runtime_exceptions + msr vbar_el3, x0 +#endif + #if ENABLE_AMU /* Make sure accesses from EL0/EL1 and EL2 are not trapped to EL3 */ mrs x0, actlr_el3 diff --git a/lib/cpus/aarch64/workaround_cve_2017_5715_bpiall.S b/lib/cpus/aarch64/workaround_cve_2017_5715_bpiall.S new file mode 100644 index 0000000000..cd29266ed7 --- /dev/null +++ b/lib/cpus/aarch64/workaround_cve_2017_5715_bpiall.S @@ -0,0 +1,372 @@ +/* + * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved. 
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <context.h>
+
+ .globl workaround_bpiall_vbar0_runtime_exceptions
+
+#define EMIT_BPIALL 0xee070fd5
+#define EMIT_MOV_R0_IMM(v) 0xe3a0000##v
+#define EMIT_SMC 0xe1600070
+
+ .macro enter_workaround _stub_name
+ /* Save GP regs */
+ stp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+ stp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+ stp x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
+ stp x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
+ stp x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
+ stp x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
+ stp x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
+ stp x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
+ stp x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
+ stp x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
+ stp x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
+ stp x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
+ stp x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
+ stp x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
+ stp x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
+
+ adr x4, \_stub_name
+
+ /*
+ * Load SPSR_EL3 and VBAR_EL3. SPSR_EL3 is set up to have
+ * all interrupts masked in preparation to running the workaround
+ * stub in S-EL1. VBAR_EL3 points to the vector table that
+ * will handle the SMC back from the workaround stub.
+ */
+ ldp x0, x1, [x4, #0]
+
+ /*
+ * Load SCTLR_EL1 and ELR_EL3. SCTLR_EL1 is configured to disable
+ * the MMU in S-EL1. ELR_EL3 points to the appropriate stub in S-EL1.
+ */
+ ldp x2, x3, [x4, #16]
+
+ mrs x4, scr_el3
+ mrs x5, spsr_el3
+ mrs x6, elr_el3
+ mrs x7, sctlr_el1
+ mrs x8, esr_el3
+
+ /* Preserve system registers in the workaround context */
+ stp x4, x5, [sp, #CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_QUAD0]
+ stp x6, x7, [sp, #CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_QUAD2]
+ stp x8, x30, [sp, #CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_QUAD4]
+
+ /*
+ * Setting SCR_EL3 to all zeroes means that the NS, RW
+ * and SMD bits are configured as expected.
+ */
+ msr scr_el3, xzr
+
+ /*
+ * Reload system registers with the crafted values
+ * in preparation for entry in S-EL1.
+ */
+ msr spsr_el3, x0
+ msr vbar_el3, x1
+ msr sctlr_el1, x2
+ msr elr_el3, x3
+
+ eret
+ .endm
+
+ /* ---------------------------------------------------------------------
+ * This vector table is used at runtime to enter the workaround at
+ * AArch32 S-EL1 for Sync/IRQ/FIQ/SError exceptions. If the workaround
+ * is not enabled, the existing runtime exception vector table is used.
+ * ---------------------------------------------------------------------
+ */
+vector_base workaround_bpiall_vbar0_runtime_exceptions
+
+ /* ---------------------------------------------------------------------
+ * Current EL with SP_EL0 : 0x0 - 0x200
+ * ---------------------------------------------------------------------
+ */
+vector_entry workaround_bpiall_vbar0_sync_exception_sp_el0
+ b sync_exception_sp_el0
+ /*
+ * Since each vector table entry is 128 bytes, we can store the
+ * stub context in the unused space to minimize memory footprint.
+ */
+aarch32_stub_smc:
+ .word EMIT_BPIALL
+ .word EMIT_MOV_R0_IMM(1)
+ .word EMIT_SMC
+aarch32_stub_ctx_smc:
+ /* Mask all interrupts and set AArch32 Supervisor mode */
+ .quad (SPSR_AIF_MASK << SPSR_AIF_SHIFT | \
+ SPSR_M_AARCH32 << SPSR_M_SHIFT | \
+ MODE32_svc << MODE32_SHIFT)
+
+ /*
+ * VBAR_EL3 points to vbar1 which is the vector table
+ * used while the workaround is executing.
+ */ + .quad workaround_bpiall_vbar1_runtime_exceptions + + /* Setup SCTLR_EL1 with MMU off and I$ on */ + .quad SCTLR_AARCH32_EL1_RES1 | SCTLR_I_BIT + + /* ELR_EL3 is setup to point to the sync exception stub in AArch32 */ + .quad aarch32_stub_smc + check_vector_size workaround_bpiall_vbar0_sync_exception_sp_el0 + +vector_entry workaround_bpiall_vbar0_irq_sp_el0 + b irq_sp_el0 +aarch32_stub_irq: + .word EMIT_BPIALL + .word EMIT_MOV_R0_IMM(2) + .word EMIT_SMC +aarch32_stub_ctx_irq: + .quad (SPSR_AIF_MASK << SPSR_AIF_SHIFT | \ + SPSR_M_AARCH32 << SPSR_M_SHIFT | \ + MODE32_svc << MODE32_SHIFT) + .quad workaround_bpiall_vbar1_runtime_exceptions + .quad SCTLR_AARCH32_EL1_RES1 | SCTLR_I_BIT + .quad aarch32_stub_irq + check_vector_size workaround_bpiall_vbar0_irq_sp_el0 + +vector_entry workaround_bpiall_vbar0_fiq_sp_el0 + b fiq_sp_el0 +aarch32_stub_fiq: + .word EMIT_BPIALL + .word EMIT_MOV_R0_IMM(4) + .word EMIT_SMC +aarch32_stub_ctx_fiq: + .quad (SPSR_AIF_MASK << SPSR_AIF_SHIFT | \ + SPSR_M_AARCH32 << SPSR_M_SHIFT | \ + MODE32_svc << MODE32_SHIFT) + .quad workaround_bpiall_vbar1_runtime_exceptions + .quad SCTLR_AARCH32_EL1_RES1 | SCTLR_I_BIT + .quad aarch32_stub_fiq + check_vector_size workaround_bpiall_vbar0_fiq_sp_el0 + +vector_entry workaround_bpiall_vbar0_serror_sp_el0 + b serror_sp_el0 +aarch32_stub_serror: + .word EMIT_BPIALL + .word EMIT_MOV_R0_IMM(8) + .word EMIT_SMC +aarch32_stub_ctx_serror: + .quad (SPSR_AIF_MASK << SPSR_AIF_SHIFT | \ + SPSR_M_AARCH32 << SPSR_M_SHIFT | \ + MODE32_svc << MODE32_SHIFT) + .quad workaround_bpiall_vbar1_runtime_exceptions + .quad SCTLR_AARCH32_EL1_RES1 | SCTLR_I_BIT + .quad aarch32_stub_serror + check_vector_size workaround_bpiall_vbar0_serror_sp_el0 + + /* --------------------------------------------------------------------- + * Current EL with SP_ELx: 0x200 - 0x400 + * --------------------------------------------------------------------- + */ +vector_entry workaround_bpiall_vbar0_sync_exception_sp_elx + b sync_exception_sp_elx + check_vector_size workaround_bpiall_vbar0_sync_exception_sp_elx + +vector_entry workaround_bpiall_vbar0_irq_sp_elx + b irq_sp_elx + check_vector_size workaround_bpiall_vbar0_irq_sp_elx + +vector_entry workaround_bpiall_vbar0_fiq_sp_elx + b fiq_sp_elx + check_vector_size workaround_bpiall_vbar0_fiq_sp_elx + +vector_entry workaround_bpiall_vbar0_serror_sp_elx + b serror_sp_elx + check_vector_size workaround_bpiall_vbar0_serror_sp_elx + + /* --------------------------------------------------------------------- + * Lower EL using AArch64 : 0x400 - 0x600 + * --------------------------------------------------------------------- + */ +vector_entry workaround_bpiall_vbar0_sync_exception_aarch64 + enter_workaround aarch32_stub_ctx_smc + check_vector_size workaround_bpiall_vbar0_sync_exception_aarch64 + +vector_entry workaround_bpiall_vbar0_irq_aarch64 + enter_workaround aarch32_stub_ctx_irq + check_vector_size workaround_bpiall_vbar0_irq_aarch64 + +vector_entry workaround_bpiall_vbar0_fiq_aarch64 + enter_workaround aarch32_stub_ctx_fiq + check_vector_size workaround_bpiall_vbar0_fiq_aarch64 + +vector_entry workaround_bpiall_vbar0_serror_aarch64 + enter_workaround aarch32_stub_ctx_serror + check_vector_size workaround_bpiall_vbar0_serror_aarch64 + + /* --------------------------------------------------------------------- + * Lower EL using AArch32 : 0x600 - 0x800 + * --------------------------------------------------------------------- + */ +vector_entry workaround_bpiall_vbar0_sync_exception_aarch32 + enter_workaround aarch32_stub_ctx_smc 
+ check_vector_size workaround_bpiall_vbar0_sync_exception_aarch32 + +vector_entry workaround_bpiall_vbar0_irq_aarch32 + enter_workaround aarch32_stub_ctx_irq + check_vector_size workaround_bpiall_vbar0_irq_aarch32 + +vector_entry workaround_bpiall_vbar0_fiq_aarch32 + enter_workaround aarch32_stub_ctx_fiq + check_vector_size workaround_bpiall_vbar0_fiq_aarch32 + +vector_entry workaround_bpiall_vbar0_serror_aarch32 + enter_workaround aarch32_stub_ctx_serror + check_vector_size workaround_bpiall_vbar0_serror_aarch32 + + /* --------------------------------------------------------------------- + * This vector table is used while the workaround is executing. It + * installs a simple SMC handler to allow the Sync/IRQ/FIQ/SError + * workaround stubs to enter EL3 from S-EL1. It restores the previous + * EL3 state before proceeding with the normal runtime exception vector. + * --------------------------------------------------------------------- + */ +vector_base workaround_bpiall_vbar1_runtime_exceptions + + /* --------------------------------------------------------------------- + * Current EL with SP_EL0 : 0x0 - 0x200 (UNUSED) + * --------------------------------------------------------------------- + */ +vector_entry workaround_bpiall_vbar1_sync_exception_sp_el0 + b report_unhandled_exception + check_vector_size workaround_bpiall_vbar1_sync_exception_sp_el0 + +vector_entry workaround_bpiall_vbar1_irq_sp_el0 + b report_unhandled_interrupt + check_vector_size workaround_bpiall_vbar1_irq_sp_el0 + +vector_entry workaround_bpiall_vbar1_fiq_sp_el0 + b report_unhandled_interrupt + check_vector_size workaround_bpiall_vbar1_fiq_sp_el0 + +vector_entry workaround_bpiall_vbar1_serror_sp_el0 + b report_unhandled_exception + check_vector_size workaround_bpiall_vbar1_serror_sp_el0 + + /* --------------------------------------------------------------------- + * Current EL with SP_ELx: 0x200 - 0x400 (UNUSED) + * --------------------------------------------------------------------- + */ +vector_entry workaround_bpiall_vbar1_sync_exception_sp_elx + b report_unhandled_exception + check_vector_size workaround_bpiall_vbar1_sync_exception_sp_elx + +vector_entry workaround_bpiall_vbar1_irq_sp_elx + b report_unhandled_interrupt + check_vector_size workaround_bpiall_vbar1_irq_sp_elx + +vector_entry workaround_bpiall_vbar1_fiq_sp_elx + b report_unhandled_interrupt + check_vector_size workaround_bpiall_vbar1_fiq_sp_elx + +vector_entry workaround_bpiall_vbar1_serror_sp_elx + b report_unhandled_exception + check_vector_size workaround_bpiall_vbar1_serror_sp_elx + + /* --------------------------------------------------------------------- + * Lower EL using AArch64 : 0x400 - 0x600 (UNUSED) + * --------------------------------------------------------------------- + */ +vector_entry workaround_bpiall_vbar1_sync_exception_aarch64 + b report_unhandled_exception + check_vector_size workaround_bpiall_vbar1_sync_exception_aarch64 + +vector_entry workaround_bpiall_vbar1_irq_aarch64 + b report_unhandled_interrupt + check_vector_size workaround_bpiall_vbar1_irq_aarch64 + +vector_entry workaround_bpiall_vbar1_fiq_aarch64 + b report_unhandled_interrupt + check_vector_size workaround_bpiall_vbar1_fiq_aarch64 + +vector_entry workaround_bpiall_vbar1_serror_aarch64 + b report_unhandled_exception + check_vector_size workaround_bpiall_vbar1_serror_aarch64 + + /* --------------------------------------------------------------------- + * Lower EL using AArch32 : 0x600 - 0x800 + * 
---------------------------------------------------------------------
+ */
+vector_entry workaround_bpiall_vbar1_sync_exception_aarch32
+ /* Restore register state from the workaround context */
+ ldp x2, x3, [sp, #CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_QUAD0]
+ ldp x4, x5, [sp, #CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_QUAD2]
+ ldp x6, x30, [sp, #CTX_CVE_2017_5715_OFFSET + CTX_CVE_2017_5715_QUAD4]
+
+ /* Apply the restored system register state */
+ msr scr_el3, x2
+ msr spsr_el3, x3
+ msr elr_el3, x4
+ msr sctlr_el1, x5
+ msr esr_el3, x6
+
+ /*
+ * Workaround is complete, so swap VBAR_EL3 to point
+ * to workaround entry table in preparation for subsequent
+ * Sync/IRQ/FIQ/SError exceptions.
+ */
+ adr x2, workaround_bpiall_vbar0_runtime_exceptions
+ msr vbar_el3, x2
+
+ /*
+ * Restore all GP regs except x0 and x1. The value in x0
+ * indicates the type of the original exception.
+ */
+ ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+ ldp x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
+ ldp x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
+ ldp x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
+ ldp x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
+ ldp x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
+ ldp x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
+ ldp x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
+ ldp x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
+ ldp x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
+ ldp x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
+ ldp x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
+ ldp x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
+ ldp x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
+
+ /*
+ * Each of these handlers will first restore x0 and x1 from
+ * the context and then branch to the common implementation for
+ * each of the exception types.
+ */
+ tbnz x0, #1, workaround_bpiall_vbar1_irq
+ tbnz x0, #2, workaround_bpiall_vbar1_fiq
+ tbnz x0, #3, workaround_bpiall_vbar1_serror
+
+ /* Fallthrough case for Sync exception */
+ ldp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+ b sync_exception_aarch64
+ check_vector_size workaround_bpiall_vbar1_sync_exception_aarch32
+
+vector_entry workaround_bpiall_vbar1_irq_aarch32
+ b report_unhandled_interrupt
+workaround_bpiall_vbar1_irq:
+ ldp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+ b irq_aarch64
+ check_vector_size workaround_bpiall_vbar1_irq_aarch32
+
+vector_entry workaround_bpiall_vbar1_fiq_aarch32
+ b report_unhandled_interrupt
+workaround_bpiall_vbar1_fiq:
+ ldp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+ b fiq_aarch64
+ check_vector_size workaround_bpiall_vbar1_fiq_aarch32
+
+vector_entry workaround_bpiall_vbar1_serror_aarch32
+ b report_unhandled_exception
+workaround_bpiall_vbar1_serror:
+ ldp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+ b serror_aarch64
+ check_vector_size workaround_bpiall_vbar1_serror_aarch32

From 780edd86a0b413c3620c5c42fd123a78b5b1587a Mon Sep 17 00:00:00 2001
From: Dimitris Papastamos
Date: Tue, 2 Jan 2018 15:53:01 +0000
Subject: [PATCH 3/3] Use PFR0 to identify need for mitigation of CVE-2017-5715

If the CSV2 field reads as 1 then branch targets trained in one
context cannot affect speculative execution in a different context.
In that case skip the workaround on Cortex A75.
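For reference, CSV2 is the 4-bit field at bits [59:56] of ID_AA64PFR0_EL1 (see the ID_AA64PFR0_CSV2_* definitions added to arch.h below), so the gate added to cortex_a75_reset_func reduces to the following sketch:

  mrs  x0, id_aa64pfr0_el1   /* read AArch64 Processor Feature Register 0 */
  ubfx x0, x0, #56, #4       /* extract CSV2 (ID_AA64PFR0_CSV2_SHIFT/LENGTH) */
  cmp  x0, #1                /* CSV2 == 1: hardware already isolates branch predictor state */
  b.eq 1f                    /* skip installing the BPIALL workaround vectors */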
Change-Id: I4d5504cba516a67311fb5f0657b08f72909cbd38
Signed-off-by: Dimitris Papastamos
---
 include/lib/aarch64/arch.h    |  3 +++
 lib/cpus/aarch64/cortex_a75.S | 10 ++++++++++
 2 files changed, 13 insertions(+)

diff --git a/include/lib/aarch64/arch.h b/include/lib/aarch64/arch.h
index 13a8c79774..7f3e9faadb 100644
--- a/include/lib/aarch64/arch.h
+++ b/include/lib/aarch64/arch.h
@@ -117,6 +117,9 @@
 #define ID_AA64PFR0_SVE_SHIFT U(32)
 #define ID_AA64PFR0_SVE_MASK U(0xf)
 #define ID_AA64PFR0_SVE_LENGTH U(4)
+#define ID_AA64PFR0_CSV2_SHIFT U(56)
+#define ID_AA64PFR0_CSV2_MASK U(0xf)
+#define ID_AA64PFR0_CSV2_LENGTH U(4)
 
 /* ID_AA64DFR0_EL1.PMS definitions (for ARMv8.2+) */
 #define ID_AA64DFR0_PMS_SHIFT U(32)
diff --git a/lib/cpus/aarch64/cortex_a75.S b/lib/cpus/aarch64/cortex_a75.S
index 1a9d904912..9b54b48f7d 100644
--- a/lib/cpus/aarch64/cortex_a75.S
+++ b/lib/cpus/aarch64/cortex_a75.S
@@ -13,8 +13,18 @@
 
 func cortex_a75_reset_func
 #if IMAGE_BL31 && WORKAROUND_CVE_2017_5715
+ mrs x0, id_aa64pfr0_el1
+ ubfx x0, x0, #ID_AA64PFR0_CSV2_SHIFT, #ID_AA64PFR0_CSV2_LENGTH
+ /*
+ * If the field equals 1 then branch targets trained in one
+ * context cannot affect speculative execution in a different context.
+ */
+ cmp x0, #1
+ b.eq 1f
+
 adr x0, workaround_bpiall_vbar0_runtime_exceptions
 msr vbar_el3, x0
+1:
 #endif
 
 #if ENABLE_AMU