From e0ae9fab61263bf7ec5beaa8256c573f09c744f0 Mon Sep 17 00:00:00 2001 From: Sandrine Bailleux Date: Tue, 24 May 2016 16:56:03 +0100 Subject: [PATCH 1/2] Introduce some helper macros for exception vectors This patch introduces some assembler macros to simplify the declaration of the exception vectors. It abstracts the section the exception code is put into as well as the alignments constraints mandated by the ARMv8 architecture. For all TF images, the exception code has been updated to make use of these macros. This patch also updates some invalid comments in the exception vector code. Change-Id: I35737b8f1c8c24b6da89b0a954c8152a4096fa95 --- bl1/aarch64/bl1_exceptions.S | 61 ++++++++++---------------- bl31/aarch64/runtime_exceptions.S | 59 ++++++++++--------------- bl32/tsp/aarch64/tsp_exceptions.S | 66 ++++++++++------------------ common/aarch64/early_exceptions.S | 72 ++++++++++++------------------- include/common/asm_macros.S | 23 +++++++++- 5 files changed, 116 insertions(+), 165 deletions(-) diff --git a/bl1/aarch64/bl1_exceptions.S b/bl1/aarch64/bl1_exceptions.S index 9ff6a57b0a..68f9b7aedd 100644 --- a/bl1/aarch64/bl1_exceptions.S +++ b/bl1/aarch64/bl1_exceptions.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -34,42 +34,37 @@ #include #include +/* ----------------------------------------------------------------------------- + * Very simple stackless exception handlers used by BL1. + * ----------------------------------------------------------------------------- + */ .globl bl1_exceptions - .section .vectors, "ax"; .align 11 +vector_base bl1_exceptions - /* ----------------------------------------------------- - * Very simple stackless exception handlers used by BL1. 
- * ----------------------------------------------------- - */ - .align 7 -bl1_exceptions: /* ----------------------------------------------------- * Current EL with SP0 : 0x0 - 0x200 * ----------------------------------------------------- */ -SynchronousExceptionSP0: +vector_entry SynchronousExceptionSP0 mov x0, #SYNC_EXCEPTION_SP_EL0 bl plat_report_exception b SynchronousExceptionSP0 check_vector_size SynchronousExceptionSP0 - .align 7 -IrqSP0: +vector_entry IrqSP0 mov x0, #IRQ_SP_EL0 bl plat_report_exception b IrqSP0 check_vector_size IrqSP0 - .align 7 -FiqSP0: +vector_entry FiqSP0 mov x0, #FIQ_SP_EL0 bl plat_report_exception b FiqSP0 check_vector_size FiqSP0 - .align 7 -SErrorSP0: +vector_entry SErrorSP0 mov x0, #SERROR_SP_EL0 bl plat_report_exception b SErrorSP0 @@ -79,29 +74,25 @@ SErrorSP0: * Current EL with SPx: 0x200 - 0x400 * ----------------------------------------------------- */ - .align 7 -SynchronousExceptionSPx: +vector_entry SynchronousExceptionSPx mov x0, #SYNC_EXCEPTION_SP_ELX bl plat_report_exception b SynchronousExceptionSPx check_vector_size SynchronousExceptionSPx - .align 7 -IrqSPx: +vector_entry IrqSPx mov x0, #IRQ_SP_ELX bl plat_report_exception b IrqSPx check_vector_size IrqSPx - .align 7 -FiqSPx: +vector_entry FiqSPx mov x0, #FIQ_SP_ELX bl plat_report_exception b FiqSPx check_vector_size FiqSPx - .align 7 -SErrorSPx: +vector_entry SErrorSPx mov x0, #SERROR_SP_ELX bl plat_report_exception b SErrorSPx @@ -111,8 +102,7 @@ SErrorSPx: * Lower EL using AArch64 : 0x400 - 0x600 * ----------------------------------------------------- */ - .align 7 -SynchronousExceptionA64: +vector_entry SynchronousExceptionA64 /* Enable the SError interrupt */ msr daifclr, #DAIF_ABT_BIT @@ -127,22 +117,19 @@ SynchronousExceptionA64: b smc_handler64 check_vector_size SynchronousExceptionA64 - .align 7 -IrqA64: +vector_entry IrqA64 mov x0, #IRQ_AARCH64 bl plat_report_exception b IrqA64 check_vector_size IrqA64 - .align 7 -FiqA64: +vector_entry FiqA64 mov x0, #FIQ_AARCH64 bl plat_report_exception b FiqA64 check_vector_size FiqA64 - .align 7 -SErrorA64: +vector_entry SErrorA64 mov x0, #SERROR_AARCH64 bl plat_report_exception b SErrorA64 @@ -152,29 +139,25 @@ SErrorA64: * Lower EL using AArch32 : 0x600 - 0x800 * ----------------------------------------------------- */ - .align 7 -SynchronousExceptionA32: +vector_entry SynchronousExceptionA32 mov x0, #SYNC_EXCEPTION_AARCH32 bl plat_report_exception b SynchronousExceptionA32 check_vector_size SynchronousExceptionA32 - .align 7 -IrqA32: +vector_entry IrqA32 mov x0, #IRQ_AARCH32 bl plat_report_exception b IrqA32 check_vector_size IrqA32 - .align 7 -FiqA32: +vector_entry FiqA32 mov x0, #FIQ_AARCH32 bl plat_report_exception b FiqA32 check_vector_size FiqA32 - .align 7 -SErrorA32: +vector_entry SErrorA32 mov x0, #SERROR_AARCH32 bl plat_report_exception b SErrorA32 diff --git a/bl31/aarch64/runtime_exceptions.S b/bl31/aarch64/runtime_exceptions.S index dc11e0a72f..799062efdb 100644 --- a/bl31/aarch64/runtime_exceptions.S +++ b/bl31/aarch64/runtime_exceptions.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -161,14 +161,14 @@ interrupt_exit_\label: str x18, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0] .endm - .section .vectors, "ax"; .align 11 - .align 7 -runtime_exceptions: + +vector_base runtime_exceptions + /* ----------------------------------------------------- * Current EL with _sp_el0 : 0x0 - 0x200 * ----------------------------------------------------- */ -sync_exception_sp_el0: +vector_entry sync_exception_sp_el0 /* ----------------------------------------------------- * We don't expect any synchronous exceptions from EL3 * ----------------------------------------------------- @@ -176,23 +176,22 @@ sync_exception_sp_el0: bl report_unhandled_exception check_vector_size sync_exception_sp_el0 - .align 7 /* ----------------------------------------------------- * EL3 code is non-reentrant. Any asynchronous exception * is a serious error. Loop infinitely. * ----------------------------------------------------- */ -irq_sp_el0: +vector_entry irq_sp_el0 bl report_unhandled_interrupt check_vector_size irq_sp_el0 - .align 7 -fiq_sp_el0: + +vector_entry fiq_sp_el0 bl report_unhandled_interrupt check_vector_size fiq_sp_el0 - .align 7 -serror_sp_el0: + +vector_entry serror_sp_el0 bl report_unhandled_exception check_vector_size serror_sp_el0 @@ -200,8 +199,8 @@ serror_sp_el0: * Current EL with SPx: 0x200 - 0x400 * ----------------------------------------------------- */ - .align 7 -sync_exception_sp_elx: + +vector_entry sync_exception_sp_elx /* ----------------------------------------------------- * This exception will trigger if anything went wrong * during a previous exception entry or exit or while @@ -212,18 +211,15 @@ sync_exception_sp_elx: bl report_unhandled_exception check_vector_size sync_exception_sp_elx - .align 7 -irq_sp_elx: +vector_entry irq_sp_elx bl report_unhandled_interrupt check_vector_size irq_sp_elx - .align 7 -fiq_sp_elx: +vector_entry fiq_sp_elx bl report_unhandled_interrupt check_vector_size fiq_sp_elx - .align 7 -serror_sp_elx: +vector_entry serror_sp_elx bl report_unhandled_exception check_vector_size serror_sp_elx @@ -231,8 +227,7 @@ serror_sp_elx: * Lower EL using AArch64 : 0x400 - 0x600 * ----------------------------------------------------- */ - .align 7 -sync_exception_aarch64: +vector_entry sync_exception_aarch64 /* ----------------------------------------------------- * This exception vector will be the entry point for * SMCs and traps that are unhandled at lower ELs most @@ -244,23 +239,20 @@ sync_exception_aarch64: handle_sync_exception check_vector_size sync_exception_aarch64 - .align 7 /* ----------------------------------------------------- * Asynchronous exceptions from lower ELs are not * currently supported. Report their occurrence. 
* ----------------------------------------------------- */ -irq_aarch64: +vector_entry irq_aarch64 handle_interrupt_exception irq_aarch64 check_vector_size irq_aarch64 - .align 7 -fiq_aarch64: +vector_entry fiq_aarch64 handle_interrupt_exception fiq_aarch64 check_vector_size fiq_aarch64 - .align 7 -serror_aarch64: +vector_entry serror_aarch64 bl report_unhandled_exception check_vector_size serror_aarch64 @@ -268,8 +260,7 @@ serror_aarch64: * Lower EL using AArch32 : 0x600 - 0x800 * ----------------------------------------------------- */ - .align 7 -sync_exception_aarch32: +vector_entry sync_exception_aarch32 /* ----------------------------------------------------- * This exception vector will be the entry point for * SMCs and traps that are unhandled at lower ELs most @@ -281,27 +272,23 @@ sync_exception_aarch32: handle_sync_exception check_vector_size sync_exception_aarch32 - .align 7 /* ----------------------------------------------------- * Asynchronous exceptions from lower ELs are not * currently supported. Report their occurrence. * ----------------------------------------------------- */ -irq_aarch32: +vector_entry irq_aarch32 handle_interrupt_exception irq_aarch32 check_vector_size irq_aarch32 - .align 7 -fiq_aarch32: +vector_entry fiq_aarch32 handle_interrupt_exception fiq_aarch32 check_vector_size fiq_aarch32 - .align 7 -serror_aarch32: +vector_entry serror_aarch32 bl report_unhandled_exception check_vector_size serror_aarch32 - .align 7 /* ----------------------------------------------------- * The following code handles secure monitor calls. diff --git a/bl32/tsp/aarch64/tsp_exceptions.S b/bl32/tsp/aarch64/tsp_exceptions.S index edcfb718dd..20e40dfb02 100644 --- a/bl32/tsp/aarch64/tsp_exceptions.S +++ b/bl32/tsp/aarch64/tsp_exceptions.S @@ -28,10 +28,10 @@ * POSSIBILITY OF SUCH DAMAGE. */ -#include #include -#include #include +#include +#include /* ---------------------------------------------------- @@ -98,110 +98,90 @@ interrupt_exit_\label: * TSP exception handlers. * ----------------------------------------------------- */ - .section .vectors, "ax"; .align 11 - - .align 7 -tsp_exceptions: +vector_base tsp_exceptions /* ----------------------------------------------------- - * Current EL with _sp_el0 : 0x0 - 0x180. No exceptions + * Current EL with _sp_el0 : 0x0 - 0x200. No exceptions * are expected and treated as irrecoverable errors. * ----------------------------------------------------- */ -sync_exception_sp_el0: +vector_entry sync_exception_sp_el0 bl plat_panic_handler check_vector_size sync_exception_sp_el0 - .align 7 - -irq_sp_el0: +vector_entry irq_sp_el0 bl plat_panic_handler check_vector_size irq_sp_el0 - .align 7 -fiq_sp_el0: +vector_entry fiq_sp_el0 bl plat_panic_handler check_vector_size fiq_sp_el0 - .align 7 -serror_sp_el0: +vector_entry serror_sp_el0 bl plat_panic_handler check_vector_size serror_sp_el0 /* ----------------------------------------------------- - * Current EL with SPx: 0x200 - 0x380. Only IRQs/FIQs + * Current EL with SPx: 0x200 - 0x400. 
Only IRQs/FIQs * are expected and handled * ----------------------------------------------------- */ - .align 7 -sync_exception_sp_elx: +vector_entry sync_exception_sp_elx bl plat_panic_handler check_vector_size sync_exception_sp_elx - .align 7 -irq_sp_elx: +vector_entry irq_sp_elx handle_tsp_interrupt irq_sp_elx check_vector_size irq_sp_elx - .align 7 -fiq_sp_elx: +vector_entry fiq_sp_elx handle_tsp_interrupt fiq_sp_elx check_vector_size fiq_sp_elx - .align 7 -serror_sp_elx: +vector_entry serror_sp_elx bl plat_panic_handler check_vector_size serror_sp_elx /* ----------------------------------------------------- - * Lower EL using AArch64 : 0x400 - 0x580. No exceptions + * Lower EL using AArch64 : 0x400 - 0x600. No exceptions * are handled since TSP does not implement a lower EL * ----------------------------------------------------- */ - .align 7 -sync_exception_aarch64: +vector_entry sync_exception_aarch64 bl plat_panic_handler check_vector_size sync_exception_aarch64 - .align 7 -irq_aarch64: +vector_entry irq_aarch64 bl plat_panic_handler check_vector_size irq_aarch64 - .align 7 -fiq_aarch64: +vector_entry fiq_aarch64 bl plat_panic_handler check_vector_size fiq_aarch64 - .align 7 -serror_aarch64: +vector_entry serror_aarch64 bl plat_panic_handler check_vector_size serror_aarch64 /* ----------------------------------------------------- - * Lower EL using AArch32 : 0x600 - 0x780. No exceptions + * Lower EL using AArch32 : 0x600 - 0x800. No exceptions * handled since the TSP does not implement a lower EL. * ----------------------------------------------------- */ - .align 7 -sync_exception_aarch32: +vector_entry sync_exception_aarch32 bl plat_panic_handler check_vector_size sync_exception_aarch32 - .align 7 -irq_aarch32: +vector_entry irq_aarch32 bl plat_panic_handler check_vector_size irq_aarch32 - .align 7 -fiq_aarch32: +vector_entry fiq_aarch32 bl plat_panic_handler check_vector_size fiq_aarch32 - .align 7 -serror_aarch32: +vector_entry serror_aarch32 bl plat_panic_handler check_vector_size serror_aarch32 - .align 7 diff --git a/common/aarch64/early_exceptions.S b/common/aarch64/early_exceptions.S index 64bfcd0f0c..0ef5950751 100644 --- a/common/aarch64/early_exceptions.S +++ b/common/aarch64/early_exceptions.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -31,140 +31,122 @@ #include #include +/* ----------------------------------------------------------------------------- + * Very simple stackless exception handlers used by BL2 and BL31 stages. + * BL31 uses them before stacks are setup. BL2 uses them throughout. + * ----------------------------------------------------------------------------- + */ .globl early_exceptions - .section .vectors, "ax"; .align 11 +vector_base early_exceptions /* ----------------------------------------------------- - * Very simple stackless exception handlers used by BL2 - * and BL31 bootloader stages. BL31 uses them before - * stacks are setup. BL2 uses them throughout. 
+ * Current EL with SP0 : 0x0 - 0x200 * ----------------------------------------------------- */ - .align 7 -early_exceptions: - /* ----------------------------------------------------- - * Current EL with SP0 : 0x0 - 0x180 - * ----------------------------------------------------- - */ -SynchronousExceptionSP0: +vector_entry SynchronousExceptionSP0 mov x0, #SYNC_EXCEPTION_SP_EL0 bl plat_report_exception b SynchronousExceptionSP0 check_vector_size SynchronousExceptionSP0 - .align 7 -IrqSP0: +vector_entry IrqSP0 mov x0, #IRQ_SP_EL0 bl plat_report_exception b IrqSP0 check_vector_size IrqSP0 - .align 7 -FiqSP0: +vector_entry FiqSP0 mov x0, #FIQ_SP_EL0 bl plat_report_exception b FiqSP0 check_vector_size FiqSP0 - .align 7 -SErrorSP0: +vector_entry SErrorSP0 mov x0, #SERROR_SP_EL0 bl plat_report_exception b SErrorSP0 check_vector_size SErrorSP0 /* ----------------------------------------------------- - * Current EL with SPx: 0x200 - 0x380 + * Current EL with SPx: 0x200 - 0x400 * ----------------------------------------------------- */ - .align 7 -SynchronousExceptionSPx: +vector_entry SynchronousExceptionSPx mov x0, #SYNC_EXCEPTION_SP_ELX bl plat_report_exception b SynchronousExceptionSPx check_vector_size SynchronousExceptionSPx - .align 7 -IrqSPx: +vector_entry IrqSPx mov x0, #IRQ_SP_ELX bl plat_report_exception b IrqSPx check_vector_size IrqSPx - .align 7 -FiqSPx: +vector_entry FiqSPx mov x0, #FIQ_SP_ELX bl plat_report_exception b FiqSPx check_vector_size FiqSPx - .align 7 -SErrorSPx: +vector_entry SErrorSPx mov x0, #SERROR_SP_ELX bl plat_report_exception b SErrorSPx check_vector_size SErrorSPx /* ----------------------------------------------------- - * Lower EL using AArch64 : 0x400 - 0x580 + * Lower EL using AArch64 : 0x400 - 0x600 * ----------------------------------------------------- */ - .align 7 -SynchronousExceptionA64: +vector_entry SynchronousExceptionA64 mov x0, #SYNC_EXCEPTION_AARCH64 bl plat_report_exception b SynchronousExceptionA64 check_vector_size SynchronousExceptionA64 - .align 7 -IrqA64: +vector_entry IrqA64 mov x0, #IRQ_AARCH64 bl plat_report_exception b IrqA64 check_vector_size IrqA64 - .align 7 -FiqA64: +vector_entry FiqA64 mov x0, #FIQ_AARCH64 bl plat_report_exception b FiqA64 check_vector_size FiqA64 - .align 7 -SErrorA64: +vector_entry SErrorA64 mov x0, #SERROR_AARCH64 bl plat_report_exception b SErrorA64 check_vector_size SErrorA64 /* ----------------------------------------------------- - * Lower EL using AArch32 : 0x0 - 0x180 + * Lower EL using AArch32 : 0x600 - 0x800 * ----------------------------------------------------- */ - .align 7 -SynchronousExceptionA32: +vector_entry SynchronousExceptionA32 mov x0, #SYNC_EXCEPTION_AARCH32 bl plat_report_exception b SynchronousExceptionA32 check_vector_size SynchronousExceptionA32 - .align 7 -IrqA32: +vector_entry IrqA32 mov x0, #IRQ_AARCH32 bl plat_report_exception b IrqA32 check_vector_size IrqA32 - .align 7 -FiqA32: +vector_entry FiqA32 mov x0, #FIQ_AARCH32 bl plat_report_exception b FiqA32 check_vector_size FiqA32 - .align 7 -SErrorA32: +vector_entry SErrorA32 mov x0, #SERROR_AARCH32 bl plat_report_exception b SErrorA32 diff --git a/include/common/asm_macros.S b/include/common/asm_macros.S index a331c051af..00c7d88bed 100644 --- a/include/common/asm_macros.S +++ b/include/common/asm_macros.S @@ -66,11 +66,30 @@ b.ne $label .endm + /* + * Declare the exception vector table, enforcing it is aligned on a + * 2KB boundary, as required by the ARMv8 architecture. 
+ */ + .macro vector_base label + .section .vectors, "ax" + .align 11 + \label: + .endm /* - * This macro verifies that the a given vector doesn't exceed the + * Create an entry in the exception vector table, enforcing it is + * aligned on a 128-byte boundary, as required by the ARMv8 architecture. + */ + .macro vector_entry label + .section .vectors, "ax" + .align 7 + \label: + .endm + + /* + * This macro verifies that the given vector doesn't exceed the * architectural limit of 32 instructions. This is meant to be placed - * immedately after the last instruction in the vector. It takes the + * immediately after the last instruction in the vector. It takes the * vector entry as the parameter */ .macro check_vector_size since From 79627dc37259781e578c47e1e63856dd0424b2a2 Mon Sep 17 00:00:00 2001 From: Sandrine Bailleux Date: Tue, 24 May 2016 16:22:59 +0100 Subject: [PATCH 2/2] Fill exception vectors with zero bytes The documentation of the GNU assembler specifies the following about the .align assembler directive: "the padding bytes are normally zero. However, on some systems, if the section is marked as containing code and the fill value is omitted, the space is filled with no-op instructions." (see https://sourceware.org/binutils/docs/as/Align.html) When building Trusted Firmware, the AArch64 GNU assembler uses a mix of zero bytes and no-op instructions as the padding bytes to align exception vectors. This patch mandates to use zero bytes to be stored in the padding bytes in the exception vectors. In the AArch64 instruction set, no valid instruction encodes as zero so this effectively inserts illegal instructions. Should this code end up being executed for any reason, it would crash immediately. This gives us an extra protection against misbehaving code at no extra cost. Change-Id: I4f2abb39d0320ca0f9d467fc5af0cb92ae297351 --- include/common/asm_macros.S | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/include/common/asm_macros.S b/include/common/asm_macros.S index 00c7d88bed..d4bd11ee2c 100644 --- a/include/common/asm_macros.S +++ b/include/common/asm_macros.S @@ -69,20 +69,26 @@ /* * Declare the exception vector table, enforcing it is aligned on a * 2KB boundary, as required by the ARMv8 architecture. + * Use zero bytes as the fill value to be stored in the padding bytes + * so that it inserts illegal AArch64 instructions. This increases + * security, robustness and potentially facilitates debugging. */ .macro vector_base label .section .vectors, "ax" - .align 11 + .align 11, 0 \label: .endm /* * Create an entry in the exception vector table, enforcing it is * aligned on a 128-byte boundary, as required by the ARMv8 architecture. + * Use zero bytes as the fill value to be stored in the padding bytes + * so that it inserts illegal AArch64 instructions. This increases + * security, robustness and potentially facilitates debugging. */ .macro vector_entry label .section .vectors, "ax" - .align 7 + .align 7, 0 \label: .endm
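
For illustration, a minimal sketch of how a BL image could declare its exception vector table with the new vector_base / vector_entry / check_vector_size macros follows. The demo_exceptions label and the entry names are invented for this example, and only the first of the four exception groups is written out; a real table must provide all sixteen 128-byte entries covering offsets 0x0 - 0x800.

#include <asm_macros.S>

	.globl	demo_exceptions

	/* Table is placed in the .vectors section, aligned on a 2KB boundary */
vector_base demo_exceptions

	/* -----------------------------------------------------
	 * Current EL with SP0 : 0x0 - 0x200
	 * -----------------------------------------------------
	 */
vector_entry demo_sync_exception_sp_el0
	b	demo_sync_exception_sp_el0	/* no handler expected: spin */
	check_vector_size demo_sync_exception_sp_el0

vector_entry demo_irq_sp_el0
	b	demo_irq_sp_el0
	check_vector_size demo_irq_sp_el0

vector_entry demo_fiq_sp_el0
	b	demo_fiq_sp_el0
	check_vector_size demo_fiq_sp_el0

vector_entry demo_serror_sp_el0
	b	demo_serror_sp_el0
	check_vector_size demo_serror_sp_el0

	/*
	 * The remaining twelve entries (Current EL with SPx, Lower EL using
	 * AArch64, Lower EL using AArch32) follow the same pattern.
	 */

Each vector_entry starts on the next 128-byte boundary of the .vectors section, and check_vector_size verifies that the handler stays within the architectural limit of 32 instructions. With patch 2/2 applied, the padding up to each boundary consists of zero bytes, which do not encode any valid AArch64 instruction. The table is then installed by writing its base address to the relevant VBAR_ELx register, for example: adr x0, demo_exceptions; msr vbar_el3, x0; isb.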