Introduce new exception handling framework

This patch introduces the reworked exception handling logic which lays
the foundation for accessing runtime services in later patches. The
type of an exception now has a greater influence on how it is handled.
SP_EL3 is used as the stack pointer for:

1. Determining the type of exception and handling the unexpected ones
   on the exception stack

2. Saving and restoring the essential general purpose and system
   register state after exception entry and prior to exception exit.

SP_EL0 is used as the stack pointer for handling runtime service
requests, e.g. SMCs. A new structure for preserving general purpose
register state has been added to the 'cpu_context' structure. All
assembler code avoids using the callee-saved registers (x19-x29); the
C runtime preserves them across function calls, so EL3 code does not
have to save and restore them explicitly.
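
For orientation, the following is a minimal C sketch (not part of this
patch) of the register state described above. It mirrors the CTX_GPREG_*
and CTX_EXCEPTION_SP/CTX_RUNTIME_SP offsets introduced in the header
changes below; the type and field names are illustrative only, since the
real structures are generated with DEFINE_REG_STRUCT.

#include <stdint.h>

/* Illustrative layout of the saved general purpose register state.
 * Sized 0xb0 bytes, matching CTX_GPREGS_END below. */
typedef struct {
	uint64_t x[19];    /* x0 - x18: scratch registers saved on entry */
	uint64_t sp_el0;   /* stack pointer of the lower exception level */
	uint64_t lr;       /* x30 */
	uint64_t unused;   /* pad so registers can be saved/restored as pairs */
} gp_regs_sketch;

/* Illustrative start of the 'el3_state' member that follows it. */
typedef struct {
	uint64_t exception_sp; /* SP_EL3 stack for unexpected exceptions */
	uint64_t runtime_sp;   /* SP_EL0 stack for runtime service requests */
	/* spsr_el3, elr_el3, scr_el3, ... follow */
} el3_state_sketch;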

Since the exception handling framework has undergone substantial change,
the changes have been kept in separate files to aid readability. These
files will replace the existing ones in subsequent patches.

Change-Id: Ice418686592990ff7a4260771e8d6676e6c8c5ef

Author:    Achin Gupta
Date:      2014-02-02 12:02:23 +00:00
Committer: Dan Handley
Parent:    7421b4653d
Commit:    07f4e078b6

7 changed files with 622 additions and 14 deletions

@@ -39,18 +39,18 @@
.macro dcache_line_size reg, tmp
mrs \tmp, ctr_el0
ubfx \tmp, \tmp, #16, #4
mov \reg, #4
lsl \reg, \reg, \tmp
.endm

.macro icache_line_size reg, tmp
mrs \tmp, ctr_el0
and \tmp, \tmp, #0xf
mov \reg, #4
lsl \reg, \reg, \tmp
.endm
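
As a side note, the macros in this hunk compute cache line sizes from
CTR_EL0: DminLine (bits [19:16]) and IminLine (bits [3:0]) hold log2 of
the line length in 4-byte words, so the size in bytes is 4 << field. A
rough C equivalent, for illustration only (the helper names are made up):

#include <stdint.h>

static inline uint64_t read_ctr_el0_sketch(void)
{
	uint64_t ctr;
	__asm__ volatile("mrs %0, ctr_el0" : "=r" (ctr));
	return ctr;
}

/* Equivalent of dcache_line_size: 4-byte words << CTR_EL0.DminLine */
static inline unsigned int dcache_line_size_sketch(void)
{
	return 4U << ((read_ctr_el0_sketch() >> 16) & 0xfU);
}

/* Equivalent of icache_line_size: 4-byte words << CTR_EL0.IminLine */
static inline unsigned int icache_line_size_sketch(void)
{
	return 4U << (read_ctr_el0_sketch() & 0xfU);
}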

include/cm_macros.S (new file, 115 lines)

@@ -0,0 +1,115 @@
/*
* Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/* ---------------------------------------------
* Zero out the callee saved registers to prevent
* leakage of secure state into the normal world
* during the first ERET after a cold/warm boot.
* ---------------------------------------------
*/
.macro zero_callee_saved_regs
mov x19, xzr
mov x20, xzr
mov x21, xzr
mov x22, xzr
mov x23, xzr
mov x24, xzr
mov x25, xzr
mov x26, xzr
mov x27, xzr
mov x28, xzr
mov x29, xzr
.endm
/* -----------------------------------------------------
* Save the current SP_EL3, which points to the
* 'cpu_context' structure, in \reg1 and switch to the
* exception stack stored in its 'el3_state' member.
* -----------------------------------------------------
*/
.macro switch_to_exception_stack reg1 reg2
mov \reg1, sp
ldr \reg2, [\reg1, #CTX_EL3STATE_OFFSET + CTX_EXCEPTION_SP]
mov sp, \reg2
.endm
/* -----------------------------------------------------
* Handle SMC exceptions separately from other
* synchronous exceptions.
* -----------------------------------------------------
*/
.macro handle_sync_exception
stp x30, xzr, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
mrs x30, esr_el3
ubfx x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH
cmp x30, #EC_AARCH32_SMC
b.eq smc_handler32
cmp x30, #EC_AARCH64_SMC
b.eq smc_handler64
/* -----------------------------------------------------
* The following code handles any synchronous exception
* that is not an SMC. SP_EL3 is pointing to a context
* structure where all the scratch registers are saved.
* An exception stack is also retrieved from the context structure.
* Currently, a register dump is printed since BL31 does
* not expect any such exceptions.
* -----------------------------------------------------
*/
bl save_scratch_registers
switch_to_exception_stack x0 x1
/* Preserve the 'cpu_context' pointer across the fault handler call */
stp x0, xzr, [sp, #-0x10]!
bl fault_handler
ldp x0, xzr, [sp], #0x10
mov sp, x0
bl restore_scratch_registers
ldp x30, xzr, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
eret
.endm
/* -----------------------------------------------------
* Use a platform-defined mechanism to report an
* asynchronous exception.
* -----------------------------------------------------
*/
.macro handle_async_exception type
stp x30, xzr, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
bl save_scratch_registers
switch_to_exception_stack x0 x1
/* Preserve the 'cpu_context' pointer across the platform handler call */
stp x0, xzr, [sp, #-0x10]!
mov x0, \type
bl plat_report_exception
ldp x0, xzr, [sp], #0x10
mov sp, x0
bl restore_scratch_registers
ldp x30, xzr, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
.endm
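
The synchronous path above reaches fault_handler with the saved-context
pointer (the old SP_EL3 value) in x0. The C side is not part of this
file, so the following is only a hypothetical sketch of such a handler;
the structure and signature are assumptions, not taken from this series.

#include <stdint.h>

/* Hypothetical view of the saved scratch registers that the old SP_EL3
 * points to (see the CTX_GPREG_* offsets below): x0-x18, sp_el0, lr and
 * one padding slot, i.e. 22 doublewords. */
typedef struct {
	uint64_t ctx_regs[22];
} saved_gp_regs_sketch;

/* Assumed signature: the macro above passes the context pointer in x0. */
void fault_handler(saved_gp_regs_sketch *regs)
{
	/* Dump the saved registers via the platform console; the vector
	 * code restores the state and ERETs if this returns. A real
	 * implementation might instead panic here. */
	for (unsigned int i = 0U; i < 19U; i++)
		(void)regs->ctx_regs[i];	/* report xN via the console */
}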

@@ -34,14 +34,43 @@
#include <bl_common.h>
#include <arch.h>
/*******************************************************************************
* Constants that allow assembler code to access members of the 'gp_regs'
* structure at their correct offsets.
******************************************************************************/
#define CTX_GPREGS_OFFSET 0x0
#define CTX_GPREG_X0 0x0
#define CTX_GPREG_X1 0x8
#define CTX_GPREG_X2 0x10
#define CTX_GPREG_X3 0x18
#define CTX_GPREG_X4 0x20
#define CTX_GPREG_X5 0x28
#define CTX_GPREG_X6 0x30
#define CTX_GPREG_X7 0x38
#define CTX_GPREG_X8 0x40
#define CTX_GPREG_X9 0x48
#define CTX_GPREG_X10 0x50
#define CTX_GPREG_X11 0x58
#define CTX_GPREG_X12 0x60
#define CTX_GPREG_X13 0x68
#define CTX_GPREG_X14 0x70
#define CTX_GPREG_X15 0x78
#define CTX_GPREG_X16 0x80
#define CTX_GPREG_X17 0x88
#define CTX_GPREG_X18 0x90
#define CTX_GPREG_SP_EL0 0x98
#define CTX_GPREG_LR 0xa0
/* Unused space to allow registers to be stored as pairs */
#define CTX_GPREGS_END 0xb0
/*******************************************************************************
* Constants that allow assembler code to access members of the 'el3_state'
* structure at their correct offsets. Note that some of the registers are only
* 32 bits wide but are stored as 64-bit values for convenience.
******************************************************************************/
-#define CTX_EL3STATE_OFFSET 0x0
-#define CTX_SAVED_SP_EL3 0x0
-#define CTX_SAVED_SP_EL0 0x8
+#define CTX_EL3STATE_OFFSET (CTX_GPREGS_OFFSET + CTX_GPREGS_END)
+#define CTX_EXCEPTION_SP 0x0
+#define CTX_RUNTIME_SP 0x8
#define CTX_SPSR_EL3 0x10
#define CTX_ELR_EL3 0x18
#define CTX_SCR_EL3 0x20
@@ -153,10 +182,19 @@
} __aligned(16) name
/* Constants to determine the size of individual context structures */
#define CTX_GPREG_ALL (CTX_GPREGS_END >> DWORD_SHIFT)
#define CTX_SYSREG_ALL (CTX_SYSREGS_END >> DWORD_SHIFT)
#define CTX_FPREG_ALL (CTX_FPREGS_END >> DWORD_SHIFT)
#define CTX_EL3STATE_ALL (CTX_EL3STATE_END >> DWORD_SHIFT)
/*
* AArch64 general purpose register context structure. Only x0-x18 and
* lr are saved: the compiler is expected to preserve the callee-saved
* registers if the C runtime uses them, and the assembler code does not
* touch them.
*/
DEFINE_REG_STRUCT(gp_regs_next, CTX_GPREG_ALL);
/*
* AArch64 EL1 system register context structure for preserving the
* architectural state during switches from one security state to
@@ -195,6 +233,7 @@ DEFINE_REG_STRUCT(el3_state, CTX_EL3STATE_ALL);
* correspond to either the secure or the non-secure state.
*/
typedef struct {
gp_regs_next gpregs_ctx;
el3_state el3state_ctx;
el1_sys_regs sysregs_ctx;
fp_regs fpregs_ctx;
@@ -204,12 +243,15 @@ typedef struct {
#define get_el3state_ctx(h) (&((cpu_context *) h)->el3state_ctx)
#define get_fpregs_ctx(h) (&((cpu_context *) h)->fpregs_ctx)
#define get_sysregs_ctx(h) (&((cpu_context *) h)->sysregs_ctx)
#define get_gpregs_ctx(h) (&((cpu_context *) h)->gpregs_ctx)
/*
* Compile time assertions related to the 'cpu_context' structure to
* ensure that the assembler and the compiler view of the offsets of
* the structure members is the same.
*/
CASSERT(CTX_GPREGS_OFFSET == __builtin_offsetof(cpu_context, gpregs_ctx), \
assert_core_context_gp_offset_mismatch);
CASSERT(CTX_SYSREGS_OFFSET == __builtin_offsetof(cpu_context, sysregs_ctx), \
assert_core_context_sys_offset_mismatch);
CASSERT(CTX_FPREGS_OFFSET == __builtin_offsetof(cpu_context, fpregs_ctx), \
@@ -229,6 +271,7 @@ void fpregs_context_restore(fp_regs *regs);
#undef CTX_SYSREG_ALL
#undef CTX_FP_ALL
#undef CTX_GPREG_ALL
#undef CTX_EL3STATE_ALL
#endif /* __ASSEMBLY__ */
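
The CASSERT lines above are compile-time checks that the assembler-visible
offsets match the C structure layout. CASSERT itself is defined elsewhere
in the tree; a common way to achieve the same effect, shown here purely as
an illustration (all names are placeholders), is a typedef whose array
size turns negative when the condition is false:

#include <stddef.h>

/* Placeholder equivalent of CASSERT: compilation fails, naming 'msg',
 * when 'cond' is false. */
#define CASSERT_SKETCH(cond, msg)	typedef char msg[(cond) ? 1 : -1]

/* Stand-ins for the real gp_regs_next / el3_state members. */
typedef struct {
	unsigned long long gpregs_ctx[22];
	unsigned long long el3state_ctx[8];
} cpu_context_sketch;

/* Breaks the build if the assembler offset drifts from the C layout. */
CASSERT_SKETCH(offsetof(cpu_context_sketch, gpregs_ctx) == 0x0,
	       assert_gpregs_offset_sketch);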

@@ -59,10 +59,11 @@
/* For the moment this panic function is very basic: report an error and
* spin. This can be expanded in the future to provide more information.
*/
-static inline void panic(void)
+static inline void __attribute__((noreturn)) panic(void)
{
ERROR("PANIC\n");
-while (1);
+while (1)
+	;
}
#endif /* __ASSEMBLY__ */
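
The noreturn attribute added above lets the compiler assume control never
comes back from panic(). One practical effect, sketched below with example
code that is not part of the patch (the prototype is repeated standalone
for the sketch), is that a caller needs no unreachable return statement
after a default: panic(); branch.

#include <stdint.h>

void __attribute__((noreturn)) panic(void);

/* Example only: the compiler accepts the missing return after the
 * default branch because panic() is known not to return. */
static uint32_t reg_width_sketch(uint32_t mode)
{
	switch (mode) {
	case 0U:
		return 32U;
	case 1U:
		return 64U;
	default:
		panic();
	}
}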

@@ -143,7 +143,8 @@
* Constants to allow the assembler to access a runtime service
* descriptor
*/
-#define SIZEOF_RT_SVC_DESC 32
+#define RT_SVC_SIZE_LOG2 5
+#define SIZEOF_RT_SVC_DESC (1 << RT_SVC_SIZE_LOG2)
#define RT_SVC_DESC_INIT 16
#define RT_SVC_DESC_HANDLE 24
@@ -156,6 +157,13 @@
#ifndef __ASSEMBLY__
/* Various flags passed to SMC handlers */
#define SMC_FROM_SECURE (0 << 0)
#define SMC_FROM_NON_SECURE (1 << 0)
#define is_caller_non_secure(_f) (!!(_f & SMC_FROM_NON_SECURE))
#define is_caller_secure(_f) (!(is_caller_non_secure(_f)))
/* Prototype for runtime service initializing function */
typedef int32_t (*rt_svc_init)(void);
@@ -288,7 +296,7 @@ CASSERT(GPREGS_FP_OFF == __builtin_offsetof(gp_regs, fp), \
* of the structure are the same.
* 2. ensure that the assembler and the compiler see the initialisation
* routine at the same offset.
-* 2. ensure that the assembler and the compiler see the handler
+* 3. ensure that the assembler and the compiler see the handler
* routine at the same offset.
*/
CASSERT((sizeof(rt_svc_desc) == SIZEOF_RT_SVC_DESC), \
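
For context, a hedged sketch (not from this patch) of how the constants
above are intended to be used: RT_SVC_SIZE_LOG2 lets assembler index the
descriptor array with a shift instead of a multiply, and the SMC_FROM_*
flag tells a handler which security state issued the SMC. All names
ending in _sketch are placeholders.

#include <stdint.h>

#define RT_SVC_SIZE_LOG2_SKETCH		5		/* mirrors RT_SVC_SIZE_LOG2 */
#define SMC_FROM_NON_SECURE_SKETCH	(1 << 0)	/* mirrors SMC_FROM_NON_SECURE */

/* Address of the index'th 32-byte runtime service descriptor. */
static inline uintptr_t rt_svc_desc_addr_sketch(uintptr_t base,
						unsigned int index)
{
	return base + ((uintptr_t)index << RT_SVC_SIZE_LOG2_SKETCH);
}

/* A handler can use the flags argument to tell which world made the call. */
static inline int64_t smc_handler_sketch(uint64_t flags)
{
	if (flags & SMC_FROM_NON_SECURE_SKETCH)
		return 0;	/* request from the normal world */
	return -1;		/* request from the secure world */
}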