Add Cortex-M0 core support

The Cortex-M0 core is based on the ARMv6-M instruction set rather than
the ARMv7-M used by the Cortex-M3 and M4.

Signed-off-by: Vincent Palatin <vpalatin@chromium.org>

BRANCH=none
BUG=none
TEST=run console on STM32F072,
and pass all available unit-tests on target.

Change-Id: I9bdf6637132ba4a3e739d388580a72b4c84e930e
Reviewed-on: https://chromium-review.googlesource.com/188982
Reviewed-by: Vincent Palatin <vpalatin@chromium.org>
Commit-Queue: Vincent Palatin <vpalatin@chromium.org>
Tested-by: Vincent Palatin <vpalatin@chromium.org>
Author: Vincent Palatin <vpalatin@chromium.org>
Date: 2014-03-01 10:20:47 -08:00 (committed by chrome-internal-fetch)
Commit: 0f73a129b4 (parent: 7aab81edce)

14 changed files with 1734 additions and 0 deletions

core/cortex-m0/atomic.h (new file, 64 lines)

@@ -0,0 +1,64 @@
/* Copyright (c) 2014 The Chromium OS Authors. All rights reserved.
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
/* Atomic operations for ARMv6-M */
#ifndef __CROS_EC_ATOMIC_H
#define __CROS_EC_ATOMIC_H
#include "common.h"
/**
* Implements atomic arithmetic operations on 32-bit integers.
*
* There is no load/store exclusive on ARMv6-M, so we just disable interrupts.
*/
#define ATOMIC_OP(asm_op, a, v) do { \
uint32_t reg0; \
\
__asm__ __volatile__(" cpsid i\n" \
" ldr %0, [%1]\n" \
#asm_op" %0, %0, %2\n" \
" str %0, [%1]\n" \
" cpsie i\n" \
: "=&r" (reg0) \
: "r" (a), "r" (v) : "cc"); \
} while (0)
static inline void atomic_clear(uint32_t *addr, uint32_t bits)
{
ATOMIC_OP(bic, addr, bits);
}
static inline void atomic_or(uint32_t *addr, uint32_t bits)
{
ATOMIC_OP(orr, addr, bits);
}
static inline void atomic_add(uint32_t *addr, uint32_t value)
{
ATOMIC_OP(add, addr, value);
}
static inline void atomic_sub(uint32_t *addr, uint32_t value)
{
ATOMIC_OP(sub, addr, value);
}
static inline uint32_t atomic_read_clear(uint32_t *addr)
{
uint32_t ret;
__asm__ __volatile__(" mov %2, #0\n"
" cpsid i\n"
" ldr %0, [%1]\n"
" str %2, [%1]\n"
" cpsie i\n"
: "=&r" (ret)
: "r" (addr), "r" (0) : "cc");
return ret;
}
#endif /* __CROS_EC_ATOMIC_H */
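
For illustration, a minimal sketch (not part of this commit) of how these
interrupt-masking atomics are typically used: an interrupt handler posts bits
into a shared 32-bit word, and a task later drains the word in a single atomic
step so no update is lost between the read and the clear. The names below are
hypothetical.

#include <stdint.h>

#include "atomic.h"

/* Hypothetical event word shared between an ISR and a task. */
static uint32_t pending_events;

/* Interrupt context: mark an event as pending. */
void isr_post_event(uint32_t mask)
{
	atomic_or(&pending_events, mask);
}

/* Task context: fetch and clear all pending events in one cpsid/cpsie
 * window, so a concurrent isr_post_event() cannot be lost. */
uint32_t task_take_events(void)
{
	return atomic_read_clear(&pending_events);
}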

core/cortex-m0/build.mk (new file, 17 lines)

@@ -0,0 +1,17 @@
# -*- makefile -*-
# Copyright (c) 2014 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Cortex-M0 core OS files build
#
# Select ARMv6-m compatible bare-metal toolchain
CROSS_COMPILE?=arm-none-eabi-
# CPU specific compilation flags
CFLAGS_CPU+=-mthumb -Os -mno-sched-prolog
CFLAGS_CPU+=-mno-unaligned-access
core-y=cpu.o init.o panic.o switch.o task.o thumb_case.o div.o
core-$(CONFIG_WATCHDOG)+=watchdog.o

core/cortex-m0/config_core.h (new file, 16 lines)

@@ -0,0 +1,16 @@
/* Copyright (c) 2014 The Chromium OS Authors. All rights reserved.
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#ifndef __CONFIG_CORE_H
#define __CONFIG_CORE_H
/* Linker binary architecture and format */
#define BFD_ARCH arm
#define BFD_FORMAT "elf32-littlearm"
/* Emulate the CLZ instruction since the CPU core is lacking support */
#define CONFIG_SOFTWARE_CLZ
#endif /* __CONFIG_CORE_H */
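
CONFIG_SOFTWARE_CLZ asks the common code for a software count-leading-zeros,
since ARMv6-M has no CLZ instruction while the scheduler relies on
__builtin_clz(). A rough sketch of such a routine, for illustration only (the
actual implementation is supplied elsewhere, not by this header):

#include <stdint.h>

/* Binary-search style count-leading-zeros for a core without CLZ. */
static int soft_clz(uint32_t x)
{
	int n = 0;

	if (!x)
		return 32;
	if (!(x & 0xffff0000)) { n += 16; x <<= 16; }
	if (!(x & 0xff000000)) { n += 8;  x <<= 8;  }
	if (!(x & 0xf0000000)) { n += 4;  x <<= 4;  }
	if (!(x & 0xc0000000)) { n += 2;  x <<= 2;  }
	if (!(x & 0x80000000)) { n += 1; }
	return n;
}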

core/cortex-m0/cpu.c (new file, 17 lines)

@@ -0,0 +1,17 @@
/* Copyright (c) 2014 The Chromium OS Authors. All rights reserved.
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*
* Set up the Cortex-M0 core
*/
#include "cpu.h"
void cpu_init(void)
{
/* Catch unaligned access */
CPU_NVIC_CCR |= CPU_NVIC_CCR_UNALIGN_TRAP;
/* Set supervisor call (SVC) to priority 0 */
CPU_NVIC_SHCSR2 = 0;
}

core/cortex-m0/cpu.h (new file, 38 lines)

@@ -0,0 +1,38 @@
/* Copyright (c) 2014 The Chromium OS Authors. All rights reserved.
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*
* Registers map and definitions for Cortex-M0 processor
*/
#ifndef __CPU_H
#define __CPU_H
#include <stdint.h>
/* Macro to access 32-bit registers */
#define CPUREG(addr) (*(volatile uint32_t*)(addr))
/* Nested Vectored Interrupt Controller */
#define CPU_NVIC_EN(x) CPUREG(0xe000e100)
#define CPU_NVIC_DIS(x) CPUREG(0xe000e180)
#define CPU_NVIC_UNPEND(x) CPUREG(0xe000e280)
#define CPU_NVIC_ISPR(x) CPUREG(0xe000e200)
#define CPU_NVIC_PRI(x) CPUREG(0xe000e400)
/* System Control Block */
/* SCB AIRCR : Application interrupt and reset control register */
#define CPU_NVIC_APINT CPUREG(0xe000ed0c)
/* SCB SCR : System Control Register */
#define CPU_SCB_SYSCTRL CPUREG(0xe000ed10)
#define CPU_NVIC_CCR CPUREG(0xe000ed14)
#define CPU_NVIC_SHCSR2 CPUREG(0xe000ed1c)
#define CPU_NVIC_SHCSR3 CPUREG(0xe000ed20)
#define CPU_NVIC_CCR_UNALIGN_TRAP (1 << 3)
/* Set up the cpu to detect faults */
void cpu_init(void);
#endif /* __CPU_H */

core/cortex-m0/div.S (new file, 183 lines)

@@ -0,0 +1,183 @@
/* Runtime ABI for the ARM Cortex-M0
* idiv.S: signed 32 bit division (only quotient)
* idivmod.S: signed 32 bit division (quotient and remainder)
* uidivmod.S: unsigned 32 bit division
*
* Copyright (c) 2012 Jörg Mische <bobbl@gmx.de>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
* OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
.syntax unified
.text
.thumb
.cpu cortex-m0
@ int __divsi3(int num, int denom)
@
@ libgcc wrapper: just an alias for __aeabi_idivmod(), the remainder is ignored
@
.thumb_func
.global __divsi3
__divsi3:
@ int __aeabi_idiv(int num:r0, int denom:r1)
@
@ Divide r0 by r1 and return quotient in r0 (all signed).
@ Use __aeabi_uidivmod() but check signs before and change signs afterwards.
@
.thumb_func
.global __aeabi_idiv
__aeabi_idiv:
cmp r0, #0
bge L_num_pos
rsbs r0, r0, #0 @ num = -num
cmp r1, #0
bge L_neg_result
rsbs r1, r1, #0 @ den = -den
b __aeabi_uidivmod
L_num_pos:
cmp r1, #0
bge __aeabi_uidivmod
rsbs r1, r1, #0 @ den = -den
L_neg_result:
push {lr}
bl __aeabi_uidivmod
rsbs r0, r0, #0 @ quot = -quot
pop {pc}
@ {int quotient:r0, int remainder:r1}
@ __aeabi_idivmod(int numerator:r0, int denominator:r1)
@
@ Divide r0 by r1 and return the quotient in r0 and the remainder in r1
@
.thumb_func
.global __aeabi_idivmod
__aeabi_idivmod:
cmp r0, #0
bge L_num_pos_bis
rsbs r0, r0, #0 @ num = -num
cmp r1, #0
bge L_neg_both
rsbs r1, r1, #0 @ den = -den
push {lr}
bl __aeabi_uidivmod
rsbs r1, r1, #0 @ rem = -rem
pop {pc}
L_neg_both:
push {lr}
bl __aeabi_uidivmod
rsbs r0, r0, #0 @ quot = -quot
rsbs r1, r1, #0 @ rem = -rem
pop {pc}
L_num_pos_bis:
cmp r1, #0
bge __aeabi_uidivmod
rsbs r1, r1, #0 @ den = -den
push {lr}
bl __aeabi_uidivmod
rsbs r0, r0, #0 @ quot = -quot
pop {pc}
@ unsigned __udivsi3(unsigned num, unsigned denom)
@
@ libgcc wrapper: just an alias for __aeabi_uidivmod(), the remainder is ignored
@
.thumb_func
.global __udivsi3
__udivsi3:
@ unsigned __aeabi_uidiv(unsigned num, unsigned denom)
@
@ Just an alias for __aeabi_uidivmod(), the remainder is ignored
@
.thumb_func
.global __aeabi_uidiv
__aeabi_uidiv:
@ {unsigned quotient:r0, unsigned remainder:r1}
@ __aeabi_uidivmod(unsigned numerator:r0, unsigned denominator:r1)
@
@ Divide r0 by r1 and return the quotient in r0 and the remainder in r1
@
.thumb_func
.global __aeabi_uidivmod
__aeabi_uidivmod:
cmp r1, #0
bne L_no_div0
b __aeabi_idiv0
L_no_div0:
@ Shift left the denominator until it is greater than the numerator
movs r2, #1 @ counter
movs r3, #0 @ result
cmp r0, r1
bls L_sub_loop0
adds r1, #0 @ don't shift if denominator would overflow
bmi L_sub_loop0
L_denom_shift_loop:
lsls r2, #1
lsls r1, #1
bmi L_sub_loop0
cmp r0, r1
bhi L_denom_shift_loop
L_sub_loop0:
cmp r0, r1
bcc L_dont_sub0 @ if (num>denom)
subs r0, r1 @ numerator -= denom
orrs r3, r2 @ result(r3) |= bitmask(r2)
L_dont_sub0:
lsrs r1, #1 @ denom(r1) >>= 1
lsrs r2, #1 @ bitmask(r2) >>= 1
bne L_sub_loop0
mov r1, r0 @ remainder(r1) = numerator(r0)
mov r0, r3 @ quotient(r0) = result(r3)
bx lr
__aeabi_idiv0:
bl panic
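
The shift-and-subtract loop in __aeabi_uidivmod above reads more easily in C.
The sketch below mirrors the algorithm (align the divisor under the dividend,
then subtract and set quotient bits while shifting back down); it is for
illustration only, the assembly above is the actual implementation, and the
divide-by-zero path is omitted.

#include <stdint.h>

struct udivmod_result {
	uint32_t quot;
	uint32_t rem;
};

/* C rendition of the restoring-division loop used by __aeabi_uidivmod. */
static struct udivmod_result soft_udivmod(uint32_t num, uint32_t den)
{
	uint32_t bit = 1, quot = 0;

	/* Shift den left until it covers num or its top bit is set */
	while (den < num && !(den & 0x80000000)) {
		den <<= 1;
		bit <<= 1;
	}
	/* Subtract and accumulate quotient bits while shifting back down */
	while (bit) {
		if (num >= den) {
			num -= den;
			quot |= bit;
		}
		den >>= 1;
		bit >>= 1;
	}
	return (struct udivmod_result){ .quot = quot, .rem = num };
}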

core/cortex-m0/ec.lds.S (new file, 207 lines)

@@ -0,0 +1,207 @@
/* Copyright (c) 2014 The Chromium OS Authors. All rights reserved.
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#include "config.h"
#define FW_OFF_(section) CONFIG_FW_##section##_OFF
#define FW_OFF(section) (CONFIG_FLASH_BASE + FW_OFF_(section))
#define FW_SIZE_(section) CONFIG_FW_##section##_SIZE
#define FW_SIZE(section) FW_SIZE_(section)
OUTPUT_FORMAT(BFD_FORMAT, BFD_FORMAT, BFD_FORMAT)
OUTPUT_ARCH(BFD_ARCH)
ENTRY(reset)
MEMORY
{
FLASH (rx) : ORIGIN = FW_OFF(SECTION), LENGTH = FW_SIZE(SECTION)
IRAM (rw) : ORIGIN = CONFIG_RAM_BASE, LENGTH = CONFIG_RAM_SIZE
}
SECTIONS
{
.text : {
OUTDIR/core/CORE/init.o (.text.vecttable)
. = ALIGN(4);
__version_struct_offset = .;
*(.rodata.ver)
#ifdef SHIFT_CODE_FOR_TEST
. = ALIGN(256);
#else
. = ALIGN(4);
#endif
OUTDIR/core/CORE/init.o (.text)
*(.text*)
#ifdef COMPILE_FOR_RAM
} > IRAM
#else
} > FLASH
#endif
. = ALIGN(4);
.rodata : {
/* Symbols defined here are declared in link_defs.h */
__irqprio = .;
*(.rodata.irqprio)
__irqprio_end = .;
. = ALIGN(4);
__cmds = .;
*(SORT(.rodata.cmds*))
__cmds_end = .;
. = ALIGN(4);
__hcmds = .;
*(.rodata.hcmds)
__hcmds_end = .;
. = ALIGN(4);
__hooks_init = .;
*(.rodata.HOOK_INIT)
__hooks_init_end = .;
__hooks_pre_freq_change = .;
*(.rodata.HOOK_PRE_FREQ_CHANGE)
__hooks_pre_freq_change_end = .;
__hooks_freq_change = .;
*(.rodata.HOOK_FREQ_CHANGE)
__hooks_freq_change_end = .;
__hooks_sysjump = .;
*(.rodata.HOOK_SYSJUMP)
__hooks_sysjump_end = .;
__hooks_chipset_pre_init = .;
*(.rodata.HOOK_CHIPSET_PRE_INIT)
__hooks_chipset_pre_init_end = .;
__hooks_chipset_startup = .;
*(.rodata.HOOK_CHIPSET_STARTUP)
__hooks_chipset_startup_end = .;
__hooks_chipset_resume = .;
*(.rodata.HOOK_CHIPSET_RESUME)
__hooks_chipset_resume_end = .;
__hooks_chipset_suspend = .;
*(.rodata.HOOK_CHIPSET_SUSPEND)
__hooks_chipset_suspend_end = .;
__hooks_chipset_shutdown = .;
*(.rodata.HOOK_CHIPSET_SHUTDOWN)
__hooks_chipset_shutdown_end = .;
__hooks_ac_change = .;
*(.rodata.HOOK_AC_CHANGE)
__hooks_ac_change_end = .;
__hooks_lid_change = .;
*(.rodata.HOOK_LID_CHANGE)
__hooks_lid_change_end = .;
__hooks_pwrbtn_change = .;
*(.rodata.HOOK_POWER_BUTTON_CHANGE)
__hooks_pwrbtn_change_end = .;
__hooks_charge_state_change = .;
*(.rodata.HOOK_CHARGE_STATE_CHANGE)
__hooks_charge_state_change_end = .;
__hooks_tick = .;
*(.rodata.HOOK_TICK)
__hooks_tick_end = .;
__hooks_second = .;
*(.rodata.HOOK_SECOND)
__hooks_second_end = .;
__deferred_funcs = .;
*(.rodata.deferred)
__deferred_funcs_end = .;
. = ALIGN(4);
*(.rodata*)
#if defined(SECTION_IS_RO) && defined(CONFIG_FLASH)
. = ALIGN(64);
*(.google)
#endif
. = ALIGN(4);
#ifdef COMPILE_FOR_RAM
} > IRAM
#else
} > FLASH
#endif
__ro_end = . ;
__deferred_funcs_count =
(__deferred_funcs_end - __deferred_funcs) / 4;
ASSERT(__deferred_funcs_count <= DEFERRABLE_MAX_COUNT,
"Increase DEFERRABLE_MAX_COUNT")
.bss : {
/*
* Align to 512 bytes. This is convenient when some memory block
* need big alignment. When COMPILE_FOR_RAM is not set, this is the
* beginning of the RAM, so there is usually no penalty on aligning
* this.
*/
. = ALIGN(512);
__bss_start = .;
*(.bss.big_align)
/* Stacks must be 64-bit aligned */
. = ALIGN(8);
*(.bss.system_stack)
/* Rest of .bss takes care of its own alignment */
*(.bss)
. = ALIGN(4);
__bss_end = .;
} > IRAM
#ifdef COMPILE_FOR_RAM
.data : {
#else
.data : AT(ADDR(.rodata) + SIZEOF(.rodata)) {
#endif
. = ALIGN(4);
__data_start = .;
*(.data.tasks)
*(.data)
. = ALIGN(4);
*(.iram.text)
. = ALIGN(4);
__data_end = .;
/* Shared memory buffer must be at the end of preallocated RAM, so it
* can expand to use all the remaining RAM. */
__shared_mem_buf = .;
/* Tag at end of firmware image so that we can find the image size.
* This may be overwritten by the shared memory buffer; that's ok
* because we only use it to find the image size in flash. */
. = ALIGN(4);
BYTE(0x45);
BYTE(0x4e);
BYTE(0x44);
BYTE(0xea);
/* NOTHING MAY GO AFTER THIS! */
} > IRAM
/* The linker won't notice if the .data section is too big to fit,
* apparently because we're sending it into IRAM, not FLASH. The following
* symbol isn't used by the code, but running "objdump -t *.elf | grep hey"
* will let us check how much flash space we're actually using. The
* explicit ASSERT afterwards will cause the linker to abort if we use too
* much. */
__hey_flash_used = LOADADDR(.data) + SIZEOF(.data) - FW_OFF(SECTION);
ASSERT(FW_SIZE(SECTION) >
(LOADADDR(.data) + SIZEOF(.data) - FW_OFF(SECTION)),
"No room left in the flash")
#if !(defined(SECTION_IS_RO) && defined(CONFIG_FLASH))
/DISCARD/ : {
*(.google)
}
#endif
/DISCARD/ : { *(.ARM.*) }
}
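
The .rodata.HOOK_*, .rodata.cmds* and .rodata.deferred input sections gathered
above are filled by declaration macros elsewhere in the tree (DECLARE_HOOK,
DECLARE_CONSOLE_COMMAND, DECLARE_DEFERRED, and the DECLARE_IRQ macro added in
this commit): each expands to a const struct placed in a named section, and
the __xxx/__xxx_end symbol pairs defined here let the common code walk the
resulting arrays. A simplified sketch of the mechanism, with a hypothetical
struct layout and entry name:

/* Sketch only: the real layout and macros live in the common headers.
 * What matters is the section attribute, which makes the linker script
 * collect every such entry between __hooks_init and __hooks_init_end. */
struct hook_entry {
	void (*routine)(void);
	int priority;
};

static void my_board_init(void)
{
	/* board-specific initialization */
}

const struct hook_entry __hook_entry_my_board_init
	__attribute__((section(".rodata.HOOK_INIT"))) = {
	.routine = my_board_init,
	.priority = 50,
};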

core/cortex-m0/init.S (new file, 164 lines)

@@ -0,0 +1,164 @@
/* Copyright (c) 2014 The Chromium OS Authors. All rights reserved.
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*
* Cortex-M0 CPU initialization
*/
#include "config.h"
.section .text.vecttable
.macro vector name
.long \name\()_handler
.weak \name\()_handler
.set \name\()_handler, default_handler
.endm
.macro vector_irq number
.if \number < CONFIG_IRQ_COUNT
vector irq_\()\number
.endif
.endm
/* Exceptions vector */
vectors:
.long stack_end @ initial stack pointer
.long reset @ reset handler
vector nmi @ NMI handler
vector hard_fault @ HardFault handler
vector mpu_fault @ MPU fault handler
vector bus_fault @ Bus fault handler
vector usage_fault @ Usage fault handler
.long 0 @ reserved
.long 0 @ reserved
.long 0 @ reserved
.long 0 @ reserved
vector svc @ SWI
vector debug @ Debug handler
.long 0 @ reserved
vector pendsv @ PendSV handler
vector sys_tick @ SysTick handler
vector_irq 0 @ IRQ 0 handler
vector_irq 1 @ IRQ 1 handler
vector_irq 2 @ IRQ 2 handler
vector_irq 3 @ IRQ 3 handler
vector_irq 4 @ IRQ 4 handler
vector_irq 5 @ IRQ 5 handler
vector_irq 6 @ IRQ 6 handler
vector_irq 7 @ IRQ 7 handler
vector_irq 8 @ IRQ 8 handler
vector_irq 9 @ IRQ 9 handler
vector_irq 10 @ IRQ 10 handler
vector_irq 11 @ IRQ 11 handler
vector_irq 12 @ IRQ 12 handler
vector_irq 13 @ IRQ 13 handler
vector_irq 14 @ IRQ 14 handler
vector_irq 15 @ IRQ 15 handler
vector_irq 16 @ IRQ 16 handler
vector_irq 17 @ IRQ 17 handler
vector_irq 18 @ IRQ 18 handler
vector_irq 19 @ IRQ 19 handler
vector_irq 20 @ IRQ 20 handler
vector_irq 21 @ IRQ 21 handler
vector_irq 22 @ IRQ 22 handler
vector_irq 23 @ IRQ 23 handler
vector_irq 24 @ IRQ 24 handler
vector_irq 25 @ IRQ 25 handler
vector_irq 26 @ IRQ 26 handler
vector_irq 27 @ IRQ 27 handler
vector_irq 28 @ IRQ 28 handler
vector_irq 29 @ IRQ 29 handler
vector_irq 30 @ IRQ 30 handler
vector_irq 31 @ IRQ 31 handler
.text
.syntax unified
.code 16
.global reset
.thumb_func
reset:
/*
* Ensure we're in privileged mode with main stack. Necessary if
* we've jumped directly here from another image after task_start().
*/
movs r0, #0 @ priv. mode / main stack / no floating point
msr control, r0
isb @ ensure the write is done
/* Set the vector table on our current code */
ldr r1, =vectors
ldr r2, =0xE000ED08 /* VTABLE register in SCB*/
str r1, [r2]
/* Clear BSS */
movs r0, #0
ldr r1,_bss_start
ldr r2,_bss_end
bss_loop:
str r0, [r1]
adds r1, #4
cmp r1, r2
blt bss_loop
#ifndef COMPILE_FOR_RAM
/* Copy initialized data to Internal RAM */
ldr r0,_ro_end
ldr r1,_data_start
ldr r2,_data_end
data_loop:
ldr r3, [r0]
adds r0, #4
str r3, [r1]
adds r1, #4
cmp r1, r2
blt data_loop
#endif
/*
* Set stack pointer. Already done by Cortex-M hardware, but re-doing
* this here allows software to jump directly to the reset vector.
*/
ldr r0, =stack_end
mov sp, r0
/* Jump to C code */
bl main
/* That should not return. If it does, loop forever. */
fini_loop:
b fini_loop
/* Default exception handler */
.thumb_func
default_handler:
b exception_panic
_bss_start:
.long __bss_start
_bss_end:
.long __bss_end
_data_start:
.long __data_start
_data_end:
.long __data_end
_ro_end:
.long __ro_end
/* Dummy functions to avoid linker complaints */
.global __aeabi_unwind_cpp_pr0
.global __aeabi_unwind_cpp_pr1
.global __aeabi_unwind_cpp_pr2
__aeabi_unwind_cpp_pr0:
__aeabi_unwind_cpp_pr1:
__aeabi_unwind_cpp_pr2:
bx lr
/* Reserve space for system stack */
.section .bss.system_stack
stack_start:
.space CONFIG_STACK_SIZE, 0
stack_end:
.global stack_end
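
In C terms, the reset handler above does roughly the following before calling
main() (sketch only, using the symbols defined by ec.lds.S; the assembly uses
do-while style loops, and the .data load image sits in flash right after
.rodata, i.e. at __ro_end):

#include <stdint.h>

/* Symbols provided by the linker script */
extern uint32_t __bss_start[], __bss_end[];
extern uint32_t __data_start[], __data_end[], __ro_end[];

static void runtime_init(void)
{
	uint32_t *src = __ro_end;
	uint32_t *dst;

	/* Zero .bss */
	for (dst = __bss_start; dst < __bss_end; dst++)
		*dst = 0;

	/* Copy initialized .data from its load address in flash to RAM */
	for (dst = __data_start; dst < __data_end; dst++)
		*dst = *src++;
}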

core/cortex-m0/irq_handler.h (new file, 65 lines)

@@ -0,0 +1,65 @@
/* Copyright (c) 2014 The Chromium OS Authors. All rights reserved.
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
/* Helper to declare IRQ handling routines */
#ifndef __IRQ_HANDLER_H
#define __IRQ_HANDLER_H
#ifdef CONFIG_TASK_PROFILING
#define bl_task_start_irq_handler "bl task_start_irq_handler\n"
#else
#define bl_task_start_irq_handler ""
#endif
/* Helper macros to build the IRQ handler and priority struct names */
#define IRQ_HANDLER(irqname) CONCAT3(irq_, irqname, _handler)
#define IRQ_PRIORITY(irqname) CONCAT2(prio_, irqname)
/* re-scheduling flag */
extern int need_resched_or_profiling;
/*
* Macro to connect the interrupt handler "routine" to the irq number "irq" and
* ensure it is enabled in the interrupt controller with the right priority.
*/
#define DECLARE_IRQ(irq, routine, priority) \
void IRQ_HANDLER(irq)(void) __attribute__((naked)); \
void IRQ_HANDLER(irq)(void) \
{ \
asm volatile("mov r0, lr\n" \
/* Must push registers in pairs to keep 64-bit aligned*/\
/* stack for ARM EABI. */ \
"push {r0, %0}\n" \
bl_task_start_irq_handler \
"bl "#routine"\n" \
"pop {r2, r3}\n" \
/* read need_resched_or_profiling result after IRQ */ \
"ldr r0, [r3]\n" \
"mov r1, #8\n" \
"cmp r0, #0\n" \
/* if we need to go through the re-scheduling, go on */ \
"bne 2f\n" \
/* else return from exception */ \
"1: bx r2\n" \
/* check if that's a nested exception */ \
"2: tst r1, r2\n" \
/* if yes return immediately */ \
"beq 1b\n" \
"push {r0, r2}\n" \
"mov r0, #0\n" \
"mov r1, #0\n" \
/* ensure we have priority 0 during re-scheduling */ \
"cpsid i\n isb\n" \
/* re-schedule the highest priority task */ \
"bl svc_handler\n" \
/* return from exception */ \
"pop {r0,pc}\n" \
: : "r"(&need_resched_or_profiling)); \
} \
const struct irq_priority IRQ_PRIORITY(irq) \
__attribute__((section(".rodata.irqprio"))) \
= {irq, priority}
#endif /* __IRQ_HANDLER_H */
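
A usage sketch for DECLARE_IRQ(); the IRQ number and handler below are
hypothetical placeholders, and real chip code supplies its own. The macro
generates the naked irq_<n>_handler symbol that init.S places in the vector
table and records an irq_priority entry in .rodata.irqprio for
__nvic_init_irqs() in task.c to program.

#include <stdint.h>

#include "irq_handler.h"

/* Hypothetical IRQ number, for illustration only. */
#define IRQ_MY_TIMER 15

static volatile uint32_t my_timer_ticks;

static void my_timer_irq(void)
{
	/* Acknowledge the peripheral and do the minimal IRQ work here. */
	my_timer_ticks++;
}
/* Wire my_timer_irq() to IRQ 15 with priority 2. */
DECLARE_IRQ(IRQ_MY_TIMER, my_timer_irq, 2);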

core/cortex-m0/panic.c (new file, 178 lines)

@@ -0,0 +1,178 @@
/* Copyright (c) 2014 The Chromium OS Authors. All rights reserved.
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#include "common.h"
#include "console.h"
#include "cpu.h"
#include "host_command.h"
#include "panic.h"
#include "printf.h"
#include "system.h"
#include "task.h"
#include "timer.h"
#include "uart.h"
#include "util.h"
#include "watchdog.h"
/* Whether bus fault is ignored */
static int bus_fault_ignored;
/* Panic data goes at the end of RAM. */
static struct panic_data * const pdata_ptr = PANIC_DATA_PTR;
/* Preceded by stack, rounded down to nearest 64-bit-aligned boundary */
static const uint32_t pstack_addr = (CONFIG_RAM_BASE + CONFIG_RAM_SIZE
- sizeof(struct panic_data)) & ~7;
/**
* Print the name and value of a register
*
* This is a convenient helper function for displaying a register value.
* It shows the register name in a 3 character field, followed by a colon.
* The register value is regs[index], and this is shown in hex. If regs is
* NULL, then we display spaces instead.
*
* After displaying the value, either a space or \n is displayed depending
* on the register number, so that (assuming the caller passes all 16
* registers in sequence) we put 4 values per line like this
*
* r0 :0000000b r1 :00000047 r2 :60000000 r3 :200012b5
* r4 :00000000 r5 :08004e64 r6 :08004e1c r7 :200012a8
* r8 :08004e64 r9 :00000002 r10:00000000 r11:00000000
* r12:0000003f sp :200009a0 lr :0800270d pc :0800351a
*
* @param regnum Register number to display (0-15)
* @param regs Pointer to array holding the registers, or NULL
* @param index Index into array where the register value is present
*/
static void print_reg(int regnum, const uint32_t *regs, int index)
{
static const char regname[] = "r10r11r12sp lr pc ";
static char rname[3] = "r ";
const char *name;
rname[1] = '0' + regnum;
name = regnum < 10 ? rname : &regname[(regnum - 10) * 3];
panic_printf("%c%c%c:", name[0], name[1], name[2]);
if (regs)
panic_printf("%08x", regs[index]);
else
panic_puts(" ");
panic_puts((regnum & 3) == 3 ? "\n" : " ");
}
/*
* Returns non-zero if the exception frame was created on the main stack, or
* zero if it's on the process stack.
*
* See B1.5.8 "Exception return behavior" of ARM DDI 0403D for details.
*/
static int32_t is_frame_in_handler_stack(const uint32_t exc_return)
{
return (exc_return & 0xf) == 1 || (exc_return & 0xf) == 9;
}
/*
* Print panic data
*/
void panic_data_print(const struct panic_data *pdata)
{
const uint32_t *lregs = pdata->cm.regs;
const uint32_t *sregs = NULL;
const int32_t in_handler =
is_frame_in_handler_stack(pdata->cm.regs[11]);
int i;
if (pdata->flags & PANIC_DATA_FLAG_FRAME_VALID)
sregs = pdata->cm.frame;
panic_printf("\n=== %s EXCEPTION: %02x ====== xPSR: %08x ===\n",
in_handler ? "HANDLER" : "PROCESS",
lregs[1] & 0xff, sregs ? sregs[7] : -1);
for (i = 0; i < 4; i++)
print_reg(i, sregs, i);
for (i = 4; i < 10; i++)
print_reg(i, lregs, i - 1);
print_reg(10, lregs, 9);
print_reg(11, lregs, 10);
print_reg(12, sregs, 4);
print_reg(13, lregs, in_handler ? 2 : 0);
print_reg(14, sregs, 5);
print_reg(15, sregs, 6);
}
void report_panic(void)
{
struct panic_data *pdata = pdata_ptr;
uint32_t sp;
pdata->magic = PANIC_DATA_MAGIC;
pdata->struct_size = sizeof(*pdata);
pdata->struct_version = 2;
pdata->arch = PANIC_ARCH_CORTEX_M;
pdata->flags = 0;
pdata->reserved = 0;
/* Choose the right sp (psp or msp) based on EXC_RETURN value */
sp = is_frame_in_handler_stack(pdata->cm.regs[11])
? pdata->cm.regs[2] : pdata->cm.regs[0];
/* If stack is valid, copy exception frame to pdata */
if ((sp & 3) == 0 &&
sp >= CONFIG_RAM_BASE &&
sp <= CONFIG_RAM_BASE + CONFIG_RAM_SIZE - 8 * sizeof(uint32_t)) {
const uint32_t *sregs = (const uint32_t *)sp;
int i;
for (i = 0; i < 8; i++)
pdata->cm.frame[i] = sregs[i];
pdata->flags |= PANIC_DATA_FLAG_FRAME_VALID;
}
panic_data_print(pdata);
panic_reboot();
}
/**
* Default exception handler, which reports a panic.
*
* Declare this as a naked call so we can extract raw LR and IPSR values.
*/
void exception_panic(void) __attribute__((naked));
void exception_panic(void)
{
/* Save registers and branch directly to panic handler */
asm volatile(
"mov r0, %[pregs]\n"
"mrs r1, psp\n"
"mrs r2, ipsr\n"
"mov r3, sp\n"
"stmia r0!, {r1-r7}\n"
"mov r1, r8\n"
"mov r2, r9\n"
"mov r3, r10\n"
"mov r4, r11\n"
"mov r5, lr\n"
"stmia r0!, {r1-r5}\n"
"mov sp, %[pstack]\n"
"b report_panic\n" : :
[pregs] "r" (pdata_ptr->cm.regs),
[pstack] "r" (pstack_addr) :
/* Constraints protecting these from being clobbered.
* Gcc should be using r0 & r12 for pregs and pstack. */
"r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9",
"r10", "r11", "cc", "memory"
);
}
void bus_fault_handler(void)
{
if (!bus_fault_ignored)
exception_panic();
}
void ignore_bus_fault(int ignored)
{
bus_fault_ignored = ignored;
}
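
The panic_data block written by report_panic() lives at the very top of RAM
and survives a warm reboot. A small sketch (hypothetical helper, not part of
this file) of how later code could check for a saved record and re-print it:

#include "panic.h"

/* Hypothetical helper: after reboot, dump any panic record the previous
 * run left at PANIC_DATA_PTR; the magic check mirrors report_panic(). */
static void maybe_print_saved_panic(void)
{
	struct panic_data *pdata = PANIC_DATA_PTR;

	if (pdata->magic == PANIC_DATA_MAGIC)
		panic_data_print(pdata);
}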

core/cortex-m0/switch.S (new file, 81 lines)

@@ -0,0 +1,81 @@
/* Copyright (c) 2014 The Chromium OS Authors. All rights reserved.
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*
* Context switching
*/
#include "config.h"
.text
.syntax unified
.code 16
/**
* Task context switching
*
* Change the task scheduled after returning from the exception.
*
* Save the registers of the current task below the exception context on
* its task, then restore the live registers of the next task and set the
* process stack pointer to the new stack.
*
* r0: pointer to the task to switch from
* r1: pointer to the task to switch to
*
* must be called from interrupt context
*
* the structure of the saved context on the stack is :
* r8, r9, r10, r11, r4, r5, r6, r7, r0, r1, r2, r3, r12, lr, pc, psr
* additional registers <|> exception frame
*/
.global __switchto
.thumb_func
__switchto:
mrs r2, psp @ get the task stack where the context has been saved
mov r3, sp
mov sp, r2
push {r4-r7} @ save additional r4-r7 in the task stack
mov r4, r8
mov r5, r9
mov r6, r10
mov r7, r11
push {r4-r7} @ save additional r8-r11 in the task stack
mov r2, sp @ prepare to save former task stack pointer
mov sp, r3 @ restore system stack pointer
str r2, [r0] @ save the task stack pointer in its context
ldr r2, [r1] @ get the new scheduled task stack pointer
ldmia r2!, {r4-r7} @ restore r8-r11 for the next task context
mov r8, r4
mov r9, r5
mov r10, r6
mov r11, r7
ldmia r2!, {r4-r7} @ restore r4-r7 for the next task context
msr psp, r2 @ set the process stack pointer to exception context
bx lr @ return from exception
/**
* Start the task scheduling. r0 is a pointer to task_stack_ready, which is
* set to 1 after the task stack is set up.
*/
.global __task_start
.thumb_func
__task_start:
ldr r2,=scratchpad @ area used as dummy thread stack for the first switch
movs r3, #2 @ use : priv. mode / thread stack / no floating point
adds r2, #17*4 @ put the pointer at the top of the stack
movs r1, #0 @ __Schedule parameter : re-schedule nothing
msr psp, r2 @ setup a thread stack up to the first context switch
movs r2, #1
isb @ ensure the write is done
msr control, r3
movs r3, r0
movs r0, #0 @ __Schedule parameter : de-schedule nothing
isb @ ensure the write is done
str r2, [r3] @ Task scheduling is now active
bl __schedule @ execute the task with the highest priority
/* we should never return here */
movs r0, #1 @ set to EC_ERROR_UNKNOWN
bx lr
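
The 16-word saved context described in the comment above can be pictured as
the struct below (sketch only, no such struct exists in the code): the first
eight words are pushed by software in __switchto(), the last eight are the
hardware exception frame, and task_pre_init() in task.c seeds the same layout
(sp[8] = r0, sp[13] = lr, sp[14] = pc, sp[15] = psr).

#include <stdint.h>

/* Illustrative view of a suspended task's stack, lowest address first. */
struct saved_context {
	/* pushed by __switchto() */
	uint32_t r8, r9, r10, r11;	/* sp[0]..sp[3] */
	uint32_t r4, r5, r6, r7;	/* sp[4]..sp[7] */
	/* pushed by the Cortex-M exception entry */
	uint32_t r0, r1, r2, r3;	/* sp[8]..sp[11] */
	uint32_t r12;			/* sp[12] */
	uint32_t lr;			/* sp[13] */
	uint32_t pc;			/* sp[14] */
	uint32_t psr;			/* sp[15] */
};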

core/cortex-m0/task.c (new file, 629 lines)

@@ -0,0 +1,629 @@
/* Copyright (c) 2014 The Chromium OS Authors. All rights reserved.
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
/* Task scheduling / events module for Chrome EC operating system */
#include "atomic.h"
#include "common.h"
#include "console.h"
#include "cpu.h"
#include "link_defs.h"
#include "task.h"
#include "timer.h"
#include "uart.h"
#include "util.h"
typedef union {
struct {
/*
* Note that sp must be the first element in the task struct
* for __switchto() to work.
*/
uint32_t sp; /* Saved stack pointer for context switch */
uint32_t events; /* Bitmaps of received events */
uint64_t runtime; /* Time spent in task */
uint32_t *stack; /* Start of stack */
};
} task_;
/* Value to store in unused stack */
#define STACK_UNUSED_VALUE 0xdeadd00d
/* declare task routine prototypes */
#define TASK(n, r, d, s) int r(void *);
void __idle(void);
CONFIG_TASK_LIST
CONFIG_TEST_TASK_LIST
#undef TASK
/* Task names for easier debugging */
#define TASK(n, r, d, s) #n,
static const char * const task_names[] = {
"<< idle >>",
CONFIG_TASK_LIST
CONFIG_TEST_TASK_LIST
};
#undef TASK
#ifdef CONFIG_TASK_PROFILING
static uint64_t task_start_time; /* Time task scheduling started */
static uint64_t exc_start_time; /* Time of task->exception transition */
static uint64_t exc_end_time; /* Time of exception->task transition */
static uint64_t exc_total_time; /* Total time in exceptions */
static uint32_t svc_calls; /* Number of service calls */
static uint32_t task_switches; /* Number of times active task changed */
static uint32_t irq_dist[CONFIG_IRQ_COUNT]; /* Distribution of IRQ calls */
#endif
extern void __switchto(task_ *from, task_ *to);
extern int __task_start(int *task_stack_ready);
#ifndef CONFIG_LOW_POWER_IDLE
/* Idle task. Executed when no tasks are ready to be scheduled. */
void __idle(void)
{
while (1) {
/*
* Wait for the next irq event. This stops the CPU clock
* (sleep / deep sleep, depending on chip config).
*/
asm("wfi");
}
}
#endif /* !CONFIG_LOW_POWER_IDLE */
static void task_exit_trap(void)
{
int i = task_get_current();
cprintf(CC_TASK, "[%T Task %d (%s) exited!]\n", i, task_names[i]);
/* Exited tasks simply sleep forever */
while (1)
task_wait_event(-1);
}
/* Startup parameters for all tasks. */
#define TASK(n, r, d, s) { \
.r0 = (uint32_t)d, \
.pc = (uint32_t)r, \
.stack_size = s, \
},
static const struct {
uint32_t r0;
uint32_t pc;
uint16_t stack_size;
} const tasks_init[] = {
TASK(IDLE, __idle, 0, IDLE_TASK_STACK_SIZE)
CONFIG_TASK_LIST
CONFIG_TEST_TASK_LIST
};
#undef TASK
/* Contexts for all tasks */
static task_ tasks[TASK_ID_COUNT];
/* Sanity checks about static task invariants */
BUILD_ASSERT(TASK_ID_COUNT <= sizeof(unsigned) * 8);
BUILD_ASSERT(TASK_ID_COUNT < (1 << (sizeof(task_id_t) * 8)));
/* Stacks for all tasks */
#define TASK(n, r, d, s) + s
uint8_t task_stacks[0
TASK(IDLE, __idle, 0, IDLE_TASK_STACK_SIZE)
CONFIG_TASK_LIST
CONFIG_TEST_TASK_LIST
] __aligned(8);
#undef TASK
/* Reserve space to discard context on first context switch. */
uint32_t scratchpad[17];
static task_ *current_task = (task_ *)scratchpad;
/*
* Should IRQs chain to svc_handler()? This should be set if either of the
* following is true:
*
* 1) Task scheduling has started, and task profiling is enabled. Task
* profiling does its tracking in svc_handler().
*
* 2) An event was set by an interrupt; this could result in a higher-priority
* task unblocking. After checking for a task switch, svc_handler() will clear
* the flag (unless profiling is also enabled; then the flag remains set).
*/
int need_resched_or_profiling;
/*
* Bitmap of all tasks ready to be run.
*
* Currently all tasks are enabled at startup.
*/
static uint32_t tasks_ready = (1<<TASK_ID_COUNT) - 1;
static int start_called; /* Has task swapping started */
static inline task_ *__task_id_to_ptr(task_id_t id)
{
return tasks + id;
}
void interrupt_disable(void)
{
asm("cpsid i");
}
void interrupt_enable(void)
{
asm("cpsie i");
}
inline int in_interrupt_context(void)
{
int ret;
asm("mrs %0, ipsr\n" /* read exception number */
"lsl %0, #23\n" : "=r"(ret)); /* exception bits are the 9 LSB */
return ret;
}
inline int get_interrupt_context(void)
{
int ret;
asm("mrs %0, ipsr\n" : "=r"(ret)); /* read exception number */
return ret & 0x1ff; /* exception bits are the 9 LSB */
}
task_id_t task_get_current(void)
{
return current_task - tasks;
}
uint32_t *task_get_event_bitmap(task_id_t tskid)
{
task_ *tsk = __task_id_to_ptr(tskid);
return &tsk->events;
}
int task_start_called(void)
{
return start_called;
}
/**
* Scheduling system call
*/
task_ *__svc_handler(int desched, task_id_t resched)
{
task_ *current, *next;
#ifdef CONFIG_TASK_PROFILING
int exc = get_interrupt_context();
uint64_t t;
#endif
/* Priority is already at 0, so we cannot be interrupted */
#ifdef CONFIG_TASK_PROFILING
/*
* SVCall isn't triggered via DECLARE_IRQ(), so it needs to track its
* start time explicitly.
*/
if (exc == 0xb) {
exc_start_time = get_time().val;
svc_calls++;
}
#endif
current = current_task;
#ifdef CONFIG_DEBUG_STACK_OVERFLOW
if (*current->stack != STACK_UNUSED_VALUE) {
panic_printf("\n\nStack overflow in %s task!\n",
task_names[current - tasks]);
panic_reboot();
}
#endif
if (desched && !current->events) {
/*
* Remove our own ready bit (current - tasks is same as
* task_get_current())
*/
tasks_ready &= ~(1 << (current - tasks));
}
tasks_ready |= 1 << resched;
ASSERT(tasks_ready);
next = __task_id_to_ptr(31 - __builtin_clz(tasks_ready));
#ifdef CONFIG_TASK_PROFILING
/* Track time in interrupts */
t = get_time().val;
exc_total_time += (t - exc_start_time);
/*
* Bill the current task for time between the end of the last interrupt
* and the start of this one.
*/
current->runtime += (exc_start_time - exc_end_time);
exc_end_time = t;
#else
/*
* Don't chain here from interrupts until the next time an interrupt
* sets an event.
*/
need_resched_or_profiling = 0;
#endif
/* Switch to new task */
#ifdef CONFIG_TASK_PROFILING
if (next != current)
task_switches++;
#endif
current_task = next;
return current;
}
void svc_handler(int desched, task_id_t resched)
{
/*
* The layout of this routine (and the __svc_handler companion one)
* ensures that we are getting the right tail call optimization from
* the compiler.
*/
task_ *prev = __svc_handler(desched, resched);
if (current_task != prev)
__switchto(prev, current_task);
}
void __schedule(int desched, int resched)
{
register int p0 asm("r0") = desched;
register int p1 asm("r1") = resched;
asm("svc 0" : : "r"(p0), "r"(p1));
}
#ifdef CONFIG_TASK_PROFILING
void task_start_irq_handler(void *excep_return)
{
/*
* Get time before checking depth, in case this handler is
* pre-empted.
*/
uint64_t t = get_time().val;
int irq = get_interrupt_context() - 16;
/*
* Track IRQ distribution. No need for atomic add, because an IRQ
* can't pre-empt itself.
*/
if (irq < ARRAY_SIZE(irq_dist))
irq_dist[irq]++;
/*
* Continue iff a rescheduling event happened or profiling is active,
* and we are not called from another exception (this must match the
* logic for when we chain to svc_handler() below).
*/
if (!need_resched_or_profiling || (((uint32_t)excep_return & 0xf) == 1))
return;
exc_start_time = t;
}
#endif
static uint32_t __wait_evt(int timeout_us, task_id_t resched)
{
task_ *tsk = current_task;
task_id_t me = tsk - tasks;
uint32_t evt;
int ret;
ASSERT(!in_interrupt_context());
if (timeout_us > 0) {
timestamp_t deadline = get_time();
deadline.val += timeout_us;
ret = timer_arm(deadline, me);
ASSERT(ret == EC_SUCCESS);
}
while (!(evt = atomic_read_clear(&tsk->events))) {
/* Remove ourself and get the next task in the scheduler */
__schedule(1, resched);
resched = TASK_ID_IDLE;
}
if (timeout_us > 0)
timer_cancel(me);
return evt;
}
uint32_t task_set_event(task_id_t tskid, uint32_t event, int wait)
{
task_ *receiver = __task_id_to_ptr(tskid);
ASSERT(receiver);
/* Set the event bit in the receiver message bitmap */
atomic_or(&receiver->events, event);
/* Re-schedule if priorities have changed */
if (in_interrupt_context()) {
/* The receiver might run again */
atomic_or(&tasks_ready, 1 << tskid);
#ifndef CONFIG_TASK_PROFILING
need_resched_or_profiling = 1;
#endif
} else {
if (wait)
return __wait_evt(-1, tskid);
else
__schedule(0, tskid);
}
return 0;
}
uint32_t task_wait_event(int timeout_us)
{
return __wait_evt(timeout_us, TASK_ID_IDLE);
}
void task_enable_irq(int irq)
{
CPU_NVIC_EN(0) = 1 << irq;
}
void task_disable_irq(int irq)
{
CPU_NVIC_DIS(0) = 1 << irq;
}
void task_clear_pending_irq(int irq)
{
CPU_NVIC_UNPEND(0) = 1 << irq;
}
void task_trigger_irq(int irq)
{
CPU_NVIC_ISPR(0) = 1 << irq;
}
/*
* Initialize IRQs in the NVIC and set their priorities as defined by the
* DECLARE_IRQ statements.
*/
static void __nvic_init_irqs(void)
{
/* Get the IRQ priorities section from the linker */
int exc_calls = __irqprio_end - __irqprio;
int i;
/* Mask and clear all pending interrupts */
CPU_NVIC_DIS(0) = 0xffffffff;
CPU_NVIC_UNPEND(0) = 0xffffffff;
/*
* Re-enable global interrupts in case they're disabled. On a reboot,
* they're already enabled; if we've jumped here from another image,
* they're not.
*/
interrupt_enable();
/* Set priorities */
for (i = 0; i < exc_calls; i++) {
uint8_t irq = __irqprio[i].irq;
uint8_t prio = __irqprio[i].priority;
uint32_t prio_shift = irq % 4 * 8 + 6;
CPU_NVIC_PRI(irq / 4) =
(CPU_NVIC_PRI(irq / 4) &
~(0x7 << prio_shift)) |
(prio << prio_shift);
}
}
void mutex_lock(struct mutex *mtx)
{
uint32_t id = 1 << task_get_current();
ASSERT(id != TASK_ID_INVALID);
atomic_or(&mtx->waiters, id);
while (1) {
/* Try to get the lock (set 2 into the lock field) */
__asm__ __volatile__("cpsid i");
if (mtx->lock == 0)
break;
__asm__ __volatile__("cpsie i");
task_wait_event(0); /* Contention on the mutex */
}
mtx->lock = 2;
__asm__ __volatile__("cpsie i");
atomic_clear(&mtx->waiters, id);
}
void mutex_unlock(struct mutex *mtx)
{
uint32_t waiters;
task_ *tsk = current_task;
__asm__ __volatile__(" ldr %0, [%2]\n"
" str %3, [%1]\n"
: "=&r" (waiters)
: "r" (&mtx->lock), "r" (&mtx->waiters), "r" (0)
: "cc");
while (waiters) {
task_id_t id = 31 - __builtin_clz(waiters);
/* Somebody is waiting on the mutex */
task_set_event(id, TASK_EVENT_MUTEX, 0);
waiters &= ~(1 << id);
}
/* Ensure no event is remaining from mutex wake-up */
atomic_clear(&tsk->events, TASK_EVENT_MUTEX);
}
void task_print_list(void)
{
int i;
ccputs("Task Ready Name Events Time (s) StkUsed\n");
for (i = 0; i < TASK_ID_COUNT; i++) {
char is_ready = (tasks_ready & (1<<i)) ? 'R' : ' ';
uint32_t *sp;
int stackused = tasks_init[i].stack_size;
for (sp = tasks[i].stack;
sp < (uint32_t *)tasks[i].sp && *sp == STACK_UNUSED_VALUE;
sp++)
stackused -= sizeof(uint32_t);
ccprintf("%4d %c %-16s %08x %11.6ld %3d/%3d\n", i, is_ready,
task_names[i], tasks[i].events, tasks[i].runtime,
stackused, tasks_init[i].stack_size);
cflush();
}
}
int command_task_info(int argc, char **argv)
{
#ifdef CONFIG_TASK_PROFILING
int total = 0;
int i;
#endif
task_print_list();
#ifdef CONFIG_TASK_PROFILING
ccputs("IRQ counts by type:\n");
cflush();
for (i = 0; i < ARRAY_SIZE(irq_dist); i++) {
if (irq_dist[i]) {
ccprintf("%4d %8d\n", i, irq_dist[i]);
total += irq_dist[i];
}
}
ccprintf("Service calls: %11d\n", svc_calls);
ccprintf("Total exceptions: %11d\n", total + svc_calls);
ccprintf("Task switches: %11d\n", task_switches);
ccprintf("Task switching started: %11.6ld s\n", task_start_time);
ccprintf("Time in tasks: %11.6ld s\n",
get_time().val - task_start_time);
ccprintf("Time in exceptions: %11.6ld s\n", exc_total_time);
#endif
return EC_SUCCESS;
}
DECLARE_CONSOLE_COMMAND(taskinfo, command_task_info,
NULL,
"Print task info",
NULL);
static int command_task_ready(int argc, char **argv)
{
if (argc < 2) {
ccprintf("tasks_ready: 0x%08x\n", tasks_ready);
} else {
tasks_ready = strtoi(argv[1], NULL, 16);
ccprintf("Setting tasks_ready to 0x%08x\n", tasks_ready);
__schedule(0, 0);
}
return EC_SUCCESS;
}
DECLARE_CONSOLE_COMMAND(taskready, command_task_ready,
"[setmask]",
"Print/set ready tasks",
NULL);
#ifdef CONFIG_CMD_STACKOVERFLOW
static void stack_overflow_recurse(int n)
{
ccprintf("+%d", n);
/*
* Force task context switch, since that's where we do stack overflow
* checking.
*/
msleep(10);
stack_overflow_recurse(n+1);
/*
* Do work after the recursion, or else the compiler uses tail-chaining
* and we don't actually consume additional stack.
*/
ccprintf("-%d", n);
}
static int command_stackoverflow(int argc, char **argv)
{
ccprintf("Recursing 0,");
stack_overflow_recurse(1);
return EC_SUCCESS;
}
DECLARE_CONSOLE_COMMAND(stackoverflow, command_stackoverflow,
NULL,
"Recurse until stack overflow",
NULL);
#endif /* CONFIG_CMD_STACKOVERFLOW */
void task_pre_init(void)
{
uint32_t *stack_next = (uint32_t *)task_stacks;
int i;
/* Fill the task memory with initial values */
for (i = 0; i < TASK_ID_COUNT; i++) {
uint32_t *sp;
/* Stack size in words */
uint32_t ssize = tasks_init[i].stack_size / 4;
tasks[i].stack = stack_next;
/*
* Update stack used by first frame: 8 words for the normal
* stack, plus 8 for R4-R11. With FP enabled, we need another
* 18 words for S0-S15 and FPCSR and to align to 64-bit.
*/
sp = stack_next + ssize - 16;
tasks[i].sp = (uint32_t)sp;
/* Initial context on stack (see __switchto()) */
sp[8] = tasks_init[i].r0; /* r0 */
sp[13] = (uint32_t)task_exit_trap; /* lr */
sp[14] = tasks_init[i].pc; /* pc */
sp[15] = 0x01000000; /* psr */
/* Fill unused stack; also used to detect stack overflow. */
for (sp = stack_next; sp < (uint32_t *)tasks[i].sp; sp++)
*sp = STACK_UNUSED_VALUE;
stack_next += ssize;
}
/*
* Fill in guard value in scratchpad to prevent stack overflow
* detection failure on the first context switch. This works because
* the first word in the scratchpad is where the switcher will store
* sp, so it's ok to blow away.
*/
((task_ *)scratchpad)->stack = (uint32_t *)scratchpad;
*(uint32_t *)scratchpad = STACK_UNUSED_VALUE;
/* Initialize IRQs */
__nvic_init_irqs();
}
int task_start(void)
{
#ifdef CONFIG_TASK_PROFILING
task_start_time = exc_end_time = get_time().val;
#endif
start_called = 1;
return __task_start(&need_resched_or_profiling);
}
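
To tie the pieces together, a sketch of the task-side view of this scheduler
(the task name, ID, and event bit are placeholders; real boards declare tasks
in their task list): the highest-numbered ready task always wins, since
__svc_handler() picks 31 - __builtin_clz(tasks_ready), and a typical task
simply loops on task_wait_event() while interrupt handlers post events with
task_set_event().

#include <stdint.h>

#include "task.h"

/* Placeholder event bit and task ID; a real board defines its own event
 * bits and gets the ID from its generated task list. */
#define SENSOR_EVENT_READY (1 << 0)
#define TASK_ID_SENSOR 2

/* Typical task body, matching the int (*)(void *) prototype declared by
 * the TASK() macro: block in task_wait_event() until an event arrives. */
int sensor_task(void *unused)
{
	while (1) {
		uint32_t evt = task_wait_event(-1);

		if (evt & SENSOR_EVENT_READY) {
			/* handle the new sample */
		}
	}
}

/* From interrupt context, task_set_event() only marks the receiver ready
 * and sets need_resched_or_profiling; the actual switch happens on the
 * way out of the interrupt, through svc_handler(). */
void sensor_sample_irq(void)
{
	task_set_event(TASK_ID_SENSOR, SENSOR_EVENT_READY, 0);
}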

core/cortex-m0/thumb_case.S (new file, 35 lines)

@@ -0,0 +1,35 @@
/* Copyright (c) 2014 The Chromium OS Authors. All rights reserved.
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*
* Thumb mode toolchain helpers for compact switch/case statement.
*/
#include "config.h"
.text
.syntax unified
.code 16
/*
* Helper for compact switch
*
* r0: the table index
* lr: the table base address
*
* r0 and lr must be PRESERVED.
* r12 can be clobbered.
*/
.global __gnu_thumb1_case_uqi
.thumb_func
__gnu_thumb1_case_uqi:
push {r1}
mov r1, lr
lsrs r1, r1, #1
lsls r1, r1, #1
ldrb r1, [r1, r0]
lsls r1, r1, #1
add lr, lr, r1
pop {r1}
bx lr
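
__gnu_thumb1_case_uqi is the libgcc helper that GCC calls when it lowers a
dense switch statement into a byte-offset jump table on Thumb-1; providing it
here keeps -Os builds self-contained. For illustration, the kind of C that can
produce such a table (whether it actually does depends on the compiler's
heuristics):

/* A dense, compact switch that GCC at -Os may dispatch through a
 * byte-offset table and __gnu_thumb1_case_uqi. Sketch only. */
static int classify(int code)
{
	switch (code) {
	case 0:  return 10;
	case 1:  return 11;
	case 2:  return 12;
	case 3:  return 13;
	case 4:  return 14;
	case 5:  return 15;
	case 6:  return 16;
	case 7:  return 17;
	default: return -1;
	}
}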

core/cortex-m0/watchdog.c (new file, 40 lines)

@@ -0,0 +1,40 @@
/* Copyright (c) 2014 The Chromium OS Authors. All rights reserved.
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
/* Watchdog common code */
#include "common.h"
#include "panic.h"
#include "task.h"
#include "timer.h"
#include "uart.h"
#include "watchdog.h"
void watchdog_trace(uint32_t excep_lr, uint32_t excep_sp)
{
uint32_t psp;
uint32_t *stack;
asm("mrs %0, psp" : "=r"(psp));
if ((excep_lr & 0xf) == 1) {
/* we were already in exception context */
stack = (uint32_t *)excep_sp;
} else {
/* we were in task context */
stack = (uint32_t *)psp;
}
panic_printf("### WATCHDOG PC=%08x / LR=%08x / pSP=%08x ",
stack[6], stack[5], psp);
if ((excep_lr & 0xf) == 1)
panic_puts("(exc) ###\n");
else
panic_printf("(task %d) ###\n", task_get_current());
/* If we are blocked in a high-priority interrupt handler, the following
* debug messages might not appear, but they are useless in that situation. */
timer_print_info();
task_print_list();
}
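
watchdog_trace() expects the chip's watchdog early-warning interrupt to pass
in the exception LR (to tell handler context from task context) and the stack
pointer holding the exception frame. A heavily hedged sketch of such a
chip-side caller; the handler name and the wiring to the real watchdog IRQ are
chip-specific and not part of this file:

#include "watchdog.h"

/* Hypothetical naked warning handler: capture EXC_RETURN (lr) and the
 * current sp, then let watchdog_trace() dig the interrupted PC/LR out of
 * the exception frame and print them. */
void watchdog_warning_irq(void) __attribute__((naked));
void watchdog_warning_irq(void)
{
	asm volatile("mov r0, lr\n"
		     "mov r1, sp\n"
		     "push {lr}\n"
		     "bl watchdog_trace\n"
		     "pop {pc}\n");
}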