
feat(core): add pc to systask_postmortem_info

[no changelog]
cepetr 2025-06-06 11:34:46 +02:00 committed by cepetr
parent ce3eae95a4
commit 09a4b2dca0
2 changed files with 122 additions and 52 deletions

View File

@@ -50,6 +50,8 @@ typedef struct {
   // Address associated with the SecureFault
   uint32_t sfar;
 #endif
+  // PC (return address) at the time of the fault
+  uint32_t pc;
   // Stack pointer at the time of the fault
   // (MSP or PSP depending on the privilege level)
   uint32_t sp;
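
The new pc field gives the registered error handler the faulting return address in addition to the stack pointer. A minimal sketch of a consumer, assuming the systask_error_handler_t callback receives a pointer to the postmortem info and that a dbg_printf-style logger exists (both assumptions, not part of this commit):

static void my_error_handler(const systask_postmortem_info_t* pminfo) {
  if (pminfo->reason == TASK_TERM_REASON_FAULT) {
    // pc is reported as 0 when the stacked frame could not be read safely
    dbg_printf("fault: pc=0x%08x sp=0x%08x\n", (unsigned)pminfo->fault.pc,
               (unsigned)pminfo->fault.sp);
  }
}
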
@@ -133,6 +135,11 @@ typedef struct {
   // Applet bound to the task
   void* applet;
 
+  // Original stack base
+  uint32_t stack_base;
+  // Original stack end
+  uint32_t stack_end;
+
   // Set if the task is processing the kernel callback
   bool in_callback;
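
Recording the original stack bounds in systask_t lets the kernel validate stack-derived pointers later, as get_return_addr below does. A hypothetical helper built on the new fields, for illustration only:

// Illustrative helper, not part of this commit: true if addr lies within
// the task's original stack region [stack_base, stack_end).
static inline bool systask_stack_contains(const systask_t* task,
                                          uint32_t addr) {
  return addr >= task->stack_base && addr < task->stack_end;
}
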
@@ -155,7 +162,7 @@ void systask_yield_to(systask_t* task);
 // Initializes a task with the given stack pointer, stack size
 //
 // The task must not be running when the function is called
-bool systask_init(systask_t* task, uint32_t stack_ptr, uint32_t stack_size,
+bool systask_init(systask_t* task, uint32_t stack_base, uint32_t stack_size,
                   void* context);
 
 // Returns true if the task is alive (not terminated, killed or crashed)
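
Renaming stack_ptr to stack_base makes it explicit that the argument is the lowest address of the stack region, not an initial stack pointer. A hypothetical call site, assuming a static, 8-byte-aligned buffer:

static uint8_t app_stack[8192] __attribute__((aligned(8)));
static systask_t app_task;

// The task's initial SP becomes (uint32_t)app_stack + sizeof(app_stack).
bool ok = systask_init(&app_task, (uint32_t)app_stack, sizeof(app_stack),
                       NULL /* context/applet pointer */);
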

View File

@@ -70,6 +70,8 @@ static systask_scheduler_t g_systask_scheduler = {
     .kernel_task = {
         .sp_lim = (uint32_t)&_stack_section_start,
         .id = 0,  // Kernel task ID == 0
+        .stack_base = (uint32_t)&_stack_section_start,
+        .stack_end = (uint32_t)&_stack_section_end,
     }};
 
 void systask_scheduler_init(systask_error_handler_t error_handler) {
@@ -83,6 +85,8 @@ void systask_scheduler_init(systask_error_handler_t error_handler) {
   scheduler->task_id_map = 0x00000001;  // Kernel task is always present
   scheduler->kernel_task.sp_lim = (uint32_t)&_stack_section_start;
+  scheduler->kernel_task.stack_base = (uint32_t)&_stack_section_start;
+  scheduler->kernel_task.stack_end = (uint32_t)&_stack_section_end;
 
   // SVCall priority should be the lowest since it is
   // generally a blocking operation
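
_stack_section_start and _stack_section_end are symbols emitted by the linker script to delimit the kernel (main) stack; only their addresses are meaningful. They are typically declared along these lines (a sketch; the actual declarations live elsewhere in the tree):

// Linker-script symbols: take the address, never read the value.
extern uint8_t _stack_section_start;
extern uint8_t _stack_section_end;
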
@@ -146,7 +150,7 @@ static systask_id_t systask_get_unused_id(void) {
   return id;
 }
 
-bool systask_init(systask_t* task, uint32_t stack_ptr, uint32_t stack_size,
+bool systask_init(systask_t* task, uint32_t stack_base, uint32_t stack_size,
                   void* applet) {
   systask_id_t id = systask_get_unused_id();
   if (id >= SYSTASK_MAX_TASKS) {
@@ -154,8 +158,8 @@ bool systask_init(systask_t* task, uint32_t stack_ptr, uint32_t stack_size,
   }
 
   memset(task, 0, sizeof(systask_t));
-  task->sp = stack_ptr + stack_size;
-  task->sp_lim = stack_size > 1024 ? stack_ptr + 256 : stack_ptr;
+  task->sp = stack_base + stack_size;
+  task->sp_lim = stack_size > 1024 ? stack_base + 256 : stack_base;
 #if !defined(__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE == 3U)
   task->exc_return = 0xFFFFFFED;  // Secure Thread mode, use PSP, pop FP context
 #else
@@ -163,6 +167,8 @@ bool systask_init(systask_t* task, uint32_t stack_ptr, uint32_t stack_size,
 #endif
   task->id = id;
   task->mpu_mode = MPU_MODE_APP;
+  task->stack_base = stack_base;
+  task->stack_end = stack_base + stack_size;
   task->applet = applet;
 
   // Notify all event sources about the task creation
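
Concretely, for an illustrative task with stack_base = 0x20010000 and stack_size = 0x1000 (larger than 1024, so a 256-byte guard is reserved above the base):

//   task->sp         = 0x20010000 + 0x1000 = 0x20011000  (initial top of stack)
//   task->sp_lim     = 0x20010000 + 256    = 0x20010100  (stack limit)
//   task->stack_base = 0x20010000
//   task->stack_end  = 0x20011000
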
@@ -411,11 +417,68 @@ void systask_exit_fatal(systask_t* task, const char* message,
   systask_kill(task);
 }
 
+static uint32_t get_return_addr(bool secure, bool privileged, uint32_t sp) {
+  // Ensure the stack pointer is aligned to 8 bytes (required for a
+  // valid exception frame). If it isn't aligned, we can't reliably index
+  // into the stacked registers.
+  if (!IS_ALIGNED(sp, 8)) {
+    return 0;
+  }
+
+  // Get the pointer to the return address in the stack frame.
+  uint32_t* ret_addr = &((uint32_t*)sp)[STK_FRAME_RET_ADDR];
+
+  // Verify that ret_addr is in a readable region for
+  // the context that caused the exception.
+#if defined(__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U)
+  // In Secure-Monitor mode, use CMSE intrinsics to check:
+  // - CMSE_MPU_READ indicates we only need read access
+  // - CMSE_MPU_UNPRIV if the fault originated from an unprivileged context
+  // - CMSE_NONSECURE if the fault originated from Non-Secure state
+  uint32_t flags = CMSE_MPU_READ;
+  if (!privileged) {
+    flags |= CMSE_MPU_UNPRIV;
+  }
+  if (!secure) {
+    flags |= CMSE_NONSECURE;
+  }
+  if (!cmse_check_address_range(ret_addr, sizeof(uint32_t), flags)) {
+    return 0;
+  }
+#else
+  systask_scheduler_t* sched = &g_systask_scheduler;
+  systask_t* task = privileged ? &sched->kernel_task : sched->active_task;
+  // Check if the pointer is inside the current task's stack boundaries.
+  if (ret_addr < (uint32_t*)task->stack_base ||
+      ret_addr >= (uint32_t*)(task->stack_end)) {
+    return 0;
+  }
+#endif
+
+  return *ret_addr;
+}
+
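
STK_FRAME_RET_ADDR indexes the return address within the frame that Cortex-M hardware pushes on exception entry. The macro is defined elsewhere in the tree; architecturally the basic (non-FP) frame is eight words, which puts the return address at index 6:

// Hardware-stacked exception frame (basic frame, no FP state):
//   sp[0]=R0   sp[1]=R1   sp[2]=R2   sp[3]=R3
//   sp[4]=R12  sp[5]=LR   sp[6]=return address   sp[7]=xPSR
#define STK_FRAME_RET_ADDR 6  // assumed definition, matching the layout above
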
 // Terminate active task from fault/exception handler
-__attribute((used)) static void systask_exit_fault(bool privileged,
-                                                   uint32_t sp) {
+__attribute((used)) static void systask_exit_fault(uint32_t msp,
+                                                   uint32_t exc_return) {
   mpu_mode_t mpu_mode = mpu_reconfig(MPU_MODE_DEFAULT);
 
+  bool privileged = (exc_return & 0x4) == 0;
+  uint32_t sp = privileged ? msp : __get_PSP();
+
+#if defined(__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U)
+  bool secure = (exc_return & 0x40) != 0;
+  if (!secure) {
+    bool handler_mode = (exc_return & 0x8) == 0;
+    bool msp_used = (__TZ_get_CONTROL_NS() & CONTROL_SPSEL_Msk) == 0;
+    privileged = handler_mode || msp_used;
+    sp = privileged ? __TZ_get_MSP_NS() : __TZ_get_PSP_NS();
+  }
+#else
+  bool secure = false;
+#endif
+
   systask_scheduler_t* scheduler = &g_systask_scheduler;
 
   systask_t* task =
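
The decoding above relies on the architectural EXC_RETURN encoding. A reference sketch of the bits being tested (the macro names are illustrative; the bit positions are architectural):

#define EXC_RETURN_SPSEL (1UL << 2)  // 0 => frame on MSP, 1 => frame on PSP
#define EXC_RETURN_MODE  (1UL << 3)  // 0 => Handler mode, 1 => Thread mode
#define EXC_RETURN_S     (1UL << 6)  // 0 => Non-secure stacks (ARMv8-M only)

// e.g.:  privileged = (exc_return & EXC_RETURN_SPSEL) == 0;
//        secure     = (exc_return & EXC_RETURN_S) != 0;
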
@@ -429,6 +492,7 @@ __attribute((used)) static void systask_exit_fault(bool privileged,
   if (pminfo->reason != TASK_TERM_REASON_FAULT) {
     pminfo->reason = TASK_TERM_REASON_FAULT;
     pminfo->privileged = privileged;
+    pminfo->fault.pc = get_return_addr(secure, privileged, sp);
     pminfo->fault.sp = sp;
 #if !(defined(__ARM_ARCH_8M_MAIN__) || defined(__ARM_ARCH_8M_BASE__))
     pminfo->fault.sp_lim = task->sp_lim;
@@ -658,29 +722,25 @@ __attribute__((naked, no_stack_protector)) void HardFault_Handler(void) {
   // we set the stack pointer to the end of the stack.
 
   __asm__ volatile(
-      "MRS R1, MSP            \n"  // R1 = MSP
-      "LDR R0, =%[estack]     \n"  // Reset main stack
-      "MSR MSP, R0            \n"  //
-      "MOV R0, #1             \n"  // R0 = 1 (Privileged)
+      "MRS R0, MSP            \n"  // R0 = MSP
+      "LDR R1, =%[estack]     \n"  // Reset main stack
+      "MSR MSP, R1            \n"  //
+      "MOV R1, LR             \n"  // R1 = EXC_RETURN code
       "B systask_exit_fault   \n"  // Exit task with fault
       :
       : [estack] "i"(&_stack_section_end)
-      : "memory");
+      :);
 }
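
All the rewritten handlers follow the same pattern: capture MSP and EXC_RETURN while still in assembly, then branch to the C routine, relying on the AAPCS rule that the first two integer arguments are passed in R0 and R1:

// R0 -> msp, R1 -> exc_return per AAPCS (signature as in the diff above).
static void systask_exit_fault(uint32_t msp, uint32_t exc_return);
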
 __attribute__((naked, no_stack_protector)) void MemManage_Handler(void) {
   __asm__ volatile(
-      "TST LR, #0x4           \n"  // Return stack (1=>PSP, 0=>MSP)
-      "ITTEE EQ               \n"
-      "MOVEQ R0, #1           \n"  // R0 = 1 (Privileged)
-      "MRSEQ R1, MSP          \n"  // R1 = MSP
-      "MOVNE R0, #0           \n"  // R0 = 0 (Unprivileged)
-      "MRSNE R1, PSP          \n"  // R1 = PSP
+      "MRS R0, MSP            \n"  // R0 = MSP
+      "MOV R1, LR             \n"  // R1 = EXC_RETURN code
 #if !(defined(__ARM_ARCH_8M_MAIN__) || defined(__ARM_ARCH_8M_BASE__))
-      "CMP R0, #0             \n"
+      "TST LR, #0x4           \n"  // Return stack (1=>PSP, 0=>MSP)
       "BEQ 1f                 \n"  // Skip stack ptr checking for PSP
       "LDR R2, =%[sstack]     \n"
-      "CMP R1, R2             \n"  // Check if PSP is below the stack
+      "CMP R0, R2             \n"  // Check if PSP is below the stack
       "ITT LO                 \n"  // base
       "LDRLO R2, =%[estack]   \n"
       "MSRLO MSP, R2          \n"  // Reset MSP
@@ -691,73 +751,76 @@ __attribute__((naked, no_stack_protector)) void MemManage_Handler(void) {
       : [estack] "i"(&_stack_section_end), [sstack] "i"(
                                                (uint32_t)&_stack_section_start +
                                                256)
-      : "memory");
+      :);
 }
 __attribute__((naked, no_stack_protector)) void BusFault_Handler(void) {
   __asm__ volatile(
-      "TST LR, #0x4           \n"  // Return stack (1=>PSP, 0=>MSP)
-      "ITTEE EQ               \n"
-      "MOVEQ R0, #1           \n"  // R0 = 1 (Privileged)
-      "MRSEQ R1, MSP          \n"  // R1 = MSP
-      "MOVNE R0, #0           \n"  // R0 = 0 (Unprivileged)
-      "MRSNE R1, PSP          \n"  // R1 = PSP
+      "MRS R0, MSP            \n"  // R0 = MSP
+      "MOV R1, LR             \n"  // R1 = EXC_RETURN code
       "B systask_exit_fault   \n"  // Exit task with fault
   );
 }
 __attribute__((naked, no_stack_protector)) void UsageFault_Handler(void) {
   __asm__ volatile(
+      "MRS R0, MSP            \n"  // R0 = MSP
+      "MOV R1, LR             \n"  // R1 = EXC_RETURN code
       "TST LR, #0x4           \n"  // Return stack (1=>PSP, 0=>MSP)
-      "ITTT NE                \n"
-      "MOVNE R0, #0           \n"  // R0 = 0 (Unprivileged)
-      "MRSNE R1, PSP          \n"  // R1 = PSP
       "BNE systask_exit_fault \n"  // Exit task with fault
 #if defined(__ARM_ARCH_8M_MAIN__) || defined(__ARM_ARCH_8M_BASE__)
-      "MRS R1, MSP            \n"  // R1 = MSP
-      "LDR R0, =0xE000ED28    \n"  // SCB->CFSR
-      "LDR R0, [R0]           \n"
-      "TST R0, #0x100000      \n"  // STKOF bit set?
+      "LDR R2, =0xE000ED28    \n"  // SCB->CFSR
+      "LDR R2, [R2]           \n"
+      "TST R2, #0x100000      \n"  // STKOF bit set?
       "ITT NE                 \n"
-      "LDRNE R0, =%[estack]   \n"  // Reset main stack in case of stack
-      "MSRNE MSP, R0          \n"  // overflow
+      "LDRNE R2, =%[estack]   \n"  // Reset main stack in case of stack
+      "MSRNE MSP, R2          \n"  // overflow
 #endif
-      "MOV R0, #1             \n"  // R0 = 1 (Privileged)
       "B systask_exit_fault   \n"  // Exit task with fault
       :
       : [estack] "i"(&_stack_section_end)
-      : "memory");
+      :);
 }
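
Here 0xE000ED28 is the address of SCB->CFSR, and bit 20 (0x100000) is the UFSR STKOF flag that ARMv8-M sets on a stack-limit violation; the test must stay in assembly because MSP itself may be unusable after an overflow. The C equivalent of the check, assuming the device's CMSIS header (which defines SCB and SCB_CFSR_STKOF_Msk) is included:

// True if the UsageFault was caused by a stack-limit (STKOF) violation.
bool stack_overflow = (SCB->CFSR & SCB_CFSR_STKOF_Msk) != 0;
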
 #if defined(__ARM_ARCH_8M_MAIN__) || defined(__ARM_ARCH_8M_BASE__)
 __attribute__((naked, no_stack_protector)) void SecureFault_Handler(void) {
   __asm__ volatile(
-      "TST LR, #0x4           \n"  // Return stack (1=>PSP, 0=>MSP)
-      "ITTEE EQ               \n"
-      "MOVEQ R0, #1           \n"  // R0 = 1 (Privileged)
-      "MRSEQ R1, MSP          \n"  // R1 = MSP
-      "MOVNE R0, #0           \n"  // R0 = 0 (Unprivileged)
-      "MRSNE R1, PSP          \n"  // R1 = PSP
+      "MRS R0, MSP            \n"  // R0 = MSP
+      "MOV R1, LR             \n"  // R1 = EXC_RETURN code
       "B systask_exit_fault   \n"  // Exit task with fault
   );
 }
 #endif
 #ifdef STM32U5
-void GTZC_IRQHandler(void) { systask_exit_fault(true, __get_MSP()); }
+__attribute__((naked, no_stack_protector)) void GTZC_IRQHandler(void) {
+  __asm__ volatile(
+      "MRS R0, MSP            \n"  // R0 = MSP
+      "MOV R1, LR             \n"  // R1 = EXC_RETURN code
+      "B systask_exit_fault   \n"  // Exit task with fault
+  );
+}
 #endif
-void NMI_Handler(void) {
-  mpu_mode_t mpu_mode = mpu_reconfig(MPU_MODE_DEFAULT);
+__attribute__((no_stack_protector, used)) static void nmi_handler(
+    uint32_t msp, uint32_t exc_return) {
+  mpu_reconfig(MPU_MODE_DEFAULT);
+
+  // Clear pending Clock security interrupt flag
 #ifdef STM32U5
-  if ((RCC->CIFR & RCC_CIFR_CSSF) != 0) {
+  RCC->CICR = RCC_CICR_CSSC;
 #else
-  if ((RCC->CIR & RCC_CIR_CSSF) != 0) {
+  RCC->CIR = RCC_CIR_CSSC;
 #endif
-    // Clock Security System triggered NMI
-    systask_exit_fault(true, __get_MSP());
-  }
-  mpu_restore(mpu_mode);
+
+  systask_exit_fault(msp, exc_return);
+}
+
+__attribute__((no_stack_protector)) void NMI_Handler(void) {
+  __asm__ volatile(
+      "MRS R0, MSP            \n"  // R0 = MSP
+      "MOV R1, LR             \n"  // R1 = EXC_RETURN code
+      "B nmi_handler          \n"  // nmi_handler in C
+  );
 }
 void Default_IRQHandler(void) { error_shutdown("Unhandled IRQ"); }