Subject: DWARF2 EH-frame based stack unwinding
From: jbeulich@novell.com
Patch-mainline: no

This includes reverting f1883f86dea84fe47a71a39fc1afccc005915ed8.

Update Jan 17 2009 jeffm:
- Something in 2.6.29-rc1 changed the frame pointer code, so I fixed that
  up to match.
Update Jul 02 2010 jbeulich:
- Fix after upstream commit 9e565292270a2d55524be38835104c564ac8f795.

Automatically created from "patches.suse/stack-unwind" by xen-port-patches.py
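
For orientation, here is the interface the two arch_unwind_init_running
implementations below appear to satisfy. This is a hedged sketch: the
authoritative declaration lives in the base patches.suse/stack-unwind
patch, and the argument names and exact types are assumptions inferred
from the register shuffling in the assembly:

	/* Hypothetical sketch: snapshot the current register state into
	 * *frame, then tail-jump to callback(frame, arg3, arg4).  Because
	 * the callback is entered by a jump rather than a call, its return
	 * goes directly back to arch_unwind_init_running()'s caller. */
	asmlinkage void arch_unwind_init_running(struct unwind_frame_info *frame,
		asmlinkage void (*callback)(struct unwind_frame_info *frame,
					    void *arg3, void *arg4),
		void *arg3, void *arg4);
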
--- head-2011-02-17.orig/arch/x86/include/mach-xen/asm/system.h	2011-03-03 16:11:05.000000000 +0100
+++ head-2011-02-17/arch/x86/include/mach-xen/asm/system.h	2011-03-03 16:13:47.000000000 +0100
@@ -124,12 +124,22 @@ do { \
#define __switch_canary_iparam
#endif /* CC_STACKPROTECTOR */

+/* The stack unwind code needs this, but it pollutes traces otherwise */
+#ifdef CONFIG_UNWIND_INFO
+#define THREAD_RETURN_SYM \
+	".globl thread_return\n" \
+	"thread_return:\n\t"
+#else
+#define THREAD_RETURN_SYM
+#endif
+
/* Save restore flags to clear handle leaking NT */
#define switch_to(prev, next, last) \
	asm volatile(SAVE_CONTEXT \
	     "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \
	     "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \
	     "call __switch_to\n\t" \
+	     THREAD_RETURN_SYM \
	     "movq "__percpu_arg([current_task])",%%rsi\n\t" \
	     __switch_canary \
	     "movq %P[thread_info](%%rsi),%%r8\n\t" \
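
The thread_return label exported above marks the instruction at which a
task resumes execution after switch_to(), so the saved stack pointer of
any sleeping task belongs to a frame whose instruction pointer can be
taken to be thread_return. A minimal sketch of how an unwinder might
seed a trace of a sleeping task from it (hedged: unwind_frame_info, its
regs member and thread.sp are illustrative names, not definitions taken
from this patch):

	extern char thread_return[];	/* emitted by THREAD_RETURN_SYM */

	/* Hypothetical sketch: seed the first frame of a sleeping task. */
	static void seed_sleeping_task(struct task_struct *task,
				       struct unwind_frame_info *frame)
	{
		frame->regs.ip = (unsigned long)thread_return;
		frame->regs.sp = task->thread.sp;	/* saved by switch_to() */
	}
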
--- head-2011-02-17.orig/arch/x86/kernel/entry_32-xen.S	2011-02-01 15:41:35.000000000 +0100
+++ head-2011-02-17/arch/x86/kernel/entry_32-xen.S	2011-02-02 15:07:22.000000000 +0100
@@ -1225,6 +1225,41 @@ END(spurious_interrupt_bug)
*/
.popsection

+#ifdef CONFIG_STACK_UNWIND
+ENTRY(arch_unwind_init_running)
+	CFI_STARTPROC
+	movl 4(%esp), %edx		# %edx = arg1: register frame to fill in
+	movl (%esp), %ecx		# %ecx = return address, the caller's EIP
+	leal 4(%esp), %eax		# %eax = ESP as it will be after returning
+	movl %ebx, PT_EBX(%edx)		# save %ebx; reused as zero source, restored below
+	xorl %ebx, %ebx			# %ebx = 0 for the cleared slots
+	movl %ebx, PT_ECX(%edx)
+	movl %ebx, PT_EDX(%edx)
+	movl %esi, PT_ESI(%edx)
+	movl %edi, PT_EDI(%edx)
+	movl %ebp, PT_EBP(%edx)
+	movl %ebx, PT_EAX(%edx)
+	movl $__USER_DS, PT_DS(%edx)
+	movl $__USER_DS, PT_ES(%edx)
+	movl $__KERNEL_PERCPU, PT_FS(%edx)
+	movl $__KERNEL_STACK_CANARY, PT_GS(%edx)
+	movl %eax, PT_OLDESP(%edx)	# record the caller's stack pointer
+	movl 16(%esp), %eax		# %eax = arg4
+	movl %ebx, PT_ORIG_EAX(%edx)
+	movl %ecx, PT_EIP(%edx)		# unwinding starts at the call site
+	movl 12(%esp), %ecx		# %ecx = arg3
+	movl $__KERNEL_CS, PT_CS(%edx)
+	movl %eax, 12(%esp)		# rotate args: arg4 into the third-arg slot
+	movl 8(%esp), %eax		# %eax = arg2, the callback to enter
+	movl %ecx, 8(%esp)		# rotate args: arg3 into the second-arg slot
+	movl %ebx, PT_EFLAGS(%edx)
+	movl PT_EBX(%edx), %ebx		# restore the saved %ebx
+	movl $__KERNEL_DS, PT_OLDSS(%edx)
+	jmpl *%eax			# callback(frame, arg3, arg4); its ret returns to our caller
+	CFI_ENDPROC
+ENDPROC(arch_unwind_init_running)
+#endif
+
ENTRY(kernel_thread_helper)
	pushl $0			# fake return address for unwinder
	CFI_STARTPROC
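
Together with the interface sketched in the header, the helper above can
be used along these lines. This is a hedged sketch: unwind(), UNW_PC()
and the callback signature are modeled on the base stack-unwind patch's
unwinder API, not taken from this diff:

	/* Hypothetical callback: walk and print the captured frames. */
	static asmlinkage void dump_cb(struct unwind_frame_info *frame,
				       void *arg3, void *arg4)
	{
		while (unwind(frame) == 0)
			printk(KERN_DEBUG "  [<%p>]\n", (void *)UNW_PC(frame));
	}

	static void dump_current_stack(void)
	{
		struct unwind_frame_info frame;

		/* Tail-jumps to dump_cb(&frame, NULL, NULL); when the
		 * callback returns, control comes straight back here. */
		arch_unwind_init_running(&frame, dump_cb, NULL, NULL);
	}
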
--- head-2011-02-17.orig/arch/x86/kernel/entry_64-xen.S	2011-02-01 15:41:35.000000000 +0100
+++ head-2011-02-17/arch/x86/kernel/entry_64-xen.S	2011-02-02 15:07:22.000000000 +0100
@@ -1135,6 +1135,40 @@ ENTRY(call_softirq)
CFI_ENDPROC
END(call_softirq)

+#ifdef CONFIG_STACK_UNWIND
+ENTRY(arch_unwind_init_running)
+	CFI_STARTPROC
+	movq %r15, R15(%rdi)		# %rdi = arg1: frame; callee-saved regs as-is
+	movq %r14, R14(%rdi)
+	xchgq %rsi, %rdx		# %rsi = arg3, %rdx = arg2 (the callback)
+	movq %r13, R13(%rdi)
+	movq %r12, R12(%rdi)
+	xorl %eax, %eax			# %rax = 0 for the cleared slots
+	movq %rbp, RBP(%rdi)
+	movq %rbx, RBX(%rdi)
+	movq (%rsp), %r9		# %r9 = return address, the caller's RIP
+	xchgq %rdx, %rcx		# %rcx = callback, %rdx = arg4
+	movq %rax, R11(%rdi)
+	movq %rax, R10(%rdi)
+	movq %rax, R9(%rdi)
+	movq %rax, R8(%rdi)
+	movq %rax, RAX(%rdi)
+	movq %rax, RCX(%rdi)
+	movq %rax, RDX(%rdi)
+	movq %rax, RSI(%rdi)
+	movq %rax, RDI(%rdi)
+	movq %rax, ORIG_RAX(%rdi)
+	movq %r9, RIP(%rdi)		# unwinding starts at the call site
+	leaq 8(%rsp), %r9		# %r9 = RSP as it will be after returning
+	movq $__KERNEL_CS, CS(%rdi)
+	movq %rax, EFLAGS(%rdi)
+	movq %r9, RSP(%rdi)
+	movq $__KERNEL_DS, SS(%rdi)
+	jmpq *%rcx			# callback(frame, arg3, arg4); its ret returns to our caller
+	CFI_ENDPROC
+END(arch_unwind_init_running)
+#endif
+
/*
 * Some functions should be protected against kprobes
 */
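
The 64-bit variant reaches the same state purely through register moves:
the two xchgq instructions rearrange the x86-64 SysV argument registers
so that the final jmpq enters the callback with its arguments already in
place, and because a jump (unlike a call) leaves the original return
address on the stack, the callback's ret goes straight back to
arch_unwind_init_running()'s caller. A worked trace of the shuffle (the
argument roles are the assumptions noted in the header):

	/*
	 * entry:              %rdi=frame  %rsi=callback  %rdx=arg3  %rcx=arg4
	 * xchgq %rsi,%rdx --> %rdi=frame  %rsi=arg3      %rdx=callback
	 * xchgq %rdx,%rcx --> %rdi=frame  %rsi=arg3      %rdx=arg4  %rcx=callback
	 * jmpq  *%rcx     --> callback(frame, arg3, arg4)
	 */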