From 321e1e85458876248c65149ed690130952ec8042 Mon Sep 17 00:00:00 2001
From: Tao Liu
Date: Wed, 14 Aug 2024 11:25:24 +1200
Subject: [PATCH] Fix a segfault issue due to the incorrect irq_stack_size on ARM64

See the following stack trace:

(gdb) bt
#0  0x00005635ac2b166b in arm64_unwind_frame (frame=0x7ffdaf35cb70, bt=0x7ffdaf35d430) at arm64.c:2821
#1  arm64_back_trace_cmd (bt=0x7ffdaf35d430) at arm64.c:3306
#2  0x00005635ac27b108 in back_trace (bt=bt@entry=0x7ffdaf35d430) at kernel.c:3239
#3  0x00005635ac2880ae in cmd_bt () at kernel.c:2863
#4  0x00005635ac1f16dc in exec_command () at main.c:893
#5  0x00005635ac1f192a in main_loop () at main.c:840
#6  0x00005635ac50df81 in captured_main (data=<optimized out>) at main.c:1284
#7  gdb_main (args=<optimized out>) at main.c:1313
#8  0x00005635ac50e000 in gdb_main_entry (argc=<optimized out>, argv=<optimized out>) at main.c:1338
#9  0x00005635ac1ea2a5 in main (argc=5, argv=0x7ffdaf35dde8) at main.c:721

The issue may be encountered when the thread_union symbol is not found
in vmlinux due to compiler optimization. When the thread_union symbol
is unavailable, this patch tries the following two methods to get the
irq_stack_size:

1. Recompute the thread shift when vmcoreinfo is available, accounting
   for KASAN being enabled. In arch/arm64/include/asm/memory.h:

   #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
   ...
   #define IRQ_STACK_SIZE THREAD_SIZE

   Since enabling KASAN affects the final value, this patch recomputes
   IRQ_STACK_SIZE by following the calculation process in the kernel
   code.

2. Get the value from the kernel code disassembly, reading THREAD_SHIFT
   directly from the tbnz instruction. In arch/arm64/kernel/entry.S:

   .macro kernel_ventry, el:req, ht:req, regsize:req, label:req
   ...
   add sp, sp, x0
   sub x0, sp, x0
   tbnz x0, #THREAD_SHIFT, 0f

   $ gdb vmlinux
   (gdb) disass vectors
   Dump of assembler code for function vectors:
   ...
   0xffff800080010804 <+4>:  add  sp, sp, x0
   0xffff800080010808 <+8>:  sub  x0, sp, x0
   0xffff80008001080c <+12>: tbnz w0, #16, 0xffff80008001081c
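For reference, the calculation in method 1 can be condensed into a small
standalone sketch. The function irq_stack_size() and the sample
configurations below are illustrative only and are not part of this
patch:

#include <stdio.h>

/* Mirrors the THREAD_SHIFT/IRQ_STACK_SIZE derivation in
 * arch/arm64/include/asm/memory.h: the base shift is 14, KASAN adds 1,
 * and VMAP_STACK rounds the shift up to PAGE_SHIFT. */
static unsigned long irq_stack_size(int kasan, int vmap_stack, int page_shift)
{
	int min_thread_shift = 14;		/* MIN_THREAD_SHIFT base */
	int thread_shift;

	if (kasan)				/* KASAN_THREAD_SHIFT == 1 */
		min_thread_shift += 1;

	if (vmap_stack && min_thread_shift < page_shift)
		thread_shift = page_shift;	/* THREAD_SHIFT = PAGE_SHIFT */
	else
		thread_shift = min_thread_shift;

	return 1UL << thread_shift;		/* IRQ_STACK_SIZE = THREAD_SIZE */
}

int main(void)
{
	/* 4K pages, KASAN and VMAP_STACK enabled: 1UL << 15 = 32768 */
	printf("%lu\n", irq_stack_size(1, 1, 12));
	/* 64K pages, VMAP_STACK only: 1UL << 16 = 65536 */
	printf("%lu\n", irq_stack_size(0, 1, 16));
	return 0;
}

This is why the patch checks for the kasan_enable_current symbol and the
task_struct.stack_vm_area member: they act as runtime proxies for
CONFIG_KASAN_* and CONFIG_VMAP_STACK.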
Signed-off-by: yeping.zheng
Improved-by: Tao Liu
---
 arm64.c | 58 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 56 insertions(+), 2 deletions(-)

diff --git a/arm64.c b/arm64.c
index 8ed1aaf..a9884a8 100644
--- a/arm64.c
+++ b/arm64.c
@@ -95,6 +95,7 @@ static int arm64_is_uvaddr(ulong, struct task_context *);
 static void arm64_calc_KERNELPACMASK(void);
 static void arm64_recalc_KERNELPACMASK(void);
 static int arm64_get_vmcoreinfo(unsigned long *vaddr, const char *label, int base);
+static ulong arm64_set_irq_stack_size(void);
 
 struct kernel_range {
 	unsigned long modules_vaddr, modules_end;
@@ -2355,8 +2356,10 @@ arm64_irq_stack_init(void)
 	if (MEMBER_EXISTS("thread_union", "stack")) {
 		if ((sz = MEMBER_SIZE("thread_union", "stack")) > 0)
 			ms->irq_stack_size = sz;
-	} else
-		ms->irq_stack_size = ARM64_IRQ_STACK_SIZE;
+	} else {
+		ulong res = arm64_set_irq_stack_size();
+		ms->irq_stack_size = (res > 0) ? res : ARM64_IRQ_STACK_SIZE;
+	}
 
 	machdep->flags |= IRQ_STACKS;
 
@@ -5073,6 +5076,57 @@ static void arm64_recalc_KERNELPACMASK(void){
 	}
 }
 
+static ulong arm64_set_irq_stack_size(void)
+{
+	int min_thread_shift = 14;
+	ulong thread_shift = 0;
+	char buf1[BUFSIZE];
+	char *pos1, *pos2;
+	int errflag = 0;
+
+	if (kernel_symbol_exists("vmcoreinfo_data") &&
+	    kernel_symbol_exists("vmcoreinfo_size")) {
+		/*
+		 * Referring to arch/arm64/include/asm/memory.h
+		 */
+		if (kernel_symbol_exists("kasan_enable_current"))
+			min_thread_shift += 1;
+
+		if (MEMBER_EXISTS("task_struct", "stack_vm_area") &&
+		    (min_thread_shift < machdep->pageshift))
+			thread_shift = machdep->pageshift;
+		else
+			thread_shift = min_thread_shift;
+	} else {
+		sprintf(buf1, "x/32i vectors");
+		open_tmpfile();
+		if (!gdb_pass_through(buf1, pc->tmpfile, GNU_RETURN_ON_ERROR))
+			goto out;
+
+		rewind(pc->tmpfile);
+		while (fgets(buf1, BUFSIZE, pc->tmpfile)) {
+			if ((pos1 = strstr(buf1, "tbnz"))) {
+				if ((pos2 = strchr(pos1, '#'))) {
+					pos2 += 1;
+					for (pos1 = pos2; *pos2 != '\0' && *pos2 != ','; pos2++);
+					*pos2 = '\0';
+					thread_shift = stol(pos1, RETURN_ON_ERROR|QUIET, &errflag);
+					if (errflag)
+						thread_shift = 0;
+					break;
+				}
+			}
+		}
+out:
+		close_tmpfile();
+	}
+
+	if (thread_shift)
+		return ((1UL) << thread_shift);
+
+	return 0;
+}
+
 #endif /* ARM64 */
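As a side note for reviewers, the tbnz-immediate extraction in method 2
can be exercised outside of crash with a standalone sketch; the function
parse_tbnz_shift() and the hard-coded input line are illustrative only,
with strtol() standing in for crash's stol():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Pull the "#<imm>" operand out of one line of disassembly, e.g.
 *   0xffff80008001080c <+12>: tbnz w0, #16, 0xffff80008001081c
 * The immediate of this tbnz is THREAD_SHIFT. */
static long parse_tbnz_shift(const char *line)
{
	const char *p = strstr(line, "tbnz");

	if (!p || !(p = strchr(p, '#')))
		return -1;
	return strtol(p + 1, NULL, 10);	/* stops at the trailing ',' */
}

int main(void)
{
	const char *line =
	    "0xffff80008001080c <+12>: tbnz w0, #16, 0xffff80008001081c";
	long shift = parse_tbnz_shift(line);

	if (shift > 0)
		printf("THREAD_SHIFT = %ld, IRQ_STACK_SIZE = %lu\n",
		       shift, 1UL << shift);
	return 0;
}

Because strtol() stops at the first non-digit character, no manual
NUL-termination at the ',' is needed here, unlike the stol()-based loop
in the patch.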