From ad74016b919cbad78d203fa1c459ae18e73ce586 Mon Sep 17 00:00:00 2001
From: Juergen Gross
Date: Mon, 29 Sep 2025 13:29:45 +0200
Subject: [PATCH 1/2] x86/alternative: Drop not needed test after call of alt_replace_call()

alt_replace_call() will never return a negative value, so testing whether
the return value is less than zero can be dropped.

This makes it possible to switch the return type of alt_replace_call() and
the type of insn_buff_sz to unsigned int.

Signed-off-by: Juergen Gross
Signed-off-by: Peter Zijlstra (Intel)
---
 arch/x86/kernel/alternative.c | 9 +++------
 1 file changed, 3 insertions(+), 6 deletions(-)

diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 8ee5ff547357..4f3ea50e41e8 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -559,7 +559,7 @@ EXPORT_SYMBOL(BUG_func);
  * Rewrite the "call BUG_func" replacement to point to the target of the
  * indirect pv_ops call "call *disp(%ip)".
  */
-static int alt_replace_call(u8 *instr, u8 *insn_buff, struct alt_instr *a)
+static unsigned int alt_replace_call(u8 *instr, u8 *insn_buff, struct alt_instr *a)
 {
         void *target, *bug = &BUG_func;
         s32 disp;
@@ -643,7 +643,7 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
          * order.
          */
         for (a = start; a < end; a++) {
-                int insn_buff_sz = 0;
+                unsigned int insn_buff_sz = 0;
 
                 /*
                  * In case of nested ALTERNATIVE()s the outer alternative might
@@ -683,11 +683,8 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
                 memcpy(insn_buff, replacement, a->replacementlen);
                 insn_buff_sz = a->replacementlen;
 
-                if (a->flags & ALT_FLAG_DIRECT_CALL) {
+                if (a->flags & ALT_FLAG_DIRECT_CALL)
                         insn_buff_sz = alt_replace_call(instr, insn_buff, a);
-                        if (insn_buff_sz < 0)
-                                continue;
-                }
 
                 for (; insn_buff_sz < a->instrlen; insn_buff_sz++)
                         insn_buff[insn_buff_sz] = 0x90;
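
For illustration only (hypothetical names, not the kernel function itself): the
simplification above relies on the commit message's observation that
alt_replace_call() only ever hands back a valid buffer length, never an error
value, so the caller's length variable can be unsigned and the NOP padding loop
needs no "< 0" guard. A minimal sketch of that pattern:

  #include <linux/bug.h>
  #include <linux/types.h>

  /*
   * Sketch with made-up names: when a helper's failure paths never return
   * (BUG() is fatal), the only values that can reach the caller are valid
   * lengths, so an unsigned return type suffices.
   */
  static unsigned int replace_or_bug(bool ok, unsigned int len)
  {
          if (!ok)
                  BUG();                  /* does not return */
          return len;
  }

  static void pad_with_nops(unsigned char *buf, unsigned int instrlen, bool ok)
  {
          unsigned int sz = replace_or_bug(ok, 5);

          /* No "< 0" test needed; pad the remainder with NOPs (0x90). */
          for (; sz < instrlen; sz++)
                  buf[sz] = 0x90;
  }
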
From ced37e9ceae50e4cb6cd058963bd315ec9afa651 Mon Sep 17 00:00:00 2001
From: Tengda Wu
Date: Thu, 23 Oct 2025 09:06:32 +0000
Subject: [PATCH 2/2] x86/dumpstack: Prevent KASAN false positive warnings in __show_regs()

When triggering a stack dump via sysrq (echo t > /proc/sysrq-trigger), KASAN
may report a false-positive out-of-bounds access:

  BUG: KASAN: out-of-bounds in __show_regs+0x4b/0x340
  Call Trace:
   dump_stack_lvl
   print_address_description.constprop.0
   print_report
   __show_regs
   show_trace_log_lvl
   sched_show_task
   show_state_filter
   sysrq_handle_showstate
   __handle_sysrq
   write_sysrq_trigger
   proc_reg_write
   vfs_write
   ksys_write
   do_syscall_64
   entry_SYSCALL_64_after_hwframe

The issue occurs as follows:

  Task A (walks other tasks' stacks)          Task B (running)

  1. echo t > /proc/sysrq-trigger
     show_trace_log_lvl
       regs = unwind_get_entry_regs()
       show_regs_if_on_stack(regs)
                                              2. The stack memory pointed to by
                                                 `regs` keeps changing, and so
                                                 do the tags in its KASAN
                                                 shadow region.
       __show_regs(regs)
         regs->ax, regs->bx, ...
  3. hit KASAN redzones, OOB

When task A walks task B's stack without suspending it, the continuous
changes in task B's stack (and the corresponding KASAN shadow tags) may cause
task A to hit KASAN redzones when accessing stale values on the stack,
resulting in false-positive reports.

Simply stopping the task before unwinding is not a viable fix, as it would
alter the very state being inspected. This is especially true when diagnosing
misbehaving tasks (e.g. in a hard lockup), where stopping the task might fail
or hide the root cause by changing the call stack.

Therefore, fix this by disabling KASAN checks during asynchronous stack
unwinding, i.e. whenever the task being unwound is not the current task
(task != current).

  [ bp: Align arguments on function's opening brace. ]

Fixes: 3b3fa11bc700 ("x86/dumpstack: Print any pt_regs found on the stack")
Signed-off-by: Tengda Wu
Signed-off-by: Borislav Petkov (AMD)
Reviewed-by: Andrey Ryabinin
Acked-by: Josh Poimboeuf
Link: https://patch.msgid.link/all/20251023090632.269121-1-wutengda@huaweicloud.com
---
 arch/x86/kernel/dumpstack.c | 23 +++++++++++++++++++++--
 1 file changed, 21 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 71ee20102a8a..b10684dedc58 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -181,8 +181,8 @@ static void show_regs_if_on_stack(struct stack_info *info, struct pt_regs *regs,
  * in false positive reports. Disable instrumentation to avoid those.
  */
 __no_kmsan_checks
-static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
-                               unsigned long *stack, const char *log_lvl)
+static void __show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
+                                 unsigned long *stack, const char *log_lvl)
 {
         struct unwind_state state;
         struct stack_info stack_info = {0};
@@ -303,6 +303,25 @@ static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
         }
 }
 
+static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
+                               unsigned long *stack, const char *log_lvl)
+{
+        /*
+         * Disable KASAN to avoid false positives during walking another
+         * task's stacks, as values on these stacks may change concurrently
+         * with task execution.
+         */
+        bool disable_kasan = task && task != current;
+
+        if (disable_kasan)
+                kasan_disable_current();
+
+        __show_trace_log_lvl(task, regs, stack, log_lvl);
+
+        if (disable_kasan)
+                kasan_enable_current();
+}
+
 void show_stack(struct task_struct *task, unsigned long *sp,
                 const char *loglvl)
 {
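
For illustration only, the same KASAN bracketing idiom in isolation, assuming
nothing beyond kasan_disable_current()/kasan_enable_current() from
<linux/kasan.h>; peek_remote_stack_word() is a hypothetical helper and not
part of this patch:

  #include <linux/kasan.h>
  #include <linux/sched.h>

  /*
   * Sketch: read one word from a possibly-running remote task's stack.
   * KASAN checks are suppressed only when the target is another task, since
   * its stack (and the shadow tags) may change under us; accesses to the
   * current task's own stack remain fully checked.
   */
  static unsigned long peek_remote_stack_word(struct task_struct *task,
                                              unsigned long *addr)
  {
          bool disable_kasan = task && task != current;
          unsigned long val;

          if (disable_kasan)
                  kasan_disable_current();

          val = READ_ONCE(*addr);

          if (disable_kasan)
                  kasan_enable_current();

          return val;
  }

The task != current test keeps the suppression narrow: KASAN is only bypassed
for asynchronous unwinding, where the races are expected and tolerable for a
best-effort dump, and stays fully active everywhere else.
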