s390: Add stackprotector support

Stackprotector support was previously unavailable on s390 because by
default compilers generate code which is not suitable for the kernel:
the canary value is accessed via thread local storage, where the address
of thread local storage is within access registers 0 and 1.

Using those registers also for the kernel would come with a significant
performance impact and more complicated kernel entry/exit code, since
access registers contents would have to be exchanged on every kernel entry
and exit.

With the upcoming gcc 16 release, new compiler options will become available
which make it possible to generate code suitable for the kernel. [1]

Compiler option -mstack-protector-guard=global instructs gcc to generate
stackprotector code that refers to a global stackprotector canary value via
symbol __stack_chk_guard. Access to this value is guaranteed to occur via
larl and lgrl instructions.

Furthermore, compiler option -mstack-protector-guard-record generates a
section containing all code addresses that reference the canary value.

To allow for per task canary values the instructions which load the address
of __stack_chk_guard are patched so they access a lowcore field instead: a
per task canary value is available within the task_struct of each task, and
is written to the per-cpu lowcore location on each context switch.

Also add sanity checks and a debugging option to be consistent with other
kernel code patching mechanisms.

Full debugging output can be enabled with the following kernel command line
options:

debug_stackprotector
bootdebug
ignore_loglevel
earlyprintk
dyndbg="file stackprotector.c +p"

Example debug output:

stackprot: 0000021e402d4eda: c010005a9ae3 -> c01f00070240

where "<insn address>: <old insn> -> <new insn>".

[1] gcc commit 0cd1f03939d5 ("s390: Support global stack protector")

Reviewed-by: Sven Schnelle <svens@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
This commit is contained in:
Heiko Carstens 2025-11-17 15:09:53 +01:00
parent 1d7764cfe3
commit f5730d44e0
18 changed files with 269 additions and 4 deletions

View File

@ -69,6 +69,9 @@ config CC_HAS_ASM_AOR_FORMAT_FLAGS
Clang versions before 19.1.0 do not support A, Clang versions before 19.1.0 do not support A,
O, and R inline assembly format flags. O, and R inline assembly format flags.
config CC_HAS_STACKPROTECTOR_GLOBAL
def_bool $(cc-option, -mstack-protector-guard=global -mstack-protector-guard-record)
config S390 config S390
def_bool y def_bool y
# #
@ -245,6 +248,7 @@ config S390
select HAVE_SAMPLE_FTRACE_DIRECT_MULTI select HAVE_SAMPLE_FTRACE_DIRECT_MULTI
select HAVE_SETUP_PER_CPU_AREA select HAVE_SETUP_PER_CPU_AREA
select HAVE_SOFTIRQ_ON_OWN_STACK select HAVE_SOFTIRQ_ON_OWN_STACK
select HAVE_STACKPROTECTOR if CC_HAS_STACKPROTECTOR_GLOBAL
select HAVE_SYSCALL_TRACEPOINTS select HAVE_SYSCALL_TRACEPOINTS
select HAVE_VIRT_CPU_ACCOUNTING select HAVE_VIRT_CPU_ACCOUNTING
select HAVE_VIRT_CPU_ACCOUNTING_IDLE select HAVE_VIRT_CPU_ACCOUNTING_IDLE

View File

@ -89,6 +89,10 @@ ifdef CONFIG_EXPOLINE
aflags-y += -DCC_USING_EXPOLINE aflags-y += -DCC_USING_EXPOLINE
endif endif
ifeq ($(CONFIG_STACKPROTECTOR),y)
KBUILD_CFLAGS += -mstack-protector-guard=global -mstack-protector-guard-record
endif
ifdef CONFIG_FUNCTION_TRACER ifdef CONFIG_FUNCTION_TRACER
ifeq ($(call cc-option,-mfentry -mnop-mcount),) ifeq ($(call cc-option,-mfentry -mnop-mcount),)
# make use of hotpatch feature if the compiler supports it # make use of hotpatch feature if the compiler supports it

View File

@ -32,6 +32,7 @@ obj-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
obj-y += $(if $(CONFIG_KERNEL_UNCOMPRESSED),,decompressor.o) info.o obj-y += $(if $(CONFIG_KERNEL_UNCOMPRESSED),,decompressor.o) info.o
obj-$(CONFIG_KERNEL_ZSTD) += clz_ctz.o obj-$(CONFIG_KERNEL_ZSTD) += clz_ctz.o
obj-$(CONFIG_KMSAN) += kmsan.o obj-$(CONFIG_KMSAN) += kmsan.o
obj-$(CONFIG_STACKPROTECTOR) += stackprotector.o
obj-all := $(obj-y) piggy.o syms.o obj-all := $(obj-y) piggy.o syms.o
targets := bzImage section_cmp.boot.data section_cmp.boot.preserved.data $(obj-y) targets := bzImage section_cmp.boot.data section_cmp.boot.preserved.data $(obj-y)

View File

@ -28,6 +28,10 @@ struct vmlinux_info {
unsigned long invalid_pg_dir_off; unsigned long invalid_pg_dir_off;
unsigned long alt_instructions; unsigned long alt_instructions;
unsigned long alt_instructions_end; unsigned long alt_instructions_end;
#ifdef CONFIG_STACKPROTECTOR
unsigned long stack_prot_start;
unsigned long stack_prot_end;
#endif
#ifdef CONFIG_KASAN #ifdef CONFIG_KASAN
unsigned long kasan_early_shadow_page_off; unsigned long kasan_early_shadow_page_off;
unsigned long kasan_early_shadow_pte_off; unsigned long kasan_early_shadow_pte_off;

View File

@ -3,6 +3,7 @@
#include <linux/init.h> #include <linux/init.h>
#include <linux/ctype.h> #include <linux/ctype.h>
#include <linux/pgtable.h> #include <linux/pgtable.h>
#include <asm/arch-stackprotector.h>
#include <asm/abs_lowcore.h> #include <asm/abs_lowcore.h>
#include <asm/page-states.h> #include <asm/page-states.h>
#include <asm/machine.h> #include <asm/machine.h>
@ -294,6 +295,11 @@ void parse_boot_command_line(void)
cmma_flag = 0; cmma_flag = 0;
} }
#ifdef CONFIG_STACKPROTECTOR
if (!strcmp(param, "debug_stackprotector"))
stack_protector_debug = 1;
#endif
#if IS_ENABLED(CONFIG_KVM) #if IS_ENABLED(CONFIG_KVM)
if (!strcmp(param, "prot_virt")) { if (!strcmp(param, "prot_virt")) {
rc = kstrtobool(val, &enabled); rc = kstrtobool(val, &enabled);

View File

@ -0,0 +1,6 @@
// SPDX-License-Identifier: GPL-2.0

/*
 * Compile the common stackprotector patching code (see
 * arch/s390/kernel/stackprotector.c) for the decompressor environment.
 * boot_fmt() prefixes all boot console messages with "stackprot: ".
 */
#define boot_fmt(fmt) "stackprot: " fmt
#include "boot.h"
#include "../kernel/stackprotector.c"

View File

@ -20,6 +20,9 @@
#include <asm/uv.h> #include <asm/uv.h>
#include <asm/abs_lowcore.h> #include <asm/abs_lowcore.h>
#include <asm/physmem_info.h> #include <asm/physmem_info.h>
#include <asm/stacktrace.h>
#include <asm/asm-offsets.h>
#include <asm/arch-stackprotector.h>
#include "decompressor.h" #include "decompressor.h"
#include "boot.h" #include "boot.h"
#include "uv.h" #include "uv.h"
@ -477,6 +480,10 @@ static void kaslr_adjust_vmlinux_info(long offset)
vmlinux.invalid_pg_dir_off += offset; vmlinux.invalid_pg_dir_off += offset;
vmlinux.alt_instructions += offset; vmlinux.alt_instructions += offset;
vmlinux.alt_instructions_end += offset; vmlinux.alt_instructions_end += offset;
#ifdef CONFIG_STACKPROTECTOR
vmlinux.stack_prot_start += offset;
vmlinux.stack_prot_end += offset;
#endif
#ifdef CONFIG_KASAN #ifdef CONFIG_KASAN
vmlinux.kasan_early_shadow_page_off += offset; vmlinux.kasan_early_shadow_page_off += offset;
vmlinux.kasan_early_shadow_pte_off += offset; vmlinux.kasan_early_shadow_pte_off += offset;
@ -622,6 +629,7 @@ void startup_kernel(void)
__apply_alternatives((struct alt_instr *)_vmlinux_info.alt_instructions, __apply_alternatives((struct alt_instr *)_vmlinux_info.alt_instructions,
(struct alt_instr *)_vmlinux_info.alt_instructions_end, (struct alt_instr *)_vmlinux_info.alt_instructions_end,
ALT_CTX_EARLY); ALT_CTX_EARLY);
stack_protector_apply_early(text_lma);
/* /*
* Save KASLR offset for early dumps, before vmcore_info is set. * Save KASLR offset for early dumps, before vmcore_info is set.

View File

@ -0,0 +1,25 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_S390_ARCH_STACKPROTECTOR_H
#define _ASM_S390_ARCH_STACKPROTECTOR_H

/* Global canary value referenced by compiler generated code before patching. */
extern unsigned long __stack_chk_guard;
/* Set via the "debug_stackprotector" kernel command line option. */
extern int stack_protector_debug;

void __stack_protector_apply_early(unsigned long kernel_start);
int __stack_protector_apply(unsigned long *start, unsigned long *end, unsigned long kernel_start);

/*
 * Patch all __stack_chk_guard references within the decompressed kernel
 * image. No-op if CONFIG_STACKPROTECTOR is disabled.
 */
static inline void stack_protector_apply_early(unsigned long kernel_start)
{
	if (IS_ENABLED(CONFIG_STACKPROTECTOR))
		__stack_protector_apply_early(kernel_start);
}

/*
 * Patch all __stack_chk_guard references listed in the [start, end) patch
 * location table (used e.g. for modules - see module_finalize()).
 * Returns 0 on success, or a negative error code if patching failed.
 */
static inline int stack_protector_apply(unsigned long *start, unsigned long *end)
{
	if (IS_ENABLED(CONFIG_STACKPROTECTOR))
		return __stack_protector_apply(start, end, 0);
	return 0;
}

#endif /* _ASM_S390_ARCH_STACKPROTECTOR_H */

View File

@ -100,7 +100,8 @@ struct lowcore {
/* Save areas. */ /* Save areas. */
__u64 save_area[8]; /* 0x0200 */ __u64 save_area[8]; /* 0x0200 */
__u8 pad_0x0240[0x0280-0x0240]; /* 0x0240 */ __u64 stack_canary; /* 0x0240 */
__u8 pad_0x0248[0x0280-0x0248]; /* 0x0248 */
__u64 save_area_restart[1]; /* 0x0280 */ __u64 save_area_restart[1]; /* 0x0280 */
__u64 pcpu; /* 0x0288 */ __u64 pcpu; /* 0x0288 */

View File

@ -0,0 +1,16 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_S390_STACKPROTECTOR_H
#define _ASM_S390_STACKPROTECTOR_H

#include <linux/sched.h>
#include <asm/current.h>
#include <asm/lowcore.h>

/*
 * Generate a new random canary for the current task and publish it in
 * lowcore, where the patched stackprotector code reads it from.
 */
static __always_inline void boot_init_stack_canary(void)
{
	current->stack_canary = get_random_canary();
	get_lowcore()->stack_canary = current->stack_canary;
}

#endif /* _ASM_S390_STACKPROTECTOR_H */

View File

@ -67,7 +67,7 @@ obj-$(CONFIG_KEXEC_CORE) += machine_kexec.o relocate_kernel.o
obj-$(CONFIG_VMCORE_INFO) += vmcore_info.o obj-$(CONFIG_VMCORE_INFO) += vmcore_info.o
obj-$(CONFIG_UPROBES) += uprobes.o obj-$(CONFIG_UPROBES) += uprobes.o
obj-$(CONFIG_JUMP_LABEL) += jump_label.o obj-$(CONFIG_JUMP_LABEL) += jump_label.o
obj-$(CONFIG_STACKPROTECTOR) += stackprotector.o
obj-$(CONFIG_KEXEC_FILE) += machine_kexec_file.o kexec_image.o obj-$(CONFIG_KEXEC_FILE) += machine_kexec_file.o kexec_image.o
obj-$(CONFIG_KEXEC_FILE) += kexec_elf.o obj-$(CONFIG_KEXEC_FILE) += kexec_elf.o
obj-$(CONFIG_CERT_STORE) += cert_store.o obj-$(CONFIG_CERT_STORE) += cert_store.o

View File

@ -21,6 +21,9 @@ int main(void)
OFFSET(__TASK_stack, task_struct, stack); OFFSET(__TASK_stack, task_struct, stack);
OFFSET(__TASK_thread, task_struct, thread); OFFSET(__TASK_thread, task_struct, thread);
OFFSET(__TASK_pid, task_struct, pid); OFFSET(__TASK_pid, task_struct, pid);
#ifdef CONFIG_STACKPROTECTOR
OFFSET(__TASK_stack_canary, task_struct, stack_canary);
#endif
BLANK(); BLANK();
/* thread struct offsets */ /* thread struct offsets */
OFFSET(__THREAD_ksp, thread_struct, ksp); OFFSET(__THREAD_ksp, thread_struct, ksp);
@ -139,6 +142,7 @@ int main(void)
OFFSET(__LC_CURRENT_PID, lowcore, current_pid); OFFSET(__LC_CURRENT_PID, lowcore, current_pid);
OFFSET(__LC_LAST_BREAK, lowcore, last_break); OFFSET(__LC_LAST_BREAK, lowcore, last_break);
/* software defined ABI-relevant lowcore locations 0xe00 - 0xe20 */ /* software defined ABI-relevant lowcore locations 0xe00 - 0xe20 */
OFFSET(__LC_STACK_CANARY, lowcore, stack_canary);
OFFSET(__LC_DUMP_REIPL, lowcore, ipib); OFFSET(__LC_DUMP_REIPL, lowcore, ipib);
OFFSET(__LC_VMCORE_INFO, lowcore, vmcore_info); OFFSET(__LC_VMCORE_INFO, lowcore, vmcore_info);
OFFSET(__LC_OS_INFO, lowcore, os_info); OFFSET(__LC_OS_INFO, lowcore, os_info);

View File

@ -162,9 +162,13 @@ SYM_FUNC_START(__switch_to_asm)
stg %r3,__LC_CURRENT(%r13) # store task struct of next stg %r3,__LC_CURRENT(%r13) # store task struct of next
stg %r15,__LC_KERNEL_STACK(%r13) # store end of kernel stack stg %r15,__LC_KERNEL_STACK(%r13) # store end of kernel stack
lg %r15,__THREAD_ksp(%r1,%r3) # load kernel stack of next lg %r15,__THREAD_ksp(%r1,%r3) # load kernel stack of next
aghi %r3,__TASK_pid aghik %r4,%r3,__TASK_pid
mvc __LC_CURRENT_PID(4,%r13),0(%r3) # store pid of next mvc __LC_CURRENT_PID(4,%r13),0(%r4) # store pid of next
ALTERNATIVE "nop", "lpp _LPP_OFFSET(%r13)", ALT_FACILITY(40) ALTERNATIVE "nop", "lpp _LPP_OFFSET(%r13)", ALT_FACILITY(40)
#ifdef CONFIG_STACKPROTECTOR
lg %r3,__TASK_stack_canary(%r3)
stg %r3,__LC_STACK_CANARY(%r13)
#endif
lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
BR_EX %r14 BR_EX %r14
SYM_FUNC_END(__switch_to_asm) SYM_FUNC_END(__switch_to_asm)

View File

@ -22,12 +22,14 @@
#include <linux/bug.h> #include <linux/bug.h>
#include <linux/memory.h> #include <linux/memory.h>
#include <linux/execmem.h> #include <linux/execmem.h>
#include <asm/arch-stackprotector.h>
#include <asm/alternative.h> #include <asm/alternative.h>
#include <asm/nospec-branch.h> #include <asm/nospec-branch.h>
#include <asm/facility.h> #include <asm/facility.h>
#include <asm/ftrace.lds.h> #include <asm/ftrace.lds.h>
#include <asm/set_memory.h> #include <asm/set_memory.h>
#include <asm/setup.h> #include <asm/setup.h>
#include <asm/asm-offsets.h>
#if 0 #if 0
#define DEBUGP printk #define DEBUGP printk
@ -525,6 +527,13 @@ int module_finalize(const Elf_Ehdr *hdr,
(str_has_prefix(secname, ".s390_return"))) (str_has_prefix(secname, ".s390_return")))
nospec_revert(aseg, aseg + s->sh_size); nospec_revert(aseg, aseg + s->sh_size);
if (IS_ENABLED(CONFIG_STACKPROTECTOR) &&
(str_has_prefix(secname, "__stack_protector_loc"))) {
rc = stack_protector_apply(aseg, aseg + s->sh_size);
if (rc)
break;
}
#ifdef CONFIG_FUNCTION_TRACER #ifdef CONFIG_FUNCTION_TRACER
if (!strcmp(FTRACE_CALLSITE_SECTION, secname)) { if (!strcmp(FTRACE_CALLSITE_SECTION, secname)) {
rc = module_alloc_ftrace_hotpatch_trampolines(me, s); rc = module_alloc_ftrace_hotpatch_trampolines(me, s);

View File

@ -280,6 +280,9 @@ static void pcpu_attach_task(int cpu, struct task_struct *tsk)
lc->hardirq_timer = tsk->thread.hardirq_timer; lc->hardirq_timer = tsk->thread.hardirq_timer;
lc->softirq_timer = tsk->thread.softirq_timer; lc->softirq_timer = tsk->thread.softirq_timer;
lc->steal_timer = 0; lc->steal_timer = 0;
#ifdef CONFIG_STACKPROTECTOR
lc->stack_canary = tsk->stack_canary;
#endif
} }
static void pcpu_start_fn(int cpu, void (*func)(void *), void *data) static void pcpu_start_fn(int cpu, void (*func)(void *), void *data)

View File

@ -0,0 +1,156 @@
// SPDX-License-Identifier: GPL-2.0
#ifndef pr_fmt
#define pr_fmt(fmt) "stackprot: " fmt
#endif
#include <linux/export.h>
#include <linux/uaccess.h>
#include <linux/printk.h>
#include <asm/abs_lowcore.h>
#include <asm/sections.h>
#include <asm/machine.h>
#include <asm/asm-offsets.h>
#include <asm/arch-stackprotector.h>
#ifdef __DECOMPRESSOR
/* Decompressor: only the boot console printing helpers are available. */
#define DEBUGP	boot_debug
#define EMERGP	boot_emerg
#define PANIC	boot_panic
#else /* __DECOMPRESSOR */
#define DEBUGP	pr_debug
#define EMERGP	pr_emerg
#define PANIC	panic
#endif /* __DECOMPRESSOR */

/* Enabled with the "debug_stackprotector" command line option. */
int __bootdata_preserved(stack_protector_debug);
/* Global canary referenced by compiler generated code before patching. */
unsigned long __stack_chk_guard;
EXPORT_SYMBOL(__stack_chk_guard);

/* RIL instruction format: 16 opcode/register bits plus a 32 bit immediate. */
struct insn_ril {
	u8 opc1 : 8;
	u8 r1	: 4;
	u8 opc2 : 4;
	u32 imm;
} __packed;
/*
* Convert a virtual instruction address to a real instruction address. The
* decompressor needs to patch instructions within the kernel image based on
* their virtual addresses, while dynamic address translation is still
* disabled. Therefore a translation from virtual kernel image addresses to
* the corresponding physical addresses is required.
*
* After dynamic address translation is enabled and when the kernel needs to
* patch instructions such a translation is not required since the addresses
* are identical.
*/
static struct insn_ril *vaddress_to_insn(unsigned long vaddress)
{
#ifdef __DECOMPRESSOR
	/* DAT is still off: translate to the physical instruction location. */
	return (struct insn_ril *)__kernel_pa(vaddress);
#else
	/* With DAT enabled virtual and patch addresses are identical. */
	return (struct insn_ril *)vaddress;
#endif
}
/* Inverse of vaddress_to_insn(); used when printing instruction addresses. */
static unsigned long insn_to_vaddress(struct insn_ril *insn)
{
#ifdef __DECOMPRESSOR
	return (unsigned long)__kernel_va(insn);
#else
	return (unsigned long)insn;
#endif
}
#define INSN_RIL_STRING_SIZE (sizeof(struct insn_ril) * 2 + 1)
/*
 * Render an instruction as a NUL terminated hex string. The destination
 * buffer must provide at least INSN_RIL_STRING_SIZE bytes.
 */
static void insn_ril_to_string(char *str, struct insn_ril *insn)
{
	const u8 *byte = (const u8 *)insn;
	char *pos = str;
	unsigned int i;

	for (i = 0; i < sizeof(*insn); i++)
		pos = hex_byte_pack(pos, byte[i]);
	*pos = 0;
}
/* Emit a "<insn address>: <old insn> -> <new insn>" debug line. */
static void stack_protector_dump(struct insn_ril *old, struct insn_ril *new)
{
	char ostr[INSN_RIL_STRING_SIZE];
	char nstr[INSN_RIL_STRING_SIZE];

	insn_ril_to_string(ostr, old);
	insn_ril_to_string(nstr, new);
	DEBUGP("%016lx: %s -> %s\n", insn_to_vaddress(old), ostr, nstr);
}
/*
 * Verify that the instruction to be patched is one of the two forms the
 * compiler is guaranteed to emit for __stack_chk_guard accesses: larl
 * (opcode 0xc00) or lgrl (opcode 0xc48).
 *
 * Anything else indicates an inconsistent patch location table. Within the
 * decompressor this is fatal; at runtime (e.g. module load) the problem is
 * reported and -EINVAL returned to the caller.
 */
static int stack_protector_verify(struct insn_ril *insn, unsigned long kernel_start)
{
	char istr[INSN_RIL_STRING_SIZE];
	unsigned long vaddress, offset;

	/* larl */
	if (insn->opc1 == 0xc0 && insn->opc2 == 0x0)
		return 0;
	/* lgrl */
	if (insn->opc1 == 0xc4 && insn->opc2 == 0x8)
		return 0;
	insn_ril_to_string(istr, insn);
	vaddress = insn_to_vaddress(insn);
	if (__is_defined(__DECOMPRESSOR)) {
		/* Also report the offset into the kernel image to ease debugging. */
		offset = (unsigned long)insn - kernel_start + TEXT_OFFSET;
		EMERGP("Unexpected instruction at %016lx/%016lx: %s\n", vaddress, offset, istr);
		PANIC("Stackprotector error\n");
	} else {
		EMERGP("Unexpected instruction at %016lx: %s\n", vaddress, istr);
	}
	return -EINVAL;
}
/*
 * Patch every instruction listed in the [start, end) patch location table
 * so that it no longer references the global __stack_chk_guard, but
 * generates the address of the per-cpu lowcore stack canary instead.
 *
 * kernel_start is only needed by the decompressor for error reporting.
 * Returns 0 on success, -EINVAL if an unexpected instruction is found.
 */
int __stack_protector_apply(unsigned long *start, unsigned long *end, unsigned long kernel_start)
{
	unsigned long canary, *loc;
	struct insn_ril *insn, new;
	int rc;

	/*
	 * Convert LARL/LGRL instructions to LLILF so register R1 contains the
	 * address of the per-cpu / per-process stack canary:
	 *
	 * LARL/LGRL R1,__stack_chk_guard => LLILF R1,__lc_stack_canary
	 */
	canary = __LC_STACK_CANARY;
	if (machine_has_relocated_lowcore())
		canary += LOWCORE_ALT_ADDRESS;
	for (loc = start; loc < end; loc++) {
		insn = vaddress_to_insn(*loc);
		rc = stack_protector_verify(insn, kernel_start);
		if (rc)
			return rc;
		/* Copy keeps the target register (r1) of the original instruction. */
		new = *insn;
		new.opc1 = 0xc0;	/* llilf opcode (0xc0f) ... */
		new.opc2 = 0xf;
		new.imm = canary;	/* ... with the lowcore canary address as immediate */
		if (stack_protector_debug)
			stack_protector_dump(insn, &new);
		s390_kernel_write(insn, &new, sizeof(*insn));
	}
	return 0;
}
#ifdef __DECOMPRESSOR
/*
 * Patch all canary references within the decompressed kernel image, using
 * the patch location table generated via -mstack-protector-guard-record.
 * The return value does not need to be checked: within the decompressor
 * stack_protector_verify() panics on failure.
 */
void __stack_protector_apply_early(unsigned long kernel_start)
{
	unsigned long *start, *end;

	start = (unsigned long *)vmlinux.stack_prot_start;
	end = (unsigned long *)vmlinux.stack_prot_end;
	__stack_protector_apply(start, end, kernel_start);
}
#endif

View File

@ -32,6 +32,7 @@ KBUILD_CFLAGS_64 := $(filter-out -mno-pic-data-is-text-relative,$(KBUILD_CFLAGS_
KBUILD_CFLAGS_64 := $(filter-out -munaligned-symbols,$(KBUILD_CFLAGS_64)) KBUILD_CFLAGS_64 := $(filter-out -munaligned-symbols,$(KBUILD_CFLAGS_64))
KBUILD_CFLAGS_64 := $(filter-out -fno-asynchronous-unwind-tables,$(KBUILD_CFLAGS_64)) KBUILD_CFLAGS_64 := $(filter-out -fno-asynchronous-unwind-tables,$(KBUILD_CFLAGS_64))
KBUILD_CFLAGS_64 += -m64 -fPIC -fno-common -fno-builtin -fasynchronous-unwind-tables KBUILD_CFLAGS_64 += -m64 -fPIC -fno-common -fno-builtin -fasynchronous-unwind-tables
KBUILD_CFLAGS_64 += -fno-stack-protector
ldflags-y := -shared -soname=linux-vdso64.so.1 \ ldflags-y := -shared -soname=linux-vdso64.so.1 \
--hash-style=both --build-id=sha1 -T --hash-style=both --build-id=sha1 -T

View File

@ -150,6 +150,15 @@ SECTIONS
*(.altinstr_replacement) *(.altinstr_replacement)
} }
#ifdef CONFIG_STACKPROTECTOR
. = ALIGN(8);
.stack_prot_table : {
__stack_prot_start = .;
KEEP(*(__stack_protector_loc))
__stack_prot_end = .;
}
#endif
/* /*
* Table with the patch locations to undo expolines * Table with the patch locations to undo expolines
*/ */
@ -257,6 +266,10 @@ SECTIONS
QUAD(invalid_pg_dir) QUAD(invalid_pg_dir)
QUAD(__alt_instructions) QUAD(__alt_instructions)
QUAD(__alt_instructions_end) QUAD(__alt_instructions_end)
#ifdef CONFIG_STACKPROTECTOR
QUAD(__stack_prot_start)
QUAD(__stack_prot_end)
#endif
#ifdef CONFIG_KASAN #ifdef CONFIG_KASAN
QUAD(kasan_early_shadow_page) QUAD(kasan_early_shadow_page)
QUAD(kasan_early_shadow_pte) QUAD(kasan_early_shadow_pte)