diff --git a/arch/x86/Kconfig.cpufeatures b/arch/x86/Kconfig.cpufeatures
index 250c10627ab3..733d5aff2456 100644
--- a/arch/x86/Kconfig.cpufeatures
+++ b/arch/x86/Kconfig.cpufeatures
@@ -124,6 +124,10 @@ config X86_DISABLED_FEATURE_PCID
 	def_bool y
 	depends on !X86_64
 
+config X86_DISABLED_FEATURE_LASS
+	def_bool y
+	depends on X86_32
+
 config X86_DISABLED_FEATURE_PKU
 	def_bool y
 	depends on !X86_INTEL_MEMORY_PROTECTION_KEYS
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index fc5f32d4da6e..4b1a6ade1700 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -30,7 +30,7 @@ enum cpuid_leafs
 	CPUID_6_EAX,
 	CPUID_8000_000A_EDX,
 	CPUID_7_ECX,
-	CPUID_8000_0007_EBX,
+	CPUID_LNX_6,
 	CPUID_7_EDX,
 	CPUID_8000_001F_EAX,
 	CPUID_8000_0021_EAX,
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 9e54fc0e7ed3..d90ce601917c 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -314,6 +314,7 @@
 #define X86_FEATURE_SM4			(12*32+ 2) /* SM4 instructions */
 #define X86_FEATURE_AVX_VNNI		(12*32+ 4) /* "avx_vnni" AVX VNNI instructions */
 #define X86_FEATURE_AVX512_BF16		(12*32+ 5) /* "avx512_bf16" AVX512 BFLOAT16 instructions */
+#define X86_FEATURE_LASS		(12*32+ 6) /* "lass" Linear Address Space Separation */
 #define X86_FEATURE_CMPCCXADD		(12*32+ 7) /* CMPccXADD instructions */
 #define X86_FEATURE_ARCH_PERFMON_EXT	(12*32+ 8) /* Intel Architectural PerfMon Extension */
 #define X86_FEATURE_FZRM		(12*32+10) /* Fast zero-length REP MOVSB */
@@ -407,9 +408,12 @@
 #define X86_FEATURE_ENQCMD		(16*32+29) /* "enqcmd" ENQCMD and ENQCMDS instructions */
 #define X86_FEATURE_SGX_LC		(16*32+30) /* "sgx_lc" Software Guard Extensions Launch Control */
 
-/* AMD-defined CPU features, CPUID level 0x80000007 (EBX), word 17 */
+/*
+ * Linux-defined word for use with scattered/synthetic bits.
+ */
 #define X86_FEATURE_OVERFLOW_RECOV	(17*32+ 0) /* "overflow_recov" MCA overflow recovery support */
 #define X86_FEATURE_SUCCOR		(17*32+ 1) /* "succor" Uncorrectable error containment and recovery */
+
 #define X86_FEATURE_SMCA		(17*32+ 3) /* "smca" Scalable MCA */
 
 /* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
index 4f84d421d1cf..20a3baae9568 100644
--- a/arch/x86/include/asm/smap.h
+++ b/arch/x86/include/asm/smap.h
@@ -23,18 +23,55 @@
 
 #else /* __ASSEMBLER__ */
 
+/*
+ * The CLAC/STAC instructions toggle the enforcement of
+ * X86_FEATURE_SMAP along with X86_FEATURE_LASS.
+ *
+ * SMAP enforcement is based on the _PAGE_BIT_USER bit in the page
+ * tables. The kernel is not allowed to touch pages with that bit set
+ * unless the AC bit is set.
+ *
+ * Use stac()/clac() when accessing userspace (_PAGE_USER) mappings,
+ * regardless of location.
+ *
+ * Note: a barrier is implicit in alternative().
+ */
+
 static __always_inline void clac(void)
 {
-	/* Note: a barrier is implicit in alternative() */
 	alternative("", "clac", X86_FEATURE_SMAP);
 }
 
 static __always_inline void stac(void)
 {
-	/* Note: a barrier is implicit in alternative() */
 	alternative("", "stac", X86_FEATURE_SMAP);
 }
 
+/*
+ * LASS enforcement is based on bit 63 of the virtual address. The
+ * kernel is not allowed to touch memory in the lower half of the
+ * virtual address space.
+ *
+ * Use lass_stac()/lass_clac() to toggle the AC bit for kernel data
+ * accesses (!_PAGE_USER) that are blocked by LASS, but not by SMAP.
+ *
+ * Even with the AC bit set, LASS will continue to block instruction
+ * fetches from the user half of the address space. To allow those,
+ * clear CR4.LASS to disable the LASS mechanism entirely.
+ *
+ * Note: a barrier is implicit in alternative().
+ */
+
+static __always_inline void lass_clac(void)
+{
+	alternative("", "clac", X86_FEATURE_LASS);
+}
+
+static __always_inline void lass_stac(void)
+{
+	alternative("", "stac", X86_FEATURE_LASS);
+}
+
 static __always_inline unsigned long smap_save(void)
 {
 	unsigned long flags;
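The intended split between the two helper pairs can be shown with a short sketch (not part of the patch): stac()/clac() stay tied to X86_FEATURE_SMAP and guard _PAGE_USER mappings, while lass_stac()/lass_clac() only open an AC=1 window for kernel accesses to lower-half addresses. The helper name and its argument below are hypothetical.

static __always_inline unsigned long read_low_half_example(const unsigned long *low_va)
{
	unsigned long val;

	lass_stac();		/* AC=1: LASS stops objecting to the low address */
	val = *low_va;		/* plain kernel access, no _PAGE_USER page involved */
	lass_clac();		/* close the window as soon as possible */

	return val;
}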
diff --git a/arch/x86/include/asm/string.h b/arch/x86/include/asm/string.h
index c3c2c1914d65..9cb5aae7fba9 100644
--- a/arch/x86/include/asm/string.h
+++ b/arch/x86/include/asm/string.h
@@ -1,6 +1,32 @@
 /* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_X86_STRING_H
+#define _ASM_X86_STRING_H
+
 #ifdef CONFIG_X86_32
 # include <asm/string_32.h>
 #else
 # include <asm/string_64.h>
 #endif
+
+static __always_inline void *__inline_memcpy(void *to, const void *from, size_t len)
+{
+	void *ret = to;
+
+	asm volatile("rep movsb"
+		     : "+D" (to), "+S" (from), "+c" (len)
+		     : : "memory");
+	return ret;
+}
+
+static __always_inline void *__inline_memset(void *s, int v, size_t n)
+{
+	void *ret = s;
+
+	asm volatile("rep stosb"
+		     : "+D" (s), "+c" (n)
+		     : "a" ((uint8_t)v)
+		     : "memory");
+	return ret;
+}
+
+#endif /* _ASM_X86_STRING_H */
diff --git a/arch/x86/include/uapi/asm/processor-flags.h b/arch/x86/include/uapi/asm/processor-flags.h
index f1a4adc78272..81d0c8bf1137 100644
--- a/arch/x86/include/uapi/asm/processor-flags.h
+++ b/arch/x86/include/uapi/asm/processor-flags.h
@@ -136,6 +136,8 @@
 #define X86_CR4_PKE		_BITUL(X86_CR4_PKE_BIT)
 #define X86_CR4_CET_BIT		23 /* enable Control-flow Enforcement Technology */
 #define X86_CR4_CET		_BITUL(X86_CR4_CET_BIT)
+#define X86_CR4_LASS_BIT	27 /* enable Linear Address Space Separation support */
+#define X86_CR4_LASS		_BITUL(X86_CR4_LASS_BIT)
 #define X86_CR4_LAM_SUP_BIT	28 /* LAM for supervisor pointers */
 #define X86_CR4_LAM_SUP		_BITUL(X86_CR4_LAM_SUP_BIT)
 
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index e377b06e70e3..74f4c659f9c9 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -2453,16 +2453,30 @@ void __init_or_module text_poke_early(void *addr, const void *opcode,
 __ro_after_init struct mm_struct *text_poke_mm;
 __ro_after_init unsigned long text_poke_mm_addr;
 
+/*
+ * Text poking creates and uses a mapping in the lower half of the
+ * address space. Relax LASS enforcement when accessing the poking
+ * address.
+ *
+ * objtool enforces a strict policy of "no function calls within AC=1
+ * regions". Adhere to the policy by using inline versions of
+ * memcpy()/memset() that will never result in a function call.
+ */
+
 static void text_poke_memcpy(void *dst, const void *src, size_t len)
 {
-	memcpy(dst, src, len);
+	lass_stac();
+	__inline_memcpy(dst, src, len);
+	lass_clac();
 }
 
 static void text_poke_memset(void *dst, const void *src, size_t len)
 {
 	int c = *(const int *)src;
 
-	memset(dst, c, len);
+	lass_stac();
+	__inline_memset(dst, c, len);
+	lass_clac();
 }
 
 typedef void text_poke_f(void *dst, const void *src, size_t len);
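A short sketch (not part of the patch) of the pattern that the objtool rule above allows: nothing between lass_stac() and lass_clac() may compile into a CALL, so only the __inline_* helpers, which expand to a single rep-string instruction, are used inside the window. The function and its poking_addr argument are hypothetical.

static void poke_example(void *poking_addr, const void *opcode, size_t len)
{
	lass_stac();
	__inline_memcpy(poking_addr, opcode, len);	/* OK: expands to "rep movsb" */
	lass_clac();

	/*
	 * By contrast, calling memcpy() inside the AC=1 window would emit a
	 * CALL instruction and trip objtool's uaccess/AC check.
	 */
}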
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 9aae990ed7c7..e7ab22fce3b5 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -406,6 +406,28 @@ static __always_inline void setup_umip(struct cpuinfo_x86 *c)
 	cr4_clear_bits(X86_CR4_UMIP);
 }
 
+static __always_inline void setup_lass(struct cpuinfo_x86 *c)
+{
+	if (!cpu_feature_enabled(X86_FEATURE_LASS))
+		return;
+
+	/*
+	 * Legacy vsyscall page access causes a #GP when LASS is active.
+	 * Disable LASS because the #GP handler doesn't support vsyscall
+	 * emulation.
+	 *
+	 * Also disable LASS when running under EFI, as some runtime and
+	 * boot services rely on 1:1 mappings in the lower half.
+	 */
+	if (IS_ENABLED(CONFIG_X86_VSYSCALL_EMULATION) ||
+	    IS_ENABLED(CONFIG_EFI)) {
+		setup_clear_cpu_cap(X86_FEATURE_LASS);
+		return;
+	}
+
+	cr4_set_bits(X86_CR4_LASS);
+}
+
 /* These bits should not change their value after CPU init is finished. */
 static const unsigned long cr4_pinned_mask = X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_UMIP |
 					     X86_CR4_FSGSBASE | X86_CR4_CET | X86_CR4_FRED;
@@ -1026,12 +1048,8 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
 		c->x86_capability[CPUID_8000_0001_EDX] = edx;
 	}
 
-	if (c->extended_cpuid_level >= 0x80000007) {
-		cpuid(0x80000007, &eax, &ebx, &ecx, &edx);
-
-		c->x86_capability[CPUID_8000_0007_EBX] = ebx;
-		c->x86_power = edx;
-	}
+	if (c->extended_cpuid_level >= 0x80000007)
+		c->x86_power = cpuid_edx(0x80000007);
 
 	if (c->extended_cpuid_level >= 0x80000008) {
 		cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
@@ -2016,10 +2034,10 @@ static void identify_cpu(struct cpuinfo_x86 *c)
 	/* Disable the PN if appropriate */
 	squash_the_stupid_serial_number(c);
 
-	/* Set up SMEP/SMAP/UMIP */
 	setup_smep(c);
 	setup_smap(c);
 	setup_umip(c);
+	setup_lass(c);
 
 	/* Enable FSGSBASE instructions if available. */
 	if (cpu_has(c, X86_FEATURE_FSGSBASE)) {
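For the get_cpu_cap() change above, a small sketch (not part of the patch) of what the removed open-coded sequence boiled down to; cpuid() and cpuid_edx() are existing helpers, and the wrapper name here is hypothetical.

static u32 leaf_0x80000007_edx_example(void)
{
	u32 eax, ebx, ecx, edx;

	/* Old pattern: full CPUID query just to keep EDX (plus the now-unused EBX word). */
	cpuid(0x80000007, &eax, &ebx, &ecx, &edx);

	/* Equivalent to the new single call: cpuid_edx(0x80000007). */
	return edx;
}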
diff --git a/arch/x86/kernel/cpu/cpuid-deps.c b/arch/x86/kernel/cpu/cpuid-deps.c
index a40f5545e25b..146f6f8b0650 100644
--- a/arch/x86/kernel/cpu/cpuid-deps.c
+++ b/arch/x86/kernel/cpu/cpuid-deps.c
@@ -91,6 +91,7 @@ static const struct cpuid_dep cpuid_deps[] = {
 	{ X86_FEATURE_SHSTK,			X86_FEATURE_XSAVES    },
 	{ X86_FEATURE_FRED,			X86_FEATURE_LKGS      },
 	{ X86_FEATURE_SPEC_CTRL_SSBD,		X86_FEATURE_SPEC_CTRL },
+	{ X86_FEATURE_LASS,			X86_FEATURE_SMAP      },
 	{}
 };
 
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index 0524ac0260fc..cde4b6cd3471 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -45,6 +45,9 @@ static const struct cpuid_bit cpuid_bits[] = {
 	{ X86_FEATURE_SGX2,		CPUID_EAX,  1, 0x00000012, 0 },
 	{ X86_FEATURE_SGX_EUPDATESVN,	CPUID_EAX, 10, 0x00000012, 0 },
 	{ X86_FEATURE_SGX_EDECCSSA,	CPUID_EAX, 11, 0x00000012, 0 },
+	{ X86_FEATURE_OVERFLOW_RECOV,	CPUID_EBX,  0, 0x80000007, 0 },
+	{ X86_FEATURE_SUCCOR,		CPUID_EBX,  1, 0x80000007, 0 },
+	{ X86_FEATURE_SMCA,		CPUID_EBX,  3, 0x80000007, 0 },
 	{ X86_FEATURE_HW_PSTATE,	CPUID_EDX,  7, 0x80000007, 0 },
 	{ X86_FEATURE_CPB,		CPUID_EDX,  9, 0x80000007, 0 },
 	{ X86_FEATURE_PROC_FEEDBACK,	CPUID_EDX, 11, 0x80000007, 0 },
diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
index 11e20bb13aca..4ffba68dc57b 100644
--- a/arch/x86/kernel/relocate_kernel_64.S
+++ b/arch/x86/kernel/relocate_kernel_64.S
@@ -95,9 +95,12 @@ SYM_CODE_START_NOALIGN(relocate_kernel)
 	/* Leave CR4 in %r13 to enable the right paging mode later. */
 	movq	%cr4, %r13
 
-	/* Disable global pages immediately to ensure this mapping is RWX */
+	/*
+	 * Disable global pages immediately to ensure this mapping is RWX.
+	 * Disable LASS before jumping to the identity mapped page.
+	 */
 	movq	%r13, %r12
-	andq	$~(X86_CR4_PGE), %r12
+	andq	$~(X86_CR4_PGE | X86_CR4_LASS), %r12
 	movq	%r12, %cr4
 
 	/* Save %rsp and CRs. */
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index cb324cc1fd99..bcf1dedc1d00 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -732,13 +732,23 @@ DEFINE_IDTENTRY(exc_bounds)
 enum kernel_gp_hint {
 	GP_NO_HINT,
 	GP_NON_CANONICAL,
-	GP_CANONICAL
+	GP_CANONICAL,
+	GP_LASS_VIOLATION,
+	GP_NULL_POINTER,
+};
+
+static const char * const kernel_gp_hint_help[] = {
+	[GP_NON_CANONICAL]	= "probably for non-canonical address",
+	[GP_CANONICAL]		= "maybe for address",
+	[GP_LASS_VIOLATION]	= "probably LASS violation for address",
+	[GP_NULL_POINTER]	= "kernel NULL pointer dereference",
 };
 
 /*
  * When an uncaught #GP occurs, try to determine the memory address accessed by
  * the instruction and return that address to the caller. Also, try to figure
- * out whether any part of the access to that address was non-canonical.
+ * out whether any part of the access to that address was non-canonical or
+ * across privilege levels.
  */
 static enum kernel_gp_hint get_kernel_gp_address(struct pt_regs *regs,
 						 unsigned long *addr)
@@ -760,14 +770,28 @@
 		return GP_NO_HINT;
 
 #ifdef CONFIG_X86_64
-	/*
-	 * Check that:
-	 *  - the operand is not in the kernel half
-	 *  - the last byte of the operand is not in the user canonical half
-	 */
-	if (*addr < ~__VIRTUAL_MASK &&
-	    *addr + insn.opnd_bytes - 1 > __VIRTUAL_MASK)
+	/* Operand is in the kernel half */
+	if (*addr >= ~__VIRTUAL_MASK)
+		return GP_CANONICAL;
+
+	/* The last byte of the operand is not in the user canonical half */
+	if (*addr + insn.opnd_bytes - 1 > __VIRTUAL_MASK)
 		return GP_NON_CANONICAL;
+
+	/*
+	 * A NULL pointer dereference usually causes a #PF. However, it
+	 * can result in a #GP when LASS is active. Provide the same
+	 * hint in the rare case that the condition is hit without LASS.
+	 */
+	if (*addr < PAGE_SIZE)
+		return GP_NULL_POINTER;
+
+	/*
+	 * Assume that LASS caused the exception, because the address is
+	 * canonical and in the user half.
+	 */
+	if (cpu_feature_enabled(X86_FEATURE_LASS))
+		return GP_LASS_VIOLATION;
 #endif
 
 	return GP_CANONICAL;
@@ -930,9 +954,7 @@ DEFINE_IDTENTRY_ERRORCODE(exc_general_protection)
 
 	if (hint != GP_NO_HINT)
 		snprintf(desc, sizeof(desc), GPFSTR ", %s 0x%lx",
-			 (hint == GP_NON_CANONICAL) ? "probably for non-canonical address"
-						    : "maybe for address",
-			 gp_addr);
+			 kernel_gp_hint_help[hint], gp_addr);
 
 	/*
 	 * KASAN is interested only in the non-canonical case, clear it
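The new hint order can be read as a stand-alone decision function; the sketch below (not part of the patch) mirrors the CONFIG_X86_64 branch of get_kernel_gp_address() for an already-decoded operand address, with the function name being hypothetical.

static enum kernel_gp_hint classify_gp_addr_example(unsigned long addr,
						    unsigned int opnd_bytes)
{
	if (addr >= ~__VIRTUAL_MASK)			/* kernel half */
		return GP_CANONICAL;

	if (addr + opnd_bytes - 1 > __VIRTUAL_MASK)	/* crosses the non-canonical hole */
		return GP_NON_CANONICAL;

	if (addr < PAGE_SIZE)				/* NULL-ish pointer */
		return GP_NULL_POINTER;

	if (cpu_feature_enabled(X86_FEATURE_LASS))	/* canonical, user half */
		return GP_LASS_VIOLATION;

	return GP_CANONICAL;
}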
diff --git a/arch/x86/kvm/reverse_cpuid.h b/arch/x86/kvm/reverse_cpuid.h
index 743ab25ba787..81b4a7acf72e 100644
--- a/arch/x86/kvm/reverse_cpuid.h
+++ b/arch/x86/kvm/reverse_cpuid.h
@@ -78,7 +78,6 @@ static const struct cpuid_reg reverse_cpuid[] = {
 	[CPUID_6_EAX]         = {         6, 0, CPUID_EAX},
 	[CPUID_8000_000A_EDX] = {0x8000000a, 0, CPUID_EDX},
 	[CPUID_7_ECX]         = {         7, 0, CPUID_ECX},
-	[CPUID_8000_0007_EBX] = {0x80000007, 0, CPUID_EBX},
 	[CPUID_7_EDX]         = {         7, 0, CPUID_EDX},
 	[CPUID_7_1_EAX]       = {         7, 1, CPUID_EAX},
 	[CPUID_12_EAX]        = {0x00000012, 0, CPUID_EAX},
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
index 245cf6b3ec57..ccc01ad6ff7c 100644
--- a/tools/arch/x86/include/asm/cpufeatures.h
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -407,9 +407,12 @@
 #define X86_FEATURE_ENQCMD		(16*32+29) /* "enqcmd" ENQCMD and ENQCMDS instructions */
 #define X86_FEATURE_SGX_LC		(16*32+30) /* "sgx_lc" Software Guard Extensions Launch Control */
 
-/* AMD-defined CPU features, CPUID level 0x80000007 (EBX), word 17 */
+/*
+ * Linux-defined word for use with scattered/synthetic bits.
+ */
 #define X86_FEATURE_OVERFLOW_RECOV	(17*32+ 0) /* "overflow_recov" MCA overflow recovery support */
 #define X86_FEATURE_SUCCOR		(17*32+ 1) /* "succor" Uncorrectable error containment and recovery */
+
 #define X86_FEATURE_SMCA		(17*32+ 3) /* "smca" Scalable MCA */
 
 /* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
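With word 17 now Linux-defined, the three 0x80000007 EBX flags are detected through the cpuid_bits[] table added to scattered.c above. The sketch below (not part of the patch) is a simplified version of the scattered scan, following the shape of the existing init_scattered_cpuid_features() loop with the upper-bound leaf check and other details elided; the function name is hypothetical.

static void scan_scattered_bits_example(struct cpuinfo_x86 *c)
{
	const struct cpuid_bit *cb;
	u32 regs[4];

	for (cb = cpuid_bits; cb->feature; cb++) {
		/* Skip leaves the CPU does not report. */
		if (cpuid_eax(cb->level & 0xffff0000) < cb->level)
			continue;

		cpuid_count(cb->level, cb->sub_leaf, &regs[CPUID_EAX],
			    &regs[CPUID_EBX], &regs[CPUID_ECX], &regs[CPUID_EDX]);

		/* Mirror the hardware bit into the synthetic word, e.g. word 17. */
		if (regs[cb->reg] & BIT(cb->bit))
			set_cpu_cap(c, cb->feature);
	}
}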
diff --git a/tools/testing/selftests/x86/test_vsyscall.c b/tools/testing/selftests/x86/test_vsyscall.c
index 05e1e6774fba..918eaec8bfbe 100644
--- a/tools/testing/selftests/x86/test_vsyscall.c
+++ b/tools/testing/selftests/x86/test_vsyscall.c
@@ -308,12 +308,13 @@ static void test_getcpu(int cpu)
 
 #ifdef __x86_64__
 static jmp_buf jmpbuf;
-static volatile unsigned long segv_err;
+static volatile unsigned long segv_err, segv_trapno;
 
 static void sigsegv(int sig, siginfo_t *info, void *ctx_void)
 {
 	ucontext_t *ctx = (ucontext_t *)ctx_void;
 
+	segv_trapno = ctx->uc_mcontext.gregs[REG_TRAPNO];
 	segv_err = ctx->uc_mcontext.gregs[REG_ERR];
 	siglongjmp(jmpbuf, 1);
 }
@@ -336,7 +337,8 @@ static void test_vsys_r(void)
 	else if (can_read)
 		ksft_test_result_pass("We have read access\n");
 	else
-		ksft_test_result_pass("We do not have read access: #PF(0x%lx)\n", segv_err);
+		ksft_test_result_pass("We do not have read access (trap=%ld, error=0x%lx)\n",
+				      segv_trapno, segv_err);
 }
 
 static void test_vsys_x(void)
@@ -347,7 +349,7 @@ static void test_vsys_x(void)
 		return;
 	}
 
-	ksft_print_msg("Make sure that vsyscalls really page fault\n");
+	ksft_print_msg("Make sure that vsyscalls really cause a fault\n");
 
 	bool can_exec;
 	if (sigsetjmp(jmpbuf, 1) == 0) {
@@ -358,13 +360,14 @@
 	}
 
 	if (can_exec)
-		ksft_test_result_fail("Executing the vsyscall did not page fault\n");
-	else if (segv_err & (1 << 4)) /* INSTR */
-		ksft_test_result_pass("Executing the vsyscall page failed: #PF(0x%lx)\n",
-				      segv_err);
+		ksft_test_result_fail("Executing the vsyscall did not fault\n");
+	/* #GP or #PF (with X86_PF_INSTR) */
+	else if ((segv_trapno == 13) || ((segv_trapno == 14) && (segv_err & (1 << 4))))
+		ksft_test_result_pass("Executing the vsyscall page failed (trap=%ld, error=0x%lx)\n",
+				      segv_trapno, segv_err);
 	else
-		ksft_test_result_fail("Execution failed with the wrong error: #PF(0x%lx)\n",
-				      segv_err);
+		ksft_test_result_fail("Execution failed with the wrong error (trap=%ld, error=0x%lx)\n",
+				      segv_trapno, segv_err);
 }
 
 /*
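The updated pass condition accepts either fault type: trap 13 is #GP (which LASS raises when it blocks the vsyscall instruction fetch before paging is consulted) and trap 14 is #PF with the X86_PF_INSTR bit (bit 4) set in the error code. A self-contained sketch (not part of the patch, function name hypothetical) of the check the selftest now performs on the values captured by the SIGSEGV handler:

#include <stdbool.h>

static bool vsys_exec_fault_ok(unsigned long trapno, unsigned long err)
{
	if (trapno == 13)			/* #GP, e.g. a LASS violation */
		return true;
	if (trapno == 14 && (err & (1 << 4)))	/* #PF on an instruction fetch */
		return true;
	return false;
}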