more s390 updates for 6.14 merge window

- The rework that uncoupled physical and virtual address spaces
   inadvertently prevented KASAN shadow mappings from using large
   pages. Restore large page mappings for KASAN shadows
 
 - Add decompressor routine physmem_alloc() that may fail,
   unlike physmem_alloc_or_die(). This allows callers to
   implement fallback paths
 
 - Allow falling back from large pages to smaller pages (1MB or
   4KB) if the allocation of 2GB pages in the decompressor cannot
   be fulfilled
 
 - Add to the decompressor boot print support for the "%%" format
   string, width and padding handling, length modifiers and
   decimal conversion specifiers
 
 - Add to the decompressor message severity levels similar to
   kernel ones. Support command-line options that control
   console output verbosity
 
 - Replace boot_printk() calls with appropriate loglevel-specific
   helpers such as boot_emerg(), boot_warn(), and boot_debug()
 
 - Collect all boot messages into a ring buffer independent
   of the current log level. This is particularly useful for
   early crash analysis
 
 - If 'earlyprintk' command line parameter is not specified, store
   decompressor boot messages in a ring buffer to be printed later
   by the kernel, once the console driver is registered
 
 - Add 'bootdebug' command line parameter to enable printing of
   decompressor debug messages when needed. That parameter allows
   message suppressing and filtering
 
 - Dump boot messages on a decompressor crash, but only if
   'bootdebug' command line parameter is enabled
 
 - When CONFIG_PRINTK_TIME is enabled, add timestamps to boot
   messages in the same format as regular printk()
 
 - Dump physical memory tracking information on boot:
   online ranges, reserved areas and vmem allocations
 
 - Dump virtual memory layout and randomization details
 
 - Improve decompression error reporting and dump the message
   ring buffer in case the boot failed and system halted
 
 - Add an exception handler which handles exceptions when an
   attempt is made to set the FPU control register to an invalid
   value. Remove the '.fixup' section as a result of this change
 
 - Use 'A', 'O', and 'R' inline assembly format flags, which
   allow recent Clang compilers to generate better FPU code
 
 - Rework uaccess code so it reads better and generates more
   efficient code
 
 - Cleanup futex inline assembly code
 
 - Disable KMSAN instrumentation for futex inline assemblies, which
   contain dereferenced user pointers. Otherwise, shadows for the
   user pointers would be accessed
 
 - PFs which are not initially configured but in standby create
   only a single-function PCI domain. If they are configured later
   on, sibling PFs and their child VFs will not be added to their
   PCI domain, breaking SR-IOV expectations. Fix that by allowing
   PFs which are initially in standby to create multi-function
   PCI domains
 
 - Add '-std=gnu11' to decompressor and purgatory CFLAGS to avoid
   compile errors caused by kernel's own definitions of 'bool',
   'false', and 'true' conflicting with the C23 reserved keywords
 
 - Fix sclp subsystem failure when an sclp console is not present
 
 - Fix misuse of non-NULL terminated strings in vmlogrdr driver
 
 - Various other small improvements, cleanups and fixes
 -----BEGIN PGP SIGNATURE-----
 
 iI0EABYKADUWIQQrtrZiYVkVzKQcYivNdxKlNrRb8AUCZ5t0jhccYWdvcmRlZXZA
 bGludXguaWJtLmNvbQAKCRDNdxKlNrRb8A4wAP91BcHtV4OTmoMsG4JTwiy9wTzI
 zro9IrBSCLPLED7kfQEAzjEspd5gWMFDnIM/KxXmCNHA/nBTXVh/1F+b1F0z+Q8=
 =JfEG
 -----END PGP SIGNATURE-----

Merge tag 's390-6.14-2' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull more s390 updates from Alexander Gordeev:

 - The rework that uncoupled physical and virtual address spaces
   inadvertently prevented KASAN shadow mappings from using large pages.
   Restore large page mappings for KASAN shadows

 - Add decompressor routine physmem_alloc() that may fail, unlike
   physmem_alloc_or_die(). This allows callers to implement fallback
   paths

 - Allow falling back from large pages to smaller pages (1MB or 4KB) if
   the allocation of 2GB pages in the decompressor cannot be fulfilled

 - Add to the decompressor boot print support for the "%%" format string,
   width and padding handling, length modifiers and decimal conversion
   specifiers

 - Add to the decompressor message severity levels similar to kernel
   ones. Support command-line options that control console output
   verbosity

 - Replace boot_printk() calls with appropriate loglevel-specific helpers
   such as boot_emerg(), boot_warn(), and boot_debug()

 - Collect all boot messages into a ring buffer independent of the
   current log level. This is particularly useful for early crash
   analysis

 - If 'earlyprintk' command line parameter is not specified, store
   decompressor boot messages in a ring buffer to be printed later by
   the kernel, once the console driver is registered

 - Add 'bootdebug' command line parameter to enable printing of
   decompressor debug messages when needed. That parameter allows
   message suppressing and filtering

 - Dump boot messages on a decompressor crash, but only if 'bootdebug'
   command line parameter is enabled

 - When CONFIG_PRINTK_TIME is enabled, add timestamps to boot messages
   in the same format as regular printk()

 - Dump physical memory tracking information on boot: online ranges,
   reserved areas and vmem allocations

 - Dump virtual memory layout and randomization details

 - Improve decompression error reporting and dump the message ring
   buffer in case the boot failed and system halted

 - Add an exception handler which handles exceptions when an attempt is
   made to set the FPU control register to an invalid value. Remove the
   '.fixup' section as a result of this change

 - Use 'A', 'O', and 'R' inline assembly format flags, which allow
   recent Clang compilers to generate better FPU code

 - Rework uaccess code so it reads better and generates more efficient
   code

 - Cleanup futex inline assembly code

 - Disable KMSAN instrumentation for futex inline assemblies, which
   contain dereferenced user pointers. Otherwise, shadows for the user
   pointers would be accessed

 - PFs which are not initially configured but in standby create only a
   single-function PCI domain. If they are configured later on, sibling
   PFs and their child VFs will not be added to their PCI domain,
   breaking SR-IOV expectations.

   Fix that by allowing PFs which are initially in standby to create
   multi-function PCI domains

 - Add '-std=gnu11' to decompressor and purgatory CFLAGS to avoid
   compile errors caused by kernel's own definitions of 'bool', 'false',
   and 'true' conflicting with the C23 reserved keywords

 - Fix sclp subsystem failure when an sclp console is not present

 - Fix misuse of non-NULL terminated strings in vmlogrdr driver

 - Various other small improvements, cleanups and fixes

* tag 's390-6.14-2' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (53 commits)
  s390/vmlogrdr: Use array instead of string initializer
  s390/vmlogrdr: Use internal_name for error messages
  s390/sclp: Initialize sclp subsystem via arch_cpu_finalize_init()
  s390/tools: Use array instead of string initializer
  s390/vmem: Fix null-pointer-arithmetic warning in vmem_map_init()
  s390: Add '-std=gnu11' to decompressor and purgatory CFLAGS
  s390/bitops: Use correct constraint for arch_test_bit() inline assembly
  s390/pci: Fix SR-IOV for PFs initially in standby
  s390/futex: Avoid KMSAN instrumention for user pointers
  s390/uaccess: Rename get_put_user_noinstr_attributes to uaccess_kmsan_or_inline
  s390/futex: Cleanup futex_atomic_cmpxchg_inatomic()
  s390/futex: Generate futex atomic op functions
  s390/uaccess: Remove INLINE_COPY_FROM_USER and INLINE_COPY_TO_USER
  s390/uaccess: Use asm goto for put_user()/get_user()
  s390/uaccess: Remove usage of the oac specifier
  s390/uaccess: Replace EX_TABLE_UA_LOAD_MEM exception handling
  s390/uaccess: Cleanup noinstr __put_user()/__get_user() inline assembly constraints
  s390/uaccess: Remove __put_user_fn()/__get_user_fn() wrappers
  s390/uaccess: Move put_user() / __put_user() close to put_user() asm code
  s390/uaccess: Use asm goto for __mvc_kernel_nofault()
  ...
Committed by Linus Torvalds, 2025-01-30 10:48:17 -08:00
commit b731bc5f49
34 changed files with 1124 additions and 683 deletions

View File

@ -52,13 +52,19 @@ config KASAN_SHADOW_OFFSET
depends on KASAN depends on KASAN
default 0x1C000000000000 default 0x1C000000000000
config GCC_ASM_FLAG_OUTPUT_BROKEN config CC_ASM_FLAG_OUTPUT_BROKEN
def_bool CC_IS_GCC && GCC_VERSION < 140200 def_bool CC_IS_GCC && GCC_VERSION < 140200
help help
GCC versions before 14.2.0 may die with an internal GCC versions before 14.2.0 may die with an internal
compiler error in some configurations if flag output compiler error in some configurations if flag output
operands are used within inline assemblies. operands are used within inline assemblies.
config CC_HAS_ASM_AOR_FORMAT_FLAGS
def_bool !(CC_IS_CLANG && CLANG_VERSION < 190100)
help
Clang versions before 19.1.0 do not support A,
O, and R inline assembly format flags.
config S390 config S390
def_bool y def_bool y
# #
@ -72,6 +78,7 @@ config S390
select ARCH_ENABLE_MEMORY_HOTPLUG if SPARSEMEM select ARCH_ENABLE_MEMORY_HOTPLUG if SPARSEMEM
select ARCH_ENABLE_MEMORY_HOTREMOVE select ARCH_ENABLE_MEMORY_HOTREMOVE
select ARCH_ENABLE_SPLIT_PMD_PTLOCK if PGTABLE_LEVELS > 2 select ARCH_ENABLE_SPLIT_PMD_PTLOCK if PGTABLE_LEVELS > 2
select ARCH_HAS_CPU_FINALIZE_INIT
select ARCH_HAS_CRC32 select ARCH_HAS_CRC32
select ARCH_HAS_CURRENT_STACK_POINTER select ARCH_HAS_CURRENT_STACK_POINTER
select ARCH_HAS_DEBUG_VIRTUAL select ARCH_HAS_DEBUG_VIRTUAL

View File

@ -22,7 +22,7 @@ KBUILD_AFLAGS_DECOMPRESSOR := $(CLANG_FLAGS) -m64 -D__ASSEMBLY__
ifndef CONFIG_AS_IS_LLVM ifndef CONFIG_AS_IS_LLVM
KBUILD_AFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO),$(aflags_dwarf)) KBUILD_AFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO),$(aflags_dwarf))
endif endif
KBUILD_CFLAGS_DECOMPRESSOR := $(CLANG_FLAGS) -m64 -O2 -mpacked-stack KBUILD_CFLAGS_DECOMPRESSOR := $(CLANG_FLAGS) -m64 -O2 -mpacked-stack -std=gnu11
KBUILD_CFLAGS_DECOMPRESSOR += -DDISABLE_BRANCH_PROFILING -D__NO_FORTIFY KBUILD_CFLAGS_DECOMPRESSOR += -DDISABLE_BRANCH_PROFILING -D__NO_FORTIFY
KBUILD_CFLAGS_DECOMPRESSOR += -D__DECOMPRESSOR KBUILD_CFLAGS_DECOMPRESSOR += -D__DECOMPRESSOR
KBUILD_CFLAGS_DECOMPRESSOR += -fno-delete-null-pointer-checks -msoft-float -mbackchain KBUILD_CFLAGS_DECOMPRESSOR += -fno-delete-null-pointer-checks -msoft-float -mbackchain
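
The conflict that the added '-std=gnu11' avoids can be shown with a short
sketch (illustrative only, not actual kernel code): under C23, which newer
compilers may select by default, 'bool', 'true' and 'false' are reserved
keywords, so kernel-style definitions of them no longer compile:

  /* Sketch of the C23 keyword conflict avoided by -std=gnu11 */
  typedef _Bool bool;   /* error: 'bool' is a keyword in C23 */
  enum {
          false = 0,    /* error: 'false' is a keyword in C23 */
          true = 1
  };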

View File

@ -46,7 +46,7 @@ void print_missing_facilities(void)
* z/VM adds a four character prefix. * z/VM adds a four character prefix.
*/ */
if (strlen(als_str) > 70) { if (strlen(als_str) > 70) {
boot_printk("%s\n", als_str); boot_emerg("%s\n", als_str);
*als_str = '\0'; *als_str = '\0';
} }
u16_to_decimal(val_str, i * BITS_PER_LONG + j); u16_to_decimal(val_str, i * BITS_PER_LONG + j);
@ -54,7 +54,7 @@ void print_missing_facilities(void)
first = 0; first = 0;
} }
} }
boot_printk("%s\n", als_str); boot_emerg("%s\n", als_str);
} }
static void facility_mismatch(void) static void facility_mismatch(void)
@ -62,10 +62,10 @@ static void facility_mismatch(void)
struct cpuid id; struct cpuid id;
get_cpu_id(&id); get_cpu_id(&id);
boot_printk("The Linux kernel requires more recent processor hardware\n"); boot_emerg("The Linux kernel requires more recent processor hardware\n");
boot_printk("Detected machine-type number: %4x\n", id.machine); boot_emerg("Detected machine-type number: %4x\n", id.machine);
print_missing_facilities(); print_missing_facilities();
boot_printk("See Principles of Operations for facility bits\n"); boot_emerg("See Principles of Operations for facility bits\n");
disabled_wait(); disabled_wait();
} }

View File

@ -8,6 +8,7 @@
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
#include <linux/printk.h>
#include <asm/physmem_info.h> #include <asm/physmem_info.h>
struct machine_info { struct machine_info {
@ -47,13 +48,16 @@ void physmem_set_usable_limit(unsigned long limit);
void physmem_reserve(enum reserved_range_type type, unsigned long addr, unsigned long size); void physmem_reserve(enum reserved_range_type type, unsigned long addr, unsigned long size);
void physmem_free(enum reserved_range_type type); void physmem_free(enum reserved_range_type type);
/* for continuous/multiple allocations per type */ /* for continuous/multiple allocations per type */
unsigned long physmem_alloc_top_down(enum reserved_range_type type, unsigned long size, unsigned long physmem_alloc_or_die(enum reserved_range_type type, unsigned long size,
unsigned long align); unsigned long align);
unsigned long physmem_alloc(enum reserved_range_type type, unsigned long size,
unsigned long align, bool die_on_oom);
/* for single allocations, 1 per type */ /* for single allocations, 1 per type */
unsigned long physmem_alloc_range(enum reserved_range_type type, unsigned long size, unsigned long physmem_alloc_range(enum reserved_range_type type, unsigned long size,
unsigned long align, unsigned long min, unsigned long max, unsigned long align, unsigned long min, unsigned long max,
bool die_on_oom); bool die_on_oom);
unsigned long get_physmem_alloc_pos(void); unsigned long get_physmem_alloc_pos(void);
void dump_physmem_reserved(void);
bool ipl_report_certs_intersects(unsigned long addr, unsigned long size, bool ipl_report_certs_intersects(unsigned long addr, unsigned long size,
unsigned long *intersection_start); unsigned long *intersection_start);
bool is_ipl_block_dump(void); bool is_ipl_block_dump(void);
@ -69,12 +73,28 @@ void print_pgm_check_info(void);
unsigned long randomize_within_range(unsigned long size, unsigned long align, unsigned long randomize_within_range(unsigned long size, unsigned long align,
unsigned long min, unsigned long max); unsigned long min, unsigned long max);
void setup_vmem(unsigned long kernel_start, unsigned long kernel_end, unsigned long asce_limit); void setup_vmem(unsigned long kernel_start, unsigned long kernel_end, unsigned long asce_limit);
void __printf(1, 2) boot_printk(const char *fmt, ...); int __printf(1, 2) boot_printk(const char *fmt, ...);
void print_stacktrace(unsigned long sp); void print_stacktrace(unsigned long sp);
void error(char *m); void error(char *m);
int get_random(unsigned long limit, unsigned long *value); int get_random(unsigned long limit, unsigned long *value);
void boot_rb_dump(void);
#ifndef boot_fmt
#define boot_fmt(fmt) fmt
#endif
#define boot_emerg(fmt, ...) boot_printk(KERN_EMERG boot_fmt(fmt), ##__VA_ARGS__)
#define boot_alert(fmt, ...) boot_printk(KERN_ALERT boot_fmt(fmt), ##__VA_ARGS__)
#define boot_crit(fmt, ...) boot_printk(KERN_CRIT boot_fmt(fmt), ##__VA_ARGS__)
#define boot_err(fmt, ...) boot_printk(KERN_ERR boot_fmt(fmt), ##__VA_ARGS__)
#define boot_warn(fmt, ...) boot_printk(KERN_WARNING boot_fmt(fmt), ##__VA_ARGS__)
#define boot_notice(fmt, ...) boot_printk(KERN_NOTICE boot_fmt(fmt), ##__VA_ARGS__)
#define boot_info(fmt, ...) boot_printk(KERN_INFO boot_fmt(fmt), ##__VA_ARGS__)
#define boot_debug(fmt, ...) boot_printk(KERN_DEBUG boot_fmt(fmt), ##__VA_ARGS__)
extern struct machine_info machine; extern struct machine_info machine;
extern int boot_console_loglevel;
extern bool boot_ignore_loglevel;
/* Symbols defined by linker scripts */ /* Symbols defined by linker scripts */
extern const char kernel_version[]; extern const char kernel_version[];
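
A minimal usage sketch of the new helpers (hypothetical file and names): a
translation unit defines boot_fmt() before including "boot.h" to get a
per-file message prefix, then uses the loglevel-specific wrappers instead of
plain boot_printk():

  /* Hypothetical usage sketch of the boot console helpers */
  #define boot_fmt(fmt) "example: " fmt   /* must precede the "boot.h" include */

  #include "boot.h"

  static void report_range(unsigned long start, unsigned long end)
  {
          /* printed only when 'bootdebug' is specified on the command line */
          boot_debug("range: 0x%016lx-0x%016lx\n", start, end);
          if (start >= end)
                  boot_warn("empty range at 0x%016lx\n", start);
  }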

View File

@ -9,6 +9,7 @@
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/string.h> #include <linux/string.h>
#include <asm/boot_data.h>
#include <asm/page.h> #include <asm/page.h>
#include "decompressor.h" #include "decompressor.h"
#include "boot.h" #include "boot.h"
@ -63,6 +64,15 @@ static unsigned long free_mem_end_ptr = (unsigned long) _end + BOOT_HEAP_SIZE;
#include "../../../../lib/decompress_unzstd.c" #include "../../../../lib/decompress_unzstd.c"
#endif #endif
static void decompress_error(char *m)
{
if (bootdebug)
boot_rb_dump();
boot_emerg("Decompression error: %s\n", m);
boot_emerg(" -- System halted\n");
disabled_wait();
}
unsigned long mem_safe_offset(void) unsigned long mem_safe_offset(void)
{ {
return ALIGN(free_mem_end_ptr, PAGE_SIZE); return ALIGN(free_mem_end_ptr, PAGE_SIZE);
@ -71,5 +81,5 @@ unsigned long mem_safe_offset(void)
void deploy_kernel(void *output) void deploy_kernel(void *output)
{ {
__decompress(_compressed_start, _compressed_end - _compressed_start, __decompress(_compressed_start, _compressed_end - _compressed_start,
NULL, NULL, output, vmlinux.image_size, NULL, error); NULL, NULL, output, vmlinux.image_size, NULL, decompress_error);
} }

View File

@ -215,7 +215,7 @@ static void check_cleared_facilities(void)
for (i = 0; i < ARRAY_SIZE(als); i++) { for (i = 0; i < ARRAY_SIZE(als); i++) {
if ((stfle_fac_list[i] & als[i]) != als[i]) { if ((stfle_fac_list[i] & als[i]) != als[i]) {
boot_printk("Warning: The Linux kernel requires facilities cleared via command line option\n"); boot_emerg("The Linux kernel requires facilities cleared via command line option\n");
print_missing_facilities(); print_missing_facilities();
break; break;
} }
@ -313,5 +313,23 @@ void parse_boot_command_line(void)
#endif #endif
if (!strcmp(param, "relocate_lowcore") && test_facility(193)) if (!strcmp(param, "relocate_lowcore") && test_facility(193))
relocate_lowcore = 1; relocate_lowcore = 1;
if (!strcmp(param, "earlyprintk"))
boot_earlyprintk = true;
if (!strcmp(param, "debug"))
boot_console_loglevel = CONSOLE_LOGLEVEL_DEBUG;
if (!strcmp(param, "bootdebug")) {
bootdebug = true;
if (val)
strncpy(bootdebug_filter, val, sizeof(bootdebug_filter) - 1);
}
if (!strcmp(param, "quiet"))
boot_console_loglevel = CONSOLE_LOGLEVEL_QUIET;
if (!strcmp(param, "ignore_loglevel"))
boot_ignore_loglevel = true;
if (!strcmp(param, "loglevel")) {
boot_console_loglevel = simple_strtoull(val, NULL, 10);
if (boot_console_loglevel < CONSOLE_LOGLEVEL_MIN)
boot_console_loglevel = CONSOLE_LOGLEVEL_MIN;
}
} }
} }
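
As an illustration (parameter combination assumed, not taken from the patch),
a command line such as the following makes the decompressor print its
messages directly on the SCLP console, raises the console loglevel to the
debug level and enables boot_debug() output; an optional value passed as
'bootdebug=<filter>' is copied into bootdebug_filter and used for message
filtering:

  earlyprintk debug bootdebug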

View File

@ -30,7 +30,6 @@ static unsigned long get_cert_comp_list_size(void)
{ {
struct ipl_rb_certificate_entry *cert; struct ipl_rb_certificate_entry *cert;
struct ipl_rb_component_entry *comp; struct ipl_rb_component_entry *comp;
size_t size;
/* /*
* Find the length for the IPL report boot data * Find the length for the IPL report boot data
@ -155,7 +154,7 @@ void save_ipl_cert_comp_list(void)
return; return;
size = get_cert_comp_list_size(); size = get_cert_comp_list_size();
early_ipl_comp_list_addr = physmem_alloc_top_down(RR_CERT_COMP_LIST, size, sizeof(int)); early_ipl_comp_list_addr = physmem_alloc_or_die(RR_CERT_COMP_LIST, size, sizeof(int));
ipl_cert_list_addr = early_ipl_comp_list_addr + early_ipl_comp_list_size; ipl_cert_list_addr = early_ipl_comp_list_addr + early_ipl_comp_list_size;
copy_components_bootdata(); copy_components_bootdata();

View File

@ -32,7 +32,7 @@ struct prng_parm {
static int check_prng(void) static int check_prng(void)
{ {
if (!cpacf_query_func(CPACF_KMC, CPACF_KMC_PRNG)) { if (!cpacf_query_func(CPACF_KMC, CPACF_KMC_PRNG)) {
boot_printk("KASLR disabled: CPU has no PRNG\n"); boot_warn("KASLR disabled: CPU has no PRNG\n");
return 0; return 0;
} }
if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG)) if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG))
@ -168,7 +168,7 @@ static unsigned long iterate_valid_positions(unsigned long size, unsigned long a
* cannot have chains. * cannot have chains.
* *
* On the other hand, "dynamic" or "repetitive" allocations are done via * On the other hand, "dynamic" or "repetitive" allocations are done via
* physmem_alloc_top_down(). These allocations are tightly packed together * physmem_alloc_or_die(). These allocations are tightly packed together
* top down from the end of online memory. physmem_alloc_pos represents * top down from the end of online memory. physmem_alloc_pos represents
* current position where those allocations start. * current position where those allocations start.
* *

View File

@ -17,13 +17,14 @@ void print_stacktrace(unsigned long sp)
(unsigned long)_stack_end }; (unsigned long)_stack_end };
bool first = true; bool first = true;
boot_printk("Call Trace:\n"); boot_emerg("Call Trace:\n");
while (!(sp & 0x7) && on_stack(&boot_stack, sp, sizeof(struct stack_frame))) { while (!(sp & 0x7) && on_stack(&boot_stack, sp, sizeof(struct stack_frame))) {
struct stack_frame *sf = (struct stack_frame *)sp; struct stack_frame *sf = (struct stack_frame *)sp;
boot_printk(first ? "(sp:%016lx [<%016lx>] %pS)\n" : if (first)
" sp:%016lx [<%016lx>] %pS\n", boot_emerg("(sp:%016lx [<%016lx>] %pS)\n", sp, sf->gprs[8], (void *)sf->gprs[8]);
sp, sf->gprs[8], (void *)sf->gprs[8]); else
boot_emerg(" sp:%016lx [<%016lx>] %pS\n", sp, sf->gprs[8], (void *)sf->gprs[8]);
if (sf->back_chain <= sp) if (sf->back_chain <= sp)
break; break;
sp = sf->back_chain; sp = sf->back_chain;
@ -36,30 +37,30 @@ void print_pgm_check_info(void)
unsigned long *gpregs = (unsigned long *)get_lowcore()->gpregs_save_area; unsigned long *gpregs = (unsigned long *)get_lowcore()->gpregs_save_area;
struct psw_bits *psw = &psw_bits(get_lowcore()->psw_save_area); struct psw_bits *psw = &psw_bits(get_lowcore()->psw_save_area);
boot_printk("Linux version %s\n", kernel_version); if (bootdebug)
boot_rb_dump();
boot_emerg("Linux version %s\n", kernel_version);
if (!is_prot_virt_guest() && early_command_line[0]) if (!is_prot_virt_guest() && early_command_line[0])
boot_printk("Kernel command line: %s\n", early_command_line); boot_emerg("Kernel command line: %s\n", early_command_line);
boot_printk("Kernel fault: interruption code %04x ilc:%x\n", boot_emerg("Kernel fault: interruption code %04x ilc:%d\n",
get_lowcore()->pgm_code, get_lowcore()->pgm_ilc >> 1); get_lowcore()->pgm_code, get_lowcore()->pgm_ilc >> 1);
if (kaslr_enabled()) { if (kaslr_enabled()) {
boot_printk("Kernel random base: %lx\n", __kaslr_offset); boot_emerg("Kernel random base: %lx\n", __kaslr_offset);
boot_printk("Kernel random base phys: %lx\n", __kaslr_offset_phys); boot_emerg("Kernel random base phys: %lx\n", __kaslr_offset_phys);
} }
boot_printk("PSW : %016lx %016lx (%pS)\n", boot_emerg("PSW : %016lx %016lx (%pS)\n",
get_lowcore()->psw_save_area.mask, get_lowcore()->psw_save_area.mask,
get_lowcore()->psw_save_area.addr, get_lowcore()->psw_save_area.addr,
(void *)get_lowcore()->psw_save_area.addr); (void *)get_lowcore()->psw_save_area.addr);
boot_printk( boot_emerg(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x P:%x AS:%x CC:%x PM:%x RI:%x EA:%x\n",
" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x P:%x AS:%x CC:%x PM:%x RI:%x EA:%x\n",
psw->per, psw->dat, psw->io, psw->ext, psw->key, psw->mcheck, psw->per, psw->dat, psw->io, psw->ext, psw->key, psw->mcheck,
psw->wait, psw->pstate, psw->as, psw->cc, psw->pm, psw->ri, psw->wait, psw->pstate, psw->as, psw->cc, psw->pm, psw->ri, psw->eaba);
psw->eaba); boot_emerg("GPRS: %016lx %016lx %016lx %016lx\n", gpregs[0], gpregs[1], gpregs[2], gpregs[3]);
boot_printk("GPRS: %016lx %016lx %016lx %016lx\n", gpregs[0], gpregs[1], gpregs[2], gpregs[3]); boot_emerg(" %016lx %016lx %016lx %016lx\n", gpregs[4], gpregs[5], gpregs[6], gpregs[7]);
boot_printk(" %016lx %016lx %016lx %016lx\n", gpregs[4], gpregs[5], gpregs[6], gpregs[7]); boot_emerg(" %016lx %016lx %016lx %016lx\n", gpregs[8], gpregs[9], gpregs[10], gpregs[11]);
boot_printk(" %016lx %016lx %016lx %016lx\n", gpregs[8], gpregs[9], gpregs[10], gpregs[11]); boot_emerg(" %016lx %016lx %016lx %016lx\n", gpregs[12], gpregs[13], gpregs[14], gpregs[15]);
boot_printk(" %016lx %016lx %016lx %016lx\n", gpregs[12], gpregs[13], gpregs[14], gpregs[15]);
print_stacktrace(get_lowcore()->gpregs_save_area[15]); print_stacktrace(get_lowcore()->gpregs_save_area[15]);
boot_printk("Last Breaking-Event-Address:\n"); boot_emerg("Last Breaking-Event-Address:\n");
boot_printk(" [<%016lx>] %pS\n", (unsigned long)get_lowcore()->pgm_last_break, boot_emerg(" [<%016lx>] %pS\n", (unsigned long)get_lowcore()->pgm_last_break,
(void *)get_lowcore()->pgm_last_break); (void *)get_lowcore()->pgm_last_break);
} }

View File

@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0 // SPDX-License-Identifier: GPL-2.0
#define boot_fmt(fmt) "physmem: " fmt
#include <linux/processor.h> #include <linux/processor.h>
#include <linux/errno.h> #include <linux/errno.h>
#include <linux/init.h> #include <linux/init.h>
@ -28,7 +29,7 @@ static struct physmem_range *__get_physmem_range_ptr(u32 n)
return &physmem_info.online[n]; return &physmem_info.online[n];
if (unlikely(!physmem_info.online_extended)) { if (unlikely(!physmem_info.online_extended)) {
physmem_info.online_extended = (struct physmem_range *)physmem_alloc_range( physmem_info.online_extended = (struct physmem_range *)physmem_alloc_range(
RR_MEM_DETECT_EXTENDED, ENTRIES_EXTENDED_MAX, sizeof(long), 0, RR_MEM_DETECT_EXT, ENTRIES_EXTENDED_MAX, sizeof(long), 0,
physmem_alloc_pos, true); physmem_alloc_pos, true);
} }
return &physmem_info.online_extended[n - MEM_INLINED_ENTRIES]; return &physmem_info.online_extended[n - MEM_INLINED_ENTRIES];
@ -207,11 +208,16 @@ unsigned long detect_max_physmem_end(void)
max_physmem_end = search_mem_end(); max_physmem_end = search_mem_end();
physmem_info.info_source = MEM_DETECT_BIN_SEARCH; physmem_info.info_source = MEM_DETECT_BIN_SEARCH;
} }
boot_debug("Max physical memory: 0x%016lx (info source: %s)\n", max_physmem_end,
get_physmem_info_source());
return max_physmem_end; return max_physmem_end;
} }
void detect_physmem_online_ranges(unsigned long max_physmem_end) void detect_physmem_online_ranges(unsigned long max_physmem_end)
{ {
unsigned long start, end;
int i;
if (!sclp_early_read_storage_info()) { if (!sclp_early_read_storage_info()) {
physmem_info.info_source = MEM_DETECT_SCLP_STOR_INFO; physmem_info.info_source = MEM_DETECT_SCLP_STOR_INFO;
} else if (physmem_info.info_source == MEM_DETECT_DIAG500_STOR_LIMIT) { } else if (physmem_info.info_source == MEM_DETECT_DIAG500_STOR_LIMIT) {
@ -226,12 +232,16 @@ void detect_physmem_online_ranges(unsigned long max_physmem_end)
} else if (max_physmem_end) { } else if (max_physmem_end) {
add_physmem_online_range(0, max_physmem_end); add_physmem_online_range(0, max_physmem_end);
} }
boot_debug("Online memory ranges (info source: %s):\n", get_physmem_info_source());
for_each_physmem_online_range(i, &start, &end)
boot_debug(" online [%d]: 0x%016lx-0x%016lx\n", i, start, end);
} }
void physmem_set_usable_limit(unsigned long limit) void physmem_set_usable_limit(unsigned long limit)
{ {
physmem_info.usable = limit; physmem_info.usable = limit;
physmem_alloc_pos = limit; physmem_alloc_pos = limit;
boot_debug("Usable memory limit: 0x%016lx\n", limit);
} }
static void die_oom(unsigned long size, unsigned long align, unsigned long min, unsigned long max) static void die_oom(unsigned long size, unsigned long align, unsigned long min, unsigned long max)
@ -241,38 +251,47 @@ static void die_oom(unsigned long size, unsigned long align, unsigned long min,
enum reserved_range_type t; enum reserved_range_type t;
int i; int i;
boot_printk("Linux version %s\n", kernel_version); boot_emerg("Linux version %s\n", kernel_version);
if (!is_prot_virt_guest() && early_command_line[0]) if (!is_prot_virt_guest() && early_command_line[0])
boot_printk("Kernel command line: %s\n", early_command_line); boot_emerg("Kernel command line: %s\n", early_command_line);
boot_printk("Out of memory allocating %lx bytes %lx aligned in range %lx:%lx\n", boot_emerg("Out of memory allocating %lu bytes 0x%lx aligned in range %lx:%lx\n",
size, align, min, max); size, align, min, max);
boot_printk("Reserved memory ranges:\n"); boot_emerg("Reserved memory ranges:\n");
for_each_physmem_reserved_range(t, range, &start, &end) { for_each_physmem_reserved_range(t, range, &start, &end) {
boot_printk("%016lx %016lx %s\n", start, end, get_rr_type_name(t)); boot_emerg("%016lx %016lx %s\n", start, end, get_rr_type_name(t));
total_reserved_mem += end - start; total_reserved_mem += end - start;
} }
boot_printk("Usable online memory ranges (info source: %s [%x]):\n", boot_emerg("Usable online memory ranges (info source: %s [%d]):\n",
get_physmem_info_source(), physmem_info.info_source); get_physmem_info_source(), physmem_info.info_source);
for_each_physmem_usable_range(i, &start, &end) { for_each_physmem_usable_range(i, &start, &end) {
boot_printk("%016lx %016lx\n", start, end); boot_emerg("%016lx %016lx\n", start, end);
total_mem += end - start; total_mem += end - start;
} }
boot_printk("Usable online memory total: %lx Reserved: %lx Free: %lx\n", boot_emerg("Usable online memory total: %lu Reserved: %lu Free: %lu\n",
total_mem, total_reserved_mem, total_mem, total_reserved_mem,
total_mem > total_reserved_mem ? total_mem - total_reserved_mem : 0); total_mem > total_reserved_mem ? total_mem - total_reserved_mem : 0);
print_stacktrace(current_frame_address()); print_stacktrace(current_frame_address());
boot_printk("\n\n -- System halted\n"); boot_emerg(" -- System halted\n");
disabled_wait(); disabled_wait();
} }
void physmem_reserve(enum reserved_range_type type, unsigned long addr, unsigned long size) static void _physmem_reserve(enum reserved_range_type type, unsigned long addr, unsigned long size)
{ {
physmem_info.reserved[type].start = addr; physmem_info.reserved[type].start = addr;
physmem_info.reserved[type].end = addr + size; physmem_info.reserved[type].end = addr + size;
} }
void physmem_reserve(enum reserved_range_type type, unsigned long addr, unsigned long size)
{
_physmem_reserve(type, addr, size);
boot_debug("%-14s 0x%016lx-0x%016lx %s\n", "Reserve:", addr, addr + size,
get_rr_type_name(type));
}
void physmem_free(enum reserved_range_type type) void physmem_free(enum reserved_range_type type)
{ {
boot_debug("%-14s 0x%016lx-0x%016lx %s\n", "Free:", physmem_info.reserved[type].start,
physmem_info.reserved[type].end, get_rr_type_name(type));
physmem_info.reserved[type].start = 0; physmem_info.reserved[type].start = 0;
physmem_info.reserved[type].end = 0; physmem_info.reserved[type].end = 0;
} }
@ -339,41 +358,73 @@ unsigned long physmem_alloc_range(enum reserved_range_type type, unsigned long s
max = min(max, physmem_alloc_pos); max = min(max, physmem_alloc_pos);
addr = __physmem_alloc_range(size, align, min, max, 0, NULL, die_on_oom); addr = __physmem_alloc_range(size, align, min, max, 0, NULL, die_on_oom);
if (addr) if (addr)
physmem_reserve(type, addr, size); _physmem_reserve(type, addr, size);
boot_debug("%-14s 0x%016lx-0x%016lx %s\n", "Alloc range:", addr, addr + size,
get_rr_type_name(type));
return addr; return addr;
} }
unsigned long physmem_alloc_top_down(enum reserved_range_type type, unsigned long size, unsigned long physmem_alloc(enum reserved_range_type type, unsigned long size,
unsigned long align) unsigned long align, bool die_on_oom)
{ {
struct reserved_range *range = &physmem_info.reserved[type]; struct reserved_range *range = &physmem_info.reserved[type];
struct reserved_range *new_range; struct reserved_range *new_range = NULL;
unsigned int ranges_left; unsigned int ranges_left;
unsigned long addr; unsigned long addr;
addr = __physmem_alloc_range(size, align, 0, physmem_alloc_pos, physmem_alloc_ranges, addr = __physmem_alloc_range(size, align, 0, physmem_alloc_pos, physmem_alloc_ranges,
&ranges_left, true); &ranges_left, die_on_oom);
if (!addr)
return 0;
/* if not a consecutive allocation of the same type or first allocation */ /* if not a consecutive allocation of the same type or first allocation */
if (range->start != addr + size) { if (range->start != addr + size) {
if (range->end) { if (range->end) {
physmem_alloc_pos = __physmem_alloc_range( addr = __physmem_alloc_range(sizeof(struct reserved_range), 0, 0,
sizeof(struct reserved_range), 0, 0, physmem_alloc_pos, physmem_alloc_pos, physmem_alloc_ranges,
physmem_alloc_ranges, &ranges_left, true); &ranges_left, true);
new_range = (struct reserved_range *)physmem_alloc_pos; new_range = (struct reserved_range *)addr;
addr = __physmem_alloc_range(size, align, 0, addr, ranges_left,
&ranges_left, die_on_oom);
if (!addr)
return 0;
*new_range = *range; *new_range = *range;
range->chain = new_range; range->chain = new_range;
addr = __physmem_alloc_range(size, align, 0, physmem_alloc_pos,
ranges_left, &ranges_left, true);
} }
range->end = addr + size; range->end = addr + size;
} }
if (type != RR_VMEM) {
boot_debug("%-14s 0x%016lx-0x%016lx %-20s align 0x%lx split %d\n", "Alloc topdown:",
addr, addr + size, get_rr_type_name(type), align, !!new_range);
}
range->start = addr; range->start = addr;
physmem_alloc_pos = addr; physmem_alloc_pos = addr;
physmem_alloc_ranges = ranges_left; physmem_alloc_ranges = ranges_left;
return addr; return addr;
} }
unsigned long physmem_alloc_or_die(enum reserved_range_type type, unsigned long size,
unsigned long align)
{
return physmem_alloc(type, size, align, true);
}
unsigned long get_physmem_alloc_pos(void) unsigned long get_physmem_alloc_pos(void)
{ {
return physmem_alloc_pos; return physmem_alloc_pos;
} }
void dump_physmem_reserved(void)
{
struct reserved_range *range;
enum reserved_range_type t;
unsigned long start, end;
boot_debug("Reserved memory ranges:\n");
for_each_physmem_reserved_range(t, range, &start, &end) {
if (end) {
boot_debug("%-14s 0x%016lx-0x%016lx @%012lx chain %012lx\n",
get_rr_type_name(t), start, end, (unsigned long)range,
(unsigned long)range->chain);
}
}
}
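
The difference between the two allocators can be sketched with a hypothetical
caller (function name and sizes are assumptions): physmem_alloc() returns 0
when the request cannot be satisfied and die_on_oom is false, which makes a
fallback possible, whereas physmem_alloc_or_die() ends up in die_oom() and
never returns on failure:

  /* Hypothetical fallback sketch; RR_VMEM and the sizes are examples only */
  static unsigned long alloc_table(unsigned long large, unsigned long small)
  {
          unsigned long addr;

          addr = physmem_alloc(RR_VMEM, large, large, false);
          if (addr)
                  return addr;
          /* the large request could not be satisfied, settle for a smaller one */
          return physmem_alloc_or_die(RR_VMEM, small, small);
  }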

View File

@ -5,21 +5,111 @@
#include <linux/ctype.h> #include <linux/ctype.h>
#include <asm/stacktrace.h> #include <asm/stacktrace.h>
#include <asm/boot_data.h> #include <asm/boot_data.h>
#include <asm/sections.h>
#include <asm/lowcore.h> #include <asm/lowcore.h>
#include <asm/setup.h> #include <asm/setup.h>
#include <asm/sclp.h> #include <asm/sclp.h>
#include <asm/uv.h> #include <asm/uv.h>
#include "boot.h" #include "boot.h"
int boot_console_loglevel = CONFIG_CONSOLE_LOGLEVEL_DEFAULT;
bool boot_ignore_loglevel;
char __bootdata(boot_rb)[PAGE_SIZE * 2];
bool __bootdata(boot_earlyprintk);
size_t __bootdata(boot_rb_off);
char __bootdata(bootdebug_filter)[128];
bool __bootdata(bootdebug);
static void boot_rb_add(const char *str, size_t len)
{
/* leave double '\0' in the end */
size_t avail = sizeof(boot_rb) - boot_rb_off - 1;
/* store strings separated by '\0' */
if (len + 1 > avail)
boot_rb_off = 0;
strcpy(boot_rb + boot_rb_off, str);
boot_rb_off += len + 1;
}
static void print_rb_entry(const char *str)
{
sclp_early_printk(printk_skip_level(str));
}
static bool debug_messages_printed(void)
{
return boot_earlyprintk && (boot_ignore_loglevel || boot_console_loglevel > LOGLEVEL_DEBUG);
}
void boot_rb_dump(void)
{
if (debug_messages_printed())
return;
sclp_early_printk("Boot messages ring buffer:\n");
boot_rb_foreach(print_rb_entry);
}
const char hex_asc[] = "0123456789abcdef"; const char hex_asc[] = "0123456789abcdef";
static char *as_hex(char *dst, unsigned long val, int pad) static char *as_hex(char *dst, unsigned long val, int pad)
{ {
char *p, *end = p = dst + max(pad, (int)__fls(val | 1) / 4 + 1); char *p = dst + max(pad, (int)__fls(val | 1) / 4 + 1);
for (*p-- = 0; p >= dst; val >>= 4) for (*p-- = '\0'; p >= dst; val >>= 4)
*p-- = hex_asc[val & 0x0f]; *p-- = hex_asc[val & 0x0f];
return end; return dst;
}
#define MAX_NUMLEN 21
static char *as_dec(char *buf, unsigned long val, bool is_signed)
{
bool negative = false;
char *p = buf + MAX_NUMLEN;
if (is_signed && (long)val < 0) {
val = (val == LONG_MIN ? LONG_MIN : -(long)val);
negative = true;
}
*--p = '\0';
do {
*--p = '0' + (val % 10);
val /= 10;
} while (val);
if (negative)
*--p = '-';
return p;
}
static ssize_t strpad(char *dst, size_t dst_size, const char *src,
int _pad, bool zero_pad, bool decimal)
{
ssize_t len = strlen(src), pad = _pad;
char *p = dst;
if (max(len, abs(pad)) >= dst_size)
return -E2BIG;
if (pad > len) {
if (decimal && zero_pad && *src == '-') {
*p++ = '-';
src++;
len--;
pad--;
}
memset(p, zero_pad ? '0' : ' ', pad - len);
p += pad - len;
}
memcpy(p, src, len);
p += len;
if (pad < 0 && -pad > len) {
memset(p, ' ', -pad - len);
p += -pad - len;
}
*p = '\0';
return p - dst;
} }
static char *symstart(char *p) static char *symstart(char *p)
@ -58,35 +148,94 @@ static noinline char *findsym(unsigned long ip, unsigned short *off, unsigned sh
return NULL; return NULL;
} }
static noinline char *strsym(void *ip) #define MAX_SYMLEN 64
static noinline char *strsym(char *buf, void *ip)
{ {
static char buf[64];
unsigned short off; unsigned short off;
unsigned short len; unsigned short len;
char *p; char *p;
p = findsym((unsigned long)ip, &off, &len); p = findsym((unsigned long)ip, &off, &len);
if (p) { if (p) {
strncpy(buf, p, sizeof(buf)); strncpy(buf, p, MAX_SYMLEN);
/* reserve 15 bytes for offset/len in symbol+0x1234/0x1234 */ /* reserve 15 bytes for offset/len in symbol+0x1234/0x1234 */
p = buf + strnlen(buf, sizeof(buf) - 15); p = buf + strnlen(buf, MAX_SYMLEN - 15);
strcpy(p, "+0x"); strcpy(p, "+0x");
p = as_hex(p + 3, off, 0); as_hex(p + 3, off, 0);
strcpy(p, "/0x"); strcat(p, "/0x");
as_hex(p + 3, len, 0); as_hex(p + strlen(p), len, 0);
} else { } else {
as_hex(buf, (unsigned long)ip, 16); as_hex(buf, (unsigned long)ip, 16);
} }
return buf; return buf;
} }
void boot_printk(const char *fmt, ...) static inline int printk_loglevel(const char *buf)
{
if (buf[0] == KERN_SOH_ASCII && buf[1]) {
switch (buf[1]) {
case '0' ... '7':
return buf[1] - '0';
}
}
return MESSAGE_LOGLEVEL_DEFAULT;
}
static void boot_console_earlyprintk(const char *buf)
{
int level = printk_loglevel(buf);
/* always print emergency messages */
if (level > LOGLEVEL_EMERG && !boot_earlyprintk)
return;
buf = printk_skip_level(buf);
/* print debug messages only when bootdebug is enabled */
if (level == LOGLEVEL_DEBUG && (!bootdebug || !bootdebug_filter_match(skip_timestamp(buf))))
return;
if (boot_ignore_loglevel || level < boot_console_loglevel)
sclp_early_printk(buf);
}
static char *add_timestamp(char *buf)
{
#ifdef CONFIG_PRINTK_TIME
union tod_clock *boot_clock = (union tod_clock *)&get_lowcore()->boot_clock;
unsigned long ns = tod_to_ns(get_tod_clock() - boot_clock->tod);
char ts[MAX_NUMLEN];
*buf++ = '[';
buf += strpad(buf, MAX_NUMLEN, as_dec(ts, ns / NSEC_PER_SEC, 0), 5, 0, 0);
*buf++ = '.';
buf += strpad(buf, MAX_NUMLEN, as_dec(ts, (ns % NSEC_PER_SEC) / NSEC_PER_USEC, 0), 6, 1, 0);
*buf++ = ']';
*buf++ = ' ';
#endif
return buf;
}
#define va_arg_len_type(args, lenmod, typemod) \
((lenmod == 'l') ? va_arg(args, typemod long) : \
(lenmod == 'h') ? (typemod short)va_arg(args, typemod int) : \
(lenmod == 'H') ? (typemod char)va_arg(args, typemod int) : \
(lenmod == 'z') ? va_arg(args, typemod long) : \
va_arg(args, typemod int))
int boot_printk(const char *fmt, ...)
{ {
char buf[1024] = { 0 }; char buf[1024] = { 0 };
char *end = buf + sizeof(buf) - 1; /* make sure buf is 0 terminated */ char *end = buf + sizeof(buf) - 1; /* make sure buf is 0 terminated */
unsigned long pad; bool zero_pad, decimal;
char *p = buf; char *strval, *p = buf;
char valbuf[MAX(MAX_SYMLEN, MAX_NUMLEN)];
va_list args; va_list args;
char lenmod;
ssize_t len;
int pad;
*p++ = KERN_SOH_ASCII;
*p++ = printk_get_level(fmt) ?: '0' + MESSAGE_LOGLEVEL_DEFAULT;
p = add_timestamp(p);
fmt = printk_skip_level(fmt);
va_start(args, fmt); va_start(args, fmt);
for (; p < end && *fmt; fmt++) { for (; p < end && *fmt; fmt++) {
@ -94,31 +243,56 @@ void boot_printk(const char *fmt, ...)
*p++ = *fmt; *p++ = *fmt;
continue; continue;
} }
pad = isdigit(*++fmt) ? simple_strtol(fmt, (char **)&fmt, 10) : 0; if (*++fmt == '%') {
*p++ = '%';
continue;
}
zero_pad = (*fmt == '0');
pad = simple_strtol(fmt, (char **)&fmt, 10);
lenmod = (*fmt == 'h' || *fmt == 'l' || *fmt == 'z') ? *fmt++ : 0;
if (lenmod == 'h' && *fmt == 'h') {
lenmod = 'H';
fmt++;
}
decimal = false;
switch (*fmt) { switch (*fmt) {
case 's': case 's':
p = buf + strlcat(buf, va_arg(args, char *), sizeof(buf)); if (lenmod)
goto out;
strval = va_arg(args, char *);
zero_pad = false;
break; break;
case 'p': case 'p':
if (*++fmt != 'S') if (*++fmt != 'S' || lenmod)
goto out; goto out;
p = buf + strlcat(buf, strsym(va_arg(args, void *)), sizeof(buf)); strval = strsym(valbuf, va_arg(args, void *));
zero_pad = false;
break; break;
case 'l': case 'd':
if (*++fmt != 'x' || end - p <= max(sizeof(long) * 2, pad)) case 'i':
goto out; strval = as_dec(valbuf, va_arg_len_type(args, lenmod, signed), 1);
p = as_hex(p, va_arg(args, unsigned long), pad); decimal = true;
break;
case 'u':
strval = as_dec(valbuf, va_arg_len_type(args, lenmod, unsigned), 0);
break; break;
case 'x': case 'x':
if (end - p <= max(sizeof(int) * 2, pad)) strval = as_hex(valbuf, va_arg_len_type(args, lenmod, unsigned), 0);
goto out;
p = as_hex(p, va_arg(args, unsigned int), pad);
break; break;
default: default:
goto out; goto out;
} }
len = strpad(p, end - p, strval, pad, zero_pad, decimal);
if (len == -E2BIG)
break;
p += len;
} }
out: out:
va_end(args); va_end(args);
sclp_early_printk(buf); len = strlen(buf);
if (len) {
boot_rb_add(buf, len);
boot_console_earlyprintk(buf);
}
return len;
} }
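
A few illustrative calls (function and values hypothetical) exercising the
extended format support: field width with zero padding, 'h'/'l'/'z' length
modifiers, decimal conversions and the literal "%%", alongside the existing
%x, %s and %pS handling:

  /* Hypothetical calls demonstrating the extended format support */
  static void show_stats(unsigned long online_mb, int nr_ranges, size_t size,
                         unsigned short facility, int percent)
  {
          boot_info("memory online: %lu MB (%d ranges)\n", online_mb, nr_ranges);
          boot_debug("range size: %zu bytes\n", size);
          boot_warn("facility %hu missing (%3d%% scanned)\n", facility, percent);
  }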

View File

@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0 // SPDX-License-Identifier: GPL-2.0
#define boot_fmt(fmt) "startup: " fmt
#include <linux/string.h> #include <linux/string.h>
#include <linux/elf.h> #include <linux/elf.h>
#include <asm/page-states.h> #include <asm/page-states.h>
@ -42,7 +43,8 @@ struct machine_info machine;
void error(char *x) void error(char *x)
{ {
boot_printk("\n\n%s\n\n -- System halted", x); boot_emerg("%s\n", x);
boot_emerg(" -- System halted\n");
disabled_wait(); disabled_wait();
} }
@ -143,7 +145,7 @@ static void rescue_initrd(unsigned long min, unsigned long max)
return; return;
old_addr = addr; old_addr = addr;
physmem_free(RR_INITRD); physmem_free(RR_INITRD);
addr = physmem_alloc_top_down(RR_INITRD, size, 0); addr = physmem_alloc_or_die(RR_INITRD, size, 0);
memmove((void *)addr, (void *)old_addr, size); memmove((void *)addr, (void *)old_addr, size);
} }
@ -222,12 +224,16 @@ static void setup_ident_map_size(unsigned long max_physmem_end)
if (oldmem_data.start) { if (oldmem_data.start) {
__kaslr_enabled = 0; __kaslr_enabled = 0;
ident_map_size = min(ident_map_size, oldmem_data.size); ident_map_size = min(ident_map_size, oldmem_data.size);
boot_debug("kdump memory limit: 0x%016lx\n", oldmem_data.size);
} else if (ipl_block_valid && is_ipl_block_dump()) { } else if (ipl_block_valid && is_ipl_block_dump()) {
__kaslr_enabled = 0; __kaslr_enabled = 0;
if (!sclp_early_get_hsa_size(&hsa_size) && hsa_size) if (!sclp_early_get_hsa_size(&hsa_size) && hsa_size) {
ident_map_size = min(ident_map_size, hsa_size); ident_map_size = min(ident_map_size, hsa_size);
boot_debug("Stand-alone dump limit: 0x%016lx\n", hsa_size);
}
} }
#endif #endif
boot_debug("Identity map size: 0x%016lx\n", ident_map_size);
} }
#define FIXMAP_SIZE round_up(MEMCPY_REAL_SIZE + ABS_LOWCORE_MAP_SIZE, sizeof(struct lowcore)) #define FIXMAP_SIZE round_up(MEMCPY_REAL_SIZE + ABS_LOWCORE_MAP_SIZE, sizeof(struct lowcore))
@ -267,6 +273,7 @@ static unsigned long setup_kernel_memory_layout(unsigned long kernel_size)
BUILD_BUG_ON(!IS_ALIGNED(__NO_KASLR_START_KERNEL, THREAD_SIZE)); BUILD_BUG_ON(!IS_ALIGNED(__NO_KASLR_START_KERNEL, THREAD_SIZE));
BUILD_BUG_ON(__NO_KASLR_END_KERNEL > _REGION1_SIZE); BUILD_BUG_ON(__NO_KASLR_END_KERNEL > _REGION1_SIZE);
vsize = get_vmem_size(ident_map_size, vmemmap_size, vmalloc_size, _REGION3_SIZE); vsize = get_vmem_size(ident_map_size, vmemmap_size, vmalloc_size, _REGION3_SIZE);
boot_debug("vmem size estimated: 0x%016lx\n", vsize);
if (IS_ENABLED(CONFIG_KASAN) || __NO_KASLR_END_KERNEL > _REGION2_SIZE || if (IS_ENABLED(CONFIG_KASAN) || __NO_KASLR_END_KERNEL > _REGION2_SIZE ||
(vsize > _REGION2_SIZE && kaslr_enabled())) { (vsize > _REGION2_SIZE && kaslr_enabled())) {
asce_limit = _REGION1_SIZE; asce_limit = _REGION1_SIZE;
@ -290,8 +297,10 @@ static unsigned long setup_kernel_memory_layout(unsigned long kernel_size)
* otherwise asce_limit and rte_size would have been adjusted. * otherwise asce_limit and rte_size would have been adjusted.
*/ */
vmax = adjust_to_uv_max(asce_limit); vmax = adjust_to_uv_max(asce_limit);
boot_debug("%d level paging 0x%016lx vmax\n", vmax == _REGION1_SIZE ? 4 : 3, vmax);
#ifdef CONFIG_KASAN #ifdef CONFIG_KASAN
BUILD_BUG_ON(__NO_KASLR_END_KERNEL > KASAN_SHADOW_START); BUILD_BUG_ON(__NO_KASLR_END_KERNEL > KASAN_SHADOW_START);
boot_debug("KASAN shadow area: 0x%016lx-0x%016lx\n", KASAN_SHADOW_START, KASAN_SHADOW_END);
/* force vmalloc and modules below kasan shadow */ /* force vmalloc and modules below kasan shadow */
vmax = min(vmax, KASAN_SHADOW_START); vmax = min(vmax, KASAN_SHADOW_START);
#endif #endif
@ -305,19 +314,27 @@ static unsigned long setup_kernel_memory_layout(unsigned long kernel_size)
pos = 0; pos = 0;
kernel_end = vmax - pos * THREAD_SIZE; kernel_end = vmax - pos * THREAD_SIZE;
kernel_start = round_down(kernel_end - kernel_size, THREAD_SIZE); kernel_start = round_down(kernel_end - kernel_size, THREAD_SIZE);
boot_debug("Randomization range: 0x%016lx-0x%016lx\n", vmax - kaslr_len, vmax);
boot_debug("kernel image: 0x%016lx-0x%016lx (kaslr)\n", kernel_start,
kernel_size + kernel_size);
} else if (vmax < __NO_KASLR_END_KERNEL || vsize > __NO_KASLR_END_KERNEL) { } else if (vmax < __NO_KASLR_END_KERNEL || vsize > __NO_KASLR_END_KERNEL) {
kernel_start = round_down(vmax - kernel_size, THREAD_SIZE); kernel_start = round_down(vmax - kernel_size, THREAD_SIZE);
boot_printk("The kernel base address is forced to %lx\n", kernel_start); boot_debug("kernel image: 0x%016lx-0x%016lx (constrained)\n", kernel_start,
kernel_start + kernel_size);
} else { } else {
kernel_start = __NO_KASLR_START_KERNEL; kernel_start = __NO_KASLR_START_KERNEL;
boot_debug("kernel image: 0x%016lx-0x%016lx (nokaslr)\n", kernel_start,
kernel_start + kernel_size);
} }
__kaslr_offset = kernel_start; __kaslr_offset = kernel_start;
boot_debug("__kaslr_offset: 0x%016lx\n", __kaslr_offset);
MODULES_END = round_down(kernel_start, _SEGMENT_SIZE); MODULES_END = round_down(kernel_start, _SEGMENT_SIZE);
MODULES_VADDR = MODULES_END - MODULES_LEN; MODULES_VADDR = MODULES_END - MODULES_LEN;
VMALLOC_END = MODULES_VADDR; VMALLOC_END = MODULES_VADDR;
if (IS_ENABLED(CONFIG_KMSAN)) if (IS_ENABLED(CONFIG_KMSAN))
VMALLOC_END -= MODULES_LEN * 2; VMALLOC_END -= MODULES_LEN * 2;
boot_debug("modules area: 0x%016lx-0x%016lx\n", MODULES_VADDR, MODULES_END);
/* allow vmalloc area to occupy up to about 1/2 of the rest virtual space left */ /* allow vmalloc area to occupy up to about 1/2 of the rest virtual space left */
vsize = (VMALLOC_END - FIXMAP_SIZE) / 2; vsize = (VMALLOC_END - FIXMAP_SIZE) / 2;
@ -329,10 +346,15 @@ static unsigned long setup_kernel_memory_layout(unsigned long kernel_size)
VMALLOC_END -= vmalloc_size * 2; VMALLOC_END -= vmalloc_size * 2;
} }
VMALLOC_START = VMALLOC_END - vmalloc_size; VMALLOC_START = VMALLOC_END - vmalloc_size;
boot_debug("vmalloc area: 0x%016lx-0x%016lx\n", VMALLOC_START, VMALLOC_END);
__memcpy_real_area = round_down(VMALLOC_START - MEMCPY_REAL_SIZE, PAGE_SIZE); __memcpy_real_area = round_down(VMALLOC_START - MEMCPY_REAL_SIZE, PAGE_SIZE);
boot_debug("memcpy real area: 0x%016lx-0x%016lx\n", __memcpy_real_area,
__memcpy_real_area + MEMCPY_REAL_SIZE);
__abs_lowcore = round_down(__memcpy_real_area - ABS_LOWCORE_MAP_SIZE, __abs_lowcore = round_down(__memcpy_real_area - ABS_LOWCORE_MAP_SIZE,
sizeof(struct lowcore)); sizeof(struct lowcore));
boot_debug("abs lowcore: 0x%016lx-0x%016lx\n", __abs_lowcore,
__abs_lowcore + ABS_LOWCORE_MAP_SIZE);
/* split remaining virtual space between 1:1 mapping & vmemmap array */ /* split remaining virtual space between 1:1 mapping & vmemmap array */
pages = __abs_lowcore / (PAGE_SIZE + sizeof(struct page)); pages = __abs_lowcore / (PAGE_SIZE + sizeof(struct page));
@ -352,8 +374,11 @@ static unsigned long setup_kernel_memory_layout(unsigned long kernel_size)
BUILD_BUG_ON(MAX_DCSS_ADDR > (1UL << MAX_PHYSMEM_BITS)); BUILD_BUG_ON(MAX_DCSS_ADDR > (1UL << MAX_PHYSMEM_BITS));
max_mappable = max(ident_map_size, MAX_DCSS_ADDR); max_mappable = max(ident_map_size, MAX_DCSS_ADDR);
max_mappable = min(max_mappable, vmemmap_start); max_mappable = min(max_mappable, vmemmap_start);
if (IS_ENABLED(CONFIG_RANDOMIZE_IDENTITY_BASE)) #ifdef CONFIG_RANDOMIZE_IDENTITY_BASE
__identity_base = round_down(vmemmap_start - max_mappable, rte_size); __identity_base = round_down(vmemmap_start - max_mappable, rte_size);
#endif
boot_debug("identity map: 0x%016lx-0x%016lx\n", __identity_base,
__identity_base + ident_map_size);
return asce_limit; return asce_limit;
} }
@ -412,6 +437,10 @@ void startup_kernel(void)
psw_t psw; psw_t psw;
setup_lpp(); setup_lpp();
store_ipl_parmblock();
uv_query_info();
setup_boot_command_line();
parse_boot_command_line();
/* /*
* Non-randomized kernel physical start address must be _SEGMENT_SIZE * Non-randomized kernel physical start address must be _SEGMENT_SIZE
@ -431,12 +460,8 @@ void startup_kernel(void)
oldmem_data.start = parmarea.oldmem_base; oldmem_data.start = parmarea.oldmem_base;
oldmem_data.size = parmarea.oldmem_size; oldmem_data.size = parmarea.oldmem_size;
store_ipl_parmblock();
read_ipl_report(); read_ipl_report();
uv_query_info();
sclp_early_read_info(); sclp_early_read_info();
setup_boot_command_line();
parse_boot_command_line();
detect_facilities(); detect_facilities();
cmma_init(); cmma_init();
sanitize_prot_virt_host(); sanitize_prot_virt_host();
@ -526,6 +551,7 @@ void startup_kernel(void)
__kaslr_offset, __kaslr_offset_phys); __kaslr_offset, __kaslr_offset_phys);
kaslr_adjust_got(__kaslr_offset); kaslr_adjust_got(__kaslr_offset);
setup_vmem(__kaslr_offset, __kaslr_offset + kernel_size, asce_limit); setup_vmem(__kaslr_offset, __kaslr_offset + kernel_size, asce_limit);
dump_physmem_reserved();
copy_bootdata(); copy_bootdata();
__apply_alternatives((struct alt_instr *)_vmlinux_info.alt_instructions, __apply_alternatives((struct alt_instr *)_vmlinux_info.alt_instructions,
(struct alt_instr *)_vmlinux_info.alt_instructions_end, (struct alt_instr *)_vmlinux_info.alt_instructions_end,
@ -542,5 +568,6 @@ void startup_kernel(void)
*/ */
psw.addr = __kaslr_offset + vmlinux.entry; psw.addr = __kaslr_offset + vmlinux.entry;
psw.mask = PSW_KERNEL_BITS; psw.mask = PSW_KERNEL_BITS;
boot_debug("Starting kernel at: 0x%016lx\n", psw.addr);
__load_psw(psw); __load_psw(psw);
} }

View File

@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0 // SPDX-License-Identifier: GPL-2.0
#define boot_fmt(fmt) "vmem: " fmt
#include <linux/sched/task.h> #include <linux/sched/task.h>
#include <linux/pgtable.h> #include <linux/pgtable.h>
#include <linux/kasan.h> #include <linux/kasan.h>
@ -13,6 +14,7 @@
#include "decompressor.h" #include "decompressor.h"
#include "boot.h" #include "boot.h"
#define INVALID_PHYS_ADDR (~(phys_addr_t)0)
struct ctlreg __bootdata_preserved(s390_invalid_asce); struct ctlreg __bootdata_preserved(s390_invalid_asce);
#ifdef CONFIG_PROC_FS #ifdef CONFIG_PROC_FS
@ -31,12 +33,42 @@ enum populate_mode {
POPULATE_IDENTITY, POPULATE_IDENTITY,
POPULATE_KERNEL, POPULATE_KERNEL,
#ifdef CONFIG_KASAN #ifdef CONFIG_KASAN
/* KASAN modes should be last and grouped together, see is_kasan_populate_mode() */
POPULATE_KASAN_MAP_SHADOW, POPULATE_KASAN_MAP_SHADOW,
POPULATE_KASAN_ZERO_SHADOW, POPULATE_KASAN_ZERO_SHADOW,
POPULATE_KASAN_SHALLOW POPULATE_KASAN_SHALLOW
#endif #endif
}; };
#define POPULATE_MODE_NAME(t) case POPULATE_ ## t: return #t
static inline const char *get_populate_mode_name(enum populate_mode t)
{
switch (t) {
POPULATE_MODE_NAME(NONE);
POPULATE_MODE_NAME(DIRECT);
POPULATE_MODE_NAME(LOWCORE);
POPULATE_MODE_NAME(ABS_LOWCORE);
POPULATE_MODE_NAME(IDENTITY);
POPULATE_MODE_NAME(KERNEL);
#ifdef CONFIG_KASAN
POPULATE_MODE_NAME(KASAN_MAP_SHADOW);
POPULATE_MODE_NAME(KASAN_ZERO_SHADOW);
POPULATE_MODE_NAME(KASAN_SHALLOW);
#endif
default:
return "UNKNOWN";
}
}
static bool is_kasan_populate_mode(enum populate_mode mode)
{
#ifdef CONFIG_KASAN
return mode >= POPULATE_KASAN_MAP_SHADOW;
#else
return false;
#endif
}
static void pgtable_populate(unsigned long addr, unsigned long end, enum populate_mode mode); static void pgtable_populate(unsigned long addr, unsigned long end, enum populate_mode mode);
#ifdef CONFIG_KASAN #ifdef CONFIG_KASAN
@ -52,9 +84,12 @@ static pte_t pte_z;
static inline void kasan_populate(unsigned long start, unsigned long end, enum populate_mode mode) static inline void kasan_populate(unsigned long start, unsigned long end, enum populate_mode mode)
{ {
start = PAGE_ALIGN_DOWN(__sha(start)); unsigned long sha_start = PAGE_ALIGN_DOWN(__sha(start));
end = PAGE_ALIGN(__sha(end)); unsigned long sha_end = PAGE_ALIGN(__sha(end));
pgtable_populate(start, end, mode);
boot_debug("%-17s 0x%016lx-0x%016lx >> 0x%016lx-0x%016lx\n", get_populate_mode_name(mode),
start, end, sha_start, sha_end);
pgtable_populate(sha_start, sha_end, mode);
} }
static void kasan_populate_shadow(unsigned long kernel_start, unsigned long kernel_end) static void kasan_populate_shadow(unsigned long kernel_start, unsigned long kernel_end)
@ -200,7 +235,7 @@ static void *boot_crst_alloc(unsigned long val)
unsigned long size = PAGE_SIZE << CRST_ALLOC_ORDER; unsigned long size = PAGE_SIZE << CRST_ALLOC_ORDER;
unsigned long *table; unsigned long *table;
-table = (unsigned long *)physmem_alloc_top_down(RR_VMEM, size, size);
+table = (unsigned long *)physmem_alloc_or_die(RR_VMEM, size, size);
crst_table_init(table, val);
__arch_set_page_dat(table, 1UL << CRST_ALLOC_ORDER);
return table;
@@ -216,7 +251,7 @@ static pte_t *boot_pte_alloc(void)
* during POPULATE_KASAN_MAP_SHADOW when EDAT is off
*/
if (!pte_leftover) {
-pte_leftover = (void *)physmem_alloc_top_down(RR_VMEM, PAGE_SIZE, PAGE_SIZE);
+pte_leftover = (void *)physmem_alloc_or_die(RR_VMEM, PAGE_SIZE, PAGE_SIZE);
pte = pte_leftover + _PAGE_TABLE_SIZE;
__arch_set_page_dat(pte, 1);
} else {
@@ -228,11 +263,12 @@ static pte_t *boot_pte_alloc(void)
return pte;
}
-static unsigned long _pa(unsigned long addr, unsigned long size, enum populate_mode mode)
+static unsigned long resolve_pa_may_alloc(unsigned long addr, unsigned long size,
+enum populate_mode mode)
{
switch (mode) {
case POPULATE_NONE:
-return -1;
+return INVALID_PHYS_ADDR;
case POPULATE_DIRECT:
return addr;
case POPULATE_LOWCORE:
@@ -245,38 +281,64 @@ static unsigned long _pa(unsigned long addr, unsigned long size, enum populate_m
return __identity_pa(addr);
#ifdef CONFIG_KASAN
case POPULATE_KASAN_MAP_SHADOW:
-addr = physmem_alloc_top_down(RR_VMEM, size, size);
+/* Allow to fail large page allocations, this will fall back to 1mb/4k pages */
+addr = physmem_alloc(RR_VMEM, size, size, size == PAGE_SIZE);
+if (addr) {
memset((void *)addr, 0, size);
return addr;
+}
+return INVALID_PHYS_ADDR;
#endif
default:
-return -1;
+return INVALID_PHYS_ADDR;
}
}
-static bool large_allowed(enum populate_mode mode)
+static bool large_page_mapping_allowed(enum populate_mode mode)
{
-return (mode == POPULATE_DIRECT) || (mode == POPULATE_IDENTITY) || (mode == POPULATE_KERNEL);
+switch (mode) {
+case POPULATE_DIRECT:
+case POPULATE_IDENTITY:
+case POPULATE_KERNEL:
+#ifdef CONFIG_KASAN
+case POPULATE_KASAN_MAP_SHADOW:
+#endif
+return true;
+default:
+return false;
+}
}
-static bool can_large_pud(pud_t *pu_dir, unsigned long addr, unsigned long end,
+static unsigned long try_get_large_pud_pa(pud_t *pu_dir, unsigned long addr, unsigned long end,
enum populate_mode mode)
{
-unsigned long size = end - addr;
+unsigned long pa, size = end - addr;
-return machine.has_edat2 && large_allowed(mode) &&
-IS_ALIGNED(addr, PUD_SIZE) && (size >= PUD_SIZE) &&
-IS_ALIGNED(_pa(addr, size, mode), PUD_SIZE);
+if (!machine.has_edat2 || !large_page_mapping_allowed(mode) ||
+!IS_ALIGNED(addr, PUD_SIZE) || (size < PUD_SIZE))
+return INVALID_PHYS_ADDR;
+pa = resolve_pa_may_alloc(addr, size, mode);
+if (!IS_ALIGNED(pa, PUD_SIZE))
+return INVALID_PHYS_ADDR;
+return pa;
}
-static bool can_large_pmd(pmd_t *pm_dir, unsigned long addr, unsigned long end,
+static unsigned long try_get_large_pmd_pa(pmd_t *pm_dir, unsigned long addr, unsigned long end,
enum populate_mode mode)
{
-unsigned long size = end - addr;
+unsigned long pa, size = end - addr;
-return machine.has_edat1 && large_allowed(mode) &&
-IS_ALIGNED(addr, PMD_SIZE) && (size >= PMD_SIZE) &&
-IS_ALIGNED(_pa(addr, size, mode), PMD_SIZE);
+if (!machine.has_edat1 || !large_page_mapping_allowed(mode) ||
+!IS_ALIGNED(addr, PMD_SIZE) || (size < PMD_SIZE))
+return INVALID_PHYS_ADDR;
+pa = resolve_pa_may_alloc(addr, size, mode);
+if (!IS_ALIGNED(pa, PMD_SIZE))
+return INVALID_PHYS_ADDR;
+return pa;
}
static void pgtable_pte_populate(pmd_t *pmd, unsigned long addr, unsigned long end,
@@ -290,7 +352,7 @@ static void pgtable_pte_populate(pmd_t *pmd, unsigned long addr, unsigned long e
if (pte_none(*pte)) {
if (kasan_pte_populate_zero_shadow(pte, mode))
continue;
-entry = __pte(_pa(addr, PAGE_SIZE, mode));
+entry = __pte(resolve_pa_may_alloc(addr, PAGE_SIZE, mode));
entry = set_pte_bit(entry, PAGE_KERNEL);
set_pte(pte, entry);
pages++;
@@ -303,7 +365,7 @@ static void pgtable_pte_populate(pmd_t *pmd, unsigned long addr, unsigned long e
static void pgtable_pmd_populate(pud_t *pud, unsigned long addr, unsigned long end,
enum populate_mode mode)
{
-unsigned long next, pages = 0;
+unsigned long pa, next, pages = 0;
pmd_t *pmd, entry;
pte_t *pte;
@@ -313,8 +375,9 @@ static void pgtable_pmd_populate(pud_t *pud, unsigned long addr, unsigned long e
if (pmd_none(*pmd)) {
if (kasan_pmd_populate_zero_shadow(pmd, addr, next, mode))
continue;
-if (can_large_pmd(pmd, addr, next, mode)) {
-entry = __pmd(_pa(addr, _SEGMENT_SIZE, mode));
+pa = try_get_large_pmd_pa(pmd, addr, next, mode);
+if (pa != INVALID_PHYS_ADDR) {
+entry = __pmd(pa);
entry = set_pmd_bit(entry, SEGMENT_KERNEL);
set_pmd(pmd, entry);
pages++;
@@ -334,7 +397,7 @@ static void pgtable_pmd_populate(pud_t *pud, unsigned long addr, unsigned long e
static void pgtable_pud_populate(p4d_t *p4d, unsigned long addr, unsigned long end,
enum populate_mode mode)
{
-unsigned long next, pages = 0;
+unsigned long pa, next, pages = 0;
pud_t *pud, entry;
pmd_t *pmd;
@@ -344,8 +407,9 @@ static void pgtable_pud_populate(p4d_t *p4d, unsigned long addr, unsigned long e
if (pud_none(*pud)) {
if (kasan_pud_populate_zero_shadow(pud, addr, next, mode))
continue;
-if (can_large_pud(pud, addr, next, mode)) {
-entry = __pud(_pa(addr, _REGION3_SIZE, mode));
+pa = try_get_large_pud_pa(pud, addr, next, mode);
+if (pa != INVALID_PHYS_ADDR) {
+entry = __pud(pa);
entry = set_pud_bit(entry, REGION3_KERNEL);
set_pud(pud, entry);
pages++;
@@ -388,6 +452,13 @@ static void pgtable_populate(unsigned long addr, unsigned long end, enum populat
pgd_t *pgd;
p4d_t *p4d;
+if (!is_kasan_populate_mode(mode)) {
+boot_debug("%-17s 0x%016lx-0x%016lx -> 0x%016lx-0x%016lx\n",
+get_populate_mode_name(mode), addr, end,
+resolve_pa_may_alloc(addr, 0, mode),
+resolve_pa_may_alloc(end - 1, 0, mode) + 1);
+}
pgd = pgd_offset(&init_mm, addr);
for (; addr < end; addr = next, pgd++) {
next = pgd_addr_end(addr, end);
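The hunks above replace the boolean can_large_pud()/can_large_pmd() helpers with try_get_large_pud_pa()/try_get_large_pmd_pa(), which hand back INVALID_PHYS_ADDR when a large mapping cannot be used, so the caller simply drops to the next smaller mapping size. The same try-big-then-fall-back idea, restated as a stand-alone C sketch (allocator, names and sizes below are illustrative only, not taken from the patch):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define INVALID_PHYS_ADDR ((uint64_t)-1)

/* Stand-in for an allocator that may fail; only the smallest request is fatal. */
static uint64_t alloc_aligned(size_t size, int die_on_oom)
{
	void *p = aligned_alloc(4096, size);

	if (!p && die_on_oom) {
		fprintf(stderr, "out of memory\n");
		exit(1);
	}
	return p ? (uint64_t)(uintptr_t)p : INVALID_PHYS_ADDR;
}

int main(void)
{
	/* Try 2 GB, then 1 MB, then 4 KB; a failed large request just falls through. */
	static const size_t sizes[] = { 1UL << 31, 1UL << 20, 1UL << 12 };
	uint64_t addr = INVALID_PHYS_ADDR;

	for (int i = 0; i < 3 && addr == INVALID_PHYS_ADDR; i++)
		addr = alloc_aligned(sizes[i], sizes[i] == (1UL << 12));
	printf("got memory at %#llx\n", (unsigned long long)addr);
	return 0;
}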


@@ -9,11 +9,11 @@
#define EX_TYPE_NONE 0
#define EX_TYPE_FIXUP 1
#define EX_TYPE_BPF 2
-#define EX_TYPE_UA_STORE 3
-#define EX_TYPE_UA_LOAD_MEM 4
+#define EX_TYPE_UA_FAULT 3
#define EX_TYPE_UA_LOAD_REG 5
#define EX_TYPE_UA_LOAD_REGPAIR 6
#define EX_TYPE_ZEROPAD 7
+#define EX_TYPE_FPC 8
#define EX_DATA_REG_ERR_SHIFT 0
#define EX_DATA_REG_ERR GENMASK(3, 0)
@@ -69,11 +69,8 @@
#define EX_TABLE_AMODE31(_fault, _target) \
__EX_TABLE(.amode31.ex_table, _fault, _target, EX_TYPE_FIXUP, __stringify(%%r0), __stringify(%%r0), 0)
-#define EX_TABLE_UA_STORE(_fault, _target, _regerr) \
-__EX_TABLE(__ex_table, _fault, _target, EX_TYPE_UA_STORE, _regerr, _regerr, 0)
-#define EX_TABLE_UA_LOAD_MEM(_fault, _target, _regerr, _regmem, _len) \
-__EX_TABLE(__ex_table, _fault, _target, EX_TYPE_UA_LOAD_MEM, _regerr, _regmem, _len)
+#define EX_TABLE_UA_FAULT(_fault, _target, _regerr) \
+__EX_TABLE(__ex_table, _fault, _target, EX_TYPE_UA_FAULT, _regerr, _regerr, 0)
#define EX_TABLE_UA_LOAD_REG(_fault, _target, _regerr, _regzero) \
__EX_TABLE(__ex_table, _fault, _target, EX_TYPE_UA_LOAD_REG, _regerr, _regzero, 0)
@@ -84,4 +81,7 @@
#define EX_TABLE_ZEROPAD(_fault, _target, _regdata, _regaddr) \
__EX_TABLE(__ex_table, _fault, _target, EX_TYPE_ZEROPAD, _regdata, _regaddr, 0)
+#define EX_TABLE_FPC(_fault, _target) \
+__EX_TABLE(__ex_table, _fault, _target, EX_TYPE_FPC, __stringify(%%r0), __stringify(%%r0), 0)
#endif /* __ASM_EXTABLE_H */


@@ -28,7 +28,7 @@
* [var] also contains the program mask. CC_TRANSFORM() moves the condition
* code to the two least significant bits and sets all other bits to zero.
*/
-#if defined(__GCC_ASM_FLAG_OUTPUTS__) && !(IS_ENABLED(CONFIG_GCC_ASM_FLAG_OUTPUT_BROKEN))
+#if defined(__GCC_ASM_FLAG_OUTPUTS__) && !(IS_ENABLED(CONFIG_CC_ASM_FLAG_OUTPUT_BROKEN))
#define __HAVE_ASM_FLAG_OUTPUTS__


@@ -60,7 +60,7 @@ static __always_inline bool arch_test_bit(unsigned long nr, const volatile unsig
asm volatile(
" tm %[addr],%[mask]\n"
: "=@cc" (cc)
-: [addr] "R" (*addr), [mask] "I" (mask)
+: [addr] "Q" (*addr), [mask] "I" (mask)
);
return cc == 3;
}


@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_S390_BOOT_DATA_H
+#include <linux/string.h>
#include <asm/setup.h>
#include <asm/ipl.h>
@@ -15,4 +16,54 @@ extern unsigned long ipl_cert_list_size;
extern unsigned long early_ipl_comp_list_addr;
extern unsigned long early_ipl_comp_list_size;
+extern char boot_rb[PAGE_SIZE * 2];
+extern bool boot_earlyprintk;
+extern size_t boot_rb_off;
+extern char bootdebug_filter[128];
+extern bool bootdebug;
+#define boot_rb_foreach(cb) \
+do { \
+size_t off = boot_rb_off + strlen(boot_rb + boot_rb_off) + 1; \
+size_t len; \
+for (; off < sizeof(boot_rb) && (len = strlen(boot_rb + off)); off += len + 1) \
+cb(boot_rb + off); \
+for (off = 0; off < boot_rb_off && (len = strlen(boot_rb + off)); off += len + 1) \
+cb(boot_rb + off); \
+} while (0)
+/*
+ * bootdebug_filter is a comma separated list of strings,
+ * where each string can be a prefix of the message.
+ */
+static inline bool bootdebug_filter_match(const char *buf)
+{
+char *p = bootdebug_filter, *s;
+char *end;
+if (!*p)
+return true;
+end = p + strlen(p);
+while (p < end) {
+p = skip_spaces(p);
+s = memscan(p, ',', end - p);
+if (!strncmp(p, buf, s - p))
+return true;
+p = s + 1;
+}
+return false;
+}
+static inline const char *skip_timestamp(const char *buf)
+{
+#ifdef CONFIG_PRINTK_TIME
+const char *p = memchr(buf, ']', strlen(buf));
+if (p && p[1] == ' ')
+return p + 2;
+#endif
+return buf;
+}
#endif /* _ASM_S390_BOOT_DATA_H */
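bootdebug_filter_match() above does a simple comma-separated prefix match against each stored message. A user-space re-implementation of just that matching step, handy for sanity-checking a 'bootdebug=...' filter string (the example filter and messages are made up, and memchr is used here instead of the kernel's memscan):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool filter_match(const char *filter, const char *msg)
{
	const char *p = filter, *end = filter + strlen(filter);

	if (!*p)
		return true;	/* empty filter matches everything */
	while (p < end) {
		const char *s = memchr(p, ',', end - p);

		if (!s)
			s = end;
		if (!strncmp(p, msg, s - p))
			return true;
		p = s + 1;
	}
	return false;
}

int main(void)
{
	printf("%d\n", filter_match("vmem,kaslr", "vmem: example entry"));   /* 1 */
	printf("%d\n", filter_match("vmem,kaslr", "physmem: example entry")); /* 0 */
	return 0;
}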


@ -100,19 +100,12 @@ static __always_inline void fpu_lfpc(unsigned int *fpc)
*/ */
static inline void fpu_lfpc_safe(unsigned int *fpc) static inline void fpu_lfpc_safe(unsigned int *fpc)
{ {
u32 tmp;
instrument_read(fpc, sizeof(*fpc)); instrument_read(fpc, sizeof(*fpc));
asm_inline volatile( asm_inline volatile(
"0: lfpc %[fpc]\n" " lfpc %[fpc]\n"
"1: nopr %%r7\n" "0: nopr %%r7\n"
".pushsection .fixup, \"ax\"\n" EX_TABLE_FPC(0b, 0b)
"2: lghi %[tmp],0\n" :
" sfpc %[tmp]\n"
" jg 1b\n"
".popsection\n"
EX_TABLE(1b, 2b)
: [tmp] "=d" (tmp)
: [fpc] "Q" (*fpc) : [fpc] "Q" (*fpc)
: "memory"); : "memory");
} }
@ -183,7 +176,19 @@ static __always_inline void fpu_vgfmg(u8 v1, u8 v2, u8 v3)
: "memory"); : "memory");
} }
#ifdef CONFIG_CC_IS_CLANG #ifdef CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS
static __always_inline void fpu_vl(u8 v1, const void *vxr)
{
instrument_read(vxr, sizeof(__vector128));
asm volatile("VL %[v1],%O[vxr],,%R[vxr]\n"
:
: [vxr] "Q" (*(__vector128 *)vxr),
[v1] "I" (v1)
: "memory");
}
#else /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */
static __always_inline void fpu_vl(u8 v1, const void *vxr) static __always_inline void fpu_vl(u8 v1, const void *vxr)
{ {
@ -197,19 +202,7 @@ static __always_inline void fpu_vl(u8 v1, const void *vxr)
: "memory", "1"); : "memory", "1");
} }
#else /* CONFIG_CC_IS_CLANG */ #endif /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */
static __always_inline void fpu_vl(u8 v1, const void *vxr)
{
instrument_read(vxr, sizeof(__vector128));
asm volatile("VL %[v1],%O[vxr],,%R[vxr]\n"
:
: [vxr] "Q" (*(__vector128 *)vxr),
[v1] "I" (v1)
: "memory");
}
#endif /* CONFIG_CC_IS_CLANG */
static __always_inline void fpu_vleib(u8 v, s16 val, u8 index) static __always_inline void fpu_vleib(u8 v, s16 val, u8 index)
{ {
@ -238,7 +231,23 @@ static __always_inline u64 fpu_vlgvf(u8 v, u16 index)
return val; return val;
} }
#ifdef CONFIG_CC_IS_CLANG #ifdef CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS
static __always_inline void fpu_vll(u8 v1, u32 index, const void *vxr)
{
unsigned int size;
size = min(index + 1, sizeof(__vector128));
instrument_read(vxr, size);
asm volatile("VLL %[v1],%[index],%O[vxr],%R[vxr]\n"
:
: [vxr] "Q" (*(u8 *)vxr),
[index] "d" (index),
[v1] "I" (v1)
: "memory");
}
#else /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */
static __always_inline void fpu_vll(u8 v1, u32 index, const void *vxr) static __always_inline void fpu_vll(u8 v1, u32 index, const void *vxr)
{ {
@ -256,25 +265,27 @@ static __always_inline void fpu_vll(u8 v1, u32 index, const void *vxr)
: "memory", "1"); : "memory", "1");
} }
#else /* CONFIG_CC_IS_CLANG */ #endif /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */
static __always_inline void fpu_vll(u8 v1, u32 index, const void *vxr) #ifdef CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS
{
unsigned int size;
size = min(index + 1, sizeof(__vector128)); #define fpu_vlm(_v1, _v3, _vxrs) \
instrument_read(vxr, size); ({ \
asm volatile("VLL %[v1],%[index],%O[vxr],%R[vxr]\n" unsigned int size = ((_v3) - (_v1) + 1) * sizeof(__vector128); \
: struct { \
: [vxr] "Q" (*(u8 *)vxr), __vector128 _v[(_v3) - (_v1) + 1]; \
[index] "d" (index), } *_v = (void *)(_vxrs); \
[v1] "I" (v1) \
: "memory"); instrument_read(_v, size); \
} asm volatile("VLM %[v1],%[v3],%O[vxrs],%R[vxrs]\n" \
: \
: [vxrs] "Q" (*_v), \
[v1] "I" (_v1), [v3] "I" (_v3) \
: "memory"); \
(_v3) - (_v1) + 1; \
})
#endif /* CONFIG_CC_IS_CLANG */ #else /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */
#ifdef CONFIG_CC_IS_CLANG
#define fpu_vlm(_v1, _v3, _vxrs) \ #define fpu_vlm(_v1, _v3, _vxrs) \
({ \ ({ \
@ -294,25 +305,7 @@ static __always_inline void fpu_vll(u8 v1, u32 index, const void *vxr)
(_v3) - (_v1) + 1; \ (_v3) - (_v1) + 1; \
}) })
#else /* CONFIG_CC_IS_CLANG */ #endif /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */
#define fpu_vlm(_v1, _v3, _vxrs) \
({ \
unsigned int size = ((_v3) - (_v1) + 1) * sizeof(__vector128); \
struct { \
__vector128 _v[(_v3) - (_v1) + 1]; \
} *_v = (void *)(_vxrs); \
\
instrument_read(_v, size); \
asm volatile("VLM %[v1],%[v3],%O[vxrs],%R[vxrs]\n" \
: \
: [vxrs] "Q" (*_v), \
[v1] "I" (_v1), [v3] "I" (_v3) \
: "memory"); \
(_v3) - (_v1) + 1; \
})
#endif /* CONFIG_CC_IS_CLANG */
static __always_inline void fpu_vlr(u8 v1, u8 v2) static __always_inline void fpu_vlr(u8 v1, u8 v2)
{ {
@ -362,7 +355,18 @@ static __always_inline void fpu_vsrlb(u8 v1, u8 v2, u8 v3)
: "memory"); : "memory");
} }
#ifdef CONFIG_CC_IS_CLANG #ifdef CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS
static __always_inline void fpu_vst(u8 v1, const void *vxr)
{
instrument_write(vxr, sizeof(__vector128));
asm volatile("VST %[v1],%O[vxr],,%R[vxr]\n"
: [vxr] "=Q" (*(__vector128 *)vxr)
: [v1] "I" (v1)
: "memory");
}
#else /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */
static __always_inline void fpu_vst(u8 v1, const void *vxr) static __always_inline void fpu_vst(u8 v1, const void *vxr)
{ {
@ -375,20 +379,23 @@ static __always_inline void fpu_vst(u8 v1, const void *vxr)
: "memory", "1"); : "memory", "1");
} }
#else /* CONFIG_CC_IS_CLANG */ #endif /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */
static __always_inline void fpu_vst(u8 v1, const void *vxr) #ifdef CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS
static __always_inline void fpu_vstl(u8 v1, u32 index, const void *vxr)
{ {
instrument_write(vxr, sizeof(__vector128)); unsigned int size;
asm volatile("VST %[v1],%O[vxr],,%R[vxr]\n"
: [vxr] "=Q" (*(__vector128 *)vxr) size = min(index + 1, sizeof(__vector128));
: [v1] "I" (v1) instrument_write(vxr, size);
asm volatile("VSTL %[v1],%[index],%O[vxr],%R[vxr]\n"
: [vxr] "=Q" (*(u8 *)vxr)
: [index] "d" (index), [v1] "I" (v1)
: "memory"); : "memory");
} }
#endif /* CONFIG_CC_IS_CLANG */ #else /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */
#ifdef CONFIG_CC_IS_CLANG
static __always_inline void fpu_vstl(u8 v1, u32 index, const void *vxr) static __always_inline void fpu_vstl(u8 v1, u32 index, const void *vxr)
{ {
@ -404,23 +411,26 @@ static __always_inline void fpu_vstl(u8 v1, u32 index, const void *vxr)
: "memory", "1"); : "memory", "1");
} }
#else /* CONFIG_CC_IS_CLANG */ #endif /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */
static __always_inline void fpu_vstl(u8 v1, u32 index, const void *vxr) #ifdef CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS
{
unsigned int size;
size = min(index + 1, sizeof(__vector128)); #define fpu_vstm(_v1, _v3, _vxrs) \
instrument_write(vxr, size); ({ \
asm volatile("VSTL %[v1],%[index],%O[vxr],%R[vxr]\n" unsigned int size = ((_v3) - (_v1) + 1) * sizeof(__vector128); \
: [vxr] "=Q" (*(u8 *)vxr) struct { \
: [index] "d" (index), [v1] "I" (v1) __vector128 _v[(_v3) - (_v1) + 1]; \
: "memory"); } *_v = (void *)(_vxrs); \
} \
instrument_write(_v, size); \
asm volatile("VSTM %[v1],%[v3],%O[vxrs],%R[vxrs]\n" \
: [vxrs] "=Q" (*_v) \
: [v1] "I" (_v1), [v3] "I" (_v3) \
: "memory"); \
(_v3) - (_v1) + 1; \
})
#endif /* CONFIG_CC_IS_CLANG */ #else /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */
#ifdef CONFIG_CC_IS_CLANG
#define fpu_vstm(_v1, _v3, _vxrs) \ #define fpu_vstm(_v1, _v3, _vxrs) \
({ \ ({ \
@ -439,24 +449,7 @@ static __always_inline void fpu_vstl(u8 v1, u32 index, const void *vxr)
(_v3) - (_v1) + 1; \ (_v3) - (_v1) + 1; \
}) })
#else /* CONFIG_CC_IS_CLANG */ #endif /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */
#define fpu_vstm(_v1, _v3, _vxrs) \
({ \
unsigned int size = ((_v3) - (_v1) + 1) * sizeof(__vector128); \
struct { \
__vector128 _v[(_v3) - (_v1) + 1]; \
} *_v = (void *)(_vxrs); \
\
instrument_write(_v, size); \
asm volatile("VSTM %[v1],%[v3],%O[vxrs],%R[vxrs]\n" \
: [vxrs] "=Q" (*_v) \
: [v1] "I" (_v1), [v3] "I" (_v3) \
: "memory"); \
(_v3) - (_v1) + 1; \
})
#endif /* CONFIG_CC_IS_CLANG */
static __always_inline void fpu_vupllf(u8 v1, u8 v2) static __always_inline void fpu_vupllf(u8 v1, u8 v2)
{ {


@ -2,80 +2,95 @@
#ifndef _ASM_S390_FUTEX_H #ifndef _ASM_S390_FUTEX_H
#define _ASM_S390_FUTEX_H #define _ASM_S390_FUTEX_H
#include <linux/instrumented.h>
#include <linux/uaccess.h> #include <linux/uaccess.h>
#include <linux/futex.h> #include <linux/futex.h>
#include <asm/asm-extable.h> #include <asm/asm-extable.h>
#include <asm/mmu_context.h> #include <asm/mmu_context.h>
#include <asm/errno.h> #include <asm/errno.h>
#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg) \ #define FUTEX_OP_FUNC(name, insn) \
asm volatile( \ static uaccess_kmsan_or_inline int \
__futex_atomic_##name(int oparg, int *old, u32 __user *uaddr) \
{ \
int rc, new; \
\
instrument_copy_from_user_before(old, uaddr, sizeof(*old)); \
asm_inline volatile( \
" sacf 256\n" \ " sacf 256\n" \
"0: l %1,0(%6)\n" \ "0: l %[old],%[uaddr]\n" \
"1:"insn \ "1:"insn \
"2: cs %1,%2,0(%6)\n" \ "2: cs %[old],%[new],%[uaddr]\n" \
"3: jl 1b\n" \ "3: jl 1b\n" \
" lhi %0,0\n" \ " lhi %[rc],0\n" \
"4: sacf 768\n" \ "4: sacf 768\n" \
EX_TABLE(0b,4b) EX_TABLE(1b,4b) \ EX_TABLE_UA_FAULT(0b, 4b, %[rc]) \
EX_TABLE(2b,4b) EX_TABLE(3b,4b) \ EX_TABLE_UA_FAULT(1b, 4b, %[rc]) \
: "=d" (ret), "=&d" (oldval), "=&d" (newval), \ EX_TABLE_UA_FAULT(2b, 4b, %[rc]) \
"=m" (*uaddr) \ EX_TABLE_UA_FAULT(3b, 4b, %[rc]) \
: "0" (-EFAULT), "d" (oparg), "a" (uaddr), \ : [rc] "=d" (rc), [old] "=&d" (*old), \
"m" (*uaddr) : "cc"); [new] "=&d" (new), [uaddr] "+Q" (*uaddr) \
: [oparg] "d" (oparg) \
: "cc"); \
if (!rc) \
instrument_copy_from_user_after(old, uaddr, sizeof(*old), 0); \
return rc; \
}
static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval, FUTEX_OP_FUNC(set, "lr %[new],%[oparg]\n")
u32 __user *uaddr) FUTEX_OP_FUNC(add, "lr %[new],%[old]\n ar %[new],%[oparg]\n")
FUTEX_OP_FUNC(or, "lr %[new],%[old]\n or %[new],%[oparg]\n")
FUTEX_OP_FUNC(and, "lr %[new],%[old]\n nr %[new],%[oparg]\n")
FUTEX_OP_FUNC(xor, "lr %[new],%[old]\n xr %[new],%[oparg]\n")
static inline
int arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
{ {
int oldval = 0, newval, ret; int old, rc;
switch (op) { switch (op) {
case FUTEX_OP_SET: case FUTEX_OP_SET:
__futex_atomic_op("lr %2,%5\n", rc = __futex_atomic_set(oparg, &old, uaddr);
ret, oldval, newval, uaddr, oparg);
break; break;
case FUTEX_OP_ADD: case FUTEX_OP_ADD:
__futex_atomic_op("lr %2,%1\nar %2,%5\n", rc = __futex_atomic_add(oparg, &old, uaddr);
ret, oldval, newval, uaddr, oparg);
break; break;
case FUTEX_OP_OR: case FUTEX_OP_OR:
__futex_atomic_op("lr %2,%1\nor %2,%5\n", rc = __futex_atomic_or(oparg, &old, uaddr);
ret, oldval, newval, uaddr, oparg);
break; break;
case FUTEX_OP_ANDN: case FUTEX_OP_ANDN:
__futex_atomic_op("lr %2,%1\nnr %2,%5\n", rc = __futex_atomic_and(~oparg, &old, uaddr);
ret, oldval, newval, uaddr, ~oparg);
break; break;
case FUTEX_OP_XOR: case FUTEX_OP_XOR:
__futex_atomic_op("lr %2,%1\nxr %2,%5\n", rc = __futex_atomic_xor(oparg, &old, uaddr);
ret, oldval, newval, uaddr, oparg);
break; break;
default: default:
ret = -ENOSYS; rc = -ENOSYS;
}
if (!rc)
*oval = old;
return rc;
} }
if (!ret) static uaccess_kmsan_or_inline
*oval = oldval; int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newval)
return ret;
}
static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{ {
int ret; int rc;
asm volatile( instrument_copy_from_user_before(uval, uaddr, sizeof(*uval));
asm_inline volatile(
" sacf 256\n" " sacf 256\n"
"0: cs %1,%4,0(%5)\n" "0: cs %[old],%[new],%[uaddr]\n"
"1: la %0,0\n" "1: lhi %[rc],0\n"
"2: sacf 768\n" "2: sacf 768\n"
EX_TABLE(0b,2b) EX_TABLE(1b,2b) EX_TABLE_UA_FAULT(0b, 2b, %[rc])
: "=d" (ret), "+d" (oldval), "=m" (*uaddr) EX_TABLE_UA_FAULT(1b, 2b, %[rc])
: "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr) : [rc] "=d" (rc), [old] "+d" (oldval), [uaddr] "+Q" (*uaddr)
: [new] "d" (newval)
: "cc", "memory"); : "cc", "memory");
*uval = oldval; *uval = oldval;
return ret; instrument_copy_from_user_after(uval, uaddr, sizeof(*uval), 0);
return rc;
} }
#endif /* _ASM_S390_FUTEX_H */
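Each of the new __futex_atomic_*() helpers above is the same load / compute / compare-and-swap retry loop, written in inline assembly with sacf and per-instruction EX_TABLE_UA_FAULT entries. The core loop, restated with GCC atomic builtins purely for illustration (plain user-space code, no fault handling):

#include <stdio.h>

static int futex_atomic_add(int oparg, int *old, unsigned int *uaddr)
{
	unsigned int cur = __atomic_load_n(uaddr, __ATOMIC_RELAXED);
	unsigned int new;

	do {
		new = cur + oparg;	/* FUTEX_OP_ADD; other ops differ only here */
	} while (!__atomic_compare_exchange_n(uaddr, &cur, new, 0,
					      __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST));
	*old = cur;
	return 0;	/* the kernel variant returns -EFAULT on a faulting access */
}

int main(void)
{
	unsigned int val = 40;
	int old;

	futex_atomic_add(2, &old, &val);
	printf("old=%d new=%u\n", old, val);	/* old=40 new=42 */
	return 0;
}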


@@ -184,7 +184,11 @@ extern struct vm_layout vm_layout;
#define __kaslr_offset vm_layout.kaslr_offset
#define __kaslr_offset_phys vm_layout.kaslr_offset_phys
+#ifdef CONFIG_RANDOMIZE_IDENTITY_BASE
#define __identity_base vm_layout.identity_base
+#else
+#define __identity_base 0UL
+#endif
#define ident_map_size vm_layout.identity_size
static inline unsigned long kaslr_offset(void)


@@ -26,7 +26,7 @@ enum reserved_range_type {
RR_AMODE31,
RR_IPLREPORT,
RR_CERT_COMP_LIST,
-RR_MEM_DETECT_EXTENDED,
+RR_MEM_DETECT_EXT,
RR_VMEM,
RR_MAX
};
@@ -128,7 +128,7 @@ static inline const char *get_rr_type_name(enum reserved_range_type t)
RR_TYPE_NAME(AMODE31);
RR_TYPE_NAME(IPLREPORT);
RR_TYPE_NAME(CERT_COMP_LIST);
-RR_TYPE_NAME(MEM_DETECT_EXTENDED);
+RR_TYPE_NAME(MEM_DETECT_EXT);
RR_TYPE_NAME(VMEM);
default:
return "UNKNOWN";


@@ -172,6 +172,7 @@ void sclp_early_printk(const char *s);
void __sclp_early_printk(const char *s, unsigned int len);
void sclp_emergency_printk(const char *s);
+int sclp_init(void);
int sclp_early_get_memsize(unsigned long *mem);
int sclp_early_get_hsa_size(unsigned long *hsa_size);
int _sclp_get_core_info(struct sclp_core_info *info);


@ -22,16 +22,117 @@
void debug_user_asce(int exit); void debug_user_asce(int exit);
unsigned long __must_check union oac {
raw_copy_from_user(void *to, const void __user *from, unsigned long n); unsigned int val;
struct {
struct {
unsigned short key : 4;
unsigned short : 4;
unsigned short as : 2;
unsigned short : 4;
unsigned short k : 1;
unsigned short a : 1;
} oac1;
struct {
unsigned short key : 4;
unsigned short : 4;
unsigned short as : 2;
unsigned short : 4;
unsigned short k : 1;
unsigned short a : 1;
} oac2;
};
};
unsigned long __must_check static __always_inline __must_check unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n); raw_copy_from_user_key(void *to, const void __user *from, unsigned long size, unsigned long key)
{
unsigned long rem;
union oac spec = {
.oac2.key = key,
.oac2.as = PSW_BITS_AS_SECONDARY,
.oac2.k = 1,
.oac2.a = 1,
};
#ifndef CONFIG_KASAN asm_inline volatile(
#define INLINE_COPY_FROM_USER " lr %%r0,%[spec]\n"
#define INLINE_COPY_TO_USER "0: mvcos 0(%[to]),0(%[from]),%[size]\n"
#endif "1: jz 5f\n"
" algr %[size],%[val]\n"
" slgr %[from],%[val]\n"
" slgr %[to],%[val]\n"
" j 0b\n"
"2: la %[rem],4095(%[from])\n" /* rem = from + 4095 */
" nr %[rem],%[val]\n" /* rem = (from + 4095) & -4096 */
" slgr %[rem],%[from]\n"
" clgr %[size],%[rem]\n" /* copy crosses next page boundary? */
" jnh 6f\n"
"3: mvcos 0(%[to]),0(%[from]),%[rem]\n"
"4: slgr %[size],%[rem]\n"
" j 6f\n"
"5: lghi %[size],0\n"
"6:\n"
EX_TABLE(0b, 2b)
EX_TABLE(1b, 2b)
EX_TABLE(3b, 6b)
EX_TABLE(4b, 6b)
: [size] "+&a" (size), [from] "+&a" (from), [to] "+&a" (to), [rem] "=&a" (rem)
: [val] "a" (-4096UL), [spec] "d" (spec.val)
: "cc", "memory", "0");
return size;
}
static __always_inline __must_check unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
return raw_copy_from_user_key(to, from, n, 0);
}
static __always_inline __must_check unsigned long
raw_copy_to_user_key(void __user *to, const void *from, unsigned long size, unsigned long key)
{
unsigned long rem;
union oac spec = {
.oac1.key = key,
.oac1.as = PSW_BITS_AS_SECONDARY,
.oac1.k = 1,
.oac1.a = 1,
};
asm_inline volatile(
" lr %%r0,%[spec]\n"
"0: mvcos 0(%[to]),0(%[from]),%[size]\n"
"1: jz 5f\n"
" algr %[size],%[val]\n"
" slgr %[to],%[val]\n"
" slgr %[from],%[val]\n"
" j 0b\n"
"2: la %[rem],4095(%[to])\n" /* rem = to + 4095 */
" nr %[rem],%[val]\n" /* rem = (to + 4095) & -4096 */
" slgr %[rem],%[to]\n"
" clgr %[size],%[rem]\n" /* copy crosses next page boundary? */
" jnh 6f\n"
"3: mvcos 0(%[to]),0(%[from]),%[rem]\n"
"4: slgr %[size],%[rem]\n"
" j 6f\n"
"5: lghi %[size],0\n"
"6:\n"
EX_TABLE(0b, 2b)
EX_TABLE(1b, 2b)
EX_TABLE(3b, 6b)
EX_TABLE(4b, 6b)
: [size] "+&a" (size), [to] "+&a" (to), [from] "+&a" (from), [rem] "=&a" (rem)
: [val] "a" (-4096UL), [spec] "d" (spec.val)
: "cc", "memory", "0");
return size;
}
static __always_inline __must_check unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
return raw_copy_to_user_key(to, from, n, 0);
}
unsigned long __must_check unsigned long __must_check
_copy_from_user_key(void *to, const void __user *from, unsigned long n, unsigned long key); _copy_from_user_key(void *to, const void __user *from, unsigned long n, unsigned long key);
@ -55,63 +156,71 @@ copy_to_user_key(void __user *to, const void *from, unsigned long n, unsigned lo
return n; return n;
} }
union oac {
unsigned int val;
struct {
struct {
unsigned short key : 4;
unsigned short : 4;
unsigned short as : 2;
unsigned short : 4;
unsigned short k : 1;
unsigned short a : 1;
} oac1;
struct {
unsigned short key : 4;
unsigned short : 4;
unsigned short as : 2;
unsigned short : 4;
unsigned short k : 1;
unsigned short a : 1;
} oac2;
};
};
int __noreturn __put_user_bad(void); int __noreturn __put_user_bad(void);
#ifdef CONFIG_KMSAN #ifdef CONFIG_KMSAN
#define get_put_user_noinstr_attributes \ #define uaccess_kmsan_or_inline noinline __maybe_unused __no_sanitize_memory
noinline __maybe_unused __no_sanitize_memory
#else #else
#define get_put_user_noinstr_attributes __always_inline #define uaccess_kmsan_or_inline __always_inline
#endif #endif
#define DEFINE_PUT_USER(type) \ #ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
static get_put_user_noinstr_attributes int \
#define DEFINE_PUT_USER_NOINSTR(type) \
static uaccess_kmsan_or_inline int \
__put_user_##type##_noinstr(unsigned type __user *to, \
unsigned type *from, \
unsigned long size) \
{ \
asm goto( \
" llilh %%r0,%[spec]\n" \
"0: mvcos %[to],%[from],%[size]\n" \
"1: nopr %%r7\n" \
EX_TABLE(0b, %l[Efault]) \
EX_TABLE(1b, %l[Efault]) \
: [to] "+Q" (*to) \
: [size] "d" (size), [from] "Q" (*from), \
[spec] "I" (0x81) \
: "cc", "0" \
: Efault \
); \
return 0; \
Efault: \
return -EFAULT; \
}
#else /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
#define DEFINE_PUT_USER_NOINSTR(type) \
static uaccess_kmsan_or_inline int \
__put_user_##type##_noinstr(unsigned type __user *to, \ __put_user_##type##_noinstr(unsigned type __user *to, \
unsigned type *from, \ unsigned type *from, \
unsigned long size) \ unsigned long size) \
{ \ { \
union oac __oac_spec = { \
.oac1.as = PSW_BITS_AS_SECONDARY, \
.oac1.a = 1, \
}; \
int rc; \ int rc; \
\ \
asm volatile( \ asm volatile( \
" lr 0,%[spec]\n" \ " llilh %%r0,%[spec]\n" \
"0: mvcos %[_to],%[_from],%[_size]\n" \ "0: mvcos %[to],%[from],%[size]\n" \
"1: xr %[rc],%[rc]\n" \ "1: lhi %[rc],0\n" \
"2:\n" \ "2:\n" \
EX_TABLE_UA_STORE(0b, 2b, %[rc]) \ EX_TABLE_UA_FAULT(0b, 2b, %[rc]) \
EX_TABLE_UA_STORE(1b, 2b, %[rc]) \ EX_TABLE_UA_FAULT(1b, 2b, %[rc]) \
: [rc] "=&d" (rc), [_to] "+Q" (*(to)) \ : [rc] "=d" (rc), [to] "+Q" (*to) \
: [_size] "d" (size), [_from] "Q" (*(from)), \ : [size] "d" (size), [from] "Q" (*from), \
[spec] "d" (__oac_spec.val) \ [spec] "I" (0x81) \
: "cc", "0"); \ : "cc", "0"); \
return rc; \ return rc; \
} \ }
\
#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
DEFINE_PUT_USER_NOINSTR(char);
DEFINE_PUT_USER_NOINSTR(short);
DEFINE_PUT_USER_NOINSTR(int);
DEFINE_PUT_USER_NOINSTR(long);
#define DEFINE_PUT_USER(type) \
static __always_inline int \ static __always_inline int \
__put_user_##type(unsigned type __user *to, unsigned type *from, \ __put_user_##type(unsigned type __user *to, unsigned type *from, \
unsigned long size) \ unsigned long size) \
@ -128,69 +237,111 @@ DEFINE_PUT_USER(short);
DEFINE_PUT_USER(int); DEFINE_PUT_USER(int);
DEFINE_PUT_USER(long); DEFINE_PUT_USER(long);
static __always_inline int __put_user_fn(void *x, void __user *ptr, unsigned long size) #define __put_user(x, ptr) \
{ ({ \
int rc; __typeof__(*(ptr)) __x = (x); \
int __prc; \
\
__chk_user_ptr(ptr); \
switch (sizeof(*(ptr))) { \
case 1: \
__prc = __put_user_char((unsigned char __user *)(ptr), \
(unsigned char *)&__x, \
sizeof(*(ptr))); \
break; \
case 2: \
__prc = __put_user_short((unsigned short __user *)(ptr),\
(unsigned short *)&__x, \
sizeof(*(ptr))); \
break; \
case 4: \
__prc = __put_user_int((unsigned int __user *)(ptr), \
(unsigned int *)&__x, \
sizeof(*(ptr))); \
break; \
case 8: \
__prc = __put_user_long((unsigned long __user *)(ptr), \
(unsigned long *)&__x, \
sizeof(*(ptr))); \
break; \
default: \
__prc = __put_user_bad(); \
break; \
} \
__builtin_expect(__prc, 0); \
})
switch (size) { #define put_user(x, ptr) \
case 1: ({ \
rc = __put_user_char((unsigned char __user *)ptr, might_fault(); \
(unsigned char *)x, __put_user(x, ptr); \
size); })
break;
case 2:
rc = __put_user_short((unsigned short __user *)ptr,
(unsigned short *)x,
size);
break;
case 4:
rc = __put_user_int((unsigned int __user *)ptr,
(unsigned int *)x,
size);
break;
case 8:
rc = __put_user_long((unsigned long __user *)ptr,
(unsigned long *)x,
size);
break;
default:
__put_user_bad();
break;
}
return rc;
}
int __noreturn __get_user_bad(void); int __noreturn __get_user_bad(void);
#define DEFINE_GET_USER(type) \ #ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
static get_put_user_noinstr_attributes int \
#define DEFINE_GET_USER_NOINSTR(type) \
static uaccess_kmsan_or_inline int \
__get_user_##type##_noinstr(unsigned type *to, \ __get_user_##type##_noinstr(unsigned type *to, \
unsigned type __user *from, \ const unsigned type __user *from, \
unsigned long size) \
{ \
asm goto( \
" lhi %%r0,%[spec]\n" \
"0: mvcos %[to],%[from],%[size]\n" \
"1: nopr %%r7\n" \
EX_TABLE(0b, %l[Efault]) \
EX_TABLE(1b, %l[Efault]) \
: [to] "=Q" (*to) \
: [size] "d" (size), [from] "Q" (*from), \
[spec] "I" (0x81) \
: "cc", "0" \
: Efault \
); \
return 0; \
Efault: \
*to = 0; \
return -EFAULT; \
}
#else /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
#define DEFINE_GET_USER_NOINSTR(type) \
static uaccess_kmsan_or_inline int \
__get_user_##type##_noinstr(unsigned type *to, \
const unsigned type __user *from, \
unsigned long size) \ unsigned long size) \
{ \ { \
union oac __oac_spec = { \
.oac2.as = PSW_BITS_AS_SECONDARY, \
.oac2.a = 1, \
}; \
int rc; \ int rc; \
\ \
asm volatile( \ asm volatile( \
" lr 0,%[spec]\n" \ " lhi %%r0,%[spec]\n" \
"0: mvcos 0(%[_to]),%[_from],%[_size]\n" \ "0: mvcos %[to],%[from],%[size]\n" \
"1: xr %[rc],%[rc]\n" \ "1: lhi %[rc],0\n" \
"2:\n" \ "2:\n" \
EX_TABLE_UA_LOAD_MEM(0b, 2b, %[rc], %[_to], %[_ksize]) \ EX_TABLE_UA_FAULT(0b, 2b, %[rc]) \
EX_TABLE_UA_LOAD_MEM(1b, 2b, %[rc], %[_to], %[_ksize]) \ EX_TABLE_UA_FAULT(1b, 2b, %[rc]) \
: [rc] "=&d" (rc), "=Q" (*(to)) \ : [rc] "=d" (rc), [to] "=Q" (*to) \
: [_size] "d" (size), [_from] "Q" (*(from)), \ : [size] "d" (size), [from] "Q" (*from), \
[spec] "d" (__oac_spec.val), [_to] "a" (to), \ [spec] "I" (0x81) \
[_ksize] "K" (size) \
: "cc", "0"); \ : "cc", "0"); \
if (likely(!rc)) \
return 0; \
*to = 0; \
return rc; \ return rc; \
} \ }
\
#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
DEFINE_GET_USER_NOINSTR(char);
DEFINE_GET_USER_NOINSTR(short);
DEFINE_GET_USER_NOINSTR(int);
DEFINE_GET_USER_NOINSTR(long);
#define DEFINE_GET_USER(type) \
static __always_inline int \ static __always_inline int \
__get_user_##type(unsigned type *to, unsigned type __user *from, \ __get_user_##type(unsigned type *to, const unsigned type __user *from, \
unsigned long size) \ unsigned long size) \
{ \ { \
int rc; \ int rc; \
@ -205,107 +356,50 @@ DEFINE_GET_USER(short);
DEFINE_GET_USER(int); DEFINE_GET_USER(int);
DEFINE_GET_USER(long); DEFINE_GET_USER(long);
static __always_inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
{
int rc;
switch (size) {
case 1:
rc = __get_user_char((unsigned char *)x,
(unsigned char __user *)ptr,
size);
break;
case 2:
rc = __get_user_short((unsigned short *)x,
(unsigned short __user *)ptr,
size);
break;
case 4:
rc = __get_user_int((unsigned int *)x,
(unsigned int __user *)ptr,
size);
break;
case 8:
rc = __get_user_long((unsigned long *)x,
(unsigned long __user *)ptr,
size);
break;
default:
__get_user_bad();
break;
}
return rc;
}
/*
* These are the main single-value transfer routines. They automatically
* use the right size if we just have the right pointer type.
*/
#define __put_user(x, ptr) \
({ \
__typeof__(*(ptr)) __x = (x); \
int __pu_err = -EFAULT; \
\
__chk_user_ptr(ptr); \
switch (sizeof(*(ptr))) { \
case 1: \
case 2: \
case 4: \
case 8: \
__pu_err = __put_user_fn(&__x, ptr, sizeof(*(ptr))); \
break; \
default: \
__put_user_bad(); \
break; \
} \
__builtin_expect(__pu_err, 0); \
})
#define put_user(x, ptr) \
({ \
might_fault(); \
__put_user(x, ptr); \
})
#define __get_user(x, ptr) \ #define __get_user(x, ptr) \
({ \ ({ \
int __gu_err = -EFAULT; \ const __user void *____guptr = (ptr); \
int __grc; \
\ \
__chk_user_ptr(ptr); \ __chk_user_ptr(ptr); \
switch (sizeof(*(ptr))) { \ switch (sizeof(*(ptr))) { \
case 1: { \ case 1: { \
const unsigned char __user *__guptr = ____guptr; \
unsigned char __x; \ unsigned char __x; \
\ \
__gu_err = __get_user_fn(&__x, ptr, sizeof(*(ptr))); \ __grc = __get_user_char(&__x, __guptr, sizeof(*(ptr))); \
(x) = *(__force __typeof__(*(ptr)) *)&__x; \ (x) = *(__force __typeof__(*(ptr)) *)&__x; \
break; \ break; \
}; \ }; \
case 2: { \ case 2: { \
const unsigned short __user *__guptr = ____guptr; \
unsigned short __x; \ unsigned short __x; \
\ \
__gu_err = __get_user_fn(&__x, ptr, sizeof(*(ptr))); \ __grc = __get_user_short(&__x, __guptr, sizeof(*(ptr)));\
(x) = *(__force __typeof__(*(ptr)) *)&__x; \ (x) = *(__force __typeof__(*(ptr)) *)&__x; \
break; \ break; \
}; \ }; \
case 4: { \ case 4: { \
const unsigned int __user *__guptr = ____guptr; \
unsigned int __x; \ unsigned int __x; \
\ \
__gu_err = __get_user_fn(&__x, ptr, sizeof(*(ptr))); \ __grc = __get_user_int(&__x, __guptr, sizeof(*(ptr))); \
(x) = *(__force __typeof__(*(ptr)) *)&__x; \ (x) = *(__force __typeof__(*(ptr)) *)&__x; \
break; \ break; \
}; \ }; \
case 8: { \ case 8: { \
const unsigned long __user *__guptr = ____guptr; \
unsigned long __x; \ unsigned long __x; \
\ \
__gu_err = __get_user_fn(&__x, ptr, sizeof(*(ptr))); \ __grc = __get_user_long(&__x, __guptr, sizeof(*(ptr))); \
(x) = *(__force __typeof__(*(ptr)) *)&__x; \ (x) = *(__force __typeof__(*(ptr)) *)&__x; \
break; \ break; \
}; \ }; \
default: \ default: \
__get_user_bad(); \ __grc = __get_user_bad(); \
break; \ break; \
} \ } \
__builtin_expect(__gu_err, 0); \ __builtin_expect(__grc, 0); \
}) })
#define get_user(x, ptr) \ #define get_user(x, ptr) \
@ -341,109 +435,71 @@ static inline void *s390_kernel_write(void *dst, const void *src, size_t size)
return __s390_kernel_write(dst, src, size); return __s390_kernel_write(dst, src, size);
} }
int __noreturn __put_kernel_bad(void); void __noreturn __mvc_kernel_nofault_bad(void);
#define __put_kernel_asm(val, to, insn) \ #if defined(CONFIG_CC_HAS_ASM_GOTO_OUTPUT) && defined(CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS)
({ \
int __rc; \
\
asm volatile( \
"0: " insn " %[_val],%[_to]\n" \
"1: xr %[rc],%[rc]\n" \
"2:\n" \
EX_TABLE_UA_STORE(0b, 2b, %[rc]) \
EX_TABLE_UA_STORE(1b, 2b, %[rc]) \
: [rc] "=d" (__rc), [_to] "+Q" (*(to)) \
: [_val] "d" (val) \
: "cc"); \
__rc; \
})
#define __put_kernel_nofault(dst, src, type, err_label) \ #define __mvc_kernel_nofault(dst, src, type, err_label) \
do { \ do { \
unsigned long __x = (unsigned long)(*((type *)(src))); \ switch (sizeof(type)) { \
int __pk_err; \ case 1: \
case 2: \
case 4: \
case 8: \
asm goto( \
"0: mvc %O[_dst](%[_len],%R[_dst]),%[_src]\n" \
"1: nopr %%r7\n" \
EX_TABLE(0b, %l[err_label]) \
EX_TABLE(1b, %l[err_label]) \
: [_dst] "=Q" (*(type *)dst) \
: [_src] "Q" (*(type *)(src)), \
[_len] "I" (sizeof(type)) \
: \
: err_label); \
break; \
default: \
__mvc_kernel_nofault_bad(); \
break; \
} \
} while (0)
#else /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT) && CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */
#define __mvc_kernel_nofault(dst, src, type, err_label) \
do { \
type *(__dst) = (type *)(dst); \
int __rc; \
\ \
switch (sizeof(type)) { \ switch (sizeof(type)) { \
case 1: \ case 1: \
__pk_err = __put_kernel_asm(__x, (type *)(dst), "stc"); \
break; \
case 2: \ case 2: \
__pk_err = __put_kernel_asm(__x, (type *)(dst), "sth"); \
break; \
case 4: \ case 4: \
__pk_err = __put_kernel_asm(__x, (type *)(dst), "st"); \
break; \
case 8: \ case 8: \
__pk_err = __put_kernel_asm(__x, (type *)(dst), "stg"); \ asm_inline volatile( \
break; \ "0: mvc 0(%[_len],%[_dst]),%[_src]\n" \
default: \ "1: lhi %[_rc],0\n" \
__pk_err = __put_kernel_bad(); \
break; \
} \
if (unlikely(__pk_err)) \
goto err_label; \
} while (0)
int __noreturn __get_kernel_bad(void);
#define __get_kernel_asm(val, from, insn) \
({ \
int __rc; \
\
asm volatile( \
"0: " insn " %[_val],%[_from]\n" \
"1: xr %[rc],%[rc]\n" \
"2:\n" \ "2:\n" \
EX_TABLE_UA_LOAD_REG(0b, 2b, %[rc], %[_val]) \ EX_TABLE_UA_FAULT(0b, 2b, %[_rc]) \
EX_TABLE_UA_LOAD_REG(1b, 2b, %[rc], %[_val]) \ EX_TABLE_UA_FAULT(1b, 2b, %[_rc]) \
: [rc] "=d" (__rc), [_val] "=d" (val) \ : [_rc] "=d" (__rc), \
: [_from] "Q" (*(from)) \ "=m" (*__dst) \
: "cc"); \ : [_src] "Q" (*(type *)(src)), \
__rc; \ [_dst] "a" (__dst), \
}) [_len] "I" (sizeof(type))); \
if (__rc) \
#define __get_kernel_nofault(dst, src, type, err_label) \ goto err_label; \
do { \
int __gk_err; \
\
switch (sizeof(type)) { \
case 1: { \
unsigned char __x; \
\
__gk_err = __get_kernel_asm(__x, (type *)(src), "ic"); \
*((type *)(dst)) = (type)__x; \
break; \ break; \
}; \
case 2: { \
unsigned short __x; \
\
__gk_err = __get_kernel_asm(__x, (type *)(src), "lh"); \
*((type *)(dst)) = (type)__x; \
break; \
}; \
case 4: { \
unsigned int __x; \
\
__gk_err = __get_kernel_asm(__x, (type *)(src), "l"); \
*((type *)(dst)) = (type)__x; \
break; \
}; \
case 8: { \
unsigned long __x; \
\
__gk_err = __get_kernel_asm(__x, (type *)(src), "lg"); \
*((type *)(dst)) = (type)__x; \
break; \
}; \
default: \ default: \
__gk_err = __get_kernel_bad(); \ __mvc_kernel_nofault_bad(); \
break; \ break; \
} \ } \
if (unlikely(__gk_err)) \
goto err_label; \
} while (0) } while (0)
#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT && CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */
#define __get_kernel_nofault __mvc_kernel_nofault
#define __put_kernel_nofault __mvc_kernel_nofault
void __cmpxchg_user_key_called_with_bad_pointer(void);
#define CMPXCHG_USER_KEY_MAX_LOOPS 128
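The new __mvc_kernel_nofault() above gives __get_kernel_nofault()/__put_kernel_nofault() a single mvc-based implementation whose error path is a jump to err_label. A rough user-space sketch of that calling convention only (the real macro relies on mvc and the exception table; the NULL check below is a stand-in for a faulting access and everything here is simplified illustration):

#include <stdio.h>

#define get_nofault(dst, src, type, err_label)		\
do {							\
	if (!(src))					\
		goto err_label;				\
	*(type *)(dst) = *(const type *)(src);		\
} while (0)

int main(void)
{
	long src = 42, dst = 0;

	get_nofault(&dst, &src, long, Efault);
	printf("copied %ld\n", dst);
	return 0;
Efault:
	printf("fault\n");
	return 1;
}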


@@ -50,6 +50,7 @@ decompressor_handled_param(facilities);
decompressor_handled_param(nokaslr);
decompressor_handled_param(cmma);
decompressor_handled_param(relocate_lowcore);
+decompressor_handled_param(bootdebug);
#if IS_ENABLED(CONFIG_KVM)
decompressor_handled_param(prot_virt);
#endif
@@ -58,7 +59,7 @@ static void __init kasan_early_init(void)
{
#ifdef CONFIG_KASAN
init_task.kasan_depth = 0;
-sclp_early_printk("KernelAddressSanitizer initialized\n");
+pr_info("KernelAddressSanitizer initialized\n");
#endif
}


@@ -157,6 +157,12 @@ u64 __bootdata_preserved(stfle_fac_list[16]);
EXPORT_SYMBOL(stfle_fac_list);
struct oldmem_data __bootdata_preserved(oldmem_data);
+char __bootdata(boot_rb)[PAGE_SIZE * 2];
+bool __bootdata(boot_earlyprintk);
+size_t __bootdata(boot_rb_off);
+char __bootdata(bootdebug_filter)[128];
+bool __bootdata(bootdebug);
unsigned long __bootdata_preserved(VMALLOC_START);
EXPORT_SYMBOL(VMALLOC_START);
@@ -686,7 +692,7 @@ static void __init reserve_physmem_info(void)
{
unsigned long addr, size;
-if (get_physmem_reserved(RR_MEM_DETECT_EXTENDED, &addr, &size))
+if (get_physmem_reserved(RR_MEM_DETECT_EXT, &addr, &size))
memblock_reserve(addr, size);
}
@@ -694,7 +700,7 @@ static void __init free_physmem_info(void)
{
unsigned long addr, size;
-if (get_physmem_reserved(RR_MEM_DETECT_EXTENDED, &addr, &size))
+if (get_physmem_reserved(RR_MEM_DETECT_EXT, &addr, &size))
memblock_phys_free(addr, size);
}
@@ -724,7 +730,7 @@ static void __init reserve_lowcore(void)
void *lowcore_end = lowcore_start + sizeof(struct lowcore);
void *start, *end;
-if ((void *)__identity_base < lowcore_end) {
+if (absolute_pointer(__identity_base) < lowcore_end) {
start = max(lowcore_start, (void *)__identity_base);
end = min(lowcore_end, (void *)(__identity_base + ident_map_size));
memblock_reserve(__pa(start), __pa(end));
@@ -865,6 +871,23 @@ static void __init log_component_list(void)
}
}
+/*
+ * Print avoiding interpretation of % in buf and taking bootdebug option
+ * into consideration.
+ */
+static void __init print_rb_entry(const char *buf)
+{
+char fmt[] = KERN_SOH "0boot: %s";
+int level = printk_get_level(buf);
+buf = skip_timestamp(printk_skip_level(buf));
+if (level == KERN_DEBUG[1] && (!bootdebug || !bootdebug_filter_match(buf)))
+return;
+fmt[1] = level;
+printk(fmt, buf);
+}
/*
* Setup function called from init/main.c just after the banner
* was printed.
@@ -884,6 +907,9 @@ void __init setup_arch(char **cmdline_p)
pr_info("Linux is running natively in 64-bit mode\n");
else
pr_info("Linux is running as a guest in 64-bit mode\n");
+/* Print decompressor messages if not already printed */
+if (!boot_earlyprintk)
+boot_rb_foreach(print_rb_entry);
if (have_relocated_lowcore())
pr_info("Lowcore relocated to 0x%px\n", get_lowcore());
@@ -987,3 +1013,8 @@
/* Add system specific data to the random pool */
setup_randomness();
}
+void __init arch_cpu_finalize_init(void)
+{
+sclp_init();
+}
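print_rb_entry() above re-emits each stored decompressor message through printk() after peeling off the severity byte and the optional "[ timestamp] " prefix. A stand-alone sketch of that parsing, assuming an entry layout of KERN_SOH ('\001') plus a level digit plus an optional timestamp (the sample entry text is invented for illustration, not real boot output):

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *entry = "\0017[    0.012345] vmem: illustrative debug message";
	const char *text = entry;
	char level = ' ';

	if (text[0] == '\001' && text[1]) {	/* like printk_get_level()/printk_skip_level() */
		level = text[1];
		text += 2;
	}
	const char *p = strchr(text, ']');	/* like skip_timestamp() */
	if (p && p[1] == ' ')
		text = p + 2;
	printf("level %c: %s\n", level, text);
	return 0;
}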


@@ -52,7 +52,6 @@ SECTIONS
SOFTIRQENTRY_TEXT
FTRACE_HOTPATCH_TRAMPOLINES_TEXT
*(.text.*_indirect_*)
-*(.fixup)
*(.gnu.warning)
. = ALIGN(PAGE_SIZE);
_etext = .; /* End of text section */


@ -31,51 +31,6 @@ void debug_user_asce(int exit)
} }
#endif /*CONFIG_DEBUG_ENTRY */ #endif /*CONFIG_DEBUG_ENTRY */
static unsigned long raw_copy_from_user_key(void *to, const void __user *from,
unsigned long size, unsigned long key)
{
unsigned long rem;
union oac spec = {
.oac2.key = key,
.oac2.as = PSW_BITS_AS_SECONDARY,
.oac2.k = 1,
.oac2.a = 1,
};
asm volatile(
" lr 0,%[spec]\n"
"0: mvcos 0(%[to]),0(%[from]),%[size]\n"
"1: jz 5f\n"
" algr %[size],%[val]\n"
" slgr %[from],%[val]\n"
" slgr %[to],%[val]\n"
" j 0b\n"
"2: la %[rem],4095(%[from])\n" /* rem = from + 4095 */
" nr %[rem],%[val]\n" /* rem = (from + 4095) & -4096 */
" slgr %[rem],%[from]\n"
" clgr %[size],%[rem]\n" /* copy crosses next page boundary? */
" jnh 6f\n"
"3: mvcos 0(%[to]),0(%[from]),%[rem]\n"
"4: slgr %[size],%[rem]\n"
" j 6f\n"
"5: slgr %[size],%[size]\n"
"6:\n"
EX_TABLE(0b, 2b)
EX_TABLE(1b, 2b)
EX_TABLE(3b, 6b)
EX_TABLE(4b, 6b)
: [size] "+&a" (size), [from] "+&a" (from), [to] "+&a" (to), [rem] "=&a" (rem)
: [val] "a" (-4096UL), [spec] "d" (spec.val)
: "cc", "memory", "0");
return size;
}
unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
return raw_copy_from_user_key(to, from, n, 0);
}
EXPORT_SYMBOL(raw_copy_from_user);
unsigned long _copy_from_user_key(void *to, const void __user *from, unsigned long _copy_from_user_key(void *to, const void __user *from,
unsigned long n, unsigned long key) unsigned long n, unsigned long key)
{ {
@ -93,51 +48,6 @@ unsigned long _copy_from_user_key(void *to, const void __user *from,
} }
EXPORT_SYMBOL(_copy_from_user_key); EXPORT_SYMBOL(_copy_from_user_key);
static unsigned long raw_copy_to_user_key(void __user *to, const void *from,
unsigned long size, unsigned long key)
{
unsigned long rem;
union oac spec = {
.oac1.key = key,
.oac1.as = PSW_BITS_AS_SECONDARY,
.oac1.k = 1,
.oac1.a = 1,
};
asm volatile(
" lr 0,%[spec]\n"
"0: mvcos 0(%[to]),0(%[from]),%[size]\n"
"1: jz 5f\n"
" algr %[size],%[val]\n"
" slgr %[to],%[val]\n"
" slgr %[from],%[val]\n"
" j 0b\n"
"2: la %[rem],4095(%[to])\n" /* rem = to + 4095 */
" nr %[rem],%[val]\n" /* rem = (to + 4095) & -4096 */
" slgr %[rem],%[to]\n"
" clgr %[size],%[rem]\n" /* copy crosses next page boundary? */
" jnh 6f\n"
"3: mvcos 0(%[to]),0(%[from]),%[rem]\n"
"4: slgr %[size],%[rem]\n"
" j 6f\n"
"5: slgr %[size],%[size]\n"
"6:\n"
EX_TABLE(0b, 2b)
EX_TABLE(1b, 2b)
EX_TABLE(3b, 6b)
EX_TABLE(4b, 6b)
: [size] "+&a" (size), [to] "+&a" (to), [from] "+&a" (from), [rem] "=&a" (rem)
: [val] "a" (-4096UL), [spec] "d" (spec.val)
: "cc", "memory", "0");
return size;
}
unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
return raw_copy_to_user_key(to, from, n, 0);
}
EXPORT_SYMBOL(raw_copy_to_user);
unsigned long _copy_to_user_key(void __user *to, const void *from, unsigned long _copy_to_user_key(void __user *to, const void *from,
unsigned long n, unsigned long key) unsigned long n, unsigned long key)
{ {


@@ -7,6 +7,7 @@
#include <linux/panic.h>
#include <asm/asm-extable.h>
#include <asm/extable.h>
+#include <asm/fpu.h>
const struct exception_table_entry *s390_search_extables(unsigned long addr)
{
@@ -26,7 +27,7 @@ static bool ex_handler_fixup(const struct exception_table_entry *ex, struct pt_r
return true;
}
-static bool ex_handler_ua_store(const struct exception_table_entry *ex, struct pt_regs *regs)
+static bool ex_handler_ua_fault(const struct exception_table_entry *ex, struct pt_regs *regs)
{
unsigned int reg_err = FIELD_GET(EX_DATA_REG_ERR, ex->data);
@@ -35,18 +36,6 @@ static bool ex_handler_ua_store(const struct exception_table_entry *ex, struct p
return true;
}
-static bool ex_handler_ua_load_mem(const struct exception_table_entry *ex, struct pt_regs *regs)
-{
-unsigned int reg_addr = FIELD_GET(EX_DATA_REG_ADDR, ex->data);
-unsigned int reg_err = FIELD_GET(EX_DATA_REG_ERR, ex->data);
-size_t len = FIELD_GET(EX_DATA_LEN, ex->data);
-regs->gprs[reg_err] = -EFAULT;
-memset((void *)regs->gprs[reg_addr], 0, len);
-regs->psw.addr = extable_fixup(ex);
-return true;
-}
static bool ex_handler_ua_load_reg(const struct exception_table_entry *ex,
bool pair, struct pt_regs *regs)
{
@@ -77,6 +66,13 @@ static bool ex_handler_zeropad(const struct exception_table_entry *ex, struct pt
return true;
}
+static bool ex_handler_fpc(const struct exception_table_entry *ex, struct pt_regs *regs)
+{
+fpu_sfpc(0);
+regs->psw.addr = extable_fixup(ex);
+return true;
+}
bool fixup_exception(struct pt_regs *regs)
{
const struct exception_table_entry *ex;
@@ -89,16 +85,16 @@ bool fixup_exception(struct pt_regs *regs)
return ex_handler_fixup(ex, regs);
case EX_TYPE_BPF:
return ex_handler_bpf(ex, regs);
-case EX_TYPE_UA_STORE:
-return ex_handler_ua_store(ex, regs);
-case EX_TYPE_UA_LOAD_MEM:
-return ex_handler_ua_load_mem(ex, regs);
+case EX_TYPE_UA_FAULT:
+return ex_handler_ua_fault(ex, regs);
case EX_TYPE_UA_LOAD_REG:
return ex_handler_ua_load_reg(ex, false, regs);
case EX_TYPE_UA_LOAD_REGPAIR:
return ex_handler_ua_load_reg(ex, true, regs);
case EX_TYPE_ZEROPAD:
return ex_handler_zeropad(ex, regs);
+case EX_TYPE_FPC:
+return ex_handler_fpc(ex, regs);
}
panic("invalid exception table entry");
}


@@ -662,7 +662,7 @@ void __init vmem_map_init(void)
if (!static_key_enabled(&cpu_has_bear))
set_memory_x(0, 1);
if (debug_pagealloc_enabled())
-__set_memory_4k(__va(0), __va(0) + ident_map_size);
+__set_memory_4k(__va(0), absolute_pointer(__va(0)) + ident_map_size);
pr_info("Write protected kernel read-only data: %luk\n",
(unsigned long)(__end_rodata - _stext) >> 10);
}


@@ -171,7 +171,6 @@ void zpci_bus_scan_busses(void)
static bool zpci_bus_is_multifunction_root(struct zpci_dev *zdev)
{
return !s390_pci_no_rid && zdev->rid_available &&
-zpci_is_device_configured(zdev) &&
!zdev->vfn;
}


@@ -13,7 +13,7 @@ CFLAGS_sha256.o := -D__DISABLE_EXPORTS -D__NO_FORTIFY
$(obj)/mem.o: $(srctree)/arch/s390/lib/mem.S FORCE
	$(call if_changed_rule,as_o_S)
-KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes
+KBUILD_CFLAGS := -std=gnu11 -fno-strict-aliasing -Wall -Wstrict-prototypes
KBUILD_CFLAGS += -Wno-pointer-sign -Wno-sign-compare
KBUILD_CFLAGS += -fno-zero-initialized-in-bss -fno-builtin -ffreestanding
KBUILD_CFLAGS += -Os -m64 -msoft-float -fno-common


@@ -201,6 +201,17 @@ static int cmp_long_insn(const void *a, const void *b)
return strcmp(((struct insn *)a)->name, ((struct insn *)b)->name);
}
+static void print_insn_name(const char *name)
+{
+size_t i, len;
+len = strlen(name);
+printf("{");
+for (i = 0; i < len; i++)
+printf(" \'%c\',", name[i]);
+printf(" }");
+}
static void print_long_insn(struct gen_opcode *desc)
{
struct insn *insn;
@@ -223,7 +234,9 @@ static void print_long_insn(struct gen_opcode *desc)
insn = &desc->insn[i];
if (insn->name_len < 6)
continue;
-printf("\t[LONG_INSN_%s] = \"%s\", \\\n", insn->upper, insn->name);
+printf("\t[LONG_INSN_%s] = ", insn->upper);
+print_insn_name(insn->name);
+printf(", \\\n");
}
printf("}\n\n");
}
@@ -236,10 +249,12 @@ static void print_opcode(struct insn *insn, int nr)
if (insn->type->byte != 0)
opcode += 2;
printf("\t[%4d] = { .opfrag = 0x%s, .format = INSTR_%s, ", nr, opcode, insn->format);
-if (insn->name_len < 6)
-printf(".name = \"%s\" ", insn->name);
-else
+if (insn->name_len < 6) {
+printf(".name = ");
+print_insn_name(insn->name);
+} else {
printf(".offset = LONG_INSN_%s", insn->upper);
+}
printf(" }, \\\n");
}
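print_insn_name() above makes the generated opcode tables spell mnemonics out as character arrays instead of NUL-terminated string literals. Run on its own with a made-up mnemonic, it produces output of this shape:

#include <stdio.h>
#include <string.h>

static void print_insn_name(const char *name)
{
	size_t i, len = strlen(name);

	printf("{");
	for (i = 0; i < len; i++)
		printf(" '%c',", name[i]);
	printf(" }");
}

int main(void)
{
	printf(".name = ");
	print_insn_name("example");	/* prints: .name = { 'e', 'x', 'a', 'm', 'p', 'l', 'e', } */
	printf("\n");
	return 0;
}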


@@ -245,7 +245,6 @@ static void sclp_request_timeout(bool force_restart);
static void sclp_process_queue(void);
static void __sclp_make_read_req(void);
static int sclp_init_mask(int calculate);
-static int sclp_init(void);
static void
__sclp_queue_read_req(void)
@@ -1251,8 +1250,7 @@
/* Initialize SCLP driver. Return zero if driver is operational, non-zero
* otherwise. */
-static int
-sclp_init(void)
+int sclp_init(void)
{
unsigned long flags;
int rc = 0;
@@ -1305,13 +1303,7 @@ sclp_init(void)
static __init int sclp_initcall(void)
{
-int rc;
-rc = platform_driver_register(&sclp_pdrv);
-if (rc)
-return rc;
-return sclp_init();
+return platform_driver_register(&sclp_pdrv);
}
arch_initcall(sclp_initcall);


@@ -123,7 +123,7 @@ static DECLARE_WAIT_QUEUE_HEAD(read_wait_queue);
*/
static struct vmlogrdr_priv_t sys_ser[] = {
-{ .system_service = "*LOGREC ",
+{ .system_service = { '*', 'L', 'O', 'G', 'R', 'E', 'C', ' ' },
.internal_name = "logrec",
.recording_name = "EREP",
.minor_num = 0,
@@ -132,7 +132,7 @@ static struct vmlogrdr_priv_t sys_ser[] = {
.autorecording = 1,
.autopurge = 1,
},
-{ .system_service = "*ACCOUNT",
+{ .system_service = { '*', 'A', 'C', 'C', 'O', 'U', 'N', 'T' },
.internal_name = "account",
.recording_name = "ACCOUNT",
.minor_num = 1,
@@ -141,7 +141,7 @@ static struct vmlogrdr_priv_t sys_ser[] = {
.autorecording = 1,
.autopurge = 1,
},
-{ .system_service = "*SYMPTOM",
+{ .system_service = { '*', 'S', 'Y', 'M', 'P', 'T', 'O', 'M' },
.internal_name = "symptom",
.recording_name = "SYMPTOM",
.minor_num = 2,
@@ -356,7 +356,7 @@ static int vmlogrdr_open (struct inode *inode, struct file *filp)
if (connect_rc) {
pr_err("vmlogrdr: iucv connection to %s "
"failed with rc %i \n",
-logptr->system_service, connect_rc);
+logptr->internal_name, connect_rc);
goto out_path;
}
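The vmlogrdr hunks above turn system_service into a fixed 8-character field with no terminating NUL and switch the error message to the NUL-terminated internal_name. A small sketch of why the bounded form matters (the field contents are copied from the hunk, the rest is illustration):

#include <stdio.h>

int main(void)
{
	/* Fixed-width IUCV service name, no terminating NUL byte. */
	const char system_service[8] = { '*', 'L', 'O', 'G', 'R', 'E', 'C', ' ' };
	const char *internal_name = "logrec";

	/* printf("%s", system_service) would read past the 8-byte array. */
	printf("service \"%.8s\" (%s)\n", system_service, internal_name);
	return 0;
}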