s390 updates for 6.15 merge window


Merge tag 's390-6.15-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 updates from Vasily Gorbik:

 - Add sorting of mcount locations at build time

 - Rework uaccess functions with C exception handling to shorten inline
   assembly size and enable full inlining. This yields near-optimal code
   for small constant copies with a ~40KB kernel size increase

 - Add support for a configurable STRICT_MM_TYPECHECKS, which allows
   generating better code when disabled, while still providing type
   checking for debug builds (first sketch after this list)

 - Optimize get_lowcore() for common callers with alternatives that
   nearly revert to the pre-relocated lowcore code, while also slightly
   reducing syscall entry and exit time (sketch below)

 - Convert MACHINE_HAS_* checks for single facility tests into cpu_has_*
   style macros that call test_facility() (sketch below), and for
   features with additional conditions, add a new ALT_TYPE_FEATURE
   alternative to
   provide a static branch via alternative patching. Also, move machine
   feature detection to the decompressor for early patching and add
   debugging functionality to easily show which alternatives are patched

 - Add exception table support to early boot / startup code to get rid
   of the open-coded exception handling (sketch below)

 - Use asm_inline for all inline assemblies with EX_TABLE or ALTERNATIVE
   to ensure correct inlining and unrolling decisions

 - Remove 2k page table leftovers now that s390 has been switched to
   always allocate 4k page tables

 - Split kfence pool into 4k mappings in arch_kfence_init_pool() and
   remove the architecture-specific kfence_split_mapping() (sketch below)

 - Use READ_ONCE_NOCHECK() in regs_get_kernel_stack_nth() to silence
   spurious KASAN warnings from opportunistic ftrace argument tracing
   (sketch below)

 - Force __atomic_add_const() variants on s390 to always return void,
   ensuring compile errors for improper usage (example below)

 - Remove s390's ioremap_wt() and pgprot_writethrough() due to
   mismatched semantics and lack of known users, relying on asm-generic
   fallbacks

 - Signal eventfd in vfio-ap to notify userspace when the guest AP
   configuration changes, including during mdev removal

 - Convert mdev_types from an array to a pointer in vfio-ccw and vfio-ap
   drivers to avoid fake flex array confusion

 - Clean up trap code

 - Remove references to the outdated linux390@de.ibm.com address

 - Various other small fixes and improvements all over the code
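
A few sketches for the items above; each is condensed from the hunks
further down on this page where noted, and anything not shown in those
hunks is an assumption, not the authoritative implementation.

STRICT_MM_TYPECHECKS (condensed from the arch/s390 page.h hunk below):
with the option enabled the page table types are distinct structs, with
it disabled they degrade to plain integers, which gives the compiler
more freedom:

  #ifdef CONFIG_STRICT_MM_TYPECHECKS
  /* distinct struct types: mixing up pte_t and pmd_t is a compile error */
  typedef struct { unsigned long pte; } pte_t;
  typedef struct { unsigned long pmd; } pmd_t;
  #else
  /* plain integers: no type safety, but slightly better generated code */
  typedef unsigned long pte_t;
  typedef unsigned long pmd_t;
  #endif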
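
get_lowcore() after the rework (condensed from the lowcore.h hunk
below): the default path is a plain load of lowcore address zero, and
alternative patching rewrites it only on machines that actually run
with a relocated lowcore:

  static __always_inline struct lowcore *get_lowcore(void)
  {
          struct lowcore *lc;

          if (__is_defined(__DECOMPRESSOR))
                  return NULL;
          asm_inline(
                  ALTERNATIVE("   lghi    %[lc],0",       /* default */
                              "   llilh   %[lc],%[alt]",  /* relocated */
                              ALT_FEATURE(MFEATURE_LOWCORE))
                  : [lc] "=d" (lc)
                  : [alt] "i" (LOWCORE_ALT_ADDRESS >> 16));
          return lc;
  }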
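
The MACHINE_HAS_* conversion (condensed from the cpufeature.h and
machine.h hunks below): single-facility checks become trivial wrappers
around test_facility(), while compound machine features are tested via
an asm goto whose branch is rewritten by alternative patching, i.e. a
static branch with no runtime flag load:

  /* single facility tests */
  #define cpu_has_edat1()         test_facility(8)
  #define cpu_has_nx()            test_facility(130)

  /* features with additional conditions, patched as a static branch */
  static __always_inline bool __test_machine_feature_constant(unsigned int nr)
  {
          asm goto(
                  ALTERNATIVE("brcl 15,%l[l_no]", "brcl 0,0",
                              ALT_FEATURE(%[nr]))
                  :
                  : [nr] "i" (nr)
                  :
                  : l_no);
          return true;
  l_no:
          return false;
  }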
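
Exception tables in the boot/startup code (condensed from the startup.c
hunk below) replace the old open-coded program-check PSW juggling. Note
the asm_inline qualifier from the related item: EX_TABLE expands to
section directives only, so asm_inline keeps the compiler's size
estimate of the asm honest:

  static void detect_diag9c(void)
  {
          unsigned int cpu = stap();
          int rc = 1;

          asm_inline volatile(
                  "       diag    %[cpu],%%r0,0x9c\n" /* may fault */
                  "0:     lhi     %[rc],0\n"
                  "1:\n"
                  EX_TABLE(0b, 1b)                    /* fault resumes at 1: */
                  : [rc] "+d" (rc)
                  : [cpu] "d" (cpu)
                  : "cc", "memory");
          if (!rc)
                  set_machine_feature(MFEATURE_DIAG9C);
  }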
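
The kfence change, reassembled from the interleaved kfence.h hunk below
into its apparent "after" state (a best-effort reading of the diff):
the pool is split into 4k mappings when the pool itself is initialized,
and the s390-private kfence_split_mapping() hook disappears:

  static __always_inline bool arch_kfence_init_pool(void)
  {
          unsigned long pool_pages = KFENCE_POOL_SIZE >> PAGE_SHIFT;

          set_memory_4k((unsigned long)__kfence_pool, pool_pages);
          return true;
  }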
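
The ptrace fix, sketched under the assumption that
regs_get_kernel_stack_nth() keeps its usual shape (the helpers are the
generic ones, not taken from a hunk on this page): ftrace argument
tracing may read stack slots that are poisoned from KASAN's point of
view, so the access deliberately skips the KASAN check:

  unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
                                          unsigned int n)
  {
          unsigned long addr;

          addr = kernel_stack_pointer(regs) + n * sizeof(long);
          if (!regs_within_kernel_stack(regs, addr))
                  return 0;
          /* opportunistic read: don't let KASAN flag it */
          return READ_ONCE_NOCHECK(*(unsigned long *)addr);
  }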
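
Why forcing __atomic_add_const() to return void helps (the macro change
is in the atomic-ops hunk below; the callers here are hypothetical):
the add-immediate instructions backing the _const variants cannot
return the old value, so the generic fallback now casts to void as
well, turning any caller that consumes a result into a build error in
every configuration:

  static atomic_t cnt;

  static void ok(void)
  {
          __atomic_add_const(1, &cnt.counter);        /* fine */
  }

  static int broken(void)
  {
          return __atomic_add_const(1, &cnt.counter); /* compile error:
                                                         void value used */
  }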

* tag 's390-6.15-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (78 commits)
  s390: Use inline qualifier for all EX_TABLE and ALTERNATIVE inline assemblies
  s390/kfence: Split kfence pool into 4k mappings in arch_kfence_init_pool()
  s390/ptrace: Avoid KASAN false positives in regs_get_kernel_stack_nth()
  s390/boot: Ignore vmlinux.map
  s390/sysctl: Remove "vm/allocate_pgste" sysctl
  s390: Remove 2k vs 4k page table leftovers
  s390/tlb: Use mm_has_pgste() instead of mm_alloc_pgste()
  s390/lowcore: Use lghi instead of llilh to clear register
  s390/syscall: Merge __do_syscall() and do_syscall()
  s390/spinlock: Implement SPINLOCK_LOCKVAL with inline assembly
  s390/smp: Implement raw_smp_processor_id() with inline assembly
  s390/current: Implement current with inline assembly
  s390/lowcore: Use inline qualifier for get_lowcore() inline assembly
  s390: Move s390 sysctls into their own file under arch/s390
  s390/syscall: Simplify syscall_get_arguments()
  s390/vfio-ap: Notify userspace that guest's AP config changed when mdev removed
  s390: Remove ioremap_wt() and pgprot_writethrough()
  s390/mm: Add configurable STRICT_MM_TYPECHECKS
  s390/mm: Convert pgste_val() into function
  s390/mm: Convert pgprot_val() into function
  ...
Linus Torvalds 2025-03-29 11:59:43 -07:00
commit f90f2145b2
141 changed files with 1446 additions and 1096 deletions


@ -70,7 +70,6 @@ config S390
imply IMA_SECURE_AND_OR_TRUSTED_BOOT
select ALTERNATE_USER_ADDRESS_SPACE
select ARCH_32BIT_USTAT_F_TINODE
select ARCH_BINFMT_ELF_STATE
select ARCH_CORRECT_STACKTRACE_ON_KRETPROBE
select ARCH_ENABLE_MEMORY_HOTPLUG if SPARSEMEM
select ARCH_ENABLE_MEMORY_HOTREMOVE
@ -183,6 +182,7 @@ config S390
select HAVE_ARCH_TRANSPARENT_HUGEPAGE
select HAVE_ARCH_VMAP_STACK
select HAVE_ASM_MODVERSIONS
select HAVE_BUILDTIME_MCOUNT_SORT
select HAVE_CMPXCHG_DOUBLE
select HAVE_CMPXCHG_LOCAL
select HAVE_DEBUG_KMEMLEAK


@ -13,6 +13,16 @@ config DEBUG_ENTRY
If unsure, say N.
config STRICT_MM_TYPECHECKS
bool "Strict Memory Management Type Checks"
depends on DEBUG_KERNEL
help
Enable strict type checking for memory management types like pte_t
and pmd_t. This generates slightly worse code and should be used
for debug builds.
If unsure, say N.
config CIO_INJECT
bool "CIO Inject interfaces"
depends on DEBUG_KERNEL && DEBUG_FS


@ -5,4 +5,5 @@ relocs.S
section_cmp.*
vmlinux
vmlinux.lds
vmlinux.map
vmlinux.syms


@ -26,7 +26,7 @@ CFLAGS_sclp_early_core.o += -I$(srctree)/drivers/s390/char
obj-y := head.o als.o startup.o physmem_info.o ipl_parm.o ipl_report.o vmem.o
obj-y += string.o ebcdic.o sclp_early_core.o mem.o ipl_vmparm.o cmdline.o
obj-y += version.o pgm_check_info.o ctype.o ipl_data.o relocs.o alternative.o
obj-y += version.o pgm_check.o ctype.o ipl_data.o relocs.o alternative.o
obj-y += uv.o printk.o
obj-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
obj-y += $(if $(CONFIG_KERNEL_UNCOMPRESSED),,decompressor.o) info.o


@ -1,3 +1,138 @@
// SPDX-License-Identifier: GPL-2.0
#define boot_fmt(fmt) "alt: " fmt
#include "boot.h"
#define a_debug boot_debug
#include "../kernel/alternative.c"
static void alt_debug_all(int type)
{
int i;
switch (type) {
case ALT_TYPE_FACILITY:
for (i = 0; i < ARRAY_SIZE(alt_debug.facilities); i++)
alt_debug.facilities[i] = -1UL;
break;
case ALT_TYPE_FEATURE:
for (i = 0; i < ARRAY_SIZE(alt_debug.mfeatures); i++)
alt_debug.mfeatures[i] = -1UL;
break;
case ALT_TYPE_SPEC:
alt_debug.spec = 1;
break;
}
}
static void alt_debug_modify(int type, unsigned int nr, bool clear)
{
switch (type) {
case ALT_TYPE_FACILITY:
if (clear)
__clear_facility(nr, alt_debug.facilities);
else
__set_facility(nr, alt_debug.facilities);
break;
case ALT_TYPE_FEATURE:
if (clear)
__clear_machine_feature(nr, alt_debug.mfeatures);
else
__set_machine_feature(nr, alt_debug.mfeatures);
break;
}
}
static char *alt_debug_parse(int type, char *str)
{
unsigned long val, endval;
char *endp;
bool clear;
int i;
if (*str == ':') {
str++;
} else {
alt_debug_all(type);
return str;
}
clear = false;
if (*str == '!') {
alt_debug_all(type);
clear = true;
str++;
}
while (*str) {
val = simple_strtoull(str, &endp, 0);
if (str == endp)
break;
str = endp;
if (*str == '-') {
str++;
endval = simple_strtoull(str, &endp, 0);
if (str == endp)
break;
str = endp;
while (val <= endval) {
alt_debug_modify(type, val, clear);
val++;
}
} else {
alt_debug_modify(type, val, clear);
}
if (*str != ',')
break;
str++;
}
return str;
}
/*
* Use debug-alternative command line parameter for debugging:
* "debug-alternative"
* -> print debug message for every single alternative
*
* "debug-alternative=0;2"
* -> print debug message for all alternatives with type 0 and 2
*
* "debug-alternative=0:0-7"
* -> print debug message for all alternatives with type 0 and with
* facility numbers within the range of 0-7
* (if type 0 is ALT_TYPE_FACILITY)
*
* "debug-alternative=0:!8;1"
* -> print debug message for all alternatives with type 0, for all
* facility numbers, except facility 8, and in addition print all
* alternatives with type 1
*/
void alt_debug_setup(char *str)
{
unsigned long type;
char *endp;
int i;
if (!str) {
alt_debug_all(ALT_TYPE_FACILITY);
alt_debug_all(ALT_TYPE_FEATURE);
alt_debug_all(ALT_TYPE_SPEC);
return;
}
while (*str) {
type = simple_strtoull(str, &endp, 0);
if (str == endp)
break;
str = endp;
switch (type) {
case ALT_TYPE_FACILITY:
case ALT_TYPE_FEATURE:
str = alt_debug_parse(type, str);
break;
case ALT_TYPE_SPEC:
alt_debug_all(ALT_TYPE_SPEC);
break;
}
if (*str != ';')
break;
str++;
}
}


@ -11,11 +11,6 @@
#include <linux/printk.h>
#include <asm/physmem_info.h>
struct machine_info {
unsigned char has_edat1 : 1;
unsigned char has_edat2 : 1;
};
struct vmlinux_info {
unsigned long entry;
unsigned long image_size; /* does not include .bss */
@ -69,7 +64,8 @@ void parse_boot_command_line(void);
void verify_facilities(void);
void print_missing_facilities(void);
void sclp_early_setup_buffer(void);
void print_pgm_check_info(void);
void alt_debug_setup(char *str);
void do_pgm_check(struct pt_regs *regs);
unsigned long randomize_within_range(unsigned long size, unsigned long align,
unsigned long min, unsigned long max);
void setup_vmem(unsigned long kernel_start, unsigned long kernel_end, unsigned long asce_limit);


@ -254,8 +254,9 @@ SYM_CODE_START_LOCAL(startup_normal)
xc 0xf00(256),0xf00
larl %r13,.Lctl
lctlg %c0,%c15,0(%r13) # load control registers
stcke __LC_BOOT_CLOCK
mvc __LC_LAST_UPDATE_CLOCK(8),__LC_BOOT_CLOCK+1
larl %r13,tod_clock_base
stcke 0(%r13)
mvc __LC_LAST_UPDATE_CLOCK(8),1(%r13)
larl %r13,6f
spt 0(%r13)
mvc __LC_LAST_UPDATE_TIMER(8),0(%r13)
@ -292,12 +293,6 @@ SYM_CODE_END(startup_normal)
#include "head_kdump.S"
#
# This program check is active immediately after kernel start
# and until early_pgm_check_handler is set in kernel/early.c
# It simply saves general/control registers and psw in
# the save area and does disabled wait with a faulty address.
#
SYM_CODE_START_LOCAL(startup_pgm_check_handler)
stmg %r8,%r15,__LC_SAVE_AREA
la %r8,4095
@ -311,8 +306,18 @@ SYM_CODE_START_LOCAL(startup_pgm_check_handler)
oi __LC_RETURN_PSW+1,0x2 # set wait state bit
larl %r9,.Lold_psw_disabled_wait
stg %r9,__LC_PGM_NEW_PSW+8
larl %r15,_dump_info_stack_end-STACK_FRAME_OVERHEAD
brasl %r14,print_pgm_check_info
larl %r15,_dump_info_stack_end-(STACK_FRAME_OVERHEAD+__PT_SIZE)
la %r2,STACK_FRAME_OVERHEAD(%r15)
mvc __PT_PSW(16,%r2),__LC_PSW_SAVE_AREA-4095(%r8)
mvc __PT_R0(128,%r2),__LC_GPREGS_SAVE_AREA-4095(%r8)
mvc __PT_LAST_BREAK(8,%r2),__LC_PGM_LAST_BREAK
mvc __PT_INT_CODE(4,%r2),__LC_PGM_INT_CODE
brasl %r14,do_pgm_check
larl %r9,startup_pgm_check_handler
stg %r9,__LC_PGM_NEW_PSW+8
mvc __LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
lmg %r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
lpswe __LC_RETURN_PSW
.Lold_psw_disabled_wait:
la %r8,4095
lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r8)


@ -5,6 +5,7 @@
#include <linux/pgtable.h>
#include <asm/abs_lowcore.h>
#include <asm/page-states.h>
#include <asm/machine.h>
#include <asm/ebcdic.h>
#include <asm/sclp.h>
#include <asm/sections.h>
@ -34,29 +35,14 @@ int vmalloc_size_set;
static inline int __diag308(unsigned long subcode, void *addr)
{
unsigned long reg1, reg2;
union register_pair r1;
psw_t old;
union register_pair r1 = { .even = (unsigned long)addr, .odd = 0 };
r1.even = (unsigned long) addr;
r1.odd = 0;
asm volatile(
" mvc 0(16,%[psw_old]),0(%[psw_pgm])\n"
" epsw %[reg1],%[reg2]\n"
" st %[reg1],0(%[psw_pgm])\n"
" st %[reg2],4(%[psw_pgm])\n"
" larl %[reg1],1f\n"
" stg %[reg1],8(%[psw_pgm])\n"
asm_inline volatile(
" diag %[r1],%[subcode],0x308\n"
"1: mvc 0(16,%[psw_pgm]),0(%[psw_old])\n"
: [r1] "+&d" (r1.pair),
[reg1] "=&d" (reg1),
[reg2] "=&a" (reg2),
"+Q" (get_lowcore()->program_new_psw),
"=Q" (old)
: [subcode] "d" (subcode),
[psw_old] "a" (&old),
[psw_pgm] "a" (&get_lowcore()->program_new_psw)
"0:\n"
EX_TABLE(0b, 0b)
: [r1] "+d" (r1.pair)
: [subcode] "d" (subcode)
: "cc", "memory");
return r1.odd;
}
@ -295,6 +281,9 @@ void parse_boot_command_line(void)
if (!strcmp(param, "facilities") && val)
modify_fac_list(val);
if (!strcmp(param, "debug-alternative"))
alt_debug_setup(val);
if (!strcmp(param, "nokaslr"))
__kaslr_enabled = 0;
@ -312,7 +301,7 @@ void parse_boot_command_line(void)
}
#endif
if (!strcmp(param, "relocate_lowcore") && test_facility(193))
relocate_lowcore = 1;
set_machine_feature(MFEATURE_LOWCORE);
if (!strcmp(param, "earlyprintk"))
boot_earlyprintk = true;
if (!strcmp(param, "debug"))


@ -32,26 +32,49 @@ void print_stacktrace(unsigned long sp)
}
}
void print_pgm_check_info(void)
{
unsigned long *gpregs = (unsigned long *)get_lowcore()->gpregs_save_area;
struct psw_bits *psw = &psw_bits(get_lowcore()->psw_save_area);
extern struct exception_table_entry __start___ex_table[];
extern struct exception_table_entry __stop___ex_table[];
static inline unsigned long extable_insn(const struct exception_table_entry *x)
{
return (unsigned long)&x->insn + x->insn;
}
static bool ex_handler(struct pt_regs *regs)
{
const struct exception_table_entry *ex;
for (ex = __start___ex_table; ex < __stop___ex_table; ex++) {
if (extable_insn(ex) != regs->psw.addr)
continue;
if (ex->type != EX_TYPE_FIXUP)
return false;
regs->psw.addr = extable_fixup(ex);
return true;
}
return false;
}
void do_pgm_check(struct pt_regs *regs)
{
struct psw_bits *psw = &psw_bits(regs->psw);
unsigned long *gpregs = regs->gprs;
if (ex_handler(regs))
return;
if (bootdebug)
boot_rb_dump();
boot_emerg("Linux version %s\n", kernel_version);
if (!is_prot_virt_guest() && early_command_line[0])
boot_emerg("Kernel command line: %s\n", early_command_line);
boot_emerg("Kernel fault: interruption code %04x ilc:%d\n",
get_lowcore()->pgm_code, get_lowcore()->pgm_ilc >> 1);
regs->int_code & 0xffff, regs->int_code >> 17);
if (kaslr_enabled()) {
boot_emerg("Kernel random base: %lx\n", __kaslr_offset);
boot_emerg("Kernel random base phys: %lx\n", __kaslr_offset_phys);
}
boot_emerg("PSW : %016lx %016lx (%pS)\n",
get_lowcore()->psw_save_area.mask,
get_lowcore()->psw_save_area.addr,
(void *)get_lowcore()->psw_save_area.addr);
regs->psw.mask, regs->psw.addr, (void *)regs->psw.addr);
boot_emerg(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x P:%x AS:%x CC:%x PM:%x RI:%x EA:%x\n",
psw->per, psw->dat, psw->io, psw->ext, psw->key, psw->mcheck,
psw->wait, psw->pstate, psw->as, psw->cc, psw->pm, psw->ri, psw->eaba);
@ -59,8 +82,11 @@ void print_pgm_check_info(void)
boot_emerg(" %016lx %016lx %016lx %016lx\n", gpregs[4], gpregs[5], gpregs[6], gpregs[7]);
boot_emerg(" %016lx %016lx %016lx %016lx\n", gpregs[8], gpregs[9], gpregs[10], gpregs[11]);
boot_emerg(" %016lx %016lx %016lx %016lx\n", gpregs[12], gpregs[13], gpregs[14], gpregs[15]);
print_stacktrace(get_lowcore()->gpregs_save_area[15]);
print_stacktrace(gpregs[15]);
boot_emerg("Last Breaking-Event-Address:\n");
boot_emerg(" [<%016lx>] %pS\n", (unsigned long)get_lowcore()->pgm_last_break,
(void *)get_lowcore()->pgm_last_break);
boot_emerg(" [<%016lx>] %pS\n", regs->last_break, (void *)regs->last_break);
/* Convert to disabled wait PSW */
psw->io = 0;
psw->ext = 0;
psw->wait = 1;
}


@ -59,36 +59,22 @@ void add_physmem_online_range(u64 start, u64 end)
static int __diag260(unsigned long rx1, unsigned long rx2)
{
unsigned long reg1, reg2, ry;
union register_pair rx;
int cc, exception;
psw_t old;
unsigned long ry;
rx.even = rx1;
rx.odd = rx2;
ry = 0x10; /* storage configuration */
exception = 1;
asm volatile(
" mvc 0(16,%[psw_old]),0(%[psw_pgm])\n"
" epsw %[reg1],%[reg2]\n"
" st %[reg1],0(%[psw_pgm])\n"
" st %[reg2],4(%[psw_pgm])\n"
" larl %[reg1],1f\n"
" stg %[reg1],8(%[psw_pgm])\n"
asm_inline volatile(
" diag %[rx],%[ry],0x260\n"
" lhi %[exc],0\n"
"1: mvc 0(16,%[psw_pgm]),0(%[psw_old])\n"
"0: lhi %[exc],0\n"
"1:\n"
CC_IPM(cc)
: CC_OUT(cc, cc),
[exc] "+d" (exception),
[reg1] "=&d" (reg1),
[reg2] "=&a" (reg2),
[ry] "+&d" (ry),
"+Q" (get_lowcore()->program_new_psw),
"=Q" (old)
: [rx] "d" (rx.pair),
[psw_old] "a" (&old),
[psw_pgm] "a" (&get_lowcore()->program_new_psw)
EX_TABLE(0b, 1b)
: CC_OUT(cc, cc), [exc] "+d" (exception), [ry] "+d" (ry)
: [rx] "d" (rx.pair)
: CC_CLOBBER_LIST("memory"));
cc = exception ? -1 : CC_TRANSFORM(cc);
return cc == 0 ? ry : -1;
@ -118,29 +104,15 @@ static int diag260(void)
static int diag500_storage_limit(unsigned long *max_physmem_end)
{
unsigned long storage_limit;
unsigned long reg1, reg2;
psw_t old;
asm volatile(
" mvc 0(16,%[psw_old]),0(%[psw_pgm])\n"
" epsw %[reg1],%[reg2]\n"
" st %[reg1],0(%[psw_pgm])\n"
" st %[reg2],4(%[psw_pgm])\n"
" larl %[reg1],1f\n"
" stg %[reg1],8(%[psw_pgm])\n"
" lghi 1,%[subcode]\n"
" lghi 2,0\n"
" diag 2,4,0x500\n"
"1: mvc 0(16,%[psw_pgm]),0(%[psw_old])\n"
" lgr %[slimit],2\n"
: [reg1] "=&d" (reg1),
[reg2] "=&a" (reg2),
[slimit] "=d" (storage_limit),
"=Q" (get_lowcore()->program_new_psw),
"=Q" (old)
: [psw_old] "a" (&old),
[psw_pgm] "a" (&get_lowcore()->program_new_psw),
[subcode] "i" (DIAG500_SC_STOR_LIMIT)
asm_inline volatile(
" lghi %%r1,%[subcode]\n"
" lghi %%r2,0\n"
" diag %%r2,%%r4,0x500\n"
"0: lgr %[slimit],%%r2\n"
EX_TABLE(0b, 0b)
: [slimit] "=d" (storage_limit)
: [subcode] "i" (DIAG500_SC_STOR_LIMIT)
: "memory", "1", "2");
if (!storage_limit)
return -EINVAL;
@ -151,31 +123,17 @@ static int diag500_storage_limit(unsigned long *max_physmem_end)
static int tprot(unsigned long addr)
{
unsigned long reg1, reg2;
int cc, exception;
psw_t old;
exception = 1;
asm volatile(
" mvc 0(16,%[psw_old]),0(%[psw_pgm])\n"
" epsw %[reg1],%[reg2]\n"
" st %[reg1],0(%[psw_pgm])\n"
" st %[reg2],4(%[psw_pgm])\n"
" larl %[reg1],1f\n"
" stg %[reg1],8(%[psw_pgm])\n"
asm_inline volatile(
" tprot 0(%[addr]),0\n"
" lhi %[exc],0\n"
"1: mvc 0(16,%[psw_pgm]),0(%[psw_old])\n"
"0: lhi %[exc],0\n"
"1:\n"
CC_IPM(cc)
: CC_OUT(cc, cc),
[exc] "+d" (exception),
[reg1] "=&d" (reg1),
[reg2] "=&a" (reg2),
"=Q" (get_lowcore()->program_new_psw.addr),
"=Q" (old)
: [psw_old] "a" (&old),
[psw_pgm] "a" (&get_lowcore()->program_new_psw),
[addr] "a" (addr)
EX_TABLE(0b, 1b)
: CC_OUT(cc, cc), [exc] "+d" (exception)
: [addr] "a" (addr)
: CC_CLOBBER_LIST("memory"));
cc = exception ? -EFAULT : CC_TRANSFORM(cc);
return cc;


@ -8,6 +8,7 @@
#include <asm/sections.h>
#include <asm/lowcore.h>
#include <asm/setup.h>
#include <asm/timex.h>
#include <asm/sclp.h>
#include <asm/uv.h>
#include "boot.h"
@ -199,8 +200,7 @@ static void boot_console_earlyprintk(const char *buf)
static char *add_timestamp(char *buf)
{
#ifdef CONFIG_PRINTK_TIME
union tod_clock *boot_clock = (union tod_clock *)&get_lowcore()->boot_clock;
unsigned long ns = tod_to_ns(get_tod_clock() - boot_clock->tod);
unsigned long ns = tod_to_ns(__get_tod_clock_monotonic());
char ts[MAX_NUMLEN];
*buf++ = '[';


@ -7,8 +7,11 @@
#include <asm/extmem.h>
#include <asm/sections.h>
#include <asm/maccess.h>
#include <asm/machine.h>
#include <asm/sysinfo.h>
#include <asm/cpu_mf.h>
#include <asm/setup.h>
#include <asm/timex.h>
#include <asm/kasan.h>
#include <asm/kexec.h>
#include <asm/sclp.h>
@ -34,13 +37,12 @@ unsigned long __bootdata_preserved(max_mappable);
unsigned long __bootdata_preserved(page_noexec_mask);
unsigned long __bootdata_preserved(segment_noexec_mask);
unsigned long __bootdata_preserved(region_noexec_mask);
int __bootdata_preserved(relocate_lowcore);
union tod_clock __bootdata_preserved(tod_clock_base);
u64 __bootdata_preserved(clock_comparator_max) = -1UL;
u64 __bootdata_preserved(stfle_fac_list[16]);
struct oldmem_data __bootdata_preserved(oldmem_data);
struct machine_info machine;
void error(char *x)
{
boot_emerg("%s\n", x);
@ -48,50 +50,101 @@ void error(char *x)
disabled_wait();
}
static char sysinfo_page[PAGE_SIZE] __aligned(PAGE_SIZE);
static void detect_machine_type(void)
{
struct sysinfo_3_2_2 *vmms = (struct sysinfo_3_2_2 *)&sysinfo_page;
/* Check current-configuration-level */
if (stsi(NULL, 0, 0, 0) <= 2) {
set_machine_feature(MFEATURE_LPAR);
return;
}
/* Get virtual-machine cpu information. */
if (stsi(vmms, 3, 2, 2) || !vmms->count)
return;
/* Detect known hypervisors */
if (!memcmp(vmms->vm[0].cpi, "\xd2\xe5\xd4", 3))
set_machine_feature(MFEATURE_KVM);
else if (!memcmp(vmms->vm[0].cpi, "\xa9\x61\xe5\xd4", 4))
set_machine_feature(MFEATURE_VM);
}
static void detect_diag9c(void)
{
unsigned int cpu;
int rc = 1;
cpu = stap();
asm_inline volatile(
" diag %[cpu],%%r0,0x9c\n"
"0: lhi %[rc],0\n"
"1:\n"
EX_TABLE(0b, 1b)
: [rc] "+d" (rc)
: [cpu] "d" (cpu)
: "cc", "memory");
if (!rc)
set_machine_feature(MFEATURE_DIAG9C);
}
static void reset_tod_clock(void)
{
union tod_clock clk;
if (store_tod_clock_ext_cc(&clk) == 0)
return;
/* TOD clock not running. Set the clock to Unix Epoch. */
if (set_tod_clock(TOD_UNIX_EPOCH) || store_tod_clock_ext_cc(&clk))
disabled_wait();
memset(&tod_clock_base, 0, sizeof(tod_clock_base));
tod_clock_base.tod = TOD_UNIX_EPOCH;
get_lowcore()->last_update_clock = TOD_UNIX_EPOCH;
}
static void detect_facilities(void)
{
if (test_facility(8)) {
machine.has_edat1 = 1;
if (cpu_has_edat1())
local_ctl_set_bit(0, CR0_EDAT_BIT);
}
if (test_facility(78))
machine.has_edat2 = 1;
page_noexec_mask = -1UL;
segment_noexec_mask = -1UL;
region_noexec_mask = -1UL;
if (!test_facility(130)) {
if (!cpu_has_nx()) {
page_noexec_mask &= ~_PAGE_NOEXEC;
segment_noexec_mask &= ~_SEGMENT_ENTRY_NOEXEC;
region_noexec_mask &= ~_REGION_ENTRY_NOEXEC;
}
if (IS_ENABLED(CONFIG_PCI) && test_facility(153))
set_machine_feature(MFEATURE_PCI_MIO);
reset_tod_clock();
if (test_facility(139) && (tod_clock_base.tod >> 63)) {
/* Enable signed clock comparator comparisons */
set_machine_feature(MFEATURE_SCC);
clock_comparator_max = -1UL >> 1;
local_ctl_set_bit(0, CR0_CLOCK_COMPARATOR_SIGN_BIT);
}
if (test_facility(50) && test_facility(73)) {
set_machine_feature(MFEATURE_TX);
local_ctl_set_bit(0, CR0_TRANSACTIONAL_EXECUTION_BIT);
}
if (cpu_has_vx())
local_ctl_set_bit(0, CR0_VECTOR_BIT);
}
static int cmma_test_essa(void)
{
unsigned long reg1, reg2, tmp = 0;
unsigned long tmp = 0;
int rc = 1;
psw_t old;
/* Test ESSA_GET_STATE */
asm volatile(
" mvc 0(16,%[psw_old]),0(%[psw_pgm])\n"
" epsw %[reg1],%[reg2]\n"
" st %[reg1],0(%[psw_pgm])\n"
" st %[reg2],4(%[psw_pgm])\n"
" larl %[reg1],1f\n"
" stg %[reg1],8(%[psw_pgm])\n"
asm_inline volatile(
" .insn rrf,0xb9ab0000,%[tmp],%[tmp],%[cmd],0\n"
" la %[rc],0\n"
"1: mvc 0(16,%[psw_pgm]),0(%[psw_old])\n"
: [reg1] "=&d" (reg1),
[reg2] "=&a" (reg2),
[rc] "+&d" (rc),
[tmp] "+&d" (tmp),
"+Q" (get_lowcore()->program_new_psw),
"=Q" (old)
: [psw_old] "a" (&old),
[psw_pgm] "a" (&get_lowcore()->program_new_psw),
[cmd] "i" (ESSA_GET_STATE)
"0: lhi %[rc],0\n"
"1:\n"
EX_TABLE(0b, 1b)
: [rc] "+d" (rc), [tmp] "+d" (tmp)
: [cmd] "i" (ESSA_GET_STATE)
: "cc", "memory");
return rc;
}
@ -462,7 +515,10 @@ void startup_kernel(void)
read_ipl_report();
sclp_early_read_info();
sclp_early_detect_machine_features();
detect_facilities();
detect_diag9c();
detect_machine_type();
cmma_init();
sanitize_prot_virt_host();
max_physmem_end = detect_max_physmem_end();


@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#define boot_fmt(fmt) "vmem: " fmt
#include <linux/cpufeature.h>
#include <linux/sched/task.h>
#include <linux/pgtable.h>
#include <linux/kasan.h>
@ -10,6 +11,7 @@
#include <asm/ctlreg.h>
#include <asm/physmem_info.h>
#include <asm/maccess.h>
#include <asm/machine.h>
#include <asm/abs_lowcore.h>
#include "decompressor.h"
#include "boot.h"
@ -314,7 +316,7 @@ static unsigned long try_get_large_pud_pa(pud_t *pu_dir, unsigned long addr, uns
{
unsigned long pa, size = end - addr;
if (!machine.has_edat2 || !large_page_mapping_allowed(mode) ||
if (!cpu_has_edat2() || !large_page_mapping_allowed(mode) ||
!IS_ALIGNED(addr, PUD_SIZE) || (size < PUD_SIZE))
return INVALID_PHYS_ADDR;
@ -330,7 +332,7 @@ static unsigned long try_get_large_pmd_pa(pmd_t *pm_dir, unsigned long addr, uns
{
unsigned long pa, size = end - addr;
if (!machine.has_edat1 || !large_page_mapping_allowed(mode) ||
if (!cpu_has_edat1() || !large_page_mapping_allowed(mode) ||
!IS_ALIGNED(addr, PMD_SIZE) || (size < PMD_SIZE))
return INVALID_PHYS_ADDR;
@ -516,7 +518,7 @@ void setup_vmem(unsigned long kernel_start, unsigned long kernel_end, unsigned l
__arch_set_page_dat((void *)swapper_pg_dir, 1UL << CRST_ALLOC_ORDER);
__arch_set_page_dat((void *)invalid_pg_dir, 1UL << CRST_ALLOC_ORDER);
if (relocate_lowcore)
if (machine_has_relocated_lowcore())
lowcore_address = LOWCORE_ALT_ADDRESS;
/*


@ -40,6 +40,7 @@ SECTIONS
*(.rodata.*)
_erodata = . ;
}
EXCEPTION_TABLE(16)
.got : {
*(.got)
}
@ -165,7 +166,6 @@ SECTIONS
/DISCARD/ : {
COMMON_DISCARDS
*(.eh_frame)
*(__ex_table)
*(*__ksymtab*)
*(___kcrctab*)
}


@ -885,12 +885,14 @@ CONFIG_USER_EVENTS=y
CONFIG_HIST_TRIGGERS=y
CONFIG_FTRACE_STARTUP_TEST=y
# CONFIG_EVENT_TRACE_STARTUP_TEST is not set
CONFIG_FTRACE_SORT_STARTUP_TEST=y
CONFIG_SAMPLES=y
CONFIG_SAMPLE_TRACE_PRINTK=m
CONFIG_SAMPLE_FTRACE_DIRECT=m
CONFIG_SAMPLE_FTRACE_DIRECT_MULTI=m
CONFIG_SAMPLE_FTRACE_OPS=m
CONFIG_DEBUG_ENTRY=y
CONFIG_STRICT_MM_TYPECHECKS=y
CONFIG_CIO_INJECT=y
CONFIG_KUNIT=m
CONFIG_KUNIT_DEBUGFS=y


@ -0,0 +1,2 @@
# Help: Enable strict memory management typechecks
CONFIG_STRICT_MM_TYPECHECKS=y


@ -9,6 +9,7 @@
#include <linux/slab.h>
#include <linux/cpu.h>
#include <asm/machine.h>
#include <asm/diag.h>
#include <asm/hypfs.h>
#include "hypfs.h"
@ -107,7 +108,7 @@ static struct hypfs_dbfs_file dbfs_file_0c = {
*/
int __init hypfs_diag0c_init(void)
{
if (!MACHINE_IS_VM)
if (!machine_is_vm())
return 0;
hypfs_dbfs_create_file(&dbfs_file_0c);
return 0;
@ -118,7 +119,7 @@ int __init hypfs_diag0c_init(void)
*/
void hypfs_diag0c_exit(void)
{
if (!MACHINE_IS_VM)
if (!machine_is_vm())
return;
hypfs_dbfs_remove_file(&dbfs_file_0c);
}


@ -16,6 +16,7 @@
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <asm/machine.h>
#include <asm/diag.h>
#include <asm/ebcdic.h>
#include "hypfs_diag.h"
@ -382,7 +383,7 @@ static void diag224_delete_name_table(void)
int __init __hypfs_diag_fs_init(void)
{
if (MACHINE_IS_LPAR)
if (machine_is_lpar())
return diag224_get_name_table();
return 0;
}


@ -11,6 +11,7 @@
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <asm/extable.h>
#include <asm/machine.h>
#include <asm/diag.h>
#include <asm/ebcdic.h>
#include <asm/timex.h>
@ -121,7 +122,7 @@ static struct hypfs_dbfs_file dbfs_file_2fc = {
int hypfs_vm_init(void)
{
if (!MACHINE_IS_VM)
if (!machine_is_vm())
return 0;
if (diag2fc(0, all_guests, NULL) > 0)
diag2fc_guest_query = all_guests;
@ -135,7 +136,7 @@ int hypfs_vm_init(void)
void hypfs_vm_exit(void)
{
if (!MACHINE_IS_VM)
if (!machine_is_vm())
return;
hypfs_dbfs_remove_file(&dbfs_file_2fc);
}


@ -24,6 +24,7 @@
#include <linux/kobject.h>
#include <linux/seq_file.h>
#include <linux/uio.h>
#include <asm/machine.h>
#include <asm/ebcdic.h>
#include "hypfs.h"
@ -184,7 +185,7 @@ static ssize_t hypfs_write_iter(struct kiocb *iocb, struct iov_iter *from)
goto out;
}
hypfs_delete_tree(sb->s_root);
if (MACHINE_IS_VM)
if (machine_is_vm())
rc = hypfs_vm_create_files(sb->s_root);
else
rc = hypfs_diag_create_files(sb->s_root);
@ -273,7 +274,7 @@ static int hypfs_fill_super(struct super_block *sb, struct fs_context *fc)
sb->s_root = root_dentry = d_make_root(root_inode);
if (!root_dentry)
return -ENOMEM;
if (MACHINE_IS_VM)
if (machine_is_vm())
rc = hypfs_vm_create_files(root_dentry);
else
rc = hypfs_diag_create_files(root_dentry);


@ -25,11 +25,4 @@ static inline void put_abs_lowcore(struct lowcore *lc)
put_cpu();
}
extern int relocate_lowcore;
static inline int have_relocated_lowcore(void)
{
return relocate_lowcore;
}
#endif /* _ASM_S390_ABS_LOWCORE_H */


@ -32,8 +32,8 @@
#define ALT_CTX_ALL (ALT_CTX_EARLY | ALT_CTX_LATE)
#define ALT_TYPE_FACILITY 0
#define ALT_TYPE_SPEC 1
#define ALT_TYPE_LOWCORE 2
#define ALT_TYPE_FEATURE 1
#define ALT_TYPE_SPEC 2
#define ALT_DATA_SHIFT 0
#define ALT_TYPE_SHIFT 20
@ -43,13 +43,14 @@
ALT_TYPE_FACILITY << ALT_TYPE_SHIFT | \
(facility) << ALT_DATA_SHIFT)
#define ALT_FEATURE(feature) (ALT_CTX_EARLY << ALT_CTX_SHIFT | \
ALT_TYPE_FEATURE << ALT_TYPE_SHIFT | \
(feature) << ALT_DATA_SHIFT)
#define ALT_SPEC(facility) (ALT_CTX_LATE << ALT_CTX_SHIFT | \
ALT_TYPE_SPEC << ALT_TYPE_SHIFT | \
(facility) << ALT_DATA_SHIFT)
#define ALT_LOWCORE (ALT_CTX_EARLY << ALT_CTX_SHIFT | \
ALT_TYPE_LOWCORE << ALT_TYPE_SHIFT)
#ifndef __ASSEMBLY__
#include <linux/types.h>


@ -9,6 +9,7 @@
#define _ASM_S390_APPLDATA_H
#include <linux/io.h>
#include <asm/machine.h>
#include <asm/diag.h>
#define APPLDATA_START_INTERVAL_REC 0x80
@ -48,7 +49,7 @@ static inline int appldata_asm(struct appldata_parameter_list *parm_list,
{
int ry;
if (!MACHINE_IS_VM)
if (!machine_is_vm())
return -EOPNOTSUPP;
parm_list->diag = 0xdc;
parm_list->function = fn;


@ -14,6 +14,8 @@
#define EX_TYPE_UA_LOAD_REGPAIR 6
#define EX_TYPE_ZEROPAD 7
#define EX_TYPE_FPC 8
#define EX_TYPE_UA_MVCOS_TO 9
#define EX_TYPE_UA_MVCOS_FROM 10
#define EX_DATA_REG_ERR_SHIFT 0
#define EX_DATA_REG_ERR GENMASK(3, 0)
@ -84,4 +86,10 @@
#define EX_TABLE_FPC(_fault, _target) \
__EX_TABLE(__ex_table, _fault, _target, EX_TYPE_FPC, __stringify(%%r0), __stringify(%%r0), 0)
#define EX_TABLE_UA_MVCOS_TO(_fault, _target) \
__EX_TABLE(__ex_table, _fault, _target, EX_TYPE_UA_MVCOS_TO, __stringify(%%r0), __stringify(%%r0), 0)
#define EX_TABLE_UA_MVCOS_FROM(_fault, _target) \
__EX_TABLE(__ex_table, _fault, _target, EX_TYPE_UA_MVCOS_FROM, __stringify(%%r0), __stringify(%%r0), 0)
#endif /* __ASM_EXTABLE_H */


@ -163,10 +163,10 @@ __ATOMIC64_OPS(__atomic64_xor, "xgr")
#undef __ATOMIC64_OPS
#define __atomic_add_const(val, ptr) __atomic_add(val, ptr)
#define __atomic_add_const_barrier(val, ptr) __atomic_add(val, ptr)
#define __atomic64_add_const(val, ptr) __atomic64_add(val, ptr)
#define __atomic64_add_const_barrier(val, ptr) __atomic64_add(val, ptr)
#define __atomic_add_const(val, ptr) ((void)__atomic_add(val, ptr))
#define __atomic_add_const_barrier(val, ptr) ((void)__atomic_add(val, ptr))
#define __atomic64_add_const(val, ptr) ((void)__atomic64_add(val, ptr))
#define __atomic64_add_const_barrier(val, ptr) ((void)__atomic64_add(val, ptr))
#endif /* MARCH_HAS_Z196_FEATURES */


@ -171,7 +171,7 @@ static inline int qctri(struct cpumf_ctr_info *info)
{
int rc = -EINVAL;
asm volatile (
asm_inline volatile (
"0: qctri %1\n"
"1: lhi %0,0\n"
"2:\n"
@ -185,7 +185,7 @@ static inline int lcctl(u64 ctl)
{
int cc;
asm volatile (
asm_inline volatile (
" lcctl %[ctl]\n"
CC_IPM(cc)
: CC_OUT(cc, cc)
@ -200,7 +200,7 @@ static inline int __ecctr(u64 ctr, u64 *content)
u64 _content;
int cc;
asm volatile (
asm_inline volatile (
" ecctr %[_content],%[ctr]\n"
CC_IPM(cc)
: CC_OUT(cc, cc), [_content] "=d" (_content)


@ -9,6 +9,8 @@
#ifndef __ASM_S390_CPUFEATURE_H
#define __ASM_S390_CPUFEATURE_H
#include <asm/facility.h>
enum {
S390_CPU_FEATURE_MSA,
S390_CPU_FEATURE_VXRS,
@ -20,4 +22,16 @@ enum {
int cpu_have_feature(unsigned int nr);
#define cpu_has_bear() test_facility(193)
#define cpu_has_edat1() test_facility(8)
#define cpu_has_edat2() test_facility(78)
#define cpu_has_gs() test_facility(133)
#define cpu_has_idte() test_facility(3)
#define cpu_has_nx() test_facility(130)
#define cpu_has_rdp() test_facility(194)
#define cpu_has_seq_insn() test_facility(85)
#define cpu_has_tlb_lc() test_facility(51)
#define cpu_has_topology() test_facility(11)
#define cpu_has_vx() test_facility(129)
#endif /* __ASM_S390_CPUFEATURE_H */


@ -11,9 +11,25 @@
#define _S390_CURRENT_H
#include <asm/lowcore.h>
#include <asm/machine.h>
struct task_struct;
#define current ((struct task_struct *const)get_lowcore()->current_task)
static __always_inline struct task_struct *get_current(void)
{
unsigned long ptr, lc_current;
lc_current = offsetof(struct lowcore, current_task);
asm_inline(
ALTERNATIVE(" lg %[ptr],%[offzero](%%r0)\n",
" lg %[ptr],%[offalt](%%r0)\n",
ALT_FEATURE(MFEATURE_LOWCORE))
: [ptr] "=d" (ptr)
: [offzero] "i" (lc_current),
[offalt] "i" (lc_current + LOWCORE_ALT_ADDRESS));
return (struct task_struct *)ptr;
}
#define current get_current()
#endif /* !(_S390_CURRENT_H) */


@ -66,7 +66,7 @@ static inline void diag10_range(unsigned long start_pfn, unsigned long num_pfn)
end_addr = pfn_to_phys(start_pfn + num_pfn - 1);
diag_stat_inc(DIAG_STAT_X010);
asm volatile(
asm_inline volatile(
"0: diag %0,%1,0x10\n"
"1: nopr %%r7\n"
EX_TABLE(0b, 1b)


@ -158,9 +158,6 @@ enum {
#define ELF_DATA ELFDATA2MSB
#define ELF_ARCH EM_S390
/* s390 specific phdr types */
#define PT_S390_PGSTE 0x70000000
/*
* ELF register definitions..
*/
@ -191,35 +188,6 @@ typedef s390_compat_regs compat_elf_gregset_t;
&& (x)->e_ident[EI_CLASS] == ELF_CLASS)
#define compat_start_thread start_thread31
struct arch_elf_state {
int rc;
};
#define INIT_ARCH_ELF_STATE { .rc = 0 }
#define arch_check_elf(ehdr, interp, interp_ehdr, state) (0)
#ifdef CONFIG_PGSTE
#define arch_elf_pt_proc(ehdr, phdr, elf, interp, state) \
({ \
struct arch_elf_state *_state = state; \
if ((phdr)->p_type == PT_S390_PGSTE && \
!page_table_allocate_pgste && \
!test_thread_flag(TIF_PGSTE) && \
!current->mm->context.alloc_pgste) { \
set_thread_flag(TIF_PGSTE); \
set_pt_regs_flag(task_pt_regs(current), \
PIF_EXECVE_PGSTE_RESTART); \
_state->rc = -EAGAIN; \
} \
_state->rc; \
})
#else
#define arch_elf_pt_proc(ehdr, phdr, elf, interp, state) \
({ \
(state)->rc; \
})
#endif
/* For SVR4/S390 the function pointer to be registered with `atexit` is
passed in R14. */
#define ELF_PLAT_INIT(_r, load_addr) \


@ -44,6 +44,7 @@
#ifndef _ASM_S390_FPU_H
#define _ASM_S390_FPU_H
#include <linux/cpufeature.h>
#include <linux/processor.h>
#include <linux/preempt.h>
#include <linux/string.h>
@ -51,12 +52,6 @@
#include <asm/sigcontext.h>
#include <asm/fpu-types.h>
#include <asm/fpu-insn.h>
#include <asm/facility.h>
static inline bool cpu_has_vx(void)
{
return likely(test_facility(129));
}
enum {
KERNEL_FPC_BIT = 0,


@ -9,12 +9,13 @@
#ifndef _ASM_S390_HUGETLB_H
#define _ASM_S390_HUGETLB_H
#include <linux/cpufeature.h>
#include <linux/pgtable.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <asm/page.h>
#define hugepages_supported() (MACHINE_HAS_EDAT1)
#define hugepages_supported() cpu_has_edat1()
#define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,


@ -34,8 +34,6 @@ void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr);
#define ioremap_wc(addr, size) \
ioremap_prot((addr), (size), pgprot_val(pgprot_writecombine(PAGE_KERNEL)))
#define ioremap_wt(addr, size) \
ioremap_prot((addr), (size), pgprot_val(pgprot_writethrough(PAGE_KERNEL)))
static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
{


@ -11,28 +11,17 @@
void __kernel_map_pages(struct page *page, int numpages, int enable);
static __always_inline bool arch_kfence_init_pool(void)
{
return true;
}
#define arch_kfence_test_address(addr) ((addr) & PAGE_MASK)
/*
* Do not split kfence pool to 4k mapping with arch_kfence_init_pool(),
* but earlier where page table allocations still happen with memblock.
* Reason is that arch_kfence_init_pool() gets called when the system
* is still in a limbo state - disabling and enabling bottom halves is
* not yet allowed, but that is what our page_table_alloc() would do.
*/
static __always_inline void kfence_split_mapping(void)
{
#ifdef CONFIG_KFENCE
unsigned long pool_pages = KFENCE_POOL_SIZE >> PAGE_SHIFT;
set_memory_4k((unsigned long)__kfence_pool, pool_pages);
#endif
return true;
}
#define arch_kfence_test_address(addr) ((addr) & PAGE_MASK)
static inline bool kfence_protect_page(unsigned long addr, bool protect)
{
__kernel_map_pages(virt_to_page((void *)addr), 1, !protect);


@ -10,6 +10,7 @@
#define _ASM_S390_LOWCORE_H
#include <linux/types.h>
#include <asm/machine.h>
#include <asm/ptrace.h>
#include <asm/ctlreg.h>
#include <asm/cpu.h>
@ -126,7 +127,7 @@ struct lowcore {
__u64 int_clock; /* 0x0318 */
__u8 pad_0x0320[0x0328-0x0320]; /* 0x0320 */
__u64 clock_comparator; /* 0x0328 */
__u64 boot_clock[2]; /* 0x0330 */
__u8 pad_0x0330[0x0340-0x0330]; /* 0x0330 */
/* Current process. */
__u64 current_task; /* 0x0340 */
@ -222,9 +223,12 @@ static __always_inline struct lowcore *get_lowcore(void)
if (__is_defined(__DECOMPRESSOR))
return NULL;
asm(ALTERNATIVE("llilh %[lc],0", "llilh %[lc],%[alt]", ALT_LOWCORE)
: [lc] "=d" (lc)
: [alt] "i" (LOWCORE_ALT_ADDRESS >> 16));
asm_inline(
ALTERNATIVE(" lghi %[lc],0",
" llilh %[lc],%[alt]",
ALT_FEATURE(MFEATURE_LOWCORE))
: [lc] "=d" (lc)
: [alt] "i" (LOWCORE_ALT_ADDRESS >> 16));
return lc;
}
@ -238,15 +242,15 @@ static inline void set_prefix(__u32 address)
#else /* __ASSEMBLY__ */
.macro GET_LC reg
ALTERNATIVE "llilh \reg,0", \
ALTERNATIVE "lghi \reg,0", \
__stringify(llilh \reg, LOWCORE_ALT_ADDRESS >> 16), \
ALT_LOWCORE
ALT_FEATURE(MFEATURE_LOWCORE)
.endm
.macro STMG_LC start, end, savearea
ALTERNATIVE "stmg \start, \end, \savearea", \
__stringify(stmg \start, \end, LOWCORE_ALT_ADDRESS + \savearea), \
ALT_LOWCORE
ALT_FEATURE(MFEATURE_LOWCORE)
.endm
#endif /* __ASSEMBLY__ */


@ -0,0 +1,103 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright IBM Corp. 2024
*/
#ifndef __ASM_S390_MACHINE_H
#define __ASM_S390_MACHINE_H
#include <linux/const.h>
#define MFEATURE_LOWCORE 0
#define MFEATURE_PCI_MIO 1
#define MFEATURE_SCC 2
#define MFEATURE_TLB_GUEST 3
#define MFEATURE_TX 4
#define MFEATURE_ESOP 5
#define MFEATURE_DIAG9C 6
#define MFEATURE_VM 7
#define MFEATURE_KVM 8
#define MFEATURE_LPAR 9
#ifndef __ASSEMBLY__
#include <linux/bitops.h>
#include <asm/alternative.h>
extern unsigned long machine_features[1];
#define MAX_MFEATURE_BIT (sizeof(machine_features) * BITS_PER_BYTE)
static inline void __set_machine_feature(unsigned int nr, unsigned long *mfeatures)
{
if (nr >= MAX_MFEATURE_BIT)
return;
__set_bit(nr, mfeatures);
}
static inline void set_machine_feature(unsigned int nr)
{
__set_machine_feature(nr, machine_features);
}
static inline void __clear_machine_feature(unsigned int nr, unsigned long *mfeatures)
{
if (nr >= MAX_MFEATURE_BIT)
return;
__clear_bit(nr, mfeatures);
}
static inline void clear_machine_feature(unsigned int nr)
{
__clear_machine_feature(nr, machine_features);
}
static bool __test_machine_feature(unsigned int nr, unsigned long *mfeatures)
{
if (nr >= MAX_MFEATURE_BIT)
return false;
return test_bit(nr, mfeatures);
}
static bool test_machine_feature(unsigned int nr)
{
return __test_machine_feature(nr, machine_features);
}
static __always_inline bool __test_machine_feature_constant(unsigned int nr)
{
asm goto(
ALTERNATIVE("brcl 15,%l[l_no]", "brcl 0,0", ALT_FEATURE(%[nr]))
:
: [nr] "i" (nr)
:
: l_no);
return true;
l_no:
return false;
}
#define DEFINE_MACHINE_HAS_FEATURE(name, feature) \
static __always_inline bool machine_has_##name(void) \
{ \
if (!__is_defined(__DECOMPRESSOR) && __builtin_constant_p(feature)) \
return __test_machine_feature_constant(feature); \
return test_machine_feature(feature); \
}
DEFINE_MACHINE_HAS_FEATURE(relocated_lowcore, MFEATURE_LOWCORE)
DEFINE_MACHINE_HAS_FEATURE(scc, MFEATURE_SCC)
DEFINE_MACHINE_HAS_FEATURE(tlb_guest, MFEATURE_TLB_GUEST)
DEFINE_MACHINE_HAS_FEATURE(tx, MFEATURE_TX)
DEFINE_MACHINE_HAS_FEATURE(esop, MFEATURE_ESOP)
DEFINE_MACHINE_HAS_FEATURE(diag9c, MFEATURE_DIAG9C)
DEFINE_MACHINE_HAS_FEATURE(vm, MFEATURE_VM)
DEFINE_MACHINE_HAS_FEATURE(kvm, MFEATURE_KVM)
DEFINE_MACHINE_HAS_FEATURE(lpar, MFEATURE_LPAR)
#define machine_is_vm machine_has_vm
#define machine_is_kvm machine_has_kvm
#define machine_is_lpar machine_has_lpar
#endif /* __ASSEMBLY__ */
#endif /* __ASM_S390_MACHINE_H */


@ -22,10 +22,7 @@ typedef struct {
* The following bitfields need a down_write on the mm
* semaphore when they are written to. As they are only
* written once, they can be read without a lock.
*
* The mmu context allocates 4K page tables.
*/
unsigned int alloc_pgste:1;
/* The mmu context uses extended page tables. */
unsigned int has_pgste:1;
/* The mmu context uses storage keys. */


@ -29,9 +29,6 @@ static inline int init_new_context(struct task_struct *tsk,
mm->context.gmap_asce = 0;
mm->context.flush_mm = 0;
#ifdef CONFIG_PGSTE
mm->context.alloc_pgste = page_table_allocate_pgste ||
test_thread_flag(TIF_PGSTE) ||
(current->mm && current->mm->context.alloc_pgste);
mm->context.has_pgste = 0;
mm->context.uses_skeys = 0;
mm->context.uses_cmm = 0;


@ -71,9 +71,11 @@ static inline void copy_page(void *to, void *from)
#define vma_alloc_zeroed_movable_folio(vma, vaddr) \
vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr)
/*
* These are used to make use of C type-checking..
*/
#ifdef CONFIG_STRICT_MM_TYPECHECKS
#define STRICT_MM_TYPECHECKS
#endif
#ifdef STRICT_MM_TYPECHECKS
typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct { unsigned long pgste; } pgste_t;
@ -82,43 +84,48 @@ typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pud; } pud_t;
typedef struct { unsigned long p4d; } p4d_t;
typedef struct { unsigned long pgd; } pgd_t;
#define DEFINE_PGVAL_FUNC(name) \
static __always_inline unsigned long name ## _val(name ## _t name) \
{ \
return name.name; \
}
#else /* STRICT_MM_TYPECHECKS */
typedef unsigned long pgprot_t;
typedef unsigned long pgste_t;
typedef unsigned long pte_t;
typedef unsigned long pmd_t;
typedef unsigned long pud_t;
typedef unsigned long p4d_t;
typedef unsigned long pgd_t;
#define DEFINE_PGVAL_FUNC(name) \
static __always_inline unsigned long name ## _val(name ## _t name) \
{ \
return name; \
}
#endif /* STRICT_MM_TYPECHECKS */
DEFINE_PGVAL_FUNC(pgprot)
DEFINE_PGVAL_FUNC(pgste)
DEFINE_PGVAL_FUNC(pte)
DEFINE_PGVAL_FUNC(pmd)
DEFINE_PGVAL_FUNC(pud)
DEFINE_PGVAL_FUNC(p4d)
DEFINE_PGVAL_FUNC(pgd)
typedef pte_t *pgtable_t;
#define pgprot_val(x) ((x).pgprot)
#define pgste_val(x) ((x).pgste)
static inline unsigned long pte_val(pte_t pte)
{
return pte.pte;
}
static inline unsigned long pmd_val(pmd_t pmd)
{
return pmd.pmd;
}
static inline unsigned long pud_val(pud_t pud)
{
return pud.pud;
}
static inline unsigned long p4d_val(p4d_t p4d)
{
return p4d.p4d;
}
static inline unsigned long pgd_val(pgd_t pgd)
{
return pgd.pgd;
}
#define __pgprot(x) ((pgprot_t) { (x) } )
#define __pgste(x) ((pgste_t) { (x) } )
#define __pte(x) ((pte_t) { (x) } )
#define __pmd(x) ((pmd_t) { (x) } )
#define __pud(x) ((pud_t) { (x) } )
#define __p4d(x) ((p4d_t) { (x) } )
#define __pgd(x) ((pgd_t) { (x) } )
#define __pgprot(x) ((pgprot_t) { (x) } )
static inline void page_set_storage_key(unsigned long addr,
unsigned char skey, int mapped)


@ -26,7 +26,6 @@ unsigned long *page_table_alloc(struct mm_struct *);
struct ptdesc *page_table_alloc_pgste(struct mm_struct *mm);
void page_table_free(struct mm_struct *, unsigned long *);
void page_table_free_pgste(struct ptdesc *ptdesc);
extern int page_table_allocate_pgste;
static inline void crst_table_init(unsigned long *crst, unsigned long entry)
{


@ -14,6 +14,7 @@
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/cpufeature.h>
#include <linux/page-flags.h>
#include <linux/radix-tree.h>
#include <linux/atomic.h>
@ -583,13 +584,14 @@ static inline int mm_is_protected(struct mm_struct *mm)
return 0;
}
static inline int mm_alloc_pgste(struct mm_struct *mm)
static inline pgste_t clear_pgste_bit(pgste_t pgste, unsigned long mask)
{
#ifdef CONFIG_PGSTE
if (unlikely(mm->context.alloc_pgste))
return 1;
#endif
return 0;
return __pgste(pgste_val(pgste) & ~mask);
}
static inline pgste_t set_pgste_bit(pgste_t pgste, unsigned long mask)
{
return __pgste(pgste_val(pgste) | mask);
}
static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
@ -1339,7 +1341,7 @@ static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
* PTE does not have _PAGE_PROTECT set, to avoid unnecessary overhead.
* A local RDP can be used to do the flush.
*/
if (MACHINE_HAS_RDP && !(pte_val(*ptep) & _PAGE_PROTECT))
if (cpu_has_rdp() && !(pte_val(*ptep) & _PAGE_PROTECT))
__ptep_rdp(address, ptep, 0, 0, 1);
}
#define flush_tlb_fix_spurious_fault flush_tlb_fix_spurious_fault
@ -1354,7 +1356,7 @@ static inline int ptep_set_access_flags(struct vm_area_struct *vma,
{
if (pte_same(*ptep, entry))
return 0;
if (MACHINE_HAS_RDP && !mm_has_pgste(vma->vm_mm) && pte_allow_rdp(*ptep, entry))
if (cpu_has_rdp() && !mm_has_pgste(vma->vm_mm) && pte_allow_rdp(*ptep, entry))
ptep_reset_dat_prot(vma->vm_mm, addr, ptep, entry);
else
ptep_xchg_direct(vma->vm_mm, addr, ptep, entry);
@ -1402,9 +1404,6 @@ void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr);
#define pgprot_writecombine pgprot_writecombine
pgprot_t pgprot_writecombine(pgprot_t prot);
#define pgprot_writethrough pgprot_writethrough
pgprot_t pgprot_writethrough(pgprot_t prot);
#define PFN_PTE_SHIFT PAGE_SHIFT
/*
@ -1890,7 +1889,7 @@ static inline int pmd_trans_huge(pmd_t pmd)
#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
return MACHINE_HAS_EDAT1 ? 1 : 0;
return cpu_has_edat1() ? 1 : 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */


@ -416,7 +416,11 @@ static __always_inline bool regs_irqs_disabled(struct pt_regs *regs)
static __always_inline void bpon(void)
{
asm volatile(ALTERNATIVE("nop", ".insn rrf,0xb2e80000,0,0,13,0", ALT_SPEC(82)));
asm_inline volatile(
ALTERNATIVE(" nop\n",
" .insn rrf,0xb2e80000,0,0,13,0\n",
ALT_SPEC(82))
);
}
#endif /* __ASSEMBLY__ */


@ -12,13 +12,11 @@
#include <asm/tpi.h>
#define PIF_SYSCALL 0 /* inside a system call */
#define PIF_EXECVE_PGSTE_RESTART 1 /* restart execve for PGSTE binaries */
#define PIF_SYSCALL_RET_SET 2 /* return value was set via ptrace */
#define PIF_GUEST_FAULT 3 /* indicates program check in sie64a */
#define PIF_FTRACE_FULL_REGS 4 /* all register contents valid (ftrace) */
#define _PIF_SYSCALL BIT(PIF_SYSCALL)
#define _PIF_EXECVE_PGSTE_RESTART BIT(PIF_EXECVE_PGSTE_RESTART)
#define _PIF_SYSCALL_RET_SET BIT(PIF_SYSCALL_RET_SET)
#define _PIF_GUEST_FAULT BIT(PIF_GUEST_FAULT)
#define _PIF_FTRACE_FULL_REGS BIT(PIF_FTRACE_FULL_REGS)


@ -168,6 +168,7 @@ int sclp_early_read_storage_info(void);
int sclp_early_get_core_info(struct sclp_core_info *info);
void sclp_early_get_ipl_info(struct sclp_ipl_info *info);
void sclp_early_detect(void);
void sclp_early_detect_machine_features(void);
void sclp_early_printk(const char *s);
void __sclp_early_printk(const char *s, unsigned int len);
void sclp_emergency_printk(const char *s);


@ -13,28 +13,6 @@
#define PARMAREA 0x10400
#define COMMAND_LINE_SIZE CONFIG_COMMAND_LINE_SIZE
/*
* Machine features detected in early.c
*/
#define MACHINE_FLAG_VM BIT(0)
#define MACHINE_FLAG_KVM BIT(1)
#define MACHINE_FLAG_LPAR BIT(2)
#define MACHINE_FLAG_DIAG9C BIT(3)
#define MACHINE_FLAG_ESOP BIT(4)
#define MACHINE_FLAG_IDTE BIT(5)
#define MACHINE_FLAG_EDAT1 BIT(7)
#define MACHINE_FLAG_EDAT2 BIT(8)
#define MACHINE_FLAG_TOPOLOGY BIT(10)
#define MACHINE_FLAG_TE BIT(11)
#define MACHINE_FLAG_TLB_LC BIT(12)
#define MACHINE_FLAG_TLB_GUEST BIT(14)
#define MACHINE_FLAG_NX BIT(15)
#define MACHINE_FLAG_GS BIT(16)
#define MACHINE_FLAG_SCC BIT(17)
#define MACHINE_FLAG_PCI_MIO BIT(18)
#define MACHINE_FLAG_RDP BIT(19)
#define MACHINE_FLAG_SEQ_INSN BIT(20)
#define LPP_MAGIC BIT(31)
#define LPP_PID_MASK _AC(0xffffffff, UL)
@ -78,26 +56,6 @@ extern unsigned long max_mappable;
/* The Write Back bit position in the physaddr is given by the SLPC PCI */
extern unsigned long mio_wb_bit_mask;
#define MACHINE_IS_VM (get_lowcore()->machine_flags & MACHINE_FLAG_VM)
#define MACHINE_IS_KVM (get_lowcore()->machine_flags & MACHINE_FLAG_KVM)
#define MACHINE_IS_LPAR (get_lowcore()->machine_flags & MACHINE_FLAG_LPAR)
#define MACHINE_HAS_DIAG9C (get_lowcore()->machine_flags & MACHINE_FLAG_DIAG9C)
#define MACHINE_HAS_ESOP (get_lowcore()->machine_flags & MACHINE_FLAG_ESOP)
#define MACHINE_HAS_IDTE (get_lowcore()->machine_flags & MACHINE_FLAG_IDTE)
#define MACHINE_HAS_EDAT1 (get_lowcore()->machine_flags & MACHINE_FLAG_EDAT1)
#define MACHINE_HAS_EDAT2 (get_lowcore()->machine_flags & MACHINE_FLAG_EDAT2)
#define MACHINE_HAS_TOPOLOGY (get_lowcore()->machine_flags & MACHINE_FLAG_TOPOLOGY)
#define MACHINE_HAS_TE (get_lowcore()->machine_flags & MACHINE_FLAG_TE)
#define MACHINE_HAS_TLB_LC (get_lowcore()->machine_flags & MACHINE_FLAG_TLB_LC)
#define MACHINE_HAS_TLB_GUEST (get_lowcore()->machine_flags & MACHINE_FLAG_TLB_GUEST)
#define MACHINE_HAS_NX (get_lowcore()->machine_flags & MACHINE_FLAG_NX)
#define MACHINE_HAS_GS (get_lowcore()->machine_flags & MACHINE_FLAG_GS)
#define MACHINE_HAS_SCC (get_lowcore()->machine_flags & MACHINE_FLAG_SCC)
#define MACHINE_HAS_PCI_MIO (get_lowcore()->machine_flags & MACHINE_FLAG_PCI_MIO)
#define MACHINE_HAS_RDP (get_lowcore()->machine_flags & MACHINE_FLAG_RDP)
#define MACHINE_HAS_SEQ_INSN (get_lowcore()->machine_flags & MACHINE_FLAG_SEQ_INSN)
/*
* Console mode. Override with conmode=
*/


@ -7,11 +7,29 @@
#ifndef __ASM_SMP_H
#define __ASM_SMP_H
#include <asm/sigp.h>
#include <asm/lowcore.h>
#include <asm/processor.h>
#include <asm/lowcore.h>
#include <asm/machine.h>
#include <asm/sigp.h>
static __always_inline unsigned int raw_smp_processor_id(void)
{
unsigned long lc_cpu_nr;
unsigned int cpu;
BUILD_BUG_ON(sizeof_field(struct lowcore, cpu_nr) != sizeof(cpu));
lc_cpu_nr = offsetof(struct lowcore, cpu_nr);
asm_inline(
ALTERNATIVE(" ly %[cpu],%[offzero](%%r0)\n",
" ly %[cpu],%[offalt](%%r0)\n",
ALT_FEATURE(MFEATURE_LOWCORE))
: [cpu] "=d" (cpu)
: [offzero] "i" (lc_cpu_nr),
[offalt] "i" (lc_cpu_nr + LOWCORE_ALT_ADDRESS),
"m" (((struct lowcore *)0)->cpu_nr));
return cpu;
}
#define raw_smp_processor_id() (get_lowcore()->cpu_nr)
#define arch_scale_cpu_capacity smp_cpu_get_capacity
extern struct mutex smp_cpu_state_mutex;


@ -16,7 +16,23 @@
#include <asm/processor.h>
#include <asm/alternative.h>
#define SPINLOCK_LOCKVAL (get_lowcore()->spinlock_lockval)
static __always_inline unsigned int spinlock_lockval(void)
{
unsigned long lc_lockval;
unsigned int lockval;
BUILD_BUG_ON(sizeof_field(struct lowcore, spinlock_lockval) != sizeof(lockval));
lc_lockval = offsetof(struct lowcore, spinlock_lockval);
asm_inline(
ALTERNATIVE(" ly %[lockval],%[offzero](%%r0)\n",
" ly %[lockval],%[offalt](%%r0)\n",
ALT_FEATURE(MFEATURE_LOWCORE))
: [lockval] "=d" (lockval)
: [offzero] "i" (lc_lockval),
[offalt] "i" (lc_lockval + LOWCORE_ALT_ADDRESS),
"m" (((struct lowcore *)0)->spinlock_lockval));
return lockval;
}
extern int spin_retry;
@ -60,7 +76,7 @@ static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
int old = 0;
barrier();
return likely(arch_try_cmpxchg(&lp->lock, &old, SPINLOCK_LOCKVAL));
return likely(arch_try_cmpxchg(&lp->lock, &old, spinlock_lockval()));
}
static inline void arch_spin_lock(arch_spinlock_t *lp)


@ -65,15 +65,13 @@ static inline void syscall_get_arguments(struct task_struct *task,
unsigned long *args)
{
unsigned long mask = -1UL;
unsigned int n = 6;
#ifdef CONFIG_COMPAT
if (test_tsk_thread_flag(task, TIF_31BIT))
mask = 0xffffffff;
#endif
while (n-- > 0)
if (n > 0)
args[n] = regs->gprs[2 + n] & mask;
for (int i = 1; i < 6; i++)
args[i] = regs->gprs[2 + i] & mask;
args[0] = regs->orig_gpr2 & mask;
}


@ -11,8 +11,34 @@
#ifndef __ASM_S390_SYSINFO_H
#define __ASM_S390_SYSINFO_H
#include <asm/bitsperlong.h>
#include <linux/uuid.h>
#include <asm/bitsperlong.h>
#include <asm/asm.h>
/*
* stsi - store system information
*
* Returns the current configuration level if function code 0 was specified.
* Otherwise returns 0 on success or a negative value on error.
*/
static inline int stsi(void *sysinfo, int fc, int sel1, int sel2)
{
int r0 = (fc << 28) | sel1;
int cc;
asm volatile(
" lr %%r0,%[r0]\n"
" lr %%r1,%[r1]\n"
" stsi %[sysinfo]\n"
" lr %[r0],%%r0\n"
CC_IPM(cc)
: CC_OUT(cc, cc), [r0] "+d" (r0), [sysinfo] "=Q" (*(char *)sysinfo)
: [r1] "d" (sel2)
: CC_CLOBBER_LIST("0", "1", "memory"));
if (cc == 3)
return -EOPNOTSUPP;
return fc ? 0 : (unsigned int)r0 >> 28;
}
struct sysinfo_1_1_1 {
unsigned char p:1;


@ -67,7 +67,6 @@ void arch_setup_new_exec(void);
#define TIF_NEED_RESCHED_LAZY 3 /* lazy rescheduling needed */
#define TIF_UPROBE 4 /* breakpointed or single-stepping */
#define TIF_PATCH_PENDING 5 /* pending live patching update */
#define TIF_PGSTE 6 /* New mm's will use 4K page tables */
#define TIF_NOTIFY_SIGNAL 7 /* signal notifications exist */
#define TIF_GUARDED_STORAGE 8 /* load guarded storage control block */
#define TIF_ISOLATE_BP_GUEST 9 /* Run KVM guests with isolated BP */
@ -89,7 +88,6 @@ void arch_setup_new_exec(void);
#define _TIF_NEED_RESCHED_LAZY BIT(TIF_NEED_RESCHED_LAZY)
#define _TIF_UPROBE BIT(TIF_UPROBE)
#define _TIF_PATCH_PENDING BIT(TIF_PATCH_PENDING)
#define _TIF_PGSTE BIT(TIF_PGSTE)
#define _TIF_NOTIFY_SIGNAL BIT(TIF_NOTIFY_SIGNAL)
#define _TIF_GUARDED_STORAGE BIT(TIF_GUARDED_STORAGE)
#define _TIF_ISOLATE_BP_GUEST BIT(TIF_ISOLATE_BP_GUEST)


@ -13,6 +13,7 @@
#include <linux/preempt.h>
#include <linux/time64.h>
#include <asm/lowcore.h>
#include <asm/machine.h>
#include <asm/asm.h>
/* The value of the TOD clock for 1.1.1970. */
@ -267,7 +268,7 @@ static __always_inline u128 eitod_to_ns(u128 todval)
*/
static inline int tod_after(unsigned long a, unsigned long b)
{
if (MACHINE_HAS_SCC)
if (machine_has_scc())
return (long) a > (long) b;
return a > b;
}
@ -281,7 +282,7 @@ static inline int tod_after(unsigned long a, unsigned long b)
*/
static inline int tod_after_eq(unsigned long a, unsigned long b)
{
if (MACHINE_HAS_SCC)
if (machine_has_scc())
return (long) a >= (long) b;
return a >= b;
}


@ -84,7 +84,7 @@ static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
tlb->mm->context.flush_mm = 1;
tlb->freed_tables = 1;
tlb->cleared_pmds = 1;
if (mm_alloc_pgste(tlb->mm))
if (mm_has_pgste(tlb->mm))
gmap_unlink(tlb->mm, (unsigned long *)pte, address);
tlb_remove_ptdesc(tlb, virt_to_ptdesc(pte));
}


@ -2,9 +2,11 @@
#ifndef _S390_TLBFLUSH_H
#define _S390_TLBFLUSH_H
#include <linux/cpufeature.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/processor.h>
#include <asm/machine.h>
/*
* Flush all TLB entries on the local CPU.
@ -22,7 +24,7 @@ static inline void __tlb_flush_idte(unsigned long asce)
unsigned long opt;
opt = IDTE_PTOA;
if (MACHINE_HAS_TLB_GUEST)
if (machine_has_tlb_guest())
opt |= IDTE_GUEST_ASCE;
/* Global TLB flush for the mm */
asm volatile("idte 0,%1,%0" : : "a" (opt), "a" (asce) : "cc");
@ -52,7 +54,7 @@ static inline void __tlb_flush_mm(struct mm_struct *mm)
cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
barrier();
gmap_asce = READ_ONCE(mm->context.gmap_asce);
if (MACHINE_HAS_IDTE && gmap_asce != -1UL) {
if (cpu_has_idte() && gmap_asce != -1UL) {
if (gmap_asce)
__tlb_flush_idte(gmap_asce);
__tlb_flush_idte(mm->context.asce);
@ -66,7 +68,7 @@ static inline void __tlb_flush_mm(struct mm_struct *mm)
static inline void __tlb_flush_kernel(void)
{
if (MACHINE_HAS_IDTE)
if (cpu_has_idte())
__tlb_flush_idte(init_mm.context.asce);
else
__tlb_flush_global();

View File

@ -13,6 +13,7 @@
/*
* User space memory access functions
*/
#include <linux/pgtable.h>
#include <asm/asm-extable.h>
#include <asm/processor.h>
#include <asm/extable.h>
@ -22,116 +23,69 @@
void debug_user_asce(int exit);
union oac {
unsigned int val;
struct {
struct {
unsigned short key : 4;
unsigned short : 4;
unsigned short as : 2;
unsigned short : 4;
unsigned short k : 1;
unsigned short a : 1;
} oac1;
struct {
unsigned short key : 4;
unsigned short : 4;
unsigned short as : 2;
unsigned short : 4;
unsigned short k : 1;
unsigned short a : 1;
} oac2;
};
};
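A note on the immediate the reworked copy routines below use: per this bit layout, the 0x81 loaded into %r0 appears to select as = 0b10 (PSW_BITS_AS_SECONDARY) with the a bit set and no key checking. llilh places it in the high halfword (oac1, controlling the first/destination operand) for the to-user and clear paths, while lhi places it in the low halfword (oac2, the source operand) for the from-user path.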
#ifdef CONFIG_KMSAN
#define uaccess_kmsan_or_inline noinline __maybe_unused __no_sanitize_memory
#else
#define uaccess_kmsan_or_inline __always_inline
#endif
static __always_inline __must_check unsigned long
raw_copy_from_user_key(void *to, const void __user *from, unsigned long size, unsigned long key)
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER
static uaccess_kmsan_or_inline __must_check unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long size)
{
unsigned long rem;
union oac spec = {
.oac2.key = key,
.oac2.as = PSW_BITS_AS_SECONDARY,
.oac2.k = 1,
.oac2.a = 1,
};
unsigned long osize;
int cc;
asm_inline volatile(
" lr %%r0,%[spec]\n"
"0: mvcos 0(%[to]),0(%[from]),%[size]\n"
"1: jz 5f\n"
" algr %[size],%[val]\n"
" slgr %[from],%[val]\n"
" slgr %[to],%[val]\n"
" j 0b\n"
"2: la %[rem],4095(%[from])\n" /* rem = from + 4095 */
" nr %[rem],%[val]\n" /* rem = (from + 4095) & -4096 */
" slgr %[rem],%[from]\n"
" clgr %[size],%[rem]\n" /* copy crosses next page boundary? */
" jnh 6f\n"
"3: mvcos 0(%[to]),0(%[from]),%[rem]\n"
"4: slgr %[size],%[rem]\n"
" j 6f\n"
"5: lghi %[size],0\n"
"6:\n"
EX_TABLE(0b, 2b)
EX_TABLE(1b, 2b)
EX_TABLE(3b, 6b)
EX_TABLE(4b, 6b)
: [size] "+&a" (size), [from] "+&a" (from), [to] "+&a" (to), [rem] "=&a" (rem)
: [val] "a" (-4096UL), [spec] "d" (spec.val)
: "cc", "memory", "0");
return size;
while (1) {
osize = size;
asm_inline volatile(
" lhi %%r0,%[spec]\n"
"0: mvcos %[to],%[from],%[size]\n"
"1: nopr %%r7\n"
CC_IPM(cc)
EX_TABLE_UA_MVCOS_FROM(0b, 0b)
EX_TABLE_UA_MVCOS_FROM(1b, 0b)
: CC_OUT(cc, cc), [size] "+d" (size), [to] "=Q" (*(char *)to)
: [spec] "I" (0x81), [from] "Q" (*(const char __user *)from)
: CC_CLOBBER_LIST("memory", "0"));
if (__builtin_constant_p(osize) && osize <= 4096)
return osize - size;
if (likely(CC_TRANSFORM(cc) == 0))
return osize - size;
size -= 4096;
to += 4096;
from += 4096;
}
}
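The __builtin_constant_p() early exit above is what makes small constant copies fully inlinable; a sketch with placeholder names of what a typical caller gets:

/*
 * Sketch (placeholder struct and function names): for a size known at
 * compile time and <= 4096, the retry loop folds away and the copy
 * reduces to a single mvcos plus the condition-code check.
 */
struct example { unsigned long a, b; };

static int fetch_example(struct example *dst, const void __user *src)
{
	if (copy_from_user(dst, src, sizeof(*dst)))
		return -EFAULT;
	return 0;
}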
static __always_inline __must_check unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
static uaccess_kmsan_or_inline __must_check unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long size)
{
return raw_copy_from_user_key(to, from, n, 0);
}
unsigned long osize;
int cc;
static __always_inline __must_check unsigned long
raw_copy_to_user_key(void __user *to, const void *from, unsigned long size, unsigned long key)
{
unsigned long rem;
union oac spec = {
.oac1.key = key,
.oac1.as = PSW_BITS_AS_SECONDARY,
.oac1.k = 1,
.oac1.a = 1,
};
asm_inline volatile(
" lr %%r0,%[spec]\n"
"0: mvcos 0(%[to]),0(%[from]),%[size]\n"
"1: jz 5f\n"
" algr %[size],%[val]\n"
" slgr %[to],%[val]\n"
" slgr %[from],%[val]\n"
" j 0b\n"
"2: la %[rem],4095(%[to])\n" /* rem = to + 4095 */
" nr %[rem],%[val]\n" /* rem = (to + 4095) & -4096 */
" slgr %[rem],%[to]\n"
" clgr %[size],%[rem]\n" /* copy crosses next page boundary? */
" jnh 6f\n"
"3: mvcos 0(%[to]),0(%[from]),%[rem]\n"
"4: slgr %[size],%[rem]\n"
" j 6f\n"
"5: lghi %[size],0\n"
"6:\n"
EX_TABLE(0b, 2b)
EX_TABLE(1b, 2b)
EX_TABLE(3b, 6b)
EX_TABLE(4b, 6b)
: [size] "+&a" (size), [to] "+&a" (to), [from] "+&a" (from), [rem] "=&a" (rem)
: [val] "a" (-4096UL), [spec] "d" (spec.val)
: "cc", "memory", "0");
return size;
}
static __always_inline __must_check unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
return raw_copy_to_user_key(to, from, n, 0);
while (1) {
osize = size;
asm_inline volatile(
" llilh %%r0,%[spec]\n"
"0: mvcos %[to],%[from],%[size]\n"
"1: nopr %%r7\n"
CC_IPM(cc)
EX_TABLE_UA_MVCOS_TO(0b, 0b)
EX_TABLE_UA_MVCOS_TO(1b, 0b)
: CC_OUT(cc, cc), [size] "+d" (size), [to] "=Q" (*(char __user *)to)
: [spec] "I" (0x81), [from] "Q" (*(const char *)from)
: CC_CLOBBER_LIST("memory", "0"));
if (__builtin_constant_p(osize) && osize <= 4096)
return osize - size;
if (likely(CC_TRANSFORM(cc) == 0))
return osize - size;
size -= 4096;
to += 4096;
from += 4096;
}
}
unsigned long __must_check
@ -158,12 +112,6 @@ copy_to_user_key(void __user *to, const void *from, unsigned long n, unsigned lo
int __noreturn __put_user_bad(void);
#ifdef CONFIG_KMSAN
#define uaccess_kmsan_or_inline noinline __maybe_unused __no_sanitize_memory
#else
#define uaccess_kmsan_or_inline __always_inline
#endif
#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#define DEFINE_PUT_USER_NOINSTR(type) \
@ -199,7 +147,7 @@ __put_user_##type##_noinstr(unsigned type __user *to, \
{ \
int rc; \
\
asm volatile( \
asm_inline volatile( \
" llilh %%r0,%[spec]\n" \
"0: mvcos %[to],%[from],%[size]\n" \
"1: lhi %[rc],0\n" \
@ -315,7 +263,7 @@ __get_user_##type##_noinstr(unsigned type *to, \
{ \
int rc; \
\
asm volatile( \
asm_inline volatile( \
" lhi %%r0,%[spec]\n" \
"0: mvcos %[to],%[from],%[size]\n" \
"1: lhi %[rc],0\n" \
@ -415,12 +363,34 @@ long __must_check strncpy_from_user(char *dst, const char __user *src, long coun
long __must_check strnlen_user(const char __user *src, long count);
/*
* Zero Userspace
*/
unsigned long __must_check __clear_user(void __user *to, unsigned long size);
static uaccess_kmsan_or_inline __must_check unsigned long
__clear_user(void __user *to, unsigned long size)
{
unsigned long osize;
int cc;
static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
while (1) {
osize = size;
asm_inline volatile(
" llilh %%r0,%[spec]\n"
"0: mvcos %[to],%[from],%[size]\n"
"1: nopr %%r7\n"
CC_IPM(cc)
EX_TABLE_UA_MVCOS_TO(0b, 0b)
EX_TABLE_UA_MVCOS_TO(1b, 0b)
: CC_OUT(cc, cc), [size] "+d" (size), [to] "=Q" (*(char __user *)to)
: [spec] "I" (0x81), [from] "Q" (*(const char *)empty_zero_page)
: CC_CLOBBER_LIST("memory", "0"));
if (__builtin_constant_p(osize) && osize <= 4096)
return osize - size;
if (CC_TRANSFORM(cc) == 0)
return osize - size;
size -= 4096;
to += 4096;
}
}
static __always_inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
might_fault();
return __clear_user(to, n);
@ -520,7 +490,7 @@ static __always_inline int __cmpxchg_user_key(unsigned long address, void *uval,
_old = ((unsigned int)old & 0xff) << shift;
_new = ((unsigned int)new & 0xff) << shift;
mask = ~(0xff << shift);
asm volatile(
asm_inline volatile(
" spka 0(%[key])\n"
" sacf 256\n"
" llill %[count],%[max_loops]\n"
@ -568,7 +538,7 @@ static __always_inline int __cmpxchg_user_key(unsigned long address, void *uval,
_old = ((unsigned int)old & 0xffff) << shift;
_new = ((unsigned int)new & 0xffff) << shift;
mask = ~(0xffff << shift);
asm volatile(
asm_inline volatile(
" spka 0(%[key])\n"
" sacf 256\n"
" llill %[count],%[max_loops]\n"
@ -610,7 +580,7 @@ static __always_inline int __cmpxchg_user_key(unsigned long address, void *uval,
case 4: {
unsigned int prev = old;
asm volatile(
asm_inline volatile(
" spka 0(%[key])\n"
" sacf 256\n"
"0: cs %[prev],%[new],%[address]\n"
@ -631,7 +601,7 @@ static __always_inline int __cmpxchg_user_key(unsigned long address, void *uval,
case 8: {
unsigned long prev = old;
asm volatile(
asm_inline volatile(
" spka 0(%[key])\n"
" sacf 256\n"
"0: csg %[prev],%[new],%[address]\n"
@ -652,7 +622,7 @@ static __always_inline int __cmpxchg_user_key(unsigned long address, void *uval,
case 16: {
__uint128_t prev = old;
asm volatile(
asm_inline volatile(
" spka 0(%[key])\n"
" sacf 256\n"
"0: cdsg %[prev],%[new],%[address]\n"

View File

@ -52,7 +52,7 @@ static inline unsigned long load_unaligned_zeropad(const void *addr)
{
unsigned long data;
asm volatile(
asm_inline volatile(
"0: lg %[data],0(%[addr])\n"
"1: nopr %%r7\n"
EX_TABLE_ZEROPAD(0b, 1b, %[data], %[addr])

View File

@ -5,7 +5,6 @@
#include <asm/sections.h>
unsigned long __bootdata_preserved(__abs_lowcore);
int __bootdata_preserved(relocate_lowcore);
int abs_lowcore_map(int cpu, struct lowcore *lc, bool alloc)
{

View File

@ -1,41 +1,90 @@
// SPDX-License-Identifier: GPL-2.0
#ifndef pr_fmt
#define pr_fmt(fmt) "alt: " fmt
#endif
#include <linux/uaccess.h>
#include <linux/printk.h>
#include <asm/nospec-branch.h>
#include <asm/abs_lowcore.h>
#include <asm/alternative.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include <asm/machine.h>
#ifndef a_debug
#define a_debug pr_debug
#endif
#ifndef __kernel_va
#define __kernel_va(x) (void *)(x)
#endif
unsigned long __bootdata_preserved(machine_features[1]);
struct alt_debug {
unsigned long facilities[MAX_FACILITY_BIT / BITS_PER_LONG];
unsigned long mfeatures[MAX_MFEATURE_BIT / BITS_PER_LONG];
int spec;
};
static struct alt_debug __bootdata_preserved(alt_debug);
static void alternative_dump(u8 *old, u8 *new, unsigned int len, unsigned int type, unsigned int data)
{
char oinsn[33], ninsn[33];
unsigned long kptr;
unsigned int pos;
for (pos = 0; pos < len && 2 * pos < sizeof(oinsn) - 3; pos++)
hex_byte_pack(&oinsn[2 * pos], old[pos]);
oinsn[2 * pos] = 0;
for (pos = 0; pos < len && 2 * pos < sizeof(ninsn) - 3; pos++)
hex_byte_pack(&ninsn[2 * pos], new[pos]);
ninsn[2 * pos] = 0;
kptr = (unsigned long)__kernel_va(old);
a_debug("[%d/%3d] %016lx: %s -> %s\n", type, data, kptr, oinsn, ninsn);
}
void __apply_alternatives(struct alt_instr *start, struct alt_instr *end, unsigned int ctx)
{
u8 *instr, *replacement;
struct alt_debug *d;
struct alt_instr *a;
bool replace;
bool debug, replace;
u8 *old, *new;
/*
 * The scan order should be from start to end. Alternatives scanned
 * later can overwrite code patched by earlier entries.
 */
d = &alt_debug;
for (a = start; a < end; a++) {
if (!(a->ctx & ctx))
continue;
switch (a->type) {
case ALT_TYPE_FACILITY:
replace = test_facility(a->data);
debug = __test_facility(a->data, d->facilities);
break;
case ALT_TYPE_FEATURE:
replace = test_machine_feature(a->data);
debug = __test_machine_feature(a->data, d->mfeatures);
break;
case ALT_TYPE_SPEC:
replace = nobp_enabled();
break;
case ALT_TYPE_LOWCORE:
replace = have_relocated_lowcore();
debug = d->spec;
break;
default:
replace = false;
debug = false;
}
if (!replace)
continue;
instr = (u8 *)&a->instr_offset + a->instr_offset;
replacement = (u8 *)&a->repl_offset + a->repl_offset;
s390_kernel_write(instr, replacement, a->instrlen);
old = (u8 *)&a->instr_offset + a->instr_offset;
new = (u8 *)&a->repl_offset + a->repl_offset;
if (debug)
alternative_dump(old, new, a->instrlen, a->type, a->data);
s390_kernel_write(old, new, a->instrlen);
}
}
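For completeness, a hedged sketch of how a feature-gated alternative looks on the C side; the ALT_FEATURE()/MFEATURE_LOWCORE spelling is taken from the hunks in this series, the C-side ALTERNATIVE() macro form is an assumption, and the instructions are placeholders:

/*
 * Sketch, assuming the C-side ALTERNATIVE() macro: the first insn is
 * patched to the second at boot iff the machine feature bit is set.
 */
static inline unsigned long example_feature_alt(void)
{
	unsigned long val;

	asm_inline(
		ALTERNATIVE("	lghi	%[val],0",
			    "	lghi	%[val],1",
			    ALT_FEATURE(MFEATURE_LOWCORE))
		: [val] "=d" (val));
	return val;
}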

View File

@ -49,6 +49,7 @@ int main(void)
OFFSET(__PT_R14, pt_regs, gprs[14]);
OFFSET(__PT_R15, pt_regs, gprs[15]);
OFFSET(__PT_ORIG_GPR2, pt_regs, orig_gpr2);
OFFSET(__PT_INT_CODE, pt_regs, int_code);
OFFSET(__PT_FLAGS, pt_regs, flags);
OFFSET(__PT_CR1, pt_regs, cr1);
OFFSET(__PT_LAST_BREAK, pt_regs, last_break);
@ -76,7 +77,8 @@ int main(void)
OFFSET(__LC_EXT_CPU_ADDR, lowcore, ext_cpu_addr);
OFFSET(__LC_EXT_INT_CODE, lowcore, ext_int_code);
OFFSET(__LC_PGM_ILC, lowcore, pgm_ilc);
OFFSET(__LC_PGM_INT_CODE, lowcore, pgm_code);
OFFSET(__LC_PGM_CODE, lowcore, pgm_code);
OFFSET(__LC_PGM_INT_CODE, lowcore, pgm_int_code);
OFFSET(__LC_DATA_EXC_CODE, lowcore, data_exc_code);
OFFSET(__LC_MON_CLASS_NR, lowcore, mon_class_num);
OFFSET(__LC_PER_CODE, lowcore, per_code);
@ -122,7 +124,6 @@ int main(void)
OFFSET(__LC_LAST_UPDATE_TIMER, lowcore, last_update_timer);
OFFSET(__LC_LAST_UPDATE_CLOCK, lowcore, last_update_clock);
OFFSET(__LC_INT_CLOCK, lowcore, int_clock);
OFFSET(__LC_BOOT_CLOCK, lowcore, boot_clock);
OFFSET(__LC_CURRENT, lowcore, current_task);
OFFSET(__LC_KERNEL_STACK, lowcore, kernel_stack);
OFFSET(__LC_ASYNC_STACK, lowcore, async_stack);

View File

@ -235,7 +235,7 @@ static int __diag320(unsigned long subcode, void *addr)
{
union register_pair rp = { .even = (unsigned long)addr, };
asm volatile(
asm_inline volatile(
" diag %[rp],%[subcode],0x320\n"
"0: nopr %%r7\n"
EX_TABLE(0b, 0b)

View File

@ -195,7 +195,7 @@ static inline int __diag204(unsigned long *subcode, unsigned long size, void *ad
{
union register_pair rp = { .even = *subcode, .odd = size };
asm volatile(
asm_inline volatile(
" diag %[addr],%[rp],0x204\n"
"0: nopr %%r7\n"
EX_TABLE(0b,0b)
@ -286,7 +286,7 @@ int diag224(void *ptr)
int rc = -EOPNOTSUPP;
diag_stat_inc(DIAG_STAT_X224);
asm volatile("\n"
asm_inline volatile("\n"
" diag %[type],%[addr],0x224\n"
"0: lhi %[rc],0\n"
"1:\n"

View File

@ -8,6 +8,7 @@
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/sched/debug.h>
#include <linux/cpufeature.h>
#include <linux/compiler.h>
#include <linux/init.h>
#include <linux/errno.h>
@ -21,6 +22,7 @@
#include <asm/asm-extable.h>
#include <linux/memblock.h>
#include <asm/access-regs.h>
#include <asm/machine.h>
#include <asm/diag.h>
#include <asm/ebcdic.h>
#include <asm/fpu.h>
@ -36,12 +38,14 @@
#include <asm/boot_data.h>
#include "entry.h"
#define decompressor_handled_param(param) \
static int __init ignore_decompressor_param_##param(char *s) \
#define __decompressor_handled_param(func, param) \
static int __init ignore_decompressor_param_##func(char *s) \
{ \
return 0; \
} \
early_param(#param, ignore_decompressor_param_##param)
early_param(#param, ignore_decompressor_param_##func)
#define decompressor_handled_param(param) __decompressor_handled_param(param, param)
decompressor_handled_param(mem);
decompressor_handled_param(vmalloc);
@ -51,6 +55,7 @@ decompressor_handled_param(nokaslr);
decompressor_handled_param(cmma);
decompressor_handled_param(relocate_lowcore);
decompressor_handled_param(bootdebug);
__decompressor_handled_param(debug_alternative, debug-alternative);
#if IS_ENABLED(CONFIG_KVM)
decompressor_handled_param(prot_virt);
#endif
@ -63,21 +68,6 @@ static void __init kasan_early_init(void)
#endif
}
static void __init reset_tod_clock(void)
{
union tod_clock clk;
if (store_tod_clock_ext_cc(&clk) == 0)
return;
/* TOD clock not running. Set the clock to Unix Epoch. */
if (set_tod_clock(TOD_UNIX_EPOCH) || store_tod_clock_ext_cc(&clk))
disabled_wait();
memset(&tod_clock_base, 0, sizeof(tod_clock_base));
tod_clock_base.tod = TOD_UNIX_EPOCH;
get_lowcore()->last_update_clock = TOD_UNIX_EPOCH;
}
/*
* Initialize storage key for kernel pages
*/
@ -96,26 +86,6 @@ static noinline __init void init_kernel_storage_key(void)
static __initdata char sysinfo_page[PAGE_SIZE] __aligned(PAGE_SIZE);
static noinline __init void detect_machine_type(void)
{
struct sysinfo_3_2_2 *vmms = (struct sysinfo_3_2_2 *)&sysinfo_page;
/* Check current-configuration-level */
if (stsi(NULL, 0, 0, 0) <= 2) {
get_lowcore()->machine_flags |= MACHINE_FLAG_LPAR;
return;
}
/* Get virtual-machine cpu information. */
if (stsi(vmms, 3, 2, 2) || !vmms->count)
return;
/* Detect known hypervisors */
if (!memcmp(vmms->vm[0].cpi, "\xd2\xe5\xd4", 3))
get_lowcore()->machine_flags |= MACHINE_FLAG_KVM;
else if (!memcmp(vmms->vm[0].cpi, "\xa9\x61\xe5\xd4", 4))
get_lowcore()->machine_flags |= MACHINE_FLAG_VM;
}
/* Remove leading, trailing and double whitespace. */
static inline void strim_all(char *str)
{
@ -156,9 +126,9 @@ static noinline __init void setup_arch_string(void)
strim_all(hvstr);
} else {
sprintf(hvstr, "%s",
MACHINE_IS_LPAR ? "LPAR" :
MACHINE_IS_VM ? "z/VM" :
MACHINE_IS_KVM ? "KVM" : "unknown");
machine_is_lpar() ? "LPAR" :
machine_is_vm() ? "z/VM" :
machine_is_kvm() ? "KVM" : "unknown");
}
dump_stack_set_arch_desc("%s (%s)", mstr, hvstr);
}
@ -167,9 +137,8 @@ static __init void setup_topology(void)
{
int max_mnest;
if (!test_facility(11))
if (!cpu_has_topology())
return;
get_lowcore()->machine_flags |= MACHINE_FLAG_TOPOLOGY;
for (max_mnest = 6; max_mnest > 1; max_mnest--) {
if (stsi(&sysinfo_page, 15, 1, max_mnest) == 0)
break;
@ -218,65 +187,10 @@ static noinline __init void setup_lowcore_early(void)
lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
}
static __init void detect_diag9c(void)
{
unsigned int cpu_address;
int rc;
cpu_address = stap();
diag_stat_inc(DIAG_STAT_X09C);
asm volatile(
" diag %2,0,0x9c\n"
"0: la %0,0\n"
"1:\n"
EX_TABLE(0b,1b)
: "=d" (rc) : "0" (-EOPNOTSUPP), "d" (cpu_address) : "cc");
if (!rc)
get_lowcore()->machine_flags |= MACHINE_FLAG_DIAG9C;
}
static __init void detect_machine_facilities(void)
{
if (test_facility(8)) {
get_lowcore()->machine_flags |= MACHINE_FLAG_EDAT1;
system_ctl_set_bit(0, CR0_EDAT_BIT);
}
if (test_facility(78))
get_lowcore()->machine_flags |= MACHINE_FLAG_EDAT2;
if (test_facility(3))
get_lowcore()->machine_flags |= MACHINE_FLAG_IDTE;
if (test_facility(50) && test_facility(73)) {
get_lowcore()->machine_flags |= MACHINE_FLAG_TE;
system_ctl_set_bit(0, CR0_TRANSACTIONAL_EXECUTION_BIT);
}
if (test_facility(51))
get_lowcore()->machine_flags |= MACHINE_FLAG_TLB_LC;
if (test_facility(129))
system_ctl_set_bit(0, CR0_VECTOR_BIT);
if (test_facility(130))
get_lowcore()->machine_flags |= MACHINE_FLAG_NX;
if (test_facility(133))
get_lowcore()->machine_flags |= MACHINE_FLAG_GS;
if (test_facility(139) && (tod_clock_base.tod >> 63)) {
/* Enabled signed clock comparator comparisons */
get_lowcore()->machine_flags |= MACHINE_FLAG_SCC;
clock_comparator_max = -1ULL >> 1;
system_ctl_set_bit(0, CR0_CLOCK_COMPARATOR_SIGN_BIT);
}
if (IS_ENABLED(CONFIG_PCI) && test_facility(153)) {
get_lowcore()->machine_flags |= MACHINE_FLAG_PCI_MIO;
/* the control bit is set during PCI initialization */
}
if (test_facility(194))
get_lowcore()->machine_flags |= MACHINE_FLAG_RDP;
if (test_facility(85))
get_lowcore()->machine_flags |= MACHINE_FLAG_SEQ_INSN;
}
static inline void save_vector_registers(void)
{
#ifdef CONFIG_CRASH_DUMP
if (test_facility(129))
if (cpu_has_vx())
save_vx_regs(boot_cpu_vector_save_area);
#endif
}
@ -308,17 +222,13 @@ static void __init sort_amode31_extable(void)
void __init startup_init(void)
{
kasan_early_init();
reset_tod_clock();
time_early_init();
init_kernel_storage_key();
lockdep_off();
sort_amode31_extable();
setup_lowcore_early();
detect_machine_type();
setup_arch_string();
setup_boot_command_line();
detect_diag9c();
detect_machine_facilities();
save_vector_registers();
setup_topology();
sclp_early_detect();

View File

@ -29,6 +29,7 @@
#include <asm/nmi.h>
#include <asm/nospec-insn.h>
#include <asm/lowcore.h>
#include <asm/machine.h>
_LPP_OFFSET = __LC_LPP
@ -44,7 +45,7 @@ _LPP_OFFSET = __LC_LPP
ALTERNATIVE_2 "b \lpswe;nopr", \
".insn siy,0xeb0000000071,\address,0", ALT_FACILITY(193), \
__stringify(.insn siy,0xeb0000000071,LOWCORE_ALT_ADDRESS+\address,0), \
ALT_LOWCORE
ALT_FEATURE(MFEATURE_LOWCORE)
.endm
.macro MBEAR reg, lowcore
@ -67,7 +68,7 @@ _LPP_OFFSET = __LC_LPP
clg %r14,__LC_RESTART_STACK(\lowcore)
je \oklabel
la %r14,\savearea(\lowcore)
j stack_overflow
j stack_invalid
.endm
/*
@ -315,7 +316,7 @@ SYM_CODE_START(pgm_check_handler)
tm __LC_PGM_ILC+3(%r13),0x80 # check for per exception
jnz .Lpgm_svcper # -> single stepped svc
2: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
# CHECK_VMAP_STACK branches to stack_overflow or 4f
# CHECK_VMAP_STACK branches to stack_invalid or 4f
CHECK_VMAP_STACK __LC_SAVE_AREA,%r13,4f
3: lg %r15,__LC_KERNEL_STACK(%r13)
4: la %r11,STACK_FRAME_OVERHEAD(%r15)
@ -590,11 +591,11 @@ SYM_CODE_END(early_pgm_check_handler)
.section .kprobes.text, "ax"
/*
* The synchronous or the asynchronous stack overflowed. We are dead.
* The synchronous or the asynchronous stack pointer is invalid. We are dead.
* No need to properly save the registers, we are going to panic anyway.
* Setup a pt_regs so that show_trace can provide a good call trace.
*/
SYM_CODE_START(stack_overflow)
SYM_CODE_START(stack_invalid)
GET_LC %r15
lg %r15,__LC_NODAT_STACK(%r15) # change to panic stack
la %r11,STACK_FRAME_OVERHEAD(%r15)
@ -604,8 +605,8 @@ SYM_CODE_START(stack_overflow)
stg %r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
lgr %r2,%r11 # pass pointer to pt_regs
jg kernel_stack_overflow
SYM_CODE_END(stack_overflow)
jg kernel_stack_invalid
SYM_CODE_END(stack_invalid)
.section .data, "aw"
.balign 4
@ -621,7 +622,7 @@ SYM_DATA_END(daton_psw)
.balign 8
#define SYSCALL(esame,emu) .quad __s390x_ ## esame
SYM_DATA_START(sys_call_table)
#include "asm/syscall_table.h"
#include <asm/syscall_table.h>
SYM_DATA_END(sys_call_table)
#undef SYSCALL
@ -629,7 +630,7 @@ SYM_DATA_END(sys_call_table)
#define SYSCALL(esame,emu) .quad __s390_ ## emu
SYM_DATA_START(sys_call_table_emu)
#include "asm/syscall_table.h"
#include <asm/syscall_table.h>
SYM_DATA_END(sys_call_table_emu)
#undef SYSCALL
#endif

View File

@ -31,7 +31,7 @@ void do_secure_storage_access(struct pt_regs *regs);
void do_non_secure_storage_access(struct pt_regs *regs);
void do_secure_storage_violation(struct pt_regs *regs);
void do_report_trap(struct pt_regs *regs, int si_signo, int si_code, char *str);
void kernel_stack_overflow(struct pt_regs * regs);
void kernel_stack_invalid(struct pt_regs *regs);
void handle_signal32(struct ksignal *ksig, sigset_t *oldset,
struct pt_regs *regs);

View File

@ -13,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/kmsan-checks.h>
#include <linux/cpufeature.h>
#include <linux/kprobes.h>
#include <linux/execmem.h>
#include <trace/syscall.h>
@ -69,7 +70,7 @@ static const char *ftrace_shared_hotpatch_trampoline(const char **end)
bool ftrace_need_init_nop(void)
{
return !MACHINE_HAS_SEQ_INSN;
return !cpu_has_seq_insn();
}
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
@ -189,7 +190,7 @@ static int ftrace_modify_trampoline_call(struct dyn_ftrace *rec,
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
unsigned long addr)
{
if (MACHINE_HAS_SEQ_INSN)
if (cpu_has_seq_insn())
return ftrace_patch_branch_insn(rec->ip, old_addr, addr);
else
return ftrace_modify_trampoline_call(rec, old_addr, addr);
@ -213,8 +214,8 @@ static int ftrace_patch_branch_mask(void *addr, u16 expected, bool enable)
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
unsigned long addr)
{
/* Expect brcl 0xf,... for the !MACHINE_HAS_SEQ_INSN case */
if (MACHINE_HAS_SEQ_INSN)
/* Expect brcl 0xf,... for the !cpu_has_seq_insn() case */
if (cpu_has_seq_insn())
return ftrace_patch_branch_insn(rec->ip, addr, 0);
else
return ftrace_patch_branch_mask((void *)rec->ip, 0xc0f4, false);
@ -234,7 +235,7 @@ static int ftrace_make_trampoline_call(struct dyn_ftrace *rec, unsigned long add
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
if (MACHINE_HAS_SEQ_INSN)
if (cpu_has_seq_insn())
return ftrace_patch_branch_insn(rec->ip, 0, addr);
else
return ftrace_make_trampoline_call(rec, addr);

View File

@ -4,6 +4,7 @@
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#include <linux/cpufeature.h>
#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
@ -109,7 +110,7 @@ static int gs_broadcast(void)
SYSCALL_DEFINE2(s390_guarded_storage, int, command,
struct gs_cb __user *, gs_cb)
{
if (!MACHINE_HAS_GS)
if (!cpu_has_gs())
return -EOPNOTSUPP;
switch (command) {
case GS_ENABLE:

View File

@ -18,12 +18,10 @@
__HEAD
SYM_CODE_START(startup_continue)
larl %r1,tod_clock_base
GET_LC %r2
mvc 0(16,%r1),__LC_BOOT_CLOCK(%r2)
#
# Setup stack
#
GET_LC %r2
larl %r14,init_task
stg %r14,__LC_CURRENT(%r2)
larl %r15,init_thread_union+STACK_INIT_OFFSET

View File

@ -45,6 +45,7 @@
* therefore delaying the throughput loss caused by using SMP threads.
*/
#include <linux/cpufeature.h>
#include <linux/cpumask.h>
#include <linux/debugfs.h>
#include <linux/device.h>
@ -87,7 +88,7 @@ static DECLARE_DELAYED_WORK(hd_capacity_work, hd_capacity_work_fn);
static int hd_set_hiperdispatch_mode(int enable)
{
if (!MACHINE_HAS_TOPOLOGY)
if (!cpu_has_topology())
enable = 0;
if (hd_enabled == enable)
return 0;

View File

@ -22,6 +22,7 @@
#include <linux/debug_locks.h>
#include <linux/vmalloc.h>
#include <asm/asm-extable.h>
#include <asm/machine.h>
#include <asm/diag.h>
#include <asm/ipl.h>
#include <asm/smp.h>
@ -185,7 +186,7 @@ static inline int __diag308(unsigned long subcode, unsigned long addr)
r1.even = addr;
r1.odd = 0;
asm volatile(
asm_inline volatile(
" diag %[r1],%[subcode],0x308\n"
"0: nopr %%r7\n"
EX_TABLE(0b,0b)
@ -685,7 +686,7 @@ static int __init ipl_init(void)
goto out;
switch (ipl_info.type) {
case IPL_TYPE_CCW:
if (MACHINE_IS_VM)
if (machine_is_vm())
rc = sysfs_create_group(&ipl_kset->kobj,
&ipl_ccw_attr_group_vm);
else
@ -1272,7 +1273,7 @@ static void reipl_block_ccw_fill_parms(struct ipl_parameter_block *ipb)
ipb->ccw.flags = IPL_PB0_FLAG_LOADPARM;
/* VM PARM */
if (MACHINE_IS_VM && ipl_block_valid &&
if (machine_is_vm() && ipl_block_valid &&
(ipl_block.ccw.vm_flags & IPL_PB0_CCW_VM_FLAG_VP)) {
ipb->ccw.vm_flags |= IPL_PB0_CCW_VM_FLAG_VP;
@ -1286,7 +1287,7 @@ static int __init reipl_nss_init(void)
{
int rc;
if (!MACHINE_IS_VM)
if (!machine_is_vm())
return 0;
reipl_block_nss = (void *) get_zeroed_page(GFP_KERNEL);
@ -1311,8 +1312,8 @@ static int __init reipl_ccw_init(void)
return -ENOMEM;
rc = sysfs_create_group(&reipl_kset->kobj,
MACHINE_IS_VM ? &reipl_ccw_attr_group_vm
: &reipl_ccw_attr_group_lpar);
machine_is_vm() ? &reipl_ccw_attr_group_vm
: &reipl_ccw_attr_group_lpar);
if (rc)
return rc;
@ -1987,7 +1988,7 @@ static void vmcmd_run(struct shutdown_trigger *trigger)
static int vmcmd_init(void)
{
if (!MACHINE_IS_VM)
if (!machine_is_vm())
return -EOPNOTSUPP;
vmcmd_kset = kset_create_and_add("vmcmd", NULL, firmware_kobj);
if (!vmcmd_kset)
@ -2264,7 +2265,7 @@ static void __init strncpy_skip_quote(char *dst, char *src, int n)
static int __init vmcmd_on_reboot_setup(char *str)
{
if (!MACHINE_IS_VM)
if (!machine_is_vm())
return 1;
strncpy_skip_quote(vmcmd_on_reboot, str, VMCMD_MAX_SIZE);
vmcmd_on_reboot[VMCMD_MAX_SIZE] = 0;
@ -2275,7 +2276,7 @@ __setup("vmreboot=", vmcmd_on_reboot_setup);
static int __init vmcmd_on_panic_setup(char *str)
{
if (!MACHINE_IS_VM)
if (!machine_is_vm())
return 1;
strncpy_skip_quote(vmcmd_on_panic, str, VMCMD_MAX_SIZE);
vmcmd_on_panic[VMCMD_MAX_SIZE] = 0;
@ -2286,7 +2287,7 @@ __setup("vmpanic=", vmcmd_on_panic_setup);
static int __init vmcmd_on_halt_setup(char *str)
{
if (!MACHINE_IS_VM)
if (!machine_is_vm())
return 1;
strncpy_skip_quote(vmcmd_on_halt, str, VMCMD_MAX_SIZE);
vmcmd_on_halt[VMCMD_MAX_SIZE] = 0;
@ -2297,7 +2298,7 @@ __setup("vmhalt=", vmcmd_on_halt_setup);
static int __init vmcmd_on_poff_setup(char *str)
{
if (!MACHINE_IS_VM)
if (!machine_is_vm())
return 1;
strncpy_skip_quote(vmcmd_on_poff, str, VMCMD_MAX_SIZE);
vmcmd_on_poff[VMCMD_MAX_SIZE] = 0;

View File

@ -9,6 +9,7 @@
*/
#include <linux/kernel_stat.h>
#include <linux/cpufeature.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
@ -25,6 +26,7 @@
#include <asm/irq_regs.h>
#include <asm/cputime.h>
#include <asm/lowcore.h>
#include <asm/machine.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>
#include <asm/stacktrace.h>
@ -148,7 +150,7 @@ void noinstr do_io_irq(struct pt_regs *regs)
if (user_mode(regs)) {
update_timer_sys();
if (static_branch_likely(&cpu_has_bear))
if (cpu_has_bear())
current->thread.last_break = regs->last_break;
}
@ -163,7 +165,7 @@ void noinstr do_io_irq(struct pt_regs *regs)
do_irq_async(regs, THIN_INTERRUPT);
else
do_irq_async(regs, IO_INTERRUPT);
} while (MACHINE_IS_LPAR && irq_pending(regs));
} while (machine_is_lpar() && irq_pending(regs));
irq_exit_rcu();
@ -184,7 +186,7 @@ void noinstr do_ext_irq(struct pt_regs *regs)
if (user_mode(regs)) {
update_timer_sys();
if (static_branch_likely(&cpu_has_bear))
if (cpu_has_bear())
current->thread.last_break = regs->last_break;
}

View File

@ -13,6 +13,7 @@
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/stop_machine.h>
#include <linux/cpufeature.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <linux/extable.h>
@ -153,7 +154,7 @@ void arch_arm_kprobe(struct kprobe *p)
{
struct swap_insn_args args = {.p = p, .arm_kprobe = 1};
if (MACHINE_HAS_SEQ_INSN) {
if (cpu_has_seq_insn()) {
swap_instruction(&args);
text_poke_sync();
} else {
@ -166,7 +167,7 @@ void arch_disarm_kprobe(struct kprobe *p)
{
struct swap_insn_args args = {.p = p, .arm_kprobe = 0};
if (MACHINE_HAS_SEQ_INSN) {
if (cpu_has_seq_insn()) {
swap_instruction(&args);
text_poke_sync();
} else {

View File

@ -13,7 +13,9 @@
#include <linux/reboot.h>
#include <linux/ftrace.h>
#include <linux/debug_locks.h>
#include <linux/cpufeature.h>
#include <asm/guarded_storage.h>
#include <asm/machine.h>
#include <asm/pfault.h>
#include <asm/cio.h>
#include <asm/fpu.h>
@ -94,7 +96,7 @@ static noinline void __machine_kdump(void *image)
mcesa = __va(get_lowcore()->mcesad & MCESA_ORIGIN_MASK);
if (cpu_has_vx())
save_vx_regs((__vector128 *) mcesa->vector_save_area);
if (MACHINE_HAS_GS) {
if (cpu_has_gs()) {
local_ctl_store(2, &cr2_old.reg);
cr2_new = cr2_old;
cr2_new.gse = 1;
@ -178,7 +180,7 @@ void arch_kexec_unprotect_crashkres(void)
static int machine_kexec_prepare_kdump(void)
{
#ifdef CONFIG_CRASH_DUMP
if (MACHINE_IS_VM)
if (machine_is_vm())
diag10_range(PFN_DOWN(crashk_res.start),
PFN_DOWN(crashk_res.end - crashk_res.start + 1));
return 0;

View File

@ -9,6 +9,7 @@
*/
#include <linux/kernel_stat.h>
#include <linux/cpufeature.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/entry-common.h>
@ -45,7 +46,7 @@ static DEFINE_PER_CPU(struct mcck_struct, cpu_mcck);
static inline int nmi_needs_mcesa(void)
{
return cpu_has_vx() || MACHINE_HAS_GS;
return cpu_has_vx() || cpu_has_gs();
}
/*
@ -61,7 +62,7 @@ void __init nmi_alloc_mcesa_early(u64 *mcesad)
if (!nmi_needs_mcesa())
return;
*mcesad = __pa(&boot_mcesa);
if (MACHINE_HAS_GS)
if (cpu_has_gs())
*mcesad |= ilog2(MCESA_MAX_SIZE);
}
@ -73,14 +74,14 @@ int nmi_alloc_mcesa(u64 *mcesad)
*mcesad = 0;
if (!nmi_needs_mcesa())
return 0;
size = MACHINE_HAS_GS ? MCESA_MAX_SIZE : MCESA_MIN_SIZE;
size = cpu_has_gs() ? MCESA_MAX_SIZE : MCESA_MIN_SIZE;
origin = kmalloc(size, GFP_KERNEL);
if (!origin)
return -ENOMEM;
/* The pointer is stored with mcesa_bits ORed in */
kmemleak_not_leak(origin);
*mcesad = __pa(origin);
if (MACHINE_HAS_GS)
if (cpu_has_gs())
*mcesad |= ilog2(MCESA_MAX_SIZE);
return 0;
}
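Aside: ORing ilog2() of the size into mcesad works because the MCESA origin is sufficiently aligned that its low bits are free to carry the length characteristic (the "mcesa_bits" the comment refers to); the SMP store-status hunk further down extracts it again via MCESA_LC_MASK.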

View File

@ -8,6 +8,7 @@
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/stop_machine.h>
#include <linux/cpufeature.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/random.h>
@ -19,6 +20,7 @@
#include <linux/cpu.h>
#include <linux/smp.h>
#include <asm/text-patching.h>
#include <asm/machine.h>
#include <asm/diag.h>
#include <asm/facility.h>
#include <asm/elf.h>
@ -209,14 +211,14 @@ static int __init setup_hwcaps(void)
elf_hwcap |= HWCAP_DFP;
/* huge page support */
if (MACHINE_HAS_EDAT1)
if (cpu_has_edat1())
elf_hwcap |= HWCAP_HPAGE;
/* 64-bit register support for 31-bit processes */
elf_hwcap |= HWCAP_HIGH_GPRS;
/* transactional execution */
if (MACHINE_HAS_TE)
if (machine_has_tx())
elf_hwcap |= HWCAP_TE;
/* vector */
@ -244,10 +246,10 @@ static int __init setup_hwcaps(void)
elf_hwcap |= HWCAP_NNPA;
/* guarded storage */
if (MACHINE_HAS_GS)
if (cpu_has_gs())
elf_hwcap |= HWCAP_GS;
if (MACHINE_HAS_PCI_MIO)
if (test_machine_feature(MFEATURE_PCI_MIO))
elf_hwcap |= HWCAP_PCI_MIO;
/* virtualization support */

View File

@ -7,10 +7,10 @@
* Martin Schwidefsky (schwidefsky@de.ibm.com)
*/
#include "asm/ptrace.h"
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/cpufeature.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
@ -31,6 +31,9 @@
#include <asm/unistd.h>
#include <asm/runtime_instr.h>
#include <asm/facility.h>
#include <asm/machine.h>
#include <asm/ptrace.h>
#include <asm/rwonce.h>
#include <asm/fpu.h>
#include "entry.h"
@ -60,7 +63,7 @@ void update_cr_regs(struct task_struct *task)
cr0_new = cr0_old;
cr2_new = cr2_old;
/* Take care of the enable/disable of transactional execution. */
if (MACHINE_HAS_TE) {
if (machine_has_tx()) {
/* Set or clear transaction execution TXC bit 8. */
cr0_new.tcx = 1;
if (task->thread.per_flags & PER_FLAG_NO_TE)
@ -75,7 +78,7 @@ void update_cr_regs(struct task_struct *task)
}
}
/* Take care of enable/disable of guarded storage. */
if (MACHINE_HAS_GS) {
if (cpu_has_gs()) {
cr2_new.gse = 0;
if (task->thread.gs_cb)
cr2_new.gse = 1;
@ -470,18 +473,18 @@ long arch_ptrace(struct task_struct *child, long request,
case PTRACE_GET_LAST_BREAK:
return put_user(child->thread.last_break, (unsigned long __user *)data);
case PTRACE_ENABLE_TE:
if (!MACHINE_HAS_TE)
if (!machine_has_tx())
return -EIO;
child->thread.per_flags &= ~PER_FLAG_NO_TE;
return 0;
case PTRACE_DISABLE_TE:
if (!MACHINE_HAS_TE)
if (!machine_has_tx())
return -EIO;
child->thread.per_flags |= PER_FLAG_NO_TE;
child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND;
return 0;
case PTRACE_TE_ABORT_RAND:
if (!MACHINE_HAS_TE || (child->thread.per_flags & PER_FLAG_NO_TE))
if (!machine_has_tx() || (child->thread.per_flags & PER_FLAG_NO_TE))
return -EIO;
switch (data) {
case 0UL:
@ -1033,7 +1036,7 @@ static int s390_gs_cb_get(struct task_struct *target,
{
struct gs_cb *data = target->thread.gs_cb;
if (!MACHINE_HAS_GS)
if (!cpu_has_gs())
return -ENODEV;
if (!data)
return -ENODATA;
@ -1050,7 +1053,7 @@ static int s390_gs_cb_set(struct task_struct *target,
struct gs_cb gs_cb = { }, *data = NULL;
int rc;
if (!MACHINE_HAS_GS)
if (!cpu_has_gs())
return -ENODEV;
if (!target->thread.gs_cb) {
data = kzalloc(sizeof(*data), GFP_KERNEL);
@ -1087,7 +1090,7 @@ static int s390_gs_bc_get(struct task_struct *target,
{
struct gs_cb *data = target->thread.gs_bc_cb;
if (!MACHINE_HAS_GS)
if (!cpu_has_gs())
return -ENODEV;
if (!data)
return -ENODATA;
@ -1101,7 +1104,7 @@ static int s390_gs_bc_set(struct task_struct *target,
{
struct gs_cb *data = target->thread.gs_bc_cb;
if (!MACHINE_HAS_GS)
if (!cpu_has_gs())
return -ENODEV;
if (!data) {
data = kzalloc(sizeof(*data), GFP_KERNEL);
@ -1571,5 +1574,5 @@ unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
addr = kernel_stack_pointer(regs) + n * sizeof(long);
if (!regs_within_kernel_stack(regs, addr))
return 0;
return *(unsigned long *)addr;
return READ_ONCE_NOCHECK(*(unsigned long *)addr);
}

View File

@ -54,6 +54,7 @@
#include <asm/archrandom.h>
#include <asm/boot_data.h>
#include <asm/machine.h>
#include <asm/ipl.h>
#include <asm/facility.h>
#include <asm/smp.h>
@ -180,8 +181,6 @@ unsigned long __bootdata_preserved(MODULES_END);
struct lowcore *lowcore_ptr[NR_CPUS];
EXPORT_SYMBOL(lowcore_ptr);
DEFINE_STATIC_KEY_FALSE(cpu_has_bear);
/*
* The Write Back bit position in the physaddr is given by the SLPC PCI.
* Leaving the mask zero always uses write through which is safe
@ -251,7 +250,7 @@ static void __init conmode_default(void)
char query_buffer[1024];
char *ptr;
if (MACHINE_IS_VM) {
if (machine_is_vm()) {
cpcmd("QUERY CONSOLE", query_buffer, 1024, NULL);
console_devno = simple_strtoul(query_buffer + 5, NULL, 16);
ptr = strstr(query_buffer, "SUBCHANNEL =");
@ -289,7 +288,7 @@ static void __init conmode_default(void)
SET_CONSOLE_SCLP;
#endif
}
} else if (MACHINE_IS_KVM) {
} else if (machine_is_kvm()) {
if (sclp.has_vt220 && IS_ENABLED(CONFIG_SCLP_VT220_CONSOLE))
SET_CONSOLE_VT220;
else if (sclp.has_linemode && IS_ENABLED(CONFIG_SCLP_CONSOLE))
@ -652,7 +651,7 @@ static void __init reserve_crashkernel(void)
return;
}
if (!oldmem_data.start && MACHINE_IS_VM)
if (!oldmem_data.start && machine_is_vm())
diag10_range(PFN_DOWN(crash_base), PFN_DOWN(crash_size));
crashk_res.start = crash_base;
crashk_res.end = crash_base + crash_size - 1;
@ -898,12 +897,12 @@ void __init setup_arch(char **cmdline_p)
/*
* print what head.S has found out about the machine
*/
if (MACHINE_IS_VM)
if (machine_is_vm())
pr_info("Linux is running as a z/VM "
"guest operating system in 64-bit mode\n");
else if (MACHINE_IS_KVM)
else if (machine_is_kvm())
pr_info("Linux is running under KVM in 64-bit mode\n");
else if (MACHINE_IS_LPAR)
else if (machine_is_lpar())
pr_info("Linux is running natively in 64-bit mode\n");
else
pr_info("Linux is running as a guest in 64-bit mode\n");
@ -911,7 +910,7 @@ void __init setup_arch(char **cmdline_p)
if (!boot_earlyprintk)
boot_rb_foreach(print_rb_entry);
if (have_relocated_lowcore())
if (machine_has_relocated_lowcore())
pr_info("Lowcore relocated to 0x%px\n", get_lowcore());
log_component_list();
@ -961,7 +960,7 @@ void __init setup_arch(char **cmdline_p)
setup_uv();
dma_contiguous_reserve(ident_map_size);
vmcp_cma_reserve();
if (MACHINE_HAS_EDAT2)
if (cpu_has_edat2())
hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);
reserve_crashkernel();
@ -981,10 +980,6 @@ void __init setup_arch(char **cmdline_p)
numa_setup();
smp_detect_cpus();
topology_init_early();
if (test_facility(193))
static_branch_enable(&cpu_has_bear);
setup_protection_map();
/*
* Create kernel page tables.

View File

@ -18,6 +18,7 @@
#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/cpufeature.h>
#include <linux/workqueue.h>
#include <linux/memblock.h>
#include <linux/export.h>
@ -38,6 +39,7 @@
#include <linux/kprobes.h>
#include <asm/access-regs.h>
#include <asm/asm-offsets.h>
#include <asm/machine.h>
#include <asm/ctlreg.h>
#include <asm/pfault.h>
#include <asm/diag.h>
@ -416,7 +418,7 @@ EXPORT_SYMBOL(arch_vcpu_is_preempted);
void notrace smp_yield_cpu(int cpu)
{
if (!MACHINE_HAS_DIAG9C)
if (!machine_has_diag9c())
return;
diag_stat_inc_norecursion(DIAG_STAT_X09C);
asm volatile("diag %0,0,0x9c"
@ -561,10 +563,10 @@ int smp_store_status(int cpu)
if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_STATUS_AT_ADDRESS,
pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
return -EIO;
if (!cpu_has_vx() && !MACHINE_HAS_GS)
if (!cpu_has_vx() && !cpu_has_gs())
return 0;
pa = lc->mcesad & MCESA_ORIGIN_MASK;
if (MACHINE_HAS_GS)
if (cpu_has_gs())
pa |= lc->mcesad & MCESA_LC_MASK;
if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_ADDITIONAL_STATUS,
pa) != SIGP_CC_ORDER_CODE_ACCEPTED)

View File

@ -12,6 +12,7 @@
* platform.
*/
#include <linux/cpufeature.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
@ -81,25 +82,35 @@ SYSCALL_DEFINE0(ni_syscall)
return -ENOSYS;
}
static void do_syscall(struct pt_regs *regs)
void noinstr __do_syscall(struct pt_regs *regs, int per_trap)
{
unsigned long nr;
add_random_kstack_offset();
enter_from_user_mode(regs);
regs->psw = get_lowcore()->svc_old_psw;
regs->int_code = get_lowcore()->svc_int_code;
update_timer_sys();
if (cpu_has_bear())
current->thread.last_break = regs->last_break;
local_irq_enable();
regs->orig_gpr2 = regs->gprs[2];
if (unlikely(per_trap))
set_thread_flag(TIF_PER_TRAP);
regs->flags = 0;
set_pt_regs_flag(regs, PIF_SYSCALL);
nr = regs->int_code & 0xffff;
if (!nr) {
if (likely(!nr)) {
nr = regs->gprs[1] & 0xffff;
regs->int_code &= ~0xffffUL;
regs->int_code |= nr;
}
regs->gprs[2] = nr;
if (nr == __NR_restart_syscall && !(current->restart_block.arch_data & 1)) {
regs->psw.addr = current->restart_block.arch_data;
current->restart_block.arch_data = 1;
}
nr = syscall_enter_from_user_mode_work(regs, nr);
/*
* In the s390 ptrace ABI, both the syscall number and the return value
* use gpr2. However, userspace puts the syscall number either in the
@ -107,37 +118,11 @@ static void do_syscall(struct pt_regs *regs)
* work, the ptrace code sets PIF_SYSCALL_RET_SET, which is checked here
* and if set, the syscall will be skipped.
*/
if (unlikely(test_and_clear_pt_regs_flag(regs, PIF_SYSCALL_RET_SET)))
goto out;
regs->gprs[2] = -ENOSYS;
if (likely(nr >= NR_syscalls))
goto out;
do {
if (likely(nr < NR_syscalls))
regs->gprs[2] = current->thread.sys_call_table[nr](regs);
} while (test_and_clear_pt_regs_flag(regs, PIF_EXECVE_PGSTE_RESTART));
out:
syscall_exit_to_user_mode_work(regs);
}
void noinstr __do_syscall(struct pt_regs *regs, int per_trap)
{
add_random_kstack_offset();
enter_from_user_mode(regs);
regs->psw = get_lowcore()->svc_old_psw;
regs->int_code = get_lowcore()->svc_int_code;
update_timer_sys();
if (static_branch_likely(&cpu_has_bear))
current->thread.last_break = regs->last_break;
local_irq_enable();
regs->orig_gpr2 = regs->gprs[2];
if (per_trap)
set_thread_flag(TIF_PER_TRAP);
regs->flags = 0;
set_pt_regs_flag(regs, PIF_SYSCALL);
do_syscall(regs);
exit_to_user_mode();
syscall_exit_to_user_mode(regs);
}

View File

@ -5,6 +5,7 @@
* Martin Schwidefsky <schwidefsky@de.ibm.com>,
*/
#include <linux/cpufeature.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
@ -15,54 +16,17 @@
#include <linux/export.h>
#include <linux/slab.h>
#include <asm/asm-extable.h>
#include <asm/machine.h>
#include <asm/ebcdic.h>
#include <asm/debug.h>
#include <asm/sysinfo.h>
#include <asm/cpcmd.h>
#include <asm/topology.h>
#include <asm/fpu.h>
#include <asm/asm.h>
int topology_max_mnest;
static inline int __stsi(void *sysinfo, int fc, int sel1, int sel2, int *lvl)
{
int r0 = (fc << 28) | sel1;
int rc = 0;
asm volatile(
" lr 0,%[r0]\n"
" lr 1,%[r1]\n"
" stsi 0(%[sysinfo])\n"
"0: jz 2f\n"
"1: lhi %[rc],%[retval]\n"
"2: lr %[r0],0\n"
EX_TABLE(0b, 1b)
: [r0] "+d" (r0), [rc] "+d" (rc)
: [r1] "d" (sel2),
[sysinfo] "a" (sysinfo),
[retval] "K" (-EOPNOTSUPP)
: "cc", "0", "1", "memory");
*lvl = ((unsigned int) r0) >> 28;
return rc;
}
/*
* stsi - store system information
*
* Returns the current configuration level if function code 0 was specified.
* Otherwise returns 0 on success or a negative value on error.
*/
int stsi(void *sysinfo, int fc, int sel1, int sel2)
{
int lvl, rc;
rc = __stsi(sysinfo, fc, sel1, sel2, &lvl);
if (rc)
return rc;
return fc ? 0 : lvl;
}
EXPORT_SYMBOL(stsi);
#ifdef CONFIG_PROC_FS
static bool convert_ext_name(unsigned char encoding, char *name, size_t len)
@ -154,7 +118,7 @@ static void stsi_15_1_x(struct seq_file *m, struct sysinfo_15_1_x *info)
int i;
seq_putc(m, '\n');
if (!MACHINE_HAS_TOPOLOGY)
if (!cpu_has_topology())
return;
if (stsi(info, 15, 1, topology_max_mnest))
return;
@ -415,7 +379,7 @@ static struct service_level service_level_vm = {
static __init int create_proc_service_level(void)
{
proc_create_seq("service_levels", 0, NULL, &service_level_seq_ops);
if (MACHINE_IS_VM)
if (machine_is_vm())
register_service_level(&service_level_vm);
return 0;
}
@ -559,7 +523,7 @@ static __init int stsi_init_debugfs(void)
sf = &stsi_file[i];
debugfs_create_file(sf->name, 0400, stsi_root, NULL, sf->fops);
}
if (IS_ENABLED(CONFIG_SCHED_TOPOLOGY) && MACHINE_HAS_TOPOLOGY) {
if (IS_ENABLED(CONFIG_SCHED_TOPOLOGY) && cpu_has_topology()) {
char link_to[10];
sprintf(link_to, "15_1_%d", topology_mnest_limit());

View File

@ -54,10 +54,10 @@
#include <asm/cio.h>
#include "entry.h"
union tod_clock tod_clock_base __section(".data");
union tod_clock __bootdata_preserved(tod_clock_base);
EXPORT_SYMBOL_GPL(tod_clock_base);
u64 clock_comparator_max = -1ULL;
u64 __bootdata_preserved(clock_comparator_max);
EXPORT_SYMBOL_GPL(clock_comparator_max);
static DEFINE_PER_CPU(struct clock_event_device, comparators);

View File

@ -6,6 +6,7 @@
#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/cpufeature.h>
#include <linux/workqueue.h>
#include <linux/memblock.h>
#include <linux/uaccess.h>
@ -240,7 +241,7 @@ int topology_set_cpu_management(int fc)
{
int cpu, rc;
if (!MACHINE_HAS_TOPOLOGY)
if (!cpu_has_topology())
return -EOPNOTSUPP;
if (fc)
rc = ptf(PTF_VERTICAL);
@ -315,13 +316,13 @@ static int __arch_update_cpu_topology(void)
hd_status = 0;
rc = 0;
mutex_lock(&smp_cpu_state_mutex);
if (MACHINE_HAS_TOPOLOGY) {
if (cpu_has_topology()) {
rc = 1;
store_topology(info);
tl_to_masks(info);
}
update_cpu_masks();
if (!MACHINE_HAS_TOPOLOGY)
if (!cpu_has_topology())
topology_update_polarization_simple();
if (cpu_management == 1)
hd_status = hd_enable_hiperdispatch();
@ -376,7 +377,7 @@ static void set_topology_timer(void)
void topology_expect_change(void)
{
if (!MACHINE_HAS_TOPOLOGY)
if (!cpu_has_topology())
return;
/* This is racy, but it doesn't matter since it is just a heuristic.
* Worst case is that we poll in a higher frequency for a bit longer.
@ -500,7 +501,7 @@ int topology_cpu_init(struct cpu *cpu)
int rc;
rc = sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group);
if (rc || !MACHINE_HAS_TOPOLOGY)
if (rc || !cpu_has_topology())
return rc;
rc = sysfs_create_group(&cpu->dev.kobj, &topology_extra_cpu_attr_group);
if (rc)
@ -569,12 +570,12 @@ void __init topology_init_early(void)
set_sched_topology(s390_topology);
if (topology_mode == TOPOLOGY_MODE_UNINITIALIZED) {
if (MACHINE_HAS_TOPOLOGY)
if (cpu_has_topology())
topology_mode = TOPOLOGY_MODE_HW;
else
topology_mode = TOPOLOGY_MODE_SINGLE;
}
if (!MACHINE_HAS_TOPOLOGY)
if (!cpu_has_topology())
goto out;
tl_info = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
info = tl_info;
@ -596,7 +597,7 @@ static inline int topology_get_mode(int enabled)
{
if (!enabled)
return TOPOLOGY_MODE_SINGLE;
return MACHINE_HAS_TOPOLOGY ? TOPOLOGY_MODE_HW : TOPOLOGY_MODE_PACKAGE;
return cpu_has_topology() ? TOPOLOGY_MODE_HW : TOPOLOGY_MODE_PACKAGE;
}
static inline int topology_is_enabled(void)
@ -686,7 +687,7 @@ static int __init topology_init(void)
int rc = 0;
timer_setup(&topology_timer, topology_timer_fn, TIMER_DEFERRABLE);
if (MACHINE_HAS_TOPOLOGY)
if (cpu_has_topology())
set_topology_timer();
else
topology_update_polarization_simple();

View File

@ -3,18 +3,13 @@
* S390 version
* Copyright IBM Corp. 1999, 2000
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
* Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
*
* Derived from "arch/i386/kernel/traps.c"
* Copyright (C) 1991, 1992 Linus Torvalds
*/
/*
* 'Traps.c' handles hardware traps and faults after we have saved some
* state in 'asm.s'.
*/
#include "asm/irqflags.h"
#include "asm/ptrace.h"
#include <linux/cpufeature.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/randomize_kstack.h>
@ -29,6 +24,8 @@
#include <linux/entry-common.h>
#include <linux/kmsan.h>
#include <asm/asm-extable.h>
#include <asm/irqflags.h>
#include <asm/ptrace.h>
#include <asm/vtime.h>
#include <asm/fpu.h>
#include <asm/fault.h>
@ -42,7 +39,7 @@ static inline void __user *get_trap_ip(struct pt_regs *regs)
address = current->thread.trap_tdb.data[3];
else
address = regs->psw.addr;
return (void __user *) (address - (regs->int_code >> 16));
return (void __user *)(address - (regs->int_code >> 16));
}
#ifdef CONFIG_GENERIC_BUG
@ -57,16 +54,15 @@ void do_report_trap(struct pt_regs *regs, int si_signo, int si_code, char *str)
if (user_mode(regs)) {
force_sig_fault(si_signo, si_code, get_trap_ip(regs));
report_user_fault(regs, si_signo, 0);
} else {
if (!fixup_exception(regs))
die(regs, str);
}
}
}
static void do_trap(struct pt_regs *regs, int si_signo, int si_code, char *str)
{
if (notify_die(DIE_TRAP, str, regs, 0,
regs->int_code, si_signo) == NOTIFY_STOP)
if (notify_die(DIE_TRAP, str, regs, 0, regs->int_code, si_signo) == NOTIFY_STOP)
return;
do_report_trap(regs, si_signo, si_code, str);
}
@ -78,8 +74,7 @@ void do_per_trap(struct pt_regs *regs)
return;
if (!current->ptrace)
return;
force_sig_fault(SIGTRAP, TRAP_HWBKPT,
(void __force __user *) current->thread.per_event.address);
force_sig_fault(SIGTRAP, TRAP_HWBKPT, (void __force __user *)current->thread.per_event.address);
}
NOKPROBE_SYMBOL(do_per_trap);
@ -98,36 +93,25 @@ static void name(struct pt_regs *regs) \
do_trap(regs, signr, sicode, str); \
}
DO_ERROR_INFO(addressing_exception, SIGILL, ILL_ILLADR,
"addressing exception")
DO_ERROR_INFO(execute_exception, SIGILL, ILL_ILLOPN,
"execute exception")
DO_ERROR_INFO(divide_exception, SIGFPE, FPE_INTDIV,
"fixpoint divide exception")
DO_ERROR_INFO(overflow_exception, SIGFPE, FPE_INTOVF,
"fixpoint overflow exception")
DO_ERROR_INFO(hfp_overflow_exception, SIGFPE, FPE_FLTOVF,
"HFP overflow exception")
DO_ERROR_INFO(hfp_underflow_exception, SIGFPE, FPE_FLTUND,
"HFP underflow exception")
DO_ERROR_INFO(hfp_significance_exception, SIGFPE, FPE_FLTRES,
"HFP significance exception")
DO_ERROR_INFO(hfp_divide_exception, SIGFPE, FPE_FLTDIV,
"HFP divide exception")
DO_ERROR_INFO(hfp_sqrt_exception, SIGFPE, FPE_FLTINV,
"HFP square root exception")
DO_ERROR_INFO(operand_exception, SIGILL, ILL_ILLOPN,
"operand exception")
DO_ERROR_INFO(privileged_op, SIGILL, ILL_PRVOPC,
"privileged operation")
DO_ERROR_INFO(special_op_exception, SIGILL, ILL_ILLOPN,
"special operation exception")
DO_ERROR_INFO(transaction_exception, SIGILL, ILL_ILLOPN,
"transaction constraint exception")
DO_ERROR_INFO(addressing_exception, SIGILL, ILL_ILLADR, "addressing exception")
DO_ERROR_INFO(divide_exception, SIGFPE, FPE_INTDIV, "fixpoint divide exception")
DO_ERROR_INFO(execute_exception, SIGILL, ILL_ILLOPN, "execute exception")
DO_ERROR_INFO(hfp_divide_exception, SIGFPE, FPE_FLTDIV, "HFP divide exception")
DO_ERROR_INFO(hfp_overflow_exception, SIGFPE, FPE_FLTOVF, "HFP overflow exception")
DO_ERROR_INFO(hfp_significance_exception, SIGFPE, FPE_FLTRES, "HFP significance exception")
DO_ERROR_INFO(hfp_sqrt_exception, SIGFPE, FPE_FLTINV, "HFP square root exception")
DO_ERROR_INFO(hfp_underflow_exception, SIGFPE, FPE_FLTUND, "HFP underflow exception")
DO_ERROR_INFO(operand_exception, SIGILL, ILL_ILLOPN, "operand exception")
DO_ERROR_INFO(overflow_exception, SIGFPE, FPE_INTOVF, "fixpoint overflow exception")
DO_ERROR_INFO(privileged_op, SIGILL, ILL_PRVOPC, "privileged operation")
DO_ERROR_INFO(special_op_exception, SIGILL, ILL_ILLOPN, "special operation exception")
DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN, "specification exception");
DO_ERROR_INFO(transaction_exception, SIGILL, ILL_ILLOPN, "transaction constraint exception")
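Each of these lines expands, via the DO_ERROR_INFO() macro defined above, into a trivial handler; for example:

/* Expansion of the privileged_op line above: */
static void privileged_op(struct pt_regs *regs)
{
	do_trap(regs, SIGILL, ILL_PRVOPC, "privileged operation");
}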
static inline void do_fp_trap(struct pt_regs *regs, __u32 fpc)
{
int si_code = 0;
/* FPC[2] is Data Exception Code */
if ((fpc & 0x00000300) == 0) {
/* bits 6 and 7 of DXC are 0 iff IEEE exception */
@ -153,36 +137,35 @@ static void translation_specification_exception(struct pt_regs *regs)
static void illegal_op(struct pt_regs *regs)
{
__u8 opcode[6];
__u16 __user *location;
int is_uprobe_insn = 0;
u16 __user *location;
int signal = 0;
u16 opcode;
location = get_trap_ip(regs);
if (user_mode(regs)) {
if (get_user(*((__u16 *) opcode), (__u16 __user *) location))
if (get_user(opcode, location))
return;
if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) {
if (opcode == S390_BREAKPOINT_U16) {
if (current->ptrace)
force_sig_fault(SIGTRAP, TRAP_BRKPT, location);
else
signal = SIGILL;
#ifdef CONFIG_UPROBES
} else if (*((__u16 *) opcode) == UPROBE_SWBP_INSN) {
} else if (opcode == UPROBE_SWBP_INSN) {
is_uprobe_insn = 1;
#endif
} else
} else {
signal = SIGILL;
}
}
/*
* We got either an illegal op in kernel mode, or user space trapped
* This is either an illegal op in kernel mode, or user space trapped
* on a uprobes illegal instruction. See if kprobes or uprobes picks
* it up. If not, SIGILL.
*/
if (is_uprobe_insn || !user_mode(regs)) {
if (notify_die(DIE_BPT, "bpt", regs, 0,
3, SIGTRAP) != NOTIFY_STOP)
if (notify_die(DIE_BPT, "bpt", regs, 0, 3, SIGTRAP) != NOTIFY_STOP)
signal = SIGILL;
}
if (signal)
@ -190,18 +173,10 @@ static void illegal_op(struct pt_regs *regs)
}
NOKPROBE_SYMBOL(illegal_op);
DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN,
"specification exception");
static void vector_exception(struct pt_regs *regs)
{
int si_code, vic;
if (!cpu_has_vx()) {
do_trap(regs, SIGILL, ILL_ILLOPN, "illegal operation");
return;
}
/* get vector interrupt code from fpc */
save_user_fpu_regs();
vic = (current->thread.ufpu.fpc & 0xf00) >> 8;
@ -249,7 +224,6 @@ static void monitor_event_exception(struct pt_regs *regs)
{
if (user_mode(regs))
return;
switch (report_bug(regs->psw.addr - (regs->int_code >> 16), regs)) {
case BUG_TRAP_TYPE_NONE:
fixup_exception(regs);
@ -262,7 +236,7 @@ static void monitor_event_exception(struct pt_regs *regs)
}
}
void kernel_stack_overflow(struct pt_regs *regs)
void kernel_stack_invalid(struct pt_regs *regs)
{
/*
* Normally regs are unpoisoned by the generic entry code, but
@ -270,12 +244,12 @@ void kernel_stack_overflow(struct pt_regs *regs)
*/
kmsan_unpoison_entry_regs(regs);
bust_spinlocks(1);
printk("Kernel stack overflow.\n");
pr_emerg("Kernel stack pointer invalid\n");
show_regs(regs);
bust_spinlocks(0);
panic("Corrupt kernel stack, can't continue.");
panic("Invalid kernel stack pointer, cannot continue");
}
NOKPROBE_SYMBOL(kernel_stack_overflow);
NOKPROBE_SYMBOL(kernel_stack_invalid);
static void __init test_monitor_call(void)
{
@ -283,7 +257,7 @@ static void __init test_monitor_call(void)
if (!IS_ENABLED(CONFIG_BUG))
return;
asm volatile(
asm_inline volatile(
" mc 0,0\n"
"0: lhi %[val],0\n"
"1:\n"
@ -323,7 +297,6 @@ void noinstr __do_pgm_check(struct pt_regs *regs)
teid.val = lc->trans_exc_code;
regs->int_code = lc->pgm_int_code;
regs->int_parm_long = teid.val;
/*
* In case of a guest fault, short-circuit the fault handler and return.
* This way the sie64a() function will return 0; fault address and
@ -336,23 +309,19 @@ void noinstr __do_pgm_check(struct pt_regs *regs)
current->thread.gmap_int_code = regs->int_code & 0xffff;
return;
}
state = irqentry_enter(regs);
if (user_mode(regs)) {
update_timer_sys();
if (!static_branch_likely(&cpu_has_bear)) {
if (!cpu_has_bear()) {
if (regs->last_break < 4096)
regs->last_break = 1;
}
current->thread.last_break = regs->last_break;
}
if (lc->pgm_code & 0x0200) {
/* transaction abort */
current->thread.trap_tdb = lc->pgm_tdb;
}
if (lc->pgm_code & PGM_INT_CODE_PER) {
if (user_mode(regs)) {
struct per_event *ev = &current->thread.per_event;
@ -368,11 +337,9 @@ void noinstr __do_pgm_check(struct pt_regs *regs)
goto out;
}
}
if (!irqs_disabled_flags(regs->psw.mask))
trace_hardirqs_on();
__arch_local_irq_ssm(regs->psw.mask & ~PSW_MASK_PER);
trapnr = regs->int_code & PGM_INT_CODE_MASK;
if (trapnr)
pgm_check_table[trapnr](regs);

View File

@ -10,6 +10,7 @@
#define KMSG_COMPONENT "kvm-s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/cpufeature.h>
#include <linux/interrupt.h>
#include <linux/kvm_host.h>
#include <linux/hrtimer.h>
@ -577,7 +578,7 @@ static int __write_machine_check(struct kvm_vcpu *vcpu,
/* take care of lazy register loading */
kvm_s390_fpu_store(vcpu->run);
save_access_regs(vcpu->run->s.regs.acrs);
if (MACHINE_HAS_GS && vcpu->arch.gs_enabled)
if (cpu_has_gs() && vcpu->arch.gs_enabled)
save_gs_cb(current->thread.gs_cb);
/* Extended save area */
@ -948,8 +949,7 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
rc |= put_guest_lc(vcpu, ilen, (u16 *) __LC_PGM_ILC);
rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->gbea,
(u64 *) __LC_PGM_LAST_BREAK);
rc |= put_guest_lc(vcpu, pgm_info.code,
(u16 *)__LC_PGM_INT_CODE);
rc |= put_guest_lc(vcpu, pgm_info.code, (u16 *)__LC_PGM_CODE);
rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW,
&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
rc |= read_guest_lc(vcpu, __LC_PGM_NEW_PSW,

View File

@ -23,6 +23,7 @@
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/cpufeature.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
@ -36,6 +37,7 @@
#include <asm/access-regs.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/machine.h>
#include <asm/stp.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
@ -443,13 +445,13 @@ static void __init kvm_s390_cpu_feat_init(void)
if (test_facility(201)) /* PFCR */
pfcr_query(&kvm_s390_available_subfunc.pfcr);
if (MACHINE_HAS_ESOP)
if (machine_has_esop())
allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
/*
* We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
* 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
*/
if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
if (!sclp.has_sief2 || !machine_has_esop() || !sclp.has_64bscao ||
!test_facility(3) || !nested)
return;
allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
@ -638,7 +640,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
r = min_t(unsigned int, num_online_cpus(), r);
break;
case KVM_CAP_S390_COW:
r = MACHINE_HAS_ESOP;
r = machine_has_esop();
break;
case KVM_CAP_S390_VECTOR_REGISTERS:
r = test_facility(129);
@ -3396,7 +3398,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
/* we emulate STHYI in kvm */
set_kvm_facility(kvm->arch.model.fac_mask, 74);
set_kvm_facility(kvm->arch.model.fac_list, 74);
if (MACHINE_HAS_TLB_GUEST) {
if (machine_has_tlb_guest()) {
set_kvm_facility(kvm->arch.model.fac_mask, 147);
set_kvm_facility(kvm->arch.model.fac_list, 147);
}
@@ -3892,8 +3894,8 @@ static int kvm_s390_vcpu_setup(struct kvm_vcpu *vcpu)
	kvm_s390_vcpu_setup_model(vcpu);

-	/* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
-	if (MACHINE_HAS_ESOP)
+	/* pgste_set_pte has special handling for !machine_has_esop() */
+	if (machine_has_esop())
		vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
	if (test_kvm_facility(vcpu->kvm, 9))
		vcpu->arch.sie_block->ecb |= ECB_SRSI;
@@ -5176,7 +5178,7 @@ static void sync_regs_fmt2(struct kvm_vcpu *vcpu)
		vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
		vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
	}
-	if (MACHINE_HAS_GS) {
+	if (cpu_has_gs()) {
		preempt_disable();
		local_ctl_set_bit(2, CR2_GUARDED_STORAGE_BIT);
		if (current->thread.gs_cb) {
@@ -5242,7 +5244,7 @@ static void store_regs_fmt2(struct kvm_vcpu *vcpu)
	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
	kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
	kvm_run->s.regs.diag318 = vcpu->arch.diag318_info.val;
-	if (MACHINE_HAS_GS) {
+	if (cpu_has_gs()) {
		preempt_disable();
		local_ctl_set_bit(2, CR2_GUARDED_STORAGE_BIT);
		if (vcpu->arch.gs_enabled)


@@ -10,11 +10,13 @@
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
+#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <linux/io.h>
#include <asm/alternative.h>
+#include <asm/machine.h>
#include <asm/asm.h>

int spin_retry = -1;
@@ -37,6 +39,23 @@ static int __init spin_retry_setup(char *str)
}
__setup("spin_retry=", spin_retry_setup);

+static const struct ctl_table s390_spin_sysctl_table[] = {
+	{
+		.procname	= "spin_retry",
+		.data		= &spin_retry,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+};
+
+static int __init init_s390_spin_sysctls(void)
+{
+	register_sysctl_init("kernel", s390_spin_sysctl_table);
+	return 0;
+}
+arch_initcall(init_s390_spin_sysctls);
+
struct spin_wait {
	struct spin_wait *next, *prev;
	int node_id;
@@ -141,7 +160,7 @@ static inline void arch_spin_lock_queued(arch_spinlock_t *lp)
	ix = get_lowcore()->spinlock_index++;
	barrier();
-	lockval = SPINLOCK_LOCKVAL;	/* cpu + 1 */
+	lockval = spinlock_lockval();	/* cpu + 1 */
	node = this_cpu_ptr(&spin_wait[ix]);
	node->prev = node->next = NULL;
	node_id = node->node_id;
@@ -212,7 +231,7 @@ static inline void arch_spin_lock_queued(arch_spinlock_t *lp)
		if (count-- >= 0)
			continue;
		count = spin_retry;
-		if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(owner - 1))
+		if (!machine_is_lpar() || arch_vcpu_is_preempted(owner - 1))
			smp_yield_cpu(owner - 1);
	}
@@ -232,7 +251,7 @@ static inline void arch_spin_lock_classic(arch_spinlock_t *lp)
{
	int lockval, old, new, owner, count;

-	lockval = SPINLOCK_LOCKVAL;	/* cpu + 1 */
+	lockval = spinlock_lockval();	/* cpu + 1 */

	/* Pass the virtual CPU to the lock holder if it is not running */
	owner = arch_spin_yield_target(READ_ONCE(lp->lock), NULL);
@@ -255,7 +274,7 @@ static inline void arch_spin_lock_classic(arch_spinlock_t *lp)
		if (count-- >= 0)
			continue;
		count = spin_retry;
-		if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(owner - 1))
+		if (!machine_is_lpar() || arch_vcpu_is_preempted(owner - 1))
			smp_yield_cpu(owner - 1);
	}
}
@@ -271,7 +290,7 @@ EXPORT_SYMBOL(arch_spin_lock_wait);

int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
-	int cpu = SPINLOCK_LOCKVAL;
+	int cpu = spinlock_lockval();
	int owner, count;

	for (count = spin_retry; count > 0; count--) {
@@ -337,7 +356,7 @@ void arch_spin_relax(arch_spinlock_t *lp)
	cpu = READ_ONCE(lp->lock) & _Q_LOCK_CPU_MASK;
	if (!cpu)
		return;
-	if (MACHINE_IS_LPAR && !arch_vcpu_is_preempted(cpu - 1))
+	if (machine_is_lpar() && !arch_vcpu_is_preempted(cpu - 1))
		return;
	smp_yield_cpu(cpu - 1);
}
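spinlock_lockval() keeps the semantics the /* cpu + 1 */ comments describe: the lock word holds the owning CPU number plus one, so that zero can still mean "unlocked". A minimal sketch, assuming the value is precomputed in the lowcore at boot:

	static inline int spinlock_lockval(void)
	{
		/* set up once per CPU as smp_processor_id() + 1 */
		return get_lowcore()->spinlock_lockval;
	}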


@@ -31,6 +31,60 @@ void debug_user_asce(int exit)
}
#endif /* CONFIG_DEBUG_ENTRY */

+union oac {
+	unsigned int val;
+	struct {
+		struct {
+			unsigned short key : 4;
+			unsigned short	   : 4;
+			unsigned short as  : 2;
+			unsigned short	   : 4;
+			unsigned short k   : 1;
+			unsigned short a   : 1;
+		} oac1;
+		struct {
+			unsigned short key : 4;
+			unsigned short	   : 4;
+			unsigned short as  : 2;
+			unsigned short	   : 4;
+			unsigned short k   : 1;
+			unsigned short a   : 1;
+		} oac2;
+	};
+};
+
+static uaccess_kmsan_or_inline __must_check unsigned long
+raw_copy_from_user_key(void *to, const void __user *from, unsigned long size, unsigned long key)
+{
+	unsigned long osize;
+	union oac spec = {
+		.oac2.key = key,
+		.oac2.as = PSW_BITS_AS_SECONDARY,
+		.oac2.k = 1,
+		.oac2.a = 1,
+	};
+	int cc;
+
+	while (1) {
+		osize = size;
+		asm_inline volatile(
+			"	lr	%%r0,%[spec]\n"
+			"0:	mvcos	%[to],%[from],%[size]\n"
+			"1:	nopr	%%r7\n"
+			CC_IPM(cc)
+			EX_TABLE_UA_MVCOS_FROM(0b, 0b)
+			EX_TABLE_UA_MVCOS_FROM(1b, 0b)
+			: CC_OUT(cc, cc), [size] "+d" (size), [to] "=Q" (*(char *)to)
+			: [spec] "d" (spec.val), [from] "Q" (*(const char __user *)from)
+			: CC_CLOBBER_LIST("memory", "0"));
+		if (CC_TRANSFORM(cc) == 0)
+			return osize - size;
+		size -= 4096;
+		to += 4096;
+		from += 4096;
+	}
+}
+
unsigned long _copy_from_user_key(void *to, const void __user *from,
				  unsigned long n, unsigned long key)
{
@@ -48,6 +102,38 @@ unsigned long _copy_from_user_key(void *to, const void __user *from,
}
EXPORT_SYMBOL(_copy_from_user_key);

+static uaccess_kmsan_or_inline __must_check unsigned long
+raw_copy_to_user_key(void __user *to, const void *from, unsigned long size, unsigned long key)
+{
+	unsigned long osize;
+	union oac spec = {
+		.oac1.key = key,
+		.oac1.as = PSW_BITS_AS_SECONDARY,
+		.oac1.k = 1,
+		.oac1.a = 1,
+	};
+	int cc;
+
+	while (1) {
+		osize = size;
+		asm_inline volatile(
+			"	lr	%%r0,%[spec]\n"
+			"0:	mvcos	%[to],%[from],%[size]\n"
+			"1:	nopr	%%r7\n"
+			CC_IPM(cc)
+			EX_TABLE_UA_MVCOS_TO(0b, 0b)
+			EX_TABLE_UA_MVCOS_TO(1b, 0b)
+			: CC_OUT(cc, cc), [size] "+d" (size), [to] "=Q" (*(char __user *)to)
+			: [spec] "d" (spec.val), [from] "Q" (*(const char *)from)
+			: CC_CLOBBER_LIST("memory", "0"));
+		if (CC_TRANSFORM(cc) == 0)
+			return osize - size;
+		size -= 4096;
+		to += 4096;
+		from += 4096;
+	}
+}
+
unsigned long _copy_to_user_key(void __user *to, const void *from,
				unsigned long n, unsigned long key)
{
@@ -58,39 +144,3 @@ unsigned long _copy_to_user_key(void __user *to, const void *from,
	return raw_copy_to_user_key(to, from, n, key);
}
EXPORT_SYMBOL(_copy_to_user_key);
-
-unsigned long __clear_user(void __user *to, unsigned long size)
-{
-	unsigned long rem;
-	union oac spec = {
-		.oac1.as = PSW_BITS_AS_SECONDARY,
-		.oac1.a = 1,
-	};
-
-	asm volatile(
-		"	lr	0,%[spec]\n"
-		"0:	mvcos	0(%[to]),0(%[zeropg]),%[size]\n"
-		"1:	jz	5f\n"
-		"	algr	%[size],%[val]\n"
-		"	slgr	%[to],%[val]\n"
-		"	j	0b\n"
-		"2:	la	%[rem],4095(%[to])\n"	/* rem = to + 4095 */
-		"	nr	%[rem],%[val]\n"	/* rem = (to + 4095) & -4096 */
-		"	slgr	%[rem],%[to]\n"
-		"	clgr	%[size],%[rem]\n"	/* copy crosses next page boundary? */
-		"	jnh	6f\n"
-		"3:	mvcos	0(%[to]),0(%[zeropg]),%[rem]\n"
-		"4:	slgr	%[size],%[rem]\n"
-		"	j	6f\n"
-		"5:	slgr	%[size],%[size]\n"
-		"6:\n"
-		EX_TABLE(0b, 2b)
-		EX_TABLE(1b, 2b)
-		EX_TABLE(3b, 6b)
-		EX_TABLE(4b, 6b)
-		: [size] "+&a" (size), [to] "+&a" (to), [rem] "=&a" (rem)
-		: [val] "a" (-4096UL), [zeropg] "a" (empty_zero_page), [spec] "d" (spec.val)
-		: "cc", "memory", "0");
-	return size;
-}
-EXPORT_SYMBOL(__clear_user);
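_copy_from_user_key() behaves like copy_from_user(), but performs the access with the given storage key and returns the number of bytes that could not be copied. A hypothetical caller (buffer, pointer and key value are made up for illustration):

	char buf[64];
	unsigned long rest;

	rest = _copy_from_user_key(buf, uptr, sizeof(buf), 9);
	if (rest)
		return -EFAULT;	/* only sizeof(buf) - rest bytes arrived */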


@@ -1,4 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
+#include <linux/cpufeature.h>
#include <linux/set_memory.h>
#include <linux/ptdump.h>
#include <linux/seq_file.h>
@@ -82,7 +84,7 @@ static void note_prot_wx(struct pg_state *st, unsigned long addr)
	 * in which case we have two lpswe instructions in lowcore that need
	 * to be executable.
	 */
-	if (addr == PAGE_SIZE && (nospec_uses_trampoline() || !static_key_enabled(&cpu_has_bear)))
+	if (addr == PAGE_SIZE && (nospec_uses_trampoline() || !cpu_has_bear()))
		return;
	WARN_ONCE(IS_ENABLED(CONFIG_DEBUG_WX),
		  "s390/mm: Found insecure W+X mapping at address %pS\n",
@@ -167,7 +169,7 @@ bool ptdump_check_wx(void)
		},
	};

-	if (!MACHINE_HAS_NX)
+	if (!cpu_has_nx())
		return true;
	ptdump_walk_pgd(&st.ptdump, &init_mm, NULL);
	if (st.wx_pages) {
@@ -176,7 +178,7 @@ bool ptdump_check_wx(void)
		return false;
	} else {
		pr_info("Checked W+X mappings: passed, no %sW+X pages found\n",
-			(nospec_uses_trampoline() || !static_key_enabled(&cpu_has_bear)) ?
+			(nospec_uses_trampoline() || !cpu_has_bear()) ?
			"unexpected " : "");
		return true;


@@ -73,6 +73,49 @@ static bool ex_handler_fpc(const struct exception_table_entry *ex, struct pt_regs *regs)
	return true;
}

+struct insn_ssf {
+	u64 opc1 : 8;
+	u64 r3	 : 4;
+	u64 opc2 : 4;
+	u64 b1	 : 4;
+	u64 d1	 : 12;
+	u64 b2	 : 4;
+	u64 d2	 : 12;
+} __packed;
+
+static bool ex_handler_ua_mvcos(const struct exception_table_entry *ex,
+				bool from, struct pt_regs *regs)
+{
+	unsigned long uaddr, remainder;
+	struct insn_ssf *insn;
+
+	/*
+	 * If the faulting user space access crossed a page boundary retry by
+	 * limiting the access to the first page (adjust length accordingly).
+	 * Then the mvcos instruction will either complete with condition code
+	 * zero, or generate another fault where the user space access did not
+	 * cross a page boundary.
+	 *
+	 * If the faulting user space access did not cross a page boundary set
+	 * length to zero and retry. In this case no user space access will
+	 * happen, and the mvcos instruction will complete with condition code
+	 * zero.
+	 *
+	 * In both cases the instruction will complete with condition code
+	 * zero (copying finished), and the register which contains the
+	 * length indicates the number of bytes copied.
+	 */
+	regs->psw.addr = extable_fixup(ex);
+	insn = (struct insn_ssf *)regs->psw.addr;
+	if (from)
+		uaddr = regs->gprs[insn->b2] + insn->d2;
+	else
+		uaddr = regs->gprs[insn->b1] + insn->d1;
+	remainder = PAGE_SIZE - (uaddr & (PAGE_SIZE - 1));
+	if (regs->gprs[insn->r3] <= remainder)
+		remainder = 0;
+	regs->gprs[insn->r3] = remainder;
+	return true;
+}
+
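The remainder arithmetic can be checked with made-up numbers; this illustrative helper mirrors what the handler does to the length register:

	static unsigned long mvcos_retry_len(unsigned long uaddr, unsigned long len)
	{
		unsigned long remainder = PAGE_SIZE - (uaddr & (PAGE_SIZE - 1));

		/* access fits into the first page: retry with length zero */
		return (len <= remainder) ? 0 : remainder;
	}

	/* mvcos_retry_len(0x7fff1fe0, 256) == 32, mvcos_retry_len(0x7fff1fe0, 16) == 0 */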
bool fixup_exception(struct pt_regs *regs)
{
	const struct exception_table_entry *ex;
@@ -95,6 +138,10 @@ bool fixup_exception(struct pt_regs *regs)
		return ex_handler_zeropad(ex, regs);
	case EX_TYPE_FPC:
		return ex_handler_fpc(ex, regs);
+	case EX_TYPE_UA_MVCOS_TO:
+		return ex_handler_ua_mvcos(ex, false, regs);
+	case EX_TYPE_UA_MVCOS_FROM:
+		return ex_handler_ua_mvcos(ex, true, regs);
	}
	panic("invalid exception table entry");
}
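fixup_exception() dispatches on a type field carried in each exception table entry; a sketch of the assumed s390 entry layout (relative offsets, as with CONFIG_ARCH_HAS_RELATIVE_EXTABLE):

	struct exception_table_entry {
		int insn, fixup;	/* relative offsets to insn and fixup code */
		short type, data;	/* EX_TYPE_* plus handler-specific data */
	};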


@@ -21,6 +21,7 @@
#include <linux/ioport.h>
#include <linux/refcount.h>
#include <linux/pgtable.h>
+#include <asm/machine.h>
#include <asm/diag.h>
#include <asm/page.h>
#include <asm/ebcdic.h>
@@ -255,7 +256,7 @@ segment_type (char* name)
	int rc;
	struct dcss_segment seg;

-	if (!MACHINE_IS_VM)
+	if (!machine_is_vm())
		return -ENOSYS;
	dcss_mkname(name, seg.dcss_name);
@@ -418,7 +419,7 @@ segment_load (char *name, int do_nonshared, unsigned long *addr,
	struct dcss_segment *seg;
	int rc;

-	if (!MACHINE_IS_VM)
+	if (!machine_is_vm())
		return -ENOSYS;
	mutex_lock(&dcss_lock);
@@ -540,7 +541,7 @@ segment_unload(char *name)
	unsigned long dummy;
	struct dcss_segment *seg;

-	if (!MACHINE_IS_VM)
+	if (!machine_is_vm())
		return;
	mutex_lock(&dcss_lock);
@@ -572,7 +573,7 @@ segment_save(char *name)
	char cmd2[80];
	int i, response;

-	if (!MACHINE_IS_VM)
+	if (!machine_is_vm())
		return;
	mutex_lock(&dcss_lock);


@@ -11,11 +11,11 @@
#include <linux/kernel_stat.h>
#include <linux/mmu_context.h>
+#include <linux/cpufeature.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
-#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
@@ -46,16 +46,6 @@
#include <asm/uv.h>
#include "../kernel/entry.h"

-static DEFINE_STATIC_KEY_FALSE(have_store_indication);
-
-static int __init fault_init(void)
-{
-	if (test_facility(75))
-		static_branch_enable(&have_store_indication);
-	return 0;
-}
-early_initcall(fault_init);
-
/*
 * Find out which address space caused the exception.
 */
@@ -81,7 +71,7 @@ static __always_inline bool fault_is_write(struct pt_regs *regs)
{
	union teid teid = { .val = regs->int_parm_long };

-	if (static_branch_likely(&have_store_indication))
+	if (test_facility(75))
		return teid.fsi == TEID_FSI_STORE;
	return false;
}
@@ -175,6 +165,23 @@ static void dump_fault_info(struct pt_regs *regs)

int show_unhandled_signals = 1;

+static const struct ctl_table s390_fault_sysctl_table[] = {
+	{
+		.procname	= "userprocess_debug",
+		.data		= &show_unhandled_signals,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+};
+
+static int __init init_s390_fault_sysctls(void)
+{
+	register_sysctl_init("kernel", s390_fault_sysctl_table);
+	return 0;
+}
+arch_initcall(init_s390_fault_sysctls);
+
void report_user_fault(struct pt_regs *regs, long signr, int is_mm_fault)
{
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);
@@ -377,7 +384,7 @@ void do_protection_exception(struct pt_regs *regs)
		 */
		return handle_fault_error_nolock(regs, 0);
	}
-	if (unlikely(MACHINE_HAS_NX && teid.b56)) {
+	if (unlikely(cpu_has_nx() && teid.b56)) {
		regs->int_parm_long = (teid.addr * PAGE_SIZE) | (regs->psw.addr & PAGE_MASK);
		return handle_fault_error_nolock(regs, SEGV_ACCERR);
	}


@@ -8,6 +8,7 @@
 *    Janosch Frank <frankja@linux.vnet.ibm.com>
 */

+#include <linux/cpufeature.h>
#include <linux/kernel.h>
#include <linux/pagewalk.h>
#include <linux/swap.h>
@@ -20,6 +21,7 @@
#include <linux/pgtable.h>
#include <asm/page-states.h>
#include <asm/pgalloc.h>
+#include <asm/machine.h>
#include <asm/gmap.h>
#include <asm/page.h>
#include <asm/tlb.h>
@@ -135,7 +137,7 @@ EXPORT_SYMBOL_GPL(gmap_create);

static void gmap_flush_tlb(struct gmap *gmap)
{
-	if (MACHINE_HAS_IDTE)
+	if (cpu_has_idte())
		__tlb_flush_idte(gmap->asce);
	else
		__tlb_flush_global();
@@ -2025,10 +2027,10 @@ static void gmap_pmdp_xchg(struct gmap *gmap, pmd_t *pmdp, pmd_t new,
	gaddr &= HPAGE_MASK;
	pmdp_notify_gmap(gmap, pmdp, gaddr);
	new = clear_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_GMAP_IN));
-	if (MACHINE_HAS_TLB_GUEST)
+	if (machine_has_tlb_guest())
		__pmdp_idte(gaddr, (pmd_t *)pmdp, IDTE_GUEST_ASCE, gmap->asce,
			    IDTE_GLOBAL);
-	else if (MACHINE_HAS_IDTE)
+	else if (cpu_has_idte())
		__pmdp_idte(gaddr, (pmd_t *)pmdp, 0, 0, IDTE_GLOBAL);
	else
		__pmdp_csp(pmdp);
@@ -2103,10 +2105,10 @@ void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr)
			WARN_ON(pmd_val(*pmdp) & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE |
						   _SEGMENT_ENTRY_GMAP_UC |
						   _SEGMENT_ENTRY));
-			if (MACHINE_HAS_TLB_GUEST)
+			if (machine_has_tlb_guest())
				__pmdp_idte(gaddr, pmdp, IDTE_GUEST_ASCE,
					    gmap->asce, IDTE_LOCAL);
-			else if (MACHINE_HAS_IDTE)
+			else if (cpu_has_idte())
				__pmdp_idte(gaddr, pmdp, 0, 0, IDTE_LOCAL);
			*pmdp = __pmd(_SEGMENT_ENTRY_EMPTY);
		}
@@ -2136,10 +2138,10 @@ void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr)
		WARN_ON(pmd_val(*pmdp) & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE |
					   _SEGMENT_ENTRY_GMAP_UC |
					   _SEGMENT_ENTRY));
-		if (MACHINE_HAS_TLB_GUEST)
+		if (machine_has_tlb_guest())
			__pmdp_idte(gaddr, pmdp, IDTE_GUEST_ASCE,
				    gmap->asce, IDTE_GLOBAL);
-		else if (MACHINE_HAS_IDTE)
+		else if (cpu_has_idte())
			__pmdp_idte(gaddr, pmdp, 0, 0, IDTE_GLOBAL);
		else
			__pmdp_csp(pmdp);
@@ -2258,9 +2260,6 @@ int s390_enable_sie(void)
	/* Do we have pgstes? if yes, we are done */
	if (mm_has_pgste(mm))
		return 0;
-	/* Fail if the page tables are 2K */
-	if (!mm_alloc_pgste(mm))
-		return -EINVAL;
	mmap_write_lock(mm);
	mm->context.has_pgste = 1;
	/* split thp mappings and disable thp for future mappings */


@@ -9,12 +9,13 @@
#define KMSG_COMPONENT "hugetlb"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

-#include <asm/pgalloc.h>
+#include <linux/cpufeature.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/sched/mm.h>
#include <linux/security.h>
+#include <asm/pgalloc.h>

/*
 * If the bit selected by single-bit bitmask "a" is set within "x", move
@@ -248,9 +249,9 @@ pte_t *huge_pte_offset(struct mm_struct *mm,

bool __init arch_hugetlb_valid_size(unsigned long size)
{
-	if (MACHINE_HAS_EDAT1 && size == PMD_SIZE)
+	if (cpu_has_edat1() && size == PMD_SIZE)
		return true;
-	else if (MACHINE_HAS_EDAT2 && size == PUD_SIZE)
+	else if (cpu_has_edat2() && size == PUD_SIZE)
		return true;
	else
		return false;
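The cpu_has_*() helpers used here are assumed to be thin wrappers around test_facility(); a sketch with the facility numbers these checks presumably use (EDAT-1 is facility 8, EDAT-2 is facility 78 -- an assumption, not taken from this diff):

	#define cpu_has_edat1()	test_facility(8)
	#define cpu_has_edat2()	test_facility(78)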


@@ -8,6 +8,7 @@
 * Copyright (C) 1995 Linus Torvalds
 */

+#include <linux/cpufeature.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
@@ -117,7 +118,7 @@ void mark_rodata_ro(void)
{
	unsigned long size = __end_ro_after_init - __start_ro_after_init;

-	if (MACHINE_HAS_NX)
+	if (cpu_has_nx())
		system_ctl_set_bit(0, CR0_INSTRUCTION_EXEC_PROTECTION_BIT);
	__set_memory_ro(__start_ro_after_init, __end_ro_after_init);
	pr_info("Write protected read-only-after-init data: %luk\n", size >> 10);
@@ -174,7 +175,6 @@ void __init mem_init(void)
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
	pv_init();
-	kfence_split_mapping();

	/* this will put all low memory onto the freelists */
	memblock_free_all();
@@ -285,7 +285,7 @@ int arch_add_memory(int nid, u64 start, u64 size,
	unsigned long size_pages = PFN_DOWN(size);
	int rc;

-	if (WARN_ON_ONCE(params->pgprot.pgprot != PAGE_KERNEL.pgprot))
+	if (WARN_ON_ONCE(pgprot_val(params->pgprot) != pgprot_val(PAGE_KERNEL)))
		return -EINVAL;

	VM_BUG_ON(!mhp_range_allowed(start, size, true));


@@ -51,7 +51,6 @@ static inline unsigned long mmap_base(unsigned long rnd,
{
	unsigned long gap = rlim_stack->rlim_cur;
	unsigned long pad = stack_maxrandom_size() + stack_guard_gap;
-	unsigned long gap_min, gap_max;

	/* Values close to RLIM_INFINITY can overflow. */
	if (gap + pad > gap)
@@ -61,13 +60,7 @@ static inline unsigned long mmap_base(unsigned long rnd,
	 * Top of mmap area (just below the process stack).
	 * Leave at least a ~128 MB hole.
	 */
-	gap_min = SZ_128M;
-	gap_max = (STACK_TOP / 6) * 5;
-
-	if (gap < gap_min)
-		gap = gap_min;
-	else if (gap > gap_max)
-		gap = gap_max;
-
+	gap = clamp(gap, SZ_128M, (STACK_TOP / 6) * 5);
	return PAGE_ALIGN(STACK_TOP - gap - rnd);
}
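clamp() condenses the removed min/max chain; the two forms are equivalent:

	/* before */
	if (gap < SZ_128M)
		gap = SZ_128M;
	else if (gap > (STACK_TOP / 6) * 5)
		gap = (STACK_TOP / 6) * 5;

	/* after */
	gap = clamp(gap, SZ_128M, (STACK_TOP / 6) * 5);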


@@ -3,6 +3,7 @@
 * Copyright IBM Corp. 2011
 * Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
 */
+#include <linux/cpufeature.h>
#include <linux/hugetlb.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
@@ -27,7 +28,7 @@ void __storage_key_init_range(unsigned long start, unsigned long end)
	unsigned long boundary, size;

	while (start < end) {
-		if (MACHINE_HAS_EDAT1) {
+		if (cpu_has_edat1()) {
			/* set storage keys for a 1MB frame */
			size = 1UL << 20;
			boundary = (start + size) & ~(size - 1);
@@ -63,7 +64,7 @@ static void pgt_set(unsigned long *old, unsigned long new, unsigned long addr,
	unsigned long *table, mask;

	mask = 0;
-	if (MACHINE_HAS_EDAT2) {
+	if (cpu_has_edat2()) {
		switch (dtt) {
		case CRDTE_DTT_REGION3:
			mask = ~(PTRS_PER_PUD * sizeof(pud_t) - 1);
@@ -77,7 +78,7 @@ static void pgt_set(unsigned long *old, unsigned long new, unsigned long addr,
		}
		table = (unsigned long *)((unsigned long)old & mask);
		crdte(*old, new, table, dtt, addr, get_lowcore()->kernel_asce.val);
-	} else if (MACHINE_HAS_IDTE) {
+	} else if (cpu_has_idte()) {
		cspg(old, *old, new);
	} else {
		csp((unsigned int *)old + 1, *old, new);
@@ -373,7 +374,7 @@ int __set_memory(unsigned long addr, unsigned long numpages, unsigned long flags)
	unsigned long end;
	int rc;

-	if (!MACHINE_HAS_NX)
+	if (!cpu_has_nx())
		flags &= ~(SET_MEMORY_NX | SET_MEMORY_X);
	if (!flags)
		return 0;


@@ -56,7 +56,7 @@ int __pfault_init(void)
	if (pfault_disable)
		return rc;
	diag_stat_inc(DIAG_STAT_X258);
-	asm volatile(
+	asm_inline volatile(
		"	diag	%[refbk],%[rc],0x258\n"
		"0:	nopr	%%r7\n"
		EX_TABLE(0b, 0b)
@@ -78,7 +78,7 @@ void __pfault_fini(void)
	if (pfault_disable)
		return;
	diag_stat_inc(DIAG_STAT_X258);
-	asm volatile(
+	asm_inline volatile(
		"	diag	%[refbk],0,0x258\n"
		"0:	nopr	%%r7\n"
		EX_TABLE(0b, 0b)
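asm_inline makes the compiler cost an asm statement as a single instruction instead of estimating its size from the assembler string, so functions carrying large EX_TABLE or ALTERNATIVE blocks stay candidates for inlining. In current kernels it boils down to roughly this (simplified):

	#ifdef CONFIG_CC_HAS_ASM_INLINE
	#define asm_inline	asm __inline
	#else
	#define asm_inline	asm
	#endif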


@@ -16,31 +16,6 @@
#include <asm/tlb.h>
#include <asm/tlbflush.h>

-#ifdef CONFIG_PGSTE
-
-int page_table_allocate_pgste = 0;
-EXPORT_SYMBOL(page_table_allocate_pgste);
-
-static const struct ctl_table page_table_sysctl[] = {
-	{
-		.procname	= "allocate_pgste",
-		.data		= &page_table_allocate_pgste,
-		.maxlen		= sizeof(int),
-		.mode		= S_IRUGO | S_IWUSR,
-		.proc_handler	= proc_dointvec_minmax,
-		.extra1		= SYSCTL_ZERO,
-		.extra2		= SYSCTL_ONE,
-	},
-};
-
-static int __init page_table_register_sysctl(void)
-{
-	return register_sysctl("vm", page_table_sysctl) ? 0 : -ENOMEM;
-}
-__initcall(page_table_register_sysctl);
-
-#endif /* CONFIG_PGSTE */
-
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL, CRST_ALLOC_ORDER);


@@ -4,6 +4,7 @@
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

+#include <linux/cpufeature.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -23,6 +24,7 @@
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/page-states.h>
+#include <asm/machine.h>

pgprot_t pgprot_writecombine(pgprot_t prot)
{
@@ -34,22 +36,12 @@ pgprot_t pgprot_writecombine(pgprot_t prot)
}
EXPORT_SYMBOL_GPL(pgprot_writecombine);

-pgprot_t pgprot_writethrough(pgprot_t prot)
-{
-	/*
-	 * mio_wb_bit_mask may be set on a different CPU, but it is only set
-	 * once at init and only read afterwards.
-	 */
-	return __pgprot(pgprot_val(prot) & ~mio_wb_bit_mask);
-}
-EXPORT_SYMBOL_GPL(pgprot_writethrough);
-
static inline void ptep_ipte_local(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, int nodat)
{
	unsigned long opt, asce;

-	if (MACHINE_HAS_TLB_GUEST) {
+	if (machine_has_tlb_guest()) {
		opt = 0;
		asce = READ_ONCE(mm->context.gmap_asce);
		if (asce == 0UL || nodat)
@@ -69,7 +61,7 @@ static inline void ptep_ipte_global(struct mm_struct *mm, unsigned long addr,
{
	unsigned long opt, asce;

-	if (MACHINE_HAS_TLB_GUEST) {
+	if (machine_has_tlb_guest()) {
		opt = 0;
		asce = READ_ONCE(mm->context.gmap_asce);
		if (asce == 0UL || nodat)
@@ -94,7 +86,7 @@ static inline pte_t ptep_flush_direct(struct mm_struct *mm,
	if (unlikely(pte_val(old) & _PAGE_INVALID))
		return old;
	atomic_inc(&mm->context.flush_count);
-	if (MACHINE_HAS_TLB_LC &&
+	if (cpu_has_tlb_lc() &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		ptep_ipte_local(mm, addr, ptep, nodat);
	else
@@ -173,10 +165,10 @@ static inline pgste_t pgste_update_all(pte_t pte, pgste_t pgste,
	skey = (unsigned long) page_get_storage_key(address);
	bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
	/* Transfer page changed & referenced bit to guest bits in pgste */
-	pgste_val(pgste) |= bits << 48;		/* GR bit & GC bit */
+	pgste = set_pgste_bit(pgste, bits << 48);	/* GR bit & GC bit */
	/* Copy page access key and fetch protection bit to pgste */
-	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
-	pgste_val(pgste) |= (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
+	pgste = clear_pgste_bit(pgste, PGSTE_ACC_BITS | PGSTE_FP_BIT);
+	pgste = set_pgste_bit(pgste, (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56);
#endif
	return pgste;
@@ -210,7 +202,7 @@ static inline pgste_t pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
	if ((pte_val(entry) & _PAGE_PRESENT) &&
	    (pte_val(entry) & _PAGE_WRITE) &&
	    !(pte_val(entry) & _PAGE_INVALID)) {
-		if (!MACHINE_HAS_ESOP) {
+		if (!machine_has_esop()) {
			/*
			 * Without enhanced suppression-on-protection force
			 * the dirty bit on for all writable ptes.
@@ -220,7 +212,7 @@ static inline pgste_t pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
		}
		if (!(pte_val(entry) & _PAGE_PROTECT))
			/* This pte allows write access, set user-dirty */
-			pgste_val(pgste) |= PGSTE_UC_BIT;
+			pgste = set_pgste_bit(pgste, PGSTE_UC_BIT);
	}
#endif
	set_pte(ptep, entry);
@@ -236,7 +228,7 @@ static inline pgste_t pgste_pte_notify(struct mm_struct *mm,
	bits = pgste_val(pgste) & (PGSTE_IN_BIT | PGSTE_VSIE_BIT);
	if (bits) {
-		pgste_val(pgste) ^= bits;
+		pgste = __pgste(pgste_val(pgste) ^ bits);
		ptep_notify(mm, addr, ptep, bits);
	}
#endif
@@ -374,7 +366,7 @@ void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
static inline void pmdp_idte_local(struct mm_struct *mm,
				   unsigned long addr, pmd_t *pmdp)
{
-	if (MACHINE_HAS_TLB_GUEST)
+	if (machine_has_tlb_guest())
		__pmdp_idte(addr, pmdp, IDTE_NODAT | IDTE_GUEST_ASCE,
			    mm->context.asce, IDTE_LOCAL);
	else
@@ -386,12 +378,12 @@ static inline void pmdp_idte_local(struct mm_struct *mm,
static inline void pmdp_idte_global(struct mm_struct *mm,
				    unsigned long addr, pmd_t *pmdp)
{
-	if (MACHINE_HAS_TLB_GUEST) {
+	if (machine_has_tlb_guest()) {
		__pmdp_idte(addr, pmdp, IDTE_NODAT | IDTE_GUEST_ASCE,
			    mm->context.asce, IDTE_GLOBAL);
		if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
			gmap_pmdp_idte_global(mm, addr);
-	} else if (MACHINE_HAS_IDTE) {
+	} else if (cpu_has_idte()) {
		__pmdp_idte(addr, pmdp, 0, 0, IDTE_GLOBAL);
		if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
			gmap_pmdp_idte_global(mm, addr);
@@ -411,7 +403,7 @@ static inline pmd_t pmdp_flush_direct(struct mm_struct *mm,
	if (pmd_val(old) & _SEGMENT_ENTRY_INVALID)
		return old;
	atomic_inc(&mm->context.flush_count);
-	if (MACHINE_HAS_TLB_LC &&
+	if (cpu_has_tlb_lc() &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		pmdp_idte_local(mm, addr, pmdp);
	else
@@ -505,7 +497,7 @@ EXPORT_SYMBOL(pmdp_xchg_lazy);
static inline void pudp_idte_local(struct mm_struct *mm,
				   unsigned long addr, pud_t *pudp)
{
-	if (MACHINE_HAS_TLB_GUEST)
+	if (machine_has_tlb_guest())
		__pudp_idte(addr, pudp, IDTE_NODAT | IDTE_GUEST_ASCE,
			    mm->context.asce, IDTE_LOCAL);
	else
@@ -515,10 +507,10 @@ static inline void pudp_idte_local(struct mm_struct *mm,
static inline void pudp_idte_global(struct mm_struct *mm,
				    unsigned long addr, pud_t *pudp)
{
-	if (MACHINE_HAS_TLB_GUEST)
+	if (machine_has_tlb_guest())
		__pudp_idte(addr, pudp, IDTE_NODAT | IDTE_GUEST_ASCE,
			    mm->context.asce, IDTE_GLOBAL);
-	else if (MACHINE_HAS_IDTE)
+	else if (cpu_has_idte())
		__pudp_idte(addr, pudp, 0, 0, IDTE_GLOBAL);
	else
		/*
@@ -537,7 +529,7 @@ static inline pud_t pudp_flush_direct(struct mm_struct *mm,
	if (pud_val(old) & _REGION_ENTRY_INVALID)
		return old;
	atomic_inc(&mm->context.flush_count);
-	if (MACHINE_HAS_TLB_LC &&
+	if (cpu_has_tlb_lc() &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		pudp_idte_local(mm, addr, pudp);
	else
@@ -609,7 +601,7 @@ void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
	/* the mm_has_pgste() check is done in set_pte_at() */
	preempt_disable();
	pgste = pgste_get_lock(ptep);
-	pgste_val(pgste) &= ~_PGSTE_GPS_ZERO;
+	pgste = clear_pgste_bit(pgste, _PGSTE_GPS_ZERO);
	pgste_set_key(ptep, pgste, entry, mm);
	pgste = pgste_set_pte(ptep, pgste, entry);
	pgste_set_unlock(ptep, pgste);
@@ -622,7 +614,7 @@ void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
	preempt_disable();
	pgste = pgste_get_lock(ptep);
-	pgste_val(pgste) |= PGSTE_IN_BIT;
+	pgste = set_pgste_bit(pgste, PGSTE_IN_BIT);
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}
@@ -667,7 +659,7 @@ int ptep_force_prot(struct mm_struct *mm, unsigned long addr,
		entry = clear_pte_bit(entry, __pgprot(_PAGE_INVALID));
		entry = set_pte_bit(entry, __pgprot(_PAGE_PROTECT));
	}
-	pgste_val(pgste) |= bit;
+	pgste = set_pgste_bit(pgste, bit);
	pgste = pgste_set_pte(ptep, pgste, entry);
	pgste_set_unlock(ptep, pgste);
	return 0;
@@ -687,7 +679,7 @@ int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
	if (!(pte_val(spte) & _PAGE_INVALID) &&
	    !((pte_val(spte) & _PAGE_PROTECT) &&
	      !(pte_val(pte) & _PAGE_PROTECT))) {
-		pgste_val(spgste) |= PGSTE_VSIE_BIT;
+		spgste = set_pgste_bit(spgste, PGSTE_VSIE_BIT);
		tpgste = pgste_get_lock(tptep);
		tpte = __pte((pte_val(spte) & PAGE_MASK) |
			     (pte_val(pte) & _PAGE_PROTECT));
@@ -745,7 +737,7 @@ void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
		pte_clear(mm, addr, ptep);
	}
	if (reset)
-		pgste_val(pgste) &= ~(_PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT);
+		pgste = clear_pgste_bit(pgste, _PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT);
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}
@@ -758,8 +750,8 @@ void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
	/* Clear storage key ACC and F, but set R/C */
	preempt_disable();
	pgste = pgste_get_lock(ptep);
-	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
-	pgste_val(pgste) |= PGSTE_GR_BIT | PGSTE_GC_BIT;
+	pgste = clear_pgste_bit(pgste, PGSTE_ACC_BITS | PGSTE_FP_BIT);
+	pgste = set_pgste_bit(pgste, PGSTE_GR_BIT | PGSTE_GC_BIT);
	ptev = pte_val(*ptep);
	if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE))
		page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 0);
@@ -780,13 +772,13 @@ bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long addr,
	pgste = pgste_get_lock(ptep);
	dirty = !!(pgste_val(pgste) & PGSTE_UC_BIT);
-	pgste_val(pgste) &= ~PGSTE_UC_BIT;
+	pgste = clear_pgste_bit(pgste, PGSTE_UC_BIT);
	pte = *ptep;
	if (dirty && (pte_val(pte) & _PAGE_PRESENT)) {
		pgste = pgste_pte_notify(mm, addr, ptep, pgste);
		nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
		ptep_ipte_global(mm, addr, ptep, nodat);
-		if (MACHINE_HAS_ESOP || !(pte_val(pte) & _PAGE_WRITE))
+		if (machine_has_esop() || !(pte_val(pte) & _PAGE_WRITE))
			pte = set_pte_bit(pte, __pgprot(_PAGE_PROTECT));
		else
			pte = set_pte_bit(pte, __pgprot(_PAGE_INVALID));
@@ -842,11 +834,11 @@ int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
	if (!ptep)
		goto again;
	new = old = pgste_get_lock(ptep);
-	pgste_val(new) &= ~(PGSTE_GR_BIT | PGSTE_GC_BIT |
-			    PGSTE_ACC_BITS | PGSTE_FP_BIT);
+	new = clear_pgste_bit(new, PGSTE_GR_BIT | PGSTE_GC_BIT |
+				   PGSTE_ACC_BITS | PGSTE_FP_BIT);
	keyul = (unsigned long) key;
-	pgste_val(new) |= (keyul & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48;
-	pgste_val(new) |= (keyul & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
+	new = set_pgste_bit(new, (keyul & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48);
+	new = set_pgste_bit(new, (keyul & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56);
	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
		unsigned long bits, skey;
@@ -857,12 +849,12 @@ int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
		/* Set storage key ACC and FP */
		page_set_storage_key(paddr, skey, !nq);
		/* Merge host changed & referenced into pgste */
-		pgste_val(new) |= bits << 52;
+		new = set_pgste_bit(new, bits << 52);
	}
	/* changing the guest storage key is considered a change of the page */
	if ((pgste_val(new) ^ pgste_val(old)) &
	    (PGSTE_ACC_BITS | PGSTE_FP_BIT | PGSTE_GR_BIT | PGSTE_GC_BIT))
-		pgste_val(new) |= PGSTE_UC_BIT;
+		new = set_pgste_bit(new, PGSTE_UC_BIT);
	pgste_set_unlock(ptep, new);
	pte_unmap_unlock(ptep, ptl);
@@ -950,19 +942,19 @@ int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr)
		goto again;
	new = old = pgste_get_lock(ptep);
	/* Reset guest reference bit only */
-	pgste_val(new) &= ~PGSTE_GR_BIT;
+	new = clear_pgste_bit(new, PGSTE_GR_BIT);
	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
		paddr = pte_val(*ptep) & PAGE_MASK;
		cc = page_reset_referenced(paddr);
		/* Merge real referenced bit into host-set */
-		pgste_val(new) |= ((unsigned long) cc << 53) & PGSTE_HR_BIT;
+		new = set_pgste_bit(new, ((unsigned long)cc << 53) & PGSTE_HR_BIT);
	}
	/* Reflect guest's logical view, not physical */
	cc |= (pgste_val(old) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 49;
	/* Changing the guest storage key is considered a change of the page */
	if ((pgste_val(new) ^ pgste_val(old)) & PGSTE_GR_BIT)
-		pgste_val(new) |= PGSTE_UC_BIT;
+		new = set_pgste_bit(new, PGSTE_UC_BIT);
	pgste_set_unlock(ptep, new);
	pte_unmap_unlock(ptep, ptl);
@@ -1126,7 +1118,7 @@ int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
	if (res)
		pgstev |= _PGSTE_GPS_ZERO;
-	pgste_val(pgste) = pgstev;
+	pgste = __pgste(pgstev);
	pgste_set_unlock(ptep, pgste);
	pte_unmap_unlock(ptep, ptl);
	return res;
@@ -1159,8 +1151,8 @@ int set_pgste_bits(struct mm_struct *mm, unsigned long hva,
		return -EFAULT;
	new = pgste_get_lock(ptep);
-	pgste_val(new) &= ~bits;
-	pgste_val(new) |= value & bits;
+	new = clear_pgste_bit(new, bits);
+	new = set_pgste_bit(new, value & bits);
	pgste_set_unlock(ptep, new);
	pte_unmap_unlock(ptep, ptl);
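The set_pgste_bit()/clear_pgste_bit() conversions throughout this file stop writing through the pgste_val() lvalue and keep pgste_t opaque, which is what allows type checking to be enabled for debug builds. The accessors are assumed to look like:

	static inline pgste_t set_pgste_bit(pgste_t pgste, unsigned long mask)
	{
		return __pgste(pgste_val(pgste) | mask);
	}

	static inline pgste_t clear_pgste_bit(pgste_t pgste, unsigned long mask)
	{
		return __pgste(pgste_val(pgste) & ~mask);
	}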


@@ -4,6 +4,7 @@
 */

#include <linux/memory_hotplug.h>
+#include <linux/cpufeature.h>
#include <linux/memblock.h>
#include <linux/pfn.h>
#include <linux/mm.h>
@@ -249,12 +250,12 @@ static int __ref modify_pmd_table(pud_t *pud, unsigned long addr,
		} else if (pmd_none(*pmd)) {
			if (IS_ALIGNED(addr, PMD_SIZE) &&
			    IS_ALIGNED(next, PMD_SIZE) &&
-			    MACHINE_HAS_EDAT1 && direct &&
+			    cpu_has_edat1() && direct &&
			    !debug_pagealloc_enabled()) {
				set_pmd(pmd, __pmd(__pa(addr) | prot));
				pages++;
				continue;
-			} else if (!direct && MACHINE_HAS_EDAT1) {
+			} else if (!direct && cpu_has_edat1()) {
				void *new_page;

				/*
@@ -335,7 +336,7 @@ static int modify_pud_table(p4d_t *p4d, unsigned long addr, unsigned long end,
		} else if (pud_none(*pud)) {
			if (IS_ALIGNED(addr, PUD_SIZE) &&
			    IS_ALIGNED(next, PUD_SIZE) &&
-			    MACHINE_HAS_EDAT2 && direct &&
+			    cpu_has_edat2() && direct &&
			    !debug_pagealloc_enabled()) {
				set_pud(pud, __pud(__pa(addr) | prot));
				pages++;
@@ -659,7 +660,7 @@ void __init vmem_map_init(void)
	 * prefix page is used to return to the previous context with
	 * an LPSWE instruction and therefore must be executable.
	 */
-	if (!static_key_enabled(&cpu_has_bear))
+	if (!cpu_has_bear())
		set_memory_x(0, 1);
	if (debug_pagealloc_enabled())
		__set_memory_4k(__va(0), absolute_pointer(__va(0)) + ident_map_size);


@@ -31,6 +31,7 @@
#include <linux/lockdep.h>
#include <linux/list_sort.h>

+#include <asm/machine.h>
#include <asm/isc.h>
#include <asm/airq.h>
#include <asm/facility.h>
@@ -1078,7 +1079,7 @@ char * __init pcibios_setup(char *str)
		return NULL;
	}
	if (!strcmp(str, "nomio")) {
-		get_lowcore()->machine_flags &= ~MACHINE_FLAG_PCI_MIO;
+		clear_machine_feature(MFEATURE_PCI_MIO);
		return NULL;
	}
	if (!strcmp(str, "force_floating")) {
@@ -1153,7 +1154,7 @@ static int __init pci_base_init(void)
		return 0;
	}

-	if (MACHINE_HAS_PCI_MIO) {
+	if (test_machine_feature(MFEATURE_PCI_MIO)) {
		static_branch_enable(&have_mio);
		system_ctl_set_bit(2, CR2_MIO_ADDRESSING_BIT);
	}
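test_machine_feature()/clear_machine_feature() replace the old machine_flags bits in the lowcore; a minimal sketch of the assumed interface, with the feature state kept in a plain bitmap (names and sizing are assumptions for illustration):

	extern unsigned long machine_features[1];

	static inline void clear_machine_feature(unsigned long nr)
	{
		__clear_bit(nr, machine_features);
	}

	static inline bool test_machine_feature(unsigned long nr)
	{
		return test_bit(nr, machine_features);
	}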


@@ -56,7 +56,7 @@ static inline int clp_get_ilp(unsigned long *ilp)
	int cc, exception;

	exception = 1;
-	asm volatile (
+	asm_inline volatile (
		"	.insn	rrf,0xb9a00000,%[mask],%[cmd],8,0\n"
		"0:	lhi	%[exc],0\n"
		"1:\n"
@@ -79,7 +79,7 @@ static __always_inline int clp_req(void *data, unsigned int lps)
	u64 ignored;

	exception = 1;
-	asm volatile (
+	asm_inline volatile (
		"	.insn	rrf,0xb9a00000,%[ign],%[req],0,%[lps]\n"
		"0:	lhi	%[exc],0\n"
		"1:\n"

Some files were not shown because too many files have changed in this diff Show More