mirror of https://github.com/torvalds/linux.git
Merge branch 'x86/cpu' into x86/asm, to pick up dependent commits
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit cfdaa618de

.mailmap | 7

@@ -226,6 +226,7 @@ Fangrui Song <i@maskray.me> <maskray@google.com>
Felipe W Damasio <felipewd@terra.com.br>
Felix Kuhling <fxkuehl@gmx.de>
Felix Moeller <felix@derklecks.de>
Feng Tang <feng.79.tang@gmail.com> <feng.tang@intel.com>
Fenglin Wu <quic_fenglinw@quicinc.com> <fenglinw@codeaurora.org>
Filipe Lautert <filipe@icewall.org>
Finn Thain <fthain@linux-m68k.org> <fthain@telegraphics.com.au>

@@ -317,6 +318,8 @@ Jayachandran C <c.jayachandran@gmail.com> <jnair@caviumnetworks.com>
Jean Tourrilhes <jt@hpl.hp.com>
Jeevan Shriram <quic_jshriram@quicinc.com> <jshriram@codeaurora.org>
Jeff Garzik <jgarzik@pretzel.yyz.us>
Jeff Johnson <jeff.johnson@oss.qualcomm.com> <jjohnson@codeaurora.org>
Jeff Johnson <jeff.johnson@oss.qualcomm.com> <quic_jjohnson@quicinc.com>
Jeff Layton <jlayton@kernel.org> <jlayton@poochiereds.net>
Jeff Layton <jlayton@kernel.org> <jlayton@primarydata.com>
Jeff Layton <jlayton@kernel.org> <jlayton@redhat.com>

@@ -519,6 +522,7 @@ Nadav Amit <nadav.amit@gmail.com> <namit@cs.technion.ac.il>
Nadia Yvette Chambers <nyc@holomorphy.com> William Lee Irwin III <wli@holomorphy.com>
Naoya Horiguchi <nao.horiguchi@gmail.com> <n-horiguchi@ah.jp.nec.com>
Naoya Horiguchi <nao.horiguchi@gmail.com> <naoya.horiguchi@nec.com>
Natalie Vock <natalie.vock@gmx.de> <friedrich.vock@gmx.de>
Nathan Chancellor <nathan@kernel.org> <natechancellor@gmail.com>
Naveen N Rao <naveen@kernel.org> <naveen.n.rao@linux.ibm.com>
Naveen N Rao <naveen@kernel.org> <naveen.n.rao@linux.vnet.ibm.com>

@@ -531,6 +535,7 @@ Nicholas Piggin <npiggin@gmail.com> <npiggin@kernel.dk>
Nicholas Piggin <npiggin@gmail.com> <npiggin@suse.de>
Nicholas Piggin <npiggin@gmail.com> <nickpiggin@yahoo.com.au>
Nicholas Piggin <npiggin@gmail.com> <piggin@cyberone.com.au>
Nick Desaulniers <nick.desaulniers+lkml@gmail.com> <ndesaulniers@google.com>
Nicolas Ferre <nicolas.ferre@microchip.com> <nicolas.ferre@atmel.com>
Nicolas Pitre <nico@fluxnic.net> <nicolas.pitre@linaro.org>
Nicolas Pitre <nico@fluxnic.net> <nico@linaro.org>

@@ -609,6 +614,8 @@ Richard Leitner <richard.leitner@linux.dev> <me@g0hl1n.net>
Richard Leitner <richard.leitner@linux.dev> <richard.leitner@skidata.com>
Robert Foss <rfoss@kernel.org> <robert.foss@linaro.org>
Rocky Liao <quic_rjliao@quicinc.com> <rjliao@codeaurora.org>
Rodrigo Siqueira <siqueira@igalia.com> <rodrigosiqueiramelo@gmail.com>
Rodrigo Siqueira <siqueira@igalia.com> <Rodrigo.Siqueira@amd.com>
Roman Gushchin <roman.gushchin@linux.dev> <guro@fb.com>
Roman Gushchin <roman.gushchin@linux.dev> <guroan@gmail.com>
Roman Gushchin <roman.gushchin@linux.dev> <klamm@yandex-team.ru>

@@ -180,10 +180,6 @@ Dump-capture kernel config options (Arch Dependent, i386 and x86_64)
1) On i386, enable high memory support under "Processor type and
features"::

CONFIG_HIGHMEM64G=y

or::

CONFIG_HIGHMEM4G

2) With CONFIG_SMP=y, usually nr_cpus=1 need specified on the kernel

@@ -416,10 +416,6 @@
Format: { quiet (default) | verbose | debug }
Change the amount of debugging information output
when initialising the APIC and IO-APIC components.
For X86-32, this can also be used to specify an APIC
driver name.
Format: apic=driver_name
Examples: apic=bigsmp

apic_extnmi= [APIC,X86,EARLY] External NMI delivery setting
Format: { bsp (default) | all | none }

@@ -7672,13 +7668,6 @@
16 - SIGBUS faults
Example: user_debug=31

userpte=
[X86,EARLY] Flags controlling user PTE allocations.

nohigh = do not allocate PTE pages in
HIGHMEM regardless of setting
of CONFIG_HIGHPTE.

vdso= [X86,SH,SPARC]
On X86_32, this is an alias for vdso32=. Otherwise:

@@ -18,6 +18,7 @@ Introduction
both access system memory directly and with the same effective
addresses.

**This driver is deprecated and will be removed in a future release.**

Hardware overview
=================

@@ -453,7 +454,7 @@ Sysfs Class

A cxl sysfs class is added under /sys/class/cxl to facilitate
enumeration and tuning of the accelerators. Its layout is
described in Documentation/ABI/testing/sysfs-class-cxl
described in Documentation/ABI/obsolete/sysfs-class-cxl


Udev rules

@@ -25,7 +25,7 @@ to cache translations for virtual addresses. The IOMMU driver uses the
mmu_notifier() support to keep the device TLB cache and the CPU cache in
sync. When an ATS lookup fails for a virtual address, the device should
use the PRI in order to request the virtual address to be paged into the
CPU page tables. The device must use ATS again in order the fetch the
CPU page tables. The device must use ATS again in order to fetch the
translation before use.

Shared Hardware Workqueues

@@ -216,7 +216,7 @@ submitting work and processing completions.

Single Root I/O Virtualization (SR-IOV) focuses on providing independent
hardware interfaces for virtualizing hardware. Hence, it's required to be
almost fully functional interface to software supporting the traditional
an almost fully functional interface to software supporting the traditional
BARs, space for interrupts via MSI-X, its own register layout.
Virtual Functions (VFs) are assisted by the Physical Function (PF)
driver.

@@ -20,11 +20,7 @@ It has several drawbacks, though:
features (wheel, extra buttons, touchpad mode) of the real PS/2 mouse may
not be available.

2) If CONFIG_HIGHMEM64G is enabled, the PS/2 mouse emulation can cause
system crashes, because the SMM BIOS is not expecting to be in PAE mode.
The Intel E7505 is a typical machine where this happens.

3) If AMD64 64-bit mode is enabled, again system crashes often happen,
2) If AMD64 64-bit mode is enabled, again system crashes often happen,
because the SMM BIOS isn't expecting the CPU to be in 64-bit mode. The
BIOS manufacturers only test with Windows, and Windows doesn't do 64-bit
yet.

@@ -38,11 +34,6 @@ Problem 1)
compiled-in, too.

Problem 2)
can currently only be solved by either disabling HIGHMEM64G
in the kernel config or USB Legacy support in the BIOS. A BIOS update
could help, but so far no such update exists.

Problem 3)
is usually fixed by a BIOS update. Check the board
manufacturers web site. If an update is not available, disable USB
Legacy support in the BIOS. If this alone doesn't help, try also adding

@@ -53,11 +53,17 @@ properties:
reg:
maxItems: 1

power-controller:
type: object

reboot-mode:
type: object

required:
- compatible
- reg

additionalProperties: true
additionalProperties: false

examples:
- |

@@ -33,6 +33,10 @@ properties:
clocks:
maxItems: 1

clock-names:
items:
- const: nf_clk

dmas:
maxItems: 1

@@ -51,6 +55,7 @@ required:
- reg-names
- interrupts
- clocks
- clock-names

unevaluatedProperties: false

@@ -66,7 +71,8 @@ examples:
#address-cells = <1>;
#size-cells = <0>;
interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&nf_clk>;
clocks = <&clk>;
clock-names = "nf_clk";
cdns,board-delay-ps = <4830>;

nand@0 {

@@ -112,7 +112,7 @@ Functions
Callbacks
=========

There are six callbacks:
There are seven callbacks:

::

@@ -182,6 +182,13 @@ There are six callbacks:
the length of the message. skb->len - offset may be greater
then full_len since strparser does not trim the skb.

::

int (*read_sock)(struct strparser *strp, read_descriptor_t *desc,
sk_read_actor_t recv_actor);

The read_sock callback is used by strparser instead of
sock->ops->read_sock, if provided.
::

int (*read_sock_done)(struct strparser *strp, int err);

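As a rough sketch only of how the callback documented above might be supplied by a strparser user, with everything beyond the read_sock signature shown in the hunk being an assumption rather than something taken from this commit::

    #include <net/strparser.h>

    /* Hypothetical callback matching the documented signature; it simply
     * reports that no data was consumed, leaving behaviour to the default. */
    static int demo_read_sock(struct strparser *strp, read_descriptor_t *desc,
                              sk_read_actor_t recv_actor)
    {
            return 0;
    }

    static const struct strp_callbacks demo_cbs = {
            /* ... rcv_msg, parse_msg and the other existing callbacks ... */
            .read_sock = demo_read_sock,
    };

A user would then pass demo_cbs to strp_init() as with the existing callbacks.
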
@@ -308,7 +308,7 @@ an involved disclosed party. The current ambassadors list:

Google Kees Cook <keescook@chromium.org>

LLVM Nick Desaulniers <ndesaulniers@google.com>
LLVM Nick Desaulniers <nick.desaulniers+lkml@gmail.com>
============= ========================================================

If you want your organization to be added to the ambassadors list, please

@@ -287,7 +287,7 @@ revelada involucrada. La lista de embajadores actuales:

Google Kees Cook <keescook@chromium.org>

LLVM Nick Desaulniers <ndesaulniers@google.com>
LLVM Nick Desaulniers <nick.desaulniers+lkml@gmail.com>
============= ========================================================

Si quiere que su organización se añada a la lista de embajadores, por

@@ -8,7 +8,7 @@ Landlock: unprivileged access control
=====================================

:Author: Mickaël Salaün
:Date: October 2024
:Date: January 2025

The goal of Landlock is to enable restriction of ambient rights (e.g. global
filesystem or network access) for a set of processes. Because Landlock

@@ -329,11 +329,11 @@ non-sandboxed process, we can specify this restriction with
A sandboxed process can connect to a non-sandboxed process when its domain is
not scoped. If a process's domain is scoped, it can only connect to sockets
created by processes in the same scope.
Moreover, If a process is scoped to send signal to a non-scoped process, it can
Moreover, if a process is scoped to send signal to a non-scoped process, it can
only send signals to processes in the same scope.

A connected datagram socket behaves like a stream socket when its domain is
scoped, meaning if the domain is scoped after the socket is connected , it can
scoped, meaning if the domain is scoped after the socket is connected, it can
still :manpage:`send(2)` data just like a stream socket. However, in the same
scenario, a non-connected datagram socket cannot send data (with
:manpage:`sendto(2)`) outside its scope.
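
As a minimal userspace sketch of the scoping described above (assuming headers and a kernel that expose the LANDLOCK_SCOPE_* flags, i.e. Landlock ABI 6 or later), a process could confine both abstract UNIX socket connections and signal delivery to its own domain::

    #include <linux/landlock.h>
    #include <sys/prctl.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Scope abstract UNIX socket connects and signals to this domain. */
    static int scope_self(void)
    {
            const struct landlock_ruleset_attr attr = {
                    .scoped = LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET |
                              LANDLOCK_SCOPE_SIGNAL,
            };
            const int fd = syscall(__NR_landlock_create_ruleset, &attr,
                                   sizeof(attr), 0);

            if (fd < 0)
                    return -1;
            if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) ||
                syscall(__NR_landlock_restrict_self, fd, 0)) {
                    close(fd);
                    return -1;
            }
            close(fd);
            return 0;
    }
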
MAINTAINERS | 44

@@ -1046,14 +1046,14 @@ F: drivers/crypto/ccp/hsti.*
AMD DISPLAY CORE
M: Harry Wentland <harry.wentland@amd.com>
M: Leo Li <sunpeng.li@amd.com>
M: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
R: Rodrigo Siqueira <siqueira@igalia.com>
L: amd-gfx@lists.freedesktop.org
S: Supported
T: git https://gitlab.freedesktop.org/agd5f/linux.git
F: drivers/gpu/drm/amd/display/

AMD DISPLAY CORE - DML
M: Chaitanya Dhere <chaitanya.dhere@amd.com>
M: Austin Zheng <austin.zheng@amd.com>
M: Jun Lei <jun.lei@amd.com>
S: Supported
F: drivers/gpu/drm/amd/display/dc/dml/

@@ -2210,6 +2210,7 @@ F: sound/soc/codecs/ssm3515.c

ARM/APPLE MACHINE SUPPORT
M: Sven Peter <sven@svenpeter.dev>
M: Janne Grunau <j@jannau.net>
R: Alyssa Rosenzweig <alyssa@rosenzweig.io>
L: asahi@lists.linux.dev
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)

@@ -2284,7 +2285,7 @@ F: drivers/irqchip/irq-aspeed-i2c-ic.c

ARM/ASPEED MACHINE SUPPORT
M: Joel Stanley <joel@jms.id.au>
R: Andrew Jeffery <andrew@codeconstruct.com.au>
M: Andrew Jeffery <andrew@codeconstruct.com.au>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-aspeed@lists.ozlabs.org (moderated for non-subscribers)
S: Supported

@@ -2877,7 +2878,7 @@ F: drivers/pinctrl/nxp/

ARM/NXP S32G/S32R DWMAC ETHERNET DRIVER
M: Jan Petrous <jan.petrous@oss.nxp.com>
L: NXP S32 Linux Team <s32@nxp.com>
R: s32@nxp.com
S: Maintained
F: Documentation/devicetree/bindings/net/nxp,s32-dwmac.yaml
F: drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c

@@ -5655,7 +5656,7 @@ F: .clang-format

CLANG/LLVM BUILD SUPPORT
M: Nathan Chancellor <nathan@kernel.org>
R: Nick Desaulniers <ndesaulniers@google.com>
R: Nick Desaulniers <nick.desaulniers+lkml@gmail.com>
R: Bill Wendling <morbo@google.com>
R: Justin Stitt <justinstitt@google.com>
L: llvm@lists.linux.dev

@@ -5855,7 +5856,6 @@ F: Documentation/security/snp-tdx-threat-model.rst

CONFIGFS
M: Joel Becker <jlbec@evilplan.org>
M: Christoph Hellwig <hch@lst.de>
S: Supported
T: git git://git.infradead.org/users/hch/configfs.git
F: fs/configfs/

@@ -5926,6 +5926,17 @@ F: tools/testing/selftests/cgroup/test_cpuset.c
F: tools/testing/selftests/cgroup/test_cpuset_prs.sh
F: tools/testing/selftests/cgroup/test_cpuset_v1_base.sh

CONTROL GROUP - DEVICE MEMORY CONTROLLER (DMEM)
M: Maarten Lankhorst <dev@lankhorst.se>
M: Maxime Ripard <mripard@kernel.org>
M: Natalie Vock <natalie.vock@gmx.de>
L: cgroups@vger.kernel.org
L: dri-devel@lists.freedesktop.org
S: Maintained
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: include/linux/cgroup_dmem.h
F: kernel/cgroup/dmem.c

CONTROL GROUP - MEMORY RESOURCE CONTROLLER (MEMCG)
M: Johannes Weiner <hannes@cmpxchg.org>
M: Michal Hocko <mhocko@kernel.org>

@@ -6878,7 +6889,6 @@ F: kernel/dma/map_benchmark.c
F: tools/testing/selftests/dma/

DMA MAPPING HELPERS
M: Christoph Hellwig <hch@lst.de>
M: Marek Szyprowski <m.szyprowski@samsung.com>
R: Robin Murphy <robin.murphy@arm.com>
L: iommu@lists.linux.dev

@@ -7425,7 +7435,6 @@ F: Documentation/devicetree/bindings/display/panel/novatek,nt36672a.yaml
F: drivers/gpu/drm/panel/panel-novatek-nt36672a.c

DRM DRIVER FOR NVIDIA GEFORCE/QUADRO GPUS
M: Karol Herbst <kherbst@redhat.com>
M: Lyude Paul <lyude@redhat.com>
M: Danilo Krummrich <dakr@kernel.org>
L: dri-devel@lists.freedesktop.org

@@ -9829,8 +9838,7 @@ F: drivers/input/touchscreen/goodix*

GOOGLE ETHERNET DRIVERS
M: Jeroen de Borst <jeroendb@google.com>
M: Praveen Kaligineedi <pkaligineedi@google.com>
R: Shailend Chand <shailend@google.com>
M: Harshitha Ramamurthy <hramamurthy@google.com>
L: netdev@vger.kernel.org
S: Maintained
F: Documentation/networking/device_drivers/ethernet/google/gve.rst

@@ -15683,7 +15691,7 @@ F: include/uapi/linux/cciss*.h

MICROSOFT MANA RDMA DRIVER
M: Long Li <longli@microsoft.com>
M: Ajay Sharma <sharmaajay@microsoft.com>
M: Konstantin Taranov <kotaranov@microsoft.com>
L: linux-rdma@vger.kernel.org
S: Supported
F: drivers/infiniband/hw/mana/

@@ -16472,6 +16480,12 @@ F: net/ethtool/cabletest.c
F: tools/testing/selftests/drivers/net/*/ethtool*
K: cable_test

NETWORKING [ETHTOOL MAC MERGE]
M: Vladimir Oltean <vladimir.oltean@nxp.com>
F: net/ethtool/mm.c
F: tools/testing/selftests/drivers/net/hw/ethtool_mm.sh
K: ethtool_mm

NETWORKING [GENERAL]
M: "David S. Miller" <davem@davemloft.net>
M: Eric Dumazet <edumazet@google.com>

@@ -19652,7 +19666,6 @@ F: drivers/net/wireless/quantenna
RADEON and AMDGPU DRM DRIVERS
M: Alex Deucher <alexander.deucher@amd.com>
M: Christian König <christian.koenig@amd.com>
M: Xinhui Pan <Xinhui.Pan@amd.com>
L: amd-gfx@lists.freedesktop.org
S: Supported
B: https://gitlab.freedesktop.org/drm/amd/-/issues

@@ -19874,7 +19887,7 @@ F: net/rds/
F: tools/testing/selftests/net/rds/

RDT - RESOURCE ALLOCATION
M: Fenghua Yu <fenghua.yu@intel.com>
M: Tony Luck <tony.luck@intel.com>
M: Reinette Chatre <reinette.chatre@intel.com>
L: linux-kernel@vger.kernel.org
S: Supported

@@ -20325,6 +20338,7 @@ RISC-V ARCHITECTURE
M: Paul Walmsley <paul.walmsley@sifive.com>
M: Palmer Dabbelt <palmer@dabbelt.com>
M: Albert Ou <aou@eecs.berkeley.edu>
R: Alexandre Ghiti <alex@ghiti.fr>
L: linux-riscv@lists.infradead.org
S: Supported
Q: https://patchwork.kernel.org/project/linux-riscv/list/

@@ -21918,10 +21932,13 @@ F: sound/soc/uniphier/

SOCKET TIMESTAMPING
M: Willem de Bruijn <willemdebruijn.kernel@gmail.com>
R: Jason Xing <kernelxing@tencent.com>
S: Maintained
F: Documentation/networking/timestamping.rst
F: include/linux/net_tstamp.h
F: include/uapi/linux/net_tstamp.h
F: tools/testing/selftests/bpf/*/net_timestamping*
F: tools/testing/selftests/net/*timestamp*
F: tools/testing/selftests/net/so_txtime.c

SOEKRIS NET48XX LED SUPPORT

@@ -24064,7 +24081,6 @@ F: tools/testing/selftests/ftrace/
TRACING MMIO ACCESSES (MMIOTRACE)
M: Steven Rostedt <rostedt@goodmis.org>
M: Masami Hiramatsu <mhiramat@kernel.org>
R: Karol Herbst <karolherbst@gmail.com>
R: Pekka Paalanen <ppaalanen@gmail.com>
L: linux-kernel@vger.kernel.org
L: nouveau@lists.freedesktop.org

Makefile | 2

@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 14
SUBLEVEL = 0
EXTRAVERSION = -rc3
EXTRAVERSION = -rc5
NAME = Baby Opossum Posse

# *DOCUMENTATION*

@@ -381,7 +381,7 @@ void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size);
void iounmap(volatile void __iomem *io_addr);
#define iounmap iounmap

void *arch_memremap_wb(phys_addr_t phys_addr, size_t size);
void *arch_memremap_wb(phys_addr_t phys_addr, size_t size, unsigned long flags);
#define arch_memremap_wb arch_memremap_wb

/*

@@ -436,7 +436,7 @@ void __arm_iomem_set_ro(void __iomem *ptr, size_t size)
set_memory_ro((unsigned long)ptr, PAGE_ALIGN(size) / PAGE_SIZE);
}

void *arch_memremap_wb(phys_addr_t phys_addr, size_t size)
void *arch_memremap_wb(phys_addr_t phys_addr, size_t size, unsigned long flags)
{
return (__force void *)arch_ioremap_caller(phys_addr, size,
MT_MEMORY_RW,

|
|||
EXPORT_SYMBOL_GPL(pci_remap_cfgspace);
|
||||
#endif
|
||||
|
||||
void *arch_memremap_wb(phys_addr_t phys_addr, size_t size)
|
||||
void *arch_memremap_wb(phys_addr_t phys_addr, size_t size, unsigned long flags)
|
||||
{
|
||||
return (void *)phys_addr;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -226,7 +226,6 @@ &uart0 {
|
|||
};
|
||||
|
||||
&uart5 {
|
||||
pinctrl-0 = <&uart5_xfer>;
|
||||
rts-gpios = <&gpio0 RK_PB5 GPIO_ACTIVE_HIGH>;
|
||||
status = "okay";
|
||||
};
|
||||
|
|
|
|||
|
|
@ -396,6 +396,12 @@ &u2phy_host {
|
|||
status = "okay";
|
||||
};
|
||||
|
||||
&uart5 {
|
||||
/delete-property/ dmas;
|
||||
/delete-property/ dma-names;
|
||||
pinctrl-0 = <&uart5_xfer>;
|
||||
};
|
||||
|
||||
/* Mule UCAN */
|
||||
&usb_host0_ehci {
|
||||
status = "okay";
|
||||
|
|
|
|||
|
|
@@ -17,8 +17,7 @@ / {

&gmac2io {
phy-handle = <&yt8531c>;
tx_delay = <0x19>;
rx_delay = <0x05>;
phy-mode = "rgmii-id";
status = "okay";

mdio {

@@ -15,6 +15,7 @@ / {

&gmac2io {
phy-handle = <&rtl8211e>;
phy-mode = "rgmii";
tx_delay = <0x24>;
rx_delay = <0x18>;
status = "okay";

@@ -109,7 +109,6 @@ &gmac2io {
assigned-clocks = <&cru SCLK_MAC2IO>, <&cru SCLK_MAC2IO_EXT>;
assigned-clock-parents = <&gmac_clk>, <&gmac_clk>;
clock_in_out = "input";
phy-mode = "rgmii";
phy-supply = <&vcc_io>;
pinctrl-0 = <&rgmiim1_pins>;
pinctrl-names = "default";

@@ -22,11 +22,11 @@ pp900_ap: regulator-pp900-ap {
};

/* EC turns on w/ pp900_usb_en */
pp900_usb: pp900-ap {
pp900_usb: regulator-pp900-ap {
};

/* EC turns on w/ pp900_pcie_en */
pp900_pcie: pp900-ap {
pp900_pcie: regulator-pp900-ap {
};

pp3000: regulator-pp3000 {

@@ -126,7 +126,7 @@ pp1800_pcie: regulator-pp1800-pcie {
};

/* Always on; plain and simple */
pp3000_ap: pp3000_emmc: pp3000 {
pp3000_ap: pp3000_emmc: regulator-pp3000 {
};

pp1500_ap_io: regulator-pp1500-ap-io {

@@ -160,7 +160,7 @@ pp3300_disp: regulator-pp3300-disp {
};

/* EC turns on w/ pp3300_usb_en_l */
pp3300_usb: pp3300 {
pp3300_usb: regulator-pp3300 {
};

/* gpio is shared with pp1800_pcie and pinctrl is set there */

@@ -92,7 +92,7 @@ pp900_s3: regulator-pp900-s3 {
};

/* EC turns on pp1800_s3_en */
pp1800_s3: pp1800 {
pp1800_s3: regulator-pp1800 {
};

/* pp3300 children, sorted by name */

@@ -109,11 +109,11 @@ pp2800_cam: regulator-pp2800-avdd {
};

/* EC turns on pp3300_s0_en */
pp3300_s0: pp3300 {
pp3300_s0: regulator-pp3300 {
};

/* EC turns on pp3300_s3_en */
pp3300_s3: pp3300 {
pp3300_s3: regulator-pp3300 {
};

/*

@@ -189,39 +189,39 @@ ppvar_gpu: ppvar-gpu {
};

/* EC turns on w/ pp900_ddrpll_en */
pp900_ddrpll: pp900-ap {
pp900_ddrpll: regulator-pp900-ap {
};

/* EC turns on w/ pp900_pll_en */
pp900_pll: pp900-ap {
pp900_pll: regulator-pp900-ap {
};

/* EC turns on w/ pp900_pmu_en */
pp900_pmu: pp900-ap {
pp900_pmu: regulator-pp900-ap {
};

/* EC turns on w/ pp1800_s0_en_l */
pp1800_ap_io: pp1800_emmc: pp1800_nfc: pp1800_s0: pp1800 {
pp1800_ap_io: pp1800_emmc: pp1800_nfc: pp1800_s0: regulator-pp1800 {
};

/* EC turns on w/ pp1800_avdd_en_l */
pp1800_avdd: pp1800 {
pp1800_avdd: regulator-pp1800 {
};

/* EC turns on w/ pp1800_lid_en_l */
pp1800_lid: pp1800_mic: pp1800 {
pp1800_lid: pp1800_mic: regulator-pp1800 {
};

/* EC turns on w/ lpddr_pwr_en */
pp1800_lpddr: pp1800 {
pp1800_lpddr: regulator-pp1800 {
};

/* EC turns on w/ pp1800_pmu_en_l */
pp1800_pmu: pp1800 {
pp1800_pmu: regulator-pp1800 {
};

/* EC turns on w/ pp1800_usb_en_l */
pp1800_usb: pp1800 {
pp1800_usb: regulator-pp1800 {
};

pp3000_sd_slot: regulator-pp3000-sd-slot {

@@ -259,11 +259,11 @@ ppvar_sd_card_io: ppvar-sd-card-io {
};

/* EC turns on w/ pp3300_trackpad_en_l */
pp3300_trackpad: pp3300-trackpad {
pp3300_trackpad: regulator-pp3300-trackpad {
};

/* EC turns on w/ usb_a_en */
pp5000_usb_a_vbus: pp5000 {
pp5000_usb_a_vbus: regulator-pp5000 {
};

ap_rtc_clk: ap-rtc-clk {

@@ -549,10 +549,10 @@ usb_host2_xhci: usb@fcd00000 {
mmu600_pcie: iommu@fc900000 {
compatible = "arm,smmu-v3";
reg = <0x0 0xfc900000 0x0 0x200000>;
interrupts = <GIC_SPI 369 IRQ_TYPE_LEVEL_HIGH 0>,
<GIC_SPI 371 IRQ_TYPE_LEVEL_HIGH 0>,
<GIC_SPI 374 IRQ_TYPE_LEVEL_HIGH 0>,
<GIC_SPI 367 IRQ_TYPE_LEVEL_HIGH 0>;
interrupts = <GIC_SPI 369 IRQ_TYPE_EDGE_RISING 0>,
<GIC_SPI 371 IRQ_TYPE_EDGE_RISING 0>,
<GIC_SPI 374 IRQ_TYPE_EDGE_RISING 0>,
<GIC_SPI 367 IRQ_TYPE_EDGE_RISING 0>;
interrupt-names = "eventq", "gerror", "priq", "cmdq-sync";
#iommu-cells = <1>;
};

@@ -560,10 +560,10 @@ mmu600_pcie: iommu@fc900000 {
mmu600_php: iommu@fcb00000 {
compatible = "arm,smmu-v3";
reg = <0x0 0xfcb00000 0x0 0x200000>;
interrupts = <GIC_SPI 381 IRQ_TYPE_LEVEL_HIGH 0>,
<GIC_SPI 383 IRQ_TYPE_LEVEL_HIGH 0>,
<GIC_SPI 386 IRQ_TYPE_LEVEL_HIGH 0>,
<GIC_SPI 379 IRQ_TYPE_LEVEL_HIGH 0>;
interrupts = <GIC_SPI 381 IRQ_TYPE_EDGE_RISING 0>,
<GIC_SPI 383 IRQ_TYPE_EDGE_RISING 0>,
<GIC_SPI 386 IRQ_TYPE_EDGE_RISING 0>,
<GIC_SPI 379 IRQ_TYPE_EDGE_RISING 0>;
interrupt-names = "eventq", "gerror", "priq", "cmdq-sync";
#iommu-cells = <1>;
status = "disabled";

@@ -2668,9 +2668,9 @@ tsadc: tsadc@fec00000 {
rockchip,hw-tshut-temp = <120000>;
rockchip,hw-tshut-mode = <0>; /* tshut mode 0:CRU 1:GPIO */
rockchip,hw-tshut-polarity = <0>; /* tshut polarity 0:LOW 1:HIGH */
pinctrl-0 = <&tsadc_gpio_func>;
pinctrl-1 = <&tsadc_shut>;
pinctrl-names = "gpio", "otpout";
pinctrl-0 = <&tsadc_shut_org>;
pinctrl-1 = <&tsadc_gpio_func>;
pinctrl-names = "default", "sleep";
#thermal-sensor-cells = <1>;
status = "disabled";
};

|
|||
compatible = "regulator-fixed";
|
||||
regulator-name = "vcc3v3_lcd";
|
||||
enable-active-high;
|
||||
gpio = <&gpio1 RK_PC4 GPIO_ACTIVE_HIGH>;
|
||||
gpio = <&gpio0 RK_PC4 GPIO_ACTIVE_HIGH>;
|
||||
pinctrl-names = "default";
|
||||
pinctrl-0 = <&lcdpwr_en>;
|
||||
vin-supply = <&vcc3v3_sys>;
|
||||
|
|
@ -241,7 +241,7 @@ &pcie3x4 {
|
|||
&pinctrl {
|
||||
lcd {
|
||||
lcdpwr_en: lcdpwr-en {
|
||||
rockchip,pins = <1 RK_PC4 RK_FUNC_GPIO &pcfg_pull_down>;
|
||||
rockchip,pins = <0 RK_PC4 RK_FUNC_GPIO &pcfg_pull_down>;
|
||||
};
|
||||
|
||||
bl_en: bl-en {
|
||||
|
|
|
|||
|
|
@ -213,7 +213,6 @@ pcie3x4_ep: pcie-ep@fe150000 {
|
|||
interrupt-names = "sys", "pmc", "msg", "legacy", "err",
|
||||
"dma0", "dma1", "dma2", "dma3";
|
||||
max-link-speed = <3>;
|
||||
iommus = <&mmu600_pcie 0x0000>;
|
||||
num-lanes = <4>;
|
||||
phys = <&pcie30phy>;
|
||||
phy-names = "pcie-phy";
|
||||
|
|
|
|||
|
|
@ -23,3 +23,7 @@ &pcie3x4_ep {
|
|||
vpcie3v3-supply = <&vcc3v3_pcie30>;
|
||||
status = "okay";
|
||||
};
|
||||
|
||||
&mmu600_pcie {
|
||||
status = "disabled";
|
||||
};
|
||||
|
|
|
|||
|
|
@ -1551,6 +1551,8 @@ CONFIG_PWM_VISCONTI=m
|
|||
CONFIG_SL28CPLD_INTC=y
|
||||
CONFIG_QCOM_PDC=y
|
||||
CONFIG_QCOM_MPM=y
|
||||
CONFIG_TI_SCI_INTR_IRQCHIP=y
|
||||
CONFIG_TI_SCI_INTA_IRQCHIP=y
|
||||
CONFIG_RESET_GPIO=m
|
||||
CONFIG_RESET_IMX7=y
|
||||
CONFIG_RESET_QCOM_AOSS=y
|
||||
|
|
|
|||
|
|
@ -42,8 +42,8 @@ extern int huge_ptep_set_access_flags(struct vm_area_struct *vma,
|
|||
unsigned long addr, pte_t *ptep,
|
||||
pte_t pte, int dirty);
|
||||
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
|
||||
extern pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
|
||||
unsigned long addr, pte_t *ptep);
|
||||
extern pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
|
||||
pte_t *ptep, unsigned long sz);
|
||||
#define __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT
|
||||
extern void huge_ptep_set_wrprotect(struct mm_struct *mm,
|
||||
unsigned long addr, pte_t *ptep);
|
||||
|
|
@ -76,12 +76,22 @@ static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma,
|
|||
{
|
||||
unsigned long stride = huge_page_size(hstate_vma(vma));
|
||||
|
||||
if (stride == PMD_SIZE)
|
||||
__flush_tlb_range(vma, start, end, stride, false, 2);
|
||||
else if (stride == PUD_SIZE)
|
||||
__flush_tlb_range(vma, start, end, stride, false, 1);
|
||||
else
|
||||
__flush_tlb_range(vma, start, end, PAGE_SIZE, false, 0);
|
||||
switch (stride) {
|
||||
#ifndef __PAGETABLE_PMD_FOLDED
|
||||
case PUD_SIZE:
|
||||
__flush_tlb_range(vma, start, end, PUD_SIZE, false, 1);
|
||||
break;
|
||||
#endif
|
||||
case CONT_PMD_SIZE:
|
||||
case PMD_SIZE:
|
||||
__flush_tlb_range(vma, start, end, PMD_SIZE, false, 2);
|
||||
break;
|
||||
case CONT_PTE_SIZE:
|
||||
__flush_tlb_range(vma, start, end, PAGE_SIZE, false, 3);
|
||||
break;
|
||||
default:
|
||||
__flush_tlb_range(vma, start, end, PAGE_SIZE, false, TLBI_TTL_UNKNOWN);
|
||||
}
|
||||
}
|
||||
|
||||
#endif /* __ASM_HUGETLB_H */
|
||||
|
|
|
|||
|
|
@ -119,7 +119,7 @@
|
|||
#define TCR_EL2_IRGN0_MASK TCR_IRGN0_MASK
|
||||
#define TCR_EL2_T0SZ_MASK 0x3f
|
||||
#define TCR_EL2_MASK (TCR_EL2_TG0_MASK | TCR_EL2_SH0_MASK | \
|
||||
TCR_EL2_ORGN0_MASK | TCR_EL2_IRGN0_MASK | TCR_EL2_T0SZ_MASK)
|
||||
TCR_EL2_ORGN0_MASK | TCR_EL2_IRGN0_MASK)
|
||||
|
||||
/* VTCR_EL2 Registers bits */
|
||||
#define VTCR_EL2_DS TCR_EL2_DS
|
||||
|
|
|
|||
|
|
@ -1259,7 +1259,7 @@ int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
|
|||
extern unsigned int __ro_after_init kvm_arm_vmid_bits;
|
||||
int __init kvm_arm_vmid_alloc_init(void);
|
||||
void __init kvm_arm_vmid_alloc_free(void);
|
||||
bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
|
||||
void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
|
||||
void kvm_arm_vmid_clear_active(void);
|
||||
|
||||
static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
|
||||
|
|
|
|||
|
|
@ -559,6 +559,16 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
|
|||
mmu = vcpu->arch.hw_mmu;
|
||||
last_ran = this_cpu_ptr(mmu->last_vcpu_ran);
|
||||
|
||||
/*
|
||||
* Ensure a VMID is allocated for the MMU before programming VTTBR_EL2,
|
||||
* which happens eagerly in VHE.
|
||||
*
|
||||
* Also, the VMID allocator only preserves VMIDs that are active at the
|
||||
* time of rollover, so KVM might need to grab a new VMID for the MMU if
|
||||
* this is called from kvm_sched_in().
|
||||
*/
|
||||
kvm_arm_vmid_update(&mmu->vmid);
|
||||
|
||||
/*
|
||||
* We guarantee that both TLBs and I-cache are private to each
|
||||
* vcpu. If detecting that a vcpu from the same VM has
|
||||
|
|
@ -1138,18 +1148,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
|
|||
*/
|
||||
preempt_disable();
|
||||
|
||||
/*
|
||||
* The VMID allocator only tracks active VMIDs per
|
||||
* physical CPU, and therefore the VMID allocated may not be
|
||||
* preserved on VMID roll-over if the task was preempted,
|
||||
* making a thread's VMID inactive. So we need to call
|
||||
* kvm_arm_vmid_update() in non-premptible context.
|
||||
*/
|
||||
if (kvm_arm_vmid_update(&vcpu->arch.hw_mmu->vmid) &&
|
||||
has_vhe())
|
||||
__load_stage2(vcpu->arch.hw_mmu,
|
||||
vcpu->arch.hw_mmu->arch);
|
||||
|
||||
kvm_pmu_flush_hwstate(vcpu);
|
||||
|
||||
local_irq_disable();
|
||||
|
|
@ -1980,7 +1978,7 @@ static int kvm_init_vector_slots(void)
|
|||
static void __init cpu_prepare_hyp_mode(int cpu, u32 hyp_va_bits)
|
||||
{
|
||||
struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu);
|
||||
unsigned long tcr, ips;
|
||||
unsigned long tcr;
|
||||
|
||||
/*
|
||||
* Calculate the raw per-cpu offset without a translation from the
|
||||
|
|
@ -1994,19 +1992,18 @@ static void __init cpu_prepare_hyp_mode(int cpu, u32 hyp_va_bits)
|
|||
params->mair_el2 = read_sysreg(mair_el1);
|
||||
|
||||
tcr = read_sysreg(tcr_el1);
|
||||
ips = FIELD_GET(TCR_IPS_MASK, tcr);
|
||||
if (cpus_have_final_cap(ARM64_KVM_HVHE)) {
|
||||
tcr &= ~(TCR_HD | TCR_HA | TCR_A1 | TCR_T0SZ_MASK);
|
||||
tcr |= TCR_EPD1_MASK;
|
||||
} else {
|
||||
unsigned long ips = FIELD_GET(TCR_IPS_MASK, tcr);
|
||||
|
||||
tcr &= TCR_EL2_MASK;
|
||||
tcr |= TCR_EL2_RES1;
|
||||
tcr |= TCR_EL2_RES1 | FIELD_PREP(TCR_EL2_PS_MASK, ips);
|
||||
if (lpa2_is_enabled())
|
||||
tcr |= TCR_EL2_DS;
|
||||
}
|
||||
tcr &= ~TCR_T0SZ_MASK;
|
||||
tcr |= TCR_T0SZ(hyp_va_bits);
|
||||
tcr &= ~TCR_EL2_PS_MASK;
|
||||
tcr |= FIELD_PREP(TCR_EL2_PS_MASK, ips);
|
||||
if (lpa2_is_enabled())
|
||||
tcr |= TCR_EL2_DS;
|
||||
params->tcr_el2 = tcr;
|
||||
|
||||
params->pgd_pa = kvm_mmu_get_httbr();
|
||||
|
|
|
|||
|
|
@ -135,11 +135,10 @@ void kvm_arm_vmid_clear_active(void)
|
|||
atomic64_set(this_cpu_ptr(&active_vmids), VMID_ACTIVE_INVALID);
|
||||
}
|
||||
|
||||
bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
|
||||
void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
|
||||
{
|
||||
unsigned long flags;
|
||||
u64 vmid, old_active_vmid;
|
||||
bool updated = false;
|
||||
|
||||
vmid = atomic64_read(&kvm_vmid->id);
|
||||
|
||||
|
|
@ -157,21 +156,17 @@ bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
|
|||
if (old_active_vmid != 0 && vmid_gen_match(vmid) &&
|
||||
0 != atomic64_cmpxchg_relaxed(this_cpu_ptr(&active_vmids),
|
||||
old_active_vmid, vmid))
|
||||
return false;
|
||||
return;
|
||||
|
||||
raw_spin_lock_irqsave(&cpu_vmid_lock, flags);
|
||||
|
||||
/* Check that our VMID belongs to the current generation. */
|
||||
vmid = atomic64_read(&kvm_vmid->id);
|
||||
if (!vmid_gen_match(vmid)) {
|
||||
if (!vmid_gen_match(vmid))
|
||||
vmid = new_vmid(kvm_vmid);
|
||||
updated = true;
|
||||
}
|
||||
|
||||
atomic64_set(this_cpu_ptr(&active_vmids), vmid);
|
||||
raw_spin_unlock_irqrestore(&cpu_vmid_lock, flags);
|
||||
|
||||
return updated;
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
|||
|
|
@ -100,20 +100,11 @@ static int find_num_contig(struct mm_struct *mm, unsigned long addr,
|
|||
|
||||
static inline int num_contig_ptes(unsigned long size, size_t *pgsize)
|
||||
{
|
||||
int contig_ptes = 0;
|
||||
int contig_ptes = 1;
|
||||
|
||||
*pgsize = size;
|
||||
|
||||
switch (size) {
|
||||
#ifndef __PAGETABLE_PMD_FOLDED
|
||||
case PUD_SIZE:
|
||||
if (pud_sect_supported())
|
||||
contig_ptes = 1;
|
||||
break;
|
||||
#endif
|
||||
case PMD_SIZE:
|
||||
contig_ptes = 1;
|
||||
break;
|
||||
case CONT_PMD_SIZE:
|
||||
*pgsize = PMD_SIZE;
|
||||
contig_ptes = CONT_PMDS;
|
||||
|
|
@ -122,6 +113,8 @@ static inline int num_contig_ptes(unsigned long size, size_t *pgsize)
|
|||
*pgsize = PAGE_SIZE;
|
||||
contig_ptes = CONT_PTES;
|
||||
break;
|
||||
default:
|
||||
WARN_ON(!__hugetlb_valid_size(size));
|
||||
}
|
||||
|
||||
return contig_ptes;
|
||||
|
|
@ -163,24 +156,23 @@ static pte_t get_clear_contig(struct mm_struct *mm,
|
|||
unsigned long pgsize,
|
||||
unsigned long ncontig)
|
||||
{
|
||||
pte_t orig_pte = __ptep_get(ptep);
|
||||
unsigned long i;
|
||||
pte_t pte, tmp_pte;
|
||||
bool present;
|
||||
|
||||
for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) {
|
||||
pte_t pte = __ptep_get_and_clear(mm, addr, ptep);
|
||||
|
||||
/*
|
||||
* If HW_AFDBM is enabled, then the HW could turn on
|
||||
* the dirty or accessed bit for any page in the set,
|
||||
* so check them all.
|
||||
*/
|
||||
if (pte_dirty(pte))
|
||||
orig_pte = pte_mkdirty(orig_pte);
|
||||
|
||||
if (pte_young(pte))
|
||||
orig_pte = pte_mkyoung(orig_pte);
|
||||
pte = __ptep_get_and_clear(mm, addr, ptep);
|
||||
present = pte_present(pte);
|
||||
while (--ncontig) {
|
||||
ptep++;
|
||||
addr += pgsize;
|
||||
tmp_pte = __ptep_get_and_clear(mm, addr, ptep);
|
||||
if (present) {
|
||||
if (pte_dirty(tmp_pte))
|
||||
pte = pte_mkdirty(pte);
|
||||
if (pte_young(tmp_pte))
|
||||
pte = pte_mkyoung(pte);
|
||||
}
|
||||
}
|
||||
return orig_pte;
|
||||
return pte;
|
||||
}
|
||||
|
||||
static pte_t get_clear_contig_flush(struct mm_struct *mm,
|
||||
|
|
@ -396,18 +388,13 @@ void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
|
|||
__pte_clear(mm, addr, ptep);
|
||||
}
|
||||
|
||||
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
|
||||
unsigned long addr, pte_t *ptep)
|
||||
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
|
||||
pte_t *ptep, unsigned long sz)
|
||||
{
|
||||
int ncontig;
|
||||
size_t pgsize;
|
||||
pte_t orig_pte = __ptep_get(ptep);
|
||||
|
||||
if (!pte_cont(orig_pte))
|
||||
return __ptep_get_and_clear(mm, addr, ptep);
|
||||
|
||||
ncontig = find_num_contig(mm, addr, ptep, &pgsize);
|
||||
|
||||
ncontig = num_contig_ptes(sz, &pgsize);
|
||||
return get_clear_contig(mm, addr, ptep, pgsize, ncontig);
|
||||
}
|
||||
|
||||
|
|
@ -549,6 +536,8 @@ bool __init arch_hugetlb_valid_size(unsigned long size)
|
|||
|
||||
pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
|
||||
{
|
||||
unsigned long psize = huge_page_size(hstate_vma(vma));
|
||||
|
||||
if (alternative_has_cap_unlikely(ARM64_WORKAROUND_2645198)) {
|
||||
/*
|
||||
* Break-before-make (BBM) is required for all user space mappings
|
||||
|
|
@ -558,7 +547,7 @@ pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr
|
|||
if (pte_user_exec(__ptep_get(ptep)))
|
||||
return huge_ptep_clear_flush(vma, addr, ptep);
|
||||
}
|
||||
return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
|
||||
return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, psize);
|
||||
}
|
||||
|
||||
void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
|
||||
|
|
|
|||
|
|
@ -279,12 +279,7 @@ void __init arm64_memblock_init(void)
|
|||
|
||||
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
|
||||
extern u16 memstart_offset_seed;
|
||||
|
||||
/*
|
||||
* Use the sanitised version of id_aa64mmfr0_el1 so that linear
|
||||
* map randomization can be enabled by shrinking the IPA space.
|
||||
*/
|
||||
u64 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
|
||||
u64 mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
|
||||
int parange = cpuid_feature_extract_unsigned_field(
|
||||
mmfr0, ID_AA64MMFR0_EL1_PARANGE_SHIFT);
|
||||
s64 range = linear_region_size -
|
||||
|
|
|
|||
|
|
@ -36,7 +36,8 @@ static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
|
|||
|
||||
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
|
||||
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
|
||||
unsigned long addr, pte_t *ptep)
|
||||
unsigned long addr, pte_t *ptep,
|
||||
unsigned long sz)
|
||||
{
|
||||
pte_t clear;
|
||||
pte_t pte = ptep_get(ptep);
|
||||
|
|
@ -51,8 +52,9 @@ static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
|
|||
unsigned long addr, pte_t *ptep)
|
||||
{
|
||||
pte_t pte;
|
||||
unsigned long sz = huge_page_size(hstate_vma(vma));
|
||||
|
||||
pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
|
||||
pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, sz);
|
||||
flush_tlb_page(vma, addr);
|
||||
return pte;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -468,6 +468,8 @@ static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
|
|||
Elf_Sym *sym, const char *symname))
|
||||
{
|
||||
int i;
|
||||
struct section *extab_sec = sec_lookup("__ex_table");
|
||||
int extab_index = extab_sec ? extab_sec - secs : -1;
|
||||
|
||||
/* Walk through the relocations */
|
||||
for (i = 0; i < ehdr.e_shnum; i++) {
|
||||
|
|
@ -480,6 +482,9 @@ static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
|
|||
if (sec->shdr.sh_type != SHT_REL_TYPE)
|
||||
continue;
|
||||
|
||||
if (sec->shdr.sh_info == extab_index)
|
||||
continue;
|
||||
|
||||
sec_symtab = sec->link;
|
||||
sec_applies = &secs[sec->shdr.sh_info];
|
||||
if (!(sec_applies->shdr.sh_flags & SHF_ALLOC))
|
||||
|
|
|
|||
|
|
@ -27,7 +27,8 @@ static inline int prepare_hugepage_range(struct file *file,
|
|||
|
||||
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
|
||||
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
|
||||
unsigned long addr, pte_t *ptep)
|
||||
unsigned long addr, pte_t *ptep,
|
||||
unsigned long sz)
|
||||
{
|
||||
pte_t clear;
|
||||
pte_t pte = *ptep;
|
||||
|
|
@ -42,13 +43,14 @@ static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
|
|||
unsigned long addr, pte_t *ptep)
|
||||
{
|
||||
pte_t pte;
|
||||
unsigned long sz = huge_page_size(hstate_vma(vma));
|
||||
|
||||
/*
|
||||
* clear the huge pte entry firstly, so that the other smp threads will
|
||||
* not get old pte entry after finishing flush_tlb_page and before
|
||||
* setting new huge pte entry
|
||||
*/
|
||||
pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
|
||||
pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, sz);
|
||||
flush_tlb_page(vma, addr);
|
||||
return pte;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -10,7 +10,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
|
|||
|
||||
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
|
||||
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
|
||||
pte_t *ptep);
|
||||
pte_t *ptep, unsigned long sz);
|
||||
|
||||
#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
|
||||
static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
|
||||
|
|
|
|||
|
|
@ -126,7 +126,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
|
|||
|
||||
|
||||
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
|
||||
pte_t *ptep)
|
||||
pte_t *ptep, unsigned long sz)
|
||||
{
|
||||
pte_t entry;
|
||||
|
||||
|
|
|
|||
|
|
@ -77,9 +77,17 @@
|
|||
/*
|
||||
* With 4K page size the real_pte machinery is all nops.
|
||||
*/
|
||||
#define __real_pte(e, p, o) ((real_pte_t){(e)})
|
||||
static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep, int offset)
|
||||
{
|
||||
return (real_pte_t){pte};
|
||||
}
|
||||
|
||||
#define __rpte_to_pte(r) ((r).pte)
|
||||
#define __rpte_to_hidx(r,index) (pte_val(__rpte_to_pte(r)) >> H_PAGE_F_GIX_SHIFT)
|
||||
|
||||
static inline unsigned long __rpte_to_hidx(real_pte_t rpte, unsigned long index)
|
||||
{
|
||||
return pte_val(__rpte_to_pte(rpte)) >> H_PAGE_F_GIX_SHIFT;
|
||||
}
|
||||
|
||||
#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift) \
|
||||
do { \
|
||||
|
|
|
|||
|
|
@ -45,7 +45,8 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
|
|||
|
||||
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
|
||||
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
|
||||
unsigned long addr, pte_t *ptep)
|
||||
unsigned long addr, pte_t *ptep,
|
||||
unsigned long sz)
|
||||
{
|
||||
return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 1));
|
||||
}
|
||||
|
|
@ -55,8 +56,9 @@ static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
|
|||
unsigned long addr, pte_t *ptep)
|
||||
{
|
||||
pte_t pte;
|
||||
unsigned long sz = huge_page_size(hstate_vma(vma));
|
||||
|
||||
pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
|
||||
pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, sz);
|
||||
flush_hugetlb_page(vma, addr);
|
||||
return pte;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -108,7 +108,7 @@ static int text_area_cpu_up(unsigned int cpu)
|
|||
unsigned long addr;
|
||||
int err;
|
||||
|
||||
area = get_vm_area(PAGE_SIZE, VM_ALLOC);
|
||||
area = get_vm_area(PAGE_SIZE, 0);
|
||||
if (!area) {
|
||||
WARN_ONCE(1, "Failed to create text area for cpu %d\n",
|
||||
cpu);
|
||||
|
|
@ -493,7 +493,9 @@ static int __do_patch_instructions_mm(u32 *addr, u32 *code, size_t len, bool rep
|
|||
|
||||
orig_mm = start_using_temp_mm(patching_mm);
|
||||
|
||||
kasan_disable_current();
|
||||
err = __patch_instructions(patch_addr, code, len, repeat_instr);
|
||||
kasan_enable_current();
|
||||
|
||||
/* context synchronisation performed by __patch_instructions */
|
||||
stop_using_temp_mm(patching_mm, orig_mm);
|
||||
|
|
|
|||
|
|
@ -231,7 +231,7 @@
|
|||
__arch_cmpxchg(".w", ".w" sc_sfx, ".w" cas_sfx, \
|
||||
sc_prepend, sc_append, \
|
||||
cas_prepend, cas_append, \
|
||||
__ret, __ptr, (long), __old, __new); \
|
||||
__ret, __ptr, (long)(int)(long), __old, __new); \
|
||||
break; \
|
||||
case 8: \
|
||||
__arch_cmpxchg(".d", ".d" sc_sfx, ".d" cas_sfx, \
|
||||
|
|
|
|||
|
|
@ -93,7 +93,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
|
|||
_ASM_EXTABLE_UACCESS_ERR(1b, 3b, %[r]) \
|
||||
_ASM_EXTABLE_UACCESS_ERR(2b, 3b, %[r]) \
|
||||
: [r] "+r" (ret), [v] "=&r" (val), [u] "+m" (*uaddr), [t] "=&r" (tmp)
|
||||
: [ov] "Jr" (oldval), [nv] "Jr" (newval)
|
||||
: [ov] "Jr" ((long)(int)oldval), [nv] "Jr" (newval)
|
||||
: "memory");
|
||||
__disable_user_access();
|
||||
|
||||
|
|
|
|||
|
|
@ -28,7 +28,8 @@ void set_huge_pte_at(struct mm_struct *mm,
|
|||
|
||||
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
|
||||
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
|
||||
unsigned long addr, pte_t *ptep);
|
||||
unsigned long addr, pte_t *ptep,
|
||||
unsigned long sz);
|
||||
|
||||
#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
|
||||
pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
|
||||
|
|
|
|||
|
|
@ -136,7 +136,7 @@ __io_writes_outs(outs, u64, q, __io_pbr(), __io_paw())
|
|||
#include <asm-generic/io.h>
|
||||
|
||||
#ifdef CONFIG_MMU
|
||||
#define arch_memremap_wb(addr, size) \
|
||||
#define arch_memremap_wb(addr, size, flags) \
|
||||
((__force void *)ioremap_prot((addr), (size), _PAGE_KERNEL))
|
||||
#endif
|
||||
|
||||
|
|
|
|||
|
|
@ -108,11 +108,11 @@ int populate_cache_leaves(unsigned int cpu)
|
|||
if (!np)
|
||||
return -ENOENT;
|
||||
|
||||
if (of_property_read_bool(np, "cache-size"))
|
||||
if (of_property_present(np, "cache-size"))
|
||||
ci_leaf_init(this_leaf++, CACHE_TYPE_UNIFIED, level);
|
||||
if (of_property_read_bool(np, "i-cache-size"))
|
||||
if (of_property_present(np, "i-cache-size"))
|
||||
ci_leaf_init(this_leaf++, CACHE_TYPE_INST, level);
|
||||
if (of_property_read_bool(np, "d-cache-size"))
|
||||
if (of_property_present(np, "d-cache-size"))
|
||||
ci_leaf_init(this_leaf++, CACHE_TYPE_DATA, level);
|
||||
|
||||
prev = np;
|
||||
|
|
@ -125,11 +125,11 @@ int populate_cache_leaves(unsigned int cpu)
|
|||
break;
|
||||
if (level <= levels)
|
||||
break;
|
||||
if (of_property_read_bool(np, "cache-size"))
|
||||
if (of_property_present(np, "cache-size"))
|
||||
ci_leaf_init(this_leaf++, CACHE_TYPE_UNIFIED, level);
|
||||
if (of_property_read_bool(np, "i-cache-size"))
|
||||
if (of_property_present(np, "i-cache-size"))
|
||||
ci_leaf_init(this_leaf++, CACHE_TYPE_INST, level);
|
||||
if (of_property_read_bool(np, "d-cache-size"))
|
||||
if (of_property_present(np, "d-cache-size"))
|
||||
ci_leaf_init(this_leaf++, CACHE_TYPE_DATA, level);
|
||||
levels = level;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -479,7 +479,7 @@ static void __init riscv_resolve_isa(unsigned long *source_isa,
|
|||
if (bit < RISCV_ISA_EXT_BASE)
|
||||
*this_hwcap |= isa2hwcap[bit];
|
||||
}
|
||||
} while (loop && memcmp(prev_resolved_isa, resolved_isa, sizeof(prev_resolved_isa)));
|
||||
} while (loop && !bitmap_equal(prev_resolved_isa, resolved_isa, RISCV_ISA_EXT_MAX));
|
||||
}
|
||||
|
||||
static void __init match_isa_ext(const char *name, const char *name_end, unsigned long *bitmap)
|
||||
|
|
|
|||
|
|
@ -322,8 +322,8 @@ void __init setup_arch(char **cmdline_p)
|
|||
|
||||
riscv_init_cbo_blocksizes();
|
||||
riscv_fill_hwcap();
|
||||
init_rt_signal_env();
|
||||
apply_boot_alternatives();
|
||||
init_rt_signal_env();
|
||||
|
||||
if (IS_ENABLED(CONFIG_RISCV_ISA_ZICBOM) &&
|
||||
riscv_isa_extension_available(NULL, ZICBOM))
|
||||
|
|
|
|||
|
|
@ -215,12 +215,6 @@ static size_t get_rt_frame_size(bool cal_all)
|
|||
if (cal_all || riscv_v_vstate_query(task_pt_regs(current)))
|
||||
total_context_size += riscv_v_sc_size;
|
||||
}
|
||||
/*
|
||||
* Preserved a __riscv_ctx_hdr for END signal context header if an
|
||||
* extension uses __riscv_extra_ext_header
|
||||
*/
|
||||
if (total_context_size)
|
||||
total_context_size += sizeof(struct __riscv_ctx_hdr);
|
||||
|
||||
frame_size += total_context_size;
|
||||
|
||||
|
|
|
|||
|
|
@ -974,7 +974,6 @@ int kvm_riscv_vcpu_aia_imsic_inject(struct kvm_vcpu *vcpu,
|
|||
|
||||
if (imsic->vsfile_cpu >= 0) {
|
||||
writel(iid, imsic->vsfile_va + IMSIC_MMIO_SETIPNUM_LE);
|
||||
kvm_vcpu_kick(vcpu);
|
||||
} else {
|
||||
eix = &imsic->swfile->eix[iid / BITS_PER_TYPE(u64)];
|
||||
set_bit(iid & (BITS_PER_TYPE(u64) - 1), eix->eip);
|
||||
|
|
|
|||
|
|
@ -9,6 +9,7 @@
|
|||
#include <linux/errno.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/kvm_host.h>
|
||||
#include <linux/wordpart.h>
|
||||
#include <asm/sbi.h>
|
||||
#include <asm/kvm_vcpu_sbi.h>
|
||||
|
||||
|
|
@ -79,12 +80,12 @@ static int kvm_sbi_hsm_vcpu_get_status(struct kvm_vcpu *vcpu)
|
|||
target_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, target_vcpuid);
|
||||
if (!target_vcpu)
|
||||
return SBI_ERR_INVALID_PARAM;
|
||||
if (!kvm_riscv_vcpu_stopped(target_vcpu))
|
||||
return SBI_HSM_STATE_STARTED;
|
||||
else if (vcpu->stat.generic.blocking)
|
||||
if (kvm_riscv_vcpu_stopped(target_vcpu))
|
||||
return SBI_HSM_STATE_STOPPED;
|
||||
else if (target_vcpu->stat.generic.blocking)
|
||||
return SBI_HSM_STATE_SUSPENDED;
|
||||
else
|
||||
return SBI_HSM_STATE_STOPPED;
|
||||
return SBI_HSM_STATE_STARTED;
|
||||
}
|
||||
|
||||
static int kvm_sbi_ext_hsm_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
|
||||
|
|
@ -109,7 +110,7 @@ static int kvm_sbi_ext_hsm_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
|
|||
}
|
||||
return 0;
|
||||
case SBI_EXT_HSM_HART_SUSPEND:
|
||||
switch (cp->a0) {
|
||||
switch (lower_32_bits(cp->a0)) {
|
||||
case SBI_HSM_SUSPEND_RET_DEFAULT:
|
||||
kvm_riscv_vcpu_wfi(vcpu);
|
||||
break;
|
||||
|
|
|
|||
|
|
@ -21,7 +21,7 @@ static int kvm_sbi_ext_time_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
|
|||
u64 next_cycle;
|
||||
|
||||
if (cp->a6 != SBI_EXT_TIME_SET_TIMER) {
|
||||
retdata->err_val = SBI_ERR_INVALID_PARAM;
|
||||
retdata->err_val = SBI_ERR_NOT_SUPPORTED;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
@ -51,9 +51,10 @@ static int kvm_sbi_ext_ipi_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
|
|||
struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
|
||||
unsigned long hmask = cp->a0;
|
||||
unsigned long hbase = cp->a1;
|
||||
unsigned long hart_bit = 0, sentmask = 0;
|
||||
|
||||
if (cp->a6 != SBI_EXT_IPI_SEND_IPI) {
|
||||
retdata->err_val = SBI_ERR_INVALID_PARAM;
|
||||
retdata->err_val = SBI_ERR_NOT_SUPPORTED;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
@ -62,15 +63,23 @@ static int kvm_sbi_ext_ipi_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
|
|||
if (hbase != -1UL) {
|
||||
if (tmp->vcpu_id < hbase)
|
||||
continue;
|
||||
if (!(hmask & (1UL << (tmp->vcpu_id - hbase))))
|
||||
hart_bit = tmp->vcpu_id - hbase;
|
||||
if (hart_bit >= __riscv_xlen)
|
||||
goto done;
|
||||
if (!(hmask & (1UL << hart_bit)))
|
||||
continue;
|
||||
}
|
||||
ret = kvm_riscv_vcpu_set_interrupt(tmp, IRQ_VS_SOFT);
|
||||
if (ret < 0)
|
||||
break;
|
||||
sentmask |= 1UL << hart_bit;
|
||||
kvm_riscv_vcpu_pmu_incr_fw(tmp, SBI_PMU_FW_IPI_RCVD);
|
||||
}
|
||||
|
||||
done:
|
||||
if (hbase != -1UL && (hmask ^ sentmask))
|
||||
retdata->err_val = SBI_ERR_INVALID_PARAM;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -4,6 +4,7 @@
|
|||
*/
|
||||
|
||||
#include <linux/kvm_host.h>
|
||||
#include <linux/wordpart.h>
|
||||
|
||||
#include <asm/kvm_vcpu_sbi.h>
|
||||
#include <asm/sbi.h>
|
||||
|
|
@ -19,7 +20,7 @@ static int kvm_sbi_ext_susp_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
|
|||
|
||||
switch (funcid) {
|
||||
case SBI_EXT_SUSP_SYSTEM_SUSPEND:
|
||||
if (cp->a0 != SBI_SUSP_SLEEP_TYPE_SUSPEND_TO_RAM) {
|
||||
if (lower_32_bits(cp->a0) != SBI_SUSP_SLEEP_TYPE_SUSPEND_TO_RAM) {
|
||||
retdata->err_val = SBI_ERR_INVALID_PARAM;
|
||||
return 0;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -293,7 +293,7 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
|
|||
|
||||
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
|
||||
unsigned long addr,
|
||||
pte_t *ptep)
|
||||
pte_t *ptep, unsigned long sz)
|
||||
{
|
||||
pte_t orig_pte = ptep_get(ptep);
|
||||
int pte_num;
|
||||
|
|
|
|||
|
|
@ -86,7 +86,7 @@ static int cmma_test_essa(void)
|
|||
: [reg1] "=&d" (reg1),
|
||||
[reg2] "=&a" (reg2),
|
||||
[rc] "+&d" (rc),
|
||||
[tmp] "=&d" (tmp),
|
||||
[tmp] "+&d" (tmp),
|
||||
"+Q" (get_lowcore()->program_new_psw),
|
||||
"=Q" (old)
|
||||
: [psw_old] "a" (&old),
|
||||
|
|
|
|||
|
|
@ -469,6 +469,7 @@ CONFIG_SCSI_DH_ALUA=m
|
|||
CONFIG_MD=y
|
||||
CONFIG_BLK_DEV_MD=y
|
||||
# CONFIG_MD_BITMAP_FILE is not set
|
||||
CONFIG_MD_LINEAR=m
|
||||
CONFIG_MD_CLUSTER=m
|
||||
CONFIG_BCACHE=m
|
||||
CONFIG_BLK_DEV_DM=y
|
||||
|
|
@ -874,6 +875,7 @@ CONFIG_RCU_CPU_STALL_TIMEOUT=300
|
|||
CONFIG_LATENCYTOP=y
|
||||
CONFIG_BOOTTIME_TRACING=y
|
||||
CONFIG_FUNCTION_GRAPH_RETVAL=y
|
||||
CONFIG_FUNCTION_GRAPH_RETADDR=y
|
||||
CONFIG_FPROBE=y
|
||||
CONFIG_FUNCTION_PROFILER=y
|
||||
CONFIG_STACK_TRACER=y
|
||||
|
|
|
|||
|
|
@ -459,6 +459,7 @@ CONFIG_SCSI_DH_ALUA=m
|
|||
CONFIG_MD=y
|
||||
CONFIG_BLK_DEV_MD=y
|
||||
# CONFIG_MD_BITMAP_FILE is not set
|
||||
CONFIG_MD_LINEAR=m
|
||||
CONFIG_MD_CLUSTER=m
|
||||
CONFIG_BCACHE=m
|
||||
CONFIG_BLK_DEV_DM=y
|
||||
|
|
@ -825,6 +826,7 @@ CONFIG_RCU_CPU_STALL_TIMEOUT=60
|
|||
CONFIG_LATENCYTOP=y
|
||||
CONFIG_BOOTTIME_TRACING=y
|
||||
CONFIG_FUNCTION_GRAPH_RETVAL=y
|
||||
CONFIG_FUNCTION_GRAPH_RETADDR=y
|
||||
CONFIG_FPROBE=y
|
||||
CONFIG_FUNCTION_PROFILER=y
|
||||
CONFIG_STACK_TRACER=y
|
||||
|
|
|
|||
|
|
@ -25,8 +25,16 @@ void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
|
|||
#define __HAVE_ARCH_HUGE_PTEP_GET
|
||||
pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
|
||||
|
||||
pte_t __huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
|
||||
pte_t *ptep);
|
||||
|
||||
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
|
||||
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
|
||||
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
|
||||
unsigned long addr, pte_t *ptep,
|
||||
unsigned long sz)
|
||||
{
|
||||
return __huge_ptep_get_and_clear(mm, addr, ptep);
|
||||
}
|
||||
|
||||
static inline void arch_clear_hugetlb_flags(struct folio *folio)
|
||||
{
|
||||
|
|
@ -48,7 +56,7 @@ static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
|
|||
static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
|
||||
unsigned long address, pte_t *ptep)
|
||||
{
|
||||
return huge_ptep_get_and_clear(vma->vm_mm, address, ptep);
|
||||
return __huge_ptep_get_and_clear(vma->vm_mm, address, ptep);
|
||||
}
|
||||
|
||||
#define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS
|
||||
|
|
@ -59,7 +67,7 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
|
|||
int changed = !pte_same(huge_ptep_get(vma->vm_mm, addr, ptep), pte);
|
||||
|
||||
if (changed) {
|
||||
huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
|
||||
__huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
|
||||
__set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
|
||||
}
|
||||
return changed;
|
||||
|
|
@ -69,7 +77,7 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
|
|||
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
|
||||
unsigned long addr, pte_t *ptep)
|
||||
{
|
||||
pte_t pte = huge_ptep_get_and_clear(mm, addr, ptep);
|
||||
pte_t pte = __huge_ptep_get_and_clear(mm, addr, ptep);
|
||||
|
||||
__set_huge_pte_at(mm, addr, ptep, pte_wrprotect(pte));
|
||||
}
|
||||
|
|
|
|||
|
|
@ -188,8 +188,8 @@ pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
|
|||
return __rste_to_pte(pte_val(*ptep));
|
||||
}
|
||||
|
||||
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
|
||||
unsigned long addr, pte_t *ptep)
|
||||
pte_t __huge_ptep_get_and_clear(struct mm_struct *mm,
|
||||
unsigned long addr, pte_t *ptep)
|
||||
{
|
||||
pte_t pte = huge_ptep_get(mm, addr, ptep);
|
||||
pmd_t *pmdp = (pmd_t *) ptep;
|
||||
|
|
|
|||
|
|
@ -8,7 +8,7 @@ PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y))
|
|||
$(obj)/sha256.o: $(srctree)/lib/crypto/sha256.c FORCE
|
||||
$(call if_changed_rule,cc_o_c)
|
||||
|
||||
CFLAGS_sha256.o := -D__DISABLE_EXPORTS -D__NO_FORTIFY
|
||||
CFLAGS_sha256.o := -D__NO_FORTIFY
|
||||
|
||||
$(obj)/mem.o: $(srctree)/arch/s390/lib/mem.S FORCE
|
||||
$(call if_changed_rule,as_o_S)
|
||||
|
|
@ -19,9 +19,11 @@ KBUILD_CFLAGS += -fno-zero-initialized-in-bss -fno-builtin -ffreestanding
|
|||
KBUILD_CFLAGS += -Os -m64 -msoft-float -fno-common
|
||||
KBUILD_CFLAGS += -fno-stack-protector
|
||||
KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
|
||||
KBUILD_CFLAGS += -D__DISABLE_EXPORTS
|
||||
KBUILD_CFLAGS += $(CLANG_FLAGS)
|
||||
KBUILD_CFLAGS += $(call cc-option,-fno-PIE)
|
||||
KBUILD_AFLAGS := $(filter-out -DCC_USING_EXPOLINE,$(KBUILD_AFLAGS))
|
||||
KBUILD_AFLAGS += -D__DISABLE_EXPORTS
|
||||
|
||||
# Since we link purgatory with -r unresolved symbols are not checked, so we
|
||||
# also link a purgatory.chk binary without -r to check for unresolved symbols.
|
||||
|
|
|
|||
|
|
@@ -20,7 +20,7 @@ void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,

#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
-                              pte_t *ptep);
+                              pte_t *ptep, unsigned long sz);

#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,

@@ -260,7 +260,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
}

pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
-                              pte_t *ptep)
+                              pte_t *ptep, unsigned long sz)
{
        unsigned int i, nptes, orig_shift, shift;
        unsigned long size;
@@ -440,25 +440,24 @@ void __init arch_cpu_finalize_init(void)
        os_check_bugs();
}

-void apply_seal_endbr(s32 *start, s32 *end, struct module *mod)
+void apply_seal_endbr(s32 *start, s32 *end)
{
}

-void apply_retpolines(s32 *start, s32 *end, struct module *mod)
+void apply_retpolines(s32 *start, s32 *end)
{
}

-void apply_returns(s32 *start, s32 *end, struct module *mod)
+void apply_returns(s32 *start, s32 *end)
{
}

void apply_fineibt(s32 *start_retpoline, s32 *end_retpoline,
-                   s32 *start_cfi, s32 *end_cfi, struct module *mod)
+                   s32 *start_cfi, s32 *end_cfi)
{
}

-void apply_alternatives(struct alt_instr *start, struct alt_instr *end,
-                        struct module *mod)
+void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
{
}

arch/x86/Kconfig (161 lines changed)
|
|
@ -85,6 +85,7 @@ config X86
|
|||
select ARCH_HAS_DMA_OPS if GART_IOMMU || XEN
|
||||
select ARCH_HAS_EARLY_DEBUG if KGDB
|
||||
select ARCH_HAS_ELF_RANDOMIZE
|
||||
select ARCH_HAS_EXECMEM_ROX if X86_64
|
||||
select ARCH_HAS_FAST_MULTIPLIER
|
||||
select ARCH_HAS_FORTIFY_SOURCE
|
||||
select ARCH_HAS_GCOV_PROFILE_ALL
|
||||
|
|
@ -132,7 +133,7 @@ config X86
|
|||
select ARCH_SUPPORTS_AUTOFDO_CLANG
|
||||
select ARCH_SUPPORTS_PROPELLER_CLANG if X86_64
|
||||
select ARCH_USE_BUILTIN_BSWAP
|
||||
select ARCH_USE_CMPXCHG_LOCKREF if X86_CMPXCHG64
|
||||
select ARCH_USE_CMPXCHG_LOCKREF if X86_CX8
|
||||
select ARCH_USE_MEMTEST
|
||||
select ARCH_USE_QUEUED_RWLOCKS
|
||||
select ARCH_USE_QUEUED_SPINLOCKS
|
||||
|
|
@ -232,7 +233,7 @@ config X86
|
|||
select HAVE_SAMPLE_FTRACE_DIRECT_MULTI if X86_64
|
||||
select HAVE_EBPF_JIT
|
||||
select HAVE_EFFICIENT_UNALIGNED_ACCESS
|
||||
select HAVE_EISA
|
||||
select HAVE_EISA if X86_32
|
||||
select HAVE_EXIT_THREAD
|
||||
select HAVE_GUP_FAST
|
||||
select HAVE_FENTRY if X86_64 || DYNAMIC_FTRACE
|
||||
|
|
@ -277,7 +278,7 @@ config X86
|
|||
select HAVE_PCI
|
||||
select HAVE_PERF_REGS
|
||||
select HAVE_PERF_USER_STACK_DUMP
|
||||
select MMU_GATHER_RCU_TABLE_FREE if PARAVIRT
|
||||
select MMU_GATHER_RCU_TABLE_FREE
|
||||
select MMU_GATHER_MERGE_VMAS
|
||||
select HAVE_POSIX_CPU_TIMERS_TASK_WORK
|
||||
select HAVE_REGS_AND_STACK_ACCESS_API
|
||||
|
|
@ -521,12 +522,6 @@ config X86_FRED
|
|||
ring transitions and exception/interrupt handling if the
|
||||
system supports it.
|
||||
|
||||
config X86_BIGSMP
|
||||
bool "Support for big SMP systems with more than 8 CPUs"
|
||||
depends on SMP && X86_32
|
||||
help
|
||||
This option is needed for the systems that have more than 8 CPUs.
|
||||
|
||||
config X86_EXTENDED_PLATFORM
|
||||
bool "Support for extended (non-PC) x86 platforms"
|
||||
default y
|
||||
|
|
@ -544,13 +539,12 @@ config X86_EXTENDED_PLATFORM
|
|||
AMD Elan
|
||||
RDC R-321x SoC
|
||||
SGI 320/540 (Visual Workstation)
|
||||
STA2X11-based (e.g. Northville)
|
||||
Moorestown MID devices
|
||||
|
||||
64-bit platforms (CONFIG_64BIT=y):
|
||||
Numascale NumaChip
|
||||
ScaleMP vSMP
|
||||
SGI Ultraviolet
|
||||
Merrifield/Moorefield MID devices
|
||||
|
||||
If you have one of these systems, or if you want to build a
|
||||
generic distribution kernel, say Y here - otherwise say N.
|
||||
|
|
@ -595,8 +589,31 @@ config X86_UV
|
|||
This option is needed in order to support SGI Ultraviolet systems.
|
||||
If you don't have one of these, you should say N here.
|
||||
|
||||
# Following is an alphabetically sorted list of 32 bit extended platforms
|
||||
# Please maintain the alphabetic order if and when there are additions
|
||||
config X86_INTEL_MID
|
||||
bool "Intel Z34xx/Z35xx MID platform support"
|
||||
depends on X86_EXTENDED_PLATFORM
|
||||
depends on X86_PLATFORM_DEVICES
|
||||
depends on PCI
|
||||
depends on X86_64 || (EXPERT && PCI_GOANY)
|
||||
depends on X86_IO_APIC
|
||||
select I2C
|
||||
select DW_APB_TIMER
|
||||
select INTEL_SCU_PCI
|
||||
help
|
||||
Select to build a kernel capable of supporting 64-bit Intel MID
|
||||
(Mobile Internet Device) platform systems which do not have
|
||||
the PCI legacy interfaces.
|
||||
|
||||
The only supported devices are the 22nm Merrified (Z34xx)
|
||||
and Moorefield (Z35xx) SoC used in the Intel Edison board and
|
||||
a small number of Android devices such as the Asus Zenfone 2,
|
||||
Asus FonePad 8 and Dell Venue 7.
|
||||
|
||||
If you are building for a PC class system or non-MID tablet
|
||||
SoCs like Bay Trail (Z36xx/Z37xx), say N here.
|
||||
|
||||
Intel MID platforms are based on an Intel processor and chipset which
|
||||
consume less power than most of the x86 derivatives.
|
||||
|
||||
config X86_GOLDFISH
|
||||
bool "Goldfish (Virtual Platform)"
|
||||
|
|
@ -606,6 +623,9 @@ config X86_GOLDFISH
|
|||
for Android development. Unless you are building for the Android
|
||||
Goldfish emulator say N here.
|
||||
|
||||
# Following is an alphabetically sorted list of 32 bit extended platforms
|
||||
# Please maintain the alphabetic order if and when there are additions
|
||||
|
||||
config X86_INTEL_CE
|
||||
bool "CE4100 TV platform"
|
||||
depends on PCI
|
||||
|
|
@ -621,24 +641,6 @@ config X86_INTEL_CE
|
|||
This option compiles in support for the CE4100 SOC for settop
|
||||
boxes and media devices.
|
||||
|
||||
config X86_INTEL_MID
|
||||
bool "Intel MID platform support"
|
||||
depends on X86_EXTENDED_PLATFORM
|
||||
depends on X86_PLATFORM_DEVICES
|
||||
depends on PCI
|
||||
depends on X86_64 || (PCI_GOANY && X86_32)
|
||||
depends on X86_IO_APIC
|
||||
select I2C
|
||||
select DW_APB_TIMER
|
||||
select INTEL_SCU_PCI
|
||||
help
|
||||
Select to build a kernel capable of supporting Intel MID (Mobile
|
||||
Internet Device) platform systems which do not have the PCI legacy
|
||||
interfaces. If you are building for a PC class system say N here.
|
||||
|
||||
Intel MID platforms are based on an Intel processor and chipset which
|
||||
consume less power than most of the x86 derivatives.
|
||||
|
||||
config X86_INTEL_QUARK
|
||||
bool "Intel Quark platform support"
|
||||
depends on X86_32
|
||||
|
|
@ -720,18 +722,6 @@ config X86_RDC321X
|
|||
as R-8610-(G).
|
||||
If you don't have one of these chips, you should say N here.
|
||||
|
||||
config X86_32_NON_STANDARD
|
||||
bool "Support non-standard 32-bit SMP architectures"
|
||||
depends on X86_32 && SMP
|
||||
depends on X86_EXTENDED_PLATFORM
|
||||
help
|
||||
This option compiles in the bigsmp and STA2X11 default
|
||||
subarchitectures. It is intended for a generic binary
|
||||
kernel. If you select them all, kernel will probe it one by
|
||||
one and will fallback to default.
|
||||
|
||||
# Alphabetically sorted list of Non standard 32 bit platforms
|
||||
|
||||
config X86_SUPPORTS_MEMORY_FAILURE
|
||||
def_bool y
|
||||
# MCE code calls memory_failure():
|
||||
|
|
@ -741,19 +731,6 @@ config X86_SUPPORTS_MEMORY_FAILURE
|
|||
depends on X86_64 || !SPARSEMEM
|
||||
select ARCH_SUPPORTS_MEMORY_FAILURE
|
||||
|
||||
config STA2X11
|
||||
bool "STA2X11 Companion Chip Support"
|
||||
depends on X86_32_NON_STANDARD && PCI
|
||||
select SWIOTLB
|
||||
select MFD_STA2X11
|
||||
select GPIOLIB
|
||||
help
|
||||
This adds support for boards based on the STA2X11 IO-Hub,
|
||||
a.k.a. "ConneXt". The chip is used in place of the standard
|
||||
PC chipset, so all "standard" peripherals are missing. If this
|
||||
option is selected the kernel will still be able to boot on
|
||||
standard PC machines.
|
||||
|
||||
config X86_32_IRIS
|
||||
tristate "Eurobraille/Iris poweroff module"
|
||||
depends on X86_32
|
||||
|
|
@ -1003,8 +980,7 @@ config NR_CPUS_RANGE_BEGIN
|
|||
config NR_CPUS_RANGE_END
|
||||
int
|
||||
depends on X86_32
|
||||
default 64 if SMP && X86_BIGSMP
|
||||
default 8 if SMP && !X86_BIGSMP
|
||||
default 8 if SMP
|
||||
default 1 if !SMP
|
||||
|
||||
config NR_CPUS_RANGE_END
|
||||
|
|
@ -1017,7 +993,6 @@ config NR_CPUS_RANGE_END
|
|||
config NR_CPUS_DEFAULT
|
||||
int
|
||||
depends on X86_32
|
||||
default 32 if X86_BIGSMP
|
||||
default 8 if SMP
|
||||
default 1 if !SMP
|
||||
|
||||
|
|
@ -1093,7 +1068,7 @@ config UP_LATE_INIT
|
|||
config X86_UP_APIC
|
||||
bool "Local APIC support on uniprocessors" if !PCI_MSI
|
||||
default PCI_MSI
|
||||
depends on X86_32 && !SMP && !X86_32_NON_STANDARD
|
||||
depends on X86_32 && !SMP
|
||||
help
|
||||
A local APIC (Advanced Programmable Interrupt Controller) is an
|
||||
integrated interrupt controller in the CPU. If you have a single-CPU
|
||||
|
|
@ -1118,7 +1093,7 @@ config X86_UP_IOAPIC
|
|||
|
||||
config X86_LOCAL_APIC
|
||||
def_bool y
|
||||
depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_APIC || PCI_MSI
|
||||
depends on X86_64 || SMP || X86_UP_APIC || PCI_MSI
|
||||
select IRQ_DOMAIN_HIERARCHY
|
||||
|
||||
config ACPI_MADT_WAKEUP
|
||||
|
|
@@ -1386,15 +1361,11 @@ config X86_CPUID
	  with major 203 and minors 0 to 31 for /dev/cpu/0/cpuid to
	  /dev/cpu/31/cpuid.

-choice
-	prompt "High Memory Support"
-	default HIGHMEM4G
+config HIGHMEM4G
+	bool "High Memory Support"
	depends on X86_32

-config NOHIGHMEM
-	bool "off"
	help
-	  Linux can use up to 64 Gigabytes of physical memory on x86 systems.
+	  Linux can use up to 4 Gigabytes of physical memory on x86 systems.
	  However, the address space of 32-bit x86 processors is only 4
	  Gigabytes large. That means that, if you have a large amount of
	  physical memory, not all of it can be "permanently mapped" by the

@@ -1410,38 +1381,9 @@ config NOHIGHMEM
	  possible.

	  If the machine has between 1 and 4 Gigabytes physical RAM, then
-	  answer "4GB" here.
+	  answer "Y" here.

-	  If more than 4 Gigabytes is used then answer "64GB" here. This
-	  selection turns Intel PAE (Physical Address Extension) mode on.
-	  PAE implements 3-level paging on IA32 processors. PAE is fully
-	  supported by Linux, PAE mode is implemented on all recent Intel
-	  processors (Pentium Pro and better). NOTE: If you say "64GB" here,
-	  then the kernel will not boot on CPUs that don't support PAE!
-
-	  The actual amount of total physical memory will either be
-	  auto detected or can be forced by using a kernel command line option
-	  such as "mem=256M". (Try "man bootparam" or see the documentation of
-	  your boot loader (lilo or loadlin) about how to pass options to the
-	  kernel at boot time.)
-
-	  If unsure, say "off".
-
-config HIGHMEM4G
-	bool "4GB"
-	help
-	  Select this if you have a 32-bit processor and between 1 and 4
-	  gigabytes of physical RAM.
-
-config HIGHMEM64G
-	bool "64GB"
-	depends on X86_HAVE_PAE
-	select X86_PAE
-	help
-	  Select this if you have a 32-bit processor and more than 4
-	  gigabytes of physical RAM.
-
-endchoice
+	  If unsure, say N.

choice
	prompt "Memory split" if EXPERT
@@ -1487,14 +1429,12 @@ config PAGE_OFFSET
	depends on X86_32

config HIGHMEM
-	def_bool y
-	depends on X86_32 && (HIGHMEM64G || HIGHMEM4G)
+	def_bool HIGHMEM4G

config X86_PAE
	bool "PAE (Physical Address Extension) Support"
	depends on X86_32 && X86_HAVE_PAE
	select PHYS_ADDR_T_64BIT
	select SWIOTLB
	help
	  PAE is required for NX support, and furthermore enables
	  larger swapspace support for non-overcommit purposes. It
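Taken together, the two hunks above mean that a 32-bit build no longer picks between "off", "4GB" and "64GB": high memory is a single bool and PAE stands on its own. As a rough sketch (the option names come from the hunks above; the chosen values are only an assumed example, not part of this diff), a 32-bit .config for a machine with more than about 1 GiB of RAM would now contain:

    # Illustrative fragment, not taken from the tree
    CONFIG_HIGHMEM4G=y
    CONFIG_HIGHMEM=y        # follows automatically via "def_bool HIGHMEM4G"
    CONFIG_X86_PAE=y        # now selected independently, e.g. for NX support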
@ -1564,8 +1504,7 @@ config AMD_MEM_ENCRYPT
|
|||
config NUMA
|
||||
bool "NUMA Memory Allocation and Scheduler Support"
|
||||
depends on SMP
|
||||
depends on X86_64 || (X86_32 && HIGHMEM64G && X86_BIGSMP)
|
||||
default y if X86_BIGSMP
|
||||
depends on X86_64
|
||||
select USE_PERCPU_NUMA_NODE_ID
|
||||
select OF_NUMA if OF
|
||||
help
|
||||
|
|
@ -1578,9 +1517,6 @@ config NUMA
|
|||
For 64-bit this is recommended if the system is Intel Core i7
|
||||
(or later), AMD Opteron, or EM64T NUMA.
|
||||
|
||||
For 32-bit this is only needed if you boot a 32-bit
|
||||
kernel on a 64-bit NUMA platform.
|
||||
|
||||
Otherwise, you should say N.
|
||||
|
||||
config AMD_NUMA
|
||||
|
|
@ -1619,7 +1555,7 @@ config ARCH_FLATMEM_ENABLE
|
|||
|
||||
config ARCH_SPARSEMEM_ENABLE
|
||||
def_bool y
|
||||
depends on X86_64 || NUMA || X86_32 || X86_32_NON_STANDARD
|
||||
depends on X86_64 || NUMA || X86_32
|
||||
select SPARSEMEM_STATIC if X86_32
|
||||
select SPARSEMEM_VMEMMAP_ENABLE if X86_64
|
||||
|
||||
|
|
@ -1665,15 +1601,6 @@ config X86_PMEM_LEGACY
|
|||
|
||||
Say Y if unsure.
|
||||
|
||||
config HIGHPTE
|
||||
bool "Allocate 3rd-level pagetables from highmem"
|
||||
depends on HIGHMEM
|
||||
help
|
||||
The VM uses one page table entry for each page of physical memory.
|
||||
For systems with a lot of RAM, this can be wasteful of precious
|
||||
low memory. Setting this option will put user-space page table
|
||||
entries in high memory.
|
||||
|
||||
config X86_CHECK_BIOS_CORRUPTION
|
||||
bool "Check for low memory corruption"
|
||||
help
|
||||
|
|
|
|||
|
|
@ -1,9 +1,9 @@
|
|||
# SPDX-License-Identifier: GPL-2.0
|
||||
# Put here option for CPU selection and depending optimization
|
||||
choice
|
||||
prompt "Processor family"
|
||||
default M686 if X86_32
|
||||
default GENERIC_CPU if X86_64
|
||||
prompt "x86-32 Processor family"
|
||||
depends on X86_32
|
||||
default M686
|
||||
help
|
||||
This is the processor type of your CPU. This information is
|
||||
used for optimizing purposes. In order to compile a kernel
|
||||
|
|
@ -31,7 +31,6 @@ choice
|
|||
- "Pentium-4" for the Intel Pentium 4 or P4-based Celeron.
|
||||
- "K6" for the AMD K6, K6-II and K6-III (aka K6-3D).
|
||||
- "Athlon" for the AMD K7 family (Athlon/Duron/Thunderbird).
|
||||
- "Opteron/Athlon64/Hammer/K8" for all K8 and newer AMD CPUs.
|
||||
- "Crusoe" for the Transmeta Crusoe series.
|
||||
- "Efficeon" for the Transmeta Efficeon series.
|
||||
- "Winchip-C6" for original IDT Winchip.
|
||||
|
|
@ -42,13 +41,10 @@ choice
|
|||
- "CyrixIII/VIA C3" for VIA Cyrix III or VIA C3.
|
||||
- "VIA C3-2" for VIA C3-2 "Nehemiah" (model 9 and above).
|
||||
- "VIA C7" for VIA C7.
|
||||
- "Intel P4" for the Pentium 4/Netburst microarchitecture.
|
||||
- "Core 2/newer Xeon" for all core2 and newer Intel CPUs.
|
||||
- "Intel Atom" for the Atom-microarchitecture CPUs.
|
||||
- "Generic-x86-64" for a kernel which runs on any x86-64 CPU.
|
||||
|
||||
See each option's help text for additional details. If you don't know
|
||||
what to do, choose "486".
|
||||
what to do, choose "Pentium-Pro".
|
||||
|
||||
config M486SX
|
||||
bool "486SX"
|
||||
|
|
@ -114,11 +110,11 @@ config MPENTIUMIII
|
|||
extensions.
|
||||
|
||||
config MPENTIUMM
|
||||
bool "Pentium M"
|
||||
bool "Pentium M/Pentium Dual Core/Core Solo/Core Duo"
|
||||
depends on X86_32
|
||||
help
|
||||
Select this for Intel Pentium M (not Pentium-4 M)
|
||||
notebook chips.
|
||||
"Merom" Core Solo/Duo notebook chips
|
||||
|
||||
config MPENTIUM4
|
||||
bool "Pentium-4/Celeron(P4-based)/Pentium-4 M/older Xeon"
|
||||
|
|
@ -139,22 +135,10 @@ config MPENTIUM4
|
|||
-Mobile Pentium 4
|
||||
-Mobile Pentium 4 M
|
||||
-Extreme Edition (Gallatin)
|
||||
-Prescott
|
||||
-Prescott 2M
|
||||
-Cedar Mill
|
||||
-Presler
|
||||
-Smithfiled
|
||||
Xeons (Intel Xeon, Xeon MP, Xeon LV, Xeon MV) corename:
|
||||
-Foster
|
||||
-Prestonia
|
||||
-Gallatin
|
||||
-Nocona
|
||||
-Irwindale
|
||||
-Cranford
|
||||
-Potomac
|
||||
-Paxville
|
||||
-Dempsey
|
||||
|
||||
|
||||
config MK6
|
||||
bool "K6/K6-II/K6-III"
|
||||
|
|
@ -172,13 +156,6 @@ config MK7
|
|||
some extended instructions, and passes appropriate optimization
|
||||
flags to GCC.
|
||||
|
||||
config MK8
|
||||
bool "Opteron/Athlon64/Hammer/K8"
|
||||
help
|
||||
Select this for an AMD Opteron or Athlon64 Hammer-family processor.
|
||||
Enables use of some extended instructions, and passes appropriate
|
||||
optimization flags to GCC.
|
||||
|
||||
config MCRUSOE
|
||||
bool "Crusoe"
|
||||
depends on X86_32
|
||||
|
|
@ -258,42 +235,14 @@ config MVIAC7
|
|||
Select this for a VIA C7. Selecting this uses the correct cache
|
||||
shift and tells gcc to treat the CPU as a 686.
|
||||
|
||||
config MPSC
|
||||
bool "Intel P4 / older Netburst based Xeon"
|
||||
depends on X86_64
|
||||
help
|
||||
Optimize for Intel Pentium 4, Pentium D and older Nocona/Dempsey
|
||||
Xeon CPUs with Intel 64bit which is compatible with x86-64.
|
||||
Note that the latest Xeons (Xeon 51xx and 53xx) are not based on the
|
||||
Netburst core and shouldn't use this option. You can distinguish them
|
||||
using the cpu family field
|
||||
in /proc/cpuinfo. Family 15 is an older Xeon, Family 6 a newer one.
|
||||
|
||||
config MCORE2
|
||||
bool "Core 2/newer Xeon"
|
||||
help
|
||||
|
||||
Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and
|
||||
53xx) CPUs. You can distinguish newer from older Xeons by the CPU
|
||||
family in /proc/cpuinfo. Newer ones have 6 and older ones 15
|
||||
(not a typo)
|
||||
|
||||
config MATOM
|
||||
bool "Intel Atom"
|
||||
help
|
||||
|
||||
Select this for the Intel Atom platform. Intel Atom CPUs have an
|
||||
in-order pipelining architecture and thus can benefit from
|
||||
accordingly optimized code. Use a recent GCC with specific Atom
|
||||
support in order to fully benefit from selecting this option.
|
||||
|
||||
config GENERIC_CPU
|
||||
bool "Generic-x86-64"
|
||||
depends on X86_64
|
||||
help
|
||||
Generic x86-64 CPU.
|
||||
Run equally well on all x86-64 CPUs.
|
||||
|
||||
endchoice
|
||||
|
||||
config X86_GENERIC
|
||||
|
|
@ -317,8 +266,8 @@ config X86_INTERNODE_CACHE_SHIFT
|
|||
|
||||
config X86_L1_CACHE_SHIFT
|
||||
int
|
||||
default "7" if MPENTIUM4 || MPSC
|
||||
default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
|
||||
default "7" if MPENTIUM4
|
||||
default "6" if MK7 || MPENTIUMM || MATOM || MVIAC7 || X86_GENERIC || X86_64
|
||||
default "4" if MELAN || M486SX || M486 || MGEODEGX1
|
||||
default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
|
||||
|
||||
|
|
@ -336,51 +285,35 @@ config X86_ALIGNMENT_16
|
|||
|
||||
config X86_INTEL_USERCOPY
|
||||
def_bool y
|
||||
depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2
|
||||
depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK7 || MEFFICEON
|
||||
|
||||
config X86_USE_PPRO_CHECKSUM
|
||||
def_bool y
|
||||
depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM
|
||||
|
||||
#
|
||||
# P6_NOPs are a relatively minor optimization that require a family >=
|
||||
# 6 processor, except that it is broken on certain VIA chips.
|
||||
# Furthermore, AMD chips prefer a totally different sequence of NOPs
|
||||
# (which work on all CPUs). In addition, it looks like Virtual PC
|
||||
# does not understand them.
|
||||
#
|
||||
# As a result, disallow these if we're not compiling for X86_64 (these
|
||||
# NOPs do work on all x86-64 capable chips); the list of processors in
|
||||
# the right-hand clause are the cores that benefit from this optimization.
|
||||
#
|
||||
config X86_P6_NOP
|
||||
def_bool y
|
||||
depends on X86_64
|
||||
depends on (MCORE2 || MPENTIUM4 || MPSC)
|
||||
depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MATOM
|
||||
|
||||
config X86_TSC
|
||||
def_bool y
|
||||
depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM) || X86_64
|
||||
depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MATOM) || X86_64
|
||||
|
||||
config X86_HAVE_PAE
|
||||
def_bool y
|
||||
depends on MCRUSOE || MEFFICEON || MCYRIXIII || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC7 || MCORE2 || MATOM || X86_64
|
||||
depends on MCRUSOE || MEFFICEON || MCYRIXIII || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC7 || MATOM || X86_64
|
||||
|
||||
config X86_CMPXCHG64
|
||||
config X86_CX8
|
||||
def_bool y
|
||||
depends on X86_HAVE_PAE || M586TSC || M586MMX || MK6 || MK7
|
||||
depends on X86_HAVE_PAE || M586TSC || M586MMX || MK6 || MK7 || MGEODEGX1 || MGEODE_LX
|
||||
|
||||
# this should be set for all -march=.. options where the compiler
|
||||
# generates cmov.
|
||||
config X86_CMOV
|
||||
def_bool y
|
||||
depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
|
||||
depends on (MK7 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || MATOM || MGEODE_LX || X86_64)
|
||||
|
||||
config X86_MINIMUM_CPU_FAMILY
|
||||
int
|
||||
default "64" if X86_64
|
||||
default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCORE2 || MK7 || MK8)
|
||||
default "5" if X86_32 && X86_CMPXCHG64
|
||||
default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MK7)
|
||||
default "5" if X86_32 && X86_CX8
|
||||
default "4"
|
||||
|
||||
config X86_DEBUGCTLMSR
|
||||
|
|
|
|||
|
|
@ -171,20 +171,8 @@ else
|
|||
# Use -mskip-rax-setup if supported.
|
||||
KBUILD_CFLAGS += $(call cc-option,-mskip-rax-setup)
|
||||
|
||||
# FIXME - should be integrated in Makefile.cpu (Makefile_32.cpu)
|
||||
cflags-$(CONFIG_MK8) += -march=k8
|
||||
cflags-$(CONFIG_MPSC) += -march=nocona
|
||||
cflags-$(CONFIG_MCORE2) += -march=core2
|
||||
cflags-$(CONFIG_MATOM) += -march=atom
|
||||
cflags-$(CONFIG_GENERIC_CPU) += -mtune=generic
|
||||
KBUILD_CFLAGS += $(cflags-y)
|
||||
|
||||
rustflags-$(CONFIG_MK8) += -Ctarget-cpu=k8
|
||||
rustflags-$(CONFIG_MPSC) += -Ctarget-cpu=nocona
|
||||
rustflags-$(CONFIG_MCORE2) += -Ctarget-cpu=core2
|
||||
rustflags-$(CONFIG_MATOM) += -Ctarget-cpu=atom
|
||||
rustflags-$(CONFIG_GENERIC_CPU) += -Ztune-cpu=generic
|
||||
KBUILD_RUSTFLAGS += $(rustflags-y)
|
||||
KBUILD_CFLAGS += -march=x86-64 -mtune=generic
|
||||
KBUILD_RUSTFLAGS += -Ctarget-cpu=x86-64 -Ztune-cpu=generic
|
||||
|
||||
KBUILD_CFLAGS += -mno-red-zone
|
||||
KBUILD_CFLAGS += -mcmodel=kernel
|
||||
|
|
|
|||
|
|
@ -24,7 +24,6 @@ cflags-$(CONFIG_MK6) += -march=k6
|
|||
# Please note, that patches that add -march=athlon-xp and friends are pointless.
|
||||
# They make zero difference whatsosever to performance at this time.
|
||||
cflags-$(CONFIG_MK7) += -march=athlon
|
||||
cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8,-march=athlon)
|
||||
cflags-$(CONFIG_MCRUSOE) += -march=i686 $(align)
|
||||
cflags-$(CONFIG_MEFFICEON) += -march=i686 $(call tune,pentium3) $(align)
|
||||
cflags-$(CONFIG_MWINCHIPC6) += $(call cc-option,-march=winchip-c6,-march=i586)
|
||||
|
|
@ -32,9 +31,7 @@ cflags-$(CONFIG_MWINCHIP3D) += $(call cc-option,-march=winchip2,-march=i586)
|
|||
cflags-$(CONFIG_MCYRIXIII) += $(call cc-option,-march=c3,-march=i486) $(align)
|
||||
cflags-$(CONFIG_MVIAC3_2) += $(call cc-option,-march=c3-2,-march=i686)
|
||||
cflags-$(CONFIG_MVIAC7) += -march=i686
|
||||
cflags-$(CONFIG_MCORE2) += -march=i686 $(call tune,core2)
|
||||
cflags-$(CONFIG_MATOM) += $(call cc-option,-march=atom,$(call cc-option,-march=core2,-march=i686)) \
|
||||
$(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))
|
||||
cflags-$(CONFIG_MATOM) += -march=atom
|
||||
|
||||
# AMD Elan support
|
||||
cflags-$(CONFIG_MELAN) += -march=i486
|
||||
|
|
|
|||
|
|
@ -1,6 +1,4 @@
|
|||
# global x86 required specific stuff
|
||||
# On 32-bit HIGHMEM4G is not allowed
|
||||
CONFIG_HIGHMEM64G=y
|
||||
CONFIG_64BIT=y
|
||||
|
||||
# These enable us to allow some of the
|
||||
|
|
|
|||
|
|
@ -190,6 +190,7 @@ static __always_inline bool int80_is_external(void)
|
|||
|
||||
/**
|
||||
* do_int80_emulation - 32-bit legacy syscall C entry from asm
|
||||
* @regs: syscall arguments in struct pt_args on the stack.
|
||||
*
|
||||
* This entry point can be used by 32-bit and 64-bit programs to perform
|
||||
* 32-bit system calls. Instances of INT $0x80 can be found inline in
|
||||
|
|
|
|||
|
|
@ -48,8 +48,7 @@ int __init init_vdso_image(const struct vdso_image *image)
|
|||
|
||||
apply_alternatives((struct alt_instr *)(image->data + image->alt),
|
||||
(struct alt_instr *)(image->data + image->alt +
|
||||
image->alt_len),
|
||||
NULL);
|
||||
image->alt_len));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -628,7 +628,7 @@ int x86_pmu_hw_config(struct perf_event *event)
        if (event->attr.type == event->pmu->type)
                event->hw.config |= x86_pmu_get_event_config(event);

-        if (event->attr.sample_period && x86_pmu.limit_period) {
+        if (!event->attr.freq && x86_pmu.limit_period) {
                s64 left = event->attr.sample_period;
                x86_pmu.limit_period(event, &left);
                if (left > event->attr.sample_period)
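The guard above now keys on attr.freq rather than on a non-zero sample_period: the limit_period clamp is applied only to fixed-period events, while frequency-mode events get their starting period seeded elsewhere (see the intel_pmu_freq_start_period() hunk further down). A hedged userspace sketch of the two sampling modes, using only real fields from <linux/perf_event.h> (the helper function and the chosen numbers are illustrative, not an API from this diff):

    #include <linux/perf_event.h>
    #include <string.h>

    /* Illustrative helper: fill in one attr per sampling mode. */
    static void setup_attrs(struct perf_event_attr *freq_mode,
                            struct perf_event_attr *period_mode)
    {
            memset(freq_mode, 0, sizeof(*freq_mode));
            freq_mode->size = sizeof(*freq_mode);
            freq_mode->type = PERF_TYPE_HARDWARE;
            freq_mode->config = PERF_COUNT_HW_CPU_CYCLES;
            freq_mode->freq = 1;              /* attr.freq set: the clamp above is skipped */
            freq_mode->sample_freq = 4000;    /* samples per second, like perf record -F 4000 */

            memset(period_mode, 0, sizeof(*period_mode));
            period_mode->size = sizeof(*period_mode);
            period_mode->type = PERF_TYPE_HARDWARE;
            period_mode->config = PERF_COUNT_HW_CPU_CYCLES;
            period_mode->sample_period = 100000;  /* fixed period: limit_period() may clamp it */
    }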
|
|
|
|||
|
|
@ -397,34 +397,28 @@ static struct event_constraint intel_lnc_event_constraints[] = {
|
|||
METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FETCH_LAT, 6),
|
||||
METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_MEM_BOUND, 7),
|
||||
|
||||
INTEL_EVENT_CONSTRAINT(0x20, 0xf),
|
||||
|
||||
INTEL_UEVENT_CONSTRAINT(0x012a, 0xf),
|
||||
INTEL_UEVENT_CONSTRAINT(0x012b, 0xf),
|
||||
INTEL_UEVENT_CONSTRAINT(0x0148, 0x4),
|
||||
INTEL_UEVENT_CONSTRAINT(0x0175, 0x4),
|
||||
|
||||
INTEL_EVENT_CONSTRAINT(0x2e, 0x3ff),
|
||||
INTEL_EVENT_CONSTRAINT(0x3c, 0x3ff),
|
||||
/*
|
||||
* Generally event codes < 0x90 are restricted to counters 0-3.
|
||||
* The 0x2E and 0x3C are exception, which has no restriction.
|
||||
*/
|
||||
INTEL_EVENT_CONSTRAINT_RANGE(0x01, 0x8f, 0xf),
|
||||
|
||||
INTEL_UEVENT_CONSTRAINT(0x01a3, 0xf),
|
||||
INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf),
|
||||
INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4),
|
||||
INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4),
|
||||
INTEL_UEVENT_CONSTRAINT(0x04a4, 0x1),
|
||||
INTEL_UEVENT_CONSTRAINT(0x08a4, 0x1),
|
||||
INTEL_UEVENT_CONSTRAINT(0x10a4, 0x1),
|
||||
INTEL_UEVENT_CONSTRAINT(0x01b1, 0x8),
|
||||
INTEL_UEVENT_CONSTRAINT(0x01cd, 0x3fc),
|
||||
INTEL_UEVENT_CONSTRAINT(0x02cd, 0x3),
|
||||
INTEL_EVENT_CONSTRAINT(0xce, 0x1),
|
||||
|
||||
INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xdf, 0xf),
|
||||
/*
|
||||
* Generally event codes >= 0x90 are likely to have no restrictions.
|
||||
* The exception are defined as above.
|
||||
*/
|
||||
INTEL_EVENT_CONSTRAINT_RANGE(0x90, 0xfe, 0x3ff),
|
||||
|
||||
INTEL_UEVENT_CONSTRAINT(0x00e0, 0xf),
|
||||
|
||||
EVENT_CONSTRAINT_END
|
||||
};
|
||||
|
|
@ -3958,6 +3952,85 @@ static inline bool intel_pmu_has_cap(struct perf_event *event, int idx)
|
|||
return test_bit(idx, (unsigned long *)&intel_cap->capabilities);
|
||||
}
|
||||
|
||||
static u64 intel_pmu_freq_start_period(struct perf_event *event)
|
||||
{
|
||||
int type = event->attr.type;
|
||||
u64 config, factor;
|
||||
s64 start;
|
||||
|
||||
/*
|
||||
* The 127 is the lowest possible recommended SAV (sample after value)
|
||||
* for a 4000 freq (default freq), according to the event list JSON file.
|
||||
* Also, assume the workload is idle 50% time.
|
||||
*/
|
||||
factor = 64 * 4000;
|
||||
if (type != PERF_TYPE_HARDWARE && type != PERF_TYPE_HW_CACHE)
|
||||
goto end;
|
||||
|
||||
/*
|
||||
* The estimation of the start period in the freq mode is
|
||||
* based on the below assumption.
|
||||
*
|
||||
* For a cycles or an instructions event, 1GHZ of the
|
||||
* underlying platform, 1 IPC. The workload is idle 50% time.
|
||||
* The start period = 1,000,000,000 * 1 / freq / 2.
|
||||
* = 500,000,000 / freq
|
||||
*
|
||||
* Usually, the branch-related events occur less than the
|
||||
* instructions event. According to the Intel event list JSON
|
||||
* file, the SAV (sample after value) of a branch-related event
|
||||
* is usually 1/4 of an instruction event.
|
||||
* The start period of branch-related events = 125,000,000 / freq.
|
||||
*
|
||||
* The cache-related events occurs even less. The SAV is usually
|
||||
* 1/20 of an instruction event.
|
||||
* The start period of cache-related events = 25,000,000 / freq.
|
||||
*/
|
||||
config = event->attr.config & PERF_HW_EVENT_MASK;
|
||||
if (type == PERF_TYPE_HARDWARE) {
|
||||
switch (config) {
|
||||
case PERF_COUNT_HW_CPU_CYCLES:
|
||||
case PERF_COUNT_HW_INSTRUCTIONS:
|
||||
case PERF_COUNT_HW_BUS_CYCLES:
|
||||
case PERF_COUNT_HW_STALLED_CYCLES_FRONTEND:
|
||||
case PERF_COUNT_HW_STALLED_CYCLES_BACKEND:
|
||||
case PERF_COUNT_HW_REF_CPU_CYCLES:
|
||||
factor = 500000000;
|
||||
break;
|
||||
case PERF_COUNT_HW_BRANCH_INSTRUCTIONS:
|
||||
case PERF_COUNT_HW_BRANCH_MISSES:
|
||||
factor = 125000000;
|
||||
break;
|
||||
case PERF_COUNT_HW_CACHE_REFERENCES:
|
||||
case PERF_COUNT_HW_CACHE_MISSES:
|
||||
factor = 25000000;
|
||||
break;
|
||||
default:
|
||||
goto end;
|
||||
}
|
||||
}
|
||||
|
||||
if (type == PERF_TYPE_HW_CACHE)
|
||||
factor = 25000000;
|
||||
end:
|
||||
/*
|
||||
* Usually, a prime or a number with less factors (close to prime)
|
||||
* is chosen as an SAV, which makes it less likely that the sampling
|
||||
* period synchronizes with some periodic event in the workload.
|
||||
* Minus 1 to make it at least avoiding values near power of twos
|
||||
* for the default freq.
|
||||
*/
|
||||
start = DIV_ROUND_UP_ULL(factor, event->attr.sample_freq) - 1;
|
||||
|
||||
if (start > x86_pmu.max_period)
|
||||
start = x86_pmu.max_period;
|
||||
|
||||
if (x86_pmu.limit_period)
|
||||
x86_pmu.limit_period(event, &start);
|
||||
|
||||
return start;
|
||||
}
|
||||
|
||||
static int intel_pmu_hw_config(struct perf_event *event)
{
        int ret = x86_pmu_hw_config(event);

@@ -3969,6 +4042,12 @@ static int intel_pmu_hw_config(struct perf_event *event)
        if (ret)
                return ret;

+        if (event->attr.freq && event->attr.sample_freq) {
+                event->hw.sample_period = intel_pmu_freq_start_period(event);
+                event->hw.last_period = event->hw.sample_period;
+                local64_set(&event->hw.period_left, event->hw.sample_period);
+        }
+
        if (event->attr.precise_ip) {
                if ((event->attr.config & INTEL_ARCH_EVENT_MASK) == INTEL_FIXED_VLBR_EVENT)
                        return -EINVAL;
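To make the period seeding above concrete: with the constants used in intel_pmu_freq_start_period() (500000000 for cycle/instruction-like events, 125000000 for branch events, 25000000 for cache events) and the default 4000 Hz frequency, the starting periods work out to 124999, 31249 and 6249 respectively. A standalone sketch of the arithmetic (not kernel code; only the constants come from the hunk above):

    #include <stdio.h>

    /* DIV_ROUND_UP_ULL(factor, freq) - 1, as in intel_pmu_freq_start_period() */
    static unsigned long long start_period(unsigned long long factor,
                                           unsigned long long freq)
    {
            return (factor + freq - 1) / freq - 1;
    }

    int main(void)
    {
            unsigned long long freq = 4000;   /* default perf sampling frequency */

            printf("cycles/instructions: %llu\n", start_period(500000000ULL, freq)); /* 124999 */
            printf("branch events:       %llu\n", start_period(125000000ULL, freq)); /* 31249 */
            printf("cache events:        %llu\n", start_period(25000000ULL, freq));  /* 6249 */
            return 0;
    }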
@ -4606,9 +4685,9 @@ static int adl_hw_config(struct perf_event *event)
|
|||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
static enum hybrid_cpu_type adl_get_hybrid_cpu_type(void)
|
||||
static enum intel_cpu_type adl_get_hybrid_cpu_type(void)
|
||||
{
|
||||
return HYBRID_INTEL_CORE;
|
||||
return INTEL_CPU_TYPE_CORE;
|
||||
}
|
||||
|
||||
static inline bool erratum_hsw11(struct perf_event *event)
|
||||
|
|
@ -4953,7 +5032,8 @@ static void intel_pmu_check_hybrid_pmus(struct x86_hybrid_pmu *pmu)
|
|||
|
||||
static struct x86_hybrid_pmu *find_hybrid_pmu_for_cpu(void)
|
||||
{
|
||||
u8 cpu_type = get_this_hybrid_cpu_type();
|
||||
struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
|
||||
enum intel_cpu_type cpu_type = c->topo.intel_type;
|
||||
int i;
|
||||
|
||||
/*
|
||||
|
|
@ -4962,7 +5042,7 @@ static struct x86_hybrid_pmu *find_hybrid_pmu_for_cpu(void)
|
|||
* on it. There should be a fixup function provided for these
|
||||
* troublesome CPUs (->get_hybrid_cpu_type).
|
||||
*/
|
||||
if (cpu_type == HYBRID_INTEL_NONE) {
|
||||
if (cpu_type == INTEL_CPU_TYPE_UNKNOWN) {
|
||||
if (x86_pmu.get_hybrid_cpu_type)
|
||||
cpu_type = x86_pmu.get_hybrid_cpu_type();
|
||||
else
|
||||
|
|
@ -4979,16 +5059,16 @@ static struct x86_hybrid_pmu *find_hybrid_pmu_for_cpu(void)
|
|||
enum hybrid_pmu_type pmu_type = x86_pmu.hybrid_pmu[i].pmu_type;
|
||||
u32 native_id;
|
||||
|
||||
if (cpu_type == HYBRID_INTEL_CORE && pmu_type == hybrid_big)
|
||||
if (cpu_type == INTEL_CPU_TYPE_CORE && pmu_type == hybrid_big)
|
||||
return &x86_pmu.hybrid_pmu[i];
|
||||
if (cpu_type == HYBRID_INTEL_ATOM) {
|
||||
if (cpu_type == INTEL_CPU_TYPE_ATOM) {
|
||||
if (x86_pmu.num_hybrid_pmus == 2 && pmu_type == hybrid_small)
|
||||
return &x86_pmu.hybrid_pmu[i];
|
||||
|
||||
native_id = get_this_hybrid_cpu_native_id();
|
||||
if (native_id == skt_native_id && pmu_type == hybrid_small)
|
||||
native_id = c->topo.intel_native_model_id;
|
||||
if (native_id == INTEL_ATOM_SKT_NATIVE_ID && pmu_type == hybrid_small)
|
||||
return &x86_pmu.hybrid_pmu[i];
|
||||
if (native_id == cmt_native_id && pmu_type == hybrid_tiny)
|
||||
if (native_id == INTEL_ATOM_CMT_NATIVE_ID && pmu_type == hybrid_tiny)
|
||||
return &x86_pmu.hybrid_pmu[i];
|
||||
}
|
||||
}
|
||||
|
|
@ -6617,7 +6697,7 @@ __init int intel_pmu_init(void)
|
|||
case INTEL_ATOM_SILVERMONT_D:
|
||||
case INTEL_ATOM_SILVERMONT_MID:
|
||||
case INTEL_ATOM_AIRMONT:
|
||||
case INTEL_ATOM_AIRMONT_MID:
|
||||
case INTEL_ATOM_SILVERMONT_MID2:
|
||||
memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
|
||||
sizeof(hw_cache_event_ids));
|
||||
memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
|
||||
|
|
|
|||
|
|
@ -1199,7 +1199,7 @@ struct event_constraint intel_lnc_pebs_event_constraints[] = {
|
|||
INTEL_FLAGS_UEVENT_CONSTRAINT(0x100, 0x100000000ULL), /* INST_RETIRED.PREC_DIST */
|
||||
INTEL_FLAGS_UEVENT_CONSTRAINT(0x0400, 0x800000000ULL),
|
||||
|
||||
INTEL_HYBRID_LDLAT_CONSTRAINT(0x1cd, 0x3ff),
|
||||
INTEL_HYBRID_LDLAT_CONSTRAINT(0x1cd, 0x3fc),
|
||||
INTEL_HYBRID_STLAT_CONSTRAINT(0x2cd, 0x3),
|
||||
INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_LOADS */
|
||||
INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_STORES */
|
||||
|
|
|
|||
|
|
@ -669,18 +669,6 @@ enum {
|
|||
#define PERF_PEBS_DATA_SOURCE_GRT_MAX 0x10
|
||||
#define PERF_PEBS_DATA_SOURCE_GRT_MASK (PERF_PEBS_DATA_SOURCE_GRT_MAX - 1)
|
||||
|
||||
/*
|
||||
* CPUID.1AH.EAX[31:0] uniquely identifies the microarchitecture
|
||||
* of the core. Bits 31-24 indicates its core type (Core or Atom)
|
||||
* and Bits [23:0] indicates the native model ID of the core.
|
||||
* Core type and native model ID are defined in below enumerations.
|
||||
*/
|
||||
enum hybrid_cpu_type {
|
||||
HYBRID_INTEL_NONE,
|
||||
HYBRID_INTEL_ATOM = 0x20,
|
||||
HYBRID_INTEL_CORE = 0x40,
|
||||
};
|
||||
|
||||
#define X86_HYBRID_PMU_ATOM_IDX 0
|
||||
#define X86_HYBRID_PMU_CORE_IDX 1
|
||||
#define X86_HYBRID_PMU_TINY_IDX 2
|
||||
|
|
@ -697,11 +685,6 @@ enum hybrid_pmu_type {
|
|||
hybrid_big_small_tiny = hybrid_big | hybrid_small_tiny,
|
||||
};
|
||||
|
||||
enum atom_native_id {
|
||||
cmt_native_id = 0x2, /* Crestmont */
|
||||
skt_native_id = 0x3, /* Skymont */
|
||||
};
|
||||
|
||||
struct x86_hybrid_pmu {
|
||||
struct pmu pmu;
|
||||
const char *name;
|
||||
|
|
@ -994,7 +977,7 @@ struct x86_pmu {
|
|||
*/
|
||||
int num_hybrid_pmus;
|
||||
struct x86_hybrid_pmu *hybrid_pmu;
|
||||
enum hybrid_cpu_type (*get_hybrid_cpu_type) (void);
|
||||
enum intel_cpu_type (*get_hybrid_cpu_type) (void);
|
||||
};
|
||||
|
||||
struct x86_perf_task_context_opt {
|
||||
|
|
|
|||
|
|
@ -879,6 +879,7 @@ static const struct x86_cpu_id rapl_model_match[] __initconst = {
|
|||
X86_MATCH_VFM(INTEL_METEORLAKE_L, &model_skl),
|
||||
X86_MATCH_VFM(INTEL_ARROWLAKE_H, &model_skl),
|
||||
X86_MATCH_VFM(INTEL_ARROWLAKE, &model_skl),
|
||||
X86_MATCH_VFM(INTEL_ARROWLAKE_U, &model_skl),
|
||||
X86_MATCH_VFM(INTEL_LUNARLAKE_M, &model_skl),
|
||||
{},
|
||||
};
|
||||
|
|
|
|||
|
|
@ -239,5 +239,4 @@ void hyperv_setup_mmu_ops(void)
|
|||
|
||||
pr_info("Using hypercall for remote TLB flush\n");
|
||||
pv_ops.mmu.flush_tlb_multi = hyperv_flush_tlb_multi;
|
||||
pv_ops.mmu.tlb_remove_table = tlb_remove_table;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -87,16 +87,16 @@ extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
|
|||
* instructions were patched in already:
|
||||
*/
|
||||
extern int alternatives_patched;
|
||||
struct module;
|
||||
|
||||
extern void alternative_instructions(void);
|
||||
extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end,
|
||||
struct module *mod);
|
||||
extern void apply_retpolines(s32 *start, s32 *end, struct module *mod);
|
||||
extern void apply_returns(s32 *start, s32 *end, struct module *mod);
|
||||
extern void apply_seal_endbr(s32 *start, s32 *end, struct module *mod);
|
||||
extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
|
||||
extern void apply_retpolines(s32 *start, s32 *end);
|
||||
extern void apply_returns(s32 *start, s32 *end);
|
||||
extern void apply_seal_endbr(s32 *start, s32 *end);
|
||||
extern void apply_fineibt(s32 *start_retpoline, s32 *end_retpoine,
|
||||
s32 *start_cfi, s32 *end_cfi, struct module *mod);
|
||||
s32 *start_cfi, s32 *end_cfi);
|
||||
|
||||
struct module;
|
||||
|
||||
struct callthunk_sites {
|
||||
s32 *call_start, *call_end;
|
||||
|
|
|
|||
|
|
@ -16,7 +16,7 @@
|
|||
#include <asm/gsseg.h>
|
||||
#include <asm/nospec-branch.h>
|
||||
|
||||
#ifndef CONFIG_X86_CMPXCHG64
|
||||
#ifndef CONFIG_X86_CX8
|
||||
extern void cmpxchg8b_emu(void);
|
||||
#endif
|
||||
|
||||
|
|
|
|||
|
|
@ -48,7 +48,7 @@ static __always_inline s64 arch_atomic64_read_nonatomic(const atomic64_t *v)
|
|||
ATOMIC64_EXPORT(atomic64_##sym)
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_X86_CMPXCHG64
|
||||
#ifdef CONFIG_X86_CX8
|
||||
#define __alternative_atomic64(f, g, out, in...) \
|
||||
asm volatile("call %c[func]" \
|
||||
: ALT_OUTPUT_SP(out) \
|
||||
|
|
|
|||
|
|
@ -69,7 +69,7 @@ static __always_inline bool __try_cmpxchg64_local(volatile u64 *ptr, u64 *oldp,
|
|||
return __arch_try_cmpxchg64(ptr, oldp, new,);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_X86_CMPXCHG64
|
||||
#ifdef CONFIG_X86_CX8
|
||||
|
||||
#define arch_cmpxchg64 __cmpxchg64
|
||||
|
||||
|
|
|
|||
|
|
@ -50,20 +50,6 @@ static inline void split_lock_init(void) {}
|
|||
static inline void bus_lock_init(void) {}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_CPU_SUP_INTEL
|
||||
u8 get_this_hybrid_cpu_type(void);
|
||||
u32 get_this_hybrid_cpu_native_id(void);
|
||||
#else
|
||||
static inline u8 get_this_hybrid_cpu_type(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline u32 get_this_hybrid_cpu_native_id(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
#ifdef CONFIG_IA32_FEAT_CTL
|
||||
void init_ia32_feat_ctl(struct cpuinfo_x86 *c);
|
||||
#else
|
||||
|
|
|
|||
|
|
@ -37,19 +37,15 @@ enum cpuid_leafs
|
|||
NR_CPUID_WORDS,
|
||||
};
|
||||
|
||||
#define X86_CAP_FMT_NUM "%d:%d"
|
||||
#define x86_cap_flag_num(flag) ((flag) >> 5), ((flag) & 31)
|
||||
|
||||
extern const char * const x86_cap_flags[NCAPINTS*32];
|
||||
extern const char * const x86_power_flags[32];
|
||||
#define X86_CAP_FMT "%s"
|
||||
#define x86_cap_flag(flag) x86_cap_flags[flag]
|
||||
|
||||
/*
|
||||
* In order to save room, we index into this array by doing
|
||||
* X86_BUG_<name> - NCAPINTS*32.
|
||||
*/
|
||||
extern const char * const x86_bug_flags[NBUGINTS*32];
|
||||
#define x86_bug_flag(flag) x86_bug_flags[flag]
|
||||
|
||||
#define test_cpu_cap(c, bit) \
|
||||
arch_test_bit(bit, (unsigned long *)((c)->x86_capability))
|
||||
|
|
|
|||
|
|
@ -6,6 +6,7 @@
|
|||
#ifndef _ASM_X86_CPUID_H
|
||||
#define _ASM_X86_CPUID_H
|
||||
|
||||
#include <linux/build_bug.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
#include <asm/string.h>
|
||||
|
|
|
|||
|
|
@ -110,9 +110,9 @@
|
|||
|
||||
#define INTEL_SAPPHIRERAPIDS_X IFM(6, 0x8F) /* Golden Cove */
|
||||
|
||||
#define INTEL_EMERALDRAPIDS_X IFM(6, 0xCF)
|
||||
#define INTEL_EMERALDRAPIDS_X IFM(6, 0xCF) /* Raptor Cove */
|
||||
|
||||
#define INTEL_GRANITERAPIDS_X IFM(6, 0xAD)
|
||||
#define INTEL_GRANITERAPIDS_X IFM(6, 0xAD) /* Redwood Cove */
|
||||
#define INTEL_GRANITERAPIDS_D IFM(6, 0xAE)
|
||||
|
||||
/* "Hybrid" Processors (P-Core/E-Core) */
|
||||
|
|
@ -126,16 +126,16 @@
|
|||
#define INTEL_RAPTORLAKE_P IFM(6, 0xBA)
|
||||
#define INTEL_RAPTORLAKE_S IFM(6, 0xBF)
|
||||
|
||||
#define INTEL_METEORLAKE IFM(6, 0xAC)
|
||||
#define INTEL_METEORLAKE IFM(6, 0xAC) /* Redwood Cove / Crestmont */
|
||||
#define INTEL_METEORLAKE_L IFM(6, 0xAA)
|
||||
|
||||
#define INTEL_ARROWLAKE_H IFM(6, 0xC5)
|
||||
#define INTEL_ARROWLAKE_H IFM(6, 0xC5) /* Lion Cove / Skymont */
|
||||
#define INTEL_ARROWLAKE IFM(6, 0xC6)
|
||||
#define INTEL_ARROWLAKE_U IFM(6, 0xB5)
|
||||
|
||||
#define INTEL_LUNARLAKE_M IFM(6, 0xBD)
|
||||
#define INTEL_LUNARLAKE_M IFM(6, 0xBD) /* Lion Cove / Skymont */
|
||||
|
||||
#define INTEL_PANTHERLAKE_L IFM(6, 0xCC)
|
||||
#define INTEL_PANTHERLAKE_L IFM(6, 0xCC) /* Cougar Cove / Crestmont */
|
||||
|
||||
/* "Small Core" Processors (Atom/E-Core) */
|
||||
|
||||
|
|
@ -149,9 +149,9 @@
|
|||
#define INTEL_ATOM_SILVERMONT IFM(6, 0x37) /* Bay Trail, Valleyview */
|
||||
#define INTEL_ATOM_SILVERMONT_D IFM(6, 0x4D) /* Avaton, Rangely */
|
||||
#define INTEL_ATOM_SILVERMONT_MID IFM(6, 0x4A) /* Merriefield */
|
||||
#define INTEL_ATOM_SILVERMONT_MID2 IFM(6, 0x5A) /* Anniedale */
|
||||
|
||||
#define INTEL_ATOM_AIRMONT IFM(6, 0x4C) /* Cherry Trail, Braswell */
|
||||
#define INTEL_ATOM_AIRMONT_MID IFM(6, 0x5A) /* Moorefield */
|
||||
#define INTEL_ATOM_AIRMONT_NP IFM(6, 0x75) /* Lightning Mountain */
|
||||
|
||||
#define INTEL_ATOM_GOLDMONT IFM(6, 0x5C) /* Apollo Lake */
|
||||
|
|
@@ -182,10 +182,23 @@
/* Family 19 */
#define INTEL_PANTHERCOVE_X	IFM(19, 0x01) /* Diamond Rapids */

-/* CPU core types */
+/*
+ * Intel CPU core types
+ *
+ * CPUID.1AH.EAX[31:0] uniquely identifies the microarchitecture
+ * of the core. Bits 31-24 indicates its core type (Core or Atom)
+ * and Bits [23:0] indicates the native model ID of the core.
+ * Core type and native model ID are defined in below enumerations.
+ */
enum intel_cpu_type {
	INTEL_CPU_TYPE_UNKNOWN,
	INTEL_CPU_TYPE_ATOM = 0x20,
	INTEL_CPU_TYPE_CORE = 0x40,
};

+enum intel_native_id {
+	INTEL_ATOM_CMT_NATIVE_ID = 0x2, /* Crestmont */
+	INTEL_ATOM_SKT_NATIVE_ID = 0x3, /* Skymont */
+};
+
#endif /* _ASM_X86_INTEL_FAMILY_H */
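For reference, the bit layout documented in the comment above can be read from userspace as well. A hedged sketch (cpuid.h is the GCC/Clang helper header; the program is illustrative and not part of this change):

    #include <stdint.h>
    #include <stdio.h>
    #include <cpuid.h>      /* __get_cpuid_count(), GCC/Clang specific */

    int main(void)
    {
            unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;

            /* CPUID.1AH.EAX: bits 31:24 = core type, bits 23:0 = native model ID */
            if (!__get_cpuid_count(0x1a, 0, &eax, &ebx, &ecx, &edx)) {
                    puts("CPUID leaf 0x1a not supported");
                    return 1;
            }

            uint8_t  core_type = eax >> 24;       /* 0x20 Atom, 0x40 Core */
            uint32_t native_id = eax & 0xffffff;  /* e.g. 0x2 Crestmont, 0x3 Skymont */

            printf("core type 0x%x, native model id 0x%x\n", core_type, native_id);
            return 0;
    }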
@ -175,6 +175,9 @@ extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size, un
|
|||
extern void __iomem *ioremap_encrypted(resource_size_t phys_addr, unsigned long size);
|
||||
#define ioremap_encrypted ioremap_encrypted
|
||||
|
||||
void *arch_memremap_wb(phys_addr_t phys_addr, size_t size, unsigned long flags);
|
||||
#define arch_memremap_wb arch_memremap_wb
|
||||
|
||||
/**
|
||||
* ioremap - map bus memory into CPU space
|
||||
* @offset: bus address of the memory
|
||||
|
|
|
|||
|
|
@@ -198,9 +198,8 @@
.endm

/*
- * Equivalent to -mindirect-branch-cs-prefix; emit the 5 byte jmp/call
- * to the retpoline thunk with a CS prefix when the register requires
- * a RAX prefix byte to encode. Also see apply_retpolines().
+ * Emits a conditional CS prefix that is compatible with
+ * -mindirect-branch-cs-prefix.
 */
.macro __CS_PREFIX reg:req
	.irp rs,r8,r9,r10,r11,r12,r13,r14,r15

@@ -420,20 +419,27 @@ static inline void call_depth_return_thunk(void) {}

#ifdef CONFIG_X86_64

+/*
+ * Emits a conditional CS prefix that is compatible with
+ * -mindirect-branch-cs-prefix.
+ */
+#define __CS_PREFIX(reg) \
+	".irp rs,r8,r9,r10,r11,r12,r13,r14,r15\n" \
+	".ifc \\rs," reg "\n" \
+	".byte 0x2e\n" \
+	".endif\n" \
+	".endr\n"
+
/*
 * Inline asm uses the %V modifier which is only in newer GCC
 * which is ensured when CONFIG_MITIGATION_RETPOLINE is defined.
 */
-# define CALL_NOSPEC \
-	ALTERNATIVE_2( \
-	ANNOTATE_RETPOLINE_SAFE \
-	"call *%[thunk_target]\n", \
-	"call __x86_indirect_thunk_%V[thunk_target]\n", \
-	X86_FEATURE_RETPOLINE, \
-	"lfence;\n" \
-	ANNOTATE_RETPOLINE_SAFE \
-	"call *%[thunk_target]\n", \
-	X86_FEATURE_RETPOLINE_LFENCE)
+#ifdef CONFIG_MITIGATION_RETPOLINE
+#define CALL_NOSPEC	__CS_PREFIX("%V[thunk_target]") \
+			"call __x86_indirect_thunk_%V[thunk_target]\n"
+#else
+#define CALL_NOSPEC	"call *%[thunk_target]\n"
+#endif

# define THUNK_TARGET(addr) [thunk_target] "r" (addr)
|
|
|
|||
|
|
@ -11,8 +11,8 @@
|
|||
* a virtual address space of one gigabyte, which limits the
|
||||
* amount of physical memory you can use to about 950MB.
|
||||
*
|
||||
* If you want more physical memory than this then see the CONFIG_HIGHMEM4G
|
||||
* and CONFIG_HIGHMEM64G options in the kernel configuration.
|
||||
* If you want more physical memory than this then see the CONFIG_VMSPLIT_2G
|
||||
* and CONFIG_HIGHMEM4G options in the kernel configuration.
|
||||
*/
|
||||
#define __PAGE_OFFSET_BASE _AC(CONFIG_PAGE_OFFSET, UL)
|
||||
#define __PAGE_OFFSET __PAGE_OFFSET_BASE
|
||||
|
|
|
|||
|
|
@ -91,11 +91,6 @@ static inline void __flush_tlb_multi(const struct cpumask *cpumask,
|
|||
PVOP_VCALL2(mmu.flush_tlb_multi, cpumask, info);
|
||||
}
|
||||
|
||||
static inline void paravirt_tlb_remove_table(struct mmu_gather *tlb, void *table)
|
||||
{
|
||||
PVOP_VCALL2(mmu.tlb_remove_table, tlb, table);
|
||||
}
|
||||
|
||||
static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
|
||||
{
|
||||
PVOP_VCALL1(mmu.exit_mmap, mm);
|
||||
|
|
|
|||
|
|
@ -134,8 +134,6 @@ struct pv_mmu_ops {
|
|||
void (*flush_tlb_multi)(const struct cpumask *cpus,
|
||||
const struct flush_tlb_info *info);
|
||||
|
||||
void (*tlb_remove_table)(struct mmu_gather *tlb, void *table);
|
||||
|
||||
/* Hook for intercepting the destruction of an mm_struct. */
|
||||
void (*exit_mmap)(struct mm_struct *mm);
|
||||
void (*notify_page_enc_status_changed)(unsigned long pfn, int npages, bool enc);
|
||||
|
|
|
|||
|
|
@ -29,11 +29,6 @@ static inline void paravirt_release_pud(unsigned long pfn) {}
|
|||
static inline void paravirt_release_p4d(unsigned long pfn) {}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Flags to use when allocating a user page table page.
|
||||
*/
|
||||
extern gfp_t __userpte_alloc_gfp;
|
||||
|
||||
#ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION
|
||||
/*
|
||||
* Instead of one PGD, we acquire two PGDs. Being order-1, it is
|
||||
|
|
|
|||
|
|
@ -33,6 +33,7 @@
|
|||
#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SOFTW1
|
||||
#define _PAGE_BIT_UFFD_WP _PAGE_BIT_SOFTW2 /* userfaultfd wrprotected */
|
||||
#define _PAGE_BIT_SOFT_DIRTY _PAGE_BIT_SOFTW3 /* software dirty tracking */
|
||||
#define _PAGE_BIT_KERNEL_4K _PAGE_BIT_SOFTW3 /* page must not be converted to large */
|
||||
#define _PAGE_BIT_DEVMAP _PAGE_BIT_SOFTW4
|
||||
|
||||
#ifdef CONFIG_X86_64
|
||||
|
|
@ -64,6 +65,7 @@
|
|||
#define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
|
||||
#define _PAGE_SPECIAL (_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
|
||||
#define _PAGE_CPA_TEST (_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST)
|
||||
#define _PAGE_KERNEL_4K (_AT(pteval_t, 1) << _PAGE_BIT_KERNEL_4K)
|
||||
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
|
||||
#define _PAGE_PKEY_BIT0 (_AT(pteval_t, 1) << _PAGE_BIT_PKEY_BIT0)
|
||||
#define _PAGE_PKEY_BIT1 (_AT(pteval_t, 1) << _PAGE_BIT_PKEY_BIT1)
|
||||
|
|
|
|||
|
|
@ -60,18 +60,13 @@ struct vm86;
|
|||
# define ARCH_MIN_MMSTRUCT_ALIGN 0
|
||||
#endif
|
||||
|
||||
enum tlb_infos {
|
||||
ENTRIES,
|
||||
NR_INFO
|
||||
};
|
||||
|
||||
extern u16 __read_mostly tlb_lli_4k[NR_INFO];
|
||||
extern u16 __read_mostly tlb_lli_2m[NR_INFO];
|
||||
extern u16 __read_mostly tlb_lli_4m[NR_INFO];
|
||||
extern u16 __read_mostly tlb_lld_4k[NR_INFO];
|
||||
extern u16 __read_mostly tlb_lld_2m[NR_INFO];
|
||||
extern u16 __read_mostly tlb_lld_4m[NR_INFO];
|
||||
extern u16 __read_mostly tlb_lld_1g[NR_INFO];
|
||||
extern u16 __read_mostly tlb_lli_4k;
|
||||
extern u16 __read_mostly tlb_lli_2m;
|
||||
extern u16 __read_mostly tlb_lli_4m;
|
||||
extern u16 __read_mostly tlb_lld_4k;
|
||||
extern u16 __read_mostly tlb_lld_2m;
|
||||
extern u16 __read_mostly tlb_lld_4m;
|
||||
extern u16 __read_mostly tlb_lld_1g;
|
||||
|
||||
/*
|
||||
* CPU type and hardware bug flags. Kept separately for each CPU.
|
||||
|
|
@ -234,7 +229,7 @@ static inline unsigned long long l1tf_pfn_limit(void)
|
|||
void init_cpu_devs(void);
|
||||
void get_cpu_vendor(struct cpuinfo_x86 *c);
|
||||
extern void early_cpu_init(void);
|
||||
extern void identify_secondary_cpu(struct cpuinfo_x86 *);
|
||||
extern void identify_secondary_cpu(unsigned int cpu);
|
||||
extern void print_cpu_info(struct cpuinfo_x86 *);
|
||||
void print_cpu_msr(struct cpuinfo_x86 *);
|
||||
|
||||
|
|
|
|||
|
|
@ -23,7 +23,7 @@
|
|||
# define NEED_PAE 0
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_X86_CMPXCHG64
|
||||
#ifdef CONFIG_X86_CX8
|
||||
# define NEED_CX8 (1<<(X86_FEATURE_CX8 & 31))
|
||||
#else
|
||||
# define NEED_CX8 0
|
||||
|
|
|
|||
|
|
@ -114,13 +114,12 @@ void wbinvd_on_cpu(int cpu);
|
|||
int wbinvd_on_all_cpus(void);
|
||||
|
||||
void smp_kick_mwait_play_dead(void);
|
||||
void __noreturn mwait_play_dead(unsigned int eax_hint);
|
||||
|
||||
void native_smp_send_reschedule(int cpu);
|
||||
void native_send_call_func_ipi(const struct cpumask *mask);
|
||||
void native_send_call_func_single_ipi(int cpu);
|
||||
|
||||
void smp_store_cpu_info(int id);
|
||||
|
||||
asmlinkage __visible void smp_reboot_interrupt(void);
|
||||
__visible void smp_reschedule_interrupt(struct pt_regs *regs);
|
||||
__visible void smp_call_function_interrupt(struct pt_regs *regs);
|
||||
|
|
@ -164,6 +163,8 @@ static inline struct cpumask *cpu_llc_shared_mask(int cpu)
|
|||
{
|
||||
return (struct cpumask *)cpumask_of(0);
|
||||
}
|
||||
|
||||
static inline void __noreturn mwait_play_dead(unsigned int eax_hint) { BUG(); }
|
||||
#endif /* CONFIG_SMP */
|
||||
|
||||
#ifdef CONFIG_DEBUG_NMI_SELFTEST
|
||||
|
|
|
|||
|
|
@ -1,13 +0,0 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
/*
|
||||
* Header file for STMicroelectronics ConneXt (STA2X11) IOHub
|
||||
*/
|
||||
#ifndef __ASM_STA2X11_H
|
||||
#define __ASM_STA2X11_H
|
||||
|
||||
#include <linux/pci.h>
|
||||
|
||||
/* This needs to be called from the MFD to configure its sub-devices */
|
||||
struct sta2x11_instance *sta2x11_get_instance(struct pci_dev *pdev);
|
||||
|
||||
#endif /* __ASM_STA2X11_H */
|
||||
|
|
@ -242,7 +242,7 @@ void flush_tlb_multi(const struct cpumask *cpumask,
|
|||
flush_tlb_mm_range((vma)->vm_mm, start, end, \
|
||||
((vma)->vm_flags & VM_HUGETLB) \
|
||||
? huge_page_shift(hstate_vma(vma)) \
|
||||
: PAGE_SHIFT, false)
|
||||
: PAGE_SHIFT, true)
|
||||
|
||||
extern void flush_tlb_all(void);
|
||||
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
Some files were not shown because too many files have changed in this diff.