mirror of https://github.com/torvalds/linux.git
KVM: arm64: GICv3: Detect and work around the lack of ICV_DIR_EL1 trapping
A long time ago, an unsuspecting architect forgot to add a trap bit for ICV_DIR_EL1 in ICH_HCR_EL2. Which was unfortunate, but what's a bit of spec between friends? Thankfully, this was fixed in a later revision, and ARM "deprecates" the lack of trapping ability. Unfortunately, a few (billion) CPUs went out with that defect, anything ARMv8.0 from ARM, give or take. And on these CPUs, you can't trap DIR on its own, full stop. As the next best thing, we can trap everything in the common group, which is a tad expensive, but hey ho, that's what you get. You can otherwise recycle the HW in the nearby bin. Tested-by: Fuad Tabba <tabba@google.com> Signed-off-by: Marc Zyngier <maz@kernel.org> Tested-by: Mark Brown <broonie@kernel.org> Link: https://msgid.link/20251120172540.2267180-7-maz@kernel.org Signed-off-by: Oliver Upton <oupton@kernel.org>
This commit is contained in:
parent
567ebfedb5
commit
2a28810cbb
|
|
@ -40,8 +40,13 @@
|
||||||
*/
|
*/
|
||||||
#define HVC_FINALISE_EL2 3
|
#define HVC_FINALISE_EL2 3
|
||||||
|
|
||||||
|
/*
|
||||||
|
* HVC_GET_ICH_VTR_EL2 - Retrieve the ICH_VTR_EL2 value
|
||||||
|
*/
|
||||||
|
#define HVC_GET_ICH_VTR_EL2 4
|
||||||
|
|
||||||
/* Max number of HYP stub hypercalls */
|
/* Max number of HYP stub hypercalls */
|
||||||
#define HVC_STUB_HCALL_NR 4
|
#define HVC_STUB_HCALL_NR 5
|
||||||
|
|
||||||
/* Error returned when an invalid stub number is passed into x0 */
|
/* Error returned when an invalid stub number is passed into x0 */
|
||||||
#define HVC_STUB_ERR 0xbadca11
|
#define HVC_STUB_ERR 0xbadca11
|
||||||
|
|
|
||||||
|
|
@ -2303,6 +2303,49 @@ static bool has_gic_prio_relaxed_sync(const struct arm64_cpu_capabilities *entry
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
static bool can_trap_icv_dir_el1(const struct arm64_cpu_capabilities *entry,
|
||||||
|
int scope)
|
||||||
|
{
|
||||||
|
static const struct midr_range has_vgic_v3[] = {
|
||||||
|
MIDR_ALL_VERSIONS(MIDR_APPLE_M1_ICESTORM),
|
||||||
|
MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM),
|
||||||
|
MIDR_ALL_VERSIONS(MIDR_APPLE_M1_ICESTORM_PRO),
|
||||||
|
MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM_PRO),
|
||||||
|
MIDR_ALL_VERSIONS(MIDR_APPLE_M1_ICESTORM_MAX),
|
||||||
|
MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM_MAX),
|
||||||
|
MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD),
|
||||||
|
MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE),
|
||||||
|
MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD_PRO),
|
||||||
|
MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE_PRO),
|
||||||
|
MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD_MAX),
|
||||||
|
MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE_MAX),
|
||||||
|
{},
|
||||||
|
};
|
||||||
|
struct arm_smccc_res res = {};
|
||||||
|
|
||||||
|
BUILD_BUG_ON(ARM64_HAS_ICH_HCR_EL2_TDIR <= ARM64_HAS_GICV3_CPUIF);
|
||||||
|
BUILD_BUG_ON(ARM64_HAS_ICH_HCR_EL2_TDIR <= ARM64_HAS_GICV5_LEGACY);
|
||||||
|
if (!cpus_have_cap(ARM64_HAS_GICV3_CPUIF) &&
|
||||||
|
!is_midr_in_range_list(has_vgic_v3))
|
||||||
|
return false;
|
||||||
|
|
||||||
|
if (!is_hyp_mode_available())
|
||||||
|
return false;
|
||||||
|
|
||||||
|
if (cpus_have_cap(ARM64_HAS_GICV5_LEGACY))
|
||||||
|
return true;
|
||||||
|
|
||||||
|
if (is_kernel_in_hyp_mode())
|
||||||
|
res.a1 = read_sysreg_s(SYS_ICH_VTR_EL2);
|
||||||
|
else
|
||||||
|
arm_smccc_1_1_hvc(HVC_GET_ICH_VTR_EL2, &res);
|
||||||
|
|
||||||
|
if (res.a0 == HVC_STUB_ERR)
|
||||||
|
return false;
|
||||||
|
|
||||||
|
return res.a1 & ICH_VTR_EL2_TDS;
|
||||||
|
}
|
||||||
|
|
||||||
#ifdef CONFIG_ARM64_BTI
|
#ifdef CONFIG_ARM64_BTI
|
||||||
static void bti_enable(const struct arm64_cpu_capabilities *__unused)
|
static void bti_enable(const struct arm64_cpu_capabilities *__unused)
|
||||||
{
|
{
|
||||||
|
|
@ -2814,6 +2857,15 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
|
||||||
.matches = has_gic_prio_relaxed_sync,
|
.matches = has_gic_prio_relaxed_sync,
|
||||||
},
|
},
|
||||||
#endif
|
#endif
|
||||||
|
{
|
||||||
|
/*
|
||||||
|
* Depends on having GICv3
|
||||||
|
*/
|
||||||
|
.desc = "ICV_DIR_EL1 trapping",
|
||||||
|
.capability = ARM64_HAS_ICH_HCR_EL2_TDIR,
|
||||||
|
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
|
||||||
|
.matches = can_trap_icv_dir_el1,
|
||||||
|
},
|
||||||
#ifdef CONFIG_ARM64_E0PD
|
#ifdef CONFIG_ARM64_E0PD
|
||||||
{
|
{
|
||||||
.desc = "E0PD",
|
.desc = "E0PD",
|
||||||
|
|
|
||||||
|
|
@ -54,6 +54,11 @@ SYM_CODE_START_LOCAL(elx_sync)
|
||||||
1: cmp x0, #HVC_FINALISE_EL2
|
1: cmp x0, #HVC_FINALISE_EL2
|
||||||
b.eq __finalise_el2
|
b.eq __finalise_el2
|
||||||
|
|
||||||
|
cmp x0, #HVC_GET_ICH_VTR_EL2
|
||||||
|
b.ne 2f
|
||||||
|
mrs_s x1, SYS_ICH_VTR_EL2
|
||||||
|
b 9f
|
||||||
|
|
||||||
2: cmp x0, #HVC_SOFT_RESTART
|
2: cmp x0, #HVC_SOFT_RESTART
|
||||||
b.ne 3f
|
b.ne 3f
|
||||||
mov x0, x2
|
mov x0, x2
|
||||||
|
|
|
||||||
|
|
@ -648,6 +648,9 @@ void noinstr kvm_compute_ich_hcr_trap_bits(struct alt_instr *alt,
|
||||||
dir_trap = true;
|
dir_trap = true;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (!cpus_have_cap(ARM64_HAS_ICH_HCR_EL2_TDIR))
|
||||||
|
common_trap = true;
|
||||||
|
|
||||||
if (group0_trap)
|
if (group0_trap)
|
||||||
hcr |= ICH_HCR_EL2_TALL0;
|
hcr |= ICH_HCR_EL2_TALL0;
|
||||||
if (group1_trap)
|
if (group1_trap)
|
||||||
|
|
|
||||||
|
|
@ -40,6 +40,7 @@ HAS_GICV5_CPUIF
|
||||||
HAS_GICV5_LEGACY
|
HAS_GICV5_LEGACY
|
||||||
HAS_GIC_PRIO_MASKING
|
HAS_GIC_PRIO_MASKING
|
||||||
HAS_GIC_PRIO_RELAXED_SYNC
|
HAS_GIC_PRIO_RELAXED_SYNC
|
||||||
|
HAS_ICH_HCR_EL2_TDIR
|
||||||
HAS_HCR_NV1
|
HAS_HCR_NV1
|
||||||
HAS_HCX
|
HAS_HCX
|
||||||
HAS_LDAPR
|
HAS_LDAPR
|
||||||
|
|
|
||||||
Loading…
Reference in New Issue