mirror of https://github.com/torvalds/linux.git
KVM: arm64: Add RMW specific sysreg accessor
In a number of cases, we perform a Read-Modify-Write operation on a
system register, meaning that we would apply the RESx masks twice.

Instead, provide a new accessor that performs this RMW operation,
allowing the masks to be applied exactly once per operation.

Reviewed-by: Miguel Luis <miguel.luis@oracle.com>
Reviewed-by: Oliver Upton <oliver.upton@linux.dev>
Link: https://lore.kernel.org/r/20250603070824.1192795-3-maz@kernel.org
Signed-off-by: Marc Zyngier <maz@kernel.org>
parent 6678791ee3
commit 8800b7c4bb
arch/arm64/include/asm/kvm_host.h
@@ -1118,6 +1118,17 @@ u64 kvm_vcpu_apply_reg_masks(const struct kvm_vcpu *, enum vcpu_sysreg, u64);
 		ctxt_sys_reg(ctxt, (r)) = __v;				\
 	} while (0)
 
+#define __vcpu_rmw_sys_reg(v, r, op, val)				\
+	do {								\
+		const struct kvm_cpu_context *ctxt = &(v)->arch.ctxt;	\
+		u64 __v = ctxt_sys_reg(ctxt, (r));			\
+		__v op (val);						\
+		if (vcpu_has_nv((v)) && (r) >= __SANITISED_REG_START__)\
+			__v = kvm_vcpu_apply_reg_masks((v), (r), __v);	\
+									\
+		ctxt_sys_reg(ctxt, (r)) = __v;				\
+	} while (0)
+
 #define __vcpu_sys_reg(v,r)						\
 	(*({								\
 		const struct kvm_cpu_context *ctxt = &(v)->arch.ctxt;	\
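
As an illustration, a call such as __vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, |=, BIT(idx)) expands roughly to the following (a sketch derived from the macro body above; the op token is pasted in textually, so any compound-assignment operator works):

	/* Rough expansion sketch, derived from the macro above. */
	do {
		const struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt;
		u64 __v = ctxt_sys_reg(ctxt, PMOVSSET_EL0);	/* raw read, no masking */
		__v |= (BIT(idx));				/* the modify step */
		if (vcpu_has_nv(vcpu) && PMOVSSET_EL0 >= __SANITISED_REG_START__)
			__v = kvm_vcpu_apply_reg_masks(vcpu, PMOVSSET_EL0, __v);

		ctxt_sys_reg(ctxt, PMOVSSET_EL0) = __v;		/* masked value written back */
	} while (0);

The RESx masks are applied exactly once, to the final value, rather than once on the read and once again on the write-back as with an lvalue-style RMW through __vcpu_sys_reg().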
arch/arm64/kvm/debug.c
@@ -216,9 +216,9 @@ void kvm_debug_set_guest_ownership(struct kvm_vcpu *vcpu)
 void kvm_debug_handle_oslar(struct kvm_vcpu *vcpu, u64 val)
 {
 	if (val & OSLAR_EL1_OSLK)
-		__vcpu_sys_reg(vcpu, OSLSR_EL1) |= OSLSR_EL1_OSLK;
+		__vcpu_rmw_sys_reg(vcpu, OSLSR_EL1, |=, OSLSR_EL1_OSLK);
 	else
-		__vcpu_sys_reg(vcpu, OSLSR_EL1) &= ~OSLSR_EL1_OSLK;
+		__vcpu_rmw_sys_reg(vcpu, OSLSR_EL1, &=, ~OSLSR_EL1_OSLK);
 
 	preempt_disable();
 	kvm_arch_vcpu_put(vcpu);
arch/arm64/kvm/hyp/vhe/sysreg-sr.c
@@ -70,8 +70,8 @@ static void __sysreg_save_vel2_state(struct kvm_vcpu *vcpu)
 		 */
 		val = read_sysreg_el1(SYS_CNTKCTL);
 		val &= CNTKCTL_VALID_BITS;
-		__vcpu_sys_reg(vcpu, CNTHCTL_EL2) &= ~CNTKCTL_VALID_BITS;
-		__vcpu_sys_reg(vcpu, CNTHCTL_EL2) |= val;
+		__vcpu_rmw_sys_reg(vcpu, CNTHCTL_EL2, &=, ~CNTKCTL_VALID_BITS);
+		__vcpu_rmw_sys_reg(vcpu, CNTHCTL_EL2, |=, val);
 	}
 
 	__vcpu_assign_sys_reg(vcpu, SP_EL2, read_sysreg(sp_el1));
arch/arm64/kvm/nested.c
@@ -1757,7 +1757,7 @@ int kvm_init_nv_sysregs(struct kvm_vcpu *vcpu)
 
 out:
	for (enum vcpu_sysreg sr = __SANITISED_REG_START__; sr < NR_SYS_REGS; sr++)
-		(void)__vcpu_sys_reg(vcpu, sr);
+		__vcpu_rmw_sys_reg(vcpu, sr, |=, 0);
 
 	return 0;
 }
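
The "|=, 0" form deserves a note: OR-ing in zero leaves the value unchanged, so the RMW collapses to "read, apply the RESx masks once, write back", i.e. an in-place sanitisation of the shadow register. A hypothetical wrapper (not part of this patch) would make that intent explicit:

	/*
	 * Hypothetical helper, illustration only: sanitise a shadow register
	 * in place by performing a no-op RMW through __vcpu_rmw_sys_reg().
	 */
	static inline void vcpu_sanitise_reg(struct kvm_vcpu *vcpu, enum vcpu_sysreg r)
	{
		__vcpu_rmw_sys_reg(vcpu, r, |=, 0);
	}

This replaces the earlier idiom of forcing a sanitising read and discarding the result with a (void) cast.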
arch/arm64/kvm/pmu-emul.c
@@ -510,7 +510,7 @@ static void kvm_pmu_counter_increment(struct kvm_vcpu *vcpu,
 			continue;
 
 		/* Mark overflow */
-		__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);
+		__vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, |=, BIT(i));
 
 		if (kvm_pmu_counter_can_chain(pmc))
 			kvm_pmu_counter_increment(vcpu, BIT(i + 1),
@@ -556,7 +556,7 @@ static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
 	perf_event->attr.sample_period = period;
 	perf_event->hw.sample_period = period;
 
-	__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(idx);
+	__vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, |=, BIT(idx));
 
 	if (kvm_pmu_counter_can_chain(pmc))
 		kvm_pmu_counter_increment(vcpu, BIT(idx + 1),
@@ -914,9 +914,9 @@ void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu)
 {
 	u64 mask = kvm_pmu_implemented_counter_mask(vcpu);
 
-	__vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= mask;
-	__vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= mask;
-	__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= mask;
+	__vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, &=, mask);
+	__vcpu_rmw_sys_reg(vcpu, PMINTENSET_EL1, &=, mask);
+	__vcpu_rmw_sys_reg(vcpu, PMCNTENSET_EL0, &=, mask);
 
 	kvm_pmu_reprogram_counter_mask(vcpu, mask);
 }
arch/arm64/kvm/sys_regs.c
@@ -791,7 +791,7 @@ static u64 reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 		mask |= GENMASK(n - 1, 0);
 
 	reset_unknown(vcpu, r);
-	__vcpu_sys_reg(vcpu, r->reg) &= mask;
+	__vcpu_rmw_sys_reg(vcpu, r->reg, &=, mask);
 
 	return __vcpu_sys_reg(vcpu, r->reg);
 }
@@ -799,7 +799,7 @@ static u64 reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 static u64 reset_pmevcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 {
 	reset_unknown(vcpu, r);
-	__vcpu_sys_reg(vcpu, r->reg) &= GENMASK(31, 0);
+	__vcpu_rmw_sys_reg(vcpu, r->reg, &=, GENMASK(31, 0));
 
 	return __vcpu_sys_reg(vcpu, r->reg);
 }
@@ -811,7 +811,7 @@ static u64 reset_pmevtyper(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 		return 0;
 
 	reset_unknown(vcpu, r);
-	__vcpu_sys_reg(vcpu, r->reg) &= kvm_pmu_evtyper_mask(vcpu->kvm);
+	__vcpu_rmw_sys_reg(vcpu, r->reg, &=, kvm_pmu_evtyper_mask(vcpu->kvm));
 
 	return __vcpu_sys_reg(vcpu, r->reg);
 }
@@ -819,7 +819,7 @@ static u64 reset_pmevtyper(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 static u64 reset_pmselr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 {
 	reset_unknown(vcpu, r);
-	__vcpu_sys_reg(vcpu, r->reg) &= PMSELR_EL0_SEL_MASK;
+	__vcpu_rmw_sys_reg(vcpu, r->reg, &=, PMSELR_EL0_SEL_MASK);
 
 	return __vcpu_sys_reg(vcpu, r->reg);
 }
@@ -1103,10 +1103,10 @@ static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 		val = p->regval & mask;
 		if (r->Op2 & 0x1)
 			/* accessing PMCNTENSET_EL0 */
-			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
+			__vcpu_rmw_sys_reg(vcpu, PMCNTENSET_EL0, |=, val);
 		else
 			/* accessing PMCNTENCLR_EL0 */
-			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
+			__vcpu_rmw_sys_reg(vcpu, PMCNTENSET_EL0, &=, ~val);
 
 		kvm_pmu_reprogram_counter_mask(vcpu, val);
 	} else {
@@ -1129,10 +1129,10 @@ static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 
 		if (r->Op2 & 0x1)
 			/* accessing PMINTENSET_EL1 */
-			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
+			__vcpu_rmw_sys_reg(vcpu, PMINTENSET_EL1, |=, val);
 		else
 			/* accessing PMINTENCLR_EL1 */
-			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
+			__vcpu_rmw_sys_reg(vcpu, PMINTENSET_EL1, &=, ~val);
 	} else {
 		p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
 	}
@@ -1151,10 +1151,10 @@ static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 	if (p->is_write) {
 		if (r->CRm & 0x2)
 			/* accessing PMOVSSET_EL0 */
-			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask);
+			__vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, |=, (p->regval & mask));
 		else
 			/* accessing PMOVSCLR_EL0 */
-			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
+			__vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, &=, ~(p->regval & mask));
 	} else {
 		p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
 	}
@@ -4786,7 +4786,7 @@ void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
 			r->reset(vcpu, r);
 
 		if (r->reg >= __SANITISED_REG_START__ && r->reg < NR_SYS_REGS)
-			(void)__vcpu_sys_reg(vcpu, r->reg);
+			__vcpu_rmw_sys_reg(vcpu, r->reg, |=, 0);
 	}
 
 	set_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags);
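
Taken together, the accessors seen in this patch split cleanly by role. A usage sketch (the function is invented for illustration; the register names come from the hunks above):

	/* Illustration only, not part of this patch. */
	static void rmw_accessor_example(struct kvm_vcpu *vcpu, u64 bits)
	{
		/* plain write: __vcpu_assign_sys_reg() masks the stored value */
		__vcpu_assign_sys_reg(vcpu, SP_EL2, bits);

		/* read-modify-write: __vcpu_rmw_sys_reg() masks exactly once */
		__vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, |=, bits);

		/* plain read: __vcpu_sys_reg() is still used as an rvalue */
		if (__vcpu_sys_reg(vcpu, PMOVSSET_EL0) & bits)
			return;
	}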