RISC-V: KVM: No need of explicit writable slot check

There is not much value in explicitly checking whether a memslot is
writable before a write, as the slot may change underneath after the
check. Instead, return an invalid address error when write_guest fails,
since it checks whether the slot is writable anyway.

Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Atish Patra <atishp@rivosinc.com>
Reviewed-by: Anup Patel <anup@brainfault.org>
Acked-by: Paul Walmsley <pjw@kernel.org>
Link: https://lore.kernel.org/r/20250909-pmu_event_info-v6-6-d8f80cacb884@rivosinc.com
Signed-off-by: Anup Patel <anup@brainfault.org>
commit 41f4d0cc33 (parent 880fcc329e)
Authored by Atish Patra on 2025-09-09 00:03:25 -07:00; committed by Anup Patel
2 changed files with 4 additions and 16 deletions
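
A minimal sketch of the pattern the patch adopts (the helper below is hypothetical
and not part of the change): kvm_vcpu_write_guest() already fails when the
destination memslot is missing or not writable, so an SBI handler only needs to
map a non-zero return value to SBI_ERR_INVALID_ADDRESS instead of pre-checking
the slot with kvm_vcpu_gfn_to_hva_prot().

        /* Hypothetical helper illustrating the error mapping used by this patch. */
        static unsigned long sbi_write_guest_shmem(struct kvm_vcpu *vcpu, gpa_t shmem,
                                                   const void *buf, unsigned long len)
        {
                /*
                 * No separate writability check: kvm_vcpu_write_guest() returns
                 * a non-zero value if the slot is absent or read-only, and that
                 * result cannot go stale the way a pre-check can.
                 */
                if (kvm_vcpu_write_guest(vcpu, shmem, buf, len))
                        return SBI_ERR_INVALID_ADDRESS;

                return SBI_SUCCESS;
        }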

arch/riscv/kvm/vcpu_pmu.c

@@ -409,8 +409,6 @@ int kvm_riscv_vcpu_pmu_snapshot_set_shmem(struct kvm_vcpu *vcpu, unsigned long s
         int snapshot_area_size = sizeof(struct riscv_pmu_snapshot_data);
         int sbiret = 0;
         gpa_t saddr;
-        unsigned long hva;
-        bool writable;

         if (!kvpmu || flags) {
                 sbiret = SBI_ERR_INVALID_PARAM;
@@ -432,19 +430,14 @@ int kvm_riscv_vcpu_pmu_snapshot_set_shmem(struct kvm_vcpu *vcpu, unsigned long s
                 goto out;
         }

-        hva = kvm_vcpu_gfn_to_hva_prot(vcpu, saddr >> PAGE_SHIFT, &writable);
-        if (kvm_is_error_hva(hva) || !writable) {
-                sbiret = SBI_ERR_INVALID_ADDRESS;
-                goto out;
-        }
-
         kvpmu->sdata = kzalloc(snapshot_area_size, GFP_ATOMIC);
         if (!kvpmu->sdata)
                 return -ENOMEM;

+        /* No need to check writable slot explicitly as kvm_vcpu_write_guest does it internally */
         if (kvm_vcpu_write_guest(vcpu, saddr, kvpmu->sdata, snapshot_area_size)) {
                 kfree(kvpmu->sdata);
-                sbiret = SBI_ERR_FAILURE;
+                sbiret = SBI_ERR_INVALID_ADDRESS;
                 goto out;
         }

arch/riscv/kvm/vcpu_sbi_sta.c

@@ -85,8 +85,6 @@ static int kvm_sbi_sta_steal_time_set_shmem(struct kvm_vcpu *vcpu)
         unsigned long shmem_phys_hi = cp->a1;
         u32 flags = cp->a2;
         struct sbi_sta_struct zero_sta = {0};
-        unsigned long hva;
-        bool writable;
         gpa_t shmem;
         int ret;

@@ -111,13 +109,10 @@ static int kvm_sbi_sta_steal_time_set_shmem(struct kvm_vcpu *vcpu)
                 return SBI_ERR_INVALID_ADDRESS;
         }

-        hva = kvm_vcpu_gfn_to_hva_prot(vcpu, shmem >> PAGE_SHIFT, &writable);
-        if (kvm_is_error_hva(hva) || !writable)
-                return SBI_ERR_INVALID_ADDRESS;
-
+        /* No need to check writable slot explicitly as kvm_vcpu_write_guest does it internally */
         ret = kvm_vcpu_write_guest(vcpu, shmem, &zero_sta, sizeof(zero_sta));
         if (ret)
-                return SBI_ERR_FAILURE;
+                return SBI_ERR_INVALID_ADDRESS;

         vcpu->arch.sta.shmem = shmem;
         vcpu->arch.sta.last_steal = current->sched_info.run_delay;