RISC-V: KVM: Remove automatic I/O mapping for VM_PFNMAP

As of commit aac6db75a9 ("vfio/pci: Use unmap_mapping_range()"),
vm_pgoff may no longer be guaranteed to hold the PFN for VM_PFNMAP
regions. Using vma->vm_pgoff to derive the HPA here may therefore
produce incorrect mappings.
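
To illustrate, the derivation that can now go wrong is the one being
removed below; it assumes vm_pgoff of a VM_PFNMAP VMA encodes the
base PFN of the backing physical range:

  /* Historical VM_PFNMAP assumption, no longer reliable: */
  pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;  /* base HPA */
  pa += vm_start - vma->vm_start;                 /* window offset */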

Instead, I/O mappings for such regions can be established on demand
during g-stage page faults, making the upfront ioremap in this path
unnecessary.
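
Roughly, the fault-time path amounts to the sketch below. This is a
simplified illustration rather than the verbatim handler, and
kvm_faultin_pfn() stands in for however the g-stage fault code
resolves the host PFN through the userspace page tables:

  /* On a g-stage fault, look up the host PFN at fault time ... */
  pfn = kvm_faultin_pfn(vcpu, gfn, is_write, &writable, &page);
  if (is_error_noslot_pfn(pfn))
          return -EFAULT;
  /* ... then install the gfn -> pfn mapping in the g-stage table. */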

Fixes: aac6db75a9 ("vfio/pci: Use unmap_mapping_range()")
Signed-off-by: Fangyu Yu <fangyu.yu@linux.alibaba.com>
Tested-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
Reviewed-by: Guo Ren <guoren@kernel.org>
Reviewed-by: Anup Patel <anup@brainfault.org>
Link: https://lore.kernel.org/r/20251021142131.78796-1-fangyu.yu@linux.alibaba.com
Signed-off-by: Anup Patel <anup@brainfault.org>

--- a/arch/riscv/kvm/mmu.c
+++ b/arch/riscv/kvm/mmu.c
@@ -171,7 +171,6 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 				   enum kvm_mr_change change)
 {
 	hva_t hva, reg_end, size;
-	gpa_t base_gpa;
 	bool writable;
 	int ret = 0;
 
@@ -190,15 +189,13 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 	hva = new->userspace_addr;
 	size = new->npages << PAGE_SHIFT;
 	reg_end = hva + size;
-	base_gpa = new->base_gfn << PAGE_SHIFT;
 	writable = !(new->flags & KVM_MEM_READONLY);
 
 	mmap_read_lock(current->mm);
 
 	/*
 	 * A memory region could potentially cover multiple VMAs, and
-	 * any holes between them, so iterate over all of them to find
-	 * out if we can map any of them right now.
+	 * any holes between them, so iterate over all of them.
 	 *
 	 *     +--------------------------------------------+
 	 * +---------------+----------------+   +----------------+
@@ -209,7 +206,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 	 */
 	do {
 		struct vm_area_struct *vma;
-		hva_t vm_start, vm_end;
+		hva_t vm_end;
 
 		vma = find_vma_intersection(current->mm, hva, reg_end);
 		if (!vma)
@@ -225,36 +222,18 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 		}
 
 		/* Take the intersection of this VMA with the memory region */
-		vm_start = max(hva, vma->vm_start);
 		vm_end = min(reg_end, vma->vm_end);
 
-		if (vma->vm_flags & VM_PFNMAP) {
-			gpa_t gpa = base_gpa + (vm_start - hva);
-			phys_addr_t pa;
-
-			pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
-			pa += vm_start - vma->vm_start;
-
-			/* IO region dirty page logging not allowed */
-			if (new->flags & KVM_MEM_LOG_DIRTY_PAGES) {
-				ret = -EINVAL;
-				goto out;
-			}
-
-			ret = kvm_riscv_mmu_ioremap(kvm, gpa, pa, vm_end - vm_start,
-						    writable, false);
-			if (ret)
-				break;
-		}
 		hva = vm_end;
 	} while (hva < reg_end);
 
 	if (change == KVM_MR_FLAGS_ONLY)
 		goto out;
 
-	if (ret)
-		kvm_riscv_mmu_iounmap(kvm, base_gpa, size);
-
 out:
 	mmap_read_unlock(current->mm);
 	return ret;