Merge tag 'kvm-x86-tdx-6.19' of https://github.com/kvm-x86/linux into HEAD

KVM TDX changes for 6.19:

 - Overhaul the TDX code to address systemic races where KVM (acting on behalf
   of userspace) could inadvertently trigger lock contention in the TDX-Module,
   which KVM was either working around in weird, ugly ways, or was simply
   oblivious to (as proven by Yan tripping several KVM_BUG_ON()s with clever
   selftests).

 - Fix a bug where KVM could corrupt a vCPU's cpu_list when freeing a vCPU if
   creating said vCPU failed partway through.

 - Fix a few sparse warnings (bad annotation, 0 != NULL); see the first sketch below.

 - Use struct_size() to simplify copying capabilities to userspace; see the second sketch below.
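
For readers unfamiliar with the "0 != NULL" class of sparse warning: using a literal 0 where a pointer is expected trips sparse's "Using plain integer as NULL pointer" check. A minimal, purely illustrative sketch (hypothetical functions, not the code touched by this series):

/* Illustrative only: sparse warns about the plain integer. */
static struct page *hypothetical_lookup(void)
{
        return 0;       /* sparse: "Using plain integer as NULL pointer" */
}

/* The fix is simply to return NULL, which also reads more clearly. */
static struct page *hypothetical_lookup_fixed(void)
{
        return NULL;
}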
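
As for struct_size(): the helper from <linux/overflow.h> evaluates to sizeof(*p) plus count trailing flexible-array elements, saturating rather than wrapping on overflow, so it replaces open-coded size math when copying a header-plus-array structure to userspace. A minimal sketch with hypothetical structure and field names (not the actual TDX capabilities code):

#include <linux/overflow.h>
#include <linux/types.h>
#include <linux/uaccess.h>

/* Hypothetical capability blob: fixed header plus a flexible array. */
struct example_caps {
        __u32 nr_entries;
        __u32 padding;
        __u64 entries[];
};

static int example_copy_caps_to_user(void __user *uaddr,
                                     const struct example_caps *caps)
{
        /*
         * struct_size(caps, entries, caps->nr_entries) computes
         * sizeof(*caps) + caps->nr_entries * sizeof(caps->entries[0]),
         * saturating instead of overflowing.
         */
        if (copy_to_user(uaddr, caps, struct_size(caps, entries, caps->nr_entries)))
                return -EFAULT;
        return 0;
}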
commit de8e8ebb1a
Paolo Bonzini, 2025-11-26 09:36:37 +01:00
24 changed files with 493 additions and 446 deletions


@@ -1835,6 +1835,12 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
return r;
}
long kvm_arch_vcpu_unlocked_ioctl(struct file *filp, unsigned int ioctl,
unsigned long arg)
{
return -ENOIOCTLCMD;
}
void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{


@@ -25,7 +25,6 @@ config KVM
select HAVE_KVM_IRQCHIP
select HAVE_KVM_MSI
select HAVE_KVM_READONLY_MEM
select HAVE_KVM_VCPU_ASYNC_IOCTL
select KVM_COMMON
select KVM_GENERIC_DIRTYLOG_READ_PROTECT
select KVM_GENERIC_HARDWARE_ENABLING


@@ -1473,8 +1473,8 @@ int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
return 0;
}
long kvm_arch_vcpu_async_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
long kvm_arch_vcpu_unlocked_ioctl(struct file *filp, unsigned int ioctl,
unsigned long arg)
{
void __user *argp = (void __user *)arg;
struct kvm_vcpu *vcpu = filp->private_data;


@@ -22,7 +22,6 @@ config KVM
select EXPORT_UASM
select KVM_COMMON
select KVM_GENERIC_DIRTYLOG_READ_PROTECT
select HAVE_KVM_VCPU_ASYNC_IOCTL
select KVM_MMIO
select KVM_GENERIC_MMU_NOTIFIER
select KVM_GENERIC_HARDWARE_ENABLING


@@ -895,8 +895,8 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
return r;
}
long kvm_arch_vcpu_async_ioctl(struct file *filp, unsigned int ioctl,
unsigned long arg)
long kvm_arch_vcpu_unlocked_ioctl(struct file *filp, unsigned int ioctl,
unsigned long arg)
{
struct kvm_vcpu *vcpu = filp->private_data;
void __user *argp = (void __user *)arg;


@@ -20,7 +20,6 @@ if VIRTUALIZATION
config KVM
bool
select KVM_COMMON
select HAVE_KVM_VCPU_ASYNC_IOCTL
select KVM_VFIO
select HAVE_KVM_IRQ_BYPASS


@@ -2028,8 +2028,8 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
return -EINVAL;
}
long kvm_arch_vcpu_async_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
long kvm_arch_vcpu_unlocked_ioctl(struct file *filp, unsigned int ioctl,
unsigned long arg)
{
struct kvm_vcpu *vcpu = filp->private_data;
void __user *argp = (void __user *)arg;


@@ -23,7 +23,6 @@ config KVM
select HAVE_KVM_IRQCHIP
select HAVE_KVM_IRQ_ROUTING
select HAVE_KVM_MSI
select HAVE_KVM_VCPU_ASYNC_IOCTL
select HAVE_KVM_READONLY_MEM
select HAVE_KVM_DIRTY_RING_ACQ_REL
select KVM_COMMON


@@ -238,8 +238,8 @@ vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
}
long kvm_arch_vcpu_async_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
long kvm_arch_vcpu_unlocked_ioctl(struct file *filp, unsigned int ioctl,
unsigned long arg)
{
struct kvm_vcpu *vcpu = filp->private_data;
void __user *argp = (void __user *)arg;


@@ -20,7 +20,6 @@ config KVM
def_tristate y
prompt "Kernel-based Virtual Machine (KVM) support"
select HAVE_KVM_CPU_RELAX_INTERCEPT
select HAVE_KVM_VCPU_ASYNC_IOCTL
select KVM_ASYNC_PF
select KVM_ASYNC_PF_SYNC
select KVM_COMMON


@@ -5730,8 +5730,8 @@ static long kvm_s390_vcpu_memsida_op(struct kvm_vcpu *vcpu,
return r;
}
long kvm_arch_vcpu_async_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
long kvm_arch_vcpu_unlocked_ioctl(struct file *filp, unsigned int ioctl,
unsigned long arg)
{
struct kvm_vcpu *vcpu = filp->private_data;
void __user *argp = (void __user *)arg;


@@ -128,6 +128,7 @@ KVM_X86_OP(enable_smi_window)
KVM_X86_OP_OPTIONAL(dev_get_attr)
KVM_X86_OP_OPTIONAL(mem_enc_ioctl)
KVM_X86_OP_OPTIONAL(vcpu_mem_enc_ioctl)
KVM_X86_OP_OPTIONAL(vcpu_mem_enc_unlocked_ioctl)
KVM_X86_OP_OPTIONAL(mem_enc_register_region)
KVM_X86_OP_OPTIONAL(mem_enc_unregister_region)
KVM_X86_OP_OPTIONAL(vm_copy_enc_context_from)


@@ -1843,15 +1843,15 @@ struct kvm_x86_ops {
void *external_spt);
/* Update the external page table from spte getting set. */
int (*set_external_spte)(struct kvm *kvm, gfn_t gfn, enum pg_level level,
kvm_pfn_t pfn_for_gfn);
u64 mirror_spte);
/* Update external page tables for page table about to be freed. */
int (*free_external_spt)(struct kvm *kvm, gfn_t gfn, enum pg_level level,
void *external_spt);
/* Update external page table from spte getting removed, and flush TLB. */
int (*remove_external_spte)(struct kvm *kvm, gfn_t gfn, enum pg_level level,
kvm_pfn_t pfn_for_gfn);
void (*remove_external_spte)(struct kvm *kvm, gfn_t gfn, enum pg_level level,
u64 mirror_spte);
bool (*has_wbinvd_exit)(void);
@@ -1909,6 +1909,7 @@ struct kvm_x86_ops {
int (*dev_get_attr)(u32 group, u64 attr, u64 *val);
int (*mem_enc_ioctl)(struct kvm *kvm, void __user *argp);
int (*vcpu_mem_enc_ioctl)(struct kvm_vcpu *vcpu, void __user *argp);
int (*vcpu_mem_enc_unlocked_ioctl)(struct kvm_vcpu *vcpu, void __user *argp);
int (*mem_enc_register_region)(struct kvm *kvm, struct kvm_enc_region *argp);
int (*mem_enc_unregister_region)(struct kvm *kvm, struct kvm_enc_region *argp);
int (*vm_copy_enc_context_from)(struct kvm *kvm, unsigned int source_fd);


@@ -255,8 +255,7 @@ extern bool tdp_mmu_enabled;
#define tdp_mmu_enabled false
#endif
bool kvm_tdp_mmu_gpa_is_mapped(struct kvm_vcpu *vcpu, u64 gpa);
int kvm_tdp_map_page(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code, u8 *level);
int kvm_tdp_mmu_map_private_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn);
static inline bool kvm_memslots_have_rmaps(struct kvm *kvm)
{


@@ -4924,7 +4924,8 @@ int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
return direct_page_fault(vcpu, fault);
}
int kvm_tdp_map_page(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code, u8 *level)
static int kvm_tdp_page_prefault(struct kvm_vcpu *vcpu, gpa_t gpa,
u64 error_code, u8 *level)
{
int r;
@@ -4966,7 +4967,6 @@ int kvm_tdp_map_page(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code, u8 *level
return -EIO;
}
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_tdp_map_page);
long kvm_arch_vcpu_pre_fault_memory(struct kvm_vcpu *vcpu,
struct kvm_pre_fault_memory *range)
@@ -5002,7 +5002,7 @@ long kvm_arch_vcpu_pre_fault_memory(struct kvm_vcpu *vcpu,
* Shadow paging uses GVA for kvm page fault, so restrict to
* two-dimensional paging.
*/
r = kvm_tdp_map_page(vcpu, range->gpa | direct_bits, error_code, &level);
r = kvm_tdp_page_prefault(vcpu, range->gpa | direct_bits, error_code, &level);
if (r < 0)
return r;
@@ -5014,6 +5014,86 @@ long kvm_arch_vcpu_pre_fault_memory(struct kvm_vcpu *vcpu,
return min(range->size, end - range->gpa);
}
#ifdef CONFIG_KVM_GUEST_MEMFD
static void kvm_assert_gmem_invalidate_lock_held(struct kvm_memory_slot *slot)
{
#ifdef CONFIG_PROVE_LOCKING
if (WARN_ON_ONCE(!kvm_slot_has_gmem(slot)) ||
WARN_ON_ONCE(!slot->gmem.file) ||
WARN_ON_ONCE(!file_count(slot->gmem.file)))
return;
lockdep_assert_held(&file_inode(slot->gmem.file)->i_mapping->invalidate_lock);
#endif
}
int kvm_tdp_mmu_map_private_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
{
struct kvm_page_fault fault = {
.addr = gfn_to_gpa(gfn),
.error_code = PFERR_GUEST_FINAL_MASK | PFERR_PRIVATE_ACCESS,
.prefetch = true,
.is_tdp = true,
.nx_huge_page_workaround_enabled = is_nx_huge_page_enabled(vcpu->kvm),
.max_level = PG_LEVEL_4K,
.req_level = PG_LEVEL_4K,
.goal_level = PG_LEVEL_4K,
.is_private = true,
.gfn = gfn,
.slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn),
.pfn = pfn,
.map_writable = true,
};
struct kvm *kvm = vcpu->kvm;
int r;
lockdep_assert_held(&kvm->slots_lock);
/*
* Mapping a pre-determined private pfn is intended only for use when
* populating a guest_memfd instance. Assert that the slot is backed
* by guest_memfd and that the gmem instance's invalidate_lock is held.
*/
kvm_assert_gmem_invalidate_lock_held(fault.slot);
if (KVM_BUG_ON(!tdp_mmu_enabled, kvm))
return -EIO;
if (kvm_gfn_is_write_tracked(kvm, fault.slot, fault.gfn))
return -EPERM;
r = kvm_mmu_reload(vcpu);
if (r)
return r;
r = mmu_topup_memory_caches(vcpu, false);
if (r)
return r;
do {
if (signal_pending(current))
return -EINTR;
if (kvm_test_request(KVM_REQ_VM_DEAD, vcpu))
return -EIO;
cond_resched();
guard(read_lock)(&kvm->mmu_lock);
r = kvm_tdp_mmu_map(vcpu, &fault);
} while (r == RET_PF_RETRY);
if (r != RET_PF_FIXED)
return -EIO;
return 0;
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_tdp_mmu_map_private_pfn);
#endif
static void nonpaging_init_context(struct kvm_mmu *context)
{
context->page_fault = nonpaging_page_fault;
@@ -5997,7 +6077,6 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
out:
return r;
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_mmu_load);
void kvm_mmu_unload(struct kvm_vcpu *vcpu)
{


@@ -362,9 +362,6 @@ static void tdp_mmu_unlink_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
static void remove_external_spte(struct kvm *kvm, gfn_t gfn, u64 old_spte,
int level)
{
kvm_pfn_t old_pfn = spte_to_pfn(old_spte);
int ret;
/*
* External (TDX) SPTEs are limited to PG_LEVEL_4K, and external
* PTs are removed in a special order, involving free_external_spt().
@@ -377,9 +374,8 @@ static void remove_external_spte(struct kvm *kvm, gfn_t gfn, u64 old_spte,
/* Zapping leaf spte is allowed only when write lock is held. */
lockdep_assert_held_write(&kvm->mmu_lock);
/* Because write lock is held, operation should success. */
ret = kvm_x86_call(remove_external_spte)(kvm, gfn, level, old_pfn);
KVM_BUG_ON(ret, kvm);
kvm_x86_call(remove_external_spte)(kvm, gfn, level, old_spte);
}
/**
@@ -519,7 +515,6 @@ static int __must_check set_external_spte_present(struct kvm *kvm, tdp_ptep_t sp
bool was_present = is_shadow_present_pte(old_spte);
bool is_present = is_shadow_present_pte(new_spte);
bool is_leaf = is_present && is_last_spte(new_spte, level);
kvm_pfn_t new_pfn = spte_to_pfn(new_spte);
int ret = 0;
KVM_BUG_ON(was_present, kvm);
@@ -538,7 +533,7 @@ static int __must_check set_external_spte_present(struct kvm *kvm, tdp_ptep_t sp
* external page table, or leaf.
*/
if (is_leaf) {
ret = kvm_x86_call(set_external_spte)(kvm, gfn, level, new_pfn);
ret = kvm_x86_call(set_external_spte)(kvm, gfn, level, new_spte);
} else {
void *external_spt = get_external_spt(gfn, new_spte, level);
@@ -1273,6 +1268,8 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
struct kvm_mmu_page *sp;
int ret = RET_PF_RETRY;
KVM_MMU_WARN_ON(!root || root->role.invalid);
kvm_mmu_hugepage_adjust(vcpu, fault);
trace_kvm_mmu_spte_requested(fault);
@@ -1939,13 +1936,16 @@ bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
*
* Must be called between kvm_tdp_mmu_walk_lockless_{begin,end}.
*/
static int __kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
struct kvm_mmu_page *root)
int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
int *root_level)
{
struct kvm_mmu_page *root = root_to_sp(vcpu->arch.mmu->root.hpa);
struct tdp_iter iter;
gfn_t gfn = addr >> PAGE_SHIFT;
int leaf = -1;
*root_level = vcpu->arch.mmu->root_role.level;
for_each_tdp_pte(iter, vcpu->kvm, root, gfn, gfn + 1) {
leaf = iter.level;
sptes[leaf] = iter.old_spte;
@@ -1954,36 +1954,6 @@ static int __kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
return leaf;
}
int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
int *root_level)
{
struct kvm_mmu_page *root = root_to_sp(vcpu->arch.mmu->root.hpa);
*root_level = vcpu->arch.mmu->root_role.level;
return __kvm_tdp_mmu_get_walk(vcpu, addr, sptes, root);
}
bool kvm_tdp_mmu_gpa_is_mapped(struct kvm_vcpu *vcpu, u64 gpa)
{
struct kvm *kvm = vcpu->kvm;
bool is_direct = kvm_is_addr_direct(kvm, gpa);
hpa_t root = is_direct ? vcpu->arch.mmu->root.hpa :
vcpu->arch.mmu->mirror_root_hpa;
u64 sptes[PT64_ROOT_MAX_LEVEL + 1], spte;
int leaf;
lockdep_assert_held(&kvm->mmu_lock);
rcu_read_lock();
leaf = __kvm_tdp_mmu_get_walk(vcpu, gpa, sptes, root_to_sp(root));
rcu_read_unlock();
if (leaf < 0)
return false;
spte = sptes[leaf];
return is_shadow_present_pte(spte) && is_last_spte(spte, leaf);
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_tdp_mmu_gpa_is_mapped);
/*
* Returns the last level spte pointer of the shadow page walk for the given
* gpa, and sets *spte to the spte value. This spte may be non-preset. If no


@@ -831,6 +831,14 @@ static int vt_vcpu_mem_enc_ioctl(struct kvm_vcpu *vcpu, void __user *argp)
return tdx_vcpu_ioctl(vcpu, argp);
}
static int vt_vcpu_mem_enc_unlocked_ioctl(struct kvm_vcpu *vcpu, void __user *argp)
{
if (!is_td_vcpu(vcpu))
return -EINVAL;
return tdx_vcpu_unlocked_ioctl(vcpu, argp);
}
static int vt_gmem_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn,
bool is_private)
{
@@ -1005,6 +1013,7 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
.mem_enc_ioctl = vt_op_tdx_only(mem_enc_ioctl),
.vcpu_mem_enc_ioctl = vt_op_tdx_only(vcpu_mem_enc_ioctl),
.vcpu_mem_enc_unlocked_ioctl = vt_op_tdx_only(vcpu_mem_enc_unlocked_ioctl),
.gmem_max_mapping_level = vt_op_tdx_only(gmem_max_mapping_level)
};

File diff suppressed because it is too large.


@@ -36,8 +36,12 @@ struct kvm_tdx {
struct tdx_td td;
/* For KVM_TDX_INIT_MEM_REGION. */
atomic64_t nr_premapped;
/*
* Scratch pointer used to pass the source page to tdx_mem_page_add().
* Protected by slots_lock, and non-NULL only when mapping a private
* pfn via tdx_gmem_post_populate().
*/
struct page *page_add_src;
/*
* Prevent vCPUs from TD entry to ensure SEPT zap related SEAMCALLs do


@@ -149,6 +149,7 @@ int tdx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);
int tdx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);
int tdx_vcpu_ioctl(struct kvm_vcpu *vcpu, void __user *argp);
int tdx_vcpu_unlocked_ioctl(struct kvm_vcpu *vcpu, void __user *argp);
void tdx_flush_tlb_current(struct kvm_vcpu *vcpu);
void tdx_flush_tlb_all(struct kvm_vcpu *vcpu);


@@ -7200,6 +7200,19 @@ static int kvm_vm_ioctl_set_clock(struct kvm *kvm, void __user *argp)
return 0;
}
long kvm_arch_vcpu_unlocked_ioctl(struct file *filp, unsigned int ioctl,
unsigned long arg)
{
struct kvm_vcpu *vcpu = filp->private_data;
void __user *argp = (void __user *)arg;
if (ioctl == KVM_MEMORY_ENCRYPT_OP &&
kvm_x86_ops.vcpu_mem_enc_unlocked_ioctl)
return kvm_x86_call(vcpu_mem_enc_unlocked_ioctl)(vcpu, argp);
return -ENOIOCTLCMD;
}
int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
{
struct kvm *kvm = filp->private_data;


@@ -1557,6 +1557,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_unlocked_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg);
vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext);
@@ -2437,18 +2439,6 @@ static inline bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
}
#endif /* CONFIG_HAVE_KVM_NO_POLL */
#ifdef CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL
long kvm_arch_vcpu_async_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg);
#else
static inline long kvm_arch_vcpu_async_ioctl(struct file *filp,
unsigned int ioctl,
unsigned long arg)
{
return -ENOIOCTLCMD;
}
#endif /* CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL */
void kvm_arch_guest_memory_reclaimed(struct kvm *kvm);
#ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE


@@ -78,9 +78,6 @@ config HAVE_KVM_IRQ_BYPASS
tristate
select IRQ_BYPASS_MANAGER
config HAVE_KVM_VCPU_ASYNC_IOCTL
bool
config HAVE_KVM_VCPU_RUN_PID_CHANGE
bool


@@ -4434,10 +4434,10 @@ static long kvm_vcpu_ioctl(struct file *filp,
return r;
/*
* Some architectures have vcpu ioctls that are asynchronous to vcpu
* execution; mutex_lock() would break them.
* Let arch code handle select vCPU ioctls without holding vcpu->mutex,
* e.g. to support ioctls that can run asynchronous to vCPU execution.
*/
r = kvm_arch_vcpu_async_ioctl(filp, ioctl, arg);
r = kvm_arch_vcpu_unlocked_ioctl(filp, ioctl, arg);
if (r != -ENOIOCTLCMD)
return r;