KVM: arm64: Spin off release helper from vgic_put_irq()

Spin off the release implementation from vgic_put_irq() to prepare for a
more involved fix for lock ordering such that it may be unnested from
raw spinlocks. This has the minor functional change of doing call_rcu()
behind the xa_lock although it shouldn't be consequential.

Reviewed-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20250905100531.282980-4-oliver.upton@linux.dev
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
This commit is contained in:
Oliver Upton 2025-09-05 03:05:28 -07:00
parent 3a08a6ca7c
commit 0a4aedf2bd
1 changed file with 17 additions and 7 deletions

View File

@ -114,22 +114,32 @@ struct vgic_irq *vgic_get_vcpu_irq(struct kvm_vcpu *vcpu, u32 intid)
return vgic_get_irq(vcpu->kvm, intid);
}
/*
 * Release an LPI's backing storage: drop it from the distributor's LPI
 * xarray and schedule the struct vgic_irq for freeing after an RCU grace
 * period (lockless readers may still hold a reference to the object).
 *
 * Caller must hold dist->lpi_xa.xa_lock; hence the __xa_erase() (non-locking
 * variant) rather than xa_erase().
 */
static void vgic_release_lpi_locked(struct vgic_dist *dist, struct vgic_irq *irq)
{
lockdep_assert_held(&dist->lpi_xa.xa_lock);
__xa_erase(&dist->lpi_xa, irq->intid);
kfree_rcu(irq, rcu);
}
/*
 * Drop a reference on @irq. Returns true when the caller dropped the last
 * reference and must release the IRQ; always false for non-LPI interrupts,
 * whose descriptors are statically allocated and never refcounted.
 */
static __must_check bool __vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
{
	/* Short-circuit keeps the refcount untouched for SGIs/PPIs/SPIs. */
	return irq->intid >= VGIC_MIN_LPI &&
	       refcount_dec_and_test(&irq->refcount);
}
/*
 * Drop a reference on @irq and, if it was the last one, release the LPI.
 *
 * NOTE(review): the hunk as rendered here interleaved the pre-patch lines
 * (separate intid check, refcount_dec_and_test(), __xa_erase() and
 * kfree_rcu()) with the post-patch ones, so the merged text dropped the
 * refcount twice, erased the xarray entry twice and double-queued the RCU
 * free. Reconstructed below is the intended post-patch function: the
 * refcount drop goes through __vgic_put_irq() and the actual release —
 * including kfree_rcu() — happens under the xa_lock via
 * vgic_release_lpi_locked(), as described in the commit message.
 */
void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	unsigned long flags;

	/* Non-LPIs and non-final references: nothing to release. */
	if (!__vgic_put_irq(kvm, irq))
		return;

	/* Last reference dropped; tear down the LPI under the xa_lock. */
	xa_lock_irqsave(&dist->lpi_xa, flags);
	vgic_release_lpi_locked(dist, irq);
	xa_unlock_irqrestore(&dist->lpi_xa, flags);
}
void vgic_flush_pending_lpis(struct kvm_vcpu *vcpu)