mirror of https://github.com/torvalds/linux.git
irqchip/gic-v3: Switch high priority PPIs over to handle_percpu_devid_irq()
It so appears that handle_percpu_devid_irq() is extremely similar to handle_percpu_devid_fasteoi_nmi(), and that the differences do not justify the horrid machinery in the GICv3 driver to handle the flow handler switch. Stick with the standard flow handler, even for NMIs.

Suggested-by: Will Deacon <will@kernel.org>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Will Deacon <will@kernel.org>
Link: https://patch.msgid.link/20251020122944.3074811-11-maz@kernel.org
This commit is contained in:
parent f6c8aced7c
commit 21bbbc50f3
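For context on the similarity the commit message appeals to, here is a loosely paraphrased, heavily simplified sketch of the two generic flow handlers involved (modelled on kernel/irq/chip.c, not copied from this tree; statistics accounting and tracepoints are elided, and the sketch_* names are made up for illustration):

/*
 * Sketch of handle_percpu_devid_irq(): ack (if the chip has one), run the
 * per-CPU devid action, fall back to a spurious-IRQ path when no action is
 * installed, then EOI.
 */
static void sketch_percpu_devid_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	unsigned int irq = irq_desc_get_irq(desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	if (likely(action))
		action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
	else
		pr_err_once("Spurious percpu IRQ%u\n", irq); /* simplified */

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

/*
 * Sketch of handle_percpu_devid_fasteoi_nmi(): same shape, minus the ack
 * and the spurious-IRQ fallback.
 */
static void sketch_percpu_devid_fasteoi_nmi(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	unsigned int irq = irq_desc_get_irq(desc);

	action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

The patch's argument is that this delta is too small to justify swapping desc->handle_irq back and forth under refcounting, so per-CPU interrupts can simply keep the standard flow handler.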
@@ -230,9 +230,6 @@ static void __init gic_prio_init(void)
 		!cpus_have_group0);
 }
 
-/* rdist_nmi_refs[n] == number of cpus having the rdist interrupt n set as NMI */
-static refcount_t *rdist_nmi_refs;
-
 static struct gic_kvm_info gic_v3_kvm_info __initdata;
 static DEFINE_PER_CPU(bool, has_rss);
 
@@ -608,24 +605,6 @@ static u32 __gic_get_ppi_index(irq_hw_number_t hwirq)
 	}
 }
 
-static u32 __gic_get_rdist_index(irq_hw_number_t hwirq)
-{
-	switch (__get_intid_range(hwirq)) {
-	case SGI_RANGE:
-	case PPI_RANGE:
-		return hwirq;
-	case EPPI_RANGE:
-		return hwirq - EPPI_BASE_INTID + 32;
-	default:
-		unreachable();
-	}
-}
-
-static u32 gic_get_rdist_index(struct irq_data *d)
-{
-	return __gic_get_rdist_index(d->hwirq);
-}
-
 static int gic_irq_nmi_setup(struct irq_data *d)
 {
 	struct irq_desc *desc = irq_to_desc(d->irq);
@@ -646,20 +625,8 @@ static int gic_irq_nmi_setup(struct irq_data *d)
 		return -EINVAL;
 
 	/* desc lock should already be held */
-	if (gic_irq_in_rdist(d)) {
-		u32 idx = gic_get_rdist_index(d);
-
-		/*
-		 * Setting up a percpu interrupt as NMI, only switch handler
-		 * for first NMI
-		 */
-		if (!refcount_inc_not_zero(&rdist_nmi_refs[idx])) {
-			refcount_set(&rdist_nmi_refs[idx], 1);
-			desc->handle_irq = handle_percpu_devid_fasteoi_nmi;
-		}
-	} else {
+	if (!gic_irq_in_rdist(d))
 		desc->handle_irq = handle_fasteoi_nmi;
-	}
 
 	gic_irq_set_prio(d, dist_prio_nmi);
 
@@ -686,15 +653,8 @@ static void gic_irq_nmi_teardown(struct irq_data *d)
 		return;
 
 	/* desc lock should already be held */
-	if (gic_irq_in_rdist(d)) {
-		u32 idx = gic_get_rdist_index(d);
-
-		/* Tearing down NMI, only switch handler for last NMI */
-		if (refcount_dec_and_test(&rdist_nmi_refs[idx]))
-			desc->handle_irq = handle_percpu_devid_irq;
-	} else {
+	if (!gic_irq_in_rdist(d))
 		desc->handle_irq = handle_fasteoi_irq;
-	}
 
 	gic_irq_set_prio(d, dist_prio_irq);
 }
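Taken together, the two hunks above mean that per-CPU (redistributor) interrupts no longer switch flow handler at all: they stay on handle_percpu_devid_irq() whether or not they are configured as NMIs, and only SPIs still toggle between handle_fasteoi_irq() and handle_fasteoi_nmi(). Pieced together from the context and added lines above (elisions marked, surrounding code assumed unchanged), the resulting bodies read roughly:

static int gic_irq_nmi_setup(struct irq_data *d)
{
	/* ... validity checks unchanged ... */

	/* desc lock should already be held */
	if (!gic_irq_in_rdist(d))
		desc->handle_irq = handle_fasteoi_nmi;

	gic_irq_set_prio(d, dist_prio_nmi);
	/* ... */
}

static void gic_irq_nmi_teardown(struct irq_data *d)
{
	/* ... validity checks unchanged ... */

	/* desc lock should already be held */
	if (!gic_irq_in_rdist(d))
		desc->handle_irq = handle_fasteoi_irq;

	gic_irq_set_prio(d, dist_prio_irq);
}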
@@ -2080,19 +2040,9 @@ static const struct gic_quirk gic_quirks[] = {
 
 static void gic_enable_nmi_support(void)
 {
-	int i;
-
 	if (!gic_prio_masking_enabled() || nmi_support_forbidden)
 		return;
 
-	rdist_nmi_refs = kcalloc(gic_data.ppi_nr + SGI_NR,
-				 sizeof(*rdist_nmi_refs), GFP_KERNEL);
-	if (!rdist_nmi_refs)
-		return;
-
-	for (i = 0; i < gic_data.ppi_nr + SGI_NR; i++)
-		refcount_set(&rdist_nmi_refs[i], 0);
-
 	pr_info("Pseudo-NMIs enabled using %s ICC_PMR_EL1 synchronisation\n",
 		gic_has_relaxed_pmr_sync() ? "relaxed" : "forced");
 
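With rdist_nmi_refs gone, gic_enable_nmi_support() no longer has any per-interrupt state to allocate. A reconstruction of the start of the function from the hunk above (assuming the rest of the function is untouched):

static void gic_enable_nmi_support(void)
{
	if (!gic_prio_masking_enabled() || nmi_support_forbidden)
		return;

	pr_info("Pseudo-NMIs enabled using %s ICC_PMR_EL1 synchronisation\n",
		gic_has_relaxed_pmr_sync() ? "relaxed" : "forced");

	/* ... remainder of the function unchanged and not shown ... */
}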