FROMGIT: genirq: Retain depth for managed IRQs across CPU hotplug
Affinity-managed IRQs may be shut down and restarted during CPU
hotunplug/plug, and the IRQ may be left in an unexpected state.
Specifically:

 1. IRQ affines to CPU N
 2. disable_irq() -> depth is 1
 3. CPU N goes offline
 4. irq_shutdown() -> depth is set to 1 (again)
 5. CPU N goes online
 6. irq_startup() -> depth is set to 0
    (BUG! client expected IRQ is still disabled)
 7. enable_irq() -> depth underflow / unbalanced enable_irq() WARN

It seems depth only needs to be preserved for managed IRQs plus CPU
hotplug, so per Thomas's recommendation, we make that explicit. Kunit
tests that cover some of this are added in a following patch.

Signed-off-by: Brian Norris <briannorris@chromium.org>
Co-developed-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/all/20250514201353.3481400-2-briannorris@chromium.org

Bug: 417330473
Change-Id: I9e19d182b0c16f353aa9068cac4eff8a64ffb21b
(cherry picked from commit 788019eb559fd0b365f501467ceafce540e377cc
 git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git)
Signed-off-by: Bart Van Assche <bvanassche@google.com>
Committed by: Treehugger Robot
Parent: f57486f69d
Commit: 1d830abe10
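Before the diff, here is a minimal stand-alone sketch of the disable-depth
accounting described in the commit message. It is user-space C only, not
kernel code: struct fake_desc and every helper name (model_disable,
old_managed_shutdown, new_managed_startup, and so on) are invented for
illustration. It contrasts the pre-fix behaviour, where the managed
shutdown/startup path clobbered the depth, with the post-fix behaviour,
where the shutdown stacks one extra disable that the managed startup later
pops.

/*
 * Toy model of the disable-depth accounting across CPU hotplug.
 * Nothing here is kernel code; all names are invented for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_desc {
        int depth;      /* 0 = enabled, >0 = disabled that many times */
        bool enabled;
};

/* Driver-visible calls, modelled on disable_irq()/enable_irq(). */
static void model_disable(struct fake_desc *d)
{
        d->depth++;
        d->enabled = false;
}

static void model_enable(struct fake_desc *d)
{
        if (d->depth == 0) {
                printf("WARN: unbalanced enable\n");    /* step 7, old flow */
                return;
        }
        if (--d->depth == 0)
                d->enabled = true;
}

/* Old behaviour: hotunplug/hotplug clobbers the depth. */
static void old_managed_shutdown(struct fake_desc *d) { d->depth = 1; d->enabled = false; }
static void old_managed_startup(struct fake_desc *d)  { d->depth = 0; d->enabled = true;  }

/* New behaviour: hotunplug stacks one more disable, hotplug pops it. */
static void new_managed_shutdown(struct fake_desc *d) { d->depth++; d->enabled = false; }
static void new_managed_startup(struct fake_desc *d)
{
        if (--d->depth == 0)
                d->enabled = true;
}

int main(void)
{
        struct fake_desc before = { 0, true }, after = { 0, true };

        model_disable(&before);          model_disable(&after);          /* step 2 */
        old_managed_shutdown(&before);   new_managed_shutdown(&after);   /* steps 3-4 */
        old_managed_startup(&before);    new_managed_startup(&after);    /* steps 5-6 */

        /* Old flow: depth 0, IRQ enabled although the driver disabled it.
         * New flow: depth 1, IRQ still disabled, as the driver expects.  */
        printf("old: depth=%d enabled=%d   new: depth=%d enabled=%d\n",
               before.depth, before.enabled, after.depth, after.enabled);

        model_enable(&before);   /* step 7: warns, depth would underflow */
        model_enable(&after);    /* step 7: balances, IRQ comes back up  */
        return 0;
}

The hunks below implement the same pairing in the kernel: irq_shutdown()
now increments desc->depth instead of forcing it to 1, and the hotplug
restore path calls irq_startup_managed(), which only restarts the
interrupt once the depth drops back to zero.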
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -224,6 +224,19 @@ __irq_startup_managed(struct irq_desc *desc, const struct cpumask *aff,
 		return IRQ_STARTUP_ABORT;
 	return IRQ_STARTUP_MANAGED;
 }
+
+void irq_startup_managed(struct irq_desc *desc)
+{
+	/*
+	 * Only start it up when the disable depth is 1, so that a disable,
+	 * hotunplug, hotplug sequence does not end up enabling it during
+	 * hotplug unconditionally.
+	 */
+	desc->depth--;
+	if (!desc->depth)
+		irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
+}
+
 #else
 static __always_inline int
 __irq_startup_managed(struct irq_desc *desc, const struct cpumask *aff,
@@ -276,6 +289,7 @@ int irq_startup(struct irq_desc *desc, bool resend, bool force)
 		ret = __irq_startup(desc);
 		break;
 	case IRQ_STARTUP_ABORT:
+		desc->depth = 1;
 		irqd_set_managed_shutdown(d);
 		return 0;
 	}
@@ -308,7 +322,13 @@ void irq_shutdown(struct irq_desc *desc)
 {
 	if (irqd_is_started(&desc->irq_data)) {
 		clear_irq_resend(desc);
-		desc->depth = 1;
+		/*
+		 * Increment disable depth, so that a managed shutdown on
+		 * CPU hotunplug preserves the actual disabled state when the
+		 * CPU comes back online. See irq_startup_managed().
+		 */
+		desc->depth++;
+
 		if (desc->irq_data.chip->irq_shutdown) {
 			desc->irq_data.chip->irq_shutdown(&desc->irq_data);
 			irq_state_set_disabled(desc);
--- a/kernel/irq/cpuhotplug.c
+++ b/kernel/irq/cpuhotplug.c
@@ -219,7 +219,7 @@ static void irq_restore_affinity_of_irq(struct irq_desc *desc, unsigned int cpu)
 		return;
 
 	if (irqd_is_managed_and_shutdown(data))
-		irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
+		irq_startup_managed(desc);
 
 	/*
 	 * If the interrupt can only be directed to a single target
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -87,6 +87,7 @@ extern void __enable_irq(struct irq_desc *desc);
 extern int irq_activate(struct irq_desc *desc);
 extern int irq_activate_and_startup(struct irq_desc *desc, bool resend);
 extern int irq_startup(struct irq_desc *desc, bool resend, bool force);
+extern void irq_startup_managed(struct irq_desc *desc);
 
 extern void irq_shutdown(struct irq_desc *desc);
 extern void irq_shutdown_and_deactivate(struct irq_desc *desc);