Merge branch 'rework/suspend-fixes' into for-linus

This commit is contained in:
Petr Mladek 2025-12-01 14:16:28 +01:00
commit 3a9a3f5fb2
3 changed files with 79 additions and 21 deletions

View File

@ -185,6 +185,8 @@ struct console_flush_type {
bool legacy_offload;
};
extern bool console_irqwork_blocked;
/*
* Identify which console flushing methods should be used in the context of
* the caller.
@ -196,7 +198,7 @@ static inline void printk_get_console_flush_type(struct console_flush_type *ft)
switch (nbcon_get_default_prio()) {
case NBCON_PRIO_NORMAL:
if (have_nbcon_console && !have_boot_console) {
if (printk_kthreads_running)
if (printk_kthreads_running && !console_irqwork_blocked)
ft->nbcon_offload = true;
else
ft->nbcon_atomic = true;
@ -206,7 +208,7 @@ static inline void printk_get_console_flush_type(struct console_flush_type *ft)
if (have_legacy_console || have_boot_console) {
if (!is_printk_legacy_deferred())
ft->legacy_direct = true;
else
else if (!console_irqwork_blocked)
ft->legacy_offload = true;
}
break;
@ -219,7 +221,7 @@ static inline void printk_get_console_flush_type(struct console_flush_type *ft)
if (have_legacy_console || have_boot_console) {
if (!is_printk_legacy_deferred())
ft->legacy_direct = true;
else
else if (!console_irqwork_blocked)
ft->legacy_offload = true;
}
break;

View File

@ -1302,6 +1302,13 @@ void nbcon_kthreads_wake(void)
if (!printk_kthreads_running)
return;
/*
* It is not allowed to call this function when console irq_work
* is blocked.
*/
if (WARN_ON_ONCE(console_irqwork_blocked))
return;
cookie = console_srcu_read_lock();
for_each_console_srcu(con) {
if (!(console_srcu_read_flags(con) & CON_NBCON))
@ -1892,7 +1899,7 @@ void nbcon_device_release(struct console *con)
if (console_trylock())
console_unlock();
} else if (ft.legacy_offload) {
printk_trigger_flush();
defer_console_output();
}
}
console_srcu_read_unlock(cookie);

View File

@ -462,6 +462,9 @@ bool have_boot_console;
/* See printk_legacy_allow_panic_sync() for details. */
bool legacy_allow_panic_sync;
/* Avoid using irq_work when suspending. */
bool console_irqwork_blocked;
#ifdef CONFIG_PRINTK
DECLARE_WAIT_QUEUE_HEAD(log_wait);
static DECLARE_WAIT_QUEUE_HEAD(legacy_wait);
@ -2390,7 +2393,7 @@ asmlinkage int vprintk_emit(int facility, int level,
/* If called from the scheduler, we can not call up(). */
if (level == LOGLEVEL_SCHED) {
level = LOGLEVEL_DEFAULT;
ft.legacy_offload |= ft.legacy_direct;
ft.legacy_offload |= ft.legacy_direct && !console_irqwork_blocked;
ft.legacy_direct = false;
}
@ -2426,7 +2429,7 @@ asmlinkage int vprintk_emit(int facility, int level,
if (ft.legacy_offload)
defer_console_output();
else
else if (!console_irqwork_blocked)
wake_up_klogd();
return printed_len;
@ -2730,10 +2733,20 @@ void console_suspend_all(void)
{
struct console *con;
if (console_suspend_enabled)
pr_info("Suspending console(s) (use no_console_suspend to debug)\n");
/*
 * Flush any console backlog, then block queueing of irq_work until
 * console_resume_all(). While blocked, deferred printing is not
 * triggered, NBCON consoles fall back to atomic flushing, and klogd
 * waiters are not woken.
 */
pr_flush(1000, true);
console_irqwork_blocked = true;
if (!console_suspend_enabled)
return;
pr_info("Suspending console(s) (use no_console_suspend to debug)\n");
pr_flush(1000, true);
console_list_lock();
for_each_console(con)
@ -2754,26 +2767,34 @@ void console_resume_all(void)
struct console_flush_type ft;
struct console *con;
if (!console_suspend_enabled)
return;
console_list_lock();
for_each_console(con)
console_srcu_write_flags(con, con->flags & ~CON_SUSPENDED);
console_list_unlock();
/*
* Ensure that all SRCU list walks have completed. All printing
* contexts must be able to see they are no longer suspended so
* that they are guaranteed to wake up and resume printing.
* Allow queueing irq_work. After restoring console state, deferred
* printing and any klogd waiters need to be triggered in case there
* is now a console backlog.
*/
synchronize_srcu(&console_srcu);
console_irqwork_blocked = false;
if (console_suspend_enabled) {
console_list_lock();
for_each_console(con)
console_srcu_write_flags(con, con->flags & ~CON_SUSPENDED);
console_list_unlock();
/*
* Ensure that all SRCU list walks have completed. All printing
* contexts must be able to see they are no longer suspended so
* that they are guaranteed to wake up and resume printing.
*/
synchronize_srcu(&console_srcu);
}
printk_get_console_flush_type(&ft);
if (ft.nbcon_offload)
nbcon_kthreads_wake();
if (ft.legacy_offload)
defer_console_output();
else
wake_up_klogd();
pr_flush(1000, true);
}
@ -4559,6 +4580,13 @@ static void __wake_up_klogd(int val)
if (!printk_percpu_data_ready())
return;
/*
* It is not allowed to call this function when console irq_work
* is blocked.
*/
if (WARN_ON_ONCE(console_irqwork_blocked))
return;
preempt_disable();
/*
* Guarantee any new records can be seen by tasks preparing to wait
@ -4615,9 +4643,30 @@ void defer_console_output(void)
__wake_up_klogd(PRINTK_PENDING_WAKEUP | PRINTK_PENDING_OUTPUT);
}
/**
* printk_trigger_flush - Attempt to flush printk buffer to consoles.
*
* If possible, flush the printk buffer to all consoles in the caller's
* context. If offloading is available, trigger deferred printing.
*
* This is best effort. Depending on the system state, console states,
* and caller context, no actual flushing may result from this call.
*/
void printk_trigger_flush(void)
{
defer_console_output();
struct console_flush_type ft;
printk_get_console_flush_type(&ft);
if (ft.nbcon_atomic)
nbcon_atomic_flush_pending();
if (ft.nbcon_offload)
nbcon_kthreads_wake();
if (ft.legacy_direct) {
if (console_trylock())
console_unlock();
}
if (ft.legacy_offload)
defer_console_output();
}
int vprintk_deferred(const char *fmt, va_list args)