mirror of https://github.com/torvalds/linux.git
timers/migration: Rename 'online' bit to 'available'
The timer migration hierarchy excludes offline CPUs via the tmigr_is_not_available() function, which essentially checks the online bit for the CPU. Rename the online bit to available, and update all references in function names and tracepoints, to generalise the concept of available CPUs. Signed-off-by: Gabriele Monaco <gmonaco@redhat.com> Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Reviewed-by: Frederic Weisbecker <frederic@kernel.org> Reviewed-by: Thomas Gleixner <tglx@linutronix.de> Link: https://patch.msgid.link/20251120145653.296659-2-gmonaco@redhat.com
This commit is contained in:
parent
308bc2e338
commit
8312cab5ff
|
|
@ -173,14 +173,14 @@ DEFINE_EVENT(tmigr_cpugroup, tmigr_cpu_active,
|
||||||
TP_ARGS(tmc)
|
TP_ARGS(tmc)
|
||||||
);
|
);
|
||||||
|
|
||||||
DEFINE_EVENT(tmigr_cpugroup, tmigr_cpu_online,
|
DEFINE_EVENT(tmigr_cpugroup, tmigr_cpu_available,
|
||||||
|
|
||||||
TP_PROTO(struct tmigr_cpu *tmc),
|
TP_PROTO(struct tmigr_cpu *tmc),
|
||||||
|
|
||||||
TP_ARGS(tmc)
|
TP_ARGS(tmc)
|
||||||
);
|
);
|
||||||
|
|
||||||
DEFINE_EVENT(tmigr_cpugroup, tmigr_cpu_offline,
|
DEFINE_EVENT(tmigr_cpugroup, tmigr_cpu_unavailable,
|
||||||
|
|
||||||
TP_PROTO(struct tmigr_cpu *tmc),
|
TP_PROTO(struct tmigr_cpu *tmc),
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -429,7 +429,7 @@ static DEFINE_PER_CPU(struct tmigr_cpu, tmigr_cpu);
|
||||||
|
|
||||||
static inline bool tmigr_is_not_available(struct tmigr_cpu *tmc)
|
static inline bool tmigr_is_not_available(struct tmigr_cpu *tmc)
|
||||||
{
|
{
|
||||||
return !(tmc->tmgroup && tmc->online);
|
return !(tmc->tmgroup && tmc->available);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
|
@ -926,7 +926,7 @@ static void tmigr_handle_remote_cpu(unsigned int cpu, u64 now,
|
||||||
* updated the event takes care when hierarchy is completely
|
* updated the event takes care when hierarchy is completely
|
||||||
* idle. Otherwise the migrator does it as the event is enqueued.
|
* idle. Otherwise the migrator does it as the event is enqueued.
|
||||||
*/
|
*/
|
||||||
if (!tmc->online || tmc->remote || tmc->cpuevt.ignore ||
|
if (!tmc->available || tmc->remote || tmc->cpuevt.ignore ||
|
||||||
now < tmc->cpuevt.nextevt.expires) {
|
now < tmc->cpuevt.nextevt.expires) {
|
||||||
raw_spin_unlock_irq(&tmc->lock);
|
raw_spin_unlock_irq(&tmc->lock);
|
||||||
return;
|
return;
|
||||||
|
|
@ -973,7 +973,7 @@ static void tmigr_handle_remote_cpu(unsigned int cpu, u64 now,
|
||||||
* (See also section "Required event and timerqueue update after a
|
* (See also section "Required event and timerqueue update after a
|
||||||
* remote expiry" in the documentation at the top)
|
* remote expiry" in the documentation at the top)
|
||||||
*/
|
*/
|
||||||
if (!tmc->online || !tmc->idle) {
|
if (!tmc->available || !tmc->idle) {
|
||||||
timer_unlock_remote_bases(cpu);
|
timer_unlock_remote_bases(cpu);
|
||||||
goto unlock;
|
goto unlock;
|
||||||
}
|
}
|
||||||
|
|
@ -1422,19 +1422,19 @@ static long tmigr_trigger_active(void *unused)
|
||||||
{
|
{
|
||||||
struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
|
struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
|
||||||
|
|
||||||
WARN_ON_ONCE(!tmc->online || tmc->idle);
|
WARN_ON_ONCE(!tmc->available || tmc->idle);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int tmigr_cpu_offline(unsigned int cpu)
|
static int tmigr_clear_cpu_available(unsigned int cpu)
|
||||||
{
|
{
|
||||||
struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
|
struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
|
||||||
int migrator;
|
int migrator;
|
||||||
u64 firstexp;
|
u64 firstexp;
|
||||||
|
|
||||||
raw_spin_lock_irq(&tmc->lock);
|
raw_spin_lock_irq(&tmc->lock);
|
||||||
tmc->online = false;
|
tmc->available = false;
|
||||||
WRITE_ONCE(tmc->wakeup, KTIME_MAX);
|
WRITE_ONCE(tmc->wakeup, KTIME_MAX);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
|
@ -1442,7 +1442,7 @@ static int tmigr_cpu_offline(unsigned int cpu)
|
||||||
* offline; Therefore nextevt value is set to KTIME_MAX
|
* offline; Therefore nextevt value is set to KTIME_MAX
|
||||||
*/
|
*/
|
||||||
firstexp = __tmigr_cpu_deactivate(tmc, KTIME_MAX);
|
firstexp = __tmigr_cpu_deactivate(tmc, KTIME_MAX);
|
||||||
trace_tmigr_cpu_offline(tmc);
|
trace_tmigr_cpu_unavailable(tmc);
|
||||||
raw_spin_unlock_irq(&tmc->lock);
|
raw_spin_unlock_irq(&tmc->lock);
|
||||||
|
|
||||||
if (firstexp != KTIME_MAX) {
|
if (firstexp != KTIME_MAX) {
|
||||||
|
|
@ -1453,7 +1453,7 @@ static int tmigr_cpu_offline(unsigned int cpu)
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int tmigr_cpu_online(unsigned int cpu)
|
static int tmigr_set_cpu_available(unsigned int cpu)
|
||||||
{
|
{
|
||||||
struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
|
struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
|
||||||
|
|
||||||
|
|
@ -1462,11 +1462,11 @@ static int tmigr_cpu_online(unsigned int cpu)
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
raw_spin_lock_irq(&tmc->lock);
|
raw_spin_lock_irq(&tmc->lock);
|
||||||
trace_tmigr_cpu_online(tmc);
|
trace_tmigr_cpu_available(tmc);
|
||||||
tmc->idle = timer_base_is_idle();
|
tmc->idle = timer_base_is_idle();
|
||||||
if (!tmc->idle)
|
if (!tmc->idle)
|
||||||
__tmigr_cpu_activate(tmc);
|
__tmigr_cpu_activate(tmc);
|
||||||
tmc->online = true;
|
tmc->available = true;
|
||||||
raw_spin_unlock_irq(&tmc->lock);
|
raw_spin_unlock_irq(&tmc->lock);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
@ -1758,7 +1758,7 @@ static int tmigr_add_cpu(unsigned int cpu)
|
||||||
* The (likely) current CPU is expected to be online in the hierarchy,
|
* The (likely) current CPU is expected to be online in the hierarchy,
|
||||||
* otherwise the old root may not be active as expected.
|
* otherwise the old root may not be active as expected.
|
||||||
*/
|
*/
|
||||||
WARN_ON_ONCE(!per_cpu_ptr(&tmigr_cpu, raw_smp_processor_id())->online);
|
WARN_ON_ONCE(!per_cpu_ptr(&tmigr_cpu, raw_smp_processor_id())->available);
|
||||||
ret = tmigr_setup_groups(-1, old_root->numa_node, old_root, true);
|
ret = tmigr_setup_groups(-1, old_root->numa_node, old_root, true);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -1854,7 +1854,7 @@ static int __init tmigr_init(void)
|
||||||
goto err;
|
goto err;
|
||||||
|
|
||||||
ret = cpuhp_setup_state(CPUHP_AP_TMIGR_ONLINE, "tmigr:online",
|
ret = cpuhp_setup_state(CPUHP_AP_TMIGR_ONLINE, "tmigr:online",
|
||||||
tmigr_cpu_online, tmigr_cpu_offline);
|
tmigr_set_cpu_available, tmigr_clear_cpu_available);
|
||||||
if (ret)
|
if (ret)
|
||||||
goto err;
|
goto err;
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -97,7 +97,7 @@ struct tmigr_group {
|
||||||
*/
|
*/
|
||||||
struct tmigr_cpu {
|
struct tmigr_cpu {
|
||||||
raw_spinlock_t lock;
|
raw_spinlock_t lock;
|
||||||
bool online;
|
bool available;
|
||||||
bool idle;
|
bool idle;
|
||||||
bool remote;
|
bool remote;
|
||||||
struct tmigr_group *tmgroup;
|
struct tmigr_group *tmgroup;
|
||||||
|
|
|
||||||
Loading…
Reference in New Issue