sched_ext: idle: Handle migration-disabled tasks in BPF code
When the scx_bpf_select_cpu_dfl() and scx_bpf_select_cpu_and() kfuncs are
invoked outside of ops.select_cpu(), we can't rely on @p->migration_disabled
to determine whether migration is disabled for the task @p.
In fact, migration is always disabled for the current task while running
BPF code: __bpf_prog_enter() disables migration and __bpf_prog_exit()
re-enables it.
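As a rough illustration, every BPF program invocation is bracketed roughly as
in the following simplified sketch; this is not the actual trampoline code
(which also handles recursion protection, stats and sleepable-program RCU),
and the *_sketch names are placeholders:

#include <linux/preempt.h>
#include <linux/rcupdate.h>

static void __bpf_prog_enter_sketch(void)
{
	rcu_read_lock();
	migrate_disable();	/* bumps current->migration_disabled */
}

static void __bpf_prog_exit_sketch(void)
{
	migrate_enable();	/* drops current->migration_disabled */
	rcu_read_unlock();
}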
To handle this, when @p->migration_disabled == 1, check whether @p is
the current task. If so, migration was not disabled before entering the
callback; otherwise, migration was disabled.
This ensures correct idle CPU selection in all cases. The behavior of
ops.select_cpu() remains unchanged, because this callback is never
invoked for the current task and migration-disabled tasks are always
excluded.
Example: without this change, scx_bpf_select_cpu_and() called from
ops.enqueue() always returns -EBUSY; with this change applied, it
correctly returns idle CPUs (a sketch of such a caller is shown below).
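For illustration only, a minimal sketch of such a caller in the style of the
in-tree example schedulers (tools/sched_ext): the callback name, the
SHARED_DSQ id and the fallback policy are hypothetical, and the usual
scheduler boilerplate (struct_ops definition, DSQ creation in ops.init(),
license string) is omitted.

#include <scx/common.bpf.h>

/* Hypothetical user-defined DSQ, assumed to be created in ops.init(). */
#define SHARED_DSQ	0

void BPF_STRUCT_OPS(example_enqueue, struct task_struct *p, u64 enq_flags)
{
	s32 cpu;

	/*
	 * Look for an idle CPU usable by @p. Per the example above, without
	 * this fix a call issued from ops.enqueue() can return -EBUSY
	 * instead of an idle CPU.
	 */
	cpu = scx_bpf_select_cpu_and(p, scx_bpf_task_cpu(p), 0, p->cpus_ptr, 0);
	if (cpu >= 0) {
		/* Dispatch directly to the idle CPU's local DSQ. */
		scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL_ON | cpu, SCX_SLICE_DFL,
				   enq_flags);
		return;
	}

	/* No idle CPU found: queue on the shared DSQ instead. */
	scx_bpf_dsq_insert(p, SHARED_DSQ, SCX_SLICE_DFL, enq_flags);
}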
Fixes: 06efc9fe0b ("sched_ext: idle: Handle migration-disabled tasks in idle selection")
Cc: stable@vger.kernel.org # v6.16+
Signed-off-by: Andrea Righi <arighi@nvidia.com>
Acked-by: Changwoo Min <changwoo@igalia.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
commit 55ed11b181
parent a1eab4d813
@@ -856,6 +856,32 @@ static bool check_builtin_idle_enabled(void)
 	return false;
 }
 
+/*
+ * Determine whether @p is a migration-disabled task in the context of BPF
+ * code.
+ *
+ * We can't simply check whether @p->migration_disabled is set in a
+ * sched_ext callback, because migration is always disabled for the current
+ * task while running BPF code.
+ *
+ * The prolog (__bpf_prog_enter) and epilog (__bpf_prog_exit) respectively
+ * disable and re-enable migration. For this reason, the current task
+ * inside a sched_ext callback is always a migration-disabled task.
+ *
+ * Therefore, when @p->migration_disabled == 1, check whether @p is the
+ * current task or not: if it is, then migration was not disabled before
+ * entering the callback, otherwise migration was disabled.
+ *
+ * Returns true if @p is migration-disabled, false otherwise.
+ */
+static bool is_bpf_migration_disabled(const struct task_struct *p)
+{
+	if (p->migration_disabled == 1)
+		return p != current;
+	else
+		return p->migration_disabled;
+}
+
 static s32 select_cpu_from_kfunc(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
 				 const struct cpumask *allowed, u64 flags)
 {
@@ -898,7 +924,7 @@ static s32 select_cpu_from_kfunc(struct task_struct *p, s32 prev_cpu, u64 wake_f
 	 * selection optimizations and simply check whether the previously
 	 * used CPU is idle and within the allowed cpumask.
 	 */
-	if (p->nr_cpus_allowed == 1 || is_migration_disabled(p)) {
+	if (p->nr_cpus_allowed == 1 || is_bpf_migration_disabled(p)) {
 		if (cpumask_test_cpu(prev_cpu, allowed ?: p->cpus_ptr) &&
 		    scx_idle_test_and_clear_cpu(prev_cpu))
 			cpu = prev_cpu;