sched: Rename do_set_cpus_allowed()

Hopefully saner naming.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Juri Lelli <juri.lelli@redhat.com>
Acked-by: Tejun Heo <tj@kernel.org>
Acked-by: Vincent Guittot <vincent.guittot@linaro.org>
Peter Zijlstra 2025-09-10 10:08:05 +02:00
parent abfc01077d
commit b079d93796
5 changed files with 14 additions and 14 deletions
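
Note on the naming: one name is reused across the patch, which can confuse a first read. The exported do_set_cpus_allowed() becomes set_cpus_allowed_force(), and the static scheduler-core helper __do_set_cpus_allowed() inherits the now-free do_set_cpus_allowed name. A minimal before/after sketch of an affected call site (pin_hard() and its arguments are illustrative, not taken from the patch):

#include <linux/cpumask.h>
#include <linux/sched.h>

/* Sketch only: forcibly pin a task, discarding any user-requested affinity. */
static void pin_hard(struct task_struct *p, const struct cpumask *mask)
{
        /* Before this commit: do_set_cpus_allowed(p, mask); */
        set_cpus_allowed_force(p, mask);
}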


@@ -1861,8 +1861,8 @@ extern int task_can_attach(struct task_struct *p);
 extern int dl_bw_alloc(int cpu, u64 dl_bw);
 extern void dl_bw_free(int cpu, u64 dl_bw);
-/* do_set_cpus_allowed() - consider using set_cpus_allowed_ptr() instead */
-extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
+/* set_cpus_allowed_force() - consider using set_cpus_allowed_ptr() instead */
+extern void set_cpus_allowed_force(struct task_struct *p, const struct cpumask *new_mask);
 /**
  * set_cpus_allowed_ptr - set CPU affinity mask of a task
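
The header comment continues to steer callers toward set_cpus_allowed_ptr(). The distinction, per the implementation comment further below: set_cpus_allowed_ptr() validates the request and can fail, while set_cpus_allowed_force() applies the mask unconditionally and also destroys any user-requested affinity. A sketch of the checked variant (the wrapper is illustrative, not from the patch):

/* Sketch: the checked API may reject a mask with no usable CPUs. */
static int pin_checked(struct task_struct *p, const struct cpumask *mask)
{
        return set_cpus_allowed_ptr(p, mask);   /* e.g. -EINVAL on failure */
}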


@@ -4180,7 +4180,7 @@ bool cpuset_cpus_allowed_fallback(struct task_struct *tsk)
 	rcu_read_lock();
 	cs_mask = task_cs(tsk)->cpus_allowed;
 	if (is_in_v2_mode() && cpumask_subset(cs_mask, possible_mask)) {
-		do_set_cpus_allowed(tsk, cs_mask);
+		set_cpus_allowed_force(tsk, cs_mask);
 		changed = true;
 	}
 	rcu_read_unlock();


@@ -599,7 +599,7 @@ static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mas
 	}
 	scoped_guard (raw_spinlock_irqsave, &p->pi_lock)
-		do_set_cpus_allowed(p, mask);
+		set_cpus_allowed_force(p, mask);
 	/* It's safe because the task is inactive. */
 	p->flags |= PF_NO_SETAFFINITY;
@@ -880,7 +880,7 @@ int kthread_affine_preferred(struct task_struct *p, const struct cpumask *mask)
 	kthread_fetch_affinity(kthread, affinity);
 	scoped_guard (raw_spinlock_irqsave, &p->pi_lock)
-		do_set_cpus_allowed(p, affinity);
+		set_cpus_allowed_force(p, affinity);
 	mutex_unlock(&kthreads_hotplug_lock);
 out:
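
Both kthread call sites take p->pi_lock around the forced affinity change via scoped_guard(). As a reading aid, the guard (from the cleanup.h machinery) is equivalent to the explicit pairing below; sketch only, not part of the patch:

        unsigned long flags;

        raw_spin_lock_irqsave(&p->pi_lock, flags);
        set_cpus_allowed_force(p, mask);        /* lock held for guard scope */
        raw_spin_unlock_irqrestore(&p->pi_lock, flags);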


@@ -2331,7 +2331,7 @@ unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state
 }
 static void
-__do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx);
+do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx);
 static void migrate_disable_switch(struct rq *rq, struct task_struct *p)
 {
@@ -2348,7 +2348,7 @@ static void migrate_disable_switch(struct rq *rq, struct task_struct *p)
 	scoped_guard (task_rq_lock, p) {
 		update_rq_clock(scope.rq);
-		__do_set_cpus_allowed(p, &ac);
+		do_set_cpus_allowed(p, &ac);
 	}
 }
@@ -2662,7 +2662,7 @@ void set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx
 }
 static void
-__do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx)
+do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx)
 {
 	struct rq *rq = task_rq(p);
 	bool queued, running;
@@ -2692,7 +2692,7 @@ __do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx)
  * Used for kthread_bind() and select_fallback_rq(), in both cases the user
  * affinity (if any) should be destroyed too.
  */
-void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+void set_cpus_allowed_force(struct task_struct *p, const struct cpumask *new_mask)
 {
 	struct affinity_context ac = {
 		.new_mask = new_mask,
@@ -2706,7 +2706,7 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
 	scoped_guard (__task_rq_lock, p) {
 		update_rq_clock(scope.rq);
-		__do_set_cpus_allowed(p, &ac);
+		do_set_cpus_allowed(p, &ac);
 	}
 	/*
@@ -2745,7 +2745,7 @@ int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src,
 	 * Use pi_lock to protect content of user_cpus_ptr
 	 *
 	 * Though unlikely, user_cpus_ptr can be reset to NULL by a concurrent
-	 * do_set_cpus_allowed().
+	 * set_cpus_allowed_force().
 	 */
 	raw_spin_lock_irqsave(&src->pi_lock, flags);
 	if (src->user_cpus_ptr) {
@@ -3073,7 +3073,7 @@ static int __set_cpus_allowed_ptr_locked(struct task_struct *p,
 		goto out;
 	}
-	__do_set_cpus_allowed(p, ctx);
+	do_set_cpus_allowed(p, ctx);
 	return affine_move_task(rq, p, rf, dest_cpu, ctx->flags);
@@ -3482,7 +3482,7 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
 		}
 		fallthrough;
 	case possible:
-		do_set_cpus_allowed(p, task_cpu_fallback_mask(p));
+		set_cpus_allowed_force(p, task_cpu_fallback_mask(p));
 		state = fail;
 		break;
 	case fail:
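
The hunk at 2692 shows set_cpus_allowed_force() packing its arguments into a struct affinity_context before calling the renamed internal helper; the initializer is cut off after .new_mask in the view above. As an assumption based on the mainline definition of struct affinity_context (not visible in this diff), the full initializer is roughly:

        struct affinity_context ac = {
                .new_mask  = new_mask,
                .user_mask = NULL,
                .flags     = SCA_USER,  /* also clear the user-requested mask */
        };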


@@ -2617,7 +2617,7 @@ static inline bool task_allowed_on_cpu(struct task_struct *p, int cpu)
 static inline cpumask_t *alloc_user_cpus_ptr(int node)
 {
 	/*
-	 * See do_set_cpus_allowed() above for the rcu_head usage.
+	 * See set_cpus_allowed_force() above for the rcu_head usage.
 	 */
 	int size = max_t(int, cpumask_size(), sizeof(struct rcu_head));
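
The max_t() sizing exists so the user_cpus_ptr allocation can double as a struct rcu_head when it is later freed after an RCU grace period; that is the "rcu_head usage" the comment points at. A sketch of the overlay the padding enables (assumption: it mirrors the union used at the set_cpus_allowed_force() site the comment references):

        union cpumask_rcuhead {
                cpumask_t cpumask;
                struct rcu_head rcu;
        };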