mirror of https://github.com/torvalds/linux.git
sched: Cleanup the sched_change NOCLOCK usage
Teach the sched_change pattern how to do update_rq_clock(); this allows
for some simplifications / cleanups.

Suggested-by: K Prateek Nayak <kprateek.nayak@amd.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Juri Lelli <juri.lelli@redhat.com>
Acked-by: Tejun Heo <tj@kernel.org>
Acked-by: Vincent Guittot <vincent.guittot@linaro.org>
This commit is contained in:
parent 5892cbd85d
commit d4c64207b8
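What changed, in one picture: sched_change_begin() now performs update_rq_clock() itself whenever the caller did not pass DEQUEUE_NOCLOCK, so call sites can drop both the explicit clock update and the flag. A minimal before/after sketch of the caller pattern (illustrative only; guard bodies elided):

	/* before: caller owns the clock update and must advertise it */
	update_rq_clock(rq);
	scoped_guard (sched_change, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK) {
		/* ... change p ... */
	}

	/* after: sched_change_begin() freshens the clock on demand */
	scoped_guard (sched_change, p, DEQUEUE_SAVE) {
		/* ... change p ... */
	}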
kernel/sched/core.c:

@@ -2346,10 +2346,8 @@ static void migrate_disable_switch(struct rq *rq, struct task_struct *p)
 	if (p->cpus_ptr != &p->cpus_mask)
 		return;
 
-	scoped_guard (task_rq_lock, p) {
-		update_rq_clock(scope.rq);
+	scoped_guard (task_rq_lock, p)
 		do_set_cpus_allowed(p, &ac);
-	}
 }
 
 void ___migrate_enable(void)
@@ -2666,9 +2664,7 @@ void set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx)
 static void
 do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx)
 {
-	u32 flags = DEQUEUE_SAVE | DEQUEUE_NOCLOCK;
-
-	scoped_guard (sched_change, p, flags) {
+	scoped_guard (sched_change, p, DEQUEUE_SAVE) {
 		p->sched_class->set_cpus_allowed(p, ctx);
 		mm_set_cpus_allowed(p->mm, ctx->new_mask);
 	}
@@ -2690,10 +2686,8 @@ void set_cpus_allowed_force(struct task_struct *p, const struct cpumask *new_mask)
 		struct rcu_head rcu;
 	};
 
-	scoped_guard (__task_rq_lock, p) {
-		update_rq_clock(scope.rq);
+	scoped_guard (__task_rq_lock, p)
 		do_set_cpus_allowed(p, &ac);
-	}
 
 	/*
 	 * Because this is called with p->pi_lock held, it is not possible
@@ -9108,16 +9102,13 @@ static void sched_change_group(struct task_struct *tsk)
  */
 void sched_move_task(struct task_struct *tsk, bool for_autogroup)
 {
-	unsigned int queue_flags =
-		DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
+	unsigned int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE;
 	bool resched = false;
 	struct rq *rq;
 
 	CLASS(task_rq_lock, rq_guard)(tsk);
 	rq = rq_guard.rq;
 
-	update_rq_clock(rq);
-
 	scoped_guard (sched_change, tsk, queue_flags) {
 		sched_change_group(tsk);
 		if (!for_autogroup)
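Note that sched_move_task() keeps the named guard form, unlike the sites above: rq is still consumed after the sched_change block (the resched handling suggested by the bool resched local), so only the clock update can go. Illustrative contrast (not from the patch):

	CLASS(task_rq_lock, rq_guard)(tsk);	/* named: rq_guard.rq remains usable */
	rq = rq_guard.rq;			/* still needed below the change block */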
@@ -10792,16 +10783,14 @@ struct sched_change_ctx *sched_change_begin(struct task_struct *p, unsigned int flags)
 
 	lockdep_assert_rq_held(rq);
 
+	if (!(flags & DEQUEUE_NOCLOCK)) {
+		update_rq_clock(rq);
+		flags |= DEQUEUE_NOCLOCK;
+	}
+
 	if (flags & DEQUEUE_CLASS) {
-		if (p->sched_class->switching_from) {
-			/*
-			 * switching_from_fair() assumes CLASS implies NOCLOCK;
-			 * fixing this assumption would mean switching_from()
-			 * would need to be able to change flags.
-			 */
-			WARN_ON(!(flags & DEQUEUE_NOCLOCK));
+		if (p->sched_class->switching_from)
 			p->sched_class->switching_from(rq, p);
-		}
 	}
 
 	*ctx = (struct sched_change_ctx){
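This is the core of the patch: by the time any DEQUEUE_CLASS handling runs, the prologue has either seen DEQUEUE_NOCLOCK from the caller or freshened the clock and set the flag itself. The old comment's worry that switching_from_fair() assumes CLASS implies NOCLOCK is thereby satisfied by construction, which is why the WARN_ON() disappears rather than moves. Condensed restatement of the new invariant (sketch, not patch code):

	/* prologue, before any switching_from() callback: */
	if (!(flags & DEQUEUE_NOCLOCK)) {
		update_rq_clock(rq);		/* rq clock is now fresh */
		flags |= DEQUEUE_NOCLOCK;	/* record that fact in flags */
	}
	/* from here on, (flags & DEQUEUE_NOCLOCK) always holds */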
@@ -10840,7 +10829,7 @@ void sched_change_end(struct sched_change_ctx *ctx)
 		p->sched_class->switching_to(rq, p);
 
 	if (ctx->queued)
-		enqueue_task(rq, p, ctx->flags | ENQUEUE_NOCLOCK);
+		enqueue_task(rq, p, ctx->flags);
 	if (ctx->running)
 		set_next_task(rq, p);
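The explicit | ENQUEUE_NOCLOCK is now redundant: ctx->flags was captured after the prologue above ORed in DEQUEUE_NOCLOCK, and the dequeue/enqueue NOCLOCK bits are defined to match in kernel/sched/sched.h (bit values quoted from recent mainline; double-check against your tree):

	#define DEQUEUE_NOCLOCK		0x08 /* Matches ENQUEUE_NOCLOCK */
	#define ENQUEUE_NOCLOCK		0x08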
kernel/sched/ext.c:

@@ -4654,7 +4654,7 @@ static int scx_enable(struct sched_ext_ops *ops, struct bpf_link *link)
 	percpu_down_write(&scx_fork_rwsem);
 	scx_task_iter_start(&sti);
 	while ((p = scx_task_iter_next_locked(&sti))) {
-		unsigned int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
+		unsigned int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE;
 		const struct sched_class *old_class = p->sched_class;
 		const struct sched_class *new_class =
 			__setscheduler_class(p->policy, p->prio);
@@ -4662,8 +4662,6 @@ static int scx_enable(struct sched_ext_ops *ops, struct bpf_link *link)
 		if (!tryget_task_struct(p))
 			continue;
 
-		update_rq_clock(task_rq(p));
-
 		if (old_class != new_class)
 			queue_flags |= DEQUEUE_CLASS;
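Same simplification on the sched_ext side: the per-task update_rq_clock(task_rq(p)) is dropped because the sched_change guard entered later in this loop (not visible in these hunks) runs sched_change_begin() under the same rq lock and, absent DEQUEUE_NOCLOCK, updates the clock itself. Abbreviated shape of the loop after the patch (sketch; elisions marked):

	while ((p = scx_task_iter_next_locked(&sti))) {
		unsigned int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE;
		/* ... class checks, DEQUEUE_CLASS, ... */
		scoped_guard (sched_change, p, queue_flags) {
			/* clock freshened by sched_change_begin() */
		}
	}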
kernel/sched/syscalls.c:

@@ -64,7 +64,6 @@ static int effective_prio(struct task_struct *p)
 
 void set_user_nice(struct task_struct *p, long nice)
 {
-	struct rq *rq;
 	int old_prio;
 
 	if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
@@ -73,10 +72,7 @@ void set_user_nice(struct task_struct *p, long nice)
 	 * We have to be careful, if called from sys_setpriority(),
 	 * the task might be in the middle of scheduling on another CPU.
 	 */
-	CLASS(task_rq_lock, rq_guard)(p);
-	rq = rq_guard.rq;
-
-	update_rq_clock(rq);
+	guard(task_rq_lock)(p);
 
 	/*
 	 * The RT priorities are set via sched_setscheduler(), but we still
@@ -89,7 +85,7 @@ void set_user_nice(struct task_struct *p, long nice)
 		return;
 	}
 
-	scoped_guard (sched_change, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK) {
+	scoped_guard (sched_change, p, DEQUEUE_SAVE) {
 		p->static_prio = NICE_TO_PRIO(nice);
 		set_load_weight(p, true);
 		old_prio = p->prio;
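With the clock update gone, set_user_nice() never dereferences rq, so the named CLASS() guard collapses to the anonymous guard() form and the struct rq *rq local disappears. The two cleanup.h idioms, side by side (illustrative, not from the patch):

	CLASS(task_rq_lock, rq_guard)(p);	/* named: exposes rq_guard.rq */
	guard(task_rq_lock)(p);			/* anonymous: lock scope only */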