sched/fair: Have SD_SERIALIZE affect newidle balancing
Also serialize the possibly much more frequent newidle balancing for the
'expensive' domains that have SD_SERIALIZE set. Initial benchmarking by
K Prateek and Tim showed no negative effect.

Split out from the larger patch moving sched_balance_running around, for
ease of bisection.

Suggested-by: Shrikanth Hegde <sshegde@linux.ibm.com>
Seconded-by: K Prateek Nayak <kprateek.nayak@amd.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/df068896-82f9-458d-8fff-5a2f654e8ffd@amd.com
Link: https://patch.msgid.link/6fed119b723c71552943bfe5798c93851b30a361.1762800251.git.tim.c.chen@linux.intel.com

# Conflicts:
#	kernel/sched/fair.c
commit 522fb20fbd
parent 3324b2180c
kernel/sched/fair.c
@@ -11732,7 +11732,7 @@ static int sched_balance_rq(int this_cpu, struct rq *this_rq,
 		goto out_balanced;
 	}
 
-	if (!need_unlock && (sd->flags & SD_SERIALIZE) && idle != CPU_NEWLY_IDLE) {
+	if (!need_unlock && (sd->flags & SD_SERIALIZE)) {
 		int zero = 0;
 		if (!atomic_try_cmpxchg_acquire(&sched_balance_running, &zero, 1))
 			goto out_balanced;
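For readers unfamiliar with the pattern the hunk relies on, below is a minimal
userspace sketch in C11 atomics, not the kernel code itself. It illustrates how
a single flag (an analogue of sched_balance_running) serializes an expensive
balance pass with an acquire-ordered try-cmpxchg: one contender wins, the rest
simply skip the pass rather than wait. The names balance_running,
try_serialized_balance() and do_expensive_balance() are illustrative only.

/* Minimal sketch of try-cmpxchg based serialization (assumed names). */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int balance_running = 0;	/* analogue of sched_balance_running */

static void do_expensive_balance(void)
{
	puts("balancing the serialized domain");
}

static void try_serialized_balance(void)
{
	int zero = 0;

	/* Acquire-ordered try-lock: only one caller wins, the rest bail out. */
	if (!atomic_compare_exchange_strong_explicit(&balance_running, &zero, 1,
						     memory_order_acquire,
						     memory_order_relaxed))
		return;		/* someone else is already balancing */

	do_expensive_balance();

	/* Drop the flag so the next balancer can take a turn. */
	atomic_store_explicit(&balance_running, 0, memory_order_release);
}

int main(void)
{
	try_serialized_balance();
	return 0;
}

With the check for idle != CPU_NEWLY_IDLE removed in the hunk above, newidle
balance attempts on SD_SERIALIZE domains go through the same gate instead of
bypassing it.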