sched/fair: Propagate load for throttled cfs_rq

Before the task-based throttle model, load propagation would stop at a
throttled cfs_rq, and the propagation would then happen at unthrottle
time via update_load_avg().

Now that there is no update_load_avg() on unthrottle for a throttled
cfs_rq and all load tracking is done by task-related operations, let the
propagation happen immediately.

While at it, add a comment to explain why cfs_rqs that are not affected
by throttle have to be added to leaf cfs_rq list in
propagate_entity_cfs_rq() per my understanding of commit 0258bdfaff
("sched/fair: Fix unfairness caused by missing load decay").

Signed-off-by: Aaron Lu <ziqianlu@bytedance.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Chengming Zhou <chengming.zhou@linux.dev>
This commit is contained in:
Aaron Lu 2025-09-10 17:50:41 +08:00 committed by Peter Zijlstra
parent 5b726e9bf9
commit fe8d238e64
1 changed files with 18 additions and 8 deletions

View File

@ -5729,6 +5729,11 @@ static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
return cfs_bandwidth_used() && cfs_rq->throttled;
}
static inline bool cfs_rq_pelt_clock_throttled(struct cfs_rq *cfs_rq)
{
return cfs_bandwidth_used() && cfs_rq->pelt_clock_throttled;
}
/* check whether cfs_rq, or any parent, is throttled */
static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
{
@ -6721,6 +6726,11 @@ static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
return 0;
}
/*
 * Stub for when CFS bandwidth control is not available: the PELT clock
 * can never be throttled (presumably the !CONFIG_CFS_BANDWIDTH build —
 * confirm against the surrounding #ifdef block).
 */
static inline bool cfs_rq_pelt_clock_throttled(struct cfs_rq *cfs_rq)
{
	return false;
}
static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
{
return 0;
@ -13151,10 +13161,13 @@ static void propagate_entity_cfs_rq(struct sched_entity *se)
{
struct cfs_rq *cfs_rq = cfs_rq_of(se);
if (cfs_rq_throttled(cfs_rq))
return;
if (!throttled_hierarchy(cfs_rq))
/*
* If a task gets attached to this cfs_rq and before being queued,
* it gets migrated to another CPU due to reasons like affinity
* change, make sure this cfs_rq stays on leaf cfs_rq list to have
 * that removed load decayed or it can cause a fairness problem.
*/
if (!cfs_rq_pelt_clock_throttled(cfs_rq))
list_add_leaf_cfs_rq(cfs_rq);
/* Start to propagate at parent */
@ -13165,10 +13178,7 @@ static void propagate_entity_cfs_rq(struct sched_entity *se)
update_load_avg(cfs_rq, se, UPDATE_TG);
if (cfs_rq_throttled(cfs_rq))
break;
if (!throttled_hierarchy(cfs_rq))
if (!cfs_rq_pelt_clock_throttled(cfs_rq))
list_add_leaf_cfs_rq(cfs_rq);
}
}