tools/sched_ext: Strip compatibility macros for cgroup and dispatch APIs

Enough time has passed since the introduction of scx_bpf_task_cgroup() and
the scx_bpf_dispatch* -> scx_bpf_dsq* kfunc renaming. Strip the compatibility
macros.

Acked-by: Changwoo Min <changwoo@igalia.com>
Acked-by: Andrea Righi <arighi@nvidia.com>
Reviewed-by: Emil Tsalapatis <emil@etsalapatis.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
This commit is contained in:
Tejun Heo 2025-10-06 15:51:44 -10:00
parent 0128c85051
commit 111a79800a
3 changed files with 12 additions and 120 deletions

View File

@@ -15,121 +15,17 @@
__ret; \ __ret; \
}) })
/* v6.12: 819513666966 ("sched_ext: Add cgroup support") */
#define __COMPAT_scx_bpf_task_cgroup(p) \
(bpf_ksym_exists(scx_bpf_task_cgroup) ? \
scx_bpf_task_cgroup((p)) : NULL)
/* /*
* v6.13: The verb `dispatch` was too overloaded and confusing. kfuncs are * v6.15: 950ad93df2fc ("bpf: add kfunc for populating cpumask bits")
* renamed to unload the verb.
* *
* Build error is triggered if old names are used. New binaries work with both * Compat macro will be dropped on v6.19 release.
* new and old names. The compat macros will be removed on v6.15 release.
*
* scx_bpf_dispatch_from_dsq() and friends were added during v6.12 by
* 4c30f5ce4f7a ("sched_ext: Implement scx_bpf_dispatch[_vtime]_from_dsq()").
* Preserve __COMPAT macros until v6.15.
*/ */
void scx_bpf_dispatch___compat(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_flags) __ksym __weak;
void scx_bpf_dispatch_vtime___compat(struct task_struct *p, u64 dsq_id, u64 slice, u64 vtime, u64 enq_flags) __ksym __weak;
bool scx_bpf_consume___compat(u64 dsq_id) __ksym __weak;
void scx_bpf_dispatch_from_dsq_set_slice___compat(struct bpf_iter_scx_dsq *it__iter, u64 slice) __ksym __weak;
void scx_bpf_dispatch_from_dsq_set_vtime___compat(struct bpf_iter_scx_dsq *it__iter, u64 vtime) __ksym __weak;
bool scx_bpf_dispatch_from_dsq___compat(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak;
bool scx_bpf_dispatch_vtime_from_dsq___compat(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak;
int bpf_cpumask_populate(struct cpumask *dst, void *src, size_t src__sz) __ksym __weak; int bpf_cpumask_populate(struct cpumask *dst, void *src, size_t src__sz) __ksym __weak;
#define scx_bpf_dsq_insert(p, dsq_id, slice, enq_flags) \
(bpf_ksym_exists(scx_bpf_dsq_insert) ? \
scx_bpf_dsq_insert((p), (dsq_id), (slice), (enq_flags)) : \
scx_bpf_dispatch___compat((p), (dsq_id), (slice), (enq_flags)))
#define scx_bpf_dsq_insert_vtime(p, dsq_id, slice, vtime, enq_flags) \
(bpf_ksym_exists(scx_bpf_dsq_insert_vtime) ? \
scx_bpf_dsq_insert_vtime((p), (dsq_id), (slice), (vtime), (enq_flags)) : \
scx_bpf_dispatch_vtime___compat((p), (dsq_id), (slice), (vtime), (enq_flags)))
#define scx_bpf_dsq_move_to_local(dsq_id) \
(bpf_ksym_exists(scx_bpf_dsq_move_to_local) ? \
scx_bpf_dsq_move_to_local((dsq_id)) : \
scx_bpf_consume___compat((dsq_id)))
#define __COMPAT_scx_bpf_dsq_move_set_slice(it__iter, slice) \
(bpf_ksym_exists(scx_bpf_dsq_move_set_slice) ? \
scx_bpf_dsq_move_set_slice((it__iter), (slice)) : \
(bpf_ksym_exists(scx_bpf_dispatch_from_dsq_set_slice___compat) ? \
scx_bpf_dispatch_from_dsq_set_slice___compat((it__iter), (slice)) : \
(void)0))
#define __COMPAT_scx_bpf_dsq_move_set_vtime(it__iter, vtime) \
(bpf_ksym_exists(scx_bpf_dsq_move_set_vtime) ? \
scx_bpf_dsq_move_set_vtime((it__iter), (vtime)) : \
(bpf_ksym_exists(scx_bpf_dispatch_from_dsq_set_vtime___compat) ? \
scx_bpf_dispatch_from_dsq_set_vtime___compat((it__iter), (vtime)) : \
(void) 0))
#define __COMPAT_scx_bpf_dsq_move(it__iter, p, dsq_id, enq_flags) \
(bpf_ksym_exists(scx_bpf_dsq_move) ? \
scx_bpf_dsq_move((it__iter), (p), (dsq_id), (enq_flags)) : \
(bpf_ksym_exists(scx_bpf_dispatch_from_dsq___compat) ? \
scx_bpf_dispatch_from_dsq___compat((it__iter), (p), (dsq_id), (enq_flags)) : \
false))
#define __COMPAT_scx_bpf_dsq_move_vtime(it__iter, p, dsq_id, enq_flags) \
(bpf_ksym_exists(scx_bpf_dsq_move_vtime) ? \
scx_bpf_dsq_move_vtime((it__iter), (p), (dsq_id), (enq_flags)) : \
(bpf_ksym_exists(scx_bpf_dispatch_vtime_from_dsq___compat) ? \
scx_bpf_dispatch_vtime_from_dsq___compat((it__iter), (p), (dsq_id), (enq_flags)) : \
false))
#define __COMPAT_bpf_cpumask_populate(cpumask, src, size__sz) \ #define __COMPAT_bpf_cpumask_populate(cpumask, src, size__sz) \
(bpf_ksym_exists(bpf_cpumask_populate) ? \ (bpf_ksym_exists(bpf_cpumask_populate) ? \
(bpf_cpumask_populate(cpumask, src, size__sz)) : -EOPNOTSUPP) (bpf_cpumask_populate(cpumask, src, size__sz)) : -EOPNOTSUPP)
#define scx_bpf_dispatch(p, dsq_id, slice, enq_flags) \
_Static_assert(false, "scx_bpf_dispatch() renamed to scx_bpf_dsq_insert()")
#define scx_bpf_dispatch_vtime(p, dsq_id, slice, vtime, enq_flags) \
_Static_assert(false, "scx_bpf_dispatch_vtime() renamed to scx_bpf_dsq_insert_vtime()")
#define scx_bpf_consume(dsq_id) ({ \
_Static_assert(false, "scx_bpf_consume() renamed to scx_bpf_dsq_move_to_local()"); \
false; \
})
#define scx_bpf_dispatch_from_dsq_set_slice(it__iter, slice) \
_Static_assert(false, "scx_bpf_dispatch_from_dsq_set_slice() renamed to scx_bpf_dsq_move_set_slice()")
#define scx_bpf_dispatch_from_dsq_set_vtime(it__iter, vtime) \
_Static_assert(false, "scx_bpf_dispatch_from_dsq_set_vtime() renamed to scx_bpf_dsq_move_set_vtime()")
#define scx_bpf_dispatch_from_dsq(it__iter, p, dsq_id, enq_flags) ({ \
_Static_assert(false, "scx_bpf_dispatch_from_dsq() renamed to scx_bpf_dsq_move()"); \
false; \
})
#define scx_bpf_dispatch_vtime_from_dsq(it__iter, p, dsq_id, enq_flags) ({ \
_Static_assert(false, "scx_bpf_dispatch_vtime_from_dsq() renamed to scx_bpf_dsq_move_vtime()"); \
false; \
})
#define __COMPAT_scx_bpf_dispatch_from_dsq_set_slice(it__iter, slice) \
_Static_assert(false, "__COMPAT_scx_bpf_dispatch_from_dsq_set_slice() renamed to __COMPAT_scx_bpf_dsq_move_set_slice()")
#define __COMPAT_scx_bpf_dispatch_from_dsq_set_vtime(it__iter, vtime) \
_Static_assert(false, "__COMPAT_scx_bpf_dispatch_from_dsq_set_vtime() renamed to __COMPAT_scx_bpf_dsq_move_set_vtime()")
#define __COMPAT_scx_bpf_dispatch_from_dsq(it__iter, p, dsq_id, enq_flags) ({ \
_Static_assert(false, "__COMPAT_scx_bpf_dispatch_from_dsq() renamed to __COMPAT_scx_bpf_dsq_move()"); \
false; \
})
#define __COMPAT_scx_bpf_dispatch_vtime_from_dsq(it__iter, p, dsq_id, enq_flags) ({ \
_Static_assert(false, "__COMPAT_scx_bpf_dispatch_vtime_from_dsq() renamed to __COMPAT_scx_bpf_dsq_move_vtime()"); \
false; \
})
/** /**
* __COMPAT_is_enq_cpu_selected - Test if SCX_ENQ_CPU_SELECTED is on * __COMPAT_is_enq_cpu_selected - Test if SCX_ENQ_CPU_SELECTED is on
* in a compatible way. We will preserve this __COMPAT helper until v6.16. * in a compatible way. We will preserve this __COMPAT helper until v6.16.

View File

@@ -382,7 +382,7 @@ void BPF_STRUCT_OPS(fcg_enqueue, struct task_struct *p, u64 enq_flags)
return; return;
} }
cgrp = __COMPAT_scx_bpf_task_cgroup(p); cgrp = scx_bpf_task_cgroup(p);
cgc = find_cgrp_ctx(cgrp); cgc = find_cgrp_ctx(cgrp);
if (!cgc) if (!cgc)
goto out_release; goto out_release;
@@ -508,7 +508,7 @@ void BPF_STRUCT_OPS(fcg_runnable, struct task_struct *p, u64 enq_flags)
{ {
struct cgroup *cgrp; struct cgroup *cgrp;
cgrp = __COMPAT_scx_bpf_task_cgroup(p); cgrp = scx_bpf_task_cgroup(p);
update_active_weight_sums(cgrp, true); update_active_weight_sums(cgrp, true);
bpf_cgroup_release(cgrp); bpf_cgroup_release(cgrp);
} }
@@ -521,7 +521,7 @@ void BPF_STRUCT_OPS(fcg_running, struct task_struct *p)
if (fifo_sched) if (fifo_sched)
return; return;
cgrp = __COMPAT_scx_bpf_task_cgroup(p); cgrp = scx_bpf_task_cgroup(p);
cgc = find_cgrp_ctx(cgrp); cgc = find_cgrp_ctx(cgrp);
if (cgc) { if (cgc) {
/* /*
@@ -564,7 +564,7 @@ void BPF_STRUCT_OPS(fcg_stopping, struct task_struct *p, bool runnable)
if (!taskc->bypassed_at) if (!taskc->bypassed_at)
return; return;
cgrp = __COMPAT_scx_bpf_task_cgroup(p); cgrp = scx_bpf_task_cgroup(p);
cgc = find_cgrp_ctx(cgrp); cgc = find_cgrp_ctx(cgrp);
if (cgc) { if (cgc) {
__sync_fetch_and_add(&cgc->cvtime_delta, __sync_fetch_and_add(&cgc->cvtime_delta,
@@ -578,7 +578,7 @@ void BPF_STRUCT_OPS(fcg_quiescent, struct task_struct *p, u64 deq_flags)
{ {
struct cgroup *cgrp; struct cgroup *cgrp;
cgrp = __COMPAT_scx_bpf_task_cgroup(p); cgrp = scx_bpf_task_cgroup(p);
update_active_weight_sums(cgrp, false); update_active_weight_sums(cgrp, false);
bpf_cgroup_release(cgrp); bpf_cgroup_release(cgrp);
} }

View File

@@ -320,12 +320,9 @@ static bool dispatch_highpri(bool from_timer)
if (tctx->highpri) { if (tctx->highpri) {
/* exercise the set_*() and vtime interface too */ /* exercise the set_*() and vtime interface too */
__COMPAT_scx_bpf_dsq_move_set_slice( scx_bpf_dsq_move_set_slice(BPF_FOR_EACH_ITER, slice_ns * 2);
BPF_FOR_EACH_ITER, slice_ns * 2); scx_bpf_dsq_move_set_vtime(BPF_FOR_EACH_ITER, highpri_seq++);
__COMPAT_scx_bpf_dsq_move_set_vtime( scx_bpf_dsq_move_vtime(BPF_FOR_EACH_ITER, p, HIGHPRI_DSQ, 0);
BPF_FOR_EACH_ITER, highpri_seq++);
__COMPAT_scx_bpf_dsq_move_vtime(
BPF_FOR_EACH_ITER, p, HIGHPRI_DSQ, 0);
} }
} }
@@ -342,9 +339,8 @@ static bool dispatch_highpri(bool from_timer)
else else
cpu = scx_bpf_pick_any_cpu(p->cpus_ptr, 0); cpu = scx_bpf_pick_any_cpu(p->cpus_ptr, 0);
if (__COMPAT_scx_bpf_dsq_move(BPF_FOR_EACH_ITER, p, if (scx_bpf_dsq_move(BPF_FOR_EACH_ITER, p, SCX_DSQ_LOCAL_ON | cpu,
SCX_DSQ_LOCAL_ON | cpu, SCX_ENQ_PREEMPT)) {
SCX_ENQ_PREEMPT)) {
if (cpu == this_cpu) { if (cpu == this_cpu) {
dispatched = true; dispatched = true;
__sync_fetch_and_add(&nr_expedited_local, 1); __sync_fetch_and_add(&nr_expedited_local, 1);