perf/x86: Add dynamic constraint

A growing number of features require a dynamic event constraint, e.g.
branch counter logging, auto counter reload, and Arch PEBS.

Add a generic flag, PMU_FL_DYN_CONSTRAINT, to indicate this case. It
avoids adding yet another per-feature flag to the check in
intel_cpuc_prepare() every time a new feature needs a dynamic
constraint.

Add a dyn_constraint field to struct hw_perf_event to track the event's
dynamic constraint, and apply it whenever it has been narrowed from its
default value.

Convert branch counter logging to use the generic dynamic constraint.
Since many features on architectural perfmon version 6 and later
require a dynamic constraint, unconditionally set the flag for V6+.
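
As a rough illustration of the intended usage pattern (a sketch, not
kernel code: "foo", is_foo_event() and x86_pmu.foo_cntr_mask are
hypothetical placeholders standing in for a real feature such as branch
counter logging), a feature narrows the per-event mask in its hw_config
path, and the generic constraint code later intersects that mask with
the event's static constraint:

	/* Feature's hw_config path: narrow the default ~0ULL mask. */
	static void foo_hw_config(struct perf_event *event)
	{
		if (is_foo_event(event))
			event->hw.dyn_constraint &= x86_pmu.foo_cntr_mask;
	}

	/* Generic constraint path: act only if the mask was narrowed. */
	static struct event_constraint *
	foo_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
				  struct perf_event *event,
				  struct event_constraint *c)
	{
		if (event->hw.dyn_constraint != ~0ULL) {
			c = dyn_constraint(cpuc, c, idx); /* per-CPU copy */
			c->idxmsk64 &= event->hw.dyn_constraint;
			c->weight = hweight64(c->idxmsk64);
		}
		return c;
	}

Because __x86_pmu_event_init() initializes the field to ~0ULL, events
that no feature touches keep their static constraint and skip the
dynamic path entirely.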

Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Thomas Falcon <thomas.falcon@intel.com>
Link: https://lkml.kernel.org/r/20250327195217.2683619-2-kan.liang@linux.intel.com
 5 files changed, 19 insertions(+), 7 deletions(-)

--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c

@@ -674,6 +674,7 @@ static int __x86_pmu_event_init(struct perf_event *event)
 	event->hw.idx = -1;
 	event->hw.last_cpu = -1;
 	event->hw.last_tag = ~0ULL;
+	event->hw.dyn_constraint = ~0ULL;

 	/* mark unused */
 	event->hw.extra_reg.idx = EXTRA_REG_NONE;

--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c

@@ -3730,10 +3730,9 @@ intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
 	if (cpuc->excl_cntrs)
 		return intel_get_excl_constraints(cpuc, event, idx, c2);

-	/* Not all counters support the branch counter feature. */
-	if (branch_sample_counters(event)) {
+	if (event->hw.dyn_constraint != ~0ULL) {
 		c2 = dyn_constraint(cpuc, c2, idx);
-		c2->idxmsk64 &= x86_pmu.lbr_counters;
+		c2->idxmsk64 &= event->hw.dyn_constraint;
 		c2->weight = hweight64(c2->idxmsk64);
 	}
@@ -4135,15 +4134,19 @@ static int intel_pmu_hw_config(struct perf_event *event)
 		leader = event->group_leader;
 		if (branch_sample_call_stack(leader))
 			return -EINVAL;
-		if (branch_sample_counters(leader))
+		if (branch_sample_counters(leader)) {
 			num++;
+			leader->hw.dyn_constraint &= x86_pmu.lbr_counters;
+		}
 		leader->hw.flags |= PERF_X86_EVENT_BRANCH_COUNTERS;

 		for_each_sibling_event(sibling, leader) {
 			if (branch_sample_call_stack(sibling))
 				return -EINVAL;
-			if (branch_sample_counters(sibling))
+			if (branch_sample_counters(sibling)) {
 				num++;
+				sibling->hw.dyn_constraint &= x86_pmu.lbr_counters;
+			}
 		}

 		if (num > fls(x86_pmu.lbr_counters))
@@ -4943,7 +4946,7 @@ int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
 			goto err;
 	}

-	if (x86_pmu.flags & (PMU_FL_EXCL_CNTRS | PMU_FL_TFA | PMU_FL_BR_CNTR)) {
+	if (x86_pmu.flags & (PMU_FL_EXCL_CNTRS | PMU_FL_TFA | PMU_FL_DYN_CONSTRAINT)) {
 		size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint);

 		cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
@@ -6664,6 +6667,12 @@ __init int intel_pmu_init(void)
 		pr_cont(" AnyThread deprecated, ");
 	}

+	/*
+	 * Many features on and after V6 require dynamic constraint,
+	 * e.g., Arch PEBS, ACR.
+	 */
+	if (version >= 6)
+		x86_pmu.flags |= PMU_FL_DYN_CONSTRAINT;
 	/*
 	 * Install the hw-cache-events table:
 	 */
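
The per-CPU constraint_list written by dyn_constraint() is the buffer
allocated in intel_cpuc_prepare() above; PMU_FL_DYN_CONSTRAINT
guarantees that allocation even when none of the older flags are set.
Roughly, and as a simplified sketch of the pre-existing helper rather
than code changed by this patch, dyn_constraint() takes a private copy
of the static constraint before it is narrowed, so the shared static
constraint tables are never modified:

	static struct event_constraint *
	dyn_constraint(struct cpu_hw_events *cpuc,
		       struct event_constraint *c, int idx)
	{
		if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) {
			struct event_constraint *cx = &cpuc->constraint_list[idx];

			*cx = *c;	/* start from the static constraint */
			cx->flags |= PERF_X86_EVENT_DYNAMIC;
			c = cx;
		}
		return c;
	}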

--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c

@@ -1618,7 +1618,7 @@ void __init intel_pmu_arch_lbr_init(void)
 	x86_pmu.lbr_nr = lbr_nr;

 	if (!!x86_pmu.lbr_counters)
-		x86_pmu.flags |= PMU_FL_BR_CNTR;
+		x86_pmu.flags |= PMU_FL_BR_CNTR | PMU_FL_DYN_CONSTRAINT;

 	if (x86_pmu.lbr_mispred)
 		static_branch_enable(&x86_lbr_mispred);

--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h

@@ -1042,6 +1042,7 @@ do { \
 #define PMU_FL_MEM_LOADS_AUX	0x100 /* Require an auxiliary event for the complete memory info */
 #define PMU_FL_RETIRE_LATENCY	0x200 /* Support Retire Latency in PEBS */
 #define PMU_FL_BR_CNTR		0x400 /* Support branch counter logging */
+#define PMU_FL_DYN_CONSTRAINT	0x800 /* Needs dynamic constraint */

 #define EVENT_VAR(_id) event_attr_##_id
 #define EVENT_PTR(_id) &event_attr_##_id.attr.attr

--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h

@@ -158,6 +158,7 @@ struct hw_perf_event {
 		struct { /* hardware */
 			u64		config;
 			u64		last_tag;
+			u64		dyn_constraint;
 			unsigned long	config_base;
 			unsigned long	event_base;
 			int		event_base_rdpmc;