perf stat: Don't skip failing group events

Pass errno to stat_handle_error() rather than reading errno after it has
potentially been clobbered.
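
(A standalone sketch of the hazard, for illustration only and not perf
code: the first libc call after a failing syscall may rewrite errno, so
the value has to be captured immediately. Names and the path here are
hypothetical.)

  #include <errno.h>
  #include <fcntl.h>
  #include <stdio.h>

  int main(void)
  {
          int fd = open("/nonexistent", O_RDONLY); /* fails, sets errno */
          int err = errno;                         /* capture right away */

          printf("open returned %d\n", fd);        /* may clobber errno */
          fprintf(stderr, "saved err=%d, errno now unreliable (%d)\n",
                  err, errno);
          return 0;
  }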

Move "skippable" handling first as a skippable event (from the perf stat
default list) should always just be skipped.

Remove logic to skip rather than fail events in a group when they
aren't the group leader.

The original logic was added in commit cb5ef60067 ("perf stat: Error
out unsupported group leader immediately") because, at the time, event
opening and error handling were done together and that could lead to an
assertion being raised.

Not failing this case causes broken groups to silently report no
values, particularly for topdown events.
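
(As background on why a silently skipped member breaks the whole group:
with perf_event_open(2), members are tied to a leader via the group_fd
argument and the kernel schedules the group as a unit. A minimal
standalone sketch, cycles plus instructions as one group, with error
handling trimmed, so illustrative only.)

  #include <linux/perf_event.h>
  #include <sys/syscall.h>
  #include <string.h>
  #include <stdio.h>
  #include <unistd.h>

  static int open_event(struct perf_event_attr *attr, int group_fd)
  {
          /* pid=0, cpu=-1: this thread, any CPU; group_fd is the leader or -1 */
          return syscall(__NR_perf_event_open, attr, 0, -1, group_fd, 0);
  }

  int main(void)
  {
          struct perf_event_attr attr;
          int leader, member;

          memset(&attr, 0, sizeof(attr));
          attr.size = sizeof(attr);
          attr.type = PERF_TYPE_HARDWARE;
          attr.config = PERF_COUNT_HW_CPU_CYCLES;
          attr.disabled = 1;

          leader = open_event(&attr, -1);         /* group leader */

          attr.config = PERF_COUNT_HW_INSTRUCTIONS;
          attr.disabled = 0;
          member = open_event(&attr, leader);     /* grouped with leader */

          if (leader < 0 || member < 0)
                  fprintf(stderr, "group is broken; failing beats skipping\n");
          return 0;
  }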

Closes: https://lore.kernel.org/lkml/20250822082233.1850417-1-dapeng1.mi@linux.intel.com/
Reported-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
Reviewed-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
Signed-off-by: Ian Rogers <irogers@google.com>
Tested-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Howard Chu <howardchu95@gmail.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James Clark <james.clark@linaro.org>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Falcon <thomas.falcon@intel.com>
Cc: Yoshihiro Furudera <fj5100bi@fujitsu.com>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>

@@ -613,33 +613,40 @@ enum counter_recovery {
 	COUNTER_FATAL,
 };
 
-static enum counter_recovery stat_handle_error(struct evsel *counter)
+static enum counter_recovery stat_handle_error(struct evsel *counter, int err)
 {
 	char msg[BUFSIZ];
+
+	if (counter->skippable) {
+		if (verbose > 0) {
+			ui__warning("skipping event %s that kernel failed to open .\n",
+				    evsel__name(counter));
+		}
+		counter->supported = false;
+		counter->errored = true;
+		return COUNTER_SKIP;
+	}
+
 	/*
 	 * PPC returns ENXIO for HW counters until 2.6.37
 	 * (behavior changed with commit b0a873e).
 	 */
-	if (errno == EINVAL || errno == ENOSYS ||
-	    errno == ENOENT || errno == ENXIO) {
-		if (verbose > 0)
+	if (err == EINVAL || err == ENOSYS || err == ENOENT || err == ENXIO) {
+		if (verbose > 0) {
 			ui__warning("%s event is not supported by the kernel.\n",
 				    evsel__name(counter));
+		}
 		counter->supported = false;
 		/*
 		 * errored is a sticky flag that means one of the counter's
 		 * cpu event had a problem and needs to be reexamined.
 		 */
 		counter->errored = true;
-
-		if ((evsel__leader(counter) != counter) ||
-		    !(counter->core.leader->nr_members > 1))
-			return COUNTER_SKIP;
-	} else if (evsel__fallback(counter, &target, errno, msg, sizeof(msg))) {
+	} else if (evsel__fallback(counter, &target, err, msg, sizeof(msg))) {
 		if (verbose > 0)
 			ui__warning("%s\n", msg);
 		return COUNTER_RETRY;
-	} else if (target__has_per_thread(&target) && errno != EOPNOTSUPP &&
+	} else if (target__has_per_thread(&target) && err != EOPNOTSUPP &&
 		   evsel_list->core.threads &&
 		   evsel_list->core.threads->err_thread != -1) {
 		/*
@@ -651,29 +658,16 @@ static enum counter_recovery stat_handle_error(struct evsel *counter)
 			evsel_list->core.threads->err_thread = -1;
 			return COUNTER_RETRY;
 		}
-	} else if (counter->skippable) {
-		if (verbose > 0)
-			ui__warning("skipping event %s that kernel failed to open .\n",
-				    evsel__name(counter));
-		counter->supported = false;
-		counter->errored = true;
-		return COUNTER_SKIP;
-	}
-
-	if (errno == EOPNOTSUPP) {
+	} else if (err == EOPNOTSUPP) {
 		if (verbose > 0) {
 			ui__warning("%s event is not supported by the kernel.\n",
 				    evsel__name(counter));
 		}
 		counter->supported = false;
 		counter->errored = true;
-
-		if ((evsel__leader(counter) != counter) ||
-		    !(counter->core.leader->nr_members > 1))
-			return COUNTER_SKIP;
 	}
 
-	evsel__open_strerror(counter, &target, errno, msg, sizeof(msg));
+	evsel__open_strerror(counter, &target, err, msg, sizeof(msg));
 	ui__error("%s\n", msg);
 
 	if (child_pid != -1)
@@ -761,7 +755,7 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
 				continue;
 			}
 
-			switch (stat_handle_error(counter)) {
+			switch (stat_handle_error(counter, errno)) {
 			case COUNTER_FATAL:
 				err = -1;
 				goto err_out;
@@ -803,7 +797,7 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
 		if (create_perf_stat_counter(counter, &stat_config, &target,
 					     evlist_cpu_itr.cpu_map_idx) < 0) {
 
-			switch (stat_handle_error(counter)) {
+			switch (stat_handle_error(counter, errno)) {
 			case COUNTER_FATAL:
 				err = -1;
 				goto err_out;