Skip to content

Commit be806f0

Browse files
captain5050 and namhyung
authored and committed
perf stat: Add/fix bperf cgroup max events workarounds
Commit b830851 bumped the max events to 1024 but this results in BPF verifier issues if the number of command line events is too large. Work around this by: 1) moving the constants to a header file to share between BPF and perf C code, 2) testing that the maximum number of events doesn't cause BPF verifier issues in debug builds, 3) lowering the max events from 1024 to 128, 4) in perf stat, if there are more events than the BPF counters can support then disabling BPF counter usage. The rodata setup is factored into its own function to avoid duplicating it in the testing code. Signed-off-by: Ian Rogers <irogers@google.com> Fixes: b830851 ("perf stat bperf cgroup: Increase MAX_EVENTS from 32 to 1024") Signed-off-by: Namhyung Kim <namhyung@kernel.org>
1 parent 3e98f02 commit be806f0

File tree

4 files changed

+91
-34
lines changed

4 files changed

+91
-34
lines changed

tools/perf/builtin-stat.c

Lines changed: 12 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -96,6 +96,10 @@
9696
#include <perf/evlist.h>
9797
#include <internal/threadmap.h>
9898

99+
#ifdef HAVE_BPF_SKEL
100+
#include "util/bpf_skel/bperf_cgroup.h"
101+
#endif
102+
99103
#define DEFAULT_SEPARATOR " "
100104
#define FREEZE_ON_SMI_PATH "bus/event_source/devices/cpu/freeze_on_smi"
101105

@@ -2852,7 +2856,14 @@ int cmd_stat(int argc, const char **argv)
28522856
goto out;
28532857
}
28542858
}
2855-
2859+
#ifdef HAVE_BPF_SKEL
2860+
if (target.use_bpf && nr_cgroups &&
2861+
(evsel_list->core.nr_entries / nr_cgroups) > BPERF_CGROUP__MAX_EVENTS) {
2862+
pr_warning("Disabling BPF counters due to more events (%d) than the max (%d)\n",
2863+
evsel_list->core.nr_entries / nr_cgroups, BPERF_CGROUP__MAX_EVENTS);
2864+
target.use_bpf = false;
2865+
}
2866+
#endif // HAVE_BPF_SKEL
28562867
evlist__warn_user_requested_cpus(evsel_list, target.cpu_list);
28572868

28582869
evlist__for_each_entry(evsel_list, counter) {

tools/perf/util/bpf_counter_cgroup.c

Lines changed: 56 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -27,6 +27,7 @@
2727
#include "cpumap.h"
2828
#include "thread_map.h"
2929

30+
#include "bpf_skel/bperf_cgroup.h"
3031
#include "bpf_skel/bperf_cgroup.skel.h"
3132

3233
static struct perf_event_attr cgrp_switch_attr = {
@@ -42,6 +43,55 @@ static struct bperf_cgroup_bpf *skel;
4243

4344
#define FD(evt, cpu) (*(int *)xyarray__entry(evt->core.fd, cpu, 0))
4445

46+
static void setup_rodata(struct bperf_cgroup_bpf *sk, int evlist_size)
47+
{
48+
int map_size, total_cpus = cpu__max_cpu().cpu;
49+
50+
sk->rodata->num_cpus = total_cpus;
51+
sk->rodata->num_events = evlist_size / nr_cgroups;
52+
53+
if (cgroup_is_v2("perf_event") > 0)
54+
sk->rodata->use_cgroup_v2 = 1;
55+
56+
BUG_ON(evlist_size % nr_cgroups != 0);
57+
58+
/* we need one copy of events per cpu for reading */
59+
map_size = total_cpus * evlist_size / nr_cgroups;
60+
bpf_map__set_max_entries(sk->maps.events, map_size);
61+
bpf_map__set_max_entries(sk->maps.cgrp_idx, nr_cgroups);
62+
/* previous result is saved in a per-cpu array */
63+
map_size = evlist_size / nr_cgroups;
64+
bpf_map__set_max_entries(sk->maps.prev_readings, map_size);
65+
/* cgroup result needs all events (per-cpu) */
66+
map_size = evlist_size;
67+
bpf_map__set_max_entries(sk->maps.cgrp_readings, map_size);
68+
}
69+
70+
static void test_max_events_program_load(void)
71+
{
72+
#ifndef NDEBUG
73+
/*
74+
* Test that the program verifies with the maximum number of events. If
75+
* this test fails unfortunately perf needs recompiling with a lower
76+
* BPERF_CGROUP__MAX_EVENTS to avoid BPF verifier issues.
77+
*/
78+
int err, max_events = BPERF_CGROUP__MAX_EVENTS * nr_cgroups;
79+
struct bperf_cgroup_bpf *test_skel = bperf_cgroup_bpf__open();
80+
81+
if (!test_skel) {
82+
pr_err("Failed to open cgroup skeleton\n");
83+
return;
84+
}
85+
setup_rodata(test_skel, max_events);
86+
err = bperf_cgroup_bpf__load(test_skel);
87+
if (err) {
88+
pr_err("Failed to load cgroup skeleton with max events %d.\n",
89+
BPERF_CGROUP__MAX_EVENTS);
90+
}
91+
bperf_cgroup_bpf__destroy(test_skel);
92+
#endif
93+
}
94+
4595
static int bperf_load_program(struct evlist *evlist)
4696
{
4797
struct bpf_link *link;
@@ -50,35 +100,18 @@ static int bperf_load_program(struct evlist *evlist)
50100
int i, j;
51101
struct perf_cpu cpu;
52102
int total_cpus = cpu__max_cpu().cpu;
53-
int map_size, map_fd;
54-
int prog_fd, err;
103+
int map_fd, prog_fd, err;
104+
105+
set_max_rlimit();
106+
107+
test_max_events_program_load();
55108

56109
skel = bperf_cgroup_bpf__open();
57110
if (!skel) {
58111
pr_err("Failed to open cgroup skeleton\n");
59112
return -1;
60113
}
61-
62-
skel->rodata->num_cpus = total_cpus;
63-
skel->rodata->num_events = evlist->core.nr_entries / nr_cgroups;
64-
65-
if (cgroup_is_v2("perf_event") > 0)
66-
skel->rodata->use_cgroup_v2 = 1;
67-
68-
BUG_ON(evlist->core.nr_entries % nr_cgroups != 0);
69-
70-
/* we need one copy of events per cpu for reading */
71-
map_size = total_cpus * evlist->core.nr_entries / nr_cgroups;
72-
bpf_map__set_max_entries(skel->maps.events, map_size);
73-
bpf_map__set_max_entries(skel->maps.cgrp_idx, nr_cgroups);
74-
/* previous result is saved in a per-cpu array */
75-
map_size = evlist->core.nr_entries / nr_cgroups;
76-
bpf_map__set_max_entries(skel->maps.prev_readings, map_size);
77-
/* cgroup result needs all events (per-cpu) */
78-
map_size = evlist->core.nr_entries;
79-
bpf_map__set_max_entries(skel->maps.cgrp_readings, map_size);
80-
81-
set_max_rlimit();
114+
setup_rodata(skel, evlist->core.nr_entries);
82115

83116
err = bperf_cgroup_bpf__load(skel);
84117
if (err) {

tools/perf/util/bpf_skel/bperf_cgroup.bpf.c

Lines changed: 8 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -1,14 +1,12 @@
11
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
22
// Copyright (c) 2021 Facebook
33
// Copyright (c) 2021 Google
4+
#include "bperf_cgroup.h"
45
#include "vmlinux.h"
56
#include <bpf/bpf_helpers.h>
67
#include <bpf/bpf_tracing.h>
78
#include <bpf/bpf_core_read.h>
89

9-
#define MAX_LEVELS 10 // max cgroup hierarchy level: arbitrary
10-
#define MAX_EVENTS 1024 // max events per cgroup: arbitrary
11-
1210
// NOTE: many of map and global data will be modified before loading
1311
// from the userspace (perf tool) using the skeleton helpers.
1412

@@ -97,7 +95,7 @@ static inline int get_cgroup_v1_idx(__u32 *cgrps, int size)
9795
cgrp = BPF_CORE_READ(p, cgroups, subsys[perf_subsys_id], cgroup);
9896
level = BPF_CORE_READ(cgrp, level);
9997

100-
for (cnt = 0; i < MAX_LEVELS; i++) {
98+
for (cnt = 0; i < BPERF_CGROUP__MAX_LEVELS; i++) {
10199
__u64 cgrp_id;
102100

103101
if (i > level)
@@ -123,7 +121,7 @@ static inline int get_cgroup_v2_idx(__u32 *cgrps, int size)
123121
__u32 *elem;
124122
int cnt;
125123

126-
for (cnt = 0; i < MAX_LEVELS; i++) {
124+
for (cnt = 0; i < BPERF_CGROUP__MAX_LEVELS; i++) {
127125
__u64 cgrp_id = bpf_get_current_ancestor_cgroup_id(i);
128126

129127
if (cgrp_id == 0)
@@ -148,17 +146,17 @@ static int bperf_cgroup_count(void)
148146
register int c = 0;
149147
struct bpf_perf_event_value val, delta, *prev_val, *cgrp_val;
150148
__u32 cpu = bpf_get_smp_processor_id();
151-
__u32 cgrp_idx[MAX_LEVELS];
149+
__u32 cgrp_idx[BPERF_CGROUP__MAX_LEVELS];
152150
int cgrp_cnt;
153151
__u32 key, cgrp;
154152
long err;
155153

156154
if (use_cgroup_v2)
157-
cgrp_cnt = get_cgroup_v2_idx(cgrp_idx, MAX_LEVELS);
155+
cgrp_cnt = get_cgroup_v2_idx(cgrp_idx, BPERF_CGROUP__MAX_LEVELS);
158156
else
159-
cgrp_cnt = get_cgroup_v1_idx(cgrp_idx, MAX_LEVELS);
157+
cgrp_cnt = get_cgroup_v1_idx(cgrp_idx, BPERF_CGROUP__MAX_LEVELS);
160158

161-
for ( ; idx < MAX_EVENTS; idx++) {
159+
for ( ; idx < BPERF_CGROUP__MAX_EVENTS; idx++) {
162160
if (idx == num_events)
163161
break;
164162

@@ -186,7 +184,7 @@ static int bperf_cgroup_count(void)
186184
delta.enabled = val.enabled - prev_val->enabled;
187185
delta.running = val.running - prev_val->running;
188186

189-
for (c = 0; c < MAX_LEVELS; c++) {
187+
for (c = 0; c < BPERF_CGROUP__MAX_LEVELS; c++) {
190188
if (c == cgrp_cnt)
191189
break;
192190

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,15 @@
1+
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
2+
/* Data structures shared between BPF and tools. */
3+
#ifndef __BPERF_CGROUP_H
4+
#define __BPERF_CGROUP_H
5+
6+
// These constants impact code size of bperf_cgroup.bpf.c that may result in BPF
7+
// verifier issues. They are exposed to control the size and also to disable BPF
8+
// counters when the number of user events is too large.
9+
10+
// max cgroup hierarchy level: arbitrary
11+
#define BPERF_CGROUP__MAX_LEVELS 10
12+
// max events per cgroup: arbitrary
13+
#define BPERF_CGROUP__MAX_EVENTS 128
14+
15+
#endif /* __BPERF_CGROUP_H */

0 commit comments

Comments
 (0)