// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <stdio.h>
#include <sys/epoll.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <util/record.h>
#include <util/util.h>
#include <util/bpf-loader.h>
#include <util/evlist.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <api/fs/fs.h>
#include <bpf/bpf.h>
#include <perf/mmap.h>
#include "tests.h"
#include "llvm.h"
#include "debug.h"
#include "parse-events.h"
#include "util/mmap.h"

#define NR_ITERS 111
#define PERF_TEST_BPF_PATH "/sys/fs/bpf/perf_test"

#ifdef HAVE_LIBBPF_SUPPORT

static int epoll_pwait_loop(void)
{
        int i;

        /* Should fail NR_ITERS times */
        for (i = 0; i < NR_ITERS; i++)
                epoll_pwait(-(i + 1), NULL, 0, 0, NULL);
        return 0;
}

#ifdef HAVE_BPF_PROLOGUE

static int llseek_loop(void)
{
        int fds[2], i;

        fds[0] = open("/dev/null", O_RDONLY);
        fds[1] = open("/dev/null", O_RDWR);

        if (fds[0] < 0 || fds[1] < 0)
                return -1;

        for (i = 0; i < NR_ITERS; i++) {
                lseek(fds[i % 2], i, (i / 2) % 2 ? SEEK_CUR : SEEK_SET);
                lseek(fds[(i + 1) % 2], i, (i / 2) % 2 ? SEEK_CUR : SEEK_SET);
        }
        close(fds[0]);
        close(fds[1]);
        return 0;
}

#endif

static struct {
        enum test_llvm__testcase prog_id;
        const char *desc;
        const char *name;
        const char *msg_compile_fail;
        const char *msg_load_fail;
        int (*target_func)(void);
        int expect_result;
        bool pin;
} bpf_testcase_table[] = {
        {
                .prog_id = LLVM_TESTCASE_BASE,
                .desc = "Basic BPF filtering",
                .name = "[basic_bpf_test]",
                .msg_compile_fail = "fix 'perf test LLVM' first",
                .msg_load_fail = "load bpf object failed",
                .target_func = &epoll_pwait_loop,
                .expect_result = (NR_ITERS + 1) / 2,
        },
        {
                .prog_id = LLVM_TESTCASE_BASE,
                .desc = "BPF pinning",
                .name = "[bpf_pinning]",
                .msg_compile_fail = "fix kbuild first",
                .msg_load_fail = "check your vmlinux setting?",
                .target_func = &epoll_pwait_loop,
                .expect_result = (NR_ITERS + 1) / 2,
                .pin = true,
        },
#ifdef HAVE_BPF_PROLOGUE
        {
                .prog_id = LLVM_TESTCASE_BPF_PROLOGUE,
                .desc = "BPF prologue generation",
                .name = "[bpf_prologue_test]",
                .msg_compile_fail = "fix kbuild first",
                .msg_load_fail = "check your vmlinux setting?",
                .target_func = &llseek_loop,
                .expect_result = (NR_ITERS + 1) / 4,
        },
#endif
        {
                .prog_id = LLVM_TESTCASE_BPF_RELOCATION,
                .desc = "BPF relocation checker",
                .name = "[bpf_relocation_test]",
                .msg_compile_fail = "fix 'perf test LLVM' first",
                .msg_load_fail = "libbpf error when dealing with relocation",
        },
};

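/*
 * do_test() attaches the events defined by the BPF object to a new
 * evlist, runs @func while the events are enabled, then counts the
 * PERF_RECORD_SAMPLE records found in the ring buffer. The test passes
 * only when the BPF filter lets exactly @expect samples through.
 */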
static int do_test(struct bpf_object *obj, int (*func)(void),
                   int expect)
{
        struct record_opts opts = {
                .target = {
                        .uid = UINT_MAX,
                        .uses_mmap = true,
                },
                .freq = 0,
                .mmap_pages = 256,
                .default_interval = 1,
        };

        char pid[16];
        char sbuf[STRERR_BUFSIZE];
        struct evlist *evlist;
        int i, ret = TEST_FAIL, err = 0, count = 0;

        struct parse_events_state parse_state;
        struct parse_events_error parse_error;

        bzero(&parse_error, sizeof(parse_error));
        bzero(&parse_state, sizeof(parse_state));
        parse_state.error = &parse_error;
        INIT_LIST_HEAD(&parse_state.list);

        err = parse_events_load_bpf_obj(&parse_state, &parse_state.list, obj, NULL);
        if (err || list_empty(&parse_state.list)) {
                pr_debug("Failed to add events selected by BPF\n");
                return TEST_FAIL;
        }

        snprintf(pid, sizeof(pid), "%d", getpid());
        pid[sizeof(pid) - 1] = '\0';
        opts.target.tid = opts.target.pid = pid;

        /* Instead of evlist__new_default, don't add default events */
        evlist = evlist__new();
        if (!evlist) {
                pr_debug("Not enough memory to create evlist\n");
                return TEST_FAIL;
        }

        err = perf_evlist__create_maps(evlist, &opts.target);
        if (err < 0) {
                pr_debug("Not enough memory to create thread/cpu maps\n");
                goto out_delete_evlist;
        }

        perf_evlist__splice_list_tail(evlist, &parse_state.list);
        evlist->nr_groups = parse_state.nr_groups;

        perf_evlist__config(evlist, &opts, NULL);

        err = evlist__open(evlist);
        if (err < 0) {
                pr_debug("evlist__open: %s\n",
                         str_error_r(errno, sbuf, sizeof(sbuf)));
                goto out_delete_evlist;
        }

        err = evlist__mmap(evlist, opts.mmap_pages);
        if (err < 0) {
                pr_debug("evlist__mmap: %s\n",
                         str_error_r(errno, sbuf, sizeof(sbuf)));
                goto out_delete_evlist;
        }

        evlist__enable(evlist);
        (*func)();
        evlist__disable(evlist);

        for (i = 0; i < evlist->core.nr_mmaps; i++) {
                union perf_event *event;
                struct mmap *md;

                md = &evlist->mmap[i];
                if (perf_mmap__read_init(&md->core) < 0)
                        continue;

                while ((event = perf_mmap__read_event(&md->core)) != NULL) {
                        const u32 type = event->header.type;

                        if (type == PERF_RECORD_SAMPLE)
                                count++;
                }
                perf_mmap__read_done(&md->core);
        }

        if (count != expect) {
                pr_debug("BPF filter result incorrect, expected %d, got %d samples\n",
                         expect, count);
                goto out_delete_evlist;
        }

        ret = TEST_OK;

out_delete_evlist:
        evlist__delete(evlist);
        return ret;
}

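/*
 * bpf__prepare_load_buffer() returns an ERR_PTR() encoded error on
 * failure; normalize it to NULL here so __test__bpf() only has to
 * distinguish "loaded" from "not loaded".
 */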
static struct bpf_object *
prepare_bpf(void *obj_buf, size_t obj_buf_sz, const char *name)
{
        struct bpf_object *obj;

        obj = bpf__prepare_load_buffer(obj_buf, obj_buf_sz, name);
        if (IS_ERR(obj)) {
                pr_debug("Compile BPF program failed.\n");
                return NULL;
        }
        return obj;
}

static int __test__bpf(int idx)
{
        int ret;
        void *obj_buf;
        size_t obj_buf_sz;
        struct bpf_object *obj;

        ret = test_llvm__fetch_bpf_obj(&obj_buf, &obj_buf_sz,
                                       bpf_testcase_table[idx].prog_id,
                                       true, NULL);
        if (ret != TEST_OK || !obj_buf || !obj_buf_sz) {
                pr_debug("Unable to get BPF object, %s\n",
                         bpf_testcase_table[idx].msg_compile_fail);
                if (idx == 0)
                        return TEST_SKIP;
                else
                        return TEST_FAIL;
        }

        obj = prepare_bpf(obj_buf, obj_buf_sz,
                          bpf_testcase_table[idx].name);
        /*
         * A testcase without a target_func (the relocation checker) is
         * expected to fail to load; every other testcase must load.
         */
        if ((!!bpf_testcase_table[idx].target_func) != (!!obj)) {
                if (!obj)
                        pr_debug("Fail to load BPF object: %s\n",
                                 bpf_testcase_table[idx].msg_load_fail);
                else
                        pr_debug("Success unexpectedly: %s\n",
                                 bpf_testcase_table[idx].msg_load_fail);
                ret = TEST_FAIL;
                goto out;
        }

        if (obj) {
                ret = do_test(obj,
                              bpf_testcase_table[idx].target_func,
                              bpf_testcase_table[idx].expect_result);
                if (ret != TEST_OK)
                        goto out;
                if (bpf_testcase_table[idx].pin) {
                        int err;

                        if (!bpf_fs__mount()) {
                                pr_debug("BPF filesystem not mounted\n");
                                ret = TEST_FAIL;
                                goto out;
                        }
                        err = mkdir(PERF_TEST_BPF_PATH, 0777);
                        if (err && errno != EEXIST) {
                                pr_debug("Failed to make perf_test dir: %s\n",
                                         strerror(errno));
                                ret = TEST_FAIL;
                                goto out;
                        }
                        if (bpf_object__pin(obj, PERF_TEST_BPF_PATH))
                                ret = TEST_FAIL;
                        if (rm_rf(PERF_TEST_BPF_PATH))
                                ret = TEST_FAIL;
                }
        }

out:
        bpf__clear();
        return ret;
}

int test__bpf_subtest_get_nr(void)
{
        return (int)ARRAY_SIZE(bpf_testcase_table);
}

const char *test__bpf_subtest_get_desc(int i)
{
        if (i < 0 || i >= (int)ARRAY_SIZE(bpf_testcase_table))
                return NULL;
        return bpf_testcase_table[i].desc;
}

/*
 * Load a trivial "return 1" kprobe program to verify the running kernel
 * has basic BPF support before attempting the real testcases.
 */
static int check_env(void)
{
        int err;
        unsigned int kver_int;
        char license[] = "GPL";

        struct bpf_insn insns[] = {
                BPF_MOV64_IMM(BPF_REG_0, 1),
                BPF_EXIT_INSN(),
        };

        err = fetch_kernel_version(&kver_int, NULL, 0);
        if (err) {
                pr_debug("Unable to get kernel version\n");
                return err;
        }

        err = bpf_load_program(BPF_PROG_TYPE_KPROBE, insns,
                               ARRAY_SIZE(insns),
                               license, kver_int, NULL, 0);
        if (err < 0) {
                pr_err("Missing basic BPF support, skip this test: %s\n",
                       strerror(errno));
                return err;
        }
        close(err);

        return 0;
}

int test__bpf(struct test *test __maybe_unused, int i)
{
        if (i < 0 || i >= (int)ARRAY_SIZE(bpf_testcase_table))
                return TEST_FAIL;

        if (geteuid() != 0) {
                pr_debug("Only root can run BPF test\n");
                return TEST_SKIP;
        }

        if (check_env())
                return TEST_SKIP;

        return __test__bpf(i);
}

#else
int test__bpf_subtest_get_nr(void)
{
        return 0;
}

const char *test__bpf_subtest_get_desc(int i __maybe_unused)
{
        return NULL;
}

int test__bpf(struct test *test __maybe_unused, int i __maybe_unused)
{
        pr_debug("Skip BPF test because BPF support is not compiled\n");
        return TEST_SKIP;
}
#endif