bpf.c

#include <errno.h>
#include <stdio.h>
#include <sys/epoll.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <util/util.h>
#include <util/bpf-loader.h>
#include <util/evlist.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <api/fs/fs.h>
#include <bpf/bpf.h>
#include "tests.h"
#include "llvm.h"
#include "debug.h"

#define NR_ITERS		111
#define PERF_TEST_BPF_PATH	"/sys/fs/bpf/perf_test"

#ifdef HAVE_LIBBPF_SUPPORT
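/*
 * Workload for the basic filtering and pinning subtests: issue NR_ITERS
 * failing epoll_pwait() calls so the attached BPF program has syscalls
 * to filter. The expected sample counts come from bpf_testcase_table.
 */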
static int epoll_pwait_loop(void)
{
	int i;

	/* Should fail NR_ITERS times */
	for (i = 0; i < NR_ITERS; i++)
		epoll_pwait(-(i + 1), NULL, 0, 0, NULL);
	return 0;
}

#ifdef HAVE_BPF_PROLOGUE
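/*
 * Workload for the BPF prologue subtest: issue NR_ITERS pairs of lseek()
 * calls on two /dev/null file descriptors; per bpf_testcase_table, the
 * filter is expected to pass (NR_ITERS + 1) / 4 of them as samples.
 */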
static int llseek_loop(void)
{
	int fds[2], i;

	fds[0] = open("/dev/null", O_RDONLY);
	fds[1] = open("/dev/null", O_RDWR);

	if (fds[0] < 0 || fds[1] < 0)
		return -1;

	for (i = 0; i < NR_ITERS; i++) {
		lseek(fds[i % 2], i, (i / 2) % 2 ? SEEK_CUR : SEEK_SET);
		lseek(fds[(i + 1) % 2], i, (i / 2) % 2 ? SEEK_CUR : SEEK_SET);
	}
	close(fds[0]);
	close(fds[1]);
	return 0;
}
#endif
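
/*
 * Table of BPF subtests: each entry names an LLVM test case to compile,
 * the workload to run while the resulting object is loaded, whether the
 * object should also be pinned, and the number of samples the BPF
 * filter is expected to let through.
 */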
static struct {
	enum test_llvm__testcase prog_id;
	const char *desc;
	const char *name;
	const char *msg_compile_fail;
	const char *msg_load_fail;
	int (*target_func)(void);
	int expect_result;
	bool pin;
} bpf_testcase_table[] = {
	{
		.prog_id	  = LLVM_TESTCASE_BASE,
		.desc		  = "Basic BPF filtering",
		.name		  = "[basic_bpf_test]",
		.msg_compile_fail = "fix 'perf test LLVM' first",
		.msg_load_fail	  = "load bpf object failed",
		.target_func	  = &epoll_pwait_loop,
		.expect_result	  = (NR_ITERS + 1) / 2,
	},
	{
		.prog_id	  = LLVM_TESTCASE_BASE,
		.desc		  = "BPF pinning",
		.name		  = "[bpf_pinning]",
		.msg_compile_fail = "fix kbuild first",
		.msg_load_fail	  = "check your vmlinux setting?",
		.target_func	  = &epoll_pwait_loop,
		.expect_result	  = (NR_ITERS + 1) / 2,
		.pin		  = true,
	},
#ifdef HAVE_BPF_PROLOGUE
	{
		.prog_id	  = LLVM_TESTCASE_BPF_PROLOGUE,
		.desc		  = "BPF prologue generation",
		.name		  = "[bpf_prologue_test]",
		.msg_compile_fail = "fix kbuild first",
		.msg_load_fail	  = "check your vmlinux setting?",
		.target_func	  = &llseek_loop,
		.expect_result	  = (NR_ITERS + 1) / 4,
	},
#endif
	{
		.prog_id	  = LLVM_TESTCASE_BPF_RELOCATION,
		.desc		  = "BPF relocation checker",
		.name		  = "[bpf_relocation_test]",
		.msg_compile_fail = "fix 'perf test LLVM' first",
		.msg_load_fail	  = "libbpf error when dealing with relocation",
	},
};
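
/*
 * Load the events described by the BPF object into a fresh evlist, run
 * the workload with the evlist enabled, and check that the number of
 * PERF_RECORD_SAMPLE events read from the mmap buffers matches the
 * expected count.
 */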
static int do_test(struct bpf_object *obj, int (*func)(void),
		   int expect)
{
	struct record_opts opts = {
		.target = {
			.uid = UINT_MAX,
			.uses_mmap = true,
		},
		.freq	      = 0,
		.mmap_pages   = 256,
		.default_interval = 1,
	};

	char pid[16];
	char sbuf[STRERR_BUFSIZE];
	struct perf_evlist *evlist;
	int i, ret = TEST_FAIL, err = 0, count = 0;

	struct parse_events_state parse_state;
	struct parse_events_error parse_error;

	bzero(&parse_error, sizeof(parse_error));
	bzero(&parse_state, sizeof(parse_state));
	parse_state.error = &parse_error;
	INIT_LIST_HEAD(&parse_state.list);

	err = parse_events_load_bpf_obj(&parse_state, &parse_state.list, obj, NULL);
	if (err || list_empty(&parse_state.list)) {
		pr_debug("Failed to add events selected by BPF\n");
		return TEST_FAIL;
	}

	snprintf(pid, sizeof(pid), "%d", getpid());
	pid[sizeof(pid) - 1] = '\0';

	opts.target.tid = opts.target.pid = pid;

	/* Use perf_evlist__new() instead of perf_evlist__new_default():
	 * don't add default events, only the ones selected by BPF. */
	evlist = perf_evlist__new();
	if (!evlist) {
		pr_debug("Not enough memory to create evlist\n");
		return TEST_FAIL;
	}

	err = perf_evlist__create_maps(evlist, &opts.target);
	if (err < 0) {
		pr_debug("Not enough memory to create thread/cpu maps\n");
		goto out_delete_evlist;
	}

	perf_evlist__splice_list_tail(evlist, &parse_state.list);
	evlist->nr_groups = parse_state.nr_groups;

	perf_evlist__config(evlist, &opts, NULL);

	err = perf_evlist__open(evlist);
	if (err < 0) {
		pr_debug("perf_evlist__open: %s\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	err = perf_evlist__mmap(evlist, opts.mmap_pages);
	if (err < 0) {
		pr_debug("perf_evlist__mmap: %s\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	perf_evlist__enable(evlist);
	(*func)();
	perf_evlist__disable(evlist);

	for (i = 0; i < evlist->nr_mmaps; i++) {
		union perf_event *event;
		struct perf_mmap *md;

		md = &evlist->mmap[i];
		if (perf_mmap__read_init(md) < 0)
			continue;

		while ((event = perf_mmap__read_event(md)) != NULL) {
			const u32 type = event->header.type;

			if (type == PERF_RECORD_SAMPLE)
				count++;
		}
		perf_mmap__read_done(md);
	}

	if (count != expect) {
		pr_debug("BPF filter result incorrect, expected %d, got %d samples\n",
			 expect, count);
		goto out_delete_evlist;
	}

	ret = TEST_OK;

out_delete_evlist:
	perf_evlist__delete(evlist);
	return ret;
}
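
/*
 * Wrap bpf__prepare_load_buffer(): turn the in-memory object buffer
 * produced by the LLVM test into a struct bpf_object, or NULL on error.
 */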
static struct bpf_object *
prepare_bpf(void *obj_buf, size_t obj_buf_sz, const char *name)
{
	struct bpf_object *obj;

	obj = bpf__prepare_load_buffer(obj_buf, obj_buf_sz, name);
	if (IS_ERR(obj)) {
		pr_debug("Compile BPF program failed.\n");
		return NULL;
	}
	return obj;
}
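
/*
 * Run one subtest: fetch the compiled BPF object from the LLVM test,
 * load it and run do_test() against the workload. The relocation
 * subtest has no target_func and is expected to fail to load. For the
 * pinning subtest, also pin the object under PERF_TEST_BPF_PATH and
 * remove it again.
 */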
static int __test__bpf(int idx)
{
	int ret;
	void *obj_buf;
	size_t obj_buf_sz;
	struct bpf_object *obj;

	ret = test_llvm__fetch_bpf_obj(&obj_buf, &obj_buf_sz,
				       bpf_testcase_table[idx].prog_id,
				       true, NULL);
	if (ret != TEST_OK || !obj_buf || !obj_buf_sz) {
		pr_debug("Unable to get BPF object, %s\n",
			 bpf_testcase_table[idx].msg_compile_fail);
		if (idx == 0)
			return TEST_SKIP;
		else
			return TEST_FAIL;
	}

	obj = prepare_bpf(obj_buf, obj_buf_sz,
			  bpf_testcase_table[idx].name);
	if ((!!bpf_testcase_table[idx].target_func) != (!!obj)) {
		if (!obj)
			pr_debug("Fail to load BPF object: %s\n",
				 bpf_testcase_table[idx].msg_load_fail);
		else
			pr_debug("Success unexpectedly: %s\n",
				 bpf_testcase_table[idx].msg_load_fail);
		ret = TEST_FAIL;
		goto out;
	}

	if (obj) {
		ret = do_test(obj,
			      bpf_testcase_table[idx].target_func,
			      bpf_testcase_table[idx].expect_result);
		if (ret != TEST_OK)
			goto out;
		if (bpf_testcase_table[idx].pin) {
			int err;

			if (!bpf_fs__mount()) {
				pr_debug("BPF filesystem not mounted\n");
				ret = TEST_FAIL;
				goto out;
			}
			err = mkdir(PERF_TEST_BPF_PATH, 0777);
			if (err && errno != EEXIST) {
				pr_debug("Failed to make perf_test dir: %s\n",
					 strerror(errno));
				ret = TEST_FAIL;
				goto out;
			}
			if (bpf_object__pin(obj, PERF_TEST_BPF_PATH))
				ret = TEST_FAIL;
			if (rm_rf(PERF_TEST_BPF_PATH))
				ret = TEST_FAIL;
		}
	}

out:
	bpf__clear();
	return ret;
}
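
/*
 * Subtest enumeration used by the perf test framework: the number of
 * entries in bpf_testcase_table and the description of each.
 */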
int test__bpf_subtest_get_nr(void)
{
	return (int)ARRAY_SIZE(bpf_testcase_table);
}

const char *test__bpf_subtest_get_desc(int i)
{
	if (i < 0 || i >= (int)ARRAY_SIZE(bpf_testcase_table))
		return NULL;
	return bpf_testcase_table[i].desc;
}
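
/*
 * Probe for basic BPF support by loading a trivial "return 1" program
 * of type BPF_PROG_TYPE_KPROBE against the running kernel version.
 */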
static int check_env(void)
{
	int err;
	unsigned int kver_int;
	char license[] = "GPL";

	struct bpf_insn insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 1),
		BPF_EXIT_INSN(),
	};

	err = fetch_kernel_version(&kver_int, NULL, 0);
	if (err) {
		pr_debug("Unable to get kernel version\n");
		return err;
	}

	err = bpf_load_program(BPF_PROG_TYPE_KPROBE, insns,
			       sizeof(insns) / sizeof(insns[0]),
			       license, kver_int, NULL, 0);
	if (err < 0) {
		pr_err("Missing basic BPF support, skip this test: %s\n",
		       strerror(errno));
		return err;
	}
	close(err);

	return 0;
}
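
/*
 * Entry point for the "BPF" perf test: requires root and a kernel with
 * basic BPF support; otherwise the subtest is skipped.
 */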
int test__bpf(struct test *test __maybe_unused, int i)
{
	int err;

	if (i < 0 || i >= (int)ARRAY_SIZE(bpf_testcase_table))
		return TEST_FAIL;

	if (geteuid() != 0) {
		pr_debug("Only root can run BPF test\n");
		return TEST_SKIP;
	}

	if (check_env())
		return TEST_SKIP;

	err = __test__bpf(i);

	return err;
}

#else
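/* Stubs used when perf is built without libbpf support. */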
int test__bpf_subtest_get_nr(void)
{
	return 0;
}

const char *test__bpf_subtest_get_desc(int i __maybe_unused)
{
	return NULL;
}

int test__bpf(struct test *test __maybe_unused, int i __maybe_unused)
{
	pr_debug("Skip BPF test because BPF support is not compiled\n");
	return TEST_SKIP;
}
#endif