bpf_load.c 16 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689
  1. // SPDX-License-Identifier: GPL-2.0
  2. #include <stdio.h>
  3. #include <sys/types.h>
  4. #include <sys/stat.h>
  5. #include <fcntl.h>
  6. #include <libelf.h>
  7. #include <gelf.h>
  8. #include <errno.h>
  9. #include <unistd.h>
  10. #include <string.h>
  11. #include <stdbool.h>
  12. #include <stdlib.h>
  13. #include <linux/bpf.h>
  14. #include <linux/filter.h>
  15. #include <linux/perf_event.h>
  16. #include <linux/netlink.h>
  17. #include <linux/rtnetlink.h>
  18. #include <linux/types.h>
  19. #include <sys/types.h>
  20. #include <sys/socket.h>
  21. #include <sys/syscall.h>
  22. #include <sys/ioctl.h>
  23. #include <sys/mman.h>
  24. #include <poll.h>
  25. #include <ctype.h>
  26. #include <assert.h>
  27. #include <bpf/bpf.h>
  28. #include "bpf_load.h"
  29. #include "perf-sys.h"
  30. #define DEBUGFS "/sys/kernel/debug/tracing/"
static char license[128];	/* contents of the ELF "license" section */
static int kern_version;	/* contents of the ELF "version" section */
static bool processed_sec[128];	/* marks ELF sections already consumed */
char bpf_log_buf[BPF_LOG_BUF_SIZE];	/* verifier log from the last program load */
int map_fd[MAX_MAPS];		/* fds of maps created by load_maps() */
int prog_fd[MAX_PROGS];		/* fds of programs loaded by load_and_attach() */
int event_fd[MAX_PROGS];	/* perf/tracepoint event fds per program */
int prog_cnt;			/* number of programs loaded so far */
int prog_array_fd = -1;		/* fd of the last PROG_ARRAY map, -1 if none */
struct bpf_map_data map_data[MAX_MAPS];	/* parsed map definitions */
int map_data_count = 0;		/* number of valid entries in map_data[] */
  42. static int populate_prog_array(const char *event, int prog_fd)
  43. {
  44. int ind = atoi(event), err;
  45. err = bpf_map_update_elem(prog_array_fd, &ind, &prog_fd, BPF_ANY);
  46. if (err < 0) {
  47. printf("failed to store prog_fd in prog_array\n");
  48. return -1;
  49. }
  50. return 0;
  51. }
  52. static int write_kprobe_events(const char *val)
  53. {
  54. int fd, ret, flags;
  55. if (val == NULL)
  56. return -1;
  57. else if (val[0] == '\0')
  58. flags = O_WRONLY | O_TRUNC;
  59. else
  60. flags = O_WRONLY | O_APPEND;
  61. fd = open("/sys/kernel/debug/tracing/kprobe_events", flags);
  62. ret = write(fd, val, strlen(val));
  63. close(fd);
  64. return ret;
  65. }
/* Load one BPF program into the kernel and attach it to the hook encoded
 * in its ELF section name @event.
 *
 * @event: section name, e.g. "kprobe/sys_write", "tracepoint/...", "xdp"
 * @prog:  instruction buffer (the section's data)
 * @size:  buffer size in bytes
 *
 * On success the program fd is recorded in prog_fd[] (and for probe types
 * the perf event fd in event_fd[]).  Returns 0 on success, -1 on error.
 */
static int load_and_attach(const char *event, struct bpf_insn *prog, int size)
{
	/* The section-name prefix selects the program type below */
	bool is_socket = strncmp(event, "socket", 6) == 0;
	bool is_kprobe = strncmp(event, "kprobe/", 7) == 0;
	bool is_kretprobe = strncmp(event, "kretprobe/", 10) == 0;
	bool is_tracepoint = strncmp(event, "tracepoint/", 11) == 0;
	bool is_raw_tracepoint = strncmp(event, "raw_tracepoint/", 15) == 0;
	bool is_xdp = strncmp(event, "xdp", 3) == 0;
	bool is_perf_event = strncmp(event, "perf_event", 10) == 0;
	bool is_cgroup_skb = strncmp(event, "cgroup/skb", 10) == 0;
	bool is_cgroup_sk = strncmp(event, "cgroup/sock", 11) == 0;
	bool is_sockops = strncmp(event, "sockops", 7) == 0;
	bool is_sk_skb = strncmp(event, "sk_skb", 6) == 0;
	bool is_sk_msg = strncmp(event, "sk_msg", 6) == 0;
	size_t insns_cnt = size / sizeof(struct bpf_insn);
	enum bpf_prog_type prog_type;
	char buf[256];
	int fd, efd, err, id;
	struct perf_event_attr attr = {};

	/* perf attr used only for the kprobe/tracepoint attach path below */
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.sample_type = PERF_SAMPLE_RAW;
	attr.sample_period = 1;
	attr.wakeup_events = 1;

	if (is_socket) {
		prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	} else if (is_kprobe || is_kretprobe) {
		prog_type = BPF_PROG_TYPE_KPROBE;
	} else if (is_tracepoint) {
		prog_type = BPF_PROG_TYPE_TRACEPOINT;
	} else if (is_raw_tracepoint) {
		prog_type = BPF_PROG_TYPE_RAW_TRACEPOINT;
	} else if (is_xdp) {
		prog_type = BPF_PROG_TYPE_XDP;
	} else if (is_perf_event) {
		prog_type = BPF_PROG_TYPE_PERF_EVENT;
	} else if (is_cgroup_skb) {
		prog_type = BPF_PROG_TYPE_CGROUP_SKB;
	} else if (is_cgroup_sk) {
		prog_type = BPF_PROG_TYPE_CGROUP_SOCK;
	} else if (is_sockops) {
		prog_type = BPF_PROG_TYPE_SOCK_OPS;
	} else if (is_sk_skb) {
		prog_type = BPF_PROG_TYPE_SK_SKB;
	} else if (is_sk_msg) {
		prog_type = BPF_PROG_TYPE_SK_MSG;
	} else {
		printf("Unknown event '%s'\n", event);
		return -1;
	}

	if (prog_cnt == MAX_PROGS)
		return -1;

	fd = bpf_load_program(prog_type, prog, insns_cnt, license, kern_version,
			      bpf_log_buf, BPF_LOG_BUF_SIZE);
	if (fd < 0) {
		printf("bpf_load_program() err=%d\n%s", errno, bpf_log_buf);
		return -1;
	}

	prog_fd[prog_cnt++] = fd;

	/* these types are only loaded here; attachment is done by the caller
	 * (e.g. via netlink for xdp, or cgroup attach syscalls)
	 */
	if (is_xdp || is_perf_event || is_cgroup_skb || is_cgroup_sk)
		return 0;

	if (is_socket || is_sockops || is_sk_skb || is_sk_msg) {
		/* skip the type prefix ("socket" is 6 chars, others 7) */
		if (is_socket)
			event += 6;
		else
			event += 7;
		/* optional "/<n>" suffix installs into prog_array slot <n> */
		if (*event != '/')
			return 0;
		event++;
		if (!isdigit(*event)) {
			printf("invalid prog number\n");
			return -1;
		}
		return populate_prog_array(event, fd);
	}

	if (is_raw_tracepoint) {
		/* event + 15 skips the "raw_tracepoint/" prefix */
		efd = bpf_raw_tracepoint_open(event + 15, fd);
		if (efd < 0) {
			printf("tracepoint %s %s\n", event + 15, strerror(errno));
			return -1;
		}
		event_fd[prog_cnt - 1] = efd;
		return 0;
	}

	if (is_kprobe || is_kretprobe) {
		bool need_normal_check = true;
		const char *event_prefix = "";

		/* skip "kprobe/" (7) or "kretprobe/" (10) */
		if (is_kprobe)
			event += 7;
		else
			event += 10;

		if (*event == 0) {
			printf("event name cannot be empty\n");
			return -1;
		}

		/* a purely numeric name is a prog_array slot, not a probe */
		if (isdigit(*event))
			return populate_prog_array(event, fd);

#ifdef __x86_64__
		/* on x86-64 syscall entry points carry a __x64_ symbol prefix;
		 * try that form first and fall back to the plain name
		 */
		if (strncmp(event, "sys_", 4) == 0) {
			snprintf(buf, sizeof(buf), "%c:__x64_%s __x64_%s",
				is_kprobe ? 'p' : 'r', event, event);
			err = write_kprobe_events(buf);
			if (err >= 0) {
				need_normal_check = false;
				event_prefix = "__x64_";
			}
		}
#endif
		if (need_normal_check) {
			/* tracefs format: "p:NAME SYMBOL" / "r:NAME SYMBOL" */
			snprintf(buf, sizeof(buf), "%c:%s %s",
				is_kprobe ? 'p' : 'r', event, event);
			err = write_kprobe_events(buf);
			if (err < 0) {
				printf("failed to create kprobe '%s' error '%s'\n",
				       event, strerror(errno));
				return -1;
			}
		}

		/* build path to the event id file just created */
		strcpy(buf, DEBUGFS);
		strcat(buf, "events/kprobes/");
		strcat(buf, event_prefix);
		strcat(buf, event);
		strcat(buf, "/id");
	} else if (is_tracepoint) {
		/* skip the "tracepoint/" prefix */
		event += 11;

		if (*event == 0) {
			printf("event name cannot be empty\n");
			return -1;
		}
		strcpy(buf, DEBUGFS);
		strcat(buf, "events/");
		strcat(buf, event);
		strcat(buf, "/id");
	}

	/* read the tracefs event id and open a perf event on it */
	efd = open(buf, O_RDONLY, 0);
	if (efd < 0) {
		printf("failed to open event %s\n", event);
		return -1;
	}

	err = read(efd, buf, sizeof(buf));
	if (err < 0 || err >= sizeof(buf)) {
		printf("read from '%s' failed '%s'\n", event, strerror(errno));
		return -1;
	}
	close(efd);

	buf[err] = 0;
	id = atoi(buf);
	attr.config = id;

	efd = sys_perf_event_open(&attr, -1/*pid*/, 0/*cpu*/, -1/*group_fd*/, 0);
	if (efd < 0) {
		printf("event %d fd %d err %s\n", id, efd, strerror(errno));
		return -1;
	}
	event_fd[prog_cnt - 1] = efd;

	/* enable the event, then wire the BPF program to it */
	err = ioctl(efd, PERF_EVENT_IOC_ENABLE, 0);
	if (err < 0) {
		printf("ioctl PERF_EVENT_IOC_ENABLE failed err %s\n",
		       strerror(errno));
		return -1;
	}
	err = ioctl(efd, PERF_EVENT_IOC_SET_BPF, fd);
	if (err < 0) {
		printf("ioctl PERF_EVENT_IOC_SET_BPF failed err %s\n",
		       strerror(errno));
		return -1;
	}

	return 0;
}
  233. static int load_maps(struct bpf_map_data *maps, int nr_maps,
  234. fixup_map_cb fixup_map)
  235. {
  236. int i, numa_node;
  237. for (i = 0; i < nr_maps; i++) {
  238. if (fixup_map) {
  239. fixup_map(&maps[i], i);
  240. /* Allow userspace to assign map FD prior to creation */
  241. if (maps[i].fd != -1) {
  242. map_fd[i] = maps[i].fd;
  243. continue;
  244. }
  245. }
  246. numa_node = maps[i].def.map_flags & BPF_F_NUMA_NODE ?
  247. maps[i].def.numa_node : -1;
  248. if (maps[i].def.type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
  249. maps[i].def.type == BPF_MAP_TYPE_HASH_OF_MAPS) {
  250. int inner_map_fd = map_fd[maps[i].def.inner_map_idx];
  251. map_fd[i] = bpf_create_map_in_map_node(maps[i].def.type,
  252. maps[i].name,
  253. maps[i].def.key_size,
  254. inner_map_fd,
  255. maps[i].def.max_entries,
  256. maps[i].def.map_flags,
  257. numa_node);
  258. } else {
  259. map_fd[i] = bpf_create_map_node(maps[i].def.type,
  260. maps[i].name,
  261. maps[i].def.key_size,
  262. maps[i].def.value_size,
  263. maps[i].def.max_entries,
  264. maps[i].def.map_flags,
  265. numa_node);
  266. }
  267. if (map_fd[i] < 0) {
  268. printf("failed to create a map: %d %s\n",
  269. errno, strerror(errno));
  270. return 1;
  271. }
  272. maps[i].fd = map_fd[i];
  273. if (maps[i].def.type == BPF_MAP_TYPE_PROG_ARRAY)
  274. prog_array_fd = map_fd[i];
  275. }
  276. return 0;
  277. }
  278. static int get_sec(Elf *elf, int i, GElf_Ehdr *ehdr, char **shname,
  279. GElf_Shdr *shdr, Elf_Data **data)
  280. {
  281. Elf_Scn *scn;
  282. scn = elf_getscn(elf, i);
  283. if (!scn)
  284. return 1;
  285. if (gelf_getshdr(scn, shdr) != shdr)
  286. return 2;
  287. *shname = elf_strptr(elf, ehdr->e_shstrndx, shdr->sh_name);
  288. if (!*shname || !shdr->sh_size)
  289. return 3;
  290. *data = elf_getdata(scn, 0);
  291. if (!*data || elf_getdata(scn, *data) != NULL)
  292. return 4;
  293. return 0;
  294. }
  295. static int parse_relo_and_apply(Elf_Data *data, Elf_Data *symbols,
  296. GElf_Shdr *shdr, struct bpf_insn *insn,
  297. struct bpf_map_data *maps, int nr_maps)
  298. {
  299. int i, nrels;
  300. nrels = shdr->sh_size / shdr->sh_entsize;
  301. for (i = 0; i < nrels; i++) {
  302. GElf_Sym sym;
  303. GElf_Rel rel;
  304. unsigned int insn_idx;
  305. bool match = false;
  306. int j, map_idx;
  307. gelf_getrel(data, i, &rel);
  308. insn_idx = rel.r_offset / sizeof(struct bpf_insn);
  309. gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym);
  310. if (insn[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) {
  311. printf("invalid relo for insn[%d].code 0x%x\n",
  312. insn_idx, insn[insn_idx].code);
  313. return 1;
  314. }
  315. insn[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
  316. /* Match FD relocation against recorded map_data[] offset */
  317. for (map_idx = 0; map_idx < nr_maps; map_idx++) {
  318. if (maps[map_idx].elf_offset == sym.st_value) {
  319. match = true;
  320. break;
  321. }
  322. }
  323. if (match) {
  324. insn[insn_idx].imm = maps[map_idx].fd;
  325. } else {
  326. printf("invalid relo for insn[%d] no map_data match\n",
  327. insn_idx);
  328. return 1;
  329. }
  330. }
  331. return 0;
  332. }
  333. static int cmp_symbols(const void *l, const void *r)
  334. {
  335. const GElf_Sym *lsym = (const GElf_Sym *)l;
  336. const GElf_Sym *rsym = (const GElf_Sym *)r;
  337. if (lsym->st_value < rsym->st_value)
  338. return -1;
  339. else if (lsym->st_value > rsym->st_value)
  340. return 1;
  341. else
  342. return 0;
  343. }
  344. static int load_elf_maps_section(struct bpf_map_data *maps, int maps_shndx,
  345. Elf *elf, Elf_Data *symbols, int strtabidx)
  346. {
  347. int map_sz_elf, map_sz_copy;
  348. bool validate_zero = false;
  349. Elf_Data *data_maps;
  350. int i, nr_maps;
  351. GElf_Sym *sym;
  352. Elf_Scn *scn;
  353. int copy_sz;
  354. if (maps_shndx < 0)
  355. return -EINVAL;
  356. if (!symbols)
  357. return -EINVAL;
  358. /* Get data for maps section via elf index */
  359. scn = elf_getscn(elf, maps_shndx);
  360. if (scn)
  361. data_maps = elf_getdata(scn, NULL);
  362. if (!scn || !data_maps) {
  363. printf("Failed to get Elf_Data from maps section %d\n",
  364. maps_shndx);
  365. return -EINVAL;
  366. }
  367. /* For each map get corrosponding symbol table entry */
  368. sym = calloc(MAX_MAPS+1, sizeof(GElf_Sym));
  369. for (i = 0, nr_maps = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
  370. assert(nr_maps < MAX_MAPS+1);
  371. if (!gelf_getsym(symbols, i, &sym[nr_maps]))
  372. continue;
  373. if (sym[nr_maps].st_shndx != maps_shndx)
  374. continue;
  375. /* Only increment iif maps section */
  376. nr_maps++;
  377. }
  378. /* Align to map_fd[] order, via sort on offset in sym.st_value */
  379. qsort(sym, nr_maps, sizeof(GElf_Sym), cmp_symbols);
  380. /* Keeping compatible with ELF maps section changes
  381. * ------------------------------------------------
  382. * The program size of struct bpf_load_map_def is known by loader
  383. * code, but struct stored in ELF file can be different.
  384. *
  385. * Unfortunately sym[i].st_size is zero. To calculate the
  386. * struct size stored in the ELF file, assume all struct have
  387. * the same size, and simply divide with number of map
  388. * symbols.
  389. */
  390. map_sz_elf = data_maps->d_size / nr_maps;
  391. map_sz_copy = sizeof(struct bpf_load_map_def);
  392. if (map_sz_elf < map_sz_copy) {
  393. /*
  394. * Backward compat, loading older ELF file with
  395. * smaller struct, keeping remaining bytes zero.
  396. */
  397. map_sz_copy = map_sz_elf;
  398. } else if (map_sz_elf > map_sz_copy) {
  399. /*
  400. * Forward compat, loading newer ELF file with larger
  401. * struct with unknown features. Assume zero means
  402. * feature not used. Thus, validate rest of struct
  403. * data is zero.
  404. */
  405. validate_zero = true;
  406. }
  407. /* Memcpy relevant part of ELF maps data to loader maps */
  408. for (i = 0; i < nr_maps; i++) {
  409. struct bpf_load_map_def *def;
  410. unsigned char *addr, *end;
  411. const char *map_name;
  412. size_t offset;
  413. map_name = elf_strptr(elf, strtabidx, sym[i].st_name);
  414. maps[i].name = strdup(map_name);
  415. if (!maps[i].name) {
  416. printf("strdup(%s): %s(%d)\n", map_name,
  417. strerror(errno), errno);
  418. free(sym);
  419. return -errno;
  420. }
  421. /* Symbol value is offset into ELF maps section data area */
  422. offset = sym[i].st_value;
  423. def = (struct bpf_load_map_def *)(data_maps->d_buf + offset);
  424. maps[i].elf_offset = offset;
  425. memset(&maps[i].def, 0, sizeof(struct bpf_load_map_def));
  426. memcpy(&maps[i].def, def, map_sz_copy);
  427. /* Verify no newer features were requested */
  428. if (validate_zero) {
  429. addr = (unsigned char*) def + map_sz_copy;
  430. end = (unsigned char*) def + map_sz_elf;
  431. for (; addr < end; addr++) {
  432. if (*addr != 0) {
  433. free(sym);
  434. return -EFBIG;
  435. }
  436. }
  437. }
  438. }
  439. free(sym);
  440. return nr_maps;
  441. }
  442. static int do_load_bpf_file(const char *path, fixup_map_cb fixup_map)
  443. {
  444. int fd, i, ret, maps_shndx = -1, strtabidx = -1;
  445. Elf *elf;
  446. GElf_Ehdr ehdr;
  447. GElf_Shdr shdr, shdr_prog;
  448. Elf_Data *data, *data_prog, *data_maps = NULL, *symbols = NULL;
  449. char *shname, *shname_prog;
  450. int nr_maps = 0;
  451. /* reset global variables */
  452. kern_version = 0;
  453. memset(license, 0, sizeof(license));
  454. memset(processed_sec, 0, sizeof(processed_sec));
  455. if (elf_version(EV_CURRENT) == EV_NONE)
  456. return 1;
  457. fd = open(path, O_RDONLY, 0);
  458. if (fd < 0)
  459. return 1;
  460. elf = elf_begin(fd, ELF_C_READ, NULL);
  461. if (!elf)
  462. return 1;
  463. if (gelf_getehdr(elf, &ehdr) != &ehdr)
  464. return 1;
  465. /* clear all kprobes */
  466. i = write_kprobe_events("");
  467. /* scan over all elf sections to get license and map info */
  468. for (i = 1; i < ehdr.e_shnum; i++) {
  469. if (get_sec(elf, i, &ehdr, &shname, &shdr, &data))
  470. continue;
  471. if (0) /* helpful for llvm debugging */
  472. printf("section %d:%s data %p size %zd link %d flags %d\n",
  473. i, shname, data->d_buf, data->d_size,
  474. shdr.sh_link, (int) shdr.sh_flags);
  475. if (strcmp(shname, "license") == 0) {
  476. processed_sec[i] = true;
  477. memcpy(license, data->d_buf, data->d_size);
  478. } else if (strcmp(shname, "version") == 0) {
  479. processed_sec[i] = true;
  480. if (data->d_size != sizeof(int)) {
  481. printf("invalid size of version section %zd\n",
  482. data->d_size);
  483. return 1;
  484. }
  485. memcpy(&kern_version, data->d_buf, sizeof(int));
  486. } else if (strcmp(shname, "maps") == 0) {
  487. int j;
  488. maps_shndx = i;
  489. data_maps = data;
  490. for (j = 0; j < MAX_MAPS; j++)
  491. map_data[j].fd = -1;
  492. } else if (shdr.sh_type == SHT_SYMTAB) {
  493. strtabidx = shdr.sh_link;
  494. symbols = data;
  495. }
  496. }
  497. ret = 1;
  498. if (!symbols) {
  499. printf("missing SHT_SYMTAB section\n");
  500. goto done;
  501. }
  502. if (data_maps) {
  503. nr_maps = load_elf_maps_section(map_data, maps_shndx,
  504. elf, symbols, strtabidx);
  505. if (nr_maps < 0) {
  506. printf("Error: Failed loading ELF maps (errno:%d):%s\n",
  507. nr_maps, strerror(-nr_maps));
  508. goto done;
  509. }
  510. if (load_maps(map_data, nr_maps, fixup_map))
  511. goto done;
  512. map_data_count = nr_maps;
  513. processed_sec[maps_shndx] = true;
  514. }
  515. /* process all relo sections, and rewrite bpf insns for maps */
  516. for (i = 1; i < ehdr.e_shnum; i++) {
  517. if (processed_sec[i])
  518. continue;
  519. if (get_sec(elf, i, &ehdr, &shname, &shdr, &data))
  520. continue;
  521. if (shdr.sh_type == SHT_REL) {
  522. struct bpf_insn *insns;
  523. /* locate prog sec that need map fixup (relocations) */
  524. if (get_sec(elf, shdr.sh_info, &ehdr, &shname_prog,
  525. &shdr_prog, &data_prog))
  526. continue;
  527. if (shdr_prog.sh_type != SHT_PROGBITS ||
  528. !(shdr_prog.sh_flags & SHF_EXECINSTR))
  529. continue;
  530. insns = (struct bpf_insn *) data_prog->d_buf;
  531. processed_sec[i] = true; /* relo section */
  532. if (parse_relo_and_apply(data, symbols, &shdr, insns,
  533. map_data, nr_maps))
  534. continue;
  535. }
  536. }
  537. /* load programs */
  538. for (i = 1; i < ehdr.e_shnum; i++) {
  539. if (processed_sec[i])
  540. continue;
  541. if (get_sec(elf, i, &ehdr, &shname, &shdr, &data))
  542. continue;
  543. if (memcmp(shname, "kprobe/", 7) == 0 ||
  544. memcmp(shname, "kretprobe/", 10) == 0 ||
  545. memcmp(shname, "tracepoint/", 11) == 0 ||
  546. memcmp(shname, "raw_tracepoint/", 15) == 0 ||
  547. memcmp(shname, "xdp", 3) == 0 ||
  548. memcmp(shname, "perf_event", 10) == 0 ||
  549. memcmp(shname, "socket", 6) == 0 ||
  550. memcmp(shname, "cgroup/", 7) == 0 ||
  551. memcmp(shname, "sockops", 7) == 0 ||
  552. memcmp(shname, "sk_skb", 6) == 0 ||
  553. memcmp(shname, "sk_msg", 6) == 0) {
  554. ret = load_and_attach(shname, data->d_buf,
  555. data->d_size);
  556. if (ret != 0)
  557. goto done;
  558. }
  559. }
  560. done:
  561. close(fd);
  562. return ret;
  563. }
/* Load every map and program from the BPF ELF object at @path, with no
 * map fixup callback.  Returns 0 on success, 1 on failure.
 */
int load_bpf_file(char *path)
{
	return do_load_bpf_file(path, NULL);
}
/* Like load_bpf_file(), but invokes @fixup_map on each map definition
 * before creation, letting the caller tweak or pre-assign map fds.
 * Returns 0 on success, 1 on failure.
 */
int load_bpf_file_fixup_map(const char *path, fixup_map_cb fixup_map)
{
	return do_load_bpf_file(path, fixup_map);
}
  572. void read_trace_pipe(void)
  573. {
  574. int trace_fd;
  575. trace_fd = open(DEBUGFS "trace_pipe", O_RDONLY, 0);
  576. if (trace_fd < 0)
  577. return;
  578. while (1) {
  579. static char buf[4096];
  580. ssize_t sz;
  581. sz = read(trace_fd, buf, sizeof(buf) - 1);
  582. if (sz > 0) {
  583. buf[sz] = 0;
  584. puts(buf);
  585. }
  586. }
  587. }