/*
 * bpf-loader.c
 *
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 */
#include <linux/bpf.h>
#include <bpf/libbpf.h>
#include <bpf/bpf.h>
#include <linux/err.h>
#include <linux/string.h>
#include "perf.h"
#include "debug.h"
#include "bpf-loader.h"
#include "bpf-prologue.h"
#include "llvm-utils.h"
#include "probe-event.h"
#include "probe-finder.h" /* for MAX_PROBES */
#include "parse-events.h"
#define DEFINE_PRINT_FN(name, level) \
static int libbpf_##name(const char *fmt, ...)	\
{						\
	va_list args;				\
	int ret;				\
						\
	va_start(args, fmt);			\
	ret = veprintf(level, verbose, pr_fmt(fmt), args);\
	va_end(args);				\
	return ret;				\
}

DEFINE_PRINT_FN(warning, 1)
DEFINE_PRINT_FN(info, 1)
DEFINE_PRINT_FN(debug, 1)
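
/*
 * Per-program private data, attached via bpf_program__set_priv().
 * For tracepoint programs (is_tp), only sys_name/evt_name are used;
 * for [ku]probe programs, pev holds the parsed probe event, and the
 * prologue fields are filled in by hook_load_preprocessor() when any
 * resolved probe point needs argument fetching.
 */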
struct bpf_prog_priv {
	bool is_tp;
	char *sys_name;
	char *evt_name;
	struct perf_probe_event pev;
	bool need_prologue;
	struct bpf_insn *insns_buf;
	int nr_types;
	int *type_mapping;
};

static bool libbpf_initialized;

struct bpf_object *
bpf__prepare_load_buffer(void *obj_buf, size_t obj_buf_sz, const char *name)
{
	struct bpf_object *obj;

	if (!libbpf_initialized) {
		libbpf_set_print(libbpf_warning,
				 libbpf_info,
				 libbpf_debug);
		libbpf_initialized = true;
	}

	obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, name);
	if (IS_ERR(obj)) {
		pr_debug("bpf: failed to load buffer\n");
		return ERR_PTR(-EINVAL);
	}

	return obj;
}
struct bpf_object *bpf__prepare_load(const char *filename, bool source)
{
	struct bpf_object *obj;

	if (!libbpf_initialized) {
		libbpf_set_print(libbpf_warning,
				 libbpf_info,
				 libbpf_debug);
		libbpf_initialized = true;
	}

	if (source) {
		int err;
		void *obj_buf;
		size_t obj_buf_sz;

		err = llvm__compile_bpf(filename, &obj_buf, &obj_buf_sz);
		if (err)
			return ERR_PTR(-BPF_LOADER_ERRNO__COMPILE);
		obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, filename);
		free(obj_buf);
	} else
		obj = bpf_object__open(filename);

	if (IS_ERR(obj)) {
		pr_debug("bpf: failed to load %s\n", filename);
		return obj;
	}

	return obj;
}
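
/*
 * Typical call sequence for this API, inferred from the functions in
 * this file (a sketch, not a complete caller; attach_cb stands for a
 * caller-supplied bpf_prog_iter_callback_t):
 *
 *	obj = bpf__prepare_load("prog.c", true);    compile and open
 *	bpf__probe(obj);                            create [ku]probes
 *	bpf__load(obj);                             load into kernel
 *	bpf__foreach_event(obj, attach_cb, arg);    attach to events
 *	...
 *	bpf__unprobe(obj);                          remove probe points
 *	bpf__clear();                               unprobe and close all
 */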
void bpf__clear(void)
{
	struct bpf_object *obj, *tmp;

	bpf_object__for_each_safe(obj, tmp) {
		bpf__unprobe(obj);
		bpf_object__close(obj);
	}
}

static void
clear_prog_priv(struct bpf_program *prog __maybe_unused,
		void *_priv)
{
	struct bpf_prog_priv *priv = _priv;

	cleanup_perf_probe_events(&priv->pev, 1);
	zfree(&priv->insns_buf);
	zfree(&priv->type_mapping);
	zfree(&priv->sys_name);
	zfree(&priv->evt_name);
	free(priv);
}
static int
prog_config__exec(const char *value, struct perf_probe_event *pev)
{
	pev->uprobes = true;
	pev->target = strdup(value);
	if (!pev->target)
		return -ENOMEM;
	return 0;
}

static int
prog_config__module(const char *value, struct perf_probe_event *pev)
{
	pev->uprobes = false;
	pev->target = strdup(value);
	if (!pev->target)
		return -ENOMEM;
	return 0;
}

static int
prog_config__bool(const char *value, bool *pbool, bool invert)
{
	int err;
	bool bool_value;

	if (!pbool)
		return -EINVAL;
	err = strtobool(value, &bool_value);
	if (err)
		return err;
	*pbool = invert ? !bool_value : bool_value;
	return 0;
}

static int
prog_config__inlines(const char *value,
		     struct perf_probe_event *pev __maybe_unused)
{
	return prog_config__bool(value, &probe_conf.no_inlines, true);
}

static int
prog_config__force(const char *value,
		   struct perf_probe_event *pev __maybe_unused)
{
	return prog_config__bool(value, &probe_conf.force_add, false);
}
static struct {
	const char *key;
	const char *usage;
	const char *desc;
	int (*func)(const char *, struct perf_probe_event *);
} bpf_prog_config_terms[] = {
	{
		.key	= "exec",
		.usage	= "exec=<full path of file>",
		.desc	= "Set uprobe target",
		.func	= prog_config__exec,
	},
	{
		.key	= "module",
		.usage	= "module=<module name>",
		.desc	= "Set kprobe module",
		.func	= prog_config__module,
	},
	{
		.key	= "inlines",
		.usage	= "inlines=[yes|no]",
		.desc	= "Probe at inline symbol",
		.func	= prog_config__inlines,
	},
	{
		.key	= "force",
		.usage	= "force=[yes|no]",
		.desc	= "Forcibly add events with existing name",
		.func	= prog_config__force,
	},
};
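
/*
 * Illustrative section names accepted by the parsers below (the first
 * form appears in perf's own BPF tests; the second, with config terms
 * and a hypothetical binary path, shows the "key=value;" prefix):
 *
 *	SEC("func=null_lseek file->f_mode offset orig")
 *	SEC("exec=/lib64/libc.so.6;force=yes;malloc")
 *
 * "key=value;" pairs are consumed first; the remainder is either a
 * tracepoint ("sched:sched_switch") or a perf-probe command.
 */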
static int
do_prog_config(const char *key, const char *value,
	       struct perf_probe_event *pev)
{
	unsigned int i;

	pr_debug("config bpf program: %s=%s\n", key, value);
	for (i = 0; i < ARRAY_SIZE(bpf_prog_config_terms); i++)
		if (strcmp(key, bpf_prog_config_terms[i].key) == 0)
			return bpf_prog_config_terms[i].func(value, pev);

	pr_debug("BPF: ERROR: invalid program config option: %s=%s\n",
		 key, value);

	pr_debug("\nHint: Valid options are:\n");
	for (i = 0; i < ARRAY_SIZE(bpf_prog_config_terms); i++)
		pr_debug("\t%s:\t%s\n", bpf_prog_config_terms[i].usage,
			 bpf_prog_config_terms[i].desc);
	pr_debug("\n");

	return -BPF_LOADER_ERRNO__PROGCONF_TERM;
}
static const char *
parse_prog_config_kvpair(const char *config_str, struct perf_probe_event *pev)
{
	char *text = strdup(config_str);
	char *sep, *line;
	const char *main_str = NULL;
	int err = 0;

	if (!text) {
		pr_debug("Not enough memory: dup config_str failed\n");
		return ERR_PTR(-ENOMEM);
	}

	line = text;
	while ((sep = strchr(line, ';'))) {
		char *equ;

		*sep = '\0';
		equ = strchr(line, '=');
		if (!equ) {
			pr_warning("WARNING: invalid config in BPF object: %s\n",
				   line);
			pr_warning("\tShould be 'key=value'.\n");
			goto nextline;
		}
		*equ = '\0';

		err = do_prog_config(line, equ + 1, pev);
		if (err)
			break;
nextline:
		line = sep + 1;
	}

	if (!err)
		main_str = config_str + (line - text);
	free(text);

	return err ? ERR_PTR(err) : main_str;
}
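
/*
 * Decide what the remaining config string names: a string without '='
 * that contains ':' is treated as a tracepoint ("sys:event", e.g.
 * "sched:sched_switch"); anything else goes through
 * parse_perf_probe_command() and becomes a [ku]probe definition.
 */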
static int
parse_prog_config(const char *config_str, const char **p_main_str,
		  bool *is_tp, struct perf_probe_event *pev)
{
	int err;
	const char *main_str = parse_prog_config_kvpair(config_str, pev);

	if (IS_ERR(main_str))
		return PTR_ERR(main_str);
	*p_main_str = main_str;

	if (!strchr(main_str, '=')) {
		/* Is a tracepoint event? */
		const char *s = strchr(main_str, ':');

		if (!s) {
			pr_debug("bpf: '%s' is not a valid tracepoint\n",
				 config_str);
			return -BPF_LOADER_ERRNO__CONFIG;
		}
		*is_tp = true;
		return 0;
	}

	*is_tp = false;
	err = parse_perf_probe_command(main_str, pev);
	if (err < 0) {
		pr_debug("bpf: '%s' is not a valid config string\n",
			 config_str);
		/* parse failed, no need to clear pev. */
		return -BPF_LOADER_ERRNO__CONFIG;
	}
	return 0;
}
static int
config_bpf_program(struct bpf_program *prog)
{
	struct perf_probe_event *pev = NULL;
	struct bpf_prog_priv *priv = NULL;
	const char *config_str, *main_str;
	bool is_tp = false;
	int err;

	/* Initialize per-program probing settings */
	probe_conf.no_inlines = false;
	probe_conf.force_add = false;

	config_str = bpf_program__title(prog, false);
	if (IS_ERR(config_str)) {
		pr_debug("bpf: unable to get title for program\n");
		return PTR_ERR(config_str);
	}

	priv = calloc(1, sizeof(*priv));
	if (!priv) {
		pr_debug("bpf: failed to alloc priv\n");
		return -ENOMEM;
	}
	pev = &priv->pev;

	pr_debug("bpf: config program '%s'\n", config_str);
	err = parse_prog_config(config_str, &main_str, &is_tp, pev);
	if (err)
		goto errout;

	if (is_tp) {
		char *s = strchr(main_str, ':');

		priv->is_tp = true;
		priv->sys_name = strndup(main_str, s - main_str);
		priv->evt_name = strdup(s + 1);
		goto set_priv;
	}

	if (pev->group && strcmp(pev->group, PERF_BPF_PROBE_GROUP)) {
		pr_debug("bpf: '%s': group for event is set and not '%s'.\n",
			 config_str, PERF_BPF_PROBE_GROUP);
		err = -BPF_LOADER_ERRNO__GROUP;
		goto errout;
	} else if (!pev->group)
		pev->group = strdup(PERF_BPF_PROBE_GROUP);

	if (!pev->group) {
		pr_debug("bpf: strdup failed\n");
		err = -ENOMEM;
		goto errout;
	}

	if (!pev->event) {
		pr_debug("bpf: '%s': event name is missing. Section name should be 'key=value'\n",
			 config_str);
		err = -BPF_LOADER_ERRNO__EVENTNAME;
		goto errout;
	}
	pr_debug("bpf: config '%s' is ok\n", config_str);

set_priv:
	err = bpf_program__set_priv(prog, priv, clear_prog_priv);
	if (err) {
		pr_debug("Failed to set priv for program '%s'\n", config_str);
		goto errout;
	}

	return 0;

errout:
	if (pev)
		clear_perf_probe_event(pev);
	free(priv);
	return err;
}
static int bpf__prepare_probe(void)
{
	static int err = 0;
	static bool initialized = false;

	/*
	 * Make err static, so if init failed the first time,
	 * bpf__prepare_probe() fails each time without calling
	 * init_probe_symbol_maps() multiple times.
	 */
	if (initialized)
		return err;

	initialized = true;
	err = init_probe_symbol_maps(false);
	if (err < 0)
		pr_debug("Failed to init_probe_symbol_maps\n");
	probe_conf.max_probes = MAX_PROBES;
	return err;
}
static int
preproc_gen_prologue(struct bpf_program *prog, int n,
		     struct bpf_insn *orig_insns, int orig_insns_cnt,
		     struct bpf_prog_prep_result *res)
{
	struct bpf_prog_priv *priv = bpf_program__priv(prog);
	struct probe_trace_event *tev;
	struct perf_probe_event *pev;
	struct bpf_insn *buf;
	size_t prologue_cnt = 0;
	int i, err;

	if (IS_ERR(priv) || !priv || priv->is_tp)
		goto errout;

	pev = &priv->pev;

	if (n < 0 || n >= priv->nr_types)
		goto errout;

	/* Find a tev that belongs to that type */
	for (i = 0; i < pev->ntevs; i++) {
		if (priv->type_mapping[i] == n)
			break;
	}

	if (i >= pev->ntevs) {
		pr_debug("Internal error: prologue type %d not found\n", n);
		return -BPF_LOADER_ERRNO__PROLOGUE;
	}

	tev = &pev->tevs[i];

	buf = priv->insns_buf;
	err = bpf__gen_prologue(tev->args, tev->nargs,
				buf, &prologue_cnt,
				BPF_MAXINSNS - orig_insns_cnt);
	if (err) {
		const char *title;

		title = bpf_program__title(prog, false);
		if (!title)
			title = "[unknown]";

		pr_debug("Failed to generate prologue for program %s\n",
			 title);
		return err;
	}

	memcpy(&buf[prologue_cnt], orig_insns,
	       sizeof(struct bpf_insn) * orig_insns_cnt);

	res->new_insn_ptr = buf;
	res->new_insn_cnt = prologue_cnt + orig_insns_cnt;
	res->pfd = NULL;
	return 0;

errout:
	pr_debug("Internal error in preproc_gen_prologue\n");
	return -BPF_LOADER_ERRNO__PROLOGUE;
}
/*
 * compare_tev_args is reflexive, transitive and antisymmetric.
 * I can prove it, but this margin is too narrow to contain the proof.
 */
static int compare_tev_args(const void *ptev1, const void *ptev2)
{
	int i, ret;
	const struct probe_trace_event *tev1 =
		*(const struct probe_trace_event **)ptev1;
	const struct probe_trace_event *tev2 =
		*(const struct probe_trace_event **)ptev2;

	ret = tev2->nargs - tev1->nargs;
	if (ret)
		return ret;

	for (i = 0; i < tev1->nargs; i++) {
		struct probe_trace_arg *arg1, *arg2;
		struct probe_trace_arg_ref *ref1, *ref2;

		arg1 = &tev1->args[i];
		arg2 = &tev2->args[i];

		ret = strcmp(arg1->value, arg2->value);
		if (ret)
			return ret;

		ref1 = arg1->ref;
		ref2 = arg2->ref;

		while (ref1 && ref2) {
			ret = ref2->offset - ref1->offset;
			if (ret)
				return ret;

			ref1 = ref1->next;
			ref2 = ref2->next;
		}

		if (ref1 || ref2)
			return ref2 ? 1 : -1;
	}
	return 0;
}
/*
 * Assign a type number to each tev in a pev.
 * mapping is an array with the same number of slots as tevs in that
 * pev. nr_types will be set to the number of distinct types.
 */
static int map_prologue(struct perf_probe_event *pev, int *mapping,
			int *nr_types)
{
	int i, type = 0;
	struct probe_trace_event **ptevs;

	size_t array_sz = sizeof(*ptevs) * pev->ntevs;

	ptevs = malloc(array_sz);
	if (!ptevs) {
		pr_debug("Not enough memory: alloc ptevs failed\n");
		return -ENOMEM;
	}

	pr_debug("In map_prologue, ntevs=%d\n", pev->ntevs);
	for (i = 0; i < pev->ntevs; i++)
		ptevs[i] = &pev->tevs[i];

	qsort(ptevs, pev->ntevs, sizeof(*ptevs),
	      compare_tev_args);

	for (i = 0; i < pev->ntevs; i++) {
		int n;

		n = ptevs[i] - pev->tevs;
		if (i == 0) {
			mapping[n] = type;
			pr_debug("mapping[%d]=%d\n", n, type);
			continue;
		}

		if (compare_tev_args(ptevs + i, ptevs + i - 1) == 0)
			mapping[n] = type;
		else
			mapping[n] = ++type;

		pr_debug("mapping[%d]=%d\n", n, mapping[n]);
	}
	free(ptevs);
	*nr_types = type + 1;

	return 0;
}
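
/*
 * Worked example: suppose a pev expanded to three tevs where tevs[0]
 * and tevs[2] fetch identical argument lists and tevs[1] differs.
 * After sorting, equal tevs sit next to each other, so map_prologue()
 * produces mapping = {0, 1, 0} (or {1, 0, 1}, depending on sort
 * order) and nr_types = 2: one shared prologue is generated per type
 * rather than per probe point.
 */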
static int hook_load_preprocessor(struct bpf_program *prog)
{
	struct bpf_prog_priv *priv = bpf_program__priv(prog);
	struct perf_probe_event *pev;
	bool need_prologue = false;
	int err, i;

	if (IS_ERR(priv) || !priv) {
		pr_debug("Internal error when hook preprocessor\n");
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	if (priv->is_tp) {
		priv->need_prologue = false;
		return 0;
	}

	pev = &priv->pev;
	for (i = 0; i < pev->ntevs; i++) {
		struct probe_trace_event *tev = &pev->tevs[i];

		if (tev->nargs > 0) {
			need_prologue = true;
			break;
		}
	}

	/*
	 * Since no tev has an argument, we don't need to generate
	 * a prologue.
	 */
	if (!need_prologue) {
		priv->need_prologue = false;
		return 0;
	}

	priv->need_prologue = true;
	priv->insns_buf = malloc(sizeof(struct bpf_insn) * BPF_MAXINSNS);
	if (!priv->insns_buf) {
		pr_debug("Not enough memory: alloc insns_buf failed\n");
		return -ENOMEM;
	}

	priv->type_mapping = malloc(sizeof(int) * pev->ntevs);
	if (!priv->type_mapping) {
		pr_debug("Not enough memory: alloc type_mapping failed\n");
		return -ENOMEM;
	}
	memset(priv->type_mapping, -1,
	       sizeof(int) * pev->ntevs);

	err = map_prologue(pev, priv->type_mapping, &priv->nr_types);
	if (err)
		return err;

	err = bpf_program__set_prep(prog, priv->nr_types,
				    preproc_gen_prologue);
	return err;
}
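
/*
 * bpf__probe(): for every program in the object, parse its section
 * name, create the requested kprobes/uprobes (tracepoint programs
 * need no probe point), and hook the prologue preprocessor so that
 * argument fetchers can be prepended at load time.
 */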
int bpf__probe(struct bpf_object *obj)
{
	int err = 0;
	struct bpf_program *prog;
	struct bpf_prog_priv *priv;
	struct perf_probe_event *pev;

	err = bpf__prepare_probe();
	if (err) {
		pr_debug("bpf__prepare_probe failed\n");
		return err;
	}

	bpf_object__for_each_program(prog, obj) {
		err = config_bpf_program(prog);
		if (err)
			goto out;

		priv = bpf_program__priv(prog);
		if (IS_ERR(priv) || !priv) {
			err = PTR_ERR(priv);
			goto out;
		}

		if (priv->is_tp) {
			bpf_program__set_tracepoint(prog);
			continue;
		}

		bpf_program__set_kprobe(prog);
		pev = &priv->pev;

		err = convert_perf_probe_events(pev, 1);
		if (err < 0) {
			pr_debug("bpf_probe: failed to convert perf probe events\n");
			goto out;
		}

		err = apply_perf_probe_events(pev, 1);
		if (err < 0) {
			pr_debug("bpf_probe: failed to apply perf probe events\n");
			goto out;
		}

		/*
		 * After probing, let's consider the prologue, which
		 * adds an argument fetcher to BPF programs.
		 *
		 * hook_load_preprocessor() hooks a preprocessor onto
		 * bpf_program, letting it generate the prologue
		 * dynamically during loading.
		 */
		err = hook_load_preprocessor(prog);
		if (err)
			goto out;
	}
out:
	return err < 0 ? err : 0;
}
#define EVENTS_WRITE_BUFSIZE	4096
int bpf__unprobe(struct bpf_object *obj)
{
	int err, ret = 0;
	struct bpf_program *prog;

	bpf_object__for_each_program(prog, obj) {
		struct bpf_prog_priv *priv = bpf_program__priv(prog);
		int i;

		if (IS_ERR(priv) || !priv || priv->is_tp)
			continue;

		for (i = 0; i < priv->pev.ntevs; i++) {
			struct probe_trace_event *tev = &priv->pev.tevs[i];
			char name_buf[EVENTS_WRITE_BUFSIZE];
			struct strfilter *delfilter;

			snprintf(name_buf, EVENTS_WRITE_BUFSIZE,
				 "%s:%s", tev->group, tev->event);
			name_buf[EVENTS_WRITE_BUFSIZE - 1] = '\0';

			delfilter = strfilter__new(name_buf, NULL);
			if (!delfilter) {
				pr_debug("Failed to create filter for unprobing\n");
				ret = -ENOMEM;
				continue;
			}

			err = del_perf_probe_events(delfilter);
			strfilter__delete(delfilter);
			if (err) {
				pr_debug("Failed to delete %s\n", name_buf);
				ret = err;
				continue;
			}
		}
	}
	return ret;
}
int bpf__load(struct bpf_object *obj)
{
	int err;

	err = bpf_object__load(obj);
	if (err) {
		pr_debug("bpf: load objects failed\n");
		return err;
	}
	return 0;
}
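
/*
 * bpf__foreach_event(): invoke func once per attachable entity. A
 * tracepoint program yields a single (sys, event, fd) triple; a probe
 * program yields one triple per tev, picking the per-type program
 * instance via bpf_program__nth_fd() when a prologue was generated.
 */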
int bpf__foreach_event(struct bpf_object *obj,
		       bpf_prog_iter_callback_t func,
		       void *arg)
{
	struct bpf_program *prog;
	int err;

	bpf_object__for_each_program(prog, obj) {
		struct bpf_prog_priv *priv = bpf_program__priv(prog);
		struct probe_trace_event *tev;
		struct perf_probe_event *pev;
		int i, fd;

		if (IS_ERR(priv) || !priv) {
			pr_debug("bpf: failed to get private field\n");
			return -BPF_LOADER_ERRNO__INTERNAL;
		}

		if (priv->is_tp) {
			fd = bpf_program__fd(prog);
			err = (*func)(priv->sys_name, priv->evt_name, fd, arg);
			if (err) {
				pr_debug("bpf: tracepoint callback failed, stop iterating\n");
				return err;
			}
			continue;
		}

		pev = &priv->pev;
		for (i = 0; i < pev->ntevs; i++) {
			tev = &pev->tevs[i];

			if (priv->need_prologue) {
				int type = priv->type_mapping[i];

				fd = bpf_program__nth_fd(prog, type);
			} else {
				fd = bpf_program__fd(prog);
			}

			if (fd < 0) {
				pr_debug("bpf: failed to get file descriptor\n");
				return fd;
			}

			err = (*func)(tev->group, tev->event, fd, arg);
			if (err) {
				pr_debug("bpf: callback failed, stop iterating\n");
				return err;
			}
		}
	}
	return 0;
}
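
/*
 * Map configuration: each "map:..." term from the command line is
 * recorded as a struct bpf_map_op on the map's private op list and
 * replayed against the kernel map by bpf__apply_obj_config() once the
 * object is loaded and map fds exist.
 */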
enum bpf_map_op_type {
	BPF_MAP_OP_SET_VALUE,
	BPF_MAP_OP_SET_EVSEL,
};

enum bpf_map_key_type {
	BPF_MAP_KEY_ALL,
	BPF_MAP_KEY_RANGES,
};

struct bpf_map_op {
	struct list_head list;
	enum bpf_map_op_type op_type;
	enum bpf_map_key_type key_type;
	union {
		struct parse_events_array array;
	} k;
	union {
		u64 value;
		struct perf_evsel *evsel;
	} v;
};

struct bpf_map_priv {
	struct list_head ops_list;
};
static void
bpf_map_op__delete(struct bpf_map_op *op)
{
	if (!list_empty(&op->list))
		list_del(&op->list);
	if (op->key_type == BPF_MAP_KEY_RANGES)
		parse_events__clear_array(&op->k.array);
	free(op);
}

static void
bpf_map_priv__purge(struct bpf_map_priv *priv)
{
	struct bpf_map_op *pos, *n;

	list_for_each_entry_safe(pos, n, &priv->ops_list, list) {
		list_del_init(&pos->list);
		bpf_map_op__delete(pos);
	}
}

static void
bpf_map_priv__clear(struct bpf_map *map __maybe_unused,
		    void *_priv)
{
	struct bpf_map_priv *priv = _priv;

	bpf_map_priv__purge(priv);
	free(priv);
}

static int
bpf_map_op_setkey(struct bpf_map_op *op, struct parse_events_term *term)
{
	op->key_type = BPF_MAP_KEY_ALL;
	if (!term)
		return 0;

	if (term->array.nr_ranges) {
		size_t memsz = term->array.nr_ranges *
				sizeof(op->k.array.ranges[0]);

		op->k.array.ranges = memdup(term->array.ranges, memsz);
		if (!op->k.array.ranges) {
			pr_debug("Not enough memory to alloc indices for map\n");
			return -ENOMEM;
		}
		op->key_type = BPF_MAP_KEY_RANGES;
		op->k.array.nr_ranges = term->array.nr_ranges;
	}
	return 0;
}
static struct bpf_map_op *
bpf_map_op__new(struct parse_events_term *term)
{
	struct bpf_map_op *op;
	int err;

	op = zalloc(sizeof(*op));
	if (!op) {
		pr_debug("Failed to alloc bpf_map_op\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&op->list);

	err = bpf_map_op_setkey(op, term);
	if (err) {
		free(op);
		return ERR_PTR(err);
	}
	return op;
}

static struct bpf_map_op *
bpf_map_op__clone(struct bpf_map_op *op)
{
	struct bpf_map_op *newop;

	newop = memdup(op, sizeof(*op));
	if (!newop) {
		pr_debug("Failed to alloc bpf_map_op\n");
		return NULL;
	}

	INIT_LIST_HEAD(&newop->list);
	if (op->key_type == BPF_MAP_KEY_RANGES) {
		size_t memsz = op->k.array.nr_ranges *
			       sizeof(op->k.array.ranges[0]);

		newop->k.array.ranges = memdup(op->k.array.ranges, memsz);
		if (!newop->k.array.ranges) {
			pr_debug("Failed to alloc indices for map\n");
			free(newop);
			return NULL;
		}
	}

	return newop;
}
static struct bpf_map_priv *
bpf_map_priv__clone(struct bpf_map_priv *priv)
{
	struct bpf_map_priv *newpriv;
	struct bpf_map_op *pos, *newop;

	newpriv = zalloc(sizeof(*newpriv));
	if (!newpriv) {
		pr_debug("Not enough memory to alloc map private\n");
		return NULL;
	}
	INIT_LIST_HEAD(&newpriv->ops_list);

	list_for_each_entry(pos, &priv->ops_list, list) {
		newop = bpf_map_op__clone(pos);
		if (!newop) {
			bpf_map_priv__purge(newpriv);
			return NULL;
		}
		list_add_tail(&newop->list, &newpriv->ops_list);
	}

	return newpriv;
}
static int
bpf_map__add_op(struct bpf_map *map, struct bpf_map_op *op)
{
	const char *map_name = bpf_map__name(map);
	struct bpf_map_priv *priv = bpf_map__priv(map);

	if (IS_ERR(priv)) {
		pr_debug("Failed to get private from map %s\n", map_name);
		return PTR_ERR(priv);
	}

	if (!priv) {
		priv = zalloc(sizeof(*priv));
		if (!priv) {
			pr_debug("Not enough memory to alloc map private\n");
			return -ENOMEM;
		}
		INIT_LIST_HEAD(&priv->ops_list);

		if (bpf_map__set_priv(map, priv, bpf_map_priv__clear)) {
			free(priv);
			return -BPF_LOADER_ERRNO__INTERNAL;
		}
	}

	list_add_tail(&op->list, &priv->ops_list);
	return 0;
}

static struct bpf_map_op *
bpf_map__add_newop(struct bpf_map *map, struct parse_events_term *term)
{
	struct bpf_map_op *op;
	int err;

	op = bpf_map_op__new(term);
	if (IS_ERR(op))
		return op;

	err = bpf_map__add_op(map, op);
	if (err) {
		bpf_map_op__delete(op);
		return ERR_PTR(err);
	}
	return op;
}
static int
__bpf_map__config_value(struct bpf_map *map,
			struct parse_events_term *term)
{
	struct bpf_map_op *op;
	const char *map_name = bpf_map__name(map);
	const struct bpf_map_def *def = bpf_map__def(map);

	if (IS_ERR(def)) {
		pr_debug("Unable to get map definition from '%s'\n",
			 map_name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	if (def->type != BPF_MAP_TYPE_ARRAY) {
		pr_debug("Map %s type is not BPF_MAP_TYPE_ARRAY\n",
			 map_name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
	}
	if (def->key_size < sizeof(unsigned int)) {
		pr_debug("Map %s has incorrect key size\n", map_name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_KEYSIZE;
	}
	switch (def->value_size) {
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		pr_debug("Map %s has incorrect value size\n", map_name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUESIZE;
	}

	op = bpf_map__add_newop(map, term);
	if (IS_ERR(op))
		return PTR_ERR(op);
	op->op_type = BPF_MAP_OP_SET_VALUE;
	op->v.value = term->val.num;
	return 0;
}

static int
bpf_map__config_value(struct bpf_map *map,
		      struct parse_events_term *term,
		      struct perf_evlist *evlist __maybe_unused)
{
	if (!term->err_val) {
		pr_debug("Config value not set\n");
		return -BPF_LOADER_ERRNO__OBJCONF_CONF;
	}

	if (term->type_val != PARSE_EVENTS__TERM_TYPE_NUM) {
		pr_debug("ERROR: wrong value type for 'value'\n");
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE;
	}

	return __bpf_map__config_value(map, term);
}
static int
__bpf_map__config_event(struct bpf_map *map,
			struct parse_events_term *term,
			struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	const struct bpf_map_def *def;
	struct bpf_map_op *op;
	const char *map_name = bpf_map__name(map);

	evsel = perf_evlist__find_evsel_by_str(evlist, term->val.str);
	if (!evsel) {
		pr_debug("Event (for '%s') '%s' doesn't exist\n",
			 map_name, term->val.str);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_NOEVT;
	}

	def = bpf_map__def(map);
	if (IS_ERR(def)) {
		pr_debug("Unable to get map definition from '%s'\n",
			 map_name);
		return PTR_ERR(def);
	}

	/*
	 * No need to check key_size and value_size:
	 * kernel has already checked them.
	 */
	if (def->type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
		pr_debug("Map %s type is not BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
			 map_name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
	}

	op = bpf_map__add_newop(map, term);
	if (IS_ERR(op))
		return PTR_ERR(op);
	op->op_type = BPF_MAP_OP_SET_EVSEL;
	op->v.evsel = evsel;
	return 0;
}

static int
bpf_map__config_event(struct bpf_map *map,
		      struct parse_events_term *term,
		      struct perf_evlist *evlist)
{
	if (!term->err_val) {
		pr_debug("Config value not set\n");
		return -BPF_LOADER_ERRNO__OBJCONF_CONF;
	}

	if (term->type_val != PARSE_EVENTS__TERM_TYPE_STR) {
		pr_debug("ERROR: wrong value type for 'event'\n");
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE;
	}

	return __bpf_map__config_event(map, term, evlist);
}
struct bpf_obj_config__map_func {
	const char *config_opt;
	int (*config_func)(struct bpf_map *, struct parse_events_term *,
			   struct perf_evlist *);
};

struct bpf_obj_config__map_func bpf_obj_config__map_funcs[] = {
	{"value", bpf_map__config_value},
	{"event", bpf_map__config_event},
};
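
/*
 * Illustrative command-line terms handled by the table above (map and
 * event names are hypothetical):
 *
 *	perf record -e './prog.c/map:flags.value=1/' ...
 *	perf record -e './prog.c/map:channel.event=mypmu/' ...
 *
 * "value" stores a constant into BPF_MAP_TYPE_ARRAY slots; "event"
 * wires an evsel's fd into a BPF_MAP_TYPE_PERF_EVENT_ARRAY.
 */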
static int
config_map_indices_range_check(struct parse_events_term *term,
			       struct bpf_map *map,
			       const char *map_name)
{
	struct parse_events_array *array = &term->array;
	const struct bpf_map_def *def;
	unsigned int i;

	if (!array->nr_ranges)
		return 0;
	if (!array->ranges) {
		pr_debug("ERROR: map %s: array->nr_ranges is %d but range array is NULL\n",
			 map_name, (int)array->nr_ranges);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	def = bpf_map__def(map);
	if (IS_ERR(def)) {
		pr_debug("ERROR: Unable to get map definition from '%s'\n",
			 map_name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	for (i = 0; i < array->nr_ranges; i++) {
		unsigned int start = array->ranges[i].start;
		size_t length = array->ranges[i].length;
		unsigned int idx = start + length - 1;

		if (idx >= def->max_entries) {
			pr_debug("ERROR: index %d too large\n", idx);
			return -BPF_LOADER_ERRNO__OBJCONF_MAP_IDX2BIG;
		}
	}
	return 0;
}
static int
bpf__obj_config_map(struct bpf_object *obj,
		    struct parse_events_term *term,
		    struct perf_evlist *evlist,
		    int *key_scan_pos)
{
	/* key is "map:<mapname>.<config opt>" */
	char *map_name = strdup(term->config + sizeof("map:") - 1);
	struct bpf_map *map;
	int err = -BPF_LOADER_ERRNO__OBJCONF_OPT;
	char *map_opt;
	size_t i;

	if (!map_name)
		return -ENOMEM;

	map_opt = strchr(map_name, '.');
	if (!map_opt) {
		pr_debug("ERROR: Invalid map config: %s\n", map_name);
		goto out;
	}

	*map_opt++ = '\0';
	if (*map_opt == '\0') {
		pr_debug("ERROR: Invalid map option: %s\n", term->config);
		goto out;
	}

	map = bpf_object__find_map_by_name(obj, map_name);
	if (!map) {
		pr_debug("ERROR: Map %s doesn't exist\n", map_name);
		err = -BPF_LOADER_ERRNO__OBJCONF_MAP_NOTEXIST;
		goto out;
	}

	*key_scan_pos += strlen(map_opt);
	err = config_map_indices_range_check(term, map, map_name);
	if (err)
		goto out;
	*key_scan_pos -= strlen(map_opt);

	for (i = 0; i < ARRAY_SIZE(bpf_obj_config__map_funcs); i++) {
		struct bpf_obj_config__map_func *func =
				&bpf_obj_config__map_funcs[i];

		if (strcmp(map_opt, func->config_opt) == 0) {
			err = func->config_func(map, term, evlist);
			goto out;
		}
	}

	pr_debug("ERROR: Invalid map config option '%s'\n", map_opt);
	err = -BPF_LOADER_ERRNO__OBJCONF_MAP_OPT;
out:
	/*
	 * map_opt points into map_name, so measure it before freeing,
	 * and update the caller's scan position through the pointer.
	 */
	if (!err)
		*key_scan_pos += strlen(map_opt);
	free(map_name);
	return err;
}
int bpf__config_obj(struct bpf_object *obj,
		    struct parse_events_term *term,
		    struct perf_evlist *evlist,
		    int *error_pos)
{
	int key_scan_pos = 0;
	int err;

	if (!obj || !term || !term->config)
		return -EINVAL;

	if (!prefixcmp(term->config, "map:")) {
		key_scan_pos = sizeof("map:") - 1;
		err = bpf__obj_config_map(obj, term, evlist, &key_scan_pos);
		goto out;
	}
	err = -BPF_LOADER_ERRNO__OBJCONF_OPT;
out:
	if (error_pos)
		*error_pos = key_scan_pos;
	return err;
}
typedef int (*map_config_func_t)(const char *name, int map_fd,
				 const struct bpf_map_def *pdef,
				 struct bpf_map_op *op,
				 void *pkey, void *arg);

static int
foreach_key_array_all(map_config_func_t func,
		      void *arg, const char *name,
		      int map_fd, const struct bpf_map_def *pdef,
		      struct bpf_map_op *op)
{
	unsigned int i;
	int err;

	for (i = 0; i < pdef->max_entries; i++) {
		err = func(name, map_fd, pdef, op, &i, arg);
		if (err) {
			pr_debug("ERROR: failed to insert value to %s[%u]\n",
				 name, i);
			return err;
		}
	}
	return 0;
}

static int
foreach_key_array_ranges(map_config_func_t func, void *arg,
			 const char *name, int map_fd,
			 const struct bpf_map_def *pdef,
			 struct bpf_map_op *op)
{
	unsigned int i, j;
	int err;

	for (i = 0; i < op->k.array.nr_ranges; i++) {
		unsigned int start = op->k.array.ranges[i].start;
		size_t length = op->k.array.ranges[i].length;

		for (j = 0; j < length; j++) {
			unsigned int idx = start + j;

			err = func(name, map_fd, pdef, op, &idx, arg);
			if (err) {
				pr_debug("ERROR: failed to insert value to %s[%u]\n",
					 name, idx);
				return err;
			}
		}
	}
	return 0;
}
static int
bpf_map_config_foreach_key(struct bpf_map *map,
			   map_config_func_t func,
			   void *arg)
{
	int err, map_fd;
	struct bpf_map_op *op;
	const struct bpf_map_def *def;
	const char *name = bpf_map__name(map);
	struct bpf_map_priv *priv = bpf_map__priv(map);

	if (IS_ERR(priv)) {
		pr_debug("ERROR: failed to get private from map %s\n", name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}
	if (!priv || list_empty(&priv->ops_list)) {
		pr_debug("INFO: nothing to config for map %s\n", name);
		return 0;
	}

	def = bpf_map__def(map);
	if (IS_ERR(def)) {
		pr_debug("ERROR: failed to get definition from map %s\n", name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}
	map_fd = bpf_map__fd(map);
	if (map_fd < 0) {
		pr_debug("ERROR: failed to get fd from map %s\n", name);
		return map_fd;
	}

	list_for_each_entry(op, &priv->ops_list, list) {
		switch (def->type) {
		case BPF_MAP_TYPE_ARRAY:
		case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
			switch (op->key_type) {
			case BPF_MAP_KEY_ALL:
				err = foreach_key_array_all(func, arg, name,
							    map_fd, def, op);
				break;
			case BPF_MAP_KEY_RANGES:
				err = foreach_key_array_ranges(func, arg, name,
							       map_fd, def,
							       op);
				break;
			default:
				pr_debug("ERROR: keytype for map '%s' invalid\n",
					 name);
				return -BPF_LOADER_ERRNO__INTERNAL;
			}
			if (err)
				return err;
			break;
		default:
			pr_debug("ERROR: type of '%s' incorrect\n", name);
			return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
		}
	}

	return 0;
}
static int
apply_config_value_for_key(int map_fd, void *pkey,
			   size_t val_size, u64 val)
{
	int err = 0;

	switch (val_size) {
	case 1: {
		u8 _val = (u8)(val);

		err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
		break;
	}
	case 2: {
		u16 _val = (u16)(val);

		err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
		break;
	}
	case 4: {
		u32 _val = (u32)(val);

		err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
		break;
	}
	case 8: {
		err = bpf_map_update_elem(map_fd, pkey, &val, BPF_ANY);
		break;
	}
	default:
		pr_debug("ERROR: invalid value size\n");
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUESIZE;
	}
	if (err && errno)
		err = -errno;
	return err;
}
static int
apply_config_evsel_for_key(const char *name, int map_fd, void *pkey,
			   struct perf_evsel *evsel)
{
	struct xyarray *xy = evsel->fd;
	struct perf_event_attr *attr;
	unsigned int key, events;
	bool check_pass = false;
	int *evt_fd;
	int err;

	if (!xy) {
		pr_debug("ERROR: evsel not ready for map %s\n", name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	if (xy->row_size / xy->entry_size != 1) {
		pr_debug("ERROR: Dimension of target event is incorrect for map %s\n",
			 name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTDIM;
	}

	attr = &evsel->attr;
	if (attr->inherit) {
		pr_debug("ERROR: Can't put inherit event into map %s\n", name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTINH;
	}

	if (perf_evsel__is_bpf_output(evsel))
		check_pass = true;
	if (attr->type == PERF_TYPE_RAW)
		check_pass = true;
	if (attr->type == PERF_TYPE_HARDWARE)
		check_pass = true;
	if (!check_pass) {
		pr_debug("ERROR: Event type is wrong for map %s\n", name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTTYPE;
	}

	events = xy->entries / (xy->row_size / xy->entry_size);
	key = *((unsigned int *)pkey);
	if (key >= events) {
		pr_debug("ERROR: there is no event %d for map %s\n",
			 key, name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_MAPSIZE;
	}
	evt_fd = xyarray__entry(xy, key, 0);
	err = bpf_map_update_elem(map_fd, pkey, evt_fd, BPF_ANY);
	if (err && errno)
		err = -errno;
	return err;
}
static int
apply_obj_config_map_for_key(const char *name, int map_fd,
			     const struct bpf_map_def *pdef,
			     struct bpf_map_op *op,
			     void *pkey, void *arg __maybe_unused)
{
	int err;

	switch (op->op_type) {
	case BPF_MAP_OP_SET_VALUE:
		err = apply_config_value_for_key(map_fd, pkey,
						 pdef->value_size,
						 op->v.value);
		break;
	case BPF_MAP_OP_SET_EVSEL:
		err = apply_config_evsel_for_key(name, map_fd, pkey,
						 op->v.evsel);
		break;
	default:
		pr_debug("ERROR: unknown value type for '%s'\n", name);
		err = -BPF_LOADER_ERRNO__INTERNAL;
	}
	return err;
}

static int
apply_obj_config_map(struct bpf_map *map)
{
	return bpf_map_config_foreach_key(map,
					  apply_obj_config_map_for_key,
					  NULL);
}

static int
apply_obj_config_object(struct bpf_object *obj)
{
	struct bpf_map *map;
	int err;

	bpf_map__for_each(map, obj) {
		err = apply_obj_config_map(map);
		if (err)
			return err;
	}
	return 0;
}

int bpf__apply_obj_config(void)
{
	struct bpf_object *obj, *tmp;
	int err;

	bpf_object__for_each_safe(obj, tmp) {
		err = apply_obj_config_object(obj);
		if (err)
			return err;
	}
	return 0;
}
#define bpf__for_each_map(pos, obj, objtmp)	\
	bpf_object__for_each_safe(obj, objtmp)	\
		bpf_map__for_each(pos, obj)

#define bpf__for_each_stdout_map(pos, obj, objtmp)	\
	bpf__for_each_map(pos, obj, objtmp)		\
		if (bpf_map__name(pos) &&		\
			(strcmp("__bpf_stdout__",	\
				bpf_map__name(pos)) == 0))
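
/*
 * Note the macro chaining: the nested for-each macros expand to two
 * loops, and the trailing if makes the caller's body run only for
 * maps literally named "__bpf_stdout__". The body supplied by the
 * caller binds to that if, so non-matching maps are skipped without
 * an explicit continue.
 */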
int bpf__setup_stdout(struct perf_evlist *evlist)
{
	struct bpf_map_priv *tmpl_priv = NULL;
	struct bpf_object *obj, *tmp;
	struct perf_evsel *evsel = NULL;
	struct bpf_map *map;
	int err;
	bool need_init = false;

	bpf__for_each_stdout_map(map, obj, tmp) {
		struct bpf_map_priv *priv = bpf_map__priv(map);

		if (IS_ERR(priv))
			return -BPF_LOADER_ERRNO__INTERNAL;

		/*
		 * No need to check map type: type should have been
		 * verified by kernel.
		 */
		if (!need_init && !priv)
			need_init = !priv;
		if (!tmpl_priv && priv)
			tmpl_priv = priv;
	}

	if (!need_init)
		return 0;

	if (!tmpl_priv) {
		err = parse_events(evlist, "bpf-output/no-inherit=1,name=__bpf_stdout__/",
				   NULL);
		if (err) {
			pr_debug("ERROR: failed to create bpf-output event\n");
			return -err;
		}

		evsel = perf_evlist__last(evlist);
	}

	bpf__for_each_stdout_map(map, obj, tmp) {
		struct bpf_map_priv *priv = bpf_map__priv(map);

		if (IS_ERR(priv))
			return -BPF_LOADER_ERRNO__INTERNAL;
		if (priv)
			continue;

		if (tmpl_priv) {
			priv = bpf_map_priv__clone(tmpl_priv);
			if (!priv)
				return -ENOMEM;

			err = bpf_map__set_priv(map, priv, bpf_map_priv__clear);
			if (err) {
				bpf_map_priv__clear(map, priv);
				return err;
			}
		} else if (evsel) {
			struct bpf_map_op *op;

			op = bpf_map__add_newop(map, NULL);
			if (IS_ERR(op))
				return PTR_ERR(op);
			op->op_type = BPF_MAP_OP_SET_EVSEL;
			op->v.evsel = evsel;
		}
	}

	return 0;
}
#define ERRNO_OFFSET(e)		((e) - __BPF_LOADER_ERRNO__START)
#define ERRCODE_OFFSET(c)	ERRNO_OFFSET(BPF_LOADER_ERRNO__##c)
#define NR_ERRNO	(__BPF_LOADER_ERRNO__END - __BPF_LOADER_ERRNO__START)
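
/*
 * The BPF_LOADER_ERRNO__* codes occupy their own range (between
 * __BPF_LOADER_ERRNO__START and __BPF_LOADER_ERRNO__END, declared in
 * bpf-loader.h), distinct from both ordinary errnos and libbpf's
 * error range, so ERRNO_OFFSET() can turn a code into an index into
 * the string table below and bpf_loader_strerror() can dispatch on
 * the range.
 */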
static const char *bpf_loader_strerror_table[NR_ERRNO] = {
	[ERRCODE_OFFSET(CONFIG)]	= "Invalid config string",
	[ERRCODE_OFFSET(GROUP)]		= "Invalid group name",
	[ERRCODE_OFFSET(EVENTNAME)]	= "No event name found in config string",
	[ERRCODE_OFFSET(INTERNAL)]	= "BPF loader internal error",
	[ERRCODE_OFFSET(COMPILE)]	= "Error when compiling BPF scriptlet",
	[ERRCODE_OFFSET(PROGCONF_TERM)]	= "Invalid program config term in config string",
	[ERRCODE_OFFSET(PROLOGUE)]	= "Failed to generate prologue",
	[ERRCODE_OFFSET(PROLOGUE2BIG)]	= "Prologue too big for program",
	[ERRCODE_OFFSET(PROLOGUEOOB)]	= "Offset out of bound for prologue",
	[ERRCODE_OFFSET(OBJCONF_OPT)]	= "Invalid object config option",
	[ERRCODE_OFFSET(OBJCONF_CONF)]	= "Config value not set (missing '=')",
	[ERRCODE_OFFSET(OBJCONF_MAP_OPT)]	= "Invalid object map config option",
	[ERRCODE_OFFSET(OBJCONF_MAP_NOTEXIST)]	= "Target map doesn't exist",
	[ERRCODE_OFFSET(OBJCONF_MAP_VALUE)]	= "Incorrect value type for map",
	[ERRCODE_OFFSET(OBJCONF_MAP_TYPE)]	= "Incorrect map type",
	[ERRCODE_OFFSET(OBJCONF_MAP_KEYSIZE)]	= "Incorrect map key size",
	[ERRCODE_OFFSET(OBJCONF_MAP_VALUESIZE)]	= "Incorrect map value size",
	[ERRCODE_OFFSET(OBJCONF_MAP_NOEVT)]	= "Event not found for map setting",
	[ERRCODE_OFFSET(OBJCONF_MAP_MAPSIZE)]	= "Invalid map size for event setting",
	[ERRCODE_OFFSET(OBJCONF_MAP_EVTDIM)]	= "Event dimension too large",
	[ERRCODE_OFFSET(OBJCONF_MAP_EVTINH)]	= "Doesn't support inherit event",
	[ERRCODE_OFFSET(OBJCONF_MAP_EVTTYPE)]	= "Wrong event type for map",
	[ERRCODE_OFFSET(OBJCONF_MAP_IDX2BIG)]	= "Index too large",
};
static int
bpf_loader_strerror(int err, char *buf, size_t size)
{
	char sbuf[STRERR_BUFSIZE];
	const char *msg;

	if (!buf || !size)
		return -1;

	err = err > 0 ? err : -err;

	if (err >= __LIBBPF_ERRNO__START)
		return libbpf_strerror(err, buf, size);

	if (err >= __BPF_LOADER_ERRNO__START && err < __BPF_LOADER_ERRNO__END) {
		msg = bpf_loader_strerror_table[ERRNO_OFFSET(err)];
		snprintf(buf, size, "%s", msg);
		buf[size - 1] = '\0';
		return 0;
	}

	if (err >= __BPF_LOADER_ERRNO__END)
		snprintf(buf, size, "Unknown bpf loader error %d", err);
	else
		snprintf(buf, size, "%s",
			 str_error_r(err, sbuf, sizeof(sbuf)));

	buf[size - 1] = '\0';
	return -1;
}
#define bpf__strerror_head(err, buf, size) \
	char sbuf[STRERR_BUFSIZE], *emsg;\
	if (!size)\
		return 0;\
	if (err < 0)\
		err = -err;\
	bpf_loader_strerror(err, sbuf, sizeof(sbuf));\
	emsg = sbuf;\
	switch (err) {\
	default:\
		scnprintf(buf, size, "%s", emsg);\
		break;

#define bpf__strerror_entry(val, fmt...)\
	case val: {\
		scnprintf(buf, size, fmt);\
		break;\
	}

#define bpf__strerror_end(buf, size)\
	}\
	buf[size - 1] = '\0';
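
/*
 * These three macros expand into one switch statement: _head opens
 * the switch (with a default case that prints the generic message),
 * each _entry adds a case with a specialized message, and _end closes
 * the switch and NUL-terminates the buffer. See the functions below
 * for usage.
 */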
int bpf__strerror_prepare_load(const char *filename, bool source,
			       int err, char *buf, size_t size)
{
	size_t n;
	int ret;

	n = snprintf(buf, size, "Failed to load %s%s: ",
		     filename, source ? " from source" : "");
	if (n >= size) {
		buf[size - 1] = '\0';
		return 0;
	}
	buf += n;
	size -= n;

	ret = bpf_loader_strerror(err, buf, size);
	buf[size - 1] = '\0';
	return ret;
}

int bpf__strerror_probe(struct bpf_object *obj __maybe_unused,
			int err, char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	case BPF_LOADER_ERRNO__PROGCONF_TERM: {
		scnprintf(buf, size, "%s (add -v to see detail)", emsg);
		break;
	}
	bpf__strerror_entry(EEXIST, "Probe point exists. Try 'perf probe -d \"*\"' and set 'force=yes'");
	bpf__strerror_entry(EACCES, "You need to be root");
	bpf__strerror_entry(EPERM, "You need to be root, and /proc/sys/kernel/kptr_restrict should be 0");
	bpf__strerror_entry(ENOENT, "You need to check probing points in BPF file");
	bpf__strerror_end(buf, size);
	return 0;
}
int bpf__strerror_load(struct bpf_object *obj,
		       int err, char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	case LIBBPF_ERRNO__KVER: {
		unsigned int obj_kver = bpf_object__kversion(obj);
		unsigned int real_kver;

		if (fetch_kernel_version(&real_kver, NULL, 0)) {
			scnprintf(buf, size, "Unable to fetch kernel version");
			break;
		}

		if (obj_kver != real_kver) {
			scnprintf(buf, size,
				  "'version' ("KVER_FMT") doesn't match running kernel ("KVER_FMT")",
				  KVER_PARAM(obj_kver),
				  KVER_PARAM(real_kver));
			break;
		}

		scnprintf(buf, size, "Failed to load program for unknown reason");
		break;
	}
	bpf__strerror_end(buf, size);
	return 0;
}
int bpf__strerror_config_obj(struct bpf_object *obj __maybe_unused,
			     struct parse_events_term *term __maybe_unused,
			     struct perf_evlist *evlist __maybe_unused,
			     int *error_pos __maybe_unused, int err,
			     char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE,
			    "Can't use this config term with this map type");
	bpf__strerror_end(buf, size);
	return 0;
}

int bpf__strerror_apply_obj_config(int err, char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTDIM,
			    "Cannot set event to BPF map in multi-thread tracing");
	bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTINH,
			    "%s (Hint: use -i to turn off inherit)", emsg);
	bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTTYPE,
			    "Can only put raw, hardware and BPF output event into a BPF map");
	bpf__strerror_end(buf, size);
	return 0;
}

int bpf__strerror_setup_stdout(struct perf_evlist *evlist __maybe_unused,
			       int err, char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	bpf__strerror_end(buf, size);
	return 0;
}