// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/*
 * Copyright (C) 2018 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below. You have the option to license this software under the complete
 * terms of either license.
 *
 * The BSD 2-Clause License:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * 1. Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/list.h>
#include <stdlib.h>
#include <string.h>

#include "cfg.h"
#include "main.h"
#include "xlated_dumper.h"
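
/*
 * Build a control flow graph (CFG) from a buffer of translated (xlated)
 * eBPF instructions and print it as a DOT digraph, which tools such as
 * Graphviz can render.  A cfg is a sorted list of functions, a function
 * is a sorted list of basic blocks, and every basic block keeps lists of
 * its predecessor and successor edges.
 */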

struct cfg {
	struct list_head funcs;
	int func_num;
};
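
/* One function: the instructions in [start, end] plus its basic blocks. */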
struct func_node {
	struct list_head l;
	struct list_head bbs;
	struct bpf_insn *start;
	struct bpf_insn *end;
	int idx;
	int bb_num;
};
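
/* One basic block: the instructions in [head, tail] plus its in/out edges. */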
struct bb_node {
	struct list_head l;
	struct list_head e_prevs;
	struct list_head e_succs;
	struct bpf_insn *head;
	struct bpf_insn *tail;
	int idx;
};
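
/* A directed edge from src to dst; flags record how control is transferred. */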
#define EDGE_FLAG_EMPTY		0x0
#define EDGE_FLAG_FALLTHROUGH	0x1
#define EDGE_FLAG_JUMP		0x2

struct edge_node {
	struct list_head l;
	struct bb_node *src;
	struct bb_node *dst;
	int flags;
};
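
/* Every function is given two synthetic blocks, ENTRY and EXIT, so the
 * indices of real basic blocks start at NUM_FIXED_BLOCKS.
 */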
#define ENTRY_BLOCK_INDEX	0
#define EXIT_BLOCK_INDEX	1
#define NUM_FIXED_BLOCKS	2

#define func_prev(func)		list_prev_entry(func, l)
#define func_next(func)		list_next_entry(func, l)
#define bb_prev(bb)		list_prev_entry(bb, l)
#define bb_next(bb)		list_next_entry(bb, l)
#define entry_bb(func)		func_first_bb(func)
#define exit_bb(func)		func_last_bb(func)
#define cfg_first_func(cfg)	\
	list_first_entry(&cfg->funcs, struct func_node, l)
#define cfg_last_func(cfg)	\
	list_last_entry(&cfg->funcs, struct func_node, l)
#define func_first_bb(func)	\
	list_first_entry(&func->bbs, struct bb_node, l)
#define func_last_bb(func)	\
	list_last_entry(&func->bbs, struct bb_node, l)
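
/* Return the function starting at insn, creating it and inserting it in
 * start-address order first if it does not exist yet.  Returns NULL on
 * allocation failure.
 */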
static struct func_node *cfg_append_func(struct cfg *cfg, struct bpf_insn *insn)
{
	struct func_node *new_func, *func;

	list_for_each_entry(func, &cfg->funcs, l) {
		if (func->start == insn)
			return func;
		else if (func->start > insn)
			break;
	}

	func = func_prev(func);
	new_func = calloc(1, sizeof(*new_func));
	if (!new_func) {
		p_err("OOM when allocating FUNC node");
		return NULL;
	}
	new_func->start = insn;
	new_func->idx = cfg->func_num;
	list_add(&new_func->l, &func->l);
	cfg->func_num++;

	return new_func;
}
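
/* Return the basic block headed by insn, creating it and inserting it in
 * head-address order first if it does not exist yet.  Returns NULL on
 * allocation failure.
 */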
static struct bb_node *func_append_bb(struct func_node *func,
				      struct bpf_insn *insn)
{
	struct bb_node *new_bb, *bb;

	list_for_each_entry(bb, &func->bbs, l) {
		if (bb->head == insn)
			return bb;
		else if (bb->head > insn)
			break;
	}

	bb = bb_prev(bb);
	new_bb = calloc(1, sizeof(*new_bb));
	if (!new_bb) {
		p_err("OOM when allocating BB node");
		return NULL;
	}
	new_bb->head = insn;
	INIT_LIST_HEAD(&new_bb->e_prevs);
	INIT_LIST_HEAD(&new_bb->e_succs);
	list_add(&new_bb->l, &bb->l);

	return new_bb;
}

static struct bb_node *func_insert_dummy_bb(struct list_head *after)
{
	struct bb_node *bb;

	bb = calloc(1, sizeof(*bb));
	if (!bb) {
		p_err("OOM when allocating BB node");
		return NULL;
	}

	INIT_LIST_HEAD(&bb->e_prevs);
	INIT_LIST_HEAD(&bb->e_succs);
	list_add(&bb->l, after);

	return bb;
}
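
/* Split [cur, end) into functions: a new function starts at the target of
 * every BPF_PSEUDO_CALL, then each function's end is set to the instruction
 * before the next function's start.  Returns true on error.
 */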
static bool cfg_partition_funcs(struct cfg *cfg, struct bpf_insn *cur,
				struct bpf_insn *end)
{
	struct func_node *func, *last_func;

	func = cfg_append_func(cfg, cur);
	if (!func)
		return true;

	for (; cur < end; cur++) {
		if (cur->code != (BPF_JMP | BPF_CALL))
			continue;
		if (cur->src_reg != BPF_PSEUDO_CALL)
			continue;
		func = cfg_append_func(cfg, cur + cur->off + 1);
		if (!func)
			return true;
	}

	last_func = cfg_last_func(cfg);
	last_func->end = end - 1;
	func = cfg_first_func(cfg);
	list_for_each_entry_from(func, &last_func->l, l) {
		func->end = func_next(func)->start - 1;
	}

	return false;
}
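
/* First pass over one function: record every basic block head.  A new
 * block starts at each jump target and, after a conditional jump, at the
 * fall-through instruction as well.  Returns true on error.
 */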
static bool func_partition_bb_head(struct func_node *func)
{
	struct bpf_insn *cur, *end;
	struct bb_node *bb;

	cur = func->start;
	end = func->end;
	INIT_LIST_HEAD(&func->bbs);
	bb = func_append_bb(func, cur);
	if (!bb)
		return true;

	for (; cur <= end; cur++) {
		if (BPF_CLASS(cur->code) == BPF_JMP) {
			u8 opcode = BPF_OP(cur->code);

			if (opcode == BPF_EXIT || opcode == BPF_CALL)
				continue;

			bb = func_append_bb(func, cur + cur->off + 1);
			if (!bb)
				return true;

			if (opcode != BPF_JA) {
				bb = func_append_bb(func, cur + 1);
				if (!bb)
					return true;
			}
		}
	}

	return false;
}
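
/* Second pass: with all heads known, each block's tail is the instruction
 * preceding the next block's head, and the blocks are numbered upwards
 * from NUM_FIXED_BLOCKS.
 */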
static void func_partition_bb_tail(struct func_node *func)
{
	unsigned int bb_idx = NUM_FIXED_BLOCKS;
	struct bb_node *bb, *last;

	last = func_last_bb(func);
	last->tail = func->end;
	bb = func_first_bb(func);
	list_for_each_entry_from(bb, &last->l, l) {
		bb->tail = bb_next(bb)->head - 1;
		bb->idx = bb_idx++;
	}

	last->idx = bb_idx++;
	func->bb_num = bb_idx;
}
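
/* Insert the synthetic ENTRY block at the front of the block list and the
 * synthetic EXIT block at the back.  Returns true on error.
 */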
static bool func_add_special_bb(struct func_node *func)
{
	struct bb_node *bb;

	bb = func_insert_dummy_bb(&func->bbs);
	if (!bb)
		return true;
	bb->idx = ENTRY_BLOCK_INDEX;

	bb = func_insert_dummy_bb(&func_last_bb(func)->l);
	if (!bb)
		return true;
	bb->idx = EXIT_BLOCK_INDEX;

	return false;
}

static bool func_partition_bb(struct func_node *func)
{
	if (func_partition_bb_head(func))
		return true;

	func_partition_bb_tail(func);

	return false;
}

static struct bb_node *func_search_bb_with_head(struct func_node *func,
						struct bpf_insn *insn)
{
	struct bb_node *bb;

	list_for_each_entry(bb, &func->bbs, l) {
		if (bb->head == insn)
			return bb;
	}

	return NULL;
}

static struct edge_node *new_edge(struct bb_node *src, struct bb_node *dst,
				  int flags)
{
	struct edge_node *e;

	e = calloc(1, sizeof(*e));
	if (!e) {
		p_err("OOM when allocating edge node");
		return NULL;
	}

	if (src)
		e->src = src;
	if (dst)
		e->dst = dst;

	e->flags |= flags;

	return e;
}
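
/* Wire up the edges: ENTRY falls through to the first real block and the
 * last real block falls through to EXIT; every other block gets a
 * fall-through edge, a jump edge, or both, depending on its tail
 * instruction.  Returns true on error.
 */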
static bool func_add_bb_edges(struct func_node *func)
{
	struct bpf_insn *insn;
	struct edge_node *e;
	struct bb_node *bb;

	bb = entry_bb(func);
	e = new_edge(bb, bb_next(bb), EDGE_FLAG_FALLTHROUGH);
	if (!e)
		return true;
	list_add_tail(&e->l, &bb->e_succs);

	bb = exit_bb(func);
	e = new_edge(bb_prev(bb), bb, EDGE_FLAG_FALLTHROUGH);
	if (!e)
		return true;
	list_add_tail(&e->l, &bb->e_prevs);

	bb = entry_bb(func);
	bb = bb_next(bb);
	list_for_each_entry_from(bb, &exit_bb(func)->l, l) {
		e = new_edge(bb, NULL, EDGE_FLAG_EMPTY);
		if (!e)
			return true;
		e->src = bb;

		insn = bb->tail;
		if (BPF_CLASS(insn->code) != BPF_JMP ||
		    BPF_OP(insn->code) == BPF_EXIT) {
			e->dst = bb_next(bb);
			e->flags |= EDGE_FLAG_FALLTHROUGH;
			list_add_tail(&e->l, &bb->e_succs);
			continue;
		} else if (BPF_OP(insn->code) == BPF_JA) {
			e->dst = func_search_bb_with_head(func,
							  insn + insn->off + 1);
			e->flags |= EDGE_FLAG_JUMP;
			list_add_tail(&e->l, &bb->e_succs);
			continue;
		}

		e->dst = bb_next(bb);
		e->flags |= EDGE_FLAG_FALLTHROUGH;
		list_add_tail(&e->l, &bb->e_succs);

		e = new_edge(bb, NULL, EDGE_FLAG_JUMP);
		if (!e)
			return true;
		e->src = bb;
		e->dst = func_search_bb_with_head(func, insn + insn->off + 1);
		list_add_tail(&e->l, &bb->e_succs);
	}

	return false;
}
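
/* Build the complete CFG: split the instruction stream into functions,
 * split each function into basic blocks, add the ENTRY/EXIT blocks, then
 * add the edges.  Returns true on error.
 */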
static bool cfg_build(struct cfg *cfg, struct bpf_insn *insn, unsigned int len)
{
	int cnt = len / sizeof(*insn);
	struct func_node *func;

	INIT_LIST_HEAD(&cfg->funcs);

	if (cfg_partition_funcs(cfg, insn, insn + cnt))
		return true;

	list_for_each_entry(func, &cfg->funcs, l) {
		if (func_partition_bb(func) || func_add_special_bb(func))
			return true;

		if (func_add_bb_edges(func))
			return true;
	}

	return false;
}
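
/* Free every edge, basic block and function hanging off the CFG. */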
static void cfg_destroy(struct cfg *cfg)
{
	struct func_node *func, *func2;

	list_for_each_entry_safe(func, func2, &cfg->funcs, l) {
		struct bb_node *bb, *bb2;

		list_for_each_entry_safe(bb, bb2, &func->bbs, l) {
			struct edge_node *e, *e2;

			list_for_each_entry_safe(e, e2, &bb->e_prevs, l) {
				list_del(&e->l);
				free(e);
			}

			list_for_each_entry_safe(e, e2, &bb->e_succs, l) {
				list_del(&e->l);
				free(e);
			}

			list_del(&bb->l);
			free(bb);
		}

		list_del(&func->l);
		free(func);
	}
}
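
/* Emit one DOT node: ENTRY and EXIT are drawn as "Mdiamond" shapes, real
 * blocks as "record" shapes labelled with their disassembled instructions.
 */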
static void draw_bb_node(struct func_node *func, struct bb_node *bb)
{
	const char *shape;

	if (bb->idx == ENTRY_BLOCK_INDEX || bb->idx == EXIT_BLOCK_INDEX)
		shape = "Mdiamond";
	else
		shape = "record";

	printf("\tfn_%d_bb_%d [shape=%s,style=filled,label=\"",
	       func->idx, bb->idx, shape);

	if (bb->idx == ENTRY_BLOCK_INDEX) {
		printf("ENTRY");
	} else if (bb->idx == EXIT_BLOCK_INDEX) {
		printf("EXIT");
	} else {
		unsigned int start_idx;
		struct dump_data dd = {};

		printf("{");
		kernel_syms_load(&dd);
		start_idx = bb->head - func->start;
		dump_xlated_for_graph(&dd, bb->head, bb->tail, start_idx);
		kernel_syms_destroy(&dd);
		printf("}");
	}

	printf("\"];\n\n");
}

static void draw_bb_succ_edges(struct func_node *func, struct bb_node *bb)
{
	const char *style = "\"solid,bold\"";
	const char *color = "black";
	int func_idx = func->idx;
	struct edge_node *e;
	int weight = 10;

	if (list_empty(&bb->e_succs))
		return;

	list_for_each_entry(e, &bb->e_succs, l) {
		printf("\tfn_%d_bb_%d:s -> fn_%d_bb_%d:n [style=%s, color=%s, weight=%d, constraint=true",
		       func_idx, e->src->idx, func_idx, e->dst->idx,
		       style, color, weight);
		printf("];\n");
	}
}

static void func_output_bb_def(struct func_node *func)
{
	struct bb_node *bb;

	list_for_each_entry(bb, &func->bbs, l) {
		draw_bb_node(func, bb);
	}
}

static void func_output_edges(struct func_node *func)
{
	int func_idx = func->idx;
	struct bb_node *bb;

	list_for_each_entry(bb, &func->bbs, l) {
		draw_bb_succ_edges(func, bb);
	}

	/* Add an invisible edge from ENTRY to EXIT; this improves the
	 * graph layout.
	 */
	printf("\tfn_%d_bb_%d:s -> fn_%d_bb_%d:n [style=\"invis\", constraint=true];\n",
	       func_idx, ENTRY_BLOCK_INDEX, func_idx, EXIT_BLOCK_INDEX);
}
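
/* Print the whole graph, one dashed cluster per function.  The output has
 * roughly this shape (sketch, with node bodies elided):
 *
 *	digraph "DOT graph for eBPF program" {
 *	subgraph "cluster_0" {
 *		...
 *		fn_0_bb_0 [shape=Mdiamond,style=filled,label="ENTRY"];
 *		fn_0_bb_0:s -> fn_0_bb_2:n [style="solid,bold", ...];
 *		...
 *	}
 *	}
 */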
static void cfg_dump(struct cfg *cfg)
{
	struct func_node *func;

	printf("digraph \"DOT graph for eBPF program\" {\n");
	list_for_each_entry(func, &cfg->funcs, l) {
		printf("subgraph \"cluster_%d\" {\n\tstyle=\"dashed\";\n\tcolor=\"black\";\n\tlabel=\"func_%d ()\";\n",
		       func->idx, func->idx);
		func_output_bb_def(func);
		func_output_edges(func);
		printf("}\n");
	}
	printf("}\n");
}
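
/* Entry point: build, print and destroy the CFG for len bytes of xlated
 * instructions.  A minimal sketch of a caller (hypothetical; insns is
 * assumed to hold valid instructions):
 *
 *	struct bpf_insn insns[] = { ... };
 *
 *	dump_xlated_cfg(insns, sizeof(insns));
 *
 * The DOT text goes to stdout and can be piped to e.g. "dot -Tpng".
 */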
void dump_xlated_cfg(void *buf, unsigned int len)
{
	struct bpf_insn *insn = buf;
	struct cfg cfg;

	memset(&cfg, 0, sizeof(cfg));

	if (cfg_build(&cfg, insn, len))
		return;

	cfg_dump(&cfg);

	cfg_destroy(&cfg);
}