/*
 * bpf-prologue.c
 *
 * Copyright (C) 2015 He Kuang <hekuang@huawei.com>
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 */

#include <bpf/libbpf.h>
#include "perf.h"
#include "debug.h"
#include "bpf-loader.h"
#include "bpf-prologue.h"
#include "probe-finder.h"
#include <dwarf-regs.h>
#include <linux/filter.h>

#define BPF_REG_SIZE		8

#define JMP_TO_ERROR_CODE	-1
#define JMP_TO_SUCCESS_CODE	-2
#define JMP_TO_USER_CODE	-3
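
/*
 * The JMP_TO_* values above are placeholder branch offsets: jumps are
 * first emitted with these negative offsets, and prologue_relocate()
 * later patches each one to the real distance to the error/success/user
 * code once those positions are known.
 */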

struct bpf_insn_pos {
	struct bpf_insn *begin;
	struct bpf_insn *end;
	struct bpf_insn *pos;
};

static inline int
pos_get_cnt(struct bpf_insn_pos *pos)
{
	return pos->pos - pos->begin;
}

static int
append_insn(struct bpf_insn new_insn, struct bpf_insn_pos *pos)
{
	if (!pos->pos)
		return -BPF_LOADER_ERRNO__PROLOGUE2BIG;

	if (pos->pos + 1 >= pos->end) {
		pr_err("bpf prologue: prologue too long\n");
		pos->pos = NULL;
		return -BPF_LOADER_ERRNO__PROLOGUE2BIG;
	}

	*(pos->pos)++ = new_insn;
	return 0;
}

static int
check_pos(struct bpf_insn_pos *pos)
{
	if (!pos->pos || pos->pos >= pos->end)
		return -BPF_LOADER_ERRNO__PROLOGUE2BIG;
	return 0;
}
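
/*
 * Note the error-latching pattern: append_insn() NULLs pos->pos on
 * overflow, which turns every later ins() into a no-op, so generators
 * can emit unconditionally and convert the latched overflow into an
 * error with a single check_pos() at the end.
 */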

/* Give it a shorter name */
#define ins(i, p) append_insn((i), (p))

/*
 * Given a register name (in 'reg'), generate an instruction to
 * load that register into eBPF register 'target_reg':
 *  'ldd target_reg, offset(ctx_reg)', where:
 *    ctx_reg is pre-initialized to a pointer to 'struct pt_regs'.
 */
static int
gen_ldx_reg_from_ctx(struct bpf_insn_pos *pos, int ctx_reg,
		     const char *reg, int target_reg)
{
	int offset = regs_query_register_offset(reg);

	if (offset < 0) {
		pr_err("bpf: prologue: failed to get register %s\n",
		       reg);
		return offset;
	}
	ins(BPF_LDX_MEM(BPF_DW, target_reg, ctx_reg, offset), pos);

	return check_pos(pos);
}
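
/*
 * For example (illustrative, arch-specific): on x86_64, asking for
 * register "ax" makes regs_query_register_offset() return
 * offsetof(struct pt_regs, ax), so the emitted instruction is
 * effectively:
 *
 *	target_reg = *(u64 *)(ctx_reg + offsetof(struct pt_regs, ax));
 */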

/*
 * Generate a BPF_FUNC_probe_read function call.
 *
 * src_base_addr_reg is a register holding the base address,
 * dst_addr_reg is a register holding the dest address (on stack),
 * result is:
 *
 *  *[dst_addr_reg] = *([src_base_addr_reg] + offset)
 *
 * Arguments of BPF_FUNC_probe_read:
 *  ARG1: ptr to stack (dest)
 *  ARG2: size (8)
 *  ARG3: unsafe ptr (src)
 */
static int
gen_read_mem(struct bpf_insn_pos *pos,
	     int src_base_addr_reg,
	     int dst_addr_reg,
	     long offset)
{
	/* mov arg3, src_base_addr_reg */
	if (src_base_addr_reg != BPF_REG_ARG3)
		ins(BPF_MOV64_REG(BPF_REG_ARG3, src_base_addr_reg), pos);
	/* add arg3, #offset */
	if (offset)
		ins(BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, offset), pos);

	/* mov arg2, #reg_size */
	ins(BPF_ALU64_IMM(BPF_MOV, BPF_REG_ARG2, BPF_REG_SIZE), pos);

	/* mov arg1, dst_addr_reg */
	if (dst_addr_reg != BPF_REG_ARG1)
		ins(BPF_MOV64_REG(BPF_REG_ARG1, dst_addr_reg), pos);

	/* Call probe_read */
	ins(BPF_EMIT_CALL(BPF_FUNC_probe_read), pos);
	/*
	 * Error processing: if the read fails, jump to the error code;
	 * the offset will be relocated. The target should be the start
	 * of the error processing code.
	 */
	ins(BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, JMP_TO_ERROR_CODE),
	    pos);

	return check_pos(pos);
}
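
/*
 * For a call such as gen_read_mem(pos, BPF_REG_3, BPF_REG_7, 16),
 * the emitted sequence is therefore (pseudo-asm):
 *
 *	r3 += 16
 *	r2 = 8
 *	r1 = r7
 *	call probe_read
 *	if r0 != 0 goto <error code>	// offset patched later
 */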

/*
 * Each arg should be a bare register. Fetch and save them into argument
 * registers (r3 - r5).
 *
 * BPF_REG_1 should have been initialized with a pointer to
 * 'struct pt_regs'.
 */
static int
gen_prologue_fastpath(struct bpf_insn_pos *pos,
		      struct probe_trace_arg *args, int nargs)
{
	int i, err = 0;

	for (i = 0; i < nargs; i++) {
		err = gen_ldx_reg_from_ctx(pos, BPF_REG_1, args[i].value,
					   BPF_PROLOGUE_START_ARG_REG + i);
		if (err)
			goto errout;
	}

	return check_pos(pos);
errout:
	return err;
}

/*
 * Slow path:
 * At least one argument has the form of 'offset($rx)'.
 *
 * The following code first stores them onto the stack, then loads all
 * of them into r2 - r5.
 * Before the final loading, the stack layout should be:
 *
 * low address
 * BPF_REG_FP - 24  ARG3
 * BPF_REG_FP - 16  ARG2
 * BPF_REG_FP - 8   ARG1
 * BPF_REG_FP
 * high address
 *
 * For each argument (described as: offn(...off2(off1(reg)))),
 * generate the following code:
 *
 * r7 <- fp
 * r7 <- r7 - stack_offset  // Ideally r7 would be initialized from
 *                          // fp once before generating args.
 *                          // However, the verifier regards r7 as a
 *                          // stack pointer only if it is derived
 *                          // directly from fp, not from another
 *                          // derived stack pointer. This is why r7
 *                          // has to be reset from fp for each
 *                          // variable.
 * r3 <- value of 'reg' -> generated using gen_ldx_reg_from_ctx()
 * (r7) <- r3          // skip following instructions for bare reg
 * r3 <- r3 + off1     . // skip if off1 == 0
 * r2 <- 8              \
 * r1 <- r7              |-> generated by gen_read_mem()
 * call probe_read      /
 * jnei r0, 0, err     ./
 * r3 <- (r7)
 * r3 <- r3 + off2     . // skip if off2 == 0
 * r2 <- 8              \ // r2 may be clobbered by probe_read, so set again
 * r1 <- r7              |-> generated by gen_read_mem()
 * call probe_read      /
 * jnei r0, 0, err     ./
 * ...
 */
static int
gen_prologue_slowpath(struct bpf_insn_pos *pos,
		      struct probe_trace_arg *args, int nargs)
{
	int err, i;

	for (i = 0; i < nargs; i++) {
		struct probe_trace_arg *arg = &args[i];
		const char *reg = arg->value;
		struct probe_trace_arg_ref *ref = NULL;
		int stack_offset = (i + 1) * -8;

		pr_debug("prologue: fetch arg %d, base reg is %s\n",
			 i, reg);

		/* value of base register is stored into ARG3 */
		err = gen_ldx_reg_from_ctx(pos, BPF_REG_CTX, reg,
					   BPF_REG_ARG3);
		if (err) {
			pr_err("prologue: failed to get offset of register %s\n",
			       reg);
			goto errout;
		}

		/* Make r7 the stack pointer. */
		ins(BPF_MOV64_REG(BPF_REG_7, BPF_REG_FP), pos);
		/* r7 += -8 */
		ins(BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, stack_offset), pos);
		/*
		 * Store r3 (base register) onto stack.
		 * Ensure fp[offset] is set.
		 * fp is the only valid base register for stores to the
		 * stack; we are not allowed to use r7 as the base
		 * register here.
		 */
		ins(BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG3,
				stack_offset), pos);

		ref = arg->ref;
		while (ref) {
			pr_debug("prologue: arg %d: offset %ld\n",
				 i, ref->offset);
			err = gen_read_mem(pos, BPF_REG_3, BPF_REG_7,
					   ref->offset);
			if (err) {
				pr_err("prologue: failed to generate probe_read function call\n");
				goto errout;
			}

			ref = ref->next;
			/*
			 * Load the previous result into ARG3. Use
			 * BPF_REG_FP instead of r7 because the verifier
			 * allows FP-based addressing only.
			 */
			if (ref)
				ins(BPF_LDX_MEM(BPF_DW, BPF_REG_ARG3,
						BPF_REG_FP, stack_offset), pos);
		}
	}

	/* Final pass: read to registers */
	for (i = 0; i < nargs; i++)
		ins(BPF_LDX_MEM(BPF_DW, BPF_PROLOGUE_START_ARG_REG + i,
				BPF_REG_FP, -BPF_REG_SIZE * (i + 1)), pos);

	ins(BPF_JMP_IMM(BPF_JA, BPF_REG_0, 0, JMP_TO_SUCCESS_CODE), pos);

	return check_pos(pos);
errout:
	return err;
}

static int
prologue_relocate(struct bpf_insn_pos *pos, struct bpf_insn *error_code,
		  struct bpf_insn *success_code, struct bpf_insn *user_code)
{
	struct bpf_insn *insn;

	if (check_pos(pos))
		return -BPF_LOADER_ERRNO__PROLOGUE2BIG;

	for (insn = pos->begin; insn < pos->pos; insn++) {
		struct bpf_insn *target;
		u8 class = BPF_CLASS(insn->code);
		u8 opcode;

		if (class != BPF_JMP)
			continue;

		opcode = BPF_OP(insn->code);
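		/*
		 * Calls are BPF_JMP class, but their 'off' field is not
		 * a branch target, so they must not be patched.
		 */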
		if (opcode == BPF_CALL)
			continue;

		switch (insn->off) {
		case JMP_TO_ERROR_CODE:
			target = error_code;
			break;
		case JMP_TO_SUCCESS_CODE:
			target = success_code;
			break;
		case JMP_TO_USER_CODE:
			target = user_code;
			break;
		default:
			pr_err("bpf prologue: internal error: relocation failed\n");
			return -BPF_LOADER_ERRNO__PROLOGUE;
		}
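		/*
		 * BPF branch offsets are relative to the instruction
		 * that follows the jump, hence target - (insn + 1).
		 */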
		insn->off = target - (insn + 1);
	}
	return 0;
}

int bpf__gen_prologue(struct probe_trace_arg *args, int nargs,
		      struct bpf_insn *new_prog, size_t *new_cnt,
		      size_t cnt_space)
{
	struct bpf_insn *success_code = NULL;
	struct bpf_insn *error_code = NULL;
	struct bpf_insn *user_code = NULL;
	struct bpf_insn_pos pos;
	bool fastpath = true;
	int err = 0, i;

	if (!new_prog || !new_cnt)
		return -EINVAL;

	if (cnt_space > BPF_MAXINSNS)
		cnt_space = BPF_MAXINSNS;

	pos.begin = new_prog;
	pos.end = new_prog + cnt_space;
	pos.pos = new_prog;

	if (!nargs) {
		ins(BPF_ALU64_IMM(BPF_MOV, BPF_PROLOGUE_FETCH_RESULT_REG, 0),
		    &pos);

		err = check_pos(&pos);
		if (err)
			goto errout;

		*new_cnt = pos_get_cnt(&pos);
		return 0;
	}

	if (nargs > BPF_PROLOGUE_MAX_ARGS) {
		pr_warning("bpf: prologue: %d arguments are dropped\n",
			   nargs - BPF_PROLOGUE_MAX_ARGS);
		nargs = BPF_PROLOGUE_MAX_ARGS;
	}

	/* First pass: validation */
	for (i = 0; i < nargs; i++) {
		struct probe_trace_arg_ref *ref = args[i].ref;

		if (args[i].value[0] == '@') {
			/* TODO: fetch global variable */
			pr_err("bpf: prologue: global %s%+ld not supported\n",
			       args[i].value, ref ? ref->offset : 0);
			return -ENOTSUP;
		}

		while (ref) {
			/* fastpath is true if all args have ref == NULL */
			fastpath = false;

			/*
			 * The instruction encodes the immediate value
			 * using s32, while ref->offset is a long. On
			 * systems where a long can't fit in an s32,
			 * refuse to process if ref->offset is too large
			 * (or too small).
			 */
#ifdef __LP64__
#define OFFSET_MAX	((1LL << 31) - 1)
#define OFFSET_MIN	((1LL << 31) * -1)
			if (ref->offset > OFFSET_MAX ||
			    ref->offset < OFFSET_MIN) {
				pr_err("bpf: prologue: offset out of bound: %ld\n",
				       ref->offset);
				return -BPF_LOADER_ERRNO__PROLOGUEOOB;
			}
#endif
			ref = ref->next;
		}
	}
	pr_debug("prologue: pass validation\n");

	if (fastpath) {
		/* If all variables are registers... */
		pr_debug("prologue: fast path\n");
		err = gen_prologue_fastpath(&pos, args, nargs);
		if (err)
			goto errout;
	} else {
		pr_debug("prologue: slow path\n");

		/* Initialization: move ctx to a callee saved register. */
		ins(BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1), &pos);

		err = gen_prologue_slowpath(&pos, args, nargs);
		if (err)
			goto errout;
		/*
		 * start of ERROR_CODE (only the slow path needs error code)
		 *   mov r2 <- 1  // r2 is the error number
		 *   mov r3 <- 0  // r3, r4... must be touched or the
		 *                // verifier would complain
		 *   mov r4 <- 0
		 *   ...
		 *   goto usercode
		 */
		error_code = pos.pos;
		ins(BPF_ALU64_IMM(BPF_MOV, BPF_PROLOGUE_FETCH_RESULT_REG, 1),
		    &pos);

		for (i = 0; i < nargs; i++)
			ins(BPF_ALU64_IMM(BPF_MOV,
					  BPF_PROLOGUE_START_ARG_REG + i,
					  0),
			    &pos);
		ins(BPF_JMP_IMM(BPF_JA, BPF_REG_0, 0, JMP_TO_USER_CODE),
		    &pos);
	}

	/*
	 * start of SUCCESS_CODE:
	 *   mov r2 <- 0
	 *   goto usercode  // skip
	 */
	success_code = pos.pos;
	ins(BPF_ALU64_IMM(BPF_MOV, BPF_PROLOGUE_FETCH_RESULT_REG, 0), &pos);

	/*
	 * start of USER_CODE:
	 *   Restore ctx to r1
	 */
	user_code = pos.pos;
	if (!fastpath) {
		/*
		 * Only the slow path needs to restore ctx. In the fast
		 * path, registers are loaded directly from r1.
		 */
		ins(BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX), &pos);
		err = prologue_relocate(&pos, error_code, success_code,
					user_code);
		if (err)
			goto errout;
	}

	err = check_pos(&pos);
	if (err)
		goto errout;

	*new_cnt = pos_get_cnt(&pos);
	return 0;
errout:
	return err;
}
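
/*
 * Usage sketch (illustrative only; the real caller lives in
 * bpf-loader.c and the names below are hypothetical): the caller
 * reserves headroom in the instruction buffer and lets
 * bpf__gen_prologue() fill it before the original program body:
 *
 *	struct bpf_insn buf[BPF_MAXINSNS];
 *	size_t prologue_cnt = 0;
 *	int err;
 *
 *	err = bpf__gen_prologue(args, nargs, buf, &prologue_cnt,
 *				BPF_MAXINSNS - orig_insn_cnt);
 *	if (!err)
 *		memcpy(buf + prologue_cnt, orig_insns,
 *		       orig_insn_cnt * sizeof(buf[0]));
 */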