bpf_jit_comp.c

/* bpf_jit_comp.c: BPF JIT compiler
 *
 * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
 *
 * Based on the x86 BPF compiler, by Eric Dumazet (eric.dumazet@gmail.com)
 * Ported to ppc32 by Denis Kirjanov <kda@linux-powerpc.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>

#include "bpf_jit32.h"

int bpf_jit_enable __read_mostly;

static inline void bpf_flush_icache(void *start, void *end)
{
	smp_wmb();
	flush_icache_range((unsigned long)start, (unsigned long)end);
}
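
/*
 * The prologue below only builds a stack frame when the filter needs one:
 * SEEN_DATAREF means the sk_load_* helpers get called, so LR and the
 * non-volatile registers they depend on (r_D, r_HL) are saved; SEEN_MEM
 * means some of r_M..r_M+15 carry M[] scratch slots, and only the slots
 * actually used (tracked in the low bits of ctx->seen) are saved.
 */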
static void bpf_jit_build_prologue(struct bpf_prog *fp, u32 *image,
				   struct codegen_context *ctx)
{
	int i;
	const struct sock_filter *filter = fp->insns;

	if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
		/* Make stackframe */
		if (ctx->seen & SEEN_DATAREF) {
			/* If we call any helpers (for loads), save LR */
			EMIT(PPC_INST_MFLR | __PPC_RT(R0));
			PPC_BPF_STL(0, 1, PPC_LR_STKOFF);

			/* Back up non-volatile regs. */
			PPC_BPF_STL(r_D, 1, -(REG_SZ*(32-r_D)));
			PPC_BPF_STL(r_HL, 1, -(REG_SZ*(32-r_HL)));
		}
		if (ctx->seen & SEEN_MEM) {
			/*
			 * Conditionally save regs r15-r31 as some will be used
			 * for M[] data.
			 */
			for (i = r_M; i < (r_M+16); i++) {
				if (ctx->seen & (1 << (i-r_M)))
					PPC_BPF_STL(i, 1, -(REG_SZ*(32-i)));
			}
		}
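		/*
		 * The saves above use negative offsets from the old r1; the
		 * store-with-update below then pushes the frame so they end
		 * up inside it.
		 */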
		PPC_BPF_STLU(1, 1, -BPF_PPC_STACKFRAME);
	}

	if (ctx->seen & SEEN_DATAREF) {
		/*
		 * If this filter needs to access skb data,
		 * prepare r_D and r_HL:
		 * r_HL = skb->len - skb->data_len
		 * r_D = skb->data
		 */
		PPC_LWZ_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
							 data_len));
		PPC_LWZ_OFFS(r_HL, r_skb, offsetof(struct sk_buff, len));
		PPC_SUB(r_HL, r_HL, r_scratch1);
		PPC_LL_OFFS(r_D, r_skb, offsetof(struct sk_buff, data));
	}

	if (ctx->seen & SEEN_XREG) {
		/*
		 * TODO: Could also detect whether first instr. sets X and
		 * avoid this (as below, with A).
		 */
		PPC_LI(r_X, 0);
	}

	/* make sure we don't leak kernel information to user */
	if (bpf_needs_clear_a(&filter[0]))
		PPC_LI(r_A, 0);
}

static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
	int i;

	if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
		PPC_ADDI(1, 1, BPF_PPC_STACKFRAME);
		if (ctx->seen & SEEN_DATAREF) {
			PPC_BPF_LL(0, 1, PPC_LR_STKOFF);
			PPC_MTLR(0);
			PPC_BPF_LL(r_D, 1, -(REG_SZ*(32-r_D)));
			PPC_BPF_LL(r_HL, 1, -(REG_SZ*(32-r_HL)));
		}
		if (ctx->seen & SEEN_MEM) {
			/* Restore any saved non-vol registers */
			for (i = r_M; i < (r_M+16); i++) {
				if (ctx->seen & (1 << (i-r_M)))
					PPC_BPF_LL(i, 1, -(REG_SZ*(32-i)));
			}
		}
	}
	/* The RETs have left a return value in R3. */

	PPC_BLR();
}

#define CHOOSE_LOAD_FUNC(K, func) \
	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
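
/*
 * CHOOSE_LOAD_FUNC picks a helper variant from the sign of the constant
 * offset K: a negative K at or above SKF_LL_OFF selects func##_negative_offset,
 * a K below SKF_LL_OFF falls back to the generic func, and a non-negative K
 * selects func##_positive_offset. For example, CHOOSE_LOAD_FUNC(K, sk_load_word)
 * with K = -0x1000 resolves to sk_load_word_negative_offset.
 */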

/* Assemble the body code between the prologue & epilogue. */
static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
			      struct codegen_context *ctx,
			      unsigned int *addrs)
{
	const struct sock_filter *filter = fp->insns;
	int flen = fp->len;
	u8 *func;
	unsigned int true_cond;
	int i;

	/* Start of epilogue code */
	unsigned int exit_addr = addrs[flen];

	for (i = 0; i < flen; i++) {
		unsigned int K = filter[i].k;
		u16 code = bpf_anc_helper(&filter[i]);

		/*
		 * addrs[] maps a BPF bytecode address into a real offset from
		 * the start of the body code.
		 */
		addrs[i] = ctx->idx * 4;

		switch (code) {
			/*** ALU ops ***/
		case BPF_ALU | BPF_ADD | BPF_X: /* A += X; */
			ctx->seen |= SEEN_XREG;
			PPC_ADD(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_ADD | BPF_K: /* A += K; */
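			/*
			 * addi adds the sign-extended low 16 bits of K; when K
			 * has high bits set, addis then adds IMM_HA(K), the
			 * high half adjusted by one whenever the low half is
			 * negative, so the two additions sum to K.
			 */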
			if (!K)
				break;
			PPC_ADDI(r_A, r_A, IMM_L(K));
			if (K >= 32768)
				PPC_ADDIS(r_A, r_A, IMM_HA(K));
			break;
		case BPF_ALU | BPF_SUB | BPF_X: /* A -= X; */
			ctx->seen |= SEEN_XREG;
			PPC_SUB(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_SUB | BPF_K: /* A -= K */
			if (!K)
				break;
			PPC_ADDI(r_A, r_A, IMM_L(-K));
			if (K >= 32768)
				PPC_ADDIS(r_A, r_A, IMM_HA(-K));
			break;
		case BPF_ALU | BPF_MUL | BPF_X: /* A *= X; */
			ctx->seen |= SEEN_XREG;
			PPC_MULW(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_MUL | BPF_K: /* A *= K */
			if (K < 32768)
				PPC_MULI(r_A, r_A, K);
			else {
				PPC_LI32(r_scratch1, K);
				PPC_MULW(r_A, r_A, r_scratch1);
			}
			break;
		case BPF_ALU | BPF_MOD | BPF_X: /* A %= X; */
		case BPF_ALU | BPF_DIV | BPF_X: /* A /= X; */
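			/*
			 * Division by zero returns 0: either branch to an
			 * existing "ret 0" (pc_ret0) or emit one inline; the
			 * COND_NE short branch below jumps 12 bytes ahead,
			 * over the li/jump pair, when X is non-zero.
			 */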
			ctx->seen |= SEEN_XREG;
			PPC_CMPWI(r_X, 0);
			if (ctx->pc_ret0 != -1) {
				PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
			} else {
				PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
				PPC_LI(r_ret, 0);
				PPC_JMP(exit_addr);
			}
			if (code == (BPF_ALU | BPF_MOD | BPF_X)) {
				PPC_DIVWU(r_scratch1, r_A, r_X);
				PPC_MULW(r_scratch1, r_X, r_scratch1);
				PPC_SUB(r_A, r_A, r_scratch1);
			} else {
				PPC_DIVWU(r_A, r_A, r_X);
			}
			break;
		case BPF_ALU | BPF_MOD | BPF_K: /* A %= K; */
			PPC_LI32(r_scratch2, K);
			PPC_DIVWU(r_scratch1, r_A, r_scratch2);
			PPC_MULW(r_scratch1, r_scratch2, r_scratch1);
			PPC_SUB(r_A, r_A, r_scratch1);
			break;
		case BPF_ALU | BPF_DIV | BPF_K: /* A /= K */
			if (K == 1)
				break;
			PPC_LI32(r_scratch1, K);
			PPC_DIVWU(r_A, r_A, r_scratch1);
			break;
		case BPF_ALU | BPF_AND | BPF_X:
			ctx->seen |= SEEN_XREG;
			PPC_AND(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_AND | BPF_K:
			if (!IMM_H(K))
				PPC_ANDI(r_A, r_A, K);
			else {
				PPC_LI32(r_scratch1, K);
				PPC_AND(r_A, r_A, r_scratch1);
			}
			break;
		case BPF_ALU | BPF_OR | BPF_X:
			ctx->seen |= SEEN_XREG;
			PPC_OR(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_OR | BPF_K:
			if (IMM_L(K))
				PPC_ORI(r_A, r_A, IMM_L(K));
			if (K >= 65536)
				PPC_ORIS(r_A, r_A, IMM_H(K));
			break;
		case BPF_ANC | SKF_AD_ALU_XOR_X:
		case BPF_ALU | BPF_XOR | BPF_X: /* A ^= X */
			ctx->seen |= SEEN_XREG;
			PPC_XOR(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_XOR | BPF_K: /* A ^= K */
			if (IMM_L(K))
				PPC_XORI(r_A, r_A, IMM_L(K));
			if (K >= 65536)
				PPC_XORIS(r_A, r_A, IMM_H(K));
			break;
		case BPF_ALU | BPF_LSH | BPF_X: /* A <<= X; */
			ctx->seen |= SEEN_XREG;
			PPC_SLW(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_LSH | BPF_K:
			if (K == 0)
				break;
			else
				PPC_SLWI(r_A, r_A, K);
			break;
		case BPF_ALU | BPF_RSH | BPF_X: /* A >>= X; */
			ctx->seen |= SEEN_XREG;
			PPC_SRW(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_RSH | BPF_K: /* A >>= K; */
			if (K == 0)
				break;
			else
				PPC_SRWI(r_A, r_A, K);
			break;
		case BPF_ALU | BPF_NEG:
			PPC_NEG(r_A, r_A);
			break;
		case BPF_RET | BPF_K:
			PPC_LI32(r_ret, K);
			if (!K) {
				if (ctx->pc_ret0 == -1)
					ctx->pc_ret0 = i;
			}
			/*
			 * If this isn't the very last instruction, branch to
			 * the epilogue if we've stuff to clean up. Otherwise,
			 * if there's nothing to tidy, just return. If we /are/
			 * the last instruction, we're about to fall through to
			 * the epilogue to return.
			 */
			if (i != flen - 1) {
				/*
				 * Note: 'seen' is properly valid only on pass
				 * #2. Both parts of this conditional are the
				 * same instruction size though, meaning the
				 * first pass will still correctly determine the
				 * code size/addresses.
				 */
				if (ctx->seen)
					PPC_JMP(exit_addr);
				else
					PPC_BLR();
			}
			break;
		case BPF_RET | BPF_A:
			PPC_MR(r_ret, r_A);
			if (i != flen - 1) {
				if (ctx->seen)
					PPC_JMP(exit_addr);
				else
					PPC_BLR();
			}
			break;
		case BPF_MISC | BPF_TAX: /* X = A */
			PPC_MR(r_X, r_A);
			break;
		case BPF_MISC | BPF_TXA: /* A = X */
			ctx->seen |= SEEN_XREG;
			PPC_MR(r_A, r_X);
			break;

			/*** Constant loads/M[] access ***/
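			/*
			 * M[] lives entirely in registers: slot K maps to
			 * r_M + (K & 0xf), and the matching low bit of
			 * ctx->seen marks the slot as used so the prologue
			 * and epilogue save/restore only those registers.
			 */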
		case BPF_LD | BPF_IMM: /* A = K */
			PPC_LI32(r_A, K);
			break;
		case BPF_LDX | BPF_IMM: /* X = K */
			PPC_LI32(r_X, K);
			break;
		case BPF_LD | BPF_MEM: /* A = mem[K] */
			PPC_MR(r_A, r_M + (K & 0xf));
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_LDX | BPF_MEM: /* X = mem[K] */
			PPC_MR(r_X, r_M + (K & 0xf));
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_ST: /* mem[K] = A */
			PPC_MR(r_M + (K & 0xf), r_A);
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_STX: /* mem[K] = X */
			PPC_MR(r_M + (K & 0xf), r_X);
			ctx->seen |= SEEN_XREG | SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_LD | BPF_W | BPF_LEN: /* A = skb->len; */
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len));
			break;
		case BPF_LDX | BPF_W | BPF_ABS: /* A = *((u32 *)(seccomp_data + K)); */
			PPC_LWZ_OFFS(r_A, r_skb, K);
			break;
		case BPF_LDX | BPF_W | BPF_LEN: /* X = skb->len; */
			PPC_LWZ_OFFS(r_X, r_skb, offsetof(struct sk_buff, len));
			break;

			/*** Ancillary info loads ***/
		case BPF_ANC | SKF_AD_PROTOCOL: /* A = ntohs(skb->protocol); */
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  protocol) != 2);
			PPC_NTOHS_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							    protocol));
			break;
		case BPF_ANC | SKF_AD_IFINDEX:
		case BPF_ANC | SKF_AD_HATYPE:
			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
						  ifindex) != 4);
			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
						  type) != 2);
			PPC_LL_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
								dev));
			PPC_CMPDI(r_scratch1, 0);
			if (ctx->pc_ret0 != -1) {
				PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
			} else {
				/* Exit, returning 0; first pass hits here. */
				PPC_BCC_SHORT(COND_NE, ctx->idx * 4 + 12);
				PPC_LI(r_ret, 0);
				PPC_JMP(exit_addr);
			}
			if (code == (BPF_ANC | SKF_AD_IFINDEX)) {
				PPC_LWZ_OFFS(r_A, r_scratch1,
					     offsetof(struct net_device, ifindex));
			} else {
				PPC_LHZ_OFFS(r_A, r_scratch1,
					     offsetof(struct net_device, type));
			}
			break;
		case BPF_ANC | SKF_AD_MARK:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  mark));
			break;
		case BPF_ANC | SKF_AD_RXHASH:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  hash));
			break;
		case BPF_ANC | SKF_AD_VLAN_TAG:
		case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
			BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
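			/*
			 * TAG: mask off the 'present' bit; TAG_PRESENT:
			 * isolate bit 12 (0x1000) and shift it down to 0/1.
			 */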
			PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  vlan_tci));
			if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) {
				PPC_ANDI(r_A, r_A, ~VLAN_TAG_PRESENT);
			} else {
				PPC_ANDI(r_A, r_A, VLAN_TAG_PRESENT);
				PPC_SRWI(r_A, r_A, 12);
			}
			break;
		case BPF_ANC | SKF_AD_QUEUE:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  queue_mapping) != 2);
			PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  queue_mapping));
			break;
		case BPF_ANC | SKF_AD_PKTTYPE:
			PPC_LBZ_OFFS(r_A, r_skb, PKT_TYPE_OFFSET());
			PPC_ANDI(r_A, r_A, PKT_TYPE_MAX);
			PPC_SRWI(r_A, r_A, 5);
			break;
		case BPF_ANC | SKF_AD_CPU:
			PPC_BPF_LOAD_CPU(r_A);
			break;

			/*** Absolute loads from packet header/data ***/
		case BPF_LD | BPF_W | BPF_ABS:
			func = CHOOSE_LOAD_FUNC(K, sk_load_word);
			goto common_load;
		case BPF_LD | BPF_H | BPF_ABS:
			func = CHOOSE_LOAD_FUNC(K, sk_load_half);
			goto common_load;
		case BPF_LD | BPF_B | BPF_ABS:
			func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
		common_load:
			/* Load from [K]. */
			ctx->seen |= SEEN_DATAREF;
			PPC_FUNC_ADDR(r_scratch1, func);
			PPC_MTLR(r_scratch1);
			PPC_LI32(r_addr, K);
			PPC_BLRL();
			/*
			 * Helper returns 'lt' condition on error, and an
			 * appropriate return value in r3
			 */
			PPC_BCC(COND_LT, exit_addr);
			break;

			/*** Indirect loads from packet header/data ***/
		case BPF_LD | BPF_W | BPF_IND:
			func = sk_load_word;
			goto common_load_ind;
		case BPF_LD | BPF_H | BPF_IND:
			func = sk_load_half;
			goto common_load_ind;
		case BPF_LD | BPF_B | BPF_IND:
			func = sk_load_byte;
		common_load_ind:
			/*
			 * Load from [X + K]. Negative offsets are tested for
			 * in the helper functions.
			 */
			ctx->seen |= SEEN_DATAREF | SEEN_XREG;
			PPC_FUNC_ADDR(r_scratch1, func);
			PPC_MTLR(r_scratch1);
			PPC_ADDI(r_addr, r_X, IMM_L(K));
			if (K >= 32768)
				PPC_ADDIS(r_addr, r_addr, IMM_HA(K));
			PPC_BLRL();
			/* If error, cr0.LT set */
			PPC_BCC(COND_LT, exit_addr);
			break;
		case BPF_LDX | BPF_B | BPF_MSH:
			func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh);
			goto common_load;
			break;

			/*** Jump and branches ***/
		case BPF_JMP | BPF_JA:
			if (K != 0)
				PPC_JMP(addrs[i + 1 + K]);
			break;
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
			true_cond = COND_GT;
			goto cond_branch;
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
			true_cond = COND_GE;
			goto cond_branch;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
			true_cond = COND_EQ;
			goto cond_branch;
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			true_cond = COND_NE;
			/* Fall through */
		cond_branch:
			/* same targets, can avoid doing the test :) */
			if (filter[i].jt == filter[i].jf) {
				if (filter[i].jt > 0)
					PPC_JMP(addrs[i + 1 + filter[i].jt]);
				break;
			}

			switch (code) {
			case BPF_JMP | BPF_JGT | BPF_X:
			case BPF_JMP | BPF_JGE | BPF_X:
			case BPF_JMP | BPF_JEQ | BPF_X:
				ctx->seen |= SEEN_XREG;
				PPC_CMPLW(r_A, r_X);
				break;
			case BPF_JMP | BPF_JSET | BPF_X:
				ctx->seen |= SEEN_XREG;
				PPC_AND_DOT(r_scratch1, r_A, r_X);
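				/*
				 * and. updates CR0: EQ means no tested bit was
				 * set, so the 'true' branch condition is NE.
				 */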
				break;
			case BPF_JMP | BPF_JEQ | BPF_K:
			case BPF_JMP | BPF_JGT | BPF_K:
			case BPF_JMP | BPF_JGE | BPF_K:
				if (K < 32768)
					PPC_CMPLWI(r_A, K);
				else {
					PPC_LI32(r_scratch1, K);
					PPC_CMPLW(r_A, r_scratch1);
				}
				break;
			case BPF_JMP | BPF_JSET | BPF_K:
				if (K < 32768)
					/* PPC_ANDI is /only/ dot-form */
					PPC_ANDI(r_scratch1, r_A, K);
				else {
					PPC_LI32(r_scratch1, K);
					PPC_AND_DOT(r_scratch1, r_A,
						    r_scratch1);
				}
				break;
			}
			/* Sometimes branches are constructed "backward", with
			 * the false path being the branch and true path being
			 * a fallthrough to the next instruction.
			 */
			if (filter[i].jt == 0)
				/* Swap the sense of the branch */
				PPC_BCC(true_cond ^ COND_CMP_TRUE,
					addrs[i + 1 + filter[i].jf]);
			else {
				PPC_BCC(true_cond, addrs[i + 1 + filter[i].jt]);
				if (filter[i].jf != 0)
					PPC_JMP(addrs[i + 1 + filter[i].jf]);
			}
			break;
		default:
			/* The filter contains something cruel & unusual.
			 * We don't handle it, but also there shouldn't be
			 * anything missing from our list.
			 */
			if (printk_ratelimit())
				pr_err("BPF filter opcode %04x (@%d) unsupported\n",
				       filter[i].code, i);
			return -ENOTSUPP;
		}
	}

	/* Set end-of-body-code address for exit. */
	addrs[i] = ctx->idx * 4;

	return 0;
}

void bpf_jit_compile(struct bpf_prog *fp)
{
	unsigned int proglen;
	unsigned int alloclen;
	u32 *image = NULL;
	u32 *code_base;
	unsigned int *addrs;
	struct codegen_context cgctx;
	int pass;
	int flen = fp->len;

	if (!bpf_jit_enable)
		return;

	addrs = kzalloc((flen+1) * sizeof(*addrs), GFP_KERNEL);
	if (addrs == NULL)
		return;

	/*
	 * There are multiple assembly passes as the generated code will change
	 * size as it settles down, figuring out the max branch offsets/exit
	 * paths required.
	 *
	 * The range of standard conditional branches is +/- 32Kbytes. Since
	 * BPF_MAXINSNS = 4096, we can only jump from (worst case) start to
	 * finish with 8 bytes/instruction. Not feasible, so long jumps are
	 * used, distinct from short branches.
	 *
	 * Current:
	 *
	 * For now, both branch types assemble to 2 words (short branches padded
	 * with a NOP); this is less efficient, but assembly will always complete
	 * after exactly 3 passes:
	 *
	 * First pass: No code buffer; Program is "faux-generated" -- no code
	 * emitted but maximum size of output determined (and addrs[] filled
	 * in). Also, we note whether we use M[], whether we use skb data, etc.
	 * All generation choices assumed to be 'worst-case', e.g. branches all
	 * far (2 instructions), return path code reduction not available, etc.
	 *
	 * Second pass: Code buffer allocated with size determined previously.
	 * Prologue generated to support features we have seen used. Exit paths
	 * determined and addrs[] is filled in again, as code may be slightly
	 * smaller as a result.
	 *
	 * Third pass: Code generated 'for real', and branch destinations
	 * determined from now-accurate addrs[] map.
	 *
	 * Ideal:
	 *
	 * If we optimise this, near branches will be shorter. On the
	 * first assembly pass, we should err on the side of caution and
	 * generate the biggest code. On subsequent passes, branches will be
	 * generated short or long and code size will reduce. With smaller
	 * code, more branches may fall into the short category, and code will
	 * reduce more.
	 *
	 * Finally, if we see one pass generate code the same size as the
	 * previous pass we have converged and should now generate code for
	 * real. Allocating at the end will also save the memory that would
	 * otherwise be wasted by the (small) current code shrinkage.
	 * Preferably, we should do a small number of passes (e.g. 5) and if we
	 * haven't converged by then, get impatient and force code to generate
	 * as-is, even if the odd branch would be left long. The chances of a
	 * long jump are tiny with all but the most enormous of BPF filter
	 * inputs, so we should usually converge on the third pass.
	 */
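	/*
	 * In numbers: BPF_MAXINSNS (4096) instructions at a worst case of
	 * 2 words (8 bytes) each is 32 KB of body code, right at the +/- 32 KB
	 * reach of a conditional branch -- hence the fixed two-word branch
	 * forms described above.
	 */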
	cgctx.idx = 0;
	cgctx.seen = 0;
	cgctx.pc_ret0 = -1;
	/* Scouting faux-generate pass 0 */
	if (bpf_jit_build_body(fp, 0, &cgctx, addrs))
		/* We hit something illegal or unsupported. */
		goto out;

	/*
	 * Pretend to build prologue, given the features we've seen. This will
	 * update cgctx.idx as it pretends to output instructions, then we can
	 * calculate total size from idx.
	 */
	bpf_jit_build_prologue(fp, 0, &cgctx);
	bpf_jit_build_epilogue(0, &cgctx);

	proglen = cgctx.idx * 4;
	alloclen = proglen + FUNCTION_DESCR_SIZE;

	image = module_alloc(alloclen);
	if (!image)
		goto out;
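
	/* On 64-bit, the image starts with a function descriptor (entry
	 * address + TOC pointer, filled in near the end of this function),
	 * so the instructions proper start at code_base; on 32-bit,
	 * FUNCTION_DESCR_SIZE is zero and code_base == image.
	 */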
	code_base = image + (FUNCTION_DESCR_SIZE/4);

	/* Code generation passes 1-2 */
	for (pass = 1; pass < 3; pass++) {
		/* Now build the prologue, body code & epilogue for real. */
		cgctx.idx = 0;
		bpf_jit_build_prologue(fp, code_base, &cgctx);
		bpf_jit_build_body(fp, code_base, &cgctx, addrs);
		bpf_jit_build_epilogue(code_base, &cgctx);

		if (bpf_jit_enable > 1)
			pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
				proglen - (cgctx.idx * 4), cgctx.seen);
	}

	if (bpf_jit_enable > 1)
		/* Note that we output the base address of the code_base
		 * rather than image, since opcodes are in code_base.
		 */
		bpf_jit_dump(flen, proglen, pass, code_base);

	if (image) {
		bpf_flush_icache(code_base, code_base + (proglen/4));
#ifdef CONFIG_PPC64
		/* Function descriptor nastiness: Address + TOC */
		((u64 *)image)[0] = (u64)code_base;
		((u64 *)image)[1] = local_paca->kernel_toc;
#endif
		fp->bpf_func = (void *)image;
		fp->jited = 1;
	}

out:
	kfree(addrs);
	return;
}

void bpf_jit_free(struct bpf_prog *fp)
{
	if (fp->jited)
		module_memfree(fp->bpf_func);

	bpf_prog_unlock_free(fp);
}
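
/*
 * Rough usage sketch (not part of this file; the real call site lives in the
 * generic BPF core, not in this JIT): once bpf_jit_compile() has set
 * fp->bpf_func and fp->jited, running the filter invokes the image directly
 * instead of the interpreter, roughly:
 *
 *	if (fp->jited)
 *		ret = fp->bpf_func(skb, fp->insns);	// JITed code; result returned in r3
 *	else
 *		ret = bpf_run_interpreter(fp, skb);	// hypothetical fallback name
 */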