/*
 * bpf_jit64.h: BPF JIT compiler for PPC64
 *
 * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
 *		  IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
  12. #ifndef _BPF_JIT64_H
  13. #define _BPF_JIT64_H
  14. #include "bpf_jit.h"
  15. /*
  16. * Stack layout:
  17. * Ensure the top half (upto local_tmp_var) stays consistent
  18. * with our redzone usage.
  19. *
  20. * [ prev sp ] <-------------
  21. * [ nv gpr save area ] 8*8 |
  22. * [ tail_call_cnt ] 8 |
  23. * [ local_tmp_var ] 8 |
  24. * fp (r31) --> [ ebpf stack space ] 512 |
  25. * [ frame header ] 32/112 |
  26. * sp (r1) ---> [ stack pointer ] --------------
  27. */
  28. /* for gpr non volatile registers BPG_REG_6 to 10, plus skb cache registers */
  29. #define BPF_PPC_STACK_SAVE (8*8)
  30. /* for bpf JIT code internal usage */
  31. #define BPF_PPC_STACK_LOCALS 16
  32. /* Ensure this is quadword aligned */
  33. #define BPF_PPC_STACKFRAME (STACK_FRAME_MIN_SIZE + MAX_BPF_STACK + \
  34. BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE)
  35. #ifndef __ASSEMBLY__
  36. /* BPF register usage */
  37. #define SKB_HLEN_REG (MAX_BPF_JIT_REG + 0)
  38. #define SKB_DATA_REG (MAX_BPF_JIT_REG + 1)
  39. #define TMP_REG_1 (MAX_BPF_JIT_REG + 2)
  40. #define TMP_REG_2 (MAX_BPF_JIT_REG + 3)
  41. /* BPF to ppc register mappings */
  42. static const int b2p[] = {
  43. /* function return value */
  44. [BPF_REG_0] = 8,
  45. /* function arguments */
  46. [BPF_REG_1] = 3,
  47. [BPF_REG_2] = 4,
  48. [BPF_REG_3] = 5,
  49. [BPF_REG_4] = 6,
  50. [BPF_REG_5] = 7,
  51. /* non volatile registers */
  52. [BPF_REG_6] = 27,
  53. [BPF_REG_7] = 28,
  54. [BPF_REG_8] = 29,
  55. [BPF_REG_9] = 30,
  56. /* frame pointer aka BPF_REG_10 */
  57. [BPF_REG_FP] = 31,
  58. /* eBPF jit internal registers */
  59. [BPF_REG_AX] = 2,
  60. [SKB_HLEN_REG] = 25,
  61. [SKB_DATA_REG] = 26,
  62. [TMP_REG_1] = 9,
  63. [TMP_REG_2] = 10
  64. };
  65. /* PPC NVR range -- update this if we ever use NVRs below r24 */
  66. #define BPF_PPC_NVR_MIN 24
  67. /* Assembly helpers */
  68. #define DECLARE_LOAD_FUNC(func) u64 func(u64 r3, u64 r4); \
  69. u64 func##_negative_offset(u64 r3, u64 r4); \
  70. u64 func##_positive_offset(u64 r3, u64 r4);
  71. DECLARE_LOAD_FUNC(sk_load_word);
  72. DECLARE_LOAD_FUNC(sk_load_half);
  73. DECLARE_LOAD_FUNC(sk_load_byte);
  74. #define CHOOSE_LOAD_FUNC(imm, func) \
  75. (imm < 0 ? \
  76. (imm >= SKF_LL_OFF ? func##_negative_offset : func) : \
  77. func##_positive_offset)
  78. #define SEEN_FUNC 0x1000 /* might call external helpers */
  79. #define SEEN_STACK 0x2000 /* uses BPF stack */
  80. #define SEEN_SKB 0x4000 /* uses sk_buff */
  81. #define SEEN_TAILCALL 0x8000 /* uses tail calls */
  82. struct codegen_context {
  83. /*
  84. * This is used to track register usage as well
  85. * as calls to external helpers.
  86. * - register usage is tracked with corresponding
  87. * bits (r3-r10 and r25-r31)
  88. * - rest of the bits can be used to track other
  89. * things -- for now, we use bits 16 to 23
  90. * encoded in SEEN_* macros above
  91. */
  92. unsigned int seen;
  93. unsigned int idx;
  94. };
  95. #endif /* !__ASSEMBLY__ */
  96. #endif