/*
 * Example wrapper around BPF macros.
 *
 * Copyright (c) 2012 The Chromium OS Authors <chromium-os-dev@chromium.org>
 * Author: Will Drewry <wad@chromium.org>
 *
 * The code may be used by anyone for any purpose,
 * and can serve as a starting point for developing
 * applications using prctl(PR_SET_SECCOMP, 2, ...).
 *
 * No guarantees are provided with respect to the correctness
 * or functionality of this code.
 */
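/*
 * One way to install a filter built with these macros (a minimal,
 * illustrative sketch, not part of this header): wrap the instruction
 * array in a struct sock_fprog and hand it to prctl(). Note that
 * PR_SET_NO_NEW_PRIVS is required when the caller lacks CAP_SYS_ADMIN.
 *
 *	struct sock_filter filter[] = {
 *		LOAD_SYSCALL_NR,
 *		ALLOW,
 *	};
 *	struct sock_fprog prog = {
 *		.len = (unsigned short)(sizeof(filter) / sizeof(filter[0])),
 *		.filter = filter,
 *	};
 *	prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
 *	prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
 */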
#ifndef __BPF_HELPER_H__
#define __BPF_HELPER_H__

#include <asm/bitsperlong.h>	/* for __BITS_PER_LONG */
#include <endian.h>
#include <linux/filter.h>
#include <linux/seccomp.h>	/* for seccomp_data */
#include <linux/types.h>
#include <linux/unistd.h>
#include <stddef.h>
#define BPF_LABELS_MAX 256
struct bpf_labels {
	int count;
	struct __bpf_label {
		const char *label;
		__u32 location;
	} labels[BPF_LABELS_MAX];
};

int bpf_resolve_jumps(struct bpf_labels *labels,
		      struct sock_filter *filter, size_t count);
__u32 seccomp_bpf_label(struct bpf_labels *labels, const char *label);
void seccomp_bpf_print(struct sock_filter *filter, size_t count);

#define JUMP_JT 0xff
#define JUMP_JF 0xff
#define LABEL_JT 0xfe
#define LABEL_JF 0xfe
#define ALLOW \
	BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_ALLOW)
#define DENY \
	BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_KILL)
#define JUMP(labels, label) \
	BPF_JUMP(BPF_JMP+BPF_JA, FIND_LABEL((labels), (label)), \
		 JUMP_JT, JUMP_JF)
#define LABEL(labels, label) \
	BPF_JUMP(BPF_JMP+BPF_JA, FIND_LABEL((labels), (label)), \
		 LABEL_JT, LABEL_JF)
#define SYSCALL(nr, jt) \
	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (nr), 0, 1), \
	jt

/* Lame, but just an example */
#define FIND_LABEL(labels, label) seccomp_bpf_label((labels), #label)
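/*
 * Label usage sketch (illustrative only; the label name "deny_open" is
 * made up for this example). JUMP and LABEL emit placeholder jumps that
 * bpf_resolve_jumps() later rewrites into real instruction offsets:
 *
 *	struct bpf_labels l = { .count = 0 };
 *	struct sock_filter filter[] = {
 *		LOAD_SYSCALL_NR,
 *		SYSCALL(__NR_open, JUMP(&l, deny_open)),
 *		ALLOW,
 *		LABEL(&l, deny_open),
 *		DENY,
 *	};
 *	bpf_resolve_jumps(&l, filter, sizeof(filter) / sizeof(filter[0]));
 */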
#define EXPAND(...) __VA_ARGS__

/* Ensure that we load the logically correct offset. */
#if __BYTE_ORDER == __LITTLE_ENDIAN
#define LO_ARG(idx) offsetof(struct seccomp_data, args[(idx)])
#elif __BYTE_ORDER == __BIG_ENDIAN
#define LO_ARG(idx) offsetof(struct seccomp_data, args[(idx)]) + sizeof(__u32)
#else
#error "Unknown endianness"
#endif
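/*
 * Worked example (assuming the seccomp_data layout from linux/seccomp.h,
 * where the 64-bit args[] array starts at byte offset 16): args[1] begins
 * at byte 24, so LO_ARG(1) is 24 on little-endian but 28 on big-endian,
 * where the low 32 bits occupy the second half of the 64-bit slot.
 */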
/* Map all width-sensitive operations */
#if __BITS_PER_LONG == 32
#define JEQ(x, jt) JEQ32(x, EXPAND(jt))
#define JNE(x, jt) JNE32(x, EXPAND(jt))
#define JGT(x, jt) JGT32(x, EXPAND(jt))
#define JLT(x, jt) JLT32(x, EXPAND(jt))
#define JGE(x, jt) JGE32(x, EXPAND(jt))
#define JLE(x, jt) JLE32(x, EXPAND(jt))
#define JA(x, jt) JA32(x, EXPAND(jt))
#define ARG(i) ARG_32(i)
#elif __BITS_PER_LONG == 64

/* Ensure that we load the logically correct offset. */
#if __BYTE_ORDER == __LITTLE_ENDIAN
#define ENDIAN(_lo, _hi) _lo, _hi
#define HI_ARG(idx) offsetof(struct seccomp_data, args[(idx)]) + sizeof(__u32)
#elif __BYTE_ORDER == __BIG_ENDIAN
#define ENDIAN(_lo, _hi) _hi, _lo
#define HI_ARG(idx) offsetof(struct seccomp_data, args[(idx)])
#endif

union arg64 {
	struct {
		__u32 ENDIAN(lo32, hi32);
	};
	__u64 u64;
};
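/*
 * Example of the split (little-endian): ((union arg64){.u64 =
 * 0x100000002ULL}).lo32 is 0x2 and .hi32 is 0x1. ENDIAN() swaps the
 * member declaration order on big-endian so the names stay correct.
 */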
#define JEQ(x, jt) \
	JEQ64(((union arg64){.u64 = (x)}).lo32, \
	      ((union arg64){.u64 = (x)}).hi32, \
	      EXPAND(jt))
#define JGT(x, jt) \
	JGT64(((union arg64){.u64 = (x)}).lo32, \
	      ((union arg64){.u64 = (x)}).hi32, \
	      EXPAND(jt))
#define JGE(x, jt) \
	JGE64(((union arg64){.u64 = (x)}).lo32, \
	      ((union arg64){.u64 = (x)}).hi32, \
	      EXPAND(jt))
#define JNE(x, jt) \
	JNE64(((union arg64){.u64 = (x)}).lo32, \
	      ((union arg64){.u64 = (x)}).hi32, \
	      EXPAND(jt))
#define JLT(x, jt) \
	JLT64(((union arg64){.u64 = (x)}).lo32, \
	      ((union arg64){.u64 = (x)}).hi32, \
	      EXPAND(jt))
#define JLE(x, jt) \
	JLE64(((union arg64){.u64 = (x)}).lo32, \
	      ((union arg64){.u64 = (x)}).hi32, \
	      EXPAND(jt))
#define JA(x, jt) \
	JA64(((union arg64){.u64 = (x)}).lo32, \
	     ((union arg64){.u64 = (x)}).hi32, \
	     EXPAND(jt))
#define ARG(i) ARG_64(i)
#else
#error __BITS_PER_LONG value unusable.
#endif
/* Loads the arg into A */
#define ARG_32(idx) \
	BPF_STMT(BPF_LD+BPF_W+BPF_ABS, LO_ARG(idx))

/* Loads lo into M[0] and hi into M[1] and A */
#define ARG_64(idx) \
	BPF_STMT(BPF_LD+BPF_W+BPF_ABS, LO_ARG(idx)), \
	BPF_STMT(BPF_ST, 0), /* lo -> M[0] */ \
	BPF_STMT(BPF_LD+BPF_W+BPF_ABS, HI_ARG(idx)), \
	BPF_STMT(BPF_ST, 1) /* hi -> M[1] */
#define JEQ32(value, jt) \
	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (value), 0, 1), \
	jt
#define JNE32(value, jt) \
	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (value), 1, 0), \
	jt
/* Matches if any bit set in "value" is also set in A */
#define JA32(value, jt) \
	BPF_JUMP(BPF_JMP+BPF_JSET+BPF_K, (value), 0, 1), \
	jt
#define JGE32(value, jt) \
	BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (value), 0, 1), \
	jt
#define JGT32(value, jt) \
	BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (value), 0, 1), \
	jt
/* JLE/JLT are built by inverting the sense of JGT/JGE */
#define JLE32(value, jt) \
	BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (value), 1, 0), \
	jt
#define JLT32(value, jt) \
	BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (value), 1, 0), \
	jt
/*
 * All the JXX64 checks assume lo is saved in M[0] and hi is saved in both
 * A and M[1]. This invariant is kept by restoring A if necessary. An
 * annotated expansion follows JEQ64 below.
 */
#define JEQ64(lo, hi, jt) \
	/* if (hi != arg.hi) goto NOMATCH; */ \
	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
	BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \
	/* if (lo != arg.lo) goto NOMATCH; */ \
	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (lo), 0, 2), \
	BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi into A */ \
	jt, \
	BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi into A */
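/*
 * Annotated expansion of JEQ64(lo, hi, jt), assuming jt is a single
 * instruction such as ALLOW (the indices and notes are an illustration,
 * not part of the macro):
 *
 *	0: if (A != hi) skip 5	; hi mismatch: A still holds hi, no restore
 *	1: A = M[0]		; swap lo into A
 *	2: if (A != lo) skip 2	; lo mismatch: jump to 5 to restore hi
 *	3: A = M[1]		; match: restore hi before jt runs
 *	4: jt
 *	5: A = M[1]		; lo-mismatch path: restore hi
 *	6: <next instruction>
 */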
#define JNE64(lo, hi, jt) \
	/* if (hi != arg.hi) goto MATCH; */ \
	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 3), \
	BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \
	/* if (lo != arg.lo) goto MATCH; */ \
	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (lo), 2, 0), \
	BPF_STMT(BPF_LD+BPF_MEM, 1), \
	jt, \
	BPF_STMT(BPF_LD+BPF_MEM, 1)
#define JA64(lo, hi, jt) \
	/* if (hi & arg.hi) goto MATCH; */ \
	BPF_JUMP(BPF_JMP+BPF_JSET+BPF_K, (hi), 3, 0), \
	BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \
	/* if (lo & arg.lo) goto MATCH; */ \
	BPF_JUMP(BPF_JMP+BPF_JSET+BPF_K, (lo), 0, 2), \
	BPF_STMT(BPF_LD+BPF_MEM, 1), \
	jt, \
	BPF_STMT(BPF_LD+BPF_MEM, 1)
#define JGE64(lo, hi, jt) \
	/* if (hi > arg.hi) goto MATCH; */ \
	BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (hi), 4, 0), \
	/* if (hi != arg.hi) goto NOMATCH; */ \
	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
	BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \
	/* if (lo >= arg.lo) goto MATCH; */ \
	BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (lo), 0, 2), \
	BPF_STMT(BPF_LD+BPF_MEM, 1), \
	jt, \
	BPF_STMT(BPF_LD+BPF_MEM, 1)
#define JGT64(lo, hi, jt) \
	/* if (hi > arg.hi) goto MATCH; */ \
	BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (hi), 4, 0), \
	/* if (hi != arg.hi) goto NOMATCH; */ \
	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
	BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \
	/* if (lo > arg.lo) goto MATCH; */ \
	BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (lo), 0, 2), \
	BPF_STMT(BPF_LD+BPF_MEM, 1), \
	jt, \
	BPF_STMT(BPF_LD+BPF_MEM, 1)
#define JLE64(lo, hi, jt) \
	/* if (hi < arg.hi) goto MATCH; */ \
	BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (hi), 0, 4), \
	/* if (hi != arg.hi) goto NOMATCH; */ \
	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
	BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \
	/* if (lo <= arg.lo) goto MATCH; */ \
	BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (lo), 2, 0), \
	BPF_STMT(BPF_LD+BPF_MEM, 1), \
	jt, \
	BPF_STMT(BPF_LD+BPF_MEM, 1)
#define JLT64(lo, hi, jt) \
	/* if (hi < arg.hi) goto MATCH; */ \
	BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (hi), 0, 4), \
	/* if (hi != arg.hi) goto NOMATCH; */ \
	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
	BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \
	/* if (lo < arg.lo) goto MATCH; */ \
	BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (lo), 2, 0), \
	BPF_STMT(BPF_LD+BPF_MEM, 1), \
	jt, \
	BPF_STMT(BPF_LD+BPF_MEM, 1)
#define LOAD_SYSCALL_NR \
	BPF_STMT(BPF_LD+BPF_W+BPF_ABS, \
		 offsetof(struct seccomp_data, nr))
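/*
 * End-to-end sketch (illustrative; __NR_write, STDERR_FILENO, and the
 * label name "write_fd" are example values): allow write(2) only when
 * its first argument is stderr, and kill the process on anything else.
 *
 *	struct bpf_labels l = { .count = 0 };
 *	struct sock_filter filter[] = {
 *		LOAD_SYSCALL_NR,
 *		SYSCALL(__NR_write, JUMP(&l, write_fd)),
 *		DENY,
 *		LABEL(&l, write_fd),
 *		ARG(0),				// load args[0], the fd
 *		JEQ(STDERR_FILENO, ALLOW),	// fd == 2 -> return ALLOW
 *		DENY,
 *	};
 *	bpf_resolve_jumps(&l, filter, sizeof(filter) / sizeof(filter[0]));
 */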
#endif /* __BPF_HELPER_H__ */