/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BPF_CGROUP_H
#define _BPF_CGROUP_H

#include <linux/errno.h>
#include <linux/jump_label.h>
#include <linux/percpu.h>
#include <linux/rbtree.h>
#include <uapi/linux/bpf.h>

struct sock;
struct sockaddr;
struct cgroup;
struct sk_buff;
struct bpf_map;
struct bpf_prog;
struct bpf_sock_ops_kern;
struct bpf_cgroup_storage;

#ifdef CONFIG_CGROUP_BPF

extern struct static_key_false cgroup_bpf_enabled_key;
#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)

DECLARE_PER_CPU(void*, bpf_cgroup_storage);

struct bpf_cgroup_storage_map;

struct bpf_storage_buffer {
	struct rcu_head rcu;
	char data[0];
};

struct bpf_cgroup_storage {
	struct bpf_storage_buffer *buf;
	struct bpf_cgroup_storage_map *map;
	struct bpf_cgroup_storage_key key;
	struct list_head list;
	struct rb_node node;
	struct rcu_head rcu;
};

struct bpf_prog_list {
	struct list_head node;
	struct bpf_prog *prog;
	struct bpf_cgroup_storage *storage;
};

struct bpf_prog_array;

struct cgroup_bpf {
	/* array of effective progs in this cgroup */
	struct bpf_prog_array __rcu *effective[MAX_BPF_ATTACH_TYPE];

	/* Progs attached to this cgroup and their attach flags.
	 * When flags == 0 or BPF_F_ALLOW_OVERRIDE the list has either zero
	 * or one element; with BPF_F_ALLOW_MULTI it can hold up to
	 * BPF_CGROUP_MAX_PROGS entries.
	 */
	struct list_head progs[MAX_BPF_ATTACH_TYPE];
	u32 flags[MAX_BPF_ATTACH_TYPE];

	/* temp storage for effective prog array used by prog_attach/detach */
	struct bpf_prog_array __rcu *inactive;
};
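
/*
 * Illustrative sketch, not part of this header: from user space, programs
 * end up in a cgroup's cgroup_bpf via the BPF_PROG_ATTACH command of the
 * bpf(2) syscall, for example through libbpf's bpf_prog_attach() helper.
 * Passing BPF_F_ALLOW_MULTI permits several programs per attach type,
 * matching the list semantics described above. The cgroup path below is
 * hypothetical.
 *
 *	int cg_fd = open("/sys/fs/cgroup/my.slice", O_RDONLY);
 *	int err = bpf_prog_attach(prog_fd, cg_fd, BPF_CGROUP_INET_EGRESS,
 *				  BPF_F_ALLOW_MULTI);
 */
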
void cgroup_bpf_put(struct cgroup *cgrp);
int cgroup_bpf_inherit(struct cgroup *cgrp);

int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
			enum bpf_attach_type type, u32 flags);
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
			enum bpf_attach_type type, u32 flags);
int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		       union bpf_attr __user *uattr);

/* Wrappers for __cgroup_bpf_*() protected by cgroup_mutex */
int cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
		      enum bpf_attach_type type, u32 flags);
int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
		      enum bpf_attach_type type, u32 flags);
int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		     union bpf_attr __user *uattr);
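
/*
 * Rough sketch of the locking pattern behind these wrappers (the real
 * definitions live outside this header, in the cgroup core; shown only
 * for illustration):
 *
 *	int cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
 *			      enum bpf_attach_type type, u32 flags)
 *	{
 *		int ret;
 *
 *		mutex_lock(&cgroup_mutex);
 *		ret = __cgroup_bpf_attach(cgrp, prog, type, flags);
 *		mutex_unlock(&cgroup_mutex);
 *		return ret;
 *	}
 */
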
int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
				      struct sockaddr *uaddr,
				      enum bpf_attach_type type,
				      void *t_ctx);

int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum bpf_attach_type type);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
				      short access, enum bpf_attach_type type);

static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage *storage)
{
	struct bpf_storage_buffer *buf;

	if (!storage)
		return;

	buf = READ_ONCE(storage->buf);
	this_cpu_write(bpf_cgroup_storage, &buf->data[0]);
}
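
/*
 * Illustrative only: the prog-array runner (BPF_PROG_RUN_ARRAY in
 * linux/bpf.h) is what calls bpf_cgroup_storage_set(), installing the
 * per-cpu storage pointer just before each program runs, roughly:
 *
 *	bpf_cgroup_storage_set(item->cgroup_storage);
 *	ret = BPF_PROG_RUN(item->prog, ctx);
 *
 * BPF programs then reach this buffer through the bpf_get_local_storage()
 * helper.
 */
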
struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog);
void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage);
void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
			     struct cgroup *cgroup,
			     enum bpf_attach_type type);
void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *map);
void bpf_cgroup_storage_release(struct bpf_prog *prog, struct bpf_map *map);

/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb)			      \
({									      \
	int __ret = 0;							      \
	if (cgroup_bpf_enabled)						      \
		__ret = __cgroup_bpf_run_filter_skb(sk, skb,		      \
						    BPF_CGROUP_INET_INGRESS); \
									      \
	__ret;								      \
})

#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb)			       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled && sk && sk == skb->sk) {		       \
		typeof(sk) __sk = sk_to_full_sk(sk);			       \
		if (sk_fullsock(__sk))					       \
			__ret = __cgroup_bpf_run_filter_skb(__sk, skb,	       \
						      BPF_CGROUP_INET_EGRESS); \
	}								       \
	__ret;								       \
})
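
/*
 * Illustrative caller sketch (the real call sites live in the network
 * stack, e.g. the socket filter path for ingress and the IP output path
 * for egress); a non-zero return means a cgroup BPF program rejected the
 * packet:
 *
 *	err = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
 *	if (err)
 *		return err;
 */
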
#define BPF_CGROUP_RUN_SK_PROG(sk, type)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled) {					       \
		__ret = __cgroup_bpf_run_filter_sk(sk, type);		       \
	}								       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_CREATE)

#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET4_POST_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET6_POST_BIND)

#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, type)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled)						       \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type,    \
							  NULL);	       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx)		       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled) {					       \
		lock_sock(sk);						       \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type,    \
							  t_ctx);	       \
		release_sock(sk);					       \
	}								       \
	__ret;								       \
})
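
/*
 * Informational note: the _LOCK variant takes and releases the socket lock
 * around the program run, so it is meant for call sites that do not already
 * hold the lock. A caller might use it roughly as:
 *
 *	err = BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr);
 *	if (err)
 *		return err;
 */
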
#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_BIND)

#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (cgroup_bpf_enabled &&	       \
					    sk->sk_prot->pre_connect)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET4_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_RECVMSG, NULL)

#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_RECVMSG, NULL)

#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled && (sock_ops)->sk) {			       \
		/* typeof(sk) relies on an 'sk' variable in caller scope */   \
		typeof(sk) __sk = sk_to_full_sk((sock_ops)->sk);	       \
		if (__sk && sk_fullsock(__sk))				       \
			__ret = __cgroup_bpf_run_filter_sock_ops(__sk,	       \
								 sock_ops,     \
							 BPF_CGROUP_SOCK_OPS); \
	}								       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access)	       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled)						       \
		__ret = __cgroup_bpf_check_dev_permission(type, major, minor, \
							  access,	       \
							  BPF_CGROUP_DEVICE);  \
									       \
	__ret;								       \
})
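
/*
 * Illustrative only: the device cgroup permission check is the intended
 * consumer of BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(); a caller would map a
 * non-zero result to -EPERM, roughly:
 *
 *	if (BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access))
 *		return -EPERM;
 */
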
int cgroup_bpf_prog_attach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype, struct bpf_prog *prog);
int cgroup_bpf_prog_detach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype);
int cgroup_bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr);

#else /* CONFIG_CGROUP_BPF */

struct bpf_prog;
struct cgroup_bpf {};

static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }

static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype,
					 struct bpf_prog *prog)
{
	return -EINVAL;
}

static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype)
{
	return -EINVAL;
}

static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
					union bpf_attr __user *uattr)
{
	return -EINVAL;
}

static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage *storage) {}
static inline int bpf_cgroup_storage_assign(struct bpf_prog *prog,
					    struct bpf_map *map) { return 0; }
static inline void bpf_cgroup_storage_release(struct bpf_prog *prog,
					      struct bpf_map *map) {}
static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
	struct bpf_prog *prog) { return NULL; }
static inline void bpf_cgroup_storage_free(
	struct bpf_cgroup_storage *storage) {}

#define cgroup_bpf_enabled (0)
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access) ({ 0; })

#endif /* CONFIG_CGROUP_BPF */

#endif /* _BPF_CGROUP_H */