  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. #ifndef __LINUX_NETFILTER_H
  3. #define __LINUX_NETFILTER_H
  4. #include <linux/init.h>
  5. #include <linux/skbuff.h>
  6. #include <linux/net.h>
  7. #include <linux/if.h>
  8. #include <linux/in.h>
  9. #include <linux/in6.h>
  10. #include <linux/wait.h>
  11. #include <linux/list.h>
  12. #include <linux/static_key.h>
  13. #include <linux/netfilter_defs.h>
  14. #include <linux/netdevice.h>
  15. #include <net/net_namespace.h>
  16. #ifdef CONFIG_NETFILTER
  17. static inline int NF_DROP_GETERR(int verdict)
  18. {
  19. return -(verdict >> NF_VERDICT_QBITS);
  20. }
  21. static inline int nf_inet_addr_cmp(const union nf_inet_addr *a1,
  22. const union nf_inet_addr *a2)
  23. {
  24. return a1->all[0] == a2->all[0] &&
  25. a1->all[1] == a2->all[1] &&
  26. a1->all[2] == a2->all[2] &&
  27. a1->all[3] == a2->all[3];
  28. }
  29. static inline void nf_inet_addr_mask(const union nf_inet_addr *a1,
  30. union nf_inet_addr *result,
  31. const union nf_inet_addr *mask)
  32. {
  33. result->all[0] = a1->all[0] & mask->all[0];
  34. result->all[1] = a1->all[1] & mask->all[1];
  35. result->all[2] = a1->all[2] & mask->all[2];
  36. result->all[3] = a1->all[3] & mask->all[3];
  37. }
/* Netfilter core initialisation. */
int netfilter_init(void);

/* Forward declarations: only pointers to these appear below. */
struct sk_buff;
struct nf_hook_ops;
struct sock;

/* Per-invocation context handed to every netfilter hook function. */
struct nf_hook_state {
	unsigned int hook;	/* hook number within the family */
	u_int8_t pf;		/* protocol family (NFPROTO_*) */
	struct net_device *in;	/* ingress device (may be NULL) */
	struct net_device *out;	/* egress device (may be NULL) */
	struct sock *sk;	/* owning socket, if any */
	struct net *net;	/* network namespace of this traversal */
	/* continuation invoked when the packet is accepted */
	int (*okfn)(struct net *, struct sock *, struct sk_buff *);
};
/* Signature of a hook function: receives the registered private data,
 * the packet, and the traversal state; returns a verdict.
 */
typedef unsigned int nf_hookfn(void *priv,
			       struct sk_buff *skb,
			       const struct nf_hook_state *state);
/* Registration record passed to nf_register_net_hook() and friends. */
struct nf_hook_ops {
	/* User fills in from here down. */
	nf_hookfn *hook;	/* function to invoke at this hook point */
	struct net_device *dev;	/* associated device, if any */
	void *priv;		/* passed back to the hook as 'priv' */
	u_int8_t pf;		/* protocol family (NFPROTO_*) */
	unsigned int hooknum;	/* hook number within that family */
	/* Hooks are ordered in ascending priority. */
	int priority;
};
/* Fast-path entry: only what packet processing needs -- the function
 * pointer and its private data.
 */
struct nf_hook_entry {
	nf_hookfn *hook;
	void *priv;
};

/* Bookkeeping used when freeing an nf_hook_entries via call_rcu. */
struct nf_hook_entries_rcu_head {
	struct rcu_head head;
	void *allocation;	/* the original allocation to free */
};
/* Packed array of hook entries for one hook point; readers access it
 * under RCU (see nf_hook() below).
 */
struct nf_hook_entries {
	u16 num_hook_entries;	/* number of valid entries in hooks[] */
	/* padding */
	struct nf_hook_entry hooks[];
	/* trailer: pointers to original orig_ops of each hook,
	 * followed by rcu_head and scratch space used for freeing
	 * the structure via call_rcu.
	 *
	 * This is not part of struct nf_hook_entry since it's only
	 * needed in slow path (hook register/unregister):
	 * const struct nf_hook_ops *orig_ops[]
	 *
	 * For the same reason, we store this at end -- it's
	 * only needed when a hook is deleted, not during
	 * packet path processing:
	 * struct nf_hook_entries_rcu_head head
	 */
};
  90. static inline struct nf_hook_ops **nf_hook_entries_get_hook_ops(const struct nf_hook_entries *e)
  91. {
  92. unsigned int n = e->num_hook_entries;
  93. const void *hook_end;
  94. hook_end = &e->hooks[n]; /* this is *past* ->hooks[]! */
  95. return (struct nf_hook_ops **)hook_end;
  96. }
/* Invoke a single hook entry on @skb and return its verdict. */
static inline int
nf_hook_entry_hookfn(const struct nf_hook_entry *entry, struct sk_buff *skb,
		     struct nf_hook_state *state)
{
	return entry->hook(entry->priv, skb, state);
}
  103. static inline void nf_hook_state_init(struct nf_hook_state *p,
  104. unsigned int hook,
  105. u_int8_t pf,
  106. struct net_device *indev,
  107. struct net_device *outdev,
  108. struct sock *sk,
  109. struct net *net,
  110. int (*okfn)(struct net *, struct sock *, struct sk_buff *))
  111. {
  112. p->hook = hook;
  113. p->pf = pf;
  114. p->in = indev;
  115. p->out = outdev;
  116. p->sk = sk;
  117. p->net = net;
  118. p->okfn = okfn;
  119. }
/* Per-family handler for netfilter get/setsockopt ranges; registered
 * via nf_register_sockopt().
 */
struct nf_sockopt_ops {
	struct list_head list;
	u_int8_t pf;	/* protocol family this handler serves */
	/* Non-inclusive ranges: use 0/0/NULL to never get called. */
	int set_optmin;
	int set_optmax;
	int (*set)(struct sock *sk, int optval, void __user *user, unsigned int len);
#ifdef CONFIG_COMPAT
	int (*compat_set)(struct sock *sk, int optval,
			  void __user *user, unsigned int len);
#endif
	int get_optmin;
	int get_optmax;
	int (*get)(struct sock *sk, int optval, void __user *user, int *len);
#ifdef CONFIG_COMPAT
	int (*compat_get)(struct sock *sk, int optval,
			  void __user *user, int *len);
#endif
	/* Use the module struct to lock set/get code in place */
	struct module *owner;
};
  141. /* Function to register/unregister hook points. */
  142. int nf_register_net_hook(struct net *net, const struct nf_hook_ops *ops);
  143. void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *ops);
  144. int nf_register_net_hooks(struct net *net, const struct nf_hook_ops *reg,
  145. unsigned int n);
  146. void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg,
  147. unsigned int n);
  148. /* Functions to register get/setsockopt ranges (non-inclusive). You
  149. need to check permissions yourself! */
  150. int nf_register_sockopt(struct nf_sockopt_ops *reg);
  151. void nf_unregister_sockopt(struct nf_sockopt_ops *reg);
  152. #ifdef CONFIG_JUMP_LABEL
  153. extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
  154. #endif
  155. int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state,
  156. const struct nf_hook_entries *e, unsigned int i);
/**
 * nf_hook - call a netfilter hook
 *
 * Returns 1 if the hook has allowed the packet to pass.  The function
 * okfn must be invoked by the caller in this case.  Any other return
 * value indicates the packet has been consumed by the hook.
 */
static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
			  struct sock *sk, struct sk_buff *skb,
			  struct net_device *indev, struct net_device *outdev,
			  int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
	struct nf_hook_entries *hook_head = NULL;
	int ret = 1;

#ifdef CONFIG_JUMP_LABEL
	/* Fast path: when pf/hook are compile-time constants, a static
	 * key lets us skip everything while no hook is registered.
	 */
	if (__builtin_constant_p(pf) &&
	    __builtin_constant_p(hook) &&
	    !static_key_false(&nf_hooks_needed[pf][hook]))
		return 1;
#endif

	rcu_read_lock();
	/* Pick the per-namespace hook list for this protocol family. */
	switch (pf) {
	case NFPROTO_IPV4:
		hook_head = rcu_dereference(net->nf.hooks_ipv4[hook]);
		break;
	case NFPROTO_IPV6:
		hook_head = rcu_dereference(net->nf.hooks_ipv6[hook]);
		break;
	case NFPROTO_ARP:
#ifdef CONFIG_NETFILTER_FAMILY_ARP
		/* Guard against an out-of-range hook index rather than
		 * reading past hooks_arp[].
		 */
		if (WARN_ON_ONCE(hook >= ARRAY_SIZE(net->nf.hooks_arp)))
			break;
		hook_head = rcu_dereference(net->nf.hooks_arp[hook]);
#endif
		break;
	case NFPROTO_BRIDGE:
#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
		hook_head = rcu_dereference(net->nf.hooks_bridge[hook]);
#endif
		break;
#if IS_ENABLED(CONFIG_DECNET)
	case NFPROTO_DECNET:
		hook_head = rcu_dereference(net->nf.hooks_decnet[hook]);
		break;
#endif
	default:
		WARN_ON_ONCE(1);
		break;
	}

	if (hook_head) {
		struct nf_hook_state state;

		nf_hook_state_init(&state, hook, pf, indev, outdev,
				   sk, net, okfn);

		/* Walk the hook chain starting at entry 0. */
		ret = nf_hook_slow(skb, &state, hook_head, 0);
	}
	rcu_read_unlock();

	return ret;
}
/* Activate hook; either okfn or kfree_skb called, unless a hook
   returns NF_STOLEN (in which case, it's up to the hook to deal with
   the consequences).

   Returns -ERRNO if packet dropped.  Zero means queued, stolen or
   accepted.
*/

/* RR:
   > I don't want nf_hook to return anything because people might forget
   > about async and trust the return value to mean "packet was ok".

   AK:
   Just document it clearly, then you can expect some sense from kernel
   coders :)
*/
  228. static inline int
  229. NF_HOOK_COND(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
  230. struct sk_buff *skb, struct net_device *in, struct net_device *out,
  231. int (*okfn)(struct net *, struct sock *, struct sk_buff *),
  232. bool cond)
  233. {
  234. int ret;
  235. if (!cond ||
  236. ((ret = nf_hook(pf, hook, net, sk, skb, in, out, okfn)) == 1))
  237. ret = okfn(net, sk, skb);
  238. return ret;
  239. }
  240. static inline int
  241. NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct sk_buff *skb,
  242. struct net_device *in, struct net_device *out,
  243. int (*okfn)(struct net *, struct sock *, struct sk_buff *))
  244. {
  245. int ret = nf_hook(pf, hook, net, sk, skb, in, out, okfn);
  246. if (ret == 1)
  247. ret = okfn(net, sk, skb);
  248. return ret;
  249. }
/* Run every skb on @head through the hook point.  Packets that pass
 * (verdict 1) are kept on @head in their original order; all others
 * are left unlinked.  okfn is not invoked directly by this helper.
 */
static inline void
NF_HOOK_LIST(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
	     struct list_head *head, struct net_device *in, struct net_device *out,
	     int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
	struct sk_buff *skb, *next;
	struct list_head sublist;

	INIT_LIST_HEAD(&sublist);
	/* Unlink each skb first so a hook may freely consume it. */
	list_for_each_entry_safe(skb, next, head, list) {
		list_del(&skb->list);
		if (nf_hook(pf, hook, net, sk, skb, in, out, okfn) == 1)
			list_add_tail(&skb->list, &sublist);
	}
	/* Put passed packets back on main list */
	list_splice(&sublist, head);
}
  266. /* Call setsockopt() */
  267. int nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt,
  268. unsigned int len);
  269. int nf_getsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt,
  270. int *len);
  271. #ifdef CONFIG_COMPAT
  272. int compat_nf_setsockopt(struct sock *sk, u_int8_t pf, int optval,
  273. char __user *opt, unsigned int len);
  274. int compat_nf_getsockopt(struct sock *sk, u_int8_t pf, int optval,
  275. char __user *opt, int *len);
  276. #endif
  277. /* Call this before modifying an existing packet: ensures it is
  278. modifiable and linear to the point you care about (writable_len).
  279. Returns true or false. */
  280. int skb_make_writable(struct sk_buff *skb, unsigned int writable_len);
  281. struct flowi;
  282. struct nf_queue_entry;
  283. __sum16 nf_checksum(struct sk_buff *skb, unsigned int hook,
  284. unsigned int dataoff, u_int8_t protocol,
  285. unsigned short family);
  286. __sum16 nf_checksum_partial(struct sk_buff *skb, unsigned int hook,
  287. unsigned int dataoff, unsigned int len,
  288. u_int8_t protocol, unsigned short family);
  289. int nf_route(struct net *net, struct dst_entry **dst, struct flowi *fl,
  290. bool strict, unsigned short family);
  291. int nf_reroute(struct sk_buff *skb, struct nf_queue_entry *entry);
  292. #include <net/flow.h>
  293. struct nf_conn;
  294. enum nf_nat_manip_type;
  295. struct nlattr;
  296. enum ip_conntrack_dir;
/* Indirection table the NAT core installs at runtime (see the
 * nf_nat_hook extern below); all fields are optional callbacks.
 */
struct nf_nat_hook {
	int (*parse_nat_setup)(struct nf_conn *ct, enum nf_nat_manip_type manip,
			       const struct nlattr *attr);
	void (*decode_session)(struct sk_buff *skb, struct flowi *fl);
	unsigned int (*manip_pkt)(struct sk_buff *skb, struct nf_conn *ct,
				  enum nf_nat_manip_type mtype,
				  enum ip_conntrack_dir dir);
};
  305. extern struct nf_nat_hook __rcu *nf_nat_hook;
/* Invoke the registered NAT decode_session handler on @skb/@fl under
 * RCU, if one is installed; compiles to a no-op unless
 * CONFIG_NF_NAT_NEEDED is set.  @family is currently unused here.
 */
static inline void
nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
{
#ifdef CONFIG_NF_NAT_NEEDED
	struct nf_nat_hook *nat_hook;

	rcu_read_lock();
	nat_hook = rcu_dereference(nf_nat_hook);
	if (nat_hook && nat_hook->decode_session)
		nat_hook->decode_session(skb, fl);
	rcu_read_unlock();
#endif
}
  318. #else /* !CONFIG_NETFILTER */
/* Netfilter disabled: no hooks exist, go straight to okfn. */
static inline int
NF_HOOK_COND(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
	     struct sk_buff *skb, struct net_device *in, struct net_device *out,
	     int (*okfn)(struct net *, struct sock *, struct sk_buff *),
	     bool cond)
{
	return okfn(net, sk, skb);
}
/* Netfilter disabled: no hooks exist, go straight to okfn. */
static inline int
NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
	struct sk_buff *skb, struct net_device *in, struct net_device *out,
	int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
	return okfn(net, sk, skb);
}
/* Netfilter disabled: every packet on @head passes, list untouched. */
static inline void
NF_HOOK_LIST(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
	     struct list_head *head, struct net_device *in, struct net_device *out,
	     int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
	/* nothing to do */
}
/* Netfilter disabled: always report "passed" (1) so callers take the
 * same okfn path as when a hook chain accepts the packet.
 */
static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
			  struct sock *sk, struct sk_buff *skb,
			  struct net_device *indev, struct net_device *outdev,
			  int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
	return 1;
}
  348. struct flowi;
/* Netfilter disabled: NAT session decoding is a no-op. */
static inline void
nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
{
}
  353. #endif /*CONFIG_NETFILTER*/
  354. #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
  355. #include <linux/netfilter/nf_conntrack_zones_common.h>
  356. extern void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) __rcu;
  357. void nf_ct_attach(struct sk_buff *, const struct sk_buff *);
  358. struct nf_conntrack_tuple;
  359. bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
  360. const struct sk_buff *skb);
  361. #else
/* Conntrack disabled: attaching conntrack state is a no-op. */
static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}
struct nf_conntrack_tuple;
/* Conntrack disabled: a tuple can never be extracted. */
static inline bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
				       const struct sk_buff *skb)
{
	return false;
}
  369. #endif
  370. struct nf_conn;
  371. enum ip_conntrack_info;
/* Callbacks the conntrack core installs at runtime (see the
 * nf_ct_hook extern below).
 */
struct nf_ct_hook {
	int (*update)(struct net *net, struct sk_buff *skb);
	void (*destroy)(struct nf_conntrack *);
	bool (*get_tuple_skb)(struct nf_conntrack_tuple *,
			      const struct sk_buff *);
};
  378. extern struct nf_ct_hook __rcu *nf_ct_hook;
  379. struct nlattr;
/* Callbacks installed via the nfnl_ct_hook extern below; used to
 * build/parse conntrack netlink attributes for an skb's connection.
 */
struct nfnl_ct_hook {
	struct nf_conn *(*get_ct)(const struct sk_buff *skb,
				  enum ip_conntrack_info *ctinfo);
	size_t (*build_size)(const struct nf_conn *ct);
	int (*build)(struct sk_buff *skb, struct nf_conn *ct,
		     enum ip_conntrack_info ctinfo,
		     u_int16_t ct_attr, u_int16_t ct_info_attr);
	int (*parse)(const struct nlattr *attr, struct nf_conn *ct);
	int (*attach_expect)(const struct nlattr *attr, struct nf_conn *ct,
			     u32 portid, u32 report);
	void (*seq_adjust)(struct sk_buff *skb, struct nf_conn *ct,
			   enum ip_conntrack_info ctinfo, s32 off);
};
  393. extern struct nfnl_ct_hook __rcu *nfnl_ct_hook;
  394. /**
  395. * nf_skb_duplicated - TEE target has sent a packet
  396. *
  397. * When a xtables target sends a packet, the OUTPUT and POSTROUTING
  398. * hooks are traversed again, i.e. nft and xtables are invoked recursively.
  399. *
  400. * This is used by xtables TEE target to prevent the duplicated skb from
  401. * being duplicated again.
  402. */
  403. DECLARE_PER_CPU(bool, nf_skb_duplicated);
  404. #endif /*__LINUX_NETFILTER_H*/