nf_queue.c

/*
 * Rusty Russell (C)2000 -- This code is GPL.
 * Patrick McHardy (c) 2006-2012
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter_bridge.h>
#include <linux/seq_file.h>
#include <linux/rcupdate.h>
#include <net/protocol.h>
#include <net/netfilter/nf_queue.h>
#include <net/dst.h>

#include "nf_internals.h"

/*
 * Hook for nfnetlink_queue to register its queue handler.
 * We do this so that most of the NFQUEUE code can be modular.
 *
 * Once the queue is registered it must reinject all packets it
 * receives, no matter what.
 */

/* Registration cannot fail: there is only one queueing backend per
 * netns and it is installed unconditionally; the WARN_ON below merely
 * flags an unexpected duplicate registration. */
void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh)
{
        /* should never happen, we only have one queueing backend in kernel */
        WARN_ON(rcu_access_pointer(net->nf.queue_handler));
        rcu_assign_pointer(net->nf.queue_handler, qh);
}
EXPORT_SYMBOL(nf_register_queue_handler);

/* The caller must flush their queue before this */
void nf_unregister_queue_handler(struct net *net)
{
        RCU_INIT_POINTER(net->nf.queue_handler, NULL);
}
EXPORT_SYMBOL(nf_unregister_queue_handler);
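
/*
 * Example (editor's illustration, not part of the original file): how a
 * queue backend would install and remove itself for a netns.  The
 * callbacks example_outfn() and example_hook_drop() are hypothetical;
 * the one in-tree backend is nfnetlink_queue.
 */
#if 0
static const struct nf_queue_handler example_qh = {
        .outfn        = example_outfn,     /* hands queued packets to the backend */
        .nf_hook_drop = example_hook_drop, /* flushes packets on hook removal */
};

static void example_attach(struct net *net)
{
        nf_register_queue_handler(net, &example_qh);
}

static void example_detach(struct net *net)
{
        /* Flush our own queue first, per the comment above. */
        nf_unregister_queue_handler(net);
        synchronize_rcu();      /* wait out in-flight rcu_dereference() users */
}
#endif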

void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
{
        struct nf_hook_state *state = &entry->state;

        /* Release those devices we held, or Alexey will kill me. */
        if (state->in)
                dev_put(state->in);
        if (state->out)
                dev_put(state->out);
        if (state->sk)
                sock_put(state->sk);

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
        if (entry->skb->nf_bridge) {
                struct net_device *physdev;

                physdev = nf_bridge_get_physindev(entry->skb);
                if (physdev)
                        dev_put(physdev);
                physdev = nf_bridge_get_physoutdev(entry->skb);
                if (physdev)
                        dev_put(physdev);
        }
#endif
}
EXPORT_SYMBOL_GPL(nf_queue_entry_release_refs);

/* Bump dev refs so they don't vanish while packet is out */
void nf_queue_entry_get_refs(struct nf_queue_entry *entry)
{
        struct nf_hook_state *state = &entry->state;

        if (state->in)
                dev_hold(state->in);
        if (state->out)
                dev_hold(state->out);
        if (state->sk)
                sock_hold(state->sk);

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
        if (entry->skb->nf_bridge) {
                struct net_device *physdev;

                physdev = nf_bridge_get_physindev(entry->skb);
                if (physdev)
                        dev_hold(physdev);
                physdev = nf_bridge_get_physoutdev(entry->skb);
                if (physdev)
                        dev_hold(physdev);
        }
#endif
}
EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs);
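
/*
 * Example (editor's illustration): the expected get/release pairing.
 * __nf_queue() below takes the references before handing the entry to
 * the backend, and nf_reinject() releases them on the way back.  A
 * backend that discards a queued entry without reinjecting it must
 * therefore release them itself, roughly like this
 * (example_drop_queued() is hypothetical):
 */
#if 0
static void example_drop_queued(struct nf_queue_entry *entry)
{
        nf_queue_entry_release_refs(entry);     /* drop dev/sk references */
        kfree_skb(entry->skb);
        kfree(entry);
}
#endif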

void nf_queue_nf_hook_drop(struct net *net)
{
        const struct nf_queue_handler *qh;

        rcu_read_lock();
        qh = rcu_dereference(net->nf.queue_handler);
        if (qh)
                qh->nf_hook_drop(net);
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(nf_queue_nf_hook_drop);

static void nf_ip_saveroute(const struct sk_buff *skb,
                            struct nf_queue_entry *entry)
{
        struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry);

        if (entry->state.hook == NF_INET_LOCAL_OUT) {
                const struct iphdr *iph = ip_hdr(skb);

                rt_info->tos = iph->tos;
                rt_info->daddr = iph->daddr;
                rt_info->saddr = iph->saddr;
                rt_info->mark = skb->mark;
        }
}

static void nf_ip6_saveroute(const struct sk_buff *skb,
                             struct nf_queue_entry *entry)
{
        struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry);

        if (entry->state.hook == NF_INET_LOCAL_OUT) {
                const struct ipv6hdr *iph = ipv6_hdr(skb);

                rt_info->daddr = iph->daddr;
                rt_info->saddr = iph->saddr;
                rt_info->mark = skb->mark;
        }
}
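
/*
 * Example (editor's illustration): the consumer of the saved route keys.
 * On reinject, nf_reroute() compares the saved header fields against the
 * (possibly rewritten) packet and re-runs output routing when they
 * differ.  The IPv4 side looks roughly like this sketch; the in-tree
 * version is nf_ip_reroute() in net/ipv4/netfilter.c.
 */
#if 0
static int example_ip_reroute(struct sk_buff *skb,
                              struct nf_queue_entry *entry)
{
        const struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry);

        if (entry->state.hook == NF_INET_LOCAL_OUT) {
                const struct iphdr *iph = ip_hdr(skb);

                /* Any change to tos/mark/addresses invalidates the
                 * route chosen before the packet was queued. */
                if (!(iph->tos == rt_info->tos &&
                      skb->mark == rt_info->mark &&
                      iph->daddr == rt_info->daddr &&
                      iph->saddr == rt_info->saddr))
                        return ip_route_me_harder(entry->state.net, skb,
                                                  RTN_UNSPEC);
        }
        return 0;
}
#endif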

static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
                      const struct nf_hook_entries *entries,
                      unsigned int index, unsigned int queuenum)
{
        int status = -ENOENT;
        struct nf_queue_entry *entry = NULL;
        const struct nf_queue_handler *qh;
        struct net *net = state->net;
        unsigned int route_key_size;

        /* QUEUE == DROP if no one is waiting, to be safe. */
        qh = rcu_dereference(net->nf.queue_handler);
        if (!qh) {
                status = -ESRCH;
                goto err;
        }

        switch (state->pf) {
        case AF_INET:
                route_key_size = sizeof(struct ip_rt_info);
                break;
        case AF_INET6:
                route_key_size = sizeof(struct ip6_rt_info);
                break;
        default:
                route_key_size = 0;
                break;
        }

        entry = kmalloc(sizeof(*entry) + route_key_size, GFP_ATOMIC);
        if (!entry) {
                status = -ENOMEM;
                goto err;
        }

        if (skb_dst(skb) && !skb_dst_force(skb)) {
                status = -ENETDOWN;
                goto err;
        }

        *entry = (struct nf_queue_entry) {
                .skb        = skb,
                .state      = *state,
                .hook_index = index,
                .size       = sizeof(*entry) + route_key_size,
        };

        nf_queue_entry_get_refs(entry);

        switch (entry->state.pf) {
        case AF_INET:
                nf_ip_saveroute(skb, entry);
                break;
        case AF_INET6:
                nf_ip6_saveroute(skb, entry);
                break;
        }

        status = qh->outfn(entry, queuenum);
        if (status < 0) {
                nf_queue_entry_release_refs(entry);
                goto err;
        }

        return 0;

err:
        kfree(entry);
        return status;
}

/* Packets leaving via this function must come back through nf_reinject(). */
int nf_queue(struct sk_buff *skb, struct nf_hook_state *state,
             const struct nf_hook_entries *entries, unsigned int index,
             unsigned int verdict)
{
        int ret;

        ret = __nf_queue(skb, state, entries, index, verdict >> NF_VERDICT_QBITS);
        if (ret < 0) {
                if (ret == -ESRCH &&
                    (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
                        return 1;
                kfree_skb(skb);
        }

        return 0;
}
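
/*
 * Example (editor's illustration): how a hook encodes the target queue
 * into its verdict.  The queue number lives in the upper verdict bits,
 * hence the >> NF_VERDICT_QBITS above, and NF_VERDICT_FLAG_QUEUE_BYPASS
 * turns the -ESRCH "no listener" case into an accept instead of a drop.
 */
#if 0
static unsigned int example_hookfn(void *priv, struct sk_buff *skb,
                                   const struct nf_hook_state *state)
{
        /* Queue to userspace queue 5; let the packet pass if no
         * queueing backend/listener is attached. */
        return NF_QUEUE_NR(5) | NF_VERDICT_FLAG_QUEUE_BYPASS;
}
#endif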

static unsigned int nf_iterate(struct sk_buff *skb,
                               struct nf_hook_state *state,
                               const struct nf_hook_entries *hooks,
                               unsigned int *index)
{
        const struct nf_hook_entry *hook;
        unsigned int verdict, i = *index;

        while (i < hooks->num_hook_entries) {
                hook = &hooks->hooks[i];
repeat:
                verdict = nf_hook_entry_hookfn(hook, skb, state);
                if (verdict != NF_ACCEPT) {
                        *index = i;
                        if (verdict != NF_REPEAT)
                                return verdict;
                        goto repeat;
                }
                i++;
        }

        *index = i;
        return NF_ACCEPT;
}
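
/*
 * Example (editor's illustration): the verdicts nf_iterate() reacts to.
 * NF_ACCEPT advances to the next hook, NF_REPEAT re-runs the same hook
 * (useful after the hook rewrote the packet), and any other verdict
 * stops traversal and is returned.  example_mangle() is hypothetical.
 */
#if 0
static unsigned int example_repeat_hookfn(void *priv, struct sk_buff *skb,
                                          const struct nf_hook_state *state)
{
        if (example_mangle(skb))
                return NF_REPEAT;       /* re-evaluate this hook on the new header */
        return NF_ACCEPT;               /* continue with the next hook */
}
#endif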

static struct nf_hook_entries *nf_hook_entries_head(const struct net *net, u8 pf, u8 hooknum)
{
        switch (pf) {
#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
        case NFPROTO_BRIDGE:
                return rcu_dereference(net->nf.hooks_bridge[hooknum]);
#endif
        case NFPROTO_IPV4:
                return rcu_dereference(net->nf.hooks_ipv4[hooknum]);
        case NFPROTO_IPV6:
                return rcu_dereference(net->nf.hooks_ipv6[hooknum]);
        default:
                WARN_ON_ONCE(1);
                return NULL;
        }

        return NULL;
}

/* Caller must hold rcu read-side lock */
void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
{
        const struct nf_hook_entry *hook_entry;
        const struct nf_hook_entries *hooks;
        struct sk_buff *skb = entry->skb;
        const struct net *net;
        unsigned int i;
        int err;
        u8 pf;

        net = entry->state.net;
        pf = entry->state.pf;

        hooks = nf_hook_entries_head(net, pf, entry->state.hook);

        nf_queue_entry_release_refs(entry);

        i = entry->hook_index;
        if (WARN_ON_ONCE(!hooks || i >= hooks->num_hook_entries)) {
                kfree_skb(skb);
                kfree(entry);
                return;
        }

        hook_entry = &hooks->hooks[i];

        /* Continue traversal iff userspace said ok... */
        if (verdict == NF_REPEAT)
                verdict = nf_hook_entry_hookfn(hook_entry, skb, &entry->state);

        if (verdict == NF_ACCEPT) {
                if (nf_reroute(skb, entry) < 0)
                        verdict = NF_DROP;
        }

        if (verdict == NF_ACCEPT) {
next_hook:
                ++i;
                verdict = nf_iterate(skb, &entry->state, hooks, &i);
        }

        switch (verdict & NF_VERDICT_MASK) {
        case NF_ACCEPT:
        case NF_STOP:
                local_bh_disable();
                entry->state.okfn(entry->state.net, entry->state.sk, skb);
                local_bh_enable();
                break;
        case NF_QUEUE:
                err = nf_queue(skb, &entry->state, hooks, i, verdict);
                if (err == 1)
                        goto next_hook;
                break;
        case NF_STOLEN:
                break;
        default:
                kfree_skb(skb);
        }

        kfree(entry);
}
EXPORT_SYMBOL(nf_reinject);
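
/*
 * Example (editor's illustration): a backend handing a packet back once
 * userspace has decided.  Per the comment above, nf_reinject() must be
 * called under the RCU read lock; nfnetlink_queue does the equivalent
 * from its verdict-message handler.  example_issue_verdict() is
 * hypothetical.
 */
#if 0
static void example_issue_verdict(struct nf_queue_entry *entry,
                                  unsigned int verdict)
{
        rcu_read_lock();
        nf_reinject(entry, verdict);    /* e.g. NF_ACCEPT or NF_DROP */
        rcu_read_unlock();
}
#endif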