/*
 * Rusty Russell (C)2000 -- This code is GPL.
 * Patrick McHardy (c) 2006-2012
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <linux/netfilter_bridge.h>
#include <linux/seq_file.h>
#include <linux/rcupdate.h>
#include <net/protocol.h>
#include <net/netfilter/nf_queue.h>
#include <net/dst.h>

#include "nf_internals.h"

/*
 * Hook for nfnetlink_queue to register its queue handler.
 * We do this so that most of the NFQUEUE code can be modular.
 *
 * Once the queue is registered it must reinject all packets it
 * receives, no matter what.
 */
static const struct nf_queue_handler __rcu *queue_handler __read_mostly;

/* Only one queueing backend may be registered at a time; the handler
 * pointer is published with RCU. */
void nf_register_queue_handler(const struct nf_queue_handler *qh)
{
	/* should never happen, we only have one queueing backend in kernel */
	WARN_ON(rcu_access_pointer(queue_handler));
	rcu_assign_pointer(queue_handler, qh);
}
EXPORT_SYMBOL(nf_register_queue_handler);

/* The caller must flush their queue before this */
void nf_unregister_queue_handler(void)
{
	RCU_INIT_POINTER(queue_handler, NULL);
	synchronize_rcu();
}
EXPORT_SYMBOL(nf_unregister_queue_handler);
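
/*
 * Illustrative sketch (not part of this file): a queueing backend such
 * as nfnetlink_queue registers itself at module init and tears down on
 * exit, roughly as below.  The callback names are placeholders:
 *
 *	static const struct nf_queue_handler qh = {
 *		.outfn		= my_enqueue_packet,
 *		.nf_hook_drop	= my_nf_hook_drop,
 *	};
 *
 *	nf_register_queue_handler(&qh);
 *	...
 *	nf_unregister_queue_handler();
 */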

void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
{
	struct nf_hook_state *state = &entry->state;

	/* Release those devices we held, or Alexey will kill me. */
	if (state->in)
		dev_put(state->in);
	if (state->out)
		dev_put(state->out);
	if (state->sk)
		sock_put(state->sk);
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	if (entry->skb->nf_bridge) {
		struct net_device *physdev;

		physdev = nf_bridge_get_physindev(entry->skb);
		if (physdev)
			dev_put(physdev);
		physdev = nf_bridge_get_physoutdev(entry->skb);
		if (physdev)
			dev_put(physdev);
	}
#endif
	/* Drop reference to owner of hook which queued us. */
	module_put(entry->elem->owner);
}
EXPORT_SYMBOL_GPL(nf_queue_entry_release_refs);

/* Bump dev refs so they don't vanish while packet is out */
bool nf_queue_entry_get_refs(struct nf_queue_entry *entry)
{
	struct nf_hook_state *state = &entry->state;

	if (!try_module_get(entry->elem->owner))
		return false;

	if (state->in)
		dev_hold(state->in);
	if (state->out)
		dev_hold(state->out);
	if (state->sk)
		sock_hold(state->sk);
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	if (entry->skb->nf_bridge) {
		struct net_device *physdev;

		physdev = nf_bridge_get_physindev(entry->skb);
		if (physdev)
			dev_hold(physdev);
		physdev = nf_bridge_get_physoutdev(entry->skb);
		if (physdev)
			dev_hold(physdev);
	}
#endif
	return true;
}
EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs);
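
/* Called when a hook is being unregistered: walk every network
 * namespace and let the queue handler drop any packets still queued
 * for that hook.  rtnl_lock() is held so for_each_net() can walk the
 * namespace list safely.
 */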
void nf_queue_nf_hook_drop(struct nf_hook_ops *ops)
{
	const struct nf_queue_handler *qh;
	struct net *net;

	rtnl_lock();
	rcu_read_lock();
	qh = rcu_dereference(queue_handler);
	if (qh) {
		for_each_net(net) {
			qh->nf_hook_drop(net, ops);
		}
	}
	rcu_read_unlock();
	rtnl_unlock();
}

/*
 * Any packet that leaves via this function must come back
 * through nf_reinject().
 */
int nf_queue(struct sk_buff *skb,
	     struct nf_hook_ops *elem,
	     struct nf_hook_state *state,
	     unsigned int queuenum)
{
	int status = -ENOENT;
	struct nf_queue_entry *entry = NULL;
	const struct nf_afinfo *afinfo;
	const struct nf_queue_handler *qh;

	/* QUEUE == DROP if no one is waiting, to be safe. */
	rcu_read_lock();

	qh = rcu_dereference(queue_handler);
	if (!qh) {
		status = -ESRCH;
		goto err_unlock;
	}

	afinfo = nf_get_afinfo(state->pf);
	if (!afinfo)
		goto err_unlock;
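
	/* Allocate the entry with room for the AF-specific route key
	 * behind it; afinfo->saveroute() fills that space below and
	 * afinfo->reroute() consumes it when the packet is reinjected.
	 */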
	entry = kmalloc(sizeof(*entry) + afinfo->route_key_size, GFP_ATOMIC);
	if (!entry) {
		status = -ENOMEM;
		goto err_unlock;
	}

	*entry = (struct nf_queue_entry) {
		.skb	= skb,
		.elem	= elem,
		.state	= *state,
		.size	= sizeof(*entry) + afinfo->route_key_size,
	};

	if (!nf_queue_entry_get_refs(entry)) {
		status = -ECANCELED;
		goto err_unlock;
	}
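	/* Pin the skb's dst and save the routing state so that both
	 * survive while the packet waits in the queue.
	 */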
	skb_dst_force(skb);
	afinfo->saveroute(skb, entry);
	status = qh->outfn(entry, queuenum);

	rcu_read_unlock();

	if (status < 0) {
		nf_queue_entry_release_refs(entry);
		goto err;
	}

	return 0;

err_unlock:
	rcu_read_unlock();
err:
	kfree(entry);
	return status;
}
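
/*
 * Called by the queue handler to hand a packet back with a verdict,
 * typically the one received from userspace: NF_REPEAT backs up so the
 * hook that queued the packet runs again, NF_ACCEPT resumes traversal
 * of the remaining hooks, and all other verdicts are handled by the
 * switch below.
 */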
void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
{
	struct sk_buff *skb = entry->skb;
	struct nf_hook_ops *elem = entry->elem;
	const struct nf_afinfo *afinfo;
	int err;

	rcu_read_lock();

	nf_queue_entry_release_refs(entry);

	/* Continue traversal iff userspace said ok... */
	if (verdict == NF_REPEAT) {
		elem = list_entry(elem->list.prev, struct nf_hook_ops, list);
		verdict = NF_ACCEPT;
	}

	if (verdict == NF_ACCEPT) {
		afinfo = nf_get_afinfo(entry->state.pf);
		if (!afinfo || afinfo->reroute(skb, entry) < 0)
			verdict = NF_DROP;
	}
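
	/* Drop the priority threshold so that no remaining hook is
	 * skipped when traversal continues in nf_iterate().
	 */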
	entry->state.thresh = INT_MIN;

	if (verdict == NF_ACCEPT) {
	next_hook:
		verdict = nf_iterate(&nf_hooks[entry->state.pf][entry->state.hook],
				     skb, &entry->state, &elem);
	}
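
	/* The verdict word packs the verdict code in its low bits and,
	 * for NF_QUEUE, the target queue number in the bits above
	 * NF_VERDICT_QBITS.
	 */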
	switch (verdict & NF_VERDICT_MASK) {
	case NF_ACCEPT:
	case NF_STOP:
		local_bh_disable();
		entry->state.okfn(entry->state.sk, skb);
		local_bh_enable();
		break;
	case NF_QUEUE:
		err = nf_queue(skb, elem, &entry->state,
			       verdict >> NF_VERDICT_QBITS);
		if (err < 0) {
			if (err == -ECANCELED)
				goto next_hook;
			if (err == -ESRCH &&
			    (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
				goto next_hook;
			kfree_skb(skb);
		}
		break;
	case NF_STOLEN:
		break;
	default:
		kfree_skb(skb);
	}
	rcu_read_unlock();
	kfree(entry);
}
EXPORT_SYMBOL(nf_reinject);