xfrm_device.c

/*
 * xfrm_device.c - IPsec device offloading code.
 *
 * Copyright (c) 2015 secunet Security Networks AG
 *
 * Author:
 * Steffen Klassert <steffen.klassert@secunet.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <linux/notifier.h>

#ifdef CONFIG_XFRM_OFFLOAD
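/* validate_xmit_xfrm - prepare an ESP-offloaded skb for transmission.
 *
 * Hand the skb straight back for GRO/inbound states, ask the caller to
 * retry (*again) while the per-cpu xfrm backlog is non-empty, software
 * segment GSO skbs that were rerouted away from the offload device, and
 * run the outer mode and type_offload xmit hooks on each (segmented)
 * skb, restoring the MAC header before returning the list.
 */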
struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
{
	int err;
	unsigned long flags;
	struct xfrm_state *x;
	struct sk_buff *skb2;
	struct softnet_data *sd;
	netdev_features_t esp_features = features;
	struct xfrm_offload *xo = xfrm_offload(skb);

	if (!xo)
		return skb;

	if (!(features & NETIF_F_HW_ESP))
		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);

	x = skb->sp->xvec[skb->sp->len - 1];
	if (xo->flags & XFRM_GRO || x->xso.flags & XFRM_OFFLOAD_INBOUND)
		return skb;

	local_irq_save(flags);
	sd = this_cpu_ptr(&softnet_data);
	err = !skb_queue_empty(&sd->xfrm_backlog);
	local_irq_restore(flags);

	if (err) {
		*again = true;
		return skb;
	}

	if (skb_is_gso(skb)) {
		struct net_device *dev = skb->dev;

		if (unlikely(x->xso.dev != dev)) {
			struct sk_buff *segs;

			/* Packet got rerouted, fixup features and segment it. */
			esp_features = esp_features & ~(NETIF_F_HW_ESP
							| NETIF_F_GSO_ESP);

			segs = skb_gso_segment(skb, esp_features);
			if (IS_ERR(segs)) {
				kfree_skb(skb);
				atomic_long_inc(&dev->tx_dropped);
				return NULL;
			} else {
				consume_skb(skb);
				skb = segs;
			}
		}
	}

	if (!skb->next) {
		x->outer_mode->xmit(x, skb);

		xo->flags |= XFRM_DEV_RESUME;

		err = x->type_offload->xmit(x, skb, esp_features);
		if (err) {
			if (err == -EINPROGRESS)
				return NULL;

			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			kfree_skb(skb);
			return NULL;
		}

		skb_push(skb, skb->data - skb_mac_header(skb));

		return skb;
	}

	skb2 = skb;

	do {
		struct sk_buff *nskb = skb2->next;
		skb2->next = NULL;

		xo = xfrm_offload(skb2);
		xo->flags |= XFRM_DEV_RESUME;

		x->outer_mode->xmit(x, skb2);

		err = x->type_offload->xmit(x, skb2, esp_features);
		if (!err) {
			skb2->next = nskb;
		} else if (err != -EINPROGRESS) {
			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			skb2->next = nskb;
			kfree_skb_list(skb2);
			return NULL;
		} else {
			if (skb == skb2)
				skb = nskb;

			if (!skb)
				return NULL;

			goto skip_push;
		}

		skb_push(skb2, skb2->data - skb_mac_header(skb2));

skip_push:
		skb2 = nskb;
	} while (skb2);

	return skb;
}
EXPORT_SYMBOL_GPL(validate_xmit_xfrm);

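/* xfrm_dev_state_add - set up hardware offload for an xfrm state.
 *
 * Resolve the offload device from the ifindex supplied by userspace or,
 * failing that, from a route lookup on the state's addresses.  Silently
 * fall back to software (return 0 with xso->dev unset) when the device
 * provides no xdo_dev_state_add, reject ESN states the device cannot
 * advance, and otherwise bind the state and call the driver's add hook.
 */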
int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
		       struct xfrm_user_offload *xuo)
{
	int err;
	struct dst_entry *dst;
	struct net_device *dev;
	struct xfrm_state_offload *xso = &x->xso;
	xfrm_address_t *saddr;
	xfrm_address_t *daddr;

	if (!x->type_offload)
		return -EINVAL;

	/* We don't yet support UDP encapsulation and TFC padding. */
	if (x->encap || x->tfcpad)
		return -EINVAL;

	dev = dev_get_by_index(net, xuo->ifindex);
	if (!dev) {
		if (!(xuo->flags & XFRM_OFFLOAD_INBOUND)) {
			saddr = &x->props.saddr;
			daddr = &x->id.daddr;
		} else {
			saddr = &x->id.daddr;
			daddr = &x->props.saddr;
		}

		dst = __xfrm_dst_lookup(net, 0, 0, saddr, daddr,
					x->props.family,
					xfrm_smark_get(0, x));
		if (IS_ERR(dst))
			return 0;

		dev = dst->dev;

		dev_hold(dev);
		dst_release(dst);
	}

	if (!dev->xfrmdev_ops || !dev->xfrmdev_ops->xdo_dev_state_add) {
		xso->dev = NULL;
		dev_put(dev);
		return 0;
	}

	if (x->props.flags & XFRM_STATE_ESN &&
	    !dev->xfrmdev_ops->xdo_dev_state_advance_esn) {
		xso->dev = NULL;
		dev_put(dev);
		return -EINVAL;
	}

	xso->dev = dev;
	xso->num_exthdrs = 1;
	xso->flags = xuo->flags;

	err = dev->xfrmdev_ops->xdo_dev_state_add(x);
	if (err) {
		xso->dev = NULL;
		dev_put(dev);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(xfrm_dev_state_add);

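/* xfrm_dev_offload_ok - may this skb be handed to the offload device?
 *
 * Require that the path ends at the offloading device (when one is
 * bound), that the child dst needs no further transform, and that the
 * packet (or each GSO segment) fits within the state's MTU; the driver
 * may still veto via xdo_dev_offload_ok().
 */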
bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
	int mtu;
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
	struct net_device *dev = x->xso.dev;

	if (!x->type_offload || x->encap)
		return false;

	if ((!dev || (dev == xfrm_dst_path(dst)->dev)) &&
	    (!xdst->child->xfrm && x->type->get_mtu)) {
		mtu = x->type->get_mtu(x, xdst->child_mtu_cached);

		if (skb->len <= mtu)
			goto ok;

		if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
			goto ok;
	}

	return false;

ok:
	if (dev && dev->xfrmdev_ops && dev->xfrmdev_ops->xdo_dev_offload_ok)
		return x->xso.dev->xfrmdev_ops->xdo_dev_offload_ok(skb, x);

	return true;
}
EXPORT_SYMBOL_GPL(xfrm_dev_offload_ok);

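/* xfrm_dev_resume - resume transmission of an skb after offload processing.
 *
 * Try to push the skb out on its tx queue directly; if the queue is
 * frozen/stopped or the transmit does not complete, park the skb on the
 * per-cpu xfrm backlog and kick the TX softirq to retry it later.
 */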
void xfrm_dev_resume(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	int ret = NETDEV_TX_BUSY;
	struct netdev_queue *txq;
	struct softnet_data *sd;
	unsigned long flags;

	rcu_read_lock();
	txq = netdev_pick_tx(dev, skb, NULL);

	HARD_TX_LOCK(dev, txq, smp_processor_id());
	if (!netif_xmit_frozen_or_stopped(txq))
		skb = dev_hard_start_xmit(skb, dev, txq, &ret);
	HARD_TX_UNLOCK(dev, txq);

	if (!dev_xmit_complete(ret)) {
		local_irq_save(flags);
		sd = this_cpu_ptr(&softnet_data);
		skb_queue_tail(&sd->xfrm_backlog, skb);
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(xfrm_dev_resume);

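/* xfrm_dev_backlog - drain the per-cpu xfrm transmit backlog.
 *
 * Splice the queued skbs onto a private list under the queue lock and
 * feed each one back through xfrm_dev_resume().
 */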
void xfrm_dev_backlog(struct softnet_data *sd)
{
	struct sk_buff_head *xfrm_backlog = &sd->xfrm_backlog;
	struct sk_buff_head list;
	struct sk_buff *skb;

	if (skb_queue_empty(xfrm_backlog))
		return;

	__skb_queue_head_init(&list);

	spin_lock(&xfrm_backlog->lock);
	skb_queue_splice_init(xfrm_backlog, &list);
	spin_unlock(&xfrm_backlog->lock);

	while (!skb_queue_empty(&list)) {
		skb = __skb_dequeue(&list);
		xfrm_dev_resume(skb);
	}
}
#endif

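/* Check that a device's advertised ESP offload features match the
 * xfrmdev_ops it provides; without CONFIG_XFRM_OFFLOAD any ESP offload
 * feature is refused.
 */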
static int xfrm_api_check(struct net_device *dev)
{
#ifdef CONFIG_XFRM_OFFLOAD
	if ((dev->features & NETIF_F_HW_ESP_TX_CSUM) &&
	    !(dev->features & NETIF_F_HW_ESP))
		return NOTIFY_BAD;

	if ((dev->features & NETIF_F_HW_ESP) &&
	    (!(dev->xfrmdev_ops &&
	       dev->xfrmdev_ops->xdo_dev_state_add &&
	       dev->xfrmdev_ops->xdo_dev_state_delete)))
		return NOTIFY_BAD;
#else
	if (dev->features & (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM))
		return NOTIFY_BAD;
#endif

	return NOTIFY_DONE;
}

static int xfrm_dev_register(struct net_device *dev)
{
	return xfrm_api_check(dev);
}

static int xfrm_dev_feat_change(struct net_device *dev)
{
	return xfrm_api_check(dev);
}

static int xfrm_dev_down(struct net_device *dev)
{
	if (dev->features & NETIF_F_HW_ESP)
		xfrm_dev_state_flush(dev_net(dev), dev, true);

	return NOTIFY_DONE;
}

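/* Netdevice notifier: validate offload capabilities when a device is
 * registered or changes features, and flush its offloaded states when
 * it goes down or is unregistered.
 */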
static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_REGISTER:
		return xfrm_dev_register(dev);

	case NETDEV_FEAT_CHANGE:
		return xfrm_dev_feat_change(dev);

	case NETDEV_DOWN:
	case NETDEV_UNREGISTER:
		return xfrm_dev_down(dev);
	}
	return NOTIFY_DONE;
}

static struct notifier_block xfrm_dev_notifier = {
	.notifier_call = xfrm_dev_event,
};

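/* Register the netdevice notifier during xfrm initialization. */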
void __init xfrm_dev_init(void)
{
	register_netdevice_notifier(&xfrm_dev_notifier);
}