/* net/ieee802154/6lowpan/tx.c */
  1. /* This program is free software; you can redistribute it and/or modify
  2. * it under the terms of the GNU General Public License version 2
  3. * as published by the Free Software Foundation.
  4. *
  5. * This program is distributed in the hope that it will be useful,
  6. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  7. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  8. * GNU General Public License for more details.
  9. */
  10. #include <net/6lowpan.h>
  11. #include <net/ndisc.h>
  12. #include <net/ieee802154_netdev.h>
  13. #include <net/mac802154.h>
  14. #include "6lowpan_i.h"
  15. #define LOWPAN_FRAG1_HEAD_SIZE 0x4
  16. #define LOWPAN_FRAGN_HEAD_SIZE 0x5
/* Link-layer address pair for one outgoing frame.  Written into the skb
 * headroom by lowpan_header_create() and read back by lowpan_header()
 * (see lowpan_skb_priv() for the headroom location).
 */
struct lowpan_addr_info {
	struct ieee802154_addr daddr;	/* 802.15.4 destination address */
	struct ieee802154_addr saddr;	/* 802.15.4 source address */
};
  21. static inline struct
  22. lowpan_addr_info *lowpan_skb_priv(const struct sk_buff *skb)
  23. {
  24. WARN_ON_ONCE(skb_headroom(skb) < sizeof(struct lowpan_addr_info));
  25. return (struct lowpan_addr_info *)(skb->data -
  26. sizeof(struct lowpan_addr_info));
  27. }
/* This callback will be called from AF_PACKET and IPv6 stack, the AF_PACKET
 * sockets gives an 8 byte array for addresses only!
 *
 * TODO I think AF_PACKET DGRAM (sending/receiving) RAW (sending) makes no
 * sense here. We should disable it, the right use-case would be AF_INET6
 * RAW/DGRAM sockets.
 *
 * Chooses the 802.15.4 source/destination addresses for an outgoing IPv6
 * packet and stores them in the skb headroom (lowpan_skb_priv()) for
 * lowpan_header() to pick up later.  Returns 0 on success (including the
 * silently-ignored non-IPv6 case) or -EINVAL when no destination is given.
 */
int lowpan_header_create(struct sk_buff *skb, struct net_device *ldev,
			 unsigned short type, const void *daddr,
			 const void *saddr, unsigned int len)
{
	struct wpan_dev *wpan_dev = lowpan_802154_dev(ldev)->wdev->ieee802154_ptr;
	struct lowpan_addr_info *info = lowpan_skb_priv(skb);
	struct lowpan_802154_neigh *llneigh = NULL;
	const struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct neighbour *n;

	if (!daddr)
		return -EINVAL;

	/* TODO:
	 * if this package isn't ipv6 one, where should it be routed?
	 */
	if (type != ETH_P_IPV6)
		return 0;

	/* intra-pan communication: both endpoints share our PAN id */
	info->saddr.pan_id = wpan_dev->pan_id;
	info->daddr.pan_id = info->saddr.pan_id;

	if (!memcmp(daddr, ldev->broadcast, EUI64_ADDR_LEN)) {
		/* link-layer broadcast uses the 16-bit broadcast short
		 * address
		 */
		info->daddr.short_addr = cpu_to_le16(IEEE802154_ADDR_BROADCAST);
		info->daddr.mode = IEEE802154_ADDR_SHORT;
	} else {
		__le16 short_addr = cpu_to_le16(IEEE802154_ADDR_SHORT_UNSPEC);

		/* try to learn the peer's short address from the IPv6
		 * neighbour cache; snapshot it under the neighbour lock
		 */
		n = neigh_lookup(&nd_tbl, &hdr->daddr, ldev);
		if (n) {
			llneigh = lowpan_802154_neigh(neighbour_priv(n));
			read_lock_bh(&n->lock);
			short_addr = llneigh->short_addr;
			read_unlock_bh(&n->lock);
		}

		if (llneigh &&
		    lowpan_802154_is_valid_src_short_addr(short_addr)) {
			info->daddr.short_addr = short_addr;
			info->daddr.mode = IEEE802154_ADDR_SHORT;
		} else {
			/* no usable short address: fall back to the 64-bit
			 * extended address supplied by the caller
			 */
			info->daddr.mode = IEEE802154_ADDR_LONG;
			ieee802154_be64_to_le64(&info->daddr.extended_addr,
						daddr);
		}
		if (n)
			neigh_release(n);
	}

	if (!saddr) {
		/* no explicit source: prefer our own short address when it
		 * is valid, otherwise use the extended address
		 */
		if (lowpan_802154_is_valid_src_short_addr(wpan_dev->short_addr)) {
			info->saddr.mode = IEEE802154_ADDR_SHORT;
			info->saddr.short_addr = wpan_dev->short_addr;
		} else {
			info->saddr.mode = IEEE802154_ADDR_LONG;
			info->saddr.extended_addr = wpan_dev->extended_addr;
		}
	} else {
		/* caller-supplied source is always an EUI-64 (see the
		 * AF_PACKET note above)
		 */
		info->saddr.mode = IEEE802154_ADDR_LONG;
		ieee802154_be64_to_le64(&info->saddr.extended_addr, saddr);
	}

	return 0;
}
  92. static struct sk_buff*
  93. lowpan_alloc_frag(struct sk_buff *skb, int size,
  94. const struct ieee802154_hdr *master_hdr, bool frag1)
  95. {
  96. struct net_device *wdev = lowpan_802154_dev(skb->dev)->wdev;
  97. struct sk_buff *frag;
  98. int rc;
  99. frag = alloc_skb(wdev->needed_headroom + wdev->needed_tailroom + size,
  100. GFP_ATOMIC);
  101. if (likely(frag)) {
  102. frag->dev = wdev;
  103. frag->priority = skb->priority;
  104. skb_reserve(frag, wdev->needed_headroom);
  105. skb_reset_network_header(frag);
  106. *mac_cb(frag) = *mac_cb(skb);
  107. if (frag1) {
  108. skb_put_data(frag, skb_mac_header(skb), skb->mac_len);
  109. } else {
  110. rc = wpan_dev_hard_header(frag, wdev,
  111. &master_hdr->dest,
  112. &master_hdr->source, size);
  113. if (rc < 0) {
  114. kfree_skb(frag);
  115. return ERR_PTR(rc);
  116. }
  117. }
  118. } else {
  119. frag = ERR_PTR(-ENOMEM);
  120. }
  121. return frag;
  122. }
  123. static int
  124. lowpan_xmit_fragment(struct sk_buff *skb, const struct ieee802154_hdr *wpan_hdr,
  125. u8 *frag_hdr, int frag_hdrlen,
  126. int offset, int len, bool frag1)
  127. {
  128. struct sk_buff *frag;
  129. raw_dump_inline(__func__, " fragment header", frag_hdr, frag_hdrlen);
  130. frag = lowpan_alloc_frag(skb, frag_hdrlen + len, wpan_hdr, frag1);
  131. if (IS_ERR(frag))
  132. return PTR_ERR(frag);
  133. skb_put_data(frag, frag_hdr, frag_hdrlen);
  134. skb_put_data(frag, skb_network_header(skb) + offset, len);
  135. raw_dump_table(__func__, " fragment dump", frag->data, frag->len);
  136. return dev_queue_xmit(frag);
  137. }
/* Split a datagram that exceeds the 802.15.4 payload into 6LoWPAN
 * FRAG1/FRAGN fragments and queue each for transmission.
 *
 * Consumes @skb on success, frees it on failure.  Returns
 * NET_XMIT_SUCCESS or the error from lowpan_xmit_fragment().
 */
static int
lowpan_xmit_fragmented(struct sk_buff *skb, struct net_device *ldev,
		       const struct ieee802154_hdr *wpan_hdr, u16 dgram_size,
		       u16 dgram_offset)
{
	__be16 frag_tag;
	u8 frag_hdr[5];
	int frag_cap, frag_len, payload_cap, rc;
	int skb_unprocessed, skb_offset;

	/* per-device tag identifies all fragments of this datagram */
	frag_tag = htons(lowpan_802154_dev(ldev)->fragment_tag);
	lowpan_802154_dev(ldev)->fragment_tag++;

	/* FRAG1 header: dispatch + high 3 bits of the 11-bit datagram
	 * size, then the low size byte, then the 16-bit tag
	 */
	frag_hdr[0] = LOWPAN_DISPATCH_FRAG1 | ((dgram_size >> 8) & 0x07);
	frag_hdr[1] = dgram_size & 0xff;
	memcpy(frag_hdr + 2, &frag_tag, sizeof(frag_tag));

	payload_cap = ieee802154_max_payload(wpan_hdr);

	/* fragment payload lengths must be multiples of 8 octets */
	frag_len = round_down(payload_cap - LOWPAN_FRAG1_HEAD_SIZE -
			      skb_network_header_len(skb), 8);

	skb_offset = skb_network_header_len(skb);
	skb_unprocessed = skb->len - skb->mac_len - skb_offset;

	/* FRAG1 carries the (compressed) network header in addition to
	 * the first frag_len payload bytes
	 */
	rc = lowpan_xmit_fragment(skb, wpan_hdr, frag_hdr,
				  LOWPAN_FRAG1_HEAD_SIZE, 0,
				  frag_len + skb_network_header_len(skb),
				  true);
	if (rc) {
		pr_debug("%s unable to send FRAG1 packet (tag: %d)",
			 __func__, ntohs(frag_tag));
		goto err;
	}

	/* switch the dispatch to FRAGN; byte 4 carries the offset */
	frag_hdr[0] &= ~LOWPAN_DISPATCH_FRAG1;
	frag_hdr[0] |= LOWPAN_DISPATCH_FRAGN;

	frag_cap = round_down(payload_cap - LOWPAN_FRAGN_HEAD_SIZE, 8);

	do {
		dgram_offset += frag_len;
		skb_offset += frag_len;
		skb_unprocessed -= frag_len;
		frag_len = min(frag_cap, skb_unprocessed);

		/* datagram_offset is expressed in units of 8 octets */
		frag_hdr[4] = dgram_offset >> 3;

		rc = lowpan_xmit_fragment(skb, wpan_hdr, frag_hdr,
					  LOWPAN_FRAGN_HEAD_SIZE, skb_offset,
					  frag_len, false);
		if (rc) {
			pr_debug("%s unable to send a FRAGN packet. (tag: %d, offset: %d)\n",
				 __func__, ntohs(frag_tag), skb_offset);
			goto err;
		}
	} while (skb_unprocessed > frag_cap);

	ldev->stats.tx_packets++;
	ldev->stats.tx_bytes += dgram_size;
	consume_skb(skb);
	return NET_XMIT_SUCCESS;

err:
	kfree_skb(skb);
	return rc;
}
  192. static int lowpan_header(struct sk_buff *skb, struct net_device *ldev,
  193. u16 *dgram_size, u16 *dgram_offset)
  194. {
  195. struct wpan_dev *wpan_dev = lowpan_802154_dev(ldev)->wdev->ieee802154_ptr;
  196. struct ieee802154_mac_cb *cb = mac_cb_init(skb);
  197. struct lowpan_addr_info info;
  198. memcpy(&info, lowpan_skb_priv(skb), sizeof(info));
  199. *dgram_size = skb->len;
  200. lowpan_header_compress(skb, ldev, &info.daddr, &info.saddr);
  201. /* dgram_offset = (saved bytes after compression) + lowpan header len */
  202. *dgram_offset = (*dgram_size - skb->len) + skb_network_header_len(skb);
  203. cb->type = IEEE802154_FC_TYPE_DATA;
  204. if (info.daddr.mode == IEEE802154_ADDR_SHORT &&
  205. ieee802154_is_broadcast_short_addr(info.daddr.short_addr))
  206. cb->ackreq = false;
  207. else
  208. cb->ackreq = wpan_dev->ackreq;
  209. return wpan_dev_hard_header(skb, lowpan_802154_dev(ldev)->wdev,
  210. &info.daddr, &info.saddr, 0);
  211. }
/* ndo_start_xmit handler for the 6LoWPAN virtual device: take ownership
 * of @skb, build the compressed 6LoWPAN + 802.15.4 headers, and either
 * queue the frame directly on the underlying wpan device or hand it to
 * the fragmentation path when it exceeds the MAC payload.
 */
netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *ldev)
{
	struct ieee802154_hdr wpan_hdr;
	int max_single, ret;
	u16 dgram_size, dgram_offset;

	pr_debug("package xmit\n");

	WARN_ON_ONCE(skb->len > IPV6_MIN_MTU);

	/* We must take a copy of the skb before we modify/replace the ipv6
	 * header as the header could be used elsewhere
	 */
	if (unlikely(skb_headroom(skb) < ldev->needed_headroom ||
		     skb_tailroom(skb) < ldev->needed_tailroom)) {
		struct sk_buff *nskb;

		/* not enough room for the MAC header/trailer: reallocate */
		nskb = skb_copy_expand(skb, ldev->needed_headroom,
				       ldev->needed_tailroom, GFP_ATOMIC);
		if (likely(nskb)) {
			consume_skb(skb);
			skb = nskb;
		} else {
			kfree_skb(skb);
			return NET_XMIT_DROP;
		}
	} else {
		/* room is fine, but we still need a private copy of the data */
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb)
			return NET_XMIT_DROP;
	}

	ret = lowpan_header(skb, ldev, &dgram_size, &dgram_offset);
	if (ret < 0) {
		kfree_skb(skb);
		return NET_XMIT_DROP;
	}

	/* re-read the MAC header we just built, for the payload-size
	 * calculation and (potentially) fragment header construction
	 */
	if (ieee802154_hdr_peek(skb, &wpan_hdr) < 0) {
		kfree_skb(skb);
		return NET_XMIT_DROP;
	}

	max_single = ieee802154_max_payload(&wpan_hdr);

	if (skb_tail_pointer(skb) - skb_network_header(skb) <= max_single) {
		/* fits in one frame: transmit on the wpan device directly */
		skb->dev = lowpan_802154_dev(ldev)->wdev;
		ldev->stats.tx_packets++;
		ldev->stats.tx_bytes += dgram_size;
		return dev_queue_xmit(skb);
	} else {
		netdev_tx_t rc;

		pr_debug("frame is too big, fragmentation is needed\n");
		rc = lowpan_xmit_fragmented(skb, ldev, &wpan_hdr, dgram_size,
					    dgram_offset);
		return rc < 0 ? NET_XMIT_DROP : rc;
	}
}