ip6_output.c

/*
 *	IPv6 output functions
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on linux/net/ipv4/ip_output.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Changes:
 *	A.N.Kuznetsov	:	arithmetic in fragmentation.
 *				extension headers are implemented.
 *				route changes now work.
 *				ip6_forward does not confuse sniffers.
 *				etc.
 *
 *	H. von Brand	:	Added missing #include <linux/string.h>
 *	Imran Patel	:	frag id should be in NBO
 *	Kazunori MIYAZAWA @USAGI
 *			:	add ip6_append_data and related functions
 *				for datagram xmit
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/tcp.h>
#include <linux/route.h>
#include <linux/module.h>
#include <linux/slab.h>

#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/rawv6.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/checksum.h>
#include <linux/mroute6.h>

int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));

int __ip6_local_out(struct sk_buff *skb)
{
	int len;

	len = skb->len - sizeof(struct ipv6hdr);
	if (len > IPV6_MAXPLEN)
		len = 0;
	ipv6_hdr(skb)->payload_len = htons(len);

	return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
		       skb_dst(skb)->dev, dst_output);
}

int ip6_local_out(struct sk_buff *skb)
{
	int err;

	err = __ip6_local_out(skb);
	if (likely(err == 1))
		err = dst_output(skb);

	return err;
}
EXPORT_SYMBOL_GPL(ip6_local_out);

/* dev_loopback_xmit for use with netfilter. */
static int ip6_dev_loopback_xmit(struct sk_buff *newskb)
{
	skb_reset_mac_header(newskb);
	__skb_pull(newskb, skb_network_offset(newskb));
	newskb->pkt_type = PACKET_LOOPBACK;
	newskb->ip_summed = CHECKSUM_UNNECESSARY;
	WARN_ON(!skb_dst(newskb));

	netif_rx_ni(newskb);
	return 0;
}

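/*
 * Final transmit step (summary added for readability): resolve the L2
 * neighbour for the route and hand the packet to the device.  Multicast
 * packets may additionally be looped back to local listeners via
 * ip6_dev_loopback_xmit() before being sent on the wire.
 */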
static int ip6_finish_output2(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct net_device *dev = dst->dev;
	struct neighbour *neigh;
	int res;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));

		if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(skb->sk) &&
		    ((mroute6_socket(dev_net(dev), skb) &&
		     !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) ||
		     ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr,
					 &ipv6_hdr(skb)->saddr))) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);

			/* Do not check for IFF_ALLMULTI; multicast routing
			 * is not supported in any case.
			 */
			if (newskb)
				NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING,
					newskb, NULL, newskb->dev,
					ip6_dev_loopback_xmit);

			if (ipv6_hdr(skb)->hop_limit == 0) {
				IP6_INC_STATS(dev_net(dev), idev,
					      IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return 0;
			}
		}

		IP6_UPD_PO_STATS(dev_net(dev), idev, IPSTATS_MIB_OUTMCAST,
				 skb->len);
	}

	rcu_read_lock();
	if (dst->hh) {
		res = neigh_hh_output(dst->hh, skb);

		rcu_read_unlock();
		return res;
	} else {
		neigh = dst_get_neighbour(dst);
		if (neigh) {
			res = neigh->output(skb);

			rcu_read_unlock();
			return res;
		}
		rcu_read_unlock();
	}

	IP6_INC_STATS_BH(dev_net(dst->dev),
			 ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EINVAL;
}

static int ip6_finish_output(struct sk_buff *skb)
{
	if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
	    dst_allfrag(skb_dst(skb)))
		return ip6_fragment(skb, ip6_finish_output2);
	else
		return ip6_finish_output2(skb);
}

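/*
 * Output entry point used as the dst output method (note added for
 * readability): drop everything if IPv6 is administratively disabled on the
 * outgoing device, otherwise run the POST_ROUTING netfilter hook and finish
 * output, fragmenting if needed.
 */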
int ip6_output(struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev;
	struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));

	if (unlikely(idev->cnf.disable_ipv6)) {
		IP6_INC_STATS(dev_net(dev), idev,
			      IPSTATS_MIB_OUTDISCARDS);
		kfree_skb(skb);
		return 0;
	}

	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, NULL, dev,
			    ip6_finish_output,
			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
}

/*
 *	xmit an sk_buff (used by TCP, SCTP and DCCP)
 */

int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
	     struct ipv6_txoptions *opt)
{
	struct net *net = sock_net(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_addr *first_hop = &fl6->daddr;
	struct dst_entry *dst = skb_dst(skb);
	struct ipv6hdr *hdr;
	u8 proto = fl6->flowi6_proto;
	int seg_len = skb->len;
	int hlimit = -1;
	int tclass = 0;
	u32 mtu;

	if (opt) {
		unsigned int head_room;

		/* First: exthdrs may take lots of space (~8K for now)
		 * MAX_HEADER is not enough.
		 */
		head_room = opt->opt_nflen + opt->opt_flen;
		seg_len += head_room;
		head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);

		if (skb_headroom(skb) < head_room) {
			struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
			if (skb2 == NULL) {
				IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
					      IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return -ENOBUFS;
			}
			kfree_skb(skb);
			skb = skb2;
			skb_set_owner_w(skb, sk);
		}
		if (opt->opt_flen)
			ipv6_push_frag_opts(skb, opt, &proto);
		if (opt->opt_nflen)
			ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop);
	}

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	hdr = ipv6_hdr(skb);

	/*
	 *	Fill in the IPv6 header
	 */
	if (np) {
		tclass = np->tclass;
		hlimit = np->hop_limit;
	}
	if (hlimit < 0)
		hlimit = ip6_dst_hoplimit(dst);

	*(__be32 *)hdr = htonl(0x60000000 | (tclass << 20)) | fl6->flowlabel;

	hdr->payload_len = htons(seg_len);
	hdr->nexthdr = proto;
	hdr->hop_limit = hlimit;

	ipv6_addr_copy(&hdr->saddr, &fl6->saddr);
	ipv6_addr_copy(&hdr->daddr, first_hop);

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	mtu = dst_mtu(dst);
	if ((skb->len <= mtu) || skb->local_df || skb_is_gso(skb)) {
		IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)),
			      IPSTATS_MIB_OUT, skb->len);
		return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
			       dst->dev, dst_output);
	}

	if (net_ratelimit())
		printk(KERN_DEBUG "IPv6: sending pkt_too_big to self\n");
	skb->dev = dst->dev;
	icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return -EMSGSIZE;
}

EXPORT_SYMBOL(ip6_xmit);

/*
 *	To avoid extra problems ND packets are sent through this
 *	routine. It's code duplication but I really want to avoid
 *	extra checks since ipv6_build_header is used by TCP (which
 *	for us is performance critical)
 */

int ip6_nd_hdr(struct sock *sk, struct sk_buff *skb, struct net_device *dev,
	       const struct in6_addr *saddr, const struct in6_addr *daddr,
	       int proto, int len)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6hdr *hdr;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	skb_reset_network_header(skb);
	skb_put(skb, sizeof(struct ipv6hdr));
	hdr = ipv6_hdr(skb);

	*(__be32 *)hdr = htonl(0x60000000);

	hdr->payload_len = htons(len);
	hdr->nexthdr = proto;
	hdr->hop_limit = np->hop_limit;

	ipv6_addr_copy(&hdr->saddr, saddr);
	ipv6_addr_copy(&hdr->daddr, daddr);

	return 0;
}

static int ip6_call_ra_chain(struct sk_buff *skb, int sel)
{
	struct ip6_ra_chain *ra;
	struct sock *last = NULL;

	read_lock(&ip6_ra_lock);
	for (ra = ip6_ra_chain; ra; ra = ra->next) {
		struct sock *sk = ra->sk;
		if (sk && ra->sel == sel &&
		    (!sk->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == skb->dev->ifindex)) {
			if (last) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					rawv6_rcv(last, skb2);
			}
			last = sk;
		}
	}

	if (last) {
		rawv6_rcv(last, skb);
		read_unlock(&ip6_ra_lock);
		return 1;
	}
	read_unlock(&ip6_ra_lock);
	return 0;
}

static int ip6_forward_proxy_check(struct sk_buff *skb)
{
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	u8 nexthdr = hdr->nexthdr;
	int offset;

	if (ipv6_ext_hdr(nexthdr)) {
		offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr);
		if (offset < 0)
			return 0;
	} else
		offset = sizeof(struct ipv6hdr);

	if (nexthdr == IPPROTO_ICMPV6) {
		struct icmp6hdr *icmp6;

		if (!pskb_may_pull(skb, (skb_network_header(skb) +
					 offset + 1 - skb->data)))
			return 0;

		icmp6 = (struct icmp6hdr *)(skb_network_header(skb) + offset);

		switch (icmp6->icmp6_type) {
		case NDISC_ROUTER_SOLICITATION:
		case NDISC_ROUTER_ADVERTISEMENT:
		case NDISC_NEIGHBOUR_SOLICITATION:
		case NDISC_NEIGHBOUR_ADVERTISEMENT:
		case NDISC_REDIRECT:
			/* For reaction involving unicast neighbor discovery
			 * message destined to the proxied address, pass it to
			 * input function.
			 */
			return 1;
		default:
			break;
		}
	}

	/*
	 * The proxying router can't forward traffic sent to a link-local
	 * address, so signal the sender and discard the packet. This
	 * behavior is clarified by the MIPv6 specification.
	 */
	if (ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) {
		dst_link_failure(skb);
		return -1;
	}

	return 0;
}

static inline int ip6_forward_finish(struct sk_buff *skb)
{
	return dst_output(skb);
}

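/*
 * Forwarding path (summary added for readability): sanity-check the packet,
 * deliver Router Alert packets to interested raw sockets, enforce the hop
 * limit, honour proxy NDP, possibly emit an ICMPv6 redirect when the packet
 * leaves on the interface it arrived on, check the path MTU and finally
 * decrement hop_limit before running the FORWARD netfilter hook.
 */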
int ip6_forward(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct inet6_skb_parm *opt = IP6CB(skb);
	struct net *net = dev_net(dst->dev);
	struct neighbour *n;
	u32 mtu;

	if (net->ipv6.devconf_all->forwarding == 0)
		goto error;

	if (skb_warn_if_lro(skb))
		goto drop;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
		IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	if (skb->pkt_type != PACKET_HOST)
		goto drop;

	skb_forward_csum(skb);

	/*
	 *	We do not do any processing on
	 *	RA packets, pushing them to user level AS IS
	 *	without any warranty that the application will be able
	 *	to interpret them. The reason is that we
	 *	cannot make anything clever here.
	 *
	 *	We are not an end node, so if the packet contains
	 *	AH/ESP, we cannot do anything.
	 *	Defragmentation would also be a mistake: RA packets
	 *	cannot be fragmented, because there is no warranty
	 *	that different fragments will go along one path. --ANK
	 */
	if (opt->ra) {
		u8 *ptr = skb_network_header(skb) + opt->ra;
		if (ip6_call_ra_chain(skb, (ptr[2]<<8) + ptr[3]))
			return 0;
	}

	/*
	 *	check and decrement ttl
	 */
	if (hdr->hop_limit <= 1) {
		/* Force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0);
		IP6_INC_STATS_BH(net,
				 ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS);

		kfree_skb(skb);
		return -ETIMEDOUT;
	}

	/* XXX: idev->cnf.proxy_ndp? */
	if (net->ipv6.devconf_all->proxy_ndp &&
	    pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
		int proxied = ip6_forward_proxy_check(skb);
		if (proxied > 0)
			return ip6_input(skb);
		else if (proxied < 0) {
			IP6_INC_STATS(net, ip6_dst_idev(dst),
				      IPSTATS_MIB_INDISCARDS);
			goto drop;
		}
	}

	if (!xfrm6_route_forward(skb)) {
		IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
		goto drop;
	}
	dst = skb_dst(skb);

	/* IPv6 specs say nothing about it, but it is clear that we cannot
	 * send redirects to source routed frames.
	 * We don't send redirects to frames decapsulated from IPsec.
	 */
	n = dst_get_neighbour(dst);
	if (skb->dev == dst->dev && n && opt->srcrt == 0 && !skb_sec_path(skb)) {
		struct in6_addr *target = NULL;
		struct rt6_info *rt;

		/*
		 *	incoming and outgoing devices are the same
		 *	send a redirect.
		 */

		rt = (struct rt6_info *) dst;
		if ((rt->rt6i_flags & RTF_GATEWAY))
			target = (struct in6_addr *)&n->primary_key;
		else
			target = &hdr->daddr;

		if (!rt->rt6i_peer)
			rt6_bind_peer(rt, 1);

		/* Limit redirects both by destination (here)
		 * and by source (inside ndisc_send_redirect)
		 */
		if (inet_peer_xrlim_allow(rt->rt6i_peer, 1*HZ))
			ndisc_send_redirect(skb, n, target);
	} else {
		int addrtype = ipv6_addr_type(&hdr->saddr);

		/* This check is security critical. */
		if (addrtype == IPV6_ADDR_ANY ||
		    addrtype & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LOOPBACK))
			goto error;
		if (addrtype & IPV6_ADDR_LINKLOCAL) {
			icmpv6_send(skb, ICMPV6_DEST_UNREACH,
				    ICMPV6_NOT_NEIGHBOUR, 0);
			goto error;
		}
	}

	mtu = dst_mtu(dst);
	if (mtu < IPV6_MIN_MTU)
		mtu = IPV6_MIN_MTU;

	if (skb->len > mtu && !skb_is_gso(skb)) {
		/* Again, force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		IP6_INC_STATS_BH(net,
				 ip6_dst_idev(dst), IPSTATS_MIB_INTOOBIGERRORS);
		IP6_INC_STATS_BH(net,
				 ip6_dst_idev(dst), IPSTATS_MIB_FRAGFAILS);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	if (skb_cow(skb, dst->dev->hard_header_len)) {
		IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTDISCARDS);
		goto drop;
	}

	hdr = ipv6_hdr(skb);

	/* Mangling hops number delayed to point after skb COW */

	hdr->hop_limit--;

	IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dst->dev,
		       ip6_forward_finish);

error:
	IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS);
drop:
	kfree_skb(skb);
	return -EINVAL;
}

static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	skb_dst_drop(to);
	skb_dst_set(to, dst_clone(skb_dst(from)));
	to->dev = from->dev;
	to->mark = from->mark;

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
	nf_copy(to, from);
#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
	to->nf_trace = from->nf_trace;
#endif
	skb_copy_secmark(to, from);
}

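/*
 * Note (added for readability): walk the chain of extension headers that
 * must precede a Fragment header (Hop-by-Hop, Routing, and a Destination
 * Options header carrying a Home Address option) and return the offset at
 * which the Fragment header has to be inserted; *nexthdr is left pointing
 * at the "next header" byte that will be overwritten with NEXTHDR_FRAGMENT.
 */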
int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
{
	u16 offset = sizeof(struct ipv6hdr);
	struct ipv6_opt_hdr *exthdr =
				(struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1);
	unsigned int packet_len = skb->tail - skb->network_header;
	int found_rhdr = 0;
	*nexthdr = &ipv6_hdr(skb)->nexthdr;

	while (offset + 1 <= packet_len) {

		switch (**nexthdr) {

		case NEXTHDR_HOP:
			break;
		case NEXTHDR_ROUTING:
			found_rhdr = 1;
			break;
		case NEXTHDR_DEST:
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
			if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0)
				break;
#endif
			if (found_rhdr)
				return offset;
			break;
		default:
			return offset;
		}

		offset += ipv6_optlen(exthdr);
		*nexthdr = &exthdr->nexthdr;
		exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
						 offset);
	}

	return offset;
}

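/*
 * Fragment ID selection (note added for readability): a per-boot random
 * value seeds a small hash table of counters indexed by destination
 * address, so successive fragments to the same destination get increasing
 * identification values without relying on a single global counter.
 */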
static u32 hashidentrnd __read_mostly;
#define FID_HASH_SZ 16
static u32 ipv6_fragmentation_id[FID_HASH_SZ];

void __init initialize_hashidentrnd(void)
{
	get_random_bytes(&hashidentrnd, sizeof(hashidentrnd));
}

static u32 __ipv6_select_ident(const struct in6_addr *addr)
{
	u32 newid, oldid, hash = jhash2((u32 *)addr, 4, hashidentrnd);
	u32 *pid = &ipv6_fragmentation_id[hash % FID_HASH_SZ];

	do {
		oldid = *pid;
		newid = oldid + 1;
		if (!(hash + newid))
			newid++;
	} while (cmpxchg(pid, oldid, newid) != oldid);

	return hash + newid;
}

void ipv6_select_ident(struct frag_hdr *fhdr, struct in6_addr *addr)
{
	fhdr->identification = htonl(__ipv6_select_ident(addr));
}

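/*
 * Fragmentation (summary added for readability): if the skb already carries
 * a well-formed frag_list, the fast path reuses those buffers and only
 * prepends per-fragment headers; otherwise the slow path below allocates a
 * fresh skb for every fragment and copies the payload into it.
 */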
int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
	struct sk_buff *frag;
	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
	struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
	struct ipv6hdr *tmp_hdr;
	struct frag_hdr *fh;
	unsigned int mtu, hlen, left, len;
	__be32 frag_id = 0;
	int ptr, offset = 0, err = 0;
	u8 *prevhdr, nexthdr = 0;
	struct net *net = dev_net(skb_dst(skb)->dev);

	hlen = ip6_find_1stfragopt(skb, &prevhdr);
	nexthdr = *prevhdr;

	mtu = ip6_skb_dst_mtu(skb);

	/* We must not fragment if the socket is set to force MTU discovery
	 * or if the skb is not generated by a local socket.
	 */
	if (!skb->local_df && skb->len > mtu) {
		skb->dev = skb_dst(skb)->dev;
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
			      IPSTATS_MIB_FRAGFAILS);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	if (np && np->frag_size < mtu) {
		if (np->frag_size)
			mtu = np->frag_size;
	}
	mtu -= hlen + sizeof(struct frag_hdr);

	if (skb_has_frag_list(skb)) {
		int first_len = skb_pagelen(skb);
		struct sk_buff *frag2;

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    skb_cloned(skb))
			goto slow_path;

		skb_walk_frags(skb, frag) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
				goto slow_path_clean;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path_clean;

			BUG_ON(frag->sk);
			if (skb->sk) {
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
			}
			skb->truesize -= frag->truesize;
		}

		err = 0;
		offset = 0;
		frag = skb_shinfo(skb)->frag_list;
		skb_frag_list_init(skb);
		/* BUILD HEADER */

		*prevhdr = NEXTHDR_FRAGMENT;
		tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
		if (!tmp_hdr) {
			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_FRAGFAILS);
			return -ENOMEM;
		}

		__skb_pull(skb, hlen);
		fh = (struct frag_hdr *)__skb_push(skb, sizeof(struct frag_hdr));
		__skb_push(skb, hlen);
		skb_reset_network_header(skb);
		memcpy(skb_network_header(skb), tmp_hdr, hlen);

		ipv6_select_ident(fh, &rt->rt6i_dst.addr);
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		fh->frag_off = htons(IP6_MF);
		frag_id = fh->identification;

		first_len = skb_pagelen(skb);
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		ipv6_hdr(skb)->payload_len = htons(first_len -
						   sizeof(struct ipv6hdr));

		dst_hold(&rt->dst);

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				skb_reset_transport_header(frag);
				fh = (struct frag_hdr *)__skb_push(frag, sizeof(struct frag_hdr));
				__skb_push(frag, hlen);
				skb_reset_network_header(frag);
				memcpy(skb_network_header(frag), tmp_hdr,
				       hlen);
				offset += skb->len - hlen - sizeof(struct frag_hdr);
				fh->nexthdr = nexthdr;
				fh->reserved = 0;
				fh->frag_off = htons(offset);
				if (frag->next != NULL)
					fh->frag_off |= htons(IP6_MF);
				fh->identification = frag_id;
				ipv6_hdr(frag)->payload_len =
						htons(frag->len -
						      sizeof(struct ipv6hdr));
				ip6_copy_metadata(frag, skb);
			}

			err = output(skb);
			if (!err)
				IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
					      IPSTATS_MIB_FRAGCREATES);

			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		kfree(tmp_hdr);

		if (err == 0) {
			IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
				      IPSTATS_MIB_FRAGOKS);
			dst_release(&rt->dst);
			return 0;
		}

		while (frag) {
			skb = frag->next;
			kfree_skb(frag);
			frag = skb;
		}

		IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
			      IPSTATS_MIB_FRAGFAILS);
		dst_release(&rt->dst);
		return err;

slow_path_clean:
		skb_walk_frags(skb, frag2) {
			if (frag2 == frag)
				break;
			frag2->sk = NULL;
			frag2->destructor = NULL;
			skb->truesize += frag2->truesize;
		}
	}

slow_path:
	left = skb->len - hlen;		/* Space per frame */
	ptr = hlen;			/* Where to start from */

	/*
	 *	Fragment the datagram.
	 */

	*prevhdr = NEXTHDR_FRAGMENT;

	/*
	 *	Keep copying data until we run out.
	 */
	while (left > 0) {
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		 * then align the next start on an eight byte boundary */
		if (len < left) {
			len &= ~7;
		}
		/*
		 *	Allocate buffer.
		 */

		if ((frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
				      LL_ALLOCATED_SPACE(rt->dst.dev), GFP_ATOMIC)) == NULL) {
			NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n");
			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_FRAGFAILS);
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip6_copy_metadata(frag, skb);
		skb_reserve(frag, LL_RESERVED_SPACE(rt->dst.dev));
		skb_put(frag, len + hlen + sizeof(struct frag_hdr));
		skb_reset_network_header(frag);
		fh = (struct frag_hdr *)(skb_network_header(frag) + hlen);
		frag->transport_header = (frag->network_header + hlen +
					  sizeof(struct frag_hdr));

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */
		if (skb->sk)
			skb_set_owner_w(frag, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */
		skb_copy_from_linear_data(skb, skb_network_header(frag), hlen);

		/*
		 *	Build fragment header.
		 */
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		if (!frag_id) {
			ipv6_select_ident(fh, &rt->rt6i_dst.addr);
			frag_id = fh->identification;
		} else
			fh->identification = frag_id;

		/*
		 *	Copy a block of the IP datagram.
		 */
		if (skb_copy_bits(skb, ptr, skb_transport_header(frag), len))
			BUG();
		left -= len;

		fh->frag_off = htons(offset);
		if (left > 0)
			fh->frag_off |= htons(IP6_MF);
		ipv6_hdr(frag)->payload_len = htons(frag->len -
						    sizeof(struct ipv6hdr));

		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */
		err = output(frag);
		if (err)
			goto fail;

		IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
			      IPSTATS_MIB_FRAGCREATES);
	}
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_FRAGOKS);
	kfree_skb(skb);
	return err;

fail:
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return err;
}

static inline int ip6_rt_check(const struct rt6key *rt_key,
			       const struct in6_addr *fl_addr,
			       const struct in6_addr *addr_cache)
{
	return (rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) &&
		(addr_cache == NULL || !ipv6_addr_equal(fl_addr, addr_cache));
}

static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
					  struct dst_entry *dst,
					  const struct flowi6 *fl6)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct rt6_info *rt = (struct rt6_info *)dst;

	if (!dst)
		goto out;

	/* Yes, checking route validity in the not-connected
	 * case is not very simple. Take into account
	 * that we do not support routing by source, TOS,
	 * and MSG_DONTROUTE		--ANK (980726)
	 *
	 * 1. ip6_rt_check(): If route was host route,
	 *    check that cached destination is current.
	 *    If it is network route, we still may
	 *    check its validity using saved pointer
	 *    to the last used address: daddr_cache.
	 *    We do not want to save whole address now,
	 *    (because main consumer of this service
	 *    is tcp, which does not have this problem),
	 *    so that the last trick works only on connected
	 *    sockets.
	 * 2. oif also should be the same.
	 */
	if (ip6_rt_check(&rt->rt6i_dst, &fl6->daddr, np->daddr_cache) ||
#ifdef CONFIG_IPV6_SUBTREES
	    ip6_rt_check(&rt->rt6i_src, &fl6->saddr, np->saddr_cache) ||
#endif
	    (fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex)) {
		dst_release(dst);
		dst = NULL;
	}

out:
	return dst;
}

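/*
 * Common lookup helper (note added for readability): resolve the route for
 * the flow if the caller did not supply one, pick a source address when the
 * flow's saddr is unspecified and, with optimistic DAD, fall back to the
 * default router's dst entry while the preferred neighbour is unresolved.
 */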
static int ip6_dst_lookup_tail(struct sock *sk,
			       struct dst_entry **dst, struct flowi6 *fl6)
{
	struct net *net = sock_net(sk);
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
	struct neighbour *n;
#endif
	int err;

	if (*dst == NULL)
		*dst = ip6_route_output(net, sk, fl6);

	if ((err = (*dst)->error))
		goto out_err_release;

	if (ipv6_addr_any(&fl6->saddr)) {
		struct rt6_info *rt = (struct rt6_info *) *dst;
		err = ip6_route_get_saddr(net, rt, &fl6->daddr,
					  sk ? inet6_sk(sk)->srcprefs : 0,
					  &fl6->saddr);
		if (err)
			goto out_err_release;
	}

#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
	/*
	 * Here if the dst entry we've looked up
	 * has a neighbour entry that is in the INCOMPLETE
	 * state and the src address from the flow is
	 * marked as OPTIMISTIC, we release the found
	 * dst entry and replace it instead with the
	 * dst entry of the nexthop router
	 */
	rcu_read_lock();
	n = dst_get_neighbour(*dst);
	if (n && !(n->nud_state & NUD_VALID)) {
		struct inet6_ifaddr *ifp;
		struct flowi6 fl_gw6;
		int redirect;

		rcu_read_unlock();
		ifp = ipv6_get_ifaddr(net, &fl6->saddr,
				      (*dst)->dev, 1);

		redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC);
		if (ifp)
			in6_ifa_put(ifp);

		if (redirect) {
			/*
			 * We need to get the dst entry for the
			 * default router instead
			 */
			dst_release(*dst);
			memcpy(&fl_gw6, fl6, sizeof(struct flowi6));
			memset(&fl_gw6.daddr, 0, sizeof(struct in6_addr));
			*dst = ip6_route_output(net, sk, &fl_gw6);
			if ((err = (*dst)->error))
				goto out_err_release;
		}
	} else {
		rcu_read_unlock();
	}
#endif

	return 0;

out_err_release:
	if (err == -ENETUNREACH)
		IP6_INC_STATS_BH(net, NULL, IPSTATS_MIB_OUTNOROUTES);
	dst_release(*dst);
	*dst = NULL;
	return err;
}

/**
 *	ip6_dst_lookup - perform route lookup on flow
 *	@sk: socket which provides route info
 *	@dst: pointer to dst_entry * for result
 *	@fl6: flow to lookup
 *
 *	This function performs a route lookup on the given flow.
 *
 *	It returns zero on success, or a standard errno code on error.
 */
int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6)
{
	*dst = NULL;
	return ip6_dst_lookup_tail(sk, dst, fl6);
}
EXPORT_SYMBOL_GPL(ip6_dst_lookup);

/**
 *	ip6_dst_lookup_flow - perform route lookup on flow with ipsec
 *	@sk: socket which provides route info
 *	@fl6: flow to lookup
 *	@final_dst: final destination address for ipsec lookup
 *	@can_sleep: we are in a sleepable context
 *
 *	This function performs a route lookup on the given flow.
 *
 *	It returns a valid dst pointer on success, or a pointer-encoded
 *	error code.
 */
struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
				      const struct in6_addr *final_dst,
				      bool can_sleep)
{
	struct dst_entry *dst = NULL;
	int err;

	err = ip6_dst_lookup_tail(sk, &dst, fl6);
	if (err)
		return ERR_PTR(err);
	if (final_dst)
		ipv6_addr_copy(&fl6->daddr, final_dst);
	if (can_sleep)
		fl6->flowi6_flags |= FLOWI_FLAG_CAN_SLEEP;

	return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
}
EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow);

/**
 *	ip6_sk_dst_lookup_flow - perform socket cached route lookup on flow
 *	@sk: socket which provides the dst cache and route info
 *	@fl6: flow to lookup
 *	@final_dst: final destination address for ipsec lookup
 *	@can_sleep: we are in a sleepable context
 *
 *	This function performs a route lookup on the given flow with the
 *	possibility of using the cached route in the socket if it is valid.
 *	It will take the socket dst lock when operating on the dst cache.
 *	As a result, this function can only be used in process context.
 *
 *	It returns a valid dst pointer on success, or a pointer-encoded
 *	error code.
 */
struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
					 const struct in6_addr *final_dst,
					 bool can_sleep)
{
	struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
	int err;

	dst = ip6_sk_dst_check(sk, dst, fl6);

	err = ip6_dst_lookup_tail(sk, &dst, fl6);
	if (err)
		return ERR_PTR(err);
	if (final_dst)
		ipv6_addr_copy(&fl6->daddr, final_dst);
	if (can_sleep)
		fl6->flowi6_flags |= FLOWI_FLAG_CAN_SLEEP;

	return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
}
EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow);

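/*
 * UFO path (note added for readability): when the device can do UDP
 * fragmentation offload, queue the whole datagram as a single GSO skb and
 * record the fragment id; the device (or the GSO layer) splits it later.
 */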
static inline int ip6_ufo_append_data(struct sock *sk,
			int getfrag(void *from, char *to, int offset, int len,
			int odd, struct sk_buff *skb),
			void *from, int length, int hh_len, int fragheaderlen,
			int transhdrlen, int mtu, unsigned int flags,
			struct rt6_info *rt)

{
	struct sk_buff *skb;
	int err;

	/* There is support for UDP large send offload by network
	 * device, so create one single skb packet containing the complete
	 * udp datagram
	 */
	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
		skb = sock_alloc_send_skb(sk,
			hh_len + fragheaderlen + transhdrlen + 20,
			(flags & MSG_DONTWAIT), &err);
		if (skb == NULL)
			return -ENOMEM;

		/* reserve space for Hardware header */
		skb_reserve(skb, hh_len);

		/* create space for UDP/IP header */
		skb_put(skb, fragheaderlen + transhdrlen);

		/* initialize network header pointer */
		skb_reset_network_header(skb);

		/* initialize protocol header pointer */
		skb->transport_header = skb->network_header + fragheaderlen;

		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum = 0;
	}

	err = skb_append_datato_frags(sk, skb, getfrag, from,
				      (length - transhdrlen));
	if (!err) {
		struct frag_hdr fhdr;

		/* Specify the length of each IPv6 datagram fragment.
		 * It has to be a multiple of 8.
		 */
		skb_shinfo(skb)->gso_size = (mtu - fragheaderlen -
					     sizeof(struct frag_hdr)) & ~7;
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
		ipv6_select_ident(&fhdr, &rt->rt6i_dst.addr);
		skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
		__skb_queue_tail(&sk->sk_write_queue, skb);

		return 0;
	}
	/* There is not enough support to do UDP LSO,
	 * so follow the normal path
	 */
	kfree_skb(skb);

	return err;
}

static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src,
					       gfp_t gfp)
{
	return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
}

static inline struct ipv6_rt_hdr *ip6_rthdr_dup(struct ipv6_rt_hdr *src,
						gfp_t gfp)
{
	return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
}

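/*
 * Corked output (summary added for readability): on the first call for a
 * cork, duplicate the tx options, pin the route and record the MTU; each
 * call then appends user data to skbs on sk_write_queue, growing the last
 * skb or allocating a new, fragment-sized one (or taking the UFO path for
 * large UDP sends).  ip6_push_pending_frames() later turns the queue into
 * actual packets.
 */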
int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
	int offset, int len, int odd, struct sk_buff *skb),
	void *from, int length, int transhdrlen,
	int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi6 *fl6,
	struct rt6_info *rt, unsigned int flags, int dontfrag)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct inet_cork *cork;
	struct sk_buff *skb;
	unsigned int maxfraglen, fragheaderlen;
	int exthdrlen;
	int hh_len;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	int csummode = CHECKSUM_NONE;
	__u8 tx_flags = 0;

	if (flags&MSG_PROBE)
		return 0;
	cork = &inet->cork.base;
	if (skb_queue_empty(&sk->sk_write_queue)) {
		/*
		 * setup for corking
		 */
		if (opt) {
			if (WARN_ON(np->cork.opt))
				return -EINVAL;

			np->cork.opt = kmalloc(opt->tot_len, sk->sk_allocation);
			if (unlikely(np->cork.opt == NULL))
				return -ENOBUFS;

			np->cork.opt->tot_len = opt->tot_len;
			np->cork.opt->opt_flen = opt->opt_flen;
			np->cork.opt->opt_nflen = opt->opt_nflen;

			np->cork.opt->dst0opt = ip6_opt_dup(opt->dst0opt,
							    sk->sk_allocation);
			if (opt->dst0opt && !np->cork.opt->dst0opt)
				return -ENOBUFS;

			np->cork.opt->dst1opt = ip6_opt_dup(opt->dst1opt,
							    sk->sk_allocation);
			if (opt->dst1opt && !np->cork.opt->dst1opt)
				return -ENOBUFS;

			np->cork.opt->hopopt = ip6_opt_dup(opt->hopopt,
							   sk->sk_allocation);
			if (opt->hopopt && !np->cork.opt->hopopt)
				return -ENOBUFS;

			np->cork.opt->srcrt = ip6_rthdr_dup(opt->srcrt,
							    sk->sk_allocation);
			if (opt->srcrt && !np->cork.opt->srcrt)
				return -ENOBUFS;

			/* need source address above miyazawa*/
		}
		dst_hold(&rt->dst);
		cork->dst = &rt->dst;
		inet->cork.fl.u.ip6 = *fl6;
		np->cork.hop_limit = hlimit;
		np->cork.tclass = tclass;
		mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
		      rt->dst.dev->mtu : dst_mtu(rt->dst.path);
		if (np->frag_size < mtu) {
			if (np->frag_size)
				mtu = np->frag_size;
		}
		cork->fragsize = mtu;
		if (dst_allfrag(rt->dst.path))
			cork->flags |= IPCORK_ALLFRAG;
		cork->length = 0;
		sk->sk_sndmsg_page = NULL;
		sk->sk_sndmsg_off = 0;
		exthdrlen = rt->dst.header_len + (opt ? opt->opt_flen : 0) -
			    rt->rt6i_nfheader_len;
		length += exthdrlen;
		transhdrlen += exthdrlen;
	} else {
		rt = (struct rt6_info *)cork->dst;
		fl6 = &inet->cork.fl.u.ip6;
		opt = np->cork.opt;
		transhdrlen = 0;
		exthdrlen = 0;
		mtu = cork->fragsize;
	}

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);

	fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
			(opt ? opt->opt_nflen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen - sizeof(struct frag_hdr);

	if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
		if (cork->length + length > sizeof(struct ipv6hdr) + IPV6_MAXPLEN - fragheaderlen) {
			ipv6_local_error(sk, EMSGSIZE, fl6, mtu - exthdrlen);
			return -EMSGSIZE;
		}
	}

	/* For UDP, check if TX timestamp is enabled */
	if (sk->sk_type == SOCK_DGRAM) {
		err = sock_tx_timestamp(sk, &tx_flags);
		if (err)
			goto error;
	}

	/*
	 * Let's try using as much space as possible.
	 * Use MTU if total length of the message fits into the MTU.
	 * Otherwise, we need to reserve fragment header and
	 * fragment alignment (= 8-15 octets, in total).
	 *
	 * Note that we may need to "move" the data from the tail of
	 * the buffer to the new fragment when we split
	 * the message.
	 *
	 * FIXME: It may be fragmented into multiple chunks
	 *        at once if non-fragmentable extension headers
	 *        are too large.
	 * --yoshfuji
	 */

	cork->length += length;
	if (length > mtu) {
		int proto = sk->sk_protocol;
		if (dontfrag && (proto == IPPROTO_UDP || proto == IPPROTO_RAW)) {
			ipv6_local_rxpmtu(sk, fl6, mtu - exthdrlen);
			return -EMSGSIZE;
		}

		if (proto == IPPROTO_UDP &&
		    (rt->dst.dev->features & NETIF_F_UFO)) {

			err = ip6_ufo_append_data(sk, getfrag, from, length,
						  hh_len, fragheaderlen,
						  transhdrlen, mtu, flags, rt);
			if (err)
				goto error;
			return 0;
		}
	}

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;

		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
			struct sk_buff *skb_prev;
alloc_new_skb:
			skb_prev = skb;

			/* There's no room in the current skb */
			if (skb_prev)
				fraggap = skb_prev->len - maxfraglen;
			else
				fraggap = 0;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;
			if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
				datalen = maxfraglen - fragheaderlen;

			fraglen = datalen + fragheaderlen;
			if ((flags & MSG_MORE) &&
			    !(rt->dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = datalen + fragheaderlen;

			/*
			 * The last fragment gets additional space at tail.
			 * Note: we overallocate on fragments with MSG_MORE
			 * because we have no idea if we're the last one.
			 */
			if (datalen == length + fraggap)
				alloclen += rt->dst.trailer_len;

			/*
			 * We just reserve space for fragment header.
			 * Note: this may be overallocation if the message
			 * (without MSG_MORE) fits into the MTU.
			 */
			alloclen += sizeof(struct frag_hdr);

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len, 1,
							   sk->sk_allocation);
				if (unlikely(skb == NULL))
					err = -ENOBUFS;
				else {
					/* Only the initial fragment
					 * is time stamped.
					 */
					tx_flags = 0;
				}
			}
			if (skb == NULL)
				goto error;
			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = csummode;
			skb->csum = 0;
			/* reserve for fragmentation */
			skb_reserve(skb, hh_len + sizeof(struct frag_hdr));

			if (sk->sk_type == SOCK_DGRAM)
				skb_shinfo(skb)->tx_flags = tx_flags;

			/*
			 *	Find where to start putting bytes
			 */
			data = skb_put(skb, fraglen);
			skb_set_network_header(skb, exthdrlen);
			data += fragheaderlen;
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}
			copy = datalen - transhdrlen - fraggap;
			if (copy < 0) {
				err = -EINVAL;
				kfree_skb(skb);
				goto error;
			} else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			/*
			 * Put the packet on the pending queue
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
						offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
			struct page *page = sk->sk_sndmsg_page;
			int off = sk->sk_sndmsg_off;
			unsigned int left;

			if (page && (left = PAGE_SIZE - off) > 0) {
				if (copy >= left)
					copy = left;
				if (page != frag->page) {
					if (i == MAX_SKB_FRAGS) {
						err = -EMSGSIZE;
						goto error;
					}
					get_page(page);
					skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
					frag = &skb_shinfo(skb)->frags[i];
				}
			} else if (i < MAX_SKB_FRAGS) {
				if (copy > PAGE_SIZE)
					copy = PAGE_SIZE;
				page = alloc_pages(sk->sk_allocation, 0);
				if (page == NULL) {
					err = -ENOMEM;
					goto error;
				}
				sk->sk_sndmsg_page = page;
				sk->sk_sndmsg_off = 0;

				skb_fill_page_desc(skb, i, page, 0, 0);
				frag = &skb_shinfo(skb)->frags[i];
			} else {
				err = -EMSGSIZE;
				goto error;
			}
			if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
				err = -EFAULT;
				goto error;
			}
			sk->sk_sndmsg_off += copy;
			frag->size += copy;
			skb->len += copy;
			skb->data_len += copy;
			skb->truesize += copy;
			atomic_add(copy, &sk->sk_wmem_alloc);
		}
		offset += copy;
		length -= copy;
	}

	return 0;

error:
	cork->length -= length;
	IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
	return err;
}

static void ip6_cork_release(struct inet_sock *inet, struct ipv6_pinfo *np)
{
	if (np->cork.opt) {
		kfree(np->cork.opt->dst0opt);
		kfree(np->cork.opt->dst1opt);
		kfree(np->cork.opt->hopopt);
		kfree(np->cork.opt->srcrt);
		kfree(np->cork.opt);
		np->cork.opt = NULL;
	}

	if (inet->cork.base.dst) {
		dst_release(inet->cork.base.dst);
		inet->cork.base.dst = NULL;
		inet->cork.base.flags &= ~IPCORK_ALLFRAG;
	}
	memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
}

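/*
 * Flush of the corked queue (summary added for readability): collapse the
 * skbs queued by ip6_append_data() into one skb with a frag_list, push the
 * queued extension headers and the IPv6 header, account ICMPv6 statistics
 * where applicable and send the result via ip6_local_out().
 */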
int ip6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct in6_addr final_dst_buf, *final_dst = &final_dst_buf;
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct net *net = sock_net(sk);
	struct ipv6hdr *hdr;
	struct ipv6_txoptions *opt = np->cork.opt;
	struct rt6_info *rt = (struct rt6_info *)inet->cork.base.dst;
	struct flowi6 *fl6 = &inet->cork.fl.u.ip6;
	unsigned char proto = fl6->flowi6_proto;
	int err = 0;

	if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb_network_header(skb))
		__skb_pull(skb, skb_network_offset(skb));
	while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
		__skb_pull(tmp_skb, skb_network_header_len(skb));
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	/* Allow local fragmentation. */
	if (np->pmtudisc < IPV6_PMTUDISC_DO)
		skb->local_df = 1;

	ipv6_addr_copy(final_dst, &fl6->daddr);
	__skb_pull(skb, skb_network_header_len(skb));
	if (opt && opt->opt_flen)
		ipv6_push_frag_opts(skb, opt, &proto);
	if (opt && opt->opt_nflen)
		ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst);

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	hdr = ipv6_hdr(skb);

	*(__be32 *)hdr = fl6->flowlabel |
		     htonl(0x60000000 | ((int)np->cork.tclass << 20));

	hdr->hop_limit = np->cork.hop_limit;
	hdr->nexthdr = proto;
	ipv6_addr_copy(&hdr->saddr, &fl6->saddr);
	ipv6_addr_copy(&hdr->daddr, final_dst);

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	skb_dst_set(skb, dst_clone(&rt->dst));
	IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
	if (proto == IPPROTO_ICMPV6) {
		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));

		ICMP6MSGOUT_INC_STATS_BH(net, idev, icmp6_hdr(skb)->icmp6_type);
		ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
	}

	err = ip6_local_out(skb);
	if (err) {
		if (err > 0)
			err = net_xmit_errno(err);
		if (err)
			goto error;
	}

out:
	ip6_cork_release(inet, np);
	return err;
error:
	IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
	goto out;
}

void ip6_flush_pending_frames(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) {
		if (skb_dst(skb))
			IP6_INC_STATS(sock_net(sk), ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_OUTDISCARDS);
		kfree_skb(skb);
	}

	ip6_cork_release(inet_sk(sk), inet6_sk(sk));
}