exthdrs.c

/*
 * Extension Header handling for IPv6
 * Linux INET6 implementation
 *
 * Authors:
 * Pedro Roque		<roque@di.fc.ul.pt>
 * Andi Kleen		<ak@muc.de>
 * Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

/* Changes:
 * yoshfuji		: ensure not to overrun while parsing
 *			  tlv options.
 * Mitsuru KANDA @USAGI and: Remove ipv6_parse_exthdrs().
 * YOSHIFUJI Hideaki @USAGI  Register inbound extension header
 *			     handlers as inet6_protocol{}.
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/icmpv6.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <net/dst.h>
#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/rawv6.h>
#include <net/ndisc.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/calipso.h>
#if IS_ENABLED(CONFIG_IPV6_MIP6)
#include <net/xfrm.h>
#endif
#include <linux/seg6.h>
#include <net/seg6.h>
#ifdef CONFIG_IPV6_SEG6_HMAC
#include <net/seg6_hmac.h>
#endif

#include <linux/uaccess.h>
/*
 * Parsing tlv encoded headers.
 *
 * Parsing function "func" returns true if parsing succeeded
 * and false if it failed.
 * It MUST NOT touch skb->h.
 */

struct tlvtype_proc {
        int     type;
        bool    (*func)(struct sk_buff *skb, int offset);
};

/*********************
  Generic functions
 *********************/

/* An unknown option is detected, decide what to do */

static bool ip6_tlvopt_unknown(struct sk_buff *skb, int optoff,
                               bool disallow_unknowns)
{
        if (disallow_unknowns) {
                /* If unknown TLVs are disallowed by configuration
                 * then always silently drop packet. Note this also
                 * means no ICMP parameter problem is sent which
                 * could be a good property to mitigate a reflection DOS
                 * attack.
                 */
                goto drop;
        }

        switch ((skb_network_header(skb)[optoff] & 0xC0) >> 6) {
        case 0: /* ignore */
                return true;

        case 1: /* drop packet */
                break;

        case 3: /* Send ICMP if not a multicast address and drop packet */
                /* Actually, it is a redundant check; icmp_send
                 * will recheck in any case.
                 */
                if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr))
                        break;
                /* fall through */
        case 2: /* send ICMP PARM PROB regardless and drop packet */
                icmpv6_param_prob(skb, ICMPV6_UNK_OPTION, optoff);
                return false;
        }

drop:
        kfree_skb(skb);
        return false;
}

/* Parse tlv encoded option header (hop-by-hop or destination) */
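/* Parsing notes, derived from the code below:
 *
 *  - the option area starts right after the two-byte extension header
 *    prologue (next header + header length), hence "off += 2; len -= 2";
 *  - Pad1/PadN runs are tracked via padlen: a padding run longer than
 *    7 bytes is rejected, and PadN payload must be all zeroes (RFC 4942);
 *  - every non-padding TLV is counted against max_count and dispatched
 *    through the procs table; unknown types fall back to
 *    ip6_tlvopt_unknown();
 *  - a negative max_count means "use -max_count as the limit and silently
 *    drop packets that carry unknown options";
 *  - on any error the skb is freed and false is returned, so callers must
 *    not touch the skb afterwards.
 */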
static bool ip6_parse_tlv(const struct tlvtype_proc *procs,
                          struct sk_buff *skb,
                          int max_count)
{
        int len = (skb_transport_header(skb)[1] + 1) << 3;
        const unsigned char *nh = skb_network_header(skb);
        int off = skb_network_header_len(skb);
        const struct tlvtype_proc *curr;
        bool disallow_unknowns = false;
        int tlv_count = 0;
        int padlen = 0;

        if (unlikely(max_count < 0)) {
                disallow_unknowns = true;
                max_count = -max_count;
        }

        if (skb_transport_offset(skb) + len > skb_headlen(skb))
                goto bad;

        off += 2;
        len -= 2;

        while (len > 0) {
                int optlen = nh[off + 1] + 2;
                int i;

                switch (nh[off]) {
                case IPV6_TLV_PAD1:
                        optlen = 1;
                        padlen++;
                        if (padlen > 7)
                                goto bad;
                        break;

                case IPV6_TLV_PADN:
                        /* RFC 2460 states that the purpose of PadN is
                         * to align the containing header to multiples
                         * of 8. 7 is therefore the highest valid value.
                         * See also RFC 4942, Section 2.1.9.5.
                         */
                        padlen += optlen;
                        if (padlen > 7)
                                goto bad;
                        /* RFC 4942 recommends that receiving hosts
                         * actively check that the PadN payload contains
                         * only zeroes.
                         */
                        for (i = 2; i < optlen; i++) {
                                if (nh[off + i] != 0)
                                        goto bad;
                        }
                        break;

                default: /* Other TLV code so scan list */
                        if (optlen > len)
                                goto bad;
                        tlv_count++;
                        if (tlv_count > max_count)
                                goto bad;

                        for (curr = procs; curr->type >= 0; curr++) {
                                if (curr->type == nh[off]) {
                                        /* type specific length/alignment
                                           checks will be performed in the
                                           func(). */
                                        if (curr->func(skb, off) == false)
                                                return false;
                                        break;
                                }
                        }
                        if (curr->type < 0 &&
                            !ip6_tlvopt_unknown(skb, off, disallow_unknowns))
                                return false;

                        padlen = 0;
                        break;
                }
                off += optlen;
                len -= optlen;
        }

        if (len == 0)
                return true;
bad:
        kfree_skb(skb);
        return false;
}
/*****************************
  Destination options header.
 *****************************/

#if IS_ENABLED(CONFIG_IPV6_MIP6)
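/* Mobile IPv6 Home Address Option (HAO) handler.  It accepts at most one
 * HAO per packet, requires a 16-byte option carrying a unicast address,
 * lets xfrm validate the (daddr, home address) pair, and then swaps the
 * home address into the IPv6 source address so that upper layers see the
 * mobile node's home address.  The checksum state is downgraded to
 * CHECKSUM_NONE because the swap invalidates a CHECKSUM_COMPLETE value.
 */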
static bool ipv6_dest_hao(struct sk_buff *skb, int optoff)
{
        struct ipv6_destopt_hao *hao;
        struct inet6_skb_parm *opt = IP6CB(skb);
        struct ipv6hdr *ipv6h = ipv6_hdr(skb);
        int ret;

        if (opt->dsthao) {
                net_dbg_ratelimited("hao duplicated\n");
                goto discard;
        }
        opt->dsthao = opt->dst1;
        opt->dst1 = 0;

        hao = (struct ipv6_destopt_hao *)(skb_network_header(skb) + optoff);

        if (hao->length != 16) {
                net_dbg_ratelimited("hao invalid option length = %d\n",
                                    hao->length);
                goto discard;
        }

        if (!(ipv6_addr_type(&hao->addr) & IPV6_ADDR_UNICAST)) {
                net_dbg_ratelimited("hao is not an unicast addr: %pI6\n",
                                    &hao->addr);
                goto discard;
        }

        ret = xfrm6_input_addr(skb, (xfrm_address_t *)&ipv6h->daddr,
                               (xfrm_address_t *)&hao->addr, IPPROTO_DSTOPTS);
        if (unlikely(ret < 0))
                goto discard;

        if (skb_cloned(skb)) {
                if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
                        goto discard;

                /* update all pointers used below to refer to the
                 * copied skbuff
                 */
                hao = (struct ipv6_destopt_hao *)(skb_network_header(skb) +
                                                  optoff);
                ipv6h = ipv6_hdr(skb);
        }

        if (skb->ip_summed == CHECKSUM_COMPLETE)
                skb->ip_summed = CHECKSUM_NONE;

        swap(ipv6h->saddr, hao->addr);

        if (skb->tstamp == 0)
                __net_timestamp(skb);

        return true;

discard:
        kfree_skb(skb);
        return false;
}
#endif

static const struct tlvtype_proc tlvprocdestopt_lst[] = {
#if IS_ENABLED(CONFIG_IPV6_MIP6)
        {
                .type   = IPV6_TLV_HAO,
                .func   = ipv6_dest_hao,
        },
#endif
        {-1,                    NULL}
};
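/* Destination Options header receive handler (registered further down for
 * IPPROTO_DSTOPTS).  It pulls the complete extension header, enforces the
 * max_dst_opts_len sysctl, records the header offset in the skb control
 * block and parses the TLVs.  On success it advances skb->transport_header
 * past the header and returns 1; on error it returns -1 with the skb
 * already freed and IPSTATS_MIB_INHDRERRORS bumped.
 */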
static int ipv6_destopt_rcv(struct sk_buff *skb)
{
        struct inet6_dev *idev = __in6_dev_get(skb->dev);
        struct inet6_skb_parm *opt = IP6CB(skb);
#if IS_ENABLED(CONFIG_IPV6_MIP6)
        __u16 dstbuf;
#endif
        struct dst_entry *dst = skb_dst(skb);
        struct net *net = dev_net(skb->dev);
        int extlen;

        if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) ||
            !pskb_may_pull(skb, (skb_transport_offset(skb) +
                                 ((skb_transport_header(skb)[1] + 1) << 3)))) {
                __IP6_INC_STATS(dev_net(dst->dev), idev,
                                IPSTATS_MIB_INHDRERRORS);
fail_and_free:
                kfree_skb(skb);
                return -1;
        }

        extlen = (skb_transport_header(skb)[1] + 1) << 3;
        if (extlen > net->ipv6.sysctl.max_dst_opts_len)
                goto fail_and_free;

        opt->lastopt = opt->dst1 = skb_network_header_len(skb);
#if IS_ENABLED(CONFIG_IPV6_MIP6)
        dstbuf = opt->dst1;
#endif

        if (ip6_parse_tlv(tlvprocdestopt_lst, skb,
                          init_net.ipv6.sysctl.max_dst_opts_cnt)) {
                skb->transport_header += extlen;
                opt = IP6CB(skb);
#if IS_ENABLED(CONFIG_IPV6_MIP6)
                opt->nhoff = dstbuf;
#else
                opt->nhoff = opt->dst1;
#endif
                return 1;
        }

        __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
        return -1;
}
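/* Fix up skb->csum (CHECKSUM_COMPLETE) after the SRH has been modified:
 * first fold in the difference caused by the segments_left decrement,
 * then the difference between the current IPv6 destination address and
 * the next segment that is about to replace it.
 */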
static void seg6_update_csum(struct sk_buff *skb)
{
        struct ipv6_sr_hdr *hdr;
        struct in6_addr *addr;
        __be32 from, to;

        /* srh is at transport offset and seg_left is already decremented
         * but daddr is not yet updated with next segment
         */

        hdr = (struct ipv6_sr_hdr *)skb_transport_header(skb);
        addr = hdr->segments + hdr->segments_left;

        hdr->segments_left++;
        from = *(__be32 *)hdr;

        hdr->segments_left--;
        to = *(__be32 *)hdr;

        /* update skb csum with diff resulting from seg_left decrement */
        update_csum_diff4(skb, from, to);

        /* compute csum diff between current and next segment and update */
        update_csum_diff16(skb, (__be32 *)(&ipv6_hdr(skb)->daddr),
                           (__be32 *)addr);
}
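/* Segment Routing Header (SRH, routing type 4) processing.  The header is
 * accepted only if seg6_enabled is set both globally and on the ingress
 * device (and, with CONFIG_IPV6_SEG6_HMAC, if the HMAC TLV validates).
 * When segments_left == 0 the packet is either decapsulated (inner IPv6)
 * and re-injected via netif_rx(), or handed on to the next header.
 * Otherwise segments_left is decremented, the next segment becomes the
 * destination address, the route is looked up again, and if that route is
 * a local (loopback) one the header is processed again for the next
 * segment, decrementing hop_limit each pass.
 */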
static int ipv6_srh_rcv(struct sk_buff *skb)
{
        struct inet6_skb_parm *opt = IP6CB(skb);
        struct net *net = dev_net(skb->dev);
        struct ipv6_sr_hdr *hdr;
        struct inet6_dev *idev;
        struct in6_addr *addr;
        int accept_seg6;

        hdr = (struct ipv6_sr_hdr *)skb_transport_header(skb);

        idev = __in6_dev_get(skb->dev);

        accept_seg6 = net->ipv6.devconf_all->seg6_enabled;
        if (accept_seg6 > idev->cnf.seg6_enabled)
                accept_seg6 = idev->cnf.seg6_enabled;

        if (!accept_seg6) {
                kfree_skb(skb);
                return -1;
        }

#ifdef CONFIG_IPV6_SEG6_HMAC
        if (!seg6_hmac_validate_skb(skb)) {
                kfree_skb(skb);
                return -1;
        }
#endif

looped_back:
        if (hdr->segments_left == 0) {
                if (hdr->nexthdr == NEXTHDR_IPV6) {
                        int offset = (hdr->hdrlen + 1) << 3;

                        skb_postpull_rcsum(skb, skb_network_header(skb),
                                           skb_network_header_len(skb));

                        if (!pskb_pull(skb, offset)) {
                                kfree_skb(skb);
                                return -1;
                        }
                        skb_postpull_rcsum(skb, skb_transport_header(skb),
                                           offset);

                        skb_reset_network_header(skb);
                        skb_reset_transport_header(skb);
                        skb->encapsulation = 0;

                        __skb_tunnel_rx(skb, skb->dev, net);

                        netif_rx(skb);
                        return -1;
                }

                opt->srcrt = skb_network_header_len(skb);
                opt->lastopt = opt->srcrt;
                skb->transport_header += (hdr->hdrlen + 1) << 3;
                opt->nhoff = (&hdr->nexthdr) - skb_network_header(skb);

                return 1;
        }

        if (hdr->segments_left >= (hdr->hdrlen >> 1)) {
                __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
                icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
                                  ((&hdr->segments_left) -
                                   skb_network_header(skb)));
                return -1;
        }

        if (skb_cloned(skb)) {
                if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
                        __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
                                        IPSTATS_MIB_OUTDISCARDS);
                        kfree_skb(skb);
                        return -1;
                }
        }

        hdr = (struct ipv6_sr_hdr *)skb_transport_header(skb);

        hdr->segments_left--;
        addr = hdr->segments + hdr->segments_left;

        skb_push(skb, sizeof(struct ipv6hdr));

        if (skb->ip_summed == CHECKSUM_COMPLETE)
                seg6_update_csum(skb);

        ipv6_hdr(skb)->daddr = *addr;

        skb_dst_drop(skb);

        ip6_route_input(skb);

        if (skb_dst(skb)->error) {
                dst_input(skb);
                return -1;
        }

        if (skb_dst(skb)->dev->flags & IFF_LOOPBACK) {
                if (ipv6_hdr(skb)->hop_limit <= 1) {
                        __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
                        icmpv6_send(skb, ICMPV6_TIME_EXCEED,
                                    ICMPV6_EXC_HOPLIMIT, 0);
                        kfree_skb(skb);
                        return -1;
                }
                ipv6_hdr(skb)->hop_limit--;

                skb_pull(skb, sizeof(struct ipv6hdr));
                goto looped_back;
        }

        dst_input(skb);

        return -1;
}
/********************************
  Routing header.
 ********************************/

/* called with rcu_read_lock() */
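/* Routing header receive handler (IPPROTO_ROUTING).  Type 4 (segment
 * routing) is handed straight to ipv6_srh_rcv().  Type 2 (Mobile IPv6)
 * is accepted only when accept_source_route allows it and the header
 * carries exactly one segment; any other type triggers an ICMP parameter
 * problem.  When segments are left, the function applies the RFC 2460
 * forwarding algorithm: swap the next listed address into daddr, redo the
 * route lookup, and loop locally while the resulting route is a loopback
 * one, decrementing hop_limit on each pass.
 */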
static int ipv6_rthdr_rcv(struct sk_buff *skb)
{
        struct inet6_dev *idev = __in6_dev_get(skb->dev);
        struct inet6_skb_parm *opt = IP6CB(skb);
        struct in6_addr *addr = NULL;
        struct in6_addr daddr;
        int n, i;
        struct ipv6_rt_hdr *hdr;
        struct rt0_hdr *rthdr;
        struct net *net = dev_net(skb->dev);
        int accept_source_route = net->ipv6.devconf_all->accept_source_route;

        idev = __in6_dev_get(skb->dev);
        if (idev && accept_source_route > idev->cnf.accept_source_route)
                accept_source_route = idev->cnf.accept_source_route;

        if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) ||
            !pskb_may_pull(skb, (skb_transport_offset(skb) +
                                 ((skb_transport_header(skb)[1] + 1) << 3)))) {
                __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
                kfree_skb(skb);
                return -1;
        }

        hdr = (struct ipv6_rt_hdr *)skb_transport_header(skb);

        if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) ||
            skb->pkt_type != PACKET_HOST) {
                __IP6_INC_STATS(net, idev, IPSTATS_MIB_INADDRERRORS);
                kfree_skb(skb);
                return -1;
        }

        /* segment routing */
        if (hdr->type == IPV6_SRCRT_TYPE_4)
                return ipv6_srh_rcv(skb);

looped_back:
        if (hdr->segments_left == 0) {
                switch (hdr->type) {
#if IS_ENABLED(CONFIG_IPV6_MIP6)
                case IPV6_SRCRT_TYPE_2:
                        /* Silently discard a type 2 header unless it was
                         * processed by this node itself (i.e. addr was set
                         * by the segment handling further down before
                         * looping back).
                         */
                        if (!addr) {
                                __IP6_INC_STATS(net, idev,
                                                IPSTATS_MIB_INADDRERRORS);
                                kfree_skb(skb);
                                return -1;
                        }
                        break;
#endif
                default:
                        break;
                }
                opt->lastopt = opt->srcrt = skb_network_header_len(skb);
                skb->transport_header += (hdr->hdrlen + 1) << 3;
                opt->dst0 = opt->dst1;
                opt->dst1 = 0;
                opt->nhoff = (&hdr->nexthdr) - skb_network_header(skb);
                return 1;
        }

        switch (hdr->type) {
#if IS_ENABLED(CONFIG_IPV6_MIP6)
        case IPV6_SRCRT_TYPE_2:
                if (accept_source_route < 0)
                        goto unknown_rh;
                /* Silently discard invalid RTH type 2 */
                if (hdr->hdrlen != 2 || hdr->segments_left != 1) {
                        __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
                        kfree_skb(skb);
                        return -1;
                }
                break;
#endif
        default:
                goto unknown_rh;
        }

        /*
         * This is the routing header forwarding algorithm from
         * RFC 2460, page 16.
         */

        n = hdr->hdrlen >> 1;

        if (hdr->segments_left > n) {
                __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
                icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
                                  ((&hdr->segments_left) -
                                   skb_network_header(skb)));
                return -1;
        }

        /* We are about to mangle packet header. Be careful!
           Do not damage packets queued somewhere.
         */
        if (skb_cloned(skb)) {
                /* the copy is a forwarded packet */
                if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
                        __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
                                        IPSTATS_MIB_OUTDISCARDS);
                        kfree_skb(skb);
                        return -1;
                }
                hdr = (struct ipv6_rt_hdr *)skb_transport_header(skb);
        }

        if (skb->ip_summed == CHECKSUM_COMPLETE)
                skb->ip_summed = CHECKSUM_NONE;

        i = n - --hdr->segments_left;

        rthdr = (struct rt0_hdr *) hdr;
        addr = rthdr->addr;
        addr += i - 1;

        switch (hdr->type) {
#if IS_ENABLED(CONFIG_IPV6_MIP6)
        case IPV6_SRCRT_TYPE_2:
                if (xfrm6_input_addr(skb, (xfrm_address_t *)addr,
                                     (xfrm_address_t *)&ipv6_hdr(skb)->saddr,
                                     IPPROTO_ROUTING) < 0) {
                        __IP6_INC_STATS(net, idev, IPSTATS_MIB_INADDRERRORS);
                        kfree_skb(skb);
                        return -1;
                }
                if (!ipv6_chk_home_addr(dev_net(skb_dst(skb)->dev), addr)) {
                        __IP6_INC_STATS(net, idev, IPSTATS_MIB_INADDRERRORS);
                        kfree_skb(skb);
                        return -1;
                }
                break;
#endif
        default:
                break;
        }

        if (ipv6_addr_is_multicast(addr)) {
                __IP6_INC_STATS(net, idev, IPSTATS_MIB_INADDRERRORS);
                kfree_skb(skb);
                return -1;
        }

        daddr = *addr;
        *addr = ipv6_hdr(skb)->daddr;
        ipv6_hdr(skb)->daddr = daddr;

        skb_dst_drop(skb);
        ip6_route_input(skb);
        if (skb_dst(skb)->error) {
                skb_push(skb, skb->data - skb_network_header(skb));
                dst_input(skb);
                return -1;
        }

        if (skb_dst(skb)->dev->flags&IFF_LOOPBACK) {
                if (ipv6_hdr(skb)->hop_limit <= 1) {
                        __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
                        icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
                                    0);
                        kfree_skb(skb);
                        return -1;
                }
                ipv6_hdr(skb)->hop_limit--;
                goto looped_back;
        }

        skb_push(skb, skb->data - skb_network_header(skb));
        dst_input(skb);
        return -1;

unknown_rh:
        __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
        icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
                          (&hdr->type) - skb_network_header(skb));
        return -1;
}
static const struct inet6_protocol rthdr_protocol = {
        .handler        =       ipv6_rthdr_rcv,
        .flags          =       INET6_PROTO_NOPOLICY,
};

static const struct inet6_protocol destopt_protocol = {
        .handler        =       ipv6_destopt_rcv,
        .flags          =       INET6_PROTO_NOPOLICY,
};

static const struct inet6_protocol nodata_protocol = {
        .handler        =       dst_discard,
        .flags          =       INET6_PROTO_NOPOLICY,
};
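/* Register the extension-header handlers that are dispatched through the
 * inet6_protocol table: Routing, Destination Options and No Next Header.
 * (Hop-by-hop options are handled separately, via ipv6_parse_hopopts()
 * below.)  Registrations are unwound in reverse order if a later step
 * fails.
 */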
int __init ipv6_exthdrs_init(void)
{
        int ret;

        ret = inet6_add_protocol(&rthdr_protocol, IPPROTO_ROUTING);
        if (ret)
                goto out;

        ret = inet6_add_protocol(&destopt_protocol, IPPROTO_DSTOPTS);
        if (ret)
                goto out_rthdr;

        ret = inet6_add_protocol(&nodata_protocol, IPPROTO_NONE);
        if (ret)
                goto out_destopt;

out:
        return ret;
out_destopt:
        inet6_del_protocol(&destopt_protocol, IPPROTO_DSTOPTS);
out_rthdr:
        inet6_del_protocol(&rthdr_protocol, IPPROTO_ROUTING);
        goto out;
}
void ipv6_exthdrs_exit(void)
{
        inet6_del_protocol(&nodata_protocol, IPPROTO_NONE);
        inet6_del_protocol(&destopt_protocol, IPPROTO_DSTOPTS);
        inet6_del_protocol(&rthdr_protocol, IPPROTO_ROUTING);
}

/**********************************
  Hop-by-hop options.
 **********************************/

/*
 * Note: we cannot rely on skb_dst(skb) before we assign it in ip6_route_input().
 */
static inline struct inet6_dev *ipv6_skb_idev(struct sk_buff *skb)
{
        return skb_dst(skb) ? ip6_dst_idev(skb_dst(skb)) : __in6_dev_get(skb->dev);
}

static inline struct net *ipv6_skb_net(struct sk_buff *skb)
{
        return skb_dst(skb) ? dev_net(skb_dst(skb)->dev) : dev_net(skb->dev);
}
/* Router Alert as of RFC 2711 */

static bool ipv6_hop_ra(struct sk_buff *skb, int optoff)
{
        const unsigned char *nh = skb_network_header(skb);

        if (nh[optoff + 1] == 2) {
                IP6CB(skb)->flags |= IP6SKB_ROUTERALERT;
                memcpy(&IP6CB(skb)->ra, nh + optoff + 2, sizeof(IP6CB(skb)->ra));
                return true;
        }
        net_dbg_ratelimited("ipv6_hop_ra: wrong RA length %d\n",
                            nh[optoff + 1]);
        kfree_skb(skb);
        return false;
}

/* Jumbo payload */

static bool ipv6_hop_jumbo(struct sk_buff *skb, int optoff)
{
        const unsigned char *nh = skb_network_header(skb);
        struct inet6_dev *idev = __in6_dev_get_safely(skb->dev);
        struct net *net = ipv6_skb_net(skb);
        u32 pkt_len;

        if (nh[optoff + 1] != 4 || (optoff & 3) != 2) {
                net_dbg_ratelimited("ipv6_hop_jumbo: wrong jumbo opt length/alignment %d\n",
                                    nh[optoff + 1]);
                __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
                goto drop;
        }

        pkt_len = ntohl(*(__be32 *)(nh + optoff + 2));
        if (pkt_len <= IPV6_MAXPLEN) {
                __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
                icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff + 2);
                return false;
        }
        if (ipv6_hdr(skb)->payload_len) {
                __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
                icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff);
                return false;
        }

        if (pkt_len > skb->len - sizeof(struct ipv6hdr)) {
                __IP6_INC_STATS(net, idev, IPSTATS_MIB_INTRUNCATEDPKTS);
                goto drop;
        }

        if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr)))
                goto drop;

        IP6CB(skb)->flags |= IP6SKB_JUMBOGRAM;
        return true;

drop:
        kfree_skb(skb);
        return false;
}

/* CALIPSO RFC 5570 */
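/* Sanity checks before handing the option to the CALIPSO code: the option
 * must be at least 8 bytes long, and the compartment length field
 * (nh[optoff + 6], counted in 32-bit words) plus the 8-byte fixed part
 * must fit inside the option length; the remaining validation is done by
 * calipso_validate().
 */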
static bool ipv6_hop_calipso(struct sk_buff *skb, int optoff)
{
        const unsigned char *nh = skb_network_header(skb);

        if (nh[optoff + 1] < 8)
                goto drop;

        if (nh[optoff + 6] * 4 + 8 > nh[optoff + 1])
                goto drop;

        if (!calipso_validate(skb, nh + optoff))
                goto drop;

        return true;

drop:
        kfree_skb(skb);
        return false;
}

static const struct tlvtype_proc tlvprochopopt_lst[] = {
        {
                .type   = IPV6_TLV_ROUTERALERT,
                .func   = ipv6_hop_ra,
        },
        {
                .type   = IPV6_TLV_JUMBO,
                .func   = ipv6_hop_jumbo,
        },
        {
                .type   = IPV6_TLV_CALIPSO,
                .func   = ipv6_hop_calipso,
        },
        { -1, }
};
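/* Hop-by-hop options are not dispatched through inet6_protocol (see
 * ipv6_exthdrs_init() above); this function is instead called directly
 * from the IPv6 input path when the first next header is NEXTHDR_HOP.
 * It enforces the max_hbh_opts_len sysctl, parses the TLVs with a
 * max_hbh_opts_cnt limit and, on success, advances skb->transport_header
 * and sets nhoff to sizeof(struct ipv6hdr).  On failure the skb has
 * already been freed and -1 is returned.
 */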
int ipv6_parse_hopopts(struct sk_buff *skb)
{
        struct inet6_skb_parm *opt = IP6CB(skb);
        struct net *net = dev_net(skb->dev);
        int extlen;

        /*
         * skb_network_header(skb) is equal to skb->data, and
         * skb_network_header_len(skb) is always equal to
         * sizeof(struct ipv6hdr) by definition of
         * hop-by-hop options.
         */
        if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + 8) ||
            !pskb_may_pull(skb, (sizeof(struct ipv6hdr) +
                                 ((skb_transport_header(skb)[1] + 1) << 3)))) {
fail_and_free:
                kfree_skb(skb);
                return -1;
        }

        extlen = (skb_transport_header(skb)[1] + 1) << 3;
        if (extlen > net->ipv6.sysctl.max_hbh_opts_len)
                goto fail_and_free;

        opt->flags |= IP6SKB_HOPBYHOP;
        if (ip6_parse_tlv(tlvprochopopt_lst, skb,
                          init_net.ipv6.sysctl.max_hbh_opts_cnt)) {
                skb->transport_header += extlen;
                opt = IP6CB(skb);
                opt->nhoff = sizeof(struct ipv6hdr);
                return 1;
        }
        return -1;
}
/*
 * Creating outbound headers.
 *
 * "build" functions work when skb is filled from head to tail (datagram)
 * "push"  functions work when headers are added from tail to head (tcp)
 *
 * In both cases we assume that the caller has reserved enough room
 * for headers.
 */
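/* Push a type 0 routing header in front of the current headers.  The
 * caller's final destination (**addr_p) is stored in the last slot of the
 * address list, the remaining addresses are shifted down by one, and
 * *addr_p is redirected to the first listed address so that the caller
 * writes it into the IPv6 destination address field.
 */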
static void ipv6_push_rthdr0(struct sk_buff *skb, u8 *proto,
                             struct ipv6_rt_hdr *opt,
                             struct in6_addr **addr_p, struct in6_addr *saddr)
{
        struct rt0_hdr *phdr, *ihdr;
        int hops;

        ihdr = (struct rt0_hdr *) opt;

        phdr = skb_push(skb, (ihdr->rt_hdr.hdrlen + 1) << 3);
        memcpy(phdr, ihdr, sizeof(struct rt0_hdr));

        hops = ihdr->rt_hdr.hdrlen >> 1;

        if (hops > 1)
                memcpy(phdr->addr, ihdr->addr + 1,
                       (hops - 1) * sizeof(struct in6_addr));

        phdr->addr[hops - 1] = **addr_p;
        *addr_p = ihdr->addr;

        phdr->rt_hdr.nexthdr = *proto;
        *proto = NEXTHDR_ROUTING;
}
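/* Push a segment routing header (type 4).  The fixed part and the segment
 * list (other than slot 0) are copied from the user-supplied header, the
 * current final destination is stored in segments[0], and *addr_p is
 * redirected to the active segment (segments[segments_left]) so that it
 * becomes the packet's destination address.  Trailing TLVs are copied as
 * well, and with CONFIG_IPV6_SEG6_HMAC the HMAC TLV is recomputed when
 * present.
 */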
static void ipv6_push_rthdr4(struct sk_buff *skb, u8 *proto,
                             struct ipv6_rt_hdr *opt,
                             struct in6_addr **addr_p, struct in6_addr *saddr)
{
        struct ipv6_sr_hdr *sr_phdr, *sr_ihdr;
        int plen, hops;

        sr_ihdr = (struct ipv6_sr_hdr *)opt;
        plen = (sr_ihdr->hdrlen + 1) << 3;

        sr_phdr = skb_push(skb, plen);
        memcpy(sr_phdr, sr_ihdr, sizeof(struct ipv6_sr_hdr));

        hops = sr_ihdr->first_segment + 1;
        memcpy(sr_phdr->segments + 1, sr_ihdr->segments + 1,
               (hops - 1) * sizeof(struct in6_addr));

        sr_phdr->segments[0] = **addr_p;
        *addr_p = &sr_ihdr->segments[sr_ihdr->segments_left];

        if (sr_ihdr->hdrlen > hops * 2) {
                int tlvs_offset, tlvs_length;

                tlvs_offset = (1 + hops * 2) << 3;
                tlvs_length = (sr_ihdr->hdrlen - hops * 2) << 3;
                memcpy((char *)sr_phdr + tlvs_offset,
                       (char *)sr_ihdr + tlvs_offset, tlvs_length);
        }

#ifdef CONFIG_IPV6_SEG6_HMAC
        if (sr_has_hmac(sr_phdr)) {
                struct net *net = NULL;

                if (skb->dev)
                        net = dev_net(skb->dev);
                else if (skb->sk)
                        net = sock_net(skb->sk);

                WARN_ON(!net);

                if (net)
                        seg6_push_hmac(net, saddr, sr_phdr);
        }
#endif

        sr_phdr->nexthdr = *proto;
        *proto = NEXTHDR_ROUTING;
}

static void ipv6_push_rthdr(struct sk_buff *skb, u8 *proto,
                            struct ipv6_rt_hdr *opt,
                            struct in6_addr **addr_p, struct in6_addr *saddr)
{
        switch (opt->type) {
        case IPV6_SRCRT_TYPE_0:
        case IPV6_SRCRT_STRICT:
        case IPV6_SRCRT_TYPE_2:
                ipv6_push_rthdr0(skb, proto, opt, addr_p, saddr);
                break;
        case IPV6_SRCRT_TYPE_4:
                ipv6_push_rthdr4(skb, proto, opt, addr_p, saddr);
                break;
        default:
                break;
        }
}

static void ipv6_push_exthdr(struct sk_buff *skb, u8 *proto, u8 type, struct ipv6_opt_hdr *opt)
{
        struct ipv6_opt_hdr *h = skb_push(skb, ipv6_optlen(opt));

        memcpy(h, opt, ipv6_optlen(opt));
        h->nexthdr = *proto;
        *proto = type;
}
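/* The two push helpers below split the extension headers between the part
 * that is expected to precede a fragment header (hop-by-hop options, the
 * dest0 options that accompany a routing header, and the routing header
 * itself) and the part that follows it (dest1 options).  Headers are
 * pushed in reverse wire order; each pushed header takes over *proto as
 * its next-header value and leaves its own protocol number for the
 * preceding header (or the IPv6 header) to record.
 */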
void ipv6_push_nfrag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt,
                          u8 *proto,
                          struct in6_addr **daddr, struct in6_addr *saddr)
{
        if (opt->srcrt) {
                ipv6_push_rthdr(skb, proto, opt->srcrt, daddr, saddr);
                /*
                 * IPV6_RTHDRDSTOPTS is ignored
                 * unless IPV6_RTHDR is set (RFC3542).
                 */
                if (opt->dst0opt)
                        ipv6_push_exthdr(skb, proto, NEXTHDR_DEST, opt->dst0opt);
        }
        if (opt->hopopt)
                ipv6_push_exthdr(skb, proto, NEXTHDR_HOP, opt->hopopt);
}

void ipv6_push_frag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt, u8 *proto)
{
        if (opt->dst1opt)
                ipv6_push_exthdr(skb, proto, NEXTHDR_DEST, opt->dst1opt);
}
EXPORT_SYMBOL(ipv6_push_frag_opts);
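/* Duplicate a struct ipv6_txoptions.  The options live in a single
 * allocation of opt->tot_len bytes with the individual headers stored
 * inline after the structure, so a flat memcpy() is enough as long as the
 * embedded pointers are then shifted by the distance (dif) between the
 * old and the new allocation.
 */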
struct ipv6_txoptions *
ipv6_dup_options(struct sock *sk, struct ipv6_txoptions *opt)
{
        struct ipv6_txoptions *opt2;

        opt2 = sock_kmalloc(sk, opt->tot_len, GFP_ATOMIC);
        if (opt2) {
                long dif = (char *)opt2 - (char *)opt;
                memcpy(opt2, opt, opt->tot_len);
                if (opt2->hopopt)
                        *((char **)&opt2->hopopt) += dif;
                if (opt2->dst0opt)
                        *((char **)&opt2->dst0opt) += dif;
                if (opt2->dst1opt)
                        *((char **)&opt2->dst1opt) += dif;
                if (opt2->srcrt)
                        *((char **)&opt2->srcrt) += dif;
                refcount_set(&opt2->refcnt, 1);
        }
        return opt2;
}
EXPORT_SYMBOL_GPL(ipv6_dup_options);
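/* Helper for ipv6_renew_options(): append one header to the new options
 * block at *p.  If this slot's option type (renewtype) is the one being
 * replaced (newtype), the caller-supplied replacement is copied;
 * otherwise the old header, if any, is carried over unchanged.  *dest is
 * pointed at the copy and *p is advanced by the CMSG-aligned length.
 */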
static void ipv6_renew_option(int renewtype,
                              struct ipv6_opt_hdr **dest,
                              struct ipv6_opt_hdr *old,
                              struct ipv6_opt_hdr *new,
                              int newtype, char **p)
{
        struct ipv6_opt_hdr *src;

        src = (renewtype == newtype ? new : old);
        if (!src)
                return;

        memcpy(*p, src, ipv6_optlen(src));
        *dest = (struct ipv6_opt_hdr *)*p;
        *p += CMSG_ALIGN(ipv6_optlen(*dest));
}
/**
 * ipv6_renew_options - replace a specific ext hdr with a new one.
 *
 * @sk: sock from which to allocate memory
 * @opt: original options
 * @newtype: option type to replace in @opt
 * @newopt: new option of type @newtype to replace (user-mem)
 *
 * Returns a new set of options which is a copy of @opt with the
 * option type @newtype replaced with @newopt.
 *
 * @opt may be NULL, in which case a new set of options is returned
 * containing just @newopt.
 *
 * @newopt may be NULL, in which case the specified option type is
 * not copied into the new set of options.
 *
 * The new set of options is allocated from the socket option memory
 * buffer of @sk.
 */
struct ipv6_txoptions *
ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt,
                   int newtype, struct ipv6_opt_hdr *newopt)
{
        int tot_len = 0;
        char *p;
        struct ipv6_txoptions *opt2;

        if (opt) {
                if (newtype != IPV6_HOPOPTS && opt->hopopt)
                        tot_len += CMSG_ALIGN(ipv6_optlen(opt->hopopt));
                if (newtype != IPV6_RTHDRDSTOPTS && opt->dst0opt)
                        tot_len += CMSG_ALIGN(ipv6_optlen(opt->dst0opt));
                if (newtype != IPV6_RTHDR && opt->srcrt)
                        tot_len += CMSG_ALIGN(ipv6_optlen(opt->srcrt));
                if (newtype != IPV6_DSTOPTS && opt->dst1opt)
                        tot_len += CMSG_ALIGN(ipv6_optlen(opt->dst1opt));
        }

        if (newopt)
                tot_len += CMSG_ALIGN(ipv6_optlen(newopt));

        if (!tot_len)
                return NULL;

        tot_len += sizeof(*opt2);
        opt2 = sock_kmalloc(sk, tot_len, GFP_ATOMIC);
        if (!opt2)
                return ERR_PTR(-ENOBUFS);

        memset(opt2, 0, tot_len);
        refcount_set(&opt2->refcnt, 1);
        opt2->tot_len = tot_len;
        p = (char *)(opt2 + 1);

        ipv6_renew_option(IPV6_HOPOPTS, &opt2->hopopt,
                          (opt ? opt->hopopt : NULL),
                          newopt, newtype, &p);
        ipv6_renew_option(IPV6_RTHDRDSTOPTS, &opt2->dst0opt,
                          (opt ? opt->dst0opt : NULL),
                          newopt, newtype, &p);
        ipv6_renew_option(IPV6_RTHDR,
                          (struct ipv6_opt_hdr **)&opt2->srcrt,
                          (opt ? (struct ipv6_opt_hdr *)opt->srcrt : NULL),
                          newopt, newtype, &p);
        ipv6_renew_option(IPV6_DSTOPTS, &opt2->dst1opt,
                          (opt ? opt->dst1opt : NULL),
                          newopt, newtype, &p);

        opt2->opt_nflen = (opt2->hopopt ? ipv6_optlen(opt2->hopopt) : 0) +
                          (opt2->dst0opt ? ipv6_optlen(opt2->dst0opt) : 0) +
                          (opt2->srcrt ? ipv6_optlen(opt2->srcrt) : 0);
        opt2->opt_flen = (opt2->dst1opt ? ipv6_optlen(opt2->dst1opt) : 0);

        return opt2;
}
struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space,
                                          struct ipv6_txoptions *opt)
{
        /*
         * ignore the dest before srcrt unless srcrt is being included.
         * --yoshfuji
         */
        if (opt && opt->dst0opt && !opt->srcrt) {
                if (opt_space != opt) {
                        memcpy(opt_space, opt, sizeof(*opt_space));
                        opt = opt_space;
                }
                opt->opt_nflen -= ipv6_optlen(opt->dst0opt);
                opt->dst0opt = NULL;
        }

        return opt;
}
EXPORT_SYMBOL_GPL(ipv6_fixup_options);
/**
 * fl6_update_dst - update flowi destination address with info given
 *                  by srcrt option, if any.
 *
 * @fl6: flowi6 for which daddr is to be updated
 * @opt: struct ipv6_txoptions in which to look for srcrt opt
 * @orig: copy of original daddr address if modified
 *
 * Returns NULL if there are no txoptions or no srcrt; otherwise returns
 * @orig, with the initial value of fl6->daddr stored in @orig.
 */
struct in6_addr *fl6_update_dst(struct flowi6 *fl6,
                                const struct ipv6_txoptions *opt,
                                struct in6_addr *orig)
{
        if (!opt || !opt->srcrt)
                return NULL;

        *orig = fl6->daddr;

        switch (opt->srcrt->type) {
        case IPV6_SRCRT_TYPE_0:
        case IPV6_SRCRT_STRICT:
        case IPV6_SRCRT_TYPE_2:
                fl6->daddr = *((struct rt0_hdr *)opt->srcrt)->addr;
                break;
        case IPV6_SRCRT_TYPE_4:
        {
                struct ipv6_sr_hdr *srh = (struct ipv6_sr_hdr *)opt->srcrt;

                fl6->daddr = srh->segments[srh->segments_left];
                break;
        }
        default:
                return NULL;
        }

        return orig;
}
EXPORT_SYMBOL_GPL(fl6_update_dst);