output_core.c

/*
 * IPv6 library code, needed by static components when full IPv6 support is
 * not configured or static. These functions are needed by GSO/GRO implementation.
 */
#include <linux/export.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/addrconf.h>
#include <net/secure_seq.h>
#include <linux/netfilter.h>
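
/* Hash the flow (destination address, source address, network namespace)
 * with the caller-supplied random seed and reserve an identification value
 * via ip_idents_reserve() for that hash bucket.
 */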
static u32 __ipv6_select_ident(struct net *net, u32 hashrnd,
			       const struct in6_addr *dst,
			       const struct in6_addr *src)
{
	u32 hash, id;

	hash = __ipv6_addr_jhash(dst, hashrnd);
	hash = __ipv6_addr_jhash(src, hash);
	hash ^= net_hash_mix(net);

	/* Treat id of 0 as unset and if we get 0 back from ip_idents_reserve,
	 * set the high-order bit instead, thus minimizing possible future
	 * collisions.
	 */
	id = ip_idents_reserve(hash, 1);
	if (unlikely(!id))
		id = 1 << 31;

	return id;
}

/* This function exists only for tap drivers that must support broken
 * clients requesting UFO without specifying an IPv6 fragment ID.
 *
 * This is similar to ipv6_select_ident() but we use an independent hash
 * seed to limit information leakage.
 *
 * The network header must be set before calling this.
 */
void ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb)
{
	static u32 ip6_proxy_idents_hashrnd __read_mostly;
	struct in6_addr buf[2];
	struct in6_addr *addrs;
	u32 id;

	addrs = skb_header_pointer(skb,
				   skb_network_offset(skb) +
				   offsetof(struct ipv6hdr, saddr),
				   sizeof(buf), buf);
	if (!addrs)
		return;

	net_get_random_once(&ip6_proxy_idents_hashrnd,
			    sizeof(ip6_proxy_idents_hashrnd));

	id = __ipv6_select_ident(net, ip6_proxy_idents_hashrnd,
				 &addrs[1], &addrs[0]);
	skb_shinfo(skb)->ip6_frag_id = htonl(id);
}
EXPORT_SYMBOL_GPL(ipv6_proxy_select_ident);
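
/* Select an IPv6 fragment identification for the flow (daddr, saddr) using
 * a per-boot random seed, and return it in network byte order.
 */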
__be32 ipv6_select_ident(struct net *net,
			 const struct in6_addr *daddr,
			 const struct in6_addr *saddr)
{
	static u32 ip6_idents_hashrnd __read_mostly;
	u32 id;

	net_get_random_once(&ip6_idents_hashrnd, sizeof(ip6_idents_hashrnd));

	id = __ipv6_select_ident(net, ip6_idents_hashrnd, daddr, saddr);
	return htonl(id);
}
EXPORT_SYMBOL(ipv6_select_ident);
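
/* Walk the extension header chain to find the offset, from the network
 * header, at which a fragment header should be inserted, leaving *nexthdr
 * pointing at the nexthdr byte that will precede it.  Returns the offset on
 * success or -EINVAL if the header chain is malformed or too long.
 */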
int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
{
	unsigned int offset = sizeof(struct ipv6hdr);
	unsigned int packet_len = skb_tail_pointer(skb) -
				  skb_network_header(skb);
	int found_rhdr = 0;

	*nexthdr = &ipv6_hdr(skb)->nexthdr;

	while (offset <= packet_len) {
		struct ipv6_opt_hdr *exthdr;

		switch (**nexthdr) {
		case NEXTHDR_HOP:
			break;
		case NEXTHDR_ROUTING:
			found_rhdr = 1;
			break;
		case NEXTHDR_DEST:
#if IS_ENABLED(CONFIG_IPV6_MIP6)
			if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0)
				break;
#endif
			if (found_rhdr)
				return offset;
			break;
		default:
			return offset;
		}

		if (offset + sizeof(struct ipv6_opt_hdr) > packet_len)
			return -EINVAL;

		exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
						 offset);
		offset += ipv6_optlen(exthdr);
		if (offset > IPV6_MAXPLEN)
			return -EINVAL;
		*nexthdr = &exthdr->nexthdr;
	}

	return -EINVAL;
}
EXPORT_SYMBOL(ip6_find_1stfragopt);

#if IS_ENABLED(CONFIG_IPV6)
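/* Return the hop limit to use for packets on this route: the RTAX_HOPLIMIT
 * metric if one is set, otherwise the per-device (or all-device) configured
 * hop_limit.
 */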
int ip6_dst_hoplimit(struct dst_entry *dst)
{
	int hoplimit = dst_metric_raw(dst, RTAX_HOPLIMIT);

	if (hoplimit == 0) {
		struct net_device *dev = dst->dev;
		struct inet6_dev *idev;

		rcu_read_lock();
		idev = __in6_dev_get(dev);
		if (idev)
			hoplimit = idev->cnf.hop_limit;
		else
			hoplimit = dev_net(dev)->ipv6.devconf_all->hop_limit;
		rcu_read_unlock();
	}
	return hoplimit;
}
EXPORT_SYMBOL(ip6_dst_hoplimit);
#endif
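
/* Finish a locally generated IPv6 packet (fill in payload_len, record the
 * nexthdr offset, hand the skb to the L3 master device handler if the egress
 * device is enslaved to one) and run it through the NF_INET_LOCAL_OUT
 * netfilter hook towards dst_output().
 */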
int __ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	int len;

	len = skb->len - sizeof(struct ipv6hdr);
	if (len > IPV6_MAXPLEN)
		len = 0;
	ipv6_hdr(skb)->payload_len = htons(len);
	IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);

	/* if egress device is enslaved to an L3 master device pass the
	 * skb to its handler for processing
	 */
	skb = l3mdev_ip6_out(sk, skb);
	if (unlikely(!skb))
		return 0;

	skb->protocol = htons(ETH_P_IPV6);

	return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
		       net, sk, skb, NULL, skb_dst(skb)->dev,
		       dst_output);
}
EXPORT_SYMBOL_GPL(__ip6_local_out);
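
/* Convenience wrapper around __ip6_local_out() that also invokes
 * dst_output() when the netfilter hook accepts the packet.
 */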
int ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	int err;

	err = __ip6_local_out(net, sk, skb);
	if (likely(err == 1))
		err = dst_output(net, sk, skb);

	return err;
}
EXPORT_SYMBOL_GPL(ip6_local_out);