output_core.c

/*
 * IPv6 library code, needed by static components when full IPv6 support is
 * not configured or static. These functions are needed by GSO/GRO implementation.
 */
#include <linux/export.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/addrconf.h>
#include <net/secure_seq.h>
#include <linux/netfilter.h>

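/* Generate a 32-bit fragment ID for the (dst, src) address pair: hash both
 * addresses with the per-netns ip_id_key (lazily seeded on first use) and
 * reserve an ID via ip_idents_reserve() from the bucket that hash selects.
 */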
static u32 __ipv6_select_ident(struct net *net,
			       const struct in6_addr *dst,
			       const struct in6_addr *src)
{
	const struct {
		struct in6_addr dst;
		struct in6_addr src;
	} __aligned(SIPHASH_ALIGNMENT) combined = {
		.dst = *dst,
		.src = *src,
	};
	u32 hash, id;

	/* Note the following code is not safe, but this is okay. */
	if (unlikely(siphash_key_is_zero(&net->ipv4.ip_id_key)))
		get_random_bytes(&net->ipv4.ip_id_key,
				 sizeof(net->ipv4.ip_id_key));

	hash = siphash(&combined, sizeof(combined), &net->ipv4.ip_id_key);

	/* Treat id of 0 as unset and if we get 0 back from ip_idents_reserve,
	 * set the high-order bit instead, thus minimizing possible future
	 * collisions.
	 */
	id = ip_idents_reserve(hash, 1);
	if (unlikely(!id))
		id = 1 << 31;

	return id;
}

/* This function exists only for tap drivers that must support broken
 * clients requesting UFO without specifying an IPv6 fragment ID.
 *
 * This is similar to ipv6_select_ident() but we use an independent hash
 * seed to limit information leakage.
 *
 * The network header must be set before calling this.
 */
__be32 ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb)
{
	struct in6_addr buf[2];
	struct in6_addr *addrs;
	u32 id;

	addrs = skb_header_pointer(skb,
				   skb_network_offset(skb) +
				   offsetof(struct ipv6hdr, saddr),
				   sizeof(buf), buf);
	if (!addrs)
		return 0;

	id = __ipv6_select_ident(net, &addrs[1], &addrs[0]);
	return htonl(id);
}
EXPORT_SYMBOL_GPL(ipv6_proxy_select_ident);

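/* Select the IPv6 fragment Identification for a packet flowing from
 * @saddr to @daddr, returned as a network-byte-order 32-bit value.
 */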
__be32 ipv6_select_ident(struct net *net,
			 const struct in6_addr *daddr,
			 const struct in6_addr *saddr)
{
	u32 id;

	id = __ipv6_select_ident(net, daddr, saddr);
	return htonl(id);
}
EXPORT_SYMBOL(ipv6_select_ident);

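/* Walk the extension header chain to find the offset at which a Fragment
 * header would be inserted.  Returns the offset from the network header,
 * with *nexthdr left pointing at the Next Header field that must be
 * patched; -EINVAL is returned if the header chain is malformed or too
 * long.
 */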
int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
{
	unsigned int offset = sizeof(struct ipv6hdr);
	unsigned int packet_len = skb_tail_pointer(skb) -
				  skb_network_header(skb);
	int found_rhdr = 0;

	*nexthdr = &ipv6_hdr(skb)->nexthdr;

	while (offset <= packet_len) {
		struct ipv6_opt_hdr *exthdr;

		switch (**nexthdr) {
		case NEXTHDR_HOP:
			break;
		case NEXTHDR_ROUTING:
			found_rhdr = 1;
			break;
		case NEXTHDR_DEST:
#if IS_ENABLED(CONFIG_IPV6_MIP6)
			if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0)
				break;
#endif
			if (found_rhdr)
				return offset;
			break;
		default:
			return offset;
		}

		if (offset + sizeof(struct ipv6_opt_hdr) > packet_len)
			return -EINVAL;

		exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
						 offset);
		offset += ipv6_optlen(exthdr);
		if (offset > IPV6_MAXPLEN)
			return -EINVAL;
		*nexthdr = &exthdr->nexthdr;
	}

	return -EINVAL;
}
EXPORT_SYMBOL(ip6_find_1stfragopt);

#if IS_ENABLED(CONFIG_IPV6)
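/* Hop limit for packets sent via @dst: the route's RTAX_HOPLIMIT metric if
 * one is set, otherwise the egress device's per-interface hop_limit or,
 * failing that, the namespace-wide "all" hop_limit setting.
 */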
int ip6_dst_hoplimit(struct dst_entry *dst)
{
	int hoplimit = dst_metric_raw(dst, RTAX_HOPLIMIT);

	if (hoplimit == 0) {
		struct net_device *dev = dst->dev;
		struct inet6_dev *idev;

		rcu_read_lock();
		idev = __in6_dev_get(dev);
		if (idev)
			hoplimit = idev->cnf.hop_limit;
		else
			hoplimit = dev_net(dev)->ipv6.devconf_all->hop_limit;
		rcu_read_unlock();
	}
	return hoplimit;
}
EXPORT_SYMBOL(ip6_dst_hoplimit);
#endif

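/* Finish a locally generated IPv6 packet: fill in payload_len, record the
 * offset of the Next Header field in the skb control buffer, hand the skb
 * to an L3 master device if the egress device is enslaved to one, and run
 * the NF_INET_LOCAL_OUT hook with dst_output() as the continuation.
 */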
int __ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	int len;

	len = skb->len - sizeof(struct ipv6hdr);
	if (len > IPV6_MAXPLEN)
		len = 0;
	ipv6_hdr(skb)->payload_len = htons(len);
	IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);

	/* if egress device is enslaved to an L3 master device pass the
	 * skb to its handler for processing
	 */
	skb = l3mdev_ip6_out(sk, skb);
	if (unlikely(!skb))
		return 0;

	skb->protocol = htons(ETH_P_IPV6);

	return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
		       net, sk, skb, NULL, skb_dst(skb)->dev,
		       dst_output);
}
EXPORT_SYMBOL_GPL(__ip6_local_out);

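/* Send a locally generated packet: run the LOCAL_OUT hook via
 * __ip6_local_out() and, if the hooks accept the packet (return value 1),
 * pass it on to dst_output().
 */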
int ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	int err;

	err = __ip6_local_out(net, sk, skb);
	if (likely(err == 1))
		err = dst_output(net, sk, skb);

	return err;
}
EXPORT_SYMBOL_GPL(ip6_local_out);