/* net/mpls/internal.h — private definitions shared by the MPLS subsystem. */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MPLS_INTERNAL_H
#define MPLS_INTERNAL_H
#include <net/mpls.h>

/* Put a reasonable limit on the number of labels
 * we will accept from userspace.
 */
#define MAX_NEW_LABELS 30
/* One MPLS label stack entry, unpacked from its 32-bit on-the-wire
 * form (see mpls_entry_decode()).
 */
struct mpls_entry_decoded {
	u32 label;	/* 20-bit label value */
	u8 ttl;		/* time to live */
	u8 tc;		/* traffic class */
	u8 bos;		/* bottom-of-stack bit */
};
/* Per-CPU MPLS link statistics.  The u64_stats_sync sequence counter
 * lets readers obtain a consistent snapshot of the 64-bit counters on
 * 32-bit hosts (see the MPLS_INC_STATS* macros below).
 */
struct mpls_pcpu_stats {
	struct mpls_link_stats stats;
	struct u64_stats_sync syncp;
};
/* Per-net_device MPLS state, reached via mpls_dev_get(). */
struct mpls_dev {
	int input_enabled;			/* accept incoming MPLS packets on this device */
	struct net_device *dev;			/* back-pointer to the owning device */
	struct mpls_pcpu_stats __percpu *stats;	/* per-CPU counters */
	struct ctl_table_header *sysctl;	/* per-device sysctl registration */
	struct rcu_head rcu;			/* for deferred freeing */
};
#if BITS_PER_LONG == 32

/* On 32-bit hosts a 64-bit counter cannot be updated in one store, so
 * the update is bracketed by the u64_stats sequence counter with BHs
 * disabled to keep the reader/writer protocol consistent.
 */
#define MPLS_INC_STATS_LEN(mdev, len, pkts_field, bytes_field)	\
	do {							\
		__typeof__(*(mdev)->stats) *ptr =		\
			raw_cpu_ptr((mdev)->stats);		\
		local_bh_disable();				\
		u64_stats_update_begin(&ptr->syncp);		\
		ptr->stats.pkts_field++;			\
		ptr->stats.bytes_field += (len);		\
		u64_stats_update_end(&ptr->syncp);		\
		local_bh_enable();				\
	} while (0)

#define MPLS_INC_STATS(mdev, field)			\
	do {						\
		__typeof__(*(mdev)->stats) *ptr =	\
			raw_cpu_ptr((mdev)->stats);	\
		local_bh_disable();			\
		u64_stats_update_begin(&ptr->syncp);	\
		ptr->stats.field++;			\
		u64_stats_update_end(&ptr->syncp);	\
		local_bh_enable();			\
	} while (0)

#else

/* On 64-bit hosts this_cpu_inc()/this_cpu_add() suffice; no
 * sequence counter is needed.
 */
#define MPLS_INC_STATS_LEN(mdev, len, pkts_field, bytes_field)		\
	do {								\
		this_cpu_inc((mdev)->stats->stats.pkts_field);		\
		this_cpu_add((mdev)->stats->stats.bytes_field, (len));	\
	} while (0)

#define MPLS_INC_STATS(mdev, field)			\
	this_cpu_inc((mdev)->stats->stats.field)

#endif
struct sk_buff;

/* Sentinel label value: 1 << 20 lies outside the 20-bit label range,
 * so it can never collide with a real label.
 */
#define LABEL_NOT_SPECIFIED (1 << 20)

/* This maximum ha length copied from the definition of struct neighbour */
#define VIA_ALEN_ALIGN sizeof(unsigned long)
#define MAX_VIA_ALEN (ALIGN(MAX_ADDR_LEN, VIA_ALEN_ALIGN))
/* Type of the payload carried beneath the MPLS label stack. */
enum mpls_payload_type {
	MPT_UNSPEC, /* IPv4 or IPv6 */
	MPT_IPV4 = 4,
	MPT_IPV6 = 6,

	/* Other types not implemented:
	 * - Pseudo-wire with or without control word (RFC4385)
	 * - GAL (RFC5586)
	 */
};
struct mpls_nh { /* next hop label forwarding entry */
	struct net_device __rcu *nh_dev;

	/* nh_flags is accessed under RCU in the packet path; it is
	 * modified handling netdev events with rtnl lock held
	 */
	unsigned int nh_flags;
	u8 nh_labels;		/* number of entries in nh_label[] */
	u8 nh_via_alen;		/* length of the via address that follows */
	u8 nh_via_table;	/* address table (family) of the via */
	u8 nh_reserved1;	/* explicit padding, unused */
	u32 nh_label[0];	/* label stack to push; old-style flexible
				 * array — the via address is stored after
				 * it at MPLS_NH_VIA_OFF()
				 */
};
/* offset of via from beginning of mpls_nh, rounded up so the via
 * address is aligned on sizeof(unsigned long)
 */
#define MPLS_NH_VIA_OFF(num_labels) \
	ALIGN(sizeof(struct mpls_nh) + (num_labels) * sizeof(u32), \
	      VIA_ALEN_ALIGN)

/* all nexthops within a route have the same size based on the
 * max number of labels and max via length across all nexthops
 */
#define MPLS_NH_SIZE(num_labels, max_via_alen)		\
	(MPLS_NH_VIA_OFF((num_labels)) +		\
	 ALIGN((max_via_alen), VIA_ALEN_ALIGN))
/* Per-route TTL propagation policy; DEFAULT presumably defers to a
 * system-wide setting — confirm against the users of rt_ttl_propagate.
 */
enum mpls_ttl_propagation {
	MPLS_TTL_PROP_DEFAULT,
	MPLS_TTL_PROP_ENABLED,
	MPLS_TTL_PROP_DISABLED,
};
/* The route, nexthops and vias are stored together in the same memory
 * block:
 *
 * +----------------------+
 * | mpls_route           |
 * +----------------------+
 * | mpls_nh 0            |
 * +----------------------+
 * | alignment padding    |   4 bytes for odd number of labels
 * +----------------------+
 * | via[rt_max_alen] 0   |
 * +----------------------+
 * | alignment padding    |   via's aligned on sizeof(unsigned long)
 * +----------------------+
 * | ...                  |
 * +----------------------+
 * | mpls_nh n-1          |
 * +----------------------+
 * | via[rt_max_alen] n-1 |
 * +----------------------+
 */
struct mpls_route { /* next hop label forwarding entry */
	struct rcu_head rt_rcu;		/* for deferred freeing */
	u8 rt_protocol;			/* routing protocol that installed the route */
	u8 rt_payload_type;		/* enum mpls_payload_type */
	u8 rt_max_alen;			/* max via length across all nexthops */
	u8 rt_ttl_propagate;		/* enum mpls_ttl_propagation */
	u8 rt_nhn;			/* number of nexthops in rt_nh[] */
	/* rt_nhn_alive is accessed under RCU in the packet path; it
	 * is modified handling netdev events with rtnl lock held
	 */
	u8 rt_nhn_alive;
	u8 rt_nh_size;			/* stride between nexthops, bytes */
	u8 rt_via_offset;		/* offset of via within each nexthop */
	u8 rt_reserved1;		/* explicit padding, unused */
	struct mpls_nh rt_nh[0];	/* rt_nhn nexthops, rt_nh_size apart */
};
/* Iterate over the nexthops of @rt.  Each macro deliberately opens an
 * unbalanced '{' (scoping nhsel/nh/__nh) which MUST be closed with
 * endfor_nexthops().  Inside the loop 'nh' points at the current
 * nexthop and 'nhsel' is its index; successive nexthops are
 * rt->rt_nh_size bytes apart, hence the byte-pointer stepping.
 */
#define for_nexthops(rt) {						\
	int nhsel; struct mpls_nh *nh; u8 *__nh;			\
	for (nhsel = 0, nh = (rt)->rt_nh, __nh = (u8 *)((rt)->rt_nh);	\
	     nhsel < (rt)->rt_nhn;					\
	     __nh += rt->rt_nh_size, nh = (struct mpls_nh *)__nh, nhsel++)

/* Same iteration as for_nexthops(); kept separate so call sites that
 * modify nexthops are visually distinct.
 */
#define change_nexthops(rt) {						\
	int nhsel; struct mpls_nh *nh; u8 *__nh;			\
	for (nhsel = 0, nh = (struct mpls_nh *)((rt)->rt_nh),		\
			__nh = (u8 *)((rt)->rt_nh);			\
	     nhsel < (rt)->rt_nhn;					\
	     __nh += rt->rt_nh_size, nh = (struct mpls_nh *)__nh, nhsel++)

/* Closes the block opened by for_nexthops()/change_nexthops(). */
#define endfor_nexthops(rt) }
  147. static inline struct mpls_shim_hdr mpls_entry_encode(u32 label, unsigned ttl, unsigned tc, bool bos)
  148. {
  149. struct mpls_shim_hdr result;
  150. result.label_stack_entry =
  151. cpu_to_be32((label << MPLS_LS_LABEL_SHIFT) |
  152. (tc << MPLS_LS_TC_SHIFT) |
  153. (bos ? (1 << MPLS_LS_S_SHIFT) : 0) |
  154. (ttl << MPLS_LS_TTL_SHIFT));
  155. return result;
  156. }
  157. static inline struct mpls_entry_decoded mpls_entry_decode(struct mpls_shim_hdr *hdr)
  158. {
  159. struct mpls_entry_decoded result;
  160. unsigned entry = be32_to_cpu(hdr->label_stack_entry);
  161. result.label = (entry & MPLS_LS_LABEL_MASK) >> MPLS_LS_LABEL_SHIFT;
  162. result.ttl = (entry & MPLS_LS_TTL_MASK) >> MPLS_LS_TTL_SHIFT;
  163. result.tc = (entry & MPLS_LS_TC_MASK) >> MPLS_LS_TC_SHIFT;
  164. result.bos = (entry & MPLS_LS_S_MASK) >> MPLS_LS_S_SHIFT;
  165. return result;
  166. }
  167. static inline struct mpls_dev *mpls_dev_get(const struct net_device *dev)
  168. {
  169. return rcu_dereference_rtnl(dev->mpls_ptr);
  170. }
/* Netlink helpers: encode/decode a label stack attribute. */
int nla_put_labels(struct sk_buff *skb, int attrtype, u8 labels,
		   const u32 label[]);
int nla_get_labels(const struct nlattr *nla, u8 max_labels, u8 *labels,
		   u32 label[], struct netlink_ext_ack *extack);

/* Device/output helpers shared across the MPLS subsystem. */
bool mpls_output_possible(const struct net_device *dev);
unsigned int mpls_dev_mtu(const struct net_device *dev);
bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu);
void mpls_stats_inc_outucastpkts(struct net_device *dev,
				 const struct sk_buff *skb);

#endif /* MPLS_INTERNAL_H */