  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
  3. *
  4. * RMNET Data ingress/egress handler
  5. */
  6. #include <linux/netdevice.h>
  7. #include <linux/netdev_features.h>
  8. #include <linux/if_arp.h>
  9. #include <net/sock.h>
  10. #include "rmnet_private.h"
  11. #include "rmnet_config.h"
  12. #include "rmnet_vnd.h"
  13. #include "rmnet_map.h"
  14. #include "rmnet_handlers.h"
  15. #define RMNET_IP_VERSION_4 0x40
  16. #define RMNET_IP_VERSION_6 0x60
  17. /* Helper Functions */
  18. static void rmnet_set_skb_proto(struct sk_buff *skb)
  19. {
  20. switch (skb->data[0] & 0xF0) {
  21. case RMNET_IP_VERSION_4:
  22. skb->protocol = htons(ETH_P_IP);
  23. break;
  24. case RMNET_IP_VERSION_6:
  25. skb->protocol = htons(ETH_P_IPV6);
  26. break;
  27. default:
  28. skb->protocol = htons(ETH_P_MAP);
  29. break;
  30. }
  31. }
  32. /* Generic handler */
  33. static void
  34. rmnet_deliver_skb(struct sk_buff *skb)
  35. {
  36. struct rmnet_priv *priv = netdev_priv(skb->dev);
  37. skb_reset_transport_header(skb);
  38. skb_reset_network_header(skb);
  39. rmnet_vnd_rx_fixup(skb, skb->dev);
  40. skb->pkt_type = PACKET_HOST;
  41. skb_set_mac_header(skb, 0);
  42. gro_cells_receive(&priv->gro_cells, skb);
  43. }
  44. /* MAP handler */
/* MAP handler: demultiplex one MAP frame to its logical endpoint and deliver
 * the decapsulated packet to that endpoint's virtual device. Consumes @skb on
 * every path (delivered, handed to the command handler, or freed on error).
 */
static void
__rmnet_map_ingress_handler(struct sk_buff *skb,
			    struct rmnet_port *port)
{
	struct rmnet_endpoint *ep;
	u16 len, pad;
	u8 mux_id;

	/* Command/Data bit set: MAP control command, not a data frame. Only
	 * handled if command processing was enabled for this port.
	 */
	if (RMNET_MAP_GET_CD_BIT(skb)) {
		if (port->data_format & RMNET_FLAGS_INGRESS_MAP_COMMANDS)
			return rmnet_map_command(skb, port);

		goto free_skb;
	}

	mux_id = RMNET_MAP_GET_MUX_ID(skb);
	pad = RMNET_MAP_GET_PAD(skb);
	/* MAP length field covers payload plus padding; len is payload only. */
	len = RMNET_MAP_GET_LENGTH(skb) - pad;

	if (mux_id >= RMNET_MAX_LOGICAL_EP)
		goto free_skb;

	/* Unknown mux IDs (no configured endpoint) are dropped. */
	ep = rmnet_get_endpoint(port, mux_id);
	if (!ep)
		goto free_skb;

	skb->dev = ep->egress_dev;

	/* Subtract MAP header */
	skb_pull(skb, sizeof(struct rmnet_map_header));
	rmnet_set_skb_proto(skb);

	/* NOTE(review): checksum validation runs before skb_trim() so the
	 * checksum code sees the padded length (len + pad) — confirm against
	 * rmnet_map_checksum_downlink_packet() before reordering.
	 */
	if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) {
		if (!rmnet_map_checksum_downlink_packet(skb, len + pad))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	/* Drop trailing padding before delivery. */
	skb_trim(skb, len);
	rmnet_deliver_skb(skb);
	return;

free_skb:
	kfree_skb(skb);
}
  79. static void
  80. rmnet_map_ingress_handler(struct sk_buff *skb,
  81. struct rmnet_port *port)
  82. {
  83. struct sk_buff *skbn;
  84. if (skb->dev->type == ARPHRD_ETHER) {
  85. if (pskb_expand_head(skb, ETH_HLEN, 0, GFP_ATOMIC)) {
  86. kfree_skb(skb);
  87. return;
  88. }
  89. skb_push(skb, ETH_HLEN);
  90. }
  91. if (port->data_format & RMNET_FLAGS_INGRESS_DEAGGREGATION) {
  92. while ((skbn = rmnet_map_deaggregate(skb, port)) != NULL)
  93. __rmnet_map_ingress_handler(skbn, port);
  94. consume_skb(skb);
  95. } else {
  96. __rmnet_map_ingress_handler(skb, port);
  97. }
  98. }
  99. static int rmnet_map_egress_handler(struct sk_buff *skb,
  100. struct rmnet_port *port, u8 mux_id,
  101. struct net_device *orig_dev)
  102. {
  103. int required_headroom, additional_header_len;
  104. struct rmnet_map_header *map_header;
  105. additional_header_len = 0;
  106. required_headroom = sizeof(struct rmnet_map_header);
  107. if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4) {
  108. additional_header_len = sizeof(struct rmnet_map_ul_csum_header);
  109. required_headroom += additional_header_len;
  110. }
  111. if (skb_headroom(skb) < required_headroom) {
  112. if (pskb_expand_head(skb, required_headroom, 0, GFP_ATOMIC))
  113. return -ENOMEM;
  114. }
  115. if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4)
  116. rmnet_map_checksum_uplink_packet(skb, orig_dev);
  117. map_header = rmnet_map_add_map_header(skb, additional_header_len, 0);
  118. if (!map_header)
  119. return -ENOMEM;
  120. map_header->mux_id = mux_id;
  121. skb->protocol = htons(ETH_P_MAP);
  122. return 0;
  123. }
  124. static void
  125. rmnet_bridge_handler(struct sk_buff *skb, struct net_device *bridge_dev)
  126. {
  127. if (skb_mac_header_was_set(skb))
  128. skb_push(skb, skb->mac_len);
  129. if (bridge_dev) {
  130. skb->dev = bridge_dev;
  131. dev_queue_xmit(skb);
  132. }
  133. }
  134. /* Ingress / Egress Entry Points */
  135. /* Processes packet as per ingress data format for receiving device. Logical
  136. * endpoint is determined from packet inspection. Packet is then sent to the
  137. * egress device listed in the logical endpoint configuration.
  138. */
  139. rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
  140. {
  141. struct sk_buff *skb = *pskb;
  142. struct rmnet_port *port;
  143. struct net_device *dev;
  144. if (!skb)
  145. goto done;
  146. if (skb->pkt_type == PACKET_LOOPBACK)
  147. return RX_HANDLER_PASS;
  148. dev = skb->dev;
  149. port = rmnet_get_port_rcu(dev);
  150. if (unlikely(!port)) {
  151. atomic_long_inc(&skb->dev->rx_nohandler);
  152. kfree_skb(skb);
  153. goto done;
  154. }
  155. switch (port->rmnet_mode) {
  156. case RMNET_EPMODE_VND:
  157. rmnet_map_ingress_handler(skb, port);
  158. break;
  159. case RMNET_EPMODE_BRIDGE:
  160. rmnet_bridge_handler(skb, port->bridge_ep);
  161. break;
  162. }
  163. done:
  164. return RX_HANDLER_CONSUMED;
  165. }
  166. /* Modifies packet as per logical endpoint configuration and egress data format
  167. * for egress device configured in logical endpoint. Packet is then transmitted
  168. * on the egress device.
  169. */
  170. void rmnet_egress_handler(struct sk_buff *skb)
  171. {
  172. struct net_device *orig_dev;
  173. struct rmnet_port *port;
  174. struct rmnet_priv *priv;
  175. u8 mux_id;
  176. sk_pacing_shift_update(skb->sk, 8);
  177. orig_dev = skb->dev;
  178. priv = netdev_priv(orig_dev);
  179. skb->dev = priv->real_dev;
  180. mux_id = priv->mux_id;
  181. port = rmnet_get_port_rcu(skb->dev);
  182. if (!port)
  183. goto drop;
  184. if (rmnet_map_egress_handler(skb, port, mux_id, orig_dev))
  185. goto drop;
  186. rmnet_vnd_tx_fixup(skb, orig_dev);
  187. dev_queue_xmit(skb);
  188. return;
  189. drop:
  190. this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
  191. kfree_skb(skb);
  192. }