/* rmnet_handlers.c */
  1. /* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
  2. *
  3. * This program is free software; you can redistribute it and/or modify
  4. * it under the terms of the GNU General Public License version 2 and
  5. * only version 2 as published by the Free Software Foundation.
  6. *
  7. * This program is distributed in the hope that it will be useful,
  8. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. * GNU General Public License for more details.
  11. *
  12. * RMNET Data ingress/egress handler
  13. *
  14. */
  15. #include <linux/netdevice.h>
  16. #include <linux/netdev_features.h>
  17. #include <linux/if_arp.h>
  18. #include <net/sock.h>
  19. #include "rmnet_private.h"
  20. #include "rmnet_config.h"
  21. #include "rmnet_vnd.h"
  22. #include "rmnet_map.h"
  23. #include "rmnet_handlers.h"
  24. #define RMNET_IP_VERSION_4 0x40
  25. #define RMNET_IP_VERSION_6 0x60
  26. /* Helper Functions */
  27. static void rmnet_set_skb_proto(struct sk_buff *skb)
  28. {
  29. switch (skb->data[0] & 0xF0) {
  30. case RMNET_IP_VERSION_4:
  31. skb->protocol = htons(ETH_P_IP);
  32. break;
  33. case RMNET_IP_VERSION_6:
  34. skb->protocol = htons(ETH_P_IPV6);
  35. break;
  36. default:
  37. skb->protocol = htons(ETH_P_MAP);
  38. break;
  39. }
  40. }
  41. /* Generic handler */
  42. static void
  43. rmnet_deliver_skb(struct sk_buff *skb)
  44. {
  45. struct rmnet_priv *priv = netdev_priv(skb->dev);
  46. skb_reset_transport_header(skb);
  47. skb_reset_network_header(skb);
  48. rmnet_vnd_rx_fixup(skb, skb->dev);
  49. skb->pkt_type = PACKET_HOST;
  50. skb_set_mac_header(skb, 0);
  51. gro_cells_receive(&priv->gro_cells, skb);
  52. }
/* MAP handler: demultiplex a single MAP frame to its logical endpoint.
 *
 * Command frames are dispatched to rmnet_map_command() when command
 * processing is enabled, otherwise dropped.  Data frames are routed by
 * mux ID to their endpoint's egress device, stripped of the MAP header
 * and trailing pad, optionally checksum-verified, then delivered.
 * Consumes the skb on every path.
 */
static void
__rmnet_map_ingress_handler(struct sk_buff *skb,
			    struct rmnet_port *port)
{
	struct rmnet_endpoint *ep;
	u16 len, pad;
	u8 mux_id;

	if (RMNET_MAP_GET_CD_BIT(skb)) {
		if (port->data_format & RMNET_FLAGS_INGRESS_MAP_COMMANDS)
			return rmnet_map_command(skb, port);

		goto free_skb;
	}

	mux_id = RMNET_MAP_GET_MUX_ID(skb);
	pad = RMNET_MAP_GET_PAD(skb);
	/* MAP length field covers payload plus pad; keep payload only. */
	len = RMNET_MAP_GET_LENGTH(skb) - pad;

	if (mux_id >= RMNET_MAX_LOGICAL_EP)
		goto free_skb;

	ep = rmnet_get_endpoint(port, mux_id);
	if (!ep)
		goto free_skb;

	skb->dev = ep->egress_dev;

	/* Subtract MAP header */
	skb_pull(skb, sizeof(struct rmnet_map_header));
	rmnet_set_skb_proto(skb);

	if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) {
		/* Verify over payload + pad, before the pad is trimmed. */
		if (!rmnet_map_checksum_downlink_packet(skb, len + pad))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	/* Drop the pad bytes trailing the payload. */
	skb_trim(skb, len);
	rmnet_deliver_skb(skb);
	return;

free_skb:
	kfree_skb(skb);
}
  88. static void
  89. rmnet_map_ingress_handler(struct sk_buff *skb,
  90. struct rmnet_port *port)
  91. {
  92. struct sk_buff *skbn;
  93. if (skb->dev->type == ARPHRD_ETHER) {
  94. if (pskb_expand_head(skb, ETH_HLEN, 0, GFP_ATOMIC)) {
  95. kfree_skb(skb);
  96. return;
  97. }
  98. skb_push(skb, ETH_HLEN);
  99. }
  100. if (port->data_format & RMNET_FLAGS_INGRESS_DEAGGREGATION) {
  101. while ((skbn = rmnet_map_deaggregate(skb, port)) != NULL)
  102. __rmnet_map_ingress_handler(skbn, port);
  103. consume_skb(skb);
  104. } else {
  105. __rmnet_map_ingress_handler(skb, port);
  106. }
  107. }
  108. static int rmnet_map_egress_handler(struct sk_buff *skb,
  109. struct rmnet_port *port, u8 mux_id,
  110. struct net_device *orig_dev)
  111. {
  112. int required_headroom, additional_header_len;
  113. struct rmnet_map_header *map_header;
  114. additional_header_len = 0;
  115. required_headroom = sizeof(struct rmnet_map_header);
  116. if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4) {
  117. additional_header_len = sizeof(struct rmnet_map_ul_csum_header);
  118. required_headroom += additional_header_len;
  119. }
  120. if (skb_headroom(skb) < required_headroom) {
  121. if (pskb_expand_head(skb, required_headroom, 0, GFP_ATOMIC))
  122. return -ENOMEM;
  123. }
  124. if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4)
  125. rmnet_map_checksum_uplink_packet(skb, orig_dev);
  126. map_header = rmnet_map_add_map_header(skb, additional_header_len, 0);
  127. if (!map_header)
  128. return -ENOMEM;
  129. map_header->mux_id = mux_id;
  130. skb->protocol = htons(ETH_P_MAP);
  131. return 0;
  132. }
  133. static void
  134. rmnet_bridge_handler(struct sk_buff *skb, struct net_device *bridge_dev)
  135. {
  136. if (skb_mac_header_was_set(skb))
  137. skb_push(skb, skb->mac_len);
  138. if (bridge_dev) {
  139. skb->dev = bridge_dev;
  140. dev_queue_xmit(skb);
  141. }
  142. }
  143. /* Ingress / Egress Entry Points */
  144. /* Processes packet as per ingress data format for receiving device. Logical
  145. * endpoint is determined from packet inspection. Packet is then sent to the
  146. * egress device listed in the logical endpoint configuration.
  147. */
  148. rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
  149. {
  150. struct sk_buff *skb = *pskb;
  151. struct rmnet_port *port;
  152. struct net_device *dev;
  153. if (!skb)
  154. goto done;
  155. if (skb->pkt_type == PACKET_LOOPBACK)
  156. return RX_HANDLER_PASS;
  157. dev = skb->dev;
  158. port = rmnet_get_port_rcu(dev);
  159. switch (port->rmnet_mode) {
  160. case RMNET_EPMODE_VND:
  161. rmnet_map_ingress_handler(skb, port);
  162. break;
  163. case RMNET_EPMODE_BRIDGE:
  164. rmnet_bridge_handler(skb, port->bridge_ep);
  165. break;
  166. }
  167. done:
  168. return RX_HANDLER_CONSUMED;
  169. }
  170. /* Modifies packet as per logical endpoint configuration and egress data format
  171. * for egress device configured in logical endpoint. Packet is then transmitted
  172. * on the egress device.
  173. */
  174. void rmnet_egress_handler(struct sk_buff *skb)
  175. {
  176. struct net_device *orig_dev;
  177. struct rmnet_port *port;
  178. struct rmnet_priv *priv;
  179. u8 mux_id;
  180. sk_pacing_shift_update(skb->sk, 8);
  181. orig_dev = skb->dev;
  182. priv = netdev_priv(orig_dev);
  183. skb->dev = priv->real_dev;
  184. mux_id = priv->mux_id;
  185. port = rmnet_get_port_rcu(skb->dev);
  186. if (!port)
  187. goto drop;
  188. if (rmnet_map_egress_handler(skb, port, mux_id, orig_dev))
  189. goto drop;
  190. rmnet_vnd_tx_fixup(skb, orig_dev);
  191. dev_queue_xmit(skb);
  192. return;
  193. drop:
  194. this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
  195. kfree_skb(skb);
  196. }