/*
 * net/dsa/dsa.c - Hardware switch handling
 * Copyright (c) 2008-2009 Marvell Semiconductor
 * Copyright (c) 2013 Florian Fainelli <florian@openwrt.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/device.h>
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/of_net.h>
#include <linux/netdevice.h>
#include <linux/sysfs.h>
#include <linux/phy_fixed.h>
#include <linux/ptp_classify.h>
#include <linux/etherdevice.h>

#include "dsa_priv.h"

static struct sk_buff *dsa_slave_notag_xmit(struct sk_buff *skb,
					    struct net_device *dev)
{
	/* Just return the original SKB */
	return skb;
}

static const struct dsa_device_ops none_ops = {
	.xmit = dsa_slave_notag_xmit,
	.rcv = NULL,
};

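/* Table of tagging protocol operations, indexed by DSA_TAG_PROTO_*.
 * Only the taggers enabled in Kconfig are compiled in; all other entries
 * stay NULL and are rejected by dsa_resolve_tag_protocol().
 */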
const struct dsa_device_ops *dsa_device_ops[DSA_TAG_LAST] = {
#ifdef CONFIG_NET_DSA_TAG_BRCM
	[DSA_TAG_PROTO_BRCM] = &brcm_netdev_ops,
#endif
#ifdef CONFIG_NET_DSA_TAG_BRCM_PREPEND
	[DSA_TAG_PROTO_BRCM_PREPEND] = &brcm_prepend_netdev_ops,
#endif
#ifdef CONFIG_NET_DSA_TAG_DSA
	[DSA_TAG_PROTO_DSA] = &dsa_netdev_ops,
#endif
#ifdef CONFIG_NET_DSA_TAG_EDSA
	[DSA_TAG_PROTO_EDSA] = &edsa_netdev_ops,
#endif
#ifdef CONFIG_NET_DSA_TAG_KSZ
	[DSA_TAG_PROTO_KSZ] = &ksz_netdev_ops,
#endif
#ifdef CONFIG_NET_DSA_TAG_LAN9303
	[DSA_TAG_PROTO_LAN9303] = &lan9303_netdev_ops,
#endif
#ifdef CONFIG_NET_DSA_TAG_MTK
	[DSA_TAG_PROTO_MTK] = &mtk_netdev_ops,
#endif
#ifdef CONFIG_NET_DSA_TAG_QCA
	[DSA_TAG_PROTO_QCA] = &qca_netdev_ops,
#endif
#ifdef CONFIG_NET_DSA_TAG_TRAILER
	[DSA_TAG_PROTO_TRAILER] = &trailer_netdev_ops,
#endif
	[DSA_TAG_PROTO_NONE] = &none_ops,
};

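/* Map a DSA_TAG_PROTO_* value to its tagging operations. Returns
 * ERR_PTR(-EINVAL) for an out-of-range protocol and ERR_PTR(-ENOPROTOOPT)
 * when the corresponding tagger is not built into the kernel.
 */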
const struct dsa_device_ops *dsa_resolve_tag_protocol(int tag_protocol)
{
	const struct dsa_device_ops *ops;

	if (tag_protocol >= DSA_TAG_LAST)
		return ERR_PTR(-EINVAL);

	ops = dsa_device_ops[tag_protocol];
	if (!ops)
		return ERR_PTR(-ENOPROTOOPT);

	return ops;
}

static int dev_is_class(struct device *dev, void *class)
{
	if (dev->class != NULL && !strcmp(dev->class->name, class))
		return 1;

	return 0;
}

static struct device *dev_find_class(struct device *parent, char *class)
{
	if (dev_is_class(parent, class)) {
		get_device(parent);
		return parent;
	}

	return device_find_child(parent, class, dev_is_class);
}

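/* Look up the net_device registered under @dev (or one of its children).
 * A reference is taken with dev_hold(); the caller is responsible for
 * dropping it with dev_put() when done.
 */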
struct net_device *dsa_dev_to_net_device(struct device *dev)
{
	struct device *d;

	d = dev_find_class(dev, "net");
	if (d != NULL) {
		struct net_device *nd;

		nd = to_net_dev(d);
		dev_hold(nd);
		put_device(d);

		return nd;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(dsa_dev_to_net_device);

/* Determine if we should defer delivery of skb until we have a rx timestamp.
 *
 * Called from dsa_switch_rcv. For now, this will only work if tagging is
 * enabled on the switch. Normally the MAC driver would retrieve the hardware
 * timestamp when it reads the packet out of the hardware. However in a DSA
 * switch, the DSA driver owning the interface to which the packet is
 * delivered is never notified unless we do so here.
 */
static bool dsa_skb_defer_rx_timestamp(struct dsa_slave_priv *p,
				       struct sk_buff *skb)
{
	struct dsa_switch *ds = p->dp->ds;
	unsigned int type;

	if (skb_headroom(skb) < ETH_HLEN)
		return false;

	__skb_push(skb, ETH_HLEN);

	type = ptp_classify_raw(skb);

	__skb_pull(skb, ETH_HLEN);

	if (type == PTP_CLASS_NONE)
		return false;

	if (likely(ds->ops->port_rxtstamp))
		return ds->ops->port_rxtstamp(ds, p->dp->index, skb, type);

	return false;
}

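/* Receive handler for ETH_P_XDSA frames coming in on the CPU port's master
 * device. The tagging protocol's rcv() hook strips the switch tag and
 * selects the slave net_device; the frame is then accounted in the per-CPU
 * statistics and handed to the stack, unless RX timestamping defers it.
 */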
static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev,
			  struct packet_type *pt, struct net_device *unused)
{
	struct dsa_port *cpu_dp = dev->dsa_ptr;
	struct sk_buff *nskb = NULL;
	struct pcpu_sw_netstats *s;
	struct dsa_slave_priv *p;

	if (unlikely(!cpu_dp)) {
		kfree_skb(skb);
		return 0;
	}

	skb = skb_unshare(skb, GFP_ATOMIC);
	if (!skb)
		return 0;

	nskb = cpu_dp->rcv(skb, dev, pt);
	if (!nskb) {
		kfree_skb(skb);
		return 0;
	}

	skb = nskb;
	p = netdev_priv(skb->dev);
	skb_push(skb, ETH_HLEN);
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, skb->dev);

	s = this_cpu_ptr(p->stats64);
	u64_stats_update_begin(&s->syncp);
	s->rx_packets++;
	s->rx_bytes += skb->len;
	u64_stats_update_end(&s->syncp);

	if (dsa_skb_defer_rx_timestamp(p, skb))
		return 0;

	netif_receive_skb(skb);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static bool dsa_is_port_initialized(struct dsa_switch *ds, int p)
{
	return dsa_is_user_port(ds, p) && ds->ports[p].slave;
}

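/* Suspend every initialized user port first, then the switch driver itself.
 * dsa_switch_resume() performs the mirror image: switch first, then ports.
 */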
int dsa_switch_suspend(struct dsa_switch *ds)
{
	int i, ret = 0;

	/* Suspend slave network devices */
	for (i = 0; i < ds->num_ports; i++) {
		if (!dsa_is_port_initialized(ds, i))
			continue;

		ret = dsa_slave_suspend(ds->ports[i].slave);
		if (ret)
			return ret;
	}

	if (ds->ops->suspend)
		ret = ds->ops->suspend(ds);

	return ret;
}
EXPORT_SYMBOL_GPL(dsa_switch_suspend);

int dsa_switch_resume(struct dsa_switch *ds)
{
	int i, ret = 0;

	if (ds->ops->resume)
		ret = ds->ops->resume(ds);

	if (ret)
		return ret;

	/* Resume slave network devices */
	for (i = 0; i < ds->num_ports; i++) {
		if (!dsa_is_port_initialized(ds, i))
			continue;

		ret = dsa_slave_resume(ds->ports[i].slave);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dsa_switch_resume);
#endif

static struct packet_type dsa_pack_type __read_mostly = {
	.type = cpu_to_be16(ETH_P_XDSA),
	.func = dsa_switch_rcv,
};

static struct workqueue_struct *dsa_owq;

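/* Queue deferred DSA work on the ordered workqueue allocated at module init,
 * so queued items execute one at a time, in submission order.
 */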
bool dsa_schedule_work(struct work_struct *work)
{
	return queue_work(dsa_owq, work);
}

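/* Atomic notifier chain through which other parts of the kernel can be told
 * about DSA events; call_dsa_notifiers() records the originating net_device
 * in the info block before invoking the chain.
 */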
static ATOMIC_NOTIFIER_HEAD(dsa_notif_chain);

int register_dsa_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&dsa_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(register_dsa_notifier);

int unregister_dsa_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&dsa_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_dsa_notifier);

int call_dsa_notifiers(unsigned long val, struct net_device *dev,
		       struct dsa_notifier_info *info)
{
	info->dev = dev;
	return atomic_notifier_call_chain(&dsa_notif_chain, val, info);
}
EXPORT_SYMBOL_GPL(call_dsa_notifiers);

static int __init dsa_init_module(void)
{
	int rc;

	dsa_owq = alloc_ordered_workqueue("dsa_ordered",
					  WQ_MEM_RECLAIM);
	if (!dsa_owq)
		return -ENOMEM;

	rc = dsa_slave_register_notifier();
	if (rc)
		goto register_notifier_fail;

	rc = dsa_legacy_register();
	if (rc)
		goto legacy_register_fail;

	dev_add_pack(&dsa_pack_type);

	return 0;

legacy_register_fail:
	dsa_slave_unregister_notifier();
register_notifier_fail:
	destroy_workqueue(dsa_owq);

	return rc;
}
module_init(dsa_init_module);

static void __exit dsa_cleanup_module(void)
{
	dsa_slave_unregister_notifier();
	dev_remove_pack(&dsa_pack_type);
	dsa_legacy_unregister();
	destroy_workqueue(dsa_owq);
}
module_exit(dsa_cleanup_module);

MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
MODULE_DESCRIPTION("Driver for Distributed Switch Architecture switch chips");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:dsa");