fec_mpc52xx.c

/*
 * Driver for the MPC5200 Fast Ethernet Controller
 *
 * Originally written by Dale Farnsworth <dfarnsworth@mvista.com> and
 * now maintained by Sylvain Munaut <tnt@246tNt.com>
 *
 * Copyright (C) 2007 Domen Puncer, Telargo, Inc.
 * Copyright (C) 2007 Sylvain Munaut <tnt@246tNt.com>
 * Copyright (C) 2003-2004 MontaVista, Software, Inc.
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/dma-mapping.h>
#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/crc32.h>
#include <linux/hardirq.h>
#include <linux/delay.h>
#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>

#include <asm/io.h>
#include <asm/delay.h>
#include <asm/mpc52xx.h>

#include <linux/fsl/bestcomm/bestcomm.h>
#include <linux/fsl/bestcomm/fec.h>

#include "fec_mpc52xx.h"
#define DRIVER_NAME "mpc52xx-fec"

/* Private driver data structure */
struct mpc52xx_fec_priv {
        struct net_device *ndev;
        int duplex;
        int speed;
        int r_irq;
        int t_irq;
        struct mpc52xx_fec __iomem *fec;
        struct bcom_task *rx_dmatsk;
        struct bcom_task *tx_dmatsk;
        spinlock_t lock;
        int msg_enable;

        /* MDIO link details */
        unsigned int mdio_speed;
        struct device_node *phy_node;
        enum phy_state link;
        int seven_wire_mode;
};
static irqreturn_t mpc52xx_fec_interrupt(int, void *);
static irqreturn_t mpc52xx_fec_rx_interrupt(int, void *);
static irqreturn_t mpc52xx_fec_tx_interrupt(int, void *);
static void mpc52xx_fec_stop(struct net_device *dev);
static void mpc52xx_fec_start(struct net_device *dev);
static void mpc52xx_fec_reset(struct net_device *dev);

#define MPC52xx_MESSAGES_DEFAULT ( NETIF_MSG_DRV | NETIF_MSG_PROBE | \
                NETIF_MSG_LINK | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)
static int debug = -1;  /* the above default */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "debugging messages level");
static void mpc52xx_fec_tx_timeout(struct net_device *dev)
{
        struct mpc52xx_fec_priv *priv = netdev_priv(dev);
        unsigned long flags;

        dev_warn(&dev->dev, "transmit timed out\n");

        spin_lock_irqsave(&priv->lock, flags);
        mpc52xx_fec_reset(dev);
        dev->stats.tx_errors++;
        spin_unlock_irqrestore(&priv->lock, flags);

        netif_wake_queue(dev);
}
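
/*
 * Program the station (physical) address into the controller: PADDR1 holds
 * the first four bytes of the MAC address, PADDR2 carries the last two bytes
 * in its upper halfword together with the FEC_PADDR2_TYPE marker.
 */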
static void mpc52xx_fec_set_paddr(struct net_device *dev, u8 *mac)
{
        struct mpc52xx_fec_priv *priv = netdev_priv(dev);
        struct mpc52xx_fec __iomem *fec = priv->fec;

        out_be32(&fec->paddr1, *(u32 *)(&mac[0]));
        out_be32(&fec->paddr2, (*(u16 *)(&mac[4]) << 16) | FEC_PADDR2_TYPE);
}

static int mpc52xx_fec_set_mac_address(struct net_device *dev, void *addr)
{
        struct sockaddr *sock = addr;

        memcpy(dev->dev_addr, sock->sa_data, dev->addr_len);

        mpc52xx_fec_set_paddr(dev, sock->sa_data);
        return 0;
}
static void mpc52xx_fec_free_rx_buffers(struct net_device *dev, struct bcom_task *s)
{
        while (!bcom_queue_empty(s)) {
                struct bcom_fec_bd *bd;
                struct sk_buff *skb;

                skb = bcom_retrieve_buffer(s, NULL, (struct bcom_bd **)&bd);
                dma_unmap_single(dev->dev.parent, bd->skb_pa, skb->len,
                                 DMA_FROM_DEVICE);
                kfree_skb(skb);
        }
}
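
/*
 * Hand one receive buffer to the hardware: fill in a BestComm buffer
 * descriptor, map the skb data for DMA and queue it on the RX task.
 */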
static void
mpc52xx_fec_rx_submit(struct net_device *dev, struct sk_buff *rskb)
{
        struct mpc52xx_fec_priv *priv = netdev_priv(dev);
        struct bcom_fec_bd *bd;

        bd = (struct bcom_fec_bd *) bcom_prepare_next_buffer(priv->rx_dmatsk);
        bd->status = FEC_RX_BUFFER_SIZE;
        bd->skb_pa = dma_map_single(dev->dev.parent, rskb->data,
                                    FEC_RX_BUFFER_SIZE, DMA_FROM_DEVICE);
        bcom_submit_next_buffer(priv->rx_dmatsk, rskb);
}
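
/* Pre-fill the BestComm RX ring with freshly allocated socket buffers. */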
static int mpc52xx_fec_alloc_rx_buffers(struct net_device *dev, struct bcom_task *rxtsk)
{
        struct sk_buff *skb;

        while (!bcom_queue_full(rxtsk)) {
                skb = netdev_alloc_skb(dev, FEC_RX_BUFFER_SIZE);
                if (!skb)
                        return -EAGAIN;

                /* zero out the initial receive buffers to aid debugging */
                memset(skb->data, 0, FEC_RX_BUFFER_SIZE);
                mpc52xx_fec_rx_submit(dev, skb);
        }
        return 0;
}
/* based on generic_adjust_link from fs_enet-main.c */
static void mpc52xx_fec_adjust_link(struct net_device *dev)
{
        struct mpc52xx_fec_priv *priv = netdev_priv(dev);
        struct phy_device *phydev = dev->phydev;
        int new_state = 0;

        if (phydev->link != PHY_DOWN) {
                if (phydev->duplex != priv->duplex) {
                        struct mpc52xx_fec __iomem *fec = priv->fec;
                        u32 rcntrl;
                        u32 tcntrl;

                        new_state = 1;
                        priv->duplex = phydev->duplex;

                        rcntrl = in_be32(&fec->r_cntrl);
                        tcntrl = in_be32(&fec->x_cntrl);

                        rcntrl &= ~FEC_RCNTRL_DRT;
                        tcntrl &= ~FEC_TCNTRL_FDEN;
                        if (phydev->duplex == DUPLEX_FULL)
                                tcntrl |= FEC_TCNTRL_FDEN;      /* FD enable */
                        else
                                rcntrl |= FEC_RCNTRL_DRT;       /* disable Rx on Tx (HD) */

                        out_be32(&fec->r_cntrl, rcntrl);
                        out_be32(&fec->x_cntrl, tcntrl);
                }

                if (phydev->speed != priv->speed) {
                        new_state = 1;
                        priv->speed = phydev->speed;
                }

                if (priv->link == PHY_DOWN) {
                        new_state = 1;
                        priv->link = phydev->link;
                }

        } else if (priv->link) {
                new_state = 1;
                priv->link = PHY_DOWN;
                priv->speed = 0;
                priv->duplex = -1;
        }

        if (new_state && netif_msg_link(priv))
                phy_print_status(phydev);
}
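
/*
 * ndo_open: connect the PHY (if one was found in the device tree), request
 * the controller, RX and TX IRQs, reset the BestComm tasks, fill the RX ring
 * and start the hardware.
 */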
static int mpc52xx_fec_open(struct net_device *dev)
{
        struct mpc52xx_fec_priv *priv = netdev_priv(dev);
        struct phy_device *phydev = NULL;
        int err = -EBUSY;

        if (priv->phy_node) {
                phydev = of_phy_connect(priv->ndev, priv->phy_node,
                                        mpc52xx_fec_adjust_link, 0, 0);
                if (!phydev) {
                        dev_err(&dev->dev, "of_phy_connect failed\n");
                        return -ENODEV;
                }
                phy_start(phydev);
        }

        if (request_irq(dev->irq, mpc52xx_fec_interrupt, IRQF_SHARED,
                        DRIVER_NAME "_ctrl", dev)) {
                dev_err(&dev->dev, "ctrl interrupt request failed\n");
                goto free_phy;
        }
        if (request_irq(priv->r_irq, mpc52xx_fec_rx_interrupt, 0,
                        DRIVER_NAME "_rx", dev)) {
                dev_err(&dev->dev, "rx interrupt request failed\n");
                goto free_ctrl_irq;
        }
        if (request_irq(priv->t_irq, mpc52xx_fec_tx_interrupt, 0,
                        DRIVER_NAME "_tx", dev)) {
                dev_err(&dev->dev, "tx interrupt request failed\n");
                goto free_2irqs;
        }

        bcom_fec_rx_reset(priv->rx_dmatsk);
        bcom_fec_tx_reset(priv->tx_dmatsk);

        err = mpc52xx_fec_alloc_rx_buffers(dev, priv->rx_dmatsk);
        if (err) {
                dev_err(&dev->dev, "mpc52xx_fec_alloc_rx_buffers failed\n");
                goto free_irqs;
        }

        bcom_enable(priv->rx_dmatsk);
        bcom_enable(priv->tx_dmatsk);

        mpc52xx_fec_start(dev);

        netif_start_queue(dev);

        return 0;

 free_irqs:
        free_irq(priv->t_irq, dev);
 free_2irqs:
        free_irq(priv->r_irq, dev);
 free_ctrl_irq:
        free_irq(dev->irq, dev);
 free_phy:
        if (phydev) {
                phy_stop(phydev);
                phy_disconnect(phydev);
        }

        return err;
}
static int mpc52xx_fec_close(struct net_device *dev)
{
        struct mpc52xx_fec_priv *priv = netdev_priv(dev);
        struct phy_device *phydev = dev->phydev;

        netif_stop_queue(dev);

        mpc52xx_fec_stop(dev);

        mpc52xx_fec_free_rx_buffers(dev, priv->rx_dmatsk);

        free_irq(dev->irq, dev);
        free_irq(priv->r_irq, dev);
        free_irq(priv->t_irq, dev);

        if (phydev) {
                /* power down phy */
                phy_stop(phydev);
                phy_disconnect(phydev);
        }

        return 0;
}
/* This will only be invoked if your driver is _not_ in XOFF state.
 * What this means is that you need not check it, and that this
 * invariant will hold if you make sure that the netif_*_queue()
 * calls are done at the proper times.
 */
static netdev_tx_t
mpc52xx_fec_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct mpc52xx_fec_priv *priv = netdev_priv(dev);
        struct bcom_fec_bd *bd;
        unsigned long flags;

        if (bcom_queue_full(priv->tx_dmatsk)) {
                if (net_ratelimit())
                        dev_err(&dev->dev, "transmit queue overrun\n");
                return NETDEV_TX_BUSY;
        }

        spin_lock_irqsave(&priv->lock, flags);

        bd = (struct bcom_fec_bd *)
                bcom_prepare_next_buffer(priv->tx_dmatsk);

        bd->status = skb->len | BCOM_FEC_TX_BD_TFD | BCOM_FEC_TX_BD_TC;
        bd->skb_pa = dma_map_single(dev->dev.parent, skb->data, skb->len,
                                    DMA_TO_DEVICE);

        skb_tx_timestamp(skb);
        bcom_submit_next_buffer(priv->tx_dmatsk, skb);
        spin_unlock_irqrestore(&priv->lock, flags);

        if (bcom_queue_full(priv->tx_dmatsk)) {
                netif_stop_queue(dev);
        }

        return NETDEV_TX_OK;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void mpc52xx_fec_poll_controller(struct net_device *dev)
{
        struct mpc52xx_fec_priv *priv = netdev_priv(dev);

        disable_irq(priv->t_irq);
        mpc52xx_fec_tx_interrupt(priv->t_irq, dev);
        enable_irq(priv->t_irq);
        disable_irq(priv->r_irq);
        mpc52xx_fec_rx_interrupt(priv->r_irq, dev);
        enable_irq(priv->r_irq);
}
#endif
/* This handles BestComm transmit task interrupts */
static irqreturn_t mpc52xx_fec_tx_interrupt(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct mpc52xx_fec_priv *priv = netdev_priv(dev);

        spin_lock(&priv->lock);
        while (bcom_buffer_done(priv->tx_dmatsk)) {
                struct sk_buff *skb;
                struct bcom_fec_bd *bd;

                skb = bcom_retrieve_buffer(priv->tx_dmatsk, NULL,
                                           (struct bcom_bd **)&bd);
                dma_unmap_single(dev->dev.parent, bd->skb_pa, skb->len,
                                 DMA_TO_DEVICE);

                dev_kfree_skb_irq(skb);
        }
        spin_unlock(&priv->lock);

        netif_wake_queue(dev);

        return IRQ_HANDLED;
}
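
/*
 * BestComm receive task interrupt: for every completed buffer, either drop
 * the frame and recycle its buffer (on error or allocation failure), or
 * queue a fresh skb in its place and push the received one up the stack.
 */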
static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct mpc52xx_fec_priv *priv = netdev_priv(dev);
        struct sk_buff *rskb;   /* received sk_buff */
        struct sk_buff *skb;    /* new sk_buff to enqueue in its place */
        struct bcom_fec_bd *bd;
        u32 status, physaddr;
        int length;

        spin_lock(&priv->lock);

        while (bcom_buffer_done(priv->rx_dmatsk)) {

                rskb = bcom_retrieve_buffer(priv->rx_dmatsk, &status,
                                            (struct bcom_bd **)&bd);
                physaddr = bd->skb_pa;

                /* Test for errors in received frame */
                if (status & BCOM_FEC_RX_BD_ERRORS) {
                        /* Drop packet and reuse the buffer */
                        mpc52xx_fec_rx_submit(dev, rskb);
                        dev->stats.rx_dropped++;
                        continue;
                }

                /* skbs are allocated on open, so now we allocate a new one,
                 * and remove the old (with the packet) */
                skb = netdev_alloc_skb(dev, FEC_RX_BUFFER_SIZE);
                if (!skb) {
                        /* Can't get a new one : reuse the same & drop pkt */
                        dev_notice(&dev->dev, "Low memory - dropped packet.\n");
                        mpc52xx_fec_rx_submit(dev, rskb);
                        dev->stats.rx_dropped++;
                        continue;
                }

                /* Enqueue the new sk_buff back on the hardware */
                mpc52xx_fec_rx_submit(dev, skb);

                /* Process the received skb - Drop the spin lock while
                 * calling into the network stack */
                spin_unlock(&priv->lock);

                dma_unmap_single(dev->dev.parent, physaddr, rskb->len,
                                 DMA_FROM_DEVICE);
                length = status & BCOM_FEC_RX_BD_LEN_MASK;
                skb_put(rskb, length - 4);      /* length without CRC32 */
                rskb->protocol = eth_type_trans(rskb, dev);
                if (!skb_defer_rx_timestamp(rskb))
                        netif_rx(rskb);

                spin_lock(&priv->lock);
        }

        spin_unlock(&priv->lock);

        return IRQ_HANDLED;
}
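
/*
 * Controller-level interrupt: everything except MII events (handled by the
 * MDIO driver) is acknowledged here, and FIFO errors force a soft reset of
 * the FEC.
 */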
static irqreturn_t mpc52xx_fec_interrupt(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct mpc52xx_fec_priv *priv = netdev_priv(dev);
        struct mpc52xx_fec __iomem *fec = priv->fec;
        u32 ievent;

        ievent = in_be32(&fec->ievent);

        ievent &= ~FEC_IEVENT_MII;      /* mii is handled separately */
        if (!ievent)
                return IRQ_NONE;

        out_be32(&fec->ievent, ievent); /* clear pending events */

        /* on fifo error, soft-reset fec */
        if (ievent & (FEC_IEVENT_RFIFO_ERROR | FEC_IEVENT_XFIFO_ERROR)) {

                if (net_ratelimit() && (ievent & FEC_IEVENT_RFIFO_ERROR))
                        dev_warn(&dev->dev, "FEC_IEVENT_RFIFO_ERROR\n");
                if (net_ratelimit() && (ievent & FEC_IEVENT_XFIFO_ERROR))
                        dev_warn(&dev->dev, "FEC_IEVENT_XFIFO_ERROR\n");

                spin_lock(&priv->lock);
                mpc52xx_fec_reset(dev);
                spin_unlock(&priv->lock);

                return IRQ_HANDLED;
        }

        if (ievent & ~FEC_IEVENT_TFINT)
                dev_dbg(&dev->dev, "ievent: %08x\n", ievent);

        return IRQ_HANDLED;
}
/*
 * Get the current statistics.
 * This may be called with the card open or closed.
 */
static struct net_device_stats *mpc52xx_fec_get_stats(struct net_device *dev)
{
        struct mpc52xx_fec_priv *priv = netdev_priv(dev);
        struct net_device_stats *stats = &dev->stats;
        struct mpc52xx_fec __iomem *fec = priv->fec;

        stats->rx_bytes = in_be32(&fec->rmon_r_octets);
        stats->rx_packets = in_be32(&fec->rmon_r_packets);
        stats->rx_errors = in_be32(&fec->rmon_r_crc_align) +
                in_be32(&fec->rmon_r_undersize) +
                in_be32(&fec->rmon_r_oversize) +
                in_be32(&fec->rmon_r_frag) +
                in_be32(&fec->rmon_r_jab);
        stats->tx_bytes = in_be32(&fec->rmon_t_octets);
        stats->tx_packets = in_be32(&fec->rmon_t_packets);
        stats->tx_errors = in_be32(&fec->rmon_t_crc_align) +
                in_be32(&fec->rmon_t_undersize) +
                in_be32(&fec->rmon_t_oversize) +
                in_be32(&fec->rmon_t_frag) +
                in_be32(&fec->rmon_t_jab);
        stats->multicast = in_be32(&fec->rmon_r_mc_pkt);
        stats->collisions = in_be32(&fec->rmon_t_col);

        /* detailed rx_errors: */
        stats->rx_length_errors = in_be32(&fec->rmon_r_undersize)
                + in_be32(&fec->rmon_r_oversize)
                + in_be32(&fec->rmon_r_frag)
                + in_be32(&fec->rmon_r_jab);
        stats->rx_over_errors = in_be32(&fec->r_macerr);
        stats->rx_crc_errors = in_be32(&fec->ieee_r_crc);
        stats->rx_frame_errors = in_be32(&fec->ieee_r_align);
        stats->rx_fifo_errors = in_be32(&fec->rmon_r_drop);
        stats->rx_missed_errors = in_be32(&fec->rmon_r_drop);

        /* detailed tx_errors: */
        stats->tx_aborted_errors = 0;
        stats->tx_carrier_errors = in_be32(&fec->ieee_t_cserr);
        stats->tx_fifo_errors = in_be32(&fec->rmon_t_drop);
        stats->tx_heartbeat_errors = in_be32(&fec->ieee_t_sqe);
        stats->tx_window_errors = in_be32(&fec->ieee_t_lcol);

        return stats;
}
/*
 * Read MIB counters in order to reset them,
 * then zero all the stats fields in memory
 */
static void mpc52xx_fec_reset_stats(struct net_device *dev)
{
        struct mpc52xx_fec_priv *priv = netdev_priv(dev);
        struct mpc52xx_fec __iomem *fec = priv->fec;

        out_be32(&fec->mib_control, FEC_MIB_DISABLE);
        memset_io(&fec->rmon_t_drop, 0,
                  offsetof(struct mpc52xx_fec, reserved10) -
                  offsetof(struct mpc52xx_fec, rmon_t_drop));
        out_be32(&fec->mib_control, 0);

        memset(&dev->stats, 0, sizeof(dev->stats));
}
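
/*
 * Multicast filtering uses the controller's 64-bit group hash: the top six
 * bits of the little-endian CRC-32 of each multicast address select one bit
 * in the GADDR1/GADDR2 register pair programmed below.
 */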
/*
 * Set or clear the multicast filter for this adaptor.
 */
static void mpc52xx_fec_set_multicast_list(struct net_device *dev)
{
        struct mpc52xx_fec_priv *priv = netdev_priv(dev);
        struct mpc52xx_fec __iomem *fec = priv->fec;
        u32 rx_control;

        rx_control = in_be32(&fec->r_cntrl);

        if (dev->flags & IFF_PROMISC) {
                rx_control |= FEC_RCNTRL_PROM;
                out_be32(&fec->r_cntrl, rx_control);
        } else {
                rx_control &= ~FEC_RCNTRL_PROM;
                out_be32(&fec->r_cntrl, rx_control);

                if (dev->flags & IFF_ALLMULTI) {
                        out_be32(&fec->gaddr1, 0xffffffff);
                        out_be32(&fec->gaddr2, 0xffffffff);
                } else {
                        u32 crc;
                        struct netdev_hw_addr *ha;
                        u32 gaddr1 = 0x00000000;
                        u32 gaddr2 = 0x00000000;

                        netdev_for_each_mc_addr(ha, dev) {
                                crc = ether_crc_le(6, ha->addr) >> 26;
                                if (crc >= 32)
                                        gaddr1 |= 1 << (crc-32);
                                else
                                        gaddr2 |= 1 << crc;
                        }
                        out_be32(&fec->gaddr1, gaddr1);
                        out_be32(&fec->gaddr2, gaddr2);
                }
        }
}
/**
 * mpc52xx_fec_hw_init
 * @dev: network device
 *
 * Set up various hardware settings, only needed once on start
 */
static void mpc52xx_fec_hw_init(struct net_device *dev)
{
        struct mpc52xx_fec_priv *priv = netdev_priv(dev);
        struct mpc52xx_fec __iomem *fec = priv->fec;
        int i;

        /* Whack a reset. We should wait for this. */
        out_be32(&fec->ecntrl, FEC_ECNTRL_RESET);
        for (i = 0; i < FEC_RESET_DELAY; ++i) {
                if ((in_be32(&fec->ecntrl) & FEC_ECNTRL_RESET) == 0)
                        break;
                udelay(1);
        }
        if (i == FEC_RESET_DELAY)
                dev_err(&dev->dev, "FEC Reset timeout!\n");

        /* set pause to 0x20 frames */
        out_be32(&fec->op_pause, FEC_OP_PAUSE_OPCODE | 0x20);

        /* high service request will be deasserted when there's < 7 bytes in fifo
         * low service request will be deasserted when there's < 4*7 bytes in fifo
         */
        out_be32(&fec->rfifo_cntrl, FEC_FIFO_CNTRL_FRAME | FEC_FIFO_CNTRL_LTG_7);
        out_be32(&fec->tfifo_cntrl, FEC_FIFO_CNTRL_FRAME | FEC_FIFO_CNTRL_LTG_7);

        /* alarm when <= x bytes in FIFO */
        out_be32(&fec->rfifo_alarm, 0x0000030c);
        out_be32(&fec->tfifo_alarm, 0x00000100);

        /* begin transmission when 256 bytes are in FIFO (or EOF or FIFO full) */
        out_be32(&fec->x_wmrk, FEC_FIFO_WMRK_256B);

        /* enable crc generation */
        out_be32(&fec->xmit_fsm, FEC_XMIT_FSM_APPEND_CRC | FEC_XMIT_FSM_ENABLE_CRC);
        out_be32(&fec->iaddr1, 0x00000000);     /* No individual filter */
        out_be32(&fec->iaddr2, 0x00000000);     /* No individual filter */

        /* set phy speed.
         * this can't be done in phy driver, since it needs to be called
         * before fec stuff (even on resume) */
        out_be32(&fec->mii_speed, priv->mdio_speed);
}
/**
 * mpc52xx_fec_start
 * @dev: network device
 *
 * This function is called to start or restart the FEC: on a link change,
 * after a FIFO error, or when switching between half and full duplex.
 */
static void mpc52xx_fec_start(struct net_device *dev)
{
        struct mpc52xx_fec_priv *priv = netdev_priv(dev);
        struct mpc52xx_fec __iomem *fec = priv->fec;
        u32 rcntrl;
        u32 tcntrl;
        u32 tmp;

        /* clear sticky error bits */
        tmp = FEC_FIFO_STATUS_ERR | FEC_FIFO_STATUS_UF | FEC_FIFO_STATUS_OF;
        out_be32(&fec->rfifo_status, in_be32(&fec->rfifo_status) & tmp);
        out_be32(&fec->tfifo_status, in_be32(&fec->tfifo_status) & tmp);

        /* FIFOs will reset on mpc52xx_fec_enable */
        out_be32(&fec->reset_cntrl, FEC_RESET_CNTRL_ENABLE_IS_RESET);

        /* Set station address. */
        mpc52xx_fec_set_paddr(dev, dev->dev_addr);

        mpc52xx_fec_set_multicast_list(dev);

        /* set max frame len, enable flow control, select mii mode */
        rcntrl = FEC_RX_BUFFER_SIZE << 16;      /* max frame length */
        rcntrl |= FEC_RCNTRL_FCE;

        if (!priv->seven_wire_mode)
                rcntrl |= FEC_RCNTRL_MII_MODE;

        if (priv->duplex == DUPLEX_FULL)
                tcntrl = FEC_TCNTRL_FDEN;       /* FD enable */
        else {
                rcntrl |= FEC_RCNTRL_DRT;       /* disable Rx on Tx (HD) */
                tcntrl = 0;
        }
        out_be32(&fec->r_cntrl, rcntrl);
        out_be32(&fec->x_cntrl, tcntrl);

        /* Clear any outstanding interrupt. */
        out_be32(&fec->ievent, 0xffffffff);

        /* Enable interrupts we wish to service. */
        out_be32(&fec->imask, FEC_IMASK_ENABLE);

        /* And last, enable the transmit and receive processing. */
        out_be32(&fec->ecntrl, FEC_ECNTRL_ETHER_EN);
        out_be32(&fec->r_des_active, 0x01000000);
}
/**
 * mpc52xx_fec_stop
 * @dev: network device
 *
 * stop all activity on fec and empty dma buffers
 */
static void mpc52xx_fec_stop(struct net_device *dev)
{
        struct mpc52xx_fec_priv *priv = netdev_priv(dev);
        struct mpc52xx_fec __iomem *fec = priv->fec;
        unsigned long timeout;

        /* disable all interrupts */
        out_be32(&fec->imask, 0);

        /* Disable the rx task. */
        bcom_disable(priv->rx_dmatsk);

        /* Wait for tx queue to drain, but only if we're in process context */
        if (!in_interrupt()) {
                timeout = jiffies + msecs_to_jiffies(2000);
                while (time_before(jiffies, timeout) &&
                                !bcom_queue_empty(priv->tx_dmatsk))
                        msleep(100);

                if (time_after_eq(jiffies, timeout))
                        dev_err(&dev->dev, "queues didn't drain\n");
#if 1
                if (time_after_eq(jiffies, timeout)) {
                        dev_err(&dev->dev, " tx: index: %i, outdex: %i\n",
                                priv->tx_dmatsk->index,
                                priv->tx_dmatsk->outdex);
                        dev_err(&dev->dev, " rx: index: %i, outdex: %i\n",
                                priv->rx_dmatsk->index,
                                priv->rx_dmatsk->outdex);
                }
#endif
        }

        bcom_disable(priv->tx_dmatsk);

        /* Stop FEC */
        out_be32(&fec->ecntrl, in_be32(&fec->ecntrl) & ~FEC_ECNTRL_ETHER_EN);
}
/* reset fec and bestcomm tasks */
static void mpc52xx_fec_reset(struct net_device *dev)
{
        struct mpc52xx_fec_priv *priv = netdev_priv(dev);
        struct mpc52xx_fec __iomem *fec = priv->fec;

        mpc52xx_fec_stop(dev);

        out_be32(&fec->rfifo_status, in_be32(&fec->rfifo_status));
        out_be32(&fec->reset_cntrl, FEC_RESET_CNTRL_RESET_FIFO);

        mpc52xx_fec_free_rx_buffers(dev, priv->rx_dmatsk);

        mpc52xx_fec_hw_init(dev);

        bcom_fec_rx_reset(priv->rx_dmatsk);
        bcom_fec_tx_reset(priv->tx_dmatsk);

        mpc52xx_fec_alloc_rx_buffers(dev, priv->rx_dmatsk);

        bcom_enable(priv->rx_dmatsk);
        bcom_enable(priv->tx_dmatsk);

        mpc52xx_fec_start(dev);

        netif_wake_queue(dev);
}
/* ethtool interface */

static u32 mpc52xx_fec_get_msglevel(struct net_device *dev)
{
        struct mpc52xx_fec_priv *priv = netdev_priv(dev);
        return priv->msg_enable;
}

static void mpc52xx_fec_set_msglevel(struct net_device *dev, u32 level)
{
        struct mpc52xx_fec_priv *priv = netdev_priv(dev);
        priv->msg_enable = level;
}

static const struct ethtool_ops mpc52xx_fec_ethtool_ops = {
        .get_link = ethtool_op_get_link,
        .get_msglevel = mpc52xx_fec_get_msglevel,
        .set_msglevel = mpc52xx_fec_set_msglevel,
        .get_ts_info = ethtool_op_get_ts_info,
        .get_link_ksettings = phy_ethtool_get_link_ksettings,
        .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
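
/* MII ioctls are forwarded to the attached PHY device, if one is connected. */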
static int mpc52xx_fec_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
        struct phy_device *phydev = dev->phydev;

        if (!phydev)
                return -ENOTSUPP;

        return phy_mii_ioctl(phydev, rq, cmd);
}

static const struct net_device_ops mpc52xx_fec_netdev_ops = {
        .ndo_open = mpc52xx_fec_open,
        .ndo_stop = mpc52xx_fec_close,
        .ndo_start_xmit = mpc52xx_fec_start_xmit,
        .ndo_set_rx_mode = mpc52xx_fec_set_multicast_list,
        .ndo_set_mac_address = mpc52xx_fec_set_mac_address,
        .ndo_validate_addr = eth_validate_addr,
        .ndo_do_ioctl = mpc52xx_fec_ioctl,
        .ndo_tx_timeout = mpc52xx_fec_tx_timeout,
        .ndo_get_stats = mpc52xx_fec_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = mpc52xx_fec_poll_controller,
#endif
};
/* ======================================================================== */
/* OF Driver                                                                */
/* ======================================================================== */
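
/*
 * Probe: map the FEC register block, set up the BestComm RX/TX DMA tasks and
 * their IRQs, determine the MAC address (device tree, then controller
 * registers, then a random fallback) and register the net_device.
 */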
static int mpc52xx_fec_probe(struct platform_device *op)
{
        int rv;
        struct net_device *ndev;
        struct mpc52xx_fec_priv *priv = NULL;
        struct resource mem;
        const u32 *prop;
        int prop_size;
        struct device_node *np = op->dev.of_node;
        const char *mac_addr;
        phys_addr_t rx_fifo;
        phys_addr_t tx_fifo;

        /* Get the ether ndev & its private zone */
        ndev = alloc_etherdev(sizeof(struct mpc52xx_fec_priv));
        if (!ndev)
                return -ENOMEM;

        priv = netdev_priv(ndev);
        priv->ndev = ndev;

        /* Reserve FEC control zone */
        rv = of_address_to_resource(np, 0, &mem);
        if (rv) {
                pr_err("Error while parsing device node resource\n");
                goto err_netdev;
        }
        if (resource_size(&mem) < sizeof(struct mpc52xx_fec)) {
                pr_err("invalid resource size (%lx < %x), check mpc52xx_devices.c\n",
                       (unsigned long)resource_size(&mem),
                       sizeof(struct mpc52xx_fec));
                rv = -EINVAL;
                goto err_netdev;
        }

        if (!request_mem_region(mem.start, sizeof(struct mpc52xx_fec),
                                DRIVER_NAME)) {
                rv = -EBUSY;
                goto err_netdev;
        }

        /* Init ether ndev with what we have */
        ndev->netdev_ops = &mpc52xx_fec_netdev_ops;
        ndev->ethtool_ops = &mpc52xx_fec_ethtool_ops;
        ndev->watchdog_timeo = FEC_WATCHDOG_TIMEOUT;
        ndev->base_addr = mem.start;
        SET_NETDEV_DEV(ndev, &op->dev);

        spin_lock_init(&priv->lock);

        /* ioremap the zones */
        priv->fec = ioremap(mem.start, sizeof(struct mpc52xx_fec));

        if (!priv->fec) {
                rv = -ENOMEM;
                goto err_mem_region;
        }

        /* Bestcomm init */
        rx_fifo = ndev->base_addr + offsetof(struct mpc52xx_fec, rfifo_data);
        tx_fifo = ndev->base_addr + offsetof(struct mpc52xx_fec, tfifo_data);

        priv->rx_dmatsk = bcom_fec_rx_init(FEC_RX_NUM_BD, rx_fifo, FEC_RX_BUFFER_SIZE);
        priv->tx_dmatsk = bcom_fec_tx_init(FEC_TX_NUM_BD, tx_fifo);

        if (!priv->rx_dmatsk || !priv->tx_dmatsk) {
                pr_err("Can not init SDMA tasks\n");
                rv = -ENOMEM;
                goto err_rx_tx_dmatsk;
        }

        /* Get the IRQs we need, one by one */

        /* Control */
        ndev->irq = irq_of_parse_and_map(np, 0);

        /* RX */
        priv->r_irq = bcom_get_task_irq(priv->rx_dmatsk);

        /* TX */
        priv->t_irq = bcom_get_task_irq(priv->tx_dmatsk);

        /*
         * MAC address init:
         *
         * First try to read MAC address from DT
         */
        mac_addr = of_get_mac_address(np);
        if (mac_addr) {
                memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
        } else {
                struct mpc52xx_fec __iomem *fec = priv->fec;

                /*
                 * If the MAC address is not provided via DT then read
                 * it back from the controller regs
                 */
                *(u32 *)(&ndev->dev_addr[0]) = in_be32(&fec->paddr1);
                *(u16 *)(&ndev->dev_addr[4]) = in_be32(&fec->paddr2) >> 16;
        }

        /*
         * Check if the MAC address is valid, if not get a random one
         */
        if (!is_valid_ether_addr(ndev->dev_addr)) {
                eth_hw_addr_random(ndev);
                dev_warn(&ndev->dev, "using random MAC address %pM\n",
                         ndev->dev_addr);
        }

        priv->msg_enable = netif_msg_init(debug, MPC52xx_MESSAGES_DEFAULT);

        /*
         * Link mode configuration
         */

        /* Start with safe defaults for link connection */
        priv->speed = 100;
        priv->duplex = DUPLEX_HALF;
        priv->mdio_speed = ((mpc5xxx_get_bus_frequency(np) >> 20) / 5) << 1;

        /* The current speed preconfigures the speed of the MII link */
        prop = of_get_property(np, "current-speed", &prop_size);
        if (prop && (prop_size >= sizeof(u32) * 2)) {
                priv->speed = prop[0];
                priv->duplex = prop[1] ? DUPLEX_FULL : DUPLEX_HALF;
        }

        /* If there is a phy handle, then get the PHY node */
        priv->phy_node = of_parse_phandle(np, "phy-handle", 0);

        /* the 7-wire property means don't use MII mode */
        if (of_find_property(np, "fsl,7-wire-mode", NULL)) {
                priv->seven_wire_mode = 1;
                dev_info(&ndev->dev, "using 7-wire PHY mode\n");
        }

        /* Hardware init */
        mpc52xx_fec_hw_init(ndev);
        mpc52xx_fec_reset_stats(ndev);

        rv = register_netdev(ndev);
        if (rv < 0)
                goto err_node;

        /* We're done ! */
        platform_set_drvdata(op, ndev);
        netdev_info(ndev, "%pOF MAC %pM\n",
                    op->dev.of_node, ndev->dev_addr);

        return 0;

err_node:
        of_node_put(priv->phy_node);
        irq_dispose_mapping(ndev->irq);
err_rx_tx_dmatsk:
        if (priv->rx_dmatsk)
                bcom_fec_rx_release(priv->rx_dmatsk);
        if (priv->tx_dmatsk)
                bcom_fec_tx_release(priv->tx_dmatsk);
        iounmap(priv->fec);
err_mem_region:
        release_mem_region(mem.start, sizeof(struct mpc52xx_fec));
err_netdev:
        free_netdev(ndev);

        return rv;
}
static int
mpc52xx_fec_remove(struct platform_device *op)
{
        struct net_device *ndev;
        struct mpc52xx_fec_priv *priv;

        ndev = platform_get_drvdata(op);
        priv = netdev_priv(ndev);

        unregister_netdev(ndev);

        of_node_put(priv->phy_node);
        priv->phy_node = NULL;

        irq_dispose_mapping(ndev->irq);

        bcom_fec_rx_release(priv->rx_dmatsk);
        bcom_fec_tx_release(priv->tx_dmatsk);

        iounmap(priv->fec);

        release_mem_region(ndev->base_addr, sizeof(struct mpc52xx_fec));

        free_netdev(ndev);

        return 0;
}
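
/*
 * Suspend/resume reuse the regular close/open paths; resume additionally
 * re-runs the one-time hardware init and clears the MIB counters before the
 * interface is reopened.
 */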
#ifdef CONFIG_PM
static int mpc52xx_fec_of_suspend(struct platform_device *op, pm_message_t state)
{
        struct net_device *dev = platform_get_drvdata(op);

        if (netif_running(dev))
                mpc52xx_fec_close(dev);

        return 0;
}

static int mpc52xx_fec_of_resume(struct platform_device *op)
{
        struct net_device *dev = platform_get_drvdata(op);

        mpc52xx_fec_hw_init(dev);
        mpc52xx_fec_reset_stats(dev);

        if (netif_running(dev))
                mpc52xx_fec_open(dev);

        return 0;
}
#endif
static const struct of_device_id mpc52xx_fec_match[] = {
        { .compatible = "fsl,mpc5200b-fec", },
        { .compatible = "fsl,mpc5200-fec", },
        { .compatible = "mpc5200-fec", },
        { }
};

MODULE_DEVICE_TABLE(of, mpc52xx_fec_match);
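
/*
 * For reference, a device tree node this driver binds to might look roughly
 * like the sketch below. The compatible strings and the optional properties
 * this file reads (phy-handle, current-speed, fsl,7-wire-mode) are taken
 * from the code above; the node name and the reg/interrupts values are
 * illustrative only and depend on the board/SoC dts.
 *
 *      ethernet@3000 {
 *              compatible = "fsl,mpc5200b-fec";
 *              reg = <0x3000 0x400>;
 *              interrupts = <2 5 0>;
 *              phy-handle = <&phy0>;
 *      };
 */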
static struct platform_driver mpc52xx_fec_driver = {
        .driver = {
                .name = DRIVER_NAME,
                .of_match_table = mpc52xx_fec_match,
        },
        .probe = mpc52xx_fec_probe,
        .remove = mpc52xx_fec_remove,
#ifdef CONFIG_PM
        .suspend = mpc52xx_fec_of_suspend,
        .resume = mpc52xx_fec_of_resume,
#endif
};
/* ======================================================================== */
/* Module                                                                   */
/* ======================================================================== */

static struct platform_driver * const drivers[] = {
#ifdef CONFIG_FEC_MPC52xx_MDIO
        &mpc52xx_fec_mdio_driver,
#endif
        &mpc52xx_fec_driver,
};

static int __init
mpc52xx_fec_init(void)
{
        return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
}

static void __exit
mpc52xx_fec_exit(void)
{
        platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
}

module_init(mpc52xx_fec_init);
module_exit(mpc52xx_fec_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dale Farnsworth");
MODULE_DESCRIPTION("Ethernet driver for the Freescale MPC52xx FEC");