  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Dave DNET Ethernet Controller driver
  4. *
  5. * Copyright (C) 2008 Dave S.r.l. <www.dave.eu>
  6. * Copyright (C) 2009 Ilya Yanok, Emcraft Systems Ltd, <yanok@emcraft.com>
  7. */
  8. #include <linux/io.h>
  9. #include <linux/module.h>
  10. #include <linux/moduleparam.h>
  11. #include <linux/kernel.h>
  12. #include <linux/types.h>
  13. #include <linux/slab.h>
  14. #include <linux/delay.h>
  15. #include <linux/interrupt.h>
  16. #include <linux/netdevice.h>
  17. #include <linux/etherdevice.h>
  18. #include <linux/dma-mapping.h>
  19. #include <linux/platform_device.h>
  20. #include <linux/phy.h>
  21. #include "dnet.h"
  22. #undef DEBUG
  23. /* function for reading internal MAC register */
  24. static u16 dnet_readw_mac(struct dnet *bp, u16 reg)
  25. {
  26. u16 data_read;
  27. /* issue a read */
  28. dnet_writel(bp, reg, MACREG_ADDR);
  29. /* since a read/write op to the MAC is very slow,
  30. * we must wait before reading the data */
  31. ndelay(500);
  32. /* read data read from the MAC register */
  33. data_read = dnet_readl(bp, MACREG_DATA);
  34. /* all done */
  35. return data_read;
  36. }
  37. /* function for writing internal MAC register */
  38. static void dnet_writew_mac(struct dnet *bp, u16 reg, u16 val)
  39. {
  40. /* load data to write */
  41. dnet_writel(bp, val, MACREG_DATA);
  42. /* issue a write */
  43. dnet_writel(bp, reg | DNET_INTERNAL_WRITE, MACREG_ADDR);
  44. /* since a read/write op to the MAC is very slow,
  45. * we must wait before exiting */
  46. ndelay(500);
  47. }
  48. static void __dnet_set_hwaddr(struct dnet *bp)
  49. {
  50. u16 tmp;
  51. tmp = be16_to_cpup((__be16 *)bp->dev->dev_addr);
  52. dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_0_REG, tmp);
  53. tmp = be16_to_cpup((__be16 *)(bp->dev->dev_addr + 2));
  54. dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_1_REG, tmp);
  55. tmp = be16_to_cpup((__be16 *)(bp->dev->dev_addr + 4));
  56. dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_2_REG, tmp);
  57. }
  58. static void dnet_get_hwaddr(struct dnet *bp)
  59. {
  60. u16 tmp;
  61. u8 addr[6];
  62. /*
  63. * from MAC docs:
  64. * "Note that the MAC address is stored in the registers in Hexadecimal
  65. * form. For example, to set the MAC Address to: AC-DE-48-00-00-80
  66. * would require writing 0xAC (octet 0) to address 0x0B (high byte of
  67. * Mac_addr[15:0]), 0xDE (octet 1) to address 0x0A (Low byte of
  68. * Mac_addr[15:0]), 0x48 (octet 2) to address 0x0D (high byte of
  69. * Mac_addr[15:0]), 0x00 (octet 3) to address 0x0C (Low byte of
  70. * Mac_addr[15:0]), 0x00 (octet 4) to address 0x0F (high byte of
  71. * Mac_addr[15:0]), and 0x80 (octet 5) to address * 0x0E (Low byte of
  72. * Mac_addr[15:0]).
  73. */
  74. tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_0_REG);
  75. *((__be16 *)addr) = cpu_to_be16(tmp);
  76. tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_1_REG);
  77. *((__be16 *)(addr + 2)) = cpu_to_be16(tmp);
  78. tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_2_REG);
  79. *((__be16 *)(addr + 4)) = cpu_to_be16(tmp);
  80. if (is_valid_ether_addr(addr))
  81. memcpy(bp->dev->dev_addr, addr, sizeof(addr));
  82. }
  83. static int dnet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
  84. {
  85. struct dnet *bp = bus->priv;
  86. u16 value;
  87. while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG)
  88. & DNET_INTERNAL_GMII_MNG_CMD_FIN))
  89. cpu_relax();
  90. /* only 5 bits allowed for phy-addr and reg_offset */
  91. mii_id &= 0x1f;
  92. regnum &= 0x1f;
  93. /* prepare reg_value for a read */
  94. value = (mii_id << 8);
  95. value |= regnum;
  96. /* write control word */
  97. dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG, value);
  98. /* wait for end of transfer */
  99. while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG)
  100. & DNET_INTERNAL_GMII_MNG_CMD_FIN))
  101. cpu_relax();
  102. value = dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_DAT_REG);
  103. pr_debug("mdio_read %02x:%02x <- %04x\n", mii_id, regnum, value);
  104. return value;
  105. }
  106. static int dnet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
  107. u16 value)
  108. {
  109. struct dnet *bp = bus->priv;
  110. u16 tmp;
  111. pr_debug("mdio_write %02x:%02x <- %04x\n", mii_id, regnum, value);
  112. while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG)
  113. & DNET_INTERNAL_GMII_MNG_CMD_FIN))
  114. cpu_relax();
  115. /* prepare for a write operation */
  116. tmp = (1 << 13);
  117. /* only 5 bits allowed for phy-addr and reg_offset */
  118. mii_id &= 0x1f;
  119. regnum &= 0x1f;
  120. /* only 16 bits on data */
  121. value &= 0xffff;
  122. /* prepare reg_value for a write */
  123. tmp |= (mii_id << 8);
  124. tmp |= regnum;
  125. /* write data to write first */
  126. dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_DAT_REG, value);
  127. /* write control word */
  128. dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG, tmp);
  129. while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG)
  130. & DNET_INTERNAL_GMII_MNG_CMD_FIN))
  131. cpu_relax();
  132. return 0;
  133. }
/*
 * PHY link-change callback (registered via phy_connect()).
 *
 * Mirrors the PHY's negotiated duplex/speed/link state into the MAC's
 * MODE and RXTX_CONTROL registers, caching the new state in @bp so a
 * subsequent callback only touches the hardware on a real change.
 */
static void dnet_handle_link_change(struct net_device *dev)
{
	struct dnet *bp = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	unsigned long flags;
	u32 mode_reg, ctl_reg;
	int status_change = 0;

	/* register read-modify-write must not race the irq handler */
	spin_lock_irqsave(&bp->lock, flags);

	mode_reg = dnet_readw_mac(bp, DNET_INTERNAL_MODE_REG);
	ctl_reg = dnet_readw_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG);

	if (phydev->link) {
		/* duplex changed: half-duplex is selected by setting a bit */
		if (bp->duplex != phydev->duplex) {
			if (phydev->duplex)
				ctl_reg &=
				    ~(DNET_INTERNAL_RXTX_CONTROL_ENABLEHALFDUP);
			else
				ctl_reg |=
				    DNET_INTERNAL_RXTX_CONTROL_ENABLEHALFDUP;
			bp->duplex = phydev->duplex;
			status_change = 1;
		}

		/* speed changed: only the gigabit enable bit is programmable */
		if (bp->speed != phydev->speed) {
			status_change = 1;
			switch (phydev->speed) {
			case 1000:
				mode_reg |= DNET_INTERNAL_MODE_GBITEN;
				break;
			case 100:
			case 10:
				mode_reg &= ~DNET_INTERNAL_MODE_GBITEN;
				break;
			default:
				printk(KERN_WARNING
				       "%s: Ack! Speed (%d) is not "
				       "10/100/1000!\n", dev->name,
				       phydev->speed);
				break;
			}
			bp->speed = phydev->speed;
		}
	}

	if (phydev->link != bp->link) {
		if (phydev->link) {
			/* link came up: enable the MAC rx/tx paths */
			mode_reg |=
			    (DNET_INTERNAL_MODE_RXEN | DNET_INTERNAL_MODE_TXEN);
		} else {
			/* link went down: disable rx/tx and forget speed */
			mode_reg &=
			    ~(DNET_INTERNAL_MODE_RXEN |
			      DNET_INTERNAL_MODE_TXEN);
			bp->speed = 0;
			bp->duplex = -1;
		}
		bp->link = phydev->link;
		status_change = 1;
	}

	/* write both registers back only if something actually changed */
	if (status_change) {
		dnet_writew_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG, ctl_reg);
		dnet_writew_mac(bp, DNET_INTERNAL_MODE_REG, mode_reg);
	}

	spin_unlock_irqrestore(&bp->lock, flags);

	/* log outside the spinlock */
	if (status_change) {
		if (phydev->link)
			printk(KERN_INFO "%s: link up (%d/%s)\n",
			       dev->name, phydev->speed,
			       DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
		else
			printk(KERN_INFO "%s: link down\n", dev->name);
	}
}
  203. static int dnet_mii_probe(struct net_device *dev)
  204. {
  205. struct dnet *bp = netdev_priv(dev);
  206. struct phy_device *phydev = NULL;
  207. /* find the first phy */
  208. phydev = phy_find_first(bp->mii_bus);
  209. if (!phydev) {
  210. printk(KERN_ERR "%s: no PHY found\n", dev->name);
  211. return -ENODEV;
  212. }
  213. /* TODO : add pin_irq */
  214. /* attach the mac to the phy */
  215. if (bp->capabilities & DNET_HAS_RMII) {
  216. phydev = phy_connect(dev, phydev_name(phydev),
  217. &dnet_handle_link_change,
  218. PHY_INTERFACE_MODE_RMII);
  219. } else {
  220. phydev = phy_connect(dev, phydev_name(phydev),
  221. &dnet_handle_link_change,
  222. PHY_INTERFACE_MODE_MII);
  223. }
  224. if (IS_ERR(phydev)) {
  225. printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
  226. return PTR_ERR(phydev);
  227. }
  228. /* mask with MAC supported features */
  229. if (bp->capabilities & DNET_HAS_GIGABIT)
  230. phy_set_max_speed(phydev, SPEED_1000);
  231. else
  232. phy_set_max_speed(phydev, SPEED_100);
  233. phy_support_asym_pause(phydev);
  234. bp->link = 0;
  235. bp->speed = 0;
  236. bp->duplex = -1;
  237. return 0;
  238. }
  239. static int dnet_mii_init(struct dnet *bp)
  240. {
  241. int err;
  242. bp->mii_bus = mdiobus_alloc();
  243. if (bp->mii_bus == NULL)
  244. return -ENOMEM;
  245. bp->mii_bus->name = "dnet_mii_bus";
  246. bp->mii_bus->read = &dnet_mdio_read;
  247. bp->mii_bus->write = &dnet_mdio_write;
  248. snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
  249. bp->pdev->name, bp->pdev->id);
  250. bp->mii_bus->priv = bp;
  251. if (mdiobus_register(bp->mii_bus)) {
  252. err = -ENXIO;
  253. goto err_out;
  254. }
  255. if (dnet_mii_probe(bp->dev) != 0) {
  256. err = -ENXIO;
  257. goto err_out_unregister_bus;
  258. }
  259. return 0;
  260. err_out_unregister_bus:
  261. mdiobus_unregister(bp->mii_bus);
  262. err_out:
  263. mdiobus_free(bp->mii_bus);
  264. return err;
  265. }
  266. /* For Neptune board: LINK1000 as Link LED and TX as activity LED */
  267. static int dnet_phy_marvell_fixup(struct phy_device *phydev)
  268. {
  269. return phy_write(phydev, 0x18, 0x4148);
  270. }
/*
 * Accumulate the hardware statistics counters into bp->hw_stats.
 *
 * The RX counters (rx_pkt_ignr .. rx_byte) and the TX counters
 * (tx_unicast .. tx_byte) are laid out in struct dnet_stats in the
 * same order as the corresponding 32-bit hardware registers, so each
 * run is walked with a struct pointer and a register pointer advancing
 * in lockstep.  The WARN_ONs catch any future divergence between the
 * struct layout and the register map.
 */
static void dnet_update_stats(struct dnet *bp)
{
	u32 __iomem *reg = bp->regs + DNET_RX_PKT_IGNR_CNT;
	u32 *p = &bp->hw_stats.rx_pkt_ignr;
	u32 *end = &bp->hw_stats.rx_byte + 1;

	WARN_ON((unsigned long)(end - p - 1) !=
		(DNET_RX_BYTE_CNT - DNET_RX_PKT_IGNR_CNT) / 4);

	/* accumulate each RX hardware counter into its struct field */
	for (; p < end; p++, reg++)
		*p += readl(reg);

	reg = bp->regs + DNET_TX_UNICAST_CNT;
	p = &bp->hw_stats.tx_unicast;
	end = &bp->hw_stats.tx_byte + 1;

	WARN_ON((unsigned long)(end - p - 1) !=
		(DNET_TX_BYTE_CNT - DNET_TX_UNICAST_CNT) / 4);

	/* same lockstep walk for the TX counters */
	for (; p < end; p++, reg++)
		*p += readl(reg);
}
/*
 * NAPI poll: drain received frames from the RX FIFOs by PIO (this MAC
 * has no RX DMA here).  Returns the number of packets delivered and
 * re-enables the RX interrupt once fewer than @budget were pending.
 */
static int dnet_poll(struct napi_struct *napi, int budget)
{
	struct dnet *bp = container_of(napi, struct dnet, napi);
	struct net_device *dev = bp->dev;
	int npackets = 0;
	unsigned int pkt_len;
	struct sk_buff *skb;
	unsigned int *data_ptr;
	u32 int_enable;
	u32 cmd_word;
	int i;

	while (npackets < budget) {
		/*
		 * break out of while loop if there are no more
		 * packets waiting (pending frame count lives in the
		 * upper 16 bits of RX_FIFO_WCNT)
		 */
		if (!(dnet_readl(bp, RX_FIFO_WCNT) >> 16))
			break;

		/* command word: length in low 16 bits, error flags above */
		cmd_word = dnet_readl(bp, RX_LEN_FIFO);
		pkt_len = cmd_word & 0xFFFF;

		if (cmd_word & 0xDF180000)
			printk(KERN_ERR "%s packet receive error %x\n",
			       __func__, cmd_word);

		/*
		 * +5 = 2 bytes of alignment slack (skb_reserve below)
		 * plus up to 3 bytes of whole-word copy overrun
		 */
		skb = netdev_alloc_skb(dev, pkt_len + 5);
		if (skb != NULL) {
			/* Align IP on 16 byte boundaries */
			skb_reserve(skb, 2);
			/*
			 * 'skb_put()' points to the start of sk_buff
			 * data area.
			 */
			data_ptr = skb_put(skb, pkt_len);
			/* copy the frame as whole 32-bit words */
			for (i = 0; i < (pkt_len + 3) >> 2; i++)
				*data_ptr++ = dnet_readl(bp, RX_DATA_FIFO);
			skb->protocol = eth_type_trans(skb, dev);
			netif_receive_skb(skb);
			npackets++;
		} else
			printk(KERN_NOTICE
			       "%s: No memory to allocate a sk_buff of "
			       "size %u.\n", dev->name, pkt_len);
	}

	if (npackets < budget) {
		/* We processed all packets available. Tell NAPI it can
		 * stop polling then re-enable rx interrupts.
		 */
		napi_complete_done(napi, npackets);
		int_enable = dnet_readl(bp, INTR_ENB);
		int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF;
		dnet_writel(bp, int_enable, INTR_ENB);
	}

	return npackets;
}
/*
 * Interrupt handler: INTR_SRC is clear-on-read, so every asserted
 * source must be dealt with in this one pass.  Handles TX queue
 * wakeup, RX/TX FIFO error recovery (flush) and schedules NAPI for
 * received frames.
 */
static irqreturn_t dnet_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct dnet *bp = netdev_priv(dev);
	u32 int_src, int_enable, int_current;
	unsigned long flags;
	unsigned int handled = 0;

	spin_lock_irqsave(&bp->lock, flags);

	/* read and clear the DNET irq (clear on read) */
	int_src = dnet_readl(bp, INTR_SRC);
	int_enable = dnet_readl(bp, INTR_ENB);
	/* only act on sources that are currently enabled */
	int_current = int_src & int_enable;

	/* restart the queue if we had stopped it for TX fifo almost full */
	if (int_current & DNET_INTR_SRC_TX_FIFOAE) {
		/* one-shot: disable the almost-empty irq until next stop */
		int_enable = dnet_readl(bp, INTR_ENB);
		int_enable &= ~DNET_INTR_ENB_TX_FIFOAE;
		dnet_writel(bp, int_enable, INTR_ENB);
		netif_wake_queue(dev);
		handled = 1;
	}

	/* RX FIFO error checking */
	if (int_current &
	    (DNET_INTR_SRC_RX_CMDFIFOFF | DNET_INTR_SRC_RX_DATAFIFOFF)) {
		printk(KERN_ERR "%s: RX fifo error %x, irq %x\n", __func__,
		       dnet_readl(bp, RX_STATUS), int_current);
		/* we can only flush the RX FIFOs */
		dnet_writel(bp, DNET_SYS_CTL_RXFIFOFLUSH, SYS_CTL);
		ndelay(500);
		dnet_writel(bp, 0, SYS_CTL);
		handled = 1;
	}

	/* TX FIFO error checking */
	if (int_current &
	    (DNET_INTR_SRC_TX_FIFOFULL | DNET_INTR_SRC_TX_DISCFRM)) {
		printk(KERN_ERR "%s: TX fifo error %x, irq %x\n", __func__,
		       dnet_readl(bp, TX_STATUS), int_current);
		/* we can only flush the TX FIFOs */
		dnet_writel(bp, DNET_SYS_CTL_TXFIFOFLUSH, SYS_CTL);
		ndelay(500);
		dnet_writel(bp, 0, SYS_CTL);
		handled = 1;
	}

	/* frames pending: hand the RX path over to NAPI */
	if (int_current & DNET_INTR_SRC_RX_CMDFIFOAF) {
		if (napi_schedule_prep(&bp->napi)) {
			/*
			 * There's no point taking any more interrupts
			 * until we have processed the buffers
			 */
			/* Disable Rx interrupts and schedule NAPI poll */
			int_enable = dnet_readl(bp, INTR_ENB);
			int_enable &= ~DNET_INTR_SRC_RX_CMDFIFOAF;
			dnet_writel(bp, int_enable, INTR_ENB);
			__napi_schedule(&bp->napi);
		}
		handled = 1;
	}

	if (!handled)
		pr_debug("%s: irq %x remains\n", __func__, int_current);

	spin_unlock_irqrestore(&bp->lock, flags);

	return IRQ_RETVAL(handled);
}
#ifdef DEBUG
/* Hex-dump the full packet contents at KERN_DEBUG level (DEBUG builds). */
static inline void dnet_print_skb(struct sk_buff *skb)
{
	int k;
	printk(KERN_DEBUG PFX "data:");
	for (k = 0; k < skb->len; k++)
		printk(" %02x", (unsigned int)skb->data[k]);
	printk("\n");
}
#else
/* no-op in non-DEBUG builds */
#define dnet_print_skb(skb) do {} while (0)
#endif
/*
 * Transmit a frame by PIO into the TX data FIFO.
 *
 * Bytes are pushed as whole 32-bit words starting from skb->data
 * rounded down to a word boundary; the low two bits of the original
 * data address are encoded in bits 17:16 of the TX command word so
 * the MAC can skip the leading pad bytes.
 *
 * NOTE(review): if the FIFO room check fails, the frame is silently
 * dropped — the skb is still freed and NETDEV_TX_OK returned, and no
 * drop counter is incremented.
 */
static netdev_tx_t dnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct dnet *bp = netdev_priv(dev);
	u32 tx_status, irq_enable;
	unsigned int len, i, tx_cmd, wrsz;
	unsigned long flags;
	unsigned int *bufp;

	tx_status = dnet_readl(bp, TX_STATUS);

	pr_debug("start_xmit: len %u head %p data %p\n",
		 skb->len, skb->head, skb->data);
	dnet_print_skb(skb);

	/* frame size (words) */
	len = (skb->len + 3) >> 2;

	spin_lock_irqsave(&bp->lock, flags);

	tx_status = dnet_readl(bp, TX_STATUS);

	/* word-aligned start address and word count covering the frame */
	bufp = (unsigned int *)(((unsigned long) skb->data) & ~0x3UL);
	wrsz = (u32) skb->len + 3;
	wrsz += ((unsigned long) skb->data) & 0x3;
	wrsz >>= 2;
	/* command word: byte offset within first word (17:16) + length */
	tx_cmd = ((((unsigned long)(skb->data)) & 0x03) << 16) | (u32) skb->len;

	/* check if there is enough room for the current frame */
	if (wrsz < (DNET_FIFO_SIZE - dnet_readl(bp, TX_FIFO_WCNT))) {
		for (i = 0; i < wrsz; i++)
			dnet_writel(bp, *bufp++, TX_DATA_FIFO);

		/*
		 * inform MAC that a packet's written and ready to be
		 * shipped out
		 */
		dnet_writel(bp, tx_cmd, TX_LEN_FIFO);
	}

	if (dnet_readl(bp, TX_FIFO_WCNT) > DNET_FIFO_TX_DATA_AF_TH) {
		/*
		 * FIFO nearly full: stop the queue and arm the TX
		 * almost-empty interrupt, which will restart it
		 */
		netif_stop_queue(dev);
		tx_status = dnet_readl(bp, INTR_SRC);
		irq_enable = dnet_readl(bp, INTR_ENB);
		irq_enable |= DNET_INTR_ENB_TX_FIFOAE;
		dnet_writel(bp, irq_enable, INTR_ENB);
	}

	skb_tx_timestamp(skb);

	/* free the buffer */
	dev_kfree_skb(skb);

	spin_unlock_irqrestore(&bp->lock, flags);

	return NETDEV_TX_OK;
}
  457. static void dnet_reset_hw(struct dnet *bp)
  458. {
  459. /* put ts_mac in IDLE state i.e. disable rx/tx */
  460. dnet_writew_mac(bp, DNET_INTERNAL_MODE_REG, DNET_INTERNAL_MODE_FCEN);
  461. /*
  462. * RX FIFO almost full threshold: only cmd FIFO almost full is
  463. * implemented for RX side
  464. */
  465. dnet_writel(bp, DNET_FIFO_RX_CMD_AF_TH, RX_FIFO_TH);
  466. /*
  467. * TX FIFO almost empty threshold: only data FIFO almost empty
  468. * is implemented for TX side
  469. */
  470. dnet_writel(bp, DNET_FIFO_TX_DATA_AE_TH, TX_FIFO_TH);
  471. /* flush rx/tx fifos */
  472. dnet_writel(bp, DNET_SYS_CTL_RXFIFOFLUSH | DNET_SYS_CTL_TXFIFOFLUSH,
  473. SYS_CTL);
  474. msleep(1);
  475. dnet_writel(bp, 0, SYS_CTL);
  476. }
/*
 * Bring the MAC into an operational configuration: reset it, program
 * the station address and RX/TX control options, and enable the
 * interrupt sources the driver handles.  The rx/tx paths themselves
 * are enabled later by dnet_handle_link_change() once the PHY reports
 * link.
 */
static void dnet_init_hw(struct dnet *bp)
{
	u32 config;

	dnet_reset_hw(bp);
	__dnet_set_hwaddr(bp);

	config = dnet_readw_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG);

	if (bp->dev->flags & IFF_PROMISC)
		/* Copy All Frames */
		config |= DNET_INTERNAL_RXTX_CONTROL_ENPROMISC;
	if (!(bp->dev->flags & IFF_BROADCAST))
		/* No BroadCast */
		config |= DNET_INTERNAL_RXTX_CONTROL_RXMULTICAST;

	config |= DNET_INTERNAL_RXTX_CONTROL_RXPAUSE |
	    DNET_INTERNAL_RXTX_CONTROL_RXBROADCAST |
	    DNET_INTERNAL_RXTX_CONTROL_DROPCONTROL |
	    DNET_INTERNAL_RXTX_CONTROL_DISCFXFCS;
	dnet_writew_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG, config);

	/* clear irq before enabling them (INTR_SRC is clear-on-read) */
	config = dnet_readl(bp, INTR_SRC);

	/* enable RX/TX interrupt, recv packet ready interrupt */
	dnet_writel(bp, DNET_INTR_ENB_GLOBAL_ENABLE | DNET_INTR_ENB_RX_SUMMARY |
		    DNET_INTR_ENB_TX_SUMMARY | DNET_INTR_ENB_RX_FIFOERR |
		    DNET_INTR_ENB_RX_ERROR | DNET_INTR_ENB_RX_FIFOFULL |
		    DNET_INTR_ENB_TX_FIFOFULL | DNET_INTR_ENB_TX_DISCFRM |
		    DNET_INTR_ENB_RX_PKTRDY, INTR_ENB);
}
  503. static int dnet_open(struct net_device *dev)
  504. {
  505. struct dnet *bp = netdev_priv(dev);
  506. /* if the phy is not yet register, retry later */
  507. if (!dev->phydev)
  508. return -EAGAIN;
  509. napi_enable(&bp->napi);
  510. dnet_init_hw(bp);
  511. phy_start_aneg(dev->phydev);
  512. /* schedule a link state check */
  513. phy_start(dev->phydev);
  514. netif_start_queue(dev);
  515. return 0;
  516. }
  517. static int dnet_close(struct net_device *dev)
  518. {
  519. struct dnet *bp = netdev_priv(dev);
  520. netif_stop_queue(dev);
  521. napi_disable(&bp->napi);
  522. if (dev->phydev)
  523. phy_stop(dev->phydev);
  524. dnet_reset_hw(bp);
  525. netif_carrier_off(dev);
  526. return 0;
  527. }
/* Dump every accumulated hardware counter at pr_debug level. */
static inline void dnet_print_pretty_hwstats(struct dnet_stats *hwstat)
{
	pr_debug("%s\n", __func__);
	pr_debug("----------------------------- RX statistics "
		 "-------------------------------\n");
	pr_debug("RX_PKT_IGNR_CNT %-8x\n", hwstat->rx_pkt_ignr);
	pr_debug("RX_LEN_CHK_ERR_CNT %-8x\n", hwstat->rx_len_chk_err);
	pr_debug("RX_LNG_FRM_CNT %-8x\n", hwstat->rx_lng_frm);
	pr_debug("RX_SHRT_FRM_CNT %-8x\n", hwstat->rx_shrt_frm);
	pr_debug("RX_IPG_VIOL_CNT %-8x\n", hwstat->rx_ipg_viol);
	pr_debug("RX_CRC_ERR_CNT %-8x\n", hwstat->rx_crc_err);
	pr_debug("RX_OK_PKT_CNT %-8x\n", hwstat->rx_ok_pkt);
	pr_debug("RX_CTL_FRM_CNT %-8x\n", hwstat->rx_ctl_frm);
	pr_debug("RX_PAUSE_FRM_CNT %-8x\n", hwstat->rx_pause_frm);
	pr_debug("RX_MULTICAST_CNT %-8x\n", hwstat->rx_multicast);
	pr_debug("RX_BROADCAST_CNT %-8x\n", hwstat->rx_broadcast);
	pr_debug("RX_VLAN_TAG_CNT %-8x\n", hwstat->rx_vlan_tag);
	pr_debug("RX_PRE_SHRINK_CNT %-8x\n", hwstat->rx_pre_shrink);
	pr_debug("RX_DRIB_NIB_CNT %-8x\n", hwstat->rx_drib_nib);
	pr_debug("RX_UNSUP_OPCD_CNT %-8x\n", hwstat->rx_unsup_opcd);
	pr_debug("RX_BYTE_CNT %-8x\n", hwstat->rx_byte);
	pr_debug("----------------------------- TX statistics "
		 "-------------------------------\n");
	pr_debug("TX_UNICAST_CNT %-8x\n", hwstat->tx_unicast);
	pr_debug("TX_PAUSE_FRM_CNT %-8x\n", hwstat->tx_pause_frm);
	pr_debug("TX_MULTICAST_CNT %-8x\n", hwstat->tx_multicast);
	pr_debug("TX_BRDCAST_CNT %-8x\n", hwstat->tx_brdcast);
	pr_debug("TX_VLAN_TAG_CNT %-8x\n", hwstat->tx_vlan_tag);
	pr_debug("TX_BAD_FCS_CNT %-8x\n", hwstat->tx_bad_fcs);
	pr_debug("TX_JUMBO_CNT %-8x\n", hwstat->tx_jumbo);
	pr_debug("TX_BYTE_CNT %-8x\n", hwstat->tx_byte);
}
/*
 * ndo_get_stats: refresh the hardware counters and translate them
 * into the generic net_device_stats fields.
 */
static struct net_device_stats *dnet_get_stats(struct net_device *dev)
{
	struct dnet *bp = netdev_priv(dev);
	struct net_device_stats *nstat = &dev->stats;
	struct dnet_stats *hwstat = &bp->hw_stats;

	/* read stats from hardware */
	dnet_update_stats(bp);

	/* Convert HW stats into netdevice stats */
	nstat->rx_errors = (hwstat->rx_len_chk_err +
			    hwstat->rx_lng_frm + hwstat->rx_shrt_frm +
			    /* ignore IGP violation error
			    hwstat->rx_ipg_viol + */
			    hwstat->rx_crc_err +
			    hwstat->rx_pre_shrink +
			    hwstat->rx_drib_nib + hwstat->rx_unsup_opcd);
	nstat->tx_errors = hwstat->tx_bad_fcs;
	nstat->rx_length_errors = (hwstat->rx_len_chk_err +
				   hwstat->rx_lng_frm +
				   hwstat->rx_shrt_frm + hwstat->rx_pre_shrink);
	nstat->rx_crc_errors = hwstat->rx_crc_err;
	nstat->rx_frame_errors = hwstat->rx_pre_shrink + hwstat->rx_drib_nib;
	nstat->rx_packets = hwstat->rx_ok_pkt;
	/* tx_packets = every frame class the hardware counts separately */
	nstat->tx_packets = (hwstat->tx_unicast +
			     hwstat->tx_multicast + hwstat->tx_brdcast);
	nstat->rx_bytes = hwstat->rx_byte;
	nstat->tx_bytes = hwstat->tx_byte;
	nstat->multicast = hwstat->rx_multicast;
	nstat->rx_missed_errors = hwstat->rx_pkt_ignr;

	dnet_print_pretty_hwstats(hwstat);

	return nstat;
}
  591. static int dnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
  592. {
  593. struct phy_device *phydev = dev->phydev;
  594. if (!netif_running(dev))
  595. return -EINVAL;
  596. if (!phydev)
  597. return -ENODEV;
  598. return phy_mii_ioctl(phydev, rq, cmd);
  599. }
  600. static void dnet_get_drvinfo(struct net_device *dev,
  601. struct ethtool_drvinfo *info)
  602. {
  603. strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
  604. strlcpy(info->version, DRV_VERSION, sizeof(info->version));
  605. strlcpy(info->bus_info, "0", sizeof(info->bus_info));
  606. }
/* ethtool callbacks; link settings are delegated to the PHY layer. */
static const struct ethtool_ops dnet_ethtool_ops = {
	.get_drvinfo = dnet_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_ts_info = ethtool_op_get_ts_info,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
};
/* netdev callbacks; MAC address handling uses the generic eth helpers. */
static const struct net_device_ops dnet_netdev_ops = {
	.ndo_open = dnet_open,
	.ndo_stop = dnet_close,
	.ndo_get_stats = dnet_get_stats,
	.ndo_start_xmit = dnet_start_xmit,
	.ndo_do_ioctl = dnet_ioctl,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
};
  623. static int dnet_probe(struct platform_device *pdev)
  624. {
  625. struct resource *res;
  626. struct net_device *dev;
  627. struct dnet *bp;
  628. struct phy_device *phydev;
  629. int err;
  630. unsigned int irq;
  631. irq = platform_get_irq(pdev, 0);
  632. dev = alloc_etherdev(sizeof(*bp));
  633. if (!dev)
  634. return -ENOMEM;
  635. /* TODO: Actually, we have some interesting features... */
  636. dev->features |= 0;
  637. bp = netdev_priv(dev);
  638. bp->dev = dev;
  639. platform_set_drvdata(pdev, dev);
  640. SET_NETDEV_DEV(dev, &pdev->dev);
  641. spin_lock_init(&bp->lock);
  642. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  643. bp->regs = devm_ioremap_resource(&pdev->dev, res);
  644. if (IS_ERR(bp->regs)) {
  645. err = PTR_ERR(bp->regs);
  646. goto err_out_free_dev;
  647. }
  648. dev->irq = irq;
  649. err = request_irq(dev->irq, dnet_interrupt, 0, DRV_NAME, dev);
  650. if (err) {
  651. dev_err(&pdev->dev, "Unable to request IRQ %d (error %d)\n",
  652. irq, err);
  653. goto err_out_free_dev;
  654. }
  655. dev->netdev_ops = &dnet_netdev_ops;
  656. netif_napi_add(dev, &bp->napi, dnet_poll, 64);
  657. dev->ethtool_ops = &dnet_ethtool_ops;
  658. dev->base_addr = (unsigned long)bp->regs;
  659. bp->capabilities = dnet_readl(bp, VERCAPS) & DNET_CAPS_MASK;
  660. dnet_get_hwaddr(bp);
  661. if (!is_valid_ether_addr(dev->dev_addr)) {
  662. /* choose a random ethernet address */
  663. eth_hw_addr_random(dev);
  664. __dnet_set_hwaddr(bp);
  665. }
  666. err = register_netdev(dev);
  667. if (err) {
  668. dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
  669. goto err_out_free_irq;
  670. }
  671. /* register the PHY board fixup (for Marvell 88E1111) */
  672. err = phy_register_fixup_for_uid(0x01410cc0, 0xfffffff0,
  673. dnet_phy_marvell_fixup);
  674. /* we can live without it, so just issue a warning */
  675. if (err)
  676. dev_warn(&pdev->dev, "Cannot register PHY board fixup.\n");
  677. err = dnet_mii_init(bp);
  678. if (err)
  679. goto err_out_unregister_netdev;
  680. dev_info(&pdev->dev, "Dave DNET at 0x%p (0x%08x) irq %d %pM\n",
  681. bp->regs, (unsigned int)res->start, dev->irq, dev->dev_addr);
  682. dev_info(&pdev->dev, "has %smdio, %sirq, %sgigabit, %sdma\n",
  683. (bp->capabilities & DNET_HAS_MDIO) ? "" : "no ",
  684. (bp->capabilities & DNET_HAS_IRQ) ? "" : "no ",
  685. (bp->capabilities & DNET_HAS_GIGABIT) ? "" : "no ",
  686. (bp->capabilities & DNET_HAS_DMA) ? "" : "no ");
  687. phydev = dev->phydev;
  688. phy_attached_info(phydev);
  689. return 0;
  690. err_out_unregister_netdev:
  691. unregister_netdev(dev);
  692. err_out_free_irq:
  693. free_irq(dev->irq, dev);
  694. err_out_free_dev:
  695. free_netdev(dev);
  696. return err;
  697. }
/*
 * Platform removal: tear down the PHY, MDIO bus, netdev and IRQ.
 *
 * NOTE(review): unregister_netdev() runs here after the MII bus is
 * torn down; drivers conventionally unregister the netdev first so no
 * traffic can be in flight — confirm whether this ordering is safe
 * for this hardware before changing it.
 */
static int dnet_remove(struct platform_device *pdev)
{
	struct net_device *dev;
	struct dnet *bp;

	dev = platform_get_drvdata(pdev);

	if (dev) {
		bp = netdev_priv(dev);
		/* detach the PHY so the link-change callback stops firing */
		if (dev->phydev)
			phy_disconnect(dev->phydev);
		mdiobus_unregister(bp->mii_bus);
		mdiobus_free(bp->mii_bus);
		unregister_netdev(dev);
		free_irq(dev->irq, dev);
		free_netdev(dev);
	}

	return 0;
}
/* Platform driver glue: binds to platform devices named "dnet". */
static struct platform_driver dnet_driver = {
	.probe = dnet_probe,
	.remove = dnet_remove,
	.driver = {
		.name = "dnet",
	},
};

module_platform_driver(dnet_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Dave DNET Ethernet driver");
MODULE_AUTHOR("Ilya Yanok <yanok@emcraft.com>, "
	      "Matteo Vit <matteo.vit@dave.eu>");