/*
 * Copyright (C) 2015 Mans Rullgard <mans@mansr.com>
 *
 * Mostly rewritten, based on driver from Sigma Designs. Original
 * copyright notice below.
 *
 *
 * Driver for tangox SMP864x/SMP865x/SMP867x/SMP868x builtin Ethernet Mac.
 *
 * Copyright (C) 2005 Maxime Bizon <mbizon@freebox.fr>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/dma-mapping.h>
#include <linux/phy.h>
#include <linux/cache.h>
#include <linux/jiffies.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <asm/barrier.h>

#include "nb8800.h"

static void nb8800_tx_done(struct net_device *dev);
static int nb8800_dma_stop(struct net_device *dev);

static inline u8 nb8800_readb(struct nb8800_priv *priv, int reg)
{
	return readb_relaxed(priv->base + reg);
}

static inline u32 nb8800_readl(struct nb8800_priv *priv, int reg)
{
	return readl_relaxed(priv->base + reg);
}

static inline void nb8800_writeb(struct nb8800_priv *priv, int reg, u8 val)
{
	writeb_relaxed(val, priv->base + reg);
}

static inline void nb8800_writew(struct nb8800_priv *priv, int reg, u16 val)
{
	writew_relaxed(val, priv->base + reg);
}

static inline void nb8800_writel(struct nb8800_priv *priv, int reg, u32 val)
{
	writel_relaxed(val, priv->base + reg);
}

static inline void nb8800_maskb(struct nb8800_priv *priv, int reg,
				u32 mask, u32 val)
{
	u32 old = nb8800_readb(priv, reg);
	u32 new = (old & ~mask) | (val & mask);

	if (new != old)
		nb8800_writeb(priv, reg, new);
}

static inline void nb8800_maskl(struct nb8800_priv *priv, int reg,
				u32 mask, u32 val)
{
	u32 old = nb8800_readl(priv, reg);
	u32 new = (old & ~mask) | (val & mask);

	if (new != old)
		nb8800_writel(priv, reg, new);
}

static inline void nb8800_modb(struct nb8800_priv *priv, int reg, u8 bits,
			       bool set)
{
	nb8800_maskb(priv, reg, bits, set ? bits : 0);
}

static inline void nb8800_setb(struct nb8800_priv *priv, int reg, u8 bits)
{
	nb8800_maskb(priv, reg, bits, bits);
}

static inline void nb8800_clearb(struct nb8800_priv *priv, int reg, u8 bits)
{
	nb8800_maskb(priv, reg, bits, 0);
}

static inline void nb8800_modl(struct nb8800_priv *priv, int reg, u32 bits,
			       bool set)
{
	nb8800_maskl(priv, reg, bits, set ? bits : 0);
}

static inline void nb8800_setl(struct nb8800_priv *priv, int reg, u32 bits)
{
	nb8800_maskl(priv, reg, bits, bits);
}

static inline void nb8800_clearl(struct nb8800_priv *priv, int reg, u32 bits)
{
	nb8800_maskl(priv, reg, bits, 0);
}
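
/* MDIO bus access: a command is written to NB8800_MDIO_CMD and kicked
 * off by setting MDIO_CMD_GO; completion is detected by polling until
 * the GO bit clears again.
 */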
static int nb8800_mdio_wait(struct mii_bus *bus)
{
	struct nb8800_priv *priv = bus->priv;
	u32 val;

	return readl_poll_timeout_atomic(priv->base + NB8800_MDIO_CMD,
					 val, !(val & MDIO_CMD_GO), 1, 1000);
}

static int nb8800_mdio_cmd(struct mii_bus *bus, u32 cmd)
{
	struct nb8800_priv *priv = bus->priv;
	int err;

	err = nb8800_mdio_wait(bus);
	if (err)
		return err;

	nb8800_writel(priv, NB8800_MDIO_CMD, cmd);
	udelay(10);
	nb8800_writel(priv, NB8800_MDIO_CMD, cmd | MDIO_CMD_GO);

	return nb8800_mdio_wait(bus);
}

static int nb8800_mdio_read(struct mii_bus *bus, int phy_id, int reg)
{
	struct nb8800_priv *priv = bus->priv;
	u32 val;
	int err;

	err = nb8800_mdio_cmd(bus, MDIO_CMD_ADDR(phy_id) | MDIO_CMD_REG(reg));
	if (err)
		return err;

	val = nb8800_readl(priv, NB8800_MDIO_STS);
	if (val & MDIO_STS_ERR)
		return 0xffff;

	return val & 0xffff;
}

static int nb8800_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val)
{
	u32 cmd = MDIO_CMD_ADDR(phy_id) | MDIO_CMD_REG(reg) |
		  MDIO_CMD_DATA(val) | MDIO_CMD_WR;

	return nb8800_mdio_cmd(bus, cmd);
}

static void nb8800_mac_tx(struct net_device *dev, bool enable)
{
	struct nb8800_priv *priv = netdev_priv(dev);

	while (nb8800_readl(priv, NB8800_TXC_CR) & TCR_EN)
		cpu_relax();

	nb8800_modb(priv, NB8800_TX_CTL1, TX_EN, enable);
}

static void nb8800_mac_rx(struct net_device *dev, bool enable)
{
	nb8800_modb(netdev_priv(dev), NB8800_RX_CTL, RX_EN, enable);
}

static void nb8800_mac_af(struct net_device *dev, bool enable)
{
	nb8800_modb(netdev_priv(dev), NB8800_RX_CTL, RX_AF_EN, enable);
}

static void nb8800_start_rx(struct net_device *dev)
{
	nb8800_setl(netdev_priv(dev), NB8800_RXC_CR, RCR_EN);
}
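
/* Allocate a page-fragment receive buffer for ring slot i and map it
 * for DMA. The allocation size is rounded up to a whole number of
 * cache lines; in NAPI context the cheaper napi_alloc_frag() is used.
 */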
static int nb8800_alloc_rx(struct net_device *dev, unsigned int i, bool napi)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	struct nb8800_rx_desc *rxd = &priv->rx_descs[i];
	struct nb8800_rx_buf *rxb = &priv->rx_bufs[i];
	int size = L1_CACHE_ALIGN(RX_BUF_SIZE);
	dma_addr_t dma_addr;
	struct page *page;
	unsigned long offset;
	void *data;

	data = napi ? napi_alloc_frag(size) : netdev_alloc_frag(size);
	if (!data)
		return -ENOMEM;

	page = virt_to_head_page(data);
	offset = data - page_address(page);

	dma_addr = dma_map_page(&dev->dev, page, offset, RX_BUF_SIZE,
				DMA_FROM_DEVICE);

	if (dma_mapping_error(&dev->dev, dma_addr)) {
		skb_free_frag(data);
		return -ENOMEM;
	}

	rxb->page = page;
	rxb->offset = offset;
	rxd->desc.s_addr = dma_addr;

	return 0;
}
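
/* Hand a received frame in ring slot i to the stack. Frames up to
 * RX_COPYBREAK bytes are copied into a fresh skb, leaving the DMA
 * buffer in place; for larger frames only the first RX_COPYHDR bytes
 * are copied and the rest is attached as a page fragment, with a new
 * buffer allocated to refill the ring slot.
 */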
static void nb8800_receive(struct net_device *dev, unsigned int i,
			   unsigned int len)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	struct nb8800_rx_desc *rxd = &priv->rx_descs[i];
	struct page *page = priv->rx_bufs[i].page;
	int offset = priv->rx_bufs[i].offset;
	void *data = page_address(page) + offset;
	dma_addr_t dma = rxd->desc.s_addr;
	struct sk_buff *skb;
	unsigned int size;
	int err;

	size = len <= RX_COPYBREAK ? len : RX_COPYHDR;

	skb = napi_alloc_skb(&priv->napi, size);
	if (!skb) {
		netdev_err(dev, "rx skb allocation failed\n");
		dev->stats.rx_dropped++;
		return;
	}

	if (len <= RX_COPYBREAK) {
		dma_sync_single_for_cpu(&dev->dev, dma, len, DMA_FROM_DEVICE);
		memcpy(skb_put(skb, len), data, len);
		dma_sync_single_for_device(&dev->dev, dma, len,
					   DMA_FROM_DEVICE);
	} else {
		err = nb8800_alloc_rx(dev, i, true);
		if (err) {
			netdev_err(dev, "rx buffer allocation failed\n");
			dev->stats.rx_dropped++;
			dev_kfree_skb(skb);
			return;
		}

		dma_unmap_page(&dev->dev, dma, RX_BUF_SIZE, DMA_FROM_DEVICE);
		memcpy(skb_put(skb, RX_COPYHDR), data, RX_COPYHDR);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
				offset + RX_COPYHDR, len - RX_COPYHDR,
				RX_BUF_SIZE);
	}

	skb->protocol = eth_type_trans(skb, dev);
	napi_gro_receive(&priv->napi, skb);
}

static void nb8800_rx_error(struct net_device *dev, u32 report)
{
	if (report & RX_LENGTH_ERR)
		dev->stats.rx_length_errors++;

	if (report & RX_FCS_ERR)
		dev->stats.rx_crc_errors++;

	if (report & RX_FIFO_OVERRUN)
		dev->stats.rx_fifo_errors++;

	if (report & RX_ALIGNMENT_ERROR)
		dev->stats.rx_frame_errors++;

	dev->stats.rx_errors++;
}
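
/* NAPI poll: reap completed tx packets, then walk the rx ring until
 * the budget is exhausted or an unfilled descriptor is reached. The
 * end-of-chain (EOC) marker is then moved to the last consumed slot
 * and rx DMA is restarted.
 */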
static int nb8800_poll(struct napi_struct *napi, int budget)
{
	struct net_device *dev = napi->dev;
	struct nb8800_priv *priv = netdev_priv(dev);
	struct nb8800_rx_desc *rxd;
	unsigned int last = priv->rx_eoc;
	unsigned int next;
	int work = 0;

	nb8800_tx_done(dev);

again:
	do {
		struct nb8800_rx_buf *rxb;
		unsigned int len;

		next = (last + 1) % RX_DESC_COUNT;

		rxb = &priv->rx_bufs[next];
		rxd = &priv->rx_descs[next];

		if (!rxd->report)
			break;

		len = RX_BYTES_TRANSFERRED(rxd->report);

		if (IS_RX_ERROR(rxd->report))
			nb8800_rx_error(dev, rxd->report);
		else
			nb8800_receive(dev, next, len);

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;

		if (rxd->report & RX_MULTICAST_PKT)
			dev->stats.multicast++;

		rxd->report = 0;
		last = next;
		work++;
	} while (work < budget);

	if (work) {
		priv->rx_descs[last].desc.config |= DESC_EOC;
		wmb();	/* ensure new EOC is written before clearing old */
		priv->rx_descs[priv->rx_eoc].desc.config &= ~DESC_EOC;
		priv->rx_eoc = last;
		nb8800_start_rx(dev);
	}

	if (work < budget) {
		nb8800_writel(priv, NB8800_RX_ITR, priv->rx_itr_irq);

		/* If a packet arrived after we last checked but
		 * before writing RX_ITR, the interrupt will be
		 * delayed, so we retrieve it now.
		 */
		if (priv->rx_descs[next].report)
			goto again;

		napi_complete_done(napi, work);
	}

	return work;
}
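
/* Kick tx DMA for the next queued descriptor chain, provided one is
 * marked ready and the DMA engine is currently idle. The caller must
 * hold tx_lock; the two wrappers below take it from process and
 * interrupt context respectively.
 */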
static void __nb8800_tx_dma_start(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	struct nb8800_tx_buf *txb;
	u32 txc_cr;

	txb = &priv->tx_bufs[priv->tx_queue];
	if (!txb->ready)
		return;

	txc_cr = nb8800_readl(priv, NB8800_TXC_CR);
	if (txc_cr & TCR_EN)
		return;

	nb8800_writel(priv, NB8800_TX_DESC_ADDR, txb->dma_desc);
	wmb();	/* ensure desc addr is written before starting DMA */
	nb8800_writel(priv, NB8800_TXC_CR, txc_cr | TCR_EN);

	priv->tx_queue = (priv->tx_queue + txb->chain_len) % TX_DESC_COUNT;
}

static void nb8800_tx_dma_start(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);

	spin_lock_irq(&priv->tx_lock);
	__nb8800_tx_dma_start(dev);
	spin_unlock_irq(&priv->tx_lock);
}

static void nb8800_tx_dma_start_irq(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);

	spin_lock(&priv->tx_lock);
	__nb8800_tx_dma_start(dev);
	spin_unlock(&priv->tx_lock);
}
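
/* Queue a frame for transmission. Up to seven leading bytes are
 * copied into the descriptor's own buffer so that the mapped part of
 * the skb starts on an 8-byte boundary; the copied bytes are sent
 * through an extra descriptor chained in front of the main one.
 */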
static int nb8800_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	struct nb8800_tx_desc *txd;
	struct nb8800_tx_buf *txb;
	struct nb8800_dma_desc *desc;
	dma_addr_t dma_addr;
	unsigned int dma_len;
	unsigned int align;
	unsigned int next;

	if (atomic_read(&priv->tx_free) <= NB8800_DESC_LOW) {
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}

	align = (8 - (uintptr_t)skb->data) & 7;

	dma_len = skb->len - align;
	dma_addr = dma_map_single(&dev->dev, skb->data + align,
				  dma_len, DMA_TO_DEVICE);

	if (dma_mapping_error(&dev->dev, dma_addr)) {
		netdev_err(dev, "tx dma mapping error\n");
		kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	if (atomic_dec_return(&priv->tx_free) <= NB8800_DESC_LOW) {
		netif_stop_queue(dev);
		skb->xmit_more = 0;
	}

	next = priv->tx_next;
	txb = &priv->tx_bufs[next];
	txd = &priv->tx_descs[next];
	desc = &txd->desc[0];

	next = (next + 1) % TX_DESC_COUNT;

	if (align) {
		memcpy(txd->buf, skb->data, align);

		desc->s_addr =
			txb->dma_desc + offsetof(struct nb8800_tx_desc, buf);
		desc->n_addr = txb->dma_desc + sizeof(txd->desc[0]);
		desc->config = DESC_BTS(2) | DESC_DS | align;

		desc++;
	}

	desc->s_addr = dma_addr;
	desc->n_addr = priv->tx_bufs[next].dma_desc;
	desc->config = DESC_BTS(2) | DESC_DS | DESC_EOF | dma_len;

	if (!skb->xmit_more)
		desc->config |= DESC_EOC;

	txb->skb = skb;
	txb->dma_addr = dma_addr;
	txb->dma_len = dma_len;

	if (!priv->tx_chain) {
		txb->chain_len = 1;
		priv->tx_chain = txb;
	} else {
		priv->tx_chain->chain_len++;
	}

	netdev_sent_queue(dev, skb->len);

	priv->tx_next = next;

	if (!skb->xmit_more) {
		smp_wmb();
		priv->tx_chain->ready = true;
		priv->tx_chain = NULL;
		nb8800_tx_dma_start(dev);
	}

	return NETDEV_TX_OK;
}

static void nb8800_tx_error(struct net_device *dev, u32 report)
{
	if (report & TX_LATE_COLLISION)
		dev->stats.collisions++;

	if (report & TX_PACKET_DROPPED)
		dev->stats.tx_dropped++;

	if (report & TX_FIFO_UNDERRUN)
		dev->stats.tx_fifo_errors++;

	dev->stats.tx_errors++;
}
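
/* Reclaim tx descriptors the hardware has completed: unmap the
 * buffers, free or consume the skbs, account the statistics, and wake
 * the queue now that descriptors are available again.
 */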
static void nb8800_tx_done(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	unsigned int limit = priv->tx_next;
	unsigned int done = priv->tx_done;
	unsigned int packets = 0;
	unsigned int len = 0;

	while (done != limit) {
		struct nb8800_tx_desc *txd = &priv->tx_descs[done];
		struct nb8800_tx_buf *txb = &priv->tx_bufs[done];
		struct sk_buff *skb;

		if (!txd->report)
			break;

		skb = txb->skb;
		len += skb->len;

		dma_unmap_single(&dev->dev, txb->dma_addr, txb->dma_len,
				 DMA_TO_DEVICE);

		if (IS_TX_ERROR(txd->report)) {
			nb8800_tx_error(dev, txd->report);
			kfree_skb(skb);
		} else {
			consume_skb(skb);
		}

		dev->stats.tx_packets++;
		dev->stats.tx_bytes += TX_BYTES_TRANSFERRED(txd->report);
		dev->stats.collisions += TX_EARLY_COLLISIONS(txd->report);

		txb->skb = NULL;
		txb->ready = false;
		txd->report = 0;

		done = (done + 1) % TX_DESC_COUNT;
		packets++;
	}

	if (packets) {
		smp_mb__before_atomic();
		atomic_add(packets, &priv->tx_free);
		netdev_completed_queue(dev, packets, len);
		netif_wake_queue(dev);
		priv->tx_done = done;
	}
}
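
/* Interrupt handler: ack and decode the tx and rx status registers.
 * Apart from kicking the next queued tx chain when the previous one
 * completes, all real work is deferred to the NAPI poll function.
 */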
static irqreturn_t nb8800_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct nb8800_priv *priv = netdev_priv(dev);
	irqreturn_t ret = IRQ_NONE;
	u32 val;

	/* tx interrupt */
	val = nb8800_readl(priv, NB8800_TXC_SR);
	if (val) {
		nb8800_writel(priv, NB8800_TXC_SR, val);

		if (val & TSR_DI)
			nb8800_tx_dma_start_irq(dev);

		if (val & TSR_TI)
			napi_schedule_irqoff(&priv->napi);

		if (unlikely(val & TSR_DE))
			netdev_err(dev, "TX DMA error\n");

		/* should never happen with automatic status retrieval */
		if (unlikely(val & TSR_TO))
			netdev_err(dev, "TX Status FIFO overflow\n");

		ret = IRQ_HANDLED;
	}

	/* rx interrupt */
	val = nb8800_readl(priv, NB8800_RXC_SR);
	if (val) {
		nb8800_writel(priv, NB8800_RXC_SR, val);

		if (likely(val & (RSR_RI | RSR_DI))) {
			nb8800_writel(priv, NB8800_RX_ITR, priv->rx_itr_poll);
			napi_schedule_irqoff(&priv->napi);
		}

		if (unlikely(val & RSR_DE))
			netdev_err(dev, "RX DMA error\n");

		/* should never happen with automatic status retrieval */
		if (unlikely(val & RSR_RO))
			netdev_err(dev, "RX Status FIFO overflow\n");

		ret = IRQ_HANDLED;
	}

	return ret;
}
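
/* Reprogram the MAC for the speed and duplex reported by the PHY:
 * gigabit selects GMAC (and, where applicable, RGMII) mode, and the
 * slot time and IC threshold are scaled to the PHY clock rate.
 */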
static void nb8800_mac_config(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	bool gigabit = priv->speed == SPEED_1000;
	u32 mac_mode_mask = RGMII_MODE | HALF_DUPLEX | GMAC_MODE;
	u32 mac_mode = 0;
	u32 slot_time;
	u32 phy_clk;
	u32 ict;

	if (!priv->duplex)
		mac_mode |= HALF_DUPLEX;

	if (gigabit) {
		if (phy_interface_is_rgmii(dev->phydev))
			mac_mode |= RGMII_MODE;

		mac_mode |= GMAC_MODE;
		phy_clk = 125000000;

		/* Should be 512 but register is only 8 bits */
		slot_time = 255;
	} else {
		phy_clk = 25000000;
		slot_time = 128;
	}

	ict = DIV_ROUND_UP(phy_clk, clk_get_rate(priv->clk));

	nb8800_writeb(priv, NB8800_IC_THRESHOLD, ict);
	nb8800_writeb(priv, NB8800_SLOT_TIME, slot_time);
	nb8800_maskb(priv, NB8800_MAC_MODE, mac_mode_mask, mac_mode);
}
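
/* Apply the current flow control settings. Changing tx pause (RCR_FL)
 * while the interface is running requires stopping rx DMA and
 * restarting it afterwards.
 */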
static void nb8800_pause_config(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	u32 rxcr;

	if (priv->pause_aneg) {
		if (!phydev || !phydev->link)
			return;

		priv->pause_rx = phydev->pause;
		priv->pause_tx = phydev->pause ^ phydev->asym_pause;
	}

	nb8800_modb(priv, NB8800_RX_CTL, RX_PAUSE_EN, priv->pause_rx);

	rxcr = nb8800_readl(priv, NB8800_RXC_CR);
	if (!!(rxcr & RCR_FL) == priv->pause_tx)
		return;

	if (netif_running(dev)) {
		napi_disable(&priv->napi);
		netif_tx_lock_bh(dev);
		nb8800_dma_stop(dev);
		nb8800_modl(priv, NB8800_RXC_CR, RCR_FL, priv->pause_tx);
		nb8800_start_rx(dev);
		netif_tx_unlock_bh(dev);
		napi_enable(&priv->napi);
	} else {
		nb8800_modl(priv, NB8800_RXC_CR, RCR_FL, priv->pause_tx);
	}
}

static void nb8800_link_reconfigure(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	int change = 0;

	if (phydev->link) {
		if (phydev->speed != priv->speed) {
			priv->speed = phydev->speed;
			change = 1;
		}

		if (phydev->duplex != priv->duplex) {
			priv->duplex = phydev->duplex;
			change = 1;
		}

		if (change)
			nb8800_mac_config(dev);

		nb8800_pause_config(dev);
	}

	if (phydev->link != priv->link) {
		priv->link = phydev->link;
		change = 1;
	}

	if (change)
		phy_print_status(phydev);
}

static void nb8800_update_mac_addr(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		nb8800_writeb(priv, NB8800_SRC_ADDR(i), dev->dev_addr[i]);

	for (i = 0; i < ETH_ALEN; i++)
		nb8800_writeb(priv, NB8800_UC_ADDR(i), dev->dev_addr[i]);
}

static int nb8800_set_mac_address(struct net_device *dev, void *addr)
{
	struct sockaddr *sock = addr;

	if (netif_running(dev))
		return -EBUSY;

	ether_addr_copy(dev->dev_addr, sock->sa_data);
	nb8800_update_mac_addr(dev);

	return 0;
}

static void nb8800_mc_init(struct net_device *dev, int val)
{
	struct nb8800_priv *priv = netdev_priv(dev);

	nb8800_writeb(priv, NB8800_MC_INIT, val);
	readb_poll_timeout_atomic(priv->base + NB8800_MC_INIT, val, !val,
				  1, 1000);
}
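
/* Update the receive address filter. Promiscuous and all-multicast
 * modes simply disable the filter; otherwise each multicast address
 * is loaded through the MC_ADDR and MC_INIT registers.
 */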
static void nb8800_set_rx_mode(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	int i;

	if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		nb8800_mac_af(dev, false);
		return;
	}

	nb8800_mac_af(dev, true);
	nb8800_mc_init(dev, 0);

	netdev_for_each_mc_addr(ha, dev) {
		for (i = 0; i < ETH_ALEN; i++)
			nb8800_writeb(priv, NB8800_MC_ADDR(i), ha->addr[i]);

		nb8800_mc_init(dev, 0xff);
	}
}

#define RX_DESC_SIZE (RX_DESC_COUNT * sizeof(struct nb8800_rx_desc))
#define TX_DESC_SIZE (TX_DESC_COUNT * sizeof(struct nb8800_tx_desc))

static void nb8800_dma_free(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	unsigned int i;

	if (priv->rx_bufs) {
		for (i = 0; i < RX_DESC_COUNT; i++)
			if (priv->rx_bufs[i].page)
				put_page(priv->rx_bufs[i].page);

		kfree(priv->rx_bufs);
		priv->rx_bufs = NULL;
	}

	if (priv->tx_bufs) {
		for (i = 0; i < TX_DESC_COUNT; i++)
			kfree_skb(priv->tx_bufs[i].skb);

		kfree(priv->tx_bufs);
		priv->tx_bufs = NULL;
	}

	if (priv->rx_descs) {
		dma_free_coherent(dev->dev.parent, RX_DESC_SIZE, priv->rx_descs,
				  priv->rx_desc_dma);
		priv->rx_descs = NULL;
	}

	if (priv->tx_descs) {
		dma_free_coherent(dev->dev.parent, TX_DESC_SIZE, priv->tx_descs,
				  priv->tx_desc_dma);
		priv->tx_descs = NULL;
	}
}
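
/* Return both descriptor rings to their initial state: rx descriptors
 * are relinked into a circular chain with the EOC flag on the last
 * slot, and tx descriptors get their status report addresses reset.
 */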
static void nb8800_dma_reset(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	struct nb8800_rx_desc *rxd;
	struct nb8800_tx_desc *txd;
	unsigned int i;

	for (i = 0; i < RX_DESC_COUNT; i++) {
		dma_addr_t rx_dma = priv->rx_desc_dma + i * sizeof(*rxd);

		rxd = &priv->rx_descs[i];
		rxd->desc.n_addr = rx_dma + sizeof(*rxd);
		rxd->desc.r_addr =
			rx_dma + offsetof(struct nb8800_rx_desc, report);
		rxd->desc.config = priv->rx_dma_config;
		rxd->report = 0;
	}

	rxd->desc.n_addr = priv->rx_desc_dma;
	rxd->desc.config |= DESC_EOC;

	priv->rx_eoc = RX_DESC_COUNT - 1;

	for (i = 0; i < TX_DESC_COUNT; i++) {
		struct nb8800_tx_buf *txb = &priv->tx_bufs[i];
		dma_addr_t r_dma = txb->dma_desc +
			offsetof(struct nb8800_tx_desc, report);

		txd = &priv->tx_descs[i];
		txd->desc[0].r_addr = r_dma;
		txd->desc[1].r_addr = r_dma;
		txd->report = 0;
	}

	priv->tx_next = 0;
	priv->tx_queue = 0;
	priv->tx_done = 0;
	atomic_set(&priv->tx_free, TX_DESC_COUNT);

	nb8800_writel(priv, NB8800_RX_DESC_ADDR, priv->rx_desc_dma);

	wmb();	/* ensure all setup is written before starting */
}

static int nb8800_dma_init(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	unsigned int n_rx = RX_DESC_COUNT;
	unsigned int n_tx = TX_DESC_COUNT;
	unsigned int i;
	int err;

	priv->rx_descs = dma_alloc_coherent(dev->dev.parent, RX_DESC_SIZE,
					    &priv->rx_desc_dma, GFP_KERNEL);
	if (!priv->rx_descs)
		goto err_out;

	priv->rx_bufs = kcalloc(n_rx, sizeof(*priv->rx_bufs), GFP_KERNEL);
	if (!priv->rx_bufs)
		goto err_out;

	for (i = 0; i < n_rx; i++) {
		err = nb8800_alloc_rx(dev, i, false);
		if (err)
			goto err_out;
	}

	priv->tx_descs = dma_alloc_coherent(dev->dev.parent, TX_DESC_SIZE,
					    &priv->tx_desc_dma, GFP_KERNEL);
	if (!priv->tx_descs)
		goto err_out;

	priv->tx_bufs = kcalloc(n_tx, sizeof(*priv->tx_bufs), GFP_KERNEL);
	if (!priv->tx_bufs)
		goto err_out;

	for (i = 0; i < n_tx; i++)
		priv->tx_bufs[i].dma_desc =
			priv->tx_desc_dma + i * sizeof(struct nb8800_tx_desc);

	nb8800_dma_reset(dev);

	return 0;

err_out:
	nb8800_dma_free(dev);

	return -ENOMEM;
}

static int nb8800_dma_stop(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	struct nb8800_tx_buf *txb = &priv->tx_bufs[0];
	struct nb8800_tx_desc *txd = &priv->tx_descs[0];
	int retry = 5;
	u32 txcr;
	u32 rxcr;
	int err;
	unsigned int i;

	/* wait for tx to finish */
	err = readl_poll_timeout_atomic(priv->base + NB8800_TXC_CR, txcr,
					!(txcr & TCR_EN) &&
					priv->tx_done == priv->tx_next,
					1000, 1000000);
	if (err)
		return err;

	/* The rx DMA only stops if it reaches the end of chain.
	 * To make this happen, we set the EOC flag on all rx
	 * descriptors, put the device in loopback mode, and send
	 * a few dummy frames. The interrupt handler will ignore
	 * these since NAPI is disabled and no real frames are in
	 * the tx queue.
	 */

	for (i = 0; i < RX_DESC_COUNT; i++)
		priv->rx_descs[i].desc.config |= DESC_EOC;

	txd->desc[0].s_addr =
		txb->dma_desc + offsetof(struct nb8800_tx_desc, buf);
	txd->desc[0].config = DESC_BTS(2) | DESC_DS | DESC_EOF | DESC_EOC | 8;
	memset(txd->buf, 0, sizeof(txd->buf));

	nb8800_mac_af(dev, false);
	nb8800_setb(priv, NB8800_MAC_MODE, LOOPBACK_EN);

	do {
		nb8800_writel(priv, NB8800_TX_DESC_ADDR, txb->dma_desc);
		wmb();
		nb8800_writel(priv, NB8800_TXC_CR, txcr | TCR_EN);

		err = readl_poll_timeout_atomic(priv->base + NB8800_RXC_CR,
						rxcr, !(rxcr & RCR_EN),
						1000, 100000);
	} while (err && --retry);

	nb8800_mac_af(dev, true);
	nb8800_clearb(priv, NB8800_MAC_MODE, LOOPBACK_EN);
	nb8800_dma_reset(dev);

	return retry ? 0 : -ETIMEDOUT;
}

static void nb8800_pause_adv(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	u32 adv = 0;

	if (!phydev)
		return;

	if (priv->pause_rx)
		adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
	if (priv->pause_tx)
		adv ^= ADVERTISED_Asym_Pause;

	phydev->supported |= adv;
	phydev->advertising |= adv;
}

static int nb8800_open(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	struct phy_device *phydev;
	int err;

	/* clear any pending interrupts */
	nb8800_writel(priv, NB8800_RXC_SR, 0xf);
	nb8800_writel(priv, NB8800_TXC_SR, 0xf);

	err = nb8800_dma_init(dev);
	if (err)
		return err;

	err = request_irq(dev->irq, nb8800_irq, 0, dev_name(&dev->dev), dev);
	if (err)
		goto err_free_dma;

	nb8800_mac_rx(dev, true);
	nb8800_mac_tx(dev, true);

	phydev = of_phy_connect(dev, priv->phy_node,
				nb8800_link_reconfigure, 0,
				priv->phy_mode);
	if (!phydev)
		goto err_free_irq;

	nb8800_pause_adv(dev);

	netdev_reset_queue(dev);
	napi_enable(&priv->napi);
	netif_start_queue(dev);

	nb8800_start_rx(dev);
	phy_start(phydev);

	return 0;

err_free_irq:
	free_irq(dev->irq, dev);
err_free_dma:
	nb8800_dma_free(dev);

	return err;
}

static int nb8800_stop(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;

	phy_stop(phydev);

	netif_stop_queue(dev);
	napi_disable(&priv->napi);

	nb8800_dma_stop(dev);
	nb8800_mac_rx(dev, false);
	nb8800_mac_tx(dev, false);

	phy_disconnect(phydev);

	free_irq(dev->irq, dev);

	nb8800_dma_free(dev);

	return 0;
}

static int nb8800_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	return phy_mii_ioctl(dev->phydev, rq, cmd);
}

static const struct net_device_ops nb8800_netdev_ops = {
	.ndo_open		= nb8800_open,
	.ndo_stop		= nb8800_stop,
	.ndo_start_xmit		= nb8800_xmit,
	.ndo_set_mac_address	= nb8800_set_mac_address,
	.ndo_set_rx_mode	= nb8800_set_rx_mode,
	.ndo_do_ioctl		= nb8800_ioctl,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
};

static int nb8800_nway_reset(struct net_device *dev)
{
	struct phy_device *phydev = dev->phydev;

	if (!phydev)
		return -ENODEV;

	return genphy_restart_aneg(phydev);
}

static void nb8800_get_pauseparam(struct net_device *dev,
				  struct ethtool_pauseparam *pp)
{
	struct nb8800_priv *priv = netdev_priv(dev);

	pp->autoneg = priv->pause_aneg;
	pp->rx_pause = priv->pause_rx;
	pp->tx_pause = priv->pause_tx;
}

static int nb8800_set_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *pp)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;

	priv->pause_aneg = pp->autoneg;
	priv->pause_rx = pp->rx_pause;
	priv->pause_tx = pp->tx_pause;

	nb8800_pause_adv(dev);

	if (!priv->pause_aneg)
		nb8800_pause_config(dev);
	else if (phydev)
		phy_start_aneg(phydev);

	return 0;
}

static const char nb8800_stats_names[][ETH_GSTRING_LEN] = {
	"rx_bytes_ok",
	"rx_frames_ok",
	"rx_undersize_frames",
	"rx_fragment_frames",
	"rx_64_byte_frames",
	"rx_127_byte_frames",
	"rx_255_byte_frames",
	"rx_511_byte_frames",
	"rx_1023_byte_frames",
	"rx_max_size_frames",
	"rx_oversize_frames",
	"rx_bad_fcs_frames",
	"rx_broadcast_frames",
	"rx_multicast_frames",
	"rx_control_frames",
	"rx_pause_frames",
	"rx_unsup_control_frames",
	"rx_align_error_frames",
	"rx_overrun_frames",
	"rx_jabber_frames",
	"rx_bytes",
	"rx_frames",

	"tx_bytes_ok",
	"tx_frames_ok",
	"tx_64_byte_frames",
	"tx_127_byte_frames",
	"tx_255_byte_frames",
	"tx_511_byte_frames",
	"tx_1023_byte_frames",
	"tx_max_size_frames",
	"tx_oversize_frames",
	"tx_broadcast_frames",
	"tx_multicast_frames",
	"tx_control_frames",
	"tx_pause_frames",
	"tx_underrun_frames",
	"tx_single_collision_frames",
	"tx_multi_collision_frames",
	"tx_deferred_collision_frames",
	"tx_late_collision_frames",
	"tx_excessive_collision_frames",
	"tx_bytes",
	"tx_frames",
	"tx_collisions",
};

#define NB8800_NUM_STATS ARRAY_SIZE(nb8800_stats_names)

static int nb8800_get_sset_count(struct net_device *dev, int sset)
{
	if (sset == ETH_SS_STATS)
		return NB8800_NUM_STATS;

	return -EOPNOTSUPP;
}

static void nb8800_get_strings(struct net_device *dev, u32 sset, u8 *buf)
{
	if (sset == ETH_SS_STATS)
		memcpy(buf, &nb8800_stats_names, sizeof(nb8800_stats_names));
}

static u32 nb8800_read_stat(struct net_device *dev, int index)
{
	struct nb8800_priv *priv = netdev_priv(dev);

	nb8800_writeb(priv, NB8800_STAT_INDEX, index);

	return nb8800_readl(priv, NB8800_STAT_DATA);
}
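
/* The hardware exposes its counters through an index register: an rx
 * counter is read at index i, the corresponding tx counter at index
 * i | 0x80, matching the two halves of nb8800_stats_names.
 */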
static void nb8800_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *estats, u64 *st)
{
	unsigned int i;
	u32 rx, tx;

	for (i = 0; i < NB8800_NUM_STATS / 2; i++) {
		rx = nb8800_read_stat(dev, i);
		tx = nb8800_read_stat(dev, i | 0x80);
		st[i] = rx;
		st[i + NB8800_NUM_STATS / 2] = tx;
	}
}

static const struct ethtool_ops nb8800_ethtool_ops = {
	.nway_reset		= nb8800_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_pauseparam		= nb8800_get_pauseparam,
	.set_pauseparam		= nb8800_set_pauseparam,
	.get_sset_count		= nb8800_get_sset_count,
	.get_strings		= nb8800_get_strings,
	.get_ethtool_stats	= nb8800_get_ethtool_stats,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};
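
/* One-time hardware setup: MAC control registers, FIFO thresholds,
 * tx/rx DMA control words, interrupt mitigation timers, and flow
 * control defaults.
 */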
static int nb8800_hw_init(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	u32 val;

	val = TX_RETRY_EN | TX_PAD_EN | TX_APPEND_FCS;
	nb8800_writeb(priv, NB8800_TX_CTL1, val);

	/* Collision retry count */
	nb8800_writeb(priv, NB8800_TX_CTL2, 5);

	val = RX_PAD_STRIP | RX_AF_EN;
	nb8800_writeb(priv, NB8800_RX_CTL, val);

	/* Chosen by fair dice roll */
	nb8800_writeb(priv, NB8800_RANDOM_SEED, 4);

	/* TX cycles per deferral period */
	nb8800_writeb(priv, NB8800_TX_SDP, 12);

	/* The following three threshold values have been
	 * experimentally determined for good results.
	 */

	/* RX/TX FIFO threshold for partial empty (64-bit entries) */
	nb8800_writeb(priv, NB8800_PE_THRESHOLD, 0);

	/* RX/TX FIFO threshold for partial full (64-bit entries) */
	nb8800_writeb(priv, NB8800_PF_THRESHOLD, 255);

	/* Buffer size for transmit (64-bit entries) */
	nb8800_writeb(priv, NB8800_TX_BUFSIZE, 64);

	/* Configure tx DMA */
	val = nb8800_readl(priv, NB8800_TXC_CR);
	val &= TCR_LE;		/* keep endian setting */
	val |= TCR_DM;		/* DMA descriptor mode */
	val |= TCR_RS;		/* automatically store tx status */
	val |= TCR_DIE;		/* interrupt on DMA chain completion */
	val |= TCR_TFI(7);	/* interrupt after 7 frames transmitted */
	val |= TCR_BTS(2);	/* 32-byte bus transaction size */
	nb8800_writel(priv, NB8800_TXC_CR, val);

	/* TX complete interrupt after 10 ms or 7 frames (see above) */
	val = clk_get_rate(priv->clk) / 100;
	nb8800_writel(priv, NB8800_TX_ITR, val);

	/* Configure rx DMA */
	val = nb8800_readl(priv, NB8800_RXC_CR);
	val &= RCR_LE;		/* keep endian setting */
	val |= RCR_DM;		/* DMA descriptor mode */
	val |= RCR_RS;		/* automatically store rx status */
	val |= RCR_DIE;		/* interrupt at end of DMA chain */
	val |= RCR_RFI(7);	/* interrupt after 7 frames received */
	val |= RCR_BTS(2);	/* 32-byte bus transaction size */
	nb8800_writel(priv, NB8800_RXC_CR, val);

	/* The rx interrupt can fire before the DMA has completed
	 * unless a small delay is added. 50 us is hopefully enough.
	 */
	priv->rx_itr_irq = clk_get_rate(priv->clk) / 20000;

	/* In NAPI poll mode we want to disable interrupts, but the
	 * hardware does not permit this. Delay 10 ms instead.
	 */
	priv->rx_itr_poll = clk_get_rate(priv->clk) / 100;

	nb8800_writel(priv, NB8800_RX_ITR, priv->rx_itr_irq);

	priv->rx_dma_config = RX_BUF_SIZE | DESC_BTS(2) | DESC_DS | DESC_EOF;

	/* Flow control settings */

	/* Pause time of 0.1 ms */
	val = 100000 / 512;
	nb8800_writeb(priv, NB8800_PQ1, val >> 8);
	nb8800_writeb(priv, NB8800_PQ2, val & 0xff);

	/* Auto-negotiate by default */
	priv->pause_aneg = true;
	priv->pause_rx = true;
	priv->pause_tx = true;

	nb8800_mc_init(dev, 0);

	return 0;
}

static int nb8800_tangox_init(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	u32 pad_mode = PAD_MODE_MII;

	switch (priv->phy_mode) {
	case PHY_INTERFACE_MODE_MII:
	case PHY_INTERFACE_MODE_GMII:
		pad_mode = PAD_MODE_MII;
		break;

	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		pad_mode = PAD_MODE_RGMII;
		break;

	default:
		dev_err(dev->dev.parent, "unsupported phy mode %s\n",
			phy_modes(priv->phy_mode));
		return -EINVAL;
	}

	nb8800_writeb(priv, NB8800_TANGOX_PAD_MODE, pad_mode);

	return 0;
}

static int nb8800_tangox_reset(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	int clk_div;

	nb8800_writeb(priv, NB8800_TANGOX_RESET, 0);
	usleep_range(1000, 10000);
	nb8800_writeb(priv, NB8800_TANGOX_RESET, 1);

	wmb();	/* ensure reset is cleared before proceeding */

	clk_div = DIV_ROUND_UP(clk_get_rate(priv->clk), 2 * MAX_MDC_CLOCK);
	nb8800_writew(priv, NB8800_TANGOX_MDIO_CLKDIV, clk_div);

	return 0;
}

static const struct nb8800_ops nb8800_tangox_ops = {
	.init	= nb8800_tangox_init,
	.reset	= nb8800_tangox_reset,
};

static int nb8800_tango4_init(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	int err;

	err = nb8800_tangox_init(dev);
	if (err)
		return err;

	/* On tango4 interrupt on DMA completion per frame works and gives
	 * better performance despite generating more rx interrupts.
	 */

	/* Disable unnecessary interrupt on rx completion */
	nb8800_clearl(priv, NB8800_RXC_CR, RCR_RFI(7));

	/* Request interrupt on descriptor DMA completion */
	priv->rx_dma_config |= DESC_ID;

	return 0;
}

static const struct nb8800_ops nb8800_tango4_ops = {
	.init	= nb8800_tango4_init,
	.reset	= nb8800_tangox_reset,
};

static const struct of_device_id nb8800_dt_ids[] = {
	{
		.compatible = "aurora,nb8800",
	},
	{
		.compatible = "sigma,smp8642-ethernet",
		.data = &nb8800_tangox_ops,
	},
	{
		.compatible = "sigma,smp8734-ethernet",
		.data = &nb8800_tango4_ops,
	},
	{ }
};
MODULE_DEVICE_TABLE(of, nb8800_dt_ids);
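
/* Probe: map the register block, enable the clock, run the optional
 * SoC-specific reset, register the MDIO bus, locate the PHY (either a
 * fixed link or a "phy-handle" node), initialise the hardware, and
 * finally register the net device.
 */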
static int nb8800_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	const struct nb8800_ops *ops = NULL;
	struct nb8800_priv *priv;
	struct resource *res;
	struct net_device *dev;
	struct mii_bus *bus;
	const unsigned char *mac;
	void __iomem *base;
	int irq;
	int ret;

	match = of_match_device(nb8800_dt_ids, &pdev->dev);
	if (match)
		ops = match->data;

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_err(&pdev->dev, "No IRQ\n");
		return -EINVAL;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	dev_dbg(&pdev->dev, "AU-NB8800 Ethernet at %pa\n", &res->start);

	dev = alloc_etherdev(sizeof(*priv));
	if (!dev)
		return -ENOMEM;

	platform_set_drvdata(pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	priv = netdev_priv(dev);
	priv->base = base;

	priv->phy_mode = of_get_phy_mode(pdev->dev.of_node);
	if (priv->phy_mode < 0)
		priv->phy_mode = PHY_INTERFACE_MODE_RGMII;

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(priv->clk)) {
		dev_err(&pdev->dev, "failed to get clock\n");
		ret = PTR_ERR(priv->clk);
		goto err_free_dev;
	}

	ret = clk_prepare_enable(priv->clk);
	if (ret)
		goto err_free_dev;

	spin_lock_init(&priv->tx_lock);

	if (ops && ops->reset) {
		ret = ops->reset(dev);
		if (ret)
			goto err_disable_clk;
	}

	bus = devm_mdiobus_alloc(&pdev->dev);
	if (!bus) {
		ret = -ENOMEM;
		goto err_disable_clk;
	}

	bus->name = "nb8800-mii";
	bus->read = nb8800_mdio_read;
	bus->write = nb8800_mdio_write;
	bus->parent = &pdev->dev;
	snprintf(bus->id, MII_BUS_ID_SIZE, "%lx.nb8800-mii",
		 (unsigned long)res->start);
	bus->priv = priv;

	ret = of_mdiobus_register(bus, pdev->dev.of_node);
	if (ret) {
		dev_err(&pdev->dev, "failed to register MII bus\n");
		goto err_disable_clk;
	}

	if (of_phy_is_fixed_link(pdev->dev.of_node)) {
		ret = of_phy_register_fixed_link(pdev->dev.of_node);
		if (ret < 0) {
			dev_err(&pdev->dev, "bad fixed-link spec\n");
			goto err_free_bus;
		}
		priv->phy_node = of_node_get(pdev->dev.of_node);
	}

	if (!priv->phy_node)
		priv->phy_node = of_parse_phandle(pdev->dev.of_node,
						  "phy-handle", 0);

	if (!priv->phy_node) {
		dev_err(&pdev->dev, "no PHY specified\n");
		ret = -ENODEV;
		goto err_free_bus;
	}

	priv->mii_bus = bus;

	ret = nb8800_hw_init(dev);
	if (ret)
		goto err_deregister_fixed_link;

	if (ops && ops->init) {
		ret = ops->init(dev);
		if (ret)
			goto err_deregister_fixed_link;
	}

	dev->netdev_ops = &nb8800_netdev_ops;
	dev->ethtool_ops = &nb8800_ethtool_ops;
	dev->flags |= IFF_MULTICAST;
	dev->irq = irq;

	mac = of_get_mac_address(pdev->dev.of_node);
	if (mac)
		ether_addr_copy(dev->dev_addr, mac);

	if (!is_valid_ether_addr(dev->dev_addr))
		eth_hw_addr_random(dev);

	nb8800_update_mac_addr(dev);

	netif_carrier_off(dev);

	ret = register_netdev(dev);
	if (ret) {
		netdev_err(dev, "failed to register netdev\n");
		goto err_free_dma;
	}

	netif_napi_add(dev, &priv->napi, nb8800_poll, NAPI_POLL_WEIGHT);

	netdev_info(dev, "MAC address %pM\n", dev->dev_addr);

	return 0;

err_free_dma:
	nb8800_dma_free(dev);
err_deregister_fixed_link:
	if (of_phy_is_fixed_link(pdev->dev.of_node))
		of_phy_deregister_fixed_link(pdev->dev.of_node);
err_free_bus:
	of_node_put(priv->phy_node);
	mdiobus_unregister(bus);
err_disable_clk:
	clk_disable_unprepare(priv->clk);
err_free_dev:
	free_netdev(dev);

	return ret;
}

static int nb8800_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct nb8800_priv *priv = netdev_priv(ndev);

	unregister_netdev(ndev);
	if (of_phy_is_fixed_link(pdev->dev.of_node))
		of_phy_deregister_fixed_link(pdev->dev.of_node);
	of_node_put(priv->phy_node);

	mdiobus_unregister(priv->mii_bus);
	clk_disable_unprepare(priv->clk);

	nb8800_dma_free(ndev);
	free_netdev(ndev);

	return 0;
}

static struct platform_driver nb8800_driver = {
	.driver = {
		.name		= "nb8800",
		.of_match_table	= nb8800_dt_ids,
	},
	.probe	= nb8800_probe,
	.remove	= nb8800_remove,
};

module_platform_driver(nb8800_driver);

MODULE_DESCRIPTION("Aurora AU-NB8800 Ethernet driver");
MODULE_AUTHOR("Mans Rullgard <mans@mansr.com>");
MODULE_LICENSE("GPL");