ravb_main.c

  1. /* Renesas Ethernet AVB device driver
  2. *
  3. * Copyright (C) 2014-2015 Renesas Electronics Corporation
  4. * Copyright (C) 2015 Renesas Solutions Corp.
  5. * Copyright (C) 2015 Cogent Embedded, Inc. <source@cogentembedded.com>
  6. *
  7. * Based on the SuperH Ethernet driver
  8. *
  9. * This program is free software; you can redistribute it and/or modify it
  10. * under the terms and conditions of the GNU General Public License version 2,
  11. * as published by the Free Software Foundation.
  12. */
  13. #include <linux/cache.h>
  14. #include <linux/clk.h>
  15. #include <linux/delay.h>
  16. #include <linux/dma-mapping.h>
  17. #include <linux/err.h>
  18. #include <linux/etherdevice.h>
  19. #include <linux/ethtool.h>
  20. #include <linux/if_vlan.h>
  21. #include <linux/kernel.h>
  22. #include <linux/list.h>
  23. #include <linux/module.h>
  24. #include <linux/net_tstamp.h>
  25. #include <linux/of.h>
  26. #include <linux/of_device.h>
  27. #include <linux/of_irq.h>
  28. #include <linux/of_mdio.h>
  29. #include <linux/of_net.h>
  30. #include <linux/pm_runtime.h>
  31. #include <linux/slab.h>
  32. #include <linux/spinlock.h>
  33. #include "ravb.h"
  34. #define RAVB_DEF_MSG_ENABLE \
  35. (NETIF_MSG_LINK | \
  36. NETIF_MSG_TIMER | \
  37. NETIF_MSG_RX_ERR | \
  38. NETIF_MSG_TX_ERR)
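/* Poll a register until the bits selected by the mask read back as the expected value; give up after about 100 ms */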
  39. int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value)
  40. {
  41. int i;
  42. for (i = 0; i < 10000; i++) {
  43. if ((ravb_read(ndev, reg) & mask) == value)
  44. return 0;
  45. udelay(10);
  46. }
  47. return -ETIMEDOUT;
  48. }
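/* Switch the AVB-DMAC to CONFIG mode and wait for the mode change to take effect */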
  49. static int ravb_config(struct net_device *ndev)
  50. {
  51. int error;
  52. /* Set config mode */
  53. ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) | CCC_OPC_CONFIG,
  54. CCC);
  55. /* Check if the operating mode is changed to the config mode */
  56. error = ravb_wait(ndev, CSR, CSR_OPS, CSR_OPS_CONFIG);
  57. if (error)
  58. netdev_err(ndev, "failed to switch device to config mode\n");
  59. return error;
  60. }
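/* Program the E-MAC duplex bit (ECMR.DM) from the duplex setting cached from the PHY */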
  61. static void ravb_set_duplex(struct net_device *ndev)
  62. {
  63. struct ravb_private *priv = netdev_priv(ndev);
  64. u32 ecmr = ravb_read(ndev, ECMR);
  65. if (priv->duplex) /* Full */
  66. ecmr |= ECMR_DM;
  67. else /* Half */
  68. ecmr &= ~ECMR_DM;
  69. ravb_write(ndev, ecmr, ECMR);
  70. }
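/* Program the E-MAC speed setting (GECMR) from the speed cached from the PHY */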
  71. static void ravb_set_rate(struct net_device *ndev)
  72. {
  73. struct ravb_private *priv = netdev_priv(ndev);
  74. switch (priv->speed) {
  75. case 100: /* 100BASE */
  76. ravb_write(ndev, GECMR_SPEED_100, GECMR);
  77. break;
  78. case 1000: /* 1000BASE */
  79. ravb_write(ndev, GECMR_SPEED_1000, GECMR);
  80. break;
  81. default:
  82. break;
  83. }
  84. }
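/* Reserve head room so that skb->data ends up aligned to a RAVB_ALIGN boundary */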
  85. static void ravb_set_buffer_align(struct sk_buff *skb)
  86. {
  87. u32 reserve = (unsigned long)skb->data & (RAVB_ALIGN - 1);
  88. if (reserve)
  89. skb_reserve(skb, RAVB_ALIGN - reserve);
  90. }
  91. /* Get MAC address from the MAC address registers
  92. *
  93. * Ethernet AVB device doesn't have ROM for MAC address.
  94. * This function gets the MAC address that was used by a bootloader.
  95. */
  96. static void ravb_read_mac_address(struct net_device *ndev, const u8 *mac)
  97. {
  98. if (mac) {
  99. ether_addr_copy(ndev->dev_addr, mac);
  100. } else {
  101. ndev->dev_addr[0] = (ravb_read(ndev, MAHR) >> 24);
  102. ndev->dev_addr[1] = (ravb_read(ndev, MAHR) >> 16) & 0xFF;
  103. ndev->dev_addr[2] = (ravb_read(ndev, MAHR) >> 8) & 0xFF;
  104. ndev->dev_addr[3] = (ravb_read(ndev, MAHR) >> 0) & 0xFF;
  105. ndev->dev_addr[4] = (ravb_read(ndev, MALR) >> 8) & 0xFF;
  106. ndev->dev_addr[5] = (ravb_read(ndev, MALR) >> 0) & 0xFF;
  107. }
  108. }
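/* Common helper for the bitbanged MDIO callbacks below: set or clear one bit in the PIR register */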
  109. static void ravb_mdio_ctrl(struct mdiobb_ctrl *ctrl, u32 mask, int set)
  110. {
  111. struct ravb_private *priv = container_of(ctrl, struct ravb_private,
  112. mdiobb);
  113. u32 pir = ravb_read(priv->ndev, PIR);
  114. if (set)
  115. pir |= mask;
  116. else
  117. pir &= ~mask;
  118. ravb_write(priv->ndev, pir, PIR);
  119. }
  120. /* MDC pin control */
  121. static void ravb_set_mdc(struct mdiobb_ctrl *ctrl, int level)
  122. {
  123. ravb_mdio_ctrl(ctrl, PIR_MDC, level);
  124. }
  125. /* Data I/O pin control */
  126. static void ravb_set_mdio_dir(struct mdiobb_ctrl *ctrl, int output)
  127. {
  128. ravb_mdio_ctrl(ctrl, PIR_MMD, output);
  129. }
  130. /* Set data bit */
  131. static void ravb_set_mdio_data(struct mdiobb_ctrl *ctrl, int value)
  132. {
  133. ravb_mdio_ctrl(ctrl, PIR_MDO, value);
  134. }
  135. /* Get data bit */
  136. static int ravb_get_mdio_data(struct mdiobb_ctrl *ctrl)
  137. {
  138. struct ravb_private *priv = container_of(ctrl, struct ravb_private,
  139. mdiobb);
  140. return (ravb_read(priv->ndev, PIR) & PIR_MDI) != 0;
  141. }
  142. /* MDIO bus control struct */
  143. static struct mdiobb_ops bb_ops = {
  144. .owner = THIS_MODULE,
  145. .set_mdc = ravb_set_mdc,
  146. .set_mdio_dir = ravb_set_mdio_dir,
  147. .set_mdio_data = ravb_set_mdio_data,
  148. .get_mdio_data = ravb_get_mdio_data,
  149. };
  150. /* Free skb's and DMA buffers for Ethernet AVB */
  151. static void ravb_ring_free(struct net_device *ndev, int q)
  152. {
  153. struct ravb_private *priv = netdev_priv(ndev);
  154. int ring_size;
  155. int i;
  156. /* Free RX skb ringbuffer */
  157. if (priv->rx_skb[q]) {
  158. for (i = 0; i < priv->num_rx_ring[q]; i++)
  159. dev_kfree_skb(priv->rx_skb[q][i]);
  160. }
  161. kfree(priv->rx_skb[q]);
  162. priv->rx_skb[q] = NULL;
  163. /* Free TX skb ringbuffer */
  164. if (priv->tx_skb[q]) {
  165. for (i = 0; i < priv->num_tx_ring[q]; i++)
  166. dev_kfree_skb(priv->tx_skb[q][i]);
  167. }
  168. kfree(priv->tx_skb[q]);
  169. priv->tx_skb[q] = NULL;
  170. /* Free aligned TX buffers */
  171. if (priv->tx_buffers[q]) {
  172. for (i = 0; i < priv->num_tx_ring[q]; i++)
  173. kfree(priv->tx_buffers[q][i]);
  174. }
  175. kfree(priv->tx_buffers[q]);
  176. priv->tx_buffers[q] = NULL;
  177. if (priv->rx_ring[q]) {
  178. ring_size = sizeof(struct ravb_ex_rx_desc) *
  179. (priv->num_rx_ring[q] + 1);
  180. dma_free_coherent(NULL, ring_size, priv->rx_ring[q],
  181. priv->rx_desc_dma[q]);
  182. priv->rx_ring[q] = NULL;
  183. }
  184. if (priv->tx_ring[q]) {
  185. ring_size = sizeof(struct ravb_tx_desc) *
  186. (priv->num_tx_ring[q] + 1);
  187. dma_free_coherent(NULL, ring_size, priv->tx_ring[q],
  188. priv->tx_desc_dma[q]);
  189. priv->tx_ring[q] = NULL;
  190. }
  191. }
  192. /* Format skb and descriptor buffer for Ethernet AVB */
  193. static void ravb_ring_format(struct net_device *ndev, int q)
  194. {
  195. struct ravb_private *priv = netdev_priv(ndev);
  196. struct ravb_ex_rx_desc *rx_desc = NULL;
  197. struct ravb_tx_desc *tx_desc = NULL;
  198. struct ravb_desc *desc = NULL;
  199. int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
  200. int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q];
  201. struct sk_buff *skb;
  202. dma_addr_t dma_addr;
  203. void *buffer;
  204. int i;
  205. priv->cur_rx[q] = 0;
  206. priv->cur_tx[q] = 0;
  207. priv->dirty_rx[q] = 0;
  208. priv->dirty_tx[q] = 0;
  209. memset(priv->rx_ring[q], 0, rx_ring_size);
  210. /* Build RX ring buffer */
  211. for (i = 0; i < priv->num_rx_ring[q]; i++) {
  212. priv->rx_skb[q][i] = NULL;
  213. skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RAVB_ALIGN - 1);
  214. if (!skb)
  215. break;
  216. ravb_set_buffer_align(skb);
  217. /* RX descriptor */
  218. rx_desc = &priv->rx_ring[q][i];
219. /* The size of the buffer should be on a 16-byte boundary. */
  220. rx_desc->ds_cc = cpu_to_le16(ALIGN(PKT_BUF_SZ, 16));
  221. dma_addr = dma_map_single(&ndev->dev, skb->data,
  222. ALIGN(PKT_BUF_SZ, 16),
  223. DMA_FROM_DEVICE);
  224. if (dma_mapping_error(&ndev->dev, dma_addr)) {
  225. dev_kfree_skb(skb);
  226. break;
  227. }
  228. priv->rx_skb[q][i] = skb;
  229. rx_desc->dptr = cpu_to_le32(dma_addr);
  230. rx_desc->die_dt = DT_FEMPTY;
  231. }
  232. rx_desc = &priv->rx_ring[q][i];
  233. rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
  234. rx_desc->die_dt = DT_LINKFIX; /* type */
  235. priv->dirty_rx[q] = (u32)(i - priv->num_rx_ring[q]);
  236. memset(priv->tx_ring[q], 0, tx_ring_size);
  237. /* Build TX ring buffer */
  238. for (i = 0; i < priv->num_tx_ring[q]; i++) {
  239. priv->tx_skb[q][i] = NULL;
  240. priv->tx_buffers[q][i] = NULL;
  241. buffer = kmalloc(PKT_BUF_SZ + RAVB_ALIGN - 1, GFP_KERNEL);
  242. if (!buffer)
  243. break;
  244. /* Aligned TX buffer */
  245. priv->tx_buffers[q][i] = buffer;
  246. tx_desc = &priv->tx_ring[q][i];
  247. tx_desc->die_dt = DT_EEMPTY;
  248. }
  249. tx_desc = &priv->tx_ring[q][i];
  250. tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
  251. tx_desc->die_dt = DT_LINKFIX; /* type */
  252. /* RX descriptor base address for best effort */
  253. desc = &priv->desc_bat[RX_QUEUE_OFFSET + q];
  254. desc->die_dt = DT_LINKFIX; /* type */
  255. desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
  256. /* TX descriptor base address for best effort */
  257. desc = &priv->desc_bat[q];
  258. desc->die_dt = DT_LINKFIX; /* type */
  259. desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
  260. }
  261. /* Init skb and descriptor buffer for Ethernet AVB */
  262. static int ravb_ring_init(struct net_device *ndev, int q)
  263. {
  264. struct ravb_private *priv = netdev_priv(ndev);
  265. int ring_size;
  266. /* Allocate RX and TX skb rings */
  267. priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
  268. sizeof(*priv->rx_skb[q]), GFP_KERNEL);
  269. priv->tx_skb[q] = kcalloc(priv->num_tx_ring[q],
  270. sizeof(*priv->tx_skb[q]), GFP_KERNEL);
  271. if (!priv->rx_skb[q] || !priv->tx_skb[q])
  272. goto error;
  273. /* Allocate rings for the aligned buffers */
  274. priv->tx_buffers[q] = kcalloc(priv->num_tx_ring[q],
  275. sizeof(*priv->tx_buffers[q]), GFP_KERNEL);
  276. if (!priv->tx_buffers[q])
  277. goto error;
  278. /* Allocate all RX descriptors. */
  279. ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);
  280. priv->rx_ring[q] = dma_alloc_coherent(NULL, ring_size,
  281. &priv->rx_desc_dma[q],
  282. GFP_KERNEL);
  283. if (!priv->rx_ring[q])
  284. goto error;
  285. priv->dirty_rx[q] = 0;
  286. /* Allocate all TX descriptors. */
  287. ring_size = sizeof(struct ravb_tx_desc) * (priv->num_tx_ring[q] + 1);
  288. priv->tx_ring[q] = dma_alloc_coherent(NULL, ring_size,
  289. &priv->tx_desc_dma[q],
  290. GFP_KERNEL);
  291. if (!priv->tx_ring[q])
  292. goto error;
  293. return 0;
  294. error:
  295. ravb_ring_free(ndev, q);
  296. return -ENOMEM;
  297. }
  298. /* E-MAC init function */
  299. static void ravb_emac_init(struct net_device *ndev)
  300. {
  301. struct ravb_private *priv = netdev_priv(ndev);
  302. u32 ecmr;
  303. /* Receive frame limit set register */
  304. ravb_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, RFLR);
  305. /* PAUSE prohibition */
  306. ecmr = ravb_read(ndev, ECMR);
  307. ecmr &= ECMR_DM;
  308. ecmr |= ECMR_ZPF | (priv->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;
  309. ravb_write(ndev, ecmr, ECMR);
  310. ravb_set_rate(ndev);
  311. /* Set MAC address */
  312. ravb_write(ndev,
  313. (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
  314. (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
  315. ravb_write(ndev,
  316. (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
  317. ravb_write(ndev, 1, MPR);
  318. /* E-MAC status register clear */
  319. ravb_write(ndev, ECSR_ICD | ECSR_MPD, ECSR);
  320. /* E-MAC interrupt enable register */
  321. ravb_write(ndev, ECSIPR_ICDIP | ECSIPR_MPDIP | ECSIPR_LCHNGIP, ECSIPR);
  322. }
  323. /* Device init function for Ethernet AVB */
  324. static int ravb_dmac_init(struct net_device *ndev)
  325. {
  326. int error;
  327. /* Set CONFIG mode */
  328. error = ravb_config(ndev);
  329. if (error)
  330. return error;
  331. error = ravb_ring_init(ndev, RAVB_BE);
  332. if (error)
  333. return error;
  334. error = ravb_ring_init(ndev, RAVB_NC);
  335. if (error) {
  336. ravb_ring_free(ndev, RAVB_BE);
  337. return error;
  338. }
  339. /* Descriptor format */
  340. ravb_ring_format(ndev, RAVB_BE);
  341. ravb_ring_format(ndev, RAVB_NC);
  342. #if defined(__LITTLE_ENDIAN)
  343. ravb_write(ndev, ravb_read(ndev, CCC) & ~CCC_BOC, CCC);
  344. #else
  345. ravb_write(ndev, ravb_read(ndev, CCC) | CCC_BOC, CCC);
  346. #endif
  347. /* Set AVB RX */
  348. ravb_write(ndev, RCR_EFFS | RCR_ENCF | RCR_ETS0 | 0x18000000, RCR);
  349. /* Set FIFO size */
  350. ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00222200, TGC);
  351. /* Timestamp enable */
  352. ravb_write(ndev, TCCR_TFEN, TCCR);
  353. /* Interrupt enable: */
  354. /* Frame receive */
  355. ravb_write(ndev, RIC0_FRE0 | RIC0_FRE1, RIC0);
  356. /* Receive FIFO full warning */
  357. ravb_write(ndev, RIC1_RFWE, RIC1);
  358. /* Receive FIFO full error, descriptor empty */
  359. ravb_write(ndev, RIC2_QFE0 | RIC2_QFE1 | RIC2_RFFE, RIC2);
  360. /* Frame transmitted, timestamp FIFO updated */
  361. ravb_write(ndev, TIC_FTE0 | TIC_FTE1 | TIC_TFUE, TIC);
  362. /* Setting the control will start the AVB-DMAC process. */
  363. ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) | CCC_OPC_OPERATION,
  364. CCC);
  365. return 0;
  366. }
  367. /* Free TX skb function for AVB-IP */
  368. static int ravb_tx_free(struct net_device *ndev, int q)
  369. {
  370. struct ravb_private *priv = netdev_priv(ndev);
  371. struct net_device_stats *stats = &priv->stats[q];
  372. struct ravb_tx_desc *desc;
  373. int free_num = 0;
  374. int entry = 0;
  375. u32 size;
  376. for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
  377. entry = priv->dirty_tx[q] % priv->num_tx_ring[q];
  378. desc = &priv->tx_ring[q][entry];
  379. if (desc->die_dt != DT_FEMPTY)
  380. break;
  381. /* Descriptor type must be checked before all other reads */
  382. dma_rmb();
  383. size = le16_to_cpu(desc->ds_tagl) & TX_DS;
  384. /* Free the original skb. */
  385. if (priv->tx_skb[q][entry]) {
  386. dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr),
  387. size, DMA_TO_DEVICE);
  388. dev_kfree_skb_any(priv->tx_skb[q][entry]);
  389. priv->tx_skb[q][entry] = NULL;
  390. free_num++;
  391. }
  392. stats->tx_packets++;
  393. stats->tx_bytes += size;
  394. desc->die_dt = DT_EEMPTY;
  395. }
  396. return free_num;
  397. }
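/* Drain the TX timestamp FIFO, delivering each timestamp to the skb queued with the matching tag */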
  398. static void ravb_get_tx_tstamp(struct net_device *ndev)
  399. {
  400. struct ravb_private *priv = netdev_priv(ndev);
  401. struct ravb_tstamp_skb *ts_skb, *ts_skb2;
  402. struct skb_shared_hwtstamps shhwtstamps;
  403. struct sk_buff *skb;
  404. struct timespec64 ts;
  405. u16 tag, tfa_tag;
  406. int count;
  407. u32 tfa2;
  408. count = (ravb_read(ndev, TSR) & TSR_TFFL) >> 8;
  409. while (count--) {
  410. tfa2 = ravb_read(ndev, TFA2);
  411. tfa_tag = (tfa2 & TFA2_TST) >> 16;
  412. ts.tv_nsec = (u64)ravb_read(ndev, TFA0);
  413. ts.tv_sec = ((u64)(tfa2 & TFA2_TSV) << 32) |
  414. ravb_read(ndev, TFA1);
  415. memset(&shhwtstamps, 0, sizeof(shhwtstamps));
  416. shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
  417. list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list,
  418. list) {
  419. skb = ts_skb->skb;
  420. tag = ts_skb->tag;
  421. list_del(&ts_skb->list);
  422. kfree(ts_skb);
  423. if (tag == tfa_tag) {
  424. skb_tstamp_tx(skb, &shhwtstamps);
  425. break;
  426. }
  427. }
  428. ravb_write(ndev, ravb_read(ndev, TCCR) | TCCR_TFR, TCCR);
  429. }
  430. }
  431. /* Packet receive function for Ethernet AVB */
  432. static bool ravb_rx(struct net_device *ndev, int *quota, int q)
  433. {
  434. struct ravb_private *priv = netdev_priv(ndev);
  435. int entry = priv->cur_rx[q] % priv->num_rx_ring[q];
  436. int boguscnt = (priv->dirty_rx[q] + priv->num_rx_ring[q]) -
  437. priv->cur_rx[q];
  438. struct net_device_stats *stats = &priv->stats[q];
  439. struct ravb_ex_rx_desc *desc;
  440. struct sk_buff *skb;
  441. dma_addr_t dma_addr;
  442. struct timespec64 ts;
  443. u16 pkt_len = 0;
  444. u8 desc_status;
  445. int limit;
  446. boguscnt = min(boguscnt, *quota);
  447. limit = boguscnt;
  448. desc = &priv->rx_ring[q][entry];
  449. while (desc->die_dt != DT_FEMPTY) {
  450. /* Descriptor type must be checked before all other reads */
  451. dma_rmb();
  452. desc_status = desc->msc;
  453. pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;
  454. if (--boguscnt < 0)
  455. break;
  456. if (desc_status & MSC_MC)
  457. stats->multicast++;
  458. if (desc_status & (MSC_CRC | MSC_RFE | MSC_RTSF | MSC_RTLF |
  459. MSC_CEEF)) {
  460. stats->rx_errors++;
  461. if (desc_status & MSC_CRC)
  462. stats->rx_crc_errors++;
  463. if (desc_status & MSC_RFE)
  464. stats->rx_frame_errors++;
  465. if (desc_status & (MSC_RTLF | MSC_RTSF))
  466. stats->rx_length_errors++;
  467. if (desc_status & MSC_CEEF)
  468. stats->rx_missed_errors++;
  469. } else {
  470. u32 get_ts = priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE;
  471. skb = priv->rx_skb[q][entry];
  472. priv->rx_skb[q][entry] = NULL;
  473. dma_sync_single_for_cpu(&ndev->dev,
  474. le32_to_cpu(desc->dptr),
  475. ALIGN(PKT_BUF_SZ, 16),
  476. DMA_FROM_DEVICE);
  477. get_ts &= (q == RAVB_NC) ?
  478. RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
  479. ~RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
  480. if (get_ts) {
  481. struct skb_shared_hwtstamps *shhwtstamps;
  482. shhwtstamps = skb_hwtstamps(skb);
  483. memset(shhwtstamps, 0, sizeof(*shhwtstamps));
  484. ts.tv_sec = ((u64) le16_to_cpu(desc->ts_sh) <<
  485. 32) | le32_to_cpu(desc->ts_sl);
  486. ts.tv_nsec = le32_to_cpu(desc->ts_n);
  487. shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
  488. }
  489. skb_put(skb, pkt_len);
  490. skb->protocol = eth_type_trans(skb, ndev);
  491. napi_gro_receive(&priv->napi[q], skb);
  492. stats->rx_packets++;
  493. stats->rx_bytes += pkt_len;
  494. }
  495. entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
  496. desc = &priv->rx_ring[q][entry];
  497. }
  498. /* Refill the RX ring buffers. */
  499. for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
  500. entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
  501. desc = &priv->rx_ring[q][entry];
502. /* The size of the buffer should be on a 16-byte boundary. */
  503. desc->ds_cc = cpu_to_le16(ALIGN(PKT_BUF_SZ, 16));
  504. if (!priv->rx_skb[q][entry]) {
  505. skb = netdev_alloc_skb(ndev,
  506. PKT_BUF_SZ + RAVB_ALIGN - 1);
  507. if (!skb)
  508. break; /* Better luck next round. */
  509. ravb_set_buffer_align(skb);
  510. dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr),
  511. ALIGN(PKT_BUF_SZ, 16),
  512. DMA_FROM_DEVICE);
  513. dma_addr = dma_map_single(&ndev->dev, skb->data,
  514. le16_to_cpu(desc->ds_cc),
  515. DMA_FROM_DEVICE);
  516. skb_checksum_none_assert(skb);
  517. if (dma_mapping_error(&ndev->dev, dma_addr)) {
  518. dev_kfree_skb_any(skb);
  519. break;
  520. }
  521. desc->dptr = cpu_to_le32(dma_addr);
  522. priv->rx_skb[q][entry] = skb;
  523. }
  524. /* Descriptor type must be set after all the above writes */
  525. dma_wmb();
  526. desc->die_dt = DT_FEMPTY;
  527. }
  528. *quota -= limit - (++boguscnt);
  529. return boguscnt <= 0;
  530. }
  531. static void ravb_rcv_snd_disable(struct net_device *ndev)
  532. {
  533. /* Disable TX and RX */
  534. ravb_write(ndev, ravb_read(ndev, ECMR) & ~(ECMR_RE | ECMR_TE), ECMR);
  535. }
  536. static void ravb_rcv_snd_enable(struct net_device *ndev)
  537. {
  538. /* Enable TX and RX */
  539. ravb_write(ndev, ravb_read(ndev, ECMR) | ECMR_RE | ECMR_TE, ECMR);
  540. }
541. /* Wait until the hardware DMA processes have finished */
  542. static int ravb_stop_dma(struct net_device *ndev)
  543. {
  544. int error;
  545. /* Wait for stopping the hardware TX process */
  546. error = ravb_wait(ndev, TCCR,
  547. TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3, 0);
  548. if (error)
  549. return error;
  550. error = ravb_wait(ndev, CSR, CSR_TPO0 | CSR_TPO1 | CSR_TPO2 | CSR_TPO3,
  551. 0);
  552. if (error)
  553. return error;
  554. /* Stop the E-MAC's RX/TX processes. */
  555. ravb_rcv_snd_disable(ndev);
  556. /* Wait for stopping the RX DMA process */
  557. error = ravb_wait(ndev, CSR, CSR_RPO, 0);
  558. if (error)
  559. return error;
  560. /* Stop AVB-DMAC process */
  561. return ravb_config(ndev);
  562. }
  563. /* E-MAC interrupt handler */
  564. static void ravb_emac_interrupt(struct net_device *ndev)
  565. {
  566. struct ravb_private *priv = netdev_priv(ndev);
  567. u32 ecsr, psr;
  568. ecsr = ravb_read(ndev, ECSR);
  569. ravb_write(ndev, ecsr, ECSR); /* clear interrupt */
  570. if (ecsr & ECSR_ICD)
  571. ndev->stats.tx_carrier_errors++;
  572. if (ecsr & ECSR_LCHNG) {
  573. /* Link changed */
  574. if (priv->no_avb_link)
  575. return;
  576. psr = ravb_read(ndev, PSR);
  577. if (priv->avb_link_active_low)
  578. psr ^= PSR_LMON;
  579. if (!(psr & PSR_LMON)) {
580. /* Disable RX and TX */
  581. ravb_rcv_snd_disable(ndev);
  582. } else {
  583. /* Enable RX and TX */
  584. ravb_rcv_snd_enable(ndev);
  585. }
  586. }
  587. }
  588. /* Error interrupt handler */
  589. static void ravb_error_interrupt(struct net_device *ndev)
  590. {
  591. struct ravb_private *priv = netdev_priv(ndev);
  592. u32 eis, ris2;
  593. eis = ravb_read(ndev, EIS);
  594. ravb_write(ndev, ~EIS_QFS, EIS);
  595. if (eis & EIS_QFS) {
  596. ris2 = ravb_read(ndev, RIS2);
  597. ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF), RIS2);
598. /* Receive descriptor empty interrupt (best effort queue) */
  599. if (ris2 & RIS2_QFF0)
  600. priv->stats[RAVB_BE].rx_over_errors++;
601. /* Receive descriptor empty interrupt (network control queue) */
  602. if (ris2 & RIS2_QFF1)
  603. priv->stats[RAVB_NC].rx_over_errors++;
  604. /* Receive FIFO Overflow int */
  605. if (ris2 & RIS2_RFFF)
  606. priv->rx_fifo_errors++;
  607. }
  608. }
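/* Main interrupt handler: dispatches timestamp FIFO, per-queue RX/TX, E-MAC, error and gPTP interrupts */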
  609. static irqreturn_t ravb_interrupt(int irq, void *dev_id)
  610. {
  611. struct net_device *ndev = dev_id;
  612. struct ravb_private *priv = netdev_priv(ndev);
  613. irqreturn_t result = IRQ_NONE;
  614. u32 iss;
  615. spin_lock(&priv->lock);
  616. /* Get interrupt status */
  617. iss = ravb_read(ndev, ISS);
  618. /* Received and transmitted interrupts */
  619. if (iss & (ISS_FRS | ISS_FTS | ISS_TFUS)) {
  620. u32 ris0 = ravb_read(ndev, RIS0);
  621. u32 ric0 = ravb_read(ndev, RIC0);
  622. u32 tis = ravb_read(ndev, TIS);
  623. u32 tic = ravb_read(ndev, TIC);
  624. int q;
  625. /* Timestamp updated */
  626. if (tis & TIS_TFUF) {
  627. ravb_write(ndev, ~TIS_TFUF, TIS);
  628. ravb_get_tx_tstamp(ndev);
  629. result = IRQ_HANDLED;
  630. }
  631. /* Network control and best effort queue RX/TX */
  632. for (q = RAVB_NC; q >= RAVB_BE; q--) {
  633. if (((ris0 & ric0) & BIT(q)) ||
  634. ((tis & tic) & BIT(q))) {
  635. if (napi_schedule_prep(&priv->napi[q])) {
  636. /* Mask RX and TX interrupts */
  637. ravb_write(ndev, ric0 & ~BIT(q), RIC0);
  638. ravb_write(ndev, tic & ~BIT(q), TIC);
  639. __napi_schedule(&priv->napi[q]);
  640. } else {
  641. netdev_warn(ndev,
  642. "ignoring interrupt, rx status 0x%08x, rx mask 0x%08x,\n",
  643. ris0, ric0);
  644. netdev_warn(ndev,
  645. " tx status 0x%08x, tx mask 0x%08x.\n",
  646. tis, tic);
  647. }
  648. result = IRQ_HANDLED;
  649. }
  650. }
  651. }
  652. /* E-MAC status summary */
  653. if (iss & ISS_MS) {
  654. ravb_emac_interrupt(ndev);
  655. result = IRQ_HANDLED;
  656. }
  657. /* Error status summary */
  658. if (iss & ISS_ES) {
  659. ravb_error_interrupt(ndev);
  660. result = IRQ_HANDLED;
  661. }
  662. if (iss & ISS_CGIS)
  663. result = ravb_ptp_interrupt(ndev);
  664. mmiowb();
  665. spin_unlock(&priv->lock);
  666. return result;
  667. }
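/* NAPI poll handler for one queue: process received frames, reclaim transmitted descriptors, then re-enable that queue's RX/TX interrupts */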
  668. static int ravb_poll(struct napi_struct *napi, int budget)
  669. {
  670. struct net_device *ndev = napi->dev;
  671. struct ravb_private *priv = netdev_priv(ndev);
  672. unsigned long flags;
  673. int q = napi - priv->napi;
  674. int mask = BIT(q);
  675. int quota = budget;
  676. u32 ris0, tis;
  677. for (;;) {
  678. tis = ravb_read(ndev, TIS);
  679. ris0 = ravb_read(ndev, RIS0);
  680. if (!((ris0 & mask) || (tis & mask)))
  681. break;
  682. /* Processing RX Descriptor Ring */
  683. if (ris0 & mask) {
  684. /* Clear RX interrupt */
  685. ravb_write(ndev, ~mask, RIS0);
  686. if (ravb_rx(ndev, &quota, q))
  687. goto out;
  688. }
  689. /* Processing TX Descriptor Ring */
  690. if (tis & mask) {
  691. spin_lock_irqsave(&priv->lock, flags);
  692. /* Clear TX interrupt */
  693. ravb_write(ndev, ~mask, TIS);
  694. ravb_tx_free(ndev, q);
  695. netif_wake_subqueue(ndev, q);
  696. mmiowb();
  697. spin_unlock_irqrestore(&priv->lock, flags);
  698. }
  699. }
  700. napi_complete(napi);
  701. /* Re-enable RX/TX interrupts */
  702. spin_lock_irqsave(&priv->lock, flags);
  703. ravb_write(ndev, ravb_read(ndev, RIC0) | mask, RIC0);
  704. ravb_write(ndev, ravb_read(ndev, TIC) | mask, TIC);
  705. mmiowb();
  706. spin_unlock_irqrestore(&priv->lock, flags);
  707. /* Receive error message handling */
  708. priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors;
  709. priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
  710. if (priv->rx_over_errors != ndev->stats.rx_over_errors) {
  711. ndev->stats.rx_over_errors = priv->rx_over_errors;
  712. netif_err(priv, rx_err, ndev, "Receive Descriptor Empty\n");
  713. }
  714. if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors) {
  715. ndev->stats.rx_fifo_errors = priv->rx_fifo_errors;
  716. netif_err(priv, rx_err, ndev, "Receive FIFO Overflow\n");
  717. }
  718. out:
  719. return budget - quota;
  720. }
  721. /* PHY state control function */
  722. static void ravb_adjust_link(struct net_device *ndev)
  723. {
  724. struct ravb_private *priv = netdev_priv(ndev);
  725. struct phy_device *phydev = priv->phydev;
  726. bool new_state = false;
  727. if (phydev->link) {
  728. if (phydev->duplex != priv->duplex) {
  729. new_state = true;
  730. priv->duplex = phydev->duplex;
  731. ravb_set_duplex(ndev);
  732. }
  733. if (phydev->speed != priv->speed) {
  734. new_state = true;
  735. priv->speed = phydev->speed;
  736. ravb_set_rate(ndev);
  737. }
  738. if (!priv->link) {
  739. ravb_write(ndev, ravb_read(ndev, ECMR) & ~ECMR_TXF,
  740. ECMR);
  741. new_state = true;
  742. priv->link = phydev->link;
  743. if (priv->no_avb_link)
  744. ravb_rcv_snd_enable(ndev);
  745. }
  746. } else if (priv->link) {
  747. new_state = true;
  748. priv->link = 0;
  749. priv->speed = 0;
  750. priv->duplex = -1;
  751. if (priv->no_avb_link)
  752. ravb_rcv_snd_disable(ndev);
  753. }
  754. if (new_state && netif_msg_link(priv))
  755. phy_print_status(phydev);
  756. }
  757. /* PHY init function */
  758. static int ravb_phy_init(struct net_device *ndev)
  759. {
  760. struct device_node *np = ndev->dev.parent->of_node;
  761. struct ravb_private *priv = netdev_priv(ndev);
  762. struct phy_device *phydev;
  763. struct device_node *pn;
  764. priv->link = 0;
  765. priv->speed = 0;
  766. priv->duplex = -1;
  767. /* Try connecting to PHY */
  768. pn = of_parse_phandle(np, "phy-handle", 0);
  769. phydev = of_phy_connect(ndev, pn, ravb_adjust_link, 0,
  770. priv->phy_interface);
  771. if (!phydev) {
  772. netdev_err(ndev, "failed to connect PHY\n");
  773. return -ENOENT;
  774. }
  775. netdev_info(ndev, "attached PHY %d (IRQ %d) to driver %s\n",
  776. phydev->addr, phydev->irq, phydev->drv->name);
  777. priv->phydev = phydev;
  778. return 0;
  779. }
  780. /* PHY control start function */
  781. static int ravb_phy_start(struct net_device *ndev)
  782. {
  783. struct ravb_private *priv = netdev_priv(ndev);
  784. int error;
  785. error = ravb_phy_init(ndev);
  786. if (error)
  787. return error;
  788. phy_start(priv->phydev);
  789. return 0;
  790. }
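/* ethtool link settings are read from and written to the attached PHY under the driver's spinlock */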
  791. static int ravb_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
  792. {
  793. struct ravb_private *priv = netdev_priv(ndev);
  794. int error = -ENODEV;
  795. unsigned long flags;
  796. if (priv->phydev) {
  797. spin_lock_irqsave(&priv->lock, flags);
  798. error = phy_ethtool_gset(priv->phydev, ecmd);
  799. spin_unlock_irqrestore(&priv->lock, flags);
  800. }
  801. return error;
  802. }
  803. static int ravb_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
  804. {
  805. struct ravb_private *priv = netdev_priv(ndev);
  806. unsigned long flags;
  807. int error;
  808. if (!priv->phydev)
  809. return -ENODEV;
  810. spin_lock_irqsave(&priv->lock, flags);
  811. /* Disable TX and RX */
  812. ravb_rcv_snd_disable(ndev);
  813. error = phy_ethtool_sset(priv->phydev, ecmd);
  814. if (error)
  815. goto error_exit;
  816. if (ecmd->duplex == DUPLEX_FULL)
  817. priv->duplex = 1;
  818. else
  819. priv->duplex = 0;
  820. ravb_set_duplex(ndev);
  821. error_exit:
  822. mdelay(1);
  823. /* Enable TX and RX */
  824. ravb_rcv_snd_enable(ndev);
  825. mmiowb();
  826. spin_unlock_irqrestore(&priv->lock, flags);
  827. return error;
  828. }
  829. static int ravb_nway_reset(struct net_device *ndev)
  830. {
  831. struct ravb_private *priv = netdev_priv(ndev);
  832. int error = -ENODEV;
  833. unsigned long flags;
  834. if (priv->phydev) {
  835. spin_lock_irqsave(&priv->lock, flags);
  836. error = phy_start_aneg(priv->phydev);
  837. spin_unlock_irqrestore(&priv->lock, flags);
  838. }
  839. return error;
  840. }
  841. static u32 ravb_get_msglevel(struct net_device *ndev)
  842. {
  843. struct ravb_private *priv = netdev_priv(ndev);
  844. return priv->msg_enable;
  845. }
  846. static void ravb_set_msglevel(struct net_device *ndev, u32 value)
  847. {
  848. struct ravb_private *priv = netdev_priv(ndev);
  849. priv->msg_enable = value;
  850. }
  851. static const char ravb_gstrings_stats[][ETH_GSTRING_LEN] = {
  852. "rx_queue_0_current",
  853. "tx_queue_0_current",
  854. "rx_queue_0_dirty",
  855. "tx_queue_0_dirty",
  856. "rx_queue_0_packets",
  857. "tx_queue_0_packets",
  858. "rx_queue_0_bytes",
  859. "tx_queue_0_bytes",
  860. "rx_queue_0_mcast_packets",
  861. "rx_queue_0_errors",
  862. "rx_queue_0_crc_errors",
  863. "rx_queue_0_frame_errors",
  864. "rx_queue_0_length_errors",
  865. "rx_queue_0_missed_errors",
  866. "rx_queue_0_over_errors",
  867. "rx_queue_1_current",
  868. "tx_queue_1_current",
  869. "rx_queue_1_dirty",
  870. "tx_queue_1_dirty",
  871. "rx_queue_1_packets",
  872. "tx_queue_1_packets",
  873. "rx_queue_1_bytes",
  874. "tx_queue_1_bytes",
  875. "rx_queue_1_mcast_packets",
  876. "rx_queue_1_errors",
  877. "rx_queue_1_crc_errors",
  878. "rx_queue_1_frame_errors_",
  879. "rx_queue_1_length_errors",
  880. "rx_queue_1_missed_errors",
  881. "rx_queue_1_over_errors",
  882. };
  883. #define RAVB_STATS_LEN ARRAY_SIZE(ravb_gstrings_stats)
  884. static int ravb_get_sset_count(struct net_device *netdev, int sset)
  885. {
  886. switch (sset) {
  887. case ETH_SS_STATS:
  888. return RAVB_STATS_LEN;
  889. default:
  890. return -EOPNOTSUPP;
  891. }
  892. }
  893. static void ravb_get_ethtool_stats(struct net_device *ndev,
  894. struct ethtool_stats *stats, u64 *data)
  895. {
  896. struct ravb_private *priv = netdev_priv(ndev);
  897. int i = 0;
  898. int q;
  899. /* Device-specific stats */
  900. for (q = RAVB_BE; q < NUM_RX_QUEUE; q++) {
  901. struct net_device_stats *stats = &priv->stats[q];
  902. data[i++] = priv->cur_rx[q];
  903. data[i++] = priv->cur_tx[q];
  904. data[i++] = priv->dirty_rx[q];
  905. data[i++] = priv->dirty_tx[q];
  906. data[i++] = stats->rx_packets;
  907. data[i++] = stats->tx_packets;
  908. data[i++] = stats->rx_bytes;
  909. data[i++] = stats->tx_bytes;
  910. data[i++] = stats->multicast;
  911. data[i++] = stats->rx_errors;
  912. data[i++] = stats->rx_crc_errors;
  913. data[i++] = stats->rx_frame_errors;
  914. data[i++] = stats->rx_length_errors;
  915. data[i++] = stats->rx_missed_errors;
  916. data[i++] = stats->rx_over_errors;
  917. }
  918. }
  919. static void ravb_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
  920. {
  921. switch (stringset) {
  922. case ETH_SS_STATS:
  923. memcpy(data, *ravb_gstrings_stats, sizeof(ravb_gstrings_stats));
  924. break;
  925. }
  926. }
  927. static void ravb_get_ringparam(struct net_device *ndev,
  928. struct ethtool_ringparam *ring)
  929. {
  930. struct ravb_private *priv = netdev_priv(ndev);
  931. ring->rx_max_pending = BE_RX_RING_MAX;
  932. ring->tx_max_pending = BE_TX_RING_MAX;
  933. ring->rx_pending = priv->num_rx_ring[RAVB_BE];
  934. ring->tx_pending = priv->num_tx_ring[RAVB_BE];
  935. }
  936. static int ravb_set_ringparam(struct net_device *ndev,
  937. struct ethtool_ringparam *ring)
  938. {
  939. struct ravb_private *priv = netdev_priv(ndev);
  940. int error;
  941. if (ring->tx_pending > BE_TX_RING_MAX ||
  942. ring->rx_pending > BE_RX_RING_MAX ||
  943. ring->tx_pending < BE_TX_RING_MIN ||
  944. ring->rx_pending < BE_RX_RING_MIN)
  945. return -EINVAL;
  946. if (ring->rx_mini_pending || ring->rx_jumbo_pending)
  947. return -EINVAL;
  948. if (netif_running(ndev)) {
  949. netif_device_detach(ndev);
  950. /* Stop PTP Clock driver */
  951. ravb_ptp_stop(ndev);
  952. /* Wait for DMA stopping */
  953. error = ravb_stop_dma(ndev);
  954. if (error) {
  955. netdev_err(ndev,
  956. "cannot set ringparam! Any AVB processes are still running?\n");
  957. return error;
  958. }
  959. synchronize_irq(ndev->irq);
  960. /* Free all the skb's in the RX queue and the DMA buffers. */
  961. ravb_ring_free(ndev, RAVB_BE);
  962. ravb_ring_free(ndev, RAVB_NC);
  963. }
  964. /* Set new parameters */
  965. priv->num_rx_ring[RAVB_BE] = ring->rx_pending;
  966. priv->num_tx_ring[RAVB_BE] = ring->tx_pending;
  967. if (netif_running(ndev)) {
  968. error = ravb_dmac_init(ndev);
  969. if (error) {
  970. netdev_err(ndev,
  971. "%s: ravb_dmac_init() failed, error %d\n",
  972. __func__, error);
  973. return error;
  974. }
  975. ravb_emac_init(ndev);
  976. /* Initialise PTP Clock driver */
  977. ravb_ptp_init(ndev, priv->pdev);
  978. netif_device_attach(ndev);
  979. }
  980. return 0;
  981. }
  982. static int ravb_get_ts_info(struct net_device *ndev,
  983. struct ethtool_ts_info *info)
  984. {
  985. struct ravb_private *priv = netdev_priv(ndev);
  986. info->so_timestamping =
  987. SOF_TIMESTAMPING_TX_SOFTWARE |
  988. SOF_TIMESTAMPING_RX_SOFTWARE |
  989. SOF_TIMESTAMPING_SOFTWARE |
  990. SOF_TIMESTAMPING_TX_HARDWARE |
  991. SOF_TIMESTAMPING_RX_HARDWARE |
  992. SOF_TIMESTAMPING_RAW_HARDWARE;
  993. info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
  994. info->rx_filters =
  995. (1 << HWTSTAMP_FILTER_NONE) |
  996. (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
  997. (1 << HWTSTAMP_FILTER_ALL);
  998. info->phc_index = ptp_clock_index(priv->ptp.clock);
  999. return 0;
  1000. }
  1001. static const struct ethtool_ops ravb_ethtool_ops = {
  1002. .get_settings = ravb_get_settings,
  1003. .set_settings = ravb_set_settings,
  1004. .nway_reset = ravb_nway_reset,
  1005. .get_msglevel = ravb_get_msglevel,
  1006. .set_msglevel = ravb_set_msglevel,
  1007. .get_link = ethtool_op_get_link,
  1008. .get_strings = ravb_get_strings,
  1009. .get_ethtool_stats = ravb_get_ethtool_stats,
  1010. .get_sset_count = ravb_get_sset_count,
  1011. .get_ringparam = ravb_get_ringparam,
  1012. .set_ringparam = ravb_set_ringparam,
  1013. .get_ts_info = ravb_get_ts_info,
  1014. };
  1015. /* Network device open function for Ethernet AVB */
  1016. static int ravb_open(struct net_device *ndev)
  1017. {
  1018. struct ravb_private *priv = netdev_priv(ndev);
  1019. int error;
  1020. napi_enable(&priv->napi[RAVB_BE]);
  1021. napi_enable(&priv->napi[RAVB_NC]);
  1022. error = request_irq(ndev->irq, ravb_interrupt, IRQF_SHARED, ndev->name,
  1023. ndev);
  1024. if (error) {
  1025. netdev_err(ndev, "cannot request IRQ\n");
  1026. goto out_napi_off;
  1027. }
  1028. /* Device init */
  1029. error = ravb_dmac_init(ndev);
  1030. if (error)
  1031. goto out_free_irq;
  1032. ravb_emac_init(ndev);
  1033. /* Initialise PTP Clock driver */
  1034. ravb_ptp_init(ndev, priv->pdev);
  1035. netif_tx_start_all_queues(ndev);
  1036. /* PHY control start */
  1037. error = ravb_phy_start(ndev);
  1038. if (error)
  1039. goto out_ptp_stop;
  1040. return 0;
  1041. out_ptp_stop:
  1042. /* Stop PTP Clock driver */
  1043. ravb_ptp_stop(ndev);
  1044. out_free_irq:
  1045. free_irq(ndev->irq, ndev);
  1046. out_napi_off:
  1047. napi_disable(&priv->napi[RAVB_NC]);
  1048. napi_disable(&priv->napi[RAVB_BE]);
  1049. return error;
  1050. }
  1051. /* Timeout function for Ethernet AVB */
  1052. static void ravb_tx_timeout(struct net_device *ndev)
  1053. {
  1054. struct ravb_private *priv = netdev_priv(ndev);
  1055. netif_err(priv, tx_err, ndev,
  1056. "transmit timed out, status %08x, resetting...\n",
  1057. ravb_read(ndev, ISS));
  1058. /* tx_errors count up */
  1059. ndev->stats.tx_errors++;
  1060. schedule_work(&priv->work);
  1061. }
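/* Deferred TX timeout recovery: stop the hardware, rebuild the descriptor rings and bring the device back up */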
  1062. static void ravb_tx_timeout_work(struct work_struct *work)
  1063. {
  1064. struct ravb_private *priv = container_of(work, struct ravb_private,
  1065. work);
  1066. struct net_device *ndev = priv->ndev;
  1067. netif_tx_stop_all_queues(ndev);
  1068. /* Stop PTP Clock driver */
  1069. ravb_ptp_stop(ndev);
  1070. /* Wait for DMA stopping */
  1071. ravb_stop_dma(ndev);
  1072. ravb_ring_free(ndev, RAVB_BE);
  1073. ravb_ring_free(ndev, RAVB_NC);
  1074. /* Device init */
  1075. ravb_dmac_init(ndev);
  1076. ravb_emac_init(ndev);
  1077. /* Initialise PTP Clock driver */
  1078. ravb_ptp_init(ndev, priv->pdev);
  1079. netif_tx_start_all_queues(ndev);
  1080. }
  1081. /* Packet transmit function for Ethernet AVB */
  1082. static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
  1083. {
  1084. struct ravb_private *priv = netdev_priv(ndev);
  1085. struct ravb_tstamp_skb *ts_skb = NULL;
  1086. u16 q = skb_get_queue_mapping(skb);
  1087. struct ravb_tx_desc *desc;
  1088. unsigned long flags;
  1089. u32 dma_addr;
  1090. void *buffer;
  1091. u32 entry;
  1092. u32 tccr;
  1093. spin_lock_irqsave(&priv->lock, flags);
  1094. if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q]) {
  1095. netif_err(priv, tx_queued, ndev,
  1096. "still transmitting with the full ring!\n");
  1097. netif_stop_subqueue(ndev, q);
  1098. spin_unlock_irqrestore(&priv->lock, flags);
  1099. return NETDEV_TX_BUSY;
  1100. }
  1101. entry = priv->cur_tx[q] % priv->num_tx_ring[q];
1102. if (skb_put_padto(skb, ETH_ZLEN))
1103. goto exit;
1104. priv->tx_skb[q][entry] = skb;
  1105. buffer = PTR_ALIGN(priv->tx_buffers[q][entry], RAVB_ALIGN);
  1106. memcpy(buffer, skb->data, skb->len);
  1107. desc = &priv->tx_ring[q][entry];
  1108. desc->ds_tagl = cpu_to_le16(skb->len);
  1109. dma_addr = dma_map_single(&ndev->dev, buffer, skb->len, DMA_TO_DEVICE);
  1110. if (dma_mapping_error(&ndev->dev, dma_addr))
  1111. goto drop;
  1112. desc->dptr = cpu_to_le32(dma_addr);
  1113. /* TX timestamp required */
  1114. if (q == RAVB_NC) {
  1115. ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC);
  1116. if (!ts_skb) {
  1117. dma_unmap_single(&ndev->dev, dma_addr, skb->len,
  1118. DMA_TO_DEVICE);
  1119. goto drop;
  1120. }
  1121. ts_skb->skb = skb;
  1122. ts_skb->tag = priv->ts_skb_tag++;
  1123. priv->ts_skb_tag &= 0x3ff;
  1124. list_add_tail(&ts_skb->list, &priv->ts_skb_list);
  1125. /* TAG and timestamp required flag */
  1126. skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
  1127. skb_tx_timestamp(skb);
  1128. desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR;
1129. desc->ds_tagl |= cpu_to_le16(ts_skb->tag << 12);
  1130. }
  1131. /* Descriptor type must be set after all the above writes */
  1132. dma_wmb();
  1133. desc->die_dt = DT_FSINGLE;
  1134. tccr = ravb_read(ndev, TCCR);
  1135. if (!(tccr & (TCCR_TSRQ0 << q)))
  1136. ravb_write(ndev, tccr | (TCCR_TSRQ0 << q), TCCR);
  1137. priv->cur_tx[q]++;
  1138. if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q] &&
  1139. !ravb_tx_free(ndev, q))
  1140. netif_stop_subqueue(ndev, q);
  1141. exit:
  1142. mmiowb();
  1143. spin_unlock_irqrestore(&priv->lock, flags);
  1144. return NETDEV_TX_OK;
  1145. drop:
  1146. dev_kfree_skb_any(skb);
  1147. priv->tx_skb[q][entry] = NULL;
  1148. goto exit;
  1149. }
  1150. static u16 ravb_select_queue(struct net_device *ndev, struct sk_buff *skb,
  1151. void *accel_priv, select_queue_fallback_t fallback)
  1152. {
  1153. /* If skb needs TX timestamp, it is handled in network control queue */
  1154. return (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ? RAVB_NC :
  1155. RAVB_BE;
  1156. }
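/* Fold the hardware error counters (cleared by writing zero after reading) and the per-queue counters into ndev->stats */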
  1157. static struct net_device_stats *ravb_get_stats(struct net_device *ndev)
  1158. {
  1159. struct ravb_private *priv = netdev_priv(ndev);
  1160. struct net_device_stats *nstats, *stats0, *stats1;
  1161. nstats = &ndev->stats;
  1162. stats0 = &priv->stats[RAVB_BE];
  1163. stats1 = &priv->stats[RAVB_NC];
  1164. nstats->tx_dropped += ravb_read(ndev, TROCR);
  1165. ravb_write(ndev, 0, TROCR); /* (write clear) */
  1166. nstats->collisions += ravb_read(ndev, CDCR);
  1167. ravb_write(ndev, 0, CDCR); /* (write clear) */
  1168. nstats->tx_carrier_errors += ravb_read(ndev, LCCR);
  1169. ravb_write(ndev, 0, LCCR); /* (write clear) */
  1170. nstats->tx_carrier_errors += ravb_read(ndev, CERCR);
  1171. ravb_write(ndev, 0, CERCR); /* (write clear) */
  1172. nstats->tx_carrier_errors += ravb_read(ndev, CEECR);
  1173. ravb_write(ndev, 0, CEECR); /* (write clear) */
  1174. nstats->rx_packets = stats0->rx_packets + stats1->rx_packets;
  1175. nstats->tx_packets = stats0->tx_packets + stats1->tx_packets;
  1176. nstats->rx_bytes = stats0->rx_bytes + stats1->rx_bytes;
  1177. nstats->tx_bytes = stats0->tx_bytes + stats1->tx_bytes;
  1178. nstats->multicast = stats0->multicast + stats1->multicast;
  1179. nstats->rx_errors = stats0->rx_errors + stats1->rx_errors;
  1180. nstats->rx_crc_errors = stats0->rx_crc_errors + stats1->rx_crc_errors;
  1181. nstats->rx_frame_errors =
  1182. stats0->rx_frame_errors + stats1->rx_frame_errors;
  1183. nstats->rx_length_errors =
  1184. stats0->rx_length_errors + stats1->rx_length_errors;
  1185. nstats->rx_missed_errors =
  1186. stats0->rx_missed_errors + stats1->rx_missed_errors;
  1187. nstats->rx_over_errors =
  1188. stats0->rx_over_errors + stats1->rx_over_errors;
  1189. return nstats;
  1190. }
  1191. /* Update promiscuous bit */
  1192. static void ravb_set_rx_mode(struct net_device *ndev)
  1193. {
  1194. struct ravb_private *priv = netdev_priv(ndev);
  1195. unsigned long flags;
  1196. u32 ecmr;
  1197. spin_lock_irqsave(&priv->lock, flags);
  1198. ecmr = ravb_read(ndev, ECMR);
  1199. if (ndev->flags & IFF_PROMISC)
  1200. ecmr |= ECMR_PRM;
  1201. else
  1202. ecmr &= ~ECMR_PRM;
  1203. ravb_write(ndev, ecmr, ECMR);
  1204. mmiowb();
  1205. spin_unlock_irqrestore(&priv->lock, flags);
  1206. }
  1207. /* Device close function for Ethernet AVB */
  1208. static int ravb_close(struct net_device *ndev)
  1209. {
  1210. struct ravb_private *priv = netdev_priv(ndev);
  1211. struct ravb_tstamp_skb *ts_skb, *ts_skb2;
  1212. netif_tx_stop_all_queues(ndev);
  1213. /* Disable interrupts by clearing the interrupt masks. */
  1214. ravb_write(ndev, 0, RIC0);
  1215. ravb_write(ndev, 0, RIC1);
  1216. ravb_write(ndev, 0, RIC2);
  1217. ravb_write(ndev, 0, TIC);
  1218. /* Stop PTP Clock driver */
  1219. ravb_ptp_stop(ndev);
  1220. /* Set the config mode to stop the AVB-DMAC's processes */
  1221. if (ravb_stop_dma(ndev) < 0)
  1222. netdev_err(ndev,
  1223. "device will be stopped after h/w processes are done.\n");
  1224. /* Clear the timestamp list */
  1225. list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) {
  1226. list_del(&ts_skb->list);
  1227. kfree(ts_skb);
  1228. }
  1229. /* PHY disconnect */
  1230. if (priv->phydev) {
  1231. phy_stop(priv->phydev);
  1232. phy_disconnect(priv->phydev);
  1233. priv->phydev = NULL;
  1234. }
  1235. free_irq(ndev->irq, ndev);
  1236. napi_disable(&priv->napi[RAVB_NC]);
  1237. napi_disable(&priv->napi[RAVB_BE]);
  1238. /* Free all the skb's in the RX queue and the DMA buffers. */
  1239. ravb_ring_free(ndev, RAVB_BE);
  1240. ravb_ring_free(ndev, RAVB_NC);
  1241. return 0;
  1242. }
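/* Report the current hardware timestamping configuration back to user space (SIOCGHWTSTAMP) */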
  1243. static int ravb_hwtstamp_get(struct net_device *ndev, struct ifreq *req)
  1244. {
  1245. struct ravb_private *priv = netdev_priv(ndev);
  1246. struct hwtstamp_config config;
  1247. config.flags = 0;
  1248. config.tx_type = priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON :
  1249. HWTSTAMP_TX_OFF;
  1250. if (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE_V2_L2_EVENT)
  1251. config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
  1252. else if (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE_ALL)
  1253. config.rx_filter = HWTSTAMP_FILTER_ALL;
  1254. else
  1255. config.rx_filter = HWTSTAMP_FILTER_NONE;
  1256. return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
  1257. -EFAULT : 0;
  1258. }
  1259. /* Control hardware time stamping */
  1260. static int ravb_hwtstamp_set(struct net_device *ndev, struct ifreq *req)
  1261. {
  1262. struct ravb_private *priv = netdev_priv(ndev);
  1263. struct hwtstamp_config config;
  1264. u32 tstamp_rx_ctrl = RAVB_RXTSTAMP_ENABLED;
  1265. u32 tstamp_tx_ctrl;
  1266. if (copy_from_user(&config, req->ifr_data, sizeof(config)))
  1267. return -EFAULT;
  1268. /* Reserved for future extensions */
  1269. if (config.flags)
  1270. return -EINVAL;
  1271. switch (config.tx_type) {
  1272. case HWTSTAMP_TX_OFF:
  1273. tstamp_tx_ctrl = 0;
  1274. break;
  1275. case HWTSTAMP_TX_ON:
  1276. tstamp_tx_ctrl = RAVB_TXTSTAMP_ENABLED;
  1277. break;
  1278. default:
  1279. return -ERANGE;
  1280. }
  1281. switch (config.rx_filter) {
  1282. case HWTSTAMP_FILTER_NONE:
  1283. tstamp_rx_ctrl = 0;
  1284. break;
  1285. case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
  1286. tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
  1287. break;
  1288. default:
  1289. config.rx_filter = HWTSTAMP_FILTER_ALL;
  1290. tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_ALL;
  1291. }
  1292. priv->tstamp_tx_ctrl = tstamp_tx_ctrl;
  1293. priv->tstamp_rx_ctrl = tstamp_rx_ctrl;
  1294. return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
  1295. -EFAULT : 0;
  1296. }
  1297. /* ioctl to device function */
  1298. static int ravb_do_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
  1299. {
  1300. struct ravb_private *priv = netdev_priv(ndev);
  1301. struct phy_device *phydev = priv->phydev;
  1302. if (!netif_running(ndev))
  1303. return -EINVAL;
  1304. if (!phydev)
  1305. return -ENODEV;
  1306. switch (cmd) {
  1307. case SIOCGHWTSTAMP:
  1308. return ravb_hwtstamp_get(ndev, req);
  1309. case SIOCSHWTSTAMP:
  1310. return ravb_hwtstamp_set(ndev, req);
  1311. }
  1312. return phy_mii_ioctl(phydev, req, cmd);
  1313. }
  1314. static const struct net_device_ops ravb_netdev_ops = {
  1315. .ndo_open = ravb_open,
  1316. .ndo_stop = ravb_close,
  1317. .ndo_start_xmit = ravb_start_xmit,
  1318. .ndo_select_queue = ravb_select_queue,
  1319. .ndo_get_stats = ravb_get_stats,
  1320. .ndo_set_rx_mode = ravb_set_rx_mode,
  1321. .ndo_tx_timeout = ravb_tx_timeout,
  1322. .ndo_do_ioctl = ravb_do_ioctl,
  1323. .ndo_validate_addr = eth_validate_addr,
  1324. .ndo_set_mac_address = eth_mac_addr,
  1325. .ndo_change_mtu = eth_change_mtu,
  1326. };
  1327. /* MDIO bus init function */
  1328. static int ravb_mdio_init(struct ravb_private *priv)
  1329. {
  1330. struct platform_device *pdev = priv->pdev;
  1331. struct device *dev = &pdev->dev;
  1332. int error;
  1333. /* Bitbang init */
  1334. priv->mdiobb.ops = &bb_ops;
  1335. /* MII controller setting */
  1336. priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb);
  1337. if (!priv->mii_bus)
  1338. return -ENOMEM;
  1339. /* Hook up MII support for ethtool */
  1340. priv->mii_bus->name = "ravb_mii";
  1341. priv->mii_bus->parent = dev;
  1342. snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
  1343. pdev->name, pdev->id);
  1344. /* Register MDIO bus */
  1345. error = of_mdiobus_register(priv->mii_bus, dev->of_node);
  1346. if (error)
  1347. goto out_free_bus;
  1348. return 0;
  1349. out_free_bus:
  1350. free_mdio_bitbang(priv->mii_bus);
  1351. return error;
  1352. }
  1353. /* MDIO bus release function */
  1354. static int ravb_mdio_release(struct ravb_private *priv)
  1355. {
  1356. /* Unregister mdio bus */
  1357. mdiobus_unregister(priv->mii_bus);
  1358. /* Free bitbang info */
  1359. free_mdio_bitbang(priv->mii_bus);
  1360. return 0;
  1361. }
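/* Platform probe: map the register block, parse the DT properties, put the AVB-DMAC into CONFIG mode, allocate the descriptor base address table and register the net_device */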
  1362. static int ravb_probe(struct platform_device *pdev)
  1363. {
  1364. struct device_node *np = pdev->dev.of_node;
  1365. struct ravb_private *priv;
  1366. struct net_device *ndev;
  1367. int error, irq, q;
  1368. struct resource *res;
  1369. if (!np) {
  1370. dev_err(&pdev->dev,
  1371. "this driver is required to be instantiated from device tree\n");
  1372. return -EINVAL;
  1373. }
  1374. /* Get base address */
  1375. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  1376. if (!res) {
  1377. dev_err(&pdev->dev, "invalid resource\n");
  1378. return -EINVAL;
  1379. }
  1380. ndev = alloc_etherdev_mqs(sizeof(struct ravb_private),
  1381. NUM_TX_QUEUE, NUM_RX_QUEUE);
  1382. if (!ndev)
  1383. return -ENOMEM;
  1384. pm_runtime_enable(&pdev->dev);
  1385. pm_runtime_get_sync(&pdev->dev);
  1386. /* The Ether-specific entries in the device structure. */
  1387. ndev->base_addr = res->start;
  1388. ndev->dma = -1;
  1389. irq = platform_get_irq(pdev, 0);
  1390. if (irq < 0) {
  1391. error = -ENODEV;
  1392. goto out_release;
  1393. }
  1394. ndev->irq = irq;
  1395. SET_NETDEV_DEV(ndev, &pdev->dev);
  1396. priv = netdev_priv(ndev);
  1397. priv->ndev = ndev;
  1398. priv->pdev = pdev;
  1399. priv->num_tx_ring[RAVB_BE] = BE_TX_RING_SIZE;
  1400. priv->num_rx_ring[RAVB_BE] = BE_RX_RING_SIZE;
  1401. priv->num_tx_ring[RAVB_NC] = NC_TX_RING_SIZE;
  1402. priv->num_rx_ring[RAVB_NC] = NC_RX_RING_SIZE;
  1403. priv->addr = devm_ioremap_resource(&pdev->dev, res);
  1404. if (IS_ERR(priv->addr)) {
  1405. error = PTR_ERR(priv->addr);
  1406. goto out_release;
  1407. }
  1408. spin_lock_init(&priv->lock);
  1409. INIT_WORK(&priv->work, ravb_tx_timeout_work);
  1410. priv->phy_interface = of_get_phy_mode(np);
  1411. priv->no_avb_link = of_property_read_bool(np, "renesas,no-ether-link");
  1412. priv->avb_link_active_low =
  1413. of_property_read_bool(np, "renesas,ether-link-active-low");
  1414. /* Set function */
  1415. ndev->netdev_ops = &ravb_netdev_ops;
  1416. ndev->ethtool_ops = &ravb_ethtool_ops;
  1417. /* Set AVB config mode */
  1418. ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) | CCC_OPC_CONFIG,
  1419. CCC);
  1420. /* Set CSEL value */
  1421. ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_CSEL) | CCC_CSEL_HPB,
  1422. CCC);
  1423. /* Set GTI value */
  1424. ravb_write(ndev, ((1000 << 20) / 130) & GTI_TIV, GTI);
  1425. /* Request GTI loading */
  1426. ravb_write(ndev, ravb_read(ndev, GCCR) | GCCR_LTI, GCCR);
  1427. /* Allocate descriptor base address table */
  1428. priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM;
  1429. priv->desc_bat = dma_alloc_coherent(NULL, priv->desc_bat_size,
  1430. &priv->desc_bat_dma, GFP_KERNEL);
  1431. if (!priv->desc_bat) {
  1432. dev_err(&ndev->dev,
  1433. "Cannot allocate desc base address table (size %d bytes)\n",
  1434. priv->desc_bat_size);
  1435. error = -ENOMEM;
  1436. goto out_release;
  1437. }
  1438. for (q = RAVB_BE; q < DBAT_ENTRY_NUM; q++)
  1439. priv->desc_bat[q].die_dt = DT_EOS;
  1440. ravb_write(ndev, priv->desc_bat_dma, DBAT);
  1441. /* Initialise HW timestamp list */
  1442. INIT_LIST_HEAD(&priv->ts_skb_list);
  1443. /* Debug message level */
  1444. priv->msg_enable = RAVB_DEF_MSG_ENABLE;
  1445. /* Read and set MAC address */
  1446. ravb_read_mac_address(ndev, of_get_mac_address(np));
  1447. if (!is_valid_ether_addr(ndev->dev_addr)) {
  1448. dev_warn(&pdev->dev,
  1449. "no valid MAC address supplied, using a random one\n");
  1450. eth_hw_addr_random(ndev);
  1451. }
  1452. /* MDIO bus init */
  1453. error = ravb_mdio_init(priv);
  1454. if (error) {
  1455. dev_err(&ndev->dev, "failed to initialize MDIO\n");
  1456. goto out_dma_free;
  1457. }
  1458. netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll, 64);
  1459. netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll, 64);
  1460. /* Network device register */
  1461. error = register_netdev(ndev);
  1462. if (error)
  1463. goto out_napi_del;
  1464. /* Print device information */
  1465. netdev_info(ndev, "Base address at %#x, %pM, IRQ %d.\n",
  1466. (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
  1467. platform_set_drvdata(pdev, ndev);
  1468. return 0;
  1469. out_napi_del:
  1470. netif_napi_del(&priv->napi[RAVB_NC]);
  1471. netif_napi_del(&priv->napi[RAVB_BE]);
  1472. ravb_mdio_release(priv);
  1473. out_dma_free:
  1474. dma_free_coherent(NULL, priv->desc_bat_size, priv->desc_bat,
  1475. priv->desc_bat_dma);
  1476. out_release:
  1477. if (ndev)
  1478. free_netdev(ndev);
  1479. pm_runtime_put(&pdev->dev);
  1480. pm_runtime_disable(&pdev->dev);
  1481. return error;
  1482. }
  1483. static int ravb_remove(struct platform_device *pdev)
  1484. {
  1485. struct net_device *ndev = platform_get_drvdata(pdev);
  1486. struct ravb_private *priv = netdev_priv(ndev);
  1487. dma_free_coherent(NULL, priv->desc_bat_size, priv->desc_bat,
  1488. priv->desc_bat_dma);
  1489. /* Set reset mode */
  1490. ravb_write(ndev, CCC_OPC_RESET, CCC);
  1491. pm_runtime_put_sync(&pdev->dev);
  1492. unregister_netdev(ndev);
  1493. netif_napi_del(&priv->napi[RAVB_NC]);
  1494. netif_napi_del(&priv->napi[RAVB_BE]);
  1495. ravb_mdio_release(priv);
  1496. pm_runtime_disable(&pdev->dev);
  1497. free_netdev(ndev);
  1498. platform_set_drvdata(pdev, NULL);
  1499. return 0;
  1500. }
  1501. #ifdef CONFIG_PM
  1502. static int ravb_runtime_nop(struct device *dev)
  1503. {
  1504. /* Runtime PM callback shared between ->runtime_suspend()
  1505. * and ->runtime_resume(). Simply returns success.
  1506. *
  1507. * This driver re-initializes all registers after
  1508. * pm_runtime_get_sync() anyway so there is no need
  1509. * to save and restore registers here.
  1510. */
  1511. return 0;
  1512. }
  1513. static const struct dev_pm_ops ravb_dev_pm_ops = {
  1514. .runtime_suspend = ravb_runtime_nop,
  1515. .runtime_resume = ravb_runtime_nop,
  1516. };
  1517. #define RAVB_PM_OPS (&ravb_dev_pm_ops)
  1518. #else
  1519. #define RAVB_PM_OPS NULL
  1520. #endif
  1521. static const struct of_device_id ravb_match_table[] = {
  1522. { .compatible = "renesas,etheravb-r8a7790" },
  1523. { .compatible = "renesas,etheravb-r8a7794" },
  1524. { }
  1525. };
  1526. MODULE_DEVICE_TABLE(of, ravb_match_table);
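/* For illustration only: a minimal, hypothetical device-tree node that this
 * match table would bind against. The unit address, reg/interrupts cells and
 * the PHY phandle below are placeholders rather than values from a real board
 * file; only the compatible string and the property names follow what the
 * driver actually parses in ravb_probe().
 *
 *	ethernet@e6800000 {
 *		compatible = "renesas,etheravb-r8a7790";
 *		reg = <0 0xe6800000 0 0x800>;
 *		interrupts = <0 163 4>;
 *		phy-mode = "rmii";
 *		phy-handle = <&phy0>;
 *	};
 */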
  1527. static struct platform_driver ravb_driver = {
  1528. .probe = ravb_probe,
  1529. .remove = ravb_remove,
  1530. .driver = {
  1531. .name = "ravb",
  1532. .pm = RAVB_PM_OPS,
  1533. .of_match_table = ravb_match_table,
  1534. },
  1535. };
  1536. module_platform_driver(ravb_driver);
  1537. MODULE_AUTHOR("Mitsuhiro Kimura, Masaru Nagai");
  1538. MODULE_DESCRIPTION("Renesas Ethernet AVB driver");
  1539. MODULE_LICENSE("GPL v2");