altera_tse_main.c

// SPDX-License-Identifier: GPL-2.0-only
/* Altera Triple-Speed Ethernet MAC driver
 * Copyright (C) 2008-2014 Altera Corporation. All rights reserved
 *
 * Contributors:
 *   Dalon Westergreen
 *   Thomas Chou
 *   Ian Abbott
 *   Yuriy Kozlov
 *   Tobias Klauser
 *   Andriy Smolskyy
 *   Roman Bulgakov
 *   Dmytro Mytarchuk
 *   Matthew Gerlach
 *
 * Original driver contributed by SLS.
 * Major updates contributed by GlobalLogic
 */

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mii.h>
#include <linux/netdevice.h>
#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <asm/cacheflush.h>

#include "altera_utils.h"
#include "altera_tse.h"
#include "altera_sgdma.h"
#include "altera_msgdma.h"

static atomic_t instance_count = ATOMIC_INIT(~0);
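/* ~0 + 1 wraps to 0, so the first atomic_add_return(1, &instance_count)
 * in altera_tse_phy_get_addr_mdio_create() numbers the first MDIO bus 0.
 */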

/* Module parameters */
static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");

static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
				      NETIF_MSG_IFDOWN);

#define RX_DESCRIPTORS 64
static int dma_rx_num = RX_DESCRIPTORS;
module_param(dma_rx_num, int, 0644);
MODULE_PARM_DESC(dma_rx_num, "Number of descriptors in the RX list");

#define TX_DESCRIPTORS 64
static int dma_tx_num = TX_DESCRIPTORS;
module_param(dma_tx_num, int, 0644);
MODULE_PARM_DESC(dma_tx_num, "Number of descriptors in the TX list");

#define POLL_PHY (-1)

/* Make sure the DMA buffer size is larger than the max frame size plus
 * some alignment offset and a VLAN header. With a 1518-byte max frame,
 * a 4-byte VLAN header and 2 bytes of alignment headroom add up to
 * 1524 bytes, so 2048 is ample.
 */
#define ALTERA_RXDMABUFFER_SIZE	2048

/* Allow the network stack to resume queueing packets after we've
 * finished transmitting at least 1/4 of the packets in the queue.
 */
#define TSE_TX_THRESH(x)	(x->tx_ring_size / 4)

#define TXQUEUESTOP_THRESHOLD	2

static const struct of_device_id altera_tse_ids[];
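
/* Tx free space: tx_prod and tx_cons are free-running indices (only ever
 * incremented), so tx_prod - tx_cons is the number of in-flight buffers.
 * One slot is always kept unused so a full ring is distinguishable from
 * an empty one; hence the "- 1".
 */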
static inline u32 tse_tx_avail(struct altera_tse_private *priv)
{
	return priv->tx_cons + priv->tx_ring_size - priv->tx_prod - 1;
}

/* PCS Register read/write functions
 */
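/* The 16-bit PCS registers sit in the MAC's mdio_phy0 CSR window, one
 * register per 32-bit word; hence the regnum * 4 offset and the 0xffff
 * mask on reads.
 */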
static u16 sgmii_pcs_read(struct altera_tse_private *priv, int regnum)
{
	return csrrd32(priv->mac_dev,
		       tse_csroffs(mdio_phy0) + regnum * 4) & 0xffff;
}

static void sgmii_pcs_write(struct altera_tse_private *priv, int regnum,
			    u16 value)
{
	csrwr32(value, priv->mac_dev, tse_csroffs(mdio_phy0) + regnum * 4);
}

/* Check PCS scratch memory */
static int sgmii_pcs_scratch_test(struct altera_tse_private *priv, u16 value)
{
	sgmii_pcs_write(priv, SGMII_PCS_SCRATCH, value);
	return (sgmii_pcs_read(priv, SGMII_PCS_SCRATCH) == value);
}

/* MDIO specific functions
 */
static int altera_tse_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	struct net_device *ndev = bus->priv;
	struct altera_tse_private *priv = netdev_priv(ndev);

	/* set MDIO address */
	csrwr32((mii_id & 0x1f), priv->mac_dev,
		tse_csroffs(mdio_phy1_addr));

	/* get the data */
	return csrrd32(priv->mac_dev,
		       tse_csroffs(mdio_phy1) + regnum * 4) & 0xffff;
}

static int altera_tse_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
				 u16 value)
{
	struct net_device *ndev = bus->priv;
	struct altera_tse_private *priv = netdev_priv(ndev);

	/* set MDIO address */
	csrwr32((mii_id & 0x1f), priv->mac_dev,
		tse_csroffs(mdio_phy1_addr));

	/* write the data */
	csrwr32(value, priv->mac_dev, tse_csroffs(mdio_phy1) + regnum * 4);
	return 0;
}

static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
{
	struct altera_tse_private *priv = netdev_priv(dev);
	int ret;
	struct device_node *mdio_node = NULL;
	struct mii_bus *mdio = NULL;
	struct device_node *child_node = NULL;

	for_each_child_of_node(priv->device->of_node, child_node) {
		if (of_device_is_compatible(child_node, "altr,tse-mdio")) {
			mdio_node = child_node;
			break;
		}
	}

	if (mdio_node) {
		netdev_dbg(dev, "FOUND MDIO subnode\n");
	} else {
		netdev_dbg(dev, "NO MDIO subnode\n");
		return 0;
	}

	mdio = mdiobus_alloc();
	if (mdio == NULL) {
		netdev_err(dev, "Error allocating MDIO bus\n");
		return -ENOMEM;
	}

	mdio->name = ALTERA_TSE_RESOURCE_NAME;
	mdio->read = &altera_tse_mdio_read;
	mdio->write = &altera_tse_mdio_write;
	snprintf(mdio->id, MII_BUS_ID_SIZE, "%s-%u", mdio->name, id);

	mdio->priv = dev;
	mdio->parent = priv->device;

	ret = of_mdiobus_register(mdio, mdio_node);
	if (ret != 0) {
		netdev_err(dev, "Cannot register MDIO bus %s\n",
			   mdio->id);
		goto out_free_mdio;
	}

	if (netif_msg_drv(priv))
		netdev_info(dev, "MDIO bus %s: created\n", mdio->id);

	priv->mdio = mdio;
	return 0;
out_free_mdio:
	mdiobus_free(mdio);
	mdio = NULL;
	return ret;
}

static void altera_tse_mdio_destroy(struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);

	if (priv->mdio == NULL)
		return;

	if (netif_msg_drv(priv))
		netdev_info(dev, "MDIO bus %s: removed\n",
			    priv->mdio->id);

	mdiobus_unregister(priv->mdio);
	mdiobus_free(priv->mdio);
	priv->mdio = NULL;
}

static int tse_init_rx_buffer(struct altera_tse_private *priv,
			      struct tse_buffer *rxbuffer, int len)
{
	rxbuffer->skb = netdev_alloc_skb_ip_align(priv->dev, len);
	if (!rxbuffer->skb)
		return -ENOMEM;

	rxbuffer->dma_addr = dma_map_single(priv->device, rxbuffer->skb->data,
					    len,
					    DMA_FROM_DEVICE);

	if (dma_mapping_error(priv->device, rxbuffer->dma_addr)) {
		netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
		dev_kfree_skb_any(rxbuffer->skb);
		return -EINVAL;
	}
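
	/* netdev_alloc_skb_ip_align() reserves NET_IP_ALIGN (2) bytes of
	 * headroom, so the mapped address normally sits 2 bytes past a
	 * 32-bit boundary. Rounding it down gives the DMA engine an aligned
	 * start address; the two bytes the MAC prepends to each frame
	 * (RX shift-16, enabled in init_mac()) then land the frame back at
	 * skb->data, leaving the IP header 4-byte aligned.
	 */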
	rxbuffer->dma_addr &= (dma_addr_t)~3;
	rxbuffer->len = len;
	return 0;
}

static void tse_free_rx_buffer(struct altera_tse_private *priv,
			       struct tse_buffer *rxbuffer)
{
	struct sk_buff *skb = rxbuffer->skb;
	dma_addr_t dma_addr = rxbuffer->dma_addr;

	if (skb != NULL) {
		if (dma_addr)
			dma_unmap_single(priv->device, dma_addr,
					 rxbuffer->len,
					 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		rxbuffer->skb = NULL;
		rxbuffer->dma_addr = 0;
	}
}

/* Unmap and free Tx buffer resources
 */
static void tse_free_tx_buffer(struct altera_tse_private *priv,
			       struct tse_buffer *buffer)
{
	if (buffer->dma_addr) {
		if (buffer->mapped_as_page)
			dma_unmap_page(priv->device, buffer->dma_addr,
				       buffer->len, DMA_TO_DEVICE);
		else
			dma_unmap_single(priv->device, buffer->dma_addr,
					 buffer->len, DMA_TO_DEVICE);
		buffer->dma_addr = 0;
	}

	if (buffer->skb) {
		dev_kfree_skb_any(buffer->skb);
		buffer->skb = NULL;
	}
}

static int alloc_init_skbufs(struct altera_tse_private *priv)
{
	unsigned int rx_descs = priv->rx_ring_size;
	unsigned int tx_descs = priv->tx_ring_size;
	int ret = -ENOMEM;
	int i;

	/* Create Rx ring buffer */
	priv->rx_ring = kcalloc(rx_descs, sizeof(struct tse_buffer),
				GFP_KERNEL);
	if (!priv->rx_ring)
		goto err_rx_ring;

	/* Create Tx ring buffer */
	priv->tx_ring = kcalloc(tx_descs, sizeof(struct tse_buffer),
				GFP_KERNEL);
	if (!priv->tx_ring)
		goto err_tx_ring;

	priv->tx_cons = 0;
	priv->tx_prod = 0;

	/* Init Rx ring */
	for (i = 0; i < rx_descs; i++) {
		ret = tse_init_rx_buffer(priv, &priv->rx_ring[i],
					 priv->rx_dma_buf_sz);
		if (ret)
			goto err_init_rx_buffers;
	}

	priv->rx_cons = 0;
	priv->rx_prod = 0;

	return 0;
err_init_rx_buffers:
	while (--i >= 0)
		tse_free_rx_buffer(priv, &priv->rx_ring[i]);
	kfree(priv->tx_ring);
err_tx_ring:
	kfree(priv->rx_ring);
err_rx_ring:
	return ret;
}

static void free_skbufs(struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);
	unsigned int rx_descs = priv->rx_ring_size;
	unsigned int tx_descs = priv->tx_ring_size;
	int i;

	/* Release the DMA TX/RX socket buffers */
	for (i = 0; i < rx_descs; i++)
		tse_free_rx_buffer(priv, &priv->rx_ring[i]);
	for (i = 0; i < tx_descs; i++)
		tse_free_tx_buffer(priv, &priv->tx_ring[i]);

	kfree(priv->tx_ring);
	kfree(priv->rx_ring);
}

/* Reallocate the skb for the reception process
 */
static inline void tse_rx_refill(struct altera_tse_private *priv)
{
	unsigned int rxsize = priv->rx_ring_size;
	unsigned int entry;
	int ret;
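
	/* rx_cons counts buffers consumed by tse_rx(), rx_prod counts
	 * buffers handed back to the DMA engine; the difference is the
	 * number of ring slots still waiting for a fresh skb.
	 */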
	for (; priv->rx_cons - priv->rx_prod > 0;
	     priv->rx_prod++) {
		entry = priv->rx_prod % rxsize;
		if (likely(priv->rx_ring[entry].skb == NULL)) {
			ret = tse_init_rx_buffer(priv, &priv->rx_ring[entry],
						 priv->rx_dma_buf_sz);
			if (unlikely(ret != 0))
				break;
			priv->dmaops->add_rx_desc(priv, &priv->rx_ring[entry]);
		}
	}
}

/* Pull out the VLAN tag and fix up the packet
 */
static inline void tse_rx_vlan(struct net_device *dev, struct sk_buff *skb)
{
	struct ethhdr *eth_hdr;
	u16 vid;
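
	/* The hardware leaves the 802.1Q tag in-line, so untag in software:
	 * slide the 12 bytes of destination + source MAC forward by
	 * VLAN_HLEN (4) to overwrite the tag, drop the now-stale leading
	 * bytes with skb_pull(), and record the VID so the stack sees an
	 * ordinary hardware-accelerated tag.
	 */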
	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    !__vlan_get_tag(skb, &vid)) {
		eth_hdr = (struct ethhdr *)skb->data;
		memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2);
		skb_pull(skb, VLAN_HLEN);
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
	}
}

/* Receive a packet: retrieve and pass over to upper levels
 */
static int tse_rx(struct altera_tse_private *priv, int limit)
{
	unsigned int count = 0;
	unsigned int next_entry;
	struct sk_buff *skb;
	unsigned int entry = priv->rx_cons % priv->rx_ring_size;
	u32 rxstatus;
	u16 pktlength;
	u16 pktstatus;
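
	/* get_rx_status() returns the receive response word: error/status
	 * flags in the upper 16 bits, the DMA transfer length in the lower
	 * 16 bits, or 0 when no response is pending.
	 */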
	/* Check count < limit first, because get_rx_status() changes the
	 * response FIFO: reading the last byte of a response pops it from
	 * the FIFO, so once get_rx_status() reports a pending response we
	 * must process that packet.
	 */
	while ((count < limit) &&
	       ((rxstatus = priv->dmaops->get_rx_status(priv)) != 0)) {
		pktstatus = rxstatus >> 16;
		pktlength = rxstatus & 0xffff;

		if ((pktstatus & 0xFF) || (pktlength == 0))
			netdev_err(priv->dev,
				   "RCV pktstatus %08X pktlength %08X\n",
				   pktstatus, pktlength);

		/* DMA transfer from the TSE starts with 2 additional bytes
		 * for IP payload alignment. The status returned by
		 * get_rx_status() contains the DMA transfer length; the
		 * packet itself is 2 bytes shorter.
		 */
		pktlength -= 2;

		count++;
		next_entry = (++priv->rx_cons) % priv->rx_ring_size;

		skb = priv->rx_ring[entry].skb;
		if (unlikely(!skb)) {
			netdev_err(priv->dev,
				   "%s: Inconsistent Rx descriptor chain\n",
				   __func__);
			priv->dev->stats.rx_dropped++;
			break;
		}
		priv->rx_ring[entry].skb = NULL;

		skb_put(skb, pktlength);

		dma_unmap_single(priv->device, priv->rx_ring[entry].dma_addr,
				 priv->rx_ring[entry].len, DMA_FROM_DEVICE);

		if (netif_msg_pktdata(priv)) {
			netdev_info(priv->dev, "frame received %d bytes\n",
				    pktlength);
			print_hex_dump(KERN_ERR, "data: ", DUMP_PREFIX_OFFSET,
				       16, 1, skb->data, pktlength, true);
		}

		tse_rx_vlan(priv->dev, skb);

		skb->protocol = eth_type_trans(skb, priv->dev);
		skb_checksum_none_assert(skb);

		napi_gro_receive(&priv->napi, skb);

		priv->dev->stats.rx_packets++;
		priv->dev->stats.rx_bytes += pktlength;

		entry = next_entry;

		tse_rx_refill(priv);
	}

	return count;
}

/* Reclaim resources after transmission completes
 */
static int tse_tx_complete(struct altera_tse_private *priv)
{
	unsigned int txsize = priv->tx_ring_size;
	u32 ready;
	unsigned int entry;
	struct tse_buffer *tx_buff;
	int txcomplete = 0;

	spin_lock(&priv->tx_lock);

	ready = priv->dmaops->tx_completions(priv);

	/* Free sent buffers */
	while (ready && (priv->tx_cons != priv->tx_prod)) {
		entry = priv->tx_cons % txsize;
		tx_buff = &priv->tx_ring[entry];

		if (netif_msg_tx_done(priv))
			netdev_dbg(priv->dev, "%s: curr %d, dirty %d\n",
				   __func__, priv->tx_prod, priv->tx_cons);

		if (likely(tx_buff->skb))
			priv->dev->stats.tx_packets++;

		tse_free_tx_buffer(priv, tx_buff);
		priv->tx_cons++;

		txcomplete++;
		ready--;
	}

	if (unlikely(netif_queue_stopped(priv->dev) &&
		     tse_tx_avail(priv) > TSE_TX_THRESH(priv))) {
		if (netif_msg_tx_done(priv))
			netdev_dbg(priv->dev, "%s: restart transmit\n",
				   __func__);
		netif_wake_queue(priv->dev);
	}

	spin_unlock(&priv->tx_lock);
	return txcomplete;
}

/* NAPI polling function
 */
static int tse_poll(struct napi_struct *napi, int budget)
{
	struct altera_tse_private *priv =
			container_of(napi, struct altera_tse_private, napi);
	int rxcomplete = 0;
	unsigned long flags;

	tse_tx_complete(priv);

	rxcomplete = tse_rx(priv, budget);

	if (rxcomplete < budget) {
		napi_complete_done(napi, rxcomplete);

		netdev_dbg(priv->dev,
			   "NAPI Complete, did %d packets with budget %d\n",
			   rxcomplete, budget);

		spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
		priv->dmaops->enable_rxirq(priv);
		priv->dmaops->enable_txirq(priv);
		spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
	}
	return rxcomplete;
}

/* DMA TX & RX FIFO interrupt service routine
 */
static irqreturn_t altera_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct altera_tse_private *priv;

	if (unlikely(!dev)) {
		pr_err("%s: invalid dev pointer\n", __func__);
		return IRQ_NONE;
	}
	priv = netdev_priv(dev);

	spin_lock(&priv->rxdma_irq_lock);
	/* reset IRQs */
	priv->dmaops->clear_rxirq(priv);
	priv->dmaops->clear_txirq(priv);
	spin_unlock(&priv->rxdma_irq_lock);

	if (likely(napi_schedule_prep(&priv->napi))) {
		spin_lock(&priv->rxdma_irq_lock);
		priv->dmaops->disable_rxirq(priv);
		priv->dmaops->disable_txirq(priv);
		spin_unlock(&priv->rxdma_irq_lock);
		__napi_schedule(&priv->napi);
	}

	return IRQ_HANDLED;
}

/* Transmit a packet (called by the kernel). Dispatches to either the
 * SGDMA or the MSGDMA transmit method. Scatter/gather is not supported,
 * so the packet is assumed to be a single physically contiguous
 * fragment starting at skb->data, of length skb_headlen(skb).
 */
static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);
	unsigned int txsize = priv->tx_ring_size;
	unsigned int entry;
	struct tse_buffer *buffer = NULL;
	int nfrags = skb_shinfo(skb)->nr_frags;
	unsigned int nopaged_len = skb_headlen(skb);
	enum netdev_tx ret = NETDEV_TX_OK;
	dma_addr_t dma_addr;

	spin_lock_bh(&priv->tx_lock);

	if (unlikely(tse_tx_avail(priv) < nfrags + 1)) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);
			/* This is a hard error, log it. */
			netdev_err(priv->dev,
				   "%s: Tx list full when queue awake\n",
				   __func__);
		}
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	/* Map the first skb fragment */
	entry = priv->tx_prod % txsize;
	buffer = &priv->tx_ring[entry];

	dma_addr = dma_map_single(priv->device, skb->data, nopaged_len,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(priv->device, dma_addr)) {
		netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
		/* NETDEV_TX_OK means the skb was consumed, so it must be
		 * freed here or it leaks.
		 */
		dev_kfree_skb_any(skb);
		ret = NETDEV_TX_OK;
		goto out;
	}

	buffer->skb = skb;
	buffer->dma_addr = dma_addr;
	buffer->len = nopaged_len;

	priv->dmaops->tx_buffer(priv, buffer);

	skb_tx_timestamp(skb);

	priv->tx_prod++;
	dev->stats.tx_bytes += skb->len;

	if (unlikely(tse_tx_avail(priv) <= TXQUEUESTOP_THRESHOLD)) {
		if (netif_msg_hw(priv))
			netdev_dbg(priv->dev, "%s: stopping transmit queue\n",
				   __func__);
		netif_stop_queue(dev);
	}

out:
	spin_unlock_bh(&priv->tx_lock);

	return ret;
}

/* Called every time the controller might need to be made
 * aware of new link state. The PHY code conveys this
 * information through variables in the phydev structure, and this
 * function converts those variables into the appropriate
 * register values, and can bring down the device if needed.
 */
static void altera_tse_adjust_link(struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	int new_state = 0;

	/* only change config if there is a link */
	spin_lock(&priv->mac_cfg_lock);
	if (phydev->link) {
		/* Read old config */
		u32 cfg_reg = ioread32(&priv->mac_dev->command_config);

		/* Check duplex */
		if (phydev->duplex != priv->oldduplex) {
			new_state = 1;
			if (!(phydev->duplex))
				cfg_reg |= MAC_CMDCFG_HD_ENA;
			else
				cfg_reg &= ~MAC_CMDCFG_HD_ENA;

			netdev_dbg(priv->dev, "%s: Link duplex = 0x%x\n",
				   dev->name, phydev->duplex);

			priv->oldduplex = phydev->duplex;
		}

		/* Check speed */
		if (phydev->speed != priv->oldspeed) {
			new_state = 1;
			switch (phydev->speed) {
			case 1000:
				cfg_reg |= MAC_CMDCFG_ETH_SPEED;
				cfg_reg &= ~MAC_CMDCFG_ENA_10;
				break;
			case 100:
				cfg_reg &= ~MAC_CMDCFG_ETH_SPEED;
				cfg_reg &= ~MAC_CMDCFG_ENA_10;
				break;
			case 10:
				cfg_reg &= ~MAC_CMDCFG_ETH_SPEED;
				cfg_reg |= MAC_CMDCFG_ENA_10;
				break;
			default:
				if (netif_msg_link(priv))
					netdev_warn(dev, "Speed (%d) is not 10/100/1000!\n",
						    phydev->speed);
				break;
			}
			priv->oldspeed = phydev->speed;
		}
		iowrite32(cfg_reg, &priv->mac_dev->command_config);

		if (!priv->oldlink) {
			new_state = 1;
			priv->oldlink = 1;
		}
	} else if (priv->oldlink) {
		new_state = 1;
		priv->oldlink = 0;
		priv->oldspeed = 0;
		priv->oldduplex = -1;
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);

	spin_unlock(&priv->mac_cfg_lock);
}

static struct phy_device *connect_local_phy(struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);
	struct phy_device *phydev = NULL;
	char phy_id_fmt[MII_BUS_ID_SIZE + 3];

	if (priv->phy_addr != POLL_PHY) {
		snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
			 priv->mdio->id, priv->phy_addr);

		netdev_dbg(dev, "trying to attach to %s\n", phy_id_fmt);

		phydev = phy_connect(dev, phy_id_fmt, &altera_tse_adjust_link,
				     priv->phy_iface);
		if (IS_ERR(phydev)) {
			netdev_err(dev, "Could not attach to PHY\n");
			phydev = NULL;
		}
	} else {
		int ret;

		phydev = phy_find_first(priv->mdio);
		if (phydev == NULL) {
			netdev_err(dev, "No PHY found\n");
			return phydev;
		}

		ret = phy_connect_direct(dev, phydev, &altera_tse_adjust_link,
					 priv->phy_iface);
		if (ret != 0) {
			netdev_err(dev, "Could not attach to PHY\n");
			phydev = NULL;
		}
	}
	return phydev;
}

static int altera_tse_phy_get_addr_mdio_create(struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);
	struct device_node *np = priv->device->of_node;
	int ret = 0;

	priv->phy_iface = of_get_phy_mode(np);

	/* Don't look up a PHY address or create an MDIO bus if no PHY is
	 * present
	 */
	if (!priv->phy_iface)
		return 0;

	/* try to get PHY address from device tree, use PHY autodetection if
	 * no valid address is given
	 */
	if (of_property_read_u32(priv->device->of_node, "phy-addr",
				 &priv->phy_addr)) {
		priv->phy_addr = POLL_PHY;
	}

	if (!((priv->phy_addr == POLL_PHY) ||
	      ((priv->phy_addr >= 0) && (priv->phy_addr < PHY_MAX_ADDR)))) {
		netdev_err(dev, "invalid phy-addr specified %d\n",
			   priv->phy_addr);
		return -ENODEV;
	}

	/* Create/attach to MDIO bus */
	ret = altera_tse_mdio_create(dev,
				     atomic_add_return(1, &instance_count));

	if (ret)
		return -ENODEV;

	return 0;
}

/* Initialize driver's PHY state, and attach to the PHY
 */
static int init_phy(struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);
	struct phy_device *phydev;
	struct device_node *phynode;
	bool fixed_link = false;
	int rc = 0;

	/* Skip PHY initialization if no PHY is present */
	if (!priv->phy_iface)
		return 0;

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	phynode = of_parse_phandle(priv->device->of_node, "phy-handle", 0);

	if (!phynode) {
		/* check if a fixed-link is defined in device-tree */
		if (of_phy_is_fixed_link(priv->device->of_node)) {
			rc = of_phy_register_fixed_link(priv->device->of_node);
			if (rc < 0) {
				netdev_err(dev, "cannot register fixed PHY\n");
				return rc;
			}

			/* In the case of a fixed PHY, the DT node associated
			 * to the PHY is the Ethernet MAC DT node.
			 */
			phynode = of_node_get(priv->device->of_node);
			fixed_link = true;

			netdev_dbg(dev, "fixed-link detected\n");
			phydev = of_phy_connect(dev, phynode,
						&altera_tse_adjust_link,
						0, priv->phy_iface);
		} else {
			netdev_dbg(dev, "no phy-handle found\n");
			if (!priv->mdio) {
				netdev_err(dev, "No phy-handle nor local mdio specified\n");
				return -ENODEV;
			}
			phydev = connect_local_phy(dev);
		}
	} else {
		netdev_dbg(dev, "phy-handle found\n");
		phydev = of_phy_connect(dev, phynode,
					&altera_tse_adjust_link, 0, priv->phy_iface);
	}
	of_node_put(phynode);

	if (!phydev) {
		netdev_err(dev, "Could not find the PHY\n");
		if (fixed_link)
			of_phy_deregister_fixed_link(priv->device->of_node);
		return -ENODEV;
	}

	/* Stop advertising 1000BASE-T capability if the interface is not
	 * GMII
	 */
	if ((priv->phy_iface == PHY_INTERFACE_MODE_MII) ||
	    (priv->phy_iface == PHY_INTERFACE_MODE_RMII))
		phy_set_max_speed(phydev, SPEED_100);

	/* Broken HW is sometimes missing the pull-up resistor on the
	 * MDIO line, which results in reads to non-existent devices returning
	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
	 * device as well. If a fixed-link is used the phy_id is always 0.
	 * Note: phydev->phy_id is the result of reading the UID PHY registers.
	 */
	if ((phydev->phy_id == 0) && !fixed_link) {
		netdev_err(dev, "Bad PHY UID 0x%08x\n", phydev->phy_id);
		phy_disconnect(phydev);
		return -ENODEV;
	}

	netdev_dbg(dev, "attached to PHY %d UID 0x%08x Link = %d\n",
		   phydev->mdio.addr, phydev->phy_id, phydev->link);

	return 0;
}

static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr)
{
	u32 msb;
	u32 lsb;
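
	/* The first four octets are packed little-endian into mac_addr_0
	 * (addr[0] in the least significant byte) and the remaining two
	 * into the low half of mac_addr_1.
	 */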
	msb = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
	lsb = ((addr[5] << 8) | addr[4]) & 0xffff;

	/* Set primary MAC address */
	csrwr32(msb, priv->mac_dev, tse_csroffs(mac_addr_0));
	csrwr32(lsb, priv->mac_dev, tse_csroffs(mac_addr_1));
}

/* MAC software reset.
 * When reset is triggered, the MAC function completes the current
 * transmission or reception, and subsequently disables the transmit and
 * receive logic, flushes the receive FIFO buffer, and resets the statistics
 * counters.
 */
static int reset_mac(struct altera_tse_private *priv)
{
	int counter;
	u32 dat;

	dat = csrrd32(priv->mac_dev, tse_csroffs(command_config));
	dat &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);
	dat |= MAC_CMDCFG_SW_RESET | MAC_CMDCFG_CNT_RESET;
	csrwr32(dat, priv->mac_dev, tse_csroffs(command_config));

	counter = 0;
	while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
		if (tse_bit_is_clear(priv->mac_dev, tse_csroffs(command_config),
				     MAC_CMDCFG_SW_RESET))
			break;
		udelay(1);
	}

	if (counter >= ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
		dat = csrrd32(priv->mac_dev, tse_csroffs(command_config));
		dat &= ~MAC_CMDCFG_SW_RESET;
		csrwr32(dat, priv->mac_dev, tse_csroffs(command_config));
		return -1;
	}
	return 0;
}

/* Initialize MAC core registers
 */
static int init_mac(struct altera_tse_private *priv)
{
	unsigned int cmd = 0;
	u32 frm_length;

	/* Setup Rx FIFO */
	csrwr32(priv->rx_fifo_depth - ALTERA_TSE_RX_SECTION_EMPTY,
		priv->mac_dev, tse_csroffs(rx_section_empty));

	csrwr32(ALTERA_TSE_RX_SECTION_FULL, priv->mac_dev,
		tse_csroffs(rx_section_full));

	csrwr32(ALTERA_TSE_RX_ALMOST_EMPTY, priv->mac_dev,
		tse_csroffs(rx_almost_empty));

	csrwr32(ALTERA_TSE_RX_ALMOST_FULL, priv->mac_dev,
		tse_csroffs(rx_almost_full));

	/* Setup Tx FIFO */
	csrwr32(priv->tx_fifo_depth - ALTERA_TSE_TX_SECTION_EMPTY,
		priv->mac_dev, tse_csroffs(tx_section_empty));

	csrwr32(ALTERA_TSE_TX_SECTION_FULL, priv->mac_dev,
		tse_csroffs(tx_section_full));

	csrwr32(ALTERA_TSE_TX_ALMOST_EMPTY, priv->mac_dev,
		tse_csroffs(tx_almost_empty));

	csrwr32(ALTERA_TSE_TX_ALMOST_FULL, priv->mac_dev,
		tse_csroffs(tx_almost_full));

	/* MAC Address Configuration */
	tse_update_mac_addr(priv, priv->dev->dev_addr);

	/* MAC Function Configuration */
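	/* With the default 1500-byte MTU this is ETH_HLEN (14) + 1500 +
	 * ETH_FCS_LEN (4) = 1518 bytes.
	 */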
	frm_length = ETH_HLEN + priv->dev->mtu + ETH_FCS_LEN;
	csrwr32(frm_length, priv->mac_dev, tse_csroffs(frm_length));

	csrwr32(ALTERA_TSE_TX_IPG_LENGTH, priv->mac_dev,
		tse_csroffs(tx_ipg_length));

	/* Enable RX shift-16 so every received frame is prefixed with two
	 * padding bytes, aligning the IP header on a 32-bit boundary;
	 * disable TX shift-16 and CRC omission on transmit.
	 */
	tse_set_bit(priv->mac_dev, tse_csroffs(rx_cmd_stat),
		    ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16);

	tse_clear_bit(priv->mac_dev, tse_csroffs(tx_cmd_stat),
		      ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 |
		      ALTERA_TSE_TX_CMD_STAT_OMIT_CRC);

	/* Set the MAC options */
	cmd = csrrd32(priv->mac_dev, tse_csroffs(command_config));
	cmd &= ~MAC_CMDCFG_PAD_EN;	/* No padding Removal on Receive */
	cmd &= ~MAC_CMDCFG_CRC_FWD;	/* CRC Removal */
	cmd |= MAC_CMDCFG_RX_ERR_DISC;	/* Automatically discard frames
					 * with CRC errors
					 */
	cmd |= MAC_CMDCFG_CNTL_FRM_ENA;
	cmd &= ~MAC_CMDCFG_TX_ENA;
	cmd &= ~MAC_CMDCFG_RX_ENA;

	/* Default speed and duplex setting, full/100 */
	cmd &= ~MAC_CMDCFG_HD_ENA;
	cmd &= ~MAC_CMDCFG_ETH_SPEED;
	cmd &= ~MAC_CMDCFG_ENA_10;

	csrwr32(cmd, priv->mac_dev, tse_csroffs(command_config));

	csrwr32(ALTERA_TSE_PAUSE_QUANTA, priv->mac_dev,
		tse_csroffs(pause_quanta));

	if (netif_msg_hw(priv))
		dev_dbg(priv->device,
			"MAC post-initialization: CMD_CONFIG = 0x%08x\n", cmd);

	return 0;
}

/* Start/stop MAC transmission logic
 */
static void tse_set_mac(struct altera_tse_private *priv, bool enable)
{
	u32 value = csrrd32(priv->mac_dev, tse_csroffs(command_config));

	if (enable)
		value |= MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA;
	else
		value &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);

	csrwr32(value, priv->mac_dev, tse_csroffs(command_config));
}

/* Change the MTU
 */
static int tse_change_mtu(struct net_device *dev, int new_mtu)
{
	if (netif_running(dev)) {
		netdev_err(dev, "must be stopped to change its MTU\n");
		return -EBUSY;
	}

	dev->mtu = new_mtu;
	netdev_update_features(dev);

	return 0;
}

static void altera_tse_set_mcfilter(struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);
	int i;
	struct netdev_hw_addr *ha;

	/* clear the hash filter */
	for (i = 0; i < 64; i++)
		csrwr32(0, priv->mac_dev, tse_csroffs(hash_table) + i * 4);

	netdev_for_each_mc_addr(ha, dev) {
		unsigned int hash = 0;
		int mac_octet;
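
		/* Reduce each address octet to its parity (XOR of its eight
		 * bits); the six parity bits, taken from addr[5] down to
		 * addr[0], form the 6-bit index into the 64-entry hash
		 * table.
		 */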
		for (mac_octet = 5; mac_octet >= 0; mac_octet--) {
			unsigned char xor_bit = 0;
			unsigned char octet = ha->addr[mac_octet];
			unsigned int bitshift;

			for (bitshift = 0; bitshift < 8; bitshift++)
				xor_bit ^= ((octet >> bitshift) & 0x01);

			hash = (hash << 1) | xor_bit;
		}
		csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + hash * 4);
	}
}

static void altera_tse_set_mcfilterall(struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);
	int i;

	/* set the hash filter */
	for (i = 0; i < 64; i++)
		csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + i * 4);
}

/* Set or clear the multicast filter for this adapter, using the hardware
 * hash filter
 */
static void tse_set_rx_mode_hashfilter(struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);

	spin_lock(&priv->mac_cfg_lock);

	if (dev->flags & IFF_PROMISC)
		tse_set_bit(priv->mac_dev, tse_csroffs(command_config),
			    MAC_CMDCFG_PROMIS_EN);

	if (dev->flags & IFF_ALLMULTI)
		altera_tse_set_mcfilterall(dev);
	else
		altera_tse_set_mcfilter(dev);

	spin_unlock(&priv->mac_cfg_lock);
}

/* Set or clear the multicast filter for this adapter, falling back to
 * promiscuous mode whenever any filtering would be required
 */
static void tse_set_rx_mode(struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);

	spin_lock(&priv->mac_cfg_lock);

	if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) ||
	    !netdev_mc_empty(dev) || !netdev_uc_empty(dev))
		tse_set_bit(priv->mac_dev, tse_csroffs(command_config),
			    MAC_CMDCFG_PROMIS_EN);
	else
		tse_clear_bit(priv->mac_dev, tse_csroffs(command_config),
			      MAC_CMDCFG_PROMIS_EN);

	spin_unlock(&priv->mac_cfg_lock);
}

/* Initialise (if necessary) the SGMII PCS component
 */
static int init_sgmii_pcs(struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);
	int n;
	unsigned int tmp_reg = 0;

	if (priv->phy_iface != PHY_INTERFACE_MODE_SGMII)
		return 0; /* Nothing to do, not in SGMII mode */

	/* The TSE SGMII PCS block looks a little like a PHY: it is
	 * mapped into the zeroth MDIO space of the MAC and it has
	 * ID registers like a PHY would. Sadly this is often
	 * configured to zeroes, so don't be surprised if it does
	 * show 0x00000000.
	 */
	if (sgmii_pcs_scratch_test(priv, 0x0000) &&
	    sgmii_pcs_scratch_test(priv, 0xffff) &&
	    sgmii_pcs_scratch_test(priv, 0xa5a5) &&
	    sgmii_pcs_scratch_test(priv, 0x5a5a)) {
		netdev_info(dev, "PCS PHY ID: 0x%04x%04x\n",
			    sgmii_pcs_read(priv, MII_PHYSID1),
			    sgmii_pcs_read(priv, MII_PHYSID2));
	} else {
		netdev_err(dev, "SGMII PCS Scratch memory test failed.\n");
		return -ENOMEM;
	}

	/* Starting on page 5-29 of the MegaCore Function User Guide:
	 * set the SGMII Link timer to 1.6 ms
	 */
	sgmii_pcs_write(priv, SGMII_PCS_LINK_TIMER_0, 0x0D40);
	sgmii_pcs_write(priv, SGMII_PCS_LINK_TIMER_1, 0x03);
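
	/* The two writes above form the value 0x30D40 = 200000; at the
	 * PCS's 8 ns (125 MHz) tick that comes to the 1.6 ms link timer
	 * (assuming the tick rate documented in the user guide).
	 */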
	/* Enable SGMII Interface and Enable SGMII Auto Negotiation */
	sgmii_pcs_write(priv, SGMII_PCS_IF_MODE, 0x3);

	/* Enable Autonegotiation */
	tmp_reg = sgmii_pcs_read(priv, MII_BMCR);
	tmp_reg |= (BMCR_SPEED1000 | BMCR_FULLDPLX | BMCR_ANENABLE);
	sgmii_pcs_write(priv, MII_BMCR, tmp_reg);

	/* Reset PCS block */
	tmp_reg |= BMCR_RESET;
	sgmii_pcs_write(priv, MII_BMCR, tmp_reg);

	for (n = 0; n < SGMII_PCS_SW_RESET_TIMEOUT; n++) {
		if (!(sgmii_pcs_read(priv, MII_BMCR) & BMCR_RESET)) {
			netdev_info(dev, "SGMII PCS block initialised OK\n");
			return 0;
		}
		udelay(1);
	}

	/* We failed to reset the block, return a timeout */
	netdev_err(dev, "SGMII PCS block reset failed.\n");
	return -ETIMEDOUT;
}

/* Open and initialize the interface
 */
static int tse_open(struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);
	int ret = 0;
	int i;
	unsigned long flags;

	/* Reset and configure TSE MAC and probe associated PHY */
	ret = priv->dmaops->init_dma(priv);
	if (ret != 0) {
		netdev_err(dev, "Cannot initialize DMA\n");
		goto phy_error;
	}

	if (netif_msg_ifup(priv))
		netdev_warn(dev, "device MAC address %pM\n",
			    dev->dev_addr);
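
	/* The megacore revision encodes major.minor in the high and low
	 * bytes (see the probe printout), so this warns for anything
	 * outside the presumably validated 13.0-14.0 range.
	 */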
	if ((priv->revision < 0xd00) || (priv->revision > 0xe00))
		netdev_warn(dev, "TSE revision %x\n", priv->revision);

	spin_lock(&priv->mac_cfg_lock);

	/* no-op if MAC not operating in SGMII mode */
	ret = init_sgmii_pcs(dev);
	if (ret) {
		netdev_err(dev,
			   "Cannot init the SGMII PCS (error: %d)\n", ret);
		spin_unlock(&priv->mac_cfg_lock);
		goto phy_error;
	}

	ret = reset_mac(priv);
	/* Note that reset_mac will fail if the clocks are gated by the PHY
	 * due to the PHY being put into isolation or power down mode.
	 * This is not an error if reset fails due to no clock.
	 */
	if (ret)
		netdev_dbg(dev, "Cannot reset MAC core (error: %d)\n", ret);

	ret = init_mac(priv);
	spin_unlock(&priv->mac_cfg_lock);
	if (ret) {
		netdev_err(dev, "Cannot init MAC core (error: %d)\n", ret);
		goto alloc_skbuf_error;
	}

	priv->dmaops->reset_dma(priv);

	/* Create and initialize the TX/RX descriptors chains. */
	priv->rx_ring_size = dma_rx_num;
	priv->tx_ring_size = dma_tx_num;
	ret = alloc_init_skbufs(priv);
	if (ret) {
		netdev_err(dev, "DMA descriptors initialization failed\n");
		goto alloc_skbuf_error;
	}

	/* Register RX interrupt */
	ret = request_irq(priv->rx_irq, altera_isr, IRQF_SHARED,
			  dev->name, dev);
	if (ret) {
		netdev_err(dev, "Unable to register RX interrupt %d\n",
			   priv->rx_irq);
		goto init_error;
	}

	/* Register TX interrupt */
	ret = request_irq(priv->tx_irq, altera_isr, IRQF_SHARED,
			  dev->name, dev);
	if (ret) {
		netdev_err(dev, "Unable to register TX interrupt %d\n",
			   priv->tx_irq);
		goto tx_request_irq_error;
	}

	/* Enable DMA interrupts */
	spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
	priv->dmaops->enable_rxirq(priv);
	priv->dmaops->enable_txirq(priv);

	/* Setup RX descriptor chain */
	for (i = 0; i < priv->rx_ring_size; i++)
		priv->dmaops->add_rx_desc(priv, &priv->rx_ring[i]);

	spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);

	if (dev->phydev)
		phy_start(dev->phydev);

	napi_enable(&priv->napi);
	netif_start_queue(dev);

	priv->dmaops->start_rxdma(priv);

	/* Start MAC Rx/Tx */
	spin_lock(&priv->mac_cfg_lock);
	tse_set_mac(priv, true);
	spin_unlock(&priv->mac_cfg_lock);

	return 0;

tx_request_irq_error:
	free_irq(priv->rx_irq, dev);
init_error:
	free_skbufs(dev);
alloc_skbuf_error:
phy_error:
	return ret;
}

/* Stop TSE MAC interface and put the device in an inactive state
 */
static int tse_shutdown(struct net_device *dev)
{
	struct altera_tse_private *priv = netdev_priv(dev);
	int ret;
	unsigned long flags;

	/* Stop the PHY */
	if (dev->phydev)
		phy_stop(dev->phydev);

	netif_stop_queue(dev);
	napi_disable(&priv->napi);

	/* Disable DMA interrupts */
	spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
	priv->dmaops->disable_rxirq(priv);
	priv->dmaops->disable_txirq(priv);
	spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);

	/* Free the IRQ lines */
	free_irq(priv->rx_irq, dev);
	free_irq(priv->tx_irq, dev);

	/* disable and reset the MAC, empties fifo */
	spin_lock(&priv->mac_cfg_lock);
	spin_lock(&priv->tx_lock);

	ret = reset_mac(priv);
	/* Note that reset_mac will fail if the clocks are gated by the PHY
	 * due to the PHY being put into isolation or power down mode.
	 * This is not an error if reset fails due to no clock.
	 */
	if (ret)
		netdev_dbg(dev, "Cannot reset MAC core (error: %d)\n", ret);
	priv->dmaops->reset_dma(priv);
	free_skbufs(dev);

	spin_unlock(&priv->tx_lock);
	spin_unlock(&priv->mac_cfg_lock);

	priv->dmaops->uninit_dma(priv);

	return 0;
}
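
/* Deliberately not const: altera_tse_probe() swaps in
 * tse_set_rx_mode_hashfilter for .ndo_set_rx_mode when the hardware
 * hash filter is present.
 */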
static struct net_device_ops altera_tse_netdev_ops = {
	.ndo_open = tse_open,
	.ndo_stop = tse_shutdown,
	.ndo_start_xmit = tse_start_xmit,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_set_rx_mode = tse_set_rx_mode,
	.ndo_change_mtu = tse_change_mtu,
	.ndo_validate_addr = eth_validate_addr,
};

static int request_and_map(struct platform_device *pdev, const char *name,
			   struct resource **res, void __iomem **ptr)
{
	struct resource *region;
	struct device *device = &pdev->dev;

	*res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	if (*res == NULL) {
		dev_err(device, "resource %s not defined\n", name);
		return -ENODEV;
	}

	region = devm_request_mem_region(device, (*res)->start,
					 resource_size(*res), dev_name(device));
	if (region == NULL) {
		dev_err(device, "unable to request %s\n", name);
		return -EBUSY;
	}

	*ptr = devm_ioremap_nocache(device, region->start,
				    resource_size(region));
	if (*ptr == NULL) {
		dev_err(device, "ioremap_nocache of %s failed!", name);
		return -ENOMEM;
	}

	return 0;
}

/* Probe Altera TSE MAC device
 */
static int altera_tse_probe(struct platform_device *pdev)
{
	struct net_device *ndev;
	int ret = -ENODEV;
	struct resource *control_port;
	struct resource *dma_res;
	struct altera_tse_private *priv;
	const unsigned char *macaddr;
	void __iomem *descmap;
	const struct of_device_id *of_id = NULL;

	ndev = alloc_etherdev(sizeof(struct altera_tse_private));
	if (!ndev) {
		dev_err(&pdev->dev, "Could not allocate network device\n");
		return -ENODEV;
	}

	SET_NETDEV_DEV(ndev, &pdev->dev);

	priv = netdev_priv(ndev);
	priv->device = &pdev->dev;
	priv->dev = ndev;
	priv->msg_enable = netif_msg_init(debug, default_msg_level);

	of_id = of_match_device(altera_tse_ids, &pdev->dev);

	if (of_id)
		priv->dmaops = (struct altera_dmaops *)of_id->data;

	if (priv->dmaops &&
	    priv->dmaops->altera_dtype == ALTERA_DTYPE_SGDMA) {
		/* Get the mapped address to the SGDMA descriptor memory */
		ret = request_and_map(pdev, "s1", &dma_res, &descmap);
		if (ret)
			goto err_free_netdev;

		/* Start of that memory is for transmit descriptors */
		priv->tx_dma_desc = descmap;

		/* First half is for tx descriptors, other half for rx */
		priv->txdescmem = resource_size(dma_res) / 2;

		priv->txdescmem_busaddr = (dma_addr_t)dma_res->start;

		priv->rx_dma_desc = (void __iomem *)((uintptr_t)(descmap +
								 priv->txdescmem));
		priv->rxdescmem = resource_size(dma_res) / 2;
		priv->rxdescmem_busaddr = dma_res->start;
		priv->rxdescmem_busaddr += priv->txdescmem;

		if (upper_32_bits(priv->rxdescmem_busaddr)) {
			dev_dbg(priv->device,
				"SGDMA bus addresses greater than 32-bits\n");
			ret = -EINVAL;
			goto err_free_netdev;
		}
		if (upper_32_bits(priv->txdescmem_busaddr)) {
			dev_dbg(priv->device,
				"SGDMA bus addresses greater than 32-bits\n");
			ret = -EINVAL;
			goto err_free_netdev;
		}
	} else if (priv->dmaops &&
		   priv->dmaops->altera_dtype == ALTERA_DTYPE_MSGDMA) {
		ret = request_and_map(pdev, "rx_resp", &dma_res,
				      &priv->rx_dma_resp);
		if (ret)
			goto err_free_netdev;

		ret = request_and_map(pdev, "tx_desc", &dma_res,
				      &priv->tx_dma_desc);
		if (ret)
			goto err_free_netdev;

		priv->txdescmem = resource_size(dma_res);
		priv->txdescmem_busaddr = dma_res->start;

		ret = request_and_map(pdev, "rx_desc", &dma_res,
				      &priv->rx_dma_desc);
		if (ret)
			goto err_free_netdev;

		priv->rxdescmem = resource_size(dma_res);
		priv->rxdescmem_busaddr = dma_res->start;

	} else {
		goto err_free_netdev;
	}
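
	/* Try the DMA mask the dispatcher supports (64-bit for the mSGDMA,
	 * 32-bit for the SGDMA, per the dmaops tables below), then fall
	 * back to a plain 32-bit mask.
	 */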
	if (!dma_set_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask)))
		dma_set_coherent_mask(priv->device,
				      DMA_BIT_MASK(priv->dmaops->dmamask));
	else if (!dma_set_mask(priv->device, DMA_BIT_MASK(32)))
		dma_set_coherent_mask(priv->device, DMA_BIT_MASK(32));
	else
		goto err_free_netdev;

	/* MAC address space */
	ret = request_and_map(pdev, "control_port", &control_port,
			      (void __iomem **)&priv->mac_dev);
	if (ret)
		goto err_free_netdev;

	/* xSGDMA Rx Dispatcher address space */
	ret = request_and_map(pdev, "rx_csr", &dma_res,
			      &priv->rx_dma_csr);
	if (ret)
		goto err_free_netdev;

	/* xSGDMA Tx Dispatcher address space */
	ret = request_and_map(pdev, "tx_csr", &dma_res,
			      &priv->tx_dma_csr);
	if (ret)
		goto err_free_netdev;

	/* Rx IRQ */
	priv->rx_irq = platform_get_irq_byname(pdev, "rx_irq");
	if (priv->rx_irq == -ENXIO) {
		dev_err(&pdev->dev, "cannot obtain Rx IRQ\n");
		ret = -ENXIO;
		goto err_free_netdev;
	}

	/* Tx IRQ */
	priv->tx_irq = platform_get_irq_byname(pdev, "tx_irq");
	if (priv->tx_irq == -ENXIO) {
		dev_err(&pdev->dev, "cannot obtain Tx IRQ\n");
		ret = -ENXIO;
		goto err_free_netdev;
	}

	/* get FIFO depths from device tree */
	if (of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth",
				 &priv->rx_fifo_depth)) {
		dev_err(&pdev->dev, "cannot obtain rx-fifo-depth\n");
		ret = -ENXIO;
		goto err_free_netdev;
	}

	if (of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth",
				 &priv->tx_fifo_depth)) {
		dev_err(&pdev->dev, "cannot obtain tx-fifo-depth\n");
		ret = -ENXIO;
		goto err_free_netdev;
	}

	/* get hash filter settings for this instance */
	priv->hash_filter =
		of_property_read_bool(pdev->dev.of_node,
				      "altr,has-hash-multicast-filter");

	/* Set hash filter to not set for now until the
	 * multicast filter receive issue is debugged
	 */
	priv->hash_filter = 0;

	/* get supplemental address settings for this instance */
	priv->added_unicast =
		of_property_read_bool(pdev->dev.of_node,
				      "altr,has-supplementary-unicast");

	priv->dev->min_mtu = ETH_ZLEN + ETH_FCS_LEN;
	/* Max MTU is 1500, ETH_DATA_LEN */
	priv->dev->max_mtu = ETH_DATA_LEN;

	/* Get the max mtu from the device tree. Note that the
	 * "max-frame-size" parameter is actually max mtu. Definition
	 * in the ePAPR v1.1 spec and usage differ, so go with usage.
	 */
	of_property_read_u32(pdev->dev.of_node, "max-frame-size",
			     &priv->dev->max_mtu);

	/* The DMA buffer size already accounts for an alignment bias
	 * to avoid unaligned access exceptions for the NIOS processor.
	 */
	priv->rx_dma_buf_sz = ALTERA_RXDMABUFFER_SIZE;

	/* get default MAC address from device tree */
	macaddr = of_get_mac_address(pdev->dev.of_node);
	if (!IS_ERR(macaddr))
		ether_addr_copy(ndev->dev_addr, macaddr);
	else
		eth_hw_addr_random(ndev);

	/* get phy addr and create mdio */
	ret = altera_tse_phy_get_addr_mdio_create(ndev);
	if (ret)
		goto err_free_netdev;

	/* initialize netdev */
	ndev->mem_start = control_port->start;
	ndev->mem_end = control_port->end;
	ndev->netdev_ops = &altera_tse_netdev_ops;
	altera_tse_set_ethtool_ops(ndev);

	altera_tse_netdev_ops.ndo_set_rx_mode = tse_set_rx_mode;

	if (priv->hash_filter)
		altera_tse_netdev_ops.ndo_set_rx_mode =
			tse_set_rx_mode_hashfilter;

	/* Scatter/gather IO is not supported,
	 * so it is turned off
	 */
	ndev->hw_features &= ~NETIF_F_SG;
	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;

	/* VLAN offloading of tagging, stripping and filtering is not
	 * supported by hardware, but driver will accommodate the
	 * extra 4-byte VLAN tag for processing by upper layers
	 */
	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;

	/* setup NAPI interface */
	netif_napi_add(ndev, &priv->napi, tse_poll, NAPI_POLL_WEIGHT);

	spin_lock_init(&priv->mac_cfg_lock);
	spin_lock_init(&priv->tx_lock);
	spin_lock_init(&priv->rxdma_irq_lock);

	netif_carrier_off(ndev);
	ret = register_netdev(ndev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register TSE net device\n");
		goto err_register_netdev;
	}

	platform_set_drvdata(pdev, ndev);

	priv->revision = ioread32(&priv->mac_dev->megacore_revision);

	if (netif_msg_probe(priv))
		dev_info(&pdev->dev, "Altera TSE MAC version %d.%d at 0x%08lx irq %d/%d\n",
			 (priv->revision >> 8) & 0xff,
			 priv->revision & 0xff,
			 (unsigned long)control_port->start, priv->rx_irq,
			 priv->tx_irq);

	ret = init_phy(ndev);
	if (ret != 0) {
		netdev_err(ndev, "Cannot attach to PHY (error: %d)\n", ret);
		goto err_init_phy;
	}
	return 0;

err_init_phy:
	unregister_netdev(ndev);
err_register_netdev:
	netif_napi_del(&priv->napi);
	altera_tse_mdio_destroy(ndev);
err_free_netdev:
	free_netdev(ndev);
	return ret;
}

/* Remove Altera TSE MAC device
 */
static int altera_tse_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct altera_tse_private *priv = netdev_priv(ndev);

	if (ndev->phydev) {
		phy_disconnect(ndev->phydev);

		if (of_phy_is_fixed_link(priv->device->of_node))
			of_phy_deregister_fixed_link(priv->device->of_node);
	}

	platform_set_drvdata(pdev, NULL);
	altera_tse_mdio_destroy(ndev);
	unregister_netdev(ndev);
	free_netdev(ndev);

	return 0;
}

static const struct altera_dmaops altera_dtype_sgdma = {
	.altera_dtype = ALTERA_DTYPE_SGDMA,
	.dmamask = 32,
	.reset_dma = sgdma_reset,
	.enable_txirq = sgdma_enable_txirq,
	.enable_rxirq = sgdma_enable_rxirq,
	.disable_txirq = sgdma_disable_txirq,
	.disable_rxirq = sgdma_disable_rxirq,
	.clear_txirq = sgdma_clear_txirq,
	.clear_rxirq = sgdma_clear_rxirq,
	.tx_buffer = sgdma_tx_buffer,
	.tx_completions = sgdma_tx_completions,
	.add_rx_desc = sgdma_add_rx_desc,
	.get_rx_status = sgdma_rx_status,
	.init_dma = sgdma_initialize,
	.uninit_dma = sgdma_uninitialize,
	.start_rxdma = sgdma_start_rxdma,
};

static const struct altera_dmaops altera_dtype_msgdma = {
	.altera_dtype = ALTERA_DTYPE_MSGDMA,
	.dmamask = 64,
	.reset_dma = msgdma_reset,
	.enable_txirq = msgdma_enable_txirq,
	.enable_rxirq = msgdma_enable_rxirq,
	.disable_txirq = msgdma_disable_txirq,
	.disable_rxirq = msgdma_disable_rxirq,
	.clear_txirq = msgdma_clear_txirq,
	.clear_rxirq = msgdma_clear_rxirq,
	.tx_buffer = msgdma_tx_buffer,
	.tx_completions = msgdma_tx_completions,
	.add_rx_desc = msgdma_add_rx_desc,
	.get_rx_status = msgdma_rx_status,
	.init_dma = msgdma_initialize,
	.uninit_dma = msgdma_uninitialize,
	.start_rxdma = msgdma_start_rxdma,
};

static const struct of_device_id altera_tse_ids[] = {
	{ .compatible = "altr,tse-msgdma-1.0", .data = &altera_dtype_msgdma, },
	{ .compatible = "altr,tse-1.0", .data = &altera_dtype_sgdma, },
	{ .compatible = "ALTR,tse-1.0", .data = &altera_dtype_sgdma, },
	{},
};
MODULE_DEVICE_TABLE(of, altera_tse_ids);

static struct platform_driver altera_tse_driver = {
	.probe = altera_tse_probe,
	.remove = altera_tse_remove,
	.suspend = NULL,
	.resume = NULL,
	.driver = {
		.name = ALTERA_TSE_RESOURCE_NAME,
		.of_match_table = altera_tse_ids,
	},
};

module_platform_driver(altera_tse_driver);

MODULE_AUTHOR("Altera Corporation");
MODULE_DESCRIPTION("Altera Triple Speed Ethernet MAC driver");
MODULE_LICENSE("GPL v2");