dwc-xlgmac-net.c

/* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver
 *
 * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is dual-licensed; you may select either version 2 of
 * the GNU General Public License ("GPL") or BSD license ("BSD").
 *
 * This Synopsys DWC XLGMAC software driver and associated documentation
 * (hereinafter the "Software") is an unsupported proprietary work of
 * Synopsys, Inc. unless otherwise expressly agreed to in writing between
 * Synopsys and you. The Software IS NOT an item of Licensed Software or a
 * Licensed Product under any End User Software License Agreement or
 * Agreement for Licensed Products with Synopsys or any supplement thereto.
 * Synopsys is a registered trademark of Synopsys, Inc. Other names included
 * in the SOFTWARE may be the trademarks of their respective owners.
 */

#include <linux/netdevice.h>
#include <linux/tcp.h>
#include <linux/interrupt.h>

#include "dwc-xlgmac.h"
#include "dwc-xlgmac-reg.h"

static int xlgmac_one_poll(struct napi_struct *, int);
static int xlgmac_all_poll(struct napi_struct *, int);
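
/* Ring bookkeeping: ring->cur and ring->dirty advance without wrapping in
 * this file; descriptor lookups go through XLGMAC_GET_DESC_DATA(), which is
 * assumed to reduce the index to the ring size. On that assumption,
 * (cur - dirty) is the number of descriptors still owned by the hardware,
 * and the remainder of the ring is what xlgmac_tx_avail_desc() reports as
 * free for new transmits.
 */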

static inline unsigned int xlgmac_tx_avail_desc(struct xlgmac_ring *ring)
{
	return (ring->dma_desc_count - (ring->cur - ring->dirty));
}

static inline unsigned int xlgmac_rx_dirty_desc(struct xlgmac_ring *ring)
{
	return (ring->cur - ring->dirty);
}

static int xlgmac_maybe_stop_tx_queue(
			struct xlgmac_channel *channel,
			struct xlgmac_ring *ring,
			unsigned int count)
{
	struct xlgmac_pdata *pdata = channel->pdata;

	if (count > xlgmac_tx_avail_desc(ring)) {
		netif_info(pdata, drv, pdata->netdev,
			   "Tx queue stopped, not enough descriptors available\n");
		netif_stop_subqueue(pdata->netdev, channel->queue_index);
		ring->tx.queue_stopped = 1;

		/* If we haven't notified the hardware because of xmit_more
		 * support, tell it now
		 */
		if (ring->tx.xmit_more)
			pdata->hw_ops.tx_start_xmit(channel, ring);

		return NETDEV_TX_BUSY;
	}

	return 0;
}

static void xlgmac_prep_vlan(struct sk_buff *skb,
			     struct xlgmac_pkt_info *pkt_info)
{
	if (skb_vlan_tag_present(skb))
		pkt_info->vlan_ctag = skb_vlan_tag_get(skb);
}

static int xlgmac_prep_tso(struct sk_buff *skb,
			   struct xlgmac_pkt_info *pkt_info)
{
	int ret;

	if (!XLGMAC_GET_REG_BITS(pkt_info->attributes,
				 TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS,
				 TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN))
		return 0;

	ret = skb_cow_head(skb, 0);
	if (ret)
		return ret;

	pkt_info->header_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	pkt_info->tcp_header_len = tcp_hdrlen(skb);
	pkt_info->tcp_payload_len = skb->len - pkt_info->header_len;
	pkt_info->mss = skb_shinfo(skb)->gso_size;

	XLGMAC_PR("header_len=%u\n", pkt_info->header_len);
	XLGMAC_PR("tcp_header_len=%u, tcp_payload_len=%u\n",
		  pkt_info->tcp_header_len, pkt_info->tcp_payload_len);
	XLGMAC_PR("mss=%u\n", pkt_info->mss);

	/* Update the number of packets that will ultimately be transmitted
	 * along with the extra bytes for each extra packet
	 */
	pkt_info->tx_packets = skb_shinfo(skb)->gso_segs;
	pkt_info->tx_bytes += (pkt_info->tx_packets - 1) * pkt_info->header_len;

	return 0;
}

static int xlgmac_is_tso(struct sk_buff *skb)
{
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	return 1;
}
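
/* Descriptor accounting for a transmit skb, as computed below:
 *   - one context descriptor if the MSS (for TSO) or the VLAN tag differs
 *     from the value currently programmed on the ring (a single context
 *     descriptor is shared when both change);
 *   - one extra descriptor for the TSO header;
 *   - one descriptor per XLGMAC_TX_MAX_BUF_SIZE chunk of the linear head;
 *   - one descriptor per XLGMAC_TX_MAX_BUF_SIZE chunk of each fragment.
 * The resulting pkt_info->desc_count is what xlgmac_maybe_stop_tx_queue()
 * checks against the free ring space.
 */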

static void xlgmac_prep_tx_pkt(struct xlgmac_pdata *pdata,
			       struct xlgmac_ring *ring,
			       struct sk_buff *skb,
			       struct xlgmac_pkt_info *pkt_info)
{
	struct skb_frag_struct *frag;
	unsigned int context_desc;
	unsigned int len;
	unsigned int i;

	pkt_info->skb = skb;

	context_desc = 0;
	pkt_info->desc_count = 0;

	pkt_info->tx_packets = 1;
	pkt_info->tx_bytes = skb->len;

	if (xlgmac_is_tso(skb)) {
		/* TSO requires an extra descriptor if mss is different */
		if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
			context_desc = 1;
			pkt_info->desc_count++;
		}

		/* TSO requires an extra descriptor for TSO header */
		pkt_info->desc_count++;

		pkt_info->attributes = XLGMAC_SET_REG_BITS(
					pkt_info->attributes,
					TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS,
					TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN,
					1);
		pkt_info->attributes = XLGMAC_SET_REG_BITS(
					pkt_info->attributes,
					TX_PACKET_ATTRIBUTES_CSUM_ENABLE_POS,
					TX_PACKET_ATTRIBUTES_CSUM_ENABLE_LEN,
					1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL)
		pkt_info->attributes = XLGMAC_SET_REG_BITS(
					pkt_info->attributes,
					TX_PACKET_ATTRIBUTES_CSUM_ENABLE_POS,
					TX_PACKET_ATTRIBUTES_CSUM_ENABLE_LEN,
					1);

	if (skb_vlan_tag_present(skb)) {
		/* VLAN requires an extra descriptor if tag is different */
		if (skb_vlan_tag_get(skb) != ring->tx.cur_vlan_ctag)
			/* We can share with the TSO context descriptor */
			if (!context_desc) {
				context_desc = 1;
				pkt_info->desc_count++;
			}

		pkt_info->attributes = XLGMAC_SET_REG_BITS(
					pkt_info->attributes,
					TX_PACKET_ATTRIBUTES_VLAN_CTAG_POS,
					TX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN,
					1);
	}

	for (len = skb_headlen(skb); len;) {
		pkt_info->desc_count++;
		len -= min_t(unsigned int, len, XLGMAC_TX_MAX_BUF_SIZE);
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		for (len = skb_frag_size(frag); len; ) {
			pkt_info->desc_count++;
			len -= min_t(unsigned int, len, XLGMAC_TX_MAX_BUF_SIZE);
		}
	}
}

static int xlgmac_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
{
	unsigned int rx_buf_size;

	if (mtu > XLGMAC_JUMBO_PACKET_MTU) {
		netdev_alert(netdev, "MTU exceeds maximum supported value\n");
		return -EINVAL;
	}

	rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	rx_buf_size = clamp_val(rx_buf_size, XLGMAC_RX_MIN_BUF_SIZE, PAGE_SIZE);

	rx_buf_size = (rx_buf_size + XLGMAC_RX_BUF_ALIGN - 1) &
		      ~(XLGMAC_RX_BUF_ALIGN - 1);

	return rx_buf_size;
}
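
/* Worked example (values beyond the standard Ethernet header sizes are
 * assumptions for illustration): with mtu = 1500, the raw size is
 * 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) + VLAN_HLEN (4) = 1522 bytes.
 * After clamping to [XLGMAC_RX_MIN_BUF_SIZE, PAGE_SIZE], the round-up mask
 * yields the next multiple of XLGMAC_RX_BUF_ALIGN; assuming an alignment
 * of 64 that would be 1536. The mask trick requires XLGMAC_RX_BUF_ALIGN
 * to be a power of two.
 */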

static void xlgmac_enable_rx_tx_ints(struct xlgmac_pdata *pdata)
{
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
	struct xlgmac_channel *channel;
	enum xlgmac_int int_id;
	unsigned int i;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (channel->tx_ring && channel->rx_ring)
			int_id = XLGMAC_INT_DMA_CH_SR_TI_RI;
		else if (channel->tx_ring)
			int_id = XLGMAC_INT_DMA_CH_SR_TI;
		else if (channel->rx_ring)
			int_id = XLGMAC_INT_DMA_CH_SR_RI;
		else
			continue;

		hw_ops->enable_int(channel, int_id);
	}
}

static void xlgmac_disable_rx_tx_ints(struct xlgmac_pdata *pdata)
{
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
	struct xlgmac_channel *channel;
	enum xlgmac_int int_id;
	unsigned int i;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (channel->tx_ring && channel->rx_ring)
			int_id = XLGMAC_INT_DMA_CH_SR_TI_RI;
		else if (channel->tx_ring)
			int_id = XLGMAC_INT_DMA_CH_SR_TI;
		else if (channel->rx_ring)
			int_id = XLGMAC_INT_DMA_CH_SR_RI;
		else
			continue;

		hw_ops->disable_int(channel, int_id);
	}
}

static irqreturn_t xlgmac_isr(int irq, void *data)
{
	unsigned int dma_isr, dma_ch_isr, mac_isr;
	struct xlgmac_pdata *pdata = data;
	struct xlgmac_channel *channel;
	struct xlgmac_hw_ops *hw_ops;
	unsigned int i, ti, ri;

	hw_ops = &pdata->hw_ops;

	/* The DMA interrupt status register also reports MAC and MTL
	 * interrupts. So for polling mode, we just need to check for
	 * this register to be non-zero
	 */
	dma_isr = readl(pdata->mac_regs + DMA_ISR);
	if (!dma_isr)
		return IRQ_HANDLED;

	netif_dbg(pdata, intr, pdata->netdev, "DMA_ISR=%#010x\n", dma_isr);

	for (i = 0; i < pdata->channel_count; i++) {
		if (!(dma_isr & (1 << i)))
			continue;

		channel = pdata->channel_head + i;

		dma_ch_isr = readl(XLGMAC_DMA_REG(channel, DMA_CH_SR));
		netif_dbg(pdata, intr, pdata->netdev, "DMA_CH%u_ISR=%#010x\n",
			  i, dma_ch_isr);

		/* The TI or RI interrupt bits may still be set even if using
		 * per channel DMA interrupts. Check to be sure those are not
		 * enabled before using the private data napi structure.
		 */
		ti = XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_TI_POS,
					 DMA_CH_SR_TI_LEN);
		ri = XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_RI_POS,
					 DMA_CH_SR_RI_LEN);
		if (!pdata->per_channel_irq && (ti || ri)) {
			if (napi_schedule_prep(&pdata->napi)) {
				/* Disable Tx and Rx interrupts */
				xlgmac_disable_rx_tx_ints(pdata);
				pdata->stats.napi_poll_isr++;
				/* Turn on polling */
				__napi_schedule_irqoff(&pdata->napi);
			}
		}

		if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_TPS_POS,
					DMA_CH_SR_TPS_LEN))
			pdata->stats.tx_process_stopped++;

		if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_RPS_POS,
					DMA_CH_SR_RPS_LEN))
			pdata->stats.rx_process_stopped++;

		if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_TBU_POS,
					DMA_CH_SR_TBU_LEN))
			pdata->stats.tx_buffer_unavailable++;

		if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_RBU_POS,
					DMA_CH_SR_RBU_LEN))
			pdata->stats.rx_buffer_unavailable++;

		/* Restart the device on a Fatal Bus Error */
		if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_FBE_POS,
					DMA_CH_SR_FBE_LEN)) {
			pdata->stats.fatal_bus_error++;
			schedule_work(&pdata->restart_work);
		}

		/* Clear all interrupt signals */
		writel(dma_ch_isr, XLGMAC_DMA_REG(channel, DMA_CH_SR));
	}

	if (XLGMAC_GET_REG_BITS(dma_isr, DMA_ISR_MACIS_POS,
				DMA_ISR_MACIS_LEN)) {
		mac_isr = readl(pdata->mac_regs + MAC_ISR);

		if (XLGMAC_GET_REG_BITS(mac_isr, MAC_ISR_MMCTXIS_POS,
					MAC_ISR_MMCTXIS_LEN))
			hw_ops->tx_mmc_int(pdata);

		if (XLGMAC_GET_REG_BITS(mac_isr, MAC_ISR_MMCRXIS_POS,
					MAC_ISR_MMCRXIS_LEN))
			hw_ops->rx_mmc_int(pdata);
	}

	return IRQ_HANDLED;
}
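
/* Two interrupt models coexist in this driver: with a single shared device
 * interrupt, xlgmac_isr() above scans every channel, masks the Tx/Rx
 * interrupts itself and schedules the device-wide pdata->napi (serviced by
 * xlgmac_all_poll()). With per-channel DMA interrupts, xlgmac_dma_isr()
 * below only has to quiesce its own IRQ line and schedule the per-channel
 * NAPI context (serviced by xlgmac_one_poll()).
 */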

static irqreturn_t xlgmac_dma_isr(int irq, void *data)
{
	struct xlgmac_channel *channel = data;

	/* Per channel DMA interrupts are enabled, so we use the per
	 * channel napi structure and not the private data napi structure
	 */
	if (napi_schedule_prep(&channel->napi)) {
		/* Disable Tx and Rx interrupts */
		disable_irq_nosync(channel->dma_irq);

		/* Turn on polling */
		__napi_schedule_irqoff(&channel->napi);
	}

	return IRQ_HANDLED;
}

static void xlgmac_tx_timer(struct timer_list *t)
{
	struct xlgmac_channel *channel = from_timer(channel, t, tx_timer);
	struct xlgmac_pdata *pdata = channel->pdata;
	struct napi_struct *napi;

	napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;

	if (napi_schedule_prep(napi)) {
		/* Disable Tx and Rx interrupts */
		if (pdata->per_channel_irq)
			disable_irq_nosync(channel->dma_irq);
		else
			xlgmac_disable_rx_tx_ints(pdata);

		pdata->stats.napi_poll_txtimer++;
		/* Turn on polling */
		__napi_schedule(napi);
	}

	channel->tx_timer_active = 0;
}

static void xlgmac_init_timers(struct xlgmac_pdata *pdata)
{
	struct xlgmac_channel *channel;
	unsigned int i;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		timer_setup(&channel->tx_timer, xlgmac_tx_timer, 0);
	}
}

static void xlgmac_stop_timers(struct xlgmac_pdata *pdata)
{
	struct xlgmac_channel *channel;
	unsigned int i;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		del_timer_sync(&channel->tx_timer);
	}
}

static void xlgmac_napi_enable(struct xlgmac_pdata *pdata, unsigned int add)
{
	struct xlgmac_channel *channel;
	unsigned int i;

	if (pdata->per_channel_irq) {
		channel = pdata->channel_head;
		for (i = 0; i < pdata->channel_count; i++, channel++) {
			if (add)
				netif_napi_add(pdata->netdev, &channel->napi,
					       xlgmac_one_poll,
					       NAPI_POLL_WEIGHT);

			napi_enable(&channel->napi);
		}
	} else {
		if (add)
			netif_napi_add(pdata->netdev, &pdata->napi,
				       xlgmac_all_poll, NAPI_POLL_WEIGHT);

		napi_enable(&pdata->napi);
	}
}

static void xlgmac_napi_disable(struct xlgmac_pdata *pdata, unsigned int del)
{
	struct xlgmac_channel *channel;
	unsigned int i;

	if (pdata->per_channel_irq) {
		channel = pdata->channel_head;
		for (i = 0; i < pdata->channel_count; i++, channel++) {
			napi_disable(&channel->napi);

			if (del)
				netif_napi_del(&channel->napi);
		}
	} else {
		napi_disable(&pdata->napi);

		if (del)
			netif_napi_del(&pdata->napi);
	}
}

static int xlgmac_request_irqs(struct xlgmac_pdata *pdata)
{
	struct net_device *netdev = pdata->netdev;
	struct xlgmac_channel *channel;
	unsigned int i;
	int ret;

	ret = devm_request_irq(pdata->dev, pdata->dev_irq, xlgmac_isr,
			       IRQF_SHARED, netdev->name, pdata);
	if (ret) {
		netdev_alert(netdev, "error requesting irq %d\n",
			     pdata->dev_irq);
		return ret;
	}

	if (!pdata->per_channel_irq)
		return 0;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		snprintf(channel->dma_irq_name,
			 sizeof(channel->dma_irq_name) - 1,
			 "%s-TxRx-%u", netdev_name(netdev),
			 channel->queue_index);

		ret = devm_request_irq(pdata->dev, channel->dma_irq,
				       xlgmac_dma_isr, 0,
				       channel->dma_irq_name, channel);
		if (ret) {
			netdev_alert(netdev, "error requesting irq %d\n",
				     channel->dma_irq);
			goto err_irq;
		}
	}

	return 0;

err_irq:
	/* Using an unsigned int, 'i' will go to UINT_MAX and exit */
	for (i--, channel--; i < pdata->channel_count; i--, channel--)
		devm_free_irq(pdata->dev, channel->dma_irq, channel);

	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);

	return ret;
}

static void xlgmac_free_irqs(struct xlgmac_pdata *pdata)
{
	struct xlgmac_channel *channel;
	unsigned int i;

	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);

	if (!pdata->per_channel_irq)
		return;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++)
		devm_free_irq(pdata->dev, channel->dma_irq, channel);
}

static void xlgmac_free_tx_data(struct xlgmac_pdata *pdata)
{
	struct xlgmac_desc_ops *desc_ops = &pdata->desc_ops;
	struct xlgmac_desc_data *desc_data;
	struct xlgmac_channel *channel;
	struct xlgmac_ring *ring;
	unsigned int i, j;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		ring = channel->tx_ring;
		if (!ring)
			break;

		for (j = 0; j < ring->dma_desc_count; j++) {
			desc_data = XLGMAC_GET_DESC_DATA(ring, j);
			desc_ops->unmap_desc_data(pdata, desc_data);
		}
	}
}

static void xlgmac_free_rx_data(struct xlgmac_pdata *pdata)
{
	struct xlgmac_desc_ops *desc_ops = &pdata->desc_ops;
	struct xlgmac_desc_data *desc_data;
	struct xlgmac_channel *channel;
	struct xlgmac_ring *ring;
	unsigned int i, j;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		ring = channel->rx_ring;
		if (!ring)
			break;

		for (j = 0; j < ring->dma_desc_count; j++) {
			desc_data = XLGMAC_GET_DESC_DATA(ring, j);
			desc_ops->unmap_desc_data(pdata, desc_data);
		}
	}
}
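
/* Bring-up and tear-down order used by the routines below: program the
 * hardware, enable NAPI, request interrupts and only then enable the DMA
 * engines and the stack-facing queues; xlgmac_stop() reverses the sequence
 * and finally resets the BQL state of every Tx queue so that a later
 * restart does not inherit stale byte-queue accounting.
 */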

static int xlgmac_start(struct xlgmac_pdata *pdata)
{
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
	struct net_device *netdev = pdata->netdev;
	int ret;

	hw_ops->init(pdata);
	xlgmac_napi_enable(pdata, 1);

	ret = xlgmac_request_irqs(pdata);
	if (ret)
		goto err_napi;

	hw_ops->enable_tx(pdata);
	hw_ops->enable_rx(pdata);
	netif_tx_start_all_queues(netdev);

	return 0;

err_napi:
	xlgmac_napi_disable(pdata, 1);
	hw_ops->exit(pdata);

	return ret;
}

static void xlgmac_stop(struct xlgmac_pdata *pdata)
{
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
	struct net_device *netdev = pdata->netdev;
	struct xlgmac_channel *channel;
	struct netdev_queue *txq;
	unsigned int i;

	netif_tx_stop_all_queues(netdev);
	xlgmac_stop_timers(pdata);
	hw_ops->disable_tx(pdata);
	hw_ops->disable_rx(pdata);
	xlgmac_free_irqs(pdata);
	xlgmac_napi_disable(pdata, 1);
	hw_ops->exit(pdata);

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			continue;

		txq = netdev_get_tx_queue(netdev, channel->queue_index);
		netdev_tx_reset_queue(txq);
	}
}

static void xlgmac_restart_dev(struct xlgmac_pdata *pdata)
{
	/* If not running, "restart" will happen on open */
	if (!netif_running(pdata->netdev))
		return;

	xlgmac_stop(pdata);

	xlgmac_free_tx_data(pdata);
	xlgmac_free_rx_data(pdata);

	xlgmac_start(pdata);
}

static void xlgmac_restart(struct work_struct *work)
{
	struct xlgmac_pdata *pdata = container_of(work,
						  struct xlgmac_pdata,
						  restart_work);

	rtnl_lock();

	xlgmac_restart_dev(pdata);

	rtnl_unlock();
}

static int xlgmac_open(struct net_device *netdev)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_desc_ops *desc_ops;
	int ret;

	desc_ops = &pdata->desc_ops;

	/* TODO: Initialize the phy */

	/* Calculate the Rx buffer size before allocating rings */
	ret = xlgmac_calc_rx_buf_size(netdev, netdev->mtu);
	if (ret < 0)
		return ret;
	pdata->rx_buf_size = ret;

	/* Allocate the channels and rings */
	ret = desc_ops->alloc_channles_and_rings(pdata);
	if (ret)
		return ret;

	INIT_WORK(&pdata->restart_work, xlgmac_restart);
	xlgmac_init_timers(pdata);

	ret = xlgmac_start(pdata);
	if (ret)
		goto err_channels_and_rings;

	return 0;

err_channels_and_rings:
	desc_ops->free_channels_and_rings(pdata);

	return ret;
}

static int xlgmac_close(struct net_device *netdev)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_desc_ops *desc_ops;

	desc_ops = &pdata->desc_ops;

	/* Stop the device */
	xlgmac_stop(pdata);

	/* Free the channels and rings */
	desc_ops->free_channels_and_rings(pdata);

	return 0;
}

static void xlgmac_tx_timeout(struct net_device *netdev)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);

	netdev_warn(netdev, "tx timeout, device restarting\n");
	schedule_work(&pdata->restart_work);
}

static int xlgmac_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_pkt_info *tx_pkt_info;
	struct xlgmac_desc_ops *desc_ops;
	struct xlgmac_channel *channel;
	struct xlgmac_hw_ops *hw_ops;
	struct netdev_queue *txq;
	struct xlgmac_ring *ring;
	int ret;

	desc_ops = &pdata->desc_ops;
	hw_ops = &pdata->hw_ops;

	XLGMAC_PR("skb->len = %d\n", skb->len);

	channel = pdata->channel_head + skb->queue_mapping;
	txq = netdev_get_tx_queue(netdev, channel->queue_index);
	ring = channel->tx_ring;
	tx_pkt_info = &ring->pkt_info;

	if (skb->len == 0) {
		netif_err(pdata, tx_err, netdev,
			  "empty skb received from stack\n");
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Prepare preliminary packet info for TX */
	memset(tx_pkt_info, 0, sizeof(*tx_pkt_info));
	xlgmac_prep_tx_pkt(pdata, ring, skb, tx_pkt_info);

	/* Check that there are enough descriptors available */
	ret = xlgmac_maybe_stop_tx_queue(channel, ring,
					 tx_pkt_info->desc_count);
	if (ret)
		return ret;

	ret = xlgmac_prep_tso(skb, tx_pkt_info);
	if (ret) {
		netif_err(pdata, tx_err, netdev,
			  "error processing TSO packet\n");
		dev_kfree_skb_any(skb);
		return ret;
	}
	xlgmac_prep_vlan(skb, tx_pkt_info);

	if (!desc_ops->map_tx_skb(channel, skb)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Report on the actual number of bytes (to be) sent */
	netdev_tx_sent_queue(txq, tx_pkt_info->tx_bytes);

	/* Configure required descriptor fields for transmission */
	hw_ops->dev_xmit(channel);

	if (netif_msg_pktdata(pdata))
		xlgmac_print_pkt(netdev, skb, true);

	/* Stop the queue in advance if there may not be enough descriptors */
	xlgmac_maybe_stop_tx_queue(channel, ring, XLGMAC_TX_MAX_DESC_NR);

	return NETDEV_TX_OK;
}

static void xlgmac_get_stats64(struct net_device *netdev,
			       struct rtnl_link_stats64 *s)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_stats *pstats = &pdata->stats;

	pdata->hw_ops.read_mmc_stats(pdata);

	s->rx_packets = pstats->rxframecount_gb;
	s->rx_bytes = pstats->rxoctetcount_gb;
	s->rx_errors = pstats->rxframecount_gb -
		       pstats->rxbroadcastframes_g -
		       pstats->rxmulticastframes_g -
		       pstats->rxunicastframes_g;
	s->multicast = pstats->rxmulticastframes_g;
	s->rx_length_errors = pstats->rxlengtherror;
	s->rx_crc_errors = pstats->rxcrcerror;
	s->rx_fifo_errors = pstats->rxfifooverflow;

	s->tx_packets = pstats->txframecount_gb;
	s->tx_bytes = pstats->txoctetcount_gb;
	s->tx_errors = pstats->txframecount_gb - pstats->txframecount_g;
	s->tx_dropped = netdev->stats.tx_dropped;
}

static int xlgmac_set_mac_address(struct net_device *netdev, void *addr)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
	struct sockaddr *saddr = addr;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, saddr->sa_data, netdev->addr_len);

	hw_ops->set_mac_address(pdata, netdev->dev_addr);

	return 0;
}

static int xlgmac_ioctl(struct net_device *netdev,
			struct ifreq *ifreq, int cmd)
{
	if (!netif_running(netdev))
		return -ENODEV;

	return 0;
}

static int xlgmac_change_mtu(struct net_device *netdev, int mtu)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	int ret;

	ret = xlgmac_calc_rx_buf_size(netdev, mtu);
	if (ret < 0)
		return ret;

	pdata->rx_buf_size = ret;
	netdev->mtu = mtu;

	xlgmac_restart_dev(pdata);

	return 0;
}

static int xlgmac_vlan_rx_add_vid(struct net_device *netdev,
				  __be16 proto,
				  u16 vid)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;

	set_bit(vid, pdata->active_vlans);
	hw_ops->update_vlan_hash_table(pdata);

	return 0;
}

static int xlgmac_vlan_rx_kill_vid(struct net_device *netdev,
				   __be16 proto,
				   u16 vid)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;

	clear_bit(vid, pdata->active_vlans);
	hw_ops->update_vlan_hash_table(pdata);

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void xlgmac_poll_controller(struct net_device *netdev)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_channel *channel;
	unsigned int i;

	if (pdata->per_channel_irq) {
		channel = pdata->channel_head;
		for (i = 0; i < pdata->channel_count; i++, channel++)
			xlgmac_dma_isr(channel->dma_irq, channel);
	} else {
		disable_irq(pdata->dev_irq);
		xlgmac_isr(pdata->dev_irq, pdata);
		enable_irq(pdata->dev_irq);
	}
}
#endif /* CONFIG_NET_POLL_CONTROLLER */

static int xlgmac_set_features(struct net_device *netdev,
			       netdev_features_t features)
{
	netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter;
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
	int ret = 0;

	rxhash = pdata->netdev_features & NETIF_F_RXHASH;
	rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
	rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX;
	rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER;

	if ((features & NETIF_F_RXHASH) && !rxhash)
		ret = hw_ops->enable_rss(pdata);
	else if (!(features & NETIF_F_RXHASH) && rxhash)
		ret = hw_ops->disable_rss(pdata);
	if (ret)
		return ret;

	if ((features & NETIF_F_RXCSUM) && !rxcsum)
		hw_ops->enable_rx_csum(pdata);
	else if (!(features & NETIF_F_RXCSUM) && rxcsum)
		hw_ops->disable_rx_csum(pdata);

	if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan)
		hw_ops->enable_rx_vlan_stripping(pdata);
	else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan)
		hw_ops->disable_rx_vlan_stripping(pdata);

	if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && !rxvlan_filter)
		hw_ops->enable_rx_vlan_filtering(pdata);
	else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && rxvlan_filter)
		hw_ops->disable_rx_vlan_filtering(pdata);

	pdata->netdev_features = features;

	return 0;
}
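
/* pdata->netdev_features caches the feature set last programmed into the
 * hardware, so xlgmac_set_features() only toggles the offloads whose state
 * actually changed (RSS, Rx checksum, VLAN stripping and VLAN filtering)
 * instead of reprogramming everything on every call.
 */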

static void xlgmac_set_rx_mode(struct net_device *netdev)
{
	struct xlgmac_pdata *pdata = netdev_priv(netdev);
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;

	hw_ops->config_rx_mode(pdata);
}

static const struct net_device_ops xlgmac_netdev_ops = {
	.ndo_open = xlgmac_open,
	.ndo_stop = xlgmac_close,
	.ndo_start_xmit = xlgmac_xmit,
	.ndo_tx_timeout = xlgmac_tx_timeout,
	.ndo_get_stats64 = xlgmac_get_stats64,
	.ndo_change_mtu = xlgmac_change_mtu,
	.ndo_set_mac_address = xlgmac_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_do_ioctl = xlgmac_ioctl,
	.ndo_vlan_rx_add_vid = xlgmac_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = xlgmac_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = xlgmac_poll_controller,
#endif
	.ndo_set_features = xlgmac_set_features,
	.ndo_set_rx_mode = xlgmac_set_rx_mode,
};

const struct net_device_ops *xlgmac_get_netdev_ops(void)
{
	return &xlgmac_netdev_ops;
}
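
/* xlgmac_get_netdev_ops() exposes the ops table to the common probe code,
 * which presumably assigns it to netdev->netdev_ops (that caller lives
 * outside this file). The Rx-side helpers below re-arm the ring:
 * xlgmac_rx_refresh() remaps buffers for every descriptor the hardware has
 * handed back and then writes the Rx tail pointer so the DMA engine can
 * use them again.
 */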

static void xlgmac_rx_refresh(struct xlgmac_channel *channel)
{
	struct xlgmac_pdata *pdata = channel->pdata;
	struct xlgmac_ring *ring = channel->rx_ring;
	struct xlgmac_desc_data *desc_data;
	struct xlgmac_desc_ops *desc_ops;
	struct xlgmac_hw_ops *hw_ops;

	desc_ops = &pdata->desc_ops;
	hw_ops = &pdata->hw_ops;

	while (ring->dirty != ring->cur) {
		desc_data = XLGMAC_GET_DESC_DATA(ring, ring->dirty);

		/* Reset desc_data values */
		desc_ops->unmap_desc_data(pdata, desc_data);

		if (desc_ops->map_rx_buffer(pdata, ring, desc_data))
			break;

		hw_ops->rx_desc_reset(pdata, desc_data, ring->dirty);

		ring->dirty++;
	}

	/* Make sure everything is written before the register write */
	wmb();

	/* Update the Rx Tail Pointer Register with address of
	 * the last cleaned entry
	 */
	desc_data = XLGMAC_GET_DESC_DATA(ring, ring->dirty - 1);
	writel(lower_32_bits(desc_data->dma_desc_addr),
	       XLGMAC_DMA_REG(channel, DMA_CH_RDTR_LO));
}

static struct sk_buff *xlgmac_create_skb(struct xlgmac_pdata *pdata,
					 struct napi_struct *napi,
					 struct xlgmac_desc_data *desc_data,
					 unsigned int len)
{
	unsigned int copy_len;
	struct sk_buff *skb;
	u8 *packet;

	skb = napi_alloc_skb(napi, desc_data->rx.hdr.dma_len);
	if (!skb)
		return NULL;

	/* Start with the header buffer which may contain just the header
	 * or the header plus data
	 */
	dma_sync_single_range_for_cpu(pdata->dev, desc_data->rx.hdr.dma_base,
				      desc_data->rx.hdr.dma_off,
				      desc_data->rx.hdr.dma_len,
				      DMA_FROM_DEVICE);

	packet = page_address(desc_data->rx.hdr.pa.pages) +
		 desc_data->rx.hdr.pa.pages_offset;
	copy_len = (desc_data->rx.hdr_len) ? desc_data->rx.hdr_len : len;
	copy_len = min(desc_data->rx.hdr.dma_len, copy_len);
	skb_copy_to_linear_data(skb, packet, copy_len);
	skb_put(skb, copy_len);

	len -= copy_len;
	if (len) {
		/* Add the remaining data as a frag */
		dma_sync_single_range_for_cpu(pdata->dev,
					      desc_data->rx.buf.dma_base,
					      desc_data->rx.buf.dma_off,
					      desc_data->rx.buf.dma_len,
					      DMA_FROM_DEVICE);

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				desc_data->rx.buf.pa.pages,
				desc_data->rx.buf.pa.pages_offset,
				len, desc_data->rx.buf.dma_len);
		desc_data->rx.buf.pa.pages = NULL;
	}

	return skb;
}
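
/* Tx completion below is bounded by XLGMAC_TX_DESC_MAX_PROC per call so a
 * busy ring cannot monopolize the NAPI poll. Completed packets and bytes
 * are reported to BQL via netdev_tx_completed_queue(), and a queue that
 * xlgmac_maybe_stop_tx_queue() stopped earlier is only woken again once
 * more than XLGMAC_TX_DESC_MIN_FREE descriptors are free, which gives the
 * wake-up some hysteresis.
 */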

static int xlgmac_tx_poll(struct xlgmac_channel *channel)
{
	struct xlgmac_pdata *pdata = channel->pdata;
	struct xlgmac_ring *ring = channel->tx_ring;
	struct net_device *netdev = pdata->netdev;
	unsigned int tx_packets = 0, tx_bytes = 0;
	struct xlgmac_desc_data *desc_data;
	struct xlgmac_dma_desc *dma_desc;
	struct xlgmac_desc_ops *desc_ops;
	struct xlgmac_hw_ops *hw_ops;
	struct netdev_queue *txq;
	int processed = 0;
	unsigned int cur;

	desc_ops = &pdata->desc_ops;
	hw_ops = &pdata->hw_ops;

	/* Nothing to do if there isn't a Tx ring for this channel */
	if (!ring)
		return 0;

	cur = ring->cur;

	/* Be sure we get ring->cur before accessing descriptor data */
	smp_rmb();

	txq = netdev_get_tx_queue(netdev, channel->queue_index);

	while ((processed < XLGMAC_TX_DESC_MAX_PROC) &&
	       (ring->dirty != cur)) {
		desc_data = XLGMAC_GET_DESC_DATA(ring, ring->dirty);
		dma_desc = desc_data->dma_desc;

		if (!hw_ops->tx_complete(dma_desc))
			break;

		/* Make sure descriptor fields are read after reading
		 * the OWN bit
		 */
		dma_rmb();

		if (netif_msg_tx_done(pdata))
			xlgmac_dump_tx_desc(pdata, ring, ring->dirty, 1, 0);

		if (hw_ops->is_last_desc(dma_desc)) {
			tx_packets += desc_data->tx.packets;
			tx_bytes += desc_data->tx.bytes;
		}

		/* Free the SKB and reset the descriptor for re-use */
		desc_ops->unmap_desc_data(pdata, desc_data);
		hw_ops->tx_desc_reset(desc_data);

		processed++;
		ring->dirty++;
	}

	if (!processed)
		return 0;

	netdev_tx_completed_queue(txq, tx_packets, tx_bytes);

	if ((ring->tx.queue_stopped == 1) &&
	    (xlgmac_tx_avail_desc(ring) > XLGMAC_TX_DESC_MIN_FREE)) {
		ring->tx.queue_stopped = 0;
		netif_tx_wake_queue(txq);
	}

	XLGMAC_PR("processed=%d\n", processed);

	return processed;
}
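
/* A received packet can span several descriptors (and a trailing context
 * descriptor), so xlgmac_rx_poll() may run out of budget in the middle of
 * one. When that happens, the partial skb, length and error flag are parked
 * in the descriptor's state fields and restored on the next poll, which is
 * why the loop below starts by checking desc_data->state_saved.
 */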

static int xlgmac_rx_poll(struct xlgmac_channel *channel, int budget)
{
	struct xlgmac_pdata *pdata = channel->pdata;
	struct xlgmac_ring *ring = channel->rx_ring;
	struct net_device *netdev = pdata->netdev;
	unsigned int len, dma_desc_len, max_len;
	unsigned int context_next, context;
	struct xlgmac_desc_data *desc_data;
	struct xlgmac_pkt_info *pkt_info;
	unsigned int incomplete, error;
	struct xlgmac_hw_ops *hw_ops;
	unsigned int received = 0;
	struct napi_struct *napi;
	struct sk_buff *skb;
	int packet_count = 0;

	hw_ops = &pdata->hw_ops;

	/* Nothing to do if there isn't a Rx ring for this channel */
	if (!ring)
		return 0;

	incomplete = 0;
	context_next = 0;

	napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;

	desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
	pkt_info = &ring->pkt_info;

	while (packet_count < budget) {
		/* First time in loop see if we need to restore state */
		if (!received && desc_data->state_saved) {
			skb = desc_data->state.skb;
			error = desc_data->state.error;
			len = desc_data->state.len;
		} else {
			memset(pkt_info, 0, sizeof(*pkt_info));
			skb = NULL;
			error = 0;
			len = 0;
		}

read_again:
		desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);

		if (xlgmac_rx_dirty_desc(ring) > XLGMAC_RX_DESC_MAX_DIRTY)
			xlgmac_rx_refresh(channel);

		if (hw_ops->dev_read(channel))
			break;

		received++;
		ring->cur++;

		incomplete = XLGMAC_GET_REG_BITS(
					pkt_info->attributes,
					RX_PACKET_ATTRIBUTES_INCOMPLETE_POS,
					RX_PACKET_ATTRIBUTES_INCOMPLETE_LEN);
		context_next = XLGMAC_GET_REG_BITS(
					pkt_info->attributes,
					RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_POS,
					RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_LEN);
		context = XLGMAC_GET_REG_BITS(
					pkt_info->attributes,
					RX_PACKET_ATTRIBUTES_CONTEXT_POS,
					RX_PACKET_ATTRIBUTES_CONTEXT_LEN);

		/* Earlier error, just drain the remaining data */
		if ((incomplete || context_next) && error)
			goto read_again;

		if (error || pkt_info->errors) {
			if (pkt_info->errors)
				netif_err(pdata, rx_err, netdev,
					  "error in received packet\n");
			dev_kfree_skb(skb);
			goto next_packet;
		}

		if (!context) {
			/* Length is cumulative, get this descriptor's length */
			dma_desc_len = desc_data->rx.len - len;
			len += dma_desc_len;

			if (dma_desc_len && !skb) {
				skb = xlgmac_create_skb(pdata, napi, desc_data,
							dma_desc_len);
				if (!skb)
					error = 1;
			} else if (dma_desc_len) {
				dma_sync_single_range_for_cpu(
						pdata->dev,
						desc_data->rx.buf.dma_base,
						desc_data->rx.buf.dma_off,
						desc_data->rx.buf.dma_len,
						DMA_FROM_DEVICE);

				skb_add_rx_frag(
					skb, skb_shinfo(skb)->nr_frags,
					desc_data->rx.buf.pa.pages,
					desc_data->rx.buf.pa.pages_offset,
					dma_desc_len,
					desc_data->rx.buf.dma_len);
				desc_data->rx.buf.pa.pages = NULL;
			}
		}

		if (incomplete || context_next)
			goto read_again;

		if (!skb)
			goto next_packet;

		/* Be sure we don't exceed the configured MTU */
		max_len = netdev->mtu + ETH_HLEN;
		if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
		    (skb->protocol == htons(ETH_P_8021Q)))
			max_len += VLAN_HLEN;

		if (skb->len > max_len) {
			netif_err(pdata, rx_err, netdev,
				  "packet length exceeds configured MTU\n");
			dev_kfree_skb(skb);
			goto next_packet;
		}

		if (netif_msg_pktdata(pdata))
			xlgmac_print_pkt(netdev, skb, false);

		skb_checksum_none_assert(skb);
		if (XLGMAC_GET_REG_BITS(pkt_info->attributes,
					RX_PACKET_ATTRIBUTES_CSUM_DONE_POS,
					RX_PACKET_ATTRIBUTES_CSUM_DONE_LEN))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (XLGMAC_GET_REG_BITS(pkt_info->attributes,
					RX_PACKET_ATTRIBUTES_VLAN_CTAG_POS,
					RX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN)) {
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       pkt_info->vlan_ctag);
			pdata->stats.rx_vlan_packets++;
		}

		if (XLGMAC_GET_REG_BITS(pkt_info->attributes,
					RX_PACKET_ATTRIBUTES_RSS_HASH_POS,
					RX_PACKET_ATTRIBUTES_RSS_HASH_LEN))
			skb_set_hash(skb, pkt_info->rss_hash,
				     pkt_info->rss_hash_type);

		skb->dev = netdev;
		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, channel->queue_index);

		napi_gro_receive(napi, skb);

next_packet:
		packet_count++;
	}

	/* Check if we need to save state before leaving */
	if (received && (incomplete || context_next)) {
		desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
		desc_data->state_saved = 1;
		desc_data->state.skb = skb;
		desc_data->state.len = len;
		desc_data->state.error = error;
	}

	XLGMAC_PR("packet_count = %d\n", packet_count);

	return packet_count;
}

static int xlgmac_one_poll(struct napi_struct *napi, int budget)
{
	struct xlgmac_channel *channel = container_of(napi,
						      struct xlgmac_channel,
						      napi);
	int processed = 0;

	XLGMAC_PR("budget=%d\n", budget);

	/* Cleanup Tx ring first */
	xlgmac_tx_poll(channel);

	/* Process Rx ring next */
	processed = xlgmac_rx_poll(channel, budget);

	/* If we processed everything, we are done */
	if (processed < budget) {
		/* Turn off polling */
		napi_complete_done(napi, processed);

		/* Enable Tx and Rx interrupts */
		enable_irq(channel->dma_irq);
	}

	XLGMAC_PR("received = %d\n", processed);

	return processed;
}
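
/* In shared-interrupt mode the single NAPI budget is split evenly across
 * the Rx rings (budget / rx_ring_count per channel), and the channel loop
 * repeats until either the budget is exhausted or a full pass makes no
 * further progress; only then are the device-wide Tx/Rx interrupts
 * re-enabled.
 */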

static int xlgmac_all_poll(struct napi_struct *napi, int budget)
{
	struct xlgmac_pdata *pdata = container_of(napi,
						  struct xlgmac_pdata,
						  napi);
	struct xlgmac_channel *channel;
	int processed, last_processed;
	int ring_budget;
	unsigned int i;

	XLGMAC_PR("budget=%d\n", budget);

	processed = 0;
	ring_budget = budget / pdata->rx_ring_count;
	do {
		last_processed = processed;

		channel = pdata->channel_head;
		for (i = 0; i < pdata->channel_count; i++, channel++) {
			/* Cleanup Tx ring first */
			xlgmac_tx_poll(channel);

			/* Process Rx ring next */
			if (ring_budget > (budget - processed))
				ring_budget = budget - processed;
			processed += xlgmac_rx_poll(channel, ring_budget);
		}
	} while ((processed < budget) && (processed != last_processed));

	/* If we processed everything, we are done */
	if (processed < budget) {
		/* Turn off polling */
		napi_complete_done(napi, processed);

		/* Enable Tx and Rx interrupts */
		xlgmac_enable_rx_tx_ints(pdata);
	}

	XLGMAC_PR("received = %d\n", processed);

	return processed;
}