dwxgmac2_dma.c

// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/*
 * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
 * stmmac XGMAC support.
 */

#include <linux/iopoll.h>
#include "stmmac.h"
#include "dwxgmac2.h"
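
/* DMA software reset: set XGMAC_SWR and poll until the bit self-clears,
 * returning -ETIMEDOUT if it is still set after 100ms.
 */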
static int dwxgmac2_dma_reset(void __iomem *ioaddr)
{
        u32 value = readl(ioaddr + XGMAC_DMA_MODE);

        /* DMA SW reset */
        writel(value | XGMAC_SWR, ioaddr + XGMAC_DMA_MODE);

        return readl_poll_timeout(ioaddr + XGMAC_DMA_MODE, value,
                                  !(value & XGMAC_SWR), 0, 100000);
}
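
/* Global DMA bus-mode setup: only address-aligned beats (AAL) is
 * configurable here; the rest of the SYSBUS_MODE register is left as-is.
 */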
static void dwxgmac2_dma_init(void __iomem *ioaddr,
                              struct stmmac_dma_cfg *dma_cfg, int atds)
{
        u32 value = readl(ioaddr + XGMAC_DMA_SYSBUS_MODE);

        if (dma_cfg->aal)
                value |= XGMAC_AAL;

        writel(value, ioaddr + XGMAC_DMA_SYSBUS_MODE);
}
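
/* Per-channel init: optionally multiply the programmed burst length by 8
 * (PBLx8) and enable the default set of DMA channel interrupts.
 */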
static void dwxgmac2_dma_init_chan(void __iomem *ioaddr,
                                   struct stmmac_dma_cfg *dma_cfg, u32 chan)
{
        u32 value = readl(ioaddr + XGMAC_DMA_CH_CONTROL(chan));

        if (dma_cfg->pblx8)
                value |= XGMAC_PBLx8;

        writel(value, ioaddr + XGMAC_DMA_CH_CONTROL(chan));
        writel(XGMAC_DMA_INT_DEFAULT_EN, ioaddr + XGMAC_DMA_CH_INT_EN(chan));
}
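
/* Program the RX programmable burst length (falling back to the common
 * PBL when no RX-specific value is set) and the base (low) address of
 * the RX descriptor ring for this channel.
 */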
static void dwxgmac2_dma_init_rx_chan(void __iomem *ioaddr,
                                      struct stmmac_dma_cfg *dma_cfg,
                                      u32 dma_rx_phy, u32 chan)
{
        u32 rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
        u32 value;

        value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
        value &= ~XGMAC_RxPBL;
        value |= (rxpbl << XGMAC_RxPBL_SHIFT) & XGMAC_RxPBL;
        writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));

        writel(dma_rx_phy, ioaddr + XGMAC_DMA_CH_RxDESC_LADDR(chan));
}
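
/* Program the TX programmable burst length, enable Operate on Second
 * Packet (OSP) and set the base (low) address of the TX descriptor ring.
 */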
static void dwxgmac2_dma_init_tx_chan(void __iomem *ioaddr,
                                      struct stmmac_dma_cfg *dma_cfg,
                                      u32 dma_tx_phy, u32 chan)
{
        u32 txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
        u32 value;

        value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
        value &= ~XGMAC_TxPBL;
        value |= (txpbl << XGMAC_TxPBL_SHIFT) & XGMAC_TxPBL;
        value |= XGMAC_OSP;
        writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));

        writel(dma_tx_phy, ioaddr + XGMAC_DMA_CH_TxDESC_LADDR(chan));
}
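
/* Apply the platform AXI configuration: LPI behaviour, read/write
 * outstanding request limits and the allowed burst lengths. Any valid
 * entry in axi_blen also clears the "undefined burst length" bit.
 */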
static void dwxgmac2_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
{
        u32 value = readl(ioaddr + XGMAC_DMA_SYSBUS_MODE);
        int i;

        if (axi->axi_lpi_en)
                value |= XGMAC_EN_LPI;
        if (axi->axi_xit_frm)
                value |= XGMAC_LPI_XIT_PKT;

        value &= ~XGMAC_WR_OSR_LMT;
        value |= (axi->axi_wr_osr_lmt << XGMAC_WR_OSR_LMT_SHIFT) &
                XGMAC_WR_OSR_LMT;

        value &= ~XGMAC_RD_OSR_LMT;
        value |= (axi->axi_rd_osr_lmt << XGMAC_RD_OSR_LMT_SHIFT) &
                XGMAC_RD_OSR_LMT;

        value &= ~XGMAC_BLEN;
        for (i = 0; i < AXI_BLEN; i++) {
                if (axi->axi_blen[i])
                        value &= ~XGMAC_UNDEF;

                switch (axi->axi_blen[i]) {
                case 256:
                        value |= XGMAC_BLEN256;
                        break;
                case 128:
                        value |= XGMAC_BLEN128;
                        break;
                case 64:
                        value |= XGMAC_BLEN64;
                        break;
                case 32:
                        value |= XGMAC_BLEN32;
                        break;
                case 16:
                        value |= XGMAC_BLEN16;
                        break;
                case 8:
                        value |= XGMAC_BLEN8;
                        break;
                case 4:
                        value |= XGMAC_BLEN4;
                        break;
                }
        }

        writel(value, ioaddr + XGMAC_DMA_SYSBUS_MODE);
}
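
/* Configure the MTL RX queue: store-and-forward (RSF) or a receive
 * threshold derived from 'mode', plus the queue size (RQS) computed from
 * the FIFO size in 256-byte units. Also enables the per-queue RX
 * overflow interrupt.
 */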
static void dwxgmac2_dma_rx_mode(void __iomem *ioaddr, int mode,
                                 u32 channel, int fifosz, u8 qmode)
{
        u32 value = readl(ioaddr + XGMAC_MTL_RXQ_OPMODE(channel));
        unsigned int rqs = fifosz / 256 - 1;

        if (mode == SF_DMA_MODE) {
                value |= XGMAC_RSF;
        } else {
                value &= ~XGMAC_RSF;
                value &= ~XGMAC_RTC;

                if (mode <= 64)
                        value |= 0x0 << XGMAC_RTC_SHIFT;
                else if (mode <= 96)
                        value |= 0x2 << XGMAC_RTC_SHIFT;
                else
                        value |= 0x3 << XGMAC_RTC_SHIFT;
        }

        value &= ~XGMAC_RQS;
        value |= (rqs << XGMAC_RQS_SHIFT) & XGMAC_RQS;

        writel(value, ioaddr + XGMAC_MTL_RXQ_OPMODE(channel));

        /* Enable MTL RX overflow */
        value = readl(ioaddr + XGMAC_MTL_QINTEN(channel));
        writel(value | XGMAC_RXOIE, ioaddr + XGMAC_MTL_QINTEN(channel));
}
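
/* Configure the MTL TX queue: store-and-forward (TSF) or a transmit
 * threshold derived from 'mode', the queue size (TQS) in 256-byte units,
 * and the queue enable mode (AVB vs. generic).
 */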
static void dwxgmac2_dma_tx_mode(void __iomem *ioaddr, int mode,
                                 u32 channel, int fifosz, u8 qmode)
{
        u32 value = readl(ioaddr + XGMAC_MTL_TXQ_OPMODE(channel));
        unsigned int tqs = fifosz / 256 - 1;

        if (mode == SF_DMA_MODE) {
                value |= XGMAC_TSF;
        } else {
                value &= ~XGMAC_TSF;
                value &= ~XGMAC_TTC;

                if (mode <= 64)
                        value |= 0x0 << XGMAC_TTC_SHIFT;
                else if (mode <= 96)
                        value |= 0x2 << XGMAC_TTC_SHIFT;
                else if (mode <= 128)
                        value |= 0x3 << XGMAC_TTC_SHIFT;
                else if (mode <= 192)
                        value |= 0x4 << XGMAC_TTC_SHIFT;
                else if (mode <= 256)
                        value |= 0x5 << XGMAC_TTC_SHIFT;
                else if (mode <= 384)
                        value |= 0x6 << XGMAC_TTC_SHIFT;
                else
                        value |= 0x7 << XGMAC_TTC_SHIFT;
        }

        value &= ~XGMAC_TXQEN;
        if (qmode != MTL_QUEUE_AVB)
                value |= 0x2 << XGMAC_TXQEN_SHIFT;
        else
                value |= 0x1 << XGMAC_TXQEN_SHIFT;

        value &= ~XGMAC_TQS;
        value |= (tqs << XGMAC_TQS_SHIFT) & XGMAC_TQS;

        writel(value, ioaddr + XGMAC_MTL_TXQ_OPMODE(channel));
}

static void dwxgmac2_enable_dma_irq(void __iomem *ioaddr, u32 chan)
{
        writel(XGMAC_DMA_INT_DEFAULT_EN, ioaddr + XGMAC_DMA_CH_INT_EN(chan));
}

static void dwxgmac2_disable_dma_irq(void __iomem *ioaddr, u32 chan)
{
        writel(0, ioaddr + XGMAC_DMA_CH_INT_EN(chan));
}
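
/* Start the channel's TX DMA engine and then enable the MAC transmitter;
 * the stop/start helpers below follow the same pattern for TX and RX.
 */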
static void dwxgmac2_dma_start_tx(void __iomem *ioaddr, u32 chan)
{
        u32 value;

        value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
        value |= XGMAC_TXST;
        writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));

        value = readl(ioaddr + XGMAC_TX_CONFIG);
        value |= XGMAC_CONFIG_TE;
        writel(value, ioaddr + XGMAC_TX_CONFIG);
}

static void dwxgmac2_dma_stop_tx(void __iomem *ioaddr, u32 chan)
{
        u32 value;

        value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
        value &= ~XGMAC_TXST;
        writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));

        value = readl(ioaddr + XGMAC_TX_CONFIG);
        value &= ~XGMAC_CONFIG_TE;
        writel(value, ioaddr + XGMAC_TX_CONFIG);
}

static void dwxgmac2_dma_start_rx(void __iomem *ioaddr, u32 chan)
{
        u32 value;

        value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
        value |= XGMAC_RXST;
        writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));

        value = readl(ioaddr + XGMAC_RX_CONFIG);
        value |= XGMAC_CONFIG_RE;
        writel(value, ioaddr + XGMAC_RX_CONFIG);
}

static void dwxgmac2_dma_stop_rx(void __iomem *ioaddr, u32 chan)
{
        u32 value;

        value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
        value &= ~XGMAC_RXST;
        writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));

        value = readl(ioaddr + XGMAC_RX_CONFIG);
        value &= ~XGMAC_CONFIG_RE;
        writel(value, ioaddr + XGMAC_RX_CONFIG);
}
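
/* DMA interrupt handler for one channel: translate abnormal (TPS/FBE)
 * and normal (RI/TI) status bits into driver events, bump the matching
 * counters, and clear only the bits that are both enabled and pending.
 */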
static int dwxgmac2_dma_interrupt(void __iomem *ioaddr,
                                  struct stmmac_extra_stats *x, u32 chan)
{
        u32 intr_status = readl(ioaddr + XGMAC_DMA_CH_STATUS(chan));
        u32 intr_en = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
        int ret = 0;

        /* ABNORMAL interrupts */
        if (unlikely(intr_status & XGMAC_AIS)) {
                if (unlikely(intr_status & XGMAC_TPS)) {
                        x->tx_process_stopped_irq++;
                        ret |= tx_hard_error;
                }
                if (unlikely(intr_status & XGMAC_FBE)) {
                        x->fatal_bus_error_irq++;
                        ret |= tx_hard_error;
                }
        }

        /* TX/RX NORMAL interrupts */
        if (likely(intr_status & XGMAC_NIS)) {
                x->normal_irq_n++;

                if (likely(intr_status & XGMAC_RI)) {
                        if (likely(intr_en & XGMAC_RIE)) {
                                x->rx_normal_irq_n++;
                                ret |= handle_rx;
                        }
                }
                if (likely(intr_status & XGMAC_TI)) {
                        x->tx_normal_irq_n++;
                        ret |= handle_tx;
                }
        }

        /* Clear interrupts */
        writel(intr_en & intr_status, ioaddr + XGMAC_DMA_CH_STATUS(chan));

        return ret;
}
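
/* Fill dma_features from the HW_FEATURE0/1/2 capability registers:
 * checksum offload, timestamping, AV, PMT, TSO, FIFO sizes and the
 * number of DMA channels and MTL queues.
 */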
static void dwxgmac2_get_hw_feature(void __iomem *ioaddr,
                                    struct dma_features *dma_cap)
{
        u32 hw_cap;

        /* MAC HW feature 0 */
        hw_cap = readl(ioaddr + XGMAC_HW_FEATURE0);
        dma_cap->rx_coe = (hw_cap & XGMAC_HWFEAT_RXCOESEL) >> 16;
        dma_cap->tx_coe = (hw_cap & XGMAC_HWFEAT_TXCOESEL) >> 14;
        dma_cap->atime_stamp = (hw_cap & XGMAC_HWFEAT_TSSEL) >> 12;
        dma_cap->av = (hw_cap & XGMAC_HWFEAT_AVSEL) >> 11;
        dma_cap->av &= (hw_cap & XGMAC_HWFEAT_RAVSEL) >> 10;
        dma_cap->pmt_magic_frame = (hw_cap & XGMAC_HWFEAT_MGKSEL) >> 7;
        dma_cap->pmt_remote_wake_up = (hw_cap & XGMAC_HWFEAT_RWKSEL) >> 6;
        dma_cap->mbps_1000 = (hw_cap & XGMAC_HWFEAT_GMIISEL) >> 1;

        /* MAC HW feature 1 */
        hw_cap = readl(ioaddr + XGMAC_HW_FEATURE1);
        dma_cap->tsoen = (hw_cap & XGMAC_HWFEAT_TSOEN) >> 18;
        dma_cap->tx_fifo_size =
                128 << ((hw_cap & XGMAC_HWFEAT_TXFIFOSIZE) >> 6);
        dma_cap->rx_fifo_size =
                128 << ((hw_cap & XGMAC_HWFEAT_RXFIFOSIZE) >> 0);

        /* MAC HW feature 2 */
        hw_cap = readl(ioaddr + XGMAC_HW_FEATURE2);
        dma_cap->pps_out_num = (hw_cap & XGMAC_HWFEAT_PPSOUTNUM) >> 24;
        dma_cap->number_tx_channel =
                ((hw_cap & XGMAC_HWFEAT_TXCHCNT) >> 18) + 1;
        dma_cap->number_rx_channel =
                ((hw_cap & XGMAC_HWFEAT_RXCHCNT) >> 12) + 1;
        dma_cap->number_tx_queues =
                ((hw_cap & XGMAC_HWFEAT_TXQCNT) >> 6) + 1;
        dma_cap->number_rx_queues =
                ((hw_cap & XGMAC_HWFEAT_RXQCNT) >> 0) + 1;
}
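
/* Program the RX interrupt watchdog timer (RWT) on every channel; the
 * helpers below program ring lengths and descriptor tail pointers.
 */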
static void dwxgmac2_rx_watchdog(void __iomem *ioaddr, u32 riwt, u32 nchan)
{
        u32 i;

        for (i = 0; i < nchan; i++)
                writel(riwt & XGMAC_RWT, ioaddr + XGMAC_DMA_CH_Rx_WATCHDOG(i));
}

static void dwxgmac2_set_rx_ring_len(void __iomem *ioaddr, u32 len, u32 chan)
{
        writel(len, ioaddr + XGMAC_DMA_CH_RxDESC_RING_LEN(chan));
}

static void dwxgmac2_set_tx_ring_len(void __iomem *ioaddr, u32 len, u32 chan)
{
        writel(len, ioaddr + XGMAC_DMA_CH_TxDESC_RING_LEN(chan));
}

static void dwxgmac2_set_rx_tail_ptr(void __iomem *ioaddr, u32 ptr, u32 chan)
{
        writel(ptr, ioaddr + XGMAC_DMA_CH_RxDESC_TAIL_LPTR(chan));
}

static void dwxgmac2_set_tx_tail_ptr(void __iomem *ioaddr, u32 ptr, u32 chan)
{
        writel(ptr, ioaddr + XGMAC_DMA_CH_TxDESC_TAIL_LPTR(chan));
}

static void dwxgmac2_enable_tso(void __iomem *ioaddr, bool en, u32 chan)
{
        u32 value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));

        if (en)
                value |= XGMAC_TSE;
        else
                value &= ~XGMAC_TSE;

        writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
}

static void dwxgmac2_set_bfsize(void __iomem *ioaddr, int bfsize, u32 chan)
{
        u32 value;

        value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
        value &= ~XGMAC_RBSZ;
        value |= bfsize << XGMAC_RBSZ_SHIFT;
        writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
}
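
/* DMA callbacks wired into the stmmac core for this XGMAC variant. */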
const struct stmmac_dma_ops dwxgmac210_dma_ops = {
        .reset = dwxgmac2_dma_reset,
        .init = dwxgmac2_dma_init,
        .init_chan = dwxgmac2_dma_init_chan,
        .init_rx_chan = dwxgmac2_dma_init_rx_chan,
        .init_tx_chan = dwxgmac2_dma_init_tx_chan,
        .axi = dwxgmac2_dma_axi,
        .dump_regs = NULL,
        .dma_rx_mode = dwxgmac2_dma_rx_mode,
        .dma_tx_mode = dwxgmac2_dma_tx_mode,
        .enable_dma_irq = dwxgmac2_enable_dma_irq,
        .disable_dma_irq = dwxgmac2_disable_dma_irq,
        .start_tx = dwxgmac2_dma_start_tx,
        .stop_tx = dwxgmac2_dma_stop_tx,
        .start_rx = dwxgmac2_dma_start_rx,
        .stop_rx = dwxgmac2_dma_stop_rx,
        .dma_interrupt = dwxgmac2_dma_interrupt,
        .get_hw_feature = dwxgmac2_get_hw_feature,
        .rx_watchdog = dwxgmac2_rx_watchdog,
        .set_rx_ring_len = dwxgmac2_set_rx_ring_len,
        .set_tx_ring_len = dwxgmac2_set_tx_ring_len,
        .set_rx_tail_ptr = dwxgmac2_set_rx_tail_ptr,
        .set_tx_tail_ptr = dwxgmac2_set_tx_tail_ptr,
        .enable_tso = dwxgmac2_enable_tso,
        .set_bfsize = dwxgmac2_set_bfsize,
};