dwmac4_dma.c

/*
 * This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
 * DWC Ether MAC version 4.xx has been used for developing this code.
 *
 * This contains the functions to handle the dma.
 *
 * Copyright (C) 2015 STMicroelectronics Ltd
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * Author: Alexandre Torgue <alexandre.torgue@st.com>
 */

#include <linux/io.h>
#include "dwmac4.h"
#include "dwmac4_dma.h"
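
/* Program the AXI bus mode register (DMA_SYS_BUS_MODE) from the
 * platform-provided stmmac_axi settings: LPI options, read/write
 * outstanding request limits and the allowed burst lengths.
 */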
static void dwmac4_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
{
	u32 value = readl(ioaddr + DMA_SYS_BUS_MODE);
	int i;

	pr_info("dwmac4: Master AXI performs %s burst length\n",
		(value & DMA_SYS_BUS_FB) ? "fixed" : "any");

	if (axi->axi_lpi_en)
		value |= DMA_AXI_EN_LPI;
	if (axi->axi_xit_frm)
		value |= DMA_AXI_LPI_XIT_FRM;

	value &= ~DMA_AXI_WR_OSR_LMT;
	value |= (axi->axi_wr_osr_lmt & DMA_AXI_OSR_MAX) <<
		 DMA_AXI_WR_OSR_LMT_SHIFT;

	value &= ~DMA_AXI_RD_OSR_LMT;
	value |= (axi->axi_rd_osr_lmt & DMA_AXI_OSR_MAX) <<
		 DMA_AXI_RD_OSR_LMT_SHIFT;

	/* Depending on the UNDEF bit the Master AXI will perform any burst
	 * length according to the BLEN programmed (by default all BLEN are
	 * set).
	 */
	for (i = 0; i < AXI_BLEN; i++) {
		switch (axi->axi_blen[i]) {
		case 256:
			value |= DMA_AXI_BLEN256;
			break;
		case 128:
			value |= DMA_AXI_BLEN128;
			break;
		case 64:
			value |= DMA_AXI_BLEN64;
			break;
		case 32:
			value |= DMA_AXI_BLEN32;
			break;
		case 16:
			value |= DMA_AXI_BLEN16;
			break;
		case 8:
			value |= DMA_AXI_BLEN8;
			break;
		case 4:
			value |= DMA_AXI_BLEN4;
			break;
		}
	}

	writel(value, ioaddr + DMA_SYS_BUS_MODE);
}
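
/* Per-channel DMA setup: programmable burst lengths for TX/RX, the default
 * interrupt mask and the descriptor ring base addresses.
 */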
static void dwmac4_dma_init_channel(void __iomem *ioaddr, int pbl,
				    u32 dma_tx_phy, u32 dma_rx_phy,
				    u32 channel)
{
	u32 value;

	/* Set PBL for each channel. Currently the same configuration is
	 * applied to every channel.
	 */
	value = readl(ioaddr + DMA_CHAN_CONTROL(channel));
	value = value | DMA_BUS_MODE_PBL;
	writel(value, ioaddr + DMA_CHAN_CONTROL(channel));

	value = readl(ioaddr + DMA_CHAN_TX_CONTROL(channel));
	value = value | (pbl << DMA_BUS_MODE_PBL_SHIFT);
	writel(value, ioaddr + DMA_CHAN_TX_CONTROL(channel));

	value = readl(ioaddr + DMA_CHAN_RX_CONTROL(channel));
	value = value | (pbl << DMA_BUS_MODE_RPBL_SHIFT);
	writel(value, ioaddr + DMA_CHAN_RX_CONTROL(channel));

	/* Mask interrupts by writing to CSR7 */
	writel(DMA_CHAN_INTR_DEFAULT_MASK, ioaddr + DMA_CHAN_INTR_ENA(channel));

	writel(dma_tx_phy, ioaddr + DMA_CHAN_TX_BASE_ADDR(channel));
	writel(dma_rx_phy, ioaddr + DMA_CHAN_RX_BASE_ADDR(channel));
}
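
/* Global DMA setup: fixed/mixed burst and address-aligned beats in
 * DMA_SYS_BUS_MODE, then per-channel initialization for every channel up to
 * DMA_CHANNEL_NB_MAX.
 */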
static void dwmac4_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb,
			    int aal, u32 dma_tx, u32 dma_rx, int atds)
{
	u32 value = readl(ioaddr + DMA_SYS_BUS_MODE);
	int i;

	/* Set the Fixed burst mode */
	if (fb)
		value |= DMA_SYS_BUS_FB;

	/* Mixed Burst has no effect when fb is set */
	if (mb)
		value |= DMA_SYS_BUS_MB;

	if (aal)
		value |= DMA_SYS_BUS_AAL;

	writel(value, ioaddr + DMA_SYS_BUS_MODE);

	for (i = 0; i < DMA_CHANNEL_NB_MAX; i++)
		dwmac4_dma_init_channel(ioaddr, pbl, dma_tx, dma_rx, i);
}
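
/* Dump the per-channel DMA registers at debug level; the offsets printed in
 * the messages are relative to the channel register block.
 */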
static void _dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 channel)
{
	pr_debug(" Channel %d\n", channel);
	pr_debug("\tDMA_CHAN_CONTROL, offset: 0x%x, val: 0x%x\n", 0,
		 readl(ioaddr + DMA_CHAN_CONTROL(channel)));
	pr_debug("\tDMA_CHAN_TX_CONTROL, offset: 0x%x, val: 0x%x\n", 0x4,
		 readl(ioaddr + DMA_CHAN_TX_CONTROL(channel)));
	pr_debug("\tDMA_CHAN_RX_CONTROL, offset: 0x%x, val: 0x%x\n", 0x8,
		 readl(ioaddr + DMA_CHAN_RX_CONTROL(channel)));
	pr_debug("\tDMA_CHAN_TX_BASE_ADDR, offset: 0x%x, val: 0x%x\n", 0x14,
		 readl(ioaddr + DMA_CHAN_TX_BASE_ADDR(channel)));
	pr_debug("\tDMA_CHAN_RX_BASE_ADDR, offset: 0x%x, val: 0x%x\n", 0x1c,
		 readl(ioaddr + DMA_CHAN_RX_BASE_ADDR(channel)));
	pr_debug("\tDMA_CHAN_TX_END_ADDR, offset: 0x%x, val: 0x%x\n", 0x20,
		 readl(ioaddr + DMA_CHAN_TX_END_ADDR(channel)));
	pr_debug("\tDMA_CHAN_RX_END_ADDR, offset: 0x%x, val: 0x%x\n", 0x28,
		 readl(ioaddr + DMA_CHAN_RX_END_ADDR(channel)));
	pr_debug("\tDMA_CHAN_TX_RING_LEN, offset: 0x%x, val: 0x%x\n", 0x2c,
		 readl(ioaddr + DMA_CHAN_TX_RING_LEN(channel)));
	pr_debug("\tDMA_CHAN_RX_RING_LEN, offset: 0x%x, val: 0x%x\n", 0x30,
		 readl(ioaddr + DMA_CHAN_RX_RING_LEN(channel)));
	pr_debug("\tDMA_CHAN_INTR_ENA, offset: 0x%x, val: 0x%x\n", 0x34,
		 readl(ioaddr + DMA_CHAN_INTR_ENA(channel)));
	pr_debug("\tDMA_CHAN_RX_WATCHDOG, offset: 0x%x, val: 0x%x\n", 0x38,
		 readl(ioaddr + DMA_CHAN_RX_WATCHDOG(channel)));
	pr_debug("\tDMA_CHAN_SLOT_CTRL_STATUS, offset: 0x%x, val: 0x%x\n", 0x3c,
		 readl(ioaddr + DMA_CHAN_SLOT_CTRL_STATUS(channel)));
	pr_debug("\tDMA_CHAN_CUR_TX_DESC, offset: 0x%x, val: 0x%x\n", 0x44,
		 readl(ioaddr + DMA_CHAN_CUR_TX_DESC(channel)));
	pr_debug("\tDMA_CHAN_CUR_RX_DESC, offset: 0x%x, val: 0x%x\n", 0x4c,
		 readl(ioaddr + DMA_CHAN_CUR_RX_DESC(channel)));
	pr_debug("\tDMA_CHAN_CUR_TX_BUF_ADDR, offset: 0x%x, val: 0x%x\n", 0x54,
		 readl(ioaddr + DMA_CHAN_CUR_TX_BUF_ADDR(channel)));
	pr_debug("\tDMA_CHAN_CUR_RX_BUF_ADDR, offset: 0x%x, val: 0x%x\n", 0x5c,
		 readl(ioaddr + DMA_CHAN_CUR_RX_BUF_ADDR(channel)));
	pr_debug("\tDMA_CHAN_STATUS, offset: 0x%x, val: 0x%x\n", 0x60,
		 readl(ioaddr + DMA_CHAN_STATUS(channel)));
}

static void dwmac4_dump_dma_regs(void __iomem *ioaddr)
{
	int i;

	pr_debug(" GMAC4 DMA registers\n");

	for (i = 0; i < DMA_CHANNEL_NB_MAX; i++)
		_dwmac4_dump_dma_regs(ioaddr, i);
}
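
/* Program the RX interrupt watchdog timer (interrupt coalescing) with the
 * same value on all channels.
 */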
static void dwmac4_rx_watchdog(void __iomem *ioaddr, u32 riwt)
{
	int i;

	for (i = 0; i < DMA_CHANNEL_NB_MAX; i++)
		writel(riwt, ioaddr + DMA_CHAN_RX_WATCHDOG(i));
}
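
/* Configure the MTL TX/RX operation mode for one channel: store-and-forward
 * or the closest threshold to the requested tx/rx mode, and enable the RX
 * overflow interrupt.
 */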
static void dwmac4_dma_chan_op_mode(void __iomem *ioaddr, int txmode,
				    int rxmode, u32 channel)
{
	u32 mtl_tx_op, mtl_rx_op, mtl_rx_int;

	/* The following is only done for channel 0; other channels are not
	 * yet supported.
	 */
	mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel));
	if (txmode == SF_DMA_MODE) {
		pr_debug("GMAC: enable TX store and forward mode\n");
		/* Transmit COE type 2 cannot be done in cut-through mode. */
		mtl_tx_op |= MTL_OP_MODE_TSF;
	} else {
		pr_debug("GMAC: disabling TX SF (threshold %d)\n", txmode);
		mtl_tx_op &= ~MTL_OP_MODE_TSF;
		mtl_tx_op &= MTL_OP_MODE_TTC_MASK;
		/* Set the transmit threshold */
		if (txmode <= 32)
			mtl_tx_op |= MTL_OP_MODE_TTC_32;
		else if (txmode <= 64)
			mtl_tx_op |= MTL_OP_MODE_TTC_64;
		else if (txmode <= 96)
			mtl_tx_op |= MTL_OP_MODE_TTC_96;
		else if (txmode <= 128)
			mtl_tx_op |= MTL_OP_MODE_TTC_128;
		else if (txmode <= 192)
			mtl_tx_op |= MTL_OP_MODE_TTC_192;
		else if (txmode <= 256)
			mtl_tx_op |= MTL_OP_MODE_TTC_256;
		else if (txmode <= 384)
			mtl_tx_op |= MTL_OP_MODE_TTC_384;
		else
			mtl_tx_op |= MTL_OP_MODE_TTC_512;
	}

	writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(channel));

	mtl_rx_op = readl(ioaddr + MTL_CHAN_RX_OP_MODE(channel));

	if (rxmode == SF_DMA_MODE) {
		pr_debug("GMAC: enable RX store and forward mode\n");
		mtl_rx_op |= MTL_OP_MODE_RSF;
	} else {
		pr_debug("GMAC: disable RX SF mode (threshold %d)\n", rxmode);
		mtl_rx_op &= ~MTL_OP_MODE_RSF;
		mtl_rx_op &= MTL_OP_MODE_RTC_MASK;
		/* Set the receive threshold */
		if (rxmode <= 32)
			mtl_rx_op |= MTL_OP_MODE_RTC_32;
		else if (rxmode <= 64)
			mtl_rx_op |= MTL_OP_MODE_RTC_64;
		else if (rxmode <= 96)
			mtl_rx_op |= MTL_OP_MODE_RTC_96;
		else
			mtl_rx_op |= MTL_OP_MODE_RTC_128;
	}

	writel(mtl_rx_op, ioaddr + MTL_CHAN_RX_OP_MODE(channel));

	/* Enable the MTL RX overflow interrupt */
	mtl_rx_int = readl(ioaddr + MTL_CHAN_INT_CTRL(channel));
	writel(mtl_rx_int | MTL_RX_OVERFLOW_INT_EN,
	       ioaddr + MTL_CHAN_INT_CTRL(channel));
}
static void dwmac4_dma_operation_mode(void __iomem *ioaddr, int txmode,
				      int rxmode, int rxfifosz)
{
	/* Only Channel 0 is actually configured and used */
	dwmac4_dma_chan_op_mode(ioaddr, txmode, rxmode, 0);
}
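
/* Fill struct dma_features from the GMAC_HW_FEATURE0/1/2 hardware
 * capability registers.
 */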
static void dwmac4_get_hw_feature(void __iomem *ioaddr,
				  struct dma_features *dma_cap)
{
	u32 hw_cap = readl(ioaddr + GMAC_HW_FEATURE0);

	/* MAC HW feature0 */
	dma_cap->mbps_10_100 = (hw_cap & GMAC_HW_FEAT_MIISEL);
	dma_cap->mbps_1000 = (hw_cap & GMAC_HW_FEAT_GMIISEL) >> 1;
	dma_cap->half_duplex = (hw_cap & GMAC_HW_FEAT_HDSEL) >> 2;
	dma_cap->hash_filter = (hw_cap & GMAC_HW_FEAT_VLHASH) >> 4;
	dma_cap->multi_addr = (hw_cap & GMAC_HW_FEAT_ADDMAC) >> 18;
	dma_cap->pcs = (hw_cap & GMAC_HW_FEAT_PCSSEL) >> 3;
	dma_cap->sma_mdio = (hw_cap & GMAC_HW_FEAT_SMASEL) >> 5;
	dma_cap->pmt_remote_wake_up = (hw_cap & GMAC_HW_FEAT_RWKSEL) >> 6;
	dma_cap->pmt_magic_frame = (hw_cap & GMAC_HW_FEAT_MGKSEL) >> 7;
	/* MMC */
	dma_cap->rmon = (hw_cap & GMAC_HW_FEAT_MMCSEL) >> 8;
	/* IEEE 1588-2008 */
	dma_cap->atime_stamp = (hw_cap & GMAC_HW_FEAT_TSSEL) >> 12;
	/* 802.3az - Energy-Efficient Ethernet (EEE) */
	dma_cap->eee = (hw_cap & GMAC_HW_FEAT_EEESEL) >> 13;
	/* TX and RX csum */
	dma_cap->tx_coe = (hw_cap & GMAC_HW_FEAT_TXCOSEL) >> 14;
	dma_cap->rx_coe = (hw_cap & GMAC_HW_FEAT_RXCOESEL) >> 16;

	/* MAC HW feature1 */
	hw_cap = readl(ioaddr + GMAC_HW_FEATURE1);
	dma_cap->av = (hw_cap & GMAC_HW_FEAT_AVSEL) >> 20;
	dma_cap->tsoen = (hw_cap & GMAC_HW_TSOEN) >> 18;

	/* MAC HW feature2 */
	hw_cap = readl(ioaddr + GMAC_HW_FEATURE2);
	/* TX and RX number of channels */
	dma_cap->number_rx_channel =
		((hw_cap & GMAC_HW_FEAT_RXCHCNT) >> 12) + 1;
	dma_cap->number_tx_channel =
		((hw_cap & GMAC_HW_FEAT_TXCHCNT) >> 18) + 1;

	/* IEEE 1588-2002 */
	dma_cap->time_stamp = 0;
}

/* Enable/disable TSO feature and set MSS */
static void dwmac4_enable_tso(void __iomem *ioaddr, bool en, u32 chan)
{
	u32 value;

	if (en) {
		/* enable TSO */
		value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
		writel(value | DMA_CONTROL_TSE,
		       ioaddr + DMA_CHAN_TX_CONTROL(chan));
	} else {
		/* disable TSO */
		value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
		writel(value & ~DMA_CONTROL_TSE,
		       ioaddr + DMA_CHAN_TX_CONTROL(chan));
	}
}
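
/* DMA callbacks exposed to the stmmac core. dwmac410_dma_ops below differs
 * from dwmac4_dma_ops only in the enable_dma_irq callback
 * (dwmac410_enable_dma_irq), used on GMAC 4.10 cores.
 */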
const struct stmmac_dma_ops dwmac4_dma_ops = {
	.reset = dwmac4_dma_reset,
	.init = dwmac4_dma_init,
	.axi = dwmac4_dma_axi,
	.dump_regs = dwmac4_dump_dma_regs,
	.dma_mode = dwmac4_dma_operation_mode,
	.enable_dma_irq = dwmac4_enable_dma_irq,
	.disable_dma_irq = dwmac4_disable_dma_irq,
	.start_tx = dwmac4_dma_start_tx,
	.stop_tx = dwmac4_dma_stop_tx,
	.start_rx = dwmac4_dma_start_rx,
	.stop_rx = dwmac4_dma_stop_rx,
	.dma_interrupt = dwmac4_dma_interrupt,
	.get_hw_feature = dwmac4_get_hw_feature,
	.rx_watchdog = dwmac4_rx_watchdog,
	.set_rx_ring_len = dwmac4_set_rx_ring_len,
	.set_tx_ring_len = dwmac4_set_tx_ring_len,
	.set_rx_tail_ptr = dwmac4_set_rx_tail_ptr,
	.set_tx_tail_ptr = dwmac4_set_tx_tail_ptr,
	.enable_tso = dwmac4_enable_tso,
};

const struct stmmac_dma_ops dwmac410_dma_ops = {
	.reset = dwmac4_dma_reset,
	.init = dwmac4_dma_init,
	.axi = dwmac4_dma_axi,
	.dump_regs = dwmac4_dump_dma_regs,
	.dma_mode = dwmac4_dma_operation_mode,
	.enable_dma_irq = dwmac410_enable_dma_irq,
	.disable_dma_irq = dwmac4_disable_dma_irq,
	.start_tx = dwmac4_dma_start_tx,
	.stop_tx = dwmac4_dma_stop_tx,
	.start_rx = dwmac4_dma_start_rx,
	.stop_rx = dwmac4_dma_stop_rx,
	.dma_interrupt = dwmac4_dma_interrupt,
	.get_hw_feature = dwmac4_get_hw_feature,
	.rx_watchdog = dwmac4_rx_watchdog,
	.set_rx_ring_len = dwmac4_set_rx_ring_len,
	.set_tx_ring_len = dwmac4_set_tx_ring_len,
	.set_rx_tail_ptr = dwmac4_set_rx_tail_ptr,
	.set_tx_tail_ptr = dwmac4_set_tx_tail_ptr,
	.enable_tso = dwmac4_enable_tso,
};