  1. /*
  2. * This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
  3. * DWC Ether MAC version 4.00 has been used for developing this code.
  4. *
  5. * This only implements the mac core functions for this chip.
  6. *
  7. * Copyright (C) 2015 STMicroelectronics Ltd
  8. *
  9. * This program is free software; you can redistribute it and/or modify it
  10. * under the terms and conditions of the GNU General Public License,
  11. * version 2, as published by the Free Software Foundation.
  12. *
  13. * Author: Alexandre Torgue <alexandre.torgue@st.com>
  14. */
  15. #include <linux/crc32.h>
  16. #include <linux/slab.h>
  17. #include <linux/ethtool.h>
  18. #include <linux/io.h>
  19. #include <net/dsa.h>
  20. #include "stmmac.h"
  21. #include "stmmac_pcs.h"
  22. #include "dwmac4.h"
  23. #include "dwmac5.h"
  24. static void dwmac4_core_init(struct mac_device_info *hw,
  25. struct net_device *dev)
  26. {
  27. void __iomem *ioaddr = hw->pcsr;
  28. u32 value = readl(ioaddr + GMAC_CONFIG);
  29. int mtu = dev->mtu;
  30. value |= GMAC_CORE_INIT;
  31. if (mtu > 1500)
  32. value |= GMAC_CONFIG_2K;
  33. if (mtu > 2000)
  34. value |= GMAC_CONFIG_JE;
  35. if (hw->ps) {
  36. value |= GMAC_CONFIG_TE;
  37. value &= hw->link.speed_mask;
  38. switch (hw->ps) {
  39. case SPEED_1000:
  40. value |= hw->link.speed1000;
  41. break;
  42. case SPEED_100:
  43. value |= hw->link.speed100;
  44. break;
  45. case SPEED_10:
  46. value |= hw->link.speed10;
  47. break;
  48. }
  49. }
  50. writel(value, ioaddr + GMAC_CONFIG);
  51. /* Enable GMAC interrupts */
  52. value = GMAC_INT_DEFAULT_ENABLE;
  53. if (hw->pcs)
  54. value |= GMAC_PCS_IRQ_DEFAULT;
  55. writel(value, ioaddr + GMAC_INT_EN);
  56. }
  57. static void dwmac4_rx_queue_enable(struct mac_device_info *hw,
  58. u8 mode, u32 queue)
  59. {
  60. void __iomem *ioaddr = hw->pcsr;
  61. u32 value = readl(ioaddr + GMAC_RXQ_CTRL0);
  62. value &= GMAC_RX_QUEUE_CLEAR(queue);
  63. if (mode == MTL_QUEUE_AVB)
  64. value |= GMAC_RX_AV_QUEUE_ENABLE(queue);
  65. else if (mode == MTL_QUEUE_DCB)
  66. value |= GMAC_RX_DCB_QUEUE_ENABLE(queue);
  67. writel(value, ioaddr + GMAC_RXQ_CTRL0);
  68. }
  69. static void dwmac4_rx_queue_priority(struct mac_device_info *hw,
  70. u32 prio, u32 queue)
  71. {
  72. void __iomem *ioaddr = hw->pcsr;
  73. u32 base_register;
  74. u32 value;
  75. base_register = (queue < 4) ? GMAC_RXQ_CTRL2 : GMAC_RXQ_CTRL3;
  76. if (queue >= 4)
  77. queue -= 4;
  78. value = readl(ioaddr + base_register);
  79. value &= ~GMAC_RXQCTRL_PSRQX_MASK(queue);
  80. value |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) &
  81. GMAC_RXQCTRL_PSRQX_MASK(queue);
  82. writel(value, ioaddr + base_register);
  83. }
  84. static void dwmac4_tx_queue_priority(struct mac_device_info *hw,
  85. u32 prio, u32 queue)
  86. {
  87. void __iomem *ioaddr = hw->pcsr;
  88. u32 base_register;
  89. u32 value;
  90. base_register = (queue < 4) ? GMAC_TXQ_PRTY_MAP0 : GMAC_TXQ_PRTY_MAP1;
  91. if (queue >= 4)
  92. queue -= 4;
  93. value = readl(ioaddr + base_register);
  94. value &= ~GMAC_TXQCTRL_PSTQX_MASK(queue);
  95. value |= (prio << GMAC_TXQCTRL_PSTQX_SHIFT(queue)) &
  96. GMAC_TXQCTRL_PSTQX_MASK(queue);
  97. writel(value, ioaddr + base_register);
  98. }
/* Route a special packet class to a given RX queue via GMAC_RXQ_CTRL1.
 *
 * @packet: 1-based packet class (the table below is indexed by
 *          packet - 1; its order must match the PACKET_* values).
 * @queue:  RX queue the class should be steered to.
 */
static void dwmac4_rx_queue_routing(struct mac_device_info *hw,
				    u8 packet, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	/* mask/shift pairs for each routing field of GMAC_RXQ_CTRL1 */
	static const struct stmmac_rx_routing route_possibilities[] = {
		{ GMAC_RXQCTRL_AVCPQ_MASK, GMAC_RXQCTRL_AVCPQ_SHIFT },
		{ GMAC_RXQCTRL_PTPQ_MASK, GMAC_RXQCTRL_PTPQ_SHIFT },
		{ GMAC_RXQCTRL_DCBCPQ_MASK, GMAC_RXQCTRL_DCBCPQ_SHIFT },
		{ GMAC_RXQCTRL_UPQ_MASK, GMAC_RXQCTRL_UPQ_SHIFT },
		{ GMAC_RXQCTRL_MCBCQ_MASK, GMAC_RXQCTRL_MCBCQ_SHIFT },
	};

	value = readl(ioaddr + GMAC_RXQ_CTRL1);

	/* routing configuration: write the queue number into this
	 * class's field
	 */
	value &= ~route_possibilities[packet - 1].reg_mask;
	value |= (queue << route_possibilities[packet-1].reg_shift) &
		 route_possibilities[packet - 1].reg_mask;

	/* some packets require extra ops: these classes also have a
	 * dedicated enable bit that must be set for the routing to apply
	 */
	if (packet == PACKET_AVCPQ) {
		value &= ~GMAC_RXQCTRL_TACPQE;
		value |= 0x1 << GMAC_RXQCTRL_TACPQE_SHIFT;
	} else if (packet == PACKET_MCBCQ) {
		value &= ~GMAC_RXQCTRL_MCBCQEN;
		value |= 0x1 << GMAC_RXQCTRL_MCBCQEN_SHIFT;
	}

	writel(value, ioaddr + GMAC_RXQ_CTRL1);
}
  126. static void dwmac4_prog_mtl_rx_algorithms(struct mac_device_info *hw,
  127. u32 rx_alg)
  128. {
  129. void __iomem *ioaddr = hw->pcsr;
  130. u32 value = readl(ioaddr + MTL_OPERATION_MODE);
  131. value &= ~MTL_OPERATION_RAA;
  132. switch (rx_alg) {
  133. case MTL_RX_ALGORITHM_SP:
  134. value |= MTL_OPERATION_RAA_SP;
  135. break;
  136. case MTL_RX_ALGORITHM_WSP:
  137. value |= MTL_OPERATION_RAA_WSP;
  138. break;
  139. default:
  140. break;
  141. }
  142. writel(value, ioaddr + MTL_OPERATION_MODE);
  143. }
  144. static void dwmac4_prog_mtl_tx_algorithms(struct mac_device_info *hw,
  145. u32 tx_alg)
  146. {
  147. void __iomem *ioaddr = hw->pcsr;
  148. u32 value = readl(ioaddr + MTL_OPERATION_MODE);
  149. value &= ~MTL_OPERATION_SCHALG_MASK;
  150. switch (tx_alg) {
  151. case MTL_TX_ALGORITHM_WRR:
  152. value |= MTL_OPERATION_SCHALG_WRR;
  153. break;
  154. case MTL_TX_ALGORITHM_WFQ:
  155. value |= MTL_OPERATION_SCHALG_WFQ;
  156. break;
  157. case MTL_TX_ALGORITHM_DWRR:
  158. value |= MTL_OPERATION_SCHALG_DWRR;
  159. break;
  160. case MTL_TX_ALGORITHM_SP:
  161. value |= MTL_OPERATION_SCHALG_SP;
  162. break;
  163. default:
  164. break;
  165. }
  166. }
  167. static void dwmac4_set_mtl_tx_queue_weight(struct mac_device_info *hw,
  168. u32 weight, u32 queue)
  169. {
  170. void __iomem *ioaddr = hw->pcsr;
  171. u32 value = readl(ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue));
  172. value &= ~MTL_TXQ_WEIGHT_ISCQW_MASK;
  173. value |= weight & MTL_TXQ_WEIGHT_ISCQW_MASK;
  174. writel(value, ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue));
  175. }
  176. static void dwmac4_map_mtl_dma(struct mac_device_info *hw, u32 queue, u32 chan)
  177. {
  178. void __iomem *ioaddr = hw->pcsr;
  179. u32 value;
  180. if (queue < 4)
  181. value = readl(ioaddr + MTL_RXQ_DMA_MAP0);
  182. else
  183. value = readl(ioaddr + MTL_RXQ_DMA_MAP1);
  184. if (queue == 0 || queue == 4) {
  185. value &= ~MTL_RXQ_DMA_Q04MDMACH_MASK;
  186. value |= MTL_RXQ_DMA_Q04MDMACH(chan);
  187. } else {
  188. value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue);
  189. value |= MTL_RXQ_DMA_QXMDMACH(chan, queue);
  190. }
  191. if (queue < 4)
  192. writel(value, ioaddr + MTL_RXQ_DMA_MAP0);
  193. else
  194. writel(value, ioaddr + MTL_RXQ_DMA_MAP1);
  195. }
/* Configure the credit-based shaper of an AVB TX queue: enables the AV
 * algorithm, then programs send slope, idle slope (shared with the TX
 * weight register) and high/low credits.
 */
static void dwmac4_config_cbs(struct mac_device_info *hw,
			      u32 send_slope, u32 idle_slope,
			      u32 high_credit, u32 low_credit, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	pr_debug("Queue %d configured as AVB. Parameters:\n", queue);
	pr_debug("\tsend_slope: 0x%08x\n", send_slope);
	pr_debug("\tidle_slope: 0x%08x\n", idle_slope);
	pr_debug("\thigh_credit: 0x%08x\n", high_credit);
	pr_debug("\tlow_credit: 0x%08x\n", low_credit);

	/* enable AV algorithm (and credit control) */
	value = readl(ioaddr + MTL_ETSX_CTRL_BASE_ADDR(queue));
	value |= MTL_ETS_CTRL_AVALG;
	value |= MTL_ETS_CTRL_CC;
	writel(value, ioaddr + MTL_ETSX_CTRL_BASE_ADDR(queue));

	/* configure send slope */
	value = readl(ioaddr + MTL_SEND_SLP_CREDX_BASE_ADDR(queue));
	value &= ~MTL_SEND_SLP_CRED_SSC_MASK;
	value |= send_slope & MTL_SEND_SLP_CRED_SSC_MASK;
	writel(value, ioaddr + MTL_SEND_SLP_CREDX_BASE_ADDR(queue));

	/* configure idle slope (same register as tx weight) */
	dwmac4_set_mtl_tx_queue_weight(hw, idle_slope, queue);

	/* configure high credit */
	value = readl(ioaddr + MTL_HIGH_CREDX_BASE_ADDR(queue));
	value &= ~MTL_HIGH_CRED_HC_MASK;
	value |= high_credit & MTL_HIGH_CRED_HC_MASK;
	writel(value, ioaddr + MTL_HIGH_CREDX_BASE_ADDR(queue));

	/* configure low credit */
	value = readl(ioaddr + MTL_LOW_CREDX_BASE_ADDR(queue));
	value &= ~MTL_HIGH_CRED_LC_MASK;
	value |= low_credit & MTL_HIGH_CRED_LC_MASK;
	writel(value, ioaddr + MTL_LOW_CREDX_BASE_ADDR(queue));
}
  230. static void dwmac4_dump_regs(struct mac_device_info *hw, u32 *reg_space)
  231. {
  232. void __iomem *ioaddr = hw->pcsr;
  233. int i;
  234. for (i = 0; i < GMAC_REG_NUM; i++)
  235. reg_space[i] = readl(ioaddr + i * 4);
  236. }
  237. static int dwmac4_rx_ipc_enable(struct mac_device_info *hw)
  238. {
  239. void __iomem *ioaddr = hw->pcsr;
  240. u32 value = readl(ioaddr + GMAC_CONFIG);
  241. if (hw->rx_csum)
  242. value |= GMAC_CONFIG_IPC;
  243. else
  244. value &= ~GMAC_CONFIG_IPC;
  245. writel(value, ioaddr + GMAC_CONFIG);
  246. value = readl(ioaddr + GMAC_CONFIG);
  247. return !!(value & GMAC_CONFIG_IPC);
  248. }
  249. static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode)
  250. {
  251. void __iomem *ioaddr = hw->pcsr;
  252. unsigned int pmt = 0;
  253. u32 config;
  254. if (mode & WAKE_MAGIC) {
  255. pr_debug("GMAC: WOL Magic frame\n");
  256. pmt |= power_down | magic_pkt_en;
  257. }
  258. if (mode & WAKE_UCAST) {
  259. pr_debug("GMAC: WOL on global unicast\n");
  260. pmt |= power_down | global_unicast | wake_up_frame_en;
  261. }
  262. if (pmt) {
  263. /* The receiver must be enabled for WOL before powering down */
  264. config = readl(ioaddr + GMAC_CONFIG);
  265. config |= GMAC_CONFIG_RE;
  266. writel(config, ioaddr + GMAC_CONFIG);
  267. }
  268. writel(pmt, ioaddr + GMAC_PMT);
  269. }
/* Write a MAC address into perfect-filter entry reg_n (thin wrapper
 * around the shared dwmac4 address helper).
 */
static void dwmac4_set_umac_addr(struct mac_device_info *hw,
				 unsigned char *addr, unsigned int reg_n)
{
	void __iomem *ioaddr = hw->pcsr;

	stmmac_dwmac4_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
				   GMAC_ADDR_LOW(reg_n));
}
/* Read the MAC address stored in perfect-filter entry reg_n (thin
 * wrapper around the shared dwmac4 address helper).
 */
static void dwmac4_get_umac_addr(struct mac_device_info *hw,
				 unsigned char *addr, unsigned int reg_n)
{
	void __iomem *ioaddr = hw->pcsr;

	stmmac_dwmac4_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
				   GMAC_ADDR_LOW(reg_n));
}
  284. static void dwmac4_set_eee_mode(struct mac_device_info *hw,
  285. bool en_tx_lpi_clockgating)
  286. {
  287. void __iomem *ioaddr = hw->pcsr;
  288. u32 value;
  289. /* Enable the link status receive on RGMII, SGMII ore SMII
  290. * receive path and instruct the transmit to enter in LPI
  291. * state.
  292. */
  293. value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
  294. value |= GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA;
  295. if (en_tx_lpi_clockgating)
  296. value |= GMAC4_LPI_CTRL_STATUS_LPITCSE;
  297. writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
  298. }
  299. static void dwmac4_reset_eee_mode(struct mac_device_info *hw)
  300. {
  301. void __iomem *ioaddr = hw->pcsr;
  302. u32 value;
  303. value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
  304. value &= ~(GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA);
  305. writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
  306. }
  307. static void dwmac4_set_eee_pls(struct mac_device_info *hw, int link)
  308. {
  309. void __iomem *ioaddr = hw->pcsr;
  310. u32 value;
  311. value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
  312. if (link)
  313. value |= GMAC4_LPI_CTRL_STATUS_PLS;
  314. else
  315. value &= ~GMAC4_LPI_CTRL_STATUS_PLS;
  316. writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
  317. }
  318. static void dwmac4_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
  319. {
  320. void __iomem *ioaddr = hw->pcsr;
  321. int value = ((tw & 0xffff)) | ((ls & 0x3ff) << 16);
  322. /* Program the timers in the LPI timer control register:
  323. * LS: minimum time (ms) for which the link
  324. * status from PHY should be ok before transmitting
  325. * the LPI pattern.
  326. * TW: minimum time (us) for which the core waits
  327. * after it has stopped transmitting the LPI pattern.
  328. */
  329. writel(value, ioaddr + GMAC4_LPI_TIMER_CTRL);
  330. }
/* Program the RX packet filter from netdev state: promiscuous,
 * pass-all-multicast, 64-bit multicast hash, and perfect unicast
 * filtering (falling back to promiscuous when the unicast list
 * exceeds the hardware's filter entries).
 */
static void dwmac4_set_filter(struct mac_device_info *hw,
			      struct net_device *dev)
{
	void __iomem *ioaddr = (void __iomem *)dev->base_addr;
	unsigned int value = 0;

	if (dev->flags & IFF_PROMISC) {
		value = GMAC_PACKET_FILTER_PR;
	} else if ((dev->flags & IFF_ALLMULTI) ||
		   (netdev_mc_count(dev) > HASH_TABLE_SIZE)) {
		/* Pass all multi */
		value = GMAC_PACKET_FILTER_PM;
		/* Set the 64 bits of the HASH tab. To be updated if taller
		 * hash table is used
		 */
		writel(0xffffffff, ioaddr + GMAC_HASH_TAB_0_31);
		writel(0xffffffff, ioaddr + GMAC_HASH_TAB_32_63);
	} else if (!netdev_mc_empty(dev)) {
		u32 mc_filter[2];
		struct netdev_hw_addr *ha;

		/* Hash filter for multicast */
		value = GMAC_PACKET_FILTER_HMC;

		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
			/* The upper 6 bits of the calculated CRC are used to
			 * index the content of the Hash Table Reg 0 and 1.
			 */
			int bit_nr =
				(bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26);
			/* The most significant bit determines the register
			 * to use while the other 5 bits determines the bit
			 * within the selected register
			 */
			mc_filter[bit_nr >> 5] |= (1 << (bit_nr & 0x1F));
		}
		writel(mc_filter[0], ioaddr + GMAC_HASH_TAB_0_31);
		writel(mc_filter[1], ioaddr + GMAC_HASH_TAB_32_63);
	}

	/* Handle multiple unicast addresses */
	if (netdev_uc_count(dev) > hw->unicast_filter_entries) {
		/* Switch to promiscuous mode if more than 128 addrs
		 * are required
		 */
		value |= GMAC_PACKET_FILTER_PR;
	} else {
		struct netdev_hw_addr *ha;
		int reg = 1;

		/* Fill perfect-filter slots starting at entry 1
		 * (entry 0 presumably holds the device's own address —
		 * NOTE(review): confirm against the MAC setup path).
		 */
		netdev_for_each_uc_addr(ha, dev) {
			dwmac4_set_umac_addr(hw, ha->addr, reg);
			reg++;
		}

		/* Zero out the remaining, unused perfect-filter entries */
		while (reg <= GMAC_MAX_PERFECT_ADDRESSES) {
			writel(0, ioaddr + GMAC_ADDR_HIGH(reg));
			writel(0, ioaddr + GMAC_ADDR_LOW(reg));
			reg++;
		}
	}

	writel(value, ioaddr + GMAC_PACKET_FILTER);
}
  389. static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
  390. unsigned int fc, unsigned int pause_time,
  391. u32 tx_cnt)
  392. {
  393. void __iomem *ioaddr = hw->pcsr;
  394. unsigned int flow = 0;
  395. u32 queue = 0;
  396. pr_debug("GMAC Flow-Control:\n");
  397. if (fc & FLOW_RX) {
  398. pr_debug("\tReceive Flow-Control ON\n");
  399. flow |= GMAC_RX_FLOW_CTRL_RFE;
  400. }
  401. writel(flow, ioaddr + GMAC_RX_FLOW_CTRL);
  402. if (fc & FLOW_TX) {
  403. pr_debug("\tTransmit Flow-Control ON\n");
  404. if (duplex)
  405. pr_debug("\tduplex mode: PAUSE %d\n", pause_time);
  406. for (queue = 0; queue < tx_cnt; queue++) {
  407. flow = GMAC_TX_FLOW_CTRL_TFE;
  408. if (duplex)
  409. flow |=
  410. (pause_time << GMAC_TX_FLOW_CTRL_PT_SHIFT);
  411. writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
  412. }
  413. } else {
  414. for (queue = 0; queue < tx_cnt; queue++)
  415. writel(0, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
  416. }
  417. }
/* Control PCS auto-negotiation (thin wrapper fixing the GMAC4 PCS base
 * offset for the shared stmmac PCS helper).
 */
static void dwmac4_ctrl_ane(void __iomem *ioaddr, bool ane, bool srgmi_ral,
			    bool loopback)
{
	dwmac_ctrl_ane(ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback);
}
/* Restart PCS auto-negotiation (wrapper with the GMAC4 PCS base) */
static void dwmac4_rane(void __iomem *ioaddr, bool restart)
{
	dwmac_rane(ioaddr, GMAC_PCS_BASE, restart);
}
/* Fetch PCS advertised/link-partner ability (wrapper with the GMAC4
 * PCS base).
 */
static void dwmac4_get_adv_lp(void __iomem *ioaddr, struct rgmii_adv *adv)
{
	dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv);
}
  431. /* RGMII or SMII interface */
  432. static void dwmac4_phystatus(void __iomem *ioaddr, struct stmmac_extra_stats *x)
  433. {
  434. u32 status;
  435. status = readl(ioaddr + GMAC_PHYIF_CONTROL_STATUS);
  436. x->irq_rgmii_n++;
  437. /* Check the link status */
  438. if (status & GMAC_PHYIF_CTRLSTATUS_LNKSTS) {
  439. int speed_value;
  440. x->pcs_link = 1;
  441. speed_value = ((status & GMAC_PHYIF_CTRLSTATUS_SPEED) >>
  442. GMAC_PHYIF_CTRLSTATUS_SPEED_SHIFT);
  443. if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_125)
  444. x->pcs_speed = SPEED_1000;
  445. else if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_25)
  446. x->pcs_speed = SPEED_100;
  447. else
  448. x->pcs_speed = SPEED_10;
  449. x->pcs_duplex = (status & GMAC_PHYIF_CTRLSTATUS_LNKMOD_MASK);
  450. pr_info("Link is Up - %d/%s\n", (int)x->pcs_speed,
  451. x->pcs_duplex ? "Full" : "Half");
  452. } else {
  453. x->pcs_link = 0;
  454. pr_info("Link is Down\n");
  455. }
  456. }
  457. static int dwmac4_irq_mtl_status(struct mac_device_info *hw, u32 chan)
  458. {
  459. void __iomem *ioaddr = hw->pcsr;
  460. u32 mtl_int_qx_status;
  461. int ret = 0;
  462. mtl_int_qx_status = readl(ioaddr + MTL_INT_STATUS);
  463. /* Check MTL Interrupt */
  464. if (mtl_int_qx_status & MTL_INT_QX(chan)) {
  465. /* read Queue x Interrupt status */
  466. u32 status = readl(ioaddr + MTL_CHAN_INT_CTRL(chan));
  467. if (status & MTL_RX_OVERFLOW_INT) {
  468. /* clear Interrupt */
  469. writel(status | MTL_RX_OVERFLOW_INT,
  470. ioaddr + MTL_CHAN_INT_CTRL(chan));
  471. ret = CORE_IRQ_MTL_RX_OVERFLOW;
  472. }
  473. }
  474. return ret;
  475. }
/* Main MAC interrupt handler: decodes GMAC_INT_STATUS (masked by the
 * enabled sources), bumps the matching stats, and acknowledges the
 * sources that are cleared on read. Returns CORE_IRQ_* flags for the
 * LPI entry/exit events.
 */
static int dwmac4_irq_status(struct mac_device_info *hw,
			     struct stmmac_extra_stats *x)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
	u32 intr_enable = readl(ioaddr + GMAC_INT_EN);
	int ret = 0;

	/* Discard disabled bits */
	intr_status &= intr_enable;

	/* Not used events (e.g. MMC interrupts) are not handled. */
	if ((intr_status & mmc_tx_irq))
		x->mmc_tx_irq_n++;
	if (unlikely(intr_status & mmc_rx_irq))
		x->mmc_rx_irq_n++;
	if (unlikely(intr_status & mmc_rx_csum_offload_irq))
		x->mmc_rx_csum_offload_irq_n++;

	/* Clear the PMT bits 5 and 6 by reading the PMT status reg */
	if (unlikely(intr_status & pmt_irq)) {
		readl(ioaddr + GMAC_PMT);
		x->irq_receive_pmt_irq_n++;
	}

	/* MAC tx/rx EEE LPI entry/exit interrupts */
	if (intr_status & lpi_irq) {
		/* Clear LPI interrupt by reading MAC_LPI_Control_Status */
		u32 status = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);

		if (status & GMAC4_LPI_CTRL_STATUS_TLPIEN) {
			ret |= CORE_IRQ_TX_PATH_IN_LPI_MODE;
			x->irq_tx_path_in_lpi_mode_n++;
		}
		if (status & GMAC4_LPI_CTRL_STATUS_TLPIEX) {
			ret |= CORE_IRQ_TX_PATH_EXIT_LPI_MODE;
			x->irq_tx_path_exit_lpi_mode_n++;
		}
		if (status & GMAC4_LPI_CTRL_STATUS_RLPIEN)
			x->irq_rx_path_in_lpi_mode_n++;
		if (status & GMAC4_LPI_CTRL_STATUS_RLPIEX)
			x->irq_rx_path_exit_lpi_mode_n++;
	}

	/* Delegate PCS events to the shared handler, then decode the
	 * RGMII/SMII PHY interface status if it raised an interrupt.
	 */
	dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x);
	if (intr_status & PCS_RGSMIIIS_IRQ)
		dwmac4_phystatus(ioaddr, x);

	return ret;
}
/* Decode the MTL per-queue TX/RX debug registers and the GMAC debug
 * register into the extra-stats counters (consumed by ethtool).
 */
static void dwmac4_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x,
			 u32 rx_queues, u32 tx_queues)
{
	u32 value;
	u32 queue;

	/* Per-queue TX debug: FIFO fill/read-controller states */
	for (queue = 0; queue < tx_queues; queue++) {
		value = readl(ioaddr + MTL_CHAN_TX_DEBUG(queue));

		if (value & MTL_DEBUG_TXSTSFSTS)
			x->mtl_tx_status_fifo_full++;
		if (value & MTL_DEBUG_TXFSTS)
			x->mtl_tx_fifo_not_empty++;
		if (value & MTL_DEBUG_TWCSTS)
			x->mmtl_fifo_ctrl++;
		if (value & MTL_DEBUG_TRCSTS_MASK) {
			u32 trcsts = (value & MTL_DEBUG_TRCSTS_MASK)
				     >> MTL_DEBUG_TRCSTS_SHIFT;

			if (trcsts == MTL_DEBUG_TRCSTS_WRITE)
				x->mtl_tx_fifo_read_ctrl_write++;
			else if (trcsts == MTL_DEBUG_TRCSTS_TXW)
				x->mtl_tx_fifo_read_ctrl_wait++;
			else if (trcsts == MTL_DEBUG_TRCSTS_READ)
				x->mtl_tx_fifo_read_ctrl_read++;
			else
				x->mtl_tx_fifo_read_ctrl_idle++;
		}
		if (value & MTL_DEBUG_TXPAUSED)
			x->mac_tx_in_pause++;
	}

	/* Per-queue RX debug: FIFO fill level and read-controller state.
	 * NOTE(review): the RXFSTS field is shifted with
	 * MTL_DEBUG_RRCSTS_SHIFT — presumably the two shifts coincide;
	 * confirm against the register layout in dwmac4.h.
	 */
	for (queue = 0; queue < rx_queues; queue++) {
		value = readl(ioaddr + MTL_CHAN_RX_DEBUG(queue));

		if (value & MTL_DEBUG_RXFSTS_MASK) {
			u32 rxfsts = (value & MTL_DEBUG_RXFSTS_MASK)
				     >> MTL_DEBUG_RRCSTS_SHIFT;

			if (rxfsts == MTL_DEBUG_RXFSTS_FULL)
				x->mtl_rx_fifo_fill_level_full++;
			else if (rxfsts == MTL_DEBUG_RXFSTS_AT)
				x->mtl_rx_fifo_fill_above_thresh++;
			else if (rxfsts == MTL_DEBUG_RXFSTS_BT)
				x->mtl_rx_fifo_fill_below_thresh++;
			else
				x->mtl_rx_fifo_fill_level_empty++;
		}
		if (value & MTL_DEBUG_RRCSTS_MASK) {
			u32 rrcsts = (value & MTL_DEBUG_RRCSTS_MASK) >>
				     MTL_DEBUG_RRCSTS_SHIFT;

			if (rrcsts == MTL_DEBUG_RRCSTS_FLUSH)
				x->mtl_rx_fifo_read_ctrl_flush++;
			else if (rrcsts == MTL_DEBUG_RRCSTS_RSTAT)
				x->mtl_rx_fifo_read_ctrl_read_data++;
			else if (rrcsts == MTL_DEBUG_RRCSTS_RDATA)
				x->mtl_rx_fifo_read_ctrl_status++;
			else
				x->mtl_rx_fifo_read_ctrl_idle++;
		}
		if (value & MTL_DEBUG_RWCSTS)
			x->mtl_rx_fifo_ctrl_active++;
	}

	/* GMAC debug: TX frame controller, protocol engines, RX FIFO */
	value = readl(ioaddr + GMAC_DEBUG);

	if (value & GMAC_DEBUG_TFCSTS_MASK) {
		u32 tfcsts = (value & GMAC_DEBUG_TFCSTS_MASK)
			     >> GMAC_DEBUG_TFCSTS_SHIFT;

		if (tfcsts == GMAC_DEBUG_TFCSTS_XFER)
			x->mac_tx_frame_ctrl_xfer++;
		else if (tfcsts == GMAC_DEBUG_TFCSTS_GEN_PAUSE)
			x->mac_tx_frame_ctrl_pause++;
		else if (tfcsts == GMAC_DEBUG_TFCSTS_WAIT)
			x->mac_tx_frame_ctrl_wait++;
		else
			x->mac_tx_frame_ctrl_idle++;
	}
	if (value & GMAC_DEBUG_TPESTS)
		x->mac_gmii_tx_proto_engine++;
	if (value & GMAC_DEBUG_RFCFCSTS_MASK)
		x->mac_rx_frame_ctrl_fifo = (value & GMAC_DEBUG_RFCFCSTS_MASK)
					    >> GMAC_DEBUG_RFCFCSTS_SHIFT;
	if (value & GMAC_DEBUG_RPESTS)
		x->mac_gmii_rx_proto_engine++;
}
/* Callback table for DWMAC4 cores; uses the plain stmmac_set_mac
 * helper (contrast with dwmac410_ops below, which uses the dwmac4-
 * specific variant).
 */
const struct stmmac_ops dwmac4_ops = {
	.core_init = dwmac4_core_init,
	.set_mac = stmmac_set_mac,
	.rx_ipc = dwmac4_rx_ipc_enable,
	.rx_queue_enable = dwmac4_rx_queue_enable,
	.rx_queue_prio = dwmac4_rx_queue_priority,
	.tx_queue_prio = dwmac4_tx_queue_priority,
	.rx_queue_routing = dwmac4_rx_queue_routing,
	.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwmac4_map_mtl_dma,
	.config_cbs = dwmac4_config_cbs,
	.dump_regs = dwmac4_dump_regs,
	.host_irq_status = dwmac4_irq_status,
	.host_mtl_irq_status = dwmac4_irq_mtl_status,
	.flow_ctrl = dwmac4_flow_ctrl,
	.pmt = dwmac4_pmt,
	.set_umac_addr = dwmac4_set_umac_addr,
	.get_umac_addr = dwmac4_get_umac_addr,
	.set_eee_mode = dwmac4_set_eee_mode,
	.reset_eee_mode = dwmac4_reset_eee_mode,
	.set_eee_timer = dwmac4_set_eee_timer,
	.set_eee_pls = dwmac4_set_eee_pls,
	.pcs_ctrl_ane = dwmac4_ctrl_ane,
	.pcs_rane = dwmac4_rane,
	.pcs_get_adv_lp = dwmac4_get_adv_lp,
	.debug = dwmac4_debug,
	.set_filter = dwmac4_set_filter,
};
/* Callback table for DWMAC 4.10-class cores; identical to dwmac4_ops
 * except .set_mac uses stmmac_dwmac4_set_mac.
 */
const struct stmmac_ops dwmac410_ops = {
	.core_init = dwmac4_core_init,
	.set_mac = stmmac_dwmac4_set_mac,
	.rx_ipc = dwmac4_rx_ipc_enable,
	.rx_queue_enable = dwmac4_rx_queue_enable,
	.rx_queue_prio = dwmac4_rx_queue_priority,
	.tx_queue_prio = dwmac4_tx_queue_priority,
	.rx_queue_routing = dwmac4_rx_queue_routing,
	.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwmac4_map_mtl_dma,
	.config_cbs = dwmac4_config_cbs,
	.dump_regs = dwmac4_dump_regs,
	.host_irq_status = dwmac4_irq_status,
	.host_mtl_irq_status = dwmac4_irq_mtl_status,
	.flow_ctrl = dwmac4_flow_ctrl,
	.pmt = dwmac4_pmt,
	.set_umac_addr = dwmac4_set_umac_addr,
	.get_umac_addr = dwmac4_get_umac_addr,
	.set_eee_mode = dwmac4_set_eee_mode,
	.reset_eee_mode = dwmac4_reset_eee_mode,
	.set_eee_timer = dwmac4_set_eee_timer,
	.set_eee_pls = dwmac4_set_eee_pls,
	.pcs_ctrl_ane = dwmac4_ctrl_ane,
	.pcs_rane = dwmac4_rane,
	.pcs_get_adv_lp = dwmac4_get_adv_lp,
	.debug = dwmac4_debug,
	.set_filter = dwmac4_set_filter,
};
/* Callback table for DWMAC 5.10-class cores; extends dwmac410_ops
 * with the dwmac5 safety-feature, RX parser (rxp) and flexible PPS
 * callbacks.
 */
const struct stmmac_ops dwmac510_ops = {
	.core_init = dwmac4_core_init,
	.set_mac = stmmac_dwmac4_set_mac,
	.rx_ipc = dwmac4_rx_ipc_enable,
	.rx_queue_enable = dwmac4_rx_queue_enable,
	.rx_queue_prio = dwmac4_rx_queue_priority,
	.tx_queue_prio = dwmac4_tx_queue_priority,
	.rx_queue_routing = dwmac4_rx_queue_routing,
	.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwmac4_map_mtl_dma,
	.config_cbs = dwmac4_config_cbs,
	.dump_regs = dwmac4_dump_regs,
	.host_irq_status = dwmac4_irq_status,
	.host_mtl_irq_status = dwmac4_irq_mtl_status,
	.flow_ctrl = dwmac4_flow_ctrl,
	.pmt = dwmac4_pmt,
	.set_umac_addr = dwmac4_set_umac_addr,
	.get_umac_addr = dwmac4_get_umac_addr,
	.set_eee_mode = dwmac4_set_eee_mode,
	.reset_eee_mode = dwmac4_reset_eee_mode,
	.set_eee_timer = dwmac4_set_eee_timer,
	.set_eee_pls = dwmac4_set_eee_pls,
	.pcs_ctrl_ane = dwmac4_ctrl_ane,
	.pcs_rane = dwmac4_rane,
	.pcs_get_adv_lp = dwmac4_get_adv_lp,
	.debug = dwmac4_debug,
	.set_filter = dwmac4_set_filter,
	.safety_feat_config = dwmac5_safety_feat_config,
	.safety_feat_irq_status = dwmac5_safety_feat_irq_status,
	.safety_feat_dump = dwmac5_safety_feat_dump,
	.rxp_config = dwmac5_rxp_config,
	.flex_pps_config = dwmac5_flex_pps_config,
};
  693. int dwmac4_setup(struct stmmac_priv *priv)
  694. {
  695. struct mac_device_info *mac = priv->hw;
  696. dev_info(priv->device, "\tDWMAC4/5\n");
  697. priv->dev->priv_flags |= IFF_UNICAST_FLT;
  698. mac->pcsr = priv->ioaddr;
  699. mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
  700. mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
  701. mac->mcast_bits_log2 = 0;
  702. if (mac->multicast_filter_bins)
  703. mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
  704. mac->link.duplex = GMAC_CONFIG_DM;
  705. mac->link.speed10 = GMAC_CONFIG_PS;
  706. mac->link.speed100 = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
  707. mac->link.speed1000 = 0;
  708. mac->link.speed_mask = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
  709. mac->mii.addr = GMAC_MDIO_ADDR;
  710. mac->mii.data = GMAC_MDIO_DATA;
  711. mac->mii.addr_shift = 21;
  712. mac->mii.addr_mask = GENMASK(25, 21);
  713. mac->mii.reg_shift = 16;
  714. mac->mii.reg_mask = GENMASK(20, 16);
  715. mac->mii.clk_csr_shift = 8;
  716. mac->mii.clk_csr_mask = GENMASK(11, 8);
  717. return 0;
  718. }