xgene_enet_sgmac.c

/* Applied Micro X-Gene SoC Ethernet Driver
 *
 * Copyright (c) 2014, Applied Micro Circuits Corporation
 * Authors: Iyappan Subramanian <isubramanian@apm.com>
 *	    Keyur Chudgar <kchudgar@apm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"
#include "xgene_enet_sgmac.h"
#include "xgene_enet_xgmac.h"
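
/* Accessor helpers for the Ethernet CSR, clock/reset, ring-interface,
 * diagnostic and MCX MAC CSR register regions.
 */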
static void xgene_enet_wr_csr(struct xgene_enet_pdata *p, u32 offset, u32 val)
{
	iowrite32(val, p->eth_csr_addr + offset);
}

static void xgene_enet_wr_clkrst_csr(struct xgene_enet_pdata *p, u32 offset,
				     u32 val)
{
	iowrite32(val, p->base_addr + offset);
}

static void xgene_enet_wr_ring_if(struct xgene_enet_pdata *p,
				  u32 offset, u32 val)
{
	iowrite32(val, p->eth_ring_if_addr + offset);
}

static void xgene_enet_wr_diag_csr(struct xgene_enet_pdata *p,
				   u32 offset, u32 val)
{
	iowrite32(val, p->eth_diag_csr_addr + offset);
}

static void xgene_enet_wr_mcx_csr(struct xgene_enet_pdata *pdata,
				  u32 offset, u32 val)
{
	void __iomem *addr = pdata->mcx_mac_csr_addr + offset;

	iowrite32(val, addr);
}

static u32 xgene_enet_rd_csr(struct xgene_enet_pdata *p, u32 offset)
{
	return ioread32(p->eth_csr_addr + offset);
}

static u32 xgene_enet_rd_diag_csr(struct xgene_enet_pdata *p, u32 offset)
{
	return ioread32(p->eth_diag_csr_addr + offset);
}

static u32 xgene_enet_rd_mcx_csr(struct xgene_enet_pdata *p, u32 offset)
{
	return ioread32(p->mcx_mac_csr_addr + offset);
}
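
/* Release the Ethernet block RAMs from shutdown and poll until the
 * memories report ready; gives up with -ENODEV after ~1ms of polling.
 */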
static int xgene_enet_ecc_init(struct xgene_enet_pdata *p)
{
	struct net_device *ndev = p->ndev;
	u32 data, shutdown;
	int i = 0;

	shutdown = xgene_enet_rd_diag_csr(p, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR);
	data = xgene_enet_rd_diag_csr(p, ENET_BLOCK_MEM_RDY_ADDR);

	if (!shutdown && data == ~0U) {
		netdev_dbg(ndev, "+ ecc_init done, skipping\n");
		return 0;
	}

	xgene_enet_wr_diag_csr(p, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0);
	do {
		usleep_range(100, 110);
		data = xgene_enet_rd_diag_csr(p, ENET_BLOCK_MEM_RDY_ADDR);
		if (data == ~0U)
			return 0;
	} while (++i < 10);

	netdev_err(ndev, "Failed to release memory from shutdown\n");
	return -ENODEV;
}
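
/* Read the combined drop counter for this port: the ICM field reports
 * RX drops and the ECM field TX drops.
 */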
static void xgene_sgmac_get_drop_cnt(struct xgene_enet_pdata *pdata,
				     u32 *rx, u32 *tx)
{
	u32 addr, count;

	addr = (pdata->enet_id != XGENE_ENET1) ?
		XG_MCX_ICM_ECM_DROP_COUNT_REG0_ADDR :
		ICM_ECM_DROP_COUNT_REG0_ADDR + pdata->port_id * OFFSET_4;
	count = xgene_enet_rd_mcx_csr(pdata, addr);
	*rx = ICM_DROP_COUNT(count);
	*tx = ECM_DROP_COUNT(count);
	/* Errata: 10GE_4 - ICM_ECM_DROP_COUNT not clear-on-read */
	addr = (pdata->enet_id != XGENE_ENET1) ?
		XG_MCX_ECM_CONFIG0_REG_0_ADDR :
		ECM_CONFIG0_REG_0_ADDR + pdata->port_id * OFFSET_4;
	xgene_enet_rd_mcx_csr(pdata, addr);
}

static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *p)
{
	u32 val;

	val = (p->enet_id == XGENE_ENET1) ? 0xffffffff : 0;
	xgene_enet_wr_ring_if(p, ENET_CFGSSQMIWQASSOC_ADDR, val);
	xgene_enet_wr_ring_if(p, ENET_CFGSSQMIFPQASSOC_ADDR, val);
}
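
/* Write a PHY register over the MII management interface, polling the
 * BUSY indicator until the write cycle completes.
 */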
static void xgene_mii_phy_write(struct xgene_enet_pdata *p, u8 phy_id,
				u32 reg, u16 data)
{
	u32 addr, wr_data, done;
	int i;

	addr = PHY_ADDR(phy_id) | REG_ADDR(reg);
	xgene_enet_wr_mac(p, MII_MGMT_ADDRESS_ADDR, addr);

	wr_data = PHY_CONTROL(data);
	xgene_enet_wr_mac(p, MII_MGMT_CONTROL_ADDR, wr_data);

	for (i = 0; i < 10; i++) {
		done = xgene_enet_rd_mac(p, MII_MGMT_INDICATORS_ADDR);
		if (!(done & BUSY_MASK))
			return;
		usleep_range(10, 20);
	}

	netdev_err(p->ndev, "MII_MGMT write failed\n");
}
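
/* Read a PHY register over the MII management interface; returns 0 if
 * the read cycle never leaves the BUSY state.
 */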
static u32 xgene_mii_phy_read(struct xgene_enet_pdata *p, u8 phy_id, u32 reg)
{
	u32 addr, data, done;
	int i;

	addr = PHY_ADDR(phy_id) | REG_ADDR(reg);
	xgene_enet_wr_mac(p, MII_MGMT_ADDRESS_ADDR, addr);
	xgene_enet_wr_mac(p, MII_MGMT_COMMAND_ADDR, READ_CYCLE_MASK);

	for (i = 0; i < 10; i++) {
		done = xgene_enet_rd_mac(p, MII_MGMT_INDICATORS_ADDR);
		if (!(done & BUSY_MASK)) {
			data = xgene_enet_rd_mac(p, MII_MGMT_STATUS_ADDR);
			xgene_enet_wr_mac(p, MII_MGMT_COMMAND_ADDR, 0);
			return data;
		}
		usleep_range(10, 20);
	}

	netdev_err(p->ndev, "MII_MGMT read failed\n");
	return 0;
}

static void xgene_sgmac_reset(struct xgene_enet_pdata *p)
{
	xgene_enet_wr_mac(p, MAC_CONFIG_1_ADDR, SOFT_RESET1);
	xgene_enet_wr_mac(p, MAC_CONFIG_1_ADDR, 0);
}

static void xgene_sgmac_set_mac_addr(struct xgene_enet_pdata *p)
{
	u32 addr0, addr1;
	u8 *dev_addr = p->ndev->dev_addr;

	addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
		(dev_addr[1] << 8) | dev_addr[0];
	xgene_enet_wr_mac(p, STATION_ADDR0_ADDR, addr0);

	addr1 = xgene_enet_rd_mac(p, STATION_ADDR1_ADDR);
	addr1 |= (dev_addr[5] << 24) | (dev_addr[4] << 16);
	xgene_enet_wr_mac(p, STATION_ADDR1_ADDR, addr1);
}
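
/* Sample the SGMII base-page ability register: update p->phy_speed
 * from the advertised speed bits and return the link-up status.
 */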
static u32 xgene_enet_link_status(struct xgene_enet_pdata *p)
{
	u32 data;

	data = xgene_mii_phy_read(p, INT_PHY_ADDR,
				  SGMII_BASE_PAGE_ABILITY_ADDR >> 2);
	if (LINK_SPEED(data) == PHY_SPEED_1000)
		p->phy_speed = SPEED_1000;
	else if (LINK_SPEED(data) == PHY_SPEED_100)
		p->phy_speed = SPEED_100;
	else
		p->phy_speed = SPEED_10;

	return data & LINK_UP;
}
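
/* Reset the internal SGMII PHY with auto-negotiation enabled, pulsing
 * the TBI control register around the operation.
 */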
static void xgene_sgmii_configure(struct xgene_enet_pdata *p)
{
	xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_TBI_CONTROL_ADDR >> 2,
			    0x8000);
	xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_CONTROL_ADDR >> 2, 0x9000);
	xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_TBI_CONTROL_ADDR >> 2, 0);
}

static void xgene_sgmii_tbi_control_reset(struct xgene_enet_pdata *p)
{
	xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_TBI_CONTROL_ADDR >> 2,
			    0x8000);
	xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_TBI_CONTROL_ADDR >> 2, 0);
}

static void xgene_sgmii_reset(struct xgene_enet_pdata *p)
{
	u32 value;

	if (p->phy_speed == SPEED_UNKNOWN)
		return;

	value = xgene_mii_phy_read(p, INT_PHY_ADDR,
				   SGMII_BASE_PAGE_ABILITY_ADDR >> 2);
	if (!(value & LINK_UP))
		xgene_sgmii_tbi_control_reset(p);
}
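
/* Program the MAC interface mode and the ICM MAC-mode/wait-state
 * fields to match the negotiated speed (10/100/1000); gigabit mode
 * additionally sets the CFG_BYPASS_UNISEC_TX/RX bits in the debug CSR.
 */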
static void xgene_sgmac_set_speed(struct xgene_enet_pdata *p)
{
	u32 icm0_addr, icm2_addr, debug_addr;
	u32 icm0, icm2, intf_ctl;
	u32 mc2, value;

	xgene_sgmii_reset(p);

	if (p->enet_id == XGENE_ENET1) {
		icm0_addr = ICM_CONFIG0_REG_0_ADDR + p->port_id * OFFSET_8;
		icm2_addr = ICM_CONFIG2_REG_0_ADDR + p->port_id * OFFSET_4;
		debug_addr = DEBUG_REG_ADDR;
	} else {
		icm0_addr = XG_MCX_ICM_CONFIG0_REG_0_ADDR;
		icm2_addr = XG_MCX_ICM_CONFIG2_REG_0_ADDR;
		debug_addr = XG_DEBUG_REG_ADDR;
	}

	icm0 = xgene_enet_rd_mcx_csr(p, icm0_addr);
	icm2 = xgene_enet_rd_mcx_csr(p, icm2_addr);
	mc2 = xgene_enet_rd_mac(p, MAC_CONFIG_2_ADDR);
	intf_ctl = xgene_enet_rd_mac(p, INTERFACE_CONTROL_ADDR);

	switch (p->phy_speed) {
	case SPEED_10:
		ENET_INTERFACE_MODE2_SET(&mc2, 1);
		intf_ctl &= ~(ENET_LHD_MODE | ENET_GHD_MODE);
		CFG_MACMODE_SET(&icm0, 0);
		CFG_WAITASYNCRD_SET(&icm2, 500);
		break;
	case SPEED_100:
		ENET_INTERFACE_MODE2_SET(&mc2, 1);
		intf_ctl &= ~ENET_GHD_MODE;
		intf_ctl |= ENET_LHD_MODE;
		CFG_MACMODE_SET(&icm0, 1);
		CFG_WAITASYNCRD_SET(&icm2, 80);
		break;
	default:
		ENET_INTERFACE_MODE2_SET(&mc2, 2);
		intf_ctl &= ~ENET_LHD_MODE;
		intf_ctl |= ENET_GHD_MODE;
		CFG_MACMODE_SET(&icm0, 2);
		CFG_WAITASYNCRD_SET(&icm2, 16);
		value = xgene_enet_rd_csr(p, debug_addr);
		value |= CFG_BYPASS_UNISEC_TX | CFG_BYPASS_UNISEC_RX;
		xgene_enet_wr_csr(p, debug_addr, value);
		break;
	}

	mc2 |= FULL_DUPLEX2 | PAD_CRC;
	xgene_enet_wr_mac(p, MAC_CONFIG_2_ADDR, mc2);
	xgene_enet_wr_mac(p, INTERFACE_CONTROL_ADDR, intf_ctl);
	xgene_enet_wr_mcx_csr(p, icm0_addr, icm0);
	xgene_enet_wr_mcx_csr(p, icm2_addr, icm2);
}

static void xgene_sgmac_set_frame_size(struct xgene_enet_pdata *pdata, int size)
{
	xgene_enet_wr_mac(pdata, MAX_FRAME_LEN_ADDR, size);
}
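
/* Kick off SGMII auto-negotiation and poll (up to roughly 10-20ms) for
 * both auto-negotiation complete and link status.
 */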
static void xgene_sgmii_enable_autoneg(struct xgene_enet_pdata *p)
{
	u32 data, loop = 10;

	xgene_sgmii_configure(p);

	while (loop--) {
		data = xgene_mii_phy_read(p, INT_PHY_ADDR,
					  SGMII_STATUS_ADDR >> 2);
		if ((data & AUTO_NEG_COMPLETE) && (data & LINK_STATUS))
			break;
		usleep_range(1000, 2000);
	}

	if (!(data & AUTO_NEG_COMPLETE) || !(data & LINK_STATUS))
		netdev_err(p->ndev, "Auto-negotiation failed\n");
}

static void xgene_sgmac_rxtx(struct xgene_enet_pdata *p, u32 bits, bool set)
{
	u32 data;

	data = xgene_enet_rd_mac(p, MAC_CONFIG_1_ADDR);

	if (set)
		data |= bits;
	else
		data &= ~bits;

	xgene_enet_wr_mac(p, MAC_CONFIG_1_ADDR, data);
}

static void xgene_sgmac_flowctl_tx(struct xgene_enet_pdata *p, bool enable)
{
	xgene_sgmac_rxtx(p, TX_FLOW_EN, enable);

	p->mac_ops->enable_tx_pause(p, enable);
}

static void xgene_sgmac_flowctl_rx(struct xgene_enet_pdata *pdata, bool enable)
{
	xgene_sgmac_rxtx(pdata, RX_FLOW_EN, enable);
}
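
/* SGMAC bring-up: reset the MAC, run auto-negotiation, apply speed and
 * MAC address, then program the MDC clock, bufpool-timeout drop,
 * pause-frame quanta/thresholds and traffic-gating bypass.
 */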
static void xgene_sgmac_init(struct xgene_enet_pdata *p)
{
	u32 pause_thres_reg, pause_off_thres_reg;
	u32 enet_spare_cfg_reg, rsif_config_reg;
	u32 cfg_bypass_reg, rx_dv_gate_reg;
	u32 data, data1, data2, offset;
	u32 multi_dpf_reg;

	if (!(p->enet_id == XGENE_ENET2 && p->mdio_driver))
		xgene_sgmac_reset(p);

	xgene_sgmii_enable_autoneg(p);
	xgene_sgmac_set_speed(p);
	xgene_sgmac_set_mac_addr(p);

	if (p->enet_id == XGENE_ENET1) {
		enet_spare_cfg_reg = ENET_SPARE_CFG_REG_ADDR;
		rsif_config_reg = RSIF_CONFIG_REG_ADDR;
		cfg_bypass_reg = CFG_BYPASS_ADDR;
		offset = p->port_id * OFFSET_4;
		rx_dv_gate_reg = SG_RX_DV_GATE_REG_0_ADDR + offset;
	} else {
		enet_spare_cfg_reg = XG_ENET_SPARE_CFG_REG_ADDR;
		rsif_config_reg = XG_RSIF_CONFIG_REG_ADDR;
		cfg_bypass_reg = XG_CFG_BYPASS_ADDR;
		rx_dv_gate_reg = XG_MCX_RX_DV_GATE_REG_0_ADDR;
	}

	data = xgene_enet_rd_csr(p, enet_spare_cfg_reg);
	data |= MPA_IDLE_WITH_QMI_EMPTY;
	xgene_enet_wr_csr(p, enet_spare_cfg_reg, data);

	/* Adjust MDC clock frequency */
	data = xgene_enet_rd_mac(p, MII_MGMT_CONFIG_ADDR);
	MGMT_CLOCK_SEL_SET(&data, 7);
	xgene_enet_wr_mac(p, MII_MGMT_CONFIG_ADDR, data);

	/* Enable drop if bufpool not available */
	data = xgene_enet_rd_csr(p, rsif_config_reg);
	data |= CFG_RSIF_FPBUFF_TIMEOUT_EN;
	xgene_enet_wr_csr(p, rsif_config_reg, data);

	/* Configure HW pause frame generation */
	multi_dpf_reg = (p->enet_id == XGENE_ENET1) ? CSR_MULTI_DPF0_ADDR :
			XG_MCX_MULTI_DPF0_ADDR;
	data = xgene_enet_rd_mcx_csr(p, multi_dpf_reg);
	data = (DEF_QUANTA << 16) | (data & 0xffff);
	xgene_enet_wr_mcx_csr(p, multi_dpf_reg, data);

	if (p->enet_id != XGENE_ENET1) {
		data = xgene_enet_rd_mcx_csr(p, XG_MCX_MULTI_DPF1_ADDR);
		data = (NORM_PAUSE_OPCODE << 16) | (data & 0xFFFF);
		xgene_enet_wr_mcx_csr(p, XG_MCX_MULTI_DPF1_ADDR, data);
	}

	pause_thres_reg = (p->enet_id == XGENE_ENET1) ? RXBUF_PAUSE_THRESH :
			  XG_RXBUF_PAUSE_THRESH;
	pause_off_thres_reg = (p->enet_id == XGENE_ENET1) ?
			      RXBUF_PAUSE_OFF_THRESH : 0;

	if (p->enet_id == XGENE_ENET1) {
		data1 = xgene_enet_rd_csr(p, pause_thres_reg);
		data2 = xgene_enet_rd_csr(p, pause_off_thres_reg);

		if (!(p->port_id % 2)) {
			data1 = (data1 & 0xffff0000) | DEF_PAUSE_THRES;
			data2 = (data2 & 0xffff0000) | DEF_PAUSE_OFF_THRES;
		} else {
			data1 = (data1 & 0xffff) | (DEF_PAUSE_THRES << 16);
			data2 = (data2 & 0xffff) | (DEF_PAUSE_OFF_THRES << 16);
		}

		xgene_enet_wr_csr(p, pause_thres_reg, data1);
		xgene_enet_wr_csr(p, pause_off_thres_reg, data2);
	} else {
		data = (DEF_PAUSE_OFF_THRES << 16) | DEF_PAUSE_THRES;
		xgene_enet_wr_csr(p, pause_thres_reg, data);
	}

	xgene_sgmac_flowctl_tx(p, p->tx_pause);
	xgene_sgmac_flowctl_rx(p, p->rx_pause);

	/* Bypass traffic gating */
	xgene_enet_wr_csr(p, XG_ENET_SPARE_CFG_REG_1_ADDR, 0x84);
	xgene_enet_wr_csr(p, cfg_bypass_reg, RESUME_TX);
	xgene_enet_wr_mcx_csr(p, rx_dv_gate_reg, RESUME_RX0);
}

static void xgene_sgmac_rx_enable(struct xgene_enet_pdata *p)
{
	xgene_sgmac_rxtx(p, RX_EN, true);
}

static void xgene_sgmac_tx_enable(struct xgene_enet_pdata *p)
{
	xgene_sgmac_rxtx(p, TX_EN, true);
}

static void xgene_sgmac_rx_disable(struct xgene_enet_pdata *p)
{
	xgene_sgmac_rxtx(p, RX_EN, false);
}

static void xgene_sgmac_tx_disable(struct xgene_enet_pdata *p)
{
	xgene_sgmac_rxtx(p, TX_EN, false);
}
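
/* Port-level reset: initialize the ring manager, cycle the port clock
 * (DT) or invoke the ACPI _RST/_INI method, then run ECC init and
 * ring-interface association on port 0.
 */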
static int xgene_enet_reset(struct xgene_enet_pdata *p)
{
	struct device *dev = &p->pdev->dev;

	if (!xgene_ring_mgr_init(p))
		return -ENODEV;

	if (p->mdio_driver && p->enet_id == XGENE_ENET2) {
		xgene_enet_config_ring_if_assoc(p);
		return 0;
	}

	if (p->enet_id == XGENE_ENET2)
		xgene_enet_wr_clkrst_csr(p, XGENET_CONFIG_REG_ADDR, SGMII_EN);

	if (dev->of_node) {
		if (!IS_ERR(p->clk)) {
			clk_prepare_enable(p->clk);
			udelay(5);
			clk_disable_unprepare(p->clk);
			udelay(5);
			clk_prepare_enable(p->clk);
			udelay(5);
		}
	} else {
#ifdef CONFIG_ACPI
		if (acpi_has_method(ACPI_HANDLE(&p->pdev->dev), "_RST"))
			acpi_evaluate_object(ACPI_HANDLE(&p->pdev->dev),
					     "_RST", NULL, NULL);
		else if (acpi_has_method(ACPI_HANDLE(&p->pdev->dev), "_INI"))
			acpi_evaluate_object(ACPI_HANDLE(&p->pdev->dev),
					     "_INI", NULL, NULL);
#endif
	}

	if (!p->port_id) {
		xgene_enet_ecc_init(p);
		xgene_enet_config_ring_if_assoc(p);
	}

	return 0;
}
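
/* Steer RX traffic around the classifier: enable CLE bypass and
 * program the destination ring and buffer-pool selectors.
 */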
static void xgene_enet_cle_bypass(struct xgene_enet_pdata *p,
				  u32 dst_ring_num, u16 bufpool_id,
				  u16 nxtbufpool_id)
{
	u32 cle_bypass_reg0, cle_bypass_reg1;
	u32 offset = p->port_id * MAC_OFFSET;
	u32 data, fpsel, nxtfpsel;

	if (p->enet_id == XGENE_ENET1) {
		cle_bypass_reg0 = CLE_BYPASS_REG0_0_ADDR;
		cle_bypass_reg1 = CLE_BYPASS_REG1_0_ADDR;
	} else {
		cle_bypass_reg0 = XCLE_BYPASS_REG0_ADDR;
		cle_bypass_reg1 = XCLE_BYPASS_REG1_ADDR;
	}

	data = CFG_CLE_BYPASS_EN0;
	xgene_enet_wr_csr(p, cle_bypass_reg0 + offset, data);

	fpsel = xgene_enet_get_fpsel(bufpool_id);
	nxtfpsel = xgene_enet_get_fpsel(nxtbufpool_id);
	data = CFG_CLE_DSTQID0(dst_ring_num) | CFG_CLE_FPSEL0(fpsel) |
	       CFG_CLE_NXTFPSEL0(nxtfpsel);
	xgene_enet_wr_csr(p, cle_bypass_reg1 + offset, data);
}
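
/* Reset the QMI state for a ring: buffer-pool rings go through the FP
 * reset register, work-queue rings through the WQ reset register.
 */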
static void xgene_enet_clear(struct xgene_enet_pdata *pdata,
			     struct xgene_enet_desc_ring *ring)
{
	u32 addr, data;

	if (xgene_enet_is_bufpool(ring->id)) {
		addr = ENET_CFGSSQMIFPRESET_ADDR;
		data = BIT(xgene_enet_get_fpsel(ring->id));
	} else {
		addr = ENET_CFGSSQMIWQRESET_ADDR;
		data = BIT(xgene_enet_ring_bufnum(ring->id));
	}

	xgene_enet_wr_ring_if(pdata, addr, data);
}

static void xgene_enet_shutdown(struct xgene_enet_pdata *p)
{
	struct device *dev = &p->pdev->dev;

	if (dev->of_node) {
		if (!IS_ERR(p->clk))
			clk_disable_unprepare(p->clk);
	}
}
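
/* Delayed-work handler that polls the SGMII link: on a state change it
 * toggles the carrier, reapplies the speed settings and enables or
 * disables the MAC, then reschedules itself.
 */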
static void xgene_enet_link_state(struct work_struct *work)
{
	struct xgene_enet_pdata *p = container_of(to_delayed_work(work),
						  struct xgene_enet_pdata,
						  link_work);
	struct net_device *ndev = p->ndev;
	u32 link, poll_interval;

	link = xgene_enet_link_status(p);
	if (link) {
		if (!netif_carrier_ok(ndev)) {
			netif_carrier_on(ndev);
			xgene_sgmac_set_speed(p);
			xgene_sgmac_rx_enable(p);
			xgene_sgmac_tx_enable(p);
			netdev_info(ndev, "Link is Up - %dMbps\n",
				    p->phy_speed);
		}
		poll_interval = PHY_POLL_LINK_ON;
	} else {
		if (netif_carrier_ok(ndev)) {
			xgene_sgmac_rx_disable(p);
			xgene_sgmac_tx_disable(p);
			netif_carrier_off(ndev);
			netdev_info(ndev, "Link is Down\n");
		}
		poll_interval = PHY_POLL_LINK_OFF;
	}

	schedule_delayed_work(&p->link_work, poll_interval);
}
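
/* Enable or disable automatic pause-frame transmission in this port's
 * ECM config register.
 */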
static void xgene_sgmac_enable_tx_pause(struct xgene_enet_pdata *p, bool enable)
{
	u32 data, ecm_cfg_addr;

	if (p->enet_id == XGENE_ENET1) {
		ecm_cfg_addr = (!(p->port_id % 2)) ? CSR_ECM_CFG_0_ADDR :
				CSR_ECM_CFG_1_ADDR;
	} else {
		ecm_cfg_addr = XG_MCX_ECM_CFG_0_ADDR;
	}

	data = xgene_enet_rd_mcx_csr(p, ecm_cfg_addr);
	if (enable)
		data |= MULTI_DPF_AUTOCTRL | PAUSE_XON_EN;
	else
		data &= ~(MULTI_DPF_AUTOCTRL | PAUSE_XON_EN);
	xgene_enet_wr_mcx_csr(p, ecm_cfg_addr, data);
}

const struct xgene_mac_ops xgene_sgmac_ops = {
	.init = xgene_sgmac_init,
	.reset = xgene_sgmac_reset,
	.rx_enable = xgene_sgmac_rx_enable,
	.tx_enable = xgene_sgmac_tx_enable,
	.rx_disable = xgene_sgmac_rx_disable,
	.tx_disable = xgene_sgmac_tx_disable,
	.get_drop_cnt = xgene_sgmac_get_drop_cnt,
	.set_speed = xgene_sgmac_set_speed,
	.set_mac_addr = xgene_sgmac_set_mac_addr,
	.set_framesize = xgene_sgmac_set_frame_size,
	.link_state = xgene_enet_link_state,
	.enable_tx_pause = xgene_sgmac_enable_tx_pause,
	.flowctl_tx = xgene_sgmac_flowctl_tx,
	.flowctl_rx = xgene_sgmac_flowctl_rx
};

const struct xgene_port_ops xgene_sgport_ops = {
	.reset = xgene_enet_reset,
	.clear = xgene_enet_clear,
	.cle_bypass = xgene_enet_cle_bypass,
	.shutdown = xgene_enet_shutdown
};