mtk_eth_soc.c

  1. /* This program is free software; you can redistribute it and/or modify
  2. * it under the terms of the GNU General Public License as published by
  3. * the Free Software Foundation; version 2 of the License
  4. *
  5. * This program is distributed in the hope that it will be useful,
  6. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  7. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  8. * GNU General Public License for more details.
  9. *
  10. * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
  11. * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
  12. * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
  13. */
  14. #include <linux/of_device.h>
  15. #include <linux/of_mdio.h>
  16. #include <linux/of_net.h>
  17. #include <linux/mfd/syscon.h>
  18. #include <linux/regmap.h>
  19. #include <linux/clk.h>
  20. #include <linux/pm_runtime.h>
  21. #include <linux/if_vlan.h>
  22. #include <linux/reset.h>
  23. #include <linux/tcp.h>
  24. #include "mtk_eth_soc.h"
  25. static int mtk_msg_level = -1;
  26. module_param_named(msg_level, mtk_msg_level, int, 0);
  27. MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
  28. #define MTK_ETHTOOL_STAT(x) { #x, \
  29. offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
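/*
 * Each entry pairs the field name (used as the ethtool string) with that
 * field's u64 index inside struct mtk_hw_stats.  For example,
 * MTK_ETHTOOL_STAT(tx_bytes) expands to
 *   { "tx_bytes", offsetof(struct mtk_hw_stats, tx_bytes) / sizeof(u64) }
 */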
  30. /* strings used by ethtool */
  31. static const struct mtk_ethtool_stats {
  32. char str[ETH_GSTRING_LEN];
  33. u32 offset;
  34. } mtk_ethtool_stats[] = {
  35. MTK_ETHTOOL_STAT(tx_bytes),
  36. MTK_ETHTOOL_STAT(tx_packets),
  37. MTK_ETHTOOL_STAT(tx_skip),
  38. MTK_ETHTOOL_STAT(tx_collisions),
  39. MTK_ETHTOOL_STAT(rx_bytes),
  40. MTK_ETHTOOL_STAT(rx_packets),
  41. MTK_ETHTOOL_STAT(rx_overflow),
  42. MTK_ETHTOOL_STAT(rx_fcs_errors),
  43. MTK_ETHTOOL_STAT(rx_short_errors),
  44. MTK_ETHTOOL_STAT(rx_long_errors),
  45. MTK_ETHTOOL_STAT(rx_checksum_errors),
  46. MTK_ETHTOOL_STAT(rx_flow_control_packets),
  47. };
  48. static const char * const mtk_clks_source_name[] = {
  49. "ethif", "esw", "gp1", "gp2", "trgpll"
  50. };
  51. void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
  52. {
  53. __raw_writel(val, eth->base + reg);
  54. }
  55. u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
  56. {
  57. return __raw_readl(eth->base + reg);
  58. }
  59. static int mtk_mdio_busy_wait(struct mtk_eth *eth)
  60. {
  61. unsigned long t_start = jiffies;
  62. while (1) {
  63. if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
  64. return 0;
  65. if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
  66. break;
  67. usleep_range(10, 20);
  68. }
  69. dev_err(eth->dev, "mdio: MDIO timeout\n");
  70. return -1;
  71. }
  72. static u32 _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr,
  73. u32 phy_register, u32 write_data)
  74. {
  75. if (mtk_mdio_busy_wait(eth))
  76. return -1;
  77. write_data &= 0xffff;
  78. mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_WRITE |
  79. (phy_register << PHY_IAC_REG_SHIFT) |
  80. (phy_addr << PHY_IAC_ADDR_SHIFT) | write_data,
  81. MTK_PHY_IAC);
  82. if (mtk_mdio_busy_wait(eth))
  83. return -1;
  84. return 0;
  85. }
  86. static u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg)
  87. {
  88. u32 d;
  89. if (mtk_mdio_busy_wait(eth))
  90. return 0xffff;
  91. mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_READ |
  92. (phy_reg << PHY_IAC_REG_SHIFT) |
  93. (phy_addr << PHY_IAC_ADDR_SHIFT),
  94. MTK_PHY_IAC);
  95. if (mtk_mdio_busy_wait(eth))
  96. return 0xffff;
  97. d = mtk_r32(eth, MTK_PHY_IAC) & 0xffff;
  98. return d;
  99. }
  100. static int mtk_mdio_write(struct mii_bus *bus, int phy_addr,
  101. int phy_reg, u16 val)
  102. {
  103. struct mtk_eth *eth = bus->priv;
  104. return _mtk_mdio_write(eth, phy_addr, phy_reg, val);
  105. }
  106. static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
  107. {
  108. struct mtk_eth *eth = bus->priv;
  109. return _mtk_mdio_read(eth, phy_addr, phy_reg);
  110. }
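/*
 * PHY access goes through the single MTK_PHY_IAC register: wait until the
 * ACCESS bit clears, program START | READ/WRITE | reg | addr (plus the data
 * word for writes), then wait again; for reads the result sits in the low
 * 16 bits of the same register.  A busy-wait timeout is reported as 0xffff
 * on read and -1 on write.
 */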
  111. static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth, int speed)
  112. {
  113. u32 val;
  114. int ret;
  115. val = (speed == SPEED_1000) ?
  116. INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100;
  117. mtk_w32(eth, val, INTF_MODE);
  118. regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
  119. ETHSYS_TRGMII_CLK_SEL362_5,
  120. ETHSYS_TRGMII_CLK_SEL362_5);
  121. val = (speed == SPEED_1000) ? 250000000 : 500000000;
  122. ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
  123. if (ret)
  124. dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
  125. val = (speed == SPEED_1000) ?
  126. RCK_CTRL_RGMII_1000 : RCK_CTRL_RGMII_10_100;
  127. mtk_w32(eth, val, TRGMII_RCK_CTRL);
  128. val = (speed == SPEED_1000) ?
  129. TCK_CTRL_RGMII_1000 : TCK_CTRL_RGMII_10_100;
  130. mtk_w32(eth, val, TRGMII_TCK_CTRL);
  131. }
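/*
 * Speed changes on GMAC0 re-program the interface mode word, select the
 * 362.5 TRGMII clock in ETHSYS, retune the trgpll rate (250MHz for gigabit,
 * 500MHz for 10/100) and then update the TRGMII RX/TX clock control words.
 */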
  132. static void mtk_phy_link_adjust(struct net_device *dev)
  133. {
  134. struct mtk_mac *mac = netdev_priv(dev);
  135. u16 lcl_adv = 0, rmt_adv = 0;
  136. u8 flowctrl;
  137. u32 mcr = MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG |
  138. MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN |
  139. MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN |
  140. MAC_MCR_BACKPR_EN;
  141. if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
  142. return;
  143. switch (dev->phydev->speed) {
  144. case SPEED_1000:
  145. mcr |= MAC_MCR_SPEED_1000;
  146. break;
  147. case SPEED_100:
  148. mcr |= MAC_MCR_SPEED_100;
  149. break;
  150. }
  151. if (mac->id == 0 && !mac->trgmii)
  152. mtk_gmac0_rgmii_adjust(mac->hw, dev->phydev->speed);
  153. if (dev->phydev->link)
  154. mcr |= MAC_MCR_FORCE_LINK;
  155. if (dev->phydev->duplex) {
  156. mcr |= MAC_MCR_FORCE_DPX;
  157. if (dev->phydev->pause)
  158. rmt_adv = LPA_PAUSE_CAP;
  159. if (dev->phydev->asym_pause)
  160. rmt_adv |= LPA_PAUSE_ASYM;
  161. if (dev->phydev->advertising & ADVERTISED_Pause)
  162. lcl_adv |= ADVERTISE_PAUSE_CAP;
  163. if (dev->phydev->advertising & ADVERTISED_Asym_Pause)
  164. lcl_adv |= ADVERTISE_PAUSE_ASYM;
  165. flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
  166. if (flowctrl & FLOW_CTRL_TX)
  167. mcr |= MAC_MCR_FORCE_TX_FC;
  168. if (flowctrl & FLOW_CTRL_RX)
  169. mcr |= MAC_MCR_FORCE_RX_FC;
  170. netif_dbg(mac->hw, link, dev, "rx pause %s, tx pause %s\n",
  171. flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled",
  172. flowctrl & FLOW_CTRL_TX ? "enabled" : "disabled");
  173. }
  174. mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
  175. if (dev->phydev->link)
  176. netif_carrier_on(dev);
  177. else
  178. netif_carrier_off(dev);
  179. }
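/*
 * The adjust_link callback rebuilds MAC_MCR from the current phydev state:
 * speed and duplex are forced, pause is resolved from the local and remote
 * advertisement via mii_resolve_flowctrl_fdx(), and the result is written to
 * the per-MAC MCR before the carrier state is propagated to the stack.
 */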
  180. static int mtk_phy_connect_node(struct mtk_eth *eth, struct mtk_mac *mac,
  181. struct device_node *phy_node)
  182. {
  183. struct phy_device *phydev;
  184. int phy_mode;
  185. phy_mode = of_get_phy_mode(phy_node);
  186. if (phy_mode < 0) {
  187. dev_err(eth->dev, "incorrect phy-mode %d\n", phy_mode);
  188. return -EINVAL;
  189. }
  190. phydev = of_phy_connect(eth->netdev[mac->id], phy_node,
  191. mtk_phy_link_adjust, 0, phy_mode);
  192. if (!phydev) {
  193. dev_err(eth->dev, "could not connect to PHY\n");
  194. return -ENODEV;
  195. }
  196. dev_info(eth->dev,
  197. "connected mac %d to PHY at %s [uid=%08x, driver=%s]\n",
  198. mac->id, phydev_name(phydev), phydev->phy_id,
  199. phydev->drv->name);
  200. return 0;
  201. }
  202. static int mtk_phy_connect(struct net_device *dev)
  203. {
  204. struct mtk_mac *mac = netdev_priv(dev);
  205. struct mtk_eth *eth;
  206. struct device_node *np;
  207. u32 val;
  208. eth = mac->hw;
  209. np = of_parse_phandle(mac->of_node, "phy-handle", 0);
  210. if (!np && of_phy_is_fixed_link(mac->of_node))
  211. if (!of_phy_register_fixed_link(mac->of_node))
  212. np = of_node_get(mac->of_node);
  213. if (!np)
  214. return -ENODEV;
  215. switch (of_get_phy_mode(np)) {
  216. case PHY_INTERFACE_MODE_TRGMII:
  217. mac->trgmii = true; /* fall through: TRGMII is otherwise handled like RGMII */
  218. case PHY_INTERFACE_MODE_RGMII_TXID:
  219. case PHY_INTERFACE_MODE_RGMII_RXID:
  220. case PHY_INTERFACE_MODE_RGMII_ID:
  221. case PHY_INTERFACE_MODE_RGMII:
  222. mac->ge_mode = 0;
  223. break;
  224. case PHY_INTERFACE_MODE_MII:
  225. mac->ge_mode = 1;
  226. break;
  227. case PHY_INTERFACE_MODE_REVMII:
  228. mac->ge_mode = 2;
  229. break;
  230. case PHY_INTERFACE_MODE_RMII:
  231. if (!mac->id)
  232. goto err_phy;
  233. mac->ge_mode = 3;
  234. break;
  235. default:
  236. goto err_phy;
  237. }
  238. /* put the gmac into the right mode */
  239. regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
  240. val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
  241. val |= SYSCFG0_GE_MODE(mac->ge_mode, mac->id);
  242. regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
  243. /* couple phydev to net_device */
  244. if (mtk_phy_connect_node(eth, mac, np))
  245. goto err_phy;
  246. dev->phydev->autoneg = AUTONEG_ENABLE;
  247. dev->phydev->speed = 0;
  248. dev->phydev->duplex = 0;
  249. if (of_phy_is_fixed_link(mac->of_node))
  250. dev->phydev->supported |=
  251. SUPPORTED_Pause | SUPPORTED_Asym_Pause;
  252. dev->phydev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause |
  253. SUPPORTED_Asym_Pause;
  254. dev->phydev->advertising = dev->phydev->supported |
  255. ADVERTISED_Autoneg;
  256. phy_start_aneg(dev->phydev);
  257. of_node_put(np);
  258. return 0;
  259. err_phy:
  260. if (of_phy_is_fixed_link(mac->of_node))
  261. of_phy_deregister_fixed_link(mac->of_node);
  262. of_node_put(np);
  263. dev_err(eth->dev, "%s: invalid phy\n", __func__);
  264. return -EINVAL;
  265. }
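/*
 * mac->ge_mode encodes the PHY interface for the ETHSYS_SYSCFG0 GE_MODE
 * field: 0 = (T)RGMII, 1 = MII, 2 = REVMII, 3 = RMII (RMII is rejected on
 * GMAC0).  Fixed links are registered here too, so dev->phydev is always
 * valid once mtk_phy_connect() succeeds.
 */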
  266. static int mtk_mdio_init(struct mtk_eth *eth)
  267. {
  268. struct device_node *mii_np;
  269. int ret;
  270. mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
  271. if (!mii_np) {
  272. dev_err(eth->dev, "no %s child node found", "mdio-bus");
  273. return -ENODEV;
  274. }
  275. if (!of_device_is_available(mii_np)) {
  276. ret = -ENODEV;
  277. goto err_put_node;
  278. }
  279. eth->mii_bus = devm_mdiobus_alloc(eth->dev);
  280. if (!eth->mii_bus) {
  281. ret = -ENOMEM;
  282. goto err_put_node;
  283. }
  284. eth->mii_bus->name = "mdio";
  285. eth->mii_bus->read = mtk_mdio_read;
  286. eth->mii_bus->write = mtk_mdio_write;
  287. eth->mii_bus->priv = eth;
  288. eth->mii_bus->parent = eth->dev;
  289. snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%s", mii_np->name);
  290. ret = of_mdiobus_register(eth->mii_bus, mii_np);
  291. err_put_node:
  292. of_node_put(mii_np);
  293. return ret;
  294. }
  295. static void mtk_mdio_cleanup(struct mtk_eth *eth)
  296. {
  297. if (!eth->mii_bus)
  298. return;
  299. mdiobus_unregister(eth->mii_bus);
  300. }
  301. static inline void mtk_irq_disable(struct mtk_eth *eth,
  302. unsigned reg, u32 mask)
  303. {
  304. unsigned long flags;
  305. u32 val;
  306. spin_lock_irqsave(&eth->irq_lock, flags);
  307. val = mtk_r32(eth, reg);
  308. mtk_w32(eth, val & ~mask, reg);
  309. spin_unlock_irqrestore(&eth->irq_lock, flags);
  310. }
  311. static inline void mtk_irq_enable(struct mtk_eth *eth,
  312. unsigned reg, u32 mask)
  313. {
  314. unsigned long flags;
  315. u32 val;
  316. spin_lock_irqsave(&eth->irq_lock, flags);
  317. val = mtk_r32(eth, reg);
  318. mtk_w32(eth, val | mask, reg);
  319. spin_unlock_irqrestore(&eth->irq_lock, flags);
  320. }
  321. static int mtk_set_mac_address(struct net_device *dev, void *p)
  322. {
  323. int ret = eth_mac_addr(dev, p);
  324. struct mtk_mac *mac = netdev_priv(dev);
  325. const char *macaddr = dev->dev_addr;
  326. if (ret)
  327. return ret;
  328. if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
  329. return -EBUSY;
  330. spin_lock_bh(&mac->hw->page_lock);
  331. mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
  332. MTK_GDMA_MAC_ADRH(mac->id));
  333. mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
  334. (macaddr[4] << 8) | macaddr[5],
  335. MTK_GDMA_MAC_ADRL(mac->id));
  336. spin_unlock_bh(&mac->hw->page_lock);
  337. return 0;
  338. }
  339. void mtk_stats_update_mac(struct mtk_mac *mac)
  340. {
  341. struct mtk_hw_stats *hw_stats = mac->hw_stats;
  342. unsigned int base = MTK_GDM1_TX_GBCNT;
  343. u64 stats;
  344. base += hw_stats->reg_offset;
  345. u64_stats_update_begin(&hw_stats->syncp);
  346. hw_stats->rx_bytes += mtk_r32(mac->hw, base);
  347. stats = mtk_r32(mac->hw, base + 0x04);
  348. if (stats)
  349. hw_stats->rx_bytes += (stats << 32);
  350. hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08);
  351. hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10);
  352. hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14);
  353. hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18);
  354. hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c);
  355. hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20);
  356. hw_stats->rx_flow_control_packets +=
  357. mtk_r32(mac->hw, base + 0x24);
  358. hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28);
  359. hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c);
  360. hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30);
  361. stats = mtk_r32(mac->hw, base + 0x34);
  362. if (stats)
  363. hw_stats->tx_bytes += (stats << 32);
  364. hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38);
  365. u64_stats_update_end(&hw_stats->syncp);
  366. }
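/*
 * The hardware MIB counters live in a per-GDMA window starting at
 * MTK_GDM1_TX_GBCNT + reg_offset; the reads above accumulate them into the
 * 64bit software counters under the u64_stats seqcount.  The extra reads at
 * +0x04 and +0x34 carry the high 32 bits of the RX/TX byte counters.
 */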
  367. static void mtk_stats_update(struct mtk_eth *eth)
  368. {
  369. int i;
  370. for (i = 0; i < MTK_MAC_COUNT; i++) {
  371. if (!eth->mac[i] || !eth->mac[i]->hw_stats)
  372. continue;
  373. if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
  374. mtk_stats_update_mac(eth->mac[i]);
  375. spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
  376. }
  377. }
  378. }
  379. static struct rtnl_link_stats64 *mtk_get_stats64(struct net_device *dev,
  380. struct rtnl_link_stats64 *storage)
  381. {
  382. struct mtk_mac *mac = netdev_priv(dev);
  383. struct mtk_hw_stats *hw_stats = mac->hw_stats;
  384. unsigned int start;
  385. if (netif_running(dev) && netif_device_present(dev)) {
  386. if (spin_trylock(&hw_stats->stats_lock)) {
  387. mtk_stats_update_mac(mac);
  388. spin_unlock(&hw_stats->stats_lock);
  389. }
  390. }
  391. do {
  392. start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
  393. storage->rx_packets = hw_stats->rx_packets;
  394. storage->tx_packets = hw_stats->tx_packets;
  395. storage->rx_bytes = hw_stats->rx_bytes;
  396. storage->tx_bytes = hw_stats->tx_bytes;
  397. storage->collisions = hw_stats->tx_collisions;
  398. storage->rx_length_errors = hw_stats->rx_short_errors +
  399. hw_stats->rx_long_errors;
  400. storage->rx_over_errors = hw_stats->rx_overflow;
  401. storage->rx_crc_errors = hw_stats->rx_fcs_errors;
  402. storage->rx_errors = hw_stats->rx_checksum_errors;
  403. storage->tx_aborted_errors = hw_stats->tx_skip;
  404. } while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));
  405. storage->tx_errors = dev->stats.tx_errors;
  406. storage->rx_dropped = dev->stats.rx_dropped;
  407. storage->tx_dropped = dev->stats.tx_dropped;
  408. return storage;
  409. }
  410. static inline int mtk_max_frag_size(int mtu)
  411. {
  412. /* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
  413. if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH)
  414. mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
  415. return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
  416. SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
  417. }
  418. static inline int mtk_max_buf_size(int frag_size)
  419. {
  420. int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
  421. SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
  422. WARN_ON(buf_size < MTK_MAX_RX_LENGTH);
  423. return buf_size;
  424. }
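/*
 * The RX fragment laid out by these two helpers is
 *   frag_size = ALIGN(MTK_RX_HLEN + mtu) + ALIGN(sizeof(struct skb_shared_info))
 * and the part handed to the hardware is
 *   buf_size  = frag_size - NET_SKB_PAD - NET_IP_ALIGN - ALIGN(shared_info)
 * so buf_size never drops below MTK_MAX_RX_LENGTH for small MTUs.
 */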
  425. static inline void mtk_rx_get_desc(struct mtk_rx_dma *rxd,
  426. struct mtk_rx_dma *dma_rxd)
  427. {
  428. rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
  429. rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
  430. rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
  431. rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
  432. }
  433. /* the qdma core needs scratch memory to be set up */
  434. static int mtk_init_fq_dma(struct mtk_eth *eth)
  435. {
  436. dma_addr_t phy_ring_tail;
  437. int cnt = MTK_DMA_SIZE;
  438. dma_addr_t dma_addr;
  439. int i;
  440. eth->scratch_ring = dma_alloc_coherent(eth->dev,
  441. cnt * sizeof(struct mtk_tx_dma),
  442. &eth->phy_scratch_ring,
  443. GFP_ATOMIC | __GFP_ZERO);
  444. if (unlikely(!eth->scratch_ring))
  445. return -ENOMEM;
  446. eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE,
  447. GFP_KERNEL);
  448. if (unlikely(!eth->scratch_head))
  449. return -ENOMEM;
  450. dma_addr = dma_map_single(eth->dev,
  451. eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
  452. DMA_FROM_DEVICE);
  453. if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
  454. return -ENOMEM;
  455. memset(eth->scratch_ring, 0x0, sizeof(struct mtk_tx_dma) * cnt);
  456. phy_ring_tail = eth->phy_scratch_ring +
  457. (sizeof(struct mtk_tx_dma) * (cnt - 1));
  458. for (i = 0; i < cnt; i++) {
  459. eth->scratch_ring[i].txd1 =
  460. (dma_addr + (i * MTK_QDMA_PAGE_SIZE));
  461. if (i < cnt - 1)
  462. eth->scratch_ring[i].txd2 = (eth->phy_scratch_ring +
  463. ((i + 1) * sizeof(struct mtk_tx_dma)));
  464. eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE);
  465. }
  466. mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD);
  467. mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
  468. mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
  469. mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);
  470. return 0;
  471. }
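/*
 * The free queue handed to the QDMA engine is a chain of MTK_DMA_SIZE
 * descriptors: txd1 of each entry points at one MTK_QDMA_PAGE_SIZE chunk of
 * scratch_head, txd2 links to the next descriptor, and the head/tail/count
 * registers tell the hardware where the chain starts and ends.
 */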
  472. static inline void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
  473. {
  474. void *ret = ring->dma;
  475. return ret + (desc - ring->phys);
  476. }
  477. static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
  478. struct mtk_tx_dma *txd)
  479. {
  480. int idx = txd - ring->dma;
  481. return &ring->buf[idx];
  482. }
  483. static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf)
  484. {
  485. if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
  486. dma_unmap_single(eth->dev,
  487. dma_unmap_addr(tx_buf, dma_addr0),
  488. dma_unmap_len(tx_buf, dma_len0),
  489. DMA_TO_DEVICE);
  490. } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
  491. dma_unmap_page(eth->dev,
  492. dma_unmap_addr(tx_buf, dma_addr0),
  493. dma_unmap_len(tx_buf, dma_len0),
  494. DMA_TO_DEVICE);
  495. }
  496. tx_buf->flags = 0;
  497. if (tx_buf->skb &&
  498. (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC))
  499. dev_kfree_skb_any(tx_buf->skb);
  500. tx_buf->skb = NULL;
  501. }
  502. static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
  503. int tx_num, struct mtk_tx_ring *ring, bool gso)
  504. {
  505. struct mtk_mac *mac = netdev_priv(dev);
  506. struct mtk_eth *eth = mac->hw;
  507. struct mtk_tx_dma *itxd, *txd;
  508. struct mtk_tx_buf *tx_buf;
  509. dma_addr_t mapped_addr;
  510. unsigned int nr_frags;
  511. int i, n_desc = 1;
  512. u32 txd4 = 0, fport;
  513. itxd = ring->next_free;
  514. if (itxd == ring->last_free)
  515. return -ENOMEM;
  516. /* set the forward port */
  517. fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT;
  518. txd4 |= fport;
  519. tx_buf = mtk_desc_to_tx_buf(ring, itxd);
  520. memset(tx_buf, 0, sizeof(*tx_buf));
  521. if (gso)
  522. txd4 |= TX_DMA_TSO;
  523. /* TX Checksum offload */
  524. if (skb->ip_summed == CHECKSUM_PARTIAL)
  525. txd4 |= TX_DMA_CHKSUM;
  526. /* VLAN header offload */
  527. if (skb_vlan_tag_present(skb))
  528. txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);
  529. mapped_addr = dma_map_single(eth->dev, skb->data,
  530. skb_headlen(skb), DMA_TO_DEVICE);
  531. if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
  532. return -ENOMEM;
  533. WRITE_ONCE(itxd->txd1, mapped_addr);
  534. tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
  535. dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
  536. dma_unmap_len_set(tx_buf, dma_len0, skb_headlen(skb));
  537. /* TX SG offload */
  538. txd = itxd;
  539. nr_frags = skb_shinfo(skb)->nr_frags;
  540. for (i = 0; i < nr_frags; i++) {
  541. struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
  542. unsigned int offset = 0;
  543. int frag_size = skb_frag_size(frag);
  544. while (frag_size) {
  545. bool last_frag = false;
  546. unsigned int frag_map_size;
  547. txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
  548. if (txd == ring->last_free)
  549. goto err_dma;
  550. n_desc++;
  551. frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
  552. mapped_addr = skb_frag_dma_map(eth->dev, frag, offset,
  553. frag_map_size,
  554. DMA_TO_DEVICE);
  555. if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
  556. goto err_dma;
  557. if (i == nr_frags - 1 &&
  558. (frag_size - frag_map_size) == 0)
  559. last_frag = true;
  560. WRITE_ONCE(txd->txd1, mapped_addr);
  561. WRITE_ONCE(txd->txd3, (TX_DMA_SWC |
  562. TX_DMA_PLEN0(frag_map_size) |
  563. last_frag * TX_DMA_LS0));
  564. WRITE_ONCE(txd->txd4, fport);
  565. tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
  566. tx_buf = mtk_desc_to_tx_buf(ring, txd);
  567. memset(tx_buf, 0, sizeof(*tx_buf));
  568. tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
  569. dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
  570. dma_unmap_len_set(tx_buf, dma_len0, frag_map_size);
  571. frag_size -= frag_map_size;
  572. offset += frag_map_size;
  573. }
  574. }
  575. /* store skb to cleanup */
  576. tx_buf->skb = skb;
  577. WRITE_ONCE(itxd->txd4, txd4);
  578. WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
  579. (!nr_frags * TX_DMA_LS0)));
  580. netdev_sent_queue(dev, skb->len);
  581. skb_tx_timestamp(skb);
  582. ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
  583. atomic_sub(n_desc, &ring->free_count);
  584. /* make sure that all changes to the dma ring are flushed before we
  585. * continue
  586. */
  587. wmb();
  588. if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !skb->xmit_more)
  589. mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
  590. return 0;
  591. err_dma:
  592. do {
  593. tx_buf = mtk_desc_to_tx_buf(ring, itxd);
  594. /* unmap dma */
  595. mtk_tx_unmap(eth, tx_buf);
  596. itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
  597. itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
  598. } while (itxd != txd);
  599. return -ENOMEM;
  600. }
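/*
 * mtk_tx_map() builds one descriptor for the linear part (txd4 carries the
 * forward port plus the TSO/checksum/VLAN flags) and one descriptor per
 * MTK_TX_DMA_BUF_LEN chunk of every fragment; TX_DMA_LS0 marks the last
 * segment.  The hardware is only kicked via MTK_QTX_CTX_PTR when the queue
 * is stopped or xmit_more is not set, and err_dma unwinds every mapping
 * made so far.
 */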
  601. static inline int mtk_cal_txd_req(struct sk_buff *skb)
  602. {
  603. int i, nfrags;
  604. struct skb_frag_struct *frag;
  605. nfrags = 1;
  606. if (skb_is_gso(skb)) {
  607. for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
  608. frag = &skb_shinfo(skb)->frags[i];
  609. nfrags += DIV_ROUND_UP(frag->size, MTK_TX_DMA_BUF_LEN);
  610. }
  611. } else {
  612. nfrags += skb_shinfo(skb)->nr_frags;
  613. }
  614. return nfrags;
  615. }
  616. static int mtk_queue_stopped(struct mtk_eth *eth)
  617. {
  618. int i;
  619. for (i = 0; i < MTK_MAC_COUNT; i++) {
  620. if (!eth->netdev[i])
  621. continue;
  622. if (netif_queue_stopped(eth->netdev[i]))
  623. return 1;
  624. }
  625. return 0;
  626. }
  627. static void mtk_wake_queue(struct mtk_eth *eth)
  628. {
  629. int i;
  630. for (i = 0; i < MTK_MAC_COUNT; i++) {
  631. if (!eth->netdev[i])
  632. continue;
  633. netif_wake_queue(eth->netdev[i]);
  634. }
  635. }
  636. static void mtk_stop_queue(struct mtk_eth *eth)
  637. {
  638. int i;
  639. for (i = 0; i < MTK_MAC_COUNT; i++) {
  640. if (!eth->netdev[i])
  641. continue;
  642. netif_stop_queue(eth->netdev[i]);
  643. }
  644. }
  645. static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
  646. {
  647. struct mtk_mac *mac = netdev_priv(dev);
  648. struct mtk_eth *eth = mac->hw;
  649. struct mtk_tx_ring *ring = &eth->tx_ring;
  650. struct net_device_stats *stats = &dev->stats;
  651. bool gso = false;
  652. int tx_num;
  653. /* normally we can rely on the stack not calling this more than once,
  654. * however we have 2 queues running on the same ring so we need to lock
  655. * the ring access
  656. */
  657. spin_lock(&eth->page_lock);
  658. if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
  659. goto drop;
  660. tx_num = mtk_cal_txd_req(skb);
  661. if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
  662. mtk_stop_queue(eth);
  663. netif_err(eth, tx_queued, dev,
  664. "Tx Ring full when queue awake!\n");
  665. spin_unlock(&eth->page_lock);
  666. return NETDEV_TX_BUSY;
  667. }
  668. /* TSO: fill MSS info in tcp checksum field */
  669. if (skb_is_gso(skb)) {
  670. if (skb_cow_head(skb, 0)) {
  671. netif_warn(eth, tx_err, dev,
  672. "GSO expand head fail.\n");
  673. goto drop;
  674. }
  675. if (skb_shinfo(skb)->gso_type &
  676. (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
  677. gso = true;
  678. tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
  679. }
  680. }
  681. if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
  682. goto drop;
  683. if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
  684. mtk_stop_queue(eth);
  685. spin_unlock(&eth->page_lock);
  686. return NETDEV_TX_OK;
  687. drop:
  688. spin_unlock(&eth->page_lock);
  689. stats->tx_dropped++;
  690. dev_kfree_skb(skb);
  691. return NETDEV_TX_OK;
  692. }
  693. static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
  694. {
  695. int i;
  696. struct mtk_rx_ring *ring;
  697. int idx;
  698. if (!eth->hwlro)
  699. return &eth->rx_ring[0];
  700. for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
  701. ring = &eth->rx_ring[i];
  702. idx = NEXT_RX_DESP_IDX(ring->calc_idx, ring->dma_size);
  703. if (ring->dma[idx].rxd2 & RX_DMA_DONE) {
  704. ring->calc_idx_update = true;
  705. return ring;
  706. }
  707. }
  708. return NULL;
  709. }
  710. static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
  711. {
  712. struct mtk_rx_ring *ring;
  713. int i;
  714. if (!eth->hwlro) {
  715. ring = &eth->rx_ring[0];
  716. mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
  717. } else {
  718. for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
  719. ring = &eth->rx_ring[i];
  720. if (ring->calc_idx_update) {
  721. ring->calc_idx_update = false;
  722. mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
  723. }
  724. }
  725. }
  726. }
  727. static int mtk_poll_rx(struct napi_struct *napi, int budget,
  728. struct mtk_eth *eth)
  729. {
  730. struct mtk_rx_ring *ring;
  731. int idx;
  732. struct sk_buff *skb;
  733. u8 *data, *new_data;
  734. struct mtk_rx_dma *rxd, trxd;
  735. int done = 0;
  736. while (done < budget) {
  737. struct net_device *netdev;
  738. unsigned int pktlen;
  739. dma_addr_t dma_addr;
  740. int mac = 0;
  741. ring = mtk_get_rx_ring(eth);
  742. if (unlikely(!ring))
  743. goto rx_done;
  744. idx = NEXT_RX_DESP_IDX(ring->calc_idx, ring->dma_size);
  745. rxd = &ring->dma[idx];
  746. data = ring->data[idx];
  747. mtk_rx_get_desc(&trxd, rxd);
  748. if (!(trxd.rxd2 & RX_DMA_DONE))
  749. break;
  750. /* find out which mac the packet comes from; forward port values start at 1 */
  751. mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
  752. RX_DMA_FPORT_MASK;
  753. mac--;
  754. netdev = eth->netdev[mac];
  755. if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
  756. goto release_desc;
  757. /* alloc new buffer */
  758. new_data = napi_alloc_frag(ring->frag_size);
  759. if (unlikely(!new_data)) {
  760. netdev->stats.rx_dropped++;
  761. goto release_desc;
  762. }
  763. dma_addr = dma_map_single(eth->dev,
  764. new_data + NET_SKB_PAD,
  765. ring->buf_size,
  766. DMA_FROM_DEVICE);
  767. if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
  768. skb_free_frag(new_data);
  769. netdev->stats.rx_dropped++;
  770. goto release_desc;
  771. }
  772. /* receive data */
  773. skb = build_skb(data, ring->frag_size);
  774. if (unlikely(!skb)) {
  775. skb_free_frag(new_data);
  776. netdev->stats.rx_dropped++;
  777. goto release_desc;
  778. }
  779. skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
  780. dma_unmap_single(eth->dev, trxd.rxd1,
  781. ring->buf_size, DMA_FROM_DEVICE);
  782. pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
  783. skb->dev = netdev;
  784. skb_put(skb, pktlen);
  785. if (trxd.rxd4 & RX_DMA_L4_VALID)
  786. skb->ip_summed = CHECKSUM_UNNECESSARY;
  787. else
  788. skb_checksum_none_assert(skb);
  789. skb->protocol = eth_type_trans(skb, netdev);
  790. if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
  791. RX_DMA_VID(trxd.rxd3))
  792. __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
  793. RX_DMA_VID(trxd.rxd3));
  794. napi_gro_receive(napi, skb);
  795. ring->data[idx] = new_data;
  796. rxd->rxd1 = (unsigned int)dma_addr;
  797. release_desc:
  798. rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);
  799. ring->calc_idx = idx;
  800. done++;
  801. }
  802. rx_done:
  803. if (done) {
  804. /* make sure that all changes to the dma ring are flushed before
  805. * we continue
  806. */
  807. wmb();
  808. mtk_update_rx_cpu_idx(eth);
  809. }
  810. return done;
  811. }
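/*
 * The RX path allocates and maps a replacement fragment before the old
 * buffer is turned into an skb, so a descriptor is never left without
 * backing memory; on any allocation or mapping failure the descriptor is
 * simply recycled via release_desc and the packet is counted as dropped.
 */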
  812. static int mtk_poll_tx(struct mtk_eth *eth, int budget)
  813. {
  814. struct mtk_tx_ring *ring = &eth->tx_ring;
  815. struct mtk_tx_dma *desc;
  816. struct sk_buff *skb;
  817. struct mtk_tx_buf *tx_buf;
  818. unsigned int done[MTK_MAX_DEVS];
  819. unsigned int bytes[MTK_MAX_DEVS];
  820. u32 cpu, dma;
  821. static int condition;
  822. int total = 0, i;
  823. memset(done, 0, sizeof(done));
  824. memset(bytes, 0, sizeof(bytes));
  825. cpu = mtk_r32(eth, MTK_QTX_CRX_PTR);
  826. dma = mtk_r32(eth, MTK_QTX_DRX_PTR);
  827. desc = mtk_qdma_phys_to_virt(ring, cpu);
  828. while ((cpu != dma) && budget) {
  829. u32 next_cpu = desc->txd2;
  830. int mac;
  831. desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
  832. if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
  833. break;
  834. mac = (desc->txd4 >> TX_DMA_FPORT_SHIFT) &
  835. TX_DMA_FPORT_MASK;
  836. mac--;
  837. tx_buf = mtk_desc_to_tx_buf(ring, desc);
  838. skb = tx_buf->skb;
  839. if (!skb) {
  840. condition = 1;
  841. break;
  842. }
  843. if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
  844. bytes[mac] += skb->len;
  845. done[mac]++;
  846. budget--;
  847. }
  848. mtk_tx_unmap(eth, tx_buf);
  849. ring->last_free = desc;
  850. atomic_inc(&ring->free_count);
  851. cpu = next_cpu;
  852. }
  853. mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);
  854. for (i = 0; i < MTK_MAC_COUNT; i++) {
  855. if (!eth->netdev[i] || !done[i])
  856. continue;
  857. netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
  858. total += done[i];
  859. }
  860. if (mtk_queue_stopped(eth) &&
  861. (atomic_read(&ring->free_count) > ring->thresh))
  862. mtk_wake_queue(eth);
  863. return total;
  864. }
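/*
 * TX completion walks the ring from the CPU pointer towards the DMA pointer,
 * frees the mapped buffers, reports the completed packets/bytes per netdev
 * through netdev_completed_queue() and re-wakes the queues once more than
 * ring->thresh descriptors are free again.
 */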
  865. static void mtk_handle_status_irq(struct mtk_eth *eth)
  866. {
  867. u32 status2 = mtk_r32(eth, MTK_INT_STATUS2);
  868. if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
  869. mtk_stats_update(eth);
  870. mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
  871. MTK_INT_STATUS2);
  872. }
  873. }
  874. static int mtk_napi_tx(struct napi_struct *napi, int budget)
  875. {
  876. struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
  877. u32 status, mask;
  878. int tx_done = 0;
  879. mtk_handle_status_irq(eth);
  880. mtk_w32(eth, MTK_TX_DONE_INT, MTK_QMTK_INT_STATUS);
  881. tx_done = mtk_poll_tx(eth, budget);
  882. if (unlikely(netif_msg_intr(eth))) {
  883. status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
  884. mask = mtk_r32(eth, MTK_QDMA_INT_MASK);
  885. dev_info(eth->dev,
  886. "done tx %d, intr 0x%08x/0x%x\n",
  887. tx_done, status, mask);
  888. }
  889. if (tx_done == budget)
  890. return budget;
  891. status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
  892. if (status & MTK_TX_DONE_INT)
  893. return budget;
  894. napi_complete(napi);
  895. mtk_irq_enable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
  896. return tx_done;
  897. }
  898. static int mtk_napi_rx(struct napi_struct *napi, int budget)
  899. {
  900. struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
  901. u32 status, mask;
  902. int rx_done = 0;
  903. int remain_budget = budget;
  904. mtk_handle_status_irq(eth);
  905. poll_again:
  906. mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_STATUS);
  907. rx_done = mtk_poll_rx(napi, remain_budget, eth);
  908. if (unlikely(netif_msg_intr(eth))) {
  909. status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
  910. mask = mtk_r32(eth, MTK_PDMA_INT_MASK);
  911. dev_info(eth->dev,
  912. "done rx %d, intr 0x%08x/0x%x\n",
  913. rx_done, status, mask);
  914. }
  915. if (rx_done == remain_budget)
  916. return budget;
  917. status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
  918. if (status & MTK_RX_DONE_INT) {
  919. remain_budget -= rx_done;
  920. goto poll_again;
  921. }
  922. napi_complete(napi);
  923. mtk_irq_enable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
  924. return rx_done + budget - remain_budget;
  925. }
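/*
 * Both NAPI handlers follow the same ack-poll-recheck pattern: the DONE
 * interrupt is acknowledged before polling, the status register is checked
 * again before napi_complete() so work arriving in between is not lost, and
 * the interrupt source is only unmasked after the poll has really finished.
 */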
  926. static int mtk_tx_alloc(struct mtk_eth *eth)
  927. {
  928. struct mtk_tx_ring *ring = &eth->tx_ring;
  929. int i, sz = sizeof(*ring->dma);
  930. ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
  931. GFP_KERNEL);
  932. if (!ring->buf)
  933. goto no_tx_mem;
  934. ring->dma = dma_alloc_coherent(eth->dev,
  935. MTK_DMA_SIZE * sz,
  936. &ring->phys,
  937. GFP_ATOMIC | __GFP_ZERO);
  938. if (!ring->dma)
  939. goto no_tx_mem;
  940. memset(ring->dma, 0, MTK_DMA_SIZE * sz);
  941. for (i = 0; i < MTK_DMA_SIZE; i++) {
  942. int next = (i + 1) % MTK_DMA_SIZE;
  943. u32 next_ptr = ring->phys + next * sz;
  944. ring->dma[i].txd2 = next_ptr;
  945. ring->dma[i].txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
  946. }
  947. atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
  948. ring->next_free = &ring->dma[0];
  949. ring->last_free = &ring->dma[MTK_DMA_SIZE - 1];
  950. ring->thresh = MAX_SKB_FRAGS;
  951. /* make sure that all changes to the dma ring are flushed before we
  952. * continue
  953. */
  954. wmb();
  955. mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR);
  956. mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR);
  957. mtk_w32(eth,
  958. ring->phys + ((MTK_DMA_SIZE - 1) * sz),
  959. MTK_QTX_CRX_PTR);
  960. mtk_w32(eth,
  961. ring->phys + ((MTK_DMA_SIZE - 1) * sz),
  962. MTK_QTX_DRX_PTR);
  963. mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES, MTK_QTX_CFG(0));
  964. return 0;
  965. no_tx_mem:
  966. return -ENOMEM;
  967. }
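/*
 * The TX ring is a circular list linked through txd2 with all descriptors
 * initially owned by the CPU.  free_count starts at MTK_DMA_SIZE - 2 because
 * next_free and last_free are kept apart as sentinels, and the CTX/DTX and
 * CRX/DRX pointer registers are programmed so the hardware sees an empty
 * ring.
 */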
  968. static void mtk_tx_clean(struct mtk_eth *eth)
  969. {
  970. struct mtk_tx_ring *ring = &eth->tx_ring;
  971. int i;
  972. if (ring->buf) {
  973. for (i = 0; i < MTK_DMA_SIZE; i++)
  974. mtk_tx_unmap(eth, &ring->buf[i]);
  975. kfree(ring->buf);
  976. ring->buf = NULL;
  977. }
  978. if (ring->dma) {
  979. dma_free_coherent(eth->dev,
  980. MTK_DMA_SIZE * sizeof(*ring->dma),
  981. ring->dma,
  982. ring->phys);
  983. ring->dma = NULL;
  984. }
  985. }
  986. static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
  987. {
  988. struct mtk_rx_ring *ring = &eth->rx_ring[ring_no];
  989. int rx_data_len, rx_dma_size;
  990. int i;
  991. if (rx_flag == MTK_RX_FLAGS_HWLRO) {
  992. rx_data_len = MTK_MAX_LRO_RX_LENGTH;
  993. rx_dma_size = MTK_HW_LRO_DMA_SIZE;
  994. } else {
  995. rx_data_len = ETH_DATA_LEN;
  996. rx_dma_size = MTK_DMA_SIZE;
  997. }
  998. ring->frag_size = mtk_max_frag_size(rx_data_len);
  999. ring->buf_size = mtk_max_buf_size(ring->frag_size);
  1000. ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
  1001. GFP_KERNEL);
  1002. if (!ring->data)
  1003. return -ENOMEM;
  1004. for (i = 0; i < rx_dma_size; i++) {
  1005. ring->data[i] = netdev_alloc_frag(ring->frag_size);
  1006. if (!ring->data[i])
  1007. return -ENOMEM;
  1008. }
  1009. ring->dma = dma_alloc_coherent(eth->dev,
  1010. rx_dma_size * sizeof(*ring->dma),
  1011. &ring->phys,
  1012. GFP_ATOMIC | __GFP_ZERO);
  1013. if (!ring->dma)
  1014. return -ENOMEM;
  1015. for (i = 0; i < rx_dma_size; i++) {
  1016. dma_addr_t dma_addr = dma_map_single(eth->dev,
  1017. ring->data[i] + NET_SKB_PAD,
  1018. ring->buf_size,
  1019. DMA_FROM_DEVICE);
  1020. if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
  1021. return -ENOMEM;
  1022. ring->dma[i].rxd1 = (unsigned int)dma_addr;
  1023. ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size);
  1024. }
  1025. ring->dma_size = rx_dma_size;
  1026. ring->calc_idx_update = false;
  1027. ring->calc_idx = rx_dma_size - 1;
  1028. ring->crx_idx_reg = MTK_PRX_CRX_IDX_CFG(ring_no);
  1029. /* make sure that all changes to the dma ring are flushed before we
  1030. * continue
  1031. */
  1032. wmb();
  1033. mtk_w32(eth, ring->phys, MTK_PRX_BASE_PTR_CFG(ring_no));
  1034. mtk_w32(eth, rx_dma_size, MTK_PRX_MAX_CNT_CFG(ring_no));
  1035. mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
  1036. mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_PDMA_RST_IDX);
  1037. return 0;
  1038. }
  1039. static void mtk_rx_clean(struct mtk_eth *eth, int ring_no)
  1040. {
  1041. struct mtk_rx_ring *ring = &eth->rx_ring[ring_no];
  1042. int i;
  1043. if (ring->data && ring->dma) {
  1044. for (i = 0; i < ring->dma_size; i++) {
  1045. if (!ring->data[i])
  1046. continue;
  1047. if (!ring->dma[i].rxd1)
  1048. continue;
  1049. dma_unmap_single(eth->dev,
  1050. ring->dma[i].rxd1,
  1051. ring->buf_size,
  1052. DMA_FROM_DEVICE);
  1053. skb_free_frag(ring->data[i]);
  1054. }
  1055. kfree(ring->data);
  1056. ring->data = NULL;
  1057. }
  1058. if (ring->dma) {
  1059. dma_free_coherent(eth->dev,
  1060. ring->dma_size * sizeof(*ring->dma),
  1061. ring->dma,
  1062. ring->phys);
  1063. ring->dma = NULL;
  1064. }
  1065. }
  1066. static int mtk_hwlro_rx_init(struct mtk_eth *eth)
  1067. {
  1068. int i;
  1069. u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
  1070. u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;
  1071. /* set LRO rings to auto-learn modes */
  1072. ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE;
  1073. /* validate LRO ring */
  1074. ring_ctrl_dw2 |= MTK_RING_VLD;
  1075. /* set AGE timer (unit: 20us) */
  1076. ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
  1077. ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;
  1078. /* set max AGG timer (unit: 20us) */
  1079. ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;
  1080. /* set max LRO AGG count */
  1081. ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
  1082. ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;
  1083. for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
  1084. mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
  1085. mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
  1086. mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
  1087. }
  1088. /* IPv4 checksum update enable */
  1089. lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;
  1090. /* switch priority comparison to packet count mode */
  1091. lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;
  1092. /* bandwidth threshold setting */
  1093. mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);
  1094. /* auto-learn score delta setting */
  1095. mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_PDMA_LRO_ALT_SCORE_DELTA);
  1096. /* set refresh timer for altering flows to 1 sec. (unit: 20us) */
  1097. mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
  1098. MTK_PDMA_LRO_ALT_REFRESH_TIMER);
  1099. /* set HW LRO mode & the max aggregation count for rx packets */
  1100. lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);
  1101. /* the minimal remaining room of SDL0 in RXD for lro aggregation */
  1102. lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;
  1103. /* enable HW LRO */
  1104. lro_ctrl_dw0 |= MTK_LRO_EN;
  1105. mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
  1106. mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);
  1107. return 0;
  1108. }
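/*
 * HW LRO uses RX rings 1..MTK_MAX_RX_RING_NUM-1 in auto-learn mode with the
 * age/aggregation limits programmed above, while ring 0 stays the normal
 * receive ring; the global LRO enable and the score/refresh tuning live in
 * the PDMA_LRO_CTRL words.
 */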
  1109. static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
  1110. {
  1111. int i;
  1112. u32 val;
  1113. /* relinquish lro rings, flush aggregated packets */
  1114. mtk_w32(eth, MTK_LRO_RING_RELINQUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);
  1115. /* wait for relinquishments done */
  1116. for (i = 0; i < 10; i++) {
  1117. val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
  1118. if (val & MTK_LRO_RING_RELINQUISH_DONE) {
  1119. msleep(20);
  1120. continue;
  1121. }
  1122. break;
  1123. }
  1124. /* invalidate lro rings */
  1125. for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
  1126. mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));
  1127. /* disable HW LRO */
  1128. mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
  1129. }
  1130. static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
  1131. {
  1132. u32 reg_val;
  1133. reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
  1134. /* invalidate the IP setting */
  1135. mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
  1136. mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));
  1137. /* validate the IP setting */
  1138. mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
  1139. }
  1140. static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
  1141. {
  1142. u32 reg_val;
  1143. reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
  1144. /* invalidate the IP setting */
  1145. mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
  1146. mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
  1147. }
  1148. static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
  1149. {
  1150. int cnt = 0;
  1151. int i;
  1152. for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
  1153. if (mac->hwlro_ip[i])
  1154. cnt++;
  1155. }
  1156. return cnt;
  1157. }
  1158. static int mtk_hwlro_add_ipaddr(struct net_device *dev,
  1159. struct ethtool_rxnfc *cmd)
  1160. {
  1161. struct ethtool_rx_flow_spec *fsp =
  1162. (struct ethtool_rx_flow_spec *)&cmd->fs;
  1163. struct mtk_mac *mac = netdev_priv(dev);
  1164. struct mtk_eth *eth = mac->hw;
  1165. int hwlro_idx;
  1166. if ((fsp->flow_type != TCP_V4_FLOW) ||
  1167. (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
  1168. (fsp->location > 1))
  1169. return -EINVAL;
  1170. mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
  1171. hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
  1172. mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
  1173. mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]);
  1174. return 0;
  1175. }
  1176. static int mtk_hwlro_del_ipaddr(struct net_device *dev,
  1177. struct ethtool_rxnfc *cmd)
  1178. {
  1179. struct ethtool_rx_flow_spec *fsp =
  1180. (struct ethtool_rx_flow_spec *)&cmd->fs;
  1181. struct mtk_mac *mac = netdev_priv(dev);
  1182. struct mtk_eth *eth = mac->hw;
  1183. int hwlro_idx;
  1184. if (fsp->location > 1)
  1185. return -EINVAL;
  1186. mac->hwlro_ip[fsp->location] = 0;
  1187. hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
  1188. mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
  1189. mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
  1190. return 0;
  1191. }
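/*
 * Each MAC owns MTK_MAX_LRO_IP_CNT destination-IP slots; the global slot
 * index is mac->id * MTK_MAX_LRO_IP_CNT + fsp->location, and the ethtool
 * add/del paths only accept locations 0 and 1.
 */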
  1192. static void mtk_hwlro_netdev_disable(struct net_device *dev)
  1193. {
  1194. struct mtk_mac *mac = netdev_priv(dev);
  1195. struct mtk_eth *eth = mac->hw;
  1196. int i, hwlro_idx;
  1197. for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
  1198. mac->hwlro_ip[i] = 0;
  1199. hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i;
  1200. mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
  1201. }
  1202. mac->hwlro_ip_cnt = 0;
  1203. }
  1204. static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
  1205. struct ethtool_rxnfc *cmd)
  1206. {
  1207. struct mtk_mac *mac = netdev_priv(dev);
  1208. struct ethtool_rx_flow_spec *fsp =
  1209. (struct ethtool_rx_flow_spec *)&cmd->fs;
  1210. /* only the IPv4 TCP destination address is meaningful; the other fields are unused */
  1211. fsp->flow_type = TCP_V4_FLOW;
  1212. fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
  1213. fsp->m_u.tcp_ip4_spec.ip4dst = 0;
  1214. fsp->h_u.tcp_ip4_spec.ip4src = 0;
  1215. fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
  1216. fsp->h_u.tcp_ip4_spec.psrc = 0;
  1217. fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
  1218. fsp->h_u.tcp_ip4_spec.pdst = 0;
  1219. fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
  1220. fsp->h_u.tcp_ip4_spec.tos = 0;
  1221. fsp->m_u.tcp_ip4_spec.tos = 0xff;
  1222. return 0;
  1223. }
  1224. static int mtk_hwlro_get_fdir_all(struct net_device *dev,
  1225. struct ethtool_rxnfc *cmd,
  1226. u32 *rule_locs)
  1227. {
  1228. struct mtk_mac *mac = netdev_priv(dev);
  1229. int cnt = 0;
  1230. int i;
  1231. for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
  1232. if (mac->hwlro_ip[i]) {
  1233. rule_locs[cnt] = i;
  1234. cnt++;
  1235. }
  1236. }
  1237. cmd->rule_cnt = cnt;
  1238. return 0;
  1239. }
  1240. static netdev_features_t mtk_fix_features(struct net_device *dev,
  1241. netdev_features_t features)
  1242. {
  1243. if (!(features & NETIF_F_LRO)) {
  1244. struct mtk_mac *mac = netdev_priv(dev);
  1245. int ip_cnt = mtk_hwlro_get_ip_cnt(mac);
  1246. if (ip_cnt) {
  1247. netdev_info(dev, "RX flow is programmed, LRO should keep on\n");
  1248. features |= NETIF_F_LRO;
  1249. }
  1250. }
  1251. return features;
  1252. }
  1253. static int mtk_set_features(struct net_device *dev, netdev_features_t features)
  1254. {
  1255. int err = 0;
  1256. if (!((dev->features ^ features) & NETIF_F_LRO))
  1257. return 0;
  1258. if (!(features & NETIF_F_LRO))
  1259. mtk_hwlro_netdev_disable(dev);
  1260. return err;
  1261. }
  1262. /* wait for DMA to finish whatever it is doing before we start using it again */
  1263. static int mtk_dma_busy_wait(struct mtk_eth *eth)
  1264. {
  1265. unsigned long t_start = jiffies;
  1266. while (1) {
  1267. if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) &
  1268. (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
  1269. return 0;
  1270. if (time_after(jiffies, t_start + MTK_DMA_BUSY_TIMEOUT))
  1271. break;
  1272. }
  1273. dev_err(eth->dev, "DMA init timeout\n");
  1274. return -1;
  1275. }
  1276. static int mtk_dma_init(struct mtk_eth *eth)
  1277. {
  1278. int err;
  1279. u32 i;
  1280. if (mtk_dma_busy_wait(eth))
  1281. return -EBUSY;
  1282. /* QDMA needs scratch memory for internal reordering of the
  1283. * descriptors
  1284. */
  1285. err = mtk_init_fq_dma(eth);
  1286. if (err)
  1287. return err;
  1288. err = mtk_tx_alloc(eth);
  1289. if (err)
  1290. return err;
  1291. err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
  1292. if (err)
  1293. return err;
  1294. if (eth->hwlro) {
  1295. for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
  1296. err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO);
  1297. if (err)
  1298. return err;
  1299. }
  1300. err = mtk_hwlro_rx_init(eth);
  1301. if (err)
  1302. return err;
  1303. }
  1304. /* Enable random early drop and set drop threshold automatically */
  1305. mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN | FC_THRES_MIN,
  1306. MTK_QDMA_FC_THRES);
  1307. mtk_w32(eth, 0x0, MTK_QDMA_HRED2);
  1308. return 0;
  1309. }
  1310. static void mtk_dma_free(struct mtk_eth *eth)
  1311. {
  1312. int i;
  1313. for (i = 0; i < MTK_MAC_COUNT; i++)
  1314. if (eth->netdev[i])
  1315. netdev_reset_queue(eth->netdev[i]);
  1316. if (eth->scratch_ring) {
  1317. dma_free_coherent(eth->dev,
  1318. MTK_DMA_SIZE * sizeof(struct mtk_tx_dma),
  1319. eth->scratch_ring,
  1320. eth->phy_scratch_ring);
  1321. eth->scratch_ring = NULL;
  1322. eth->phy_scratch_ring = 0;
  1323. }
  1324. mtk_tx_clean(eth);
  1325. mtk_rx_clean(eth, 0);
  1326. if (eth->hwlro) {
  1327. mtk_hwlro_rx_uninit(eth);
  1328. for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
  1329. mtk_rx_clean(eth, i);
  1330. }
  1331. kfree(eth->scratch_head);
  1332. }
  1333. static void mtk_tx_timeout(struct net_device *dev)
  1334. {
  1335. struct mtk_mac *mac = netdev_priv(dev);
  1336. struct mtk_eth *eth = mac->hw;
  1337. eth->netdev[mac->id]->stats.tx_errors++;
  1338. netif_err(eth, tx_err, dev,
  1339. "transmit timed out\n");
  1340. schedule_work(&eth->pending_work);
  1341. }
static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
{
        struct mtk_eth *eth = _eth;

        if (likely(napi_schedule_prep(&eth->rx_napi))) {
                __napi_schedule(&eth->rx_napi);
                mtk_irq_disable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
        }

        return IRQ_HANDLED;
}

static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
{
        struct mtk_eth *eth = _eth;

        if (likely(napi_schedule_prep(&eth->tx_napi))) {
                __napi_schedule(&eth->tx_napi);
                mtk_irq_disable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
        }

        return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void mtk_poll_controller(struct net_device *dev)
{
        struct mtk_mac *mac = netdev_priv(dev);
        struct mtk_eth *eth = mac->hw;

        mtk_irq_disable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
        mtk_irq_disable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
        mtk_handle_irq_rx(eth->irq[2], dev);
        mtk_irq_enable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
        mtk_irq_enable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
}
#endif
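
/* Bring up the DMA rings and turn on the QDMA (TX) and PDMA (RX) engines. */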
static int mtk_start_dma(struct mtk_eth *eth)
{
        int err;

        err = mtk_dma_init(eth);
        if (err) {
                mtk_dma_free(eth);
                return err;
        }

        mtk_w32(eth,
                MTK_TX_WB_DDONE | MTK_TX_DMA_EN |
                MTK_DMA_SIZE_16DWORDS | MTK_NDP_CO_PRO,
                MTK_QDMA_GLO_CFG);

        mtk_w32(eth,
                MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
                MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
                MTK_PDMA_GLO_CFG);

        return 0;
}

static int mtk_open(struct net_device *dev)
{
        struct mtk_mac *mac = netdev_priv(dev);
        struct mtk_eth *eth = mac->hw;

        /* we run 2 netdevs on the same dma ring so we only bring it up once */
        if (!atomic_read(&eth->dma_refcnt)) {
                int err = mtk_start_dma(eth);

                if (err)
                        return err;

                napi_enable(&eth->tx_napi);
                napi_enable(&eth->rx_napi);
                mtk_irq_enable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
                mtk_irq_enable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
        }
        atomic_inc(&eth->dma_refcnt);

        phy_start(dev->phydev);
        netif_start_queue(dev);

        return 0;
}
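
/* Clear the enable bits in the given GLO_CFG register and poll until the
 * TX/RX engines report idle (up to roughly 200 ms).
 */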
static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
{
        u32 val;
        int i;

        /* stop the dma engine */
        spin_lock_bh(&eth->page_lock);
        val = mtk_r32(eth, glo_cfg);
        mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
                glo_cfg);
        spin_unlock_bh(&eth->page_lock);

        /* wait for dma stop */
        for (i = 0; i < 10; i++) {
                val = mtk_r32(eth, glo_cfg);
                if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
                        msleep(20);
                        continue;
                }
                break;
        }
}

static int mtk_stop(struct net_device *dev)
{
        struct mtk_mac *mac = netdev_priv(dev);
        struct mtk_eth *eth = mac->hw;

        netif_tx_disable(dev);
        phy_stop(dev->phydev);

        /* only shutdown DMA if this is the last user */
        if (!atomic_dec_and_test(&eth->dma_refcnt))
                return 0;

        mtk_irq_disable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
        mtk_irq_disable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
        napi_disable(&eth->tx_napi);
        napi_disable(&eth->rx_napi);

        mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
        mtk_stop_dma(eth, MTK_PDMA_GLO_CFG);

        mtk_dma_free(eth);

        return 0;
}
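
/* Assert and then release the given reset bits in the ethsys reset control
 * register.
 */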
static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
{
        regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
                           reset_bits,
                           reset_bits);

        usleep_range(1000, 1100);
        regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
                           reset_bits,
                           ~reset_bits);
        mdelay(10);
}
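
/* One-time hardware bring-up: enable the clocks, reset the frame engine
 * and PPE, program the GMAC interface modes and pad control, and set up
 * interrupt grouping and GDMA forwarding/checksum offload.
 */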
static int mtk_hw_init(struct mtk_eth *eth)
{
        int i, val;

        if (test_and_set_bit(MTK_HW_INIT, &eth->state))
                return 0;

        pm_runtime_enable(eth->dev);
        pm_runtime_get_sync(eth->dev);

        clk_prepare_enable(eth->clks[MTK_CLK_ETHIF]);
        clk_prepare_enable(eth->clks[MTK_CLK_ESW]);
        clk_prepare_enable(eth->clks[MTK_CLK_GP1]);
        clk_prepare_enable(eth->clks[MTK_CLK_GP2]);
        ethsys_reset(eth, RSTCTRL_FE);
        ethsys_reset(eth, RSTCTRL_PPE);

        regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
        for (i = 0; i < MTK_MAC_COUNT; i++) {
                if (!eth->mac[i])
                        continue;
                val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, eth->mac[i]->id);
                val |= SYSCFG0_GE_MODE(eth->mac[i]->ge_mode, eth->mac[i]->id);
        }
        regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);

        /* Set GE2 driving and slew rate */
        regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);

        /* set GE2 TDSEL */
        regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);

        /* set GE2 TUNE */
        regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);

        /* Set linkdown as the default for each GMAC; each MCR is programmed
         * with the appropriate value once mtk_phy_link_adjust() is invoked.
         */
        for (i = 0; i < MTK_MAC_COUNT; i++)
                mtk_w32(eth, 0, MTK_MAC_MCR(i));

        /* Enable RX VLAN offloading */
        mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);

        /* disable delay and normal interrupt */
        mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
        mtk_w32(eth, 0, MTK_PDMA_DELAY_INT);
        mtk_irq_disable(eth, MTK_QDMA_INT_MASK, ~0);
        mtk_irq_disable(eth, MTK_PDMA_INT_MASK, ~0);
        mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
        mtk_w32(eth, 0, MTK_RST_GL);

        /* FE int grouping */
        mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1);
        mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_GRP2);
        mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1);
        mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2);
        mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);

        for (i = 0; i < 2; i++) {
                u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));

                /* setup the forward port to send frame to PDMA */
                val &= ~0xffff;

                /* Enable RX checksum */
                val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;

                /* setup the mac dma */
                mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
        }

        return 0;
}

static int mtk_hw_deinit(struct mtk_eth *eth)
{
        if (!test_and_clear_bit(MTK_HW_INIT, &eth->state))
                return 0;

        clk_disable_unprepare(eth->clks[MTK_CLK_GP2]);
        clk_disable_unprepare(eth->clks[MTK_CLK_GP1]);
        clk_disable_unprepare(eth->clks[MTK_CLK_ESW]);
        clk_disable_unprepare(eth->clks[MTK_CLK_ETHIF]);

        pm_runtime_put_sync(eth->dev);
        pm_runtime_disable(eth->dev);

        return 0;
}
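
/* .ndo_init: take the MAC address from the device tree (falling back to a
 * random one) and connect the PHY.
 */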
static int __init mtk_init(struct net_device *dev)
{
        struct mtk_mac *mac = netdev_priv(dev);
        struct mtk_eth *eth = mac->hw;
        const char *mac_addr;

        mac_addr = of_get_mac_address(mac->of_node);
        if (mac_addr)
                ether_addr_copy(dev->dev_addr, mac_addr);

        /* If the MAC address is invalid, use a random MAC address instead */
        if (!is_valid_ether_addr(dev->dev_addr)) {
                random_ether_addr(dev->dev_addr);
                dev_err(eth->dev, "generated random MAC address %pM\n",
                        dev->dev_addr);
                dev->addr_assign_type = NET_ADDR_RANDOM;
        }

        return mtk_phy_connect(dev);
}

static void mtk_uninit(struct net_device *dev)
{
        struct mtk_mac *mac = netdev_priv(dev);
        struct mtk_eth *eth = mac->hw;

        phy_disconnect(dev->phydev);
        if (of_phy_is_fixed_link(mac->of_node))
                of_phy_deregister_fixed_link(mac->of_node);
        mtk_irq_disable(eth, MTK_QDMA_INT_MASK, ~0);
        mtk_irq_disable(eth, MTK_PDMA_INT_MASK, ~0);
}

static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        switch (cmd) {
        case SIOCGMIIPHY:
        case SIOCGMIIREG:
        case SIOCSMIIREG:
                return phy_mii_ioctl(dev->phydev, ifr, cmd);
        default:
                break;
        }

        return -EOPNOTSUPP;
}
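
/* Reset worker scheduled from the TX timeout handler: stop all netdevs,
 * re-initialise the hardware and PHYs, then reopen the devices that were
 * running.
 */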
static void mtk_pending_work(struct work_struct *work)
{
        struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
        int err, i;
        unsigned long restart = 0;

        rtnl_lock();

        dev_dbg(eth->dev, "[%s][%d] reset\n", __func__, __LINE__);

        while (test_and_set_bit_lock(MTK_RESETTING, &eth->state))
                cpu_relax();

        dev_dbg(eth->dev, "[%s][%d] mtk_stop starts\n", __func__, __LINE__);
        /* stop all devices to make sure that dma is properly shut down */
        for (i = 0; i < MTK_MAC_COUNT; i++) {
                if (!eth->netdev[i])
                        continue;
                mtk_stop(eth->netdev[i]);
                __set_bit(i, &restart);
        }
        dev_dbg(eth->dev, "[%s][%d] mtk_stop ends\n", __func__, __LINE__);

        /* restart underlying hardware such as power, clock, pin mux
         * and the connected phy
         */
        mtk_hw_deinit(eth);

        if (eth->dev->pins)
                pinctrl_select_state(eth->dev->pins->p,
                                     eth->dev->pins->default_state);
        mtk_hw_init(eth);

        for (i = 0; i < MTK_MAC_COUNT; i++) {
                if (!eth->mac[i] ||
                    of_phy_is_fixed_link(eth->mac[i]->of_node))
                        continue;
                err = phy_init_hw(eth->netdev[i]->phydev);
                if (err)
                        dev_err(eth->dev, "%s: PHY init failed.\n",
                                eth->netdev[i]->name);
        }

        /* restart DMA and enable IRQs */
        for (i = 0; i < MTK_MAC_COUNT; i++) {
                if (!test_bit(i, &restart))
                        continue;
                err = mtk_open(eth->netdev[i]);
                if (err) {
                        netif_alert(eth, ifup, eth->netdev[i],
                                    "Driver up/down cycle failed, closing device.\n");
                        dev_close(eth->netdev[i]);
                }
        }

        dev_dbg(eth->dev, "[%s][%d] reset done\n", __func__, __LINE__);

        clear_bit_unlock(MTK_RESETTING, &eth->state);

        rtnl_unlock();
}

static int mtk_free_dev(struct mtk_eth *eth)
{
        int i;

        for (i = 0; i < MTK_MAC_COUNT; i++) {
                if (!eth->netdev[i])
                        continue;
                free_netdev(eth->netdev[i]);
        }

        return 0;
}

static int mtk_unreg_dev(struct mtk_eth *eth)
{
        int i;

        for (i = 0; i < MTK_MAC_COUNT; i++) {
                if (!eth->netdev[i])
                        continue;
                unregister_netdev(eth->netdev[i]);
        }

        return 0;
}

static int mtk_cleanup(struct mtk_eth *eth)
{
        mtk_unreg_dev(eth);
        mtk_free_dev(eth);
        cancel_work_sync(&eth->pending_work);

        return 0;
}

static int mtk_get_link_ksettings(struct net_device *ndev,
                                  struct ethtool_link_ksettings *cmd)
{
        struct mtk_mac *mac = netdev_priv(ndev);

        if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
                return -EBUSY;

        return phy_ethtool_ksettings_get(ndev->phydev, cmd);
}

static int mtk_set_link_ksettings(struct net_device *ndev,
                                  const struct ethtool_link_ksettings *cmd)
{
        struct mtk_mac *mac = netdev_priv(ndev);

        if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
                return -EBUSY;

        return phy_ethtool_ksettings_set(ndev->phydev, cmd);
}

static void mtk_get_drvinfo(struct net_device *dev,
                            struct ethtool_drvinfo *info)
{
        struct mtk_mac *mac = netdev_priv(dev);

        strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
        strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
        info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
}

static u32 mtk_get_msglevel(struct net_device *dev)
{
        struct mtk_mac *mac = netdev_priv(dev);

        return mac->hw->msg_enable;
}

static void mtk_set_msglevel(struct net_device *dev, u32 value)
{
        struct mtk_mac *mac = netdev_priv(dev);

        mac->hw->msg_enable = value;
}

static int mtk_nway_reset(struct net_device *dev)
{
        struct mtk_mac *mac = netdev_priv(dev);

        if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
                return -EBUSY;

        return genphy_restart_aneg(dev->phydev);
}

static u32 mtk_get_link(struct net_device *dev)
{
        struct mtk_mac *mac = netdev_priv(dev);
        int err;

        if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
                return -EBUSY;

        err = genphy_update_link(dev->phydev);
        if (err)
                return ethtool_op_get_link(dev);

        return dev->phydev->link;
}

static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
        int i;

        switch (stringset) {
        case ETH_SS_STATS:
                for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
                        memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
                        data += ETH_GSTRING_LEN;
                }
                break;
        }
}

static int mtk_get_sset_count(struct net_device *dev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(mtk_ethtool_stats);
        default:
                return -EOPNOTSUPP;
        }
}
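
/* Copy the per-MAC hardware counters out under the u64_stats sequence
 * counter, retrying if an update raced with the read.
 */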
static void mtk_get_ethtool_stats(struct net_device *dev,
                                  struct ethtool_stats *stats, u64 *data)
{
        struct mtk_mac *mac = netdev_priv(dev);
        struct mtk_hw_stats *hwstats = mac->hw_stats;
        u64 *data_src, *data_dst;
        unsigned int start;
        int i;

        if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
                return;

        if (netif_running(dev) && netif_device_present(dev)) {
                if (spin_trylock(&hwstats->stats_lock)) {
                        mtk_stats_update_mac(mac);
                        spin_unlock(&hwstats->stats_lock);
                }
        }

        data_src = (u64 *)hwstats;

        do {
                data_dst = data;
                start = u64_stats_fetch_begin_irq(&hwstats->syncp);

                for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
                        *data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
        } while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
}

static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
                         u32 *rule_locs)
{
        int ret = -EOPNOTSUPP;

        switch (cmd->cmd) {
        case ETHTOOL_GRXRINGS:
                if (dev->features & NETIF_F_LRO) {
                        cmd->data = MTK_MAX_RX_RING_NUM;
                        ret = 0;
                }
                break;
        case ETHTOOL_GRXCLSRLCNT:
                if (dev->features & NETIF_F_LRO) {
                        struct mtk_mac *mac = netdev_priv(dev);

                        cmd->rule_cnt = mac->hwlro_ip_cnt;
                        ret = 0;
                }
                break;
        case ETHTOOL_GRXCLSRULE:
                if (dev->features & NETIF_F_LRO)
                        ret = mtk_hwlro_get_fdir_entry(dev, cmd);
                break;
        case ETHTOOL_GRXCLSRLALL:
                if (dev->features & NETIF_F_LRO)
                        ret = mtk_hwlro_get_fdir_all(dev, cmd,
                                                     rule_locs);
                break;
        default:
                break;
        }

        return ret;
}

static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
        int ret = -EOPNOTSUPP;

        switch (cmd->cmd) {
        case ETHTOOL_SRXCLSRLINS:
                if (dev->features & NETIF_F_LRO)
                        ret = mtk_hwlro_add_ipaddr(dev, cmd);
                break;
        case ETHTOOL_SRXCLSRLDEL:
                if (dev->features & NETIF_F_LRO)
                        ret = mtk_hwlro_del_ipaddr(dev, cmd);
                break;
        default:
                break;
        }

        return ret;
}

static const struct ethtool_ops mtk_ethtool_ops = {
        .get_link_ksettings     = mtk_get_link_ksettings,
        .set_link_ksettings     = mtk_set_link_ksettings,
        .get_drvinfo            = mtk_get_drvinfo,
        .get_msglevel           = mtk_get_msglevel,
        .set_msglevel           = mtk_set_msglevel,
        .nway_reset             = mtk_nway_reset,
        .get_link               = mtk_get_link,
        .get_strings            = mtk_get_strings,
        .get_sset_count         = mtk_get_sset_count,
        .get_ethtool_stats      = mtk_get_ethtool_stats,
        .get_rxnfc              = mtk_get_rxnfc,
        .set_rxnfc              = mtk_set_rxnfc,
};

static const struct net_device_ops mtk_netdev_ops = {
        .ndo_init               = mtk_init,
        .ndo_uninit             = mtk_uninit,
        .ndo_open               = mtk_open,
        .ndo_stop               = mtk_stop,
        .ndo_start_xmit         = mtk_start_xmit,
        .ndo_set_mac_address    = mtk_set_mac_address,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_do_ioctl           = mtk_do_ioctl,
        .ndo_change_mtu         = eth_change_mtu,
        .ndo_tx_timeout         = mtk_tx_timeout,
        .ndo_get_stats64        = mtk_get_stats64,
        .ndo_fix_features       = mtk_fix_features,
        .ndo_set_features       = mtk_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = mtk_poll_controller,
#endif
};
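
/* Create a netdev for one "mediatek,eth-mac" DT child node and hook it up
 * to the shared frame engine.
 */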
static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
{
        struct mtk_mac *mac;
        const __be32 *_id = of_get_property(np, "reg", NULL);
        int id, err;

        if (!_id) {
                dev_err(eth->dev, "missing mac id\n");
                return -EINVAL;
        }

        id = be32_to_cpup(_id);
        if (id >= MTK_MAC_COUNT) {
                dev_err(eth->dev, "%d is not a valid mac id\n", id);
                return -EINVAL;
        }

        if (eth->netdev[id]) {
                dev_err(eth->dev, "duplicate mac id found: %d\n", id);
                return -EINVAL;
        }

        eth->netdev[id] = alloc_etherdev(sizeof(*mac));
        if (!eth->netdev[id]) {
                dev_err(eth->dev, "alloc_etherdev failed\n");
                return -ENOMEM;
        }
        mac = netdev_priv(eth->netdev[id]);
        eth->mac[id] = mac;
        mac->id = id;
        mac->hw = eth;
        mac->of_node = np;

        memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
        mac->hwlro_ip_cnt = 0;

        mac->hw_stats = devm_kzalloc(eth->dev,
                                     sizeof(*mac->hw_stats),
                                     GFP_KERNEL);
        if (!mac->hw_stats) {
                dev_err(eth->dev, "failed to allocate counter memory\n");
                err = -ENOMEM;
                goto free_netdev;
        }
        spin_lock_init(&mac->hw_stats->stats_lock);
        u64_stats_init(&mac->hw_stats->syncp);
        mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;

        SET_NETDEV_DEV(eth->netdev[id], eth->dev);
        eth->netdev[id]->watchdog_timeo = 5 * HZ;
        eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
        eth->netdev[id]->base_addr = (unsigned long)eth->base;

        eth->netdev[id]->hw_features = MTK_HW_FEATURES;
        if (eth->hwlro)
                eth->netdev[id]->hw_features |= NETIF_F_LRO;

        eth->netdev[id]->vlan_features = MTK_HW_FEATURES &
                ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
        eth->netdev[id]->features |= MTK_HW_FEATURES;
        eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;

        eth->netdev[id]->irq = eth->irq[0];
        return 0;

free_netdev:
        free_netdev(eth->netdev[id]);
        return err;
}
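
/* The chip id is stored as ASCII digits in two ethsys registers; decode
 * them into a single decimal number.
 */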
static int mtk_get_chip_id(struct mtk_eth *eth, u32 *chip_id)
{
        u32 val[2], id[4];

        regmap_read(eth->ethsys, ETHSYS_CHIPID0_3, &val[0]);
        regmap_read(eth->ethsys, ETHSYS_CHIPID4_7, &val[1]);

        id[3] = ((val[0] >> 16) & 0xff) - '0';
        id[2] = ((val[0] >> 24) & 0xff) - '0';
        id[1] = (val[1] & 0xff) - '0';
        id[0] = ((val[1] >> 8) & 0xff) - '0';

        *chip_id = (id[3] * 1000) + (id[2] * 100) +
                   (id[1] * 10) + id[0];

        if (!(*chip_id)) {
                dev_err(eth->dev, "failed to get chip id\n");
                return -ENODEV;
        }

        dev_info(eth->dev, "chip id = %d\n", *chip_id);

        return 0;
}

static bool mtk_is_hwlro_supported(struct mtk_eth *eth)
{
        switch (eth->chip_id) {
        case MT7623_ETH:
                return true;
        }

        return false;
}
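
/* Probe: map the frame engine registers, look up the ethsys/pctl syscons,
 * IRQs and clocks, initialise the hardware, create one netdev per MAC node
 * and register the shared NAPI contexts on a dummy netdev.
 */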
static int mtk_probe(struct platform_device *pdev)
{
        struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        struct device_node *mac_np;
        const struct of_device_id *match;
        struct mtk_soc_data *soc;
        struct mtk_eth *eth;
        int err;
        int i;

        match = of_match_device(of_mtk_match, &pdev->dev);
        soc = (struct mtk_soc_data *)match->data;

        eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
        if (!eth)
                return -ENOMEM;

        eth->dev = &pdev->dev;
        eth->base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(eth->base))
                return PTR_ERR(eth->base);

        spin_lock_init(&eth->page_lock);
        spin_lock_init(&eth->irq_lock);

        eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
                                                      "mediatek,ethsys");
        if (IS_ERR(eth->ethsys)) {
                dev_err(&pdev->dev, "no ethsys regmap found\n");
                return PTR_ERR(eth->ethsys);
        }

        eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
                                                    "mediatek,pctl");
        if (IS_ERR(eth->pctl)) {
                dev_err(&pdev->dev, "no pctl regmap found\n");
                return PTR_ERR(eth->pctl);
        }

        for (i = 0; i < 3; i++) {
                eth->irq[i] = platform_get_irq(pdev, i);
                if (eth->irq[i] < 0) {
                        dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
                        return -ENXIO;
                }
        }
        for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
                eth->clks[i] = devm_clk_get(eth->dev,
                                            mtk_clks_source_name[i]);
                if (IS_ERR(eth->clks[i])) {
                        if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER)
                                return -EPROBE_DEFER;
                        return -ENODEV;
                }
        }

        eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
        INIT_WORK(&eth->pending_work, mtk_pending_work);

        err = mtk_hw_init(eth);
        if (err)
                return err;

        err = mtk_get_chip_id(eth, &eth->chip_id);
        if (err)
                return err;

        eth->hwlro = mtk_is_hwlro_supported(eth);

        for_each_child_of_node(pdev->dev.of_node, mac_np) {
                if (!of_device_is_compatible(mac_np,
                                             "mediatek,eth-mac"))
                        continue;

                if (!of_device_is_available(mac_np))
                        continue;

                err = mtk_add_mac(eth, mac_np);
                if (err)
                        goto err_deinit_hw;
        }

        err = devm_request_irq(eth->dev, eth->irq[1], mtk_handle_irq_tx, 0,
                               dev_name(eth->dev), eth);
        if (err)
                goto err_free_dev;

        err = devm_request_irq(eth->dev, eth->irq[2], mtk_handle_irq_rx, 0,
                               dev_name(eth->dev), eth);
        if (err)
                goto err_free_dev;

        err = mtk_mdio_init(eth);
        if (err)
                goto err_free_dev;

        for (i = 0; i < MTK_MAX_DEVS; i++) {
                if (!eth->netdev[i])
                        continue;

                err = register_netdev(eth->netdev[i]);
                if (err) {
                        dev_err(eth->dev, "error bringing up device\n");
                        goto err_deinit_mdio;
                } else
                        netif_info(eth, probe, eth->netdev[i],
                                   "mediatek frame engine at 0x%08lx, irq %d\n",
                                   eth->netdev[i]->base_addr, eth->irq[0]);
        }

        /* we run 2 devices on the same DMA ring so we need a dummy device
         * for NAPI to work
         */
        init_dummy_netdev(&eth->dummy_dev);
        netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx,
                       MTK_NAPI_WEIGHT);
        netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx,
                       MTK_NAPI_WEIGHT);

        platform_set_drvdata(pdev, eth);

        return 0;

err_deinit_mdio:
        mtk_mdio_cleanup(eth);
err_free_dev:
        mtk_free_dev(eth);
err_deinit_hw:
        mtk_hw_deinit(eth);

        return err;
}

static int mtk_remove(struct platform_device *pdev)
{
        struct mtk_eth *eth = platform_get_drvdata(pdev);
        int i;

        /* stop all devices to make sure that dma is properly shut down */
        for (i = 0; i < MTK_MAC_COUNT; i++) {
                if (!eth->netdev[i])
                        continue;
                mtk_stop(eth->netdev[i]);
        }

        mtk_hw_deinit(eth);

        netif_napi_del(&eth->tx_napi);
        netif_napi_del(&eth->rx_napi);
        mtk_cleanup(eth);
        mtk_mdio_cleanup(eth);

        return 0;
}

const struct of_device_id of_mtk_match[] = {
        { .compatible = "mediatek,mt2701-eth" },
        {},
};
MODULE_DEVICE_TABLE(of, of_mtk_match);

static struct platform_driver mtk_driver = {
        .probe = mtk_probe,
        .remove = mtk_remove,
        .driver = {
                .name = "mtk_soc_eth",
                .of_match_table = of_mtk_match,
        },
};

module_platform_driver(mtk_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");