mtk_eth_soc.c

  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. *
  4. * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
  5. * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
  6. * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
  7. */
  8. #include <linux/of_device.h>
  9. #include <linux/of_mdio.h>
  10. #include <linux/of_net.h>
  11. #include <linux/mfd/syscon.h>
  12. #include <linux/regmap.h>
  13. #include <linux/clk.h>
  14. #include <linux/pm_runtime.h>
  15. #include <linux/if_vlan.h>
  16. #include <linux/reset.h>
  17. #include <linux/tcp.h>
  18. #include <linux/interrupt.h>
  19. #include <linux/pinctrl/devinfo.h>
  20. #include <linux/phylink.h>
  21. #include "mtk_eth_soc.h"
  22. static int mtk_msg_level = -1;
  23. module_param_named(msg_level, mtk_msg_level, int, 0);
  24. MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
  25. #define MTK_ETHTOOL_STAT(x) { #x, \
  26. offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
  27. /* strings used by ethtool */
  28. static const struct mtk_ethtool_stats {
  29. char str[ETH_GSTRING_LEN];
  30. u32 offset;
  31. } mtk_ethtool_stats[] = {
  32. MTK_ETHTOOL_STAT(tx_bytes),
  33. MTK_ETHTOOL_STAT(tx_packets),
  34. MTK_ETHTOOL_STAT(tx_skip),
  35. MTK_ETHTOOL_STAT(tx_collisions),
  36. MTK_ETHTOOL_STAT(rx_bytes),
  37. MTK_ETHTOOL_STAT(rx_packets),
  38. MTK_ETHTOOL_STAT(rx_overflow),
  39. MTK_ETHTOOL_STAT(rx_fcs_errors),
  40. MTK_ETHTOOL_STAT(rx_short_errors),
  41. MTK_ETHTOOL_STAT(rx_long_errors),
  42. MTK_ETHTOOL_STAT(rx_checksum_errors),
  43. MTK_ETHTOOL_STAT(rx_flow_control_packets),
  44. };
  45. static const char * const mtk_clks_source_name[] = {
  46. "ethif", "sgmiitop", "esw", "gp0", "gp1", "gp2", "fe", "trgpll",
  47. "sgmii_tx250m", "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb",
  48. "sgmii2_tx250m", "sgmii2_rx250m", "sgmii2_cdr_ref", "sgmii2_cdr_fb",
  49. "sgmii_ck", "eth2pll",
  50. };
  51. void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
  52. {
  53. __raw_writel(val, eth->base + reg);
  54. }
  55. u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
  56. {
  57. return __raw_readl(eth->base + reg);
  58. }
  59. u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned reg)
  60. {
  61. u32 val;
  62. val = mtk_r32(eth, reg);
  63. val &= ~mask;
  64. val |= set;
  65. mtk_w32(eth, val, reg);
  66. return reg;
  67. }
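/* Poll the PHY indirect-access register until the busy bit
 * (PHY_IAC_ACCESS) clears, giving up after PHY_IAC_TIMEOUT jiffies.
 * Serialises the indirect MDIO read/write helpers below.
 */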
  68. static int mtk_mdio_busy_wait(struct mtk_eth *eth)
  69. {
  70. unsigned long t_start = jiffies;
  71. while (1) {
  72. if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
  73. return 0;
  74. if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
  75. break;
  76. usleep_range(10, 20);
  77. }
  78. dev_err(eth->dev, "mdio: MDIO timeout\n");
  79. return -1;
  80. }
  81. static u32 _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr,
  82. u32 phy_register, u32 write_data)
  83. {
  84. if (mtk_mdio_busy_wait(eth))
  85. return -1;
  86. write_data &= 0xffff;
  87. mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_WRITE |
  88. (phy_register << PHY_IAC_REG_SHIFT) |
  89. (phy_addr << PHY_IAC_ADDR_SHIFT) | write_data,
  90. MTK_PHY_IAC);
  91. if (mtk_mdio_busy_wait(eth))
  92. return -1;
  93. return 0;
  94. }
  95. static u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg)
  96. {
  97. u32 d;
  98. if (mtk_mdio_busy_wait(eth))
  99. return 0xffff;
  100. mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_READ |
  101. (phy_reg << PHY_IAC_REG_SHIFT) |
  102. (phy_addr << PHY_IAC_ADDR_SHIFT),
  103. MTK_PHY_IAC);
  104. if (mtk_mdio_busy_wait(eth))
  105. return 0xffff;
  106. d = mtk_r32(eth, MTK_PHY_IAC) & 0xffff;
  107. return d;
  108. }
  109. static int mtk_mdio_write(struct mii_bus *bus, int phy_addr,
  110. int phy_reg, u16 val)
  111. {
  112. struct mtk_eth *eth = bus->priv;
  113. return _mtk_mdio_write(eth, phy_addr, phy_reg, val);
  114. }
  115. static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
  116. {
  117. struct mtk_eth *eth = bus->priv;
  118. return _mtk_mdio_read(eth, phy_addr, phy_reg);
  119. }
  120. static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
  121. phy_interface_t interface)
  122. {
  123. u32 val;
  124. /* Check DDR memory type.
  125. * Currently TRGMII mode with DDR2 memory is not supported.
  126. */
  127. regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val);
  128. if (interface == PHY_INTERFACE_MODE_TRGMII &&
  129. val & SYSCFG_DRAM_TYPE_DDR2) {
  130. dev_err(eth->dev,
  131. "TRGMII mode with DDR2 memory is not supported!\n");
  132. return -EOPNOTSUPP;
  133. }
  134. val = (interface == PHY_INTERFACE_MODE_TRGMII) ?
  135. ETHSYS_TRGMII_MT7621_DDR_PLL : 0;
  136. regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
  137. ETHSYS_TRGMII_MT7621_MASK, val);
  138. return 0;
  139. }
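/* Program GMAC0's interface mode and the TRGMII PLL (trgpll) rate for
 * the requested link: TRGMII forces a 500 MHz PLL, while RGMII selects
 * the 1000 vs. 10/100 interface mode and retunes the RX/TX clock
 * control registers for the negotiated speed.
 */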
  140. static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
  141. phy_interface_t interface, int speed)
  142. {
  143. u32 val;
  144. int ret;
  145. if (interface == PHY_INTERFACE_MODE_TRGMII) {
  146. mtk_w32(eth, TRGMII_MODE, INTF_MODE);
  147. val = 500000000;
  148. ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
  149. if (ret)
  150. dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
  151. return;
  152. }
  153. val = (speed == SPEED_1000) ?
  154. INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100;
  155. mtk_w32(eth, val, INTF_MODE);
  156. regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
  157. ETHSYS_TRGMII_CLK_SEL362_5,
  158. ETHSYS_TRGMII_CLK_SEL362_5);
  159. val = (speed == SPEED_1000) ? 250000000 : 500000000;
  160. ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
  161. if (ret)
  162. dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
  163. val = (speed == SPEED_1000) ?
  164. RCK_CTRL_RGMII_1000 : RCK_CTRL_RGMII_10_100;
  165. mtk_w32(eth, val, TRGMII_RCK_CTRL);
  166. val = (speed == SPEED_1000) ?
  167. TCK_CTRL_RGMII_1000 : TCK_CTRL_RGMII_10_100;
  168. mtk_w32(eth, val, TRGMII_TCK_CTRL);
  169. }
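/* phylink .mac_config callback: set up the SoC pad/path for the
 * requested PHY interface, switch the ethsys GE mode, configure the
 * SGMII block when needed, and finally program the per-MAC control
 * register (MCR) with the forced speed/duplex/flow-control bits.
 */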
  170. static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
  171. const struct phylink_link_state *state)
  172. {
  173. struct mtk_mac *mac = container_of(config, struct mtk_mac,
  174. phylink_config);
  175. struct mtk_eth *eth = mac->hw;
  176. u32 mcr_cur, mcr_new, sid, i;
  177. int val, ge_mode, err;
  178. /* MT76x8 has no hardware settings for the MAC */
  179. if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
  180. mac->interface != state->interface) {
  181. /* Setup soc pin functions */
  182. switch (state->interface) {
  183. case PHY_INTERFACE_MODE_TRGMII:
  184. if (mac->id)
  185. goto err_phy;
  186. if (!MTK_HAS_CAPS(mac->hw->soc->caps,
  187. MTK_GMAC1_TRGMII))
  188. goto err_phy;
  189. /* fall through */
  190. case PHY_INTERFACE_MODE_RGMII_TXID:
  191. case PHY_INTERFACE_MODE_RGMII_RXID:
  192. case PHY_INTERFACE_MODE_RGMII_ID:
  193. case PHY_INTERFACE_MODE_RGMII:
  194. case PHY_INTERFACE_MODE_MII:
  195. case PHY_INTERFACE_MODE_REVMII:
  196. case PHY_INTERFACE_MODE_RMII:
  197. if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) {
  198. err = mtk_gmac_rgmii_path_setup(eth, mac->id);
  199. if (err)
  200. goto init_err;
  201. }
  202. break;
  203. case PHY_INTERFACE_MODE_1000BASEX:
  204. case PHY_INTERFACE_MODE_2500BASEX:
  205. case PHY_INTERFACE_MODE_SGMII:
  206. if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
  207. err = mtk_gmac_sgmii_path_setup(eth, mac->id);
  208. if (err)
  209. goto init_err;
  210. }
  211. break;
  212. case PHY_INTERFACE_MODE_GMII:
  213. if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) {
  214. err = mtk_gmac_gephy_path_setup(eth, mac->id);
  215. if (err)
  216. goto init_err;
  217. }
  218. break;
  219. default:
  220. goto err_phy;
  221. }
  222. /* Setup clock for 1st gmac */
  223. if (!mac->id && state->interface != PHY_INTERFACE_MODE_SGMII &&
  224. !phy_interface_mode_is_8023z(state->interface) &&
  225. MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII)) {
  226. if (MTK_HAS_CAPS(mac->hw->soc->caps,
  227. MTK_TRGMII_MT7621_CLK)) {
  228. if (mt7621_gmac0_rgmii_adjust(mac->hw,
  229. state->interface))
  230. goto err_phy;
  231. } else {
  232. mtk_gmac0_rgmii_adjust(mac->hw,
  233. state->interface,
  234. state->speed);
  235. /* mt7623_pad_clk_setup */
  236. for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
  237. mtk_w32(mac->hw,
  238. TD_DM_DRVP(8) | TD_DM_DRVN(8),
  239. TRGMII_TD_ODT(i));
  240. /* Assert/release MT7623 RXC reset */
  241. mtk_m32(mac->hw, 0, RXC_RST | RXC_DQSISEL,
  242. TRGMII_RCK_CTRL);
  243. mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL);
  244. }
  245. }
  246. ge_mode = 0;
  247. switch (state->interface) {
  248. case PHY_INTERFACE_MODE_MII:
  249. case PHY_INTERFACE_MODE_GMII:
  250. ge_mode = 1;
  251. break;
  252. case PHY_INTERFACE_MODE_REVMII:
  253. ge_mode = 2;
  254. break;
  255. case PHY_INTERFACE_MODE_RMII:
  256. if (mac->id)
  257. goto err_phy;
  258. ge_mode = 3;
  259. break;
  260. default:
  261. break;
  262. }
  263. /* put the gmac into the right mode */
  264. regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
  265. val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
  266. val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
  267. regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
  268. mac->interface = state->interface;
  269. }
  270. /* SGMII */
  271. if (state->interface == PHY_INTERFACE_MODE_SGMII ||
  272. phy_interface_mode_is_8023z(state->interface)) {
  273. /* The path from GMAC to SGMII will be enabled once the SGMIISYS
  274. * setup is done.
  275. */
  276. regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
  277. regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
  278. SYSCFG0_SGMII_MASK,
  279. ~(u32)SYSCFG0_SGMII_MASK);
  280. /* Decide how GMAC and SGMIISYS are mapped */
  281. sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
  282. 0 : mac->id;
  283. /* Setup SGMIISYS with the determined property */
  284. if (state->interface != PHY_INTERFACE_MODE_SGMII)
  285. err = mtk_sgmii_setup_mode_force(eth->sgmii, sid,
  286. state);
  287. else if (phylink_autoneg_inband(mode))
  288. err = mtk_sgmii_setup_mode_an(eth->sgmii, sid);
  289. if (err)
  290. goto init_err;
  291. regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
  292. SYSCFG0_SGMII_MASK, val);
  293. } else if (phylink_autoneg_inband(mode)) {
  294. dev_err(eth->dev,
  295. "In-band mode not supported in non SGMII mode!\n");
  296. return;
  297. }
  298. /* Setup gmac */
  299. mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
  300. mcr_new = mcr_cur;
  301. mcr_new &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 |
  302. MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC |
  303. MAC_MCR_FORCE_RX_FC);
  304. mcr_new |= MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
  305. MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK;
  306. switch (state->speed) {
  307. case SPEED_2500:
  308. case SPEED_1000:
  309. mcr_new |= MAC_MCR_SPEED_1000;
  310. break;
  311. case SPEED_100:
  312. mcr_new |= MAC_MCR_SPEED_100;
  313. break;
  314. }
  315. if (state->duplex == DUPLEX_FULL) {
  316. mcr_new |= MAC_MCR_FORCE_DPX;
  317. if (state->pause & MLO_PAUSE_TX)
  318. mcr_new |= MAC_MCR_FORCE_TX_FC;
  319. if (state->pause & MLO_PAUSE_RX)
  320. mcr_new |= MAC_MCR_FORCE_RX_FC;
  321. }
  322. /* Only update control register when needed! */
  323. if (mcr_new != mcr_cur)
  324. mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
  325. return;
  326. err_phy:
  327. dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__,
  328. mac->id, phy_modes(state->interface));
  329. return;
  330. init_err:
  331. dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__,
  332. mac->id, phy_modes(state->interface), err);
  333. }
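/* phylink .mac_link_state callback: decode link, duplex, speed and
 * pause state from the per-MAC status register (MSR).
 */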
  334. static int mtk_mac_link_state(struct phylink_config *config,
  335. struct phylink_link_state *state)
  336. {
  337. struct mtk_mac *mac = container_of(config, struct mtk_mac,
  338. phylink_config);
  339. u32 pmsr = mtk_r32(mac->hw, MTK_MAC_MSR(mac->id));
  340. state->link = (pmsr & MAC_MSR_LINK);
  341. state->duplex = (pmsr & MAC_MSR_DPX) >> 1;
  342. switch (pmsr & (MAC_MSR_SPEED_1000 | MAC_MSR_SPEED_100)) {
  343. case 0:
  344. state->speed = SPEED_10;
  345. break;
  346. case MAC_MSR_SPEED_100:
  347. state->speed = SPEED_100;
  348. break;
  349. case MAC_MSR_SPEED_1000:
  350. state->speed = SPEED_1000;
  351. break;
  352. default:
  353. state->speed = SPEED_UNKNOWN;
  354. break;
  355. }
  356. state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX);
  357. if (pmsr & MAC_MSR_RX_FC)
  358. state->pause |= MLO_PAUSE_RX;
  359. if (pmsr & MAC_MSR_TX_FC)
  360. state->pause |= MLO_PAUSE_TX;
  361. return 1;
  362. }
  363. static void mtk_mac_an_restart(struct phylink_config *config)
  364. {
  365. struct mtk_mac *mac = container_of(config, struct mtk_mac,
  366. phylink_config);
  367. mtk_sgmii_restart_an(mac->hw, mac->id);
  368. }
  369. static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
  370. phy_interface_t interface)
  371. {
  372. struct mtk_mac *mac = container_of(config, struct mtk_mac,
  373. phylink_config);
  374. u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
  375. mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN);
  376. mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
  377. }
  378. static void mtk_mac_link_up(struct phylink_config *config, unsigned int mode,
  379. phy_interface_t interface,
  380. struct phy_device *phy)
  381. {
  382. struct mtk_mac *mac = container_of(config, struct mtk_mac,
  383. phylink_config);
  384. u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
  385. mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN;
  386. mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
  387. }
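/* phylink .validate callback: clear the supported mask for interface
 * modes this SoC lacks the capabilities for, then restrict the
 * advertised link modes to what the selected interface can carry.
 */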
  388. static void mtk_validate(struct phylink_config *config,
  389. unsigned long *supported,
  390. struct phylink_link_state *state)
  391. {
  392. struct mtk_mac *mac = container_of(config, struct mtk_mac,
  393. phylink_config);
  394. __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
  395. if (state->interface != PHY_INTERFACE_MODE_NA &&
  396. state->interface != PHY_INTERFACE_MODE_MII &&
  397. state->interface != PHY_INTERFACE_MODE_GMII &&
  398. !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII) &&
  399. phy_interface_mode_is_rgmii(state->interface)) &&
  400. !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) &&
  401. !mac->id && state->interface == PHY_INTERFACE_MODE_TRGMII) &&
  402. !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII) &&
  403. (state->interface == PHY_INTERFACE_MODE_SGMII ||
  404. phy_interface_mode_is_8023z(state->interface)))) {
  405. linkmode_zero(supported);
  406. return;
  407. }
  408. phylink_set_port_modes(mask);
  409. phylink_set(mask, Autoneg);
  410. switch (state->interface) {
  411. case PHY_INTERFACE_MODE_TRGMII:
  412. phylink_set(mask, 1000baseT_Full);
  413. break;
  414. case PHY_INTERFACE_MODE_1000BASEX:
  415. case PHY_INTERFACE_MODE_2500BASEX:
  416. phylink_set(mask, 1000baseX_Full);
  417. phylink_set(mask, 2500baseX_Full);
  418. break;
  419. case PHY_INTERFACE_MODE_GMII:
  420. case PHY_INTERFACE_MODE_RGMII:
  421. case PHY_INTERFACE_MODE_RGMII_ID:
  422. case PHY_INTERFACE_MODE_RGMII_RXID:
  423. case PHY_INTERFACE_MODE_RGMII_TXID:
  424. phylink_set(mask, 1000baseT_Half);
  425. /* fall through */
  426. case PHY_INTERFACE_MODE_SGMII:
  427. phylink_set(mask, 1000baseT_Full);
  428. phylink_set(mask, 1000baseX_Full);
  429. /* fall through */
  430. case PHY_INTERFACE_MODE_MII:
  431. case PHY_INTERFACE_MODE_RMII:
  432. case PHY_INTERFACE_MODE_REVMII:
  433. case PHY_INTERFACE_MODE_NA:
  434. default:
  435. phylink_set(mask, 10baseT_Half);
  436. phylink_set(mask, 10baseT_Full);
  437. phylink_set(mask, 100baseT_Half);
  438. phylink_set(mask, 100baseT_Full);
  439. break;
  440. }
  441. if (state->interface == PHY_INTERFACE_MODE_NA) {
  442. if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
  443. phylink_set(mask, 1000baseT_Full);
  444. phylink_set(mask, 1000baseX_Full);
  445. phylink_set(mask, 2500baseX_Full);
  446. }
  447. if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII)) {
  448. phylink_set(mask, 1000baseT_Full);
  449. phylink_set(mask, 1000baseT_Half);
  450. phylink_set(mask, 1000baseX_Full);
  451. }
  452. if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GEPHY)) {
  453. phylink_set(mask, 1000baseT_Full);
  454. phylink_set(mask, 1000baseT_Half);
  455. }
  456. }
  457. phylink_set(mask, Pause);
  458. phylink_set(mask, Asym_Pause);
  459. linkmode_and(supported, supported, mask);
  460. linkmode_and(state->advertising, state->advertising, mask);
  461. /* We can only operate at 2500BaseX or 1000BaseX. If requested
  462. * to advertise both, only report advertising at 2500BaseX.
  463. */
  464. phylink_helper_basex_speed(state);
  465. }
  466. static const struct phylink_mac_ops mtk_phylink_ops = {
  467. .validate = mtk_validate,
  468. .mac_link_state = mtk_mac_link_state,
  469. .mac_an_restart = mtk_mac_an_restart,
  470. .mac_config = mtk_mac_config,
  471. .mac_link_down = mtk_mac_link_down,
  472. .mac_link_up = mtk_mac_link_up,
  473. };
  474. static int mtk_mdio_init(struct mtk_eth *eth)
  475. {
  476. struct device_node *mii_np;
  477. int ret;
  478. mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
  479. if (!mii_np) {
  480. dev_err(eth->dev, "no %s child node found", "mdio-bus");
  481. return -ENODEV;
  482. }
  483. if (!of_device_is_available(mii_np)) {
  484. ret = -ENODEV;
  485. goto err_put_node;
  486. }
  487. eth->mii_bus = devm_mdiobus_alloc(eth->dev);
  488. if (!eth->mii_bus) {
  489. ret = -ENOMEM;
  490. goto err_put_node;
  491. }
  492. eth->mii_bus->name = "mdio";
  493. eth->mii_bus->read = mtk_mdio_read;
  494. eth->mii_bus->write = mtk_mdio_write;
  495. eth->mii_bus->priv = eth;
  496. eth->mii_bus->parent = eth->dev;
  497. snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
  498. ret = of_mdiobus_register(eth->mii_bus, mii_np);
  499. err_put_node:
  500. of_node_put(mii_np);
  501. return ret;
  502. }
  503. static void mtk_mdio_cleanup(struct mtk_eth *eth)
  504. {
  505. if (!eth->mii_bus)
  506. return;
  507. mdiobus_unregister(eth->mii_bus);
  508. }
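/* TX/RX interrupt mask helpers: read-modify-write the relevant
 * interrupt mask register under its own spinlock so that enabling and
 * disabling can safely race with the NAPI handlers.
 */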
  509. static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
  510. {
  511. unsigned long flags;
  512. u32 val;
  513. spin_lock_irqsave(&eth->tx_irq_lock, flags);
  514. val = mtk_r32(eth, eth->tx_int_mask_reg);
  515. mtk_w32(eth, val & ~mask, eth->tx_int_mask_reg);
  516. spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
  517. }
  518. static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
  519. {
  520. unsigned long flags;
  521. u32 val;
  522. spin_lock_irqsave(&eth->tx_irq_lock, flags);
  523. val = mtk_r32(eth, eth->tx_int_mask_reg);
  524. mtk_w32(eth, val | mask, eth->tx_int_mask_reg);
  525. spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
  526. }
  527. static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
  528. {
  529. unsigned long flags;
  530. u32 val;
  531. spin_lock_irqsave(&eth->rx_irq_lock, flags);
  532. val = mtk_r32(eth, MTK_PDMA_INT_MASK);
  533. mtk_w32(eth, val & ~mask, MTK_PDMA_INT_MASK);
  534. spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
  535. }
  536. static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
  537. {
  538. unsigned long flags;
  539. u32 val;
  540. spin_lock_irqsave(&eth->rx_irq_lock, flags);
  541. val = mtk_r32(eth, MTK_PDMA_INT_MASK);
  542. mtk_w32(eth, val | mask, MTK_PDMA_INT_MASK);
  543. spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
  544. }
  545. static int mtk_set_mac_address(struct net_device *dev, void *p)
  546. {
  547. int ret = eth_mac_addr(dev, p);
  548. struct mtk_mac *mac = netdev_priv(dev);
  549. struct mtk_eth *eth = mac->hw;
  550. const char *macaddr = dev->dev_addr;
  551. if (ret)
  552. return ret;
  553. if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
  554. return -EBUSY;
  555. spin_lock_bh(&mac->hw->page_lock);
  556. if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
  557. mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
  558. MT7628_SDM_MAC_ADRH);
  559. mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
  560. (macaddr[4] << 8) | macaddr[5],
  561. MT7628_SDM_MAC_ADRL);
  562. } else {
  563. mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
  564. MTK_GDMA_MAC_ADRH(mac->id));
  565. mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
  566. (macaddr[4] << 8) | macaddr[5],
  567. MTK_GDMA_MAC_ADRL(mac->id));
  568. }
  569. spin_unlock_bh(&mac->hw->page_lock);
  570. return 0;
  571. }
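/* Fold the hardware MIB counters of one MAC into its mtk_hw_stats:
 * the SDM counter block on MT7628, the per-GDM counters elsewhere.
 * Callers take stats_lock; readers synchronise via the u64_stats syncp.
 */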
  572. void mtk_stats_update_mac(struct mtk_mac *mac)
  573. {
  574. struct mtk_hw_stats *hw_stats = mac->hw_stats;
  575. struct mtk_eth *eth = mac->hw;
  576. u64_stats_update_begin(&hw_stats->syncp);
  577. if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
  578. hw_stats->tx_packets += mtk_r32(mac->hw, MT7628_SDM_TPCNT);
  579. hw_stats->tx_bytes += mtk_r32(mac->hw, MT7628_SDM_TBCNT);
  580. hw_stats->rx_packets += mtk_r32(mac->hw, MT7628_SDM_RPCNT);
  581. hw_stats->rx_bytes += mtk_r32(mac->hw, MT7628_SDM_RBCNT);
  582. hw_stats->rx_checksum_errors +=
  583. mtk_r32(mac->hw, MT7628_SDM_CS_ERR);
  584. } else {
  585. unsigned int offs = hw_stats->reg_offset;
  586. u64 stats;
  587. hw_stats->rx_bytes += mtk_r32(mac->hw,
  588. MTK_GDM1_RX_GBCNT_L + offs);
  589. stats = mtk_r32(mac->hw, MTK_GDM1_RX_GBCNT_H + offs);
  590. if (stats)
  591. hw_stats->rx_bytes += (stats << 32);
  592. hw_stats->rx_packets +=
  593. mtk_r32(mac->hw, MTK_GDM1_RX_GPCNT + offs);
  594. hw_stats->rx_overflow +=
  595. mtk_r32(mac->hw, MTK_GDM1_RX_OERCNT + offs);
  596. hw_stats->rx_fcs_errors +=
  597. mtk_r32(mac->hw, MTK_GDM1_RX_FERCNT + offs);
  598. hw_stats->rx_short_errors +=
  599. mtk_r32(mac->hw, MTK_GDM1_RX_SERCNT + offs);
  600. hw_stats->rx_long_errors +=
  601. mtk_r32(mac->hw, MTK_GDM1_RX_LENCNT + offs);
  602. hw_stats->rx_checksum_errors +=
  603. mtk_r32(mac->hw, MTK_GDM1_RX_CERCNT + offs);
  604. hw_stats->rx_flow_control_packets +=
  605. mtk_r32(mac->hw, MTK_GDM1_RX_FCCNT + offs);
  606. hw_stats->tx_skip +=
  607. mtk_r32(mac->hw, MTK_GDM1_TX_SKIPCNT + offs);
  608. hw_stats->tx_collisions +=
  609. mtk_r32(mac->hw, MTK_GDM1_TX_COLCNT + offs);
  610. hw_stats->tx_bytes +=
  611. mtk_r32(mac->hw, MTK_GDM1_TX_GBCNT_L + offs);
  612. stats = mtk_r32(mac->hw, MTK_GDM1_TX_GBCNT_H + offs);
  613. if (stats)
  614. hw_stats->tx_bytes += (stats << 32);
  615. hw_stats->tx_packets +=
  616. mtk_r32(mac->hw, MTK_GDM1_TX_GPCNT + offs);
  617. }
  618. u64_stats_update_end(&hw_stats->syncp);
  619. }
  620. static void mtk_stats_update(struct mtk_eth *eth)
  621. {
  622. int i;
  623. for (i = 0; i < MTK_MAC_COUNT; i++) {
  624. if (!eth->mac[i] || !eth->mac[i]->hw_stats)
  625. continue;
  626. if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
  627. mtk_stats_update_mac(eth->mac[i]);
  628. spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
  629. }
  630. }
  631. }
  632. static void mtk_get_stats64(struct net_device *dev,
  633. struct rtnl_link_stats64 *storage)
  634. {
  635. struct mtk_mac *mac = netdev_priv(dev);
  636. struct mtk_hw_stats *hw_stats = mac->hw_stats;
  637. unsigned int start;
  638. if (netif_running(dev) && netif_device_present(dev)) {
  639. if (spin_trylock_bh(&hw_stats->stats_lock)) {
  640. mtk_stats_update_mac(mac);
  641. spin_unlock_bh(&hw_stats->stats_lock);
  642. }
  643. }
  644. do {
  645. start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
  646. storage->rx_packets = hw_stats->rx_packets;
  647. storage->tx_packets = hw_stats->tx_packets;
  648. storage->rx_bytes = hw_stats->rx_bytes;
  649. storage->tx_bytes = hw_stats->tx_bytes;
  650. storage->collisions = hw_stats->tx_collisions;
  651. storage->rx_length_errors = hw_stats->rx_short_errors +
  652. hw_stats->rx_long_errors;
  653. storage->rx_over_errors = hw_stats->rx_overflow;
  654. storage->rx_crc_errors = hw_stats->rx_fcs_errors;
  655. storage->rx_errors = hw_stats->rx_checksum_errors;
  656. storage->tx_aborted_errors = hw_stats->tx_skip;
  657. } while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));
  658. storage->tx_errors = dev->stats.tx_errors;
  659. storage->rx_dropped = dev->stats.rx_dropped;
  660. storage->tx_dropped = dev->stats.tx_dropped;
  661. }
  662. static inline int mtk_max_frag_size(int mtu)
  663. {
  664. /* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
  665. if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH)
  666. mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
  667. return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
  668. SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
  669. }
  670. static inline int mtk_max_buf_size(int frag_size)
  671. {
  672. int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
  673. SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
  674. WARN_ON(buf_size < MTK_MAX_RX_LENGTH);
  675. return buf_size;
  676. }
  677. static inline void mtk_rx_get_desc(struct mtk_rx_dma *rxd,
  678. struct mtk_rx_dma *dma_rxd)
  679. {
  680. rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
  681. rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
  682. rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
  683. rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
  684. }
  685. /* the qdma core needs scratch memory to be set up */
  686. static int mtk_init_fq_dma(struct mtk_eth *eth)
  687. {
  688. dma_addr_t phy_ring_tail;
  689. int cnt = MTK_DMA_SIZE;
  690. dma_addr_t dma_addr;
  691. int i;
  692. eth->scratch_ring = dma_alloc_coherent(eth->dev,
  693. cnt * sizeof(struct mtk_tx_dma),
  694. &eth->phy_scratch_ring,
  695. GFP_ATOMIC);
  696. if (unlikely(!eth->scratch_ring))
  697. return -ENOMEM;
  698. eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE,
  699. GFP_KERNEL);
  700. if (unlikely(!eth->scratch_head))
  701. return -ENOMEM;
  702. dma_addr = dma_map_single(eth->dev,
  703. eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
  704. DMA_FROM_DEVICE);
  705. if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
  706. return -ENOMEM;
  707. phy_ring_tail = eth->phy_scratch_ring +
  708. (sizeof(struct mtk_tx_dma) * (cnt - 1));
  709. for (i = 0; i < cnt; i++) {
  710. eth->scratch_ring[i].txd1 =
  711. (dma_addr + (i * MTK_QDMA_PAGE_SIZE));
  712. if (i < cnt - 1)
  713. eth->scratch_ring[i].txd2 = (eth->phy_scratch_ring +
  714. ((i + 1) * sizeof(struct mtk_tx_dma)));
  715. eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE);
  716. }
  717. mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD);
  718. mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
  719. mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
  720. mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);
  721. return 0;
  722. }
  723. static inline void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
  724. {
  725. void *ret = ring->dma;
  726. return ret + (desc - ring->phys);
  727. }
  728. static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
  729. struct mtk_tx_dma *txd)
  730. {
  731. int idx = txd - ring->dma;
  732. return &ring->buf[idx];
  733. }
  734. static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring,
  735. struct mtk_tx_dma *dma)
  736. {
  737. return ring->dma_pdma - ring->dma + dma;
  738. }
  739. static int txd_to_idx(struct mtk_tx_ring *ring, struct mtk_tx_dma *dma)
  740. {
  741. return ((void *)dma - (void *)ring->dma) / sizeof(*dma);
  742. }
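/* Undo the DMA mapping(s) recorded in a TX buffer slot and free its
 * skb. QDMA slots carry one mapping (linear or page); PDMA slots may
 * hold two buffers (dma_addr0/dma_addr1) per hardware descriptor.
 */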
  743. static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf)
  744. {
  745. if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
  746. if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
  747. dma_unmap_single(eth->dev,
  748. dma_unmap_addr(tx_buf, dma_addr0),
  749. dma_unmap_len(tx_buf, dma_len0),
  750. DMA_TO_DEVICE);
  751. } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
  752. dma_unmap_page(eth->dev,
  753. dma_unmap_addr(tx_buf, dma_addr0),
  754. dma_unmap_len(tx_buf, dma_len0),
  755. DMA_TO_DEVICE);
  756. }
  757. } else {
  758. if (dma_unmap_len(tx_buf, dma_len0)) {
  759. dma_unmap_page(eth->dev,
  760. dma_unmap_addr(tx_buf, dma_addr0),
  761. dma_unmap_len(tx_buf, dma_len0),
  762. DMA_TO_DEVICE);
  763. }
  764. if (dma_unmap_len(tx_buf, dma_len1)) {
  765. dma_unmap_page(eth->dev,
  766. dma_unmap_addr(tx_buf, dma_addr1),
  767. dma_unmap_len(tx_buf, dma_len1),
  768. DMA_TO_DEVICE);
  769. }
  770. }
  771. tx_buf->flags = 0;
  772. if (tx_buf->skb &&
  773. (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC))
  774. dev_kfree_skb_any(tx_buf->skb);
  775. tx_buf->skb = NULL;
  776. }
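/* Record a freshly mapped buffer in the TX bookkeeping. QDMA only
 * stores the unmap address/length; PDMA descriptors carry two buffers
 * each, so even/odd fragments go into txd1/txd3 with the matching
 * PLEN0/PLEN1 length fields.
 */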
  777. static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
  778. struct mtk_tx_dma *txd, dma_addr_t mapped_addr,
  779. size_t size, int idx)
  780. {
  781. if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
  782. dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
  783. dma_unmap_len_set(tx_buf, dma_len0, size);
  784. } else {
  785. if (idx & 1) {
  786. txd->txd3 = mapped_addr;
  787. txd->txd2 |= TX_DMA_PLEN1(size);
  788. dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
  789. dma_unmap_len_set(tx_buf, dma_len1, size);
  790. } else {
  791. tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
  792. txd->txd1 = mapped_addr;
  793. txd->txd2 = TX_DMA_PLEN0(size);
  794. dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
  795. dma_unmap_len_set(tx_buf, dma_len0, size);
  796. }
  797. }
  798. }
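/* Map an skb (linear head plus page fragments) onto TX descriptors,
 * fill in the forward-port, TSO, checksum and VLAN bits, and kick the
 * QDMA CTX pointer (or PDMA CTX index) once the ring update is
 * visible. On a mapping error all descriptors taken so far are unwound.
 */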
  799. static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
  800. int tx_num, struct mtk_tx_ring *ring, bool gso)
  801. {
  802. struct mtk_mac *mac = netdev_priv(dev);
  803. struct mtk_eth *eth = mac->hw;
  804. struct mtk_tx_dma *itxd, *txd;
  805. struct mtk_tx_dma *itxd_pdma, *txd_pdma;
  806. struct mtk_tx_buf *itx_buf, *tx_buf;
  807. dma_addr_t mapped_addr;
  808. unsigned int nr_frags;
  809. int i, n_desc = 1;
  810. u32 txd4 = 0, fport;
  811. int k = 0;
  812. itxd = ring->next_free;
  813. itxd_pdma = qdma_to_pdma(ring, itxd);
  814. if (itxd == ring->last_free)
  815. return -ENOMEM;
  816. /* set the forward port */
  817. fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT;
  818. txd4 |= fport;
  819. itx_buf = mtk_desc_to_tx_buf(ring, itxd);
  820. memset(itx_buf, 0, sizeof(*itx_buf));
  821. if (gso)
  822. txd4 |= TX_DMA_TSO;
  823. /* TX Checksum offload */
  824. if (skb->ip_summed == CHECKSUM_PARTIAL)
  825. txd4 |= TX_DMA_CHKSUM;
  826. /* VLAN header offload */
  827. if (skb_vlan_tag_present(skb))
  828. txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);
  829. mapped_addr = dma_map_single(eth->dev, skb->data,
  830. skb_headlen(skb), DMA_TO_DEVICE);
  831. if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
  832. return -ENOMEM;
  833. WRITE_ONCE(itxd->txd1, mapped_addr);
  834. itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
  835. itx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
  836. MTK_TX_FLAGS_FPORT1;
  837. setup_tx_buf(eth, itx_buf, itxd_pdma, mapped_addr, skb_headlen(skb),
  838. k++);
  839. /* TX SG offload */
  840. txd = itxd;
  841. txd_pdma = qdma_to_pdma(ring, txd);
  842. nr_frags = skb_shinfo(skb)->nr_frags;
  843. for (i = 0; i < nr_frags; i++) {
  844. skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
  845. unsigned int offset = 0;
  846. int frag_size = skb_frag_size(frag);
  847. while (frag_size) {
  848. bool last_frag = false;
  849. unsigned int frag_map_size;
  850. bool new_desc = true;
  851. if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA) ||
  852. (i & 0x1)) {
  853. txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
  854. txd_pdma = qdma_to_pdma(ring, txd);
  855. if (txd == ring->last_free)
  856. goto err_dma;
  857. n_desc++;
  858. } else {
  859. new_desc = false;
  860. }
  861. frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
  862. mapped_addr = skb_frag_dma_map(eth->dev, frag, offset,
  863. frag_map_size,
  864. DMA_TO_DEVICE);
  865. if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
  866. goto err_dma;
  867. if (i == nr_frags - 1 &&
  868. (frag_size - frag_map_size) == 0)
  869. last_frag = true;
  870. WRITE_ONCE(txd->txd1, mapped_addr);
  871. WRITE_ONCE(txd->txd3, (TX_DMA_SWC |
  872. TX_DMA_PLEN0(frag_map_size) |
  873. last_frag * TX_DMA_LS0));
  874. WRITE_ONCE(txd->txd4, fport);
  875. tx_buf = mtk_desc_to_tx_buf(ring, txd);
  876. if (new_desc)
  877. memset(tx_buf, 0, sizeof(*tx_buf));
  878. tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
  879. tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
  880. tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
  881. MTK_TX_FLAGS_FPORT1;
  882. setup_tx_buf(eth, tx_buf, txd_pdma, mapped_addr,
  883. frag_map_size, k++);
  884. frag_size -= frag_map_size;
  885. offset += frag_map_size;
  886. }
  887. }
  888. /* store skb for later cleanup */
  889. itx_buf->skb = skb;
  890. WRITE_ONCE(itxd->txd4, txd4);
  891. WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
  892. (!nr_frags * TX_DMA_LS0)));
  893. if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
  894. if (k & 0x1)
  895. txd_pdma->txd2 |= TX_DMA_LS0;
  896. else
  897. txd_pdma->txd2 |= TX_DMA_LS1;
  898. }
  899. netdev_sent_queue(dev, skb->len);
  900. skb_tx_timestamp(skb);
  901. ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
  902. atomic_sub(n_desc, &ring->free_count);
  903. /* make sure that all changes to the dma ring are flushed before we
  904. * continue
  905. */
  906. wmb();
  907. if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
  908. if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
  909. !netdev_xmit_more())
  910. mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
  911. } else {
  912. int next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd),
  913. ring->dma_size);
  914. mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
  915. }
  916. return 0;
  917. err_dma:
  918. do {
  919. tx_buf = mtk_desc_to_tx_buf(ring, itxd);
  920. /* unmap dma */
  921. mtk_tx_unmap(eth, tx_buf);
  922. itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
  923. if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
  924. itxd_pdma->txd2 = TX_DMA_DESP2_DEF;
  925. itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
  926. itxd_pdma = qdma_to_pdma(ring, itxd);
  927. } while (itxd != txd);
  928. return -ENOMEM;
  929. }
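/* Worst-case descriptor count for an skb: one for the linear head
 * plus one per fragment, with GSO fragments split into
 * MTK_TX_DMA_BUF_LEN sized chunks.
 */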
  930. static inline int mtk_cal_txd_req(struct sk_buff *skb)
  931. {
  932. int i, nfrags;
  933. skb_frag_t *frag;
  934. nfrags = 1;
  935. if (skb_is_gso(skb)) {
  936. for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
  937. frag = &skb_shinfo(skb)->frags[i];
  938. nfrags += DIV_ROUND_UP(skb_frag_size(frag),
  939. MTK_TX_DMA_BUF_LEN);
  940. }
  941. } else {
  942. nfrags += skb_shinfo(skb)->nr_frags;
  943. }
  944. return nfrags;
  945. }
  946. static int mtk_queue_stopped(struct mtk_eth *eth)
  947. {
  948. int i;
  949. for (i = 0; i < MTK_MAC_COUNT; i++) {
  950. if (!eth->netdev[i])
  951. continue;
  952. if (netif_queue_stopped(eth->netdev[i]))
  953. return 1;
  954. }
  955. return 0;
  956. }
  957. static void mtk_wake_queue(struct mtk_eth *eth)
  958. {
  959. int i;
  960. for (i = 0; i < MTK_MAC_COUNT; i++) {
  961. if (!eth->netdev[i])
  962. continue;
  963. netif_wake_queue(eth->netdev[i]);
  964. }
  965. }
  966. static void mtk_stop_queue(struct mtk_eth *eth)
  967. {
  968. int i;
  969. for (i = 0; i < MTK_MAC_COUNT; i++) {
  970. if (!eth->netdev[i])
  971. continue;
  972. netif_stop_queue(eth->netdev[i]);
  973. }
  974. }
  975. static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
  976. {
  977. struct mtk_mac *mac = netdev_priv(dev);
  978. struct mtk_eth *eth = mac->hw;
  979. struct mtk_tx_ring *ring = &eth->tx_ring;
  980. struct net_device_stats *stats = &dev->stats;
  981. bool gso = false;
  982. int tx_num;
  983. /* normally we can rely on the stack not calling this more than once,
  984. * however we have 2 queues running on the same ring so we need to lock
  985. * the ring access
  986. */
  987. spin_lock(&eth->page_lock);
  988. if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
  989. goto drop;
  990. tx_num = mtk_cal_txd_req(skb);
  991. if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
  992. mtk_stop_queue(eth);
  993. netif_err(eth, tx_queued, dev,
  994. "Tx Ring full when queue awake!\n");
  995. spin_unlock(&eth->page_lock);
  996. return NETDEV_TX_BUSY;
  997. }
  998. /* TSO: fill MSS info in tcp checksum field */
  999. if (skb_is_gso(skb)) {
  1000. if (skb_cow_head(skb, 0)) {
  1001. netif_warn(eth, tx_err, dev,
  1002. "GSO expand head fail.\n");
  1003. goto drop;
  1004. }
  1005. if (skb_shinfo(skb)->gso_type &
  1006. (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
  1007. gso = true;
  1008. tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
  1009. }
  1010. }
  1011. if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
  1012. goto drop;
  1013. if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
  1014. mtk_stop_queue(eth);
  1015. spin_unlock(&eth->page_lock);
  1016. return NETDEV_TX_OK;
  1017. drop:
  1018. spin_unlock(&eth->page_lock);
  1019. stats->tx_dropped++;
  1020. dev_kfree_skb_any(skb);
  1021. return NETDEV_TX_OK;
  1022. }
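/* Pick an RX ring with a completed descriptor pending. Without
 * hardware LRO only ring 0 is used; with LRO each ring's next slot is
 * checked for RX_DMA_DONE.
 */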
  1023. static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
  1024. {
  1025. int i;
  1026. struct mtk_rx_ring *ring;
  1027. int idx;
  1028. if (!eth->hwlro)
  1029. return &eth->rx_ring[0];
  1030. for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
  1031. ring = &eth->rx_ring[i];
  1032. idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
  1033. if (ring->dma[idx].rxd2 & RX_DMA_DONE) {
  1034. ring->calc_idx_update = true;
  1035. return ring;
  1036. }
  1037. }
  1038. return NULL;
  1039. }
  1040. static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
  1041. {
  1042. struct mtk_rx_ring *ring;
  1043. int i;
  1044. if (!eth->hwlro) {
  1045. ring = &eth->rx_ring[0];
  1046. mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
  1047. } else {
  1048. for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
  1049. ring = &eth->rx_ring[i];
  1050. if (ring->calc_idx_update) {
  1051. ring->calc_idx_update = false;
  1052. mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
  1053. }
  1054. }
  1055. }
  1056. }
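/* RX NAPI worker: for each completed descriptor (up to the budget)
 * build an skb, hand it to GRO, replace the buffer with a freshly
 * mapped fragment, then return the descriptor to the hardware and
 * advance the CPU index.
 */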
  1057. static int mtk_poll_rx(struct napi_struct *napi, int budget,
  1058. struct mtk_eth *eth)
  1059. {
  1060. struct mtk_rx_ring *ring;
  1061. int idx;
  1062. struct sk_buff *skb;
  1063. u8 *data, *new_data;
  1064. struct mtk_rx_dma *rxd, trxd;
  1065. int done = 0;
  1066. while (done < budget) {
  1067. struct net_device *netdev;
  1068. unsigned int pktlen;
  1069. dma_addr_t dma_addr;
  1070. int mac;
  1071. ring = mtk_get_rx_ring(eth);
  1072. if (unlikely(!ring))
  1073. goto rx_done;
  1074. idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
  1075. rxd = &ring->dma[idx];
  1076. data = ring->data[idx];
  1077. mtk_rx_get_desc(&trxd, rxd);
  1078. if (!(trxd.rxd2 & RX_DMA_DONE))
  1079. break;
  1080. /* find out which mac the packet comes from. values start at 1 */
  1081. if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
  1082. mac = 0;
  1083. } else {
  1084. mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
  1085. RX_DMA_FPORT_MASK;
  1086. mac--;
  1087. }
  1088. if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
  1089. !eth->netdev[mac]))
  1090. goto release_desc;
  1091. netdev = eth->netdev[mac];
  1092. if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
  1093. goto release_desc;
  1094. /* alloc new buffer */
  1095. new_data = napi_alloc_frag(ring->frag_size);
  1096. if (unlikely(!new_data)) {
  1097. netdev->stats.rx_dropped++;
  1098. goto release_desc;
  1099. }
  1100. dma_addr = dma_map_single(eth->dev,
  1101. new_data + NET_SKB_PAD +
  1102. eth->ip_align,
  1103. ring->buf_size,
  1104. DMA_FROM_DEVICE);
  1105. if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
  1106. skb_free_frag(new_data);
  1107. netdev->stats.rx_dropped++;
  1108. goto release_desc;
  1109. }
  1110. /* receive data */
  1111. skb = build_skb(data, ring->frag_size);
  1112. if (unlikely(!skb)) {
  1113. skb_free_frag(new_data);
  1114. netdev->stats.rx_dropped++;
  1115. goto release_desc;
  1116. }
  1117. skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
  1118. dma_unmap_single(eth->dev, trxd.rxd1,
  1119. ring->buf_size, DMA_FROM_DEVICE);
  1120. pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
  1121. skb->dev = netdev;
  1122. skb_put(skb, pktlen);
  1123. if (trxd.rxd4 & eth->rx_dma_l4_valid)
  1124. skb->ip_summed = CHECKSUM_UNNECESSARY;
  1125. else
  1126. skb_checksum_none_assert(skb);
  1127. skb->protocol = eth_type_trans(skb, netdev);
  1128. if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
  1129. (trxd.rxd2 & RX_DMA_VTAG))
  1130. __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
  1131. RX_DMA_VID(trxd.rxd3));
  1132. skb_record_rx_queue(skb, 0);
  1133. napi_gro_receive(napi, skb);
  1134. ring->data[idx] = new_data;
  1135. rxd->rxd1 = (unsigned int)dma_addr;
  1136. release_desc:
  1137. if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
  1138. rxd->rxd2 = RX_DMA_LSO;
  1139. else
  1140. rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);
  1141. ring->calc_idx = idx;
  1142. done++;
  1143. }
  1144. rx_done:
  1145. if (done) {
  1146. /* make sure that all changes to the dma ring are flushed before
  1147. * we continue
  1148. */
  1149. wmb();
  1150. mtk_update_rx_cpu_idx(eth);
  1151. }
  1152. return done;
  1153. }
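/* Reclaim completed QDMA TX descriptors between the CPU and DMA ring
 * pointers: unmap the buffers and credit per-MAC packet/byte counts
 * used by the caller for BQL completion accounting.
 */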
  1154. static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
  1155. unsigned int *done, unsigned int *bytes)
  1156. {
  1157. struct mtk_tx_ring *ring = &eth->tx_ring;
  1158. struct mtk_tx_dma *desc;
  1159. struct sk_buff *skb;
  1160. struct mtk_tx_buf *tx_buf;
  1161. u32 cpu, dma;
  1162. cpu = mtk_r32(eth, MTK_QTX_CRX_PTR);
  1163. dma = mtk_r32(eth, MTK_QTX_DRX_PTR);
  1164. desc = mtk_qdma_phys_to_virt(ring, cpu);
  1165. while ((cpu != dma) && budget) {
  1166. u32 next_cpu = desc->txd2;
  1167. int mac = 0;
  1168. desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
  1169. if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
  1170. break;
  1171. tx_buf = mtk_desc_to_tx_buf(ring, desc);
  1172. if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
  1173. mac = 1;
  1174. skb = tx_buf->skb;
  1175. if (!skb)
  1176. break;
  1177. if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
  1178. bytes[mac] += skb->len;
  1179. done[mac]++;
  1180. budget--;
  1181. }
  1182. mtk_tx_unmap(eth, tx_buf);
  1183. ring->last_free = desc;
  1184. atomic_inc(&ring->free_count);
  1185. cpu = next_cpu;
  1186. }
  1187. mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);
  1188. return budget;
  1189. }
  1190. static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
  1191. unsigned int *done, unsigned int *bytes)
  1192. {
  1193. struct mtk_tx_ring *ring = &eth->tx_ring;
  1194. struct mtk_tx_dma *desc;
  1195. struct sk_buff *skb;
  1196. struct mtk_tx_buf *tx_buf;
  1197. u32 cpu, dma;
  1198. cpu = ring->cpu_idx;
  1199. dma = mtk_r32(eth, MT7628_TX_DTX_IDX0);
  1200. while ((cpu != dma) && budget) {
  1201. tx_buf = &ring->buf[cpu];
  1202. skb = tx_buf->skb;
  1203. if (!skb)
  1204. break;
  1205. if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
  1206. bytes[0] += skb->len;
  1207. done[0]++;
  1208. budget--;
  1209. }
  1210. mtk_tx_unmap(eth, tx_buf);
  1211. desc = &ring->dma[cpu];
  1212. ring->last_free = desc;
  1213. atomic_inc(&ring->free_count);
  1214. cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
  1215. }
  1216. ring->cpu_idx = cpu;
  1217. return budget;
  1218. }
  1219. static int mtk_poll_tx(struct mtk_eth *eth, int budget)
  1220. {
  1221. struct mtk_tx_ring *ring = &eth->tx_ring;
  1222. unsigned int done[MTK_MAX_DEVS];
  1223. unsigned int bytes[MTK_MAX_DEVS];
  1224. int total = 0, i;
  1225. memset(done, 0, sizeof(done));
  1226. memset(bytes, 0, sizeof(bytes));
  1227. if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
  1228. budget = mtk_poll_tx_qdma(eth, budget, done, bytes);
  1229. else
  1230. budget = mtk_poll_tx_pdma(eth, budget, done, bytes);
  1231. for (i = 0; i < MTK_MAC_COUNT; i++) {
  1232. if (!eth->netdev[i] || !done[i])
  1233. continue;
  1234. netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
  1235. total += done[i];
  1236. }
  1237. if (mtk_queue_stopped(eth) &&
  1238. (atomic_read(&ring->free_count) > ring->thresh))
  1239. mtk_wake_queue(eth);
  1240. return total;
  1241. }
  1242. static void mtk_handle_status_irq(struct mtk_eth *eth)
  1243. {
  1244. u32 status2 = mtk_r32(eth, MTK_INT_STATUS2);
  1245. if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
  1246. mtk_stats_update(eth);
  1247. mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
  1248. MTK_INT_STATUS2);
  1249. }
  1250. }
static int mtk_napi_tx(struct napi_struct *napi, int budget)
{
	struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
	u32 status, mask;
	int tx_done = 0;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
		mtk_handle_status_irq(eth);
	mtk_w32(eth, MTK_TX_DONE_INT, eth->tx_int_status_reg);
	tx_done = mtk_poll_tx(eth, budget);

	if (unlikely(netif_msg_intr(eth))) {
		status = mtk_r32(eth, eth->tx_int_status_reg);
		mask = mtk_r32(eth, eth->tx_int_mask_reg);
		dev_info(eth->dev,
			 "done tx %d, intr 0x%08x/0x%x\n",
			 tx_done, status, mask);
	}

	if (tx_done == budget)
		return budget;

	status = mtk_r32(eth, eth->tx_int_status_reg);
	if (status & MTK_TX_DONE_INT)
		return budget;

	napi_complete(napi);
	mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);

	return tx_done;
}
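
/* NAPI RX poll handler: ack the RX-done interrupt and keep polling the RX
 * rings until either the budget is exhausted or no RX-done status is left,
 * then complete NAPI and unmask the RX interrupt.
 */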
static int mtk_napi_rx(struct napi_struct *napi, int budget)
{
	struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
	u32 status, mask;
	int rx_done = 0;
	int remain_budget = budget;

	mtk_handle_status_irq(eth);

poll_again:
	mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_STATUS);
	rx_done = mtk_poll_rx(napi, remain_budget, eth);

	if (unlikely(netif_msg_intr(eth))) {
		status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
		mask = mtk_r32(eth, MTK_PDMA_INT_MASK);
		dev_info(eth->dev,
			 "done rx %d, intr 0x%08x/0x%x\n",
			 rx_done, status, mask);
	}
	if (rx_done == remain_budget)
		return budget;

	status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
	if (status & MTK_RX_DONE_INT) {
		remain_budget -= rx_done;
		goto poll_again;
	}
	napi_complete(napi);
	mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);

	return rx_done + budget - remain_budget;
}
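
/* Allocate and initialise the TX descriptor ring. On QDMA SoCs the ring->dma
 * descriptors are linked into a circular list and handed to the QDMA block;
 * on PDMA-only SoCs an additional ring->dma_pdma array holds the descriptors
 * the hardware actually consumes.
 */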
static int mtk_tx_alloc(struct mtk_eth *eth)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	int i, sz = sizeof(*ring->dma);

	ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
			    GFP_KERNEL);
	if (!ring->buf)
		goto no_tx_mem;

	ring->dma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
				       &ring->phys, GFP_ATOMIC);
	if (!ring->dma)
		goto no_tx_mem;

	for (i = 0; i < MTK_DMA_SIZE; i++) {
		int next = (i + 1) % MTK_DMA_SIZE;
		u32 next_ptr = ring->phys + next * sz;

		ring->dma[i].txd2 = next_ptr;
		ring->dma[i].txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
	}

	/* On MT7688 (PDMA only) this driver uses the ring->dma structs
	 * only as the framework. The real HW descriptors are the PDMA
	 * descriptors in ring->dma_pdma.
	 */
	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		ring->dma_pdma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
						    &ring->phys_pdma,
						    GFP_ATOMIC);
		if (!ring->dma_pdma)
			goto no_tx_mem;

		for (i = 0; i < MTK_DMA_SIZE; i++) {
			ring->dma_pdma[i].txd2 = TX_DMA_DESP2_DEF;
			ring->dma_pdma[i].txd4 = 0;
		}
	}

	ring->dma_size = MTK_DMA_SIZE;
	atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
	ring->next_free = &ring->dma[0];
	ring->last_free = &ring->dma[MTK_DMA_SIZE - 1];
	ring->thresh = MAX_SKB_FRAGS;

	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR);
		mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR);
		mtk_w32(eth,
			ring->phys + ((MTK_DMA_SIZE - 1) * sz),
			MTK_QTX_CRX_PTR);
		mtk_w32(eth,
			ring->phys + ((MTK_DMA_SIZE - 1) * sz),
			MTK_QTX_DRX_PTR);
		mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES,
			MTK_QTX_CFG(0));
	} else {
		mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
		mtk_w32(eth, MTK_DMA_SIZE, MT7628_TX_MAX_CNT0);
		mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
		mtk_w32(eth, MT7628_PST_DTX_IDX0, MTK_PDMA_RST_IDX);
	}

	return 0;

no_tx_mem:
	return -ENOMEM;
}

static void mtk_tx_clean(struct mtk_eth *eth)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	int i;

	if (ring->buf) {
		for (i = 0; i < MTK_DMA_SIZE; i++)
			mtk_tx_unmap(eth, &ring->buf[i]);
		kfree(ring->buf);
		ring->buf = NULL;
	}

	if (ring->dma) {
		dma_free_coherent(eth->dev,
				  MTK_DMA_SIZE * sizeof(*ring->dma),
				  ring->dma,
				  ring->phys);
		ring->dma = NULL;
	}

	if (ring->dma_pdma) {
		dma_free_coherent(eth->dev,
				  MTK_DMA_SIZE * sizeof(*ring->dma_pdma),
				  ring->dma_pdma,
				  ring->phys_pdma);
		ring->dma_pdma = NULL;
	}
}
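
/* Allocate one RX descriptor ring (normal, HW LRO or QDMA), pre-fill it with
 * page-fragment buffers mapped for DMA and program the ring base, size and
 * CPU index registers.
 */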
static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
{
	struct mtk_rx_ring *ring;
	int rx_data_len, rx_dma_size;
	int i;
	u32 offset = 0;

	if (rx_flag == MTK_RX_FLAGS_QDMA) {
		if (ring_no)
			return -EINVAL;
		ring = &eth->rx_ring_qdma;
		offset = 0x1000;
	} else {
		ring = &eth->rx_ring[ring_no];
	}

	if (rx_flag == MTK_RX_FLAGS_HWLRO) {
		rx_data_len = MTK_MAX_LRO_RX_LENGTH;
		rx_dma_size = MTK_HW_LRO_DMA_SIZE;
	} else {
		rx_data_len = ETH_DATA_LEN;
		rx_dma_size = MTK_DMA_SIZE;
	}

	ring->frag_size = mtk_max_frag_size(rx_data_len);
	ring->buf_size = mtk_max_buf_size(ring->frag_size);
	ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
			     GFP_KERNEL);
	if (!ring->data)
		return -ENOMEM;

	for (i = 0; i < rx_dma_size; i++) {
		ring->data[i] = netdev_alloc_frag(ring->frag_size);
		if (!ring->data[i])
			return -ENOMEM;
	}

	ring->dma = dma_alloc_coherent(eth->dev,
				       rx_dma_size * sizeof(*ring->dma),
				       &ring->phys, GFP_ATOMIC);
	if (!ring->dma)
		return -ENOMEM;

	for (i = 0; i < rx_dma_size; i++) {
		dma_addr_t dma_addr = dma_map_single(eth->dev,
				ring->data[i] + NET_SKB_PAD + eth->ip_align,
				ring->buf_size,
				DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
			return -ENOMEM;
		ring->dma[i].rxd1 = (unsigned int)dma_addr;

		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
			ring->dma[i].rxd2 = RX_DMA_LSO;
		else
			ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size);
	}
	ring->dma_size = rx_dma_size;
	ring->calc_idx_update = false;
	ring->calc_idx = rx_dma_size - 1;
	ring->crx_idx_reg = MTK_PRX_CRX_IDX_CFG(ring_no);
	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	mtk_w32(eth, ring->phys, MTK_PRX_BASE_PTR_CFG(ring_no) + offset);
	mtk_w32(eth, rx_dma_size, MTK_PRX_MAX_CNT_CFG(ring_no) + offset);
	mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg + offset);
	mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_PDMA_RST_IDX + offset);

	return 0;
}

static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring)
{
	int i;

	if (ring->data && ring->dma) {
		for (i = 0; i < ring->dma_size; i++) {
			if (!ring->data[i])
				continue;
			if (!ring->dma[i].rxd1)
				continue;
			dma_unmap_single(eth->dev,
					 ring->dma[i].rxd1,
					 ring->buf_size,
					 DMA_FROM_DEVICE);
			skb_free_frag(ring->data[i]);
		}
		kfree(ring->data);
		ring->data = NULL;
	}

	if (ring->dma) {
		dma_free_coherent(eth->dev,
				  ring->dma_size * sizeof(*ring->dma),
				  ring->dma,
				  ring->phys);
		ring->dma = NULL;
	}
}
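
/* Program the hardware LRO engine: put the LRO rings into auto-learn mode,
 * set the aggregation/age timers and limits, and finally enable LRO in the
 * PDMA LRO control registers.
 */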
static int mtk_hwlro_rx_init(struct mtk_eth *eth)
{
	int i;
	u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
	u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;

	/* set LRO rings to auto-learn modes */
	ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE;

	/* validate LRO ring */
	ring_ctrl_dw2 |= MTK_RING_VLD;

	/* set AGE timer (unit: 20us) */
	ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
	ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;

	/* set max AGG timer (unit: 20us) */
	ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;

	/* set max LRO AGG count */
	ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
	ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;

	for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
		mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
		mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
		mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
	}

	/* IPv4 checksum update enable */
	lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;

	/* switch priority comparison to packet count mode */
	lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;

	/* bandwidth threshold setting */
	mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);

	/* auto-learn score delta setting */
	mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_PDMA_LRO_ALT_SCORE_DELTA);

	/* set refresh timer for altering flows to 1 sec. (unit: 20us) */
	mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
		MTK_PDMA_LRO_ALT_REFRESH_TIMER);

	/* set HW LRO mode & the max aggregation count for rx packets */
	lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);

	/* the minimal remaining room of SDL0 in RXD for lro aggregation */
	lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;

	/* enable HW LRO */
	lro_ctrl_dw0 |= MTK_LRO_EN;

	mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
	mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);

	return 0;
}

static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
{
	int i;
	u32 val;

	/* relinquish lro rings, flush aggregated packets */
	mtk_w32(eth, MTK_LRO_RING_RELINQUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);

	/* wait for relinquishments done */
	for (i = 0; i < 10; i++) {
		val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
		if (val & MTK_LRO_RING_RELINQUISH_DONE) {
			msleep(20);
			continue;
		}
		break;
	}

	/* invalidate lro rings */
	for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
		mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));

	/* disable HW LRO */
	mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
}

static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
{
	u32 reg_val;

	reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));

	/* invalidate the IP setting */
	mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));

	mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));

	/* validate the IP setting */
	mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
}

static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
{
	u32 reg_val;

	reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));

	/* invalidate the IP setting */
	mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));

	mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
}

static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
{
	int cnt = 0;
	int i;

	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
		if (mac->hwlro_ip[i])
			cnt++;
	}

	return cnt;
}
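
/* The HW LRO destination-IP filters below are driven through the ethtool
 * RX n-tuple interface. An illustrative invocation (interface name and
 * address are examples only) would be:
 *
 *   ethtool -N eth0 flow-type tcp4 dst-ip 192.168.1.1 loc 0
 *
 * which reaches mtk_hwlro_add_ipaddr() with fsp->location == 0.
 */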
static int mtk_hwlro_add_ipaddr(struct net_device *dev,
				struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	int hwlro_idx;

	if ((fsp->flow_type != TCP_V4_FLOW) ||
	    (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
	    (fsp->location > 1))
		return -EINVAL;

	mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
	hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;

	mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);

	mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]);

	return 0;
}

static int mtk_hwlro_del_ipaddr(struct net_device *dev,
				struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	int hwlro_idx;

	if (fsp->location > 1)
		return -EINVAL;

	mac->hwlro_ip[fsp->location] = 0;
	hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;

	mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);

	mtk_hwlro_inval_ipaddr(eth, hwlro_idx);

	return 0;
}

static void mtk_hwlro_netdev_disable(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	int i, hwlro_idx;

	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
		mac->hwlro_ip[i] = 0;
		hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i;

		mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
	}

	mac->hwlro_ip_cnt = 0;
}

static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
				    struct ethtool_rxnfc *cmd)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;

	/* only tcp dst ipv4 is meaningful, others are meaningless */
	fsp->flow_type = TCP_V4_FLOW;
	fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
	fsp->m_u.tcp_ip4_spec.ip4dst = 0;

	fsp->h_u.tcp_ip4_spec.ip4src = 0;
	fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
	fsp->h_u.tcp_ip4_spec.psrc = 0;
	fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
	fsp->h_u.tcp_ip4_spec.pdst = 0;
	fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
	fsp->h_u.tcp_ip4_spec.tos = 0;
	fsp->m_u.tcp_ip4_spec.tos = 0xff;

	return 0;
}

static int mtk_hwlro_get_fdir_all(struct net_device *dev,
				  struct ethtool_rxnfc *cmd,
				  u32 *rule_locs)
{
	struct mtk_mac *mac = netdev_priv(dev);
	int cnt = 0;
	int i;

	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
		if (mac->hwlro_ip[i]) {
			rule_locs[cnt] = i;
			cnt++;
		}
	}

	cmd->rule_cnt = cnt;

	return 0;
}

static netdev_features_t mtk_fix_features(struct net_device *dev,
					  netdev_features_t features)
{
	if (!(features & NETIF_F_LRO)) {
		struct mtk_mac *mac = netdev_priv(dev);
		int ip_cnt = mtk_hwlro_get_ip_cnt(mac);

		if (ip_cnt) {
			netdev_info(dev, "RX flow is programmed, LRO should keep on\n");

			features |= NETIF_F_LRO;
		}
	}

	return features;
}

static int mtk_set_features(struct net_device *dev, netdev_features_t features)
{
	int err = 0;

	if (!((dev->features ^ features) & NETIF_F_LRO))
		return 0;

	if (!(features & NETIF_F_LRO))
		mtk_hwlro_netdev_disable(dev);

	return err;
}

/* wait for DMA to finish whatever it is doing before we start using it again */
static int mtk_dma_busy_wait(struct mtk_eth *eth)
{
	unsigned long t_start = jiffies;

	while (1) {
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
			if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) &
			      (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
				return 0;
		} else {
			if (!(mtk_r32(eth, MTK_PDMA_GLO_CFG) &
			      (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
				return 0;
		}

		if (time_after(jiffies, t_start + MTK_DMA_BUSY_TIMEOUT))
			break;
	}

	dev_err(eth->dev, "DMA init timeout\n");
	return -1;
}
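
/* Bring up all DMA resources: the QDMA scratch/free queue (where present),
 * the TX ring, the normal/QDMA/HW-LRO RX rings and the QDMA flow-control
 * thresholds.
 */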
static int mtk_dma_init(struct mtk_eth *eth)
{
	int err;
	u32 i;

	if (mtk_dma_busy_wait(eth))
		return -EBUSY;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		/* QDMA needs scratch memory for internal reordering of the
		 * descriptors
		 */
		err = mtk_init_fq_dma(eth);
		if (err)
			return err;
	}

	err = mtk_tx_alloc(eth);
	if (err)
		return err;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
		if (err)
			return err;
	}

	err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
	if (err)
		return err;

	if (eth->hwlro) {
		for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
			err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO);
			if (err)
				return err;
		}
		err = mtk_hwlro_rx_init(eth);
		if (err)
			return err;
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		/* Enable random early drop and set drop threshold
		 * automatically
		 */
		mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
			FC_THRES_MIN, MTK_QDMA_FC_THRES);
		mtk_w32(eth, 0x0, MTK_QDMA_HRED2);
	}

	return 0;
}

static void mtk_dma_free(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++)
		if (eth->netdev[i])
			netdev_reset_queue(eth->netdev[i]);
	if (eth->scratch_ring) {
		dma_free_coherent(eth->dev,
				  MTK_DMA_SIZE * sizeof(struct mtk_tx_dma),
				  eth->scratch_ring,
				  eth->phy_scratch_ring);
		eth->scratch_ring = NULL;
		eth->phy_scratch_ring = 0;
	}
	mtk_tx_clean(eth);
	mtk_rx_clean(eth, &eth->rx_ring[0]);
	mtk_rx_clean(eth, &eth->rx_ring_qdma);

	if (eth->hwlro) {
		mtk_hwlro_rx_uninit(eth);
		for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
			mtk_rx_clean(eth, &eth->rx_ring[i]);
	}

	kfree(eth->scratch_head);
}

static void mtk_tx_timeout(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	eth->netdev[mac->id]->stats.tx_errors++;
	netif_err(eth, tx_err, dev,
		  "transmit timed out\n");
	schedule_work(&eth->pending_work);
}

static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
{
	struct mtk_eth *eth = _eth;

	if (likely(napi_schedule_prep(&eth->rx_napi))) {
		__napi_schedule(&eth->rx_napi);
		mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
	}

	return IRQ_HANDLED;
}

static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
{
	struct mtk_eth *eth = _eth;

	if (likely(napi_schedule_prep(&eth->tx_napi))) {
		__napi_schedule(&eth->tx_napi);
		mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
	}

	return IRQ_HANDLED;
}

static irqreturn_t mtk_handle_irq(int irq, void *_eth)
{
	struct mtk_eth *eth = _eth;

	if (mtk_r32(eth, MTK_PDMA_INT_MASK) & MTK_RX_DONE_INT) {
		if (mtk_r32(eth, MTK_PDMA_INT_STATUS) & MTK_RX_DONE_INT)
			mtk_handle_irq_rx(irq, _eth);
	}
	if (mtk_r32(eth, eth->tx_int_mask_reg) & MTK_TX_DONE_INT) {
		if (mtk_r32(eth, eth->tx_int_status_reg) & MTK_TX_DONE_INT)
			mtk_handle_irq_tx(irq, _eth);
	}

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void mtk_poll_controller(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
	mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
	mtk_handle_irq_rx(eth->irq[2], dev);
	mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
	mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
}
#endif
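
/* Initialise the DMA rings and enable the TX/RX engines: QDMA plus PDMA RX
 * on QDMA-capable SoCs, PDMA only on MT7628/88.
 */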
static int mtk_start_dma(struct mtk_eth *eth)
{
	u32 rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
	int err;

	err = mtk_dma_init(eth);
	if (err) {
		mtk_dma_free(eth);
		return err;
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		mtk_w32(eth,
			MTK_TX_WB_DDONE | MTK_TX_DMA_EN |
			MTK_DMA_SIZE_16DWORDS | MTK_NDP_CO_PRO |
			MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
			MTK_RX_BT_32DWORDS,
			MTK_QDMA_GLO_CFG);

		mtk_w32(eth,
			MTK_RX_DMA_EN | rx_2b_offset |
			MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
			MTK_PDMA_GLO_CFG);
	} else {
		mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
			MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS,
			MTK_PDMA_GLO_CFG);
	}

	return 0;
}
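
/* ndo_open: attach the PHY via phylink, start DMA/NAPI/interrupts on the
 * first open (both MACs share one set of DMA rings, tracked by dma_refcnt)
 * and start the TX queue.
 */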
static int mtk_open(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	int err;

	err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
	if (err) {
		netdev_err(dev, "%s: could not attach PHY: %d\n", __func__,
			   err);
		return err;
	}

	/* we run 2 netdevs on the same dma ring so we only bring it up once */
	if (!refcount_read(&eth->dma_refcnt)) {
		int err = mtk_start_dma(eth);

		if (err)
			return err;

		napi_enable(&eth->tx_napi);
		napi_enable(&eth->rx_napi);
		mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
		mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
		refcount_set(&eth->dma_refcnt, 1);
	}
	else
		refcount_inc(&eth->dma_refcnt);

	phylink_start(mac->phylink);
	netif_start_queue(dev);

	return 0;
}

static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
{
	u32 val;
	int i;

	/* stop the dma engine */
	spin_lock_bh(&eth->page_lock);
	val = mtk_r32(eth, glo_cfg);
	mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
		glo_cfg);
	spin_unlock_bh(&eth->page_lock);

	/* wait for dma stop */
	for (i = 0; i < 10; i++) {
		val = mtk_r32(eth, glo_cfg);
		if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
			msleep(20);
			continue;
		}
		break;
	}
}
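
/* ndo_stop: the reverse of mtk_open(). DMA, NAPI and interrupts are only
 * torn down once the last user of the shared rings closes its device.
 */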
static int mtk_stop(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	phylink_stop(mac->phylink);

	netif_tx_disable(dev);

	phylink_disconnect_phy(mac->phylink);

	/* only shutdown DMA if this is the last user */
	if (!refcount_dec_and_test(&eth->dma_refcnt))
		return 0;

	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
	mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
	napi_disable(&eth->tx_napi);
	napi_disable(&eth->rx_napi);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
		mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
	mtk_stop_dma(eth, MTK_PDMA_GLO_CFG);

	mtk_dma_free(eth);

	return 0;
}

static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
{
	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
			   reset_bits,
			   reset_bits);

	usleep_range(1000, 1100);
	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
			   reset_bits,
			   ~reset_bits);
	mdelay(10);
}

static void mtk_clk_disable(struct mtk_eth *eth)
{
	int clk;

	for (clk = MTK_CLK_MAX - 1; clk >= 0; clk--)
		clk_disable_unprepare(eth->clks[clk]);
}

static int mtk_clk_enable(struct mtk_eth *eth)
{
	int clk, ret;

	for (clk = 0; clk < MTK_CLK_MAX ; clk++) {
		ret = clk_prepare_enable(eth->clks[clk]);
		if (ret)
			goto err_disable_clks;
	}

	return 0;

err_disable_clks:
	while (--clk >= 0)
		clk_disable_unprepare(eth->clks[clk]);

	return ret;
}
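
/* One-time hardware bring-up: enable runtime PM and clocks, reset the frame
 * engine (a device reset on MT7628/88), set the GE2 pad controls, force the
 * GMACs link-down until phylink configures them and program the interrupt
 * grouping and GDMA forwarding/checksum offload.
 */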
static int mtk_hw_init(struct mtk_eth *eth)
{
	int i, val, ret;

	if (test_and_set_bit(MTK_HW_INIT, &eth->state))
		return 0;

	pm_runtime_enable(eth->dev);
	pm_runtime_get_sync(eth->dev);

	ret = mtk_clk_enable(eth);
	if (ret)
		goto err_disable_pm;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
		ret = device_reset(eth->dev);
		if (ret) {
			dev_err(eth->dev, "MAC reset failed!\n");
			goto err_disable_pm;
		}

		/* enable interrupt delay for RX */
		mtk_w32(eth, MTK_PDMA_DELAY_RX_DELAY, MTK_PDMA_DELAY_INT);

		/* disable delay and normal interrupt */
		mtk_tx_irq_disable(eth, ~0);
		mtk_rx_irq_disable(eth, ~0);

		return 0;
	}

	/* Non-MT7628 handling... */
	ethsys_reset(eth, RSTCTRL_FE);
	ethsys_reset(eth, RSTCTRL_PPE);

	if (eth->pctl) {
		/* Set GE2 driving and slew rate */
		regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);

		/* set GE2 TDSEL */
		regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);

		/* set GE2 TUNE */
		regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
	}

	/* Set linkdown as the default for each GMAC. Its own MCR would be set
	 * up with the more appropriate value when mtk_mac_config call is being
	 * invoked.
	 */
	for (i = 0; i < MTK_MAC_COUNT; i++)
		mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i));

	/* Indicates CDM to parse the MTK special tag from CPU
	 * which also is working out for untag packets.
	 */
	val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
	mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);

	/* Enable RX VLan Offloading */
	mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);

	/* enable interrupt delay for RX */
	mtk_w32(eth, MTK_PDMA_DELAY_RX_DELAY, MTK_PDMA_DELAY_INT);

	/* disable delay and normal interrupt */
	mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
	mtk_tx_irq_disable(eth, ~0);
	mtk_rx_irq_disable(eth, ~0);
	mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
	mtk_w32(eth, 0, MTK_RST_GL);

	/* FE int grouping */
	mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1);
	mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_GRP2);
	mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1);
	mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2);
	mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));

		/* setup the forward port to send frame to PDMA */
		val &= ~0xffff;

		/* Enable RX checksum */
		val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;

		/* setup the mac dma */
		mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
	}

	return 0;

err_disable_pm:
	pm_runtime_put_sync(eth->dev);
	pm_runtime_disable(eth->dev);

	return ret;
}

static int mtk_hw_deinit(struct mtk_eth *eth)
{
	if (!test_and_clear_bit(MTK_HW_INIT, &eth->state))
		return 0;

	mtk_clk_disable(eth);

	pm_runtime_put_sync(eth->dev);
	pm_runtime_disable(eth->dev);

	return 0;
}

static int __init mtk_init(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	const char *mac_addr;

	mac_addr = of_get_mac_address(mac->of_node);
	if (!IS_ERR(mac_addr))
		ether_addr_copy(dev->dev_addr, mac_addr);

	/* If the mac address is invalid, use random mac address */
	if (!is_valid_ether_addr(dev->dev_addr)) {
		eth_hw_addr_random(dev);
		dev_err(eth->dev, "generated random MAC address %pM\n",
			dev->dev_addr);
	}

	return 0;
}

static void mtk_uninit(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	phylink_disconnect_phy(mac->phylink);
	mtk_tx_irq_disable(eth, ~0);
	mtk_rx_irq_disable(eth, ~0);
}

static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mtk_mac *mac = netdev_priv(dev);

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return phylink_mii_ioctl(mac->phylink, ifr, cmd);
	default:
		break;
	}

	return -EOPNOTSUPP;
}
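
/* Reset worker scheduled from the TX timeout path: under RTNL, stop every
 * active netdev, re-initialise the hardware and re-open the devices that
 * were running before the reset.
 */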
static void mtk_pending_work(struct work_struct *work)
{
	struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
	int err, i;
	unsigned long restart = 0;

	rtnl_lock();

	dev_dbg(eth->dev, "[%s][%d] reset\n", __func__, __LINE__);

	while (test_and_set_bit_lock(MTK_RESETTING, &eth->state))
		cpu_relax();

	dev_dbg(eth->dev, "[%s][%d] mtk_stop starts\n", __func__, __LINE__);
	/* stop all devices to make sure that dma is properly shut down */
	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		mtk_stop(eth->netdev[i]);
		__set_bit(i, &restart);
	}
	dev_dbg(eth->dev, "[%s][%d] mtk_stop ends\n", __func__, __LINE__);

	/* restart underlying hardware such as power, clock, pin mux
	 * and the connected phy
	 */
	mtk_hw_deinit(eth);

	if (eth->dev->pins)
		pinctrl_select_state(eth->dev->pins->p,
				     eth->dev->pins->default_state);
	mtk_hw_init(eth);

	/* restart DMA and enable IRQs */
	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!test_bit(i, &restart))
			continue;
		err = mtk_open(eth->netdev[i]);
		if (err) {
			netif_alert(eth, ifup, eth->netdev[i],
				    "Driver up/down cycle failed, closing device.\n");
			dev_close(eth->netdev[i]);
		}
	}

	dev_dbg(eth->dev, "[%s][%d] reset done\n", __func__, __LINE__);

	clear_bit_unlock(MTK_RESETTING, &eth->state);

	rtnl_unlock();
}

static int mtk_free_dev(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		free_netdev(eth->netdev[i]);
	}

	return 0;
}

static int mtk_unreg_dev(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		unregister_netdev(eth->netdev[i]);
	}

	return 0;
}

static int mtk_cleanup(struct mtk_eth *eth)
{
	mtk_unreg_dev(eth);
	mtk_free_dev(eth);
	cancel_work_sync(&eth->pending_work);

	return 0;
}

static int mtk_get_link_ksettings(struct net_device *ndev,
				  struct ethtool_link_ksettings *cmd)
{
	struct mtk_mac *mac = netdev_priv(ndev);

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	return phylink_ethtool_ksettings_get(mac->phylink, cmd);
}

static int mtk_set_link_ksettings(struct net_device *ndev,
				  const struct ethtool_link_ksettings *cmd)
{
	struct mtk_mac *mac = netdev_priv(ndev);

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	return phylink_ethtool_ksettings_set(mac->phylink, cmd);
}

static void mtk_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	struct mtk_mac *mac = netdev_priv(dev);

	strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
	strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
	info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
}

static u32 mtk_get_msglevel(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);

	return mac->hw->msg_enable;
}

static void mtk_set_msglevel(struct net_device *dev, u32 value)
{
	struct mtk_mac *mac = netdev_priv(dev);

	mac->hw->msg_enable = value;
}

static int mtk_nway_reset(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	if (!mac->phylink)
		return -ENOTSUPP;

	return phylink_ethtool_nway_reset(mac->phylink);
}

static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
			memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		break;
	}
}

static int mtk_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(mtk_ethtool_stats);
	default:
		return -EOPNOTSUPP;
	}
}

static void mtk_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_hw_stats *hwstats = mac->hw_stats;
	u64 *data_src, *data_dst;
	unsigned int start;
	int i;

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return;

	if (netif_running(dev) && netif_device_present(dev)) {
		if (spin_trylock_bh(&hwstats->stats_lock)) {
			mtk_stats_update_mac(mac);
			spin_unlock_bh(&hwstats->stats_lock);
		}
	}

	data_src = (u64 *)hwstats;

	do {
		data_dst = data;
		start = u64_stats_fetch_begin_irq(&hwstats->syncp);

		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
			*data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
	} while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
}

static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			 u32 *rule_locs)
{
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		if (dev->hw_features & NETIF_F_LRO) {
			cmd->data = MTK_MAX_RX_RING_NUM;
			ret = 0;
		}
		break;
	case ETHTOOL_GRXCLSRLCNT:
		if (dev->hw_features & NETIF_F_LRO) {
			struct mtk_mac *mac = netdev_priv(dev);

			cmd->rule_cnt = mac->hwlro_ip_cnt;
			ret = 0;
		}
		break;
	case ETHTOOL_GRXCLSRULE:
		if (dev->hw_features & NETIF_F_LRO)
			ret = mtk_hwlro_get_fdir_entry(dev, cmd);
		break;
	case ETHTOOL_GRXCLSRLALL:
		if (dev->hw_features & NETIF_F_LRO)
			ret = mtk_hwlro_get_fdir_all(dev, cmd,
						     rule_locs);
		break;
	default:
		break;
	}

	return ret;
}

static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		if (dev->hw_features & NETIF_F_LRO)
			ret = mtk_hwlro_add_ipaddr(dev, cmd);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		if (dev->hw_features & NETIF_F_LRO)
			ret = mtk_hwlro_del_ipaddr(dev, cmd);
		break;
	default:
		break;
	}

	return ret;
}

static const struct ethtool_ops mtk_ethtool_ops = {
	.get_link_ksettings = mtk_get_link_ksettings,
	.set_link_ksettings = mtk_set_link_ksettings,
	.get_drvinfo = mtk_get_drvinfo,
	.get_msglevel = mtk_get_msglevel,
	.set_msglevel = mtk_set_msglevel,
	.nway_reset = mtk_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_strings = mtk_get_strings,
	.get_sset_count = mtk_get_sset_count,
	.get_ethtool_stats = mtk_get_ethtool_stats,
	.get_rxnfc = mtk_get_rxnfc,
	.set_rxnfc = mtk_set_rxnfc,
};

static const struct net_device_ops mtk_netdev_ops = {
	.ndo_init = mtk_init,
	.ndo_uninit = mtk_uninit,
	.ndo_open = mtk_open,
	.ndo_stop = mtk_stop,
	.ndo_start_xmit = mtk_start_xmit,
	.ndo_set_mac_address = mtk_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_do_ioctl = mtk_do_ioctl,
	.ndo_tx_timeout = mtk_tx_timeout,
	.ndo_get_stats64 = mtk_get_stats64,
	.ndo_fix_features = mtk_fix_features,
	.ndo_set_features = mtk_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = mtk_poll_controller,
#endif
};
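
/* Create one MAC/netdev from a "mediatek,eth-mac" DT child node: allocate
 * the netdev and per-MAC stats, create the phylink instance from the DT
 * phy-mode and wire up the netdev/ethtool ops and feature flags.
 */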
static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
{
	const __be32 *_id = of_get_property(np, "reg", NULL);
	struct phylink *phylink;
	int phy_mode, id, err;
	struct mtk_mac *mac;

	if (!_id) {
		dev_err(eth->dev, "missing mac id\n");
		return -EINVAL;
	}

	id = be32_to_cpup(_id);
	if (id >= MTK_MAC_COUNT) {
		dev_err(eth->dev, "%d is not a valid mac id\n", id);
		return -EINVAL;
	}

	if (eth->netdev[id]) {
		dev_err(eth->dev, "duplicate mac id found: %d\n", id);
		return -EINVAL;
	}

	eth->netdev[id] = alloc_etherdev(sizeof(*mac));
	if (!eth->netdev[id]) {
		dev_err(eth->dev, "alloc_etherdev failed\n");
		return -ENOMEM;
	}
	mac = netdev_priv(eth->netdev[id]);
	eth->mac[id] = mac;
	mac->id = id;
	mac->hw = eth;
	mac->of_node = np;

	memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
	mac->hwlro_ip_cnt = 0;

	mac->hw_stats = devm_kzalloc(eth->dev,
				     sizeof(*mac->hw_stats),
				     GFP_KERNEL);
	if (!mac->hw_stats) {
		dev_err(eth->dev, "failed to allocate counter memory\n");
		err = -ENOMEM;
		goto free_netdev;
	}
	spin_lock_init(&mac->hw_stats->stats_lock);
	u64_stats_init(&mac->hw_stats->syncp);
	mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;

	/* phylink create */
	phy_mode = of_get_phy_mode(np);
	if (phy_mode < 0) {
		dev_err(eth->dev, "incorrect phy-mode\n");
		err = -EINVAL;
		goto free_netdev;
	}

	/* mac config is not set */
	mac->interface = PHY_INTERFACE_MODE_NA;
	mac->mode = MLO_AN_PHY;
	mac->speed = SPEED_UNKNOWN;

	mac->phylink_config.dev = &eth->netdev[id]->dev;
	mac->phylink_config.type = PHYLINK_NETDEV;

	phylink = phylink_create(&mac->phylink_config,
				 of_fwnode_handle(mac->of_node),
				 phy_mode, &mtk_phylink_ops);
	if (IS_ERR(phylink)) {
		err = PTR_ERR(phylink);
		goto free_netdev;
	}

	mac->phylink = phylink;

	SET_NETDEV_DEV(eth->netdev[id], eth->dev);
	eth->netdev[id]->watchdog_timeo = 5 * HZ;
	eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
	eth->netdev[id]->base_addr = (unsigned long)eth->base;

	eth->netdev[id]->hw_features = eth->soc->hw_features;
	if (eth->hwlro)
		eth->netdev[id]->hw_features |= NETIF_F_LRO;

	eth->netdev[id]->vlan_features = eth->soc->hw_features &
		~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
	eth->netdev[id]->features |= eth->soc->hw_features;
	eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;

	eth->netdev[id]->irq = eth->irq[0];
	eth->netdev[id]->dev.of_node = np;

	eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;

	return 0;

free_netdev:
	free_netdev(eth->netdev[id]);
	return err;
}
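
/* Platform probe: map the frame engine registers, look up the ethsys/
 * infracfg/pctl syscons, clocks and interrupts described in the device tree,
 * initialise the hardware, create one netdev per "mediatek,eth-mac" child
 * node and add the shared TX/RX NAPI handlers on a dummy netdev.
 */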
static int mtk_probe(struct platform_device *pdev)
{
	struct device_node *mac_np;
	struct mtk_eth *eth;
	int err, i;

	eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
	if (!eth)
		return -ENOMEM;

	eth->soc = of_device_get_match_data(&pdev->dev);

	eth->dev = &pdev->dev;
	eth->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(eth->base))
		return PTR_ERR(eth->base);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		eth->tx_int_mask_reg = MTK_QDMA_INT_MASK;
		eth->tx_int_status_reg = MTK_QDMA_INT_STATUS;
	} else {
		eth->tx_int_mask_reg = MTK_PDMA_INT_MASK;
		eth->tx_int_status_reg = MTK_PDMA_INT_STATUS;
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
		eth->rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA;
		eth->ip_align = NET_IP_ALIGN;
	} else {
		eth->rx_dma_l4_valid = RX_DMA_L4_VALID;
	}

	spin_lock_init(&eth->page_lock);
	spin_lock_init(&eth->tx_irq_lock);
	spin_lock_init(&eth->rx_irq_lock);

	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
		eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
							      "mediatek,ethsys");
		if (IS_ERR(eth->ethsys)) {
			dev_err(&pdev->dev, "no ethsys regmap found\n");
			return PTR_ERR(eth->ethsys);
		}
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) {
		eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
							     "mediatek,infracfg");
		if (IS_ERR(eth->infra)) {
			dev_err(&pdev->dev, "no infracfg regmap found\n");
			return PTR_ERR(eth->infra);
		}
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
		eth->sgmii = devm_kzalloc(eth->dev, sizeof(*eth->sgmii),
					  GFP_KERNEL);
		if (!eth->sgmii)
			return -ENOMEM;

		err = mtk_sgmii_init(eth->sgmii, pdev->dev.of_node,
				     eth->soc->ana_rgc3);

		if (err)
			return err;
	}

	if (eth->soc->required_pctl) {
		eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
							    "mediatek,pctl");
		if (IS_ERR(eth->pctl)) {
			dev_err(&pdev->dev, "no pctl regmap found\n");
			return PTR_ERR(eth->pctl);
		}
	}

	for (i = 0; i < 3; i++) {
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0)
			eth->irq[i] = eth->irq[0];
		else
			eth->irq[i] = platform_get_irq(pdev, i);
		if (eth->irq[i] < 0) {
			dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
			return -ENXIO;
		}
	}
	for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
		eth->clks[i] = devm_clk_get(eth->dev,
					    mtk_clks_source_name[i]);
		if (IS_ERR(eth->clks[i])) {
			if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER)
				return -EPROBE_DEFER;
			if (eth->soc->required_clks & BIT(i)) {
				dev_err(&pdev->dev, "clock %s not found\n",
					mtk_clks_source_name[i]);
				return -EINVAL;
			}
			eth->clks[i] = NULL;
		}
	}

	eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
	INIT_WORK(&eth->pending_work, mtk_pending_work);

	err = mtk_hw_init(eth);
	if (err)
		return err;

	eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO);

	for_each_child_of_node(pdev->dev.of_node, mac_np) {
		if (!of_device_is_compatible(mac_np,
					     "mediatek,eth-mac"))
			continue;

		if (!of_device_is_available(mac_np))
			continue;

		err = mtk_add_mac(eth, mac_np);
		if (err) {
			of_node_put(mac_np);
			goto err_deinit_hw;
		}
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
		err = devm_request_irq(eth->dev, eth->irq[0],
				       mtk_handle_irq, 0,
				       dev_name(eth->dev), eth);
	} else {
		err = devm_request_irq(eth->dev, eth->irq[1],
				       mtk_handle_irq_tx, 0,
				       dev_name(eth->dev), eth);
		if (err)
			goto err_free_dev;

		err = devm_request_irq(eth->dev, eth->irq[2],
				       mtk_handle_irq_rx, 0,
				       dev_name(eth->dev), eth);
	}
	if (err)
		goto err_free_dev;

	/* No MT7628/88 support yet */
	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
		err = mtk_mdio_init(eth);
		if (err)
			goto err_free_dev;
	}

	for (i = 0; i < MTK_MAX_DEVS; i++) {
		if (!eth->netdev[i])
			continue;

		err = register_netdev(eth->netdev[i]);
		if (err) {
			dev_err(eth->dev, "error bringing up device\n");
			goto err_deinit_mdio;
		} else
			netif_info(eth, probe, eth->netdev[i],
				   "mediatek frame engine at 0x%08lx, irq %d\n",
				   eth->netdev[i]->base_addr, eth->irq[0]);
	}

	/* we run 2 devices on the same DMA ring so we need a dummy device
	 * for NAPI to work
	 */
	init_dummy_netdev(&eth->dummy_dev);
	netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx,
		       MTK_NAPI_WEIGHT);
	netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx,
		       MTK_NAPI_WEIGHT);

	platform_set_drvdata(pdev, eth);

	return 0;

err_deinit_mdio:
	mtk_mdio_cleanup(eth);
err_free_dev:
	mtk_free_dev(eth);
err_deinit_hw:
	mtk_hw_deinit(eth);

	return err;
}

static int mtk_remove(struct platform_device *pdev)
{
	struct mtk_eth *eth = platform_get_drvdata(pdev);
	struct mtk_mac *mac;
	int i;

	/* stop all devices to make sure that dma is properly shut down */
	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		mtk_stop(eth->netdev[i]);
		mac = netdev_priv(eth->netdev[i]);
		phylink_disconnect_phy(mac->phylink);
	}

	mtk_hw_deinit(eth);

	netif_napi_del(&eth->tx_napi);
	netif_napi_del(&eth->rx_napi);
	mtk_cleanup(eth);
	mtk_mdio_cleanup(eth);

	return 0;
}

static const struct mtk_soc_data mt2701_data = {
	.caps = MT7623_CAPS | MTK_HWLRO,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7623_CLKS_BITMAP,
	.required_pctl = true,
};

static const struct mtk_soc_data mt7621_data = {
	.caps = MT7621_CAPS,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7621_CLKS_BITMAP,
	.required_pctl = false,
};

static const struct mtk_soc_data mt7622_data = {
	.ana_rgc3 = 0x2028,
	.caps = MT7622_CAPS | MTK_HWLRO,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7622_CLKS_BITMAP,
	.required_pctl = false,
};

static const struct mtk_soc_data mt7623_data = {
	.caps = MT7623_CAPS | MTK_HWLRO,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7623_CLKS_BITMAP,
	.required_pctl = true,
};

static const struct mtk_soc_data mt7629_data = {
	.ana_rgc3 = 0x128,
	.caps = MT7629_CAPS | MTK_HWLRO,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7629_CLKS_BITMAP,
	.required_pctl = false,
};

static const struct mtk_soc_data rt5350_data = {
	.caps = MT7628_CAPS,
	.hw_features = MTK_HW_FEATURES_MT7628,
	.required_clks = MT7628_CLKS_BITMAP,
	.required_pctl = false,
};

const struct of_device_id of_mtk_match[] = {
	{ .compatible = "mediatek,mt2701-eth", .data = &mt2701_data},
	{ .compatible = "mediatek,mt7621-eth", .data = &mt7621_data},
	{ .compatible = "mediatek,mt7622-eth", .data = &mt7622_data},
	{ .compatible = "mediatek,mt7623-eth", .data = &mt7623_data},
	{ .compatible = "mediatek,mt7629-eth", .data = &mt7629_data},
	{ .compatible = "ralink,rt5350-eth", .data = &rt5350_data},
	{},
};
MODULE_DEVICE_TABLE(of, of_mtk_match);

static struct platform_driver mtk_driver = {
	.probe = mtk_probe,
	.remove = mtk_remove,
	.driver = {
		.name = "mtk_soc_eth",
		.of_match_table = of_mtk_match,
	},
};

module_platform_driver(mtk_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");