mv643xx_eth.c

/*
 * Driver for Marvell Discovery (MV643XX) and Marvell Orion ethernet ports
 * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com>
 *
 * Based on the 64360 driver from:
 *	Copyright (C) 2002 Rabeeh Khoury <rabeeh@galileo.co.il>
 *		   Rabeeh Khoury <rabeeh@marvell.com>
 *
 * Copyright (C) 2003 PMC-Sierra, Inc.,
 *	written by Manish Lachwani
 *
 * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org>
 *
 * Copyright (C) 2004-2006 MontaVista Software, Inc.
 *			   Dale Farnsworth <dale@farnsworth.org>
 *
 * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com>
 *				     <sjhill@realitydiluted.com>
 *
 * Copyright (C) 2007-2008 Marvell Semiconductor
 *			   Lennert Buytenhek <buytenh@marvell.com>
 *
 * Copyright (C) 2013 Michael Stapelberg <michael@stapelberg.de>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <net/tso.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/phy.h>
#include <linux/mv643xx_eth.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>

static char mv643xx_eth_driver_name[] = "mv643xx_eth";
static char mv643xx_eth_driver_version[] = "1.4";

/*
 * Registers shared between all ports.
 */
#define PHY_ADDR			0x0000
#define WINDOW_BASE(w)			(0x0200 + ((w) << 3))
#define WINDOW_SIZE(w)			(0x0204 + ((w) << 3))
#define WINDOW_REMAP_HIGH(w)		(0x0280 + ((w) << 2))
#define WINDOW_BAR_ENABLE		0x0290
#define WINDOW_PROTECT(w)		(0x0294 + ((w) << 4))

/*
 * Main per-port registers.  These live at offset 0x0400 for
 * port #0, 0x0800 for port #1, and 0x0c00 for port #2.
 */
#define PORT_CONFIG			0x0000
#define  UNICAST_PROMISCUOUS_MODE	0x00000001
#define PORT_CONFIG_EXT			0x0004
#define MAC_ADDR_LOW			0x0014
#define MAC_ADDR_HIGH			0x0018
#define SDMA_CONFIG			0x001c
#define  TX_BURST_SIZE_16_64BIT		0x01000000
#define  TX_BURST_SIZE_4_64BIT		0x00800000
#define  BLM_TX_NO_SWAP			0x00000020
#define  BLM_RX_NO_SWAP			0x00000010
#define  RX_BURST_SIZE_16_64BIT		0x00000008
#define  RX_BURST_SIZE_4_64BIT		0x00000004
#define PORT_SERIAL_CONTROL		0x003c
#define  SET_MII_SPEED_TO_100		0x01000000
#define  SET_GMII_SPEED_TO_1000		0x00800000
#define  SET_FULL_DUPLEX_MODE		0x00200000
#define  MAX_RX_PACKET_9700BYTE		0x000a0000
#define  DISABLE_AUTO_NEG_SPEED_GMII	0x00002000
#define  DO_NOT_FORCE_LINK_FAIL		0x00000400
#define  SERIAL_PORT_CONTROL_RESERVED	0x00000200
#define  DISABLE_AUTO_NEG_FOR_FLOW_CTRL	0x00000008
#define  DISABLE_AUTO_NEG_FOR_DUPLEX	0x00000004
#define  FORCE_LINK_PASS		0x00000002
#define  SERIAL_PORT_ENABLE		0x00000001
#define PORT_STATUS			0x0044
#define  TX_FIFO_EMPTY			0x00000400
#define  TX_IN_PROGRESS			0x00000080
#define  PORT_SPEED_MASK		0x00000030
#define  PORT_SPEED_1000		0x00000010
#define  PORT_SPEED_100			0x00000020
#define  PORT_SPEED_10			0x00000000
#define  FLOW_CONTROL_ENABLED		0x00000008
#define  FULL_DUPLEX			0x00000004
#define  LINK_UP			0x00000002
#define TXQ_COMMAND			0x0048
#define TXQ_FIX_PRIO_CONF		0x004c
#define PORT_SERIAL_CONTROL1		0x004c
#define  CLK125_BYPASS_EN		0x00000010
#define TX_BW_RATE			0x0050
#define TX_BW_MTU			0x0058
#define TX_BW_BURST			0x005c
#define INT_CAUSE			0x0060
#define  INT_TX_END			0x07f80000
#define  INT_TX_END_0			0x00080000
#define  INT_RX				0x000003fc
#define  INT_RX_0			0x00000004
#define  INT_EXT			0x00000002
#define INT_CAUSE_EXT			0x0064
#define  INT_EXT_LINK_PHY		0x00110000
#define  INT_EXT_TX			0x000000ff
#define INT_MASK			0x0068
#define INT_MASK_EXT			0x006c
#define TX_FIFO_URGENT_THRESHOLD	0x0074
#define RX_DISCARD_FRAME_CNT		0x0084
#define RX_OVERRUN_FRAME_CNT		0x0088
#define TXQ_FIX_PRIO_CONF_MOVED		0x00dc
#define TX_BW_RATE_MOVED		0x00e0
#define TX_BW_MTU_MOVED			0x00e8
#define TX_BW_BURST_MOVED		0x00ec
#define RXQ_CURRENT_DESC_PTR(q)		(0x020c + ((q) << 4))
#define RXQ_COMMAND			0x0280
#define TXQ_CURRENT_DESC_PTR(q)		(0x02c0 + ((q) << 2))
#define TXQ_BW_TOKENS(q)		(0x0300 + ((q) << 4))
#define TXQ_BW_CONF(q)			(0x0304 + ((q) << 4))
#define TXQ_BW_WRR_CONF(q)		(0x0308 + ((q) << 4))

/*
 * Misc per-port registers.
 */
#define MIB_COUNTERS(p)			(0x1000 + ((p) << 7))
#define SPECIAL_MCAST_TABLE(p)		(0x1400 + ((p) << 10))
#define OTHER_MCAST_TABLE(p)		(0x1500 + ((p) << 10))
#define UNICAST_TABLE(p)		(0x1600 + ((p) << 10))

/*
 * SDMA configuration register default value.
 */
#if defined(__BIG_ENDIAN)
#define PORT_SDMA_CONFIG_DEFAULT_VALUE		\
		(RX_BURST_SIZE_4_64BIT	|	\
		 TX_BURST_SIZE_4_64BIT)
#elif defined(__LITTLE_ENDIAN)
#define PORT_SDMA_CONFIG_DEFAULT_VALUE		\
		(RX_BURST_SIZE_4_64BIT	|	\
		 BLM_RX_NO_SWAP		|	\
		 BLM_TX_NO_SWAP		|	\
		 TX_BURST_SIZE_4_64BIT)
#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif

/*
 * Misc definitions.
 */
#define DEFAULT_RX_QUEUE_SIZE	128
#define DEFAULT_TX_QUEUE_SIZE	512
#define SKB_DMA_REALIGN		((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)

/* Max number of allowed TCP segments for software TSO */
#define MV643XX_MAX_TSO_SEGS	100
#define MV643XX_MAX_SKB_DESCS	(MV643XX_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)

#define IS_TSO_HEADER(txq, addr) \
	((addr >= txq->tso_hdrs_dma) && \
	 (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))

#define DESC_DMA_MAP_SINGLE	0
#define DESC_DMA_MAP_PAGE	1
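
/*
 * The per-queue tx_desc_mapping[] array records, for each descriptor,
 * whether its buffer was mapped with dma_map_single() (DESC_DMA_MAP_SINGLE)
 * or with skb_frag_dma_map() (DESC_DMA_MAP_PAGE), so that txq_reclaim()
 * can call the matching dma_unmap_*() routine when the descriptor is freed.
 */
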
/*
 * RX/TX descriptors.
 */
#if defined(__BIG_ENDIAN)
struct rx_desc {
	u16 byte_cnt;		/* Descriptor buffer byte count		*/
	u16 buf_size;		/* Buffer size				*/
	u32 cmd_sts;		/* Descriptor command status		*/
	u32 next_desc_ptr;	/* Next descriptor pointer		*/
	u32 buf_ptr;		/* Descriptor buffer pointer		*/
};

struct tx_desc {
	u16 byte_cnt;		/* buffer byte count			*/
	u16 l4i_chk;		/* CPU provided TCP checksum		*/
	u32 cmd_sts;		/* Command/status field			*/
	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
	u32 buf_ptr;		/* pointer to buffer for this descriptor*/
};
#elif defined(__LITTLE_ENDIAN)
struct rx_desc {
	u32 cmd_sts;		/* Descriptor command status		*/
	u16 buf_size;		/* Buffer size				*/
	u16 byte_cnt;		/* Descriptor buffer byte count		*/
	u32 buf_ptr;		/* Descriptor buffer pointer		*/
	u32 next_desc_ptr;	/* Next descriptor pointer		*/
};

struct tx_desc {
	u32 cmd_sts;		/* Command/status field			*/
	u16 l4i_chk;		/* CPU provided TCP checksum		*/
	u16 byte_cnt;		/* buffer byte count			*/
	u32 buf_ptr;		/* pointer to buffer for this descriptor*/
	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
};
#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif

/* RX & TX descriptor command */
#define BUFFER_OWNED_BY_DMA		0x80000000

/* RX & TX descriptor status */
#define ERROR_SUMMARY			0x00000001

/* RX descriptor status */
#define LAYER_4_CHECKSUM_OK		0x40000000
#define RX_ENABLE_INTERRUPT		0x20000000
#define RX_FIRST_DESC			0x08000000
#define RX_LAST_DESC			0x04000000
#define RX_IP_HDR_OK			0x02000000
#define RX_PKT_IS_IPV4			0x01000000
#define RX_PKT_IS_ETHERNETV2		0x00800000
#define RX_PKT_LAYER4_TYPE_MASK		0x00600000
#define RX_PKT_LAYER4_TYPE_TCP_IPV4	0x00000000
#define RX_PKT_IS_VLAN_TAGGED		0x00080000

/* TX descriptor command */
#define TX_ENABLE_INTERRUPT		0x00800000
#define GEN_CRC				0x00400000
#define TX_FIRST_DESC			0x00200000
#define TX_LAST_DESC			0x00100000
#define ZERO_PADDING			0x00080000
#define GEN_IP_V4_CHECKSUM		0x00040000
#define GEN_TCP_UDP_CHECKSUM		0x00020000
#define UDP_FRAME			0x00010000
#define MAC_HDR_EXTRA_4_BYTES		0x00008000
#define GEN_TCP_UDP_CHK_FULL		0x00000400
#define MAC_HDR_EXTRA_8_BYTES		0x00000200

#define TX_IHL_SHIFT			11


/* global *******************************************************************/
struct mv643xx_eth_shared_private {
	/*
	 * Ethernet controller base address.
	 */
	void __iomem *base;

	/*
	 * Per-port MBUS window access register value.
	 */
	u32 win_protect;

	/*
	 * Hardware-specific parameters.
	 */
	int extended_rx_coal_limit;
	int tx_bw_control;
	int tx_csum_limit;
	struct clk *clk;
};

#define TX_BW_CONTROL_ABSENT		0
#define TX_BW_CONTROL_OLD_LAYOUT	1
#define TX_BW_CONTROL_NEW_LAYOUT	2

static int mv643xx_eth_open(struct net_device *dev);
static int mv643xx_eth_stop(struct net_device *dev);


/* per-port *****************************************************************/
struct mib_counters {
	u64 good_octets_received;
	u32 bad_octets_received;
	u32 internal_mac_transmit_err;
	u32 good_frames_received;
	u32 bad_frames_received;
	u32 broadcast_frames_received;
	u32 multicast_frames_received;
	u32 frames_64_octets;
	u32 frames_65_to_127_octets;
	u32 frames_128_to_255_octets;
	u32 frames_256_to_511_octets;
	u32 frames_512_to_1023_octets;
	u32 frames_1024_to_max_octets;
	u64 good_octets_sent;
	u32 good_frames_sent;
	u32 excessive_collision;
	u32 multicast_frames_sent;
	u32 broadcast_frames_sent;
	u32 unrec_mac_control_received;
	u32 fc_sent;
	u32 good_fc_received;
	u32 bad_fc_received;
	u32 undersize_received;
	u32 fragments_received;
	u32 oversize_received;
	u32 jabber_received;
	u32 mac_receive_error;
	u32 bad_crc_event;
	u32 collision;
	u32 late_collision;
	/* Non MIB hardware counters */
	u32 rx_discard;
	u32 rx_overrun;
};

struct rx_queue {
	int index;

	int rx_ring_size;

	int rx_desc_count;
	int rx_curr_desc;
	int rx_used_desc;

	struct rx_desc *rx_desc_area;
	dma_addr_t rx_desc_dma;
	int rx_desc_area_size;
	struct sk_buff **rx_skb;
};

struct tx_queue {
	int index;

	int tx_ring_size;

	int tx_desc_count;
	int tx_curr_desc;
	int tx_used_desc;

	int tx_stop_threshold;
	int tx_wake_threshold;

	char *tso_hdrs;
	dma_addr_t tso_hdrs_dma;

	struct tx_desc *tx_desc_area;
	char *tx_desc_mapping; /* array to track the type of the dma mapping */
	dma_addr_t tx_desc_dma;
	int tx_desc_area_size;

	struct sk_buff_head tx_skb;

	unsigned long tx_packets;
	unsigned long tx_bytes;
	unsigned long tx_dropped;
};
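
/*
 * A transmit queue is stopped once tx_desc_count reaches tx_stop_threshold
 * (see mv643xx_eth_xmit()) and is woken again once reclaim brings the count
 * down to tx_wake_threshold or below (see txq_maybe_wake()).
 */
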
struct mv643xx_eth_private {
	struct mv643xx_eth_shared_private *shared;
	void __iomem *base;
	int port_num;

	struct net_device *dev;

	struct timer_list mib_counters_timer;
	spinlock_t mib_counters_lock;
	struct mib_counters mib_counters;

	struct work_struct tx_timeout_task;

	struct napi_struct napi;
	u32 int_mask;
	u8 oom;
	u8 work_link;
	u8 work_tx;
	u8 work_tx_end;
	u8 work_rx;
	u8 work_rx_refill;

	int skb_size;

	/*
	 * RX state.
	 */
	int rx_ring_size;
	unsigned long rx_desc_sram_addr;
	int rx_desc_sram_size;
	int rxq_count;
	struct timer_list rx_oom;
	struct rx_queue rxq[8];

	/*
	 * TX state.
	 */
	int tx_ring_size;
	unsigned long tx_desc_sram_addr;
	int tx_desc_sram_size;
	int txq_count;
	struct tx_queue txq[8];

	/*
	 * Hardware-specific parameters.
	 */
	struct clk *clk;
	unsigned int t_clk;
};


/* port register accessors **************************************************/
static inline u32 rdl(struct mv643xx_eth_private *mp, int offset)
{
	return readl(mp->shared->base + offset);
}

static inline u32 rdlp(struct mv643xx_eth_private *mp, int offset)
{
	return readl(mp->base + offset);
}

static inline void wrl(struct mv643xx_eth_private *mp, int offset, u32 data)
{
	writel(data, mp->shared->base + offset);
}

static inline void wrlp(struct mv643xx_eth_private *mp, int offset, u32 data)
{
	writel(data, mp->base + offset);
}


/* rxq/txq helper functions *************************************************/
static struct mv643xx_eth_private *rxq_to_mp(struct rx_queue *rxq)
{
	return container_of(rxq, struct mv643xx_eth_private, rxq[rxq->index]);
}

static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq)
{
	return container_of(txq, struct mv643xx_eth_private, txq[txq->index]);
}
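
/*
 * In the RXQ_COMMAND and TXQ_COMMAND registers the low byte carries
 * per-queue enable bits and the next byte carries per-queue disable
 * requests: enabling a queue writes (1 << index), disabling writes the
 * same bit shifted left by 8 and then polls until the enable bit clears.
 */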
static void rxq_enable(struct rx_queue *rxq)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	wrlp(mp, RXQ_COMMAND, 1 << rxq->index);
}

static void rxq_disable(struct rx_queue *rxq)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	u8 mask = 1 << rxq->index;

	wrlp(mp, RXQ_COMMAND, mask << 8);
	while (rdlp(mp, RXQ_COMMAND) & mask)
		udelay(10);
}

static void txq_reset_hw_ptr(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	u32 addr;

	addr = (u32)txq->tx_desc_dma;
	addr += txq->tx_curr_desc * sizeof(struct tx_desc);
	wrlp(mp, TXQ_CURRENT_DESC_PTR(txq->index), addr);
}

static void txq_enable(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	wrlp(mp, TXQ_COMMAND, 1 << txq->index);
}

static void txq_disable(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	u8 mask = 1 << txq->index;

	wrlp(mp, TXQ_COMMAND, mask << 8);
	while (rdlp(mp, TXQ_COMMAND) & mask)
		udelay(10);
}

static void txq_maybe_wake(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);

	if (netif_tx_queue_stopped(nq)) {
		__netif_tx_lock(nq, smp_processor_id());
		if (txq->tx_desc_count <= txq->tx_wake_threshold)
			netif_tx_wake_queue(nq);
		__netif_tx_unlock(nq);
	}
}

static int rxq_process(struct rx_queue *rxq, int budget)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	struct net_device_stats *stats = &mp->dev->stats;
	int rx;

	rx = 0;
	while (rx < budget && rxq->rx_desc_count) {
		struct rx_desc *rx_desc;
		unsigned int cmd_sts;
		struct sk_buff *skb;
		u16 byte_cnt;

		rx_desc = &rxq->rx_desc_area[rxq->rx_curr_desc];

		cmd_sts = rx_desc->cmd_sts;
		if (cmd_sts & BUFFER_OWNED_BY_DMA)
			break;
		rmb();

		skb = rxq->rx_skb[rxq->rx_curr_desc];
		rxq->rx_skb[rxq->rx_curr_desc] = NULL;

		rxq->rx_curr_desc++;
		if (rxq->rx_curr_desc == rxq->rx_ring_size)
			rxq->rx_curr_desc = 0;

		dma_unmap_single(mp->dev->dev.parent, rx_desc->buf_ptr,
				 rx_desc->buf_size, DMA_FROM_DEVICE);
		rxq->rx_desc_count--;
		rx++;

		mp->work_rx_refill |= 1 << rxq->index;

		byte_cnt = rx_desc->byte_cnt;

		/*
		 * Update statistics.
		 *
		 * Note that the descriptor byte count includes 2 dummy
		 * bytes automatically inserted by the hardware at the
		 * start of the packet (which we don't count), and a 4
		 * byte CRC at the end of the packet (which we do count).
		 */
		stats->rx_packets++;
		stats->rx_bytes += byte_cnt - 2;

		/*
		 * In case we received a packet without first / last bits
		 * on, or the error summary bit is set, the packet needs
		 * to be dropped.
		 */
		if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC | ERROR_SUMMARY))
			!= (RX_FIRST_DESC | RX_LAST_DESC))
			goto err;

		/*
		 * The -4 is for the CRC in the trailer of the
		 * received packet
		 */
		skb_put(skb, byte_cnt - 2 - 4);

		if (cmd_sts & LAYER_4_CHECKSUM_OK)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->protocol = eth_type_trans(skb, mp->dev);

		napi_gro_receive(&mp->napi, skb);

		continue;

err:
		stats->rx_dropped++;

		if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
			(RX_FIRST_DESC | RX_LAST_DESC)) {
			if (net_ratelimit())
				netdev_err(mp->dev,
					   "received packet spanning multiple descriptors\n");
		}

		if (cmd_sts & ERROR_SUMMARY)
			stats->rx_errors++;

		dev_kfree_skb(skb);
	}

	if (rx < budget)
		mp->work_rx &= ~(1 << rxq->index);

	return rx;
}

static int rxq_refill(struct rx_queue *rxq, int budget)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	int refilled;

	refilled = 0;
	while (refilled < budget && rxq->rx_desc_count < rxq->rx_ring_size) {
		struct sk_buff *skb;
		int rx;
		struct rx_desc *rx_desc;
		int size;

		skb = netdev_alloc_skb(mp->dev, mp->skb_size);

		if (skb == NULL) {
			mp->oom = 1;
			goto oom;
		}

		if (SKB_DMA_REALIGN)
			skb_reserve(skb, SKB_DMA_REALIGN);

		refilled++;
		rxq->rx_desc_count++;

		rx = rxq->rx_used_desc++;
		if (rxq->rx_used_desc == rxq->rx_ring_size)
			rxq->rx_used_desc = 0;

		rx_desc = rxq->rx_desc_area + rx;

		size = skb_end_pointer(skb) - skb->data;
		rx_desc->buf_ptr = dma_map_single(mp->dev->dev.parent,
						  skb->data, size,
						  DMA_FROM_DEVICE);
		rx_desc->buf_size = size;
		rxq->rx_skb[rx] = skb;
		wmb();
		rx_desc->cmd_sts = BUFFER_OWNED_BY_DMA | RX_ENABLE_INTERRUPT;
		wmb();

		/*
		 * The hardware automatically prepends 2 bytes of
		 * dummy data to each received packet, so that the
		 * IP header ends up 16-byte aligned.
		 */
		skb_reserve(skb, 2);
	}

	if (refilled < budget)
		mp->work_rx_refill &= ~(1 << rxq->index);

oom:
	return refilled;
}


/* tx ***********************************************************************/
static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
{
	int frag;

	for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
		const skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];

		if (skb_frag_size(fragp) <= 8 && fragp->page_offset & 7)
			return 1;
	}

	return 0;
}

static inline __be16 sum16_as_be(__sum16 sum)
{
	return (__force __be16)sum;
}
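
/*
 * Hardware TX checksum generation is only used when the payload fits under
 * tx_csum_limit and the Ethernet header is extended by 0, 4, 8 or 12 bytes
 * of VLAN tagging (the "tag_bytes & ~12" test below); otherwise the
 * checksum is computed in software via skb_checksum_help() and the frame
 * is sent without offload.
 */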
static int skb_tx_csum(struct mv643xx_eth_private *mp, struct sk_buff *skb,
		       u16 *l4i_chk, u32 *command, int length)
{
	int ret;
	u32 cmd = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		int hdr_len;
		int tag_bytes;

		BUG_ON(skb->protocol != htons(ETH_P_IP) &&
		       skb->protocol != htons(ETH_P_8021Q));

		hdr_len = (void *)ip_hdr(skb) - (void *)skb->data;
		tag_bytes = hdr_len - ETH_HLEN;

		if (length - hdr_len > mp->shared->tx_csum_limit ||
		    unlikely(tag_bytes & ~12)) {
			ret = skb_checksum_help(skb);
			if (!ret)
				goto no_csum;
			return ret;
		}

		if (tag_bytes & 4)
			cmd |= MAC_HDR_EXTRA_4_BYTES;
		if (tag_bytes & 8)
			cmd |= MAC_HDR_EXTRA_8_BYTES;

		cmd |= GEN_TCP_UDP_CHECKSUM | GEN_TCP_UDP_CHK_FULL |
		       GEN_IP_V4_CHECKSUM |
		       ip_hdr(skb)->ihl << TX_IHL_SHIFT;

		/* TODO: Revisit this. With the usage of GEN_TCP_UDP_CHK_FULL
		 * it seems we don't need to pass the initial checksum. */
		switch (ip_hdr(skb)->protocol) {
		case IPPROTO_UDP:
			cmd |= UDP_FRAME;
			*l4i_chk = 0;
			break;
		case IPPROTO_TCP:
			*l4i_chk = 0;
			break;
		default:
			WARN(1, "protocol not supported");
		}
	} else {
no_csum:
		/* Errata BTS #50, IHL must be 5 if no HW checksum */
		cmd |= 5 << TX_IHL_SHIFT;
	}
	*command = cmd;
	return 0;
}

static inline int
txq_put_data_tso(struct net_device *dev, struct tx_queue *txq,
		 struct sk_buff *skb, char *data, int length,
		 bool last_tcp, bool is_last)
{
	int tx_index;
	u32 cmd_sts;
	struct tx_desc *desc;

	tx_index = txq->tx_curr_desc++;
	if (txq->tx_curr_desc == txq->tx_ring_size)
		txq->tx_curr_desc = 0;
	desc = &txq->tx_desc_area[tx_index];
	txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;

	desc->l4i_chk = 0;
	desc->byte_cnt = length;

	if (length <= 8 && (uintptr_t)data & 0x7) {
		/* Copy unaligned small data fragment to TSO header data area */
		memcpy(txq->tso_hdrs + tx_index * TSO_HEADER_SIZE,
		       data, length);
		desc->buf_ptr = txq->tso_hdrs_dma
			+ tx_index * TSO_HEADER_SIZE;
	} else {
		/* Alignment is okay, map buffer and hand off to hardware */
		txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;
		desc->buf_ptr = dma_map_single(dev->dev.parent, data,
					       length, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev->dev.parent,
					       desc->buf_ptr))) {
			WARN(1, "dma_map_single failed!\n");
			return -ENOMEM;
		}
	}

	cmd_sts = BUFFER_OWNED_BY_DMA;
	if (last_tcp) {
		/* last descriptor in the TCP packet */
		cmd_sts |= ZERO_PADDING | TX_LAST_DESC;
		/* last descriptor in SKB */
		if (is_last)
			cmd_sts |= TX_ENABLE_INTERRUPT;
	}
	desc->cmd_sts = cmd_sts;
	return 0;
}

static inline void
txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length,
		u32 *first_cmd_sts, bool first_desc)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int tx_index;
	struct tx_desc *desc;
	int ret;
	u32 cmd_csum = 0;
	u16 l4i_chk = 0;
	u32 cmd_sts;

	tx_index = txq->tx_curr_desc;
	desc = &txq->tx_desc_area[tx_index];

	ret = skb_tx_csum(mp, skb, &l4i_chk, &cmd_csum, length);
	if (ret)
		WARN(1, "failed to prepare checksum!");

	/* Should we set this? Can't use the value from skb_tx_csum()
	 * as it's not the correct initial L4 checksum to use. */
	desc->l4i_chk = 0;

	desc->byte_cnt = hdr_len;
	desc->buf_ptr = txq->tso_hdrs_dma +
			txq->tx_curr_desc * TSO_HEADER_SIZE;
	cmd_sts = cmd_csum | BUFFER_OWNED_BY_DMA | TX_FIRST_DESC |
		  GEN_CRC;

	/* Defer updating the first command descriptor until all
	 * following descriptors have been written.
	 */
	if (first_desc)
		*first_cmd_sts = cmd_sts;
	else
		desc->cmd_sts = cmd_sts;

	txq->tx_curr_desc++;
	if (txq->tx_curr_desc == txq->tx_ring_size)
		txq->tx_curr_desc = 0;
}
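
/*
 * Software TSO: each TCP segment gets one descriptor pointing at a rebuilt
 * MAC/IP/TCP header in the per-queue tso_hdrs area, followed by one or more
 * descriptors for its payload.  The very first header descriptor's cmd_sts
 * is written last (via first_cmd_sts) so the hardware never sees a
 * half-built descriptor chain.
 */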
static int txq_submit_tso(struct tx_queue *txq, struct sk_buff *skb,
			  struct net_device *dev)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int total_len, data_left, ret;
	int desc_count = 0;
	struct tso_t tso;
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	struct tx_desc *first_tx_desc;
	u32 first_cmd_sts = 0;

	/* Count needed descriptors */
	if ((txq->tx_desc_count + tso_count_descs(skb)) >= txq->tx_ring_size) {
		netdev_dbg(dev, "not enough descriptors for TSO!\n");
		return -EBUSY;
	}

	first_tx_desc = &txq->tx_desc_area[txq->tx_curr_desc];

	/* Initialize the TSO handler, and prepare the first payload */
	tso_start(skb, &tso);

	total_len = skb->len - hdr_len;
	while (total_len > 0) {
		bool first_desc = (desc_count == 0);
		char *hdr;

		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
		total_len -= data_left;
		desc_count++;

		/* prepare packet headers: MAC + IP + TCP */
		hdr = txq->tso_hdrs + txq->tx_curr_desc * TSO_HEADER_SIZE;
		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
		txq_put_hdr_tso(skb, txq, data_left, &first_cmd_sts,
				first_desc);

		while (data_left > 0) {
			int size;
			desc_count++;

			size = min_t(int, tso.size, data_left);
			ret = txq_put_data_tso(dev, txq, skb, tso.data, size,
					       size == data_left,
					       total_len == 0);
			if (ret)
				goto err_release;
			data_left -= size;
			tso_build_data(skb, &tso, size);
		}
	}

	__skb_queue_tail(&txq->tx_skb, skb);
	skb_tx_timestamp(skb);

	/* ensure all other descriptors are written before first cmd_sts */
	wmb();
	first_tx_desc->cmd_sts = first_cmd_sts;

	/* clear TX_END status */
	mp->work_tx_end &= ~(1 << txq->index);

	/* ensure all descriptors are written before poking hardware */
	wmb();
	txq_enable(txq);
	txq->tx_desc_count += desc_count;
	return 0;

err_release:
	/* TODO: Release all used data descriptors; header descriptors must not
	 * be DMA-unmapped.
	 */
	return ret;
}

static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int frag;

	for (frag = 0; frag < nr_frags; frag++) {
		skb_frag_t *this_frag;
		int tx_index;
		struct tx_desc *desc;

		this_frag = &skb_shinfo(skb)->frags[frag];
		tx_index = txq->tx_curr_desc++;
		if (txq->tx_curr_desc == txq->tx_ring_size)
			txq->tx_curr_desc = 0;
		desc = &txq->tx_desc_area[tx_index];
		txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_PAGE;

		/*
		 * The last fragment will generate an interrupt
		 * which will free the skb on TX completion.
		 */
		if (frag == nr_frags - 1) {
			desc->cmd_sts = BUFFER_OWNED_BY_DMA |
					ZERO_PADDING | TX_LAST_DESC |
					TX_ENABLE_INTERRUPT;
		} else {
			desc->cmd_sts = BUFFER_OWNED_BY_DMA;
		}

		desc->l4i_chk = 0;
		desc->byte_cnt = skb_frag_size(this_frag);
		desc->buf_ptr = skb_frag_dma_map(mp->dev->dev.parent,
						 this_frag, 0, desc->byte_cnt,
						 DMA_TO_DEVICE);
	}
}

static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb,
			  struct net_device *dev)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int tx_index;
	struct tx_desc *desc;
	u32 cmd_sts;
	u16 l4i_chk;
	int length, ret;

	cmd_sts = 0;
	l4i_chk = 0;

	if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
		if (net_ratelimit())
			netdev_err(dev, "tx queue full?!\n");
		return -EBUSY;
	}

	ret = skb_tx_csum(mp, skb, &l4i_chk, &cmd_sts, skb->len);
	if (ret)
		return ret;
	cmd_sts |= TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA;

	tx_index = txq->tx_curr_desc++;
	if (txq->tx_curr_desc == txq->tx_ring_size)
		txq->tx_curr_desc = 0;
	desc = &txq->tx_desc_area[tx_index];
	txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;

	if (nr_frags) {
		txq_submit_frag_skb(txq, skb);
		length = skb_headlen(skb);
	} else {
		cmd_sts |= ZERO_PADDING | TX_LAST_DESC | TX_ENABLE_INTERRUPT;
		length = skb->len;
	}

	desc->l4i_chk = l4i_chk;
	desc->byte_cnt = length;
	desc->buf_ptr = dma_map_single(mp->dev->dev.parent, skb->data,
				       length, DMA_TO_DEVICE);

	__skb_queue_tail(&txq->tx_skb, skb);

	skb_tx_timestamp(skb);

	/* ensure all other descriptors are written before first cmd_sts */
	wmb();
	desc->cmd_sts = cmd_sts;

	/* clear TX_END status */
	mp->work_tx_end &= ~(1 << txq->index);

	/* ensure all descriptors are written before poking hardware */
	wmb();
	txq_enable(txq);

	txq->tx_desc_count += nr_frags + 1;

	return 0;
}

static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int length, queue, ret;
	struct tx_queue *txq;
	struct netdev_queue *nq;

	queue = skb_get_queue_mapping(skb);
	txq = mp->txq + queue;
	nq = netdev_get_tx_queue(dev, queue);

	if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
		netdev_printk(KERN_DEBUG, dev,
			      "failed to linearize skb with tiny unaligned fragment\n");
		return NETDEV_TX_BUSY;
	}

	length = skb->len;

	if (skb_is_gso(skb))
		ret = txq_submit_tso(txq, skb, dev);
	else
		ret = txq_submit_skb(txq, skb, dev);
	if (!ret) {
		txq->tx_bytes += length;
		txq->tx_packets++;

		if (txq->tx_desc_count >= txq->tx_stop_threshold)
			netif_tx_stop_queue(nq);
	} else {
		txq->tx_dropped++;
		dev_kfree_skb_any(skb);
	}

	return NETDEV_TX_OK;
}


/* tx napi ******************************************************************/
static void txq_kick(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
	u32 hw_desc_ptr;
	u32 expected_ptr;

	__netif_tx_lock(nq, smp_processor_id());

	if (rdlp(mp, TXQ_COMMAND) & (1 << txq->index))
		goto out;

	hw_desc_ptr = rdlp(mp, TXQ_CURRENT_DESC_PTR(txq->index));
	expected_ptr = (u32)txq->tx_desc_dma +
			txq->tx_curr_desc * sizeof(struct tx_desc);

	if (hw_desc_ptr != expected_ptr)
		txq_enable(txq);

out:
	__netif_tx_unlock(nq);

	mp->work_tx_end &= ~(1 << txq->index);
}

static int txq_reclaim(struct tx_queue *txq, int budget, int force)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
	int reclaimed;

	__netif_tx_lock_bh(nq);

	reclaimed = 0;
	while (reclaimed < budget && txq->tx_desc_count > 0) {
		int tx_index;
		struct tx_desc *desc;
		u32 cmd_sts;
		char desc_dma_map;

		tx_index = txq->tx_used_desc;
		desc = &txq->tx_desc_area[tx_index];
		desc_dma_map = txq->tx_desc_mapping[tx_index];

		cmd_sts = desc->cmd_sts;

		if (cmd_sts & BUFFER_OWNED_BY_DMA) {
			if (!force)
				break;
			desc->cmd_sts = cmd_sts & ~BUFFER_OWNED_BY_DMA;
		}

		txq->tx_used_desc = tx_index + 1;
		if (txq->tx_used_desc == txq->tx_ring_size)
			txq->tx_used_desc = 0;

		reclaimed++;
		txq->tx_desc_count--;

		if (!IS_TSO_HEADER(txq, desc->buf_ptr)) {
			if (desc_dma_map == DESC_DMA_MAP_PAGE)
				dma_unmap_page(mp->dev->dev.parent,
					       desc->buf_ptr,
					       desc->byte_cnt,
					       DMA_TO_DEVICE);
			else
				dma_unmap_single(mp->dev->dev.parent,
						 desc->buf_ptr,
						 desc->byte_cnt,
						 DMA_TO_DEVICE);
		}

		if (cmd_sts & TX_ENABLE_INTERRUPT) {
			struct sk_buff *skb = __skb_dequeue(&txq->tx_skb);

			if (!WARN_ON(!skb))
				dev_consume_skb_any(skb);
		}

		if (cmd_sts & ERROR_SUMMARY) {
			netdev_info(mp->dev, "tx error\n");
			mp->dev->stats.tx_errors++;
		}
	}

	__netif_tx_unlock_bh(nq);

	if (reclaimed < budget)
		mp->work_tx &= ~(1 << txq->index);

	return reclaimed;
}


/* tx rate control **********************************************************/
/*
 * Set total maximum TX rate (shared by all TX queues for this port)
 * to 'rate' bits per second, with a maximum burst of 'burst' bytes.
 */
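/*
 * For example, with a (hypothetical) 133 MHz t_clk, a rate of 500 Mbit/s
 * yields token_rate = ((500000000 / 1000) * 64) / (133000000 / 1000)
 * = 32000000 / 133000 ~= 240, well below the 1023 register limit.
 */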
static void tx_set_rate(struct mv643xx_eth_private *mp, int rate, int burst)
{
	int token_rate;
	int mtu;
	int bucket_size;

	token_rate = ((rate / 1000) * 64) / (mp->t_clk / 1000);
	if (token_rate > 1023)
		token_rate = 1023;

	mtu = (mp->dev->mtu + 255) >> 8;
	if (mtu > 63)
		mtu = 63;

	bucket_size = (burst + 255) >> 8;
	if (bucket_size > 65535)
		bucket_size = 65535;

	switch (mp->shared->tx_bw_control) {
	case TX_BW_CONTROL_OLD_LAYOUT:
		wrlp(mp, TX_BW_RATE, token_rate);
		wrlp(mp, TX_BW_MTU, mtu);
		wrlp(mp, TX_BW_BURST, bucket_size);
		break;
	case TX_BW_CONTROL_NEW_LAYOUT:
		wrlp(mp, TX_BW_RATE_MOVED, token_rate);
		wrlp(mp, TX_BW_MTU_MOVED, mtu);
		wrlp(mp, TX_BW_BURST_MOVED, bucket_size);
		break;
	}
}

static void txq_set_rate(struct tx_queue *txq, int rate, int burst)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int token_rate;
	int bucket_size;

	token_rate = ((rate / 1000) * 64) / (mp->t_clk / 1000);
	if (token_rate > 1023)
		token_rate = 1023;

	bucket_size = (burst + 255) >> 8;
	if (bucket_size > 65535)
		bucket_size = 65535;

	wrlp(mp, TXQ_BW_TOKENS(txq->index), token_rate << 14);
	wrlp(mp, TXQ_BW_CONF(txq->index), (bucket_size << 10) | token_rate);
}

static void txq_set_fixed_prio_mode(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int off;
	u32 val;

	/*
	 * Turn on fixed priority mode.
	 */
	off = 0;
	switch (mp->shared->tx_bw_control) {
	case TX_BW_CONTROL_OLD_LAYOUT:
		off = TXQ_FIX_PRIO_CONF;
		break;
	case TX_BW_CONTROL_NEW_LAYOUT:
		off = TXQ_FIX_PRIO_CONF_MOVED;
		break;
	}

	if (off) {
		val = rdlp(mp, off);
		val |= 1 << txq->index;
		wrlp(mp, off, val);
	}
}


/* mii management interface *************************************************/
static void mv643xx_eth_adjust_link(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	u32 pscr = rdlp(mp, PORT_SERIAL_CONTROL);
	u32 autoneg_disable = FORCE_LINK_PASS |
			      DISABLE_AUTO_NEG_SPEED_GMII |
			      DISABLE_AUTO_NEG_FOR_FLOW_CTRL |
			      DISABLE_AUTO_NEG_FOR_DUPLEX;

	if (dev->phydev->autoneg == AUTONEG_ENABLE) {
		/* enable auto negotiation */
		pscr &= ~autoneg_disable;
		goto out_write;
	}

	pscr |= autoneg_disable;

	if (dev->phydev->speed == SPEED_1000) {
		/* force gigabit, half duplex not supported */
		pscr |= SET_GMII_SPEED_TO_1000;
		pscr |= SET_FULL_DUPLEX_MODE;
		goto out_write;
	}

	pscr &= ~SET_GMII_SPEED_TO_1000;

	if (dev->phydev->speed == SPEED_100)
		pscr |= SET_MII_SPEED_TO_100;
	else
		pscr &= ~SET_MII_SPEED_TO_100;

	if (dev->phydev->duplex == DUPLEX_FULL)
		pscr |= SET_FULL_DUPLEX_MODE;
	else
		pscr &= ~SET_FULL_DUPLEX_MODE;

out_write:
	wrlp(mp, PORT_SERIAL_CONTROL, pscr);
}


/* statistics ***************************************************************/
static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	unsigned long tx_packets = 0;
	unsigned long tx_bytes = 0;
	unsigned long tx_dropped = 0;
	int i;

	for (i = 0; i < mp->txq_count; i++) {
		struct tx_queue *txq = mp->txq + i;

		tx_packets += txq->tx_packets;
		tx_bytes += txq->tx_bytes;
		tx_dropped += txq->tx_dropped;
	}

	stats->tx_packets = tx_packets;
	stats->tx_bytes = tx_bytes;
	stats->tx_dropped = tx_dropped;

	return stats;
}

static inline u32 mib_read(struct mv643xx_eth_private *mp, int offset)
{
	return rdl(mp, MIB_COUNTERS(mp->port_num) + offset);
}
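
/*
 * The hardware MIB counters are clear-on-read, so "clearing" them just
 * means reading every counter register once and discarding the result;
 * that is also why mib_counters_update() accumulates with "+=".
 */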
static void mib_counters_clear(struct mv643xx_eth_private *mp)
{
	int i;

	for (i = 0; i < 0x80; i += 4)
		mib_read(mp, i);

	/* Clear non MIB hw counters also */
	rdlp(mp, RX_DISCARD_FRAME_CNT);
	rdlp(mp, RX_OVERRUN_FRAME_CNT);
}

static void mib_counters_update(struct mv643xx_eth_private *mp)
{
	struct mib_counters *p = &mp->mib_counters;

	spin_lock_bh(&mp->mib_counters_lock);
	p->good_octets_received += mib_read(mp, 0x00);
	p->bad_octets_received += mib_read(mp, 0x08);
	p->internal_mac_transmit_err += mib_read(mp, 0x0c);
	p->good_frames_received += mib_read(mp, 0x10);
	p->bad_frames_received += mib_read(mp, 0x14);
	p->broadcast_frames_received += mib_read(mp, 0x18);
	p->multicast_frames_received += mib_read(mp, 0x1c);
	p->frames_64_octets += mib_read(mp, 0x20);
	p->frames_65_to_127_octets += mib_read(mp, 0x24);
	p->frames_128_to_255_octets += mib_read(mp, 0x28);
	p->frames_256_to_511_octets += mib_read(mp, 0x2c);
	p->frames_512_to_1023_octets += mib_read(mp, 0x30);
	p->frames_1024_to_max_octets += mib_read(mp, 0x34);
	p->good_octets_sent += mib_read(mp, 0x38);
	p->good_frames_sent += mib_read(mp, 0x40);
	p->excessive_collision += mib_read(mp, 0x44);
	p->multicast_frames_sent += mib_read(mp, 0x48);
	p->broadcast_frames_sent += mib_read(mp, 0x4c);
	p->unrec_mac_control_received += mib_read(mp, 0x50);
	p->fc_sent += mib_read(mp, 0x54);
	p->good_fc_received += mib_read(mp, 0x58);
	p->bad_fc_received += mib_read(mp, 0x5c);
	p->undersize_received += mib_read(mp, 0x60);
	p->fragments_received += mib_read(mp, 0x64);
	p->oversize_received += mib_read(mp, 0x68);
	p->jabber_received += mib_read(mp, 0x6c);
	p->mac_receive_error += mib_read(mp, 0x70);
	p->bad_crc_event += mib_read(mp, 0x74);
	p->collision += mib_read(mp, 0x78);
	p->late_collision += mib_read(mp, 0x7c);

	/* Non MIB hardware counters */
	p->rx_discard += rdlp(mp, RX_DISCARD_FRAME_CNT);
	p->rx_overrun += rdlp(mp, RX_OVERRUN_FRAME_CNT);
	spin_unlock_bh(&mp->mib_counters_lock);
}

static void mib_counters_timer_wrapper(struct timer_list *t)
{
	struct mv643xx_eth_private *mp = from_timer(mp, t, mib_counters_timer);

	mib_counters_update(mp);
	mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
}


/* interrupt coalescing *****************************************************/
/*
 * Hardware coalescing parameters are set in units of 64 t_clk
 * cycles.  I.e.:
 *
 *	coal_delay_in_usec = 64000000 * register_value / t_clk_rate
 *
 *	register_value = coal_delay_in_usec * t_clk_rate / 64000000
 *
 * In the ->set*() methods, we round the computed register value
 * to the nearest integer.
 */
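/*
 * For example, with a (hypothetical) 250 MHz t_clk, a requested delay of
 * 100 usec gives register_value = 100 * 250000000 / 64000000 ~= 391, and
 * reading 391 back yields 64000000 * 391 / 250000000 ~= 100 usec.
 */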
static unsigned int get_rx_coal(struct mv643xx_eth_private *mp)
{
	u32 val = rdlp(mp, SDMA_CONFIG);
	u64 temp;

	if (mp->shared->extended_rx_coal_limit)
		temp = ((val & 0x02000000) >> 10) | ((val & 0x003fff80) >> 7);
	else
		temp = (val & 0x003fff00) >> 8;

	temp *= 64000000;
	temp += mp->t_clk / 2;
	do_div(temp, mp->t_clk);

	return (unsigned int)temp;
}

static void set_rx_coal(struct mv643xx_eth_private *mp, unsigned int usec)
{
	u64 temp;
	u32 val;

	temp = (u64)usec * mp->t_clk;
	temp += 31999999;
	do_div(temp, 64000000);

	val = rdlp(mp, SDMA_CONFIG);
	if (mp->shared->extended_rx_coal_limit) {
		if (temp > 0xffff)
			temp = 0xffff;
		val &= ~0x023fff80;
		val |= (temp & 0x8000) << 10;
		val |= (temp & 0x7fff) << 7;
	} else {
		if (temp > 0x3fff)
			temp = 0x3fff;
		val &= ~0x003fff00;
		val |= (temp & 0x3fff) << 8;
	}
	wrlp(mp, SDMA_CONFIG, val);
}

static unsigned int get_tx_coal(struct mv643xx_eth_private *mp)
{
	u64 temp;

	temp = (rdlp(mp, TX_FIFO_URGENT_THRESHOLD) & 0x3fff0) >> 4;
	temp *= 64000000;
	temp += mp->t_clk / 2;
	do_div(temp, mp->t_clk);

	return (unsigned int)temp;
}

static void set_tx_coal(struct mv643xx_eth_private *mp, unsigned int usec)
{
	u64 temp;

	temp = (u64)usec * mp->t_clk;
	temp += 31999999;
	do_div(temp, 64000000);

	if (temp > 0x3fff)
		temp = 0x3fff;

	wrlp(mp, TX_FIFO_URGENT_THRESHOLD, temp << 4);
}


/* ethtool ******************************************************************/
struct mv643xx_eth_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int netdev_off;
	int mp_off;
};

#define SSTAT(m)						\
	{ #m, FIELD_SIZEOF(struct net_device_stats, m),		\
	  offsetof(struct net_device, stats.m), -1 }

#define MIBSTAT(m)						\
	{ #m, FIELD_SIZEOF(struct mib_counters, m),		\
	  -1, offsetof(struct mv643xx_eth_private, mib_counters.m) }

static const struct mv643xx_eth_stats mv643xx_eth_stats[] = {
	SSTAT(rx_packets),
	SSTAT(tx_packets),
	SSTAT(rx_bytes),
	SSTAT(tx_bytes),
	SSTAT(rx_errors),
	SSTAT(tx_errors),
	SSTAT(rx_dropped),
	SSTAT(tx_dropped),
	MIBSTAT(good_octets_received),
	MIBSTAT(bad_octets_received),
	MIBSTAT(internal_mac_transmit_err),
	MIBSTAT(good_frames_received),
	MIBSTAT(bad_frames_received),
	MIBSTAT(broadcast_frames_received),
	MIBSTAT(multicast_frames_received),
	MIBSTAT(frames_64_octets),
	MIBSTAT(frames_65_to_127_octets),
	MIBSTAT(frames_128_to_255_octets),
	MIBSTAT(frames_256_to_511_octets),
	MIBSTAT(frames_512_to_1023_octets),
	MIBSTAT(frames_1024_to_max_octets),
	MIBSTAT(good_octets_sent),
	MIBSTAT(good_frames_sent),
	MIBSTAT(excessive_collision),
	MIBSTAT(multicast_frames_sent),
	MIBSTAT(broadcast_frames_sent),
	MIBSTAT(unrec_mac_control_received),
	MIBSTAT(fc_sent),
	MIBSTAT(good_fc_received),
	MIBSTAT(bad_fc_received),
	MIBSTAT(undersize_received),
	MIBSTAT(fragments_received),
	MIBSTAT(oversize_received),
	MIBSTAT(jabber_received),
	MIBSTAT(mac_receive_error),
	MIBSTAT(bad_crc_event),
	MIBSTAT(collision),
	MIBSTAT(late_collision),
	MIBSTAT(rx_discard),
	MIBSTAT(rx_overrun),
};

static int
mv643xx_eth_get_link_ksettings_phy(struct mv643xx_eth_private *mp,
				   struct ethtool_link_ksettings *cmd)
{
	struct net_device *dev = mp->dev;
	u32 supported, advertising;

	phy_ethtool_ksettings_get(dev->phydev, cmd);

	/*
	 * The MAC does not support 1000baseT_Half.
	 */
	ethtool_convert_link_mode_to_legacy_u32(&supported,
						cmd->link_modes.supported);
	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);
	supported &= ~SUPPORTED_1000baseT_Half;
	advertising &= ~ADVERTISED_1000baseT_Half;
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	return 0;
}

static int
mv643xx_eth_get_link_ksettings_phyless(struct mv643xx_eth_private *mp,
				       struct ethtool_link_ksettings *cmd)
{
	u32 port_status;
	u32 supported, advertising;

	port_status = rdlp(mp, PORT_STATUS);

	supported = SUPPORTED_MII;
	advertising = ADVERTISED_MII;
	switch (port_status & PORT_SPEED_MASK) {
	case PORT_SPEED_10:
		cmd->base.speed = SPEED_10;
		break;
	case PORT_SPEED_100:
		cmd->base.speed = SPEED_100;
		break;
	case PORT_SPEED_1000:
		cmd->base.speed = SPEED_1000;
		break;
	default:
		cmd->base.speed = -1;
		break;
	}
	cmd->base.duplex = (port_status & FULL_DUPLEX) ?
		DUPLEX_FULL : DUPLEX_HALF;
	cmd->base.port = PORT_MII;
	cmd->base.phy_address = 0;
	cmd->base.autoneg = AUTONEG_DISABLE;

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	return 0;
}

static void
mv643xx_eth_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	wol->supported = 0;
	wol->wolopts = 0;
	if (dev->phydev)
		phy_ethtool_get_wol(dev->phydev, wol);
}

static int
mv643xx_eth_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	int err;

	if (!dev->phydev)
		return -EOPNOTSUPP;

	err = phy_ethtool_set_wol(dev->phydev, wol);
	/* Given that mv643xx_eth works without the marvell-specific PHY driver,
	 * this debugging hint is useful to have.
	 */
	if (err == -EOPNOTSUPP)
		netdev_info(dev, "The PHY does not support set_wol, was CONFIG_MARVELL_PHY enabled?\n");
	return err;
}

static int
mv643xx_eth_get_link_ksettings(struct net_device *dev,
			       struct ethtool_link_ksettings *cmd)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	if (dev->phydev)
		return mv643xx_eth_get_link_ksettings_phy(mp, cmd);
	else
		return mv643xx_eth_get_link_ksettings_phyless(mp, cmd);
}

static int
mv643xx_eth_set_link_ksettings(struct net_device *dev,
			       const struct ethtool_link_ksettings *cmd)
{
	struct ethtool_link_ksettings c = *cmd;
  1343. u32 advertising;
  1344. int ret;
  1345. if (!dev->phydev)
  1346. return -EINVAL;
  1347. /*
  1348. * The MAC does not support 1000baseT_Half.
  1349. */
  1350. ethtool_convert_link_mode_to_legacy_u32(&advertising,
  1351. c.link_modes.advertising);
  1352. advertising &= ~ADVERTISED_1000baseT_Half;
  1353. ethtool_convert_legacy_u32_to_link_mode(c.link_modes.advertising,
  1354. advertising);
  1355. ret = phy_ethtool_ksettings_set(dev->phydev, &c);
  1356. if (!ret)
  1357. mv643xx_eth_adjust_link(dev);
  1358. return ret;
  1359. }
  1360. static void mv643xx_eth_get_drvinfo(struct net_device *dev,
  1361. struct ethtool_drvinfo *drvinfo)
  1362. {
  1363. strlcpy(drvinfo->driver, mv643xx_eth_driver_name,
  1364. sizeof(drvinfo->driver));
  1365. strlcpy(drvinfo->version, mv643xx_eth_driver_version,
  1366. sizeof(drvinfo->version));
  1367. strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
  1368. strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
  1369. }
  1370. static int
  1371. mv643xx_eth_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
  1372. {
  1373. struct mv643xx_eth_private *mp = netdev_priv(dev);
  1374. ec->rx_coalesce_usecs = get_rx_coal(mp);
  1375. ec->tx_coalesce_usecs = get_tx_coal(mp);
  1376. return 0;
  1377. }
  1378. static int
  1379. mv643xx_eth_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
  1380. {
  1381. struct mv643xx_eth_private *mp = netdev_priv(dev);
  1382. set_rx_coal(mp, ec->rx_coalesce_usecs);
  1383. set_tx_coal(mp, ec->tx_coalesce_usecs);
  1384. return 0;
  1385. }
  1386. static void
  1387. mv643xx_eth_get_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
  1388. {
  1389. struct mv643xx_eth_private *mp = netdev_priv(dev);
  1390. er->rx_max_pending = 4096;
  1391. er->tx_max_pending = 4096;
  1392. er->rx_pending = mp->rx_ring_size;
  1393. er->tx_pending = mp->tx_ring_size;
  1394. }
  1395. static int
  1396. mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
  1397. {
  1398. struct mv643xx_eth_private *mp = netdev_priv(dev);
  1399. if (er->rx_mini_pending || er->rx_jumbo_pending)
  1400. return -EINVAL;
  1401. mp->rx_ring_size = er->rx_pending < 4096 ? er->rx_pending : 4096;
  1402. mp->tx_ring_size = clamp_t(unsigned int, er->tx_pending,
  1403. MV643XX_MAX_SKB_DESCS * 2, 4096);
  1404. if (mp->tx_ring_size != er->tx_pending)
  1405. netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
  1406. mp->tx_ring_size, er->tx_pending);
  1407. if (netif_running(dev)) {
  1408. mv643xx_eth_stop(dev);
  1409. if (mv643xx_eth_open(dev)) {
  1410. netdev_err(dev,
  1411. "fatal error on re-opening device after ring param change\n");
  1412. return -ENOMEM;
  1413. }
  1414. }
  1415. return 0;
  1416. }
  1417. static int
  1418. mv643xx_eth_set_features(struct net_device *dev, netdev_features_t features)
  1419. {
  1420. struct mv643xx_eth_private *mp = netdev_priv(dev);
  1421. bool rx_csum = features & NETIF_F_RXCSUM;
  1422. wrlp(mp, PORT_CONFIG, rx_csum ? 0x02000000 : 0x00000000);
  1423. return 0;
  1424. }
  1425. static void mv643xx_eth_get_strings(struct net_device *dev,
  1426. uint32_t stringset, uint8_t *data)
  1427. {
  1428. int i;
  1429. if (stringset == ETH_SS_STATS) {
  1430. for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
  1431. memcpy(data + i * ETH_GSTRING_LEN,
  1432. mv643xx_eth_stats[i].stat_string,
  1433. ETH_GSTRING_LEN);
  1434. }
  1435. }
  1436. }
  1437. static void mv643xx_eth_get_ethtool_stats(struct net_device *dev,
  1438. struct ethtool_stats *stats,
  1439. uint64_t *data)
  1440. {
  1441. struct mv643xx_eth_private *mp = netdev_priv(dev);
  1442. int i;
  1443. mv643xx_eth_get_stats(dev);
  1444. mib_counters_update(mp);
  1445. for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
  1446. const struct mv643xx_eth_stats *stat;
  1447. void *p;
  1448. stat = mv643xx_eth_stats + i;
  1449. if (stat->netdev_off >= 0)
  1450. p = ((void *)mp->dev) + stat->netdev_off;
  1451. else
  1452. p = ((void *)mp) + stat->mp_off;
  1453. data[i] = (stat->sizeof_stat == 8) ?
  1454. *(uint64_t *)p : *(uint32_t *)p;
  1455. }
  1456. }
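
/*
 * Note on the stats plumbing above: each mv643xx_eth_stats[] entry carries
 * either a byte offset into struct net_device (netdev_off >= 0, filled in
 * by SSTAT()) or a byte offset into struct mv643xx_eth_private (mp_off,
 * filled in by MIBSTAT()), plus the field width.  The loop in
 * mv643xx_eth_get_ethtool_stats() adds the offset to the matching base
 * pointer and copies 32 or 64 bits, so exposing a new counter only takes
 * one additional SSTAT()/MIBSTAT() line in the table.
 */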
  1457. static int mv643xx_eth_get_sset_count(struct net_device *dev, int sset)
  1458. {
  1459. if (sset == ETH_SS_STATS)
  1460. return ARRAY_SIZE(mv643xx_eth_stats);
  1461. return -EOPNOTSUPP;
  1462. }
  1463. static const struct ethtool_ops mv643xx_eth_ethtool_ops = {
  1464. .get_drvinfo = mv643xx_eth_get_drvinfo,
  1465. .nway_reset = phy_ethtool_nway_reset,
  1466. .get_link = ethtool_op_get_link,
  1467. .get_coalesce = mv643xx_eth_get_coalesce,
  1468. .set_coalesce = mv643xx_eth_set_coalesce,
  1469. .get_ringparam = mv643xx_eth_get_ringparam,
  1470. .set_ringparam = mv643xx_eth_set_ringparam,
  1471. .get_strings = mv643xx_eth_get_strings,
  1472. .get_ethtool_stats = mv643xx_eth_get_ethtool_stats,
  1473. .get_sset_count = mv643xx_eth_get_sset_count,
  1474. .get_ts_info = ethtool_op_get_ts_info,
  1475. .get_wol = mv643xx_eth_get_wol,
  1476. .set_wol = mv643xx_eth_set_wol,
  1477. .get_link_ksettings = mv643xx_eth_get_link_ksettings,
  1478. .set_link_ksettings = mv643xx_eth_set_link_ksettings,
  1479. };
/* address handling *********************************************************/
static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *addr)
{
        unsigned int mac_h = rdlp(mp, MAC_ADDR_HIGH);
        unsigned int mac_l = rdlp(mp, MAC_ADDR_LOW);

        addr[0] = (mac_h >> 24) & 0xff;
        addr[1] = (mac_h >> 16) & 0xff;
        addr[2] = (mac_h >> 8) & 0xff;
        addr[3] = mac_h & 0xff;
        addr[4] = (mac_l >> 8) & 0xff;
        addr[5] = mac_l & 0xff;
}

static void uc_addr_set(struct mv643xx_eth_private *mp, unsigned char *addr)
{
        wrlp(mp, MAC_ADDR_HIGH,
             (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3]);
        wrlp(mp, MAC_ADDR_LOW, (addr[4] << 8) | addr[5]);
}

static u32 uc_addr_filter_mask(struct net_device *dev)
{
        struct netdev_hw_addr *ha;
        u32 nibbles;

        if (dev->flags & IFF_PROMISC)
                return 0;

        nibbles = 1 << (dev->dev_addr[5] & 0x0f);
        netdev_for_each_uc_addr(ha, dev) {
                if (memcmp(dev->dev_addr, ha->addr, 5))
                        return 0;
                if ((dev->dev_addr[5] ^ ha->addr[5]) & 0xf0)
                        return 0;

                nibbles |= 1 << (ha->addr[5] & 0x0f);
        }

        return nibbles;
}
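
/*
 * Example of the unicast filter mask: the hardware can only match
 * secondary unicast addresses that differ from dev_addr in the low nibble
 * of the last byte.  If dev_addr ends in ...:a3 and one extra address
 * ending in ...:a7 is configured, uc_addr_filter_mask() returns
 * (1 << 3) | (1 << 7) = 0x88; any address outside that pattern forces a
 * return of 0, and the caller below falls back to unicast promiscuous
 * mode.
 */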
  1514. static void mv643xx_eth_program_unicast_filter(struct net_device *dev)
  1515. {
  1516. struct mv643xx_eth_private *mp = netdev_priv(dev);
  1517. u32 port_config;
  1518. u32 nibbles;
  1519. int i;
  1520. uc_addr_set(mp, dev->dev_addr);
  1521. port_config = rdlp(mp, PORT_CONFIG) & ~UNICAST_PROMISCUOUS_MODE;
  1522. nibbles = uc_addr_filter_mask(dev);
  1523. if (!nibbles) {
  1524. port_config |= UNICAST_PROMISCUOUS_MODE;
  1525. nibbles = 0xffff;
  1526. }
  1527. for (i = 0; i < 16; i += 4) {
  1528. int off = UNICAST_TABLE(mp->port_num) + i;
  1529. u32 v;
  1530. v = 0;
  1531. if (nibbles & 1)
  1532. v |= 0x00000001;
  1533. if (nibbles & 2)
  1534. v |= 0x00000100;
  1535. if (nibbles & 4)
  1536. v |= 0x00010000;
  1537. if (nibbles & 8)
  1538. v |= 0x01000000;
  1539. nibbles >>= 4;
  1540. wrl(mp, off, v);
  1541. }
  1542. wrlp(mp, PORT_CONFIG, port_config);
  1543. }
static int addr_crc(unsigned char *addr)
{
        int crc = 0;
        int i;

        for (i = 0; i < 6; i++) {
                int j;

                crc = (crc ^ addr[i]) << 8;
                for (j = 7; j >= 0; j--) {
                        if (crc & (0x100 << j))
                                crc ^= 0x107 << j;
                }
        }

        return crc;
}
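
/*
 * addr_crc() computes an 8-bit CRC over the 6-byte MAC address using the
 * polynomial 0x107 (x^8 + x^2 + x + 1); the result is used below as an
 * index into the 256-entry "other" multicast hash table.
 */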
  1558. static void mv643xx_eth_program_multicast_filter(struct net_device *dev)
  1559. {
  1560. struct mv643xx_eth_private *mp = netdev_priv(dev);
  1561. u32 *mc_spec;
  1562. u32 *mc_other;
  1563. struct netdev_hw_addr *ha;
  1564. int i;
  1565. if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI))
  1566. goto promiscuous;
  1567. /* Allocate both mc_spec and mc_other tables */
  1568. mc_spec = kcalloc(128, sizeof(u32), GFP_ATOMIC);
  1569. if (!mc_spec)
  1570. goto promiscuous;
  1571. mc_other = &mc_spec[64];
  1572. netdev_for_each_mc_addr(ha, dev) {
  1573. u8 *a = ha->addr;
  1574. u32 *table;
  1575. u8 entry;
  1576. if (memcmp(a, "\x01\x00\x5e\x00\x00", 5) == 0) {
  1577. table = mc_spec;
  1578. entry = a[5];
  1579. } else {
  1580. table = mc_other;
  1581. entry = addr_crc(a);
  1582. }
  1583. table[entry >> 2] |= 1 << (8 * (entry & 3));
  1584. }
  1585. for (i = 0; i < 64; i++) {
  1586. wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i * sizeof(u32),
  1587. mc_spec[i]);
  1588. wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i * sizeof(u32),
  1589. mc_other[i]);
  1590. }
  1591. kfree(mc_spec);
  1592. return;
  1593. promiscuous:
  1594. for (i = 0; i < 64; i++) {
  1595. wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i * sizeof(u32),
  1596. 0x01010101u);
  1597. wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i * sizeof(u32),
  1598. 0x01010101u);
  1599. }
  1600. }
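
/*
 * Multicast filtering summary: addresses of the form 01:00:5e:00:00:xx
 * are matched exactly through the 256-entry "special" table indexed by
 * the last address byte; every other multicast address is hashed with
 * addr_crc() into the "other" table.  If the table allocation fails, or
 * IFF_PROMISC/IFF_ALLMULTI is set, both tables are filled with
 * 0x01010101 so that all multicast frames are accepted.
 */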
  1601. static void mv643xx_eth_set_rx_mode(struct net_device *dev)
  1602. {
  1603. mv643xx_eth_program_unicast_filter(dev);
  1604. mv643xx_eth_program_multicast_filter(dev);
  1605. }
  1606. static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr)
  1607. {
  1608. struct sockaddr *sa = addr;
  1609. if (!is_valid_ether_addr(sa->sa_data))
  1610. return -EADDRNOTAVAIL;
  1611. memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
  1612. netif_addr_lock_bh(dev);
  1613. mv643xx_eth_program_unicast_filter(dev);
  1614. netif_addr_unlock_bh(dev);
  1615. return 0;
  1616. }
  1617. /* rx/tx queue initialisation ***********************************************/
  1618. static int rxq_init(struct mv643xx_eth_private *mp, int index)
  1619. {
  1620. struct rx_queue *rxq = mp->rxq + index;
  1621. struct rx_desc *rx_desc;
  1622. int size;
  1623. int i;
  1624. rxq->index = index;
  1625. rxq->rx_ring_size = mp->rx_ring_size;
  1626. rxq->rx_desc_count = 0;
  1627. rxq->rx_curr_desc = 0;
  1628. rxq->rx_used_desc = 0;
  1629. size = rxq->rx_ring_size * sizeof(struct rx_desc);
  1630. if (index == 0 && size <= mp->rx_desc_sram_size) {
  1631. rxq->rx_desc_area = ioremap(mp->rx_desc_sram_addr,
  1632. mp->rx_desc_sram_size);
  1633. rxq->rx_desc_dma = mp->rx_desc_sram_addr;
  1634. } else {
  1635. rxq->rx_desc_area = dma_alloc_coherent(mp->dev->dev.parent,
  1636. size, &rxq->rx_desc_dma,
  1637. GFP_KERNEL);
  1638. }
  1639. if (rxq->rx_desc_area == NULL) {
  1640. netdev_err(mp->dev,
  1641. "can't allocate rx ring (%d bytes)\n", size);
  1642. goto out;
  1643. }
  1644. memset(rxq->rx_desc_area, 0, size);
  1645. rxq->rx_desc_area_size = size;
  1646. rxq->rx_skb = kcalloc(rxq->rx_ring_size, sizeof(*rxq->rx_skb),
  1647. GFP_KERNEL);
  1648. if (rxq->rx_skb == NULL)
  1649. goto out_free;
  1650. rx_desc = rxq->rx_desc_area;
  1651. for (i = 0; i < rxq->rx_ring_size; i++) {
  1652. int nexti;
  1653. nexti = i + 1;
  1654. if (nexti == rxq->rx_ring_size)
  1655. nexti = 0;
  1656. rx_desc[i].next_desc_ptr = rxq->rx_desc_dma +
  1657. nexti * sizeof(struct rx_desc);
  1658. }
  1659. return 0;
  1660. out_free:
  1661. if (index == 0 && size <= mp->rx_desc_sram_size)
  1662. iounmap(rxq->rx_desc_area);
  1663. else
  1664. dma_free_coherent(mp->dev->dev.parent, size,
  1665. rxq->rx_desc_area,
  1666. rxq->rx_desc_dma);
  1667. out:
  1668. return -ENOMEM;
  1669. }
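
/*
 * Note: rxq_init() links the descriptors into a circular list by pointing
 * each descriptor's next_desc_ptr at the DMA address of the following
 * descriptor (wrapping at the end of the ring), so the SDMA engine can
 * walk the ring without software intervention.  Queue 0's descriptors may
 * be placed in on-chip SRAM when rx_desc_sram_size is large enough to
 * hold them.
 */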
  1670. static void rxq_deinit(struct rx_queue *rxq)
  1671. {
  1672. struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
  1673. int i;
  1674. rxq_disable(rxq);
  1675. for (i = 0; i < rxq->rx_ring_size; i++) {
  1676. if (rxq->rx_skb[i]) {
  1677. dev_consume_skb_any(rxq->rx_skb[i]);
  1678. rxq->rx_desc_count--;
  1679. }
  1680. }
  1681. if (rxq->rx_desc_count) {
  1682. netdev_err(mp->dev, "error freeing rx ring -- %d skbs stuck\n",
  1683. rxq->rx_desc_count);
  1684. }
  1685. if (rxq->index == 0 &&
  1686. rxq->rx_desc_area_size <= mp->rx_desc_sram_size)
  1687. iounmap(rxq->rx_desc_area);
  1688. else
  1689. dma_free_coherent(mp->dev->dev.parent, rxq->rx_desc_area_size,
  1690. rxq->rx_desc_area, rxq->rx_desc_dma);
  1691. kfree(rxq->rx_skb);
  1692. }
  1693. static int txq_init(struct mv643xx_eth_private *mp, int index)
  1694. {
  1695. struct tx_queue *txq = mp->txq + index;
  1696. struct tx_desc *tx_desc;
  1697. int size;
  1698. int ret;
  1699. int i;
  1700. txq->index = index;
  1701. txq->tx_ring_size = mp->tx_ring_size;
        /* A queue must always have room for at least one skb.
         * Therefore, stop the queue when the number of free descriptor
         * entries drops to the maximum number of descriptors a single
         * skb can consume.
         */
  1706. txq->tx_stop_threshold = txq->tx_ring_size - MV643XX_MAX_SKB_DESCS;
  1707. txq->tx_wake_threshold = txq->tx_stop_threshold / 2;
  1708. txq->tx_desc_count = 0;
  1709. txq->tx_curr_desc = 0;
  1710. txq->tx_used_desc = 0;
  1711. size = txq->tx_ring_size * sizeof(struct tx_desc);
  1712. if (index == 0 && size <= mp->tx_desc_sram_size) {
  1713. txq->tx_desc_area = ioremap(mp->tx_desc_sram_addr,
  1714. mp->tx_desc_sram_size);
  1715. txq->tx_desc_dma = mp->tx_desc_sram_addr;
  1716. } else {
  1717. txq->tx_desc_area = dma_alloc_coherent(mp->dev->dev.parent,
  1718. size, &txq->tx_desc_dma,
  1719. GFP_KERNEL);
  1720. }
  1721. if (txq->tx_desc_area == NULL) {
  1722. netdev_err(mp->dev,
  1723. "can't allocate tx ring (%d bytes)\n", size);
  1724. return -ENOMEM;
  1725. }
  1726. memset(txq->tx_desc_area, 0, size);
  1727. txq->tx_desc_area_size = size;
  1728. tx_desc = txq->tx_desc_area;
  1729. for (i = 0; i < txq->tx_ring_size; i++) {
  1730. struct tx_desc *txd = tx_desc + i;
  1731. int nexti;
  1732. nexti = i + 1;
  1733. if (nexti == txq->tx_ring_size)
  1734. nexti = 0;
  1735. txd->cmd_sts = 0;
  1736. txd->next_desc_ptr = txq->tx_desc_dma +
  1737. nexti * sizeof(struct tx_desc);
  1738. }
  1739. txq->tx_desc_mapping = kcalloc(txq->tx_ring_size, sizeof(char),
  1740. GFP_KERNEL);
  1741. if (!txq->tx_desc_mapping) {
  1742. ret = -ENOMEM;
  1743. goto err_free_desc_area;
  1744. }
  1745. /* Allocate DMA buffers for TSO MAC/IP/TCP headers */
  1746. txq->tso_hdrs = dma_alloc_coherent(mp->dev->dev.parent,
  1747. txq->tx_ring_size * TSO_HEADER_SIZE,
  1748. &txq->tso_hdrs_dma, GFP_KERNEL);
  1749. if (txq->tso_hdrs == NULL) {
  1750. ret = -ENOMEM;
  1751. goto err_free_desc_mapping;
  1752. }
  1753. skb_queue_head_init(&txq->tx_skb);
  1754. return 0;
  1755. err_free_desc_mapping:
  1756. kfree(txq->tx_desc_mapping);
  1757. err_free_desc_area:
  1758. if (index == 0 && size <= mp->tx_desc_sram_size)
  1759. iounmap(txq->tx_desc_area);
  1760. else
  1761. dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
  1762. txq->tx_desc_area, txq->tx_desc_dma);
  1763. return ret;
  1764. }
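
/*
 * The thresholds set up in txq_init() implement simple hysteresis: the
 * transmit path stops the queue once tx_desc_count reaches
 * tx_stop_threshold (leaving room for one maximally fragmented skb of
 * MV643XX_MAX_SKB_DESCS descriptors), and txq_maybe_wake() restarts it
 * only after reclaim has brought the count back down to roughly half of
 * that, which avoids bouncing the queue on and off for every packet.
 */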
  1765. static void txq_deinit(struct tx_queue *txq)
  1766. {
  1767. struct mv643xx_eth_private *mp = txq_to_mp(txq);
  1768. txq_disable(txq);
  1769. txq_reclaim(txq, txq->tx_ring_size, 1);
  1770. BUG_ON(txq->tx_used_desc != txq->tx_curr_desc);
  1771. if (txq->index == 0 &&
  1772. txq->tx_desc_area_size <= mp->tx_desc_sram_size)
  1773. iounmap(txq->tx_desc_area);
  1774. else
  1775. dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
  1776. txq->tx_desc_area, txq->tx_desc_dma);
  1777. kfree(txq->tx_desc_mapping);
  1778. if (txq->tso_hdrs)
  1779. dma_free_coherent(mp->dev->dev.parent,
  1780. txq->tx_ring_size * TSO_HEADER_SIZE,
  1781. txq->tso_hdrs, txq->tso_hdrs_dma);
  1782. }
/* netdev ops and related ***************************************************/
static int mv643xx_eth_collect_events(struct mv643xx_eth_private *mp)
{
        u32 int_cause;
        u32 int_cause_ext;

        int_cause = rdlp(mp, INT_CAUSE) & mp->int_mask;
        if (int_cause == 0)
                return 0;

        int_cause_ext = 0;
        if (int_cause & INT_EXT) {
                int_cause &= ~INT_EXT;
                int_cause_ext = rdlp(mp, INT_CAUSE_EXT);
        }

        if (int_cause) {
                wrlp(mp, INT_CAUSE, ~int_cause);
                mp->work_tx_end |= ((int_cause & INT_TX_END) >> 19) &
                                ~(rdlp(mp, TXQ_COMMAND) & 0xff);
                mp->work_rx |= (int_cause & INT_RX) >> 2;
        }

        int_cause_ext &= INT_EXT_LINK_PHY | INT_EXT_TX;
        if (int_cause_ext) {
                wrlp(mp, INT_CAUSE_EXT, ~int_cause_ext);
                if (int_cause_ext & INT_EXT_LINK_PHY)
                        mp->work_link = 1;
                mp->work_tx |= int_cause_ext & INT_EXT_TX;
        }

        return 1;
}

static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
{
        struct net_device *dev = (struct net_device *)dev_id;
        struct mv643xx_eth_private *mp = netdev_priv(dev);

        if (unlikely(!mv643xx_eth_collect_events(mp)))
                return IRQ_NONE;

        wrlp(mp, INT_MASK, 0);
        napi_schedule(&mp->napi);

        return IRQ_HANDLED;
}
  1821. static void handle_link_event(struct mv643xx_eth_private *mp)
  1822. {
  1823. struct net_device *dev = mp->dev;
  1824. u32 port_status;
  1825. int speed;
  1826. int duplex;
  1827. int fc;
  1828. port_status = rdlp(mp, PORT_STATUS);
  1829. if (!(port_status & LINK_UP)) {
  1830. if (netif_carrier_ok(dev)) {
  1831. int i;
  1832. netdev_info(dev, "link down\n");
  1833. netif_carrier_off(dev);
  1834. for (i = 0; i < mp->txq_count; i++) {
  1835. struct tx_queue *txq = mp->txq + i;
  1836. txq_reclaim(txq, txq->tx_ring_size, 1);
  1837. txq_reset_hw_ptr(txq);
  1838. }
  1839. }
  1840. return;
  1841. }
  1842. switch (port_status & PORT_SPEED_MASK) {
  1843. case PORT_SPEED_10:
  1844. speed = 10;
  1845. break;
  1846. case PORT_SPEED_100:
  1847. speed = 100;
  1848. break;
  1849. case PORT_SPEED_1000:
  1850. speed = 1000;
  1851. break;
  1852. default:
  1853. speed = -1;
  1854. break;
  1855. }
  1856. duplex = (port_status & FULL_DUPLEX) ? 1 : 0;
  1857. fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0;
  1858. netdev_info(dev, "link up, %d Mb/s, %s duplex, flow control %sabled\n",
  1859. speed, duplex ? "full" : "half", fc ? "en" : "dis");
  1860. if (!netif_carrier_ok(dev))
  1861. netif_carrier_on(dev);
  1862. }
  1863. static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
  1864. {
  1865. struct mv643xx_eth_private *mp;
  1866. int work_done;
  1867. mp = container_of(napi, struct mv643xx_eth_private, napi);
  1868. if (unlikely(mp->oom)) {
  1869. mp->oom = 0;
  1870. del_timer(&mp->rx_oom);
  1871. }
  1872. work_done = 0;
  1873. while (work_done < budget) {
  1874. u8 queue_mask;
  1875. int queue;
  1876. int work_tbd;
  1877. if (mp->work_link) {
  1878. mp->work_link = 0;
  1879. handle_link_event(mp);
  1880. work_done++;
  1881. continue;
  1882. }
  1883. queue_mask = mp->work_tx | mp->work_tx_end | mp->work_rx;
  1884. if (likely(!mp->oom))
  1885. queue_mask |= mp->work_rx_refill;
  1886. if (!queue_mask) {
  1887. if (mv643xx_eth_collect_events(mp))
  1888. continue;
  1889. break;
  1890. }
  1891. queue = fls(queue_mask) - 1;
  1892. queue_mask = 1 << queue;
  1893. work_tbd = budget - work_done;
  1894. if (work_tbd > 16)
  1895. work_tbd = 16;
  1896. if (mp->work_tx_end & queue_mask) {
  1897. txq_kick(mp->txq + queue);
  1898. } else if (mp->work_tx & queue_mask) {
  1899. work_done += txq_reclaim(mp->txq + queue, work_tbd, 0);
  1900. txq_maybe_wake(mp->txq + queue);
  1901. } else if (mp->work_rx & queue_mask) {
  1902. work_done += rxq_process(mp->rxq + queue, work_tbd);
  1903. } else if (!mp->oom && (mp->work_rx_refill & queue_mask)) {
  1904. work_done += rxq_refill(mp->rxq + queue, work_tbd);
  1905. } else {
  1906. BUG();
  1907. }
  1908. }
  1909. if (work_done < budget) {
  1910. if (mp->oom)
  1911. mod_timer(&mp->rx_oom, jiffies + (HZ / 10));
  1912. napi_complete_done(napi, work_done);
  1913. wrlp(mp, INT_MASK, mp->int_mask);
  1914. }
  1915. return work_done;
  1916. }
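
/*
 * mv643xx_eth_poll() services one event source per loop iteration: the
 * pending work bits are OR'd into queue_mask, fls() picks the
 * highest-numbered source, and at most 16 descriptors are handled before
 * re-evaluating, so link events, TX reclaim and RX processing all make
 * progress within a single NAPI budget.  When the budget is not
 * exhausted, napi_complete_done() is called and the per-port interrupt
 * mask is restored.
 */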
  1917. static inline void oom_timer_wrapper(struct timer_list *t)
  1918. {
  1919. struct mv643xx_eth_private *mp = from_timer(mp, t, rx_oom);
  1920. napi_schedule(&mp->napi);
  1921. }
  1922. static void port_start(struct mv643xx_eth_private *mp)
  1923. {
  1924. struct net_device *dev = mp->dev;
  1925. u32 pscr;
  1926. int i;
  1927. /*
  1928. * Perform PHY reset, if there is a PHY.
  1929. */
  1930. if (dev->phydev) {
  1931. struct ethtool_link_ksettings cmd;
  1932. mv643xx_eth_get_link_ksettings(dev, &cmd);
  1933. phy_init_hw(dev->phydev);
  1934. mv643xx_eth_set_link_ksettings(
  1935. dev, (const struct ethtool_link_ksettings *)&cmd);
  1936. phy_start(dev->phydev);
  1937. }
  1938. /*
  1939. * Configure basic link parameters.
  1940. */
  1941. pscr = rdlp(mp, PORT_SERIAL_CONTROL);
  1942. pscr |= SERIAL_PORT_ENABLE;
  1943. wrlp(mp, PORT_SERIAL_CONTROL, pscr);
  1944. pscr |= DO_NOT_FORCE_LINK_FAIL;
  1945. if (!dev->phydev)
  1946. pscr |= FORCE_LINK_PASS;
  1947. wrlp(mp, PORT_SERIAL_CONTROL, pscr);
  1948. /*
  1949. * Configure TX path and queues.
  1950. */
  1951. tx_set_rate(mp, 1000000000, 16777216);
  1952. for (i = 0; i < mp->txq_count; i++) {
  1953. struct tx_queue *txq = mp->txq + i;
  1954. txq_reset_hw_ptr(txq);
  1955. txq_set_rate(txq, 1000000000, 16777216);
  1956. txq_set_fixed_prio_mode(txq);
  1957. }
  1958. /*
  1959. * Receive all unmatched unicast, TCP, UDP, BPDU and broadcast
  1960. * frames to RX queue #0, and include the pseudo-header when
  1961. * calculating receive checksums.
  1962. */
  1963. mv643xx_eth_set_features(mp->dev, mp->dev->features);
  1964. /*
  1965. * Treat BPDUs as normal multicasts, and disable partition mode.
  1966. */
  1967. wrlp(mp, PORT_CONFIG_EXT, 0x00000000);
  1968. /*
  1969. * Add configured unicast addresses to address filter table.
  1970. */
  1971. mv643xx_eth_program_unicast_filter(mp->dev);
  1972. /*
  1973. * Enable the receive queues.
  1974. */
  1975. for (i = 0; i < mp->rxq_count; i++) {
  1976. struct rx_queue *rxq = mp->rxq + i;
  1977. u32 addr;
  1978. addr = (u32)rxq->rx_desc_dma;
  1979. addr += rxq->rx_curr_desc * sizeof(struct rx_desc);
  1980. wrlp(mp, RXQ_CURRENT_DESC_PTR(i), addr);
  1981. rxq_enable(rxq);
  1982. }
  1983. }
static void mv643xx_eth_recalc_skb_size(struct mv643xx_eth_private *mp)
{
        int skb_size;

        /*
         * Reserve 2+14 bytes for an ethernet header (the hardware
         * automatically prepends 2 bytes of dummy data to each
         * received packet), 16 bytes for up to four VLAN tags, and
         * 4 bytes for the trailing FCS -- 36 bytes total.
         */
        skb_size = mp->dev->mtu + 36;

        /*
         * Make sure that the skb size is a multiple of 8 bytes, as
         * the lower three bits of the receive descriptor's buffer
         * size field are ignored by the hardware.
         */
        mp->skb_size = (skb_size + 7) & ~7;

        /*
         * If NET_SKB_PAD is smaller than a cache line,
         * netdev_alloc_skb() will cause skb->data to be misaligned
         * to a cache line boundary. If this is the case, include
         * some extra space to allow re-aligning the data area.
         */
        mp->skb_size += SKB_DMA_REALIGN;
}
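
/*
 * Example: with the default MTU of 1500, skb_size becomes 1500 + 36 =
 * 1536, which is already a multiple of 8, and SKB_DMA_REALIGN is then
 * added on top; a 9000-byte jumbo MTU gives 9000 + 36 = 9036, rounded up
 * to 9040 before the realignment slack is added.
 */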
  2008. static int mv643xx_eth_open(struct net_device *dev)
  2009. {
  2010. struct mv643xx_eth_private *mp = netdev_priv(dev);
  2011. int err;
  2012. int i;
  2013. wrlp(mp, INT_CAUSE, 0);
  2014. wrlp(mp, INT_CAUSE_EXT, 0);
  2015. rdlp(mp, INT_CAUSE_EXT);
  2016. err = request_irq(dev->irq, mv643xx_eth_irq,
  2017. IRQF_SHARED, dev->name, dev);
  2018. if (err) {
  2019. netdev_err(dev, "can't assign irq\n");
  2020. return -EAGAIN;
  2021. }
  2022. mv643xx_eth_recalc_skb_size(mp);
  2023. napi_enable(&mp->napi);
  2024. mp->int_mask = INT_EXT;
  2025. for (i = 0; i < mp->rxq_count; i++) {
  2026. err = rxq_init(mp, i);
  2027. if (err) {
  2028. while (--i >= 0)
  2029. rxq_deinit(mp->rxq + i);
  2030. goto out;
  2031. }
  2032. rxq_refill(mp->rxq + i, INT_MAX);
  2033. mp->int_mask |= INT_RX_0 << i;
  2034. }
  2035. if (mp->oom) {
  2036. mp->rx_oom.expires = jiffies + (HZ / 10);
  2037. add_timer(&mp->rx_oom);
  2038. }
  2039. for (i = 0; i < mp->txq_count; i++) {
  2040. err = txq_init(mp, i);
  2041. if (err) {
  2042. while (--i >= 0)
  2043. txq_deinit(mp->txq + i);
  2044. goto out_free;
  2045. }
  2046. mp->int_mask |= INT_TX_END_0 << i;
  2047. }
  2048. add_timer(&mp->mib_counters_timer);
  2049. port_start(mp);
  2050. wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX);
  2051. wrlp(mp, INT_MASK, mp->int_mask);
  2052. return 0;
  2053. out_free:
  2054. for (i = 0; i < mp->rxq_count; i++)
  2055. rxq_deinit(mp->rxq + i);
  2056. out:
  2057. free_irq(dev->irq, dev);
  2058. return err;
  2059. }
  2060. static void port_reset(struct mv643xx_eth_private *mp)
  2061. {
  2062. unsigned int data;
  2063. int i;
  2064. for (i = 0; i < mp->rxq_count; i++)
  2065. rxq_disable(mp->rxq + i);
  2066. for (i = 0; i < mp->txq_count; i++)
  2067. txq_disable(mp->txq + i);
  2068. while (1) {
  2069. u32 ps = rdlp(mp, PORT_STATUS);
  2070. if ((ps & (TX_IN_PROGRESS | TX_FIFO_EMPTY)) == TX_FIFO_EMPTY)
  2071. break;
  2072. udelay(10);
  2073. }
  2074. /* Reset the Enable bit in the Configuration Register */
  2075. data = rdlp(mp, PORT_SERIAL_CONTROL);
  2076. data &= ~(SERIAL_PORT_ENABLE |
  2077. DO_NOT_FORCE_LINK_FAIL |
  2078. FORCE_LINK_PASS);
  2079. wrlp(mp, PORT_SERIAL_CONTROL, data);
  2080. }
  2081. static int mv643xx_eth_stop(struct net_device *dev)
  2082. {
  2083. struct mv643xx_eth_private *mp = netdev_priv(dev);
  2084. int i;
  2085. wrlp(mp, INT_MASK_EXT, 0x00000000);
  2086. wrlp(mp, INT_MASK, 0x00000000);
  2087. rdlp(mp, INT_MASK);
  2088. napi_disable(&mp->napi);
  2089. del_timer_sync(&mp->rx_oom);
  2090. netif_carrier_off(dev);
  2091. if (dev->phydev)
  2092. phy_stop(dev->phydev);
  2093. free_irq(dev->irq, dev);
  2094. port_reset(mp);
  2095. mv643xx_eth_get_stats(dev);
  2096. mib_counters_update(mp);
  2097. del_timer_sync(&mp->mib_counters_timer);
  2098. for (i = 0; i < mp->rxq_count; i++)
  2099. rxq_deinit(mp->rxq + i);
  2100. for (i = 0; i < mp->txq_count; i++)
  2101. txq_deinit(mp->txq + i);
  2102. return 0;
  2103. }
  2104. static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
  2105. {
  2106. int ret;
  2107. if (!dev->phydev)
  2108. return -ENOTSUPP;
  2109. ret = phy_mii_ioctl(dev->phydev, ifr, cmd);
  2110. if (!ret)
  2111. mv643xx_eth_adjust_link(dev);
  2112. return ret;
  2113. }
  2114. static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
  2115. {
  2116. struct mv643xx_eth_private *mp = netdev_priv(dev);
  2117. dev->mtu = new_mtu;
  2118. mv643xx_eth_recalc_skb_size(mp);
  2119. tx_set_rate(mp, 1000000000, 16777216);
  2120. if (!netif_running(dev))
  2121. return 0;
  2122. /*
  2123. * Stop and then re-open the interface. This will allocate RX
  2124. * skbs of the new MTU.
  2125. * There is a possible danger that the open will not succeed,
  2126. * due to memory being full.
  2127. */
  2128. mv643xx_eth_stop(dev);
  2129. if (mv643xx_eth_open(dev)) {
  2130. netdev_err(dev,
  2131. "fatal error on re-opening device after MTU change\n");
  2132. }
  2133. return 0;
  2134. }
  2135. static void tx_timeout_task(struct work_struct *ugly)
  2136. {
  2137. struct mv643xx_eth_private *mp;
  2138. mp = container_of(ugly, struct mv643xx_eth_private, tx_timeout_task);
  2139. if (netif_running(mp->dev)) {
  2140. netif_tx_stop_all_queues(mp->dev);
  2141. port_reset(mp);
  2142. port_start(mp);
  2143. netif_tx_wake_all_queues(mp->dev);
  2144. }
  2145. }
  2146. static void mv643xx_eth_tx_timeout(struct net_device *dev)
  2147. {
  2148. struct mv643xx_eth_private *mp = netdev_priv(dev);
  2149. netdev_info(dev, "tx timeout\n");
  2150. schedule_work(&mp->tx_timeout_task);
  2151. }
  2152. #ifdef CONFIG_NET_POLL_CONTROLLER
  2153. static void mv643xx_eth_netpoll(struct net_device *dev)
  2154. {
  2155. struct mv643xx_eth_private *mp = netdev_priv(dev);
  2156. wrlp(mp, INT_MASK, 0x00000000);
  2157. rdlp(mp, INT_MASK);
  2158. mv643xx_eth_irq(dev->irq, dev);
  2159. wrlp(mp, INT_MASK, mp->int_mask);
  2160. }
  2161. #endif
  2162. /* platform glue ************************************************************/
  2163. static void
  2164. mv643xx_eth_conf_mbus_windows(struct mv643xx_eth_shared_private *msp,
  2165. const struct mbus_dram_target_info *dram)
  2166. {
  2167. void __iomem *base = msp->base;
  2168. u32 win_enable;
  2169. u32 win_protect;
  2170. int i;
  2171. for (i = 0; i < 6; i++) {
  2172. writel(0, base + WINDOW_BASE(i));
  2173. writel(0, base + WINDOW_SIZE(i));
  2174. if (i < 4)
  2175. writel(0, base + WINDOW_REMAP_HIGH(i));
  2176. }
  2177. win_enable = 0x3f;
  2178. win_protect = 0;
  2179. for (i = 0; i < dram->num_cs; i++) {
  2180. const struct mbus_dram_window *cs = dram->cs + i;
  2181. writel((cs->base & 0xffff0000) |
  2182. (cs->mbus_attr << 8) |
  2183. dram->mbus_dram_target_id, base + WINDOW_BASE(i));
  2184. writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));
  2185. win_enable &= ~(1 << i);
  2186. win_protect |= 3 << (2 * i);
  2187. }
  2188. writel(win_enable, base + WINDOW_BAR_ENABLE);
  2189. msp->win_protect = win_protect;
  2190. }
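
/*
 * mv643xx_eth_conf_mbus_windows() mirrors the MBUS DRAM layout into the
 * controller's six address decoding windows: all windows are first
 * cleared, then one window per DRAM chip select is programmed with the
 * base/size/attributes from mv_mbus_dram_info().  win_enable starts at
 * 0x3f and has a bit cleared for every window that is actually used
 * (set bits appear to keep a window disabled), while win_protect
 * accumulates two "full access" bits per window and is later written
 * into each port's WINDOW_PROTECT register in mv643xx_eth_probe().
 */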
static void infer_hw_params(struct mv643xx_eth_shared_private *msp)
{
        /*
         * Check whether we have a 14-bit coal limit field in bits
         * [21:8], or a 16-bit coal limit in bits [25,21:7] of the
         * SDMA config register.
         */
        writel(0x02000000, msp->base + 0x0400 + SDMA_CONFIG);
        if (readl(msp->base + 0x0400 + SDMA_CONFIG) & 0x02000000)
                msp->extended_rx_coal_limit = 1;
        else
                msp->extended_rx_coal_limit = 0;

        /*
         * Check whether the MAC supports TX rate control, and if
         * yes, whether its associated registers are in the old or
         * the new place.
         */
        writel(1, msp->base + 0x0400 + TX_BW_MTU_MOVED);
        if (readl(msp->base + 0x0400 + TX_BW_MTU_MOVED) & 1) {
                msp->tx_bw_control = TX_BW_CONTROL_NEW_LAYOUT;
        } else {
                writel(7, msp->base + 0x0400 + TX_BW_RATE);
                if (readl(msp->base + 0x0400 + TX_BW_RATE) & 7)
                        msp->tx_bw_control = TX_BW_CONTROL_OLD_LAYOUT;
                else
                        msp->tx_bw_control = TX_BW_CONTROL_ABSENT;
        }
}
  2219. #if defined(CONFIG_OF)
  2220. static const struct of_device_id mv643xx_eth_shared_ids[] = {
  2221. { .compatible = "marvell,orion-eth", },
  2222. { .compatible = "marvell,kirkwood-eth", },
  2223. { }
  2224. };
  2225. MODULE_DEVICE_TABLE(of, mv643xx_eth_shared_ids);
  2226. #endif
  2227. #if defined(CONFIG_OF_IRQ) && !defined(CONFIG_MV64X60)
  2228. #define mv643xx_eth_property(_np, _name, _v) \
  2229. do { \
  2230. u32 tmp; \
  2231. if (!of_property_read_u32(_np, "marvell," _name, &tmp)) \
  2232. _v = tmp; \
  2233. } while (0)
  2234. static struct platform_device *port_platdev[3];
  2235. static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
  2236. struct device_node *pnp)
  2237. {
  2238. struct platform_device *ppdev;
  2239. struct mv643xx_eth_platform_data ppd;
  2240. struct resource res;
  2241. const char *mac_addr;
  2242. int ret;
  2243. int dev_num = 0;
  2244. memset(&ppd, 0, sizeof(ppd));
  2245. ppd.shared = pdev;
  2246. memset(&res, 0, sizeof(res));
  2247. if (of_irq_to_resource(pnp, 0, &res) <= 0) {
  2248. dev_err(&pdev->dev, "missing interrupt on %s\n", pnp->name);
  2249. return -EINVAL;
  2250. }
  2251. if (of_property_read_u32(pnp, "reg", &ppd.port_number)) {
  2252. dev_err(&pdev->dev, "missing reg property on %s\n", pnp->name);
  2253. return -EINVAL;
  2254. }
  2255. if (ppd.port_number >= 3) {
  2256. dev_err(&pdev->dev, "invalid reg property on %s\n", pnp->name);
  2257. return -EINVAL;
  2258. }
  2259. while (dev_num < 3 && port_platdev[dev_num])
  2260. dev_num++;
  2261. if (dev_num == 3) {
  2262. dev_err(&pdev->dev, "too many ports registered\n");
  2263. return -EINVAL;
  2264. }
  2265. mac_addr = of_get_mac_address(pnp);
  2266. if (mac_addr)
  2267. memcpy(ppd.mac_addr, mac_addr, ETH_ALEN);
  2268. mv643xx_eth_property(pnp, "tx-queue-size", ppd.tx_queue_size);
  2269. mv643xx_eth_property(pnp, "tx-sram-addr", ppd.tx_sram_addr);
  2270. mv643xx_eth_property(pnp, "tx-sram-size", ppd.tx_sram_size);
  2271. mv643xx_eth_property(pnp, "rx-queue-size", ppd.rx_queue_size);
  2272. mv643xx_eth_property(pnp, "rx-sram-addr", ppd.rx_sram_addr);
  2273. mv643xx_eth_property(pnp, "rx-sram-size", ppd.rx_sram_size);
  2274. ppd.phy_node = of_parse_phandle(pnp, "phy-handle", 0);
  2275. if (!ppd.phy_node) {
  2276. ppd.phy_addr = MV643XX_ETH_PHY_NONE;
  2277. of_property_read_u32(pnp, "speed", &ppd.speed);
  2278. of_property_read_u32(pnp, "duplex", &ppd.duplex);
  2279. }
  2280. ppdev = platform_device_alloc(MV643XX_ETH_NAME, dev_num);
  2281. if (!ppdev)
  2282. return -ENOMEM;
  2283. ppdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
  2284. ppdev->dev.of_node = pnp;
  2285. ret = platform_device_add_resources(ppdev, &res, 1);
  2286. if (ret)
  2287. goto port_err;
  2288. ret = platform_device_add_data(ppdev, &ppd, sizeof(ppd));
  2289. if (ret)
  2290. goto port_err;
  2291. ret = platform_device_add(ppdev);
  2292. if (ret)
  2293. goto port_err;
  2294. port_platdev[dev_num] = ppdev;
  2295. return 0;
  2296. port_err:
  2297. platform_device_put(ppdev);
  2298. return ret;
  2299. }
  2300. static int mv643xx_eth_shared_of_probe(struct platform_device *pdev)
  2301. {
  2302. struct mv643xx_eth_shared_platform_data *pd;
  2303. struct device_node *pnp, *np = pdev->dev.of_node;
  2304. int ret;
  2305. /* bail out if not registered from DT */
  2306. if (!np)
  2307. return 0;
  2308. pd = devm_kzalloc(&pdev->dev, sizeof(*pd), GFP_KERNEL);
  2309. if (!pd)
  2310. return -ENOMEM;
  2311. pdev->dev.platform_data = pd;
  2312. mv643xx_eth_property(np, "tx-checksum-limit", pd->tx_csum_limit);
  2313. for_each_available_child_of_node(np, pnp) {
  2314. ret = mv643xx_eth_shared_of_add_port(pdev, pnp);
  2315. if (ret) {
  2316. of_node_put(pnp);
  2317. return ret;
  2318. }
  2319. }
  2320. return 0;
  2321. }
  2322. static void mv643xx_eth_shared_of_remove(void)
  2323. {
  2324. int n;
  2325. for (n = 0; n < 3; n++) {
  2326. platform_device_del(port_platdev[n]);
  2327. port_platdev[n] = NULL;
  2328. }
  2329. }
  2330. #else
  2331. static inline int mv643xx_eth_shared_of_probe(struct platform_device *pdev)
  2332. {
  2333. return 0;
  2334. }
  2335. static inline void mv643xx_eth_shared_of_remove(void)
  2336. {
  2337. }
  2338. #endif
  2339. static int mv643xx_eth_shared_probe(struct platform_device *pdev)
  2340. {
  2341. static int mv643xx_eth_version_printed;
  2342. struct mv643xx_eth_shared_platform_data *pd;
  2343. struct mv643xx_eth_shared_private *msp;
  2344. const struct mbus_dram_target_info *dram;
  2345. struct resource *res;
  2346. int ret;
  2347. if (!mv643xx_eth_version_printed++)
  2348. pr_notice("MV-643xx 10/100/1000 ethernet driver version %s\n",
  2349. mv643xx_eth_driver_version);
  2350. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  2351. if (res == NULL)
  2352. return -EINVAL;
  2353. msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL);
  2354. if (msp == NULL)
  2355. return -ENOMEM;
  2356. platform_set_drvdata(pdev, msp);
  2357. msp->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
  2358. if (msp->base == NULL)
  2359. return -ENOMEM;
  2360. msp->clk = devm_clk_get(&pdev->dev, NULL);
  2361. if (!IS_ERR(msp->clk))
  2362. clk_prepare_enable(msp->clk);
  2363. /*
  2364. * (Re-)program MBUS remapping windows if we are asked to.
  2365. */
  2366. dram = mv_mbus_dram_info();
  2367. if (dram)
  2368. mv643xx_eth_conf_mbus_windows(msp, dram);
  2369. ret = mv643xx_eth_shared_of_probe(pdev);
  2370. if (ret)
  2371. goto err_put_clk;
  2372. pd = dev_get_platdata(&pdev->dev);
  2373. msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ?
  2374. pd->tx_csum_limit : 9 * 1024;
  2375. infer_hw_params(msp);
  2376. return 0;
  2377. err_put_clk:
  2378. if (!IS_ERR(msp->clk))
  2379. clk_disable_unprepare(msp->clk);
  2380. return ret;
  2381. }
  2382. static int mv643xx_eth_shared_remove(struct platform_device *pdev)
  2383. {
  2384. struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev);
  2385. mv643xx_eth_shared_of_remove();
  2386. if (!IS_ERR(msp->clk))
  2387. clk_disable_unprepare(msp->clk);
  2388. return 0;
  2389. }
  2390. static struct platform_driver mv643xx_eth_shared_driver = {
  2391. .probe = mv643xx_eth_shared_probe,
  2392. .remove = mv643xx_eth_shared_remove,
  2393. .driver = {
  2394. .name = MV643XX_ETH_SHARED_NAME,
  2395. .of_match_table = of_match_ptr(mv643xx_eth_shared_ids),
  2396. },
  2397. };
  2398. static void phy_addr_set(struct mv643xx_eth_private *mp, int phy_addr)
  2399. {
  2400. int addr_shift = 5 * mp->port_num;
  2401. u32 data;
  2402. data = rdl(mp, PHY_ADDR);
  2403. data &= ~(0x1f << addr_shift);
  2404. data |= (phy_addr & 0x1f) << addr_shift;
  2405. wrl(mp, PHY_ADDR, data);
  2406. }
  2407. static int phy_addr_get(struct mv643xx_eth_private *mp)
  2408. {
  2409. unsigned int data;
  2410. data = rdl(mp, PHY_ADDR);
  2411. return (data >> (5 * mp->port_num)) & 0x1f;
  2412. }
  2413. static void set_params(struct mv643xx_eth_private *mp,
  2414. struct mv643xx_eth_platform_data *pd)
  2415. {
  2416. struct net_device *dev = mp->dev;
  2417. unsigned int tx_ring_size;
  2418. if (is_valid_ether_addr(pd->mac_addr))
  2419. memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
  2420. else
  2421. uc_addr_get(mp, dev->dev_addr);
  2422. mp->rx_ring_size = DEFAULT_RX_QUEUE_SIZE;
  2423. if (pd->rx_queue_size)
  2424. mp->rx_ring_size = pd->rx_queue_size;
  2425. mp->rx_desc_sram_addr = pd->rx_sram_addr;
  2426. mp->rx_desc_sram_size = pd->rx_sram_size;
  2427. mp->rxq_count = pd->rx_queue_count ? : 1;
  2428. tx_ring_size = DEFAULT_TX_QUEUE_SIZE;
  2429. if (pd->tx_queue_size)
  2430. tx_ring_size = pd->tx_queue_size;
  2431. mp->tx_ring_size = clamp_t(unsigned int, tx_ring_size,
  2432. MV643XX_MAX_SKB_DESCS * 2, 4096);
  2433. if (mp->tx_ring_size != tx_ring_size)
  2434. netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
  2435. mp->tx_ring_size, tx_ring_size);
  2436. mp->tx_desc_sram_addr = pd->tx_sram_addr;
  2437. mp->tx_desc_sram_size = pd->tx_sram_size;
  2438. mp->txq_count = pd->tx_queue_count ? : 1;
  2439. }
  2440. static int get_phy_mode(struct mv643xx_eth_private *mp)
  2441. {
  2442. struct device *dev = mp->dev->dev.parent;
  2443. int iface = -1;
  2444. if (dev->of_node)
  2445. iface = of_get_phy_mode(dev->of_node);
        /* Fall back to the historical default if the mode is unspecified.
         * Alternatively, the interface state could be read from (or
         * written to) the PSC1 register.
         */
  2449. if (iface < 0)
  2450. iface = PHY_INTERFACE_MODE_GMII;
  2451. return iface;
  2452. }
  2453. static struct phy_device *phy_scan(struct mv643xx_eth_private *mp,
  2454. int phy_addr)
  2455. {
  2456. struct phy_device *phydev;
  2457. int start;
  2458. int num;
  2459. int i;
  2460. char phy_id[MII_BUS_ID_SIZE + 3];
  2461. if (phy_addr == MV643XX_ETH_PHY_ADDR_DEFAULT) {
  2462. start = phy_addr_get(mp) & 0x1f;
  2463. num = 32;
  2464. } else {
  2465. start = phy_addr & 0x1f;
  2466. num = 1;
  2467. }
  2468. /* Attempt to connect to the PHY using orion-mdio */
  2469. phydev = ERR_PTR(-ENODEV);
  2470. for (i = 0; i < num; i++) {
  2471. int addr = (start + i) & 0x1f;
  2472. snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
  2473. "orion-mdio-mii", addr);
  2474. phydev = phy_connect(mp->dev, phy_id, mv643xx_eth_adjust_link,
  2475. get_phy_mode(mp));
  2476. if (!IS_ERR(phydev)) {
  2477. phy_addr_set(mp, addr);
  2478. break;
  2479. }
  2480. }
  2481. return phydev;
  2482. }
  2483. static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex)
  2484. {
  2485. struct net_device *dev = mp->dev;
  2486. struct phy_device *phy = dev->phydev;
  2487. if (speed == 0) {
  2488. phy->autoneg = AUTONEG_ENABLE;
  2489. phy->speed = 0;
  2490. phy->duplex = 0;
  2491. phy->advertising = phy->supported | ADVERTISED_Autoneg;
  2492. } else {
  2493. phy->autoneg = AUTONEG_DISABLE;
  2494. phy->advertising = 0;
  2495. phy->speed = speed;
  2496. phy->duplex = duplex;
  2497. }
  2498. phy_start_aneg(phy);
  2499. }
  2500. static void init_pscr(struct mv643xx_eth_private *mp, int speed, int duplex)
  2501. {
  2502. struct net_device *dev = mp->dev;
  2503. u32 pscr;
  2504. pscr = rdlp(mp, PORT_SERIAL_CONTROL);
  2505. if (pscr & SERIAL_PORT_ENABLE) {
  2506. pscr &= ~SERIAL_PORT_ENABLE;
  2507. wrlp(mp, PORT_SERIAL_CONTROL, pscr);
  2508. }
  2509. pscr = MAX_RX_PACKET_9700BYTE | SERIAL_PORT_CONTROL_RESERVED;
  2510. if (!dev->phydev) {
  2511. pscr |= DISABLE_AUTO_NEG_SPEED_GMII;
  2512. if (speed == SPEED_1000)
  2513. pscr |= SET_GMII_SPEED_TO_1000;
  2514. else if (speed == SPEED_100)
  2515. pscr |= SET_MII_SPEED_TO_100;
  2516. pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL;
  2517. pscr |= DISABLE_AUTO_NEG_FOR_DUPLEX;
  2518. if (duplex == DUPLEX_FULL)
  2519. pscr |= SET_FULL_DUPLEX_MODE;
  2520. }
  2521. wrlp(mp, PORT_SERIAL_CONTROL, pscr);
  2522. }
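
/*
 * init_pscr() only forces speed and duplex when there is no attached PHY:
 * in that case autonegotiation is disabled in the port serial control
 * register and the GMII/MII speed and duplex bits are set from the
 * platform data (e.g. a fixed 1000/full link to a switch chip).  With a
 * PHY present, the port is left to follow the PHY through
 * mv643xx_eth_adjust_link().
 */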
  2523. static const struct net_device_ops mv643xx_eth_netdev_ops = {
  2524. .ndo_open = mv643xx_eth_open,
  2525. .ndo_stop = mv643xx_eth_stop,
  2526. .ndo_start_xmit = mv643xx_eth_xmit,
  2527. .ndo_set_rx_mode = mv643xx_eth_set_rx_mode,
  2528. .ndo_set_mac_address = mv643xx_eth_set_mac_address,
  2529. .ndo_validate_addr = eth_validate_addr,
  2530. .ndo_do_ioctl = mv643xx_eth_ioctl,
  2531. .ndo_change_mtu = mv643xx_eth_change_mtu,
  2532. .ndo_set_features = mv643xx_eth_set_features,
  2533. .ndo_tx_timeout = mv643xx_eth_tx_timeout,
  2534. .ndo_get_stats = mv643xx_eth_get_stats,
  2535. #ifdef CONFIG_NET_POLL_CONTROLLER
  2536. .ndo_poll_controller = mv643xx_eth_netpoll,
  2537. #endif
  2538. };
  2539. static int mv643xx_eth_probe(struct platform_device *pdev)
  2540. {
  2541. struct mv643xx_eth_platform_data *pd;
  2542. struct mv643xx_eth_private *mp;
  2543. struct net_device *dev;
  2544. struct phy_device *phydev = NULL;
  2545. struct resource *res;
  2546. int err;
  2547. pd = dev_get_platdata(&pdev->dev);
  2548. if (pd == NULL) {
  2549. dev_err(&pdev->dev, "no mv643xx_eth_platform_data\n");
  2550. return -ENODEV;
  2551. }
  2552. if (pd->shared == NULL) {
  2553. dev_err(&pdev->dev, "no mv643xx_eth_platform_data->shared\n");
  2554. return -ENODEV;
  2555. }
  2556. dev = alloc_etherdev_mq(sizeof(struct mv643xx_eth_private), 8);
  2557. if (!dev)
  2558. return -ENOMEM;
  2559. SET_NETDEV_DEV(dev, &pdev->dev);
  2560. mp = netdev_priv(dev);
  2561. platform_set_drvdata(pdev, mp);
  2562. mp->shared = platform_get_drvdata(pd->shared);
  2563. mp->base = mp->shared->base + 0x0400 + (pd->port_number << 10);
  2564. mp->port_num = pd->port_number;
  2565. mp->dev = dev;
        /* Kirkwood resets some registers when their clocks are gated.
         * In particular, CLK125_BYPASS_EN must be cleared here, but that
         * bit does not exist on all of the other SoCs/system controllers
         * covered by this driver.
         */
  2570. if (of_device_is_compatible(pdev->dev.of_node,
  2571. "marvell,kirkwood-eth-port"))
  2572. wrlp(mp, PORT_SERIAL_CONTROL1,
  2573. rdlp(mp, PORT_SERIAL_CONTROL1) & ~CLK125_BYPASS_EN);
  2574. /*
  2575. * Start with a default rate, and if there is a clock, allow
  2576. * it to override the default.
  2577. */
  2578. mp->t_clk = 133000000;
  2579. mp->clk = devm_clk_get(&pdev->dev, NULL);
  2580. if (!IS_ERR(mp->clk)) {
  2581. clk_prepare_enable(mp->clk);
  2582. mp->t_clk = clk_get_rate(mp->clk);
  2583. } else if (!IS_ERR(mp->shared->clk)) {
  2584. mp->t_clk = clk_get_rate(mp->shared->clk);
  2585. }
  2586. set_params(mp, pd);
  2587. netif_set_real_num_tx_queues(dev, mp->txq_count);
  2588. netif_set_real_num_rx_queues(dev, mp->rxq_count);
  2589. err = 0;
  2590. if (pd->phy_node) {
  2591. phydev = of_phy_connect(mp->dev, pd->phy_node,
  2592. mv643xx_eth_adjust_link, 0,
  2593. get_phy_mode(mp));
  2594. if (!phydev)
  2595. err = -ENODEV;
  2596. else
  2597. phy_addr_set(mp, phydev->mdio.addr);
  2598. } else if (pd->phy_addr != MV643XX_ETH_PHY_NONE) {
  2599. phydev = phy_scan(mp, pd->phy_addr);
  2600. if (IS_ERR(phydev))
  2601. err = PTR_ERR(phydev);
  2602. else
  2603. phy_init(mp, pd->speed, pd->duplex);
  2604. }
  2605. if (err == -ENODEV) {
  2606. err = -EPROBE_DEFER;
  2607. goto out;
  2608. }
  2609. if (err)
  2610. goto out;
  2611. dev->ethtool_ops = &mv643xx_eth_ethtool_ops;
  2612. init_pscr(mp, pd->speed, pd->duplex);
  2613. mib_counters_clear(mp);
  2614. timer_setup(&mp->mib_counters_timer, mib_counters_timer_wrapper, 0);
  2615. mp->mib_counters_timer.expires = jiffies + 30 * HZ;
  2616. spin_lock_init(&mp->mib_counters_lock);
  2617. INIT_WORK(&mp->tx_timeout_task, tx_timeout_task);
  2618. netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, NAPI_POLL_WEIGHT);
  2619. timer_setup(&mp->rx_oom, oom_timer_wrapper, 0);
  2620. res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
  2621. BUG_ON(!res);
  2622. dev->irq = res->start;
  2623. dev->netdev_ops = &mv643xx_eth_netdev_ops;
  2624. dev->watchdog_timeo = 2 * HZ;
  2625. dev->base_addr = 0;
  2626. dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
  2627. dev->vlan_features = dev->features;
  2628. dev->features |= NETIF_F_RXCSUM;
  2629. dev->hw_features = dev->features;
  2630. dev->priv_flags |= IFF_UNICAST_FLT;
  2631. dev->gso_max_segs = MV643XX_MAX_TSO_SEGS;
  2632. /* MTU range: 64 - 9500 */
  2633. dev->min_mtu = 64;
  2634. dev->max_mtu = 9500;
  2635. if (mp->shared->win_protect)
  2636. wrl(mp, WINDOW_PROTECT(mp->port_num), mp->shared->win_protect);
  2637. netif_carrier_off(dev);
  2638. wrlp(mp, SDMA_CONFIG, PORT_SDMA_CONFIG_DEFAULT_VALUE);
  2639. set_rx_coal(mp, 250);
  2640. set_tx_coal(mp, 0);
  2641. err = register_netdev(dev);
  2642. if (err)
  2643. goto out;
  2644. netdev_notice(dev, "port %d with MAC address %pM\n",
  2645. mp->port_num, dev->dev_addr);
  2646. if (mp->tx_desc_sram_size > 0)
  2647. netdev_notice(dev, "configured with sram\n");
  2648. return 0;
  2649. out:
  2650. if (!IS_ERR(mp->clk))
  2651. clk_disable_unprepare(mp->clk);
  2652. free_netdev(dev);
  2653. return err;
  2654. }
  2655. static int mv643xx_eth_remove(struct platform_device *pdev)
  2656. {
  2657. struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);
  2658. struct net_device *dev = mp->dev;
  2659. unregister_netdev(mp->dev);
  2660. if (dev->phydev)
  2661. phy_disconnect(dev->phydev);
  2662. cancel_work_sync(&mp->tx_timeout_task);
  2663. if (!IS_ERR(mp->clk))
  2664. clk_disable_unprepare(mp->clk);
  2665. free_netdev(mp->dev);
  2666. return 0;
  2667. }
  2668. static void mv643xx_eth_shutdown(struct platform_device *pdev)
  2669. {
  2670. struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);
  2671. /* Mask all interrupts on ethernet port */
  2672. wrlp(mp, INT_MASK, 0);
  2673. rdlp(mp, INT_MASK);
  2674. if (netif_running(mp->dev))
  2675. port_reset(mp);
  2676. }
  2677. static struct platform_driver mv643xx_eth_driver = {
  2678. .probe = mv643xx_eth_probe,
  2679. .remove = mv643xx_eth_remove,
  2680. .shutdown = mv643xx_eth_shutdown,
  2681. .driver = {
  2682. .name = MV643XX_ETH_NAME,
  2683. },
  2684. };
  2685. static struct platform_driver * const drivers[] = {
  2686. &mv643xx_eth_shared_driver,
  2687. &mv643xx_eth_driver,
  2688. };
  2689. static int __init mv643xx_eth_init_module(void)
  2690. {
  2691. return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
  2692. }
  2693. module_init(mv643xx_eth_init_module);
  2694. static void __exit mv643xx_eth_cleanup_module(void)
  2695. {
  2696. platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
  2697. }
  2698. module_exit(mv643xx_eth_cleanup_module);
  2699. MODULE_AUTHOR("Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, "
  2700. "Manish Lachwani, Dale Farnsworth and Lennert Buytenhek");
  2701. MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX");
  2702. MODULE_LICENSE("GPL");
  2703. MODULE_ALIAS("platform:" MV643XX_ETH_SHARED_NAME);
  2704. MODULE_ALIAS("platform:" MV643XX_ETH_NAME);