macb_main.c 116 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542
055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142214321442145214621472148214921502151215221532154215521562157215821592160216121622163216421652166216721682169217021712172217321742175217621772178217921802181218221832184218521862187218821892190219121922193219421952196219721982199220022012202220322042205220622072208220922102211221222132214221522162217221822192220222122222223222422252226222722282229223022312232223322342235223622372238223922402241224222432244224522462247224822492250225122522253225422552256225722582259226022612262226322642265226622672268226922702271227222732274227522762277227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307230823092310231123122313231423152316231723182319232023212322232323242325232623272328232923302331233223332334233523362337233823392340234123422343234423452346234723482349235023512352235323542355235623572358235923602361236223632364236523662367236823692370237123722373237423752376237723782379238023812382238323842385238623872388238923902391239223932394239523962397239823992400240124022403240424052406240724082409241024112412241324142415241624172418241924202421242224232424242524262427242824292430243124322433243424352436243724382439244024412442244324442445244624472448244924502451245224532454245524562457245824592460246124622463246424652466246724682469247024712472247324742475247624772478247924802481248224832484248524862487248824892490249124922493249424952496249724982499250025012502250325042505250625072508250925102511251225132514251525162517251825192520252125222523252425252526252725282529253025312532253325342535253625372538253925402541254225432544254525462547254825492550255125522553255425552556255725582559256025612562256325642565256625672568256925702571257225732574257525762577257825792580258125822583258425852586258725882589259025912592259325942595259625972598259926002601260226032604260526062607260826092610261126122613261426152616261726182619262026212622262326242625262626272628262926302631263226332634263526362637263826392640264126422643264426452646264726482649265026512652265326542655265626572658265926602661266226632664266526662667266826692670267126722673267426752676267726782679268026812682268326842685268626872688268926902691269226932694269526962697269826992700270127022703270427052706270727082709271027112712271327142715271627172718271927202721272227232724272527262727272827292730273127322733273427352736273727382739274027412742274327442745274627472748274927502751275227532754275527562757275827592760276127622763276427652766276727682769277027712772277327742775277627772778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943
294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327732783279328032813282328332843285328632873288328932903291329232933294329532963297329832993300330133023303330433053306330733083309331033113312331333143315331633173318331933203321332233233324332533263327332833293330333133323333333433353336333733383339334033413342334333443345334633473348334933503351335233533354335533563357335833593360336133623363336433653366336733683369337033713372337333743375337633773378337933803381338233833384338533863387338833893390339133923393339433953396339733983399340034013402340334043405340634073408340934103411341234133414341534163417341834193420342134223423342434253426342734283429343034313432343334343435343634373438343934403441344234433444344534463447344834493450345134523453345434553456345734583459346034613462346334643465346634673468346934703471347234733474347534763477347834793480348134823483348434853486348734883489349034913492349334943495349634973498349935003501350235033504350535063507350835093510351135123513351435153516351735183519352035213522352335243525352635273528352935303531353235333534353535363537353835393540354135423543354435453546354735483549355035513552355335543555355635573558355935603561356235633564356535663567356835693570357135723573357435753576357735783579358035813582358335843585358635873588358935903591359235933594359535963597359835993600360136023603360436053606360736083609361036113612361336143615361636173618361936203621362236233624362536263627362836293630363136323633363436353636363736383639364036413642364336443645364636473648364936503651365236533654365536563657365836593660366136623663366436653666366736683669367036713672367336743675367636773678367936803681368236833684368536863687368836893690369136923693369436953696369736983699370037013702370337043705370637073708370937103711371237133714371537163717371837193720372137223723372437253726372737283729373037313732373337343735373637373738373937403741374237433744374537463747374837493750375137523753375437553756375737583759376037613762376337643765376637673768376937703771377237733774377537763777377837793780378137823783378437853786378737883789379037913792379337943795379637973798379938003801380238033804380538063807380838093810381138123813381438153816381738183819382038213822382338243825382638273828382938303831383
238333834383538363837383838393840384138423843384438453846384738483849385038513852385338543855385638573858385938603861386238633864386538663867386838693870387138723873387438753876387738783879388038813882388338843885388638873888388938903891389238933894389538963897389838993900390139023903390439053906390739083909391039113912391339143915391639173918391939203921392239233924392539263927392839293930393139323933393439353936393739383939394039413942394339443945394639473948394939503951395239533954395539563957395839593960396139623963396439653966396739683969397039713972397339743975397639773978397939803981398239833984398539863987398839893990399139923993399439953996399739983999400040014002400340044005400640074008400940104011401240134014401540164017401840194020402140224023402440254026402740284029403040314032403340344035403640374038403940404041404240434044404540464047404840494050405140524053405440554056405740584059406040614062406340644065406640674068406940704071407240734074407540764077407840794080408140824083408440854086408740884089409040914092409340944095409640974098409941004101410241034104410541064107410841094110411141124113411441154116411741184119412041214122412341244125412641274128412941304131413241334134413541364137413841394140414141424143414441454146414741484149415041514152415341544155415641574158415941604161416241634164416541664167416841694170417141724173417441754176417741784179418041814182418341844185418641874188418941904191419241934194419541964197419841994200420142024203420442054206420742084209421042114212421342144215421642174218421942204221422242234224422542264227422842294230423142324233423442354236423742384239424042414242424342444245424642474248424942504251425242534254425542564257425842594260426142624263426442654266426742684269427042714272427342744275427642774278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Cadence MACB/GEM Ethernet Controller driver
  4. *
  5. * Copyright (C) 2004-2006 Atmel Corporation
  6. */
  7. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  8. #include <linux/clk.h>
  9. #include <linux/clk-provider.h>
  10. #include <linux/crc32.h>
  11. #include <linux/module.h>
  12. #include <linux/moduleparam.h>
  13. #include <linux/kernel.h>
  14. #include <linux/types.h>
  15. #include <linux/circ_buf.h>
  16. #include <linux/slab.h>
  17. #include <linux/init.h>
  18. #include <linux/io.h>
  19. #include <linux/gpio.h>
  20. #include <linux/gpio/consumer.h>
  21. #include <linux/interrupt.h>
  22. #include <linux/netdevice.h>
  23. #include <linux/etherdevice.h>
  24. #include <linux/dma-mapping.h>
  25. #include <linux/platform_data/macb.h>
  26. #include <linux/platform_device.h>
  27. #include <linux/phy.h>
  28. #include <linux/of.h>
  29. #include <linux/of_device.h>
  30. #include <linux/of_gpio.h>
  31. #include <linux/of_mdio.h>
  32. #include <linux/of_net.h>
  33. #include <linux/ip.h>
  34. #include <linux/udp.h>
  35. #include <linux/tcp.h>
  36. #include <linux/iopoll.h>
  37. #include <linux/pm_runtime.h>
  38. #include "macb.h"
  39. /* This structure is only used for MACB on SiFive FU540 devices */
  40. struct sifive_fu540_macb_mgmt {
  41. void __iomem *reg;
  42. unsigned long rate;
  43. struct clk_hw hw;
  44. };
  45. #define MACB_RX_BUFFER_SIZE 128
  46. #define RX_BUFFER_MULTIPLE 64 /* bytes */
  47. #define DEFAULT_RX_RING_SIZE 512 /* must be power of 2 */
  48. #define MIN_RX_RING_SIZE 64
  49. #define MAX_RX_RING_SIZE 8192
  50. #define RX_RING_BYTES(bp) (macb_dma_desc_get_size(bp) \
  51. * (bp)->rx_ring_size)
  52. #define DEFAULT_TX_RING_SIZE 512 /* must be power of 2 */
  53. #define MIN_TX_RING_SIZE 64
  54. #define MAX_TX_RING_SIZE 4096
  55. #define TX_RING_BYTES(bp) (macb_dma_desc_get_size(bp) \
  56. * (bp)->tx_ring_size)
  57. /* level of occupied TX descriptors under which we wake up TX process */
  58. #define MACB_TX_WAKEUP_THRESH(bp) (3 * (bp)->tx_ring_size / 4)
  59. #define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(ISR_ROVR))
  60. #define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \
  61. | MACB_BIT(ISR_RLE) \
  62. | MACB_BIT(TXERR))
  63. #define MACB_TX_INT_FLAGS (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP) \
  64. | MACB_BIT(TXUBR))
  65. /* Max length of transmit frame must be a multiple of 8 bytes */
  66. #define MACB_TX_LEN_ALIGN 8
  67. #define MACB_MAX_TX_LEN ((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))
  68. /* Limit maximum TX length as per Cadence TSO errata. This is to avoid a
  69. * false amba_error in TX path from the DMA assuming there is not enough
  70. * space in the SRAM (16KB) even when there is.
  71. */
  72. #define GEM_MAX_TX_LEN (unsigned int)(0x3FC0)
  73. #define GEM_MTU_MIN_SIZE ETH_MIN_MTU
  74. #define MACB_NETIF_LSO NETIF_F_TSO
  75. #define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0)
  76. #define MACB_WOL_ENABLED (0x1 << 1)
  77. /* Graceful stop timeouts in us. We should allow up to
  78. * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
  79. */
  80. #define MACB_HALT_TIMEOUT 1230
  81. #define MACB_PM_TIMEOUT 100 /* ms */
  82. #define MACB_MDIO_TIMEOUT 1000000 /* in usecs */
  83. /* DMA buffer descriptor might be different size
  84. * depends on hardware configuration:
  85. *
  86. * 1. dma address width 32 bits:
  87. * word 1: 32 bit address of Data Buffer
  88. * word 2: control
  89. *
  90. * 2. dma address width 64 bits:
  91. * word 1: 32 bit address of Data Buffer
  92. * word 2: control
  93. * word 3: upper 32 bit address of Data Buffer
  94. * word 4: unused
  95. *
  96. * 3. dma address width 32 bits with hardware timestamping:
  97. * word 1: 32 bit address of Data Buffer
  98. * word 2: control
  99. * word 3: timestamp word 1
  100. * word 4: timestamp word 2
  101. *
  102. * 4. dma address width 64 bits with hardware timestamping:
  103. * word 1: 32 bit address of Data Buffer
  104. * word 2: control
  105. * word 3: upper 32 bit address of Data Buffer
  106. * word 4: unused
  107. * word 5: timestamp word 1
  108. * word 6: timestamp word 2
  109. */
  110. static unsigned int macb_dma_desc_get_size(struct macb *bp)
  111. {
  112. #ifdef MACB_EXT_DESC
  113. unsigned int desc_size;
  114. switch (bp->hw_dma_cap) {
  115. case HW_DMA_CAP_64B:
  116. desc_size = sizeof(struct macb_dma_desc)
  117. + sizeof(struct macb_dma_desc_64);
  118. break;
  119. case HW_DMA_CAP_PTP:
  120. desc_size = sizeof(struct macb_dma_desc)
  121. + sizeof(struct macb_dma_desc_ptp);
  122. break;
  123. case HW_DMA_CAP_64B_PTP:
  124. desc_size = sizeof(struct macb_dma_desc)
  125. + sizeof(struct macb_dma_desc_64)
  126. + sizeof(struct macb_dma_desc_ptp);
  127. break;
  128. default:
  129. desc_size = sizeof(struct macb_dma_desc);
  130. }
  131. return desc_size;
  132. #endif
  133. return sizeof(struct macb_dma_desc);
  134. }
  135. static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int desc_idx)
  136. {
  137. #ifdef MACB_EXT_DESC
  138. switch (bp->hw_dma_cap) {
  139. case HW_DMA_CAP_64B:
  140. case HW_DMA_CAP_PTP:
  141. desc_idx <<= 1;
  142. break;
  143. case HW_DMA_CAP_64B_PTP:
  144. desc_idx *= 3;
  145. break;
  146. default:
  147. break;
  148. }
  149. #endif
  150. return desc_idx;
  151. }
  152. #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
  153. static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc)
  154. {
  155. return (struct macb_dma_desc_64 *)((void *)desc
  156. + sizeof(struct macb_dma_desc));
  157. }
  158. #endif
  159. /* Ring buffer accessors */
  160. static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
  161. {
  162. return index & (bp->tx_ring_size - 1);
  163. }
  164. static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
  165. unsigned int index)
  166. {
  167. index = macb_tx_ring_wrap(queue->bp, index);
  168. index = macb_adj_dma_desc_idx(queue->bp, index);
  169. return &queue->tx_ring[index];
  170. }
  171. static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
  172. unsigned int index)
  173. {
  174. return &queue->tx_skb[macb_tx_ring_wrap(queue->bp, index)];
  175. }
  176. static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
  177. {
  178. dma_addr_t offset;
  179. offset = macb_tx_ring_wrap(queue->bp, index) *
  180. macb_dma_desc_get_size(queue->bp);
  181. return queue->tx_ring_dma + offset;
  182. }
  183. static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index)
  184. {
  185. return index & (bp->rx_ring_size - 1);
  186. }
  187. static struct macb_dma_desc *macb_rx_desc(struct macb_queue *queue, unsigned int index)
  188. {
  189. index = macb_rx_ring_wrap(queue->bp, index);
  190. index = macb_adj_dma_desc_idx(queue->bp, index);
  191. return &queue->rx_ring[index];
  192. }
  193. static void *macb_rx_buffer(struct macb_queue *queue, unsigned int index)
  194. {
  195. return queue->rx_buffers + queue->bp->rx_buffer_size *
  196. macb_rx_ring_wrap(queue->bp, index);
  197. }
  198. /* I/O accessors */
  199. static u32 hw_readl_native(struct macb *bp, int offset)
  200. {
  201. return __raw_readl(bp->regs + offset);
  202. }
  203. static void hw_writel_native(struct macb *bp, int offset, u32 value)
  204. {
  205. __raw_writel(value, bp->regs + offset);
  206. }
  207. static u32 hw_readl(struct macb *bp, int offset)
  208. {
  209. return readl_relaxed(bp->regs + offset);
  210. }
  211. static void hw_writel(struct macb *bp, int offset, u32 value)
  212. {
  213. writel_relaxed(value, bp->regs + offset);
  214. }
  215. /* Find the CPU endianness by using the loopback bit of NCR register. When the
  216. * CPU is in big endian we need to program swapped mode for management
  217. * descriptor access.
  218. */
  219. static bool hw_is_native_io(void __iomem *addr)
  220. {
  221. u32 value = MACB_BIT(LLB);
  222. __raw_writel(value, addr + MACB_NCR);
  223. value = __raw_readl(addr + MACB_NCR);
  224. /* Write 0 back to disable everything */
  225. __raw_writel(0, addr + MACB_NCR);
  226. return value == MACB_BIT(LLB);
  227. }
  228. static bool hw_is_gem(void __iomem *addr, bool native_io)
  229. {
  230. u32 id;
  231. if (native_io)
  232. id = __raw_readl(addr + MACB_MID);
  233. else
  234. id = readl_relaxed(addr + MACB_MID);
  235. return MACB_BFEXT(IDNUM, id) >= 0x2;
  236. }
  237. static void macb_set_hwaddr(struct macb *bp)
  238. {
  239. u32 bottom;
  240. u16 top;
  241. bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
  242. macb_or_gem_writel(bp, SA1B, bottom);
  243. top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
  244. macb_or_gem_writel(bp, SA1T, top);
  245. /* Clear unused address register sets */
  246. macb_or_gem_writel(bp, SA2B, 0);
  247. macb_or_gem_writel(bp, SA2T, 0);
  248. macb_or_gem_writel(bp, SA3B, 0);
  249. macb_or_gem_writel(bp, SA3T, 0);
  250. macb_or_gem_writel(bp, SA4B, 0);
  251. macb_or_gem_writel(bp, SA4T, 0);
  252. }
  253. static void macb_get_hwaddr(struct macb *bp)
  254. {
  255. u32 bottom;
  256. u16 top;
  257. u8 addr[6];
  258. int i;
  259. /* Check all 4 address register for valid address */
  260. for (i = 0; i < 4; i++) {
  261. bottom = macb_or_gem_readl(bp, SA1B + i * 8);
  262. top = macb_or_gem_readl(bp, SA1T + i * 8);
  263. addr[0] = bottom & 0xff;
  264. addr[1] = (bottom >> 8) & 0xff;
  265. addr[2] = (bottom >> 16) & 0xff;
  266. addr[3] = (bottom >> 24) & 0xff;
  267. addr[4] = top & 0xff;
  268. addr[5] = (top >> 8) & 0xff;
  269. if (is_valid_ether_addr(addr)) {
  270. memcpy(bp->dev->dev_addr, addr, sizeof(addr));
  271. return;
  272. }
  273. }
  274. dev_info(&bp->pdev->dev, "invalid hw address, using random\n");
  275. eth_hw_addr_random(bp->dev);
  276. }
  277. static int macb_mdio_wait_for_idle(struct macb *bp)
  278. {
  279. u32 val;
  280. return readx_poll_timeout(MACB_READ_NSR, bp, val, val & MACB_BIT(IDLE),
  281. 1, MACB_MDIO_TIMEOUT);
  282. }
  283. static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
  284. {
  285. struct macb *bp = bus->priv;
  286. int status;
  287. status = pm_runtime_get_sync(&bp->pdev->dev);
  288. if (status < 0) {
  289. pm_runtime_put_noidle(&bp->pdev->dev);
  290. goto mdio_pm_exit;
  291. }
  292. status = macb_mdio_wait_for_idle(bp);
  293. if (status < 0)
  294. goto mdio_read_exit;
  295. macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
  296. | MACB_BF(RW, MACB_MAN_READ)
  297. | MACB_BF(PHYA, mii_id)
  298. | MACB_BF(REGA, regnum)
  299. | MACB_BF(CODE, MACB_MAN_CODE)));
  300. status = macb_mdio_wait_for_idle(bp);
  301. if (status < 0)
  302. goto mdio_read_exit;
  303. status = MACB_BFEXT(DATA, macb_readl(bp, MAN));
  304. mdio_read_exit:
  305. pm_runtime_mark_last_busy(&bp->pdev->dev);
  306. pm_runtime_put_autosuspend(&bp->pdev->dev);
  307. mdio_pm_exit:
  308. return status;
  309. }
  310. static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
  311. u16 value)
  312. {
  313. struct macb *bp = bus->priv;
  314. int status;
  315. status = pm_runtime_get_sync(&bp->pdev->dev);
  316. if (status < 0) {
  317. pm_runtime_put_noidle(&bp->pdev->dev);
  318. goto mdio_pm_exit;
  319. }
  320. status = macb_mdio_wait_for_idle(bp);
  321. if (status < 0)
  322. goto mdio_write_exit;
  323. macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
  324. | MACB_BF(RW, MACB_MAN_WRITE)
  325. | MACB_BF(PHYA, mii_id)
  326. | MACB_BF(REGA, regnum)
  327. | MACB_BF(CODE, MACB_MAN_CODE)
  328. | MACB_BF(DATA, value)));
  329. status = macb_mdio_wait_for_idle(bp);
  330. if (status < 0)
  331. goto mdio_write_exit;
  332. mdio_write_exit:
  333. pm_runtime_mark_last_busy(&bp->pdev->dev);
  334. pm_runtime_put_autosuspend(&bp->pdev->dev);
  335. mdio_pm_exit:
  336. return status;
  337. }
  338. /**
  339. * macb_set_tx_clk() - Set a clock to a new frequency
  340. * @clk Pointer to the clock to change
  341. * @rate New frequency in Hz
  342. * @dev Pointer to the struct net_device
  343. */
  344. static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
  345. {
  346. long ferr, rate, rate_rounded;
  347. if (!clk)
  348. return;
  349. switch (speed) {
  350. case SPEED_10:
  351. rate = 2500000;
  352. break;
  353. case SPEED_100:
  354. rate = 25000000;
  355. break;
  356. case SPEED_1000:
  357. rate = 125000000;
  358. break;
  359. default:
  360. return;
  361. }
  362. rate_rounded = clk_round_rate(clk, rate);
  363. if (rate_rounded < 0)
  364. return;
  365. /* RGMII allows 50 ppm frequency error. Test and warn if this limit
  366. * is not satisfied.
  367. */
  368. ferr = abs(rate_rounded - rate);
  369. ferr = DIV_ROUND_UP(ferr, rate / 100000);
  370. if (ferr > 5)
  371. netdev_warn(dev, "unable to generate target frequency: %ld Hz\n",
  372. rate);
  373. if (clk_set_rate(clk, rate_rounded))
  374. netdev_err(dev, "adjusting tx_clk failed.\n");
  375. }
  376. static void macb_handle_link_change(struct net_device *dev)
  377. {
  378. struct macb *bp = netdev_priv(dev);
  379. struct phy_device *phydev = dev->phydev;
  380. unsigned long flags;
  381. int status_change = 0;
  382. spin_lock_irqsave(&bp->lock, flags);
  383. if (phydev->link) {
  384. if ((bp->speed != phydev->speed) ||
  385. (bp->duplex != phydev->duplex)) {
  386. u32 reg;
  387. reg = macb_readl(bp, NCFGR);
  388. reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
  389. if (macb_is_gem(bp))
  390. reg &= ~GEM_BIT(GBE);
  391. if (phydev->duplex)
  392. reg |= MACB_BIT(FD);
  393. if (phydev->speed == SPEED_100)
  394. reg |= MACB_BIT(SPD);
  395. if (phydev->speed == SPEED_1000 &&
  396. bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
  397. reg |= GEM_BIT(GBE);
  398. macb_or_gem_writel(bp, NCFGR, reg);
  399. bp->speed = phydev->speed;
  400. bp->duplex = phydev->duplex;
  401. status_change = 1;
  402. }
  403. }
  404. if (phydev->link != bp->link) {
  405. if (!phydev->link) {
  406. bp->speed = 0;
  407. bp->duplex = -1;
  408. }
  409. bp->link = phydev->link;
  410. status_change = 1;
  411. }
  412. spin_unlock_irqrestore(&bp->lock, flags);
  413. if (status_change) {
  414. if (phydev->link) {
  415. /* Update the TX clock rate if and only if the link is
  416. * up and there has been a link change.
  417. */
  418. macb_set_tx_clk(bp->tx_clk, phydev->speed, dev);
  419. netif_carrier_on(dev);
  420. netdev_info(dev, "link up (%d/%s)\n",
  421. phydev->speed,
  422. phydev->duplex == DUPLEX_FULL ?
  423. "Full" : "Half");
  424. } else {
  425. netif_carrier_off(dev);
  426. netdev_info(dev, "link down\n");
  427. }
  428. }
  429. }
  430. /* based on au1000_eth. c*/
  431. static int macb_mii_probe(struct net_device *dev)
  432. {
  433. struct macb *bp = netdev_priv(dev);
  434. struct phy_device *phydev;
  435. struct device_node *np;
  436. int ret, i;
  437. np = bp->pdev->dev.of_node;
  438. ret = 0;
  439. if (np) {
  440. if (of_phy_is_fixed_link(np)) {
  441. bp->phy_node = of_node_get(np);
  442. } else {
  443. bp->phy_node = of_parse_phandle(np, "phy-handle", 0);
  444. /* fallback to standard phy registration if no
  445. * phy-handle was found nor any phy found during
  446. * dt phy registration
  447. */
  448. if (!bp->phy_node && !phy_find_first(bp->mii_bus)) {
  449. for (i = 0; i < PHY_MAX_ADDR; i++) {
  450. phydev = mdiobus_scan(bp->mii_bus, i);
  451. if (IS_ERR(phydev) &&
  452. PTR_ERR(phydev) != -ENODEV) {
  453. ret = PTR_ERR(phydev);
  454. break;
  455. }
  456. }
  457. if (ret)
  458. return -ENODEV;
  459. }
  460. }
  461. }
  462. if (bp->phy_node) {
  463. phydev = of_phy_connect(dev, bp->phy_node,
  464. &macb_handle_link_change, 0,
  465. bp->phy_interface);
  466. if (!phydev)
  467. return -ENODEV;
  468. } else {
  469. phydev = phy_find_first(bp->mii_bus);
  470. if (!phydev) {
  471. netdev_err(dev, "no PHY found\n");
  472. return -ENXIO;
  473. }
  474. /* attach the mac to the phy */
  475. ret = phy_connect_direct(dev, phydev, &macb_handle_link_change,
  476. bp->phy_interface);
  477. if (ret) {
  478. netdev_err(dev, "Could not attach to PHY\n");
  479. return ret;
  480. }
  481. }
  482. /* mask with MAC supported features */
  483. if (macb_is_gem(bp) && bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
  484. phy_set_max_speed(phydev, SPEED_1000);
  485. else
  486. phy_set_max_speed(phydev, SPEED_100);
  487. if (bp->caps & MACB_CAPS_NO_GIGABIT_HALF)
  488. phy_remove_link_mode(phydev,
  489. ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
  490. bp->link = 0;
  491. bp->speed = 0;
  492. bp->duplex = -1;
  493. return 0;
  494. }
  495. static int macb_mii_init(struct macb *bp)
  496. {
  497. struct device_node *np;
  498. int err = -ENXIO;
  499. /* Enable management port */
  500. macb_writel(bp, NCR, MACB_BIT(MPE));
  501. bp->mii_bus = mdiobus_alloc();
  502. if (!bp->mii_bus) {
  503. err = -ENOMEM;
  504. goto err_out;
  505. }
  506. bp->mii_bus->name = "MACB_mii_bus";
  507. bp->mii_bus->read = &macb_mdio_read;
  508. bp->mii_bus->write = &macb_mdio_write;
  509. snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
  510. bp->pdev->name, bp->pdev->id);
  511. bp->mii_bus->priv = bp;
  512. bp->mii_bus->parent = &bp->pdev->dev;
  513. dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
  514. np = bp->pdev->dev.of_node;
  515. if (np && of_phy_is_fixed_link(np)) {
  516. if (of_phy_register_fixed_link(np) < 0) {
  517. dev_err(&bp->pdev->dev,
  518. "broken fixed-link specification %pOF\n", np);
  519. goto err_out_free_mdiobus;
  520. }
  521. err = mdiobus_register(bp->mii_bus);
  522. } else {
  523. err = of_mdiobus_register(bp->mii_bus, np);
  524. }
  525. if (err)
  526. goto err_out_free_fixed_link;
  527. err = macb_mii_probe(bp->dev);
  528. if (err)
  529. goto err_out_unregister_bus;
  530. return 0;
  531. err_out_unregister_bus:
  532. mdiobus_unregister(bp->mii_bus);
  533. err_out_free_fixed_link:
  534. if (np && of_phy_is_fixed_link(np))
  535. of_phy_deregister_fixed_link(np);
  536. err_out_free_mdiobus:
  537. of_node_put(bp->phy_node);
  538. mdiobus_free(bp->mii_bus);
  539. err_out:
  540. return err;
  541. }
  542. static void macb_update_stats(struct macb *bp)
  543. {
  544. u32 *p = &bp->hw_stats.macb.rx_pause_frames;
  545. u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
  546. int offset = MACB_PFR;
  547. WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);
  548. for (; p < end; p++, offset += 4)
  549. *p += bp->macb_reg_readl(bp, offset);
  550. }
  551. static int macb_halt_tx(struct macb *bp)
  552. {
  553. unsigned long halt_time, timeout;
  554. u32 status;
  555. macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));
  556. timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
  557. do {
  558. halt_time = jiffies;
  559. status = macb_readl(bp, TSR);
  560. if (!(status & MACB_BIT(TGO)))
  561. return 0;
  562. udelay(250);
  563. } while (time_before(halt_time, timeout));
  564. return -ETIMEDOUT;
  565. }
  566. static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
  567. {
  568. if (tx_skb->mapping) {
  569. if (tx_skb->mapped_as_page)
  570. dma_unmap_page(&bp->pdev->dev, tx_skb->mapping,
  571. tx_skb->size, DMA_TO_DEVICE);
  572. else
  573. dma_unmap_single(&bp->pdev->dev, tx_skb->mapping,
  574. tx_skb->size, DMA_TO_DEVICE);
  575. tx_skb->mapping = 0;
  576. }
  577. if (tx_skb->skb) {
  578. dev_kfree_skb_any(tx_skb->skb);
  579. tx_skb->skb = NULL;
  580. }
  581. }
  582. static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_t addr)
  583. {
  584. #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
  585. struct macb_dma_desc_64 *desc_64;
  586. if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
  587. desc_64 = macb_64b_desc(bp, desc);
  588. desc_64->addrh = upper_32_bits(addr);
  589. /* The low bits of RX address contain the RX_USED bit, clearing
  590. * of which allows packet RX. Make sure the high bits are also
  591. * visible to HW at that point.
  592. */
  593. dma_wmb();
  594. }
  595. #endif
  596. desc->addr = lower_32_bits(addr);
  597. }
  598. static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc)
  599. {
  600. dma_addr_t addr = 0;
  601. #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
  602. struct macb_dma_desc_64 *desc_64;
  603. if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
  604. desc_64 = macb_64b_desc(bp, desc);
  605. addr = ((u64)(desc_64->addrh) << 32);
  606. }
  607. #endif
  608. addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
  609. return addr;
  610. }
  611. static void macb_tx_error_task(struct work_struct *work)
  612. {
  613. struct macb_queue *queue = container_of(work, struct macb_queue,
  614. tx_error_task);
  615. struct macb *bp = queue->bp;
  616. struct macb_tx_skb *tx_skb;
  617. struct macb_dma_desc *desc;
  618. struct sk_buff *skb;
  619. unsigned int tail;
  620. unsigned long flags;
  621. netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n",
  622. (unsigned int)(queue - bp->queues),
  623. queue->tx_tail, queue->tx_head);
  624. /* Prevent the queue IRQ handlers from running: each of them may call
  625. * macb_tx_interrupt(), which in turn may call netif_wake_subqueue().
  626. * As explained below, we have to halt the transmission before updating
  627. * TBQP registers so we call netif_tx_stop_all_queues() to notify the
  628. * network engine about the macb/gem being halted.
  629. */
  630. spin_lock_irqsave(&bp->lock, flags);
  631. /* Make sure nobody is trying to queue up new packets */
  632. netif_tx_stop_all_queues(bp->dev);
  633. /* Stop transmission now
  634. * (in case we have just queued new packets)
  635. * macb/gem must be halted to write TBQP register
  636. */
  637. if (macb_halt_tx(bp))
  638. /* Just complain for now, reinitializing TX path can be good */
  639. netdev_err(bp->dev, "BUG: halt tx timed out\n");
  640. /* Treat frames in TX queue including the ones that caused the error.
  641. * Free transmit buffers in upper layer.
  642. */
  643. for (tail = queue->tx_tail; tail != queue->tx_head; tail++) {
  644. u32 ctrl;
  645. desc = macb_tx_desc(queue, tail);
  646. ctrl = desc->ctrl;
  647. tx_skb = macb_tx_skb(queue, tail);
  648. skb = tx_skb->skb;
  649. if (ctrl & MACB_BIT(TX_USED)) {
  650. /* skb is set for the last buffer of the frame */
  651. while (!skb) {
  652. macb_tx_unmap(bp, tx_skb);
  653. tail++;
  654. tx_skb = macb_tx_skb(queue, tail);
  655. skb = tx_skb->skb;
  656. }
  657. /* ctrl still refers to the first buffer descriptor
  658. * since it's the only one written back by the hardware
  659. */
  660. if (!(ctrl & MACB_BIT(TX_BUF_EXHAUSTED))) {
  661. netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
  662. macb_tx_ring_wrap(bp, tail),
  663. skb->data);
  664. bp->dev->stats.tx_packets++;
  665. queue->stats.tx_packets++;
  666. bp->dev->stats.tx_bytes += skb->len;
  667. queue->stats.tx_bytes += skb->len;
  668. }
  669. } else {
  670. /* "Buffers exhausted mid-frame" errors may only happen
  671. * if the driver is buggy, so complain loudly about
  672. * those. Statistics are updated by hardware.
  673. */
  674. if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
  675. netdev_err(bp->dev,
  676. "BUG: TX buffers exhausted mid-frame\n");
  677. desc->ctrl = ctrl | MACB_BIT(TX_USED);
  678. }
  679. macb_tx_unmap(bp, tx_skb);
  680. }
  681. /* Set end of TX queue */
  682. desc = macb_tx_desc(queue, 0);
  683. macb_set_addr(bp, desc, 0);
  684. desc->ctrl = MACB_BIT(TX_USED);
  685. /* Make descriptor updates visible to hardware */
  686. wmb();
  687. /* Reinitialize the TX desc queue */
  688. queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
  689. #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
  690. if (bp->hw_dma_cap & HW_DMA_CAP_64B)
  691. queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
  692. #endif
  693. /* Make TX ring reflect state of hardware */
  694. queue->tx_head = 0;
  695. queue->tx_tail = 0;
  696. /* Housework before enabling TX IRQ */
  697. macb_writel(bp, TSR, macb_readl(bp, TSR));
  698. queue_writel(queue, IER, MACB_TX_INT_FLAGS);
  699. /* Now we are ready to start transmission again */
  700. netif_tx_start_all_queues(bp->dev);
  701. macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
  702. spin_unlock_irqrestore(&bp->lock, flags);
  703. }
  704. static void macb_tx_interrupt(struct macb_queue *queue)
  705. {
  706. unsigned int tail;
  707. unsigned int head;
  708. u32 status;
  709. struct macb *bp = queue->bp;
  710. u16 queue_index = queue - bp->queues;
  711. status = macb_readl(bp, TSR);
  712. macb_writel(bp, TSR, status);
  713. if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
  714. queue_writel(queue, ISR, MACB_BIT(TCOMP));
  715. netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
  716. (unsigned long)status);
  717. head = queue->tx_head;
  718. for (tail = queue->tx_tail; tail != head; tail++) {
  719. struct macb_tx_skb *tx_skb;
  720. struct sk_buff *skb;
  721. struct macb_dma_desc *desc;
  722. u32 ctrl;
  723. desc = macb_tx_desc(queue, tail);
  724. /* Make hw descriptor updates visible to CPU */
  725. rmb();
  726. ctrl = desc->ctrl;
  727. /* TX_USED bit is only set by hardware on the very first buffer
  728. * descriptor of the transmitted frame.
  729. */
  730. if (!(ctrl & MACB_BIT(TX_USED)))
  731. break;
  732. /* Process all buffers of the current transmitted frame */
  733. for (;; tail++) {
  734. tx_skb = macb_tx_skb(queue, tail);
  735. skb = tx_skb->skb;
  736. /* First, update TX stats if needed */
  737. if (skb) {
  738. if (unlikely(skb_shinfo(skb)->tx_flags &
  739. SKBTX_HW_TSTAMP) &&
  740. gem_ptp_do_txstamp(queue, skb, desc) == 0) {
  741. /* skb now belongs to timestamp buffer
  742. * and will be removed later
  743. */
  744. tx_skb->skb = NULL;
  745. }
  746. netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
  747. macb_tx_ring_wrap(bp, tail),
  748. skb->data);
  749. bp->dev->stats.tx_packets++;
  750. queue->stats.tx_packets++;
  751. bp->dev->stats.tx_bytes += skb->len;
  752. queue->stats.tx_bytes += skb->len;
  753. }
  754. /* Now we can safely release resources */
  755. macb_tx_unmap(bp, tx_skb);
  756. /* skb is set only for the last buffer of the frame.
  757. * WARNING: at this point skb has been freed by
  758. * macb_tx_unmap().
  759. */
  760. if (skb)
  761. break;
  762. }
  763. }
  764. queue->tx_tail = tail;
  765. if (__netif_subqueue_stopped(bp->dev, queue_index) &&
  766. CIRC_CNT(queue->tx_head, queue->tx_tail,
  767. bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp))
  768. netif_wake_subqueue(bp->dev, queue_index);
  769. }
  770. static void gem_rx_refill(struct macb_queue *queue)
  771. {
  772. unsigned int entry;
  773. struct sk_buff *skb;
  774. dma_addr_t paddr;
  775. struct macb *bp = queue->bp;
  776. struct macb_dma_desc *desc;
  777. while (CIRC_SPACE(queue->rx_prepared_head, queue->rx_tail,
  778. bp->rx_ring_size) > 0) {
  779. entry = macb_rx_ring_wrap(bp, queue->rx_prepared_head);
  780. /* Make hw descriptor updates visible to CPU */
  781. rmb();
  782. queue->rx_prepared_head++;
  783. desc = macb_rx_desc(queue, entry);
  784. if (!queue->rx_skbuff[entry]) {
  785. /* allocate sk_buff for this free entry in ring */
  786. skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
  787. if (unlikely(!skb)) {
  788. netdev_err(bp->dev,
  789. "Unable to allocate sk_buff\n");
  790. break;
  791. }
  792. /* now fill corresponding descriptor entry */
  793. paddr = dma_map_single(&bp->pdev->dev, skb->data,
  794. bp->rx_buffer_size,
  795. DMA_FROM_DEVICE);
  796. if (dma_mapping_error(&bp->pdev->dev, paddr)) {
  797. dev_kfree_skb(skb);
  798. break;
  799. }
  800. queue->rx_skbuff[entry] = skb;
  801. if (entry == bp->rx_ring_size - 1)
  802. paddr |= MACB_BIT(RX_WRAP);
  803. desc->ctrl = 0;
  804. /* Setting addr clears RX_USED and allows reception,
  805. * make sure ctrl is cleared first to avoid a race.
  806. */
  807. dma_wmb();
  808. macb_set_addr(bp, desc, paddr);
  809. /* properly align Ethernet header */
  810. skb_reserve(skb, NET_IP_ALIGN);
  811. } else {
  812. desc->ctrl = 0;
  813. dma_wmb();
  814. desc->addr &= ~MACB_BIT(RX_USED);
  815. }
  816. }
  817. /* Make descriptor updates visible to hardware */
  818. wmb();
  819. netdev_vdbg(bp->dev, "rx ring: queue: %p, prepared head %d, tail %d\n",
  820. queue, queue->rx_prepared_head, queue->rx_tail);
  821. }
  822. /* Mark DMA descriptors from begin up to and not including end as unused */
  823. static void discard_partial_frame(struct macb_queue *queue, unsigned int begin,
  824. unsigned int end)
  825. {
  826. unsigned int frag;
  827. for (frag = begin; frag != end; frag++) {
  828. struct macb_dma_desc *desc = macb_rx_desc(queue, frag);
  829. desc->addr &= ~MACB_BIT(RX_USED);
  830. }
  831. /* Make descriptor updates visible to hardware */
  832. wmb();
  833. /* When this happens, the hardware stats registers for
  834. * whatever caused this is updated, so we don't have to record
  835. * anything.
  836. */
  837. }
  838. static int gem_rx(struct macb_queue *queue, struct napi_struct *napi,
  839. int budget)
  840. {
  841. struct macb *bp = queue->bp;
  842. unsigned int len;
  843. unsigned int entry;
  844. struct sk_buff *skb;
  845. struct macb_dma_desc *desc;
  846. int count = 0;
  847. while (count < budget) {
  848. u32 ctrl;
  849. dma_addr_t addr;
  850. bool rxused;
  851. entry = macb_rx_ring_wrap(bp, queue->rx_tail);
  852. desc = macb_rx_desc(queue, entry);
  853. /* Make hw descriptor updates visible to CPU */
  854. rmb();
  855. rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false;
  856. addr = macb_get_addr(bp, desc);
  857. if (!rxused)
  858. break;
  859. /* Ensure ctrl is at least as up-to-date as rxused */
  860. dma_rmb();
  861. ctrl = desc->ctrl;
  862. queue->rx_tail++;
  863. count++;
  864. if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) {
  865. netdev_err(bp->dev,
  866. "not whole frame pointed by descriptor\n");
  867. bp->dev->stats.rx_dropped++;
  868. queue->stats.rx_dropped++;
  869. break;
  870. }
  871. skb = queue->rx_skbuff[entry];
  872. if (unlikely(!skb)) {
  873. netdev_err(bp->dev,
  874. "inconsistent Rx descriptor chain\n");
  875. bp->dev->stats.rx_dropped++;
  876. queue->stats.rx_dropped++;
  877. break;
  878. }
  879. /* now everything is ready for receiving packet */
  880. queue->rx_skbuff[entry] = NULL;
  881. len = ctrl & bp->rx_frm_len_mask;
  882. netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);
  883. skb_put(skb, len);
  884. dma_unmap_single(&bp->pdev->dev, addr,
  885. bp->rx_buffer_size, DMA_FROM_DEVICE);
  886. skb->protocol = eth_type_trans(skb, bp->dev);
  887. skb_checksum_none_assert(skb);
  888. if (bp->dev->features & NETIF_F_RXCSUM &&
  889. !(bp->dev->flags & IFF_PROMISC) &&
  890. GEM_BFEXT(RX_CSUM, ctrl) & GEM_RX_CSUM_CHECKED_MASK)
  891. skb->ip_summed = CHECKSUM_UNNECESSARY;
  892. bp->dev->stats.rx_packets++;
  893. queue->stats.rx_packets++;
  894. bp->dev->stats.rx_bytes += skb->len;
  895. queue->stats.rx_bytes += skb->len;
  896. gem_ptp_do_rxstamp(bp, skb, desc);
  897. #if defined(DEBUG) && defined(VERBOSE_DEBUG)
  898. netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
  899. skb->len, skb->csum);
  900. print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1,
  901. skb_mac_header(skb), 16, true);
  902. print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1,
  903. skb->data, 32, true);
  904. #endif
  905. napi_gro_receive(napi, skb);
  906. }
  907. gem_rx_refill(queue);
  908. return count;
  909. }
  910. static int macb_rx_frame(struct macb_queue *queue, struct napi_struct *napi,
  911. unsigned int first_frag, unsigned int last_frag)
  912. {
  913. unsigned int len;
  914. unsigned int frag;
  915. unsigned int offset;
  916. struct sk_buff *skb;
  917. struct macb_dma_desc *desc;
  918. struct macb *bp = queue->bp;
  919. desc = macb_rx_desc(queue, last_frag);
  920. len = desc->ctrl & bp->rx_frm_len_mask;
  921. netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
  922. macb_rx_ring_wrap(bp, first_frag),
  923. macb_rx_ring_wrap(bp, last_frag), len);
  924. /* The ethernet header starts NET_IP_ALIGN bytes into the
  925. * first buffer. Since the header is 14 bytes, this makes the
  926. * payload word-aligned.
  927. *
  928. * Instead of calling skb_reserve(NET_IP_ALIGN), we just copy
  929. * the two padding bytes into the skb so that we avoid hitting
  930. * the slowpath in memcpy(), and pull them off afterwards.
  931. */
  932. skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN);
  933. if (!skb) {
  934. bp->dev->stats.rx_dropped++;
  935. for (frag = first_frag; ; frag++) {
  936. desc = macb_rx_desc(queue, frag);
  937. desc->addr &= ~MACB_BIT(RX_USED);
  938. if (frag == last_frag)
  939. break;
  940. }
  941. /* Make descriptor updates visible to hardware */
  942. wmb();
  943. return 1;
  944. }
  945. offset = 0;
  946. len += NET_IP_ALIGN;
  947. skb_checksum_none_assert(skb);
  948. skb_put(skb, len);
  949. for (frag = first_frag; ; frag++) {
  950. unsigned int frag_len = bp->rx_buffer_size;
  951. if (offset + frag_len > len) {
  952. if (unlikely(frag != last_frag)) {
  953. dev_kfree_skb_any(skb);
  954. return -1;
  955. }
  956. frag_len = len - offset;
  957. }
  958. skb_copy_to_linear_data_offset(skb, offset,
  959. macb_rx_buffer(queue, frag),
  960. frag_len);
  961. offset += bp->rx_buffer_size;
  962. desc = macb_rx_desc(queue, frag);
  963. desc->addr &= ~MACB_BIT(RX_USED);
  964. if (frag == last_frag)
  965. break;
  966. }
  967. /* Make descriptor updates visible to hardware */
  968. wmb();
  969. __skb_pull(skb, NET_IP_ALIGN);
  970. skb->protocol = eth_type_trans(skb, bp->dev);
  971. bp->dev->stats.rx_packets++;
  972. bp->dev->stats.rx_bytes += skb->len;
  973. netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
  974. skb->len, skb->csum);
  975. napi_gro_receive(napi, skb);
  976. return 0;
  977. }
  978. static inline void macb_init_rx_ring(struct macb_queue *queue)
  979. {
  980. struct macb *bp = queue->bp;
  981. dma_addr_t addr;
  982. struct macb_dma_desc *desc = NULL;
  983. int i;
  984. addr = queue->rx_buffers_dma;
  985. for (i = 0; i < bp->rx_ring_size; i++) {
  986. desc = macb_rx_desc(queue, i);
  987. macb_set_addr(bp, desc, addr);
  988. desc->ctrl = 0;
  989. addr += bp->rx_buffer_size;
  990. }
  991. desc->addr |= MACB_BIT(RX_WRAP);
  992. queue->rx_tail = 0;
  993. }
  994. static int macb_rx(struct macb_queue *queue, struct napi_struct *napi,
  995. int budget)
  996. {
  997. struct macb *bp = queue->bp;
  998. bool reset_rx_queue = false;
  999. int received = 0;
  1000. unsigned int tail;
  1001. int first_frag = -1;
  1002. for (tail = queue->rx_tail; budget > 0; tail++) {
  1003. struct macb_dma_desc *desc = macb_rx_desc(queue, tail);
  1004. u32 ctrl;
  1005. /* Make hw descriptor updates visible to CPU */
  1006. rmb();
  1007. if (!(desc->addr & MACB_BIT(RX_USED)))
  1008. break;
  1009. /* Ensure ctrl is at least as up-to-date as addr */
  1010. dma_rmb();
  1011. ctrl = desc->ctrl;
  1012. if (ctrl & MACB_BIT(RX_SOF)) {
  1013. if (first_frag != -1)
  1014. discard_partial_frame(queue, first_frag, tail);
  1015. first_frag = tail;
  1016. }
  1017. if (ctrl & MACB_BIT(RX_EOF)) {
  1018. int dropped;
  1019. if (unlikely(first_frag == -1)) {
  1020. reset_rx_queue = true;
  1021. continue;
  1022. }
  1023. dropped = macb_rx_frame(queue, napi, first_frag, tail);
  1024. first_frag = -1;
  1025. if (unlikely(dropped < 0)) {
  1026. reset_rx_queue = true;
  1027. continue;
  1028. }
  1029. if (!dropped) {
  1030. received++;
  1031. budget--;
  1032. }
  1033. }
  1034. }
  1035. if (unlikely(reset_rx_queue)) {
  1036. unsigned long flags;
  1037. u32 ctrl;
  1038. netdev_err(bp->dev, "RX queue corruption: reset it\n");
  1039. spin_lock_irqsave(&bp->lock, flags);
  1040. ctrl = macb_readl(bp, NCR);
  1041. macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
  1042. macb_init_rx_ring(queue);
  1043. queue_writel(queue, RBQP, queue->rx_ring_dma);
  1044. macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
  1045. spin_unlock_irqrestore(&bp->lock, flags);
  1046. return received;
  1047. }
  1048. if (first_frag != -1)
  1049. queue->rx_tail = first_frag;
  1050. else
  1051. queue->rx_tail = tail;
  1052. return received;
  1053. }
  1054. static int macb_poll(struct napi_struct *napi, int budget)
  1055. {
  1056. struct macb_queue *queue = container_of(napi, struct macb_queue, napi);
  1057. struct macb *bp = queue->bp;
  1058. int work_done;
  1059. u32 status;
  1060. status = macb_readl(bp, RSR);
  1061. macb_writel(bp, RSR, status);
  1062. netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
  1063. (unsigned long)status, budget);
  1064. work_done = bp->macbgem_ops.mog_rx(queue, napi, budget);
  1065. if (work_done < budget) {
  1066. napi_complete_done(napi, work_done);
  1067. /* Packets received while interrupts were disabled */
  1068. status = macb_readl(bp, RSR);
  1069. if (status) {
  1070. if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
  1071. queue_writel(queue, ISR, MACB_BIT(RCOMP));
  1072. napi_reschedule(napi);
  1073. } else {
  1074. queue_writel(queue, IER, bp->rx_intr_mask);
  1075. }
  1076. }
  1077. /* TODO: Handle errors */
  1078. return work_done;
  1079. }
  1080. static void macb_hresp_error_task(unsigned long data)
  1081. {
  1082. struct macb *bp = (struct macb *)data;
  1083. struct net_device *dev = bp->dev;
  1084. struct macb_queue *queue = bp->queues;
  1085. unsigned int q;
  1086. u32 ctrl;
  1087. for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
  1088. queue_writel(queue, IDR, bp->rx_intr_mask |
  1089. MACB_TX_INT_FLAGS |
  1090. MACB_BIT(HRESP));
  1091. }
  1092. ctrl = macb_readl(bp, NCR);
  1093. ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE));
  1094. macb_writel(bp, NCR, ctrl);
  1095. netif_tx_stop_all_queues(dev);
  1096. netif_carrier_off(dev);
  1097. bp->macbgem_ops.mog_init_rings(bp);
  1098. /* Initialize TX and RX buffers */
  1099. for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
  1100. queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
  1101. #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
  1102. if (bp->hw_dma_cap & HW_DMA_CAP_64B)
  1103. queue_writel(queue, RBQPH,
  1104. upper_32_bits(queue->rx_ring_dma));
  1105. #endif
  1106. queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
  1107. #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
  1108. if (bp->hw_dma_cap & HW_DMA_CAP_64B)
  1109. queue_writel(queue, TBQPH,
  1110. upper_32_bits(queue->tx_ring_dma));
  1111. #endif
  1112. /* Enable interrupts */
  1113. queue_writel(queue, IER,
  1114. bp->rx_intr_mask |
  1115. MACB_TX_INT_FLAGS |
  1116. MACB_BIT(HRESP));
  1117. }
  1118. ctrl |= MACB_BIT(RE) | MACB_BIT(TE);
  1119. macb_writel(bp, NCR, ctrl);
  1120. netif_carrier_on(dev);
  1121. netif_tx_start_all_queues(dev);
  1122. }
  1123. static void macb_tx_restart(struct macb_queue *queue)
  1124. {
  1125. unsigned int head = queue->tx_head;
  1126. unsigned int tail = queue->tx_tail;
  1127. struct macb *bp = queue->bp;
  1128. if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
  1129. queue_writel(queue, ISR, MACB_BIT(TXUBR));
  1130. if (head == tail)
  1131. return;
  1132. macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
  1133. }
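/* Per-queue interrupt handler: RX completion is deferred to NAPI, TX errors
 * are pushed to the tx_error_task, and the remaining bits (TCOMP, TXUBR,
 * RXUBR workaround, ROVR, HRESP) are handled inline under bp->lock.
 */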
  1134. static irqreturn_t macb_interrupt(int irq, void *dev_id)
  1135. {
  1136. struct macb_queue *queue = dev_id;
  1137. struct macb *bp = queue->bp;
  1138. struct net_device *dev = bp->dev;
  1139. u32 status, ctrl;
  1140. status = queue_readl(queue, ISR);
  1141. if (unlikely(!status))
  1142. return IRQ_NONE;
  1143. spin_lock(&bp->lock);
  1144. while (status) {
  1145. /* close possible race with dev_close */
  1146. if (unlikely(!netif_running(dev))) {
  1147. queue_writel(queue, IDR, -1);
  1148. if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
  1149. queue_writel(queue, ISR, -1);
  1150. break;
  1151. }
  1152. netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n",
  1153. (unsigned int)(queue - bp->queues),
  1154. (unsigned long)status);
  1155. if (status & bp->rx_intr_mask) {
  1156. /* There's no point taking any more interrupts
  1157. * until we have processed the buffers. The
  1158. * scheduling call may fail if the poll routine
  1159. * is already scheduled, so disable interrupts
  1160. * now.
  1161. */
  1162. queue_writel(queue, IDR, bp->rx_intr_mask);
  1163. if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
  1164. queue_writel(queue, ISR, MACB_BIT(RCOMP));
  1165. if (napi_schedule_prep(&queue->napi)) {
  1166. netdev_vdbg(bp->dev, "scheduling RX softirq\n");
  1167. __napi_schedule(&queue->napi);
  1168. }
  1169. }
  1170. if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
  1171. queue_writel(queue, IDR, MACB_TX_INT_FLAGS);
  1172. schedule_work(&queue->tx_error_task);
  1173. if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
  1174. queue_writel(queue, ISR, MACB_TX_ERR_FLAGS);
  1175. break;
  1176. }
  1177. if (status & MACB_BIT(TCOMP))
  1178. macb_tx_interrupt(queue);
  1179. if (status & MACB_BIT(TXUBR))
  1180. macb_tx_restart(queue);
  1181. /* Link change detection isn't possible with RMII, so we'll
  1182. * add that if/when we get our hands on a full-blown MII PHY.
  1183. */
  1184. /* There is a hardware issue under heavy load where DMA can
  1185. * stop, this causes endless "used buffer descriptor read"
  1186. * interrupts but it can be cleared by re-enabling RX. See
  1187. * the at91rm9200 manual, section 41.3.1 or the Zynq manual
  1188. * section 16.7.4 for details. RXUBR is only enabled for
  1189. * these two versions.
  1190. */
  1191. if (status & MACB_BIT(RXUBR)) {
  1192. ctrl = macb_readl(bp, NCR);
  1193. macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
  1194. wmb();
  1195. macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
  1196. if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
  1197. queue_writel(queue, ISR, MACB_BIT(RXUBR));
  1198. }
  1199. if (status & MACB_BIT(ISR_ROVR)) {
  1200. /* We missed at least one packet */
  1201. if (macb_is_gem(bp))
  1202. bp->hw_stats.gem.rx_overruns++;
  1203. else
  1204. bp->hw_stats.macb.rx_overruns++;
  1205. if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
  1206. queue_writel(queue, ISR, MACB_BIT(ISR_ROVR));
  1207. }
  1208. if (status & MACB_BIT(HRESP)) {
  1209. tasklet_schedule(&bp->hresp_err_tasklet);
  1210. netdev_err(dev, "DMA bus error: HRESP not OK\n");
  1211. if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
  1212. queue_writel(queue, ISR, MACB_BIT(HRESP));
  1213. }
  1214. status = queue_readl(queue, ISR);
  1215. }
  1216. spin_unlock(&bp->lock);
  1217. return IRQ_HANDLED;
  1218. }
  1219. #ifdef CONFIG_NET_POLL_CONTROLLER
  1220. /* Polling receive - used by netconsole and other diagnostic tools
  1221. * to allow network i/o with interrupts disabled.
  1222. */
  1223. static void macb_poll_controller(struct net_device *dev)
  1224. {
  1225. struct macb *bp = netdev_priv(dev);
  1226. struct macb_queue *queue;
  1227. unsigned long flags;
  1228. unsigned int q;
  1229. local_irq_save(flags);
  1230. for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
  1231. macb_interrupt(dev->irq, queue);
  1232. local_irq_restore(flags);
  1233. }
  1234. #endif
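/* Map an skb for transmission: the linear part is split into chunks of at
 * most bp->max_tx_length (the first chunk being @hdrlen), each fragment is
 * DMA-mapped the same way, and the descriptors are then filled in reverse
 * order so the controller only sees a complete chain once the first
 * descriptor's TX_USED bit is cleared. Returns the number of descriptors
 * consumed, or 0 on mapping failure.
 */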
  1235. static unsigned int macb_tx_map(struct macb *bp,
  1236. struct macb_queue *queue,
  1237. struct sk_buff *skb,
  1238. unsigned int hdrlen)
  1239. {
  1240. dma_addr_t mapping;
  1241. unsigned int len, entry, i, tx_head = queue->tx_head;
  1242. struct macb_tx_skb *tx_skb = NULL;
  1243. struct macb_dma_desc *desc;
  1244. unsigned int offset, size, count = 0;
  1245. unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags;
  1246. unsigned int eof = 1, mss_mfs = 0;
  1247. u32 ctrl, lso_ctrl = 0, seq_ctrl = 0;
  1248. /* LSO */
  1249. if (skb_shinfo(skb)->gso_size != 0) {
  1250. if (ip_hdr(skb)->protocol == IPPROTO_UDP)
  1251. /* UDP - UFO */
  1252. lso_ctrl = MACB_LSO_UFO_ENABLE;
  1253. else
  1254. /* TCP - TSO */
  1255. lso_ctrl = MACB_LSO_TSO_ENABLE;
  1256. }
  1257. /* First, map non-paged data */
  1258. len = skb_headlen(skb);
  1259. /* first buffer length */
  1260. size = hdrlen;
  1261. offset = 0;
  1262. while (len) {
  1263. entry = macb_tx_ring_wrap(bp, tx_head);
  1264. tx_skb = &queue->tx_skb[entry];
  1265. mapping = dma_map_single(&bp->pdev->dev,
  1266. skb->data + offset,
  1267. size, DMA_TO_DEVICE);
  1268. if (dma_mapping_error(&bp->pdev->dev, mapping))
  1269. goto dma_error;
  1270. /* Save info to properly release resources */
  1271. tx_skb->skb = NULL;
  1272. tx_skb->mapping = mapping;
  1273. tx_skb->size = size;
  1274. tx_skb->mapped_as_page = false;
  1275. len -= size;
  1276. offset += size;
  1277. count++;
  1278. tx_head++;
  1279. size = min(len, bp->max_tx_length);
  1280. }
  1281. /* Then, map paged data from fragments */
  1282. for (f = 0; f < nr_frags; f++) {
  1283. const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
  1284. len = skb_frag_size(frag);
  1285. offset = 0;
  1286. while (len) {
  1287. size = min(len, bp->max_tx_length);
  1288. entry = macb_tx_ring_wrap(bp, tx_head);
  1289. tx_skb = &queue->tx_skb[entry];
  1290. mapping = skb_frag_dma_map(&bp->pdev->dev, frag,
  1291. offset, size, DMA_TO_DEVICE);
  1292. if (dma_mapping_error(&bp->pdev->dev, mapping))
  1293. goto dma_error;
  1294. /* Save info to properly release resources */
  1295. tx_skb->skb = NULL;
  1296. tx_skb->mapping = mapping;
  1297. tx_skb->size = size;
  1298. tx_skb->mapped_as_page = true;
  1299. len -= size;
  1300. offset += size;
  1301. count++;
  1302. tx_head++;
  1303. }
  1304. }
  1305. /* Should never happen */
  1306. if (unlikely(!tx_skb)) {
  1307. netdev_err(bp->dev, "BUG! empty skb!\n");
  1308. return 0;
  1309. }
  1310. /* This is the last buffer of the frame: save socket buffer */
  1311. tx_skb->skb = skb;
  1312. /* Update TX ring: update buffer descriptors in reverse order
  1313. * to avoid race condition
  1314. */
  1315. /* Set 'TX_USED' bit in buffer descriptor at tx_head position
  1316. * to set the end of TX queue
  1317. */
  1318. i = tx_head;
  1319. entry = macb_tx_ring_wrap(bp, i);
  1320. ctrl = MACB_BIT(TX_USED);
  1321. desc = macb_tx_desc(queue, entry);
  1322. desc->ctrl = ctrl;
  1323. if (lso_ctrl) {
  1324. if (lso_ctrl == MACB_LSO_UFO_ENABLE)
  1325. /* include header and FCS in value given to h/w */
  1326. mss_mfs = skb_shinfo(skb)->gso_size +
  1327. skb_transport_offset(skb) +
  1328. ETH_FCS_LEN;
  1329. else /* TSO */ {
  1330. mss_mfs = skb_shinfo(skb)->gso_size;
  1331. /* TCP Sequence Number Source Select
  1332. * can be set only for TSO
  1333. */
  1334. seq_ctrl = 0;
  1335. }
  1336. }
  1337. do {
  1338. i--;
  1339. entry = macb_tx_ring_wrap(bp, i);
  1340. tx_skb = &queue->tx_skb[entry];
  1341. desc = macb_tx_desc(queue, entry);
  1342. ctrl = (u32)tx_skb->size;
  1343. if (eof) {
  1344. ctrl |= MACB_BIT(TX_LAST);
  1345. eof = 0;
  1346. }
  1347. if (unlikely(entry == (bp->tx_ring_size - 1)))
  1348. ctrl |= MACB_BIT(TX_WRAP);
  1349. /* First descriptor is header descriptor */
  1350. if (i == queue->tx_head) {
  1351. ctrl |= MACB_BF(TX_LSO, lso_ctrl);
  1352. ctrl |= MACB_BF(TX_TCP_SEQ_SRC, seq_ctrl);
  1353. if ((bp->dev->features & NETIF_F_HW_CSUM) &&
  1354. skb->ip_summed != CHECKSUM_PARTIAL && !lso_ctrl)
  1355. ctrl |= MACB_BIT(TX_NOCRC);
  1356. } else
  1357. /* Only set MSS/MFS on payload descriptors
  1358. * (second or later descriptor)
  1359. */
  1360. ctrl |= MACB_BF(MSS_MFS, mss_mfs);
  1361. /* Set TX buffer descriptor */
  1362. macb_set_addr(bp, desc, tx_skb->mapping);
  1363. /* desc->addr must be visible to hardware before clearing
  1364. * 'TX_USED' bit in desc->ctrl.
  1365. */
  1366. wmb();
  1367. desc->ctrl = ctrl;
  1368. } while (i != queue->tx_head);
  1369. queue->tx_head = tx_head;
  1370. return count;
  1371. dma_error:
  1372. netdev_err(bp->dev, "TX DMA map failed\n");
  1373. for (i = queue->tx_head; i != tx_head; i++) {
  1374. tx_skb = macb_tx_skb(queue, i);
  1375. macb_tx_unmap(bp, tx_skb);
  1376. }
  1377. return 0;
  1378. }
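/* ndo_features_check: drop LSO from the feature set for UFO frames whose
 * payload buffers (all but the last) are not 8-byte aligned, since the
 * hardware cannot segment those.
 */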
  1379. static netdev_features_t macb_features_check(struct sk_buff *skb,
  1380. struct net_device *dev,
  1381. netdev_features_t features)
  1382. {
  1383. unsigned int nr_frags, f;
  1384. unsigned int hdrlen;
  1385. /* Validate LSO compatibility */
1386. /* there is only one buffer, or the protocol is not UDP */
  1387. if (!skb_is_nonlinear(skb) || (ip_hdr(skb)->protocol != IPPROTO_UDP))
  1388. return features;
  1389. /* length of header */
  1390. hdrlen = skb_transport_offset(skb);
  1391. /* For UFO only:
1392. * When software supplies two or more payload buffers, every payload
1393. * buffer except the last must be a multiple of 8 bytes in size.
  1394. */
  1395. if (!IS_ALIGNED(skb_headlen(skb) - hdrlen, MACB_TX_LEN_ALIGN))
  1396. return features & ~MACB_NETIF_LSO;
  1397. nr_frags = skb_shinfo(skb)->nr_frags;
  1398. /* No need to check last fragment */
  1399. nr_frags--;
  1400. for (f = 0; f < nr_frags; f++) {
  1401. const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
  1402. if (!IS_ALIGNED(skb_frag_size(frag), MACB_TX_LEN_ALIGN))
  1403. return features & ~MACB_NETIF_LSO;
  1404. }
  1405. return features;
  1406. }
  1407. static inline int macb_clear_csum(struct sk_buff *skb)
  1408. {
  1409. /* no change for packets without checksum offloading */
  1410. if (skb->ip_summed != CHECKSUM_PARTIAL)
  1411. return 0;
  1412. /* make sure we can modify the header */
  1413. if (unlikely(skb_cow_head(skb, 0)))
  1414. return -1;
1415. /* Initialize the checksum field.
1416. * This is required at least on Zynq, which otherwise computes a wrong
1417. * UDP header checksum for UDP packets with a data length of 2 bytes or less.
  1418. */
  1419. *(__sum16 *)(skb_checksum_start(skb) + skb->csum_offset) = 0;
  1420. return 0;
  1421. }
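/* Pad the frame to the minimum Ethernet length if needed and append a
 * software-computed FCS; macb_tx_map() sets TX_NOCRC for the same frames so
 * the controller does not add its own CRC. Only done when NETIF_F_HW_CSUM is
 * enabled and the skb uses neither checksum offload nor GSO.
 */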
  1422. static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
  1423. {
  1424. bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb) ||
  1425. skb_is_nonlinear(*skb);
  1426. int padlen = ETH_ZLEN - (*skb)->len;
  1427. int headroom = skb_headroom(*skb);
  1428. int tailroom = skb_tailroom(*skb);
  1429. struct sk_buff *nskb;
  1430. u32 fcs;
  1431. if (!(ndev->features & NETIF_F_HW_CSUM) ||
1432. (*skb)->ip_summed == CHECKSUM_PARTIAL ||
  1433. skb_shinfo(*skb)->gso_size) /* Not available for GSO */
  1434. return 0;
  1435. if (padlen <= 0) {
1436. /* FCS can be appended using the existing tailroom. */
  1437. if (tailroom >= ETH_FCS_LEN)
  1438. goto add_fcs;
1439. /* FCS can be appended after moving the data into the headroom. */
  1440. else if (!cloned && headroom + tailroom >= ETH_FCS_LEN)
  1441. padlen = 0;
  1442. /* No room for FCS, need to reallocate skb. */
  1443. else
  1444. padlen = ETH_FCS_LEN;
  1445. } else {
  1446. /* Add room for FCS. */
  1447. padlen += ETH_FCS_LEN;
  1448. }
  1449. if (!cloned && headroom + tailroom >= padlen) {
  1450. (*skb)->data = memmove((*skb)->head, (*skb)->data, (*skb)->len);
  1451. skb_set_tail_pointer(*skb, (*skb)->len);
  1452. } else {
  1453. nskb = skb_copy_expand(*skb, 0, padlen, GFP_ATOMIC);
  1454. if (!nskb)
  1455. return -ENOMEM;
  1456. dev_consume_skb_any(*skb);
  1457. *skb = nskb;
  1458. }
  1459. if (padlen > ETH_FCS_LEN)
  1460. skb_put_zero(*skb, padlen - ETH_FCS_LEN);
  1461. add_fcs:
1462. /* append the computed FCS to the packet */
  1463. fcs = crc32_le(~0, (*skb)->data, (*skb)->len);
  1464. fcs = ~fcs;
  1465. skb_put_u8(*skb, fcs & 0xff);
  1466. skb_put_u8(*skb, (fcs >> 8) & 0xff);
  1467. skb_put_u8(*skb, (fcs >> 16) & 0xff);
  1468. skb_put_u8(*skb, (fcs >> 24) & 0xff);
  1469. return 0;
  1470. }
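/* ndo_start_xmit: clear/compute checksums and FCS as needed, count the
 * descriptors the frame will consume, and only hand it to macb_tx_map() if
 * the TX ring has room; otherwise stop the subqueue and return BUSY.
 */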
  1471. static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
  1472. {
  1473. u16 queue_index = skb_get_queue_mapping(skb);
  1474. struct macb *bp = netdev_priv(dev);
  1475. struct macb_queue *queue = &bp->queues[queue_index];
  1476. unsigned long flags;
  1477. unsigned int desc_cnt, nr_frags, frag_size, f;
  1478. unsigned int hdrlen;
1479. bool is_lso, is_udp = false;
  1480. netdev_tx_t ret = NETDEV_TX_OK;
  1481. if (macb_clear_csum(skb)) {
  1482. dev_kfree_skb_any(skb);
  1483. return ret;
  1484. }
  1485. if (macb_pad_and_fcs(&skb, dev)) {
  1486. dev_kfree_skb_any(skb);
  1487. return ret;
  1488. }
  1489. is_lso = (skb_shinfo(skb)->gso_size != 0);
  1490. if (is_lso) {
  1491. is_udp = !!(ip_hdr(skb)->protocol == IPPROTO_UDP);
  1492. /* length of headers */
  1493. if (is_udp)
  1494. /* only queue eth + ip headers separately for UDP */
  1495. hdrlen = skb_transport_offset(skb);
  1496. else
  1497. hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
  1498. if (skb_headlen(skb) < hdrlen) {
  1499. netdev_err(bp->dev, "Error - LSO headers fragmented!!!\n");
1500. /* if this is ever required, the headers would need to be copied into a single buffer */
  1501. return NETDEV_TX_BUSY;
  1502. }
  1503. } else
  1504. hdrlen = min(skb_headlen(skb), bp->max_tx_length);
  1505. #if defined(DEBUG) && defined(VERBOSE_DEBUG)
  1506. netdev_vdbg(bp->dev,
  1507. "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n",
  1508. queue_index, skb->len, skb->head, skb->data,
  1509. skb_tail_pointer(skb), skb_end_pointer(skb));
  1510. print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
  1511. skb->data, 16, true);
  1512. #endif
  1513. /* Count how many TX buffer descriptors are needed to send this
  1514. * socket buffer: skb fragments of jumbo frames may need to be
  1515. * split into many buffer descriptors.
  1516. */
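/* A worked example, assuming a hypothetical max_tx_length of 4096 bytes: a
 * non-LSO frame with a 9000-byte linear area and no fragments needs
 * DIV_ROUND_UP(9000, 4096) = 3 descriptors.
 */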
  1517. if (is_lso && (skb_headlen(skb) > hdrlen))
  1518. /* extra header descriptor if also payload in first buffer */
  1519. desc_cnt = DIV_ROUND_UP((skb_headlen(skb) - hdrlen), bp->max_tx_length) + 1;
  1520. else
  1521. desc_cnt = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length);
  1522. nr_frags = skb_shinfo(skb)->nr_frags;
  1523. for (f = 0; f < nr_frags; f++) {
  1524. frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
  1525. desc_cnt += DIV_ROUND_UP(frag_size, bp->max_tx_length);
  1526. }
  1527. spin_lock_irqsave(&bp->lock, flags);
  1528. /* This is a hard error, log it. */
  1529. if (CIRC_SPACE(queue->tx_head, queue->tx_tail,
  1530. bp->tx_ring_size) < desc_cnt) {
  1531. netif_stop_subqueue(dev, queue_index);
  1532. spin_unlock_irqrestore(&bp->lock, flags);
  1533. netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
  1534. queue->tx_head, queue->tx_tail);
  1535. return NETDEV_TX_BUSY;
  1536. }
  1537. /* Map socket buffer for DMA transfer */
  1538. if (!macb_tx_map(bp, queue, skb, hdrlen)) {
  1539. dev_kfree_skb_any(skb);
  1540. goto unlock;
  1541. }
  1542. /* Make newly initialized descriptor visible to hardware */
  1543. wmb();
  1544. skb_tx_timestamp(skb);
  1545. macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
  1546. if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1)
  1547. netif_stop_subqueue(dev, queue_index);
  1548. unlock:
  1549. spin_unlock_irqrestore(&bp->lock, flags);
  1550. return ret;
  1551. }
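/* Pick the RX buffer size: plain MACB uses the fixed MACB_RX_BUFFER_SIZE,
 * while GEM accepts the MTU-derived size rounded up to a multiple of
 * RX_BUFFER_MULTIPLE.
 */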
  1552. static void macb_init_rx_buffer_size(struct macb *bp, size_t size)
  1553. {
  1554. if (!macb_is_gem(bp)) {
  1555. bp->rx_buffer_size = MACB_RX_BUFFER_SIZE;
  1556. } else {
  1557. bp->rx_buffer_size = size;
  1558. if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) {
  1559. netdev_dbg(bp->dev,
  1560. "RX buffer must be multiple of %d bytes, expanding\n",
  1561. RX_BUFFER_MULTIPLE);
  1562. bp->rx_buffer_size =
  1563. roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE);
  1564. }
  1565. }
  1566. netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%zu]\n",
  1567. bp->dev->mtu, bp->rx_buffer_size);
  1568. }
  1569. static void gem_free_rx_buffers(struct macb *bp)
  1570. {
  1571. struct sk_buff *skb;
  1572. struct macb_dma_desc *desc;
  1573. struct macb_queue *queue;
  1574. dma_addr_t addr;
  1575. unsigned int q;
  1576. int i;
  1577. for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
  1578. if (!queue->rx_skbuff)
  1579. continue;
  1580. for (i = 0; i < bp->rx_ring_size; i++) {
  1581. skb = queue->rx_skbuff[i];
  1582. if (!skb)
  1583. continue;
  1584. desc = macb_rx_desc(queue, i);
  1585. addr = macb_get_addr(bp, desc);
  1586. dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
  1587. DMA_FROM_DEVICE);
  1588. dev_kfree_skb_any(skb);
  1589. skb = NULL;
  1590. }
  1591. kfree(queue->rx_skbuff);
  1592. queue->rx_skbuff = NULL;
  1593. }
  1594. }
  1595. static void macb_free_rx_buffers(struct macb *bp)
  1596. {
  1597. struct macb_queue *queue = &bp->queues[0];
  1598. if (queue->rx_buffers) {
  1599. dma_free_coherent(&bp->pdev->dev,
  1600. bp->rx_ring_size * bp->rx_buffer_size,
  1601. queue->rx_buffers, queue->rx_buffers_dma);
  1602. queue->rx_buffers = NULL;
  1603. }
  1604. }
  1605. static void macb_free_consistent(struct macb *bp)
  1606. {
  1607. struct macb_queue *queue;
  1608. unsigned int q;
  1609. int size;
  1610. bp->macbgem_ops.mog_free_rx_buffers(bp);
  1611. for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
  1612. kfree(queue->tx_skb);
  1613. queue->tx_skb = NULL;
  1614. if (queue->tx_ring) {
  1615. size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
  1616. dma_free_coherent(&bp->pdev->dev, size,
  1617. queue->tx_ring, queue->tx_ring_dma);
  1618. queue->tx_ring = NULL;
  1619. }
  1620. if (queue->rx_ring) {
  1621. size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
  1622. dma_free_coherent(&bp->pdev->dev, size,
  1623. queue->rx_ring, queue->rx_ring_dma);
  1624. queue->rx_ring = NULL;
  1625. }
  1626. }
  1627. }
  1628. static int gem_alloc_rx_buffers(struct macb *bp)
  1629. {
  1630. struct macb_queue *queue;
  1631. unsigned int q;
  1632. int size;
  1633. for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
  1634. size = bp->rx_ring_size * sizeof(struct sk_buff *);
  1635. queue->rx_skbuff = kzalloc(size, GFP_KERNEL);
  1636. if (!queue->rx_skbuff)
  1637. return -ENOMEM;
  1638. else
  1639. netdev_dbg(bp->dev,
  1640. "Allocated %d RX struct sk_buff entries at %p\n",
  1641. bp->rx_ring_size, queue->rx_skbuff);
  1642. }
  1643. return 0;
  1644. }
  1645. static int macb_alloc_rx_buffers(struct macb *bp)
  1646. {
  1647. struct macb_queue *queue = &bp->queues[0];
  1648. int size;
  1649. size = bp->rx_ring_size * bp->rx_buffer_size;
  1650. queue->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
  1651. &queue->rx_buffers_dma, GFP_KERNEL);
  1652. if (!queue->rx_buffers)
  1653. return -ENOMEM;
  1654. netdev_dbg(bp->dev,
  1655. "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
  1656. size, (unsigned long)queue->rx_buffers_dma, queue->rx_buffers);
  1657. return 0;
  1658. }
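/* Allocate the DMA-coherent TX/RX descriptor rings (plus any descriptor
 * prefetch slack the hardware needs) and the per-queue tx_skb bookkeeping
 * array, then delegate RX buffer allocation to the MACB/GEM-specific hook.
 */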
  1659. static int macb_alloc_consistent(struct macb *bp)
  1660. {
  1661. struct macb_queue *queue;
  1662. unsigned int q;
  1663. int size;
  1664. for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
  1665. size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
  1666. queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
  1667. &queue->tx_ring_dma,
  1668. GFP_KERNEL);
  1669. if (!queue->tx_ring)
  1670. goto out_err;
  1671. netdev_dbg(bp->dev,
  1672. "Allocated TX ring for queue %u of %d bytes at %08lx (mapped %p)\n",
  1673. q, size, (unsigned long)queue->tx_ring_dma,
  1674. queue->tx_ring);
  1675. size = bp->tx_ring_size * sizeof(struct macb_tx_skb);
  1676. queue->tx_skb = kmalloc(size, GFP_KERNEL);
  1677. if (!queue->tx_skb)
  1678. goto out_err;
  1679. size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
  1680. queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
  1681. &queue->rx_ring_dma, GFP_KERNEL);
  1682. if (!queue->rx_ring)
  1683. goto out_err;
  1684. netdev_dbg(bp->dev,
  1685. "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
  1686. size, (unsigned long)queue->rx_ring_dma, queue->rx_ring);
  1687. }
  1688. if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
  1689. goto out_err;
  1690. return 0;
  1691. out_err:
  1692. macb_free_consistent(bp);
  1693. return -ENOMEM;
  1694. }
  1695. static void gem_init_rings(struct macb *bp)
  1696. {
  1697. struct macb_queue *queue;
  1698. struct macb_dma_desc *desc = NULL;
  1699. unsigned int q;
  1700. int i;
  1701. for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
  1702. for (i = 0; i < bp->tx_ring_size; i++) {
  1703. desc = macb_tx_desc(queue, i);
  1704. macb_set_addr(bp, desc, 0);
  1705. desc->ctrl = MACB_BIT(TX_USED);
  1706. }
  1707. desc->ctrl |= MACB_BIT(TX_WRAP);
  1708. queue->tx_head = 0;
  1709. queue->tx_tail = 0;
  1710. queue->rx_tail = 0;
  1711. queue->rx_prepared_head = 0;
  1712. gem_rx_refill(queue);
  1713. }
  1714. }
  1715. static void macb_init_rings(struct macb *bp)
  1716. {
  1717. int i;
  1718. struct macb_dma_desc *desc = NULL;
  1719. macb_init_rx_ring(&bp->queues[0]);
  1720. for (i = 0; i < bp->tx_ring_size; i++) {
  1721. desc = macb_tx_desc(&bp->queues[0], i);
  1722. macb_set_addr(bp, desc, 0);
  1723. desc->ctrl = MACB_BIT(TX_USED);
  1724. }
  1725. bp->queues[0].tx_head = 0;
  1726. bp->queues[0].tx_tail = 0;
  1727. desc->ctrl |= MACB_BIT(TX_WRAP);
  1728. }
  1729. static void macb_reset_hw(struct macb *bp)
  1730. {
  1731. struct macb_queue *queue;
  1732. unsigned int q;
  1733. u32 ctrl = macb_readl(bp, NCR);
  1734. /* Disable RX and TX (XXX: Should we halt the transmission
  1735. * more gracefully?)
  1736. */
  1737. ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE));
  1738. /* Clear the stats registers (XXX: Update stats first?) */
  1739. ctrl |= MACB_BIT(CLRSTAT);
  1740. macb_writel(bp, NCR, ctrl);
  1741. /* Clear all status flags */
  1742. macb_writel(bp, TSR, -1);
  1743. macb_writel(bp, RSR, -1);
  1744. /* Disable all interrupts */
  1745. for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
  1746. queue_writel(queue, IDR, -1);
  1747. queue_readl(queue, ISR);
  1748. if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
  1749. queue_writel(queue, ISR, -1);
  1750. }
  1751. }
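/* Derive the MDC clock divider from the pclk rate so MDC stays at or below
 * the usual 2.5 MHz MDIO limit. For example, a 100 MHz pclk falls in the
 * <= 120 MHz bracket and selects divide-by-48, i.e. roughly 2.08 MHz.
 */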
  1752. static u32 gem_mdc_clk_div(struct macb *bp)
  1753. {
  1754. u32 config;
  1755. unsigned long pclk_hz = clk_get_rate(bp->pclk);
  1756. if (pclk_hz <= 20000000)
  1757. config = GEM_BF(CLK, GEM_CLK_DIV8);
  1758. else if (pclk_hz <= 40000000)
  1759. config = GEM_BF(CLK, GEM_CLK_DIV16);
  1760. else if (pclk_hz <= 80000000)
  1761. config = GEM_BF(CLK, GEM_CLK_DIV32);
  1762. else if (pclk_hz <= 120000000)
  1763. config = GEM_BF(CLK, GEM_CLK_DIV48);
  1764. else if (pclk_hz <= 160000000)
  1765. config = GEM_BF(CLK, GEM_CLK_DIV64);
  1766. else
  1767. config = GEM_BF(CLK, GEM_CLK_DIV96);
  1768. return config;
  1769. }
  1770. static u32 macb_mdc_clk_div(struct macb *bp)
  1771. {
  1772. u32 config;
  1773. unsigned long pclk_hz;
  1774. if (macb_is_gem(bp))
  1775. return gem_mdc_clk_div(bp);
  1776. pclk_hz = clk_get_rate(bp->pclk);
  1777. if (pclk_hz <= 20000000)
  1778. config = MACB_BF(CLK, MACB_CLK_DIV8);
  1779. else if (pclk_hz <= 40000000)
  1780. config = MACB_BF(CLK, MACB_CLK_DIV16);
  1781. else if (pclk_hz <= 80000000)
  1782. config = MACB_BF(CLK, MACB_CLK_DIV32);
  1783. else
  1784. config = MACB_BF(CLK, MACB_CLK_DIV64);
  1785. return config;
  1786. }
  1787. /* Get the DMA bus width field of the network configuration register that we
  1788. * should program. We find the width from decoding the design configuration
  1789. * register to find the maximum supported data bus width.
  1790. */
  1791. static u32 macb_dbw(struct macb *bp)
  1792. {
  1793. if (!macb_is_gem(bp))
  1794. return 0;
  1795. switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) {
  1796. case 4:
  1797. return GEM_BF(DBW, GEM_DBW128);
  1798. case 2:
  1799. return GEM_BF(DBW, GEM_DBW64);
  1800. case 1:
  1801. default:
  1802. return GEM_BF(DBW, GEM_DBW32);
  1803. }
  1804. }
  1805. /* Configure the receive DMA engine
  1806. * - use the correct receive buffer size
  1807. * - set best burst length for DMA operations
  1808. * (if not supported by FIFO, it will fallback to default)
  1809. * - set both rx/tx packet buffers to full memory size
  1810. * These are configurable parameters for GEM.
  1811. */
  1812. static void macb_configure_dma(struct macb *bp)
  1813. {
  1814. struct macb_queue *queue;
  1815. u32 buffer_size;
  1816. unsigned int q;
  1817. u32 dmacfg;
  1818. buffer_size = bp->rx_buffer_size / RX_BUFFER_MULTIPLE;
  1819. if (macb_is_gem(bp)) {
  1820. dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
  1821. for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
  1822. if (q)
  1823. queue_writel(queue, RBQS, buffer_size);
  1824. else
  1825. dmacfg |= GEM_BF(RXBS, buffer_size);
  1826. }
  1827. if (bp->dma_burst_length)
  1828. dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg);
  1829. dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
  1830. dmacfg &= ~GEM_BIT(ENDIA_PKT);
  1831. if (bp->native_io)
  1832. dmacfg &= ~GEM_BIT(ENDIA_DESC);
  1833. else
  1834. dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */
  1835. if (bp->dev->features & NETIF_F_HW_CSUM)
  1836. dmacfg |= GEM_BIT(TXCOEN);
  1837. else
  1838. dmacfg &= ~GEM_BIT(TXCOEN);
  1839. dmacfg &= ~GEM_BIT(ADDR64);
  1840. #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
  1841. if (bp->hw_dma_cap & HW_DMA_CAP_64B)
  1842. dmacfg |= GEM_BIT(ADDR64);
  1843. #endif
  1844. #ifdef CONFIG_MACB_USE_HWSTAMP
  1845. if (bp->hw_dma_cap & HW_DMA_CAP_PTP)
  1846. dmacfg |= GEM_BIT(RXEXT) | GEM_BIT(TXEXT);
  1847. #endif
  1848. netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
  1849. dmacfg);
  1850. gem_writel(bp, DMACFG, dmacfg);
  1851. }
  1852. }
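/* Bring the controller to a known state: reset, program the MAC address and
 * NCFGR (MDC divider, pause, jumbo/oversize, promiscuous, RX checksum, bus
 * width), configure DMA and the per-queue ring pointers, then enable the
 * interrupts and finally RX/TX.
 */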
  1853. static void macb_init_hw(struct macb *bp)
  1854. {
  1855. struct macb_queue *queue;
  1856. unsigned int q;
  1857. u32 config;
  1858. macb_reset_hw(bp);
  1859. macb_set_hwaddr(bp);
  1860. config = macb_mdc_clk_div(bp);
  1861. if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
  1862. config |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
  1863. config |= MACB_BF(RBOF, NET_IP_ALIGN); /* Make eth data aligned */
1864. config |= MACB_BIT(PAE); /* Pause Enable */
  1865. config |= MACB_BIT(DRFCS); /* Discard Rx FCS */
  1866. if (bp->caps & MACB_CAPS_JUMBO)
  1867. config |= MACB_BIT(JFRAME); /* Enable jumbo frames */
  1868. else
  1869. config |= MACB_BIT(BIG); /* Receive oversized frames */
  1870. if (bp->dev->flags & IFF_PROMISC)
  1871. config |= MACB_BIT(CAF); /* Copy All Frames */
  1872. else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM)
  1873. config |= GEM_BIT(RXCOEN);
  1874. if (!(bp->dev->flags & IFF_BROADCAST))
  1875. config |= MACB_BIT(NBC); /* No BroadCast */
  1876. config |= macb_dbw(bp);
  1877. macb_writel(bp, NCFGR, config);
  1878. if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len)
  1879. gem_writel(bp, JML, bp->jumbo_max_len);
  1880. bp->speed = SPEED_10;
  1881. bp->duplex = DUPLEX_HALF;
  1882. bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK;
  1883. if (bp->caps & MACB_CAPS_JUMBO)
  1884. bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK;
  1885. macb_configure_dma(bp);
  1886. /* Initialize TX and RX buffers */
  1887. for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
  1888. queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
  1889. #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
  1890. if (bp->hw_dma_cap & HW_DMA_CAP_64B)
  1891. queue_writel(queue, RBQPH, upper_32_bits(queue->rx_ring_dma));
  1892. #endif
  1893. queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
  1894. #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
  1895. if (bp->hw_dma_cap & HW_DMA_CAP_64B)
  1896. queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
  1897. #endif
  1898. /* Enable interrupts */
  1899. queue_writel(queue, IER,
  1900. bp->rx_intr_mask |
  1901. MACB_TX_INT_FLAGS |
  1902. MACB_BIT(HRESP));
  1903. }
  1904. /* Enable TX and RX */
  1905. macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE));
  1906. }
  1907. /* The hash address register is 64 bits long and takes up two
  1908. * locations in the memory map. The least significant bits are stored
  1909. * in EMAC_HSL and the most significant bits in EMAC_HSH.
  1910. *
  1911. * The unicast hash enable and the multicast hash enable bits in the
  1912. * network configuration register enable the reception of hash matched
  1913. * frames. The destination address is reduced to a 6 bit index into
  1914. * the 64 bit hash register using the following hash function. The
  1915. * hash function is an exclusive or of every sixth bit of the
  1916. * destination address.
  1917. *
  1918. * hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47]
  1919. * hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46]
  1920. * hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45]
  1921. * hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44]
  1922. * hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43]
  1923. * hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42]
  1924. *
  1925. * da[0] represents the least significant bit of the first byte
  1926. * received, that is, the multicast/unicast indicator, and da[47]
  1927. * represents the most significant bit of the last byte received. If
  1928. * the hash index, hi[n], points to a bit that is set in the hash
  1929. * register then the frame will be matched according to whether the
  1930. * frame is multicast or unicast. A multicast match will be signalled
  1931. * if the multicast hash enable bit is set, da[0] is 1 and the hash
  1932. * index points to a bit set in the hash register. A unicast match
  1933. * will be signalled if the unicast hash enable bit is set, da[0] is 0
  1934. * and the hash index points to a bit set in the hash register. To
  1935. * receive all multicast frames, the hash register should be set with
  1936. * all ones and the multicast hash enable bit should be set in the
  1937. * network configuration register.
  1938. */
  1939. static inline int hash_bit_value(int bitnr, __u8 *addr)
  1940. {
  1941. if (addr[bitnr / 8] & (1 << (bitnr % 8)))
  1942. return 1;
  1943. return 0;
  1944. }
  1945. /* Return the hash index value for the specified address. */
  1946. static int hash_get_index(__u8 *addr)
  1947. {
  1948. int i, j, bitval;
  1949. int hash_index = 0;
  1950. for (j = 0; j < 6; j++) {
  1951. for (i = 0, bitval = 0; i < 8; i++)
  1952. bitval ^= hash_bit_value(i * 6 + j, addr);
  1953. hash_index |= (bitval << j);
  1954. }
  1955. return hash_index;
  1956. }
  1957. /* Add multicast addresses to the internal multicast-hash table. */
  1958. static void macb_sethashtable(struct net_device *dev)
  1959. {
  1960. struct netdev_hw_addr *ha;
  1961. unsigned long mc_filter[2];
  1962. unsigned int bitnr;
  1963. struct macb *bp = netdev_priv(dev);
  1964. mc_filter[0] = 0;
  1965. mc_filter[1] = 0;
  1966. netdev_for_each_mc_addr(ha, dev) {
  1967. bitnr = hash_get_index(ha->addr);
  1968. mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
  1969. }
  1970. macb_or_gem_writel(bp, HRB, mc_filter[0]);
  1971. macb_or_gem_writel(bp, HRT, mc_filter[1]);
  1972. }
  1973. /* Enable/Disable promiscuous and multicast modes. */
  1974. static void macb_set_rx_mode(struct net_device *dev)
  1975. {
  1976. unsigned long cfg;
  1977. struct macb *bp = netdev_priv(dev);
  1978. cfg = macb_readl(bp, NCFGR);
  1979. if (dev->flags & IFF_PROMISC) {
  1980. /* Enable promiscuous mode */
  1981. cfg |= MACB_BIT(CAF);
  1982. /* Disable RX checksum offload */
  1983. if (macb_is_gem(bp))
  1984. cfg &= ~GEM_BIT(RXCOEN);
  1985. } else {
  1986. /* Disable promiscuous mode */
  1987. cfg &= ~MACB_BIT(CAF);
  1988. /* Enable RX checksum offload only if requested */
  1989. if (macb_is_gem(bp) && dev->features & NETIF_F_RXCSUM)
  1990. cfg |= GEM_BIT(RXCOEN);
  1991. }
  1992. if (dev->flags & IFF_ALLMULTI) {
  1993. /* Enable all multicast mode */
  1994. macb_or_gem_writel(bp, HRB, -1);
  1995. macb_or_gem_writel(bp, HRT, -1);
  1996. cfg |= MACB_BIT(NCFGR_MTI);
  1997. } else if (!netdev_mc_empty(dev)) {
  1998. /* Enable specific multicasts */
  1999. macb_sethashtable(dev);
  2000. cfg |= MACB_BIT(NCFGR_MTI);
  2001. } else if (dev->flags & (~IFF_ALLMULTI)) {
  2002. /* Disable all multicast mode */
  2003. macb_or_gem_writel(bp, HRB, 0);
  2004. macb_or_gem_writel(bp, HRT, 0);
  2005. cfg &= ~MACB_BIT(NCFGR_MTI);
  2006. }
  2007. macb_writel(bp, NCFGR, cfg);
  2008. }
  2009. static int macb_open(struct net_device *dev)
  2010. {
  2011. struct macb *bp = netdev_priv(dev);
  2012. size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN;
  2013. struct macb_queue *queue;
  2014. unsigned int q;
  2015. int err;
  2016. netdev_dbg(bp->dev, "open\n");
  2017. err = pm_runtime_get_sync(&bp->pdev->dev);
  2018. if (err < 0)
  2019. goto pm_exit;
  2020. /* carrier starts down */
  2021. netif_carrier_off(dev);
2022. /* if the PHY is not yet registered, retry later */
  2023. if (!dev->phydev) {
  2024. err = -EAGAIN;
  2025. goto pm_exit;
  2026. }
  2027. /* RX buffers initialization */
  2028. macb_init_rx_buffer_size(bp, bufsz);
  2029. err = macb_alloc_consistent(bp);
  2030. if (err) {
  2031. netdev_err(dev, "Unable to allocate DMA memory (error %d)\n",
  2032. err);
  2033. goto pm_exit;
  2034. }
  2035. for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
  2036. napi_enable(&queue->napi);
  2037. bp->macbgem_ops.mog_init_rings(bp);
  2038. macb_init_hw(bp);
  2039. /* schedule a link state check */
  2040. phy_start(dev->phydev);
  2041. netif_tx_start_all_queues(dev);
  2042. if (bp->ptp_info)
  2043. bp->ptp_info->ptp_init(dev);
  2044. pm_exit:
  2045. if (err) {
  2046. pm_runtime_put_sync(&bp->pdev->dev);
  2047. return err;
  2048. }
  2049. return 0;
  2050. }
  2051. static int macb_close(struct net_device *dev)
  2052. {
  2053. struct macb *bp = netdev_priv(dev);
  2054. struct macb_queue *queue;
  2055. unsigned long flags;
  2056. unsigned int q;
  2057. netif_tx_stop_all_queues(dev);
  2058. for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
  2059. napi_disable(&queue->napi);
  2060. if (dev->phydev)
  2061. phy_stop(dev->phydev);
  2062. spin_lock_irqsave(&bp->lock, flags);
  2063. macb_reset_hw(bp);
  2064. netif_carrier_off(dev);
  2065. spin_unlock_irqrestore(&bp->lock, flags);
  2066. macb_free_consistent(bp);
  2067. if (bp->ptp_info)
  2068. bp->ptp_info->ptp_remove(dev);
  2069. pm_runtime_put(&bp->pdev->dev);
  2070. return 0;
  2071. }
  2072. static int macb_change_mtu(struct net_device *dev, int new_mtu)
  2073. {
  2074. if (netif_running(dev))
  2075. return -EBUSY;
  2076. dev->mtu = new_mtu;
  2077. return 0;
  2078. }
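/* Fold the GEM hardware counters into the 64-bit software statistics. The
 * octet counters are 64 bits wide in hardware and split across two
 * registers, so the high word at offset + 4 is folded in separately.
 */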
  2079. static void gem_update_stats(struct macb *bp)
  2080. {
  2081. struct macb_queue *queue;
  2082. unsigned int i, q, idx;
  2083. unsigned long *stat;
  2084. u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
  2085. for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
  2086. u32 offset = gem_statistics[i].offset;
  2087. u64 val = bp->macb_reg_readl(bp, offset);
  2088. bp->ethtool_stats[i] += val;
  2089. *p += val;
  2090. if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) {
  2091. /* Add GEM_OCTTXH, GEM_OCTRXH */
  2092. val = bp->macb_reg_readl(bp, offset + 4);
  2093. bp->ethtool_stats[i] += ((u64)val) << 32;
  2094. *(++p) += val;
  2095. }
  2096. }
  2097. idx = GEM_STATS_LEN;
  2098. for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
  2099. for (i = 0, stat = &queue->stats.first; i < QUEUE_STATS_LEN; ++i, ++stat)
  2100. bp->ethtool_stats[idx++] = *stat;
  2101. }
  2102. static struct net_device_stats *gem_get_stats(struct macb *bp)
  2103. {
  2104. struct gem_stats *hwstat = &bp->hw_stats.gem;
  2105. struct net_device_stats *nstat = &bp->dev->stats;
  2106. if (!netif_running(bp->dev))
  2107. return nstat;
  2108. gem_update_stats(bp);
  2109. nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
  2110. hwstat->rx_alignment_errors +
  2111. hwstat->rx_resource_errors +
  2112. hwstat->rx_overruns +
  2113. hwstat->rx_oversize_frames +
  2114. hwstat->rx_jabbers +
  2115. hwstat->rx_undersized_frames +
  2116. hwstat->rx_length_field_frame_errors);
  2117. nstat->tx_errors = (hwstat->tx_late_collisions +
  2118. hwstat->tx_excessive_collisions +
  2119. hwstat->tx_underrun +
  2120. hwstat->tx_carrier_sense_errors);
  2121. nstat->multicast = hwstat->rx_multicast_frames;
  2122. nstat->collisions = (hwstat->tx_single_collision_frames +
  2123. hwstat->tx_multiple_collision_frames +
  2124. hwstat->tx_excessive_collisions);
  2125. nstat->rx_length_errors = (hwstat->rx_oversize_frames +
  2126. hwstat->rx_jabbers +
  2127. hwstat->rx_undersized_frames +
  2128. hwstat->rx_length_field_frame_errors);
  2129. nstat->rx_over_errors = hwstat->rx_resource_errors;
  2130. nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors;
  2131. nstat->rx_frame_errors = hwstat->rx_alignment_errors;
  2132. nstat->rx_fifo_errors = hwstat->rx_overruns;
  2133. nstat->tx_aborted_errors = hwstat->tx_excessive_collisions;
  2134. nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
  2135. nstat->tx_fifo_errors = hwstat->tx_underrun;
  2136. return nstat;
  2137. }
  2138. static void gem_get_ethtool_stats(struct net_device *dev,
  2139. struct ethtool_stats *stats, u64 *data)
  2140. {
  2141. struct macb *bp;
  2142. bp = netdev_priv(dev);
  2143. gem_update_stats(bp);
  2144. memcpy(data, &bp->ethtool_stats, sizeof(u64)
  2145. * (GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES));
  2146. }
  2147. static int gem_get_sset_count(struct net_device *dev, int sset)
  2148. {
  2149. struct macb *bp = netdev_priv(dev);
  2150. switch (sset) {
  2151. case ETH_SS_STATS:
  2152. return GEM_STATS_LEN + bp->num_queues * QUEUE_STATS_LEN;
  2153. default:
  2154. return -EOPNOTSUPP;
  2155. }
  2156. }
  2157. static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
  2158. {
  2159. char stat_string[ETH_GSTRING_LEN];
  2160. struct macb *bp = netdev_priv(dev);
  2161. struct macb_queue *queue;
  2162. unsigned int i;
  2163. unsigned int q;
  2164. switch (sset) {
  2165. case ETH_SS_STATS:
  2166. for (i = 0; i < GEM_STATS_LEN; i++, p += ETH_GSTRING_LEN)
  2167. memcpy(p, gem_statistics[i].stat_string,
  2168. ETH_GSTRING_LEN);
  2169. for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
  2170. for (i = 0; i < QUEUE_STATS_LEN; i++, p += ETH_GSTRING_LEN) {
  2171. snprintf(stat_string, ETH_GSTRING_LEN, "q%d_%s",
  2172. q, queue_statistics[i].stat_string);
  2173. memcpy(p, stat_string, ETH_GSTRING_LEN);
  2174. }
  2175. }
  2176. break;
  2177. }
  2178. }
  2179. static struct net_device_stats *macb_get_stats(struct net_device *dev)
  2180. {
  2181. struct macb *bp = netdev_priv(dev);
  2182. struct net_device_stats *nstat = &bp->dev->stats;
  2183. struct macb_stats *hwstat = &bp->hw_stats.macb;
  2184. if (macb_is_gem(bp))
  2185. return gem_get_stats(bp);
  2186. /* read stats from hardware */
  2187. macb_update_stats(bp);
  2188. /* Convert HW stats into netdevice stats */
  2189. nstat->rx_errors = (hwstat->rx_fcs_errors +
  2190. hwstat->rx_align_errors +
  2191. hwstat->rx_resource_errors +
  2192. hwstat->rx_overruns +
  2193. hwstat->rx_oversize_pkts +
  2194. hwstat->rx_jabbers +
  2195. hwstat->rx_undersize_pkts +
  2196. hwstat->rx_length_mismatch);
  2197. nstat->tx_errors = (hwstat->tx_late_cols +
  2198. hwstat->tx_excessive_cols +
  2199. hwstat->tx_underruns +
  2200. hwstat->tx_carrier_errors +
  2201. hwstat->sqe_test_errors);
  2202. nstat->collisions = (hwstat->tx_single_cols +
  2203. hwstat->tx_multiple_cols +
  2204. hwstat->tx_excessive_cols);
  2205. nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
  2206. hwstat->rx_jabbers +
  2207. hwstat->rx_undersize_pkts +
  2208. hwstat->rx_length_mismatch);
  2209. nstat->rx_over_errors = hwstat->rx_resource_errors +
  2210. hwstat->rx_overruns;
  2211. nstat->rx_crc_errors = hwstat->rx_fcs_errors;
  2212. nstat->rx_frame_errors = hwstat->rx_align_errors;
  2213. nstat->rx_fifo_errors = hwstat->rx_overruns;
  2214. /* XXX: What does "missed" mean? */
  2215. nstat->tx_aborted_errors = hwstat->tx_excessive_cols;
  2216. nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
  2217. nstat->tx_fifo_errors = hwstat->tx_underruns;
  2218. /* Don't know about heartbeat or window errors... */
  2219. return nstat;
  2220. }
  2221. static int macb_get_regs_len(struct net_device *netdev)
  2222. {
  2223. return MACB_GREGS_NBR * sizeof(u32);
  2224. }
  2225. static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
  2226. void *p)
  2227. {
  2228. struct macb *bp = netdev_priv(dev);
  2229. unsigned int tail, head;
  2230. u32 *regs_buff = p;
  2231. regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1))
  2232. | MACB_GREGS_VERSION;
  2233. tail = macb_tx_ring_wrap(bp, bp->queues[0].tx_tail);
  2234. head = macb_tx_ring_wrap(bp, bp->queues[0].tx_head);
  2235. regs_buff[0] = macb_readl(bp, NCR);
  2236. regs_buff[1] = macb_or_gem_readl(bp, NCFGR);
  2237. regs_buff[2] = macb_readl(bp, NSR);
  2238. regs_buff[3] = macb_readl(bp, TSR);
  2239. regs_buff[4] = macb_readl(bp, RBQP);
  2240. regs_buff[5] = macb_readl(bp, TBQP);
  2241. regs_buff[6] = macb_readl(bp, RSR);
  2242. regs_buff[7] = macb_readl(bp, IMR);
  2243. regs_buff[8] = tail;
  2244. regs_buff[9] = head;
  2245. regs_buff[10] = macb_tx_dma(&bp->queues[0], tail);
  2246. regs_buff[11] = macb_tx_dma(&bp->queues[0], head);
  2247. if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
  2248. regs_buff[12] = macb_or_gem_readl(bp, USRIO);
  2249. if (macb_is_gem(bp))
  2250. regs_buff[13] = gem_readl(bp, DMACFG);
  2251. }
  2252. static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
  2253. {
  2254. struct macb *bp = netdev_priv(netdev);
  2255. wol->supported = 0;
  2256. wol->wolopts = 0;
  2257. if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) {
  2258. wol->supported = WAKE_MAGIC;
  2259. if (bp->wol & MACB_WOL_ENABLED)
  2260. wol->wolopts |= WAKE_MAGIC;
  2261. }
  2262. }
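/* Only magic-packet wake-up is supported, and only when the platform
 * advertises it; userspace can request it with e.g. "ethtool -s <iface> wol g".
 */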
  2263. static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
  2264. {
  2265. struct macb *bp = netdev_priv(netdev);
  2266. if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) ||
  2267. (wol->wolopts & ~WAKE_MAGIC))
  2268. return -EOPNOTSUPP;
  2269. if (wol->wolopts & WAKE_MAGIC)
  2270. bp->wol |= MACB_WOL_ENABLED;
  2271. else
  2272. bp->wol &= ~MACB_WOL_ENABLED;
  2273. device_set_wakeup_enable(&bp->pdev->dev, bp->wol & MACB_WOL_ENABLED);
  2274. return 0;
  2275. }
  2276. static void macb_get_ringparam(struct net_device *netdev,
  2277. struct ethtool_ringparam *ring)
  2278. {
  2279. struct macb *bp = netdev_priv(netdev);
  2280. ring->rx_max_pending = MAX_RX_RING_SIZE;
  2281. ring->tx_max_pending = MAX_TX_RING_SIZE;
  2282. ring->rx_pending = bp->rx_ring_size;
  2283. ring->tx_pending = bp->tx_ring_size;
  2284. }
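/* Ring sizes can be changed at runtime with e.g. "ethtool -G <iface> rx 512
 * tx 512"; requested values are clamped to the supported range, rounded up
 * to a power of two, and the interface is briefly closed and reopened if it
 * was running.
 */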
  2285. static int macb_set_ringparam(struct net_device *netdev,
  2286. struct ethtool_ringparam *ring)
  2287. {
  2288. struct macb *bp = netdev_priv(netdev);
  2289. u32 new_rx_size, new_tx_size;
  2290. unsigned int reset = 0;
  2291. if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
  2292. return -EINVAL;
  2293. new_rx_size = clamp_t(u32, ring->rx_pending,
  2294. MIN_RX_RING_SIZE, MAX_RX_RING_SIZE);
  2295. new_rx_size = roundup_pow_of_two(new_rx_size);
  2296. new_tx_size = clamp_t(u32, ring->tx_pending,
  2297. MIN_TX_RING_SIZE, MAX_TX_RING_SIZE);
  2298. new_tx_size = roundup_pow_of_two(new_tx_size);
  2299. if ((new_tx_size == bp->tx_ring_size) &&
  2300. (new_rx_size == bp->rx_ring_size)) {
  2301. /* nothing to do */
  2302. return 0;
  2303. }
  2304. if (netif_running(bp->dev)) {
  2305. reset = 1;
  2306. macb_close(bp->dev);
  2307. }
  2308. bp->rx_ring_size = new_rx_size;
  2309. bp->tx_ring_size = new_tx_size;
  2310. if (reset)
  2311. macb_open(bp->dev);
  2312. return 0;
  2313. }
  2314. #ifdef CONFIG_MACB_USE_HWSTAMP
  2315. static unsigned int gem_get_tsu_rate(struct macb *bp)
  2316. {
  2317. struct clk *tsu_clk;
  2318. unsigned int tsu_rate;
  2319. tsu_clk = devm_clk_get(&bp->pdev->dev, "tsu_clk");
  2320. if (!IS_ERR(tsu_clk))
  2321. tsu_rate = clk_get_rate(tsu_clk);
  2322. /* try pclk instead */
  2323. else if (!IS_ERR(bp->pclk)) {
  2324. tsu_clk = bp->pclk;
  2325. tsu_rate = clk_get_rate(tsu_clk);
  2326. } else
  2327. return -ENOTSUPP;
  2328. return tsu_rate;
  2329. }
  2330. static s32 gem_get_ptp_max_adj(void)
  2331. {
  2332. return 64000000;
  2333. }
  2334. static int gem_get_ts_info(struct net_device *dev,
  2335. struct ethtool_ts_info *info)
  2336. {
  2337. struct macb *bp = netdev_priv(dev);
  2338. if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0) {
  2339. ethtool_op_get_ts_info(dev, info);
  2340. return 0;
  2341. }
  2342. info->so_timestamping =
  2343. SOF_TIMESTAMPING_TX_SOFTWARE |
  2344. SOF_TIMESTAMPING_RX_SOFTWARE |
  2345. SOF_TIMESTAMPING_SOFTWARE |
  2346. SOF_TIMESTAMPING_TX_HARDWARE |
  2347. SOF_TIMESTAMPING_RX_HARDWARE |
  2348. SOF_TIMESTAMPING_RAW_HARDWARE;
  2349. info->tx_types =
  2350. (1 << HWTSTAMP_TX_ONESTEP_SYNC) |
  2351. (1 << HWTSTAMP_TX_OFF) |
  2352. (1 << HWTSTAMP_TX_ON);
  2353. info->rx_filters =
  2354. (1 << HWTSTAMP_FILTER_NONE) |
  2355. (1 << HWTSTAMP_FILTER_ALL);
  2356. info->phc_index = bp->ptp_clock ? ptp_clock_index(bp->ptp_clock) : -1;
  2357. return 0;
  2358. }
  2359. static struct macb_ptp_info gem_ptp_info = {
  2360. .ptp_init = gem_ptp_init,
  2361. .ptp_remove = gem_ptp_remove,
  2362. .get_ptp_max_adj = gem_get_ptp_max_adj,
  2363. .get_tsu_rate = gem_get_tsu_rate,
  2364. .get_ts_info = gem_get_ts_info,
  2365. .get_hwtst = gem_get_hwtst,
  2366. .set_hwtst = gem_set_hwtst,
  2367. };
  2368. #endif
  2369. static int macb_get_ts_info(struct net_device *netdev,
  2370. struct ethtool_ts_info *info)
  2371. {
  2372. struct macb *bp = netdev_priv(netdev);
  2373. if (bp->ptp_info)
  2374. return bp->ptp_info->get_ts_info(netdev, info);
  2375. return ethtool_op_get_ts_info(netdev, info);
  2376. }
  2377. static void gem_enable_flow_filters(struct macb *bp, bool enable)
  2378. {
  2379. struct net_device *netdev = bp->dev;
  2380. struct ethtool_rx_fs_item *item;
  2381. u32 t2_scr;
  2382. int num_t2_scr;
  2383. if (!(netdev->features & NETIF_F_NTUPLE))
  2384. return;
  2385. num_t2_scr = GEM_BFEXT(T2SCR, gem_readl(bp, DCFG8));
  2386. list_for_each_entry(item, &bp->rx_fs_list.list, list) {
  2387. struct ethtool_rx_flow_spec *fs = &item->fs;
  2388. struct ethtool_tcpip4_spec *tp4sp_m;
  2389. if (fs->location >= num_t2_scr)
  2390. continue;
  2391. t2_scr = gem_readl_n(bp, SCRT2, fs->location);
  2392. /* enable/disable screener regs for the flow entry */
  2393. t2_scr = GEM_BFINS(ETHTEN, enable, t2_scr);
  2394. /* only enable fields with no masking */
  2395. tp4sp_m = &(fs->m_u.tcp_ip4_spec);
  2396. if (enable && (tp4sp_m->ip4src == 0xFFFFFFFF))
  2397. t2_scr = GEM_BFINS(CMPAEN, 1, t2_scr);
  2398. else
  2399. t2_scr = GEM_BFINS(CMPAEN, 0, t2_scr);
  2400. if (enable && (tp4sp_m->ip4dst == 0xFFFFFFFF))
  2401. t2_scr = GEM_BFINS(CMPBEN, 1, t2_scr);
  2402. else
  2403. t2_scr = GEM_BFINS(CMPBEN, 0, t2_scr);
  2404. if (enable && ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)))
  2405. t2_scr = GEM_BFINS(CMPCEN, 1, t2_scr);
  2406. else
  2407. t2_scr = GEM_BFINS(CMPCEN, 0, t2_scr);
  2408. gem_writel_n(bp, SCRT2, fs->location, t2_scr);
  2409. }
  2410. }
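/* Program the type-2 compare registers for one flow rule: up to three
 * comparers are used (source IP, destination IP, and the TCP/UDP ports), and
 * the screener register then steers matching traffic to the requested RX
 * queue.
 */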
  2411. static void gem_prog_cmp_regs(struct macb *bp, struct ethtool_rx_flow_spec *fs)
  2412. {
  2413. struct ethtool_tcpip4_spec *tp4sp_v, *tp4sp_m;
  2414. uint16_t index = fs->location;
  2415. u32 w0, w1, t2_scr;
  2416. bool cmp_a = false;
  2417. bool cmp_b = false;
  2418. bool cmp_c = false;
  2419. if (!macb_is_gem(bp))
  2420. return;
  2421. tp4sp_v = &(fs->h_u.tcp_ip4_spec);
  2422. tp4sp_m = &(fs->m_u.tcp_ip4_spec);
  2423. /* ignore field if any masking set */
  2424. if (tp4sp_m->ip4src == 0xFFFFFFFF) {
  2425. /* 1st compare reg - IP source address */
  2426. w0 = 0;
  2427. w1 = 0;
  2428. w0 = tp4sp_v->ip4src;
  2429. w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */
  2430. w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1);
  2431. w1 = GEM_BFINS(T2OFST, ETYPE_SRCIP_OFFSET, w1);
  2432. gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w0);
  2433. gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w1);
  2434. cmp_a = true;
  2435. }
  2436. /* ignore field if any masking set */
  2437. if (tp4sp_m->ip4dst == 0xFFFFFFFF) {
  2438. /* 2nd compare reg - IP destination address */
  2439. w0 = 0;
  2440. w1 = 0;
  2441. w0 = tp4sp_v->ip4dst;
  2442. w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */
  2443. w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1);
  2444. w1 = GEM_BFINS(T2OFST, ETYPE_DSTIP_OFFSET, w1);
  2445. gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4DST_CMP(index)), w0);
  2446. gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4DST_CMP(index)), w1);
  2447. cmp_b = true;
  2448. }
  2449. /* ignore both port fields if masking set in both */
  2450. if ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)) {
  2451. /* 3rd compare reg - source port, destination port */
  2452. w0 = 0;
  2453. w1 = 0;
  2454. w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_IPHDR, w1);
  2455. if (tp4sp_m->psrc == tp4sp_m->pdst) {
  2456. w0 = GEM_BFINS(T2MASK, tp4sp_v->psrc, w0);
  2457. w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0);
  2458. w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */
  2459. w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1);
  2460. } else {
  2461. /* only one port definition */
  2462. w1 = GEM_BFINS(T2DISMSK, 0, w1); /* 16-bit compare */
  2463. w0 = GEM_BFINS(T2MASK, 0xFFFF, w0);
  2464. if (tp4sp_m->psrc == 0xFFFF) { /* src port */
  2465. w0 = GEM_BFINS(T2CMP, tp4sp_v->psrc, w0);
  2466. w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1);
  2467. } else { /* dst port */
  2468. w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0);
  2469. w1 = GEM_BFINS(T2OFST, IPHDR_DSTPORT_OFFSET, w1);
  2470. }
  2471. }
  2472. gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_PORT_CMP(index)), w0);
  2473. gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_PORT_CMP(index)), w1);
  2474. cmp_c = true;
  2475. }
  2476. t2_scr = 0;
  2477. t2_scr = GEM_BFINS(QUEUE, (fs->ring_cookie) & 0xFF, t2_scr);
  2478. t2_scr = GEM_BFINS(ETHT2IDX, SCRT2_ETHT, t2_scr);
  2479. if (cmp_a)
  2480. t2_scr = GEM_BFINS(CMPA, GEM_IP4SRC_CMP(index), t2_scr);
  2481. if (cmp_b)
  2482. t2_scr = GEM_BFINS(CMPB, GEM_IP4DST_CMP(index), t2_scr);
  2483. if (cmp_c)
  2484. t2_scr = GEM_BFINS(CMPC, GEM_PORT_CMP(index), t2_scr);
  2485. gem_writel_n(bp, SCRT2, index, t2_scr);
  2486. }
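/* Install an RX flow-steering rule into the type-2 screeners. From userspace
 * this corresponds to something like "ethtool -K <iface> ntuple on" followed
 * by "ethtool -N <iface> flow-type tcp4 dst-ip 192.0.2.10 dst-port 5001
 * action 1 loc 0" (example values, assuming more than one RX queue).
 */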
  2487. static int gem_add_flow_filter(struct net_device *netdev,
  2488. struct ethtool_rxnfc *cmd)
  2489. {
  2490. struct macb *bp = netdev_priv(netdev);
  2491. struct ethtool_rx_flow_spec *fs = &cmd->fs;
  2492. struct ethtool_rx_fs_item *item, *newfs;
  2493. unsigned long flags;
  2494. int ret = -EINVAL;
  2495. bool added = false;
  2496. newfs = kmalloc(sizeof(*newfs), GFP_KERNEL);
  2497. if (newfs == NULL)
  2498. return -ENOMEM;
  2499. memcpy(&newfs->fs, fs, sizeof(newfs->fs));
  2500. netdev_dbg(netdev,
  2501. "Adding flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n",
  2502. fs->flow_type, (int)fs->ring_cookie, fs->location,
  2503. htonl(fs->h_u.tcp_ip4_spec.ip4src),
  2504. htonl(fs->h_u.tcp_ip4_spec.ip4dst),
  2505. htons(fs->h_u.tcp_ip4_spec.psrc), htons(fs->h_u.tcp_ip4_spec.pdst));
  2506. spin_lock_irqsave(&bp->rx_fs_lock, flags);
  2507. /* find correct place to add in list */
  2508. list_for_each_entry(item, &bp->rx_fs_list.list, list) {
  2509. if (item->fs.location > newfs->fs.location) {
  2510. list_add_tail(&newfs->list, &item->list);
  2511. added = true;
  2512. break;
  2513. } else if (item->fs.location == fs->location) {
  2514. netdev_err(netdev, "Rule not added: location %d not free!\n",
  2515. fs->location);
  2516. ret = -EBUSY;
  2517. goto err;
  2518. }
  2519. }
  2520. if (!added)
  2521. list_add_tail(&newfs->list, &bp->rx_fs_list.list);
  2522. gem_prog_cmp_regs(bp, fs);
  2523. bp->rx_fs_list.count++;
  2524. /* enable filtering if NTUPLE on */
  2525. gem_enable_flow_filters(bp, 1);
  2526. spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
  2527. return 0;
  2528. err:
  2529. spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
  2530. kfree(newfs);
  2531. return ret;
  2532. }
  2533. static int gem_del_flow_filter(struct net_device *netdev,
  2534. struct ethtool_rxnfc *cmd)
  2535. {
  2536. struct macb *bp = netdev_priv(netdev);
  2537. struct ethtool_rx_fs_item *item;
  2538. struct ethtool_rx_flow_spec *fs;
  2539. unsigned long flags;
  2540. spin_lock_irqsave(&bp->rx_fs_lock, flags);
  2541. list_for_each_entry(item, &bp->rx_fs_list.list, list) {
  2542. if (item->fs.location == cmd->fs.location) {
  2543. /* disable screener regs for the flow entry */
  2544. fs = &(item->fs);
  2545. netdev_dbg(netdev,
  2546. "Deleting flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n",
  2547. fs->flow_type, (int)fs->ring_cookie, fs->location,
  2548. htonl(fs->h_u.tcp_ip4_spec.ip4src),
  2549. htonl(fs->h_u.tcp_ip4_spec.ip4dst),
  2550. htons(fs->h_u.tcp_ip4_spec.psrc),
  2551. htons(fs->h_u.tcp_ip4_spec.pdst));
  2552. gem_writel_n(bp, SCRT2, fs->location, 0);
  2553. list_del(&item->list);
  2554. bp->rx_fs_list.count--;
  2555. spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
  2556. kfree(item);
  2557. return 0;
  2558. }
  2559. }
  2560. spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
  2561. return -EINVAL;
  2562. }
  2563. static int gem_get_flow_entry(struct net_device *netdev,
  2564. struct ethtool_rxnfc *cmd)
  2565. {
  2566. struct macb *bp = netdev_priv(netdev);
  2567. struct ethtool_rx_fs_item *item;
  2568. list_for_each_entry(item, &bp->rx_fs_list.list, list) {
  2569. if (item->fs.location == cmd->fs.location) {
  2570. memcpy(&cmd->fs, &item->fs, sizeof(cmd->fs));
  2571. return 0;
  2572. }
  2573. }
  2574. return -EINVAL;
  2575. }
  2576. static int gem_get_all_flow_entries(struct net_device *netdev,
  2577. struct ethtool_rxnfc *cmd, u32 *rule_locs)
  2578. {
  2579. struct macb *bp = netdev_priv(netdev);
  2580. struct ethtool_rx_fs_item *item;
  2581. uint32_t cnt = 0;
  2582. list_for_each_entry(item, &bp->rx_fs_list.list, list) {
  2583. if (cnt == cmd->rule_cnt)
  2584. return -EMSGSIZE;
  2585. rule_locs[cnt] = item->fs.location;
  2586. cnt++;
  2587. }
  2588. cmd->data = bp->max_tuples;
  2589. cmd->rule_cnt = cnt;
  2590. return 0;
  2591. }
  2592. static int gem_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
  2593. u32 *rule_locs)
  2594. {
  2595. struct macb *bp = netdev_priv(netdev);
  2596. int ret = 0;
  2597. switch (cmd->cmd) {
  2598. case ETHTOOL_GRXRINGS:
  2599. cmd->data = bp->num_queues;
  2600. break;
  2601. case ETHTOOL_GRXCLSRLCNT:
  2602. cmd->rule_cnt = bp->rx_fs_list.count;
  2603. break;
  2604. case ETHTOOL_GRXCLSRULE:
  2605. ret = gem_get_flow_entry(netdev, cmd);
  2606. break;
  2607. case ETHTOOL_GRXCLSRLALL:
  2608. ret = gem_get_all_flow_entries(netdev, cmd, rule_locs);
  2609. break;
  2610. default:
  2611. netdev_err(netdev,
  2612. "Command parameter %d is not supported\n", cmd->cmd);
  2613. ret = -EOPNOTSUPP;
  2614. }
  2615. return ret;
  2616. }
static int gem_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
{
        struct macb *bp = netdev_priv(netdev);
        int ret;

        switch (cmd->cmd) {
        case ETHTOOL_SRXCLSRLINS:
                if ((cmd->fs.location >= bp->max_tuples)
                                || (cmd->fs.ring_cookie >= bp->num_queues)) {
                        ret = -EINVAL;
                        break;
                }
                ret = gem_add_flow_filter(netdev, cmd);
                break;
        case ETHTOOL_SRXCLSRLDEL:
                ret = gem_del_flow_filter(netdev, cmd);
                break;
        default:
                netdev_err(netdev,
                           "Command parameter %d is not supported\n", cmd->cmd);
                ret = -EOPNOTSUPP;
        }

        return ret;
}
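/* Write-side counterpart. Illustrative usage with the standard ethtool
 * utility (example command lines only): inserting a TCP/IPv4 rule that
 * steers destination port 80 to queue 1 at location 0, then deleting it:
 *   ethtool -U <dev> flow-type tcp4 dst-port 80 action 1 loc 0
 *   ethtool -U <dev> delete 0
 * Both arrive here as ETHTOOL_SRXCLSRLINS / ETHTOOL_SRXCLSRLDEL.
 */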
static const struct ethtool_ops macb_ethtool_ops = {
        .get_regs_len           = macb_get_regs_len,
        .get_regs               = macb_get_regs,
        .get_link               = ethtool_op_get_link,
        .get_ts_info            = ethtool_op_get_ts_info,
        .get_wol                = macb_get_wol,
        .set_wol                = macb_set_wol,
        .get_link_ksettings     = phy_ethtool_get_link_ksettings,
        .set_link_ksettings     = phy_ethtool_set_link_ksettings,
        .get_ringparam          = macb_get_ringparam,
        .set_ringparam          = macb_set_ringparam,
};

static const struct ethtool_ops gem_ethtool_ops = {
        .get_regs_len           = macb_get_regs_len,
        .get_regs               = macb_get_regs,
        .get_link               = ethtool_op_get_link,
        .get_ts_info            = macb_get_ts_info,
        .get_ethtool_stats      = gem_get_ethtool_stats,
        .get_strings            = gem_get_ethtool_strings,
        .get_sset_count         = gem_get_sset_count,
        .get_link_ksettings     = phy_ethtool_get_link_ksettings,
        .set_link_ksettings     = phy_ethtool_set_link_ksettings,
        .get_ringparam          = macb_get_ringparam,
        .set_ringparam          = macb_set_ringparam,
        .get_rxnfc              = gem_get_rxnfc,
        .set_rxnfc              = gem_set_rxnfc,
};
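/* macb_init() below selects between these two tables: GEM instances get
 * the extended ops (per-queue statistics, RX flow classification and
 * driver-specific timestamp info), while the older MACB/EMAC cores keep
 * the reduced set.
 */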
static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
        struct phy_device *phydev = dev->phydev;
        struct macb *bp = netdev_priv(dev);

        if (!netif_running(dev))
                return -EINVAL;

        if (!phydev)
                return -ENODEV;

        if (!bp->ptp_info)
                return phy_mii_ioctl(phydev, rq, cmd);

        switch (cmd) {
        case SIOCSHWTSTAMP:
                return bp->ptp_info->set_hwtst(dev, rq, cmd);
        case SIOCGHWTSTAMP:
                return bp->ptp_info->get_hwtst(dev, rq);
        default:
                return phy_mii_ioctl(phydev, rq, cmd);
        }
}
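/* The checksum offload helpers below only touch GEM-class hardware:
 * TX checksum generation is gated by the TXCOEN bit in DMACFG, and RX
 * checksum verification by RXCOEN in NCFGR, which is kept disabled while
 * the interface is in promiscuous mode.
 */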
static inline void macb_set_txcsum_feature(struct macb *bp,
                netdev_features_t features)
{
        u32 val;

        if (!macb_is_gem(bp))
                return;

        val = gem_readl(bp, DMACFG);
        if (features & NETIF_F_HW_CSUM)
                val |= GEM_BIT(TXCOEN);
        else
                val &= ~GEM_BIT(TXCOEN);

        gem_writel(bp, DMACFG, val);
}

static inline void macb_set_rxcsum_feature(struct macb *bp,
                netdev_features_t features)
{
        struct net_device *netdev = bp->dev;
        u32 val;

        if (!macb_is_gem(bp))
                return;

        val = gem_readl(bp, NCFGR);
        if ((features & NETIF_F_RXCSUM) && !(netdev->flags & IFF_PROMISC))
                val |= GEM_BIT(RXCOEN);
        else
                val &= ~GEM_BIT(RXCOEN);

        gem_writel(bp, NCFGR, val);
}

static inline void macb_set_rxflow_feature(struct macb *bp,
                netdev_features_t features)
{
        if (!macb_is_gem(bp))
                return;

        gem_enable_flow_filters(bp, !!(features & NETIF_F_NTUPLE));
}
static int macb_set_features(struct net_device *netdev,
                netdev_features_t features)
{
        struct macb *bp = netdev_priv(netdev);
        netdev_features_t changed = features ^ netdev->features;

        /* TX checksum offload */
        if (changed & NETIF_F_HW_CSUM)
                macb_set_txcsum_feature(bp, features);

        /* RX checksum offload */
        if (changed & NETIF_F_RXCSUM)
                macb_set_rxcsum_feature(bp, features);

        /* RX Flow Filters */
        if (changed & NETIF_F_NTUPLE)
                macb_set_rxflow_feature(bp, features);

        return 0;
}

static void macb_restore_features(struct macb *bp)
{
        struct net_device *netdev = bp->dev;
        netdev_features_t features = netdev->features;
        struct ethtool_rx_fs_item *item;

        /* TX checksum offload */
        macb_set_txcsum_feature(bp, features);

        /* RX checksum offload */
        macb_set_rxcsum_feature(bp, features);

        /* RX Flow Filters */
        list_for_each_entry(item, &bp->rx_fs_list.list, list)
                gem_prog_cmp_regs(bp, &item->fs);

        macb_set_rxflow_feature(bp, features);
}
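/* macb_restore_features() is called from macb_resume(): after the MAC has
 * been reset across a system suspend, it rewrites the offload bits and
 * reprograms every stored flow-filter rule into the compare registers.
 */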
static const struct net_device_ops macb_netdev_ops = {
        .ndo_open               = macb_open,
        .ndo_stop               = macb_close,
        .ndo_start_xmit         = macb_start_xmit,
        .ndo_set_rx_mode        = macb_set_rx_mode,
        .ndo_get_stats          = macb_get_stats,
        .ndo_do_ioctl           = macb_ioctl,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_change_mtu         = macb_change_mtu,
        .ndo_set_mac_address    = eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = macb_poll_controller,
#endif
        .ndo_set_features       = macb_set_features,
        .ndo_features_check     = macb_features_check,
};
/* Configure peripheral capabilities according to device tree
 * and integration options used
 */
static void macb_configure_caps(struct macb *bp,
                const struct macb_config *dt_conf)
{
        u32 dcfg;

        if (dt_conf)
                bp->caps = dt_conf->caps;

        if (hw_is_gem(bp->regs, bp->native_io)) {
                bp->caps |= MACB_CAPS_MACB_IS_GEM;

                dcfg = gem_readl(bp, DCFG1);
                if (GEM_BFEXT(IRQCOR, dcfg) == 0)
                        bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
                dcfg = gem_readl(bp, DCFG2);
                if ((dcfg & (GEM_BIT(RX_PKT_BUFF) | GEM_BIT(TX_PKT_BUFF))) == 0)
                        bp->caps |= MACB_CAPS_FIFO_MODE;
#ifdef CONFIG_MACB_USE_HWSTAMP
                if (gem_has_ptp(bp)) {
                        if (!GEM_BFEXT(TSU, gem_readl(bp, DCFG5))) {
                                pr_err("GEM doesn't support hardware ptp.\n");
                        } else {
                                bp->hw_dma_cap |= HW_DMA_CAP_PTP;
                                bp->ptp_info = &gem_ptp_info;
                        }
                }
#endif
        }

        dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps);
}
static void macb_probe_queues(void __iomem *mem,
                bool native_io,
                unsigned int *queue_mask,
                unsigned int *num_queues)
{
        unsigned int hw_q;

        *queue_mask = 0x1;
        *num_queues = 1;

        /* Is it macb or gem?
         *
         * We need to read directly from the hardware here because
         * we are early in the probe process and don't have the
         * MACB_CAPS_MACB_IS_GEM flag positioned yet.
         */
        if (!hw_is_gem(mem, native_io))
                return;

        /* bit 0 is never set but queue 0 always exists */
        *queue_mask = readl_relaxed(mem + GEM_DCFG6) & 0xff;
        *queue_mask |= 0x1;

        for (hw_q = 1; hw_q < MACB_MAX_QUEUES; ++hw_q)
                if (*queue_mask & (1 << hw_q))
                        (*num_queues)++;
}
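/* Worked example (illustrative): if the low byte of DCFG6 reads 0x0e,
 * queues 1-3 are present in addition to the always-present queue 0, so
 * *queue_mask becomes 0x0f and *num_queues ends up as 4.
 */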
static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
                         struct clk **hclk, struct clk **tx_clk,
                         struct clk **rx_clk, struct clk **tsu_clk)
{
        struct macb_platform_data *pdata;
        int err;

        pdata = dev_get_platdata(&pdev->dev);
        if (pdata) {
                *pclk = pdata->pclk;
                *hclk = pdata->hclk;
        } else {
                *pclk = devm_clk_get(&pdev->dev, "pclk");
                *hclk = devm_clk_get(&pdev->dev, "hclk");
        }

        if (IS_ERR_OR_NULL(*pclk)) {
                err = PTR_ERR(*pclk);
                if (!err)
                        err = -ENODEV;

                dev_err(&pdev->dev, "failed to get macb_clk (%d)\n", err);
                return err;
        }

        if (IS_ERR_OR_NULL(*hclk)) {
                err = PTR_ERR(*hclk);
                if (!err)
                        err = -ENODEV;

                dev_err(&pdev->dev, "failed to get hclk (%d)\n", err);
                return err;
        }

        *tx_clk = devm_clk_get_optional(&pdev->dev, "tx_clk");
        if (IS_ERR(*tx_clk))
                return PTR_ERR(*tx_clk);

        *rx_clk = devm_clk_get_optional(&pdev->dev, "rx_clk");
        if (IS_ERR(*rx_clk))
                return PTR_ERR(*rx_clk);

        *tsu_clk = devm_clk_get_optional(&pdev->dev, "tsu_clk");
        if (IS_ERR(*tsu_clk))
                return PTR_ERR(*tsu_clk);

        err = clk_prepare_enable(*pclk);
        if (err) {
                dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
                return err;
        }

        err = clk_prepare_enable(*hclk);
        if (err) {
                dev_err(&pdev->dev, "failed to enable hclk (%d)\n", err);
                goto err_disable_pclk;
        }

        err = clk_prepare_enable(*tx_clk);
        if (err) {
                dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
                goto err_disable_hclk;
        }

        err = clk_prepare_enable(*rx_clk);
        if (err) {
                dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
                goto err_disable_txclk;
        }

        err = clk_prepare_enable(*tsu_clk);
        if (err) {
                dev_err(&pdev->dev, "failed to enable tsu_clk (%d)\n", err);
                goto err_disable_rxclk;
        }

        return 0;

err_disable_rxclk:
        clk_disable_unprepare(*rx_clk);

err_disable_txclk:
        clk_disable_unprepare(*tx_clk);

err_disable_hclk:
        clk_disable_unprepare(*hclk);

err_disable_pclk:
        clk_disable_unprepare(*pclk);

        return err;
}
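/* Clocks are brought up strictly in the order pclk -> hclk -> tx_clk ->
 * rx_clk -> tsu_clk; the error labels above unwind in reverse order so a
 * failure part-way through leaves nothing enabled.
 */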
  2892. static int macb_init(struct platform_device *pdev)
  2893. {
  2894. struct net_device *dev = platform_get_drvdata(pdev);
  2895. unsigned int hw_q, q;
  2896. struct macb *bp = netdev_priv(dev);
  2897. struct macb_queue *queue;
  2898. int err;
  2899. u32 val, reg;
  2900. bp->tx_ring_size = DEFAULT_TX_RING_SIZE;
  2901. bp->rx_ring_size = DEFAULT_RX_RING_SIZE;
  2902. /* set the queue register mapping once for all: queue0 has a special
  2903. * register mapping but we don't want to test the queue index then
  2904. * compute the corresponding register offset at run time.
  2905. */
  2906. for (hw_q = 0, q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) {
  2907. if (!(bp->queue_mask & (1 << hw_q)))
  2908. continue;
  2909. queue = &bp->queues[q];
  2910. queue->bp = bp;
  2911. netif_napi_add(dev, &queue->napi, macb_poll, NAPI_POLL_WEIGHT);
  2912. if (hw_q) {
  2913. queue->ISR = GEM_ISR(hw_q - 1);
  2914. queue->IER = GEM_IER(hw_q - 1);
  2915. queue->IDR = GEM_IDR(hw_q - 1);
  2916. queue->IMR = GEM_IMR(hw_q - 1);
  2917. queue->TBQP = GEM_TBQP(hw_q - 1);
  2918. queue->RBQP = GEM_RBQP(hw_q - 1);
  2919. queue->RBQS = GEM_RBQS(hw_q - 1);
  2920. #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
  2921. if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
  2922. queue->TBQPH = GEM_TBQPH(hw_q - 1);
  2923. queue->RBQPH = GEM_RBQPH(hw_q - 1);
  2924. }
  2925. #endif
  2926. } else {
  2927. /* queue0 uses legacy registers */
  2928. queue->ISR = MACB_ISR;
  2929. queue->IER = MACB_IER;
  2930. queue->IDR = MACB_IDR;
  2931. queue->IMR = MACB_IMR;
  2932. queue->TBQP = MACB_TBQP;
  2933. queue->RBQP = MACB_RBQP;
  2934. #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
  2935. if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
  2936. queue->TBQPH = MACB_TBQPH;
  2937. queue->RBQPH = MACB_RBQPH;
  2938. }
  2939. #endif
  2940. }
  2941. /* get irq: here we use the linux queue index, not the hardware
  2942. * queue index. the queue irq definitions in the device tree
  2943. * must remove the optional gaps that could exist in the
  2944. * hardware queue mask.
  2945. */
  2946. queue->irq = platform_get_irq(pdev, q);
  2947. err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt,
  2948. IRQF_SHARED, dev->name, queue);
  2949. if (err) {
  2950. dev_err(&pdev->dev,
  2951. "Unable to request IRQ %d (error %d)\n",
  2952. queue->irq, err);
  2953. return err;
  2954. }
  2955. INIT_WORK(&queue->tx_error_task, macb_tx_error_task);
  2956. q++;
  2957. }
  2958. dev->netdev_ops = &macb_netdev_ops;
  2959. /* setup appropriated routines according to adapter type */
  2960. if (macb_is_gem(bp)) {
  2961. bp->max_tx_length = GEM_MAX_TX_LEN;
  2962. bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers;
  2963. bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers;
  2964. bp->macbgem_ops.mog_init_rings = gem_init_rings;
  2965. bp->macbgem_ops.mog_rx = gem_rx;
  2966. dev->ethtool_ops = &gem_ethtool_ops;
  2967. } else {
  2968. bp->max_tx_length = MACB_MAX_TX_LEN;
  2969. bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers;
  2970. bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers;
  2971. bp->macbgem_ops.mog_init_rings = macb_init_rings;
  2972. bp->macbgem_ops.mog_rx = macb_rx;
  2973. dev->ethtool_ops = &macb_ethtool_ops;
  2974. }
  2975. /* Set features */
  2976. dev->hw_features = NETIF_F_SG;
  2977. /* Check LSO capability */
  2978. if (GEM_BFEXT(PBUF_LSO, gem_readl(bp, DCFG6)))
  2979. dev->hw_features |= MACB_NETIF_LSO;
  2980. /* Checksum offload is only available on gem with packet buffer */
  2981. if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE))
  2982. dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
  2983. if (bp->caps & MACB_CAPS_SG_DISABLED)
  2984. dev->hw_features &= ~NETIF_F_SG;
  2985. dev->features = dev->hw_features;
  2986. /* Check RX Flow Filters support.
  2987. * Max Rx flows set by availability of screeners & compare regs:
  2988. * each 4-tuple define requires 1 T2 screener reg + 3 compare regs
  2989. */
  2990. reg = gem_readl(bp, DCFG8);
  2991. bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3),
  2992. GEM_BFEXT(T2SCR, reg));
  2993. INIT_LIST_HEAD(&bp->rx_fs_list.list);
  2994. if (bp->max_tuples > 0) {
  2995. /* also needs one ethtype match to check IPv4 */
  2996. if (GEM_BFEXT(SCR2ETH, reg) > 0) {
  2997. /* program this reg now */
  2998. reg = 0;
  2999. reg = GEM_BFINS(ETHTCMP, (uint16_t)ETH_P_IP, reg);
  3000. gem_writel_n(bp, ETHT, SCRT2_ETHT, reg);
  3001. /* Filtering is supported in hw but don't enable it in kernel now */
  3002. dev->hw_features |= NETIF_F_NTUPLE;
  3003. /* init Rx flow definitions */
  3004. bp->rx_fs_list.count = 0;
  3005. spin_lock_init(&bp->rx_fs_lock);
  3006. } else
  3007. bp->max_tuples = 0;
  3008. }
  3009. if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) {
  3010. val = 0;
  3011. if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
  3012. val = GEM_BIT(RGMII);
  3013. else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII &&
  3014. (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
  3015. val = MACB_BIT(RMII);
  3016. else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
  3017. val = MACB_BIT(MII);
  3018. if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN)
  3019. val |= MACB_BIT(CLKEN);
  3020. macb_or_gem_writel(bp, USRIO, val);
  3021. }
  3022. /* Set MII management clock divider */
  3023. val = macb_mdc_clk_div(bp);
  3024. val |= macb_dbw(bp);
  3025. if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
  3026. val |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
  3027. macb_writel(bp, NCFGR, val);
  3028. return 0;
  3029. }
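/* Everything from here to the matching #endif (the at91rm9200 EMAC support
 * and the SiFive FU540 GEMGXL clock glue, plus the device-tree match table)
 * is only built when CONFIG_OF is enabled.
 */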
  3030. #if defined(CONFIG_OF)
  3031. /* 1518 rounded up */
  3032. #define AT91ETHER_MAX_RBUFF_SZ 0x600
  3033. /* max number of receive buffers */
  3034. #define AT91ETHER_MAX_RX_DESCR 9
  3035. static struct sifive_fu540_macb_mgmt *mgmt;
  3036. /* Initialize and start the Receiver and Transmit subsystems */
  3037. static int at91ether_start(struct net_device *dev)
  3038. {
  3039. struct macb *lp = netdev_priv(dev);
  3040. struct macb_queue *q = &lp->queues[0];
  3041. struct macb_dma_desc *desc;
  3042. dma_addr_t addr;
  3043. u32 ctl;
  3044. int i;
  3045. q->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
  3046. (AT91ETHER_MAX_RX_DESCR *
  3047. macb_dma_desc_get_size(lp)),
  3048. &q->rx_ring_dma, GFP_KERNEL);
  3049. if (!q->rx_ring)
  3050. return -ENOMEM;
  3051. q->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
  3052. AT91ETHER_MAX_RX_DESCR *
  3053. AT91ETHER_MAX_RBUFF_SZ,
  3054. &q->rx_buffers_dma, GFP_KERNEL);
  3055. if (!q->rx_buffers) {
  3056. dma_free_coherent(&lp->pdev->dev,
  3057. AT91ETHER_MAX_RX_DESCR *
  3058. macb_dma_desc_get_size(lp),
  3059. q->rx_ring, q->rx_ring_dma);
  3060. q->rx_ring = NULL;
  3061. return -ENOMEM;
  3062. }
  3063. addr = q->rx_buffers_dma;
  3064. for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
  3065. desc = macb_rx_desc(q, i);
  3066. macb_set_addr(lp, desc, addr);
  3067. desc->ctrl = 0;
  3068. addr += AT91ETHER_MAX_RBUFF_SZ;
  3069. }
  3070. /* Set the Wrap bit on the last descriptor */
  3071. desc->addr |= MACB_BIT(RX_WRAP);
  3072. /* Reset buffer index */
  3073. q->rx_tail = 0;
  3074. /* Program address of descriptor list in Rx Buffer Queue register */
  3075. macb_writel(lp, RBQP, q->rx_ring_dma);
  3076. /* Enable Receive and Transmit */
  3077. ctl = macb_readl(lp, NCR);
  3078. macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE));
  3079. return 0;
  3080. }
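/* at91ether_open() below resumes the device, clears the statistics
 * counters, programs the MAC address, starts the RX/TX engines via
 * at91ether_start() and only then unmasks the MAC interrupts.
 */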
  3081. /* Open the ethernet interface */
  3082. static int at91ether_open(struct net_device *dev)
  3083. {
  3084. struct macb *lp = netdev_priv(dev);
  3085. u32 ctl;
  3086. int ret;
  3087. ret = pm_runtime_get_sync(&lp->pdev->dev);
  3088. if (ret < 0) {
  3089. pm_runtime_put_noidle(&lp->pdev->dev);
  3090. return ret;
  3091. }
  3092. /* Clear internal statistics */
  3093. ctl = macb_readl(lp, NCR);
  3094. macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT));
  3095. macb_set_hwaddr(lp);
  3096. ret = at91ether_start(dev);
  3097. if (ret)
  3098. goto pm_exit;
  3099. /* Enable MAC interrupts */
  3100. macb_writel(lp, IER, MACB_BIT(RCOMP) |
  3101. MACB_BIT(RXUBR) |
  3102. MACB_BIT(ISR_TUND) |
  3103. MACB_BIT(ISR_RLE) |
  3104. MACB_BIT(TCOMP) |
  3105. MACB_BIT(ISR_ROVR) |
  3106. MACB_BIT(HRESP));
  3107. /* schedule a link state check */
  3108. phy_start(dev->phydev);
  3109. netif_start_queue(dev);
  3110. return 0;
  3111. pm_exit:
  3112. pm_runtime_put_sync(&lp->pdev->dev);
  3113. return ret;
  3114. }
  3115. /* Close the interface */
  3116. static int at91ether_close(struct net_device *dev)
  3117. {
  3118. struct macb *lp = netdev_priv(dev);
  3119. struct macb_queue *q = &lp->queues[0];
  3120. u32 ctl;
  3121. /* Disable Receiver and Transmitter */
  3122. ctl = macb_readl(lp, NCR);
  3123. macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));
  3124. /* Disable MAC interrupts */
  3125. macb_writel(lp, IDR, MACB_BIT(RCOMP) |
  3126. MACB_BIT(RXUBR) |
  3127. MACB_BIT(ISR_TUND) |
  3128. MACB_BIT(ISR_RLE) |
  3129. MACB_BIT(TCOMP) |
  3130. MACB_BIT(ISR_ROVR) |
  3131. MACB_BIT(HRESP));
  3132. netif_stop_queue(dev);
  3133. dma_free_coherent(&lp->pdev->dev,
  3134. AT91ETHER_MAX_RX_DESCR *
  3135. macb_dma_desc_get_size(lp),
  3136. q->rx_ring, q->rx_ring_dma);
  3137. q->rx_ring = NULL;
  3138. dma_free_coherent(&lp->pdev->dev,
  3139. AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ,
  3140. q->rx_buffers, q->rx_buffers_dma);
  3141. q->rx_buffers = NULL;
  3142. return pm_runtime_put(&lp->pdev->dev);
  3143. }
  3144. /* Transmit packet */
  3145. static netdev_tx_t at91ether_start_xmit(struct sk_buff *skb,
  3146. struct net_device *dev)
  3147. {
  3148. struct macb *lp = netdev_priv(dev);
  3149. if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) {
  3150. netif_stop_queue(dev);
  3151. /* Store packet information (to free when Tx completed) */
  3152. lp->skb = skb;
  3153. lp->skb_length = skb->len;
  3154. lp->skb_physaddr = dma_map_single(&lp->pdev->dev, skb->data,
  3155. skb->len, DMA_TO_DEVICE);
  3156. if (dma_mapping_error(&lp->pdev->dev, lp->skb_physaddr)) {
  3157. dev_kfree_skb_any(skb);
  3158. dev->stats.tx_dropped++;
  3159. netdev_err(dev, "%s: DMA mapping error\n", __func__);
  3160. return NETDEV_TX_OK;
  3161. }
  3162. /* Set address of the data in the Transmit Address register */
  3163. macb_writel(lp, TAR, lp->skb_physaddr);
  3164. /* Set length of the packet in the Transmit Control register */
  3165. macb_writel(lp, TCR, skb->len);
  3166. } else {
  3167. netdev_err(dev, "%s called, but device is busy!\n", __func__);
  3168. return NETDEV_TX_BUSY;
  3169. }
  3170. return NETDEV_TX_OK;
  3171. }
/* Extract received frames from the buffer descriptors and pass them to the
 * upper layers. (Called from interrupt context.)
 */
  3175. static void at91ether_rx(struct net_device *dev)
  3176. {
  3177. struct macb *lp = netdev_priv(dev);
  3178. struct macb_queue *q = &lp->queues[0];
  3179. struct macb_dma_desc *desc;
  3180. unsigned char *p_recv;
  3181. struct sk_buff *skb;
  3182. unsigned int pktlen;
  3183. desc = macb_rx_desc(q, q->rx_tail);
  3184. while (desc->addr & MACB_BIT(RX_USED)) {
  3185. p_recv = q->rx_buffers + q->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
  3186. pktlen = MACB_BF(RX_FRMLEN, desc->ctrl);
  3187. skb = netdev_alloc_skb(dev, pktlen + 2);
  3188. if (skb) {
  3189. skb_reserve(skb, 2);
  3190. skb_put_data(skb, p_recv, pktlen);
  3191. skb->protocol = eth_type_trans(skb, dev);
  3192. dev->stats.rx_packets++;
  3193. dev->stats.rx_bytes += pktlen;
  3194. netif_rx(skb);
  3195. } else {
  3196. dev->stats.rx_dropped++;
  3197. }
  3198. if (desc->ctrl & MACB_BIT(RX_MHASH_MATCH))
  3199. dev->stats.multicast++;
  3200. /* reset ownership bit */
  3201. desc->addr &= ~MACB_BIT(RX_USED);
  3202. /* wrap after last buffer */
  3203. if (q->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
  3204. q->rx_tail = 0;
  3205. else
  3206. q->rx_tail++;
  3207. desc = macb_rx_desc(q, q->rx_tail);
  3208. }
  3209. }
  3210. /* MAC interrupt handler */
  3211. static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
  3212. {
  3213. struct net_device *dev = dev_id;
  3214. struct macb *lp = netdev_priv(dev);
  3215. u32 intstatus, ctl;
  3216. /* MAC Interrupt Status register indicates what interrupts are pending.
  3217. * It is automatically cleared once read.
  3218. */
  3219. intstatus = macb_readl(lp, ISR);
  3220. /* Receive complete */
  3221. if (intstatus & MACB_BIT(RCOMP))
  3222. at91ether_rx(dev);
  3223. /* Transmit complete */
  3224. if (intstatus & MACB_BIT(TCOMP)) {
  3225. /* The TCOM bit is set even if the transmission failed */
  3226. if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE)))
  3227. dev->stats.tx_errors++;
  3228. if (lp->skb) {
  3229. dev_consume_skb_irq(lp->skb);
  3230. lp->skb = NULL;
  3231. dma_unmap_single(&lp->pdev->dev, lp->skb_physaddr,
  3232. lp->skb_length, DMA_TO_DEVICE);
  3233. dev->stats.tx_packets++;
  3234. dev->stats.tx_bytes += lp->skb_length;
  3235. }
  3236. netif_wake_queue(dev);
  3237. }
  3238. /* Work-around for EMAC Errata section 41.3.1 */
  3239. if (intstatus & MACB_BIT(RXUBR)) {
  3240. ctl = macb_readl(lp, NCR);
  3241. macb_writel(lp, NCR, ctl & ~MACB_BIT(RE));
  3242. wmb();
  3243. macb_writel(lp, NCR, ctl | MACB_BIT(RE));
  3244. }
  3245. if (intstatus & MACB_BIT(ISR_ROVR))
  3246. netdev_err(dev, "ROVR error\n");
  3247. return IRQ_HANDLED;
  3248. }
  3249. #ifdef CONFIG_NET_POLL_CONTROLLER
  3250. static void at91ether_poll_controller(struct net_device *dev)
  3251. {
  3252. unsigned long flags;
  3253. local_irq_save(flags);
  3254. at91ether_interrupt(dev->irq, dev);
  3255. local_irq_restore(flags);
  3256. }
  3257. #endif
  3258. static const struct net_device_ops at91ether_netdev_ops = {
  3259. .ndo_open = at91ether_open,
  3260. .ndo_stop = at91ether_close,
  3261. .ndo_start_xmit = at91ether_start_xmit,
  3262. .ndo_get_stats = macb_get_stats,
  3263. .ndo_set_rx_mode = macb_set_rx_mode,
  3264. .ndo_set_mac_address = eth_mac_addr,
  3265. .ndo_do_ioctl = macb_ioctl,
  3266. .ndo_validate_addr = eth_validate_addr,
  3267. #ifdef CONFIG_NET_POLL_CONTROLLER
  3268. .ndo_poll_controller = at91ether_poll_controller,
  3269. #endif
  3270. };
  3271. static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk,
  3272. struct clk **hclk, struct clk **tx_clk,
  3273. struct clk **rx_clk, struct clk **tsu_clk)
  3274. {
  3275. int err;
  3276. *hclk = NULL;
  3277. *tx_clk = NULL;
  3278. *rx_clk = NULL;
  3279. *tsu_clk = NULL;
  3280. *pclk = devm_clk_get(&pdev->dev, "ether_clk");
  3281. if (IS_ERR(*pclk))
  3282. return PTR_ERR(*pclk);
  3283. err = clk_prepare_enable(*pclk);
  3284. if (err) {
  3285. dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
  3286. return err;
  3287. }
  3288. return 0;
  3289. }
  3290. static int at91ether_init(struct platform_device *pdev)
  3291. {
  3292. struct net_device *dev = platform_get_drvdata(pdev);
  3293. struct macb *bp = netdev_priv(dev);
  3294. int err;
  3295. u32 reg;
  3296. bp->queues[0].bp = bp;
  3297. dev->netdev_ops = &at91ether_netdev_ops;
  3298. dev->ethtool_ops = &macb_ethtool_ops;
  3299. err = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt,
  3300. 0, dev->name, dev);
  3301. if (err)
  3302. return err;
  3303. macb_writel(bp, NCR, 0);
  3304. reg = MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG);
  3305. if (bp->phy_interface == PHY_INTERFACE_MODE_RMII)
  3306. reg |= MACB_BIT(RM9200_RMII);
  3307. macb_writel(bp, NCFGR, reg);
  3308. return 0;
  3309. }
  3310. static unsigned long fu540_macb_tx_recalc_rate(struct clk_hw *hw,
  3311. unsigned long parent_rate)
  3312. {
  3313. return mgmt->rate;
  3314. }
  3315. static long fu540_macb_tx_round_rate(struct clk_hw *hw, unsigned long rate,
  3316. unsigned long *parent_rate)
  3317. {
  3318. if (WARN_ON(rate < 2500000))
  3319. return 2500000;
  3320. else if (rate == 2500000)
  3321. return 2500000;
  3322. else if (WARN_ON(rate < 13750000))
  3323. return 2500000;
  3324. else if (WARN_ON(rate < 25000000))
  3325. return 25000000;
  3326. else if (rate == 25000000)
  3327. return 25000000;
  3328. else if (WARN_ON(rate < 75000000))
  3329. return 25000000;
  3330. else if (WARN_ON(rate < 125000000))
  3331. return 125000000;
  3332. else if (rate == 125000000)
  3333. return 125000000;
  3334. WARN_ON(rate > 125000000);
  3335. return 125000000;
  3336. }
  3337. static int fu540_macb_tx_set_rate(struct clk_hw *hw, unsigned long rate,
  3338. unsigned long parent_rate)
  3339. {
  3340. rate = fu540_macb_tx_round_rate(hw, rate, &parent_rate);
  3341. if (rate != 125000000)
  3342. iowrite32(1, mgmt->reg);
  3343. else
  3344. iowrite32(0, mgmt->reg);
  3345. mgmt->rate = rate;
  3346. return 0;
  3347. }
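/* Worked example (derived from the code above): a 1 Gb/s link requests a
 * 125 MHz TX clock, so round_rate() returns 125000000 and set_rate()
 * writes 0 to the GEMGXL management register; 10/100 Mb/s links round to
 * 2.5/25 MHz and the register is written with 1 instead.
 */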
  3348. static const struct clk_ops fu540_c000_ops = {
  3349. .recalc_rate = fu540_macb_tx_recalc_rate,
  3350. .round_rate = fu540_macb_tx_round_rate,
  3351. .set_rate = fu540_macb_tx_set_rate,
  3352. };
static int fu540_c000_clk_init(struct platform_device *pdev, struct clk **pclk,
                               struct clk **hclk, struct clk **tx_clk,
                               struct clk **rx_clk, struct clk **tsu_clk)
{
        struct clk_init_data init;
        int err = 0;

        err = macb_clk_init(pdev, pclk, hclk, tx_clk, rx_clk, tsu_clk);
        if (err)
                return err;

        mgmt = devm_kzalloc(&pdev->dev, sizeof(*mgmt), GFP_KERNEL);
        if (!mgmt)
                return -ENOMEM;

        init.name = "sifive-gemgxl-mgmt";
        init.ops = &fu540_c000_ops;
        init.flags = 0;
        init.num_parents = 0;

        mgmt->rate = 0;
        mgmt->hw.init = &init;

        *tx_clk = devm_clk_register(&pdev->dev, &mgmt->hw);
        if (IS_ERR(*tx_clk))
                return PTR_ERR(*tx_clk);

        err = clk_prepare_enable(*tx_clk);
        if (err)
                dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
        else
                dev_info(&pdev->dev, "Registered clk switch '%s'\n", init.name);

        return 0;
}
  3381. static int fu540_c000_init(struct platform_device *pdev)
  3382. {
  3383. mgmt->reg = devm_platform_ioremap_resource(pdev, 1);
  3384. if (IS_ERR(mgmt->reg))
  3385. return PTR_ERR(mgmt->reg);
  3386. return macb_init(pdev);
  3387. }
  3388. static const struct macb_config fu540_c000_config = {
  3389. .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO |
  3390. MACB_CAPS_GEM_HAS_PTP,
  3391. .dma_burst_length = 16,
  3392. .clk_init = fu540_c000_clk_init,
  3393. .init = fu540_c000_init,
  3394. .jumbo_max_len = 10240,
  3395. };
  3396. static const struct macb_config at91sam9260_config = {
  3397. .caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
  3398. .clk_init = macb_clk_init,
  3399. .init = macb_init,
  3400. };
  3401. static const struct macb_config sama5d3macb_config = {
  3402. .caps = MACB_CAPS_SG_DISABLED
  3403. | MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
  3404. .clk_init = macb_clk_init,
  3405. .init = macb_init,
  3406. };
  3407. static const struct macb_config pc302gem_config = {
  3408. .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
  3409. .dma_burst_length = 16,
  3410. .clk_init = macb_clk_init,
  3411. .init = macb_init,
  3412. };
  3413. static const struct macb_config sama5d2_config = {
  3414. .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
  3415. .dma_burst_length = 16,
  3416. .clk_init = macb_clk_init,
  3417. .init = macb_init,
  3418. };
  3419. static const struct macb_config sama5d3_config = {
  3420. .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE
  3421. | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_JUMBO,
  3422. .dma_burst_length = 16,
  3423. .clk_init = macb_clk_init,
  3424. .init = macb_init,
  3425. .jumbo_max_len = 10240,
  3426. };
  3427. static const struct macb_config sama5d4_config = {
  3428. .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
  3429. .dma_burst_length = 4,
  3430. .clk_init = macb_clk_init,
  3431. .init = macb_init,
  3432. };
  3433. static const struct macb_config emac_config = {
  3434. .caps = MACB_CAPS_NEEDS_RSTONUBR,
  3435. .clk_init = at91ether_clk_init,
  3436. .init = at91ether_init,
  3437. };
  3438. static const struct macb_config np4_config = {
  3439. .caps = MACB_CAPS_USRIO_DISABLED,
  3440. .clk_init = macb_clk_init,
  3441. .init = macb_init,
  3442. };
  3443. static const struct macb_config zynqmp_config = {
  3444. .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
  3445. MACB_CAPS_JUMBO |
  3446. MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH,
  3447. .dma_burst_length = 16,
  3448. .clk_init = macb_clk_init,
  3449. .init = macb_init,
  3450. .jumbo_max_len = 10240,
  3451. };
  3452. static const struct macb_config zynq_config = {
  3453. .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF |
  3454. MACB_CAPS_NEEDS_RSTONUBR,
  3455. .dma_burst_length = 16,
  3456. .clk_init = macb_clk_init,
  3457. .init = macb_init,
  3458. };
  3459. static const struct of_device_id macb_dt_ids[] = {
  3460. { .compatible = "cdns,at32ap7000-macb" },
  3461. { .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config },
  3462. { .compatible = "cdns,macb" },
  3463. { .compatible = "cdns,np4-macb", .data = &np4_config },
  3464. { .compatible = "cdns,pc302-gem", .data = &pc302gem_config },
  3465. { .compatible = "cdns,gem", .data = &pc302gem_config },
  3466. { .compatible = "cdns,sam9x60-macb", .data = &at91sam9260_config },
  3467. { .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config },
  3468. { .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
  3469. { .compatible = "atmel,sama5d3-macb", .data = &sama5d3macb_config },
  3470. { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
  3471. { .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
  3472. { .compatible = "cdns,emac", .data = &emac_config },
  3473. { .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config},
  3474. { .compatible = "cdns,zynq-gem", .data = &zynq_config },
  3475. { .compatible = "sifive,fu540-c000-gem", .data = &fu540_c000_config },
  3476. { /* sentinel */ }
  3477. };
  3478. MODULE_DEVICE_TABLE(of, macb_dt_ids);
  3479. #endif /* CONFIG_OF */
  3480. static const struct macb_config default_gem_config = {
  3481. .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
  3482. MACB_CAPS_JUMBO |
  3483. MACB_CAPS_GEM_HAS_PTP,
  3484. .dma_burst_length = 16,
  3485. .clk_init = macb_clk_init,
  3486. .init = macb_init,
  3487. .jumbo_max_len = 10240,
  3488. };
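/* Fallback configuration: macb_probe() starts from default_gem_config and
 * only switches to one of the SoC-specific entries above when the matched
 * device-tree compatible string carries .data.
 */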
  3489. static int macb_probe(struct platform_device *pdev)
  3490. {
  3491. const struct macb_config *macb_config = &default_gem_config;
  3492. int (*clk_init)(struct platform_device *, struct clk **,
  3493. struct clk **, struct clk **, struct clk **,
  3494. struct clk **) = macb_config->clk_init;
  3495. int (*init)(struct platform_device *) = macb_config->init;
  3496. struct device_node *np = pdev->dev.of_node;
  3497. struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL;
  3498. struct clk *tsu_clk = NULL;
  3499. unsigned int queue_mask, num_queues;
  3500. bool native_io;
  3501. struct phy_device *phydev;
  3502. struct net_device *dev;
  3503. struct resource *regs;
  3504. void __iomem *mem;
  3505. const char *mac;
  3506. struct macb *bp;
  3507. int err, val;
  3508. regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  3509. mem = devm_ioremap_resource(&pdev->dev, regs);
  3510. if (IS_ERR(mem))
  3511. return PTR_ERR(mem);
  3512. if (np) {
  3513. const struct of_device_id *match;
  3514. match = of_match_node(macb_dt_ids, np);
  3515. if (match && match->data) {
  3516. macb_config = match->data;
  3517. clk_init = macb_config->clk_init;
  3518. init = macb_config->init;
  3519. }
  3520. }
  3521. err = clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk, &tsu_clk);
  3522. if (err)
  3523. return err;
  3524. pm_runtime_set_autosuspend_delay(&pdev->dev, MACB_PM_TIMEOUT);
  3525. pm_runtime_use_autosuspend(&pdev->dev);
  3526. pm_runtime_get_noresume(&pdev->dev);
  3527. pm_runtime_set_active(&pdev->dev);
  3528. pm_runtime_enable(&pdev->dev);
  3529. native_io = hw_is_native_io(mem);
  3530. macb_probe_queues(mem, native_io, &queue_mask, &num_queues);
  3531. dev = alloc_etherdev_mq(sizeof(*bp), num_queues);
  3532. if (!dev) {
  3533. err = -ENOMEM;
  3534. goto err_disable_clocks;
  3535. }
  3536. dev->base_addr = regs->start;
  3537. SET_NETDEV_DEV(dev, &pdev->dev);
  3538. bp = netdev_priv(dev);
  3539. bp->pdev = pdev;
  3540. bp->dev = dev;
  3541. bp->regs = mem;
  3542. bp->native_io = native_io;
  3543. if (native_io) {
  3544. bp->macb_reg_readl = hw_readl_native;
  3545. bp->macb_reg_writel = hw_writel_native;
  3546. } else {
  3547. bp->macb_reg_readl = hw_readl;
  3548. bp->macb_reg_writel = hw_writel;
  3549. }
  3550. bp->num_queues = num_queues;
  3551. bp->queue_mask = queue_mask;
  3552. if (macb_config)
  3553. bp->dma_burst_length = macb_config->dma_burst_length;
  3554. bp->pclk = pclk;
  3555. bp->hclk = hclk;
  3556. bp->tx_clk = tx_clk;
  3557. bp->rx_clk = rx_clk;
  3558. bp->tsu_clk = tsu_clk;
  3559. if (macb_config)
  3560. bp->jumbo_max_len = macb_config->jumbo_max_len;
  3561. bp->wol = 0;
  3562. if (of_get_property(np, "magic-packet", NULL))
  3563. bp->wol |= MACB_WOL_HAS_MAGIC_PACKET;
  3564. device_set_wakeup_capable(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
  3565. spin_lock_init(&bp->lock);
  3566. /* setup capabilities */
  3567. macb_configure_caps(bp, macb_config);
  3568. #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
  3569. if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
  3570. dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
  3571. bp->hw_dma_cap |= HW_DMA_CAP_64B;
  3572. }
  3573. #endif
  3574. platform_set_drvdata(pdev, dev);
  3575. dev->irq = platform_get_irq(pdev, 0);
  3576. if (dev->irq < 0) {
  3577. err = dev->irq;
  3578. goto err_out_free_netdev;
  3579. }
  3580. /* MTU range: 68 - 1500 or 10240 */
  3581. dev->min_mtu = GEM_MTU_MIN_SIZE;
  3582. if (bp->caps & MACB_CAPS_JUMBO)
  3583. dev->max_mtu = gem_readl(bp, JML) - ETH_HLEN - ETH_FCS_LEN;
  3584. else
  3585. dev->max_mtu = ETH_DATA_LEN;
  3586. if (bp->caps & MACB_CAPS_BD_RD_PREFETCH) {
  3587. val = GEM_BFEXT(RXBD_RDBUFF, gem_readl(bp, DCFG10));
  3588. if (val)
  3589. bp->rx_bd_rd_prefetch = (2 << (val - 1)) *
  3590. macb_dma_desc_get_size(bp);
  3591. val = GEM_BFEXT(TXBD_RDBUFF, gem_readl(bp, DCFG10));
  3592. if (val)
  3593. bp->tx_bd_rd_prefetch = (2 << (val - 1)) *
  3594. macb_dma_desc_get_size(bp);
  3595. }
  3596. bp->rx_intr_mask = MACB_RX_INT_FLAGS;
  3597. if (bp->caps & MACB_CAPS_NEEDS_RSTONUBR)
  3598. bp->rx_intr_mask |= MACB_BIT(RXUBR);
  3599. mac = of_get_mac_address(np);
  3600. if (PTR_ERR(mac) == -EPROBE_DEFER) {
  3601. err = -EPROBE_DEFER;
  3602. goto err_out_free_netdev;
  3603. } else if (!IS_ERR_OR_NULL(mac)) {
  3604. ether_addr_copy(bp->dev->dev_addr, mac);
  3605. } else {
  3606. macb_get_hwaddr(bp);
  3607. }
  3608. err = of_get_phy_mode(np);
  3609. if (err < 0)
  3610. /* not found in DT, MII by default */
  3611. bp->phy_interface = PHY_INTERFACE_MODE_MII;
  3612. else
  3613. bp->phy_interface = err;
  3614. /* IP specific init */
  3615. err = init(pdev);
  3616. if (err)
  3617. goto err_out_free_netdev;
  3618. err = macb_mii_init(bp);
  3619. if (err)
  3620. goto err_out_free_netdev;
  3621. phydev = dev->phydev;
  3622. netif_carrier_off(dev);
  3623. err = register_netdev(dev);
  3624. if (err) {
  3625. dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
  3626. goto err_out_unregister_mdio;
  3627. }
  3628. tasklet_init(&bp->hresp_err_tasklet, macb_hresp_error_task,
  3629. (unsigned long)bp);
  3630. phy_attached_info(phydev);
  3631. netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
  3632. macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
  3633. dev->base_addr, dev->irq, dev->dev_addr);
  3634. pm_runtime_mark_last_busy(&bp->pdev->dev);
  3635. pm_runtime_put_autosuspend(&bp->pdev->dev);
  3636. return 0;
  3637. err_out_unregister_mdio:
  3638. phy_disconnect(dev->phydev);
  3639. mdiobus_unregister(bp->mii_bus);
  3640. of_node_put(bp->phy_node);
  3641. if (np && of_phy_is_fixed_link(np))
  3642. of_phy_deregister_fixed_link(np);
  3643. mdiobus_free(bp->mii_bus);
  3644. err_out_free_netdev:
  3645. free_netdev(dev);
  3646. err_disable_clocks:
  3647. clk_disable_unprepare(tx_clk);
  3648. clk_disable_unprepare(hclk);
  3649. clk_disable_unprepare(pclk);
  3650. clk_disable_unprepare(rx_clk);
  3651. clk_disable_unprepare(tsu_clk);
  3652. pm_runtime_disable(&pdev->dev);
  3653. pm_runtime_set_suspended(&pdev->dev);
  3654. pm_runtime_dont_use_autosuspend(&pdev->dev);
  3655. return err;
  3656. }
  3657. static int macb_remove(struct platform_device *pdev)
  3658. {
  3659. struct net_device *dev;
  3660. struct macb *bp;
  3661. struct device_node *np = pdev->dev.of_node;
  3662. dev = platform_get_drvdata(pdev);
  3663. if (dev) {
  3664. bp = netdev_priv(dev);
  3665. if (dev->phydev)
  3666. phy_disconnect(dev->phydev);
  3667. mdiobus_unregister(bp->mii_bus);
  3668. if (np && of_phy_is_fixed_link(np))
  3669. of_phy_deregister_fixed_link(np);
  3670. dev->phydev = NULL;
  3671. mdiobus_free(bp->mii_bus);
  3672. unregister_netdev(dev);
  3673. tasklet_kill(&bp->hresp_err_tasklet);
  3674. pm_runtime_disable(&pdev->dev);
  3675. pm_runtime_dont_use_autosuspend(&pdev->dev);
  3676. if (!pm_runtime_suspended(&pdev->dev)) {
  3677. clk_disable_unprepare(bp->tx_clk);
  3678. clk_disable_unprepare(bp->hclk);
  3679. clk_disable_unprepare(bp->pclk);
  3680. clk_disable_unprepare(bp->rx_clk);
  3681. clk_disable_unprepare(bp->tsu_clk);
  3682. pm_runtime_set_suspended(&pdev->dev);
  3683. }
  3684. of_node_put(bp->phy_node);
  3685. free_netdev(dev);
  3686. }
  3687. return 0;
  3688. }
  3689. static int __maybe_unused macb_suspend(struct device *dev)
  3690. {
  3691. struct net_device *netdev = dev_get_drvdata(dev);
  3692. struct macb *bp = netdev_priv(netdev);
  3693. struct macb_queue *queue = bp->queues;
  3694. unsigned long flags;
  3695. unsigned int q;
  3696. if (!netif_running(netdev))
  3697. return 0;
  3698. if (bp->wol & MACB_WOL_ENABLED) {
  3699. macb_writel(bp, IER, MACB_BIT(WOL));
  3700. macb_writel(bp, WOL, MACB_BIT(MAG));
  3701. enable_irq_wake(bp->queues[0].irq);
  3702. netif_device_detach(netdev);
  3703. } else {
  3704. netif_device_detach(netdev);
  3705. for (q = 0, queue = bp->queues; q < bp->num_queues;
  3706. ++q, ++queue)
  3707. napi_disable(&queue->napi);
  3708. phy_stop(netdev->phydev);
  3709. phy_suspend(netdev->phydev);
  3710. spin_lock_irqsave(&bp->lock, flags);
  3711. macb_reset_hw(bp);
  3712. spin_unlock_irqrestore(&bp->lock, flags);
  3713. if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
  3714. bp->pm_data.usrio = macb_or_gem_readl(bp, USRIO);
  3715. if (netdev->hw_features & NETIF_F_NTUPLE)
  3716. bp->pm_data.scrt2 = gem_readl_n(bp, ETHT, SCRT2_ETHT);
  3717. }
  3718. netif_carrier_off(netdev);
  3719. if (bp->ptp_info)
  3720. bp->ptp_info->ptp_remove(netdev);
  3721. if (!device_may_wakeup(dev))
  3722. pm_runtime_force_suspend(dev);
  3723. return 0;
  3724. }
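/* Suspend policy: with Wake-on-LAN armed the MAC stays powered and only the
 * magic-packet interrupt is left enabled; otherwise the interface is fully
 * quiesced (NAPI, PHY, MAC reset) and, unless the device may wake the
 * system, the runtime-PM callbacks further below gate the clocks as well.
 */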
  3725. static int __maybe_unused macb_resume(struct device *dev)
  3726. {
  3727. struct net_device *netdev = dev_get_drvdata(dev);
  3728. struct macb *bp = netdev_priv(netdev);
  3729. struct macb_queue *queue = bp->queues;
  3730. unsigned int q;
  3731. if (!netif_running(netdev))
  3732. return 0;
  3733. if (!device_may_wakeup(dev))
  3734. pm_runtime_force_resume(dev);
  3735. if (bp->wol & MACB_WOL_ENABLED) {
  3736. macb_writel(bp, IDR, MACB_BIT(WOL));
  3737. macb_writel(bp, WOL, 0);
  3738. disable_irq_wake(bp->queues[0].irq);
  3739. } else {
  3740. macb_writel(bp, NCR, MACB_BIT(MPE));
  3741. if (netdev->hw_features & NETIF_F_NTUPLE)
  3742. gem_writel_n(bp, ETHT, SCRT2_ETHT, bp->pm_data.scrt2);
  3743. if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
  3744. macb_or_gem_writel(bp, USRIO, bp->pm_data.usrio);
  3745. for (q = 0, queue = bp->queues; q < bp->num_queues;
  3746. ++q, ++queue)
  3747. napi_enable(&queue->napi);
  3748. phy_resume(netdev->phydev);
  3749. phy_init_hw(netdev->phydev);
  3750. phy_start(netdev->phydev);
  3751. }
  3752. bp->macbgem_ops.mog_init_rings(bp);
  3753. macb_init_hw(bp);
  3754. macb_set_rx_mode(netdev);
  3755. macb_restore_features(bp);
  3756. netif_device_attach(netdev);
  3757. if (bp->ptp_info)
  3758. bp->ptp_info->ptp_init(netdev);
  3759. return 0;
  3760. }
  3761. static int __maybe_unused macb_runtime_suspend(struct device *dev)
  3762. {
  3763. struct net_device *netdev = dev_get_drvdata(dev);
  3764. struct macb *bp = netdev_priv(netdev);
  3765. if (!(device_may_wakeup(dev))) {
  3766. clk_disable_unprepare(bp->tx_clk);
  3767. clk_disable_unprepare(bp->hclk);
  3768. clk_disable_unprepare(bp->pclk);
  3769. clk_disable_unprepare(bp->rx_clk);
  3770. }
  3771. clk_disable_unprepare(bp->tsu_clk);
  3772. return 0;
  3773. }
  3774. static int __maybe_unused macb_runtime_resume(struct device *dev)
  3775. {
  3776. struct net_device *netdev = dev_get_drvdata(dev);
  3777. struct macb *bp = netdev_priv(netdev);
  3778. if (!(device_may_wakeup(dev))) {
  3779. clk_prepare_enable(bp->pclk);
  3780. clk_prepare_enable(bp->hclk);
  3781. clk_prepare_enable(bp->tx_clk);
  3782. clk_prepare_enable(bp->rx_clk);
  3783. }
  3784. clk_prepare_enable(bp->tsu_clk);
  3785. return 0;
  3786. }
  3787. static const struct dev_pm_ops macb_pm_ops = {
  3788. SET_SYSTEM_SLEEP_PM_OPS(macb_suspend, macb_resume)
  3789. SET_RUNTIME_PM_OPS(macb_runtime_suspend, macb_runtime_resume, NULL)
  3790. };
  3791. static struct platform_driver macb_driver = {
  3792. .probe = macb_probe,
  3793. .remove = macb_remove,
  3794. .driver = {
  3795. .name = "macb",
  3796. .of_match_table = of_match_ptr(macb_dt_ids),
  3797. .pm = &macb_pm_ops,
  3798. },
  3799. };
  3800. module_platform_driver(macb_driver);
  3801. MODULE_LICENSE("GPL");
  3802. MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
  3803. MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
  3804. MODULE_ALIAS("platform:macb");