bnxt.c

/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/rtc.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/udp_tunnel.h>
#ifdef CONFIG_NET_RX_BUSY_POLL
#include <net/busy_poll.h>
#endif
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/log2.h>
#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/cpu_rmap.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"

#define BNXT_TX_TIMEOUT		(5 * HZ)

static const char version[] =
	"Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
MODULE_VERSION(DRV_MODULE_VERSION);
#define BNXT_RX_OFFSET		(NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET	NET_SKB_PAD
#define BNXT_RX_COPY_THRESH	256

#define BNXT_TX_PUSH_THRESH	164

enum board_idx {
	BCM57301,
	BCM57302,
	BCM57304,
	BCM57417_NPAR,
	BCM58700,
	BCM57311,
	BCM57312,
	BCM57402,
	BCM57404,
	BCM57406,
	BCM57402_NPAR,
	BCM57407,
	BCM57412,
	BCM57414,
	BCM57416,
	BCM57417,
	BCM57412_NPAR,
	BCM57314,
	BCM57417_SFP,
	BCM57416_SFP,
	BCM57404_NPAR,
	BCM57406_NPAR,
	BCM57407_SFP,
	BCM57407_NPAR,
	BCM57414_NPAR,
	BCM57416_NPAR,
	BCM57452,
	BCM57454,
	NETXTREME_E_VF,
	NETXTREME_C_VF,
};
/* indexed by enum above */
static const struct {
	char *name;
} board_info[] = {
	{ "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
	{ "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
	{ "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	{ "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
	{ "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
	{ "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
	{ "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
	{ "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
	{ "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
	{ "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
	{ "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
	{ "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
	{ "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
	{ "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
	{ "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
	{ "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
	{ "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
	{ "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	{ "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
	{ "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
	{ "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
	{ "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
	{ "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
	{ "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
	{ "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
	{ "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
	{ "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
	{ "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	{ "Broadcom NetXtreme-E Ethernet Virtual Function" },
	{ "Broadcom NetXtreme-C Ethernet Virtual Function" },
};
static const struct pci_device_id bnxt_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
	{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
	{ PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
	{ PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
	{ PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
	{ PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
	{ PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
	{ PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
	{ PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
	{ PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
	{ PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
	{ PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
	{ PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
	{ PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
	{ PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
	{ PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
	{ PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
#ifdef CONFIG_BNXT_SRIOV
	{ PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
#endif
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
static const u16 bnxt_vf_req_snif[] = {
	HWRM_FUNC_CFG,
	HWRM_PORT_PHY_QCFG,
	HWRM_CFA_L2_FILTER_ALLOC,
};

static const u16 bnxt_async_events_arr[] = {
	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
};

static bool bnxt_vf_pciid(enum board_idx idx)
{
	return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF);
}
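
/* Completion ring doorbell helpers: BNXT_CP_DB_REARM() writes back the
 * consumer index without the IRQ-disable flag (re-arming the interrupt),
 * BNXT_CP_DB() writes it with DB_IRQ_DIS set, and BNXT_CP_DB_IRQ_DIS()
 * only sets DB_IRQ_DIS.
 */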
#define DB_CP_REARM_FLAGS	(DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS		(DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
#define DB_CP_IRQ_DIS_FLAGS	(DB_KEY_CP | DB_IRQ_DIS)

#define BNXT_CP_DB_REARM(db, raw_cons)					\
		writel(DB_CP_REARM_FLAGS | RING_CMP(raw_cons), db)

#define BNXT_CP_DB(db, raw_cons)					\
		writel(DB_CP_FLAGS | RING_CMP(raw_cons), db)

#define BNXT_CP_DB_IRQ_DIS(db)						\
		writel(DB_CP_IRQ_DIS_FLAGS, db)

static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
{
	/* Tell compiler to fetch tx indices from memory. */
	barrier();

	return bp->tx_ring_size -
		((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask);
}
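
/* Lookup table of TX BD length-hint flags, indexed by (packet length >> 9)
 * in bnxt_start_xmit() below.
 */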
static const u16 bnxt_lhint_arr[] = {
	TX_BD_FLAGS_LHINT_512_AND_SMALLER,
	TX_BD_FLAGS_LHINT_512_TO_1023,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
};
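
/* Main transmit entry point.  Small packets that fit in the push buffer
 * are copied inline and written straight to the doorbell ("push" mode);
 * everything else is DMA-mapped as a long TX BD chain with optional
 * VLAN, checksum, and LSO offload flags.
 */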
static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	struct tx_bd *txbd;
	struct tx_bd_ext *txbd1;
	struct netdev_queue *txq;
	int i;
	dma_addr_t mapping;
	unsigned int length, pad = 0;
	u32 len, free_size, vlan_tag_flags, cfa_action, flags;
	u16 prod, last_frag;
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_tx_ring_info *txr;
	struct bnxt_sw_tx_bd *tx_buf;

	i = skb_get_queue_mapping(skb);
	if (unlikely(i >= bp->tx_nr_rings)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	txr = &bp->tx_ring[i];
	txq = netdev_get_tx_queue(dev, i);
	prod = txr->tx_prod;

	free_size = bnxt_tx_avail(bp, txr);
	if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
		netif_tx_stop_queue(txq);
		return NETDEV_TX_BUSY;
	}

	length = skb->len;
	len = skb_headlen(skb);
	last_frag = skb_shinfo(skb)->nr_frags;

	txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd->tx_bd_opaque = prod;

	tx_buf = &txr->tx_buf_ring[prod];
	tx_buf->skb = skb;
	tx_buf->nr_frags = last_frag;

	vlan_tag_flags = 0;
	cfa_action = 0;
	if (skb_vlan_tag_present(skb)) {
		vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
				 skb_vlan_tag_get(skb);
		/* Currently supports 8021Q, 8021AD vlan offloads
		 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
		 */
		if (skb->vlan_proto == htons(ETH_P_8021Q))
			vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
	}

	if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
		struct tx_push_buffer *tx_push_buf = txr->tx_push;
		struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
		struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
		void *pdata = tx_push_buf->data;
		u64 *end;
		int j, push_len;

		/* Set COAL_NOW to be ready quickly for the next push */
		tx_push->tx_bd_len_flags_type =
			cpu_to_le32((length << TX_BD_LEN_SHIFT) |
					TX_BD_TYPE_LONG_TX_BD |
					TX_BD_FLAGS_LHINT_512_AND_SMALLER |
					TX_BD_FLAGS_COAL_NOW |
					TX_BD_FLAGS_PACKET_END |
					(2 << TX_BD_FLAGS_BD_CNT_SHIFT));

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			tx_push1->tx_bd_hsize_lflags =
					cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		else
			tx_push1->tx_bd_hsize_lflags = 0;

		tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
		tx_push1->tx_bd_cfa_action = cpu_to_le32(cfa_action);

		end = pdata + length;
		end = PTR_ALIGN(end, 8) - 1;
		*end = 0;

		skb_copy_from_linear_data(skb, pdata, len);
		pdata += len;
		for (j = 0; j < last_frag; j++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
			void *fptr;

			fptr = skb_frag_address_safe(frag);
			if (!fptr)
				goto normal_tx;

			memcpy(pdata, fptr, skb_frag_size(frag));
			pdata += skb_frag_size(frag);
		}

		txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
		txbd->tx_bd_haddr = txr->data_mapping;
		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
		memcpy(txbd, tx_push1, sizeof(*txbd));
		prod = NEXT_TX(prod);
		tx_push->doorbell =
			cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
		txr->tx_prod = prod;

		tx_buf->is_push = 1;
		netdev_tx_sent_queue(txq, skb->len);
		wmb();	/* Sync is_push and byte queue before pushing data */

		push_len = (length + sizeof(*tx_push) + 7) / 8;

		if (push_len > 16) {
			__iowrite64_copy(txr->tx_doorbell, tx_push_buf, 16);
			__iowrite32_copy(txr->tx_doorbell + 4, tx_push_buf + 1,
					 (push_len - 16) << 1);
		} else {
			__iowrite64_copy(txr->tx_doorbell, tx_push_buf,
					 push_len);
		}

		goto tx_done;
	}
normal_tx:
	if (length < BNXT_MIN_PKT_SIZE) {
		pad = BNXT_MIN_PKT_SIZE - length;
		if (skb_pad(skb, pad)) {
			/* SKB already freed. */
			tx_buf->skb = NULL;
			return NETDEV_TX_OK;
		}
		length = BNXT_MIN_PKT_SIZE;
	}

	mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
		dev_kfree_skb_any(skb);
		tx_buf->skb = NULL;
		return NETDEV_TX_OK;
	}

	dma_unmap_addr_set(tx_buf, mapping, mapping);
	flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
		((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);

	txbd->tx_bd_haddr = cpu_to_le64(mapping);

	prod = NEXT_TX(prod);
	txbd1 = (struct tx_bd_ext *)
		&txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd1->tx_bd_hsize_lflags = 0;
	if (skb_is_gso(skb)) {
		u32 hdr_len;

		if (skb->encapsulation)
			hdr_len = skb_inner_network_offset(skb) +
				skb_inner_network_header_len(skb) +
				inner_tcp_hdrlen(skb);
		else
			hdr_len = skb_transport_offset(skb) +
				tcp_hdrlen(skb);

		txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO |
					TX_BD_FLAGS_T_IPID |
					(hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
		length = skb_shinfo(skb)->gso_size;
		txbd1->tx_bd_mss = cpu_to_le32(length);
		length += hdr_len;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txbd1->tx_bd_hsize_lflags =
			cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		txbd1->tx_bd_mss = 0;
	}

	length >>= 9;
	flags |= bnxt_lhint_arr[length];
	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);

	txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
	txbd1->tx_bd_cfa_action = cpu_to_le32(cfa_action);
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

		len = skb_frag_size(frag);
		mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
					   DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
			goto tx_dma_error;

		tx_buf = &txr->tx_buf_ring[prod];
		dma_unmap_addr_set(tx_buf, mapping, mapping);

		txbd->tx_bd_haddr = cpu_to_le64(mapping);

		flags = len << TX_BD_LEN_SHIFT;
		txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
	}

	flags &= ~TX_BD_LEN;
	txbd->tx_bd_len_flags_type =
		cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
			    TX_BD_FLAGS_PACKET_END);

	netdev_tx_sent_queue(txq, skb->len);

	/* Sync BD data before updating doorbell */
	wmb();

	prod = NEXT_TX(prod);
	txr->tx_prod = prod;

	writel(DB_KEY_TX | prod, txr->tx_doorbell);
	writel(DB_KEY_TX | prod, txr->tx_doorbell);

tx_done:

	mmiowb();

	if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in bnxt_tx_avail() below, because in
		 * bnxt_tx_int(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();

		if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
			netif_tx_wake_queue(txq);
	}
	return NETDEV_TX_OK;

tx_dma_error:
	last_frag = i;

	/* start back at beginning and unmap skb */
	prod = txr->tx_prod;
	tx_buf = &txr->tx_buf_ring[prod];
	tx_buf->skb = NULL;
	dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			 skb_headlen(skb), PCI_DMA_TODEVICE);
	prod = NEXT_TX(prod);

	/* unmap remaining mapped pages */
	for (i = 0; i < last_frag; i++) {
		prod = NEXT_TX(prod);
		tx_buf = &txr->tx_buf_ring[prod];
		dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
			       PCI_DMA_TODEVICE);
	}

	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
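
/* Reclaim completed TX descriptors: unmap DMA for each sent packet, free
 * the skbs, update the BQL byte count, and wake the queue if it was
 * stopped and enough descriptors are now free.
 */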
static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
{
	struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
	int index = txr - &bp->tx_ring[0];
	struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, index);
	u16 cons = txr->tx_cons;
	struct pci_dev *pdev = bp->pdev;
	int i;
	unsigned int tx_bytes = 0;

	for (i = 0; i < nr_pkts; i++) {
		struct bnxt_sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int j, last;

		tx_buf = &txr->tx_buf_ring[cons];
		cons = NEXT_TX(cons);
		skb = tx_buf->skb;
		tx_buf->skb = NULL;

		if (tx_buf->is_push) {
			tx_buf->is_push = 0;
			goto next_tx_int;
		}

		dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
				 skb_headlen(skb), PCI_DMA_TODEVICE);
		last = tx_buf->nr_frags;

		for (j = 0; j < last; j++) {
			cons = NEXT_TX(cons);
			tx_buf = &txr->tx_buf_ring[cons];
			dma_unmap_page(
				&pdev->dev,
				dma_unmap_addr(tx_buf, mapping),
				skb_frag_size(&skb_shinfo(skb)->frags[j]),
				PCI_DMA_TODEVICE);
		}

next_tx_int:
		cons = NEXT_TX(cons);

		tx_bytes += skb->len;
		dev_kfree_skb_any(skb);
	}

	netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
	txr->tx_cons = cons;

	/* Need to make the tx_cons update visible to bnxt_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnxt_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq)) &&
	    (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
		    txr->dev_state != BNXT_DEV_STATE_CLOSING)
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
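
/* RX buffer helpers: __bnxt_alloc_rx_data() allocates and DMA-maps a
 * receive buffer, bnxt_alloc_rx_data() installs one at the given RX ring
 * producer index, and bnxt_reuse_rx_data() recycles an already-mapped
 * buffer back onto the ring.
 */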
static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
				       gfp_t gfp)
{
	u8 *data;
	struct pci_dev *pdev = bp->pdev;

	data = kmalloc(bp->rx_buf_size, gfp);
	if (!data)
		return NULL;

	*mapping = dma_map_single(&pdev->dev, data + BNXT_RX_DMA_OFFSET,
				  bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

	if (dma_mapping_error(&pdev->dev, *mapping)) {
		kfree(data);
		data = NULL;
	}
	return data;
}

static inline int bnxt_alloc_rx_data(struct bnxt *bp,
				     struct bnxt_rx_ring_info *rxr,
				     u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
	u8 *data;
	dma_addr_t mapping;

	data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
	if (!data)
		return -ENOMEM;

	rx_buf->data = data;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rxbd->rx_bd_haddr = cpu_to_le64(mapping);

	return 0;
}

static void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons,
			       u8 *data)
{
	u16 prod = rxr->rx_prod;
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	prod_rx_buf = &rxr->rx_buf_ring[prod];
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	prod_rx_buf->data = data;

	dma_unmap_addr_set(prod_rx_buf, mapping,
			   dma_unmap_addr(cons_rx_buf, mapping));

	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];

	prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
}
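
/* Aggregation ring helpers: bnxt_find_next_agg_idx() finds a free slot
 * in the software bitmap, and bnxt_alloc_rx_page() allocates (or slices,
 * when PAGE_SIZE > BNXT_RX_PAGE_SIZE) a page, maps it for DMA, and
 * installs it in the aggregation ring.
 */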
static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
	u16 next, max = rxr->rx_agg_bmap_size;

	next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
	if (next >= max)
		next = find_first_zero_bit(rxr->rx_agg_bmap, max);
	return next;
}

static inline int bnxt_alloc_rx_page(struct bnxt *bp,
				     struct bnxt_rx_ring_info *rxr,
				     u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd =
		&rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_agg_bd *rx_agg_buf;
	struct pci_dev *pdev = bp->pdev;
	struct page *page;
	dma_addr_t mapping;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	unsigned int offset = 0;

	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
		page = rxr->rx_page;
		if (!page) {
			page = alloc_page(gfp);
			if (!page)
				return -ENOMEM;
			rxr->rx_page = page;
			rxr->rx_page_offset = 0;
		}
		offset = rxr->rx_page_offset;
		rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
		if (rxr->rx_page_offset == PAGE_SIZE)
			rxr->rx_page = NULL;
		else
			get_page(page);
	} else {
		page = alloc_page(gfp);
		if (!page)
			return -ENOMEM;
	}

	mapping = dma_map_page(&pdev->dev, page, offset, BNXT_RX_PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
	if (dma_mapping_error(&pdev->dev, mapping)) {
		__free_page(page);
		return -EIO;
	}

	if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
		sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

	__set_bit(sw_prod, rxr->rx_agg_bmap);
	rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
	rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);

	rx_agg_buf->page = page;
	rx_agg_buf->offset = offset;
	rx_agg_buf->mapping = mapping;
	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
	rxbd->rx_bd_opaque = sw_prod;
	return 0;
}
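
/* Recycle the aggregation buffers referenced by the completion entries
 * starting at cp_cons back onto the aggregation ring, e.g. when an
 * aggregated packet has to be dropped.
 */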
static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons,
				   u32 agg_bufs)
{
	struct bnxt *bp = bnapi->bp;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	u32 i;

	for (i = 0; i < agg_bufs; i++) {
		u16 cons;
		struct rx_agg_cmp *agg;
		struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
		struct rx_bd *prod_bd;
		struct page *page;

		agg = (struct rx_agg_cmp *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
		cons = agg->rx_agg_cmp_opaque;
		__clear_bit(cons, rxr->rx_agg_bmap);

		if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
			sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

		__set_bit(sw_prod, rxr->rx_agg_bmap);
		prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
		cons_rx_buf = &rxr->rx_agg_ring[cons];

		/* It is possible for sw_prod to be equal to cons, so
		 * set cons_rx_buf->page to NULL first.
		 */
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;
		prod_rx_buf->page = page;
		prod_rx_buf->offset = cons_rx_buf->offset;

		prod_rx_buf->mapping = cons_rx_buf->mapping;

		prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
		prod_bd->rx_bd_opaque = sw_prod;

		prod = NEXT_RX_AGG(prod);
		sw_prod = NEXT_RX_AGG(sw_prod);
		cp_cons = NEXT_CMP(cp_cons);
	}
	rxr->rx_agg_prod = prod;
	rxr->rx_sw_agg_prod = sw_prod;
}
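
/* Build an skb around a received buffer.  A replacement buffer is
 * allocated first; if that fails, the original buffer is recycled and
 * the packet is dropped.
 */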
static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
				   struct bnxt_rx_ring_info *rxr, u16 cons,
				   u16 prod, u8 *data, dma_addr_t dma_addr,
				   unsigned int len)
{
	int err;
	struct sk_buff *skb;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnxt_reuse_rx_data(rxr, cons, data);
		return NULL;
	}

	skb = build_skb(data, 0);
	dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);
	if (!skb) {
		kfree(data);
		return NULL;
	}

	skb_reserve(skb, BNXT_RX_OFFSET);
	skb_put(skb, len);
	return skb;
}
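
/* Attach the aggregation-ring pages described by agg_bufs completion
 * entries to the skb as page fragments, replenishing each consumed page
 * as we go.  On allocation failure, the remaining buffers are recycled
 * and the skb is dropped.
 */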
static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
				     struct sk_buff *skb, u16 cp_cons,
				     u32 agg_bufs)
{
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	u32 i;

	for (i = 0; i < agg_bufs; i++) {
		u16 cons, frag_len;
		struct rx_agg_cmp *agg;
		struct bnxt_sw_rx_agg_bd *cons_rx_buf;
		struct page *page;
		dma_addr_t mapping;

		agg = (struct rx_agg_cmp *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
		cons = agg->rx_agg_cmp_opaque;
		frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
			    RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;

		cons_rx_buf = &rxr->rx_agg_ring[cons];
		skb_fill_page_desc(skb, i, cons_rx_buf->page,
				   cons_rx_buf->offset, frag_len);
		__clear_bit(cons, rxr->rx_agg_bmap);

		/* It is possible for bnxt_alloc_rx_page() to allocate
		 * a sw_prod index that equals the cons index, so we
		 * need to clear the cons entry now.
		 */
		mapping = dma_unmap_addr(cons_rx_buf, mapping);
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;

		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
			struct skb_shared_info *shinfo;
			unsigned int nr_frags;

			shinfo = skb_shinfo(skb);
			nr_frags = --shinfo->nr_frags;
			__skb_frag_set_page(&shinfo->frags[nr_frags], NULL);

			dev_kfree_skb(skb);

			cons_rx_buf->page = page;

			/* Update prod since possibly some pages have been
			 * allocated already.
			 */
			rxr->rx_agg_prod = prod;
			bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs - i);
			return NULL;
		}

		dma_unmap_page(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);

		skb->data_len += frag_len;
		skb->len += frag_len;
		skb->truesize += PAGE_SIZE;

		prod = NEXT_RX_AGG(prod);
		cp_cons = NEXT_CMP(cp_cons);
	}
	rxr->rx_agg_prod = prod;
	return skb;
}
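
/* Check that all agg_bufs completion entries following *raw_cons have
 * been written by hardware (i.e. the last one is valid) before the
 * aggregation buffers are processed.
 */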
static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			       u8 agg_bufs, u32 *raw_cons)
{
	u16 last;
	struct rx_agg_cmp *agg;

	*raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
	last = RING_CMP(*raw_cons);
	agg = (struct rx_agg_cmp *)
		&cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
	return RX_AGG_CMP_VALID(agg, *raw_cons);
}
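
/* Copy-break path for small packets: allocate a fresh skb and memcpy the
 * data out of the DMA buffer so the original RX buffer can stay on the
 * ring.
 */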
static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
					    unsigned int len,
					    dma_addr_t mapping)
{
	struct bnxt *bp = bnapi->bp;
	struct pci_dev *pdev = bp->pdev;
	struct sk_buff *skb;

	skb = napi_alloc_skb(&bnapi->napi, len);
	if (!skb)
		return NULL;

	dma_sync_single_for_cpu(&pdev->dev, mapping,
				bp->rx_copy_thresh, PCI_DMA_FROMDEVICE);

	memcpy(skb->data - BNXT_RX_OFFSET, data, len + BNXT_RX_OFFSET);

	dma_sync_single_for_device(&pdev->dev, mapping,
				   bp->rx_copy_thresh,
				   PCI_DMA_FROMDEVICE);

	skb_put(skb, len);
	return skb;
}
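
/* Advance the completion ring consumer past an RX (or TPA-end) completion
 * that is being discarded, returning -EBUSY if its aggregation entries
 * have not all arrived yet.
 */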
static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_napi *bnapi,
			   u32 *raw_cons, void *cmp)
{
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct rx_cmp *rxcmp = cmp;
	u32 tmp_raw_cons = *raw_cons;
	u8 cmp_type, agg_bufs = 0;

	cmp_type = RX_CMP_TYPE(rxcmp);

	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
		agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
			    RX_CMP_AGG_BUFS) >>
			   RX_CMP_AGG_BUFS_SHIFT;
	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		struct rx_tpa_end_cmp *tpa_end = cmp;

		agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
			    RX_TPA_END_CMP_AGG_BUFS) >>
			   RX_TPA_END_CMP_AGG_BUFS_SHIFT;
	}

	if (agg_bufs) {
		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
			return -EBUSY;
	}
	*raw_cons = tmp_raw_cons;
	return 0;
}
static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
	if (!rxr->bnapi->in_reset) {
		rxr->bnapi->in_reset = true;
		set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
		schedule_work(&bp->sp_task);
	}
	rxr->rx_next_cons = 0xffff;
}
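
/* Handle a TPA_START completion: stash the current rx buffer in
 * rx_tpa[agg_id] for the duration of the aggregation and put the previously
 * cached TPA buffer back on the rx ring so the producer slot stays filled.
 * The RSS hash, VLAN metadata and header offsets are saved for use at
 * TPA_END.
 */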
static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
			   struct rx_tpa_start_cmp *tpa_start,
			   struct rx_tpa_start_cmp_ext *tpa_start1)
{
	u8 agg_id = TPA_START_AGG_ID(tpa_start);
	u16 cons, prod;
	struct bnxt_tpa_info *tpa_info;
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *prod_bd;
	dma_addr_t mapping;

	cons = tpa_start->rx_tpa_start_cmp_opaque;
	prod = rxr->rx_prod;
	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];
	tpa_info = &rxr->rx_tpa[agg_id];

	if (unlikely(cons != rxr->rx_next_cons)) {
		bnxt_sched_reset(bp, rxr);
		return;
	}

	prod_rx_buf->data = tpa_info->data;

	mapping = tpa_info->mapping;
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);

	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr = cpu_to_le64(mapping);

	tpa_info->data = cons_rx_buf->data;
	cons_rx_buf->data = NULL;
	tpa_info->mapping = dma_unmap_addr(cons_rx_buf, mapping);

	tpa_info->len =
		le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
				RX_TPA_START_CMP_LEN_SHIFT;
	if (likely(TPA_START_HASH_VALID(tpa_start))) {
		u32 hash_type = TPA_START_HASH_TYPE(tpa_start);

		tpa_info->hash_type = PKT_HASH_TYPE_L4;
		tpa_info->gso_type = SKB_GSO_TCPV4;
		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
		if (hash_type == 3)
			tpa_info->gso_type = SKB_GSO_TCPV6;
		tpa_info->rss_hash =
			le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
	} else {
		tpa_info->hash_type = PKT_HASH_TYPE_NONE;
		tpa_info->gso_type = 0;
		if (netif_msg_rx_err(bp))
			netdev_warn(bp->dev, "TPA packet without valid hash\n");
	}
	tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
	tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
	tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);

	rxr->rx_prod = NEXT_RX(prod);
	cons = NEXT_RX(cons);
	rxr->rx_next_cons = NEXT_RX(cons);
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
	rxr->rx_prod = NEXT_RX(rxr->rx_prod);
	cons_rx_buf->data = NULL;
}
static void bnxt_abort_tpa(struct bnxt *bp, struct bnxt_napi *bnapi,
			   u16 cp_cons, u32 agg_bufs)
{
	if (agg_bufs)
		bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
}
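
/* GRO fixup for chips that report header offsets in the TPA_START hdr_info.
 * Internal loopback frames carry an extra 4 bytes, so the offsets are
 * adjusted before the network/transport headers are set and the TCP
 * pseudo-header checksum is recomputed.  Tunneled packets also get the
 * matching SKB_GSO_UDP_TUNNEL* gso_type.
 */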
static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	struct tcphdr *th;
	int len, nw_off;
	u16 outer_ip_off, inner_ip_off, inner_mac_off;
	u32 hdr_info = tpa_info->hdr_info;
	bool loopback = false;

	inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
	inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
	outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);

	/* If the packet is an internal loopback packet, the offsets will
	 * have an extra 4 bytes.
	 */
	if (inner_mac_off == 4) {
		loopback = true;
	} else if (inner_mac_off > 4) {
		__be16 proto = *((__be16 *)(skb->data + inner_ip_off -
					    ETH_HLEN - 2));

		/* We only support inner IPv4/IPv6.  If we don't see the
		 * correct protocol ID, it must be a loopback packet where
		 * the offsets are off by 4.
		 */
		if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
			loopback = true;
	}
	if (loopback) {
		/* internal loopback packet, subtract all offsets by 4 */
		inner_ip_off -= 4;
		inner_mac_off -= 4;
		outer_ip_off -= 4;
	}

	nw_off = inner_ip_off - ETH_HLEN;
	skb_set_network_header(skb, nw_off);
	if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
		struct ipv6hdr *iph = ipv6_hdr(skb);

		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
	} else {
		struct iphdr *iph = ip_hdr(skb);

		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
	}

	if (inner_mac_off) { /* tunnel */
		struct udphdr *uh = NULL;
		__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
					    ETH_HLEN - 2));

		if (proto == htons(ETH_P_IP)) {
			struct iphdr *iph = (struct iphdr *)skb->data;

			if (iph->protocol == IPPROTO_UDP)
				uh = (struct udphdr *)(iph + 1);
		} else {
			struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;

			if (iph->nexthdr == IPPROTO_UDP)
				uh = (struct udphdr *)(iph + 1);
		}
		if (uh) {
			if (uh->check)
				skb_shinfo(skb)->gso_type |=
					SKB_GSO_UDP_TUNNEL_CSUM;
			else
				skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
		}
	}
#endif
	return skb;
}
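
/* Header sizes without options, used to locate the inner headers when only
 * the payload offset is reported in the TPA_END completion (the 5730x
 * flavor below).  A 12-byte TCP timestamp option is the only TCP option
 * accounted for.
 */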
#define BNXT_IPV4_HDR_SIZE	(sizeof(struct iphdr) + sizeof(struct tcphdr))
#define BNXT_IPV6_HDR_SIZE	(sizeof(struct ipv6hdr) + sizeof(struct tcphdr))

static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	struct tcphdr *th;
	int len, nw_off, tcp_opt_len = 0;

	if (tcp_ts)
		tcp_opt_len = 12;

	if (tpa_info->gso_type == SKB_GSO_TCPV4) {
		struct iphdr *iph;

		nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
			 ETH_HLEN;
		skb_set_network_header(skb, nw_off);
		iph = ip_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
	} else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
		struct ipv6hdr *iph;

		nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
			 ETH_HLEN;
		skb_set_network_header(skb, nw_off);
		iph = ipv6_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
	} else {
		dev_kfree_skb_any(skb);
		return NULL;
	}
	tcp_gro_complete(skb);

	if (nw_off) { /* tunnel */
		struct udphdr *uh = NULL;

		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = (struct iphdr *)skb->data;

			if (iph->protocol == IPPROTO_UDP)
				uh = (struct udphdr *)(iph + 1);
		} else {
			struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;

			if (iph->nexthdr == IPPROTO_UDP)
				uh = (struct udphdr *)(iph + 1);
		}
		if (uh) {
			if (uh->check)
				skb_shinfo(skb)->gso_type |=
					SKB_GSO_UDP_TUNNEL_CSUM;
			else
				skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
		}
	}
#endif
	return skb;
}
static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
					   struct bnxt_tpa_info *tpa_info,
					   struct rx_tpa_end_cmp *tpa_end,
					   struct rx_tpa_end_cmp_ext *tpa_end1,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	int payload_off;
	u16 segs;

	segs = TPA_END_TPA_SEGS(tpa_end);
	if (segs == 1)
		return skb;

	NAPI_GRO_CB(skb)->count = segs;
	skb_shinfo(skb)->gso_size =
		le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
	skb_shinfo(skb)->gso_type = tpa_info->gso_type;
	payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
		       RX_TPA_END_CMP_PAYLOAD_OFFSET) >>
		      RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT;
	skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
#endif
	return skb;
}
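
/* Complete an aggregated (TPA) packet.  Small packets are copied; larger
 * ones are turned into an skb built around the TPA buffer, which is then
 * replaced with a newly allocated buffer.  Aggregation pages are attached
 * with bnxt_rx_pages(), VLAN/RSS/checksum metadata is applied, and the skb
 * is passed through the GRO fixup when the completion indicates GRO.
 */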
static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
					   struct bnxt_napi *bnapi,
					   u32 *raw_cons,
					   struct rx_tpa_end_cmp *tpa_end,
					   struct rx_tpa_end_cmp_ext *tpa_end1,
					   bool *agg_event)
{
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u8 agg_id = TPA_END_AGG_ID(tpa_end);
	u8 *data, agg_bufs;
	u16 cp_cons = RING_CMP(*raw_cons);
	unsigned int len;
	struct bnxt_tpa_info *tpa_info;
	dma_addr_t mapping;
	struct sk_buff *skb;

	if (unlikely(bnapi->in_reset)) {
		int rc = bnxt_discard_rx(bp, bnapi, raw_cons, tpa_end);

		if (rc < 0)
			return ERR_PTR(-EBUSY);
		return NULL;
	}

	tpa_info = &rxr->rx_tpa[agg_id];
	data = tpa_info->data;
	prefetch(data);
	len = tpa_info->len;
	mapping = tpa_info->mapping;

	agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
		    RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT;

	if (agg_bufs) {
		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
			return ERR_PTR(-EBUSY);

		*agg_event = true;
		cp_cons = NEXT_CMP(cp_cons);
	}

	if (unlikely(agg_bufs > MAX_SKB_FRAGS)) {
		bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
		netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
			    agg_bufs, (int)MAX_SKB_FRAGS);
		return NULL;
	}

	if (len <= bp->rx_copy_thresh) {
		skb = bnxt_copy_skb(bnapi, data, len, mapping);
		if (!skb) {
			bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
			return NULL;
		}
	} else {
		u8 *new_data;
		dma_addr_t new_mapping;

		new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
		if (!new_data) {
			bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
			return NULL;
		}

		tpa_info->data = new_data;
		tpa_info->mapping = new_mapping;

		skb = build_skb(data, 0);
		dma_unmap_single(&bp->pdev->dev, mapping, bp->rx_buf_use_size,
				 PCI_DMA_FROMDEVICE);

		if (!skb) {
			kfree(data);
			bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
			return NULL;
		}
		skb_reserve(skb, BNXT_RX_OFFSET);
		skb_put(skb, len);
	}

	if (agg_bufs) {
		skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
		if (!skb) {
			/* Page reuse already handled by bnxt_rx_pages(). */
			return NULL;
		}
	}
	skb->protocol = eth_type_trans(skb, bp->dev);

	if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
		skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);

	if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
	    (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
		u16 vlan_proto = tpa_info->metadata >>
			RX_CMP_FLAGS2_METADATA_TPID_SFT;
		u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_VID_MASK;

		__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
	}

	skb_checksum_none_assert(skb);
	if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->csum_level =
			(tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
	}

	if (TPA_END_GRO(tpa_end))
		skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);

	return skb;
}
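
/* Process one RX completion.  Each packet uses a pair of completion ring
 * entries (rx_cmp + rx_cmp_ext); TPA_START and TPA_END completions are
 * dispatched to bnxt_tpa_start()/bnxt_tpa_end() and do not advance the
 * rx producer here.
 */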
/* returns the following:
 * 1       - 1 packet successfully received
 * 0       - successful TPA_START, packet not completed yet
 * -EBUSY  - completion ring does not have all the agg buffers yet
 * -ENOMEM - packet aborted due to out of memory
 * -EIO    - packet aborted due to hw error indicated in BD
 */
  1107. static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
  1108. bool *agg_event)
  1109. {
  1110. struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
  1111. struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
  1112. struct net_device *dev = bp->dev;
  1113. struct rx_cmp *rxcmp;
  1114. struct rx_cmp_ext *rxcmp1;
  1115. u32 tmp_raw_cons = *raw_cons;
  1116. u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
  1117. struct bnxt_sw_rx_bd *rx_buf;
  1118. unsigned int len;
  1119. u8 *data, agg_bufs, cmp_type;
  1120. dma_addr_t dma_addr;
  1121. struct sk_buff *skb;
  1122. int rc = 0;
  1123. rxcmp = (struct rx_cmp *)
  1124. &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
  1125. tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
  1126. cp_cons = RING_CMP(tmp_raw_cons);
  1127. rxcmp1 = (struct rx_cmp_ext *)
  1128. &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
  1129. if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
  1130. return -EBUSY;
  1131. cmp_type = RX_CMP_TYPE(rxcmp);
  1132. prod = rxr->rx_prod;
  1133. if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
  1134. bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
  1135. (struct rx_tpa_start_cmp_ext *)rxcmp1);
  1136. goto next_rx_no_prod;
  1137. } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
  1138. skb = bnxt_tpa_end(bp, bnapi, &tmp_raw_cons,
  1139. (struct rx_tpa_end_cmp *)rxcmp,
  1140. (struct rx_tpa_end_cmp_ext *)rxcmp1,
  1141. agg_event);
  1142. if (unlikely(IS_ERR(skb)))
  1143. return -EBUSY;
  1144. rc = -ENOMEM;
  1145. if (likely(skb)) {
  1146. skb_record_rx_queue(skb, bnapi->index);
  1147. skb_mark_napi_id(skb, &bnapi->napi);
  1148. if (bnxt_busy_polling(bnapi))
  1149. netif_receive_skb(skb);
  1150. else
  1151. napi_gro_receive(&bnapi->napi, skb);
  1152. rc = 1;
  1153. }
  1154. goto next_rx_no_prod;
  1155. }
  1156. cons = rxcmp->rx_cmp_opaque;
  1157. rx_buf = &rxr->rx_buf_ring[cons];
  1158. data = rx_buf->data;
  1159. if (unlikely(cons != rxr->rx_next_cons)) {
  1160. int rc1 = bnxt_discard_rx(bp, bnapi, raw_cons, rxcmp);
  1161. bnxt_sched_reset(bp, rxr);
  1162. return rc1;
  1163. }
  1164. prefetch(data);
  1165. agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) & RX_CMP_AGG_BUFS) >>
  1166. RX_CMP_AGG_BUFS_SHIFT;
  1167. if (agg_bufs) {
  1168. if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
  1169. return -EBUSY;
  1170. cp_cons = NEXT_CMP(cp_cons);
  1171. *agg_event = true;
  1172. }
  1173. rx_buf->data = NULL;
  1174. if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
  1175. bnxt_reuse_rx_data(rxr, cons, data);
  1176. if (agg_bufs)
  1177. bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
  1178. rc = -EIO;
  1179. goto next_rx;
  1180. }
  1181. len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
  1182. dma_addr = dma_unmap_addr(rx_buf, mapping);
  1183. if (len <= bp->rx_copy_thresh) {
  1184. skb = bnxt_copy_skb(bnapi, data, len, dma_addr);
  1185. bnxt_reuse_rx_data(rxr, cons, data);
  1186. if (!skb) {
  1187. rc = -ENOMEM;
  1188. goto next_rx;
  1189. }
  1190. } else {
  1191. skb = bnxt_rx_skb(bp, rxr, cons, prod, data, dma_addr, len);
  1192. if (!skb) {
  1193. rc = -ENOMEM;
  1194. goto next_rx;
  1195. }
  1196. }
  1197. if (agg_bufs) {
  1198. skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
  1199. if (!skb) {
  1200. rc = -ENOMEM;
  1201. goto next_rx;
  1202. }
  1203. }
  1204. if (RX_CMP_HASH_VALID(rxcmp)) {
  1205. u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
  1206. enum pkt_hash_types type = PKT_HASH_TYPE_L4;
  1207. /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
  1208. if (hash_type != 1 && hash_type != 3)
  1209. type = PKT_HASH_TYPE_L3;
  1210. skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
  1211. }
  1212. skb->protocol = eth_type_trans(skb, dev);
  1213. if ((rxcmp1->rx_cmp_flags2 &
  1214. cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
  1215. (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
  1216. u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
  1217. u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_VID_MASK;
  1218. u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
  1219. __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
  1220. }
  1221. skb_checksum_none_assert(skb);
  1222. if (RX_CMP_L4_CS_OK(rxcmp1)) {
  1223. if (dev->features & NETIF_F_RXCSUM) {
  1224. skb->ip_summed = CHECKSUM_UNNECESSARY;
  1225. skb->csum_level = RX_CMP_ENCAP(rxcmp1);
  1226. }
  1227. } else {
  1228. if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
  1229. if (dev->features & NETIF_F_RXCSUM)
  1230. cpr->rx_l4_csum_errors++;
  1231. }
  1232. }
  1233. skb_record_rx_queue(skb, bnapi->index);
  1234. skb_mark_napi_id(skb, &bnapi->napi);
  1235. if (bnxt_busy_polling(bnapi))
  1236. netif_receive_skb(skb);
  1237. else
  1238. napi_gro_receive(&bnapi->napi, skb);
  1239. rc = 1;
  1240. next_rx:
  1241. rxr->rx_prod = NEXT_RX(prod);
  1242. rxr->rx_next_cons = NEXT_RX(cons);
  1243. next_rx_no_prod:
  1244. *raw_cons = tmp_raw_cons;
  1245. return rc;
  1246. }
  1247. #define BNXT_GET_EVENT_PORT(data) \
  1248. ((data) & \
  1249. HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
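/* Translate a firmware async event completion into the matching sp_event
 * bit and kick the slow path task.  Unhandled event IDs are logged and the
 * task is not scheduled for them.
 */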
  1250. static int bnxt_async_event_process(struct bnxt *bp,
  1251. struct hwrm_async_event_cmpl *cmpl)
  1252. {
  1253. u16 event_id = le16_to_cpu(cmpl->event_id);
  1254. /* TODO CHIMP_FW: Define event id's for link change, error etc */
  1255. switch (event_id) {
  1256. case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
  1257. u32 data1 = le32_to_cpu(cmpl->event_data1);
  1258. struct bnxt_link_info *link_info = &bp->link_info;
  1259. if (BNXT_VF(bp))
  1260. goto async_event_process_exit;
  1261. /* print unsupported speed warning in forced speed mode only */
  1262. if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
  1263. (data1 & 0x20000)) {
  1264. u16 fw_speed = link_info->force_link_speed;
  1265. u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
  1266. if (speed != SPEED_UNKNOWN)
  1267. netdev_warn(bp->dev, "Link speed %d no longer supported\n",
  1268. speed);
  1269. }
  1270. set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
  1271. /* fall thru */
  1272. }
  1273. case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
  1274. set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
  1275. break;
  1276. case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
  1277. set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
  1278. break;
  1279. case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
  1280. u32 data1 = le32_to_cpu(cmpl->event_data1);
  1281. u16 port_id = BNXT_GET_EVENT_PORT(data1);
  1282. if (BNXT_VF(bp))
  1283. break;
  1284. if (bp->pf.port_id != port_id)
  1285. break;
  1286. set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
  1287. break;
  1288. }
  1289. case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
  1290. if (BNXT_PF(bp))
  1291. goto async_event_process_exit;
  1292. set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
  1293. break;
  1294. default:
  1295. netdev_err(bp->dev, "unhandled ASYNC event (id 0x%x)\n",
  1296. event_id);
  1297. goto async_event_process_exit;
  1298. }
  1299. schedule_work(&bp->sp_task);
  1300. async_event_process_exit:
  1301. return 0;
  1302. }
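/* Dispatch HWRM-related completions seen on the completion ring: DONE
 * completions are matched against the pending interrupt sequence id,
 * forwarded VF requests are validated and queued to the slow path task,
 * and async events go to bnxt_async_event_process().
 */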
  1303. static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
  1304. {
  1305. u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
  1306. struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
  1307. struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
  1308. (struct hwrm_fwd_req_cmpl *)txcmp;
  1309. switch (cmpl_type) {
  1310. case CMPL_BASE_TYPE_HWRM_DONE:
  1311. seq_id = le16_to_cpu(h_cmpl->sequence_id);
  1312. if (seq_id == bp->hwrm_intr_seq_id)
  1313. bp->hwrm_intr_seq_id = HWRM_SEQ_ID_INVALID;
  1314. else
  1315. netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
  1316. break;
  1317. case CMPL_BASE_TYPE_HWRM_FWD_REQ:
  1318. vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
  1319. if ((vf_id < bp->pf.first_vf_id) ||
  1320. (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
  1321. netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
  1322. vf_id);
  1323. return -EINVAL;
  1324. }
  1325. set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
  1326. set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
  1327. schedule_work(&bp->sp_task);
  1328. break;
  1329. case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
  1330. bnxt_async_event_process(bp,
  1331. (struct hwrm_async_event_cmpl *)txcmp);
  1332. default:
  1333. break;
  1334. }
  1335. return 0;
  1336. }
  1337. static irqreturn_t bnxt_msix(int irq, void *dev_instance)
  1338. {
  1339. struct bnxt_napi *bnapi = dev_instance;
  1340. struct bnxt *bp = bnapi->bp;
  1341. struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
  1342. u32 cons = RING_CMP(cpr->cp_raw_cons);
  1343. prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
  1344. napi_schedule(&bnapi->napi);
  1345. return IRQ_HANDLED;
  1346. }
  1347. static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
  1348. {
  1349. u32 raw_cons = cpr->cp_raw_cons;
  1350. u16 cons = RING_CMP(raw_cons);
  1351. struct tx_cmp *txcmp;
  1352. txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
  1353. return TX_CMP_VALID(txcmp, raw_cons);
  1354. }
  1355. static irqreturn_t bnxt_inta(int irq, void *dev_instance)
  1356. {
  1357. struct bnxt_napi *bnapi = dev_instance;
  1358. struct bnxt *bp = bnapi->bp;
  1359. struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
  1360. u32 cons = RING_CMP(cpr->cp_raw_cons);
  1361. u32 int_status;
  1362. prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
  1363. if (!bnxt_has_work(bp, cpr)) {
  1364. int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
  1365. /* return if erroneous interrupt */
  1366. if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
  1367. return IRQ_NONE;
  1368. }
  1369. /* disable ring IRQ */
  1370. BNXT_CP_DB_IRQ_DIS(cpr->cp_doorbell);
  1371. /* Return here if interrupt is shared and is disabled. */
  1372. if (unlikely(atomic_read(&bp->intr_sem) != 0))
  1373. return IRQ_HANDLED;
  1374. napi_schedule(&bnapi->napi);
  1375. return IRQ_HANDLED;
  1376. }
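/* Main NAPI work loop: drain up to @budget completions, counting TX and RX
 * work separately.  The completion ring is ACKed before TX buffers are
 * freed and new RX/agg buffers are posted, and the RX/agg doorbells are
 * only written when the corresponding rings were actually touched.
 */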
  1377. static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
  1378. {
  1379. struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
  1380. u32 raw_cons = cpr->cp_raw_cons;
  1381. u32 cons;
  1382. int tx_pkts = 0;
  1383. int rx_pkts = 0;
  1384. bool rx_event = false;
  1385. bool agg_event = false;
  1386. struct tx_cmp *txcmp;
  1387. while (1) {
  1388. int rc;
  1389. cons = RING_CMP(raw_cons);
  1390. txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
  1391. if (!TX_CMP_VALID(txcmp, raw_cons))
  1392. break;
  1393. /* The valid test of the entry must be done first before
  1394. * reading any further.
  1395. */
  1396. dma_rmb();
  1397. if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
  1398. tx_pkts++;
  1399. /* return full budget so NAPI will complete. */
  1400. if (unlikely(tx_pkts > bp->tx_wake_thresh))
  1401. rx_pkts = budget;
  1402. } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
  1403. rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &agg_event);
  1404. if (likely(rc >= 0))
  1405. rx_pkts += rc;
  1406. else if (rc == -EBUSY) /* partial completion */
  1407. break;
  1408. rx_event = true;
  1409. } else if (unlikely((TX_CMP_TYPE(txcmp) ==
  1410. CMPL_BASE_TYPE_HWRM_DONE) ||
  1411. (TX_CMP_TYPE(txcmp) ==
  1412. CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
  1413. (TX_CMP_TYPE(txcmp) ==
  1414. CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
  1415. bnxt_hwrm_handler(bp, txcmp);
  1416. }
  1417. raw_cons = NEXT_RAW_CMP(raw_cons);
  1418. if (rx_pkts == budget)
  1419. break;
  1420. }
  1421. cpr->cp_raw_cons = raw_cons;
  1422. /* ACK completion ring before freeing tx ring and producing new
  1423. * buffers in rx/agg rings to prevent overflowing the completion
  1424. * ring.
  1425. */
  1426. BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
  1427. if (tx_pkts)
  1428. bnxt_tx_int(bp, bnapi, tx_pkts);
  1429. if (rx_event) {
  1430. struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
  1431. writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
  1432. writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
  1433. if (agg_event) {
  1434. writel(DB_KEY_RX | rxr->rx_agg_prod,
  1435. rxr->rx_agg_doorbell);
  1436. writel(DB_KEY_RX | rxr->rx_agg_prod,
  1437. rxr->rx_agg_doorbell);
  1438. }
  1439. }
  1440. return rx_pkts;
  1441. }
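/* Poll handler for the special completion ring used on Nitro A0.  RX
 * completions seen here are forced to look like CRC errors so that
 * bnxt_rx_pkt() recycles the buffers and the packets are dropped.
 */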
  1442. static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
  1443. {
  1444. struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
  1445. struct bnxt *bp = bnapi->bp;
  1446. struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
  1447. struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
  1448. struct tx_cmp *txcmp;
  1449. struct rx_cmp_ext *rxcmp1;
  1450. u32 cp_cons, tmp_raw_cons;
  1451. u32 raw_cons = cpr->cp_raw_cons;
  1452. u32 rx_pkts = 0;
  1453. bool agg_event = false;
  1454. while (1) {
  1455. int rc;
  1456. cp_cons = RING_CMP(raw_cons);
  1457. txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
  1458. if (!TX_CMP_VALID(txcmp, raw_cons))
  1459. break;
  1460. if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
  1461. tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
  1462. cp_cons = RING_CMP(tmp_raw_cons);
  1463. rxcmp1 = (struct rx_cmp_ext *)
  1464. &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
  1465. if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
  1466. break;
  1467. /* force an error to recycle the buffer */
  1468. rxcmp1->rx_cmp_cfa_code_errors_v2 |=
  1469. cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
  1470. rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &agg_event);
  1471. if (likely(rc == -EIO))
  1472. rx_pkts++;
  1473. else if (rc == -EBUSY) /* partial completion */
  1474. break;
  1475. } else if (unlikely(TX_CMP_TYPE(txcmp) ==
  1476. CMPL_BASE_TYPE_HWRM_DONE)) {
  1477. bnxt_hwrm_handler(bp, txcmp);
  1478. } else {
  1479. netdev_err(bp->dev,
  1480. "Invalid completion received on special ring\n");
  1481. }
  1482. raw_cons = NEXT_RAW_CMP(raw_cons);
  1483. if (rx_pkts == budget)
  1484. break;
  1485. }
  1486. cpr->cp_raw_cons = raw_cons;
  1487. BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
  1488. writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
  1489. writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
  1490. if (agg_event) {
  1491. writel(DB_KEY_RX | rxr->rx_agg_prod, rxr->rx_agg_doorbell);
  1492. writel(DB_KEY_RX | rxr->rx_agg_prod, rxr->rx_agg_doorbell);
  1493. }
  1494. if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
  1495. napi_complete(napi);
  1496. BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
  1497. }
  1498. return rx_pkts;
  1499. }
  1500. static int bnxt_poll(struct napi_struct *napi, int budget)
  1501. {
  1502. struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
  1503. struct bnxt *bp = bnapi->bp;
  1504. struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
  1505. int work_done = 0;
  1506. if (!bnxt_lock_napi(bnapi))
  1507. return budget;
  1508. while (1) {
  1509. work_done += bnxt_poll_work(bp, bnapi, budget - work_done);
  1510. if (work_done >= budget)
  1511. break;
  1512. if (!bnxt_has_work(bp, cpr)) {
  1513. napi_complete(napi);
  1514. BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
  1515. break;
  1516. }
  1517. }
  1518. mmiowb();
  1519. bnxt_unlock_napi(bnapi);
  1520. return work_done;
  1521. }
  1522. #ifdef CONFIG_NET_RX_BUSY_POLL
  1523. static int bnxt_busy_poll(struct napi_struct *napi)
  1524. {
  1525. struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
  1526. struct bnxt *bp = bnapi->bp;
  1527. struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
  1528. int rx_work, budget = 4;
  1529. if (atomic_read(&bp->intr_sem) != 0)
  1530. return LL_FLUSH_FAILED;
  1531. if (!bp->link_info.link_up)
  1532. return LL_FLUSH_FAILED;
  1533. if (!bnxt_lock_poll(bnapi))
  1534. return LL_FLUSH_BUSY;
  1535. rx_work = bnxt_poll_work(bp, bnapi, budget);
  1536. BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
  1537. bnxt_unlock_poll(bnapi);
  1538. return rx_work;
  1539. }
  1540. #endif
  1541. static void bnxt_free_tx_skbs(struct bnxt *bp)
  1542. {
  1543. int i, max_idx;
  1544. struct pci_dev *pdev = bp->pdev;
  1545. if (!bp->tx_ring)
  1546. return;
  1547. max_idx = bp->tx_nr_pages * TX_DESC_CNT;
  1548. for (i = 0; i < bp->tx_nr_rings; i++) {
  1549. struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
  1550. int j;
  1551. for (j = 0; j < max_idx;) {
  1552. struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
  1553. struct sk_buff *skb = tx_buf->skb;
  1554. int k, last;
  1555. if (!skb) {
  1556. j++;
  1557. continue;
  1558. }
  1559. tx_buf->skb = NULL;
  1560. if (tx_buf->is_push) {
  1561. dev_kfree_skb(skb);
  1562. j += 2;
  1563. continue;
  1564. }
  1565. dma_unmap_single(&pdev->dev,
  1566. dma_unmap_addr(tx_buf, mapping),
  1567. skb_headlen(skb),
  1568. PCI_DMA_TODEVICE);
  1569. last = tx_buf->nr_frags;
  1570. j += 2;
  1571. for (k = 0; k < last; k++, j++) {
  1572. int ring_idx = j & bp->tx_ring_mask;
  1573. skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
  1574. tx_buf = &txr->tx_buf_ring[ring_idx];
  1575. dma_unmap_page(
  1576. &pdev->dev,
  1577. dma_unmap_addr(tx_buf, mapping),
  1578. skb_frag_size(frag), PCI_DMA_TODEVICE);
  1579. }
  1580. dev_kfree_skb(skb);
  1581. }
  1582. netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
  1583. }
  1584. }
  1585. static void bnxt_free_rx_skbs(struct bnxt *bp)
  1586. {
  1587. int i, max_idx, max_agg_idx;
  1588. struct pci_dev *pdev = bp->pdev;
  1589. if (!bp->rx_ring)
  1590. return;
  1591. max_idx = bp->rx_nr_pages * RX_DESC_CNT;
  1592. max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
  1593. for (i = 0; i < bp->rx_nr_rings; i++) {
  1594. struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
  1595. int j;
  1596. if (rxr->rx_tpa) {
  1597. for (j = 0; j < MAX_TPA; j++) {
  1598. struct bnxt_tpa_info *tpa_info =
  1599. &rxr->rx_tpa[j];
  1600. u8 *data = tpa_info->data;
  1601. if (!data)
  1602. continue;
  1603. dma_unmap_single(
  1604. &pdev->dev,
  1605. dma_unmap_addr(tpa_info, mapping),
  1606. bp->rx_buf_use_size,
  1607. PCI_DMA_FROMDEVICE);
  1608. tpa_info->data = NULL;
  1609. kfree(data);
  1610. }
  1611. }
  1612. for (j = 0; j < max_idx; j++) {
  1613. struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
  1614. u8 *data = rx_buf->data;
  1615. if (!data)
  1616. continue;
  1617. dma_unmap_single(&pdev->dev,
  1618. dma_unmap_addr(rx_buf, mapping),
  1619. bp->rx_buf_use_size,
  1620. PCI_DMA_FROMDEVICE);
  1621. rx_buf->data = NULL;
  1622. kfree(data);
  1623. }
  1624. for (j = 0; j < max_agg_idx; j++) {
  1625. struct bnxt_sw_rx_agg_bd *rx_agg_buf =
  1626. &rxr->rx_agg_ring[j];
  1627. struct page *page = rx_agg_buf->page;
  1628. if (!page)
  1629. continue;
  1630. dma_unmap_page(&pdev->dev,
  1631. dma_unmap_addr(rx_agg_buf, mapping),
  1632. BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE);
  1633. rx_agg_buf->page = NULL;
  1634. __clear_bit(j, rxr->rx_agg_bmap);
  1635. __free_page(page);
  1636. }
  1637. if (rxr->rx_page) {
  1638. __free_page(rxr->rx_page);
  1639. rxr->rx_page = NULL;
  1640. }
  1641. }
  1642. }
  1643. static void bnxt_free_skbs(struct bnxt *bp)
  1644. {
  1645. bnxt_free_tx_skbs(bp);
  1646. bnxt_free_rx_skbs(bp);
  1647. }
  1648. static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
  1649. {
  1650. struct pci_dev *pdev = bp->pdev;
  1651. int i;
  1652. for (i = 0; i < ring->nr_pages; i++) {
  1653. if (!ring->pg_arr[i])
  1654. continue;
  1655. dma_free_coherent(&pdev->dev, ring->page_size,
  1656. ring->pg_arr[i], ring->dma_arr[i]);
  1657. ring->pg_arr[i] = NULL;
  1658. }
  1659. if (ring->pg_tbl) {
  1660. dma_free_coherent(&pdev->dev, ring->nr_pages * 8,
  1661. ring->pg_tbl, ring->pg_tbl_map);
  1662. ring->pg_tbl = NULL;
  1663. }
  1664. if (ring->vmem_size && *ring->vmem) {
  1665. vfree(*ring->vmem);
  1666. *ring->vmem = NULL;
  1667. }
  1668. }
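/* Allocate the DMA descriptor pages for a ring and, when the ring spans
 * more than one page, a page table holding the DMA address of each page.
 * vmem, if requested, is the driver's software state array for the ring.
 */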
  1669. static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
  1670. {
  1671. int i;
  1672. struct pci_dev *pdev = bp->pdev;
  1673. if (ring->nr_pages > 1) {
  1674. ring->pg_tbl = dma_alloc_coherent(&pdev->dev,
  1675. ring->nr_pages * 8,
  1676. &ring->pg_tbl_map,
  1677. GFP_KERNEL);
  1678. if (!ring->pg_tbl)
  1679. return -ENOMEM;
  1680. }
  1681. for (i = 0; i < ring->nr_pages; i++) {
  1682. ring->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
  1683. ring->page_size,
  1684. &ring->dma_arr[i],
  1685. GFP_KERNEL);
  1686. if (!ring->pg_arr[i])
  1687. return -ENOMEM;
  1688. if (ring->nr_pages > 1)
  1689. ring->pg_tbl[i] = cpu_to_le64(ring->dma_arr[i]);
  1690. }
  1691. if (ring->vmem_size) {
  1692. *ring->vmem = vzalloc(ring->vmem_size);
  1693. if (!(*ring->vmem))
  1694. return -ENOMEM;
  1695. }
  1696. return 0;
  1697. }
  1698. static void bnxt_free_rx_rings(struct bnxt *bp)
  1699. {
  1700. int i;
  1701. if (!bp->rx_ring)
  1702. return;
  1703. for (i = 0; i < bp->rx_nr_rings; i++) {
  1704. struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
  1705. struct bnxt_ring_struct *ring;
  1706. kfree(rxr->rx_tpa);
  1707. rxr->rx_tpa = NULL;
  1708. kfree(rxr->rx_agg_bmap);
  1709. rxr->rx_agg_bmap = NULL;
  1710. ring = &rxr->rx_ring_struct;
  1711. bnxt_free_ring(bp, ring);
  1712. ring = &rxr->rx_agg_ring_struct;
  1713. bnxt_free_ring(bp, ring);
  1714. }
  1715. }
  1716. static int bnxt_alloc_rx_rings(struct bnxt *bp)
  1717. {
  1718. int i, rc, agg_rings = 0, tpa_rings = 0;
  1719. if (!bp->rx_ring)
  1720. return -ENOMEM;
  1721. if (bp->flags & BNXT_FLAG_AGG_RINGS)
  1722. agg_rings = 1;
  1723. if (bp->flags & BNXT_FLAG_TPA)
  1724. tpa_rings = 1;
  1725. for (i = 0; i < bp->rx_nr_rings; i++) {
  1726. struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
  1727. struct bnxt_ring_struct *ring;
  1728. ring = &rxr->rx_ring_struct;
  1729. rc = bnxt_alloc_ring(bp, ring);
  1730. if (rc)
  1731. return rc;
  1732. if (agg_rings) {
  1733. u16 mem_size;
  1734. ring = &rxr->rx_agg_ring_struct;
  1735. rc = bnxt_alloc_ring(bp, ring);
  1736. if (rc)
  1737. return rc;
  1738. rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
  1739. mem_size = rxr->rx_agg_bmap_size / 8;
  1740. rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
  1741. if (!rxr->rx_agg_bmap)
  1742. return -ENOMEM;
  1743. if (tpa_rings) {
  1744. rxr->rx_tpa = kcalloc(MAX_TPA,
  1745. sizeof(struct bnxt_tpa_info),
  1746. GFP_KERNEL);
  1747. if (!rxr->rx_tpa)
  1748. return -ENOMEM;
  1749. }
  1750. }
  1751. }
  1752. return 0;
  1753. }
  1754. static void bnxt_free_tx_rings(struct bnxt *bp)
  1755. {
  1756. int i;
  1757. struct pci_dev *pdev = bp->pdev;
  1758. if (!bp->tx_ring)
  1759. return;
  1760. for (i = 0; i < bp->tx_nr_rings; i++) {
  1761. struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
  1762. struct bnxt_ring_struct *ring;
  1763. if (txr->tx_push) {
  1764. dma_free_coherent(&pdev->dev, bp->tx_push_size,
  1765. txr->tx_push, txr->tx_push_mapping);
  1766. txr->tx_push = NULL;
  1767. }
  1768. ring = &txr->tx_ring_struct;
  1769. bnxt_free_ring(bp, ring);
  1770. }
  1771. }
  1772. static int bnxt_alloc_tx_rings(struct bnxt *bp)
  1773. {
  1774. int i, j, rc;
  1775. struct pci_dev *pdev = bp->pdev;
  1776. bp->tx_push_size = 0;
  1777. if (bp->tx_push_thresh) {
  1778. int push_size;
  1779. push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
  1780. bp->tx_push_thresh);
  1781. if (push_size > 256) {
  1782. push_size = 0;
  1783. bp->tx_push_thresh = 0;
  1784. }
  1785. bp->tx_push_size = push_size;
  1786. }
  1787. for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
  1788. struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
  1789. struct bnxt_ring_struct *ring;
  1790. ring = &txr->tx_ring_struct;
  1791. rc = bnxt_alloc_ring(bp, ring);
  1792. if (rc)
  1793. return rc;
  1794. if (bp->tx_push_size) {
  1795. dma_addr_t mapping;
  1796. /* One pre-allocated DMA buffer to backup
  1797. * TX push operation
  1798. */
  1799. txr->tx_push = dma_alloc_coherent(&pdev->dev,
  1800. bp->tx_push_size,
  1801. &txr->tx_push_mapping,
  1802. GFP_KERNEL);
  1803. if (!txr->tx_push)
  1804. return -ENOMEM;
  1805. mapping = txr->tx_push_mapping +
  1806. sizeof(struct tx_push_bd);
  1807. txr->data_mapping = cpu_to_le64(mapping);
  1808. memset(txr->tx_push, 0, sizeof(struct tx_push_bd));
  1809. }
  1810. ring->queue_id = bp->q_info[j].queue_id;
  1811. if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
  1812. j++;
  1813. }
  1814. return 0;
  1815. }
  1816. static void bnxt_free_cp_rings(struct bnxt *bp)
  1817. {
  1818. int i;
  1819. if (!bp->bnapi)
  1820. return;
  1821. for (i = 0; i < bp->cp_nr_rings; i++) {
  1822. struct bnxt_napi *bnapi = bp->bnapi[i];
  1823. struct bnxt_cp_ring_info *cpr;
  1824. struct bnxt_ring_struct *ring;
  1825. if (!bnapi)
  1826. continue;
  1827. cpr = &bnapi->cp_ring;
  1828. ring = &cpr->cp_ring_struct;
  1829. bnxt_free_ring(bp, ring);
  1830. }
  1831. }
  1832. static int bnxt_alloc_cp_rings(struct bnxt *bp)
  1833. {
  1834. int i, rc;
  1835. for (i = 0; i < bp->cp_nr_rings; i++) {
  1836. struct bnxt_napi *bnapi = bp->bnapi[i];
  1837. struct bnxt_cp_ring_info *cpr;
  1838. struct bnxt_ring_struct *ring;
  1839. if (!bnapi)
  1840. continue;
  1841. cpr = &bnapi->cp_ring;
  1842. ring = &cpr->cp_ring_struct;
  1843. rc = bnxt_alloc_ring(bp, ring);
  1844. if (rc)
  1845. return rc;
  1846. }
  1847. return 0;
  1848. }
  1849. static void bnxt_init_ring_struct(struct bnxt *bp)
  1850. {
  1851. int i;
  1852. for (i = 0; i < bp->cp_nr_rings; i++) {
  1853. struct bnxt_napi *bnapi = bp->bnapi[i];
  1854. struct bnxt_cp_ring_info *cpr;
  1855. struct bnxt_rx_ring_info *rxr;
  1856. struct bnxt_tx_ring_info *txr;
  1857. struct bnxt_ring_struct *ring;
  1858. if (!bnapi)
  1859. continue;
  1860. cpr = &bnapi->cp_ring;
  1861. ring = &cpr->cp_ring_struct;
  1862. ring->nr_pages = bp->cp_nr_pages;
  1863. ring->page_size = HW_CMPD_RING_SIZE;
  1864. ring->pg_arr = (void **)cpr->cp_desc_ring;
  1865. ring->dma_arr = cpr->cp_desc_mapping;
  1866. ring->vmem_size = 0;
  1867. rxr = bnapi->rx_ring;
  1868. if (!rxr)
  1869. goto skip_rx;
  1870. ring = &rxr->rx_ring_struct;
  1871. ring->nr_pages = bp->rx_nr_pages;
  1872. ring->page_size = HW_RXBD_RING_SIZE;
  1873. ring->pg_arr = (void **)rxr->rx_desc_ring;
  1874. ring->dma_arr = rxr->rx_desc_mapping;
  1875. ring->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
  1876. ring->vmem = (void **)&rxr->rx_buf_ring;
  1877. ring = &rxr->rx_agg_ring_struct;
  1878. ring->nr_pages = bp->rx_agg_nr_pages;
  1879. ring->page_size = HW_RXBD_RING_SIZE;
  1880. ring->pg_arr = (void **)rxr->rx_agg_desc_ring;
  1881. ring->dma_arr = rxr->rx_agg_desc_mapping;
  1882. ring->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
  1883. ring->vmem = (void **)&rxr->rx_agg_ring;
  1884. skip_rx:
  1885. txr = bnapi->tx_ring;
  1886. if (!txr)
  1887. continue;
  1888. ring = &txr->tx_ring_struct;
  1889. ring->nr_pages = bp->tx_nr_pages;
  1890. ring->page_size = HW_RXBD_RING_SIZE;
  1891. ring->pg_arr = (void **)txr->tx_desc_ring;
  1892. ring->dma_arr = txr->tx_desc_mapping;
  1893. ring->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
  1894. ring->vmem = (void **)&txr->tx_buf_ring;
  1895. }
  1896. }
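/* Pre-fill every rx BD on the ring's pages with the given type/flags and
 * record the producer index in rx_bd_opaque; rx completions identify the
 * buffer by returning this opaque value.
 */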
  1897. static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
  1898. {
  1899. int i;
  1900. u32 prod;
  1901. struct rx_bd **rx_buf_ring;
  1902. rx_buf_ring = (struct rx_bd **)ring->pg_arr;
  1903. for (i = 0, prod = 0; i < ring->nr_pages; i++) {
  1904. int j;
  1905. struct rx_bd *rxbd;
  1906. rxbd = rx_buf_ring[i];
  1907. if (!rxbd)
  1908. continue;
  1909. for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
  1910. rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
  1911. rxbd->rx_bd_opaque = prod;
  1912. }
  1913. }
  1914. }
  1915. static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
  1916. {
  1917. struct net_device *dev = bp->dev;
  1918. struct bnxt_rx_ring_info *rxr;
  1919. struct bnxt_ring_struct *ring;
  1920. u32 prod, type;
  1921. int i;
  1922. type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
  1923. RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
  1924. if (NET_IP_ALIGN == 2)
  1925. type |= RX_BD_FLAGS_SOP;
  1926. rxr = &bp->rx_ring[ring_nr];
  1927. ring = &rxr->rx_ring_struct;
  1928. bnxt_init_rxbd_pages(ring, type);
  1929. prod = rxr->rx_prod;
  1930. for (i = 0; i < bp->rx_ring_size; i++) {
  1931. if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) {
  1932. netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
  1933. ring_nr, i, bp->rx_ring_size);
  1934. break;
  1935. }
  1936. prod = NEXT_RX(prod);
  1937. }
  1938. rxr->rx_prod = prod;
  1939. ring->fw_ring_id = INVALID_HW_RING_ID;
  1940. ring = &rxr->rx_agg_ring_struct;
  1941. ring->fw_ring_id = INVALID_HW_RING_ID;
  1942. if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
  1943. return 0;
  1944. type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
  1945. RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
  1946. bnxt_init_rxbd_pages(ring, type);
  1947. prod = rxr->rx_agg_prod;
  1948. for (i = 0; i < bp->rx_agg_ring_size; i++) {
  1949. if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) {
  1950. netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
  1951. ring_nr, i, bp->rx_ring_size);
  1952. break;
  1953. }
  1954. prod = NEXT_RX_AGG(prod);
  1955. }
  1956. rxr->rx_agg_prod = prod;
  1957. if (bp->flags & BNXT_FLAG_TPA) {
  1958. if (rxr->rx_tpa) {
  1959. u8 *data;
  1960. dma_addr_t mapping;
  1961. for (i = 0; i < MAX_TPA; i++) {
  1962. data = __bnxt_alloc_rx_data(bp, &mapping,
  1963. GFP_KERNEL);
  1964. if (!data)
  1965. return -ENOMEM;
  1966. rxr->rx_tpa[i].data = data;
  1967. rxr->rx_tpa[i].mapping = mapping;
  1968. }
  1969. } else {
  1970. netdev_err(bp->dev, "No resource allocated for LRO/GRO\n");
  1971. return -ENOMEM;
  1972. }
  1973. }
  1974. return 0;
  1975. }
  1976. static void bnxt_init_cp_rings(struct bnxt *bp)
  1977. {
  1978. int i;
  1979. for (i = 0; i < bp->cp_nr_rings; i++) {
  1980. struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
  1981. struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
  1982. ring->fw_ring_id = INVALID_HW_RING_ID;
  1983. }
  1984. }
  1985. static int bnxt_init_rx_rings(struct bnxt *bp)
  1986. {
  1987. int i, rc = 0;
  1988. for (i = 0; i < bp->rx_nr_rings; i++) {
  1989. rc = bnxt_init_one_rx_ring(bp, i);
  1990. if (rc)
  1991. break;
  1992. }
  1993. return rc;
  1994. }
  1995. static int bnxt_init_tx_rings(struct bnxt *bp)
  1996. {
  1997. u16 i;
  1998. bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
  1999. MAX_SKB_FRAGS + 1);
  2000. for (i = 0; i < bp->tx_nr_rings; i++) {
  2001. struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
  2002. struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
  2003. ring->fw_ring_id = INVALID_HW_RING_ID;
  2004. }
  2005. return 0;
  2006. }
  2007. static void bnxt_free_ring_grps(struct bnxt *bp)
  2008. {
  2009. kfree(bp->grp_info);
  2010. bp->grp_info = NULL;
  2011. }
  2012. static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
  2013. {
  2014. int i;
  2015. if (irq_re_init) {
  2016. bp->grp_info = kcalloc(bp->cp_nr_rings,
  2017. sizeof(struct bnxt_ring_grp_info),
  2018. GFP_KERNEL);
  2019. if (!bp->grp_info)
  2020. return -ENOMEM;
  2021. }
  2022. for (i = 0; i < bp->cp_nr_rings; i++) {
  2023. if (irq_re_init)
  2024. bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
  2025. bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
  2026. bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
  2027. bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
  2028. bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
  2029. }
  2030. return 0;
  2031. }
  2032. static void bnxt_free_vnics(struct bnxt *bp)
  2033. {
  2034. kfree(bp->vnic_info);
  2035. bp->vnic_info = NULL;
  2036. bp->nr_vnics = 0;
  2037. }
  2038. static int bnxt_alloc_vnics(struct bnxt *bp)
  2039. {
  2040. int num_vnics = 1;
  2041. #ifdef CONFIG_RFS_ACCEL
  2042. if (bp->flags & BNXT_FLAG_RFS)
  2043. num_vnics += bp->rx_nr_rings;
  2044. #endif
  2045. if (BNXT_CHIP_TYPE_NITRO_A0(bp))
  2046. num_vnics++;
  2047. bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
  2048. GFP_KERNEL);
  2049. if (!bp->vnic_info)
  2050. return -ENOMEM;
  2051. bp->nr_vnics = num_vnics;
  2052. return 0;
  2053. }
  2054. static void bnxt_init_vnics(struct bnxt *bp)
  2055. {
  2056. int i;
  2057. for (i = 0; i < bp->nr_vnics; i++) {
  2058. struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
  2059. vnic->fw_vnic_id = INVALID_HW_RING_ID;
  2060. vnic->fw_rss_cos_lb_ctx[0] = INVALID_HW_RING_ID;
  2061. vnic->fw_rss_cos_lb_ctx[1] = INVALID_HW_RING_ID;
  2062. vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
  2063. if (bp->vnic_info[i].rss_hash_key) {
  2064. if (i == 0)
  2065. prandom_bytes(vnic->rss_hash_key,
  2066. HW_HASH_KEY_SIZE);
  2067. else
  2068. memcpy(vnic->rss_hash_key,
  2069. bp->vnic_info[0].rss_hash_key,
  2070. HW_HASH_KEY_SIZE);
  2071. }
  2072. }
  2073. }
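/* Number of descriptor pages for a ring: ring_size / desc_per_pg plus one,
 * rounded up to the next power of two (minimum 1).  For example, with 256
 * descriptors per page (an illustrative value; the real counts are the
 * RX/TX/CP_DESC_CNT macros), a ring size of 1000 gives 3 + 1 = 4 pages,
 * while 1024 gives 4 + 1 = 5, rounded up to 8.
 */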
static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
{
	int pages;

	pages = ring_size / desc_per_pg;

	if (!pages)
		return 1;

	pages++;

	while (pages & (pages - 1))
		pages++;

	return pages;
}
static void bnxt_set_tpa_flags(struct bnxt *bp)
{
	bp->flags &= ~BNXT_FLAG_TPA;
	if (bp->dev->features & NETIF_F_LRO)
		bp->flags |= BNXT_FLAG_LRO;
	if (bp->dev->features & NETIF_F_GRO)
		bp->flags |= BNXT_FLAG_GRO;
}
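
/* Derive the rx buffer size from the MTU (plus room for CRC/VLAN and the
 * shared info), size the aggregation/jumbo rings from the configured rx
 * ring size, and size the completion ring to cover the worst case of
 * rx + agg + tx completions.
 */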
  2093. /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
  2094. * be set on entry.
  2095. */
  2096. void bnxt_set_ring_params(struct bnxt *bp)
  2097. {
  2098. u32 ring_size, rx_size, rx_space;
  2099. u32 agg_factor = 0, agg_ring_size = 0;
  2100. /* 8 for CRC and VLAN */
  2101. rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
  2102. rx_space = rx_size + NET_SKB_PAD +
  2103. SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
  2104. bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
  2105. ring_size = bp->rx_ring_size;
  2106. bp->rx_agg_ring_size = 0;
  2107. bp->rx_agg_nr_pages = 0;
  2108. if (bp->flags & BNXT_FLAG_TPA)
  2109. agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
  2110. bp->flags &= ~BNXT_FLAG_JUMBO;
  2111. if (rx_space > PAGE_SIZE) {
  2112. u32 jumbo_factor;
  2113. bp->flags |= BNXT_FLAG_JUMBO;
  2114. jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
  2115. if (jumbo_factor > agg_factor)
  2116. agg_factor = jumbo_factor;
  2117. }
  2118. agg_ring_size = ring_size * agg_factor;
  2119. if (agg_ring_size) {
  2120. bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
  2121. RX_DESC_CNT);
  2122. if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
  2123. u32 tmp = agg_ring_size;
  2124. bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
  2125. agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
  2126. netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
  2127. tmp, agg_ring_size);
  2128. }
  2129. bp->rx_agg_ring_size = agg_ring_size;
  2130. bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
  2131. rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
  2132. rx_space = rx_size + NET_SKB_PAD +
  2133. SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
  2134. }
  2135. bp->rx_buf_use_size = rx_size;
  2136. bp->rx_buf_size = rx_space;
  2137. bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
  2138. bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
  2139. ring_size = bp->tx_ring_size;
  2140. bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
  2141. bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
  2142. ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size;
  2143. bp->cp_ring_size = ring_size;
  2144. bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
  2145. if (bp->cp_nr_pages > MAX_CP_PAGES) {
  2146. bp->cp_nr_pages = MAX_CP_PAGES;
  2147. bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
  2148. netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
  2149. ring_size, bp->cp_ring_size);
  2150. }
  2151. bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
  2152. bp->cp_ring_mask = bp->cp_bit - 1;
  2153. }
  2154. static void bnxt_free_vnic_attributes(struct bnxt *bp)
  2155. {
  2156. int i;
  2157. struct bnxt_vnic_info *vnic;
  2158. struct pci_dev *pdev = bp->pdev;
  2159. if (!bp->vnic_info)
  2160. return;
  2161. for (i = 0; i < bp->nr_vnics; i++) {
  2162. vnic = &bp->vnic_info[i];
  2163. kfree(vnic->fw_grp_ids);
  2164. vnic->fw_grp_ids = NULL;
  2165. kfree(vnic->uc_list);
  2166. vnic->uc_list = NULL;
  2167. if (vnic->mc_list) {
  2168. dma_free_coherent(&pdev->dev, vnic->mc_list_size,
  2169. vnic->mc_list, vnic->mc_list_mapping);
  2170. vnic->mc_list = NULL;
  2171. }
  2172. if (vnic->rss_table) {
  2173. dma_free_coherent(&pdev->dev, PAGE_SIZE,
  2174. vnic->rss_table,
  2175. vnic->rss_table_dma_addr);
  2176. vnic->rss_table = NULL;
  2177. }
  2178. vnic->rss_hash_key = NULL;
  2179. vnic->flags = 0;
  2180. }
  2181. }
  2182. static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
  2183. {
  2184. int i, rc = 0, size;
  2185. struct bnxt_vnic_info *vnic;
  2186. struct pci_dev *pdev = bp->pdev;
  2187. int max_rings;
  2188. for (i = 0; i < bp->nr_vnics; i++) {
  2189. vnic = &bp->vnic_info[i];
  2190. if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
  2191. int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
  2192. if (mem_size > 0) {
  2193. vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
  2194. if (!vnic->uc_list) {
  2195. rc = -ENOMEM;
  2196. goto out;
  2197. }
  2198. }
  2199. }
  2200. if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
  2201. vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
  2202. vnic->mc_list =
  2203. dma_alloc_coherent(&pdev->dev,
  2204. vnic->mc_list_size,
  2205. &vnic->mc_list_mapping,
  2206. GFP_KERNEL);
  2207. if (!vnic->mc_list) {
  2208. rc = -ENOMEM;
  2209. goto out;
  2210. }
  2211. }
  2212. if (vnic->flags & BNXT_VNIC_RSS_FLAG)
  2213. max_rings = bp->rx_nr_rings;
  2214. else
  2215. max_rings = 1;
  2216. vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
  2217. if (!vnic->fw_grp_ids) {
  2218. rc = -ENOMEM;
  2219. goto out;
  2220. }
  2221. /* Allocate rss table and hash key */
  2222. vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
  2223. &vnic->rss_table_dma_addr,
  2224. GFP_KERNEL);
  2225. if (!vnic->rss_table) {
  2226. rc = -ENOMEM;
  2227. goto out;
  2228. }
  2229. size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
  2230. vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
  2231. vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
  2232. }
  2233. return 0;
  2234. out:
  2235. return rc;
  2236. }
  2237. static void bnxt_free_hwrm_resources(struct bnxt *bp)
  2238. {
  2239. struct pci_dev *pdev = bp->pdev;
  2240. dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
  2241. bp->hwrm_cmd_resp_dma_addr);
  2242. bp->hwrm_cmd_resp_addr = NULL;
  2243. if (bp->hwrm_dbg_resp_addr) {
  2244. dma_free_coherent(&pdev->dev, HWRM_DBG_REG_BUF_SIZE,
  2245. bp->hwrm_dbg_resp_addr,
  2246. bp->hwrm_dbg_resp_dma_addr);
  2247. bp->hwrm_dbg_resp_addr = NULL;
  2248. }
  2249. }
  2250. static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
  2251. {
  2252. struct pci_dev *pdev = bp->pdev;
  2253. bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
  2254. &bp->hwrm_cmd_resp_dma_addr,
  2255. GFP_KERNEL);
  2256. if (!bp->hwrm_cmd_resp_addr)
  2257. return -ENOMEM;
  2258. bp->hwrm_dbg_resp_addr = dma_alloc_coherent(&pdev->dev,
  2259. HWRM_DBG_REG_BUF_SIZE,
  2260. &bp->hwrm_dbg_resp_dma_addr,
  2261. GFP_KERNEL);
  2262. if (!bp->hwrm_dbg_resp_addr)
  2263. netdev_warn(bp->dev, "fail to alloc debug register dma mem\n");
  2264. return 0;
  2265. }
  2266. static void bnxt_free_stats(struct bnxt *bp)
  2267. {
  2268. u32 size, i;
  2269. struct pci_dev *pdev = bp->pdev;
  2270. if (bp->hw_rx_port_stats) {
  2271. dma_free_coherent(&pdev->dev, bp->hw_port_stats_size,
  2272. bp->hw_rx_port_stats,
  2273. bp->hw_rx_port_stats_map);
  2274. bp->hw_rx_port_stats = NULL;
  2275. bp->flags &= ~BNXT_FLAG_PORT_STATS;
  2276. }
  2277. if (!bp->bnapi)
  2278. return;
  2279. size = sizeof(struct ctx_hw_stats);
  2280. for (i = 0; i < bp->cp_nr_rings; i++) {
  2281. struct bnxt_napi *bnapi = bp->bnapi[i];
  2282. struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
  2283. if (cpr->hw_stats) {
  2284. dma_free_coherent(&pdev->dev, size, cpr->hw_stats,
  2285. cpr->hw_stats_map);
  2286. cpr->hw_stats = NULL;
  2287. }
  2288. }
  2289. }
  2290. static int bnxt_alloc_stats(struct bnxt *bp)
  2291. {
  2292. u32 size, i;
  2293. struct pci_dev *pdev = bp->pdev;
  2294. size = sizeof(struct ctx_hw_stats);
  2295. for (i = 0; i < bp->cp_nr_rings; i++) {
  2296. struct bnxt_napi *bnapi = bp->bnapi[i];
  2297. struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
  2298. cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size,
  2299. &cpr->hw_stats_map,
  2300. GFP_KERNEL);
  2301. if (!cpr->hw_stats)
  2302. return -ENOMEM;
  2303. cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
  2304. }
  2305. if (BNXT_PF(bp) && bp->chip_num != CHIP_NUM_58700) {
  2306. bp->hw_port_stats_size = sizeof(struct rx_port_stats) +
  2307. sizeof(struct tx_port_stats) + 1024;
  2308. bp->hw_rx_port_stats =
  2309. dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size,
  2310. &bp->hw_rx_port_stats_map,
  2311. GFP_KERNEL);
  2312. if (!bp->hw_rx_port_stats)
  2313. return -ENOMEM;
  2314. bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) +
  2315. 512;
  2316. bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map +
  2317. sizeof(struct rx_port_stats) + 512;
  2318. bp->flags |= BNXT_FLAG_PORT_STATS;
  2319. }
  2320. return 0;
  2321. }
  2322. static void bnxt_clear_ring_indices(struct bnxt *bp)
  2323. {
  2324. int i;
  2325. if (!bp->bnapi)
  2326. return;
  2327. for (i = 0; i < bp->cp_nr_rings; i++) {
  2328. struct bnxt_napi *bnapi = bp->bnapi[i];
  2329. struct bnxt_cp_ring_info *cpr;
  2330. struct bnxt_rx_ring_info *rxr;
  2331. struct bnxt_tx_ring_info *txr;
  2332. if (!bnapi)
  2333. continue;
  2334. cpr = &bnapi->cp_ring;
  2335. cpr->cp_raw_cons = 0;
  2336. txr = bnapi->tx_ring;
  2337. if (txr) {
  2338. txr->tx_prod = 0;
  2339. txr->tx_cons = 0;
  2340. }
  2341. rxr = bnapi->rx_ring;
  2342. if (rxr) {
  2343. rxr->rx_prod = 0;
  2344. rxr->rx_agg_prod = 0;
  2345. rxr->rx_sw_agg_prod = 0;
  2346. rxr->rx_next_cons = 0;
  2347. }
  2348. }
  2349. }
  2350. static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
  2351. {
  2352. #ifdef CONFIG_RFS_ACCEL
  2353. int i;
  2354. /* Under rtnl_lock and all our NAPIs have been disabled. It's
  2355. * safe to delete the hash table.
  2356. */
  2357. for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
  2358. struct hlist_head *head;
  2359. struct hlist_node *tmp;
  2360. struct bnxt_ntuple_filter *fltr;
  2361. head = &bp->ntp_fltr_hash_tbl[i];
  2362. hlist_for_each_entry_safe(fltr, tmp, head, hash) {
  2363. hlist_del(&fltr->hash);
  2364. kfree(fltr);
  2365. }
  2366. }
  2367. if (irq_reinit) {
  2368. kfree(bp->ntp_fltr_bmap);
  2369. bp->ntp_fltr_bmap = NULL;
  2370. }
  2371. bp->ntp_fltr_count = 0;
  2372. #endif
  2373. }
  2374. static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
  2375. {
  2376. #ifdef CONFIG_RFS_ACCEL
  2377. int i, rc = 0;
  2378. if (!(bp->flags & BNXT_FLAG_RFS))
  2379. return 0;
  2380. for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
  2381. INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
  2382. bp->ntp_fltr_count = 0;
  2383. bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
  2384. sizeof(long),
  2385. GFP_KERNEL);
  2386. if (!bp->ntp_fltr_bmap)
  2387. rc = -ENOMEM;
  2388. return rc;
  2389. #else
  2390. return 0;
  2391. #endif
  2392. }
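/* Release all driver memory for the rings.  When irq_re_init is true the
 * stats, ring groups, vnics and the bnapi/ring arrays are freed as well;
 * otherwise only the ring indices are reset for reuse.
 */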
  2393. static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
  2394. {
  2395. bnxt_free_vnic_attributes(bp);
  2396. bnxt_free_tx_rings(bp);
  2397. bnxt_free_rx_rings(bp);
  2398. bnxt_free_cp_rings(bp);
  2399. bnxt_free_ntp_fltrs(bp, irq_re_init);
  2400. if (irq_re_init) {
  2401. bnxt_free_stats(bp);
  2402. bnxt_free_ring_grps(bp);
  2403. bnxt_free_vnics(bp);
  2404. kfree(bp->tx_ring);
  2405. bp->tx_ring = NULL;
  2406. kfree(bp->rx_ring);
  2407. bp->rx_ring = NULL;
  2408. kfree(bp->bnapi);
  2409. bp->bnapi = NULL;
  2410. } else {
  2411. bnxt_clear_ring_indices(bp);
  2412. }
  2413. }
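/* Allocate all driver memory needed to bring the rings up: on irq_re_init,
 * the bnxt_napi pointer array and structs, the rx/tx ring arrays, stats,
 * ntuple filter table and vnics; then the rx, tx and completion rings and
 * the vnic attributes.  On failure everything is freed via bnxt_free_mem().
 */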
  2414. static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
  2415. {
  2416. int i, j, rc, size, arr_size;
  2417. void *bnapi;
  2418. if (irq_re_init) {
  2419. /* Allocate bnapi mem pointer array and mem block for
  2420. * all queues
  2421. */
  2422. arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
  2423. bp->cp_nr_rings);
  2424. size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
  2425. bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
  2426. if (!bnapi)
  2427. return -ENOMEM;
  2428. bp->bnapi = bnapi;
  2429. bnapi += arr_size;
  2430. for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
  2431. bp->bnapi[i] = bnapi;
  2432. bp->bnapi[i]->index = i;
  2433. bp->bnapi[i]->bp = bp;
  2434. }
  2435. bp->rx_ring = kcalloc(bp->rx_nr_rings,
  2436. sizeof(struct bnxt_rx_ring_info),
  2437. GFP_KERNEL);
  2438. if (!bp->rx_ring)
  2439. return -ENOMEM;
  2440. for (i = 0; i < bp->rx_nr_rings; i++) {
  2441. bp->rx_ring[i].bnapi = bp->bnapi[i];
  2442. bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
  2443. }
  2444. bp->tx_ring = kcalloc(bp->tx_nr_rings,
  2445. sizeof(struct bnxt_tx_ring_info),
  2446. GFP_KERNEL);
  2447. if (!bp->tx_ring)
  2448. return -ENOMEM;
  2449. if (bp->flags & BNXT_FLAG_SHARED_RINGS)
  2450. j = 0;
  2451. else
  2452. j = bp->rx_nr_rings;
  2453. for (i = 0; i < bp->tx_nr_rings; i++, j++) {
  2454. bp->tx_ring[i].bnapi = bp->bnapi[j];
  2455. bp->bnapi[j]->tx_ring = &bp->tx_ring[i];
  2456. }
  2457. rc = bnxt_alloc_stats(bp);
  2458. if (rc)
  2459. goto alloc_mem_err;
  2460. rc = bnxt_alloc_ntp_fltrs(bp);
  2461. if (rc)
  2462. goto alloc_mem_err;
  2463. rc = bnxt_alloc_vnics(bp);
  2464. if (rc)
  2465. goto alloc_mem_err;
  2466. }
  2467. bnxt_init_ring_struct(bp);
  2468. rc = bnxt_alloc_rx_rings(bp);
  2469. if (rc)
  2470. goto alloc_mem_err;
  2471. rc = bnxt_alloc_tx_rings(bp);
  2472. if (rc)
  2473. goto alloc_mem_err;
  2474. rc = bnxt_alloc_cp_rings(bp);
  2475. if (rc)
  2476. goto alloc_mem_err;
  2477. bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
  2478. BNXT_VNIC_UCAST_FLAG;
  2479. rc = bnxt_alloc_vnic_attributes(bp);
  2480. if (rc)
  2481. goto alloc_mem_err;
  2482. return 0;
  2483. alloc_mem_err:
  2484. bnxt_free_mem(bp, true);
  2485. return rc;
  2486. }
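/* Fill in the common HWRM request header: request type, completion ring,
 * target function id and the DMA address of the response buffer.
 */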
  2487. void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
  2488. u16 cmpl_ring, u16 target_id)
  2489. {
  2490. struct input *req = request;
  2491. req->req_type = cpu_to_le16(req_type);
  2492. req->cmpl_ring = cpu_to_le16(cmpl_ring);
  2493. req->target_id = cpu_to_le16(target_id);
  2494. req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
  2495. }
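/* Send one HWRM request: copy the message into BAR0, zero-pad the rest of
 * the maximum request length, ring the doorbell, then wait for the response
 * either via the completion ring interrupt or by polling the response
 * length and valid bit.  Callers serialize access with hwrm_cmd_lock.
 */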
  2496. static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
  2497. int timeout, bool silent)
  2498. {
  2499. int i, intr_process, rc, tmo_count;
  2500. struct input *req = msg;
  2501. u32 *data = msg;
  2502. __le32 *resp_len, *valid;
  2503. u16 cp_ring_id, len = 0;
  2504. struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
  2505. req->seq_id = cpu_to_le16(bp->hwrm_cmd_seq++);
  2506. memset(resp, 0, PAGE_SIZE);
  2507. cp_ring_id = le16_to_cpu(req->cmpl_ring);
  2508. intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
  2509. /* Write request msg to hwrm channel */
  2510. __iowrite32_copy(bp->bar0, data, msg_len / 4);
  2511. for (i = msg_len; i < BNXT_HWRM_MAX_REQ_LEN; i += 4)
  2512. writel(0, bp->bar0 + i);
  2513. /* currently supports only one outstanding message */
  2514. if (intr_process)
  2515. bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id);
  2516. /* Ring channel doorbell */
  2517. writel(1, bp->bar0 + 0x100);
  2518. if (!timeout)
  2519. timeout = DFLT_HWRM_CMD_TIMEOUT;
  2520. i = 0;
  2521. tmo_count = timeout * 40;
  2522. if (intr_process) {
  2523. /* Wait until hwrm response cmpl interrupt is processed */
  2524. while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID &&
  2525. i++ < tmo_count) {
  2526. usleep_range(25, 40);
  2527. }
  2528. if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) {
  2529. netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
  2530. le16_to_cpu(req->req_type));
  2531. return -1;
  2532. }
  2533. } else {
  2534. /* Check if response len is updated */
  2535. resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET;
  2536. for (i = 0; i < tmo_count; i++) {
  2537. len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
  2538. HWRM_RESP_LEN_SFT;
  2539. if (len)
  2540. break;
  2541. usleep_range(25, 40);
  2542. }
  2543. if (i >= tmo_count) {
  2544. netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
  2545. timeout, le16_to_cpu(req->req_type),
  2546. le16_to_cpu(req->seq_id), len);
  2547. return -1;
  2548. }
  2549. /* Last word of resp contains valid bit */
  2550. valid = bp->hwrm_cmd_resp_addr + len - 4;
  2551. for (i = 0; i < 5; i++) {
  2552. if (le32_to_cpu(*valid) & HWRM_RESP_VALID_MASK)
  2553. break;
  2554. udelay(1);
  2555. }
  2556. if (i >= 5) {
  2557. netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
  2558. timeout, le16_to_cpu(req->req_type),
  2559. le16_to_cpu(req->seq_id), len, *valid);
  2560. return -1;
  2561. }
  2562. }
  2563. rc = le16_to_cpu(resp->error_code);
  2564. if (rc && !silent)
  2565. netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
  2566. le16_to_cpu(resp->req_type),
  2567. le16_to_cpu(resp->seq_id), rc);
  2568. return rc;
  2569. }
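/* _hwrm_send_message() expects the caller to hold hwrm_cmd_lock;
 * hwrm_send_message() and hwrm_send_message_silent() take the lock
 * themselves, the latter suppressing error logging.
 */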
  2570. int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
  2571. {
  2572. return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
  2573. }
  2574. int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
  2575. {
  2576. int rc;
  2577. mutex_lock(&bp->hwrm_cmd_lock);
  2578. rc = _hwrm_send_message(bp, msg, msg_len, timeout);
  2579. mutex_unlock(&bp->hwrm_cmd_lock);
  2580. return rc;
  2581. }
  2582. int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
  2583. int timeout)
  2584. {
  2585. int rc;
  2586. mutex_lock(&bp->hwrm_cmd_lock);
  2587. rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
  2588. mutex_unlock(&bp->hwrm_cmd_lock);
  2589. return rc;
  2590. }
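/* Register the driver with the firmware: advertise the OS type, driver
 * version and the async events to be forwarded.  The PF additionally
 * requests forwarding of selected VF commands.
 */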
  2591. static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
  2592. {
  2593. struct hwrm_func_drv_rgtr_input req = {0};
  2594. int i;
  2595. DECLARE_BITMAP(async_events_bmap, 256);
  2596. u32 *events = (u32 *)async_events_bmap;
  2597. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
  2598. req.enables =
  2599. cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
  2600. FUNC_DRV_RGTR_REQ_ENABLES_VER |
  2601. FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
  2602. memset(async_events_bmap, 0, sizeof(async_events_bmap));
  2603. for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++)
  2604. __set_bit(bnxt_async_events_arr[i], async_events_bmap);
  2605. for (i = 0; i < 8; i++)
  2606. req.async_event_fwd[i] |= cpu_to_le32(events[i]);
  2607. req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
  2608. req.ver_maj = DRV_VER_MAJ;
  2609. req.ver_min = DRV_VER_MIN;
  2610. req.ver_upd = DRV_VER_UPD;
  2611. if (BNXT_PF(bp)) {
  2612. DECLARE_BITMAP(vf_req_snif_bmap, 256);
  2613. u32 *data = (u32 *)vf_req_snif_bmap;
  2614. memset(vf_req_snif_bmap, 0, sizeof(vf_req_snif_bmap));
  2615. for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++)
  2616. __set_bit(bnxt_vf_req_snif[i], vf_req_snif_bmap);
  2617. for (i = 0; i < 8; i++)
  2618. req.vf_req_fwd[i] = cpu_to_le32(data[i]);
  2619. req.enables |=
  2620. cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
  2621. }
  2622. return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  2623. }
  2624. static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
  2625. {
  2626. struct hwrm_func_drv_unrgtr_input req = {0};
  2627. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
  2628. return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  2629. }
  2630. static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
  2631. {
  2632. u32 rc = 0;
  2633. struct hwrm_tunnel_dst_port_free_input req = {0};
  2634. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
  2635. req.tunnel_type = tunnel_type;
  2636. switch (tunnel_type) {
  2637. case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
  2638. req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id;
  2639. break;
  2640. case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
  2641. req.tunnel_dst_port_id = bp->nge_fw_dst_port_id;
  2642. break;
  2643. default:
  2644. break;
  2645. }
  2646. rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  2647. if (rc)
  2648. netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
  2649. rc);
  2650. return rc;
  2651. }
  2652. static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
  2653. u8 tunnel_type)
  2654. {
  2655. u32 rc = 0;
  2656. struct hwrm_tunnel_dst_port_alloc_input req = {0};
  2657. struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
  2658. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);
  2659. req.tunnel_type = tunnel_type;
  2660. req.tunnel_dst_port_val = port;
  2661. mutex_lock(&bp->hwrm_cmd_lock);
  2662. rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  2663. if (rc) {
  2664. netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
  2665. rc);
  2666. goto err_out;
  2667. }
  2668. switch (tunnel_type) {
  2669. case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
  2670. bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
  2671. break;
  2672. case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
  2673. bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id;
  2674. break;
  2675. default:
  2676. break;
  2677. }
  2678. err_out:
  2679. mutex_unlock(&bp->hwrm_cmd_lock);
  2680. return rc;
  2681. }
  2682. static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
  2683. {
  2684. struct hwrm_cfa_l2_set_rx_mask_input req = {0};
  2685. struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
  2686. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
  2687. req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
  2688. req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
  2689. req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
  2690. req.mask = cpu_to_le32(vnic->rx_mask);
  2691. return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  2692. }
  2693. #ifdef CONFIG_RFS_ACCEL
  2694. static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
  2695. struct bnxt_ntuple_filter *fltr)
  2696. {
  2697. struct hwrm_cfa_ntuple_filter_free_input req = {0};
  2698. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
  2699. req.ntuple_filter_id = fltr->filter_id;
  2700. return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  2701. }
  2702. #define BNXT_NTP_FLTR_FLAGS \
  2703. (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \
  2704. CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \
  2705. CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR | \
  2706. CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \
  2707. CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \
  2708. CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \
  2709. CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \
  2710. CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \
  2711. CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \
  2712. CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \
  2713. CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \
  2714. CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \
  2715. CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \
  2716. CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
  2717. static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
  2718. struct bnxt_ntuple_filter *fltr)
  2719. {
  2720. int rc = 0;
  2721. struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
  2722. struct hwrm_cfa_ntuple_filter_alloc_output *resp =
  2723. bp->hwrm_cmd_resp_addr;
  2724. struct flow_keys *keys = &fltr->fkeys;
  2725. struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1];
  2726. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
  2727. req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
  2728. req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
  2729. req.ethertype = htons(ETH_P_IP);
  2730. memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
  2731. req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
  2732. req.ip_protocol = keys->basic.ip_proto;
  2733. req.src_ipaddr[0] = keys->addrs.v4addrs.src;
  2734. req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
  2735. req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
  2736. req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
  2737. req.src_port = keys->ports.src;
  2738. req.src_port_mask = cpu_to_be16(0xffff);
  2739. req.dst_port = keys->ports.dst;
  2740. req.dst_port_mask = cpu_to_be16(0xffff);
  2741. req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
  2742. mutex_lock(&bp->hwrm_cmd_lock);
  2743. rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  2744. if (!rc)
  2745. fltr->filter_id = resp->ntuple_filter_id;
  2746. mutex_unlock(&bp->hwrm_cmd_lock);
  2747. return rc;
  2748. }
  2749. #endif
  2750. static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
  2751. u8 *mac_addr)
  2752. {
  2753. u32 rc = 0;
  2754. struct hwrm_cfa_l2_filter_alloc_input req = {0};
  2755. struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
  2756. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
  2757. req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
  2758. if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
  2759. req.flags |=
  2760. cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
  2761. req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
  2762. req.enables =
  2763. cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
  2764. CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
  2765. CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
  2766. memcpy(req.l2_addr, mac_addr, ETH_ALEN);
  2767. req.l2_addr_mask[0] = 0xff;
  2768. req.l2_addr_mask[1] = 0xff;
  2769. req.l2_addr_mask[2] = 0xff;
  2770. req.l2_addr_mask[3] = 0xff;
  2771. req.l2_addr_mask[4] = 0xff;
  2772. req.l2_addr_mask[5] = 0xff;
  2773. mutex_lock(&bp->hwrm_cmd_lock);
  2774. rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  2775. if (!rc)
  2776. bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
  2777. resp->l2_filter_id;
  2778. mutex_unlock(&bp->hwrm_cmd_lock);
  2779. return rc;
  2780. }
  2781. static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
  2782. {
  2783. u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
  2784. int rc = 0;
  2785. /* Any associated ntuple filters will also be cleared by firmware. */
  2786. mutex_lock(&bp->hwrm_cmd_lock);
  2787. for (i = 0; i < num_of_vnics; i++) {
  2788. struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
  2789. for (j = 0; j < vnic->uc_filter_count; j++) {
  2790. struct hwrm_cfa_l2_filter_free_input req = {0};
  2791. bnxt_hwrm_cmd_hdr_init(bp, &req,
  2792. HWRM_CFA_L2_FILTER_FREE, -1, -1);
  2793. req.l2_filter_id = vnic->fw_l2_filter_id[j];
  2794. rc = _hwrm_send_message(bp, &req, sizeof(req),
  2795. HWRM_CMD_TIMEOUT);
  2796. }
  2797. vnic->uc_filter_count = 0;
  2798. }
  2799. mutex_unlock(&bp->hwrm_cmd_lock);
  2800. return rc;
  2801. }
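/* Enable or disable TPA (hardware LRO/GRO aggregation) on a vnic.  When
 * enabling, the maximum number of aggregation segments is derived from the
 * MTU and the rx page size.
 */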
  2802. static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
  2803. {
  2804. struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
  2805. struct hwrm_vnic_tpa_cfg_input req = {0};
  2806. if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
  2807. return 0;
  2808. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
  2809. if (tpa_flags) {
  2810. u16 mss = bp->dev->mtu - 40;
  2811. u32 nsegs, n, segs = 0, flags;
  2812. flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
  2813. VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
  2814. VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
  2815. VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
  2816. VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
  2817. if (tpa_flags & BNXT_FLAG_GRO)
  2818. flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
  2819. req.flags = cpu_to_le32(flags);
  2820. req.enables =
  2821. cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
  2822. VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
  2823. VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
2824. /* Number of segs is in log2 units, and the first packet is not
2825. * counted as part of these units.
2826. */
  2827. if (mss <= BNXT_RX_PAGE_SIZE) {
  2828. n = BNXT_RX_PAGE_SIZE / mss;
  2829. nsegs = (MAX_SKB_FRAGS - 1) * n;
  2830. } else {
  2831. n = mss / BNXT_RX_PAGE_SIZE;
  2832. if (mss & (BNXT_RX_PAGE_SIZE - 1))
  2833. n++;
  2834. nsegs = (MAX_SKB_FRAGS - n) / n;
  2835. }
  2836. segs = ilog2(nsegs);
  2837. req.max_agg_segs = cpu_to_le16(segs);
  2838. req.max_aggs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGGS_MAX);
  2839. req.min_agg_len = cpu_to_le32(512);
  2840. }
  2841. req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
  2842. return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  2843. }
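/* Program the RSS hash type, the indirection table (built from the ring
 * group ids) and the hash key for a vnic when set_rss is true.
 */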
  2844. static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
  2845. {
  2846. u32 i, j, max_rings;
  2847. struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
  2848. struct hwrm_vnic_rss_cfg_input req = {0};
  2849. if (vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
  2850. return 0;
  2851. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
  2852. if (set_rss) {
  2853. vnic->hash_type = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
  2854. VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
  2855. VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
  2856. VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
  2857. req.hash_type = cpu_to_le32(vnic->hash_type);
  2858. if (vnic->flags & BNXT_VNIC_RSS_FLAG) {
  2859. if (BNXT_CHIP_TYPE_NITRO_A0(bp))
  2860. max_rings = bp->rx_nr_rings - 1;
  2861. else
  2862. max_rings = bp->rx_nr_rings;
  2863. } else {
  2864. max_rings = 1;
  2865. }
  2866. /* Fill the RSS indirection table with ring group ids */
  2867. for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) {
  2868. if (j == max_rings)
  2869. j = 0;
  2870. vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
  2871. }
  2872. req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
  2873. req.hash_key_tbl_addr =
  2874. cpu_to_le64(vnic->rss_hash_key_dma_addr);
  2875. }
  2876. req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
  2877. return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  2878. }
  2879. static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
  2880. {
  2881. struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
  2882. struct hwrm_vnic_plcmodes_cfg_input req = {0};
  2883. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
  2884. req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
  2885. VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
  2886. VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
  2887. req.enables =
  2888. cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
  2889. VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
  2890. /* thresholds not implemented in firmware yet */
  2891. req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
  2892. req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
  2893. req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
  2894. return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  2895. }
  2896. static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
  2897. u16 ctx_idx)
  2898. {
  2899. struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
  2900. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
  2901. req.rss_cos_lb_ctx_id =
  2902. cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
  2903. hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  2904. bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
  2905. }
  2906. static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
  2907. {
  2908. int i, j;
  2909. for (i = 0; i < bp->nr_vnics; i++) {
  2910. struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
  2911. for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
  2912. if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
  2913. bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
  2914. }
  2915. }
  2916. bp->rsscos_nr_ctxs = 0;
  2917. }
  2918. static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
  2919. {
  2920. int rc;
  2921. struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
  2922. struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
  2923. bp->hwrm_cmd_resp_addr;
  2924. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
  2925. -1);
  2926. mutex_lock(&bp->hwrm_cmd_lock);
  2927. rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  2928. if (!rc)
  2929. bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
  2930. le16_to_cpu(resp->rss_cos_lb_ctx_id);
  2931. mutex_unlock(&bp->hwrm_cmd_lock);
  2932. return rc;
  2933. }
  2934. static int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
  2935. {
  2936. unsigned int ring = 0, grp_idx;
  2937. struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
  2938. struct hwrm_vnic_cfg_input req = {0};
  2939. u16 def_vlan = 0;
  2940. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
  2941. req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
2942. /* Only RSS is supported for now.  TBD: COS & LB */
  2943. if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
  2944. req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
  2945. req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
  2946. VNIC_CFG_REQ_ENABLES_MRU);
  2947. } else {
  2948. req.rss_rule = cpu_to_le16(0xffff);
  2949. }
  2950. if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
  2951. (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
  2952. req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
  2953. req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
  2954. } else {
  2955. req.cos_rule = cpu_to_le16(0xffff);
  2956. }
  2957. if (vnic->flags & BNXT_VNIC_RSS_FLAG)
  2958. ring = 0;
  2959. else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
  2960. ring = vnic_id - 1;
  2961. else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
  2962. ring = bp->rx_nr_rings - 1;
  2963. grp_idx = bp->rx_ring[ring].bnapi->index;
  2964. req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
  2965. req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
  2966. req.lb_rule = cpu_to_le16(0xffff);
  2967. req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN +
  2968. VLAN_HLEN);
  2969. #ifdef CONFIG_BNXT_SRIOV
  2970. if (BNXT_VF(bp))
  2971. def_vlan = bp->vf.vlan;
  2972. #endif
  2973. if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
  2974. req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
  2975. return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  2976. }
  2977. static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
  2978. {
  2979. u32 rc = 0;
  2980. if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
  2981. struct hwrm_vnic_free_input req = {0};
  2982. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
  2983. req.vnic_id =
  2984. cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
  2985. rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  2986. if (rc)
  2987. return rc;
  2988. bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
  2989. }
  2990. return rc;
  2991. }
  2992. static void bnxt_hwrm_vnic_free(struct bnxt *bp)
  2993. {
  2994. u16 i;
  2995. for (i = 0; i < bp->nr_vnics; i++)
  2996. bnxt_hwrm_vnic_free_one(bp, i);
  2997. }
  2998. static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
  2999. unsigned int start_rx_ring_idx,
  3000. unsigned int nr_rings)
  3001. {
  3002. int rc = 0;
  3003. unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
  3004. struct hwrm_vnic_alloc_input req = {0};
  3005. struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
  3006. /* map ring groups to this vnic */
  3007. for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
  3008. grp_idx = bp->rx_ring[i].bnapi->index;
  3009. if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
  3010. netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
  3011. j, nr_rings);
  3012. break;
  3013. }
  3014. bp->vnic_info[vnic_id].fw_grp_ids[j] =
  3015. bp->grp_info[grp_idx].fw_grp_id;
  3016. }
  3017. bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[0] = INVALID_HW_RING_ID;
  3018. bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[1] = INVALID_HW_RING_ID;
  3019. if (vnic_id == 0)
  3020. req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
  3021. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);
  3022. mutex_lock(&bp->hwrm_cmd_lock);
  3023. rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  3024. if (!rc)
  3025. bp->vnic_info[vnic_id].fw_vnic_id = le32_to_cpu(resp->vnic_id);
  3026. mutex_unlock(&bp->hwrm_cmd_lock);
  3027. return rc;
  3028. }
  3029. static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
  3030. {
  3031. u16 i;
  3032. u32 rc = 0;
  3033. mutex_lock(&bp->hwrm_cmd_lock);
  3034. for (i = 0; i < bp->rx_nr_rings; i++) {
  3035. struct hwrm_ring_grp_alloc_input req = {0};
  3036. struct hwrm_ring_grp_alloc_output *resp =
  3037. bp->hwrm_cmd_resp_addr;
  3038. unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
  3039. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1);
  3040. req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
  3041. req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
  3042. req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
  3043. req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
  3044. rc = _hwrm_send_message(bp, &req, sizeof(req),
  3045. HWRM_CMD_TIMEOUT);
  3046. if (rc)
  3047. break;
  3048. bp->grp_info[grp_idx].fw_grp_id =
  3049. le32_to_cpu(resp->ring_group_id);
  3050. }
  3051. mutex_unlock(&bp->hwrm_cmd_lock);
  3052. return rc;
  3053. }
  3054. static int bnxt_hwrm_ring_grp_free(struct bnxt *bp)
  3055. {
  3056. u16 i;
  3057. u32 rc = 0;
  3058. struct hwrm_ring_grp_free_input req = {0};
  3059. if (!bp->grp_info)
  3060. return 0;
  3061. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);
  3062. mutex_lock(&bp->hwrm_cmd_lock);
  3063. for (i = 0; i < bp->cp_nr_rings; i++) {
  3064. if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
  3065. continue;
  3066. req.ring_group_id =
  3067. cpu_to_le32(bp->grp_info[i].fw_grp_id);
  3068. rc = _hwrm_send_message(bp, &req, sizeof(req),
  3069. HWRM_CMD_TIMEOUT);
  3070. if (rc)
  3071. break;
  3072. bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
  3073. }
  3074. mutex_unlock(&bp->hwrm_cmd_lock);
  3075. return rc;
  3076. }
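/* Allocate one tx, rx, aggregation or completion ring in firmware and save
 * the firmware ring id returned in the response.
 */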
  3077. static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
  3078. struct bnxt_ring_struct *ring,
  3079. u32 ring_type, u32 map_index,
  3080. u32 stats_ctx_id)
  3081. {
  3082. int rc = 0, err = 0;
  3083. struct hwrm_ring_alloc_input req = {0};
  3084. struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
  3085. u16 ring_id;
  3086. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
  3087. req.enables = 0;
  3088. if (ring->nr_pages > 1) {
  3089. req.page_tbl_addr = cpu_to_le64(ring->pg_tbl_map);
  3090. /* Page size is in log2 units */
  3091. req.page_size = BNXT_PAGE_SHIFT;
  3092. req.page_tbl_depth = 1;
  3093. } else {
  3094. req.page_tbl_addr = cpu_to_le64(ring->dma_arr[0]);
  3095. }
  3096. req.fbo = 0;
  3097. /* Association of ring index with doorbell index and MSIX number */
  3098. req.logical_id = cpu_to_le16(map_index);
  3099. switch (ring_type) {
  3100. case HWRM_RING_ALLOC_TX:
  3101. req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
  3102. /* Association of transmit ring with completion ring */
  3103. req.cmpl_ring_id =
  3104. cpu_to_le16(bp->grp_info[map_index].cp_fw_ring_id);
  3105. req.length = cpu_to_le32(bp->tx_ring_mask + 1);
  3106. req.stat_ctx_id = cpu_to_le32(stats_ctx_id);
  3107. req.queue_id = cpu_to_le16(ring->queue_id);
  3108. break;
  3109. case HWRM_RING_ALLOC_RX:
  3110. req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
  3111. req.length = cpu_to_le32(bp->rx_ring_mask + 1);
  3112. break;
  3113. case HWRM_RING_ALLOC_AGG:
  3114. req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
  3115. req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
  3116. break;
  3117. case HWRM_RING_ALLOC_CMPL:
  3118. req.ring_type = RING_ALLOC_REQ_RING_TYPE_CMPL;
  3119. req.length = cpu_to_le32(bp->cp_ring_mask + 1);
  3120. if (bp->flags & BNXT_FLAG_USING_MSIX)
  3121. req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
  3122. break;
  3123. default:
  3124. netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
  3125. ring_type);
  3126. return -1;
  3127. }
  3128. mutex_lock(&bp->hwrm_cmd_lock);
  3129. rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  3130. err = le16_to_cpu(resp->error_code);
  3131. ring_id = le16_to_cpu(resp->ring_id);
  3132. mutex_unlock(&bp->hwrm_cmd_lock);
  3133. if (rc || err) {
  3134. switch (ring_type) {
  3135. case RING_FREE_REQ_RING_TYPE_CMPL:
  3136. netdev_err(bp->dev, "hwrm_ring_alloc cp failed. rc:%x err:%x\n",
  3137. rc, err);
  3138. return -1;
  3139. case RING_FREE_REQ_RING_TYPE_RX:
  3140. netdev_err(bp->dev, "hwrm_ring_alloc rx failed. rc:%x err:%x\n",
  3141. rc, err);
  3142. return -1;
  3143. case RING_FREE_REQ_RING_TYPE_TX:
  3144. netdev_err(bp->dev, "hwrm_ring_alloc tx failed. rc:%x err:%x\n",
  3145. rc, err);
  3146. return -1;
  3147. default:
  3148. netdev_err(bp->dev, "Invalid ring\n");
  3149. return -1;
  3150. }
  3151. }
  3152. ring->fw_ring_id = ring_id;
  3153. return rc;
  3154. }
  3155. static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
  3156. {
  3157. int rc;
  3158. if (BNXT_PF(bp)) {
  3159. struct hwrm_func_cfg_input req = {0};
  3160. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
  3161. req.fid = cpu_to_le16(0xffff);
  3162. req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
  3163. req.async_event_cr = cpu_to_le16(idx);
  3164. rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  3165. } else {
  3166. struct hwrm_func_vf_cfg_input req = {0};
  3167. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
  3168. req.enables =
  3169. cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
  3170. req.async_event_cr = cpu_to_le16(idx);
  3171. rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  3172. }
  3173. return rc;
  3174. }
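/* Allocate all completion, tx, rx and (if enabled) aggregation rings in
 * firmware, program their doorbells and record the firmware ring ids in the
 * ring group info.  The first completion ring is also registered as the
 * async event completion ring.
 */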
  3175. static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
  3176. {
  3177. int i, rc = 0;
  3178. for (i = 0; i < bp->cp_nr_rings; i++) {
  3179. struct bnxt_napi *bnapi = bp->bnapi[i];
  3180. struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
  3181. struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
  3182. cpr->cp_doorbell = bp->bar1 + i * 0x80;
  3183. rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_CMPL, i,
  3184. INVALID_STATS_CTX_ID);
  3185. if (rc)
  3186. goto err_out;
  3187. BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
  3188. bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
  3189. if (!i) {
  3190. rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
  3191. if (rc)
  3192. netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
  3193. }
  3194. }
  3195. for (i = 0; i < bp->tx_nr_rings; i++) {
  3196. struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
  3197. struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
  3198. u32 map_idx = txr->bnapi->index;
  3199. u16 fw_stats_ctx = bp->grp_info[map_idx].fw_stats_ctx;
  3200. rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_TX,
  3201. map_idx, fw_stats_ctx);
  3202. if (rc)
  3203. goto err_out;
  3204. txr->tx_doorbell = bp->bar1 + map_idx * 0x80;
  3205. }
  3206. for (i = 0; i < bp->rx_nr_rings; i++) {
  3207. struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
  3208. struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
  3209. u32 map_idx = rxr->bnapi->index;
  3210. rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_RX,
  3211. map_idx, INVALID_STATS_CTX_ID);
  3212. if (rc)
  3213. goto err_out;
  3214. rxr->rx_doorbell = bp->bar1 + map_idx * 0x80;
  3215. writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
  3216. bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
  3217. }
  3218. if (bp->flags & BNXT_FLAG_AGG_RINGS) {
  3219. for (i = 0; i < bp->rx_nr_rings; i++) {
  3220. struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
  3221. struct bnxt_ring_struct *ring =
  3222. &rxr->rx_agg_ring_struct;
  3223. u32 grp_idx = rxr->bnapi->index;
  3224. u32 map_idx = grp_idx + bp->rx_nr_rings;
  3225. rc = hwrm_ring_alloc_send_msg(bp, ring,
  3226. HWRM_RING_ALLOC_AGG,
  3227. map_idx,
  3228. INVALID_STATS_CTX_ID);
  3229. if (rc)
  3230. goto err_out;
  3231. rxr->rx_agg_doorbell = bp->bar1 + map_idx * 0x80;
  3232. writel(DB_KEY_RX | rxr->rx_agg_prod,
  3233. rxr->rx_agg_doorbell);
  3234. bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
  3235. }
  3236. }
  3237. err_out:
  3238. return rc;
  3239. }
  3240. static int hwrm_ring_free_send_msg(struct bnxt *bp,
  3241. struct bnxt_ring_struct *ring,
  3242. u32 ring_type, int cmpl_ring_id)
  3243. {
  3244. int rc;
  3245. struct hwrm_ring_free_input req = {0};
  3246. struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
  3247. u16 error_code;
  3248. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
  3249. req.ring_type = ring_type;
  3250. req.ring_id = cpu_to_le16(ring->fw_ring_id);
  3251. mutex_lock(&bp->hwrm_cmd_lock);
  3252. rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  3253. error_code = le16_to_cpu(resp->error_code);
  3254. mutex_unlock(&bp->hwrm_cmd_lock);
  3255. if (rc || error_code) {
  3256. switch (ring_type) {
  3257. case RING_FREE_REQ_RING_TYPE_CMPL:
  3258. netdev_err(bp->dev, "hwrm_ring_free cp failed. rc:%d\n",
  3259. rc);
  3260. return rc;
  3261. case RING_FREE_REQ_RING_TYPE_RX:
  3262. netdev_err(bp->dev, "hwrm_ring_free rx failed. rc:%d\n",
  3263. rc);
  3264. return rc;
  3265. case RING_FREE_REQ_RING_TYPE_TX:
  3266. netdev_err(bp->dev, "hwrm_ring_free tx failed. rc:%d\n",
  3267. rc);
  3268. return rc;
  3269. default:
  3270. netdev_err(bp->dev, "Invalid ring\n");
  3271. return -1;
  3272. }
  3273. }
  3274. return 0;
  3275. }
  3276. static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
  3277. {
  3278. int i;
  3279. if (!bp->bnapi)
  3280. return;
  3281. for (i = 0; i < bp->tx_nr_rings; i++) {
  3282. struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
  3283. struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
  3284. u32 grp_idx = txr->bnapi->index;
  3285. u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;
  3286. if (ring->fw_ring_id != INVALID_HW_RING_ID) {
  3287. hwrm_ring_free_send_msg(bp, ring,
  3288. RING_FREE_REQ_RING_TYPE_TX,
  3289. close_path ? cmpl_ring_id :
  3290. INVALID_HW_RING_ID);
  3291. ring->fw_ring_id = INVALID_HW_RING_ID;
  3292. }
  3293. }
  3294. for (i = 0; i < bp->rx_nr_rings; i++) {
  3295. struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
  3296. struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
  3297. u32 grp_idx = rxr->bnapi->index;
  3298. u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;
  3299. if (ring->fw_ring_id != INVALID_HW_RING_ID) {
  3300. hwrm_ring_free_send_msg(bp, ring,
  3301. RING_FREE_REQ_RING_TYPE_RX,
  3302. close_path ? cmpl_ring_id :
  3303. INVALID_HW_RING_ID);
  3304. ring->fw_ring_id = INVALID_HW_RING_ID;
  3305. bp->grp_info[grp_idx].rx_fw_ring_id =
  3306. INVALID_HW_RING_ID;
  3307. }
  3308. }
  3309. for (i = 0; i < bp->rx_nr_rings; i++) {
  3310. struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
  3311. struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
  3312. u32 grp_idx = rxr->bnapi->index;
  3313. u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;
  3314. if (ring->fw_ring_id != INVALID_HW_RING_ID) {
  3315. hwrm_ring_free_send_msg(bp, ring,
  3316. RING_FREE_REQ_RING_TYPE_RX,
  3317. close_path ? cmpl_ring_id :
  3318. INVALID_HW_RING_ID);
  3319. ring->fw_ring_id = INVALID_HW_RING_ID;
  3320. bp->grp_info[grp_idx].agg_fw_ring_id =
  3321. INVALID_HW_RING_ID;
  3322. }
  3323. }
  3324. for (i = 0; i < bp->cp_nr_rings; i++) {
  3325. struct bnxt_napi *bnapi = bp->bnapi[i];
  3326. struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
  3327. struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
  3328. if (ring->fw_ring_id != INVALID_HW_RING_ID) {
  3329. hwrm_ring_free_send_msg(bp, ring,
  3330. RING_FREE_REQ_RING_TYPE_CMPL,
  3331. INVALID_HW_RING_ID);
  3332. ring->fw_ring_id = INVALID_HW_RING_ID;
  3333. bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
  3334. }
  3335. }
  3336. }
  3337. static void bnxt_hwrm_set_coal_params(struct bnxt *bp, u32 max_bufs,
  3338. u32 buf_tmrs, u16 flags,
  3339. struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
  3340. {
  3341. req->flags = cpu_to_le16(flags);
  3342. req->num_cmpl_dma_aggr = cpu_to_le16((u16)max_bufs);
  3343. req->num_cmpl_dma_aggr_during_int = cpu_to_le16(max_bufs >> 16);
  3344. req->cmpl_aggr_dma_tmr = cpu_to_le16((u16)buf_tmrs);
  3345. req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(buf_tmrs >> 16);
  3346. /* Minimum time between 2 interrupts set to buf_tmr x 2 */
  3347. req->int_lat_tmr_min = cpu_to_le16((u16)buf_tmrs * 2);
  3348. req->int_lat_tmr_max = cpu_to_le16((u16)buf_tmrs * 4);
  3349. req->num_cmpl_aggr_int = cpu_to_le16((u16)max_bufs * 4);
  3350. }
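/* Build the rx and tx interrupt coalescing parameters from the configured
 * tick/buffer values and send them to every completion ring, using the rx
 * or tx template depending on whether the ring services an rx ring.
 */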
  3351. int bnxt_hwrm_set_coal(struct bnxt *bp)
  3352. {
  3353. int i, rc = 0;
  3354. struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
  3355. req_tx = {0}, *req;
  3356. u16 max_buf, max_buf_irq;
  3357. u16 buf_tmr, buf_tmr_irq;
  3358. u32 flags;
  3359. bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
  3360. HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
  3361. bnxt_hwrm_cmd_hdr_init(bp, &req_tx,
  3362. HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
  3363. /* Each rx completion (2 records) should be DMAed immediately.
  3364. * DMA 1/4 of the completion buffers at a time.
  3365. */
  3366. max_buf = min_t(u16, bp->rx_coal_bufs / 4, 2);
  3367. /* max_buf must not be zero */
  3368. max_buf = clamp_t(u16, max_buf, 1, 63);
  3369. max_buf_irq = clamp_t(u16, bp->rx_coal_bufs_irq, 1, 63);
  3370. buf_tmr = BNXT_USEC_TO_COAL_TIMER(bp->rx_coal_ticks);
  3371. /* buf timer set to 1/4 of interrupt timer */
  3372. buf_tmr = max_t(u16, buf_tmr / 4, 1);
  3373. buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(bp->rx_coal_ticks_irq);
  3374. buf_tmr_irq = max_t(u16, buf_tmr_irq, 1);
  3375. flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
  3376. /* RING_IDLE generates more IRQs for lower latency. Enable it only
  3377. * if coal_ticks is less than 25 us.
  3378. */
  3379. if (bp->rx_coal_ticks < 25)
  3380. flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
  3381. bnxt_hwrm_set_coal_params(bp, max_buf_irq << 16 | max_buf,
  3382. buf_tmr_irq << 16 | buf_tmr, flags, &req_rx);
  3383. /* max_buf must not be zero */
  3384. max_buf = clamp_t(u16, bp->tx_coal_bufs, 1, 63);
  3385. max_buf_irq = clamp_t(u16, bp->tx_coal_bufs_irq, 1, 63);
  3386. buf_tmr = BNXT_USEC_TO_COAL_TIMER(bp->tx_coal_ticks);
  3387. /* buf timer set to 1/4 of interrupt timer */
  3388. buf_tmr = max_t(u16, buf_tmr / 4, 1);
  3389. buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(bp->tx_coal_ticks_irq);
  3390. buf_tmr_irq = max_t(u16, buf_tmr_irq, 1);
  3391. flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
  3392. bnxt_hwrm_set_coal_params(bp, max_buf_irq << 16 | max_buf,
  3393. buf_tmr_irq << 16 | buf_tmr, flags, &req_tx);
  3394. mutex_lock(&bp->hwrm_cmd_lock);
  3395. for (i = 0; i < bp->cp_nr_rings; i++) {
  3396. struct bnxt_napi *bnapi = bp->bnapi[i];
  3397. req = &req_rx;
  3398. if (!bnapi->rx_ring)
  3399. req = &req_tx;
  3400. req->ring_id = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id);
  3401. rc = _hwrm_send_message(bp, req, sizeof(*req),
  3402. HWRM_CMD_TIMEOUT);
  3403. if (rc)
  3404. break;
  3405. }
  3406. mutex_unlock(&bp->hwrm_cmd_lock);
  3407. return rc;
  3408. }
  3409. static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
  3410. {
  3411. int rc = 0, i;
  3412. struct hwrm_stat_ctx_free_input req = {0};
  3413. if (!bp->bnapi)
  3414. return 0;
  3415. if (BNXT_CHIP_TYPE_NITRO_A0(bp))
  3416. return 0;
  3417. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
  3418. mutex_lock(&bp->hwrm_cmd_lock);
  3419. for (i = 0; i < bp->cp_nr_rings; i++) {
  3420. struct bnxt_napi *bnapi = bp->bnapi[i];
  3421. struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
  3422. if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
  3423. req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
  3424. rc = _hwrm_send_message(bp, &req, sizeof(req),
  3425. HWRM_CMD_TIMEOUT);
  3426. if (rc)
  3427. break;
  3428. cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
  3429. }
  3430. }
  3431. mutex_unlock(&bp->hwrm_cmd_lock);
  3432. return rc;
  3433. }
  3434. static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
  3435. {
  3436. int rc = 0, i;
  3437. struct hwrm_stat_ctx_alloc_input req = {0};
  3438. struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
  3439. if (BNXT_CHIP_TYPE_NITRO_A0(bp))
  3440. return 0;
  3441. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
  3442. req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
  3443. mutex_lock(&bp->hwrm_cmd_lock);
  3444. for (i = 0; i < bp->cp_nr_rings; i++) {
  3445. struct bnxt_napi *bnapi = bp->bnapi[i];
  3446. struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
  3447. req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map);
  3448. rc = _hwrm_send_message(bp, &req, sizeof(req),
  3449. HWRM_CMD_TIMEOUT);
  3450. if (rc)
  3451. break;
  3452. cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
  3453. bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
  3454. }
  3455. mutex_unlock(&bp->hwrm_cmd_lock);
  3456. return rc;
  3457. }
  3458. static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
  3459. {
  3460. struct hwrm_func_qcfg_input req = {0};
  3461. struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
  3462. int rc;
  3463. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
  3464. req.fid = cpu_to_le16(0xffff);
  3465. mutex_lock(&bp->hwrm_cmd_lock);
  3466. rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  3467. if (rc)
  3468. goto func_qcfg_exit;
  3469. #ifdef CONFIG_BNXT_SRIOV
  3470. if (BNXT_VF(bp)) {
  3471. struct bnxt_vf_info *vf = &bp->vf;
  3472. vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
  3473. }
  3474. #endif
  3475. switch (resp->port_partition_type) {
  3476. case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
  3477. case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
  3478. case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
  3479. bp->port_partition_type = resp->port_partition_type;
  3480. break;
  3481. }
  3482. func_qcfg_exit:
  3483. mutex_unlock(&bp->hwrm_cmd_lock);
  3484. return rc;
  3485. }
  3486. int bnxt_hwrm_func_qcaps(struct bnxt *bp)
  3487. {
  3488. int rc = 0;
  3489. struct hwrm_func_qcaps_input req = {0};
  3490. struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
  3491. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
  3492. req.fid = cpu_to_le16(0xffff);
  3493. mutex_lock(&bp->hwrm_cmd_lock);
  3494. rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  3495. if (rc)
  3496. goto hwrm_func_qcaps_exit;
  3497. bp->tx_push_thresh = 0;
  3498. if (resp->flags &
  3499. cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED))
  3500. bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
  3501. if (BNXT_PF(bp)) {
  3502. struct bnxt_pf_info *pf = &bp->pf;
  3503. pf->fw_fid = le16_to_cpu(resp->fid);
  3504. pf->port_id = le16_to_cpu(resp->port_id);
  3505. bp->dev->dev_port = pf->port_id;
  3506. memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
  3507. memcpy(bp->dev->dev_addr, pf->mac_addr, ETH_ALEN);
  3508. pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
  3509. pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
  3510. pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
  3511. pf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
  3512. pf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
  3513. if (!pf->max_hw_ring_grps)
  3514. pf->max_hw_ring_grps = pf->max_tx_rings;
  3515. pf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
  3516. pf->max_vnics = le16_to_cpu(resp->max_vnics);
  3517. pf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
  3518. pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
  3519. pf->max_vfs = le16_to_cpu(resp->max_vfs);
  3520. pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
  3521. pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
  3522. pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
  3523. pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
  3524. pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
  3525. pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
  3526. } else {
  3527. #ifdef CONFIG_BNXT_SRIOV
  3528. struct bnxt_vf_info *vf = &bp->vf;
  3529. vf->fw_fid = le16_to_cpu(resp->fid);
  3530. vf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
  3531. vf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
  3532. vf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
  3533. vf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
  3534. vf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
  3535. if (!vf->max_hw_ring_grps)
  3536. vf->max_hw_ring_grps = vf->max_tx_rings;
  3537. vf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
  3538. vf->max_vnics = le16_to_cpu(resp->max_vnics);
  3539. vf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
  3540. memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
  3541. mutex_unlock(&bp->hwrm_cmd_lock);
  3542. if (is_valid_ether_addr(vf->mac_addr)) {
3543. /* overwrite netdev dev_addr with admin VF MAC */
  3544. memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
  3545. } else {
  3546. random_ether_addr(bp->dev->dev_addr);
  3547. rc = bnxt_approve_mac(bp, bp->dev->dev_addr);
  3548. }
  3549. return rc;
  3550. #endif
  3551. }
  3552. hwrm_func_qcaps_exit:
  3553. mutex_unlock(&bp->hwrm_cmd_lock);
  3554. return rc;
  3555. }
  3556. static int bnxt_hwrm_func_reset(struct bnxt *bp)
  3557. {
  3558. struct hwrm_func_reset_input req = {0};
  3559. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
  3560. req.enables = 0;
  3561. return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
  3562. }
  3563. static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
  3564. {
  3565. int rc = 0;
  3566. struct hwrm_queue_qportcfg_input req = {0};
  3567. struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
  3568. u8 i, *qptr;
  3569. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);
  3570. mutex_lock(&bp->hwrm_cmd_lock);
  3571. rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  3572. if (rc)
  3573. goto qportcfg_exit;
  3574. if (!resp->max_configurable_queues) {
  3575. rc = -EINVAL;
  3576. goto qportcfg_exit;
  3577. }
  3578. bp->max_tc = resp->max_configurable_queues;
  3579. if (bp->max_tc > BNXT_MAX_QUEUE)
  3580. bp->max_tc = BNXT_MAX_QUEUE;
  3581. if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
  3582. bp->max_tc = 1;
  3583. qptr = &resp->queue_id0;
  3584. for (i = 0; i < bp->max_tc; i++) {
  3585. bp->q_info[i].queue_id = *qptr++;
  3586. bp->q_info[i].queue_profile = *qptr++;
  3587. }
  3588. qportcfg_exit:
  3589. mutex_unlock(&bp->hwrm_cmd_lock);
  3590. return rc;
  3591. }
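/* Query the firmware and HWRM interface versions.  Saves the firmware
 * version string, default command timeout, maximum request length and chip
 * number, and warns if the HWRM interface is older than 1.0.0.
 */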
  3592. static int bnxt_hwrm_ver_get(struct bnxt *bp)
  3593. {
  3594. int rc;
  3595. struct hwrm_ver_get_input req = {0};
  3596. struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
  3597. bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
  3598. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
  3599. req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
  3600. req.hwrm_intf_min = HWRM_VERSION_MINOR;
  3601. req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
  3602. mutex_lock(&bp->hwrm_cmd_lock);
  3603. rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  3604. if (rc)
  3605. goto hwrm_ver_get_exit;
  3606. memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
  3607. bp->hwrm_spec_code = resp->hwrm_intf_maj << 16 |
  3608. resp->hwrm_intf_min << 8 | resp->hwrm_intf_upd;
  3609. if (resp->hwrm_intf_maj < 1) {
  3610. netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
  3611. resp->hwrm_intf_maj, resp->hwrm_intf_min,
  3612. resp->hwrm_intf_upd);
  3613. netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
  3614. }
  3615. snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d/%d.%d.%d",
  3616. resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld,
  3617. resp->hwrm_intf_maj, resp->hwrm_intf_min, resp->hwrm_intf_upd);
  3618. bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
  3619. if (!bp->hwrm_cmd_timeout)
  3620. bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
  3621. if (resp->hwrm_intf_maj >= 1)
  3622. bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
  3623. bp->chip_num = le16_to_cpu(resp->chip_num);
  3624. if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
  3625. !resp->chip_metal)
  3626. bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
  3627. hwrm_ver_get_exit:
  3628. mutex_unlock(&bp->hwrm_cmd_lock);
  3629. return rc;
  3630. }
  3631. int bnxt_hwrm_fw_set_time(struct bnxt *bp)
  3632. {
  3633. #if IS_ENABLED(CONFIG_RTC_LIB)
  3634. struct hwrm_fw_set_time_input req = {0};
  3635. struct rtc_time tm;
  3636. struct timeval tv;
  3637. if (bp->hwrm_spec_code < 0x10400)
  3638. return -EOPNOTSUPP;
  3639. do_gettimeofday(&tv);
  3640. rtc_time_to_tm(tv.tv_sec, &tm);
  3641. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1);
  3642. req.year = cpu_to_le16(1900 + tm.tm_year);
  3643. req.month = 1 + tm.tm_mon;
  3644. req.day = tm.tm_mday;
  3645. req.hour = tm.tm_hour;
  3646. req.minute = tm.tm_min;
  3647. req.second = tm.tm_sec;
  3648. return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  3649. #else
  3650. return -EOPNOTSUPP;
  3651. #endif
  3652. }
  3653. static int bnxt_hwrm_port_qstats(struct bnxt *bp)
  3654. {
  3655. int rc;
  3656. struct bnxt_pf_info *pf = &bp->pf;
  3657. struct hwrm_port_qstats_input req = {0};
  3658. if (!(bp->flags & BNXT_FLAG_PORT_STATS))
  3659. return 0;
  3660. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1);
  3661. req.port_id = cpu_to_le16(pf->port_id);
  3662. req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map);
  3663. req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map);
  3664. rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  3665. return rc;
  3666. }
  3667. static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
  3668. {
  3669. if (bp->vxlan_port_cnt) {
  3670. bnxt_hwrm_tunnel_dst_port_free(
  3671. bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
  3672. }
  3673. bp->vxlan_port_cnt = 0;
  3674. if (bp->nge_port_cnt) {
  3675. bnxt_hwrm_tunnel_dst_port_free(
  3676. bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
  3677. }
  3678. bp->nge_port_cnt = 0;
  3679. }
  3680. static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
  3681. {
  3682. int rc, i;
  3683. u32 tpa_flags = 0;
  3684. if (set_tpa)
  3685. tpa_flags = bp->flags & BNXT_FLAG_TPA;
  3686. for (i = 0; i < bp->nr_vnics; i++) {
  3687. rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
  3688. if (rc) {
  3689. netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
  3690. rc, i);
  3691. return rc;
  3692. }
  3693. }
  3694. return 0;
  3695. }
  3696. static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
  3697. {
  3698. int i;
  3699. for (i = 0; i < bp->nr_vnics; i++)
  3700. bnxt_hwrm_vnic_set_rss(bp, i, false);
  3701. }
  3702. static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
  3703. bool irq_re_init)
  3704. {
  3705. if (bp->vnic_info) {
  3706. bnxt_hwrm_clear_vnic_filter(bp);
3707. /* clear all RSS settings before freeing the vnic ctx */
  3708. bnxt_hwrm_clear_vnic_rss(bp);
  3709. bnxt_hwrm_vnic_ctx_free(bp);
3710. /* before freeing the vnic, undo the vnic tpa settings */
  3711. if (bp->flags & BNXT_FLAG_TPA)
  3712. bnxt_set_tpa(bp, false);
  3713. bnxt_hwrm_vnic_free(bp);
  3714. }
  3715. bnxt_hwrm_ring_free(bp, close_path);
  3716. bnxt_hwrm_ring_grp_free(bp);
  3717. if (irq_re_init) {
  3718. bnxt_hwrm_stat_ctx_free(bp);
  3719. bnxt_hwrm_free_tunnel_ports(bp);
  3720. }
  3721. }
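/* Set up one vnic: allocate its RSS (and, on Nitro A0, COS) contexts,
 * configure the vnic, enable RSS and, when aggregation rings are in use,
 * enable header-data split.
 */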
  3722. static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
  3723. {
  3724. int rc;
  3725. /* allocate context for vnic */
  3726. rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
  3727. if (rc) {
  3728. netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
  3729. vnic_id, rc);
  3730. goto vnic_setup_err;
  3731. }
  3732. bp->rsscos_nr_ctxs++;
  3733. if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
  3734. rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
  3735. if (rc) {
  3736. netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
  3737. vnic_id, rc);
  3738. goto vnic_setup_err;
  3739. }
  3740. bp->rsscos_nr_ctxs++;
  3741. }
  3742. /* configure default vnic, ring grp */
  3743. rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
  3744. if (rc) {
  3745. netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
  3746. vnic_id, rc);
  3747. goto vnic_setup_err;
  3748. }
  3749. /* Enable RSS hashing on vnic */
  3750. rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
  3751. if (rc) {
  3752. netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
  3753. vnic_id, rc);
  3754. goto vnic_setup_err;
  3755. }
  3756. if (bp->flags & BNXT_FLAG_AGG_RINGS) {
  3757. rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
  3758. if (rc) {
  3759. netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
  3760. vnic_id, rc);
  3761. }
  3762. }
  3763. vnic_setup_err:
  3764. return rc;
  3765. }
  3766. static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
  3767. {
  3768. #ifdef CONFIG_RFS_ACCEL
  3769. int i, rc = 0;
  3770. for (i = 0; i < bp->rx_nr_rings; i++) {
  3771. u16 vnic_id = i + 1;
  3772. u16 ring_id = i;
  3773. if (vnic_id >= bp->nr_vnics)
  3774. break;
  3775. bp->vnic_info[vnic_id].flags |= BNXT_VNIC_RFS_FLAG;
  3776. rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
  3777. if (rc) {
  3778. netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
  3779. vnic_id, rc);
  3780. break;
  3781. }
  3782. rc = bnxt_setup_vnic(bp, vnic_id);
  3783. if (rc)
  3784. break;
  3785. }
  3786. return rc;
  3787. #else
  3788. return 0;
  3789. #endif
  3790. }
  3791. /* Allow PF and VF with default VLAN to be in promiscuous mode */
  3792. static bool bnxt_promisc_ok(struct bnxt *bp)
  3793. {
  3794. #ifdef CONFIG_BNXT_SRIOV
  3795. if (BNXT_VF(bp) && !bp->vf.vlan)
  3796. return false;
  3797. #endif
  3798. return true;
  3799. }
  3800. static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
  3801. {
  3802. unsigned int rc = 0;
  3803. rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
  3804. if (rc) {
  3805. netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
  3806. rc);
  3807. return rc;
  3808. }
  3809. rc = bnxt_hwrm_vnic_cfg(bp, 1);
  3810. if (rc) {
  3811. netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
  3812. rc);
  3813. return rc;
  3814. }
  3815. return rc;
  3816. }
  3817. static int bnxt_cfg_rx_mode(struct bnxt *);
  3818. static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
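/* Program the chip for operation: allocate stat contexts (on IRQ re-init),
 * rings, ring groups and the default VNIC 0, set up RSS/TPA and any RFS
 * VNICs, install the default MAC filter and RX mask, and apply interrupt
 * coalescing. Frees everything again on any failure.
 */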
  3819. static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
  3820. {
  3821. struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
  3822. int rc = 0;
  3823. unsigned int rx_nr_rings = bp->rx_nr_rings;
  3824. if (irq_re_init) {
  3825. rc = bnxt_hwrm_stat_ctx_alloc(bp);
  3826. if (rc) {
  3827. netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
  3828. rc);
  3829. goto err_out;
  3830. }
  3831. }
  3832. rc = bnxt_hwrm_ring_alloc(bp);
  3833. if (rc) {
  3834. netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
  3835. goto err_out;
  3836. }
  3837. rc = bnxt_hwrm_ring_grp_alloc(bp);
  3838. if (rc) {
  3839. netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
  3840. goto err_out;
  3841. }
  3842. if (BNXT_CHIP_TYPE_NITRO_A0(bp))
  3843. rx_nr_rings--;
  3844. /* default vnic 0 */
  3845. rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
  3846. if (rc) {
  3847. netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
  3848. goto err_out;
  3849. }
  3850. rc = bnxt_setup_vnic(bp, 0);
  3851. if (rc)
  3852. goto err_out;
  3853. if (bp->flags & BNXT_FLAG_RFS) {
  3854. rc = bnxt_alloc_rfs_vnics(bp);
  3855. if (rc)
  3856. goto err_out;
  3857. }
  3858. if (bp->flags & BNXT_FLAG_TPA) {
  3859. rc = bnxt_set_tpa(bp, true);
  3860. if (rc)
  3861. goto err_out;
  3862. }
  3863. if (BNXT_VF(bp))
  3864. bnxt_update_vf_mac(bp);
  3865. /* Filter for default vnic 0 */
  3866. rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
  3867. if (rc) {
  3868. netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
  3869. goto err_out;
  3870. }
  3871. vnic->uc_filter_count = 1;
  3872. vnic->rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
  3873. if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
  3874. vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
  3875. if (bp->dev->flags & IFF_ALLMULTI) {
  3876. vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
  3877. vnic->mc_list_count = 0;
  3878. } else {
  3879. u32 mask = 0;
  3880. bnxt_mc_list_updated(bp, &mask);
  3881. vnic->rx_mask |= mask;
  3882. }
  3883. rc = bnxt_cfg_rx_mode(bp);
  3884. if (rc)
  3885. goto err_out;
  3886. rc = bnxt_hwrm_set_coal(bp);
  3887. if (rc)
  3888. netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
  3889. rc);
  3890. if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
  3891. rc = bnxt_setup_nitroa0_vnic(bp);
  3892. if (rc)
  3893. netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
  3894. rc);
  3895. }
  3896. if (BNXT_VF(bp)) {
  3897. bnxt_hwrm_func_qcfg(bp);
  3898. netdev_update_features(bp->dev);
  3899. }
  3900. return 0;
  3901. err_out:
  3902. bnxt_hwrm_resource_free(bp, 0, true);
  3903. return rc;
  3904. }
  3905. static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
  3906. {
  3907. bnxt_hwrm_resource_free(bp, 1, irq_re_init);
  3908. return 0;
  3909. }
  3910. static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
  3911. {
  3912. bnxt_init_cp_rings(bp);
  3913. bnxt_init_rx_rings(bp);
  3914. bnxt_init_tx_rings(bp);
  3915. bnxt_init_ring_grps(bp, irq_re_init);
  3916. bnxt_init_vnics(bp);
  3917. return bnxt_init_chip(bp, irq_re_init);
  3918. }
  3919. static void bnxt_disable_int(struct bnxt *bp)
  3920. {
  3921. int i;
  3922. if (!bp->bnapi)
  3923. return;
  3924. for (i = 0; i < bp->cp_nr_rings; i++) {
  3925. struct bnxt_napi *bnapi = bp->bnapi[i];
  3926. struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
  3927. BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
  3928. }
  3929. }
  3930. static void bnxt_enable_int(struct bnxt *bp)
  3931. {
  3932. int i;
  3933. atomic_set(&bp->intr_sem, 0);
  3934. for (i = 0; i < bp->cp_nr_rings; i++) {
  3935. struct bnxt_napi *bnapi = bp->bnapi[i];
  3936. struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
  3937. BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
  3938. }
  3939. }
  3940. static int bnxt_set_real_num_queues(struct bnxt *bp)
  3941. {
  3942. int rc;
  3943. struct net_device *dev = bp->dev;
  3944. rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings);
  3945. if (rc)
  3946. return rc;
  3947. rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
  3948. if (rc)
  3949. return rc;
  3950. #ifdef CONFIG_RFS_ACCEL
  3951. if (bp->flags & BNXT_FLAG_RFS)
  3952. dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
  3953. #endif
  3954. return rc;
  3955. }
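/* Fit the requested RX and TX ring counts into 'max' completion rings.
 * Shared rings are simply capped at max; otherwise RX and TX are reduced
 * alternately until their sum fits.
 */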
  3956. static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
  3957. bool shared)
  3958. {
  3959. int _rx = *rx, _tx = *tx;
  3960. if (shared) {
  3961. *rx = min_t(int, _rx, max);
  3962. *tx = min_t(int, _tx, max);
  3963. } else {
  3964. if (max < 2)
  3965. return -ENOMEM;
  3966. while (_rx + _tx > max) {
  3967. if (_rx > _tx && _rx > 1)
  3968. _rx--;
  3969. else if (_tx > 1)
  3970. _tx--;
  3971. }
  3972. *rx = _rx;
  3973. *tx = _tx;
  3974. }
  3975. return 0;
  3976. }
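/* Enable MSI-X: request one vector per completion ring (minimum two unless
 * rings are shared), trim the ring counts to the vectors actually granted,
 * spread TX rings across traffic classes, and fill in the IRQ table with
 * vector numbers, names and handlers.
 */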
  3977. static int bnxt_setup_msix(struct bnxt *bp)
  3978. {
  3979. struct msix_entry *msix_ent;
  3980. struct net_device *dev = bp->dev;
  3981. int i, total_vecs, rc = 0, min = 1;
  3982. const int len = sizeof(bp->irq_tbl[0].name);
  3983. bp->flags &= ~BNXT_FLAG_USING_MSIX;
  3984. total_vecs = bp->cp_nr_rings;
  3985. msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
  3986. if (!msix_ent)
  3987. return -ENOMEM;
  3988. for (i = 0; i < total_vecs; i++) {
  3989. msix_ent[i].entry = i;
  3990. msix_ent[i].vector = 0;
  3991. }
  3992. if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
  3993. min = 2;
  3994. total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
  3995. if (total_vecs < 0) {
  3996. rc = -ENODEV;
  3997. goto msix_setup_exit;
  3998. }
  3999. bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
  4000. if (bp->irq_tbl) {
  4001. int tcs;
  4002. /* Trim rings based upon num of vectors allocated */
  4003. rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
  4004. total_vecs, min == 1);
  4005. if (rc)
  4006. goto msix_setup_exit;
  4007. bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
  4008. tcs = netdev_get_num_tc(dev);
  4009. if (tcs > 1) {
  4010. bp->tx_nr_rings_per_tc = bp->tx_nr_rings / tcs;
  4011. if (bp->tx_nr_rings_per_tc == 0) {
  4012. netdev_reset_tc(dev);
  4013. bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
  4014. } else {
  4015. int i, off, count;
  4016. bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs;
  4017. for (i = 0; i < tcs; i++) {
  4018. count = bp->tx_nr_rings_per_tc;
  4019. off = i * count;
  4020. netdev_set_tc_queue(dev, i, count, off);
  4021. }
  4022. }
  4023. }
  4024. bp->cp_nr_rings = total_vecs;
  4025. for (i = 0; i < bp->cp_nr_rings; i++) {
  4026. char *attr;
  4027. bp->irq_tbl[i].vector = msix_ent[i].vector;
  4028. if (bp->flags & BNXT_FLAG_SHARED_RINGS)
  4029. attr = "TxRx";
  4030. else if (i < bp->rx_nr_rings)
  4031. attr = "rx";
  4032. else
  4033. attr = "tx";
  4034. snprintf(bp->irq_tbl[i].name, len,
  4035. "%s-%s-%d", dev->name, attr, i);
  4036. bp->irq_tbl[i].handler = bnxt_msix;
  4037. }
  4038. rc = bnxt_set_real_num_queues(bp);
  4039. if (rc)
  4040. goto msix_setup_exit;
  4041. } else {
  4042. rc = -ENOMEM;
  4043. goto msix_setup_exit;
  4044. }
  4045. bp->flags |= BNXT_FLAG_USING_MSIX;
  4046. kfree(msix_ent);
  4047. return 0;
  4048. msix_setup_exit:
  4049. netdev_err(bp->dev, "bnxt_setup_msix err: %x\n", rc);
  4050. pci_disable_msix(bp->pdev);
  4051. kfree(msix_ent);
  4052. return rc;
  4053. }
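/* Legacy INTx fallback: a single shared completion ring serving one RX and
 * one TX ring, using the PCI function's INTx vector.
 */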
  4054. static int bnxt_setup_inta(struct bnxt *bp)
  4055. {
  4056. int rc;
  4057. const int len = sizeof(bp->irq_tbl[0].name);
  4058. if (netdev_get_num_tc(bp->dev))
  4059. netdev_reset_tc(bp->dev);
  4060. bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL);
  4061. if (!bp->irq_tbl) {
  4062. rc = -ENOMEM;
  4063. return rc;
  4064. }
  4065. bp->rx_nr_rings = 1;
  4066. bp->tx_nr_rings = 1;
  4067. bp->cp_nr_rings = 1;
  4068. bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
  4069. bp->flags |= BNXT_FLAG_SHARED_RINGS;
  4070. bp->irq_tbl[0].vector = bp->pdev->irq;
  4071. snprintf(bp->irq_tbl[0].name, len,
  4072. "%s-%s-%d", bp->dev->name, "TxRx", 0);
  4073. bp->irq_tbl[0].handler = bnxt_inta;
  4074. rc = bnxt_set_real_num_queues(bp);
  4075. return rc;
  4076. }
  4077. static int bnxt_setup_int_mode(struct bnxt *bp)
  4078. {
  4079. int rc = 0;
  4080. if (bp->flags & BNXT_FLAG_MSIX_CAP)
  4081. rc = bnxt_setup_msix(bp);
  4082. if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
  4083. /* fallback to INTA */
  4084. rc = bnxt_setup_inta(bp);
  4085. }
  4086. return rc;
  4087. }
  4088. static void bnxt_free_irq(struct bnxt *bp)
  4089. {
  4090. struct bnxt_irq *irq;
  4091. int i;
  4092. #ifdef CONFIG_RFS_ACCEL
  4093. free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
  4094. bp->dev->rx_cpu_rmap = NULL;
  4095. #endif
  4096. if (!bp->irq_tbl)
  4097. return;
  4098. for (i = 0; i < bp->cp_nr_rings; i++) {
  4099. irq = &bp->irq_tbl[i];
  4100. if (irq->requested)
  4101. free_irq(irq->vector, bp->bnapi[i]);
  4102. irq->requested = 0;
  4103. }
  4104. if (bp->flags & BNXT_FLAG_USING_MSIX)
  4105. pci_disable_msix(bp->pdev);
  4106. kfree(bp->irq_tbl);
  4107. bp->irq_tbl = NULL;
  4108. }
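/* Request one IRQ per completion ring and, when RFS is enabled, add the RX
 * vectors to the CPU reverse map used for accelerated RFS steering.
 */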
  4109. static int bnxt_request_irq(struct bnxt *bp)
  4110. {
  4111. int i, j, rc = 0;
  4112. unsigned long flags = 0;
  4113. #ifdef CONFIG_RFS_ACCEL
  4114. struct cpu_rmap *rmap = bp->dev->rx_cpu_rmap;
  4115. #endif
  4116. if (!(bp->flags & BNXT_FLAG_USING_MSIX))
  4117. flags = IRQF_SHARED;
  4118. for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
  4119. struct bnxt_irq *irq = &bp->irq_tbl[i];
  4120. #ifdef CONFIG_RFS_ACCEL
  4121. if (rmap && bp->bnapi[i]->rx_ring) {
  4122. rc = irq_cpu_rmap_add(rmap, irq->vector);
  4123. if (rc)
  4124. netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
  4125. j);
  4126. j++;
  4127. }
  4128. #endif
  4129. rc = request_irq(irq->vector, irq->handler, flags, irq->name,
  4130. bp->bnapi[i]);
  4131. if (rc)
  4132. break;
  4133. irq->requested = 1;
  4134. }
  4135. return rc;
  4136. }
  4137. static void bnxt_del_napi(struct bnxt *bp)
  4138. {
  4139. int i;
  4140. if (!bp->bnapi)
  4141. return;
  4142. for (i = 0; i < bp->cp_nr_rings; i++) {
  4143. struct bnxt_napi *bnapi = bp->bnapi[i];
  4144. napi_hash_del(&bnapi->napi);
  4145. netif_napi_del(&bnapi->napi);
  4146. }
4147. /* We called napi_hash_del() before netif_napi_del(), so we need
4148. * to respect an RCU grace period before freeing the napi structures.
  4149. */
  4150. synchronize_net();
  4151. }
  4152. static void bnxt_init_napi(struct bnxt *bp)
  4153. {
  4154. int i;
  4155. unsigned int cp_nr_rings = bp->cp_nr_rings;
  4156. struct bnxt_napi *bnapi;
  4157. if (bp->flags & BNXT_FLAG_USING_MSIX) {
  4158. if (BNXT_CHIP_TYPE_NITRO_A0(bp))
  4159. cp_nr_rings--;
  4160. for (i = 0; i < cp_nr_rings; i++) {
  4161. bnapi = bp->bnapi[i];
  4162. netif_napi_add(bp->dev, &bnapi->napi,
  4163. bnxt_poll, 64);
  4164. }
  4165. if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
  4166. bnapi = bp->bnapi[cp_nr_rings];
  4167. netif_napi_add(bp->dev, &bnapi->napi,
  4168. bnxt_poll_nitroa0, 64);
  4169. napi_hash_add(&bnapi->napi);
  4170. }
  4171. } else {
  4172. bnapi = bp->bnapi[0];
  4173. netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
  4174. }
  4175. }
  4176. static void bnxt_disable_napi(struct bnxt *bp)
  4177. {
  4178. int i;
  4179. if (!bp->bnapi)
  4180. return;
  4181. for (i = 0; i < bp->cp_nr_rings; i++) {
  4182. napi_disable(&bp->bnapi[i]->napi);
  4183. bnxt_disable_poll(bp->bnapi[i]);
  4184. }
  4185. }
  4186. static void bnxt_enable_napi(struct bnxt *bp)
  4187. {
  4188. int i;
  4189. for (i = 0; i < bp->cp_nr_rings; i++) {
  4190. bp->bnapi[i]->in_reset = false;
  4191. bnxt_enable_poll(bp->bnapi[i]);
  4192. napi_enable(&bp->bnapi[i]->napi);
  4193. }
  4194. }
  4195. static void bnxt_tx_disable(struct bnxt *bp)
  4196. {
  4197. int i;
  4198. struct bnxt_tx_ring_info *txr;
  4199. struct netdev_queue *txq;
  4200. if (bp->tx_ring) {
  4201. for (i = 0; i < bp->tx_nr_rings; i++) {
  4202. txr = &bp->tx_ring[i];
  4203. txq = netdev_get_tx_queue(bp->dev, i);
  4204. txr->dev_state = BNXT_DEV_STATE_CLOSING;
  4205. }
  4206. }
  4207. /* Stop all TX queues */
  4208. netif_tx_disable(bp->dev);
  4209. netif_carrier_off(bp->dev);
  4210. }
  4211. static void bnxt_tx_enable(struct bnxt *bp)
  4212. {
  4213. int i;
  4214. struct bnxt_tx_ring_info *txr;
  4215. struct netdev_queue *txq;
  4216. for (i = 0; i < bp->tx_nr_rings; i++) {
  4217. txr = &bp->tx_ring[i];
  4218. txq = netdev_get_tx_queue(bp->dev, i);
  4219. txr->dev_state = 0;
  4220. }
  4221. netif_tx_wake_all_queues(bp->dev);
  4222. if (bp->link_info.link_up)
  4223. netif_carrier_on(bp->dev);
  4224. }
  4225. static void bnxt_report_link(struct bnxt *bp)
  4226. {
  4227. if (bp->link_info.link_up) {
  4228. const char *duplex;
  4229. const char *flow_ctrl;
  4230. u16 speed;
  4231. netif_carrier_on(bp->dev);
  4232. if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
  4233. duplex = "full";
  4234. else
  4235. duplex = "half";
  4236. if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
  4237. flow_ctrl = "ON - receive & transmit";
  4238. else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
  4239. flow_ctrl = "ON - transmit";
  4240. else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
  4241. flow_ctrl = "ON - receive";
  4242. else
  4243. flow_ctrl = "none";
  4244. speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
  4245. netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
  4246. speed, duplex, flow_ctrl);
  4247. if (bp->flags & BNXT_FLAG_EEE_CAP)
  4248. netdev_info(bp->dev, "EEE is %s\n",
  4249. bp->eee.eee_active ? "active" :
  4250. "not active");
  4251. } else {
  4252. netif_carrier_off(bp->dev);
  4253. netdev_err(bp->dev, "NIC Link is Down\n");
  4254. }
  4255. }
  4256. static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
  4257. {
  4258. int rc = 0;
  4259. struct hwrm_port_phy_qcaps_input req = {0};
  4260. struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
  4261. struct bnxt_link_info *link_info = &bp->link_info;
  4262. if (bp->hwrm_spec_code < 0x10201)
  4263. return 0;
  4264. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
  4265. mutex_lock(&bp->hwrm_cmd_lock);
  4266. rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  4267. if (rc)
  4268. goto hwrm_phy_qcaps_exit;
  4269. if (resp->eee_supported & PORT_PHY_QCAPS_RESP_EEE_SUPPORTED) {
  4270. struct ethtool_eee *eee = &bp->eee;
  4271. u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
  4272. bp->flags |= BNXT_FLAG_EEE_CAP;
  4273. eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
  4274. bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
  4275. PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
  4276. bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
  4277. PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
  4278. }
  4279. if (resp->supported_speeds_auto_mode)
  4280. link_info->support_auto_speeds =
  4281. le16_to_cpu(resp->supported_speeds_auto_mode);
  4282. hwrm_phy_qcaps_exit:
  4283. mutex_unlock(&bp->hwrm_cmd_lock);
  4284. return rc;
  4285. }
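/* Query PORT_PHY_QCFG and cache the current PHY, link, pause and EEE state.
 * When chng_link_state is set, update link_up and report any change; on a
 * single PF, also prune advertised speeds that are no longer supported.
 */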
  4286. static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
  4287. {
  4288. int rc = 0;
  4289. struct bnxt_link_info *link_info = &bp->link_info;
  4290. struct hwrm_port_phy_qcfg_input req = {0};
  4291. struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
  4292. u8 link_up = link_info->link_up;
  4293. u16 diff;
  4294. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
  4295. mutex_lock(&bp->hwrm_cmd_lock);
  4296. rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  4297. if (rc) {
  4298. mutex_unlock(&bp->hwrm_cmd_lock);
  4299. return rc;
  4300. }
  4301. memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
  4302. link_info->phy_link_status = resp->link;
  4303. link_info->duplex = resp->duplex;
  4304. link_info->pause = resp->pause;
  4305. link_info->auto_mode = resp->auto_mode;
  4306. link_info->auto_pause_setting = resp->auto_pause;
  4307. link_info->lp_pause = resp->link_partner_adv_pause;
  4308. link_info->force_pause_setting = resp->force_pause;
  4309. link_info->duplex_setting = resp->duplex;
  4310. if (link_info->phy_link_status == BNXT_LINK_LINK)
  4311. link_info->link_speed = le16_to_cpu(resp->link_speed);
  4312. else
  4313. link_info->link_speed = 0;
  4314. link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
  4315. link_info->support_speeds = le16_to_cpu(resp->support_speeds);
  4316. link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
  4317. link_info->lp_auto_link_speeds =
  4318. le16_to_cpu(resp->link_partner_adv_speeds);
  4319. link_info->preemphasis = le32_to_cpu(resp->preemphasis);
  4320. link_info->phy_ver[0] = resp->phy_maj;
  4321. link_info->phy_ver[1] = resp->phy_min;
  4322. link_info->phy_ver[2] = resp->phy_bld;
  4323. link_info->media_type = resp->media_type;
  4324. link_info->phy_type = resp->phy_type;
  4325. link_info->transceiver = resp->xcvr_pkg_type;
  4326. link_info->phy_addr = resp->eee_config_phy_addr &
  4327. PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
  4328. link_info->module_status = resp->module_status;
  4329. if (bp->flags & BNXT_FLAG_EEE_CAP) {
  4330. struct ethtool_eee *eee = &bp->eee;
  4331. u16 fw_speeds;
  4332. eee->eee_active = 0;
  4333. if (resp->eee_config_phy_addr &
  4334. PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
  4335. eee->eee_active = 1;
  4336. fw_speeds = le16_to_cpu(
  4337. resp->link_partner_adv_eee_link_speed_mask);
  4338. eee->lp_advertised =
  4339. _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
  4340. }
  4341. /* Pull initial EEE config */
  4342. if (!chng_link_state) {
  4343. if (resp->eee_config_phy_addr &
  4344. PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
  4345. eee->eee_enabled = 1;
  4346. fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
  4347. eee->advertised =
  4348. _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
  4349. if (resp->eee_config_phy_addr &
  4350. PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
  4351. __le32 tmr;
  4352. eee->tx_lpi_enabled = 1;
  4353. tmr = resp->xcvr_identifier_type_tx_lpi_timer;
  4354. eee->tx_lpi_timer = le32_to_cpu(tmr) &
  4355. PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
  4356. }
  4357. }
  4358. }
  4359. /* TODO: need to add more logic to report VF link */
  4360. if (chng_link_state) {
  4361. if (link_info->phy_link_status == BNXT_LINK_LINK)
  4362. link_info->link_up = 1;
  4363. else
  4364. link_info->link_up = 0;
  4365. if (link_up != link_info->link_up)
  4366. bnxt_report_link(bp);
  4367. } else {
4368. /* always report link down if not required to update link state */
  4369. link_info->link_up = 0;
  4370. }
  4371. mutex_unlock(&bp->hwrm_cmd_lock);
  4372. if (!BNXT_SINGLE_PF(bp))
  4373. return 0;
  4374. diff = link_info->support_auto_speeds ^ link_info->advertising;
  4375. if ((link_info->support_auto_speeds | diff) !=
  4376. link_info->support_auto_speeds) {
  4377. /* An advertised speed is no longer supported, so we need to
  4378. * update the advertisement settings. Caller holds RTNL
  4379. * so we can modify link settings.
  4380. */
  4381. link_info->advertising = link_info->support_auto_speeds;
  4382. if (link_info->autoneg & BNXT_AUTONEG_SPEED)
  4383. bnxt_hwrm_set_link_setting(bp, true, false);
  4384. }
  4385. return 0;
  4386. }
  4387. static void bnxt_get_port_module_status(struct bnxt *bp)
  4388. {
  4389. struct bnxt_link_info *link_info = &bp->link_info;
  4390. struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
  4391. u8 module_status;
  4392. if (bnxt_update_link(bp, true))
  4393. return;
  4394. module_status = link_info->module_status;
  4395. switch (module_status) {
  4396. case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
  4397. case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
  4398. case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
  4399. netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
  4400. bp->pf.port_id);
  4401. if (bp->hwrm_spec_code >= 0x10201) {
  4402. netdev_warn(bp->dev, "Module part number %s\n",
  4403. resp->phy_vendor_partnumber);
  4404. }
  4405. if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
  4406. netdev_warn(bp->dev, "TX is disabled\n");
  4407. if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
  4408. netdev_warn(bp->dev, "SFP+ module is shutdown\n");
  4409. }
  4410. }
  4411. static void
  4412. bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
  4413. {
  4414. if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
  4415. if (bp->hwrm_spec_code >= 0x10201)
  4416. req->auto_pause =
  4417. PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
  4418. if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
  4419. req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
  4420. if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
  4421. req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
  4422. req->enables |=
  4423. cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
  4424. } else {
  4425. if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
  4426. req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
  4427. if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
  4428. req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
  4429. req->enables |=
  4430. cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
  4431. if (bp->hwrm_spec_code >= 0x10201) {
  4432. req->auto_pause = req->force_pause;
  4433. req->enables |= cpu_to_le32(
  4434. PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
  4435. }
  4436. }
  4437. }
  4438. static void bnxt_hwrm_set_link_common(struct bnxt *bp,
  4439. struct hwrm_port_phy_cfg_input *req)
  4440. {
  4441. u8 autoneg = bp->link_info.autoneg;
  4442. u16 fw_link_speed = bp->link_info.req_link_speed;
  4443. u32 advertising = bp->link_info.advertising;
  4444. if (autoneg & BNXT_AUTONEG_SPEED) {
  4445. req->auto_mode |=
  4446. PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
  4447. req->enables |= cpu_to_le32(
  4448. PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
  4449. req->auto_link_speed_mask = cpu_to_le16(advertising);
  4450. req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
  4451. req->flags |=
  4452. cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
  4453. } else {
  4454. req->force_link_speed = cpu_to_le16(fw_link_speed);
  4455. req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
  4456. }
  4457. /* tell chimp that the setting takes effect immediately */
  4458. req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
  4459. }
  4460. int bnxt_hwrm_set_pause(struct bnxt *bp)
  4461. {
  4462. struct hwrm_port_phy_cfg_input req = {0};
  4463. int rc;
  4464. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
  4465. bnxt_hwrm_set_pause_common(bp, &req);
  4466. if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
  4467. bp->link_info.force_link_chng)
  4468. bnxt_hwrm_set_link_common(bp, &req);
  4469. mutex_lock(&bp->hwrm_cmd_lock);
  4470. rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  4471. if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
4472. /* Since changing the pause setting doesn't trigger any link
4473. * change event, the driver needs to update the current pause
4474. * result upon successful return of the phy_cfg command
  4475. */
  4476. bp->link_info.pause =
  4477. bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
  4478. bp->link_info.auto_pause_setting = 0;
  4479. if (!bp->link_info.force_link_chng)
  4480. bnxt_report_link(bp);
  4481. }
  4482. bp->link_info.force_link_chng = false;
  4483. mutex_unlock(&bp->hwrm_cmd_lock);
  4484. return rc;
  4485. }
  4486. static void bnxt_hwrm_set_eee(struct bnxt *bp,
  4487. struct hwrm_port_phy_cfg_input *req)
  4488. {
  4489. struct ethtool_eee *eee = &bp->eee;
  4490. if (eee->eee_enabled) {
  4491. u16 eee_speeds;
  4492. u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
  4493. if (eee->tx_lpi_enabled)
  4494. flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
  4495. else
  4496. flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
  4497. req->flags |= cpu_to_le32(flags);
  4498. eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
  4499. req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
  4500. req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
  4501. } else {
  4502. req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
  4503. }
  4504. }
  4505. int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
  4506. {
  4507. struct hwrm_port_phy_cfg_input req = {0};
  4508. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
  4509. if (set_pause)
  4510. bnxt_hwrm_set_pause_common(bp, &req);
  4511. bnxt_hwrm_set_link_common(bp, &req);
  4512. if (set_eee)
  4513. bnxt_hwrm_set_eee(bp, &req);
  4514. return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  4515. }
  4516. static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
  4517. {
  4518. struct hwrm_port_phy_cfg_input req = {0};
  4519. if (!BNXT_SINGLE_PF(bp))
  4520. return 0;
  4521. if (pci_num_vf(bp->pdev))
  4522. return 0;
  4523. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
  4524. req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DOWN);
  4525. return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  4526. }
  4527. static bool bnxt_eee_config_ok(struct bnxt *bp)
  4528. {
  4529. struct ethtool_eee *eee = &bp->eee;
  4530. struct bnxt_link_info *link_info = &bp->link_info;
  4531. if (!(bp->flags & BNXT_FLAG_EEE_CAP))
  4532. return true;
  4533. if (eee->eee_enabled) {
  4534. u32 advertising =
  4535. _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
  4536. if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
  4537. eee->eee_enabled = 0;
  4538. return false;
  4539. }
  4540. if (eee->advertised & ~advertising) {
  4541. eee->advertised = advertising & eee->supported;
  4542. return false;
  4543. }
  4544. }
  4545. return true;
  4546. }
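/* Compare the requested pause, speed and EEE settings against the current
 * PHY configuration and issue a PORT_PHY_CFG only when something changed.
 */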
  4547. static int bnxt_update_phy_setting(struct bnxt *bp)
  4548. {
  4549. int rc;
  4550. bool update_link = false;
  4551. bool update_pause = false;
  4552. bool update_eee = false;
  4553. struct bnxt_link_info *link_info = &bp->link_info;
  4554. rc = bnxt_update_link(bp, true);
  4555. if (rc) {
  4556. netdev_err(bp->dev, "failed to update link (rc: %x)\n",
  4557. rc);
  4558. return rc;
  4559. }
  4560. if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
  4561. (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
  4562. link_info->req_flow_ctrl)
  4563. update_pause = true;
  4564. if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
  4565. link_info->force_pause_setting != link_info->req_flow_ctrl)
  4566. update_pause = true;
  4567. if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
  4568. if (BNXT_AUTO_MODE(link_info->auto_mode))
  4569. update_link = true;
  4570. if (link_info->req_link_speed != link_info->force_link_speed)
  4571. update_link = true;
  4572. if (link_info->req_duplex != link_info->duplex_setting)
  4573. update_link = true;
  4574. } else {
  4575. if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
  4576. update_link = true;
  4577. if (link_info->advertising != link_info->auto_link_speeds)
  4578. update_link = true;
  4579. }
  4580. if (!bnxt_eee_config_ok(bp))
  4581. update_eee = true;
  4582. if (update_link)
  4583. rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
  4584. else if (update_pause)
  4585. rc = bnxt_hwrm_set_pause(bp);
  4586. if (rc) {
  4587. netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
  4588. rc);
  4589. return rc;
  4590. }
  4591. return rc;
  4592. }
4593. /* Common routine to pre-map certain register blocks to different GRC windows.
4594. * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows
4595. * in the PF and 3 windows in the VF can be customized to map different
4596. * register blocks.
  4597. */
  4598. static void bnxt_preset_reg_win(struct bnxt *bp)
  4599. {
  4600. if (BNXT_PF(bp)) {
  4601. /* CAG registers map to GRC window #4 */
  4602. writel(BNXT_CAG_REG_BASE,
  4603. bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
  4604. }
  4605. }
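/* Core open path: select the interrupt mode and allocate NAPI/IRQs when
 * irq_re_init is set, allocate ring memory, initialize the rings and
 * firmware resources, optionally update PHY settings, then enable
 * interrupts and TX queues and start the periodic timer.
 */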
  4606. static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
  4607. {
  4608. int rc = 0;
  4609. bnxt_preset_reg_win(bp);
  4610. netif_carrier_off(bp->dev);
  4611. if (irq_re_init) {
  4612. rc = bnxt_setup_int_mode(bp);
  4613. if (rc) {
  4614. netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
  4615. rc);
  4616. return rc;
  4617. }
  4618. }
  4619. if ((bp->flags & BNXT_FLAG_RFS) &&
  4620. !(bp->flags & BNXT_FLAG_USING_MSIX)) {
  4621. /* disable RFS if falling back to INTA */
  4622. bp->dev->hw_features &= ~NETIF_F_NTUPLE;
  4623. bp->flags &= ~BNXT_FLAG_RFS;
  4624. }
  4625. rc = bnxt_alloc_mem(bp, irq_re_init);
  4626. if (rc) {
  4627. netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
  4628. goto open_err_free_mem;
  4629. }
  4630. if (irq_re_init) {
  4631. bnxt_init_napi(bp);
  4632. rc = bnxt_request_irq(bp);
  4633. if (rc) {
  4634. netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
  4635. goto open_err;
  4636. }
  4637. }
  4638. bnxt_enable_napi(bp);
  4639. rc = bnxt_init_nic(bp, irq_re_init);
  4640. if (rc) {
  4641. netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
  4642. goto open_err;
  4643. }
  4644. if (link_re_init) {
  4645. rc = bnxt_update_phy_setting(bp);
  4646. if (rc)
  4647. netdev_warn(bp->dev, "failed to update phy settings\n");
  4648. }
  4649. if (irq_re_init)
  4650. udp_tunnel_get_rx_info(bp->dev);
  4651. set_bit(BNXT_STATE_OPEN, &bp->state);
  4652. bnxt_enable_int(bp);
  4653. /* Enable TX queues */
  4654. bnxt_tx_enable(bp);
  4655. mod_timer(&bp->timer, jiffies + bp->current_interval);
  4656. /* Poll link status and check for SFP+ module status */
  4657. bnxt_get_port_module_status(bp);
  4658. return 0;
  4659. open_err:
  4660. bnxt_disable_napi(bp);
  4661. bnxt_del_napi(bp);
  4662. open_err_free_mem:
  4663. bnxt_free_skbs(bp);
  4664. bnxt_free_irq(bp);
  4665. bnxt_free_mem(bp, true);
  4666. return rc;
  4667. }
  4668. /* rtnl_lock held */
  4669. int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
  4670. {
  4671. int rc = 0;
  4672. rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
  4673. if (rc) {
  4674. netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
  4675. dev_close(bp->dev);
  4676. }
  4677. return rc;
  4678. }
  4679. static int bnxt_open(struct net_device *dev)
  4680. {
  4681. struct bnxt *bp = netdev_priv(dev);
  4682. int rc = 0;
  4683. if (!test_bit(BNXT_STATE_FN_RST_DONE, &bp->state)) {
  4684. rc = bnxt_hwrm_func_reset(bp);
  4685. if (rc) {
  4686. netdev_err(bp->dev, "hwrm chip reset failure rc: %x\n",
  4687. rc);
  4688. rc = -EBUSY;
  4689. return rc;
  4690. }
  4691. /* Do func_reset during the 1st PF open only to prevent killing
  4692. * the VFs when the PF is brought down and up.
  4693. */
  4694. if (BNXT_PF(bp))
  4695. set_bit(BNXT_STATE_FN_RST_DONE, &bp->state);
  4696. }
  4697. return __bnxt_open_nic(bp, true, true);
  4698. }
  4699. static void bnxt_disable_int_sync(struct bnxt *bp)
  4700. {
  4701. int i;
  4702. atomic_inc(&bp->intr_sem);
  4703. if (!netif_running(bp->dev))
  4704. return;
  4705. bnxt_disable_int(bp);
  4706. for (i = 0; i < bp->cp_nr_rings; i++)
  4707. synchronize_irq(bp->irq_tbl[i].vector);
  4708. }
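/* Core close path: wait for any pending SR-IOV configuration, stop the TX
 * queues, let the slow-path task drain, free firmware resources, quiesce
 * NAPI and interrupts, and release SKBs plus (optionally) IRQs and memory.
 */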
  4709. int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
  4710. {
  4711. int rc = 0;
  4712. #ifdef CONFIG_BNXT_SRIOV
  4713. if (bp->sriov_cfg) {
  4714. rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
  4715. !bp->sriov_cfg,
  4716. BNXT_SRIOV_CFG_WAIT_TMO);
  4717. if (rc)
  4718. netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
  4719. }
  4720. #endif
4721. /* Change device state to avoid TX queue wake-ups */
  4722. bnxt_tx_disable(bp);
  4723. clear_bit(BNXT_STATE_OPEN, &bp->state);
  4724. smp_mb__after_atomic();
  4725. while (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state))
  4726. msleep(20);
  4727. /* Flush rings before disabling interrupts */
  4728. bnxt_shutdown_nic(bp, irq_re_init);
  4729. /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
  4730. bnxt_disable_napi(bp);
  4731. bnxt_disable_int_sync(bp);
  4732. del_timer_sync(&bp->timer);
  4733. bnxt_free_skbs(bp);
  4734. if (irq_re_init) {
  4735. bnxt_free_irq(bp);
  4736. bnxt_del_napi(bp);
  4737. }
  4738. bnxt_free_mem(bp, irq_re_init);
  4739. return rc;
  4740. }
  4741. static int bnxt_close(struct net_device *dev)
  4742. {
  4743. struct bnxt *bp = netdev_priv(dev);
  4744. bnxt_close_nic(bp, true, true);
  4745. bnxt_hwrm_shutdown_link(bp);
  4746. return 0;
  4747. }
  4748. /* rtnl_lock held */
  4749. static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
  4750. {
  4751. switch (cmd) {
  4752. case SIOCGMIIPHY:
  4753. /* fallthru */
  4754. case SIOCGMIIREG: {
  4755. if (!netif_running(dev))
  4756. return -EAGAIN;
  4757. return 0;
  4758. }
  4759. case SIOCSMIIREG:
  4760. if (!netif_running(dev))
  4761. return -EAGAIN;
  4762. return 0;
  4763. default:
  4764. /* do nothing */
  4765. break;
  4766. }
  4767. return -EOPNOTSUPP;
  4768. }
  4769. static struct rtnl_link_stats64 *
  4770. bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
  4771. {
  4772. u32 i;
  4773. struct bnxt *bp = netdev_priv(dev);
  4774. memset(stats, 0, sizeof(struct rtnl_link_stats64));
  4775. if (!bp->bnapi)
  4776. return stats;
  4777. /* TODO check if we need to synchronize with bnxt_close path */
  4778. for (i = 0; i < bp->cp_nr_rings; i++) {
  4779. struct bnxt_napi *bnapi = bp->bnapi[i];
  4780. struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
  4781. struct ctx_hw_stats *hw_stats = cpr->hw_stats;
  4782. stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts);
  4783. stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts);
  4784. stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts);
  4785. stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts);
  4786. stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts);
  4787. stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts);
  4788. stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes);
  4789. stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes);
  4790. stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes);
  4791. stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes);
  4792. stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes);
  4793. stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes);
  4794. stats->rx_missed_errors +=
  4795. le64_to_cpu(hw_stats->rx_discard_pkts);
  4796. stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts);
  4797. stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts);
  4798. }
  4799. if (bp->flags & BNXT_FLAG_PORT_STATS) {
  4800. struct rx_port_stats *rx = bp->hw_rx_port_stats;
  4801. struct tx_port_stats *tx = bp->hw_tx_port_stats;
  4802. stats->rx_crc_errors = le64_to_cpu(rx->rx_fcs_err_frames);
  4803. stats->rx_frame_errors = le64_to_cpu(rx->rx_align_err_frames);
  4804. stats->rx_length_errors = le64_to_cpu(rx->rx_undrsz_frames) +
  4805. le64_to_cpu(rx->rx_ovrsz_frames) +
  4806. le64_to_cpu(rx->rx_runt_frames);
  4807. stats->rx_errors = le64_to_cpu(rx->rx_false_carrier_frames) +
  4808. le64_to_cpu(rx->rx_jbr_frames);
  4809. stats->collisions = le64_to_cpu(tx->tx_total_collisions);
  4810. stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns);
  4811. stats->tx_errors = le64_to_cpu(tx->tx_err);
  4812. }
  4813. return stats;
  4814. }
  4815. static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
  4816. {
  4817. struct net_device *dev = bp->dev;
  4818. struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
  4819. struct netdev_hw_addr *ha;
  4820. u8 *haddr;
  4821. int mc_count = 0;
  4822. bool update = false;
  4823. int off = 0;
  4824. netdev_for_each_mc_addr(ha, dev) {
  4825. if (mc_count >= BNXT_MAX_MC_ADDRS) {
  4826. *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
  4827. vnic->mc_list_count = 0;
  4828. return false;
  4829. }
  4830. haddr = ha->addr;
  4831. if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
  4832. memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
  4833. update = true;
  4834. }
  4835. off += ETH_ALEN;
  4836. mc_count++;
  4837. }
  4838. if (mc_count)
  4839. *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
  4840. if (mc_count != vnic->mc_list_count) {
  4841. vnic->mc_list_count = mc_count;
  4842. update = true;
  4843. }
  4844. return update;
  4845. }
  4846. static bool bnxt_uc_list_updated(struct bnxt *bp)
  4847. {
  4848. struct net_device *dev = bp->dev;
  4849. struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
  4850. struct netdev_hw_addr *ha;
  4851. int off = 0;
  4852. if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
  4853. return true;
  4854. netdev_for_each_uc_addr(ha, dev) {
  4855. if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
  4856. return true;
  4857. off += ETH_ALEN;
  4858. }
  4859. return false;
  4860. }
  4861. static void bnxt_set_rx_mode(struct net_device *dev)
  4862. {
  4863. struct bnxt *bp = netdev_priv(dev);
  4864. struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
  4865. u32 mask = vnic->rx_mask;
  4866. bool mc_update = false;
  4867. bool uc_update;
  4868. if (!netif_running(dev))
  4869. return;
  4870. mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
  4871. CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
  4872. CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST);
  4873. if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
  4874. mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
  4875. uc_update = bnxt_uc_list_updated(bp);
  4876. if (dev->flags & IFF_ALLMULTI) {
  4877. mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
  4878. vnic->mc_list_count = 0;
  4879. } else {
  4880. mc_update = bnxt_mc_list_updated(bp, &mask);
  4881. }
  4882. if (mask != vnic->rx_mask || uc_update || mc_update) {
  4883. vnic->rx_mask = mask;
  4884. set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
  4885. schedule_work(&bp->sp_task);
  4886. }
  4887. }
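/* Synchronize the unicast filter list and RX mask with the firmware: free
 * stale L2 filters, reprogram the current UC list (falling back to
 * promiscuous mode if it exceeds BNXT_MAX_UC_ADDRS), then apply the mask.
 */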
  4888. static int bnxt_cfg_rx_mode(struct bnxt *bp)
  4889. {
  4890. struct net_device *dev = bp->dev;
  4891. struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
  4892. struct netdev_hw_addr *ha;
  4893. int i, off = 0, rc;
  4894. bool uc_update;
  4895. netif_addr_lock_bh(dev);
  4896. uc_update = bnxt_uc_list_updated(bp);
  4897. netif_addr_unlock_bh(dev);
  4898. if (!uc_update)
  4899. goto skip_uc;
  4900. mutex_lock(&bp->hwrm_cmd_lock);
  4901. for (i = 1; i < vnic->uc_filter_count; i++) {
  4902. struct hwrm_cfa_l2_filter_free_input req = {0};
  4903. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1,
  4904. -1);
  4905. req.l2_filter_id = vnic->fw_l2_filter_id[i];
  4906. rc = _hwrm_send_message(bp, &req, sizeof(req),
  4907. HWRM_CMD_TIMEOUT);
  4908. }
  4909. mutex_unlock(&bp->hwrm_cmd_lock);
  4910. vnic->uc_filter_count = 1;
  4911. netif_addr_lock_bh(dev);
  4912. if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
  4913. vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
  4914. } else {
  4915. netdev_for_each_uc_addr(ha, dev) {
  4916. memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
  4917. off += ETH_ALEN;
  4918. vnic->uc_filter_count++;
  4919. }
  4920. }
  4921. netif_addr_unlock_bh(dev);
  4922. for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
  4923. rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
  4924. if (rc) {
  4925. netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
  4926. rc);
  4927. vnic->uc_filter_count = i;
  4928. return rc;
  4929. }
  4930. }
  4931. skip_uc:
  4932. rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
  4933. if (rc)
  4934. netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n",
  4935. rc);
  4936. return rc;
  4937. }
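/* RFS needs one extra VNIC and RSS context per RX ring; report false when
 * running as a VF, without MSI-X, or when the PF lacks enough of either.
 */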
  4938. static bool bnxt_rfs_capable(struct bnxt *bp)
  4939. {
  4940. #ifdef CONFIG_RFS_ACCEL
  4941. struct bnxt_pf_info *pf = &bp->pf;
  4942. int vnics;
  4943. if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_MSIX_CAP))
  4944. return false;
  4945. vnics = 1 + bp->rx_nr_rings;
  4946. if (vnics > pf->max_rsscos_ctxs || vnics > pf->max_vnics) {
  4947. netdev_warn(bp->dev,
  4948. "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
  4949. min(pf->max_rsscos_ctxs - 1, pf->max_vnics - 1));
  4950. return false;
  4951. }
  4952. return true;
  4953. #else
  4954. return false;
  4955. #endif
  4956. }
  4957. static netdev_features_t bnxt_fix_features(struct net_device *dev,
  4958. netdev_features_t features)
  4959. {
  4960. struct bnxt *bp = netdev_priv(dev);
  4961. if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
  4962. features &= ~NETIF_F_NTUPLE;
4963. /* Both CTAG and STAG VLAN acceleration on the RX side have to be
  4964. * turned on or off together.
  4965. */
  4966. if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) !=
  4967. (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) {
  4968. if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
  4969. features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
  4970. NETIF_F_HW_VLAN_STAG_RX);
  4971. else
  4972. features |= NETIF_F_HW_VLAN_CTAG_RX |
  4973. NETIF_F_HW_VLAN_STAG_RX;
  4974. }
  4975. #ifdef CONFIG_BNXT_SRIOV
  4976. if (BNXT_VF(bp)) {
  4977. if (bp->vf.vlan) {
  4978. features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
  4979. NETIF_F_HW_VLAN_STAG_RX);
  4980. }
  4981. }
  4982. #endif
  4983. return features;
  4984. }
  4985. static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
  4986. {
  4987. struct bnxt *bp = netdev_priv(dev);
  4988. u32 flags = bp->flags;
  4989. u32 changes;
  4990. int rc = 0;
  4991. bool re_init = false;
  4992. bool update_tpa = false;
  4993. flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
  4994. if ((features & NETIF_F_GRO) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
  4995. flags |= BNXT_FLAG_GRO;
  4996. if (features & NETIF_F_LRO)
  4997. flags |= BNXT_FLAG_LRO;
  4998. if (features & NETIF_F_HW_VLAN_CTAG_RX)
  4999. flags |= BNXT_FLAG_STRIP_VLAN;
  5000. if (features & NETIF_F_NTUPLE)
  5001. flags |= BNXT_FLAG_RFS;
  5002. changes = flags ^ bp->flags;
  5003. if (changes & BNXT_FLAG_TPA) {
  5004. update_tpa = true;
  5005. if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
  5006. (flags & BNXT_FLAG_TPA) == 0)
  5007. re_init = true;
  5008. }
  5009. if (changes & ~BNXT_FLAG_TPA)
  5010. re_init = true;
  5011. if (flags != bp->flags) {
  5012. u32 old_flags = bp->flags;
  5013. bp->flags = flags;
  5014. if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
  5015. if (update_tpa)
  5016. bnxt_set_ring_params(bp);
  5017. return rc;
  5018. }
  5019. if (re_init) {
  5020. bnxt_close_nic(bp, false, false);
  5021. if (update_tpa)
  5022. bnxt_set_ring_params(bp);
  5023. return bnxt_open_nic(bp, false, false);
  5024. }
  5025. if (update_tpa) {
  5026. rc = bnxt_set_tpa(bp,
  5027. (flags & BNXT_FLAG_TPA) ?
  5028. true : false);
  5029. if (rc)
  5030. bp->flags = old_flags;
  5031. }
  5032. }
  5033. return rc;
  5034. }
  5035. static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
  5036. {
  5037. struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
  5038. int i = bnapi->index;
  5039. if (!txr)
  5040. return;
  5041. netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
  5042. i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
  5043. txr->tx_cons);
  5044. }
  5045. static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
  5046. {
  5047. struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
  5048. int i = bnapi->index;
  5049. if (!rxr)
  5050. return;
  5051. netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
  5052. i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
  5053. rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
  5054. rxr->rx_sw_agg_prod);
  5055. }
  5056. static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
  5057. {
  5058. struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
  5059. int i = bnapi->index;
  5060. netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
  5061. i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
  5062. }
  5063. static void bnxt_dbg_dump_states(struct bnxt *bp)
  5064. {
  5065. int i;
  5066. struct bnxt_napi *bnapi;
  5067. for (i = 0; i < bp->cp_nr_rings; i++) {
  5068. bnapi = bp->bnapi[i];
  5069. if (netif_msg_drv(bp)) {
  5070. bnxt_dump_tx_sw_state(bnapi);
  5071. bnxt_dump_rx_sw_state(bnapi);
  5072. bnxt_dump_cp_sw_state(bnapi);
  5073. }
  5074. }
  5075. }
  5076. static void bnxt_reset_task(struct bnxt *bp, bool silent)
  5077. {
  5078. if (!silent)
  5079. bnxt_dbg_dump_states(bp);
  5080. if (netif_running(bp->dev)) {
  5081. bnxt_close_nic(bp, false, false);
  5082. bnxt_open_nic(bp, false, false);
  5083. }
  5084. }
  5085. static void bnxt_tx_timeout(struct net_device *dev)
  5086. {
  5087. struct bnxt *bp = netdev_priv(dev);
  5088. netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
  5089. set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
  5090. schedule_work(&bp->sp_task);
  5091. }
  5092. #ifdef CONFIG_NET_POLL_CONTROLLER
  5093. static void bnxt_poll_controller(struct net_device *dev)
  5094. {
  5095. struct bnxt *bp = netdev_priv(dev);
  5096. int i;
  5097. for (i = 0; i < bp->cp_nr_rings; i++) {
  5098. struct bnxt_irq *irq = &bp->irq_tbl[i];
  5099. disable_irq(irq->vector);
  5100. irq->handler(irq->vector, bp->bnapi[i]);
  5101. enable_irq(irq->vector);
  5102. }
  5103. }
  5104. #endif
  5105. static void bnxt_timer(unsigned long data)
  5106. {
  5107. struct bnxt *bp = (struct bnxt *)data;
  5108. struct net_device *dev = bp->dev;
  5109. if (!netif_running(dev))
  5110. return;
  5111. if (atomic_read(&bp->intr_sem) != 0)
  5112. goto bnxt_restart_timer;
  5113. if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS)) {
  5114. set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
  5115. schedule_work(&bp->sp_task);
  5116. }
  5117. bnxt_restart_timer:
  5118. mod_timer(&bp->timer, jiffies + bp->current_interval);
  5119. }
  5120. static void bnxt_rtnl_lock_sp(struct bnxt *bp)
  5121. {
  5122. /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
  5123. * set. If the device is being closed, bnxt_close() may be holding
  5124. * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we
  5125. * must clear BNXT_STATE_IN_SP_TASK before holding rtnl().
  5126. */
  5127. clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
  5128. rtnl_lock();
  5129. }
  5130. static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
  5131. {
  5132. set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
  5133. rtnl_unlock();
  5134. }
  5135. /* Only called from bnxt_sp_task() */
  5136. static void bnxt_reset(struct bnxt *bp, bool silent)
  5137. {
  5138. bnxt_rtnl_lock_sp(bp);
  5139. if (test_bit(BNXT_STATE_OPEN, &bp->state))
  5140. bnxt_reset_task(bp, silent);
  5141. bnxt_rtnl_unlock_sp(bp);
  5142. }
  5143. static void bnxt_cfg_ntp_filters(struct bnxt *);
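/* Slow-path work handler: services the sp_event bits set from interrupt
 * and timer context (RX mask updates, ntuple filters, forwarded VF
 * requests, tunnel ports, port stats, link and module events, resets).
 */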
  5144. static void bnxt_sp_task(struct work_struct *work)
  5145. {
  5146. struct bnxt *bp = container_of(work, struct bnxt, sp_task);
  5147. set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
  5148. smp_mb__after_atomic();
  5149. if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
  5150. clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
  5151. return;
  5152. }
  5153. if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
  5154. bnxt_cfg_rx_mode(bp);
  5155. if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
  5156. bnxt_cfg_ntp_filters(bp);
  5157. if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
  5158. bnxt_hwrm_exec_fwd_req(bp);
  5159. if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
  5160. bnxt_hwrm_tunnel_dst_port_alloc(
  5161. bp, bp->vxlan_port,
  5162. TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
  5163. }
  5164. if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) {
  5165. bnxt_hwrm_tunnel_dst_port_free(
  5166. bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
  5167. }
  5168. if (test_and_clear_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event)) {
  5169. bnxt_hwrm_tunnel_dst_port_alloc(
  5170. bp, bp->nge_port,
  5171. TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
  5172. }
  5173. if (test_and_clear_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event)) {
  5174. bnxt_hwrm_tunnel_dst_port_free(
  5175. bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
  5176. }
  5177. if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
  5178. bnxt_hwrm_port_qstats(bp);
  5179. /* These functions below will clear BNXT_STATE_IN_SP_TASK. They
  5180. * must be the last functions to be called before exiting.
  5181. */
  5182. if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
  5183. int rc = 0;
  5184. if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
  5185. &bp->sp_event))
  5186. bnxt_hwrm_phy_qcaps(bp);
  5187. bnxt_rtnl_lock_sp(bp);
  5188. if (test_bit(BNXT_STATE_OPEN, &bp->state))
  5189. rc = bnxt_update_link(bp, true);
  5190. bnxt_rtnl_unlock_sp(bp);
  5191. if (rc)
  5192. netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
  5193. rc);
  5194. }
  5195. if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
  5196. bnxt_rtnl_lock_sp(bp);
  5197. if (test_bit(BNXT_STATE_OPEN, &bp->state))
  5198. bnxt_get_port_module_status(bp);
  5199. bnxt_rtnl_unlock_sp(bp);
  5200. }
  5201. if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
  5202. bnxt_reset(bp, false);
  5203. if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
  5204. bnxt_reset(bp, true);
  5205. smp_mb__before_atomic();
  5206. clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
  5207. }
  5208. static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
  5209. {
  5210. int rc;
  5211. struct bnxt *bp = netdev_priv(dev);
  5212. SET_NETDEV_DEV(dev, &pdev->dev);
  5213. /* enable device (incl. PCI PM wakeup), and bus-mastering */
  5214. rc = pci_enable_device(pdev);
  5215. if (rc) {
  5216. dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
  5217. goto init_err;
  5218. }
  5219. if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
  5220. dev_err(&pdev->dev,
  5221. "Cannot find PCI device base address, aborting\n");
  5222. rc = -ENODEV;
  5223. goto init_err_disable;
  5224. }
  5225. rc = pci_request_regions(pdev, DRV_MODULE_NAME);
  5226. if (rc) {
  5227. dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
  5228. goto init_err_disable;
  5229. }
  5230. if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
  5231. dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
  5232. dev_err(&pdev->dev, "System does not support DMA, aborting\n");
  5233. goto init_err_disable;
  5234. }
  5235. pci_set_master(pdev);
  5236. bp->dev = dev;
  5237. bp->pdev = pdev;
  5238. bp->bar0 = pci_ioremap_bar(pdev, 0);
  5239. if (!bp->bar0) {
  5240. dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
  5241. rc = -ENOMEM;
  5242. goto init_err_release;
  5243. }
  5244. bp->bar1 = pci_ioremap_bar(pdev, 2);
  5245. if (!bp->bar1) {
  5246. dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n");
  5247. rc = -ENOMEM;
  5248. goto init_err_release;
  5249. }
  5250. bp->bar2 = pci_ioremap_bar(pdev, 4);
  5251. if (!bp->bar2) {
  5252. dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
  5253. rc = -ENOMEM;
  5254. goto init_err_release;
  5255. }
  5256. pci_enable_pcie_error_reporting(pdev);
  5257. INIT_WORK(&bp->sp_task, bnxt_sp_task);
  5258. spin_lock_init(&bp->ntp_fltr_lock);
  5259. bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
  5260. bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
5261. /* tick values in microseconds */
  5262. bp->rx_coal_ticks = 12;
  5263. bp->rx_coal_bufs = 30;
  5264. bp->rx_coal_ticks_irq = 1;
  5265. bp->rx_coal_bufs_irq = 2;
  5266. bp->tx_coal_ticks = 25;
  5267. bp->tx_coal_bufs = 30;
  5268. bp->tx_coal_ticks_irq = 2;
  5269. bp->tx_coal_bufs_irq = 2;
  5270. bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
  5271. init_timer(&bp->timer);
  5272. bp->timer.data = (unsigned long)bp;
  5273. bp->timer.function = bnxt_timer;
  5274. bp->current_interval = BNXT_TIMER_INTERVAL;
  5275. clear_bit(BNXT_STATE_OPEN, &bp->state);
  5276. return 0;
  5277. init_err_release:
  5278. if (bp->bar2) {
  5279. pci_iounmap(pdev, bp->bar2);
  5280. bp->bar2 = NULL;
  5281. }
  5282. if (bp->bar1) {
  5283. pci_iounmap(pdev, bp->bar1);
  5284. bp->bar1 = NULL;
  5285. }
  5286. if (bp->bar0) {
  5287. pci_iounmap(pdev, bp->bar0);
  5288. bp->bar0 = NULL;
  5289. }
  5290. pci_release_regions(pdev);
  5291. init_err_disable:
  5292. pci_disable_device(pdev);
  5293. init_err:
  5294. return rc;
  5295. }
  5296. /* rtnl_lock held */
  5297. static int bnxt_change_mac_addr(struct net_device *dev, void *p)
  5298. {
  5299. struct sockaddr *addr = p;
  5300. struct bnxt *bp = netdev_priv(dev);
  5301. int rc = 0;
  5302. if (!is_valid_ether_addr(addr->sa_data))
  5303. return -EADDRNOTAVAIL;
  5304. rc = bnxt_approve_mac(bp, addr->sa_data);
  5305. if (rc)
  5306. return rc;
  5307. if (ether_addr_equal(addr->sa_data, dev->dev_addr))
  5308. return 0;
  5309. memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
  5310. if (netif_running(dev)) {
  5311. bnxt_close_nic(bp, false, false);
  5312. rc = bnxt_open_nic(bp, false, false);
  5313. }
  5314. return rc;
  5315. }
  5316. /* rtnl_lock held */
  5317. static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
  5318. {
  5319. struct bnxt *bp = netdev_priv(dev);
  5320. if (new_mtu < 60 || new_mtu > 9500)
  5321. return -EINVAL;
  5322. if (netif_running(dev))
  5323. bnxt_close_nic(bp, false, false);
  5324. dev->mtu = new_mtu;
  5325. bnxt_set_ring_params(bp);
  5326. if (netif_running(dev))
  5327. return bnxt_open_nic(bp, false, false);
  5328. return 0;
  5329. }
  5330. static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
  5331. struct tc_to_netdev *ntc)
  5332. {
  5333. struct bnxt *bp = netdev_priv(dev);
  5334. bool sh = false;
  5335. u8 tc;
  5336. if (ntc->type != TC_SETUP_MQPRIO)
  5337. return -EINVAL;
  5338. tc = ntc->tc;
  5339. if (tc > bp->max_tc) {
  5340. netdev_err(dev, "too many traffic classes requested: %d Max supported is %d\n",
  5341. tc, bp->max_tc);
  5342. return -EINVAL;
  5343. }
  5344. if (netdev_get_num_tc(dev) == tc)
  5345. return 0;
  5346. if (bp->flags & BNXT_FLAG_SHARED_RINGS)
  5347. sh = true;
  5348. if (tc) {
  5349. int max_rx_rings, max_tx_rings, rc;
  5350. rc = bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, sh);
  5351. if (rc || bp->tx_nr_rings_per_tc * tc > max_tx_rings)
  5352. return -ENOMEM;
  5353. }
  5354. /* Needs to close the device and do hw resource re-allocations */
  5355. if (netif_running(bp->dev))
  5356. bnxt_close_nic(bp, true, false);
  5357. if (tc) {
  5358. bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
  5359. netdev_set_num_tc(dev, tc);
  5360. } else {
  5361. bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
  5362. netdev_reset_tc(dev);
  5363. }
  5364. bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
  5365. bp->tx_nr_rings + bp->rx_nr_rings;
  5366. bp->num_stat_ctxs = bp->cp_nr_rings;
  5367. if (netif_running(bp->dev))
  5368. return bnxt_open_nic(bp, true, false);
  5369. return 0;
  5370. }
#ifdef CONFIG_RFS_ACCEL
static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
			    struct bnxt_ntuple_filter *f2)
{
	struct flow_keys *keys1 = &f1->fkeys;
	struct flow_keys *keys2 = &f2->fkeys;

	if (keys1->addrs.v4addrs.src == keys2->addrs.v4addrs.src &&
	    keys1->addrs.v4addrs.dst == keys2->addrs.v4addrs.dst &&
	    keys1->ports.ports == keys2->ports.ports &&
	    keys1->basic.ip_proto == keys2->basic.ip_proto &&
	    keys1->basic.n_proto == keys2->basic.n_proto &&
	    ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
	    ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
		return true;

	return false;
}

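/* .ndo_rx_flow_steer handler.  Dissects the flow keys from the skb,
 * drops duplicates already present in the hash table, reserves a
 * filter ID from the ntuple bitmap, and defers the actual HWRM filter
 * programming to the sp_task worker (bnxt_cfg_ntp_filters).
 */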
static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
			      u16 rxq_index, u32 flow_id)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_ntuple_filter *fltr, *new_fltr;
	struct flow_keys *fkeys;
	struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
	int rc = 0, idx, bit_id, l2_idx = 0;
	struct hlist_head *head;

	if (skb->encapsulation)
		return -EPROTONOSUPPORT;

	if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
		int off = 0, j;

		netif_addr_lock_bh(dev);
		for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
			if (ether_addr_equal(eth->h_dest,
					     vnic->uc_list + off)) {
				l2_idx = j + 1;
				break;
			}
		}
		netif_addr_unlock_bh(dev);
		if (!l2_idx)
			return -EINVAL;
	}
	new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
	if (!new_fltr)
		return -ENOMEM;

	fkeys = &new_fltr->fkeys;
	if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
		rc = -EPROTONOSUPPORT;
		goto err_free;
	}

	if ((fkeys->basic.n_proto != htons(ETH_P_IP)) ||
	    ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
	     (fkeys->basic.ip_proto != IPPROTO_UDP))) {
		rc = -EPROTONOSUPPORT;
		goto err_free;
	}

	memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
	memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);

	idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
	head = &bp->ntp_fltr_hash_tbl[idx];
	rcu_read_lock();
	hlist_for_each_entry_rcu(fltr, head, hash) {
		if (bnxt_fltr_match(fltr, new_fltr)) {
			rcu_read_unlock();
			rc = 0;
			goto err_free;
		}
	}
	rcu_read_unlock();

	spin_lock_bh(&bp->ntp_fltr_lock);
	bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
					 BNXT_NTP_FLTR_MAX_FLTR, 0);
	if (bit_id < 0) {
		spin_unlock_bh(&bp->ntp_fltr_lock);
		rc = -ENOMEM;
		goto err_free;
	}

	new_fltr->sw_id = (u16)bit_id;
	new_fltr->flow_id = flow_id;
	new_fltr->l2_fltr_idx = l2_idx;
	new_fltr->rxq = rxq_index;
	hlist_add_head_rcu(&new_fltr->hash, head);
	bp->ntp_fltr_count++;
	spin_unlock_bh(&bp->ntp_fltr_lock);

	set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
	schedule_work(&bp->sp_task);

	return new_fltr->sw_id;

err_free:
	kfree(new_fltr);
	return rc;
}

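/* Called from the sp_task worker.  Walks the ntuple filter hash table,
 * programs new filters via HWRM, and frees filters whose flows have
 * expired according to rps_may_expire_flow().
 */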
static void bnxt_cfg_ntp_filters(struct bnxt *bp)
{
	int i;

	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
		struct hlist_head *head;
		struct hlist_node *tmp;
		struct bnxt_ntuple_filter *fltr;
		int rc;

		head = &bp->ntp_fltr_hash_tbl[i];
		hlist_for_each_entry_safe(fltr, tmp, head, hash) {
			bool del = false;

			if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
				if (rps_may_expire_flow(bp->dev, fltr->rxq,
							fltr->flow_id,
							fltr->sw_id)) {
					bnxt_hwrm_cfa_ntuple_filter_free(bp,
									 fltr);
					del = true;
				}
			} else {
				rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
								       fltr);
				if (rc)
					del = true;
				else
					set_bit(BNXT_FLTR_VALID, &fltr->state);
			}

			if (del) {
				spin_lock_bh(&bp->ntp_fltr_lock);
				hlist_del_rcu(&fltr->hash);
				bp->ntp_fltr_count--;
				spin_unlock_bh(&bp->ntp_fltr_lock);
				synchronize_rcu();
				clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
				kfree(fltr);
			}
		}
	}
	if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
		netdev_info(bp->dev, "Receive PF driver unload event!");
}

#else

static void bnxt_cfg_ntp_filters(struct bnxt *bp)
{
}

#endif /* CONFIG_RFS_ACCEL */

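/* .ndo_udp_tunnel_add callback.  Records the offloaded VXLAN or GENEVE
 * UDP port (only one port of each type is supported) and kicks the
 * sp_task worker to program it into the firmware.
 */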
static void bnxt_udp_tunnel_add(struct net_device *dev,
				struct udp_tunnel_info *ti)
{
	struct bnxt *bp = netdev_priv(dev);

	if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
		return;

	if (!netif_running(dev))
		return;

	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		if (bp->vxlan_port_cnt && bp->vxlan_port != ti->port)
			return;

		bp->vxlan_port_cnt++;
		if (bp->vxlan_port_cnt == 1) {
			bp->vxlan_port = ti->port;
			set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
			schedule_work(&bp->sp_task);
		}
		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		if (bp->nge_port_cnt && bp->nge_port != ti->port)
			return;

		bp->nge_port_cnt++;
		if (bp->nge_port_cnt == 1) {
			bp->nge_port = ti->port;
			set_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event);
		}
		break;
	default:
		return;
	}

	schedule_work(&bp->sp_task);
}

static void bnxt_udp_tunnel_del(struct net_device *dev,
				struct udp_tunnel_info *ti)
{
	struct bnxt *bp = netdev_priv(dev);

	if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
		return;

	if (!netif_running(dev))
		return;

	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		if (!bp->vxlan_port_cnt || bp->vxlan_port != ti->port)
			return;
		bp->vxlan_port_cnt--;

		if (bp->vxlan_port_cnt != 0)
			return;

		set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event);
		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		if (!bp->nge_port_cnt || bp->nge_port != ti->port)
			return;
		bp->nge_port_cnt--;

		if (bp->nge_port_cnt != 0)
			return;

		set_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event);
		break;
	default:
		return;
	}

	schedule_work(&bp->sp_task);
}

static const struct net_device_ops bnxt_netdev_ops = {
	.ndo_open		= bnxt_open,
	.ndo_start_xmit		= bnxt_start_xmit,
	.ndo_stop		= bnxt_close,
	.ndo_get_stats64	= bnxt_get_stats64,
	.ndo_set_rx_mode	= bnxt_set_rx_mode,
	.ndo_do_ioctl		= bnxt_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnxt_change_mac_addr,
	.ndo_change_mtu		= bnxt_change_mtu,
	.ndo_fix_features	= bnxt_fix_features,
	.ndo_set_features	= bnxt_set_features,
	.ndo_tx_timeout		= bnxt_tx_timeout,
#ifdef CONFIG_BNXT_SRIOV
	.ndo_get_vf_config	= bnxt_get_vf_config,
	.ndo_set_vf_mac		= bnxt_set_vf_mac,
	.ndo_set_vf_vlan	= bnxt_set_vf_vlan,
	.ndo_set_vf_rate	= bnxt_set_vf_bw,
	.ndo_set_vf_link_state	= bnxt_set_vf_link_state,
	.ndo_set_vf_spoofchk	= bnxt_set_vf_spoofchk,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= bnxt_poll_controller,
#endif
	.ndo_setup_tc		= bnxt_setup_tc,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= bnxt_rx_flow_steer,
#endif
	.ndo_udp_tunnel_add	= bnxt_udp_tunnel_add,
	.ndo_udp_tunnel_del	= bnxt_udp_tunnel_del,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= bnxt_busy_poll,
#endif
};

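/* PCI remove callback: tears down SR-IOV, unregisters the netdev,
 * cancels the sp_task worker, releases HWRM resources and unmaps the
 * BARs before disabling the PCI device.
 */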
static void bnxt_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(dev);

	if (BNXT_PF(bp))
		bnxt_sriov_disable(bp);

	pci_disable_pcie_error_reporting(pdev);
	unregister_netdev(dev);
	cancel_work_sync(&bp->sp_task);
	bp->sp_event = 0;

	bnxt_hwrm_func_drv_unrgtr(bp);
	bnxt_free_hwrm_resources(bp);
	pci_iounmap(pdev, bp->bar2);
	pci_iounmap(pdev, bp->bar1);
	pci_iounmap(pdev, bp->bar0);
	free_netdev(dev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

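/* Query PHY capabilities and the current link state from firmware and
 * seed the driver's ethtool link settings (autoneg, speed, flow
 * control) from the NVM-configured defaults.
 */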
static int bnxt_probe_phy(struct bnxt *bp)
{
	int rc = 0;
	struct bnxt_link_info *link_info = &bp->link_info;

	rc = bnxt_hwrm_phy_qcaps(bp);
	if (rc) {
		netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
			   rc);
		return rc;
	}

	rc = bnxt_update_link(bp, false);
	if (rc) {
		netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
			   rc);
		return rc;
	}

	/* Older firmware does not have supported_auto_speeds, so assume
	 * that all supported speeds can be autonegotiated.
	 */
	if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
		link_info->support_auto_speeds = link_info->support_speeds;

	/* Initialize the ethtool setting copy with NVM settings */
	if (BNXT_AUTO_MODE(link_info->auto_mode)) {
		link_info->autoneg = BNXT_AUTONEG_SPEED;
		if (bp->hwrm_spec_code >= 0x10201) {
			if (link_info->auto_pause_setting &
			    PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
				link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
		} else {
			link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
		}
		link_info->advertising = link_info->auto_link_speeds;
	} else {
		link_info->req_link_speed = link_info->force_link_speed;
		link_info->req_duplex = link_info->duplex_setting;
	}
	if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
		link_info->req_flow_ctrl =
			link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
	else
		link_info->req_flow_ctrl = link_info->force_pause_setting;
	return rc;
}

static int bnxt_get_max_irq(struct pci_dev *pdev)
{
	u16 ctrl;

	if (!pdev->msix_cap)
		return 1;

	pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
	return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
}

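/* Compute the maximum RX/TX/completion rings available to this
 * function from the PF or VF resource limits reported by firmware,
 * accounting for Nitro A0 reserved rings and aggregation rings.
 */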
static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
				int *max_cp)
{
	int max_ring_grps = 0;

#ifdef CONFIG_BNXT_SRIOV
	if (!BNXT_PF(bp)) {
		*max_tx = bp->vf.max_tx_rings;
		*max_rx = bp->vf.max_rx_rings;
		*max_cp = min_t(int, bp->vf.max_irqs, bp->vf.max_cp_rings);
		*max_cp = min_t(int, *max_cp, bp->vf.max_stat_ctxs);
		max_ring_grps = bp->vf.max_hw_ring_grps;
	} else
#endif
	{
		*max_tx = bp->pf.max_tx_rings;
		*max_rx = bp->pf.max_rx_rings;
		*max_cp = min_t(int, bp->pf.max_irqs, bp->pf.max_cp_rings);
		*max_cp = min_t(int, *max_cp, bp->pf.max_stat_ctxs);
		max_ring_grps = bp->pf.max_hw_ring_grps;
	}
	if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
		*max_cp -= 1;
		*max_rx -= 2;
	}
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		*max_rx >>= 1;
	*max_rx = min_t(int, *max_rx, max_ring_grps);
}

int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
{
	int rx, tx, cp;

	_bnxt_get_max_rings(bp, &rx, &tx, &cp);
	if (!rx || !tx || !cp)
		return -ENOMEM;

	*max_rx = rx;
	*max_tx = tx;
	return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
}

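/* Pick the default ring counts: shared completion rings, with RX/TX
 * ring counts bounded by both the default RSS queue count and the
 * hardware maximums reported by bnxt_get_max_rings().
 */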
static int bnxt_set_dflt_rings(struct bnxt *bp)
{
	int dflt_rings, max_rx_rings, max_tx_rings, rc;
	bool sh = true;

	if (sh)
		bp->flags |= BNXT_FLAG_SHARED_RINGS;
	dflt_rings = netif_get_num_default_rss_queues();
	rc = bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, sh);
	if (rc)
		return rc;
	bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
	bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
	bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
	bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
			       bp->tx_nr_rings + bp->rx_nr_rings;
	bp->num_stat_ctxs = bp->cp_nr_rings;

	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
		bp->rx_nr_rings++;
		bp->cp_nr_rings++;
	}
	return rc;
}

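/* Log the negotiated PCIe link speed and width, or a warning if the
 * minimum link parameters cannot be determined.
 */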
static void bnxt_parse_log_pcie_link(struct bnxt *bp)
{
	enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
	enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;

	if (pcie_get_minimum_link(bp->pdev, &speed, &width) ||
	    speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN)
		netdev_info(bp->dev, "Failed to determine PCIe Link Info\n");
	else
		netdev_info(bp->dev, "PCIe: Speed %s Width x%d\n",
			    speed == PCIE_SPEED_2_5GT ? "2.5GT/s" :
			    speed == PCIE_SPEED_5_0GT ? "5.0GT/s" :
			    speed == PCIE_SPEED_8_0GT ? "8.0GT/s" :
			    "Unknown", width);
}

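/* PCI probe callback: allocates the netdev, maps the BARs via
 * bnxt_init_board(), queries firmware capabilities over HWRM, sets up
 * the default features and ring counts, probes the PHY and registers
 * the net device.
 */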
static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed;
	struct net_device *dev;
	struct bnxt *bp;
	int rc, max_irqs;

	if (pdev->device == 0x16cd && pci_is_bridge(pdev))
		return -ENODEV;

	if (version_printed++ == 0)
		pr_info("%s", version);

	max_irqs = bnxt_get_max_irq(pdev);
	dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
	if (!dev)
		return -ENOMEM;

	bp = netdev_priv(dev);

	if (bnxt_vf_pciid(ent->driver_data))
		bp->flags |= BNXT_FLAG_VF;

	if (pdev->msix_cap)
		bp->flags |= BNXT_FLAG_MSIX_CAP;

	rc = bnxt_init_board(pdev, dev);
	if (rc < 0)
		goto init_err_free;

	dev->netdev_ops = &bnxt_netdev_ops;
	dev->watchdog_timeo = BNXT_TX_TIMEOUT;
	dev->ethtool_ops = &bnxt_ethtool_ops;

	pci_set_drvdata(pdev, dev);

	rc = bnxt_alloc_hwrm_resources(bp);
	if (rc)
		goto init_err;

	mutex_init(&bp->hwrm_cmd_lock);
	rc = bnxt_hwrm_ver_get(bp);
	if (rc)
		goto init_err;

	bnxt_hwrm_fw_set_time(bp);

	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
			   NETIF_F_TSO | NETIF_F_TSO6 |
			   NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
			   NETIF_F_GSO_IPXIP4 |
			   NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
			   NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
			   NETIF_F_RXCSUM | NETIF_F_GRO;

	if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
		dev->hw_features |= NETIF_F_LRO;

	dev->hw_enc_features =
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
			NETIF_F_TSO | NETIF_F_TSO6 |
			NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
			NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
			NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
	dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
				    NETIF_F_GSO_GRE_CSUM;
	dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
			    NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
	dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
	dev->priv_flags |= IFF_UNICAST_FLT;

#ifdef CONFIG_BNXT_SRIOV
	init_waitqueue_head(&bp->sriov_cfg_wait);
#endif
	bp->gro_func = bnxt_gro_func_5730x;
	if (BNXT_CHIP_NUM_57X1X(bp->chip_num))
		bp->gro_func = bnxt_gro_func_5731x;

	rc = bnxt_hwrm_func_drv_rgtr(bp);
	if (rc)
		goto init_err;

	/* Get the MAX capabilities for this function */
	rc = bnxt_hwrm_func_qcaps(bp);
	if (rc) {
		netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
			   rc);
		rc = -1;
		goto init_err;
	}

	rc = bnxt_hwrm_queue_qportcfg(bp);
	if (rc) {
		netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n",
			   rc);
		rc = -1;
		goto init_err;
	}

	bnxt_hwrm_func_qcfg(bp);

	bnxt_set_tpa_flags(bp);
	bnxt_set_ring_params(bp);
	if (BNXT_PF(bp))
		bp->pf.max_irqs = max_irqs;
#if defined(CONFIG_BNXT_SRIOV)
	else
		bp->vf.max_irqs = max_irqs;
#endif
	bnxt_set_dflt_rings(bp);

	if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp)) {
		dev->hw_features |= NETIF_F_NTUPLE;
		if (bnxt_rfs_capable(bp)) {
			bp->flags |= BNXT_FLAG_RFS;
			dev->features |= NETIF_F_NTUPLE;
		}
	}

	if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
		bp->flags |= BNXT_FLAG_STRIP_VLAN;

	rc = bnxt_probe_phy(bp);
	if (rc)
		goto init_err;

	rc = register_netdev(dev);
	if (rc)
		goto init_err;

	netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
		    board_info[ent->driver_data].name,
		    (long)pci_resource_start(pdev, 0), dev->dev_addr);

	bnxt_parse_log_pcie_link(bp);

	return 0;

init_err:
	pci_iounmap(pdev, bp->bar0);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

init_err_free:
	free_netdev(dev);
	return rc;
}

/**
 * bnxt_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(netdev);

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();
	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(netdev))
		bnxt_close(netdev);

	/* So that func_reset will be done during slot_reset */
	clear_bit(BNXT_STATE_FN_RST_DONE, &bp->state);
	pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnxt_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(netdev);
	int err = 0;
	pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;

	netdev_info(bp->dev, "PCI Slot Reset\n");

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
	} else {
		pci_set_master(pdev);

		if (netif_running(netdev))
			err = bnxt_open(netdev);

		if (!err)
			result = PCI_ERS_RESULT_RECOVERED;
	}

	if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev))
		dev_close(netdev);

	rtnl_unlock();

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
			err); /* non-fatal, continue */
	}

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnxt_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void bnxt_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	rtnl_lock();

	netif_device_attach(netdev);

	rtnl_unlock();
}

static const struct pci_error_handlers bnxt_err_handler = {
	.error_detected	= bnxt_io_error_detected,
	.slot_reset	= bnxt_io_slot_reset,
	.resume		= bnxt_io_resume
};

static struct pci_driver bnxt_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnxt_pci_tbl,
	.probe		= bnxt_init_one,
	.remove		= bnxt_remove_one,
	.err_handler	= &bnxt_err_handler,
#if defined(CONFIG_BNXT_SRIOV)
	.sriov_configure = bnxt_sriov_configure,
#endif
};

module_pci_driver(bnxt_pci_driver);