qla_mbx.c

/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/delay.h>
#include <linux/gfp.h>

static struct mb_cmd_name {
	uint16_t cmd;
	const char *str;
} mb_str[] = {
	{MBC_GET_PORT_DATABASE, "GPDB"},
	{MBC_GET_ID_LIST, "GIDList"},
	{MBC_GET_LINK_PRIV_STATS, "Stats"},
	{MBC_GET_RESOURCE_COUNTS, "ResCnt"},
};

static const char *mb_to_str(uint16_t cmd)
{
	int i;
	struct mb_cmd_name *e;

	for (i = 0; i < ARRAY_SIZE(mb_str); i++) {
		e = mb_str + i;
		if (cmd == e->cmd)
			return e->str;
	}
	return "unknown";
}

static struct rom_cmd {
	uint16_t cmd;
} rom_cmds[] = {
	{ MBC_LOAD_RAM },
	{ MBC_EXECUTE_FIRMWARE },
	{ MBC_READ_RAM_WORD },
	{ MBC_MAILBOX_REGISTER_TEST },
	{ MBC_VERIFY_CHECKSUM },
	{ MBC_GET_FIRMWARE_VERSION },
	{ MBC_LOAD_RISC_RAM },
	{ MBC_DUMP_RISC_RAM },
	{ MBC_LOAD_RISC_RAM_EXTENDED },
	{ MBC_DUMP_RISC_RAM_EXTENDED },
	{ MBC_WRITE_RAM_WORD_EXTENDED },
	{ MBC_READ_RAM_EXTENDED },
	{ MBC_GET_RESOURCE_COUNTS },
	{ MBC_SET_FIRMWARE_OPTION },
	{ MBC_MID_INITIALIZE_FIRMWARE },
	{ MBC_GET_FIRMWARE_STATE },
	{ MBC_GET_MEM_OFFLOAD_CNTRL_STAT },
	{ MBC_GET_RETRY_COUNT },
	{ MBC_TRACE_CONTROL },
	{ MBC_INITIALIZE_MULTIQ },
	{ MBC_IOCB_COMMAND_A64 },
	{ MBC_GET_ADAPTER_LOOP_ID },
	{ MBC_READ_SFP },
	{ MBC_GET_RNID_PARAMS },
};

static int is_rom_cmd(uint16_t cmd)
{
	int i;
	struct rom_cmd *wc;

	for (i = 0; i < ARRAY_SIZE(rom_cmds); i++) {
		wc = rom_cmds + i;
		if (wc->cmd == cmd)
			return 1;
	}

	return 0;
}
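
/*
 * Note: rom_cmds[] above lists the mailbox opcodes that
 * qla2x00_mailbox_command() will still accept while an ISP abort is
 * pending; any other command is failed early with QLA_FUNCTION_TIMEOUT
 * (see the is_rom_cmd() check in qla2x00_mailbox_command() below) so it
 * cannot race with the chip reset.
 */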
/*
 * qla2x00_mailbox_command
 *	Issues a mailbox command and waits for its completion.
 *
 * Input:
 *	ha = adapter block pointer.
 *	mcp = driver internal mbx struct pointer.
 *
 * Output:
 *	mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data.
 *
 * Returns:
 *	0 : QLA_SUCCESS (command performed successfully)
 *	1 : QLA_FUNCTION_FAILED (error encountered)
 *	6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered)
 *
 * Context:
 *	Kernel context.
 */
static int
qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
{
	int rval, i;
	unsigned long flags = 0;
	device_reg_t *reg;
	uint8_t abort_active;
	uint8_t io_lock_on;
	uint16_t command = 0;
	uint16_t *iptr;
	uint16_t __iomem *optr;
	uint32_t cnt;
	uint32_t mboxes;
	unsigned long wait_time;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
	u32 chip_reset;

	ql_dbg(ql_dbg_mbx, vha, 0x1000, "Entered %s.\n", __func__);

	if (ha->pdev->error_state > pci_channel_io_frozen) {
		ql_log(ql_log_warn, vha, 0x1001,
		    "error_state is greater than pci_channel_io_frozen, "
		    "exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}

	if (vha->device_flags & DFLG_DEV_FAILED) {
		ql_log(ql_log_warn, vha, 0x1002,
		    "Device in failed state, exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}

	/* if PCI error, then avoid mbx processing.*/
	if (test_bit(PFLG_DISCONNECTED, &base_vha->dpc_flags) &&
	    test_bit(UNLOADING, &base_vha->dpc_flags)) {
		ql_log(ql_log_warn, vha, 0xd04e,
		    "PCI error, exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}

	reg = ha->iobase;
	io_lock_on = base_vha->flags.init_done;

	rval = QLA_SUCCESS;
	abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
	chip_reset = ha->chip_reset;

	if (ha->flags.pci_channel_io_perm_failure) {
		ql_log(ql_log_warn, vha, 0x1003,
		    "Perm failure on EEH timeout MBX, exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}

	if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
		/* Setting Link-Down error */
		mcp->mb[0] = MBS_LINK_DOWN_ERROR;
		ql_log(ql_log_warn, vha, 0x1004,
		    "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
		return QLA_FUNCTION_TIMEOUT;
	}

	/* check if ISP abort is active and return cmd with timeout */
	if ((test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) &&
	    !is_rom_cmd(mcp->mb[0])) {
		ql_log(ql_log_info, vha, 0x1005,
		    "Cmd 0x%x aborted with timeout since ISP Abort is pending\n",
		    mcp->mb[0]);
		return QLA_FUNCTION_TIMEOUT;
	}

	atomic_inc(&ha->num_pend_mbx_stage1);
	/*
	 * Wait for active mailbox commands to finish by waiting at most tov
	 * seconds. This is to serialize actual issuing of mailbox cmds during
	 * non ISP abort time.
	 */
	if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
		/* Timeout occurred. Return error. */
		ql_log(ql_log_warn, vha, 0xd035,
		    "Cmd access timeout, cmd=0x%x, Exiting.\n",
		    mcp->mb[0]);
		atomic_dec(&ha->num_pend_mbx_stage1);
		return QLA_FUNCTION_TIMEOUT;
	}
	atomic_dec(&ha->num_pend_mbx_stage1);
	if (ha->flags.purge_mbox || chip_reset != ha->chip_reset) {
		rval = QLA_ABORTED;
		goto premature_exit;
	}

	ha->flags.mbox_busy = 1;
	/* Save mailbox command for debug */
	ha->mcp = mcp;

	ql_dbg(ql_dbg_mbx, vha, 0x1006,
	    "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	if (ha->flags.purge_mbox || chip_reset != ha->chip_reset) {
		rval = QLA_ABORTED;
		ha->flags.mbox_busy = 0;
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		goto premature_exit;
	}

	/* Load mailbox registers. */
	if (IS_P3P_TYPE(ha))
		optr = (uint16_t __iomem *)&reg->isp82.mailbox_in[0];
	else if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha)))
		optr = (uint16_t __iomem *)&reg->isp24.mailbox0;
	else
		optr = (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 0);

	iptr = mcp->mb;
	command = mcp->mb[0];
	mboxes = mcp->out_mb;

	ql_dbg(ql_dbg_mbx, vha, 0x1111,
	    "Mailbox registers (OUT):\n");
	for (cnt = 0; cnt < ha->mbx_count; cnt++) {
		if (IS_QLA2200(ha) && cnt == 8)
			optr =
			    (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 8);
		if (mboxes & BIT_0) {
			ql_dbg(ql_dbg_mbx, vha, 0x1112,
			    "mbox[%d]<-0x%04x\n", cnt, *iptr);
			WRT_REG_WORD(optr, *iptr);
		}

		mboxes >>= 1;
		optr++;
		iptr++;
	}

	ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1117,
	    "I/O Address = %p.\n", optr);

	/* Issue set host interrupt command to send cmd out. */
	ha->flags.mbox_int = 0;
	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

	/* Unlock mbx registers and wait for interrupt */
	ql_dbg(ql_dbg_mbx, vha, 0x100f,
	    "Going to unlock irq & waiting for interrupts. "
	    "jiffies=%lx.\n", jiffies);

	/* Wait for mbx cmd completion until timeout */
	atomic_inc(&ha->num_pend_mbx_stage2);
	if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
		set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);

		if (IS_P3P_TYPE(ha)) {
			if (RD_REG_DWORD(&reg->isp82.hint) &
			    HINT_MBX_INT_PENDING) {
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
				ha->flags.mbox_busy = 0;
				atomic_dec(&ha->num_pend_mbx_stage2);
				ql_dbg(ql_dbg_mbx, vha, 0x1010,
				    "Pending mailbox timeout, exiting.\n");
				rval = QLA_FUNCTION_TIMEOUT;
				goto premature_exit;
			}
			WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING);
		} else if (IS_FWI2_CAPABLE(ha))
			WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
		else
			WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		wait_time = jiffies;
		atomic_inc(&ha->num_pend_mbx_stage3);
		if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
		    mcp->tov * HZ)) {
			ql_dbg(ql_dbg_mbx, vha, 0x117a,
			    "cmd=%x Timeout.\n", command);
			spin_lock_irqsave(&ha->hardware_lock, flags);
			clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
		} else if (ha->flags.purge_mbox ||
		    chip_reset != ha->chip_reset) {
			ha->flags.mbox_busy = 0;
			atomic_dec(&ha->num_pend_mbx_stage2);
			atomic_dec(&ha->num_pend_mbx_stage3);
			rval = QLA_ABORTED;
			goto premature_exit;
		}
		atomic_dec(&ha->num_pend_mbx_stage3);

		if (time_after(jiffies, wait_time + 5 * HZ))
			ql_log(ql_log_warn, vha, 0x1015, "cmd=0x%x, waited %d msecs\n",
			    command, jiffies_to_msecs(jiffies - wait_time));
	} else {
		ql_dbg(ql_dbg_mbx, vha, 0x1011,
		    "Cmd=%x Polling Mode.\n", command);

		if (IS_P3P_TYPE(ha)) {
			if (RD_REG_DWORD(&reg->isp82.hint) &
			    HINT_MBX_INT_PENDING) {
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
				ha->flags.mbox_busy = 0;
				atomic_dec(&ha->num_pend_mbx_stage2);
				ql_dbg(ql_dbg_mbx, vha, 0x1012,
				    "Pending mailbox timeout, exiting.\n");
				rval = QLA_FUNCTION_TIMEOUT;
				goto premature_exit;
			}
			WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING);
		} else if (IS_FWI2_CAPABLE(ha))
			WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
		else
			WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
		while (!ha->flags.mbox_int) {
			if (ha->flags.purge_mbox ||
			    chip_reset != ha->chip_reset) {
				ha->flags.mbox_busy = 0;
				atomic_dec(&ha->num_pend_mbx_stage2);
				rval = QLA_ABORTED;
				goto premature_exit;
			}

			if (time_after(jiffies, wait_time))
				break;

			/*
			 * Check if it's UNLOADING, cause we cannot poll in
			 * this case, or else a NULL pointer dereference
			 * is triggered.
			 */
			if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags)))
				return QLA_FUNCTION_TIMEOUT;

			/* Check for pending interrupts. */
			qla2x00_poll(ha->rsp_q_map[0]);

			if (!ha->flags.mbox_int &&
			    !(IS_QLA2200(ha) &&
			    command == MBC_LOAD_RISC_RAM_EXTENDED))
				msleep(10);
		} /* while */
		ql_dbg(ql_dbg_mbx, vha, 0x1013,
		    "Waited %d sec.\n",
		    (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
	}
	atomic_dec(&ha->num_pend_mbx_stage2);

	/* Check whether we timed out */
	if (ha->flags.mbox_int) {
		uint16_t *iptr2;

		ql_dbg(ql_dbg_mbx, vha, 0x1014,
		    "Cmd=%x completed.\n", command);

		/* Got interrupt. Clear the flag. */
		ha->flags.mbox_int = 0;
		clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

		if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
			ha->flags.mbox_busy = 0;
			/* Setting Link-Down error */
			mcp->mb[0] = MBS_LINK_DOWN_ERROR;
			ha->mcp = NULL;
			rval = QLA_FUNCTION_FAILED;
			ql_log(ql_log_warn, vha, 0xd048,
			    "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
			goto premature_exit;
		}

		if (ha->mailbox_out[0] != MBS_COMMAND_COMPLETE)
			rval = QLA_FUNCTION_FAILED;

		/* Load return mailbox registers. */
		iptr2 = mcp->mb;
		iptr = (uint16_t *)&ha->mailbox_out[0];
		mboxes = mcp->in_mb;
		ql_dbg(ql_dbg_mbx, vha, 0x1113,
		    "Mailbox registers (IN):\n");
		for (cnt = 0; cnt < ha->mbx_count; cnt++) {
			if (mboxes & BIT_0) {
				*iptr2 = *iptr;
				ql_dbg(ql_dbg_mbx, vha, 0x1114,
				    "mbox[%d]->0x%04x\n", cnt, *iptr2);
			}

			mboxes >>= 1;
			iptr2++;
			iptr++;
		}
	} else {
		uint16_t mb[8];
		uint32_t ictrl, host_status, hccr;
		uint16_t w;

		if (IS_FWI2_CAPABLE(ha)) {
			mb[0] = RD_REG_WORD(&reg->isp24.mailbox0);
			mb[1] = RD_REG_WORD(&reg->isp24.mailbox1);
			mb[2] = RD_REG_WORD(&reg->isp24.mailbox2);
			mb[3] = RD_REG_WORD(&reg->isp24.mailbox3);
			mb[7] = RD_REG_WORD(&reg->isp24.mailbox7);
			ictrl = RD_REG_DWORD(&reg->isp24.ictrl);
			host_status = RD_REG_DWORD(&reg->isp24.host_status);
			hccr = RD_REG_DWORD(&reg->isp24.hccr);

			ql_log(ql_log_warn, vha, 0xd04c,
			    "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
			    "mb[0-3]=[0x%x 0x%x 0x%x 0x%x] mb7 0x%x host_status 0x%x hccr 0x%x\n",
			    command, ictrl, jiffies, mb[0], mb[1], mb[2], mb[3],
			    mb[7], host_status, hccr);
		} else {
			mb[0] = RD_MAILBOX_REG(ha, &reg->isp, 0);
			ictrl = RD_REG_WORD(&reg->isp.ictrl);
			ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
			    "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
			    "mb[0]=0x%x\n", command, ictrl, jiffies, mb[0]);
		}
		ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019);

		/* Capture FW dump only, if PCI device active */
		if (!pci_channel_offline(vha->hw->pdev)) {
			pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
			if (w == 0xffff || ictrl == 0xffffffff ||
			    (chip_reset != ha->chip_reset)) {
				/* This is a special case: if the driver is
				 * being unloaded and the PCI device has gone
				 * into a bad state due to a PCI error
				 * condition, only the PCI ERR flag would be
				 * set, so exit prematurely in that case.
				 */
				ha->flags.mbox_busy = 0;
				rval = QLA_FUNCTION_TIMEOUT;
				goto premature_exit;
			}

			/* Attempt to capture firmware dump for further
			 * analysis of the current firmware state. We do not
			 * need to do this if we are intentionally generating
			 * a dump.
			 */
			if (mcp->mb[0] != MBC_GEN_SYSTEM_ERROR)
				ha->isp_ops->fw_dump(vha, 0);

			rval = QLA_FUNCTION_TIMEOUT;
		}
	}

	ha->flags.mbox_busy = 0;

	/* Clean up */
	ha->mcp = NULL;

	if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
		ql_dbg(ql_dbg_mbx, vha, 0x101a,
		    "Checking for additional resp interrupt.\n");

		/* polling mode for non isp_abort commands. */
		qla2x00_poll(ha->rsp_q_map[0]);
	}

	if (rval == QLA_FUNCTION_TIMEOUT &&
	    mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
		if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
		    ha->flags.eeh_busy) {
			/* not in dpc. schedule it for dpc to take over. */
			ql_dbg(ql_dbg_mbx, vha, 0x101b,
			    "Timeout, schedule isp_abort_needed.\n");

			if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
			    !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
			    !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
				if (IS_QLA82XX(ha)) {
					ql_dbg(ql_dbg_mbx, vha, 0x112a,
					    "disabling pause transmit on port "
					    "0 & 1.\n");
					qla82xx_wr_32(ha,
					    QLA82XX_CRB_NIU + 0x98,
					    CRB_NIU_XG_PAUSE_CTL_P0|
					    CRB_NIU_XG_PAUSE_CTL_P1);
				}
				ql_log(ql_log_info, base_vha, 0x101c,
				    "Mailbox cmd timeout occurred, cmd=0x%x, "
				    "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
				    "abort.\n", command, mcp->mb[0],
				    ha->flags.eeh_busy);
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
			}
		} else if (current == ha->dpc_thread) {
			/* call abort directly since we are in the DPC thread */
			ql_dbg(ql_dbg_mbx, vha, 0x101d,
			    "Timeout, calling abort_isp.\n");

			if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
			    !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
			    !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
				if (IS_QLA82XX(ha)) {
					ql_dbg(ql_dbg_mbx, vha, 0x112b,
					    "disabling pause transmit on port "
					    "0 & 1.\n");
					qla82xx_wr_32(ha,
					    QLA82XX_CRB_NIU + 0x98,
					    CRB_NIU_XG_PAUSE_CTL_P0|
					    CRB_NIU_XG_PAUSE_CTL_P1);
				}
				ql_log(ql_log_info, base_vha, 0x101e,
				    "Mailbox cmd timeout occurred, cmd=0x%x, "
				    "mb[0]=0x%x. Scheduling ISP abort ",
				    command, mcp->mb[0]);
				set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
				clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				/* Allow next mbx cmd to come in. */
				complete(&ha->mbx_cmd_comp);
				if (ha->isp_ops->abort_isp(vha)) {
					/* Failed. retry later. */
					set_bit(ISP_ABORT_NEEDED,
					    &vha->dpc_flags);
				}
				clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
				ql_dbg(ql_dbg_mbx, vha, 0x101f,
				    "Finished abort_isp.\n");
				goto mbx_done;
			}
		}
	}

premature_exit:
	/* Allow next mbx cmd to come in. */
	complete(&ha->mbx_cmd_comp);

mbx_done:
	if (rval == QLA_ABORTED) {
		ql_log(ql_log_info, vha, 0xd035,
		    "Chip Reset in progress. Purging Mbox cmd=0x%x.\n",
		    mcp->mb[0]);
	} else if (rval) {
		if (ql2xextended_error_logging & (ql_dbg_disc|ql_dbg_mbx)) {
			pr_warn("%s [%s]-%04x:%ld: **** Failed", QL_MSGHDR,
			    dev_name(&ha->pdev->dev), 0x1020+0x800,
			    vha->host_no);
			mboxes = mcp->in_mb;
			cnt = 4;
			for (i = 0; i < ha->mbx_count && cnt; i++, mboxes >>= 1)
				if (mboxes & BIT_0) {
					printk(" mb[%u]=%x", i, mcp->mb[i]);
					cnt--;
				}
			pr_warn(" cmd=%x ****\n", command);
		}
		if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha))) {
			ql_dbg(ql_dbg_mbx, vha, 0x1198,
			    "host_status=%#x intr_ctrl=%#x intr_status=%#x\n",
			    RD_REG_DWORD(&reg->isp24.host_status),
			    RD_REG_DWORD(&reg->isp24.ictrl),
			    RD_REG_DWORD(&reg->isp24.istatus));
		} else {
			ql_dbg(ql_dbg_mbx, vha, 0x1206,
			    "ctrl_status=%#x ictrl=%#x istatus=%#x\n",
			    RD_REG_WORD(&reg->isp.ctrl_status),
			    RD_REG_WORD(&reg->isp.ictrl),
			    RD_REG_WORD(&reg->isp.istatus));
		}
	} else {
		ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
	}

	return rval;
}
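
/*
 * The wrapper routines in this file all follow the same calling pattern
 * around qla2x00_mailbox_command(): fill a stack-allocated mbx_cmd_t,
 * pick the command in mb[0], mark the outgoing/incoming registers with
 * the out_mb/in_mb bitmasks, set tov and flags, then check rval.
 * A minimal sketch (modeled on qla2x00_get_fw_options() further below;
 * the specific command shown is only an illustration):
 *
 *	mbx_cmd_t mc, *mcp = &mc;
 *
 *	mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
 *	mcp->out_mb = MBX_0;			// write only mb[0]
 *	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;	// read back mb[0]..mb[3]
 *	mcp->tov = MBX_TOV_SECONDS;
 *	mcp->flags = 0;
 *	rval = qla2x00_mailbox_command(vha, mcp);
 *	if (rval != QLA_SUCCESS)
 *		// command failed or timed out; mcp->mb[0] holds the FW status
 */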
int
qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
    uint32_t risc_code_size)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1022,
	    "Entered %s.\n", __func__);

	if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
		mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
		mcp->mb[8] = MSW(risc_addr);
		mcp->out_mb = MBX_8|MBX_0;
	} else {
		mcp->mb[0] = MBC_LOAD_RISC_RAM;
		mcp->out_mb = MBX_0;
	}
	mcp->mb[1] = LSW(risc_addr);
	mcp->mb[2] = MSW(req_dma);
	mcp->mb[3] = LSW(req_dma);
	mcp->mb[6] = MSW(MSD(req_dma));
	mcp->mb[7] = LSW(MSD(req_dma));
	mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
	if (IS_FWI2_CAPABLE(ha)) {
		mcp->mb[4] = MSW(risc_code_size);
		mcp->mb[5] = LSW(risc_code_size);
		mcp->out_mb |= MBX_5|MBX_4;
	} else {
		mcp->mb[4] = LSW(risc_code_size);
		mcp->out_mb |= MBX_4;
	}

	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1023,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1024,
		    "Done %s.\n", __func__);
	}

	return rval;
}
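
/*
 * Mailbox layout used by qla2x00_load_ram() above: mb[1] (plus mb[8] for
 * the extended command) carries the RISC destination address, mb[2,3]
 * and mb[6,7] carry the lower/upper halves of the 64-bit DMA address of
 * the staging buffer, and mb[4] (plus mb[5] on FWI2-capable ISPs)
 * carries risc_code_size. MSW/LSW/MSD split those values into the
 * 16-bit mailbox registers.
 */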
#define	EXTENDED_BB_CREDITS	BIT_0
#define	NVME_ENABLE_FLAG	BIT_3
static inline uint16_t qla25xx_set_sfp_lr_dist(struct qla_hw_data *ha)
{
	uint16_t mb4 = BIT_0;

	if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
		mb4 |= ha->long_range_distance << LR_DIST_FW_POS;

	return mb4;
}

static inline uint16_t qla25xx_set_nvr_lr_dist(struct qla_hw_data *ha)
{
	uint16_t mb4 = BIT_0;

	if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
		struct nvram_81xx *nv = ha->nvram;

		mb4 |= LR_DIST_FW_FIELD(nv->enhanced_features);
	}

	return mb4;
}
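
/*
 * Both helpers above return the value OR-ed into mb[4] of the
 * execute-firmware command: BIT_0 (EXTENDED_BB_CREDITS) enables the
 * long-range setting, and on ISP83xx/ISP27xx the long-range distance is
 * added as well, taken either from the SFP detected at probe time
 * (ha->long_range_distance) or from the NVRAM enhanced_features field,
 * depending on the ql2xautodetectsfp module parameter.
 */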
/*
 * qla2x00_execute_fw
 *	Start adapter firmware.
 *
 * Input:
 *	ha = adapter block pointer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1025,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_0;
	if (IS_FWI2_CAPABLE(ha)) {
		mcp->mb[1] = MSW(risc_addr);
		mcp->mb[2] = LSW(risc_addr);
		mcp->mb[3] = 0;
		mcp->mb[4] = 0;
		mcp->mb[11] = 0;
		ha->flags.using_lr_setting = 0;
		if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
		    IS_QLA27XX(ha)) {
			if (ql2xautodetectsfp) {
				if (ha->flags.detected_lr_sfp) {
					mcp->mb[4] |=
					    qla25xx_set_sfp_lr_dist(ha);
					ha->flags.using_lr_setting = 1;
				}
			} else {
				struct nvram_81xx *nv = ha->nvram;
				/* set LR distance if specified in nvram */
				if (nv->enhanced_features &
				    NEF_LR_DIST_ENABLE) {
					mcp->mb[4] |=
					    qla25xx_set_nvr_lr_dist(ha);
					ha->flags.using_lr_setting = 1;
				}
			}
		}

		if (ql2xnvmeenable && IS_QLA27XX(ha))
			mcp->mb[4] |= NVME_ENABLE_FLAG;

		if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
			struct nvram_81xx *nv = ha->nvram;
			/* set minimum speed if specified in nvram */
			if (nv->min_link_speed >= 2 &&
			    nv->min_link_speed <= 5) {
				mcp->mb[4] |= BIT_4;
				mcp->mb[11] = nv->min_link_speed;
				mcp->out_mb |= MBX_11;
				mcp->in_mb |= BIT_5;
				vha->min_link_speed_feat = nv->min_link_speed;
			}
		}

		if (ha->flags.exlogins_enabled)
			mcp->mb[4] |= ENABLE_EXTENDED_LOGIN;

		if (ha->flags.exchoffld_enabled)
			mcp->mb[4] |= ENABLE_EXCHANGE_OFFLD;

		mcp->out_mb |= MBX_4 | MBX_3 | MBX_2 | MBX_1 | MBX_11;
		mcp->in_mb |= MBX_3 | MBX_2 | MBX_1;
	} else {
		mcp->mb[1] = LSW(risc_addr);
		mcp->out_mb |= MBX_1;
		if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
			mcp->mb[2] = 0;
			mcp->out_mb |= MBX_2;
		}
	}

	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1026,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		if (IS_FWI2_CAPABLE(ha)) {
			ha->fw_ability_mask = mcp->mb[3] << 16 | mcp->mb[2];
			ql_dbg(ql_dbg_mbx, vha, 0x119a,
			    "fw_ability_mask=%x.\n", ha->fw_ability_mask);
			ql_dbg(ql_dbg_mbx, vha, 0x1027,
			    "exchanges=%x.\n", mcp->mb[1]);
			if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
				ha->max_speed_sup = mcp->mb[2] & BIT_0;
				ql_dbg(ql_dbg_mbx, vha, 0x119b,
				    "Maximum speed supported=%s.\n",
				    ha->max_speed_sup ? "32Gps" : "16Gps");
				if (vha->min_link_speed_feat) {
					ha->min_link_speed = mcp->mb[5];
					ql_dbg(ql_dbg_mbx, vha, 0x119c,
					    "Minimum speed set=%s.\n",
					    mcp->mb[5] == 5 ? "32Gps" :
					    mcp->mb[5] == 4 ? "16Gps" :
					    mcp->mb[5] == 3 ? "8Gps" :
					    mcp->mb[5] == 2 ? "4Gps" :
					    "unknown");
				}
			}
		}
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028,
		    "Done.\n");
	}

	return rval;
}
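
/*
 * Summary of the mb[4] option bits assembled by qla2x00_execute_fw()
 * above (as used in this file): BIT_0 plus the LR distance field come
 * from the qla25xx_set_*_lr_dist() helpers, NVME_ENABLE_FLAG (BIT_3)
 * enables FC-NVMe on ISP27xx when ql2xnvmeenable is set, BIT_4 tells the
 * firmware to honor the minimum link speed passed in mb[11], and
 * ENABLE_EXTENDED_LOGIN / ENABLE_EXCHANGE_OFFLD request the extended
 * login and exchange offload features configured elsewhere in the driver.
 */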
/*
 * qla_get_exlogin_status
 *	Get extended login status
 *	uses the memory offload control/status Mailbox
 *
 * Input:
 *	ha:		adapter state pointer.
 *	fwopt:		firmware options
 *
 * Returns:
 *	qla2x00 local function status
 *
 * Context:
 *	Kernel context.
 */
#define	FETCH_XLOGINS_STAT	0x8
int
qla_get_exlogin_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
	uint16_t *ex_logins_cnt)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118f,
	    "Entered %s\n", __func__);

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
	mcp->mb[1] = FETCH_XLOGINS_STAT;
	mcp->out_mb = MBX_1|MBX_0;
	mcp->in_mb = MBX_10|MBX_4|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1115, "Failed=%x.\n", rval);
	} else {
		*buf_sz = mcp->mb[4];
		*ex_logins_cnt = mcp->mb[10];

		ql_log(ql_log_info, vha, 0x1190,
		    "buffer size 0x%x, exchange login count=%d\n",
		    mcp->mb[4], mcp->mb[10]);

		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1116,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla_set_exlogin_mem_cfg
 *	Set extended login memory configuration.
 *	This mbx needs to be issued before init_cb is set.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	buffer:		buffer pointer
 *	phys_addr:	physical address of buffer
 *	size:		size of buffer
 *	TARGET_QUEUE_LOCK must be released
 *	ADAPTER_STATE_LOCK must be released
 *
 * Returns:
 *	qla2x00 local function status code.
 *
 * Context:
 *	Kernel context.
 */
#define CONFIG_XLOGINS_MEM	0x3
int
qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111a,
	    "Entered %s.\n", __func__);

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
	mcp->mb[1] = CONFIG_XLOGINS_MEM;
	mcp->mb[2] = MSW(phys_addr);
	mcp->mb[3] = LSW(phys_addr);
	mcp->mb[6] = MSW(MSD(phys_addr));
	mcp->mb[7] = LSW(MSD(phys_addr));
	mcp->mb[8] = MSW(ha->exlogin_size);
	mcp->mb[9] = LSW(ha->exlogin_size);
	mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_11|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x111b, "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla_get_exchoffld_status
 *	Get exchange offload status
 *	uses the memory offload control/status Mailbox
 *
 * Input:
 *	ha:		adapter state pointer.
 *	fwopt:		firmware options
 *
 * Returns:
 *	qla2x00 local function status
 *
 * Context:
 *	Kernel context.
 */
#define	FETCH_XCHOFFLD_STAT	0x2
int
qla_get_exchoffld_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
	uint16_t *ex_logins_cnt)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1019,
	    "Entered %s\n", __func__);

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
	mcp->mb[1] = FETCH_XCHOFFLD_STAT;
	mcp->out_mb = MBX_1|MBX_0;
	mcp->in_mb = MBX_10|MBX_4|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1155, "Failed=%x.\n", rval);
	} else {
		*buf_sz = mcp->mb[4];
		*ex_logins_cnt = mcp->mb[10];

		ql_log(ql_log_info, vha, 0x118e,
		    "buffer size 0x%x, exchange offload count=%d\n",
		    mcp->mb[4], mcp->mb[10]);

		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1156,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla_set_exchoffld_mem_cfg
 *	Set exchange offload memory configuration.
 *	This mbx needs to be issued before init_cb is set.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	buffer:		buffer pointer
 *	phys_addr:	physical address of buffer
 *	size:		size of buffer
 *	TARGET_QUEUE_LOCK must be released
 *	ADAPTER_STATE_LOCK must be released
 *
 * Returns:
 *	qla2x00 local function status code.
 *
 * Context:
 *	Kernel context.
 */
#define CONFIG_XCHOFFLD_MEM	0x3
int
qla_set_exchoffld_mem_cfg(scsi_qla_host_t *vha)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1157,
	    "Entered %s.\n", __func__);

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
	mcp->mb[1] = CONFIG_XCHOFFLD_MEM;
	mcp->mb[2] = MSW(ha->exchoffld_buf_dma);
	mcp->mb[3] = LSW(ha->exchoffld_buf_dma);
	mcp->mb[6] = MSW(MSD(ha->exchoffld_buf_dma));
	mcp->mb[7] = LSW(MSD(ha->exchoffld_buf_dma));
	mcp->mb[8] = MSW(ha->exchoffld_size);
	mcp->mb[9] = LSW(ha->exchoffld_size);
	mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_11|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1158, "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1192,
		    "Done %s.\n", __func__);
	}

	return rval;
}
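
/*
 * The four exlogin/exchoffld helpers above all issue the same
 * MBC_GET_MEM_OFFLOAD_CNTRL_STAT command and select the operation via
 * mb[1]: FETCH_XLOGINS_STAT (0x8) and FETCH_XCHOFFLD_STAT (0x2) query
 * the required buffer size (returned in mb[4]) and entry count (mb[10]),
 * while CONFIG_XLOGINS_MEM and CONFIG_XCHOFFLD_MEM (both 0x3) hand the
 * firmware the DMA address and size of the host buffer allocated for
 * that feature.
 */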
  885. /*
  886. * qla2x00_get_fw_version
  887. * Get firmware version.
  888. *
  889. * Input:
  890. * ha: adapter state pointer.
  891. * major: pointer for major number.
  892. * minor: pointer for minor number.
  893. * subminor: pointer for subminor number.
  894. *
  895. * Returns:
  896. * qla2x00 local function return status code.
  897. *
  898. * Context:
  899. * Kernel context.
  900. */
  901. int
  902. qla2x00_get_fw_version(scsi_qla_host_t *vha)
  903. {
  904. int rval;
  905. mbx_cmd_t mc;
  906. mbx_cmd_t *mcp = &mc;
  907. struct qla_hw_data *ha = vha->hw;
  908. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1029,
  909. "Entered %s.\n", __func__);
  910. mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
  911. mcp->out_mb = MBX_0;
  912. mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
  913. if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha) || IS_QLA8044(ha))
  914. mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
  915. if (IS_FWI2_CAPABLE(ha))
  916. mcp->in_mb |= MBX_17|MBX_16|MBX_15;
  917. if (IS_QLA27XX(ha))
  918. mcp->in_mb |=
  919. MBX_25|MBX_24|MBX_23|MBX_22|MBX_21|MBX_20|MBX_19|MBX_18|
  920. MBX_14|MBX_13|MBX_11|MBX_10|MBX_9|MBX_8;
  921. mcp->flags = 0;
  922. mcp->tov = MBX_TOV_SECONDS;
  923. rval = qla2x00_mailbox_command(vha, mcp);
  924. if (rval != QLA_SUCCESS)
  925. goto failed;
  926. /* Return mailbox data. */
  927. ha->fw_major_version = mcp->mb[1];
  928. ha->fw_minor_version = mcp->mb[2];
  929. ha->fw_subminor_version = mcp->mb[3];
  930. ha->fw_attributes = mcp->mb[6];
  931. if (IS_QLA2100(vha->hw) || IS_QLA2200(vha->hw))
  932. ha->fw_memory_size = 0x1FFFF; /* Defaults to 128KB. */
  933. else
  934. ha->fw_memory_size = (mcp->mb[5] << 16) | mcp->mb[4];
  935. if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
  936. ha->mpi_version[0] = mcp->mb[10] & 0xff;
  937. ha->mpi_version[1] = mcp->mb[11] >> 8;
  938. ha->mpi_version[2] = mcp->mb[11] & 0xff;
  939. ha->mpi_capabilities = (mcp->mb[12] << 16) | mcp->mb[13];
  940. ha->phy_version[0] = mcp->mb[8] & 0xff;
  941. ha->phy_version[1] = mcp->mb[9] >> 8;
  942. ha->phy_version[2] = mcp->mb[9] & 0xff;
  943. }
  944. if (IS_FWI2_CAPABLE(ha)) {
  945. ha->fw_attributes_h = mcp->mb[15];
  946. ha->fw_attributes_ext[0] = mcp->mb[16];
  947. ha->fw_attributes_ext[1] = mcp->mb[17];
  948. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1139,
  949. "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n",
  950. __func__, mcp->mb[15], mcp->mb[6]);
  951. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f,
  952. "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n",
  953. __func__, mcp->mb[17], mcp->mb[16]);
  954. if (ha->fw_attributes_h & 0x4)
  955. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118d,
  956. "%s: Firmware supports Extended Login 0x%x\n",
  957. __func__, ha->fw_attributes_h);
  958. if (ha->fw_attributes_h & 0x8)
  959. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1191,
  960. "%s: Firmware supports Exchange Offload 0x%x\n",
  961. __func__, ha->fw_attributes_h);
  962. /*
  963. * FW supports nvme and driver load parameter requested nvme.
  964. * BIT 26 of fw_attributes indicates NVMe support.
  965. */
  966. if ((ha->fw_attributes_h & 0x400) && ql2xnvmeenable) {
  967. vha->flags.nvme_enabled = 1;
  968. ql_log(ql_log_info, vha, 0xd302,
  969. "%s: FC-NVMe is Enabled (0x%x)\n",
  970. __func__, ha->fw_attributes_h);
  971. }
  972. }
  973. if (IS_QLA27XX(ha)) {
  974. ha->mpi_version[0] = mcp->mb[10] & 0xff;
  975. ha->mpi_version[1] = mcp->mb[11] >> 8;
  976. ha->mpi_version[2] = mcp->mb[11] & 0xff;
  977. ha->pep_version[0] = mcp->mb[13] & 0xff;
  978. ha->pep_version[1] = mcp->mb[14] >> 8;
  979. ha->pep_version[2] = mcp->mb[14] & 0xff;
  980. ha->fw_shared_ram_start = (mcp->mb[19] << 16) | mcp->mb[18];
  981. ha->fw_shared_ram_end = (mcp->mb[21] << 16) | mcp->mb[20];
  982. ha->fw_ddr_ram_start = (mcp->mb[23] << 16) | mcp->mb[22];
  983. ha->fw_ddr_ram_end = (mcp->mb[25] << 16) | mcp->mb[24];
  984. }
  985. failed:
  986. if (rval != QLA_SUCCESS) {
  987. /*EMPTY*/
  988. ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval);
  989. } else {
  990. /*EMPTY*/
  991. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102b,
  992. "Done %s.\n", __func__);
  993. }
  994. return rval;
  995. }
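/*
 * Illustrative caller sketch (not part of the original source): the version
 * mailbox stores its results in qla_hw_data fields rather than output
 * parameters, so a caller simply reads them back after the command
 * completes.  The helper name and the 0xffff debug id are placeholders.
 */
static void qla_example_report_fw_version(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;

	if (qla2x00_get_fw_version(vha) != QLA_SUCCESS)
		return;

	ql_log(ql_log_info, vha, 0xffff,
	    "Firmware %d.%02d.%02d, attributes 0x%x.\n",
	    ha->fw_major_version, ha->fw_minor_version,
	    ha->fw_subminor_version, ha->fw_attributes);
}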
  996. /*
  997. * qla2x00_get_fw_options
998. * Get firmware options.
  999. *
  1000. * Input:
  1001. * ha = adapter block pointer.
1002. * fwopts = pointer for returned firmware options.
  1003. *
  1004. * Returns:
  1005. * qla2x00 local function return status code.
  1006. *
  1007. * Context:
  1008. * Kernel context.
  1009. */
  1010. int
  1011. qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
  1012. {
  1013. int rval;
  1014. mbx_cmd_t mc;
  1015. mbx_cmd_t *mcp = &mc;
  1016. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102c,
  1017. "Entered %s.\n", __func__);
  1018. mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
  1019. mcp->out_mb = MBX_0;
  1020. mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
  1021. mcp->tov = MBX_TOV_SECONDS;
  1022. mcp->flags = 0;
  1023. rval = qla2x00_mailbox_command(vha, mcp);
  1024. if (rval != QLA_SUCCESS) {
  1025. /*EMPTY*/
  1026. ql_dbg(ql_dbg_mbx, vha, 0x102d, "Failed=%x.\n", rval);
  1027. } else {
  1028. fwopts[0] = mcp->mb[0];
  1029. fwopts[1] = mcp->mb[1];
  1030. fwopts[2] = mcp->mb[2];
  1031. fwopts[3] = mcp->mb[3];
  1032. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102e,
  1033. "Done %s.\n", __func__);
  1034. }
  1035. return rval;
  1036. }
  1037. /*
  1038. * qla2x00_set_fw_options
  1039. * Set firmware options.
  1040. *
  1041. * Input:
  1042. * ha = adapter block pointer.
1043. * fwopts = pointer for firmware options to set.
  1044. *
  1045. * Returns:
  1046. * qla2x00 local function return status code.
  1047. *
  1048. * Context:
  1049. * Kernel context.
  1050. */
  1051. int
  1052. qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
  1053. {
  1054. int rval;
  1055. mbx_cmd_t mc;
  1056. mbx_cmd_t *mcp = &mc;
  1057. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102f,
  1058. "Entered %s.\n", __func__);
  1059. mcp->mb[0] = MBC_SET_FIRMWARE_OPTION;
  1060. mcp->mb[1] = fwopts[1];
  1061. mcp->mb[2] = fwopts[2];
  1062. mcp->mb[3] = fwopts[3];
  1063. mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
  1064. mcp->in_mb = MBX_0;
  1065. if (IS_FWI2_CAPABLE(vha->hw)) {
  1066. mcp->in_mb |= MBX_1;
  1067. mcp->mb[10] = fwopts[10];
  1068. mcp->out_mb |= MBX_10;
  1069. } else {
  1070. mcp->mb[10] = fwopts[10];
  1071. mcp->mb[11] = fwopts[11];
  1072. mcp->mb[12] = 0; /* Undocumented, but used */
  1073. mcp->out_mb |= MBX_12|MBX_11|MBX_10;
  1074. }
  1075. mcp->tov = MBX_TOV_SECONDS;
  1076. mcp->flags = 0;
  1077. rval = qla2x00_mailbox_command(vha, mcp);
  1078. fwopts[0] = mcp->mb[0];
  1079. if (rval != QLA_SUCCESS) {
  1080. /*EMPTY*/
  1081. ql_dbg(ql_dbg_mbx, vha, 0x1030,
  1082. "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]);
  1083. } else {
  1084. /*EMPTY*/
  1085. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1031,
  1086. "Done %s.\n", __func__);
  1087. }
  1088. return rval;
  1089. }
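/*
 * Illustrative caller sketch (not part of the original source): firmware
 * options travel as a small array of 16-bit words, so a typical update pairs
 * qla2x00_get_fw_options() with qla2x00_set_fw_options() in a
 * read-modify-write.  The helper name, the array size and the option bit
 * flipped below are illustrative assumptions only.
 */
static int qla_example_toggle_fw_option(scsi_qla_host_t *vha)
{
	uint16_t fwopts[16] = { 0 };
	int rval;

	rval = qla2x00_get_fw_options(vha, fwopts);
	if (rval != QLA_SUCCESS)
		return rval;

	fwopts[2] |= BIT_0;	/* example bit only */

	return qla2x00_set_fw_options(vha, fwopts);
}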
  1090. /*
  1091. * qla2x00_mbx_reg_test
  1092. * Mailbox register wrap test.
  1093. *
  1094. * Input:
  1095. * ha = adapter block pointer.
  1096. * TARGET_QUEUE_LOCK must be released.
  1097. * ADAPTER_STATE_LOCK must be released.
  1098. *
  1099. * Returns:
  1100. * qla2x00 local function return status code.
  1101. *
  1102. * Context:
  1103. * Kernel context.
  1104. */
  1105. int
  1106. qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
  1107. {
  1108. int rval;
  1109. mbx_cmd_t mc;
  1110. mbx_cmd_t *mcp = &mc;
  1111. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1032,
  1112. "Entered %s.\n", __func__);
  1113. mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
  1114. mcp->mb[1] = 0xAAAA;
  1115. mcp->mb[2] = 0x5555;
  1116. mcp->mb[3] = 0xAA55;
  1117. mcp->mb[4] = 0x55AA;
  1118. mcp->mb[5] = 0xA5A5;
  1119. mcp->mb[6] = 0x5A5A;
  1120. mcp->mb[7] = 0x2525;
  1121. mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
  1122. mcp->in_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
  1123. mcp->tov = MBX_TOV_SECONDS;
  1124. mcp->flags = 0;
  1125. rval = qla2x00_mailbox_command(vha, mcp);
  1126. if (rval == QLA_SUCCESS) {
  1127. if (mcp->mb[1] != 0xAAAA || mcp->mb[2] != 0x5555 ||
  1128. mcp->mb[3] != 0xAA55 || mcp->mb[4] != 0x55AA)
  1129. rval = QLA_FUNCTION_FAILED;
  1130. if (mcp->mb[5] != 0xA5A5 || mcp->mb[6] != 0x5A5A ||
  1131. mcp->mb[7] != 0x2525)
  1132. rval = QLA_FUNCTION_FAILED;
  1133. }
  1134. if (rval != QLA_SUCCESS) {
  1135. /*EMPTY*/
  1136. ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval);
  1137. } else {
  1138. /*EMPTY*/
  1139. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1034,
  1140. "Done %s.\n", __func__);
  1141. }
  1142. return rval;
  1143. }
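/*
 * Illustrative caller sketch (not part of the original source): the register
 * wrap test needs no arguments beyond the host, so a diagnostic path only
 * has to act on the pass/fail return value.  The helper name and the 0xffff
 * debug id are placeholders.
 */
static int qla_example_run_mbx_diag(scsi_qla_host_t *vha)
{
	int rval = qla2x00_mbx_reg_test(vha);

	if (rval != QLA_SUCCESS)
		ql_log(ql_log_warn, vha, 0xffff,
		    "Mailbox register wrap test failed (%x).\n", rval);
	return rval;
}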
  1144. /*
  1145. * qla2x00_verify_checksum
  1146. * Verify firmware checksum.
  1147. *
  1148. * Input:
  1149. * ha = adapter block pointer.
  1150. * TARGET_QUEUE_LOCK must be released.
  1151. * ADAPTER_STATE_LOCK must be released.
  1152. *
  1153. * Returns:
  1154. * qla2x00 local function return status code.
  1155. *
  1156. * Context:
  1157. * Kernel context.
  1158. */
  1159. int
  1160. qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
  1161. {
  1162. int rval;
  1163. mbx_cmd_t mc;
  1164. mbx_cmd_t *mcp = &mc;
  1165. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1035,
  1166. "Entered %s.\n", __func__);
  1167. mcp->mb[0] = MBC_VERIFY_CHECKSUM;
  1168. mcp->out_mb = MBX_0;
  1169. mcp->in_mb = MBX_0;
  1170. if (IS_FWI2_CAPABLE(vha->hw)) {
  1171. mcp->mb[1] = MSW(risc_addr);
  1172. mcp->mb[2] = LSW(risc_addr);
  1173. mcp->out_mb |= MBX_2|MBX_1;
  1174. mcp->in_mb |= MBX_2|MBX_1;
  1175. } else {
  1176. mcp->mb[1] = LSW(risc_addr);
  1177. mcp->out_mb |= MBX_1;
  1178. mcp->in_mb |= MBX_1;
  1179. }
  1180. mcp->tov = MBX_TOV_SECONDS;
  1181. mcp->flags = 0;
  1182. rval = qla2x00_mailbox_command(vha, mcp);
  1183. if (rval != QLA_SUCCESS) {
  1184. ql_dbg(ql_dbg_mbx, vha, 0x1036,
1185. "Failed=%x chk sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ?
  1186. (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]);
  1187. } else {
  1188. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1037,
  1189. "Done %s.\n", __func__);
  1190. }
  1191. return rval;
  1192. }
  1193. /*
  1194. * qla2x00_issue_iocb
  1195. * Issue IOCB using mailbox command
  1196. *
  1197. * Input:
  1198. * ha = adapter state pointer.
  1199. * buffer = buffer pointer.
  1200. * phys_addr = physical address of buffer.
  1201. * size = size of buffer.
  1202. * TARGET_QUEUE_LOCK must be released.
  1203. * ADAPTER_STATE_LOCK must be released.
  1204. *
  1205. * Returns:
  1206. * qla2x00 local function return status code.
  1207. *
  1208. * Context:
  1209. * Kernel context.
  1210. */
  1211. int
  1212. qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
  1213. dma_addr_t phys_addr, size_t size, uint32_t tov)
  1214. {
  1215. int rval;
  1216. mbx_cmd_t mc;
  1217. mbx_cmd_t *mcp = &mc;
  1218. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1038,
  1219. "Entered %s.\n", __func__);
  1220. mcp->mb[0] = MBC_IOCB_COMMAND_A64;
  1221. mcp->mb[1] = 0;
  1222. mcp->mb[2] = MSW(phys_addr);
  1223. mcp->mb[3] = LSW(phys_addr);
  1224. mcp->mb[6] = MSW(MSD(phys_addr));
  1225. mcp->mb[7] = LSW(MSD(phys_addr));
  1226. mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
  1227. mcp->in_mb = MBX_2|MBX_0;
  1228. mcp->tov = tov;
  1229. mcp->flags = 0;
  1230. rval = qla2x00_mailbox_command(vha, mcp);
  1231. if (rval != QLA_SUCCESS) {
  1232. /*EMPTY*/
  1233. ql_dbg(ql_dbg_mbx, vha, 0x1039, "Failed=%x.\n", rval);
  1234. } else {
  1235. sts_entry_t *sts_entry = (sts_entry_t *) buffer;
  1236. /* Mask reserved bits. */
  1237. sts_entry->entry_status &=
  1238. IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;
  1239. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103a,
  1240. "Done %s.\n", __func__);
  1241. }
  1242. return rval;
  1243. }
  1244. int
  1245. qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr,
  1246. size_t size)
  1247. {
  1248. return qla2x00_issue_iocb_timeout(vha, buffer, phys_addr, size,
  1249. MBX_TOV_SECONDS);
  1250. }
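/*
 * Illustrative caller sketch (not part of the original source): an IOCB is
 * built in DMA-able memory (here the driver's s_dma_pool, as the login and
 * logout helpers later in this file do) and handed to qla2x00_issue_iocb();
 * on success the same buffer holds the completed status entry.  The helper
 * name is a placeholder and the IOCB contents are left to the caller.
 */
static int qla_example_issue_iocb(scsi_qla_host_t *vha, size_t iocb_size)
{
	struct qla_hw_data *ha = vha->hw;
	dma_addr_t iocb_dma;
	void *iocb;
	int rval;

	iocb = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &iocb_dma);
	if (!iocb)
		return QLA_MEMORY_ALLOC_FAILED;

	/* ... caller fills in the IOCB fields here ... */

	rval = qla2x00_issue_iocb(vha, iocb, iocb_dma, iocb_size);

	dma_pool_free(ha->s_dma_pool, iocb, iocb_dma);
	return rval;
}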
  1251. /*
  1252. * qla2x00_abort_command
  1253. * Abort command aborts a specified IOCB.
  1254. *
  1255. * Input:
  1256. * ha = adapter block pointer.
1257. * sp = SRB structure pointer.
  1258. *
  1259. * Returns:
  1260. * qla2x00 local function return status code.
  1261. *
  1262. * Context:
  1263. * Kernel context.
  1264. */
  1265. int
  1266. qla2x00_abort_command(srb_t *sp)
  1267. {
  1268. unsigned long flags = 0;
  1269. int rval;
  1270. uint32_t handle = 0;
  1271. mbx_cmd_t mc;
  1272. mbx_cmd_t *mcp = &mc;
  1273. fc_port_t *fcport = sp->fcport;
  1274. scsi_qla_host_t *vha = fcport->vha;
  1275. struct qla_hw_data *ha = vha->hw;
  1276. struct req_que *req;
  1277. struct scsi_cmnd *cmd = GET_CMD_SP(sp);
  1278. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b,
  1279. "Entered %s.\n", __func__);
  1280. if (vha->flags.qpairs_available && sp->qpair)
  1281. req = sp->qpair->req;
  1282. else
  1283. req = vha->req;
  1284. spin_lock_irqsave(&ha->hardware_lock, flags);
  1285. for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
  1286. if (req->outstanding_cmds[handle] == sp)
  1287. break;
  1288. }
  1289. spin_unlock_irqrestore(&ha->hardware_lock, flags);
  1290. if (handle == req->num_outstanding_cmds) {
  1291. /* command not found */
  1292. return QLA_FUNCTION_FAILED;
  1293. }
  1294. mcp->mb[0] = MBC_ABORT_COMMAND;
  1295. if (HAS_EXTENDED_IDS(ha))
  1296. mcp->mb[1] = fcport->loop_id;
  1297. else
  1298. mcp->mb[1] = fcport->loop_id << 8;
  1299. mcp->mb[2] = (uint16_t)handle;
  1300. mcp->mb[3] = (uint16_t)(handle >> 16);
  1301. mcp->mb[6] = (uint16_t)cmd->device->lun;
  1302. mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
  1303. mcp->in_mb = MBX_0;
  1304. mcp->tov = MBX_TOV_SECONDS;
  1305. mcp->flags = 0;
  1306. rval = qla2x00_mailbox_command(vha, mcp);
  1307. if (rval != QLA_SUCCESS) {
  1308. ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval);
  1309. } else {
  1310. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103d,
  1311. "Done %s.\n", __func__);
  1312. }
  1313. return rval;
  1314. }
  1315. int
  1316. qla2x00_abort_target(struct fc_port *fcport, uint64_t l, int tag)
  1317. {
  1318. int rval, rval2;
  1319. mbx_cmd_t mc;
  1320. mbx_cmd_t *mcp = &mc;
  1321. scsi_qla_host_t *vha;
  1322. struct req_que *req;
  1323. struct rsp_que *rsp;
  1324. l = l;
  1325. vha = fcport->vha;
  1326. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e,
  1327. "Entered %s.\n", __func__);
  1328. req = vha->hw->req_q_map[0];
  1329. rsp = req->rsp;
  1330. mcp->mb[0] = MBC_ABORT_TARGET;
  1331. mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0;
  1332. if (HAS_EXTENDED_IDS(vha->hw)) {
  1333. mcp->mb[1] = fcport->loop_id;
  1334. mcp->mb[10] = 0;
  1335. mcp->out_mb |= MBX_10;
  1336. } else {
  1337. mcp->mb[1] = fcport->loop_id << 8;
  1338. }
  1339. mcp->mb[2] = vha->hw->loop_reset_delay;
  1340. mcp->mb[9] = vha->vp_idx;
  1341. mcp->in_mb = MBX_0;
  1342. mcp->tov = MBX_TOV_SECONDS;
  1343. mcp->flags = 0;
  1344. rval = qla2x00_mailbox_command(vha, mcp);
  1345. if (rval != QLA_SUCCESS) {
  1346. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103f,
  1347. "Failed=%x.\n", rval);
  1348. }
  1349. /* Issue marker IOCB. */
  1350. rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, 0,
  1351. MK_SYNC_ID);
  1352. if (rval2 != QLA_SUCCESS) {
  1353. ql_dbg(ql_dbg_mbx, vha, 0x1040,
  1354. "Failed to issue marker IOCB (%x).\n", rval2);
  1355. } else {
  1356. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1041,
  1357. "Done %s.\n", __func__);
  1358. }
  1359. return rval;
  1360. }
  1361. int
  1362. qla2x00_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
  1363. {
  1364. int rval, rval2;
  1365. mbx_cmd_t mc;
  1366. mbx_cmd_t *mcp = &mc;
  1367. scsi_qla_host_t *vha;
  1368. struct req_que *req;
  1369. struct rsp_que *rsp;
  1370. vha = fcport->vha;
  1371. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1042,
  1372. "Entered %s.\n", __func__);
  1373. req = vha->hw->req_q_map[0];
  1374. rsp = req->rsp;
  1375. mcp->mb[0] = MBC_LUN_RESET;
  1376. mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
  1377. if (HAS_EXTENDED_IDS(vha->hw))
  1378. mcp->mb[1] = fcport->loop_id;
  1379. else
  1380. mcp->mb[1] = fcport->loop_id << 8;
  1381. mcp->mb[2] = (u32)l;
  1382. mcp->mb[3] = 0;
  1383. mcp->mb[9] = vha->vp_idx;
  1384. mcp->in_mb = MBX_0;
  1385. mcp->tov = MBX_TOV_SECONDS;
  1386. mcp->flags = 0;
  1387. rval = qla2x00_mailbox_command(vha, mcp);
  1388. if (rval != QLA_SUCCESS) {
  1389. ql_dbg(ql_dbg_mbx, vha, 0x1043, "Failed=%x.\n", rval);
  1390. }
  1391. /* Issue marker IOCB. */
  1392. rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
  1393. MK_SYNC_ID_LUN);
  1394. if (rval2 != QLA_SUCCESS) {
  1395. ql_dbg(ql_dbg_mbx, vha, 0x1044,
  1396. "Failed to issue marker IOCB (%x).\n", rval2);
  1397. } else {
  1398. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1045,
  1399. "Done %s.\n", __func__);
  1400. }
  1401. return rval;
  1402. }
  1403. /*
  1404. * qla2x00_get_adapter_id
  1405. * Get adapter ID and topology.
  1406. *
  1407. * Input:
  1408. * ha = adapter block pointer.
  1409. * id = pointer for loop ID.
  1410. * al_pa = pointer for AL_PA.
  1411. * area = pointer for area.
  1412. * domain = pointer for domain.
  1413. * top = pointer for topology.
  1414. * TARGET_QUEUE_LOCK must be released.
  1415. * ADAPTER_STATE_LOCK must be released.
  1416. *
  1417. * Returns:
  1418. * qla2x00 local function return status code.
  1419. *
  1420. * Context:
  1421. * Kernel context.
  1422. */
  1423. int
  1424. qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
  1425. uint8_t *area, uint8_t *domain, uint16_t *top, uint16_t *sw_cap)
  1426. {
  1427. int rval;
  1428. mbx_cmd_t mc;
  1429. mbx_cmd_t *mcp = &mc;
  1430. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1046,
  1431. "Entered %s.\n", __func__);
  1432. mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
  1433. mcp->mb[9] = vha->vp_idx;
  1434. mcp->out_mb = MBX_9|MBX_0;
  1435. mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
  1436. if (IS_CNA_CAPABLE(vha->hw))
  1437. mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
  1438. if (IS_FWI2_CAPABLE(vha->hw))
  1439. mcp->in_mb |= MBX_19|MBX_18|MBX_17|MBX_16;
  1440. if (IS_QLA27XX(vha->hw))
  1441. mcp->in_mb |= MBX_15;
  1442. mcp->tov = MBX_TOV_SECONDS;
  1443. mcp->flags = 0;
  1444. rval = qla2x00_mailbox_command(vha, mcp);
  1445. if (mcp->mb[0] == MBS_COMMAND_ERROR)
  1446. rval = QLA_COMMAND_ERROR;
  1447. else if (mcp->mb[0] == MBS_INVALID_COMMAND)
  1448. rval = QLA_INVALID_COMMAND;
  1449. /* Return data. */
  1450. *id = mcp->mb[1];
  1451. *al_pa = LSB(mcp->mb[2]);
  1452. *area = MSB(mcp->mb[2]);
  1453. *domain = LSB(mcp->mb[3]);
  1454. *top = mcp->mb[6];
  1455. *sw_cap = mcp->mb[7];
  1456. if (rval != QLA_SUCCESS) {
  1457. /*EMPTY*/
  1458. ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval);
  1459. } else {
  1460. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1048,
  1461. "Done %s.\n", __func__);
  1462. if (IS_CNA_CAPABLE(vha->hw)) {
  1463. vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
  1464. vha->fcoe_fcf_idx = mcp->mb[10];
  1465. vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8;
  1466. vha->fcoe_vn_port_mac[4] = mcp->mb[11] & 0xff;
  1467. vha->fcoe_vn_port_mac[3] = mcp->mb[12] >> 8;
  1468. vha->fcoe_vn_port_mac[2] = mcp->mb[12] & 0xff;
  1469. vha->fcoe_vn_port_mac[1] = mcp->mb[13] >> 8;
  1470. vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff;
  1471. }
  1472. /* If FA-WWN supported */
  1473. if (IS_FAWWN_CAPABLE(vha->hw)) {
  1474. if (mcp->mb[7] & BIT_14) {
  1475. vha->port_name[0] = MSB(mcp->mb[16]);
  1476. vha->port_name[1] = LSB(mcp->mb[16]);
  1477. vha->port_name[2] = MSB(mcp->mb[17]);
  1478. vha->port_name[3] = LSB(mcp->mb[17]);
  1479. vha->port_name[4] = MSB(mcp->mb[18]);
  1480. vha->port_name[5] = LSB(mcp->mb[18]);
  1481. vha->port_name[6] = MSB(mcp->mb[19]);
  1482. vha->port_name[7] = LSB(mcp->mb[19]);
  1483. fc_host_port_name(vha->host) =
  1484. wwn_to_u64(vha->port_name);
  1485. ql_dbg(ql_dbg_mbx, vha, 0x10ca,
  1486. "FA-WWN acquired %016llx\n",
  1487. wwn_to_u64(vha->port_name));
  1488. }
  1489. }
  1490. if (IS_QLA27XX(vha->hw))
  1491. vha->bbcr = mcp->mb[15];
  1492. }
  1493. return rval;
  1494. }
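/*
 * Illustrative caller sketch (not part of the original source): the
 * adapter-id mailbox reports everything through output parameters, which a
 * caller typically folds back into a port id for logging or comparison.
 * The helper name and the 0xffff debug id are placeholders.
 */
static void qla_example_read_adapter_id(scsi_qla_host_t *vha)
{
	uint16_t loop_id, topo, sw_cap;
	uint8_t al_pa, area, domain;

	if (qla2x00_get_adapter_id(vha, &loop_id, &al_pa, &area, &domain,
	    &topo, &sw_cap) != QLA_SUCCESS)
		return;

	ql_dbg(ql_dbg_mbx, vha, 0xffff,
	    "loop_id=%x portid=%02x%02x%02x topo=%x sw_cap=%x.\n",
	    loop_id, domain, area, al_pa, topo, sw_cap);
}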
  1495. /*
  1496. * qla2x00_get_retry_cnt
  1497. * Get current firmware login retry count and delay.
  1498. *
  1499. * Input:
  1500. * ha = adapter block pointer.
  1501. * retry_cnt = pointer to login retry count.
  1502. * tov = pointer to login timeout value.
  1503. *
  1504. * Returns:
  1505. * qla2x00 local function return status code.
  1506. *
  1507. * Context:
  1508. * Kernel context.
  1509. */
  1510. int
  1511. qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
  1512. uint16_t *r_a_tov)
  1513. {
  1514. int rval;
  1515. uint16_t ratov;
  1516. mbx_cmd_t mc;
  1517. mbx_cmd_t *mcp = &mc;
  1518. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1049,
  1519. "Entered %s.\n", __func__);
  1520. mcp->mb[0] = MBC_GET_RETRY_COUNT;
  1521. mcp->out_mb = MBX_0;
  1522. mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
  1523. mcp->tov = MBX_TOV_SECONDS;
  1524. mcp->flags = 0;
  1525. rval = qla2x00_mailbox_command(vha, mcp);
  1526. if (rval != QLA_SUCCESS) {
  1527. /*EMPTY*/
  1528. ql_dbg(ql_dbg_mbx, vha, 0x104a,
  1529. "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
  1530. } else {
  1531. /* Convert returned data and check our values. */
  1532. *r_a_tov = mcp->mb[3] / 2;
  1533. ratov = (mcp->mb[3]/2) / 10; /* mb[3] value is in 100ms */
  1534. if (mcp->mb[1] * ratov > (*retry_cnt) * (*tov)) {
  1535. /* Update to the larger values */
  1536. *retry_cnt = (uint8_t)mcp->mb[1];
  1537. *tov = ratov;
  1538. }
  1539. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104b,
  1540. "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov);
  1541. }
  1542. return rval;
  1543. }
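/*
 * Illustrative caller sketch (not part of the original source): retry_cnt
 * and tov are in/out values - the routine only overwrites them when the
 * firmware-derived values are larger - so the caller seeds them with its
 * current defaults first.  The helper name, seed values and 0xffff debug id
 * are illustrative only.
 */
static void qla_example_refresh_retry_values(scsi_qla_host_t *vha)
{
	uint8_t retry_cnt = 8;	/* example default */
	uint8_t tov = 20;	/* example default, seconds */
	uint16_t r_a_tov;

	if (qla2x00_get_retry_cnt(vha, &retry_cnt, &tov, &r_a_tov) ==
	    QLA_SUCCESS)
		ql_dbg(ql_dbg_mbx, vha, 0xffff,
		    "retry_cnt=%d tov=%d r_a_tov=%d.\n",
		    retry_cnt, tov, r_a_tov);
}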
  1544. /*
  1545. * qla2x00_init_firmware
  1546. * Initialize adapter firmware.
  1547. *
  1548. * Input:
  1549. * ha = adapter block pointer.
  1550. * dptr = Initialization control block pointer.
  1551. * size = size of initialization control block.
  1552. * TARGET_QUEUE_LOCK must be released.
  1553. * ADAPTER_STATE_LOCK must be released.
  1554. *
  1555. * Returns:
  1556. * qla2x00 local function return status code.
  1557. *
  1558. * Context:
  1559. * Kernel context.
  1560. */
  1561. int
  1562. qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
  1563. {
  1564. int rval;
  1565. mbx_cmd_t mc;
  1566. mbx_cmd_t *mcp = &mc;
  1567. struct qla_hw_data *ha = vha->hw;
  1568. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104c,
  1569. "Entered %s.\n", __func__);
  1570. if (IS_P3P_TYPE(ha) && ql2xdbwr)
  1571. qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr,
  1572. (0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16)));
  1573. if (ha->flags.npiv_supported)
  1574. mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE;
  1575. else
  1576. mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;
  1577. mcp->mb[1] = 0;
  1578. mcp->mb[2] = MSW(ha->init_cb_dma);
  1579. mcp->mb[3] = LSW(ha->init_cb_dma);
  1580. mcp->mb[6] = MSW(MSD(ha->init_cb_dma));
  1581. mcp->mb[7] = LSW(MSD(ha->init_cb_dma));
  1582. mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
  1583. if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
  1584. mcp->mb[1] = BIT_0;
  1585. mcp->mb[10] = MSW(ha->ex_init_cb_dma);
  1586. mcp->mb[11] = LSW(ha->ex_init_cb_dma);
  1587. mcp->mb[12] = MSW(MSD(ha->ex_init_cb_dma));
  1588. mcp->mb[13] = LSW(MSD(ha->ex_init_cb_dma));
  1589. mcp->mb[14] = sizeof(*ha->ex_init_cb);
  1590. mcp->out_mb |= MBX_14|MBX_13|MBX_12|MBX_11|MBX_10;
  1591. }
  1592. /* 1 and 2 should normally be captured. */
  1593. mcp->in_mb = MBX_2|MBX_1|MBX_0;
  1594. if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
  1595. /* mb3 is additional info about the installed SFP. */
  1596. mcp->in_mb |= MBX_3;
  1597. mcp->buf_size = size;
  1598. mcp->flags = MBX_DMA_OUT;
  1599. mcp->tov = MBX_TOV_SECONDS;
  1600. rval = qla2x00_mailbox_command(vha, mcp);
  1601. if (rval != QLA_SUCCESS) {
  1602. /*EMPTY*/
  1603. ql_dbg(ql_dbg_mbx, vha, 0x104d,
  1604. "Failed=%x mb[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x,.\n",
  1605. rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]);
  1606. } else {
  1607. if (IS_QLA27XX(ha)) {
  1608. if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
  1609. ql_dbg(ql_dbg_mbx, vha, 0x119d,
  1610. "Invalid SFP/Validation Failed\n");
  1611. }
  1612. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104e,
  1613. "Done %s.\n", __func__);
  1614. }
  1615. return rval;
  1616. }
  1617. /*
  1618. * qla2x00_get_port_database
  1619. * Issue normal/enhanced get port database mailbox command
  1620. * and copy device name as necessary.
  1621. *
  1622. * Input:
  1623. * ha = adapter state pointer.
  1624. * dev = structure pointer.
  1625. * opt = enhanced cmd option byte.
  1626. *
  1627. * Returns:
  1628. * qla2x00 local function return status code.
  1629. *
  1630. * Context:
  1631. * Kernel context.
  1632. */
  1633. int
  1634. qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
  1635. {
  1636. int rval;
  1637. mbx_cmd_t mc;
  1638. mbx_cmd_t *mcp = &mc;
  1639. port_database_t *pd;
  1640. struct port_database_24xx *pd24;
  1641. dma_addr_t pd_dma;
  1642. struct qla_hw_data *ha = vha->hw;
  1643. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104f,
  1644. "Entered %s.\n", __func__);
  1645. pd24 = NULL;
  1646. pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
  1647. if (pd == NULL) {
  1648. ql_log(ql_log_warn, vha, 0x1050,
  1649. "Failed to allocate port database structure.\n");
  1650. fcport->query = 0;
  1651. return QLA_MEMORY_ALLOC_FAILED;
  1652. }
  1653. mcp->mb[0] = MBC_GET_PORT_DATABASE;
  1654. if (opt != 0 && !IS_FWI2_CAPABLE(ha))
  1655. mcp->mb[0] = MBC_ENHANCED_GET_PORT_DATABASE;
  1656. mcp->mb[2] = MSW(pd_dma);
  1657. mcp->mb[3] = LSW(pd_dma);
  1658. mcp->mb[6] = MSW(MSD(pd_dma));
  1659. mcp->mb[7] = LSW(MSD(pd_dma));
  1660. mcp->mb[9] = vha->vp_idx;
  1661. mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
  1662. mcp->in_mb = MBX_0;
  1663. if (IS_FWI2_CAPABLE(ha)) {
  1664. mcp->mb[1] = fcport->loop_id;
  1665. mcp->mb[10] = opt;
  1666. mcp->out_mb |= MBX_10|MBX_1;
  1667. mcp->in_mb |= MBX_1;
  1668. } else if (HAS_EXTENDED_IDS(ha)) {
  1669. mcp->mb[1] = fcport->loop_id;
  1670. mcp->mb[10] = opt;
  1671. mcp->out_mb |= MBX_10|MBX_1;
  1672. } else {
  1673. mcp->mb[1] = fcport->loop_id << 8 | opt;
  1674. mcp->out_mb |= MBX_1;
  1675. }
  1676. mcp->buf_size = IS_FWI2_CAPABLE(ha) ?
  1677. PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE;
  1678. mcp->flags = MBX_DMA_IN;
  1679. mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
  1680. rval = qla2x00_mailbox_command(vha, mcp);
  1681. if (rval != QLA_SUCCESS)
  1682. goto gpd_error_out;
  1683. if (IS_FWI2_CAPABLE(ha)) {
  1684. uint64_t zero = 0;
  1685. u8 current_login_state, last_login_state;
  1686. pd24 = (struct port_database_24xx *) pd;
  1687. /* Check for logged in state. */
  1688. if (fcport->fc4f_nvme) {
  1689. current_login_state = pd24->current_login_state >> 4;
  1690. last_login_state = pd24->last_login_state >> 4;
  1691. } else {
  1692. current_login_state = pd24->current_login_state & 0xf;
  1693. last_login_state = pd24->last_login_state & 0xf;
  1694. }
  1695. fcport->current_login_state = pd24->current_login_state;
  1696. fcport->last_login_state = pd24->last_login_state;
  1697. /* Check for logged in state. */
  1698. if (current_login_state != PDS_PRLI_COMPLETE &&
  1699. last_login_state != PDS_PRLI_COMPLETE) {
  1700. ql_dbg(ql_dbg_mbx, vha, 0x119a,
  1701. "Unable to verify login-state (%x/%x) for loop_id %x.\n",
  1702. current_login_state, last_login_state,
  1703. fcport->loop_id);
  1704. rval = QLA_FUNCTION_FAILED;
  1705. if (!fcport->query)
  1706. goto gpd_error_out;
  1707. }
  1708. if (fcport->loop_id == FC_NO_LOOP_ID ||
  1709. (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
  1710. memcmp(fcport->port_name, pd24->port_name, 8))) {
  1711. /* We lost the device mid way. */
  1712. rval = QLA_NOT_LOGGED_IN;
  1713. goto gpd_error_out;
  1714. }
  1715. /* Names are little-endian. */
  1716. memcpy(fcport->node_name, pd24->node_name, WWN_SIZE);
  1717. memcpy(fcport->port_name, pd24->port_name, WWN_SIZE);
  1718. /* Get port_id of device. */
  1719. fcport->d_id.b.domain = pd24->port_id[0];
  1720. fcport->d_id.b.area = pd24->port_id[1];
  1721. fcport->d_id.b.al_pa = pd24->port_id[2];
  1722. fcport->d_id.b.rsvd_1 = 0;
  1723. /* If not target must be initiator or unknown type. */
  1724. if ((pd24->prli_svc_param_word_3[0] & BIT_4) == 0)
  1725. fcport->port_type = FCT_INITIATOR;
  1726. else
  1727. fcport->port_type = FCT_TARGET;
  1728. /* Passback COS information. */
  1729. fcport->supported_classes = (pd24->flags & PDF_CLASS_2) ?
  1730. FC_COS_CLASS2 : FC_COS_CLASS3;
  1731. if (pd24->prli_svc_param_word_3[0] & BIT_7)
  1732. fcport->flags |= FCF_CONF_COMP_SUPPORTED;
  1733. } else {
  1734. uint64_t zero = 0;
  1735. /* Check for logged in state. */
  1736. if (pd->master_state != PD_STATE_PORT_LOGGED_IN &&
  1737. pd->slave_state != PD_STATE_PORT_LOGGED_IN) {
  1738. ql_dbg(ql_dbg_mbx, vha, 0x100a,
  1739. "Unable to verify login-state (%x/%x) - "
  1740. "portid=%02x%02x%02x.\n", pd->master_state,
  1741. pd->slave_state, fcport->d_id.b.domain,
  1742. fcport->d_id.b.area, fcport->d_id.b.al_pa);
  1743. rval = QLA_FUNCTION_FAILED;
  1744. goto gpd_error_out;
  1745. }
  1746. if (fcport->loop_id == FC_NO_LOOP_ID ||
  1747. (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
  1748. memcmp(fcport->port_name, pd->port_name, 8))) {
  1749. /* We lost the device mid way. */
  1750. rval = QLA_NOT_LOGGED_IN;
  1751. goto gpd_error_out;
  1752. }
  1753. /* Names are little-endian. */
  1754. memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
  1755. memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
  1756. /* Get port_id of device. */
  1757. fcport->d_id.b.domain = pd->port_id[0];
  1758. fcport->d_id.b.area = pd->port_id[3];
  1759. fcport->d_id.b.al_pa = pd->port_id[2];
  1760. fcport->d_id.b.rsvd_1 = 0;
  1761. /* If not target must be initiator or unknown type. */
  1762. if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
  1763. fcport->port_type = FCT_INITIATOR;
  1764. else
  1765. fcport->port_type = FCT_TARGET;
  1766. /* Passback COS information. */
  1767. fcport->supported_classes = (pd->options & BIT_4) ?
  1768. FC_COS_CLASS2: FC_COS_CLASS3;
  1769. }
  1770. gpd_error_out:
  1771. dma_pool_free(ha->s_dma_pool, pd, pd_dma);
  1772. fcport->query = 0;
  1773. if (rval != QLA_SUCCESS) {
  1774. ql_dbg(ql_dbg_mbx, vha, 0x1052,
  1775. "Failed=%x mb[0]=%x mb[1]=%x.\n", rval,
  1776. mcp->mb[0], mcp->mb[1]);
  1777. } else {
  1778. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1053,
  1779. "Done %s.\n", __func__);
  1780. }
  1781. return rval;
  1782. }
  1783. /*
  1784. * qla2x00_get_firmware_state
  1785. * Get adapter firmware state.
  1786. *
  1787. * Input:
  1788. * ha = adapter block pointer.
1789. * states = pointer for returned firmware state words.
  1790. * TARGET_QUEUE_LOCK must be released.
  1791. * ADAPTER_STATE_LOCK must be released.
  1792. *
  1793. * Returns:
  1794. * qla2x00 local function return status code.
  1795. *
  1796. * Context:
  1797. * Kernel context.
  1798. */
  1799. int
  1800. qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
  1801. {
  1802. int rval;
  1803. mbx_cmd_t mc;
  1804. mbx_cmd_t *mcp = &mc;
  1805. struct qla_hw_data *ha = vha->hw;
  1806. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054,
  1807. "Entered %s.\n", __func__);
  1808. mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
  1809. mcp->out_mb = MBX_0;
  1810. if (IS_FWI2_CAPABLE(vha->hw))
  1811. mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
  1812. else
  1813. mcp->in_mb = MBX_1|MBX_0;
  1814. mcp->tov = MBX_TOV_SECONDS;
  1815. mcp->flags = 0;
  1816. rval = qla2x00_mailbox_command(vha, mcp);
  1817. /* Return firmware states. */
  1818. states[0] = mcp->mb[1];
  1819. if (IS_FWI2_CAPABLE(vha->hw)) {
  1820. states[1] = mcp->mb[2];
  1821. states[2] = mcp->mb[3]; /* SFP info */
  1822. states[3] = mcp->mb[4];
  1823. states[4] = mcp->mb[5];
  1824. states[5] = mcp->mb[6]; /* DPORT status */
  1825. }
  1826. if (rval != QLA_SUCCESS) {
  1827. /*EMPTY*/
  1828. ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval);
  1829. } else {
  1830. if (IS_QLA27XX(ha)) {
  1831. if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
  1832. ql_dbg(ql_dbg_mbx, vha, 0x119e,
  1833. "Invalid SFP/Validation Failed\n");
  1834. }
  1835. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1056,
  1836. "Done %s.\n", __func__);
  1837. }
  1838. return rval;
  1839. }
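/*
 * Illustrative caller sketch (not part of the original source): the firmware
 * state mailbox fills a small array of state words (up to six on
 * FWI2-capable adapters), with states[0] holding the overall firmware state.
 * The helper name and the 0xffff debug id are placeholders.
 */
static void qla_example_poll_fw_state(scsi_qla_host_t *vha)
{
	uint16_t states[6] = { 0 };

	if (qla2x00_get_firmware_state(vha, states) == QLA_SUCCESS)
		ql_dbg(ql_dbg_mbx, vha, 0xffff,
		    "fw state=%x sfp=%x dport=%x.\n",
		    states[0], states[2], states[5]);
}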
  1840. /*
  1841. * qla2x00_get_port_name
  1842. * Issue get port name mailbox command.
  1843. * Returned name is in big endian format.
  1844. *
  1845. * Input:
  1846. * ha = adapter block pointer.
  1847. * loop_id = loop ID of device.
  1848. * name = pointer for name.
  1849. * TARGET_QUEUE_LOCK must be released.
  1850. * ADAPTER_STATE_LOCK must be released.
  1851. *
  1852. * Returns:
  1853. * qla2x00 local function return status code.
  1854. *
  1855. * Context:
  1856. * Kernel context.
  1857. */
  1858. int
  1859. qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
  1860. uint8_t opt)
  1861. {
  1862. int rval;
  1863. mbx_cmd_t mc;
  1864. mbx_cmd_t *mcp = &mc;
  1865. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1057,
  1866. "Entered %s.\n", __func__);
  1867. mcp->mb[0] = MBC_GET_PORT_NAME;
  1868. mcp->mb[9] = vha->vp_idx;
  1869. mcp->out_mb = MBX_9|MBX_1|MBX_0;
  1870. if (HAS_EXTENDED_IDS(vha->hw)) {
  1871. mcp->mb[1] = loop_id;
  1872. mcp->mb[10] = opt;
  1873. mcp->out_mb |= MBX_10;
  1874. } else {
  1875. mcp->mb[1] = loop_id << 8 | opt;
  1876. }
  1877. mcp->in_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
  1878. mcp->tov = MBX_TOV_SECONDS;
  1879. mcp->flags = 0;
  1880. rval = qla2x00_mailbox_command(vha, mcp);
  1881. if (rval != QLA_SUCCESS) {
  1882. /*EMPTY*/
  1883. ql_dbg(ql_dbg_mbx, vha, 0x1058, "Failed=%x.\n", rval);
  1884. } else {
  1885. if (name != NULL) {
  1886. /* This function returns name in big endian. */
  1887. name[0] = MSB(mcp->mb[2]);
  1888. name[1] = LSB(mcp->mb[2]);
  1889. name[2] = MSB(mcp->mb[3]);
  1890. name[3] = LSB(mcp->mb[3]);
  1891. name[4] = MSB(mcp->mb[6]);
  1892. name[5] = LSB(mcp->mb[6]);
  1893. name[6] = MSB(mcp->mb[7]);
  1894. name[7] = LSB(mcp->mb[7]);
  1895. }
  1896. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1059,
  1897. "Done %s.\n", __func__);
  1898. }
  1899. return rval;
  1900. }
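/*
 * Illustrative caller sketch (not part of the original source): the returned
 * name is big-endian, so it can be handed straight to wwn_to_u64() just as
 * the FA-WWN path above does.  The helper name, the opt value of 0 and the
 * 0xffff debug id are placeholders.
 */
static void qla_example_read_port_name(scsi_qla_host_t *vha, uint16_t loop_id)
{
	uint8_t name[WWN_SIZE];

	if (qla2x00_get_port_name(vha, loop_id, name, 0) != QLA_SUCCESS)
		return;

	ql_dbg(ql_dbg_mbx, vha, 0xffff, "Port name %016llx.\n",
	    wwn_to_u64(name));
}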
  1901. /*
1902. * qla24xx_link_initialize
  1903. * Issue link initialization mailbox command.
  1904. *
  1905. * Input:
  1906. * ha = adapter block pointer.
  1907. * TARGET_QUEUE_LOCK must be released.
  1908. * ADAPTER_STATE_LOCK must be released.
  1909. *
  1910. * Returns:
  1911. * qla2x00 local function return status code.
  1912. *
  1913. * Context:
  1914. * Kernel context.
  1915. */
  1916. int
  1917. qla24xx_link_initialize(scsi_qla_host_t *vha)
  1918. {
  1919. int rval;
  1920. mbx_cmd_t mc;
  1921. mbx_cmd_t *mcp = &mc;
  1922. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1152,
  1923. "Entered %s.\n", __func__);
  1924. if (!IS_FWI2_CAPABLE(vha->hw) || IS_CNA_CAPABLE(vha->hw))
  1925. return QLA_FUNCTION_FAILED;
  1926. mcp->mb[0] = MBC_LINK_INITIALIZATION;
  1927. mcp->mb[1] = BIT_4;
  1928. if (vha->hw->operating_mode == LOOP)
  1929. mcp->mb[1] |= BIT_6;
  1930. else
  1931. mcp->mb[1] |= BIT_5;
  1932. mcp->mb[2] = 0;
  1933. mcp->mb[3] = 0;
  1934. mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
  1935. mcp->in_mb = MBX_0;
  1936. mcp->tov = MBX_TOV_SECONDS;
  1937. mcp->flags = 0;
  1938. rval = qla2x00_mailbox_command(vha, mcp);
  1939. if (rval != QLA_SUCCESS) {
  1940. ql_dbg(ql_dbg_mbx, vha, 0x1153, "Failed=%x.\n", rval);
  1941. } else {
  1942. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1154,
  1943. "Done %s.\n", __func__);
  1944. }
  1945. return rval;
  1946. }
  1947. /*
  1948. * qla2x00_lip_reset
  1949. * Issue LIP reset mailbox command.
  1950. *
  1951. * Input:
  1952. * ha = adapter block pointer.
  1953. * TARGET_QUEUE_LOCK must be released.
  1954. * ADAPTER_STATE_LOCK must be released.
  1955. *
  1956. * Returns:
  1957. * qla2x00 local function return status code.
  1958. *
  1959. * Context:
  1960. * Kernel context.
  1961. */
  1962. int
  1963. qla2x00_lip_reset(scsi_qla_host_t *vha)
  1964. {
  1965. int rval;
  1966. mbx_cmd_t mc;
  1967. mbx_cmd_t *mcp = &mc;
  1968. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105a,
  1969. "Entered %s.\n", __func__);
  1970. if (IS_CNA_CAPABLE(vha->hw)) {
  1971. /* Logout across all FCFs. */
  1972. mcp->mb[0] = MBC_LIP_FULL_LOGIN;
  1973. mcp->mb[1] = BIT_1;
  1974. mcp->mb[2] = 0;
  1975. mcp->out_mb = MBX_2|MBX_1|MBX_0;
  1976. } else if (IS_FWI2_CAPABLE(vha->hw)) {
  1977. mcp->mb[0] = MBC_LIP_FULL_LOGIN;
  1978. if (N2N_TOPO(vha->hw))
  1979. mcp->mb[1] = BIT_4; /* re-init */
  1980. else
  1981. mcp->mb[1] = BIT_6; /* LIP */
  1982. mcp->mb[2] = 0;
  1983. mcp->mb[3] = vha->hw->loop_reset_delay;
  1984. mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
  1985. } else {
  1986. mcp->mb[0] = MBC_LIP_RESET;
  1987. mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
  1988. if (HAS_EXTENDED_IDS(vha->hw)) {
  1989. mcp->mb[1] = 0x00ff;
  1990. mcp->mb[10] = 0;
  1991. mcp->out_mb |= MBX_10;
  1992. } else {
  1993. mcp->mb[1] = 0xff00;
  1994. }
  1995. mcp->mb[2] = vha->hw->loop_reset_delay;
  1996. mcp->mb[3] = 0;
  1997. }
  1998. mcp->in_mb = MBX_0;
  1999. mcp->tov = MBX_TOV_SECONDS;
  2000. mcp->flags = 0;
  2001. rval = qla2x00_mailbox_command(vha, mcp);
  2002. if (rval != QLA_SUCCESS) {
  2003. /*EMPTY*/
  2004. ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval);
  2005. } else {
  2006. /*EMPTY*/
  2007. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105c,
  2008. "Done %s.\n", __func__);
  2009. }
  2010. return rval;
  2011. }
  2012. /*
  2013. * qla2x00_send_sns
  2014. * Send SNS command.
  2015. *
  2016. * Input:
  2017. * ha = adapter block pointer.
  2018. * sns = pointer for command.
  2019. * cmd_size = command size.
  2020. * buf_size = response/command size.
  2021. * TARGET_QUEUE_LOCK must be released.
  2022. * ADAPTER_STATE_LOCK must be released.
  2023. *
  2024. * Returns:
  2025. * qla2x00 local function return status code.
  2026. *
  2027. * Context:
  2028. * Kernel context.
  2029. */
  2030. int
  2031. qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
  2032. uint16_t cmd_size, size_t buf_size)
  2033. {
  2034. int rval;
  2035. mbx_cmd_t mc;
  2036. mbx_cmd_t *mcp = &mc;
  2037. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105d,
  2038. "Entered %s.\n", __func__);
  2039. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105e,
  2040. "Retry cnt=%d ratov=%d total tov=%d.\n",
  2041. vha->hw->retry_count, vha->hw->login_timeout, mcp->tov);
  2042. mcp->mb[0] = MBC_SEND_SNS_COMMAND;
  2043. mcp->mb[1] = cmd_size;
  2044. mcp->mb[2] = MSW(sns_phys_address);
  2045. mcp->mb[3] = LSW(sns_phys_address);
  2046. mcp->mb[6] = MSW(MSD(sns_phys_address));
  2047. mcp->mb[7] = LSW(MSD(sns_phys_address));
  2048. mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
  2049. mcp->in_mb = MBX_0|MBX_1;
  2050. mcp->buf_size = buf_size;
  2051. mcp->flags = MBX_DMA_OUT|MBX_DMA_IN;
  2052. mcp->tov = (vha->hw->login_timeout * 2) + (vha->hw->login_timeout / 2);
  2053. rval = qla2x00_mailbox_command(vha, mcp);
  2054. if (rval != QLA_SUCCESS) {
  2055. /*EMPTY*/
  2056. ql_dbg(ql_dbg_mbx, vha, 0x105f,
  2057. "Failed=%x mb[0]=%x mb[1]=%x.\n",
  2058. rval, mcp->mb[0], mcp->mb[1]);
  2059. } else {
  2060. /*EMPTY*/
  2061. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1060,
  2062. "Done %s.\n", __func__);
  2063. }
  2064. return rval;
  2065. }
  2066. int
  2067. qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
  2068. uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
  2069. {
  2070. int rval;
  2071. struct logio_entry_24xx *lg;
  2072. dma_addr_t lg_dma;
  2073. uint32_t iop[2];
  2074. struct qla_hw_data *ha = vha->hw;
  2075. struct req_que *req;
  2076. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061,
  2077. "Entered %s.\n", __func__);
  2078. if (vha->vp_idx && vha->qpair)
  2079. req = vha->qpair->req;
  2080. else
  2081. req = ha->req_q_map[0];
  2082. lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
  2083. if (lg == NULL) {
  2084. ql_log(ql_log_warn, vha, 0x1062,
  2085. "Failed to allocate login IOCB.\n");
  2086. return QLA_MEMORY_ALLOC_FAILED;
  2087. }
  2088. lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
  2089. lg->entry_count = 1;
  2090. lg->handle = MAKE_HANDLE(req->id, lg->handle);
  2091. lg->nport_handle = cpu_to_le16(loop_id);
  2092. lg->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
  2093. if (opt & BIT_0)
  2094. lg->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
  2095. if (opt & BIT_1)
  2096. lg->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
  2097. lg->port_id[0] = al_pa;
  2098. lg->port_id[1] = area;
  2099. lg->port_id[2] = domain;
  2100. lg->vp_index = vha->vp_idx;
  2101. rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
  2102. (ha->r_a_tov / 10 * 2) + 2);
  2103. if (rval != QLA_SUCCESS) {
  2104. ql_dbg(ql_dbg_mbx, vha, 0x1063,
  2105. "Failed to issue login IOCB (%x).\n", rval);
  2106. } else if (lg->entry_status != 0) {
  2107. ql_dbg(ql_dbg_mbx, vha, 0x1064,
  2108. "Failed to complete IOCB -- error status (%x).\n",
  2109. lg->entry_status);
  2110. rval = QLA_FUNCTION_FAILED;
  2111. } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
  2112. iop[0] = le32_to_cpu(lg->io_parameter[0]);
  2113. iop[1] = le32_to_cpu(lg->io_parameter[1]);
  2114. ql_dbg(ql_dbg_mbx, vha, 0x1065,
  2115. "Failed to complete IOCB -- completion status (%x) "
  2116. "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
  2117. iop[0], iop[1]);
  2118. switch (iop[0]) {
  2119. case LSC_SCODE_PORTID_USED:
  2120. mb[0] = MBS_PORT_ID_USED;
  2121. mb[1] = LSW(iop[1]);
  2122. break;
  2123. case LSC_SCODE_NPORT_USED:
  2124. mb[0] = MBS_LOOP_ID_USED;
  2125. break;
  2126. case LSC_SCODE_NOLINK:
  2127. case LSC_SCODE_NOIOCB:
  2128. case LSC_SCODE_NOXCB:
  2129. case LSC_SCODE_CMD_FAILED:
  2130. case LSC_SCODE_NOFABRIC:
  2131. case LSC_SCODE_FW_NOT_READY:
  2132. case LSC_SCODE_NOT_LOGGED_IN:
  2133. case LSC_SCODE_NOPCB:
  2134. case LSC_SCODE_ELS_REJECT:
  2135. case LSC_SCODE_CMD_PARAM_ERR:
  2136. case LSC_SCODE_NONPORT:
  2137. case LSC_SCODE_LOGGED_IN:
  2138. case LSC_SCODE_NOFLOGI_ACC:
  2139. default:
  2140. mb[0] = MBS_COMMAND_ERROR;
  2141. break;
  2142. }
  2143. } else {
  2144. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1066,
  2145. "Done %s.\n", __func__);
  2146. iop[0] = le32_to_cpu(lg->io_parameter[0]);
  2147. mb[0] = MBS_COMMAND_COMPLETE;
  2148. mb[1] = 0;
  2149. if (iop[0] & BIT_4) {
  2150. if (iop[0] & BIT_8)
  2151. mb[1] |= BIT_1;
  2152. } else
  2153. mb[1] = BIT_0;
  2154. /* Passback COS information. */
  2155. mb[10] = 0;
  2156. if (lg->io_parameter[7] || lg->io_parameter[8])
  2157. mb[10] |= BIT_0; /* Class 2. */
  2158. if (lg->io_parameter[9] || lg->io_parameter[10])
  2159. mb[10] |= BIT_1; /* Class 3. */
  2160. if (lg->io_parameter[0] & cpu_to_le32(BIT_7))
  2161. mb[10] |= BIT_7; /* Confirmed Completion
  2162. * Allowed
  2163. */
  2164. }
  2165. dma_pool_free(ha->s_dma_pool, lg, lg_dma);
  2166. return rval;
  2167. }
  2168. /*
  2169. * qla2x00_login_fabric
  2170. * Issue login fabric port mailbox command.
  2171. *
  2172. * Input:
  2173. * ha = adapter block pointer.
  2174. * loop_id = device loop ID.
  2175. * domain = device domain.
  2176. * area = device area.
  2177. * al_pa = device AL_PA.
  2178. * status = pointer for return status.
  2179. * opt = command options.
  2180. * TARGET_QUEUE_LOCK must be released.
  2181. * ADAPTER_STATE_LOCK must be released.
  2182. *
  2183. * Returns:
  2184. * qla2x00 local function return status code.
  2185. *
  2186. * Context:
  2187. * Kernel context.
  2188. */
  2189. int
  2190. qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
  2191. uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
  2192. {
  2193. int rval;
  2194. mbx_cmd_t mc;
  2195. mbx_cmd_t *mcp = &mc;
  2196. struct qla_hw_data *ha = vha->hw;
  2197. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1067,
  2198. "Entered %s.\n", __func__);
  2199. mcp->mb[0] = MBC_LOGIN_FABRIC_PORT;
  2200. mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
  2201. if (HAS_EXTENDED_IDS(ha)) {
  2202. mcp->mb[1] = loop_id;
  2203. mcp->mb[10] = opt;
  2204. mcp->out_mb |= MBX_10;
  2205. } else {
  2206. mcp->mb[1] = (loop_id << 8) | opt;
  2207. }
  2208. mcp->mb[2] = domain;
  2209. mcp->mb[3] = area << 8 | al_pa;
  2210. mcp->in_mb = MBX_7|MBX_6|MBX_2|MBX_1|MBX_0;
  2211. mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
  2212. mcp->flags = 0;
  2213. rval = qla2x00_mailbox_command(vha, mcp);
  2214. /* Return mailbox statuses. */
  2215. if (mb != NULL) {
  2216. mb[0] = mcp->mb[0];
  2217. mb[1] = mcp->mb[1];
  2218. mb[2] = mcp->mb[2];
  2219. mb[6] = mcp->mb[6];
  2220. mb[7] = mcp->mb[7];
  2221. /* COS retrieved from Get-Port-Database mailbox command. */
  2222. mb[10] = 0;
  2223. }
  2224. if (rval != QLA_SUCCESS) {
  2225. /* RLU tmp code: need to change main mailbox_command function to
  2226. * return ok even when the mailbox completion value is not
  2227. * SUCCESS. The caller needs to be responsible to interpret
  2228. * the return values of this mailbox command if we're not
  2229. * to change too much of the existing code.
  2230. */
  2231. if (mcp->mb[0] == 0x4001 || mcp->mb[0] == 0x4002 ||
  2232. mcp->mb[0] == 0x4003 || mcp->mb[0] == 0x4005 ||
  2233. mcp->mb[0] == 0x4006)
  2234. rval = QLA_SUCCESS;
  2235. /*EMPTY*/
  2236. ql_dbg(ql_dbg_mbx, vha, 0x1068,
  2237. "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
  2238. rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
  2239. } else {
  2240. /*EMPTY*/
  2241. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1069,
  2242. "Done %s.\n", __func__);
  2243. }
  2244. return rval;
  2245. }
  2246. /*
  2247. * qla2x00_login_local_device
  2248. * Issue login loop port mailbox command.
  2249. *
  2250. * Input:
  2251. * ha = adapter block pointer.
  2252. * loop_id = device loop ID.
  2253. * opt = command options.
  2254. *
  2255. * Returns:
  2256. * Return status code.
  2257. *
  2258. * Context:
  2259. * Kernel context.
  2260. *
  2261. */
  2262. int
  2263. qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
  2264. uint16_t *mb_ret, uint8_t opt)
  2265. {
  2266. int rval;
  2267. mbx_cmd_t mc;
  2268. mbx_cmd_t *mcp = &mc;
  2269. struct qla_hw_data *ha = vha->hw;
  2270. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106a,
  2271. "Entered %s.\n", __func__);
  2272. if (IS_FWI2_CAPABLE(ha))
  2273. return qla24xx_login_fabric(vha, fcport->loop_id,
  2274. fcport->d_id.b.domain, fcport->d_id.b.area,
  2275. fcport->d_id.b.al_pa, mb_ret, opt);
  2276. mcp->mb[0] = MBC_LOGIN_LOOP_PORT;
  2277. if (HAS_EXTENDED_IDS(ha))
  2278. mcp->mb[1] = fcport->loop_id;
  2279. else
  2280. mcp->mb[1] = fcport->loop_id << 8;
  2281. mcp->mb[2] = opt;
  2282. mcp->out_mb = MBX_2|MBX_1|MBX_0;
  2283. mcp->in_mb = MBX_7|MBX_6|MBX_1|MBX_0;
  2284. mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
  2285. mcp->flags = 0;
  2286. rval = qla2x00_mailbox_command(vha, mcp);
  2287. /* Return mailbox statuses. */
  2288. if (mb_ret != NULL) {
  2289. mb_ret[0] = mcp->mb[0];
  2290. mb_ret[1] = mcp->mb[1];
  2291. mb_ret[6] = mcp->mb[6];
  2292. mb_ret[7] = mcp->mb[7];
  2293. }
  2294. if (rval != QLA_SUCCESS) {
  2295. /* AV tmp code: need to change main mailbox_command function to
  2296. * return ok even when the mailbox completion value is not
  2297. * SUCCESS. The caller needs to be responsible to interpret
  2298. * the return values of this mailbox command if we're not
  2299. * to change too much of the existing code.
  2300. */
  2301. if (mcp->mb[0] == 0x4005 || mcp->mb[0] == 0x4006)
  2302. rval = QLA_SUCCESS;
  2303. ql_dbg(ql_dbg_mbx, vha, 0x106b,
  2304. "Failed=%x mb[0]=%x mb[1]=%x mb[6]=%x mb[7]=%x.\n",
  2305. rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]);
  2306. } else {
  2307. /*EMPTY*/
  2308. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106c,
  2309. "Done %s.\n", __func__);
  2310. }
  2311. return (rval);
  2312. }
  2313. int
  2314. qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
  2315. uint8_t area, uint8_t al_pa)
  2316. {
  2317. int rval;
  2318. struct logio_entry_24xx *lg;
  2319. dma_addr_t lg_dma;
  2320. struct qla_hw_data *ha = vha->hw;
  2321. struct req_que *req;
  2322. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d,
  2323. "Entered %s.\n", __func__);
  2324. lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
  2325. if (lg == NULL) {
  2326. ql_log(ql_log_warn, vha, 0x106e,
  2327. "Failed to allocate logout IOCB.\n");
  2328. return QLA_MEMORY_ALLOC_FAILED;
  2329. }
  2330. req = vha->req;
  2331. lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
  2332. lg->entry_count = 1;
  2333. lg->handle = MAKE_HANDLE(req->id, lg->handle);
  2334. lg->nport_handle = cpu_to_le16(loop_id);
  2335. lg->control_flags =
  2336. cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO|
  2337. LCF_FREE_NPORT);
  2338. lg->port_id[0] = al_pa;
  2339. lg->port_id[1] = area;
  2340. lg->port_id[2] = domain;
  2341. lg->vp_index = vha->vp_idx;
  2342. rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
  2343. (ha->r_a_tov / 10 * 2) + 2);
  2344. if (rval != QLA_SUCCESS) {
  2345. ql_dbg(ql_dbg_mbx, vha, 0x106f,
  2346. "Failed to issue logout IOCB (%x).\n", rval);
  2347. } else if (lg->entry_status != 0) {
  2348. ql_dbg(ql_dbg_mbx, vha, 0x1070,
  2349. "Failed to complete IOCB -- error status (%x).\n",
  2350. lg->entry_status);
  2351. rval = QLA_FUNCTION_FAILED;
  2352. } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
  2353. ql_dbg(ql_dbg_mbx, vha, 0x1071,
  2354. "Failed to complete IOCB -- completion status (%x) "
  2355. "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
  2356. le32_to_cpu(lg->io_parameter[0]),
  2357. le32_to_cpu(lg->io_parameter[1]));
  2358. } else {
  2359. /*EMPTY*/
  2360. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1072,
  2361. "Done %s.\n", __func__);
  2362. }
  2363. dma_pool_free(ha->s_dma_pool, lg, lg_dma);
  2364. return rval;
  2365. }
  2366. /*
  2367. * qla2x00_fabric_logout
  2368. * Issue logout fabric port mailbox command.
  2369. *
  2370. * Input:
  2371. * ha = adapter block pointer.
  2372. * loop_id = device loop ID.
  2373. * TARGET_QUEUE_LOCK must be released.
  2374. * ADAPTER_STATE_LOCK must be released.
  2375. *
  2376. * Returns:
  2377. * qla2x00 local function return status code.
  2378. *
  2379. * Context:
  2380. * Kernel context.
  2381. */
  2382. int
  2383. qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
  2384. uint8_t area, uint8_t al_pa)
  2385. {
  2386. int rval;
  2387. mbx_cmd_t mc;
  2388. mbx_cmd_t *mcp = &mc;
  2389. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1073,
  2390. "Entered %s.\n", __func__);
  2391. mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT;
  2392. mcp->out_mb = MBX_1|MBX_0;
  2393. if (HAS_EXTENDED_IDS(vha->hw)) {
  2394. mcp->mb[1] = loop_id;
  2395. mcp->mb[10] = 0;
  2396. mcp->out_mb |= MBX_10;
  2397. } else {
  2398. mcp->mb[1] = loop_id << 8;
  2399. }
  2400. mcp->in_mb = MBX_1|MBX_0;
  2401. mcp->tov = MBX_TOV_SECONDS;
  2402. mcp->flags = 0;
  2403. rval = qla2x00_mailbox_command(vha, mcp);
  2404. if (rval != QLA_SUCCESS) {
  2405. /*EMPTY*/
  2406. ql_dbg(ql_dbg_mbx, vha, 0x1074,
  2407. "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]);
  2408. } else {
  2409. /*EMPTY*/
  2410. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1075,
  2411. "Done %s.\n", __func__);
  2412. }
  2413. return rval;
  2414. }
  2415. /*
  2416. * qla2x00_full_login_lip
  2417. * Issue full login LIP mailbox command.
  2418. *
  2419. * Input:
  2420. * ha = adapter block pointer.
  2421. * TARGET_QUEUE_LOCK must be released.
  2422. * ADAPTER_STATE_LOCK must be released.
  2423. *
  2424. * Returns:
  2425. * qla2x00 local function return status code.
  2426. *
  2427. * Context:
  2428. * Kernel context.
  2429. */
  2430. int
  2431. qla2x00_full_login_lip(scsi_qla_host_t *vha)
  2432. {
  2433. int rval;
  2434. mbx_cmd_t mc;
  2435. mbx_cmd_t *mcp = &mc;
  2436. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1076,
  2437. "Entered %s.\n", __func__);
  2438. mcp->mb[0] = MBC_LIP_FULL_LOGIN;
  2439. mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_3 : 0;
  2440. mcp->mb[2] = 0;
  2441. mcp->mb[3] = 0;
  2442. mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
  2443. mcp->in_mb = MBX_0;
  2444. mcp->tov = MBX_TOV_SECONDS;
  2445. mcp->flags = 0;
  2446. rval = qla2x00_mailbox_command(vha, mcp);
  2447. if (rval != QLA_SUCCESS) {
  2448. /*EMPTY*/
  2449. ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval);
  2450. } else {
  2451. /*EMPTY*/
  2452. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1078,
  2453. "Done %s.\n", __func__);
  2454. }
  2455. return rval;
  2456. }
  2457. /*
  2458. * qla2x00_get_id_list
  2459. *
  2460. * Input:
  2461. * ha = adapter block pointer.
  2462. *
  2463. * Returns:
  2464. * qla2x00 local function return status code.
  2465. *
  2466. * Context:
  2467. * Kernel context.
  2468. */
  2469. int
  2470. qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
  2471. uint16_t *entries)
  2472. {
  2473. int rval;
  2474. mbx_cmd_t mc;
  2475. mbx_cmd_t *mcp = &mc;
  2476. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1079,
  2477. "Entered %s.\n", __func__);
  2478. if (id_list == NULL)
  2479. return QLA_FUNCTION_FAILED;
  2480. mcp->mb[0] = MBC_GET_ID_LIST;
  2481. mcp->out_mb = MBX_0;
  2482. if (IS_FWI2_CAPABLE(vha->hw)) {
  2483. mcp->mb[2] = MSW(id_list_dma);
  2484. mcp->mb[3] = LSW(id_list_dma);
  2485. mcp->mb[6] = MSW(MSD(id_list_dma));
  2486. mcp->mb[7] = LSW(MSD(id_list_dma));
  2487. mcp->mb[8] = 0;
  2488. mcp->mb[9] = vha->vp_idx;
  2489. mcp->out_mb |= MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2;
  2490. } else {
  2491. mcp->mb[1] = MSW(id_list_dma);
  2492. mcp->mb[2] = LSW(id_list_dma);
  2493. mcp->mb[3] = MSW(MSD(id_list_dma));
  2494. mcp->mb[6] = LSW(MSD(id_list_dma));
  2495. mcp->out_mb |= MBX_6|MBX_3|MBX_2|MBX_1;
  2496. }
  2497. mcp->in_mb = MBX_1|MBX_0;
  2498. mcp->tov = MBX_TOV_SECONDS;
  2499. mcp->flags = 0;
  2500. rval = qla2x00_mailbox_command(vha, mcp);
  2501. if (rval != QLA_SUCCESS) {
  2502. /*EMPTY*/
  2503. ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval);
  2504. } else {
  2505. *entries = mcp->mb[1];
  2506. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107b,
  2507. "Done %s.\n", __func__);
  2508. }
  2509. return rval;
  2510. }
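/*
 * Illustrative caller sketch (not part of the original source): the caller
 * supplies a DMA-able buffer large enough for the firmware's ID list and
 * receives back the number of valid entries.  The buffer size, the helper
 * name and the 0xffff debug id are illustrative assumptions; this sketch
 * also assumes the usual ha->pdev PCI device for the coherent allocation.
 */
static int qla_example_fetch_id_list(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t entries = 0;
	dma_addr_t id_list_dma;
	void *id_list;
	int rval;

	id_list = dma_alloc_coherent(&ha->pdev->dev, 4096, &id_list_dma,
	    GFP_KERNEL);
	if (!id_list)
		return QLA_MEMORY_ALLOC_FAILED;

	rval = qla2x00_get_id_list(vha, id_list, id_list_dma, &entries);
	if (rval == QLA_SUCCESS)
		ql_dbg(ql_dbg_mbx, vha, 0xffff,
		    "%d ID list entries reported.\n", entries);

	dma_free_coherent(&ha->pdev->dev, 4096, id_list, id_list_dma);
	return rval;
}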
  2511. /*
  2512. * qla2x00_get_resource_cnts
  2513. * Get current firmware resource counts.
  2514. *
  2515. * Input:
  2516. * ha = adapter block pointer.
  2517. *
  2518. * Returns:
  2519. * qla2x00 local function return status code.
  2520. *
  2521. * Context:
  2522. * Kernel context.
  2523. */
  2524. int
  2525. qla2x00_get_resource_cnts(scsi_qla_host_t *vha)
  2526. {
  2527. struct qla_hw_data *ha = vha->hw;
  2528. int rval;
  2529. mbx_cmd_t mc;
  2530. mbx_cmd_t *mcp = &mc;
  2531. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107c,
  2532. "Entered %s.\n", __func__);
  2533. mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
  2534. mcp->out_mb = MBX_0;
  2535. mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
  2536. if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw) || IS_QLA27XX(vha->hw))
  2537. mcp->in_mb |= MBX_12;
  2538. mcp->tov = MBX_TOV_SECONDS;
  2539. mcp->flags = 0;
  2540. rval = qla2x00_mailbox_command(vha, mcp);
  2541. if (rval != QLA_SUCCESS) {
  2542. /*EMPTY*/
  2543. ql_dbg(ql_dbg_mbx, vha, 0x107d,
  2544. "Failed mb[0]=%x.\n", mcp->mb[0]);
  2545. } else {
  2546. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107e,
  2547. "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x "
  2548. "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2],
  2549. mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10],
  2550. mcp->mb[11], mcp->mb[12]);
  2551. ha->orig_fw_tgt_xcb_count = mcp->mb[1];
  2552. ha->cur_fw_tgt_xcb_count = mcp->mb[2];
  2553. ha->cur_fw_xcb_count = mcp->mb[3];
  2554. ha->orig_fw_xcb_count = mcp->mb[6];
  2555. ha->cur_fw_iocb_count = mcp->mb[7];
  2556. ha->orig_fw_iocb_count = mcp->mb[10];
  2557. if (ha->flags.npiv_supported)
  2558. ha->max_npiv_vports = mcp->mb[11];
  2559. if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
  2560. ha->fw_max_fcf_count = mcp->mb[12];
  2561. }
  2562. return (rval);
  2563. }
  2564. /*
  2565. * qla2x00_get_fcal_position_map
  2566. * Get FCAL (LILP) position map using mailbox command
  2567. *
  2568. * Input:
  2569. * ha = adapter state pointer.
  2570. * pos_map = buffer pointer (can be NULL).
  2571. *
  2572. * Returns:
  2573. * qla2x00 local function return status code.
  2574. *
  2575. * Context:
  2576. * Kernel context.
  2577. */
  2578. int
  2579. qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
  2580. {
  2581. int rval;
  2582. mbx_cmd_t mc;
  2583. mbx_cmd_t *mcp = &mc;
  2584. char *pmap;
  2585. dma_addr_t pmap_dma;
  2586. struct qla_hw_data *ha = vha->hw;
  2587. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107f,
  2588. "Entered %s.\n", __func__);
  2589. pmap = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
  2590. if (pmap == NULL) {
  2591. ql_log(ql_log_warn, vha, 0x1080,
  2592. "Memory alloc failed.\n");
  2593. return QLA_MEMORY_ALLOC_FAILED;
  2594. }
  2595. mcp->mb[0] = MBC_GET_FC_AL_POSITION_MAP;
  2596. mcp->mb[2] = MSW(pmap_dma);
  2597. mcp->mb[3] = LSW(pmap_dma);
  2598. mcp->mb[6] = MSW(MSD(pmap_dma));
  2599. mcp->mb[7] = LSW(MSD(pmap_dma));
  2600. mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
  2601. mcp->in_mb = MBX_1|MBX_0;
  2602. mcp->buf_size = FCAL_MAP_SIZE;
  2603. mcp->flags = MBX_DMA_IN;
  2604. mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
  2605. rval = qla2x00_mailbox_command(vha, mcp);
  2606. if (rval == QLA_SUCCESS) {
  2607. ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1081,
  2608. "mb0/mb1=%x/%X FC/AL position map size (%x).\n",
  2609. mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]);
  2610. ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d,
  2611. pmap, pmap[0] + 1);
  2612. if (pos_map)
  2613. memcpy(pos_map, pmap, FCAL_MAP_SIZE);
  2614. }
  2615. dma_pool_free(ha->s_dma_pool, pmap, pmap_dma);
  2616. if (rval != QLA_SUCCESS) {
  2617. ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval);
  2618. } else {
  2619. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1083,
  2620. "Done %s.\n", __func__);
  2621. }
  2622. return rval;
  2623. }
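/*
 * Note (explanatory, not driver code): mailbox commands that take a DMA
 * buffer pass its 64-bit address through four 16-bit mailbox registers.
 * With LSD()/MSD() selecting the low/high 32 bits and LSW()/MSW() the
 * low/high 16 bits of those, the convention used above and throughout
 * this file is:
 *
 *	mcp->mb[3] = LSW(LSD(dma));   bits 15:0
 *	mcp->mb[2] = MSW(LSD(dma));   bits 31:16
 *	mcp->mb[7] = LSW(MSD(dma));   bits 47:32
 *	mcp->mb[6] = MSW(MSD(dma));   bits 63:48
 *
 * Some call sites apply LSW()/MSW() directly to the dma_addr_t for the
 * low word, which is equivalent for the low 32 bits.
 */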
  2624. /*
  2625. * qla2x00_get_link_status
  2626. *
2627. * Input:
2628. * vha = adapter state pointer.
2629. * loop_id = device loop ID.
2630. * stats = pointer to link statistics return buffer.
2631. * stats_dma = DMA address of the return buffer.
2632. *
2633. * Returns:
2634. * qla2x00 local function return status code.
  2636. */
  2637. int
  2638. qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
  2639. struct link_statistics *stats, dma_addr_t stats_dma)
  2640. {
  2641. int rval;
  2642. mbx_cmd_t mc;
  2643. mbx_cmd_t *mcp = &mc;
  2644. uint32_t *iter = (void *)stats;
  2645. ushort dwords = offsetof(typeof(*stats), link_up_cnt)/sizeof(*iter);
  2646. struct qla_hw_data *ha = vha->hw;
  2647. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1084,
  2648. "Entered %s.\n", __func__);
  2649. mcp->mb[0] = MBC_GET_LINK_STATUS;
  2650. mcp->mb[2] = MSW(LSD(stats_dma));
  2651. mcp->mb[3] = LSW(LSD(stats_dma));
  2652. mcp->mb[6] = MSW(MSD(stats_dma));
  2653. mcp->mb[7] = LSW(MSD(stats_dma));
  2654. mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
  2655. mcp->in_mb = MBX_0;
  2656. if (IS_FWI2_CAPABLE(ha)) {
  2657. mcp->mb[1] = loop_id;
  2658. mcp->mb[4] = 0;
  2659. mcp->mb[10] = 0;
  2660. mcp->out_mb |= MBX_10|MBX_4|MBX_1;
  2661. mcp->in_mb |= MBX_1;
  2662. } else if (HAS_EXTENDED_IDS(ha)) {
  2663. mcp->mb[1] = loop_id;
  2664. mcp->mb[10] = 0;
  2665. mcp->out_mb |= MBX_10|MBX_1;
  2666. } else {
  2667. mcp->mb[1] = loop_id << 8;
  2668. mcp->out_mb |= MBX_1;
  2669. }
  2670. mcp->tov = MBX_TOV_SECONDS;
  2671. mcp->flags = IOCTL_CMD;
  2672. rval = qla2x00_mailbox_command(vha, mcp);
  2673. if (rval == QLA_SUCCESS) {
  2674. if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
  2675. ql_dbg(ql_dbg_mbx, vha, 0x1085,
  2676. "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
  2677. rval = QLA_FUNCTION_FAILED;
  2678. } else {
  2679. /* Re-endianize - firmware data is le32. */
  2680. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1086,
  2681. "Done %s.\n", __func__);
  2682. for ( ; dwords--; iter++)
  2683. le32_to_cpus(iter);
  2684. }
  2685. } else {
  2686. /* Failed. */
  2687. ql_dbg(ql_dbg_mbx, vha, 0x1087, "Failed=%x.\n", rval);
  2688. }
  2689. return rval;
  2690. }
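/*
 * Usage sketch (illustrative only): fetch the legacy link statistics
 * into a coherent DMA buffer.  Error handling is trimmed and the buffer
 * lifetime is the caller's responsibility.
 *
 *	struct link_statistics *stats;
 *	dma_addr_t stats_dma;
 *
 *	stats = dma_alloc_coherent(&vha->hw->pdev->dev, sizeof(*stats),
 *	    &stats_dma, GFP_KERNEL);
 *	if (stats && qla2x00_get_link_status(vha, vha->loop_id, stats,
 *	    stats_dma) == QLA_SUCCESS)
 *		... stats->link_fail_cnt etc. are now in CPU byte order ...
 *	if (stats)
 *		dma_free_coherent(&vha->hw->pdev->dev, sizeof(*stats),
 *		    stats, stats_dma);
 */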
  2691. int
  2692. qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
  2693. dma_addr_t stats_dma, uint16_t options)
  2694. {
  2695. int rval;
  2696. mbx_cmd_t mc;
  2697. mbx_cmd_t *mcp = &mc;
  2698. uint32_t *iter, dwords;
  2699. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088,
  2700. "Entered %s.\n", __func__);
  2701. memset(&mc, 0, sizeof(mc));
  2702. mc.mb[0] = MBC_GET_LINK_PRIV_STATS;
  2703. mc.mb[2] = MSW(stats_dma);
  2704. mc.mb[3] = LSW(stats_dma);
  2705. mc.mb[6] = MSW(MSD(stats_dma));
  2706. mc.mb[7] = LSW(MSD(stats_dma));
  2707. mc.mb[8] = sizeof(struct link_statistics) / 4;
  2708. mc.mb[9] = cpu_to_le16(vha->vp_idx);
  2709. mc.mb[10] = cpu_to_le16(options);
  2710. rval = qla24xx_send_mb_cmd(vha, &mc);
  2711. if (rval == QLA_SUCCESS) {
  2712. if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
  2713. ql_dbg(ql_dbg_mbx, vha, 0x1089,
  2714. "Failed mb[0]=%x.\n", mcp->mb[0]);
  2715. rval = QLA_FUNCTION_FAILED;
  2716. } else {
  2717. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108a,
  2718. "Done %s.\n", __func__);
  2719. /* Re-endianize - firmware data is le32. */
  2720. dwords = sizeof(struct link_statistics) / 4;
  2721. iter = &stats->link_fail_cnt;
  2722. for ( ; dwords--; iter++)
  2723. le32_to_cpus(iter);
  2724. }
  2725. } else {
  2726. /* Failed. */
  2727. ql_dbg(ql_dbg_mbx, vha, 0x108b, "Failed=%x.\n", rval);
  2728. }
  2729. return rval;
  2730. }
  2731. int
  2732. qla24xx_abort_command(srb_t *sp)
  2733. {
  2734. int rval;
  2735. unsigned long flags = 0;
  2736. struct abort_entry_24xx *abt;
  2737. dma_addr_t abt_dma;
  2738. uint32_t handle;
  2739. fc_port_t *fcport = sp->fcport;
  2740. struct scsi_qla_host *vha = fcport->vha;
  2741. struct qla_hw_data *ha = vha->hw;
  2742. struct req_que *req = vha->req;
  2743. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
  2744. "Entered %s.\n", __func__);
  2745. if (vha->flags.qpairs_available && sp->qpair)
  2746. req = sp->qpair->req;
  2747. if (ql2xasynctmfenable)
  2748. return qla24xx_async_abort_command(sp);
  2749. spin_lock_irqsave(&ha->hardware_lock, flags);
  2750. for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
  2751. if (req->outstanding_cmds[handle] == sp)
  2752. break;
  2753. }
  2754. spin_unlock_irqrestore(&ha->hardware_lock, flags);
  2755. if (handle == req->num_outstanding_cmds) {
  2756. /* Command not found. */
  2757. return QLA_FUNCTION_FAILED;
  2758. }
  2759. abt = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma);
  2760. if (abt == NULL) {
  2761. ql_log(ql_log_warn, vha, 0x108d,
  2762. "Failed to allocate abort IOCB.\n");
  2763. return QLA_MEMORY_ALLOC_FAILED;
  2764. }
  2765. abt->entry_type = ABORT_IOCB_TYPE;
  2766. abt->entry_count = 1;
  2767. abt->handle = MAKE_HANDLE(req->id, abt->handle);
  2768. abt->nport_handle = cpu_to_le16(fcport->loop_id);
  2769. abt->handle_to_abort = MAKE_HANDLE(req->id, handle);
  2770. abt->port_id[0] = fcport->d_id.b.al_pa;
  2771. abt->port_id[1] = fcport->d_id.b.area;
  2772. abt->port_id[2] = fcport->d_id.b.domain;
  2773. abt->vp_index = fcport->vha->vp_idx;
  2774. abt->req_que_no = cpu_to_le16(req->id);
  2775. rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0);
  2776. if (rval != QLA_SUCCESS) {
  2777. ql_dbg(ql_dbg_mbx, vha, 0x108e,
  2778. "Failed to issue IOCB (%x).\n", rval);
  2779. } else if (abt->entry_status != 0) {
  2780. ql_dbg(ql_dbg_mbx, vha, 0x108f,
  2781. "Failed to complete IOCB -- error status (%x).\n",
  2782. abt->entry_status);
  2783. rval = QLA_FUNCTION_FAILED;
  2784. } else if (abt->nport_handle != cpu_to_le16(0)) {
  2785. ql_dbg(ql_dbg_mbx, vha, 0x1090,
  2786. "Failed to complete IOCB -- completion status (%x).\n",
  2787. le16_to_cpu(abt->nport_handle));
  2788. if (abt->nport_handle == CS_IOCB_ERROR)
  2789. rval = QLA_FUNCTION_PARAMETER_ERROR;
  2790. else
  2791. rval = QLA_FUNCTION_FAILED;
  2792. } else {
  2793. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091,
  2794. "Done %s.\n", __func__);
  2795. }
  2796. dma_pool_free(ha->s_dma_pool, abt, abt_dma);
  2797. return rval;
  2798. }
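/*
 * Note (illustrative sketch, not driver code): the abort IOCB must carry
 * the firmware handle under which the original command was queued; the
 * loop above recovers it from req->outstanding_cmds[].  Handle 0 is not
 * produced by this search since handles start at 1.  The same lookup as
 * a stand-alone helper (name hypothetical):
 *
 *	static uint32_t qla_find_outstanding_handle(struct req_que *req,
 *	    srb_t *sp)
 *	{
 *		uint32_t h;
 *
 *		for (h = 1; h < req->num_outstanding_cmds; h++)
 *			if (req->outstanding_cmds[h] == sp)
 *				return h;
 *		return 0;
 *	}
 */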
  2799. struct tsk_mgmt_cmd {
  2800. union {
  2801. struct tsk_mgmt_entry tsk;
  2802. struct sts_entry_24xx sts;
  2803. } p;
  2804. };
  2805. static int
  2806. __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
  2807. uint64_t l, int tag)
  2808. {
  2809. int rval, rval2;
  2810. struct tsk_mgmt_cmd *tsk;
  2811. struct sts_entry_24xx *sts;
  2812. dma_addr_t tsk_dma;
  2813. scsi_qla_host_t *vha;
  2814. struct qla_hw_data *ha;
  2815. struct req_que *req;
  2816. struct rsp_que *rsp;
  2817. struct qla_qpair *qpair;
  2818. vha = fcport->vha;
  2819. ha = vha->hw;
  2820. req = vha->req;
  2821. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092,
  2822. "Entered %s.\n", __func__);
  2823. if (vha->vp_idx && vha->qpair) {
  2824. /* NPIV port */
  2825. qpair = vha->qpair;
  2826. rsp = qpair->rsp;
  2827. req = qpair->req;
  2828. } else {
  2829. rsp = req->rsp;
  2830. }
  2831. tsk = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
  2832. if (tsk == NULL) {
  2833. ql_log(ql_log_warn, vha, 0x1093,
  2834. "Failed to allocate task management IOCB.\n");
  2835. return QLA_MEMORY_ALLOC_FAILED;
  2836. }
  2837. tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE;
  2838. tsk->p.tsk.entry_count = 1;
  2839. tsk->p.tsk.handle = MAKE_HANDLE(req->id, tsk->p.tsk.handle);
  2840. tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id);
  2841. tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
  2842. tsk->p.tsk.control_flags = cpu_to_le32(type);
  2843. tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa;
  2844. tsk->p.tsk.port_id[1] = fcport->d_id.b.area;
  2845. tsk->p.tsk.port_id[2] = fcport->d_id.b.domain;
  2846. tsk->p.tsk.vp_index = fcport->vha->vp_idx;
  2847. if (type == TCF_LUN_RESET) {
  2848. int_to_scsilun(l, &tsk->p.tsk.lun);
  2849. host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun,
  2850. sizeof(tsk->p.tsk.lun));
  2851. }
  2852. sts = &tsk->p.sts;
  2853. rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0);
  2854. if (rval != QLA_SUCCESS) {
  2855. ql_dbg(ql_dbg_mbx, vha, 0x1094,
  2856. "Failed to issue %s reset IOCB (%x).\n", name, rval);
  2857. } else if (sts->entry_status != 0) {
  2858. ql_dbg(ql_dbg_mbx, vha, 0x1095,
  2859. "Failed to complete IOCB -- error status (%x).\n",
  2860. sts->entry_status);
  2861. rval = QLA_FUNCTION_FAILED;
  2862. } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
  2863. ql_dbg(ql_dbg_mbx, vha, 0x1096,
  2864. "Failed to complete IOCB -- completion status (%x).\n",
  2865. le16_to_cpu(sts->comp_status));
  2866. rval = QLA_FUNCTION_FAILED;
  2867. } else if (le16_to_cpu(sts->scsi_status) &
  2868. SS_RESPONSE_INFO_LEN_VALID) {
  2869. if (le32_to_cpu(sts->rsp_data_len) < 4) {
  2870. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1097,
  2871. "Ignoring inconsistent data length -- not enough "
  2872. "response info (%d).\n",
  2873. le32_to_cpu(sts->rsp_data_len));
  2874. } else if (sts->data[3]) {
  2875. ql_dbg(ql_dbg_mbx, vha, 0x1098,
  2876. "Failed to complete IOCB -- response (%x).\n",
  2877. sts->data[3]);
  2878. rval = QLA_FUNCTION_FAILED;
  2879. }
  2880. }
  2881. /* Issue marker IOCB. */
  2882. rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
  2883. type == TCF_LUN_RESET ? MK_SYNC_ID_LUN: MK_SYNC_ID);
  2884. if (rval2 != QLA_SUCCESS) {
  2885. ql_dbg(ql_dbg_mbx, vha, 0x1099,
  2886. "Failed to issue marker IOCB (%x).\n", rval2);
  2887. } else {
  2888. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109a,
  2889. "Done %s.\n", __func__);
  2890. }
  2891. dma_pool_free(ha->s_dma_pool, tsk, tsk_dma);
  2892. return rval;
  2893. }
  2894. int
  2895. qla24xx_abort_target(struct fc_port *fcport, uint64_t l, int tag)
  2896. {
  2897. struct qla_hw_data *ha = fcport->vha->hw;
  2898. if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
  2899. return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);
  2900. return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag);
  2901. }
  2902. int
  2903. qla24xx_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
  2904. {
  2905. struct qla_hw_data *ha = fcport->vha->hw;
  2906. if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
  2907. return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
  2908. return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag);
  2909. }
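/*
 * Usage sketch (illustrative only, not how the driver necessarily calls
 * these): issue a LUN reset and escalate to a target reset on failure.
 * "lun" is a placeholder; the tag argument is passed through unchanged
 * by the wrappers above.
 *
 *	if (qla24xx_lun_reset(fcport, lun, 0) != QLA_SUCCESS)
 *		qla24xx_abort_target(fcport, lun, 0);
 */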
  2910. int
  2911. qla2x00_system_error(scsi_qla_host_t *vha)
  2912. {
  2913. int rval;
  2914. mbx_cmd_t mc;
  2915. mbx_cmd_t *mcp = &mc;
  2916. struct qla_hw_data *ha = vha->hw;
  2917. if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha))
  2918. return QLA_FUNCTION_FAILED;
  2919. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109b,
  2920. "Entered %s.\n", __func__);
  2921. mcp->mb[0] = MBC_GEN_SYSTEM_ERROR;
  2922. mcp->out_mb = MBX_0;
  2923. mcp->in_mb = MBX_0;
  2924. mcp->tov = 5;
  2925. mcp->flags = 0;
  2926. rval = qla2x00_mailbox_command(vha, mcp);
  2927. if (rval != QLA_SUCCESS) {
  2928. ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval);
  2929. } else {
  2930. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109d,
  2931. "Done %s.\n", __func__);
  2932. }
  2933. return rval;
  2934. }
  2935. int
  2936. qla2x00_write_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t data)
  2937. {
  2938. int rval;
  2939. mbx_cmd_t mc;
  2940. mbx_cmd_t *mcp = &mc;
  2941. if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
  2942. !IS_QLA27XX(vha->hw))
  2943. return QLA_FUNCTION_FAILED;
  2944. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1182,
  2945. "Entered %s.\n", __func__);
  2946. mcp->mb[0] = MBC_WRITE_SERDES;
  2947. mcp->mb[1] = addr;
  2948. if (IS_QLA2031(vha->hw))
  2949. mcp->mb[2] = data & 0xff;
  2950. else
  2951. mcp->mb[2] = data;
  2952. mcp->mb[3] = 0;
  2953. mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
  2954. mcp->in_mb = MBX_0;
  2955. mcp->tov = MBX_TOV_SECONDS;
  2956. mcp->flags = 0;
  2957. rval = qla2x00_mailbox_command(vha, mcp);
  2958. if (rval != QLA_SUCCESS) {
  2959. ql_dbg(ql_dbg_mbx, vha, 0x1183,
  2960. "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
  2961. } else {
  2962. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1184,
  2963. "Done %s.\n", __func__);
  2964. }
  2965. return rval;
  2966. }
  2967. int
  2968. qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data)
  2969. {
  2970. int rval;
  2971. mbx_cmd_t mc;
  2972. mbx_cmd_t *mcp = &mc;
  2973. if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
  2974. !IS_QLA27XX(vha->hw))
  2975. return QLA_FUNCTION_FAILED;
  2976. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1185,
  2977. "Entered %s.\n", __func__);
  2978. mcp->mb[0] = MBC_READ_SERDES;
  2979. mcp->mb[1] = addr;
  2980. mcp->mb[3] = 0;
  2981. mcp->out_mb = MBX_3|MBX_1|MBX_0;
  2982. mcp->in_mb = MBX_1|MBX_0;
  2983. mcp->tov = MBX_TOV_SECONDS;
  2984. mcp->flags = 0;
  2985. rval = qla2x00_mailbox_command(vha, mcp);
  2986. if (IS_QLA2031(vha->hw))
  2987. *data = mcp->mb[1] & 0xff;
  2988. else
  2989. *data = mcp->mb[1];
  2990. if (rval != QLA_SUCCESS) {
  2991. ql_dbg(ql_dbg_mbx, vha, 0x1186,
  2992. "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
  2993. } else {
  2994. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1187,
  2995. "Done %s.\n", __func__);
  2996. }
  2997. return rval;
  2998. }
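/*
 * Usage sketch (illustrative only): read-modify-write of a SERDES
 * register with the two helpers above.  The register address and bit
 * are placeholders.
 *
 *	uint16_t val;
 *
 *	if (qla2x00_read_serdes_word(vha, 0x1000, &val) == QLA_SUCCESS) {
 *		val |= BIT_0;
 *		qla2x00_write_serdes_word(vha, 0x1000, val);
 *	}
 */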
  2999. int
  3000. qla8044_write_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t data)
  3001. {
  3002. int rval;
  3003. mbx_cmd_t mc;
  3004. mbx_cmd_t *mcp = &mc;
  3005. if (!IS_QLA8044(vha->hw))
  3006. return QLA_FUNCTION_FAILED;
  3007. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x11a0,
  3008. "Entered %s.\n", __func__);
  3009. mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
  3010. mcp->mb[1] = HCS_WRITE_SERDES;
  3011. mcp->mb[3] = LSW(addr);
  3012. mcp->mb[4] = MSW(addr);
  3013. mcp->mb[5] = LSW(data);
  3014. mcp->mb[6] = MSW(data);
  3015. mcp->out_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_1|MBX_0;
  3016. mcp->in_mb = MBX_0;
  3017. mcp->tov = MBX_TOV_SECONDS;
  3018. mcp->flags = 0;
  3019. rval = qla2x00_mailbox_command(vha, mcp);
  3020. if (rval != QLA_SUCCESS) {
  3021. ql_dbg(ql_dbg_mbx, vha, 0x11a1,
  3022. "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
  3023. } else {
  3024. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1188,
  3025. "Done %s.\n", __func__);
  3026. }
  3027. return rval;
  3028. }
  3029. int
  3030. qla8044_read_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data)
  3031. {
  3032. int rval;
  3033. mbx_cmd_t mc;
  3034. mbx_cmd_t *mcp = &mc;
  3035. if (!IS_QLA8044(vha->hw))
  3036. return QLA_FUNCTION_FAILED;
  3037. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1189,
  3038. "Entered %s.\n", __func__);
  3039. mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
  3040. mcp->mb[1] = HCS_READ_SERDES;
  3041. mcp->mb[3] = LSW(addr);
  3042. mcp->mb[4] = MSW(addr);
  3043. mcp->out_mb = MBX_4|MBX_3|MBX_1|MBX_0;
  3044. mcp->in_mb = MBX_2|MBX_1|MBX_0;
  3045. mcp->tov = MBX_TOV_SECONDS;
  3046. mcp->flags = 0;
  3047. rval = qla2x00_mailbox_command(vha, mcp);
  3048. *data = mcp->mb[2] << 16 | mcp->mb[1];
  3049. if (rval != QLA_SUCCESS) {
  3050. ql_dbg(ql_dbg_mbx, vha, 0x118a,
  3051. "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
  3052. } else {
  3053. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118b,
  3054. "Done %s.\n", __func__);
  3055. }
  3056. return rval;
  3057. }
3058. /**
3059. * qla2x00_set_serdes_params() - Set firmware SERDES (serial link) parameters.
3060. * @vha: HA context
3061. * @sw_em_1g: serial link option word for 1Gb operation
3062. * @sw_em_2g: serial link option word for 2Gb operation
3063. * @sw_em_4g: serial link option word for 4Gb operation
3064. *
3065. * Returns qla2x00 local function return status code.
3066. */
  3067. int
  3068. qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
  3069. uint16_t sw_em_2g, uint16_t sw_em_4g)
  3070. {
  3071. int rval;
  3072. mbx_cmd_t mc;
  3073. mbx_cmd_t *mcp = &mc;
  3074. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109e,
  3075. "Entered %s.\n", __func__);
  3076. mcp->mb[0] = MBC_SERDES_PARAMS;
  3077. mcp->mb[1] = BIT_0;
  3078. mcp->mb[2] = sw_em_1g | BIT_15;
  3079. mcp->mb[3] = sw_em_2g | BIT_15;
  3080. mcp->mb[4] = sw_em_4g | BIT_15;
  3081. mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
  3082. mcp->in_mb = MBX_0;
  3083. mcp->tov = MBX_TOV_SECONDS;
  3084. mcp->flags = 0;
  3085. rval = qla2x00_mailbox_command(vha, mcp);
  3086. if (rval != QLA_SUCCESS) {
  3087. /*EMPTY*/
  3088. ql_dbg(ql_dbg_mbx, vha, 0x109f,
  3089. "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
  3090. } else {
  3091. /*EMPTY*/
  3092. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a0,
  3093. "Done %s.\n", __func__);
  3094. }
  3095. return rval;
  3096. }
  3097. int
  3098. qla2x00_stop_firmware(scsi_qla_host_t *vha)
  3099. {
  3100. int rval;
  3101. mbx_cmd_t mc;
  3102. mbx_cmd_t *mcp = &mc;
  3103. if (!IS_FWI2_CAPABLE(vha->hw))
  3104. return QLA_FUNCTION_FAILED;
  3105. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a1,
  3106. "Entered %s.\n", __func__);
  3107. mcp->mb[0] = MBC_STOP_FIRMWARE;
  3108. mcp->mb[1] = 0;
  3109. mcp->out_mb = MBX_1|MBX_0;
  3110. mcp->in_mb = MBX_0;
  3111. mcp->tov = 5;
  3112. mcp->flags = 0;
  3113. rval = qla2x00_mailbox_command(vha, mcp);
  3114. if (rval != QLA_SUCCESS) {
  3115. ql_dbg(ql_dbg_mbx, vha, 0x10a2, "Failed=%x.\n", rval);
  3116. if (mcp->mb[0] == MBS_INVALID_COMMAND)
  3117. rval = QLA_INVALID_COMMAND;
  3118. } else {
  3119. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a3,
  3120. "Done %s.\n", __func__);
  3121. }
  3122. return rval;
  3123. }
  3124. int
  3125. qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
  3126. uint16_t buffers)
  3127. {
  3128. int rval;
  3129. mbx_cmd_t mc;
  3130. mbx_cmd_t *mcp = &mc;
  3131. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a4,
  3132. "Entered %s.\n", __func__);
  3133. if (!IS_FWI2_CAPABLE(vha->hw))
  3134. return QLA_FUNCTION_FAILED;
  3135. if (unlikely(pci_channel_offline(vha->hw->pdev)))
  3136. return QLA_FUNCTION_FAILED;
  3137. mcp->mb[0] = MBC_TRACE_CONTROL;
  3138. mcp->mb[1] = TC_EFT_ENABLE;
  3139. mcp->mb[2] = LSW(eft_dma);
  3140. mcp->mb[3] = MSW(eft_dma);
  3141. mcp->mb[4] = LSW(MSD(eft_dma));
  3142. mcp->mb[5] = MSW(MSD(eft_dma));
  3143. mcp->mb[6] = buffers;
  3144. mcp->mb[7] = TC_AEN_DISABLE;
  3145. mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
  3146. mcp->in_mb = MBX_1|MBX_0;
  3147. mcp->tov = MBX_TOV_SECONDS;
  3148. mcp->flags = 0;
  3149. rval = qla2x00_mailbox_command(vha, mcp);
  3150. if (rval != QLA_SUCCESS) {
  3151. ql_dbg(ql_dbg_mbx, vha, 0x10a5,
  3152. "Failed=%x mb[0]=%x mb[1]=%x.\n",
  3153. rval, mcp->mb[0], mcp->mb[1]);
  3154. } else {
  3155. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a6,
  3156. "Done %s.\n", __func__);
  3157. }
  3158. return rval;
  3159. }
  3160. int
  3161. qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
  3162. {
  3163. int rval;
  3164. mbx_cmd_t mc;
  3165. mbx_cmd_t *mcp = &mc;
  3166. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a7,
  3167. "Entered %s.\n", __func__);
  3168. if (!IS_FWI2_CAPABLE(vha->hw))
  3169. return QLA_FUNCTION_FAILED;
  3170. if (unlikely(pci_channel_offline(vha->hw->pdev)))
  3171. return QLA_FUNCTION_FAILED;
  3172. mcp->mb[0] = MBC_TRACE_CONTROL;
  3173. mcp->mb[1] = TC_EFT_DISABLE;
  3174. mcp->out_mb = MBX_1|MBX_0;
  3175. mcp->in_mb = MBX_1|MBX_0;
  3176. mcp->tov = MBX_TOV_SECONDS;
  3177. mcp->flags = 0;
  3178. rval = qla2x00_mailbox_command(vha, mcp);
  3179. if (rval != QLA_SUCCESS) {
  3180. ql_dbg(ql_dbg_mbx, vha, 0x10a8,
  3181. "Failed=%x mb[0]=%x mb[1]=%x.\n",
  3182. rval, mcp->mb[0], mcp->mb[1]);
  3183. } else {
  3184. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a9,
  3185. "Done %s.\n", __func__);
  3186. }
  3187. return rval;
  3188. }
  3189. int
  3190. qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
  3191. uint16_t buffers, uint16_t *mb, uint32_t *dwords)
  3192. {
  3193. int rval;
  3194. mbx_cmd_t mc;
  3195. mbx_cmd_t *mcp = &mc;
  3196. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10aa,
  3197. "Entered %s.\n", __func__);
  3198. if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) &&
  3199. !IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw))
  3200. return QLA_FUNCTION_FAILED;
  3201. if (unlikely(pci_channel_offline(vha->hw->pdev)))
  3202. return QLA_FUNCTION_FAILED;
  3203. mcp->mb[0] = MBC_TRACE_CONTROL;
  3204. mcp->mb[1] = TC_FCE_ENABLE;
  3205. mcp->mb[2] = LSW(fce_dma);
  3206. mcp->mb[3] = MSW(fce_dma);
  3207. mcp->mb[4] = LSW(MSD(fce_dma));
  3208. mcp->mb[5] = MSW(MSD(fce_dma));
  3209. mcp->mb[6] = buffers;
  3210. mcp->mb[7] = TC_AEN_DISABLE;
  3211. mcp->mb[8] = 0;
  3212. mcp->mb[9] = TC_FCE_DEFAULT_RX_SIZE;
  3213. mcp->mb[10] = TC_FCE_DEFAULT_TX_SIZE;
  3214. mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
  3215. MBX_1|MBX_0;
  3216. mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
  3217. mcp->tov = MBX_TOV_SECONDS;
  3218. mcp->flags = 0;
  3219. rval = qla2x00_mailbox_command(vha, mcp);
  3220. if (rval != QLA_SUCCESS) {
  3221. ql_dbg(ql_dbg_mbx, vha, 0x10ab,
  3222. "Failed=%x mb[0]=%x mb[1]=%x.\n",
  3223. rval, mcp->mb[0], mcp->mb[1]);
  3224. } else {
  3225. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ac,
  3226. "Done %s.\n", __func__);
  3227. if (mb)
  3228. memcpy(mb, mcp->mb, 8 * sizeof(*mb));
  3229. if (dwords)
  3230. *dwords = buffers;
  3231. }
  3232. return rval;
  3233. }
  3234. int
  3235. qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
  3236. {
  3237. int rval;
  3238. mbx_cmd_t mc;
  3239. mbx_cmd_t *mcp = &mc;
  3240. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ad,
  3241. "Entered %s.\n", __func__);
  3242. if (!IS_FWI2_CAPABLE(vha->hw))
  3243. return QLA_FUNCTION_FAILED;
  3244. if (unlikely(pci_channel_offline(vha->hw->pdev)))
  3245. return QLA_FUNCTION_FAILED;
  3246. mcp->mb[0] = MBC_TRACE_CONTROL;
  3247. mcp->mb[1] = TC_FCE_DISABLE;
  3248. mcp->mb[2] = TC_FCE_DISABLE_TRACE;
  3249. mcp->out_mb = MBX_2|MBX_1|MBX_0;
  3250. mcp->in_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
  3251. MBX_1|MBX_0;
  3252. mcp->tov = MBX_TOV_SECONDS;
  3253. mcp->flags = 0;
  3254. rval = qla2x00_mailbox_command(vha, mcp);
  3255. if (rval != QLA_SUCCESS) {
  3256. ql_dbg(ql_dbg_mbx, vha, 0x10ae,
  3257. "Failed=%x mb[0]=%x mb[1]=%x.\n",
  3258. rval, mcp->mb[0], mcp->mb[1]);
  3259. } else {
  3260. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10af,
  3261. "Done %s.\n", __func__);
  3262. if (wr)
  3263. *wr = (uint64_t) mcp->mb[5] << 48 |
  3264. (uint64_t) mcp->mb[4] << 32 |
  3265. (uint64_t) mcp->mb[3] << 16 |
  3266. (uint64_t) mcp->mb[2];
  3267. if (rd)
  3268. *rd = (uint64_t) mcp->mb[9] << 48 |
  3269. (uint64_t) mcp->mb[8] << 32 |
  3270. (uint64_t) mcp->mb[7] << 16 |
  3271. (uint64_t) mcp->mb[6];
  3272. }
  3273. return rval;
  3274. }
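/*
 * Note (explanatory): the FCE-disable command hands back the current
 * write and read pointers as 64-bit values spread over four 16-bit
 * mailbox registers each; the shifts above reassemble them as
 *
 *	ptr = (u64)mb_hi3 << 48 | (u64)mb_hi2 << 32 |
 *	      (u64)mb_hi1 << 16 | (u64)mb_lo;
 *
 * with the write pointer in mb[5..2] and the read pointer in mb[9..6].
 */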
  3275. int
  3276. qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
  3277. uint16_t *port_speed, uint16_t *mb)
  3278. {
  3279. int rval;
  3280. mbx_cmd_t mc;
  3281. mbx_cmd_t *mcp = &mc;
  3282. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b0,
  3283. "Entered %s.\n", __func__);
  3284. if (!IS_IIDMA_CAPABLE(vha->hw))
  3285. return QLA_FUNCTION_FAILED;
  3286. mcp->mb[0] = MBC_PORT_PARAMS;
  3287. mcp->mb[1] = loop_id;
  3288. mcp->mb[2] = mcp->mb[3] = 0;
  3289. mcp->mb[9] = vha->vp_idx;
  3290. mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
  3291. mcp->in_mb = MBX_3|MBX_1|MBX_0;
  3292. mcp->tov = MBX_TOV_SECONDS;
  3293. mcp->flags = 0;
  3294. rval = qla2x00_mailbox_command(vha, mcp);
  3295. /* Return mailbox statuses. */
  3296. if (mb != NULL) {
  3297. mb[0] = mcp->mb[0];
  3298. mb[1] = mcp->mb[1];
  3299. mb[3] = mcp->mb[3];
  3300. }
  3301. if (rval != QLA_SUCCESS) {
  3302. ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval);
  3303. } else {
  3304. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b2,
  3305. "Done %s.\n", __func__);
  3306. if (port_speed)
  3307. *port_speed = mcp->mb[3];
  3308. }
  3309. return rval;
  3310. }
  3311. int
  3312. qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
  3313. uint16_t port_speed, uint16_t *mb)
  3314. {
  3315. int rval;
  3316. mbx_cmd_t mc;
  3317. mbx_cmd_t *mcp = &mc;
  3318. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b3,
  3319. "Entered %s.\n", __func__);
  3320. if (!IS_IIDMA_CAPABLE(vha->hw))
  3321. return QLA_FUNCTION_FAILED;
  3322. mcp->mb[0] = MBC_PORT_PARAMS;
  3323. mcp->mb[1] = loop_id;
  3324. mcp->mb[2] = BIT_0;
  3325. mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0);
  3326. mcp->mb[9] = vha->vp_idx;
  3327. mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
  3328. mcp->in_mb = MBX_3|MBX_1|MBX_0;
  3329. mcp->tov = MBX_TOV_SECONDS;
  3330. mcp->flags = 0;
  3331. rval = qla2x00_mailbox_command(vha, mcp);
  3332. /* Return mailbox statuses. */
  3333. if (mb != NULL) {
  3334. mb[0] = mcp->mb[0];
  3335. mb[1] = mcp->mb[1];
  3336. mb[3] = mcp->mb[3];
  3337. }
  3338. if (rval != QLA_SUCCESS) {
  3339. ql_dbg(ql_dbg_mbx, vha, 0x10b4,
  3340. "Failed=%x.\n", rval);
  3341. } else {
  3342. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b5,
  3343. "Done %s.\n", __func__);
  3344. }
  3345. return rval;
  3346. }
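/*
 * Usage sketch (illustrative only): query a port's current iIDMA speed
 * setting, then program a new one.  "desired" is a placeholder for one
 * of the firmware port-speed encodings.
 *
 *	uint16_t speed, mb[4];
 *
 *	if (qla2x00_get_idma_speed(vha, fcport->loop_id, &speed, mb) ==
 *	    QLA_SUCCESS)
 *		qla2x00_set_idma_speed(vha, fcport->loop_id, desired, mb);
 */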
  3347. void
  3348. qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
  3349. struct vp_rpt_id_entry_24xx *rptid_entry)
  3350. {
  3351. struct qla_hw_data *ha = vha->hw;
  3352. scsi_qla_host_t *vp = NULL;
  3353. unsigned long flags;
  3354. int found;
  3355. port_id_t id;
  3356. struct fc_port *fcport;
  3357. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6,
  3358. "Entered %s.\n", __func__);
  3359. if (rptid_entry->entry_status != 0)
  3360. return;
  3361. id.b.domain = rptid_entry->port_id[2];
  3362. id.b.area = rptid_entry->port_id[1];
  3363. id.b.al_pa = rptid_entry->port_id[0];
  3364. id.b.rsvd_1 = 0;
  3365. ha->flags.n2n_ae = 0;
  3366. if (rptid_entry->format == 0) {
  3367. /* loop */
  3368. ql_dbg(ql_dbg_async, vha, 0x10b7,
  3369. "Format 0 : Number of VPs setup %d, number of "
  3370. "VPs acquired %d.\n", rptid_entry->vp_setup,
  3371. rptid_entry->vp_acquired);
  3372. ql_dbg(ql_dbg_async, vha, 0x10b8,
  3373. "Primary port id %02x%02x%02x.\n",
  3374. rptid_entry->port_id[2], rptid_entry->port_id[1],
  3375. rptid_entry->port_id[0]);
  3376. ha->current_topology = ISP_CFG_NL;
  3377. qlt_update_host_map(vha, id);
  3378. } else if (rptid_entry->format == 1) {
  3379. /* fabric */
  3380. ql_dbg(ql_dbg_async, vha, 0x10b9,
  3381. "Format 1: VP[%d] enabled - status %d - with "
  3382. "port id %02x%02x%02x.\n", rptid_entry->vp_idx,
  3383. rptid_entry->vp_status,
  3384. rptid_entry->port_id[2], rptid_entry->port_id[1],
  3385. rptid_entry->port_id[0]);
  3386. ql_dbg(ql_dbg_async, vha, 0x5075,
  3387. "Format 1: Remote WWPN %8phC.\n",
  3388. rptid_entry->u.f1.port_name);
  3389. ql_dbg(ql_dbg_async, vha, 0x5075,
  3390. "Format 1: WWPN %8phC.\n",
  3391. vha->port_name);
  3392. switch (rptid_entry->u.f1.flags & TOPO_MASK) {
  3393. case TOPO_N2N:
  3394. ha->current_topology = ISP_CFG_N;
  3395. spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
  3396. fcport = qla2x00_find_fcport_by_wwpn(vha,
  3397. rptid_entry->u.f1.port_name, 1);
  3398. spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
  3399. if (fcport) {
  3400. fcport->plogi_nack_done_deadline = jiffies + HZ;
  3401. fcport->dm_login_expire = jiffies + 3*HZ;
  3402. fcport->scan_state = QLA_FCPORT_FOUND;
  3403. switch (fcport->disc_state) {
  3404. case DSC_DELETED:
  3405. set_bit(RELOGIN_NEEDED,
  3406. &vha->dpc_flags);
  3407. break;
  3408. case DSC_DELETE_PEND:
  3409. break;
  3410. default:
  3411. qlt_schedule_sess_for_deletion(fcport);
  3412. break;
  3413. }
  3414. } else {
  3415. id.b24 = 0;
  3416. if (wwn_to_u64(vha->port_name) >
  3417. wwn_to_u64(rptid_entry->u.f1.port_name)) {
  3418. vha->d_id.b24 = 0;
  3419. vha->d_id.b.al_pa = 1;
  3420. ha->flags.n2n_bigger = 1;
  3421. ha->flags.n2n_ae = 0;
  3422. id.b.al_pa = 2;
  3423. ql_dbg(ql_dbg_async, vha, 0x5075,
  3424. "Format 1: assign local id %x remote id %x\n",
  3425. vha->d_id.b24, id.b24);
  3426. } else {
  3427. ql_dbg(ql_dbg_async, vha, 0x5075,
  3428. "Format 1: Remote login - Waiting for WWPN %8phC.\n",
  3429. rptid_entry->u.f1.port_name);
  3430. ha->flags.n2n_bigger = 0;
  3431. ha->flags.n2n_ae = 1;
  3432. }
  3433. qla24xx_post_newsess_work(vha, &id,
  3434. rptid_entry->u.f1.port_name,
  3435. rptid_entry->u.f1.node_name,
  3436. NULL,
  3437. FC4_TYPE_UNKNOWN);
  3438. }
3439. /* if our port name is higher than the remote's, initiate the N2N login */
  3440. set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags);
3441. return;
  3443. case TOPO_FL:
  3444. ha->current_topology = ISP_CFG_FL;
  3445. break;
  3446. case TOPO_F:
  3447. ha->current_topology = ISP_CFG_F;
  3448. break;
  3449. default:
  3450. break;
  3451. }
  3452. ha->flags.gpsc_supported = 1;
  3453. ha->current_topology = ISP_CFG_F;
  3454. /* buffer to buffer credit flag */
  3455. vha->flags.bbcr_enable = (rptid_entry->u.f1.bbcr & 0xf) != 0;
  3456. if (rptid_entry->vp_idx == 0) {
  3457. if (rptid_entry->vp_status == VP_STAT_COMPL) {
  3458. /* FA-WWN is only for physical port */
  3459. if (qla_ini_mode_enabled(vha) &&
  3460. ha->flags.fawwpn_enabled &&
  3461. (rptid_entry->u.f1.flags &
  3462. BIT_6)) {
  3463. memcpy(vha->port_name,
  3464. rptid_entry->u.f1.port_name,
  3465. WWN_SIZE);
  3466. }
  3467. qlt_update_host_map(vha, id);
  3468. }
  3469. set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
  3470. set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
  3471. } else {
  3472. if (rptid_entry->vp_status != VP_STAT_COMPL &&
  3473. rptid_entry->vp_status != VP_STAT_ID_CHG) {
  3474. ql_dbg(ql_dbg_mbx, vha, 0x10ba,
  3475. "Could not acquire ID for VP[%d].\n",
  3476. rptid_entry->vp_idx);
  3477. return;
  3478. }
  3479. found = 0;
  3480. spin_lock_irqsave(&ha->vport_slock, flags);
  3481. list_for_each_entry(vp, &ha->vp_list, list) {
  3482. if (rptid_entry->vp_idx == vp->vp_idx) {
  3483. found = 1;
  3484. break;
  3485. }
  3486. }
  3487. spin_unlock_irqrestore(&ha->vport_slock, flags);
  3488. if (!found)
  3489. return;
  3490. qlt_update_host_map(vp, id);
  3491. /*
  3492. * Cannot configure here as we are still sitting on the
  3493. * response queue. Handle it in dpc context.
  3494. */
  3495. set_bit(VP_IDX_ACQUIRED, &vp->vp_flags);
  3496. set_bit(REGISTER_FC4_NEEDED, &vp->dpc_flags);
  3497. set_bit(REGISTER_FDMI_NEEDED, &vp->dpc_flags);
  3498. }
  3499. set_bit(VP_DPC_NEEDED, &vha->dpc_flags);
  3500. qla2xxx_wake_dpc(vha);
  3501. } else if (rptid_entry->format == 2) {
  3502. ql_dbg(ql_dbg_async, vha, 0x505f,
  3503. "RIDA: format 2/N2N Primary port id %02x%02x%02x.\n",
  3504. rptid_entry->port_id[2], rptid_entry->port_id[1],
  3505. rptid_entry->port_id[0]);
  3506. ql_dbg(ql_dbg_async, vha, 0x5075,
  3507. "N2N: Remote WWPN %8phC.\n",
  3508. rptid_entry->u.f2.port_name);
3509. /* N2N: direct connect. */
  3510. ha->current_topology = ISP_CFG_N;
  3511. ha->flags.rida_fmt2 = 1;
  3512. vha->d_id.b.domain = rptid_entry->port_id[2];
  3513. vha->d_id.b.area = rptid_entry->port_id[1];
  3514. vha->d_id.b.al_pa = rptid_entry->port_id[0];
  3515. ha->flags.n2n_ae = 1;
  3516. spin_lock_irqsave(&ha->vport_slock, flags);
  3517. qlt_update_vp_map(vha, SET_AL_PA);
  3518. spin_unlock_irqrestore(&ha->vport_slock, flags);
  3519. list_for_each_entry(fcport, &vha->vp_fcports, list) {
  3520. fcport->scan_state = QLA_FCPORT_SCAN;
  3521. }
  3522. fcport = qla2x00_find_fcport_by_wwpn(vha,
  3523. rptid_entry->u.f2.port_name, 1);
  3524. if (fcport) {
  3525. fcport->login_retry = vha->hw->login_retry_count;
  3526. fcport->plogi_nack_done_deadline = jiffies + HZ;
  3527. fcport->scan_state = QLA_FCPORT_FOUND;
  3528. }
  3529. }
  3530. }
  3531. /*
  3532. * qla24xx_modify_vp_config
  3533. * Change VP configuration for vha
  3534. *
  3535. * Input:
  3536. * vha = adapter block pointer.
  3537. *
  3538. * Returns:
  3539. * qla2xxx local function return status code.
  3540. *
  3541. * Context:
  3542. * Kernel context.
  3543. */
  3544. int
  3545. qla24xx_modify_vp_config(scsi_qla_host_t *vha)
  3546. {
  3547. int rval;
  3548. struct vp_config_entry_24xx *vpmod;
  3549. dma_addr_t vpmod_dma;
  3550. struct qla_hw_data *ha = vha->hw;
  3551. struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
  3552. /* This can be called by the parent */
  3553. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10bb,
  3554. "Entered %s.\n", __func__);
  3555. vpmod = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma);
  3556. if (!vpmod) {
  3557. ql_log(ql_log_warn, vha, 0x10bc,
  3558. "Failed to allocate modify VP IOCB.\n");
  3559. return QLA_MEMORY_ALLOC_FAILED;
  3560. }
  3561. vpmod->entry_type = VP_CONFIG_IOCB_TYPE;
  3562. vpmod->entry_count = 1;
  3563. vpmod->command = VCT_COMMAND_MOD_ENABLE_VPS;
  3564. vpmod->vp_count = 1;
  3565. vpmod->vp_index1 = vha->vp_idx;
  3566. vpmod->options_idx1 = BIT_3|BIT_4|BIT_5;
  3567. qlt_modify_vp_config(vha, vpmod);
  3568. memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE);
  3569. memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE);
  3570. vpmod->entry_count = 1;
  3571. rval = qla2x00_issue_iocb(base_vha, vpmod, vpmod_dma, 0);
  3572. if (rval != QLA_SUCCESS) {
  3573. ql_dbg(ql_dbg_mbx, vha, 0x10bd,
  3574. "Failed to issue VP config IOCB (%x).\n", rval);
  3575. } else if (vpmod->comp_status != 0) {
  3576. ql_dbg(ql_dbg_mbx, vha, 0x10be,
  3577. "Failed to complete IOCB -- error status (%x).\n",
  3578. vpmod->comp_status);
  3579. rval = QLA_FUNCTION_FAILED;
  3580. } else if (vpmod->comp_status != cpu_to_le16(CS_COMPLETE)) {
  3581. ql_dbg(ql_dbg_mbx, vha, 0x10bf,
  3582. "Failed to complete IOCB -- completion status (%x).\n",
  3583. le16_to_cpu(vpmod->comp_status));
  3584. rval = QLA_FUNCTION_FAILED;
  3585. } else {
  3586. /* EMPTY */
  3587. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c0,
  3588. "Done %s.\n", __func__);
  3589. fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING);
  3590. }
  3591. dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma);
  3592. return rval;
  3593. }
  3594. /*
  3595. * qla2x00_send_change_request
3596. * Register for, or clear registration for, RSCN notifications from the fabric controller.
3597. *
3598. * Input:
3599. * vha = adapter state pointer
  3600. * format = registration format:
  3601. * 0 - Reserved
  3602. * 1 - Fabric detected registration
  3603. * 2 - N_port detected registration
  3604. * 3 - Full registration
  3605. * FF - clear registration
  3606. * vp_idx = Virtual port index
  3607. *
  3608. * Returns:
  3609. * qla2x00 local function return status code.
  3610. *
  3611. * Context:
3612. * Kernel context.
  3613. */
  3614. int
  3615. qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format,
  3616. uint16_t vp_idx)
  3617. {
  3618. int rval;
  3619. mbx_cmd_t mc;
  3620. mbx_cmd_t *mcp = &mc;
  3621. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c7,
  3622. "Entered %s.\n", __func__);
  3623. mcp->mb[0] = MBC_SEND_CHANGE_REQUEST;
  3624. mcp->mb[1] = format;
  3625. mcp->mb[9] = vp_idx;
  3626. mcp->out_mb = MBX_9|MBX_1|MBX_0;
  3627. mcp->in_mb = MBX_0|MBX_1;
  3628. mcp->tov = MBX_TOV_SECONDS;
  3629. mcp->flags = 0;
  3630. rval = qla2x00_mailbox_command(vha, mcp);
  3631. if (rval == QLA_SUCCESS) {
  3632. if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
  3633. rval = BIT_1;
  3634. }
  3635. } else
  3636. rval = BIT_1;
  3637. return rval;
  3638. }
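/*
 * Usage sketch (illustrative only): request full RSCN registration
 * (format 3, per the table above) for the physical port.  The debug
 * message id is a placeholder.
 *
 *	if (qla2x00_send_change_request(vha, 0x3, 0) != QLA_SUCCESS)
 *		ql_log(ql_log_warn, vha, 0x0000,
 *		    "SCR registration failed.\n");
 */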
  3639. int
  3640. qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
  3641. uint32_t size)
  3642. {
  3643. int rval;
  3644. mbx_cmd_t mc;
  3645. mbx_cmd_t *mcp = &mc;
  3646. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1009,
  3647. "Entered %s.\n", __func__);
  3648. if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) {
  3649. mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
  3650. mcp->mb[8] = MSW(addr);
  3651. mcp->out_mb = MBX_8|MBX_0;
  3652. } else {
  3653. mcp->mb[0] = MBC_DUMP_RISC_RAM;
  3654. mcp->out_mb = MBX_0;
  3655. }
  3656. mcp->mb[1] = LSW(addr);
  3657. mcp->mb[2] = MSW(req_dma);
  3658. mcp->mb[3] = LSW(req_dma);
  3659. mcp->mb[6] = MSW(MSD(req_dma));
  3660. mcp->mb[7] = LSW(MSD(req_dma));
  3661. mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
  3662. if (IS_FWI2_CAPABLE(vha->hw)) {
  3663. mcp->mb[4] = MSW(size);
  3664. mcp->mb[5] = LSW(size);
  3665. mcp->out_mb |= MBX_5|MBX_4;
  3666. } else {
  3667. mcp->mb[4] = LSW(size);
  3668. mcp->out_mb |= MBX_4;
  3669. }
  3670. mcp->in_mb = MBX_0;
  3671. mcp->tov = MBX_TOV_SECONDS;
  3672. mcp->flags = 0;
  3673. rval = qla2x00_mailbox_command(vha, mcp);
  3674. if (rval != QLA_SUCCESS) {
  3675. ql_dbg(ql_dbg_mbx, vha, 0x1008,
  3676. "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
  3677. } else {
  3678. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1007,
  3679. "Done %s.\n", __func__);
  3680. }
  3681. return rval;
  3682. }
  3683. /* 84XX Support **************************************************************/
  3684. struct cs84xx_mgmt_cmd {
  3685. union {
  3686. struct verify_chip_entry_84xx req;
  3687. struct verify_chip_rsp_84xx rsp;
  3688. } p;
  3689. };
  3690. int
  3691. qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
  3692. {
  3693. int rval, retry;
  3694. struct cs84xx_mgmt_cmd *mn;
  3695. dma_addr_t mn_dma;
  3696. uint16_t options;
  3697. unsigned long flags;
  3698. struct qla_hw_data *ha = vha->hw;
  3699. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c8,
  3700. "Entered %s.\n", __func__);
  3701. mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
  3702. if (mn == NULL) {
  3703. return QLA_MEMORY_ALLOC_FAILED;
  3704. }
  3705. /* Force Update? */
  3706. options = ha->cs84xx->fw_update ? VCO_FORCE_UPDATE : 0;
  3707. /* Diagnostic firmware? */
  3708. /* options |= MENLO_DIAG_FW; */
  3709. /* We update the firmware with only one data sequence. */
  3710. options |= VCO_END_OF_DATA;
  3711. do {
  3712. retry = 0;
  3713. memset(mn, 0, sizeof(*mn));
  3714. mn->p.req.entry_type = VERIFY_CHIP_IOCB_TYPE;
  3715. mn->p.req.entry_count = 1;
  3716. mn->p.req.options = cpu_to_le16(options);
  3717. ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111c,
  3718. "Dump of Verify Request.\n");
  3719. ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111e,
  3720. (uint8_t *)mn, sizeof(*mn));
  3721. rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
  3722. if (rval != QLA_SUCCESS) {
  3723. ql_dbg(ql_dbg_mbx, vha, 0x10cb,
  3724. "Failed to issue verify IOCB (%x).\n", rval);
  3725. goto verify_done;
  3726. }
  3727. ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1110,
  3728. "Dump of Verify Response.\n");
  3729. ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1118,
  3730. (uint8_t *)mn, sizeof(*mn));
  3731. status[0] = le16_to_cpu(mn->p.rsp.comp_status);
  3732. status[1] = status[0] == CS_VCS_CHIP_FAILURE ?
  3733. le16_to_cpu(mn->p.rsp.failure_code) : 0;
  3734. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ce,
  3735. "cs=%x fc=%x.\n", status[0], status[1]);
  3736. if (status[0] != CS_COMPLETE) {
  3737. rval = QLA_FUNCTION_FAILED;
  3738. if (!(options & VCO_DONT_UPDATE_FW)) {
  3739. ql_dbg(ql_dbg_mbx, vha, 0x10cf,
  3740. "Firmware update failed. Retrying "
  3741. "without update firmware.\n");
  3742. options |= VCO_DONT_UPDATE_FW;
  3743. options &= ~VCO_FORCE_UPDATE;
  3744. retry = 1;
  3745. }
  3746. } else {
  3747. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d0,
  3748. "Firmware updated to %x.\n",
  3749. le32_to_cpu(mn->p.rsp.fw_ver));
  3750. /* NOTE: we only update OP firmware. */
  3751. spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
  3752. ha->cs84xx->op_fw_version =
  3753. le32_to_cpu(mn->p.rsp.fw_ver);
  3754. spin_unlock_irqrestore(&ha->cs84xx->access_lock,
  3755. flags);
  3756. }
  3757. } while (retry);
  3758. verify_done:
  3759. dma_pool_free(ha->s_dma_pool, mn, mn_dma);
  3760. if (rval != QLA_SUCCESS) {
  3761. ql_dbg(ql_dbg_mbx, vha, 0x10d1,
  3762. "Failed=%x.\n", rval);
  3763. } else {
  3764. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d2,
  3765. "Done %s.\n", __func__);
  3766. }
  3767. return rval;
  3768. }
  3769. int
  3770. qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
  3771. {
  3772. int rval;
  3773. unsigned long flags;
  3774. mbx_cmd_t mc;
  3775. mbx_cmd_t *mcp = &mc;
  3776. struct qla_hw_data *ha = vha->hw;
  3777. if (!ha->flags.fw_started)
  3778. return QLA_SUCCESS;
  3779. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3,
  3780. "Entered %s.\n", __func__);
  3781. if (IS_SHADOW_REG_CAPABLE(ha))
  3782. req->options |= BIT_13;
  3783. mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
  3784. mcp->mb[1] = req->options;
  3785. mcp->mb[2] = MSW(LSD(req->dma));
  3786. mcp->mb[3] = LSW(LSD(req->dma));
  3787. mcp->mb[6] = MSW(MSD(req->dma));
  3788. mcp->mb[7] = LSW(MSD(req->dma));
  3789. mcp->mb[5] = req->length;
  3790. if (req->rsp)
  3791. mcp->mb[10] = req->rsp->id;
  3792. mcp->mb[12] = req->qos;
  3793. mcp->mb[11] = req->vp_idx;
  3794. mcp->mb[13] = req->rid;
  3795. if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
  3796. mcp->mb[15] = 0;
  3797. mcp->mb[4] = req->id;
  3798. /* que in ptr index */
  3799. mcp->mb[8] = 0;
  3800. /* que out ptr index */
  3801. mcp->mb[9] = *req->out_ptr = 0;
  3802. mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7|
  3803. MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
  3804. mcp->in_mb = MBX_0;
  3805. mcp->flags = MBX_DMA_OUT;
  3806. mcp->tov = MBX_TOV_SECONDS * 2;
  3807. if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
  3808. mcp->in_mb |= MBX_1;
  3809. if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
  3810. mcp->out_mb |= MBX_15;
  3811. /* debug q create issue in SR-IOV */
  3812. mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
  3813. }
  3814. spin_lock_irqsave(&ha->hardware_lock, flags);
  3815. if (!(req->options & BIT_0)) {
  3816. WRT_REG_DWORD(req->req_q_in, 0);
  3817. if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
  3818. WRT_REG_DWORD(req->req_q_out, 0);
  3819. }
  3820. spin_unlock_irqrestore(&ha->hardware_lock, flags);
  3821. rval = qla2x00_mailbox_command(vha, mcp);
  3822. if (rval != QLA_SUCCESS) {
  3823. ql_dbg(ql_dbg_mbx, vha, 0x10d4,
  3824. "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
  3825. } else {
  3826. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d5,
  3827. "Done %s.\n", __func__);
  3828. }
  3829. return rval;
  3830. }
  3831. int
  3832. qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
  3833. {
  3834. int rval;
  3835. unsigned long flags;
  3836. mbx_cmd_t mc;
  3837. mbx_cmd_t *mcp = &mc;
  3838. struct qla_hw_data *ha = vha->hw;
  3839. if (!ha->flags.fw_started)
  3840. return QLA_SUCCESS;
  3841. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6,
  3842. "Entered %s.\n", __func__);
  3843. if (IS_SHADOW_REG_CAPABLE(ha))
  3844. rsp->options |= BIT_13;
  3845. mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
  3846. mcp->mb[1] = rsp->options;
  3847. mcp->mb[2] = MSW(LSD(rsp->dma));
  3848. mcp->mb[3] = LSW(LSD(rsp->dma));
  3849. mcp->mb[6] = MSW(MSD(rsp->dma));
  3850. mcp->mb[7] = LSW(MSD(rsp->dma));
  3851. mcp->mb[5] = rsp->length;
  3852. mcp->mb[14] = rsp->msix->entry;
  3853. mcp->mb[13] = rsp->rid;
  3854. if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
  3855. mcp->mb[15] = 0;
  3856. mcp->mb[4] = rsp->id;
  3857. /* que in ptr index */
  3858. mcp->mb[8] = *rsp->in_ptr = 0;
  3859. /* que out ptr index */
  3860. mcp->mb[9] = 0;
  3861. mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7
  3862. |MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
  3863. mcp->in_mb = MBX_0;
  3864. mcp->flags = MBX_DMA_OUT;
  3865. mcp->tov = MBX_TOV_SECONDS * 2;
  3866. if (IS_QLA81XX(ha)) {
  3867. mcp->out_mb |= MBX_12|MBX_11|MBX_10;
  3868. mcp->in_mb |= MBX_1;
  3869. } else if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
  3870. mcp->out_mb |= MBX_15|MBX_12|MBX_11|MBX_10;
  3871. mcp->in_mb |= MBX_1;
  3872. /* debug q create issue in SR-IOV */
  3873. mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
  3874. }
  3875. spin_lock_irqsave(&ha->hardware_lock, flags);
  3876. if (!(rsp->options & BIT_0)) {
  3877. WRT_REG_DWORD(rsp->rsp_q_out, 0);
  3878. if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
  3879. WRT_REG_DWORD(rsp->rsp_q_in, 0);
  3880. }
  3881. spin_unlock_irqrestore(&ha->hardware_lock, flags);
  3882. rval = qla2x00_mailbox_command(vha, mcp);
  3883. if (rval != QLA_SUCCESS) {
  3884. ql_dbg(ql_dbg_mbx, vha, 0x10d7,
  3885. "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
  3886. } else {
  3887. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d8,
  3888. "Done %s.\n", __func__);
  3889. }
  3890. return rval;
  3891. }
  3892. int
  3893. qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb)
  3894. {
  3895. int rval;
  3896. mbx_cmd_t mc;
  3897. mbx_cmd_t *mcp = &mc;
  3898. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d9,
  3899. "Entered %s.\n", __func__);
  3900. mcp->mb[0] = MBC_IDC_ACK;
  3901. memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
  3902. mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
  3903. mcp->in_mb = MBX_0;
  3904. mcp->tov = MBX_TOV_SECONDS;
  3905. mcp->flags = 0;
  3906. rval = qla2x00_mailbox_command(vha, mcp);
  3907. if (rval != QLA_SUCCESS) {
  3908. ql_dbg(ql_dbg_mbx, vha, 0x10da,
  3909. "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
  3910. } else {
  3911. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10db,
  3912. "Done %s.\n", __func__);
  3913. }
  3914. return rval;
  3915. }
  3916. int
  3917. qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
  3918. {
  3919. int rval;
  3920. mbx_cmd_t mc;
  3921. mbx_cmd_t *mcp = &mc;
  3922. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10dc,
  3923. "Entered %s.\n", __func__);
  3924. if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
  3925. !IS_QLA27XX(vha->hw))
  3926. return QLA_FUNCTION_FAILED;
  3927. mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
  3928. mcp->mb[1] = FAC_OPT_CMD_GET_SECTOR_SIZE;
  3929. mcp->out_mb = MBX_1|MBX_0;
  3930. mcp->in_mb = MBX_1|MBX_0;
  3931. mcp->tov = MBX_TOV_SECONDS;
  3932. mcp->flags = 0;
  3933. rval = qla2x00_mailbox_command(vha, mcp);
  3934. if (rval != QLA_SUCCESS) {
  3935. ql_dbg(ql_dbg_mbx, vha, 0x10dd,
  3936. "Failed=%x mb[0]=%x mb[1]=%x.\n",
  3937. rval, mcp->mb[0], mcp->mb[1]);
  3938. } else {
  3939. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10de,
  3940. "Done %s.\n", __func__);
  3941. *sector_size = mcp->mb[1];
  3942. }
  3943. return rval;
  3944. }
  3945. int
  3946. qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
  3947. {
  3948. int rval;
  3949. mbx_cmd_t mc;
  3950. mbx_cmd_t *mcp = &mc;
  3951. if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
  3952. !IS_QLA27XX(vha->hw))
  3953. return QLA_FUNCTION_FAILED;
  3954. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df,
  3955. "Entered %s.\n", __func__);
  3956. mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
  3957. mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE :
  3958. FAC_OPT_CMD_WRITE_PROTECT;
  3959. mcp->out_mb = MBX_1|MBX_0;
  3960. mcp->in_mb = MBX_1|MBX_0;
  3961. mcp->tov = MBX_TOV_SECONDS;
  3962. mcp->flags = 0;
  3963. rval = qla2x00_mailbox_command(vha, mcp);
  3964. if (rval != QLA_SUCCESS) {
  3965. ql_dbg(ql_dbg_mbx, vha, 0x10e0,
  3966. "Failed=%x mb[0]=%x mb[1]=%x.\n",
  3967. rval, mcp->mb[0], mcp->mb[1]);
  3968. } else {
  3969. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e1,
  3970. "Done %s.\n", __func__);
  3971. }
  3972. return rval;
  3973. }
  3974. int
  3975. qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
  3976. {
  3977. int rval;
  3978. mbx_cmd_t mc;
  3979. mbx_cmd_t *mcp = &mc;
  3980. if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
  3981. !IS_QLA27XX(vha->hw))
  3982. return QLA_FUNCTION_FAILED;
  3983. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
  3984. "Entered %s.\n", __func__);
  3985. mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
  3986. mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR;
  3987. mcp->mb[2] = LSW(start);
  3988. mcp->mb[3] = MSW(start);
  3989. mcp->mb[4] = LSW(finish);
  3990. mcp->mb[5] = MSW(finish);
  3991. mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
  3992. mcp->in_mb = MBX_2|MBX_1|MBX_0;
  3993. mcp->tov = MBX_TOV_SECONDS;
  3994. mcp->flags = 0;
  3995. rval = qla2x00_mailbox_command(vha, mcp);
  3996. if (rval != QLA_SUCCESS) {
  3997. ql_dbg(ql_dbg_mbx, vha, 0x10e3,
  3998. "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
  3999. rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
  4000. } else {
  4001. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4,
  4002. "Done %s.\n", __func__);
  4003. }
  4004. return rval;
  4005. }
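/*
 * Usage sketch (illustrative only): the usual Flash Access Control
 * sequence built from the helpers above -- enable writes, erase the
 * sector range, then restore write protection.  start/finish are
 * placeholders in whatever flash addressing the firmware expects.
 *
 *	if (qla81xx_fac_do_write_enable(vha, 1) == QLA_SUCCESS) {
 *		rval = qla81xx_fac_erase_sector(vha, start, finish);
 *		qla81xx_fac_do_write_enable(vha, 0);
 *	}
 */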
  4006. int
  4007. qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
  4008. {
  4009. int rval = 0;
  4010. mbx_cmd_t mc;
  4011. mbx_cmd_t *mcp = &mc;
  4012. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e5,
  4013. "Entered %s.\n", __func__);
  4014. mcp->mb[0] = MBC_RESTART_MPI_FW;
  4015. mcp->out_mb = MBX_0;
  4016. mcp->in_mb = MBX_0|MBX_1;
  4017. mcp->tov = MBX_TOV_SECONDS;
  4018. mcp->flags = 0;
  4019. rval = qla2x00_mailbox_command(vha, mcp);
  4020. if (rval != QLA_SUCCESS) {
  4021. ql_dbg(ql_dbg_mbx, vha, 0x10e6,
  4022. "Failed=%x mb[0]=%x mb[1]=%x.\n",
  4023. rval, mcp->mb[0], mcp->mb[1]);
  4024. } else {
  4025. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e7,
  4026. "Done %s.\n", __func__);
  4027. }
  4028. return rval;
  4029. }
  4030. int
  4031. qla82xx_set_driver_version(scsi_qla_host_t *vha, char *version)
  4032. {
  4033. int rval;
  4034. mbx_cmd_t mc;
  4035. mbx_cmd_t *mcp = &mc;
  4036. int i;
  4037. int len;
  4038. uint16_t *str;
  4039. struct qla_hw_data *ha = vha->hw;
  4040. if (!IS_P3P_TYPE(ha))
  4041. return QLA_FUNCTION_FAILED;
  4042. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117b,
  4043. "Entered %s.\n", __func__);
  4044. str = (void *)version;
  4045. len = strlen(version);
  4046. mcp->mb[0] = MBC_SET_RNID_PARAMS;
  4047. mcp->mb[1] = RNID_TYPE_SET_VERSION << 8;
  4048. mcp->out_mb = MBX_1|MBX_0;
  4049. for (i = 4; i < 16 && len; i++, str++, len -= 2) {
  4050. mcp->mb[i] = cpu_to_le16p(str);
  4051. mcp->out_mb |= 1<<i;
  4052. }
  4053. for (; i < 16; i++) {
  4054. mcp->mb[i] = 0;
  4055. mcp->out_mb |= 1<<i;
  4056. }
  4057. mcp->in_mb = MBX_1|MBX_0;
  4058. mcp->tov = MBX_TOV_SECONDS;
  4059. mcp->flags = 0;
  4060. rval = qla2x00_mailbox_command(vha, mcp);
  4061. if (rval != QLA_SUCCESS) {
  4062. ql_dbg(ql_dbg_mbx, vha, 0x117c,
  4063. "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
  4064. } else {
  4065. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117d,
  4066. "Done %s.\n", __func__);
  4067. }
  4068. return rval;
  4069. }
  4070. int
  4071. qla25xx_set_driver_version(scsi_qla_host_t *vha, char *version)
  4072. {
  4073. int rval;
  4074. mbx_cmd_t mc;
  4075. mbx_cmd_t *mcp = &mc;
  4076. int len;
  4077. uint16_t dwlen;
  4078. uint8_t *str;
  4079. dma_addr_t str_dma;
  4080. struct qla_hw_data *ha = vha->hw;
  4081. if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha) ||
  4082. IS_P3P_TYPE(ha))
  4083. return QLA_FUNCTION_FAILED;
  4084. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117e,
  4085. "Entered %s.\n", __func__);
  4086. str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma);
  4087. if (!str) {
  4088. ql_log(ql_log_warn, vha, 0x117f,
  4089. "Failed to allocate driver version param.\n");
  4090. return QLA_MEMORY_ALLOC_FAILED;
  4091. }
  4092. memcpy(str, "\x7\x3\x11\x0", 4);
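/* str[0] (0x07) is used below as the descriptor length in dwords; the
 * remaining header bytes appear to be the fixed header expected by the
 * RNID set-version command. */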
  4093. dwlen = str[0];
  4094. len = dwlen * 4 - 4;
  4095. memset(str + 4, 0, len);
  4096. if (len > strlen(version))
  4097. len = strlen(version);
  4098. memcpy(str + 4, version, len);
  4099. mcp->mb[0] = MBC_SET_RNID_PARAMS;
  4100. mcp->mb[1] = RNID_TYPE_SET_VERSION << 8 | dwlen;
  4101. mcp->mb[2] = MSW(LSD(str_dma));
  4102. mcp->mb[3] = LSW(LSD(str_dma));
  4103. mcp->mb[6] = MSW(MSD(str_dma));
  4104. mcp->mb[7] = LSW(MSD(str_dma));
  4105. mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
  4106. mcp->in_mb = MBX_1|MBX_0;
  4107. mcp->tov = MBX_TOV_SECONDS;
  4108. mcp->flags = 0;
  4109. rval = qla2x00_mailbox_command(vha, mcp);
  4110. if (rval != QLA_SUCCESS) {
  4111. ql_dbg(ql_dbg_mbx, vha, 0x1180,
  4112. "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
  4113. } else {
  4114. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1181,
  4115. "Done %s.\n", __func__);
  4116. }
  4117. dma_pool_free(ha->s_dma_pool, str, str_dma);
  4118. return rval;
  4119. }
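/*
 * Usage sketch (illustrative only): hand the driver version string to
 * the firmware.  QLA2XXX_VERSION is the driver's version macro (defined
 * in the driver's version header).
 *
 *	char ver[] = QLA2XXX_VERSION;
 *
 *	qla25xx_set_driver_version(vha, ver);
 */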
  4120. int
  4121. qla24xx_get_port_login_templ(scsi_qla_host_t *vha, dma_addr_t buf_dma,
  4122. void *buf, uint16_t bufsiz)
  4123. {
  4124. int rval, i;
  4125. mbx_cmd_t mc;
  4126. mbx_cmd_t *mcp = &mc;
  4127. uint32_t *bp;
  4128. if (!IS_FWI2_CAPABLE(vha->hw))
  4129. return QLA_FUNCTION_FAILED;
  4130. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159,
  4131. "Entered %s.\n", __func__);
  4132. mcp->mb[0] = MBC_GET_RNID_PARAMS;
  4133. mcp->mb[1] = RNID_TYPE_PORT_LOGIN << 8;
  4134. mcp->mb[2] = MSW(buf_dma);
  4135. mcp->mb[3] = LSW(buf_dma);
  4136. mcp->mb[6] = MSW(MSD(buf_dma));
  4137. mcp->mb[7] = LSW(MSD(buf_dma));
  4138. mcp->mb[8] = bufsiz/4;
  4139. mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
  4140. mcp->in_mb = MBX_1|MBX_0;
  4141. mcp->tov = MBX_TOV_SECONDS;
  4142. mcp->flags = 0;
  4143. rval = qla2x00_mailbox_command(vha, mcp);
  4144. if (rval != QLA_SUCCESS) {
  4145. ql_dbg(ql_dbg_mbx, vha, 0x115a,
  4146. "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
  4147. } else {
  4148. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b,
  4149. "Done %s.\n", __func__);
  4150. bp = (uint32_t *) buf;
  4151. for (i = 0; i < (bufsiz-4)/4; i++, bp++)
  4152. *bp = le32_to_cpu(*bp);
  4153. }
  4154. return rval;
  4155. }
  4156. static int
  4157. qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp)
  4158. {
  4159. int rval;
  4160. mbx_cmd_t mc;
  4161. mbx_cmd_t *mcp = &mc;
  4162. if (!IS_FWI2_CAPABLE(vha->hw))
  4163. return QLA_FUNCTION_FAILED;
  4164. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159,
  4165. "Entered %s.\n", __func__);
  4166. mcp->mb[0] = MBC_GET_RNID_PARAMS;
  4167. mcp->mb[1] = RNID_TYPE_ASIC_TEMP << 8;
  4168. mcp->out_mb = MBX_1|MBX_0;
  4169. mcp->in_mb = MBX_1|MBX_0;
  4170. mcp->tov = MBX_TOV_SECONDS;
  4171. mcp->flags = 0;
  4172. rval = qla2x00_mailbox_command(vha, mcp);
  4173. *temp = mcp->mb[1];
  4174. if (rval != QLA_SUCCESS) {
  4175. ql_dbg(ql_dbg_mbx, vha, 0x115a,
  4176. "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
  4177. } else {
  4178. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b,
  4179. "Done %s.\n", __func__);
  4180. }
  4181. return rval;
  4182. }
int
qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
    uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
	    "Entered %s.\n", __func__);

	if (!IS_FWI2_CAPABLE(ha))
		return QLA_FUNCTION_FAILED;

	if (len == 1)
		opt |= BIT_0;

	mcp->mb[0] = MBC_READ_SFP;
	mcp->mb[1] = dev;
	mcp->mb[2] = MSW(sfp_dma);
	mcp->mb[3] = LSW(sfp_dma);
	mcp->mb[6] = MSW(MSD(sfp_dma));
	mcp->mb[7] = LSW(MSD(sfp_dma));
	mcp->mb[8] = len;
	mcp->mb[9] = off;
	mcp->mb[10] = opt;
	mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (opt & BIT_0)
		*sfp = mcp->mb[1];

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10e9,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
		if (mcp->mb[0] == MBS_COMMAND_ERROR && mcp->mb[1] == 0x22) {
			/* sfp is not there */
			rval = QLA_INTERFACE_ERROR;
		}
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
		    "Done %s.\n", __func__);
	}

	return rval;
}

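/*
 * qla2x00_write_sfp
 *	Writes SFP/transceiver data: issues the Write SFP mailbox command to
 *	transfer 'len' bytes from the DMA buffer at 'sfp_dma' to device
 *	address 'dev' at offset 'off'.
 */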
int
qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
    uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10eb,
	    "Entered %s.\n", __func__);

	if (!IS_FWI2_CAPABLE(ha))
		return QLA_FUNCTION_FAILED;

	if (len == 1)
		opt |= BIT_0;

	if (opt & BIT_0)
		len = *sfp;

	mcp->mb[0] = MBC_WRITE_SFP;
	mcp->mb[1] = dev;
	mcp->mb[2] = MSW(sfp_dma);
	mcp->mb[3] = LSW(sfp_dma);
	mcp->mb[6] = MSW(MSD(sfp_dma));
	mcp->mb[7] = LSW(MSD(sfp_dma));
	mcp->mb[8] = len;
	mcp->mb[9] = off;
	mcp->mb[10] = opt;
	mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10ec,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ed,
		    "Done %s.\n", __func__);
	}

	return rval;
}

int
qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
    uint16_t size_in_bytes, uint16_t *actual_size)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ee,
	    "Entered %s.\n", __func__);

	if (!IS_CNA_CAPABLE(vha->hw))
		return QLA_FUNCTION_FAILED;

	mcp->mb[0] = MBC_GET_XGMAC_STATS;
	mcp->mb[2] = MSW(stats_dma);
	mcp->mb[3] = LSW(stats_dma);
	mcp->mb[6] = MSW(MSD(stats_dma));
	mcp->mb[7] = LSW(MSD(stats_dma));
	mcp->mb[8] = size_in_bytes >> 2;
	mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
	mcp->in_mb = MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10ef,
		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f0,
		    "Done %s.\n", __func__);

		*actual_size = mcp->mb[2] << 2;
	}

	return rval;
}

int
qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
    uint16_t size)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f1,
	    "Entered %s.\n", __func__);

	if (!IS_CNA_CAPABLE(vha->hw))
		return QLA_FUNCTION_FAILED;

	mcp->mb[0] = MBC_GET_DCBX_PARAMS;
	mcp->mb[1] = 0;
	mcp->mb[2] = MSW(tlv_dma);
	mcp->mb[3] = LSW(tlv_dma);
	mcp->mb[6] = MSW(MSD(tlv_dma));
	mcp->mb[7] = LSW(MSD(tlv_dma));
	mcp->mb[8] = size;
	mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10f2,
		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f3,
		    "Done %s.\n", __func__);
	}

	return rval;
}

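/*
 * qla2x00_read_ram_word
 *	Reads one 32-bit word of RISC RAM at 'risc_addr' via the Read RAM
 *	Extended mailbox command and returns it in *data.
 */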
int
qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f4,
	    "Entered %s.\n", __func__);

	if (!IS_FWI2_CAPABLE(vha->hw))
		return QLA_FUNCTION_FAILED;

	mcp->mb[0] = MBC_READ_RAM_EXTENDED;
	mcp->mb[1] = LSW(risc_addr);
	mcp->mb[8] = MSW(risc_addr);
	mcp->out_mb = MBX_8|MBX_1|MBX_0;
	mcp->in_mb = MBX_3|MBX_2|MBX_0;
	mcp->tov = 30;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10f5,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f6,
		    "Done %s.\n", __func__);
		*data = mcp->mb[3] << 16 | mcp->mb[2];
	}

	return rval;
}

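/*
 * qla2x00_loopback_test
 *	Runs the firmware Diagnostic Loopback command using the send/receive
 *	DMA buffers, transfer size and iteration count supplied in 'mreq'.
 *	The raw mailbox completion status is copied back into 'mresp'.
 */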
int
qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
    uint16_t *mresp)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f7,
	    "Entered %s.\n", __func__);

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK;
	/* BIT_6 specifies 64-bit addressing */
	mcp->mb[1] = mreq->options | BIT_6;

	/* transfer count */
	mcp->mb[10] = LSW(mreq->transfer_size);
	mcp->mb[11] = MSW(mreq->transfer_size);

	/* send data address */
	mcp->mb[14] = LSW(mreq->send_dma);
	mcp->mb[15] = MSW(mreq->send_dma);
	mcp->mb[20] = LSW(MSD(mreq->send_dma));
	mcp->mb[21] = MSW(MSD(mreq->send_dma));

	/* receive data address */
	mcp->mb[16] = LSW(mreq->rcv_dma);
	mcp->mb[17] = MSW(mreq->rcv_dma);
	mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
	mcp->mb[7] = MSW(MSD(mreq->rcv_dma));

	/* Iteration count */
	mcp->mb[18] = LSW(mreq->iteration_count);
	mcp->mb[19] = MSW(mreq->iteration_count);

	mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15|
	    MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
	if (IS_CNA_CAPABLE(vha->hw))
		mcp->out_mb |= MBX_2;
	mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0;

	mcp->buf_size = mreq->transfer_size;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;

	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10f8,
		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[18]=%x "
		    "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2],
		    mcp->mb[3], mcp->mb[18], mcp->mb[19]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f9,
		    "Done %s.\n", __func__);
	}

	/* Copy mailbox information */
	memcpy(mresp, mcp->mb, 64);

	return rval;
}

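/*
 * qla2x00_echo_test
 *	Runs the firmware Diagnostic Echo command with the buffers described
 *	by 'mreq'.  The raw mailbox completion status is copied back into
 *	'mresp'.
 */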
int
qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
    uint16_t *mresp)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fa,
	    "Entered %s.\n", __func__);

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
	/* BIT_6 specifies 64bit address */
	mcp->mb[1] = mreq->options | BIT_15 | BIT_6;
	if (IS_CNA_CAPABLE(ha))
		mcp->mb[2] = vha->fcoe_fcf_idx;
	mcp->mb[16] = LSW(mreq->rcv_dma);
	mcp->mb[17] = MSW(mreq->rcv_dma);
	mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
	mcp->mb[7] = MSW(MSD(mreq->rcv_dma));

	mcp->mb[10] = LSW(mreq->transfer_size);

	mcp->mb[14] = LSW(mreq->send_dma);
	mcp->mb[15] = MSW(mreq->send_dma);
	mcp->mb[20] = LSW(MSD(mreq->send_dma));
	mcp->mb[21] = MSW(MSD(mreq->send_dma));

	mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15|
	    MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
	if (IS_CNA_CAPABLE(ha))
		mcp->out_mb |= MBX_2;

	mcp->in_mb = MBX_0;
	if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
	    IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
		mcp->in_mb |= MBX_1;
	if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
		mcp->in_mb |= MBX_3;

	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
	mcp->buf_size = mreq->transfer_size;

	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10fb,
		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fc,
		    "Done %s.\n", __func__);
	}

	/* Copy mailbox information */
	memcpy(mresp, mcp->mb, 64);

	return rval;
}

int
qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fd,
	    "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic);

	mcp->mb[0] = MBC_ISP84XX_RESET;
	mcp->mb[1] = enable_diagnostic;
	mcp->out_mb = MBX_1|MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS)
		ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval);
	else
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ff,
		    "Done %s.\n", __func__);

	return rval;
}

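/*
 * qla2x00_write_ram_word
 *	Writes the 32-bit value 'data' to RISC RAM at 'risc_addr' via the
 *	Write RAM Word Extended mailbox command.
 */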
int
qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1100,
	    "Entered %s.\n", __func__);

	if (!IS_FWI2_CAPABLE(vha->hw))
		return QLA_FUNCTION_FAILED;

	mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED;
	mcp->mb[1] = LSW(risc_addr);
	mcp->mb[2] = LSW(data);
	mcp->mb[3] = MSW(data);
	mcp->mb[8] = MSW(risc_addr);
	mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	mcp->tov = 30;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1101,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1102,
		    "Done %s.\n", __func__);
	}

	return rval;
}

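/*
 * qla81xx_write_mpi_register
 *	Writes MPI register data by loading the mailbox registers directly
 *	and polling the host status register for the mailbox completion,
 *	rather than going through qla2x00_mailbox_command().
 */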
int
qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
{
	int rval;
	uint32_t stat, timer;
	uint16_t mb0 = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	rval = QLA_SUCCESS;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1103,
	    "Entered %s.\n", __func__);

	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

	/* Write the MBC data to the registers */
	WRT_REG_WORD(&reg->mailbox0, MBC_WRITE_MPI_REGISTER);
	WRT_REG_WORD(&reg->mailbox1, mb[0]);
	WRT_REG_WORD(&reg->mailbox2, mb[1]);
	WRT_REG_WORD(&reg->mailbox3, mb[2]);
	WRT_REG_WORD(&reg->mailbox4, mb[3]);

	WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);

	/* Poll for MBC interrupt */
	for (timer = 6000000; timer; timer--) {
		/* Check for pending interrupts. */
		stat = RD_REG_DWORD(&reg->host_status);
		if (stat & HSRX_RISC_INT) {
			stat &= 0xff;

			if (stat == 0x1 || stat == 0x2 ||
			    stat == 0x10 || stat == 0x11) {
				set_bit(MBX_INTERRUPT,
				    &ha->mbx_cmd_flags);
				mb0 = RD_REG_WORD(&reg->mailbox0);
				WRT_REG_DWORD(&reg->hccr,
				    HCCRX_CLR_RISC_INT);
				RD_REG_DWORD(&reg->hccr);
				break;
			}
		}
		udelay(5);
	}

	if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags))
		rval = mb0 & MBS_MASK;
	else
		rval = QLA_FUNCTION_FAILED;

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1104,
		    "Failed=%x mb[0]=%x.\n", rval, mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1105,
		    "Done %s.\n", __func__);
	}

	return rval;
}

int
qla2x00_get_data_rate(scsi_qla_host_t *vha)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106,
	    "Entered %s.\n", __func__);

	if (!IS_FWI2_CAPABLE(ha))
		return QLA_FUNCTION_FAILED;

	mcp->mb[0] = MBC_DATA_RATE;
	mcp->mb[1] = 0;
	mcp->out_mb = MBX_1|MBX_0;
	mcp->in_mb = MBX_2|MBX_1|MBX_0;
	if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
		mcp->in_mb |= MBX_3;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1107,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108,
		    "Done %s.\n", __func__);
		if (mcp->mb[1] != 0x7)
			ha->link_data_rate = mcp->mb[1];
	}

	return rval;
}

int
qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109,
	    "Entered %s.\n", __func__);

	if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha) &&
	    !IS_QLA27XX(ha))
		return QLA_FUNCTION_FAILED;

	mcp->mb[0] = MBC_GET_PORT_CONFIG;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x110a,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		/* Copy all bits to preserve original value */
		memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4);

		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110b,
		    "Done %s.\n", __func__);
	}

	return rval;
}

int
qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110c,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_SET_PORT_CONFIG;
	/* Copy all bits to preserve original setting */
	memcpy(&mcp->mb[1], mb, sizeof(uint16_t) * 4);
	mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x110d,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110e,
		    "Done %s.\n", __func__);
	}

	return rval;
}

int
qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
    uint16_t *mb)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110f,
	    "Entered %s.\n", __func__);

	if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
		return QLA_FUNCTION_FAILED;

	mcp->mb[0] = MBC_PORT_PARAMS;
	mcp->mb[1] = loop_id;
	if (ha->flags.fcp_prio_enabled)
		mcp->mb[2] = BIT_1;
	else
		mcp->mb[2] = BIT_2;
	mcp->mb[4] = priority & 0xf;
	mcp->mb[9] = vha->vp_idx;
	mcp->out_mb = MBX_9|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
	mcp->tov = 30;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (mb != NULL) {
		mb[0] = mcp->mb[0];
		mb[1] = mcp->mb[1];
		mb[3] = mcp->mb[3];
		mb[4] = mcp->mb[4];
	}

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10cc,
		    "Done %s.\n", __func__);
	}

	return rval;
}

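/*
 * qla2x00_get_thermal_temp
 *	Retrieves the current board temperature into *temp using whichever
 *	method the adapter supports: SFP thermal registers on select 25xx
 *	boards, the P3P temperature registers, or the Get ASIC Temperature
 *	mailbox command.
 */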
int
qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp)
{
	int rval = QLA_FUNCTION_FAILED;
	struct qla_hw_data *ha = vha->hw;
	uint8_t byte;

	if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha)) {
		ql_dbg(ql_dbg_mbx, vha, 0x1150,
		    "Thermal not supported by this card.\n");
		return rval;
	}

	if (IS_QLA25XX(ha)) {
		if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
		    ha->pdev->subsystem_device == 0x0175) {
			rval = qla2x00_read_sfp(vha, 0, &byte,
			    0x98, 0x1, 1, BIT_13|BIT_0);
			*temp = byte;
			return rval;
		}

		if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
		    ha->pdev->subsystem_device == 0x338e) {
			rval = qla2x00_read_sfp(vha, 0, &byte,
			    0x98, 0x1, 1, BIT_15|BIT_14|BIT_0);
			*temp = byte;
			return rval;
		}

		ql_dbg(ql_dbg_mbx, vha, 0x10c9,
		    "Thermal not supported by this card.\n");
		return rval;
	}

	if (IS_QLA82XX(ha)) {
		*temp = qla82xx_read_temperature(vha);
		rval = QLA_SUCCESS;
		return rval;
	} else if (IS_QLA8044(ha)) {
		*temp = qla8044_read_temperature(vha);
		rval = QLA_SUCCESS;
		return rval;
	}

	rval = qla2x00_read_asic_temperature(vha, temp);
	return rval;
}

int
qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1017,
	    "Entered %s.\n", __func__);

	if (!IS_FWI2_CAPABLE(ha))
		return QLA_FUNCTION_FAILED;

	memset(mcp, 0, sizeof(mbx_cmd_t));
	mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
	mcp->mb[1] = 1;

	mcp->out_mb = MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	mcp->tov = 30;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1016,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100e,
		    "Done %s.\n", __func__);
	}

	return rval;
}

int
qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100d,
	    "Entered %s.\n", __func__);

	if (!IS_P3P_TYPE(ha))
		return QLA_FUNCTION_FAILED;

	memset(mcp, 0, sizeof(mbx_cmd_t));
	mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
	mcp->mb[1] = 0;

	mcp->out_mb = MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	mcp->tov = 30;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x100c,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100b,
		    "Done %s.\n", __func__);
	}

	return rval;
}

int
qla82xx_md_get_template_size(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	int rval = QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111f,
	    "Entered %s.\n", __func__);

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
	mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
	mcp->mb[2] = LSW(RQST_TMPLT_SIZE);
	mcp->mb[3] = MSW(RQST_TMPLT_SIZE);

	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
	    MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;

	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
	mcp->tov = MBX_TOV_SECONDS;
	rval = qla2x00_mailbox_command(vha, mcp);

	/* Always copy back return mailbox values. */
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1120,
		    "mailbox command FAILED=0x%x, subcode=%x.\n",
		    (mcp->mb[1] << 16) | mcp->mb[0],
		    (mcp->mb[3] << 16) | mcp->mb[2]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1121,
		    "Done %s.\n", __func__);
		ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]);
		if (!ha->md_template_size) {
			ql_dbg(ql_dbg_mbx, vha, 0x1122,
			    "Null template size obtained.\n");
			rval = QLA_FUNCTION_FAILED;
		}
	}

	return rval;
}

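/*
 * qla82xx_md_get_template
 *	Allocates a coherent DMA buffer for the minidump template and
 *	requests the template from the firmware in a single mailbox command.
 */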
int
qla82xx_md_get_template(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	int rval = QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1123,
	    "Entered %s.\n", __func__);

	ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
	    ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
	if (!ha->md_tmplt_hdr) {
		ql_log(ql_log_warn, vha, 0x1124,
		    "Unable to allocate memory for Minidump template.\n");
		return rval;
	}

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
	mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
	mcp->mb[2] = LSW(RQST_TMPLT);
	mcp->mb[3] = MSW(RQST_TMPLT);
	mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma));
	mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma));
	mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma));
	mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma));
	mcp->mb[8] = LSW(ha->md_template_size);
	mcp->mb[9] = MSW(ha->md_template_size);

	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
	    MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1125,
		    "mailbox command FAILED=0x%x, subcode=%x.\n",
		    ((mcp->mb[1] << 16) | mcp->mb[0]),
		    ((mcp->mb[3] << 16) | mcp->mb[2]));
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1126,
		    "Done %s.\n", __func__);
	}

	return rval;
}

int
qla8044_md_get_template(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	int rval = QLA_FUNCTION_FAILED;
	int offset = 0, size = MINIDUMP_SIZE_36K;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11f,
	    "Entered %s.\n", __func__);

	ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
	    ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
	if (!ha->md_tmplt_hdr) {
		ql_log(ql_log_warn, vha, 0xb11b,
		    "Unable to allocate memory for Minidump template.\n");
		return rval;
	}

	memset(mcp->mb, 0, sizeof(mcp->mb));
	while (offset < ha->md_template_size) {
		mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
		mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
		mcp->mb[2] = LSW(RQST_TMPLT);
		mcp->mb[3] = MSW(RQST_TMPLT);
		mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma + offset));
		mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma + offset));
		mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma + offset));
		mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma + offset));
		mcp->mb[8] = LSW(size);
		mcp->mb[9] = MSW(size);

		mcp->mb[10] = offset & 0x0000FFFF;
		mcp->mb[11] = offset & 0xFFFF0000;

		mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
		mcp->tov = MBX_TOV_SECONDS;
		mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
		    MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
		mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
		rval = qla2x00_mailbox_command(vha, mcp);

		if (rval != QLA_SUCCESS) {
			ql_dbg(ql_dbg_mbx, vha, 0xb11c,
			    "mailbox command FAILED=0x%x, subcode=%x.\n",
			    ((mcp->mb[1] << 16) | mcp->mb[0]),
			    ((mcp->mb[3] << 16) | mcp->mb[2]));
			return rval;
		} else {
			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11d,
			    "Done %s.\n", __func__);
		}
		offset = offset + size;
	}

	return rval;
}

int
qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1133,
	    "Entered %s.\n", __func__);

	memset(mcp, 0, sizeof(mbx_cmd_t));
	mcp->mb[0] = MBC_SET_LED_CONFIG;
	mcp->mb[1] = led_cfg[0];
	mcp->mb[2] = led_cfg[1];
	if (IS_QLA8031(ha)) {
		mcp->mb[3] = led_cfg[2];
		mcp->mb[4] = led_cfg[3];
		mcp->mb[5] = led_cfg[4];
		mcp->mb[6] = led_cfg[5];
	}
	mcp->out_mb = MBX_2|MBX_1|MBX_0;
	if (IS_QLA8031(ha))
		mcp->out_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
	mcp->in_mb = MBX_0;
	mcp->tov = 30;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1134,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1135,
		    "Done %s.\n", __func__);
	}

	return rval;
}

int
qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1136,
	    "Entered %s.\n", __func__);

	memset(mcp, 0, sizeof(mbx_cmd_t));
	mcp->mb[0] = MBC_GET_LED_CONFIG;

	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_2|MBX_1|MBX_0;
	if (IS_QLA8031(ha))
		mcp->in_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
	mcp->tov = 30;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1137,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		led_cfg[0] = mcp->mb[1];
		led_cfg[1] = mcp->mb[2];
		if (IS_QLA8031(ha)) {
			led_cfg[2] = mcp->mb[3];
			led_cfg[3] = mcp->mb[4];
			led_cfg[4] = mcp->mb[5];
			led_cfg[5] = mcp->mb[6];
		}
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1138,
		    "Done %s.\n", __func__);
	}

	return rval;
}

int
qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	if (!IS_P3P_TYPE(ha))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1127,
	    "Entered %s.\n", __func__);

	memset(mcp, 0, sizeof(mbx_cmd_t));
	mcp->mb[0] = MBC_SET_LED_CONFIG;
	if (enable)
		mcp->mb[7] = 0xE;
	else
		mcp->mb[7] = 0xD;

	mcp->out_mb = MBX_7|MBX_0;
	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1128,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1129,
		    "Done %s.\n", __func__);
	}

	return rval;
}

int
qla83xx_wr_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_WRITE_REMOTE_REG;
	mcp->mb[1] = LSW(reg);
	mcp->mb[2] = MSW(reg);
	mcp->mb[3] = LSW(data);
	mcp->mb[4] = MSW(data);
	mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;

	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1131,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1132,
		    "Done %s.\n", __func__);
	}

	return rval;
}

int
qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113b,
		    "Implicit LOGO Unsupported.\n");
		return QLA_FUNCTION_FAILED;
	}

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113c,
	    "Entering %s.\n", __func__);

	/* Perform Implicit LOGO. */
	mcp->mb[0] = MBC_PORT_LOGOUT;
	mcp->mb[1] = fcport->loop_id;
	mcp->mb[10] = BIT_15;
	mcp->out_mb = MBX_10|MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS)
		ql_dbg(ql_dbg_mbx, vha, 0x113d,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	else
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113e,
		    "Done %s.\n", __func__);

	return rval;
}

int
qla83xx_rd_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t *data)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;
	unsigned long retry_max_time = jiffies + (2 * HZ);

	if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx, vha, 0x114b, "Entered %s.\n", __func__);

retry_rd_reg:
	mcp->mb[0] = MBC_READ_REMOTE_REG;
	mcp->mb[1] = LSW(reg);
	mcp->mb[2] = MSW(reg);
	mcp->out_mb = MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x114c,
		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1]);
	} else {
		*data = (mcp->mb[3] | (mcp->mb[4] << 16));
		if (*data == QLA8XXX_BAD_VALUE) {
			/*
			 * During a soft reset, CAMRAM register reads can
			 * return 0xbad0bad0, so retry CAMRAM reads for a
			 * maximum of 2 seconds before giving up.
			 */
			if (time_after(jiffies, retry_max_time)) {
				ql_dbg(ql_dbg_mbx, vha, 0x1141,
				    "Failure to read CAMRAM register. "
				    "data=0x%x.\n", *data);
				return QLA_FUNCTION_FAILED;
			}
			msleep(100);
			goto retry_rd_reg;
		}
		ql_dbg(ql_dbg_mbx, vha, 0x1142, "Done %s.\n", __func__);
	}

	return rval;
}

int
qla83xx_restart_nic_firmware(scsi_qla_host_t *vha)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_RESTART_NIC_FIRMWARE;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1144,
		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1]);
		ha->isp_ops->fw_dump(vha, 0);
	} else {
		ql_dbg(ql_dbg_mbx, vha, 0x1145, "Done %s.\n", __func__);
	}

	return rval;
}

int
qla83xx_access_control(scsi_qla_host_t *vha, uint16_t options,
    uint32_t start_addr, uint32_t end_addr, uint16_t *sector_size)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	uint8_t subcode = (uint8_t)options;
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA8031(ha))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx, vha, 0x1146, "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_SET_ACCESS_CONTROL;
	mcp->mb[1] = options;
	mcp->out_mb = MBX_1|MBX_0;
	if (subcode & BIT_2) {
		mcp->mb[2] = LSW(start_addr);
		mcp->mb[3] = MSW(start_addr);
		mcp->mb[4] = LSW(end_addr);
		mcp->mb[5] = MSW(end_addr);
		mcp->out_mb |= MBX_5|MBX_4|MBX_3|MBX_2;
	}
	mcp->in_mb = MBX_2|MBX_1|MBX_0;
	if (!(subcode & (BIT_2 | BIT_5)))
		mcp->in_mb |= MBX_4|MBX_3;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1147,
		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[4]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3],
		    mcp->mb[4]);
		ha->isp_ops->fw_dump(vha, 0);
	} else {
		if (subcode & BIT_5)
			*sector_size = mcp->mb[1];
		else if (subcode & (BIT_6 | BIT_7)) {
			ql_dbg(ql_dbg_mbx, vha, 0x1148,
			    "Driver-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
		} else if (subcode & (BIT_3 | BIT_4)) {
			ql_dbg(ql_dbg_mbx, vha, 0x1149,
			    "Flash-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
		}
		ql_dbg(ql_dbg_mbx, vha, 0x114a, "Done %s.\n", __func__);
	}

	return rval;
}

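/*
 * qla2x00_dump_mctp_data
 *	Dumps 'size' bytes of MCTP data starting at 'addr' into the host
 *	buffer at 'req_dma' using the Dump RISC RAM Extended mailbox command
 *	with the MCTP RAM ID (0x40).
 */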
int
qla2x00_dump_mctp_data(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
    uint32_t size)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	if (!IS_MCTP_CAPABLE(vha->hw))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114f,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
	mcp->mb[1] = LSW(addr);
	mcp->mb[2] = MSW(req_dma);
	mcp->mb[3] = LSW(req_dma);
	mcp->mb[4] = MSW(size);
	mcp->mb[5] = LSW(size);
	mcp->mb[6] = MSW(MSD(req_dma));
	mcp->mb[7] = LSW(MSD(req_dma));
	mcp->mb[8] = MSW(addr);
	/* Set the RAM ID valid bit; the MCTP RAM ID is 0x40. */
	mcp->mb[10] = BIT_7 | 0x40;

	/* out_mb is fully assigned here; mc is not zeroed beforehand. */
	mcp->out_mb = MBX_10|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|
	    MBX_0;

	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x114e,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114d,
		    "Done %s.\n", __func__);
	}

	return rval;
}

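/*
 * qla26xx_dport_diagnostics
 *	Runs D-Port diagnostics with the given 'options' and copies the
 *	results into 'dd_buf', which is DMA-mapped for the duration of the
 *	mailbox command.
 */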
int
qla26xx_dport_diagnostics(scsi_qla_host_t *vha,
    void *dd_buf, uint size, uint options)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	dma_addr_t dd_dma;

	if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x119f,
	    "Entered %s.\n", __func__);

	dd_dma = dma_map_single(&vha->hw->pdev->dev,
	    dd_buf, size, DMA_FROM_DEVICE);
	if (dma_mapping_error(&vha->hw->pdev->dev, dd_dma)) {
		ql_log(ql_log_warn, vha, 0x1194, "Failed to map dma buffer.\n");
		return QLA_MEMORY_ALLOC_FAILED;
	}

	memset(dd_buf, 0, size);

	mcp->mb[0] = MBC_DPORT_DIAGNOSTICS;
	mcp->mb[1] = options;
	mcp->mb[2] = MSW(LSD(dd_dma));
	mcp->mb[3] = LSW(LSD(dd_dma));
	mcp->mb[6] = MSW(MSD(dd_dma));
	mcp->mb[7] = LSW(MSD(dd_dma));
	mcp->mb[8] = size;
	mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->buf_size = size;
	mcp->flags = MBX_DMA_IN;
	mcp->tov = MBX_TOV_SECONDS * 4;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1195, "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1196,
		    "Done %s.\n", __func__);
	}

	dma_unmap_single(&vha->hw->pdev->dev, dd_dma,
	    size, DMA_FROM_DEVICE);

	return rval;
}

static void qla2x00_async_mb_sp_done(void *s, int res)
{
	struct srb *sp = s;

	sp->u.iocb_cmd.u.mbx.rc = res;

	complete(&sp->u.iocb_cmd.u.mbx.comp);
	/* don't free sp here. Let the caller do the free */
}

/*
 * This mailbox uses the IOCB interface to send an MB command.  This
 * allows non-critical (non chip-setup) commands to go out in parallel.
 */
int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp)
{
	int rval = QLA_FUNCTION_FAILED;
	srb_t *sp;
	struct srb_iocb *c;

	if (!vha->hw->flags.fw_started)
		goto done;

	sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
	if (!sp)
		goto done;

	sp->type = SRB_MB_IOCB;
	sp->name = mb_to_str(mcp->mb[0]);

	c = &sp->u.iocb_cmd;
	c->timeout = qla2x00_async_iocb_timeout;
	init_completion(&c->u.mbx.comp);

	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	memcpy(sp->u.iocb_cmd.u.mbx.out_mb, mcp->mb, SIZEOF_IOCB_MB_REG);

	sp->done = qla2x00_async_mb_sp_done;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1018,
		    "%s: %s Failed submission. %x.\n",
		    __func__, sp->name, rval);
		goto done_free_sp;
	}

	ql_dbg(ql_dbg_mbx, vha, 0x113f, "MB:%s hndl %x submitted\n",
	    sp->name, sp->handle);

	wait_for_completion(&c->u.mbx.comp);
	memcpy(mcp->mb, sp->u.iocb_cmd.u.mbx.in_mb, SIZEOF_IOCB_MB_REG);

	rval = c->u.mbx.rc;
	switch (rval) {
	case QLA_FUNCTION_TIMEOUT:
		ql_dbg(ql_dbg_mbx, vha, 0x1140, "%s: %s Timeout. %x.\n",
		    __func__, sp->name, rval);
		break;
	case QLA_SUCCESS:
		ql_dbg(ql_dbg_mbx, vha, 0x119d, "%s: %s done.\n",
		    __func__, sp->name);
		break;
	default:
		ql_dbg(ql_dbg_mbx, vha, 0x119e, "%s: %s Failed. %x.\n",
		    __func__, sp->name, rval);
		break;
	}

done_free_sp:
	sp->free(sp);
done:
	return rval;
}

/*
 * qla24xx_gpdb_wait
 * NOTE: Do not call this routine from DPC thread
 */
int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
{
	int rval = QLA_FUNCTION_FAILED;
	dma_addr_t pd_dma;
	struct port_database_24xx *pd;
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;

	if (!vha->hw->flags.fw_started)
		goto done;

	pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
	if (pd == NULL) {
		ql_log(ql_log_warn, vha, 0xd047,
		    "Failed to allocate port database structure.\n");
		goto done_free_sp;
	}

	memset(&mc, 0, sizeof(mc));
	mc.mb[0] = MBC_GET_PORT_DATABASE;
	mc.mb[1] = cpu_to_le16(fcport->loop_id);
	mc.mb[2] = MSW(pd_dma);
	mc.mb[3] = LSW(pd_dma);
	mc.mb[6] = MSW(MSD(pd_dma));
	mc.mb[7] = LSW(MSD(pd_dma));
	mc.mb[9] = cpu_to_le16(vha->vp_idx);
	mc.mb[10] = cpu_to_le16((uint16_t)opt);

	rval = qla24xx_send_mb_cmd(vha, &mc);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1193,
		    "%s: %8phC fail\n", __func__, fcport->port_name);
		goto done_free_sp;
	}

	rval = __qla24xx_parse_gpdb(vha, fcport, pd);

	ql_dbg(ql_dbg_mbx, vha, 0x1197, "%s: %8phC done\n",
	    __func__, fcport->port_name);

done_free_sp:
	if (pd)
		dma_pool_free(ha->s_dma_pool, pd, pd_dma);
done:
	return rval;
}

int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
    struct port_database_24xx *pd)
{
	int rval = QLA_SUCCESS;
	uint64_t zero = 0;
	u8 current_login_state, last_login_state;

	if (fcport->fc4f_nvme) {
		current_login_state = pd->current_login_state >> 4;
		last_login_state = pd->last_login_state >> 4;
	} else {
		current_login_state = pd->current_login_state & 0xf;
		last_login_state = pd->last_login_state & 0xf;
	}

	/* Check for logged in state. */
	if (current_login_state != PDS_PRLI_COMPLETE) {
		ql_dbg(ql_dbg_mbx, vha, 0x119a,
		    "Unable to verify login-state (%x/%x) for loop_id %x.\n",
		    current_login_state, last_login_state, fcport->loop_id);
		rval = QLA_FUNCTION_FAILED;
		goto gpd_error_out;
	}

	if (fcport->loop_id == FC_NO_LOOP_ID ||
	    (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
	     memcmp(fcport->port_name, pd->port_name, 8))) {
		/* We lost the device mid way. */
		rval = QLA_NOT_LOGGED_IN;
		goto gpd_error_out;
	}

	/* Names are little-endian. */
	memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
	memcpy(fcport->port_name, pd->port_name, WWN_SIZE);

	/* Get port_id of device. */
	fcport->d_id.b.domain = pd->port_id[0];
	fcport->d_id.b.area = pd->port_id[1];
	fcport->d_id.b.al_pa = pd->port_id[2];
	fcport->d_id.b.rsvd_1 = 0;

	if (fcport->fc4f_nvme) {
		fcport->nvme_prli_service_param =
		    pd->prli_nvme_svc_param_word_3;
		fcport->port_type = FCT_NVME;
	} else {
		/* If not target must be initiator or unknown type. */
		if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
			fcport->port_type = FCT_INITIATOR;
		else
			fcport->port_type = FCT_TARGET;
	}

	/* Passback COS information. */
	fcport->supported_classes = (pd->flags & PDF_CLASS_2) ?
	    FC_COS_CLASS2 : FC_COS_CLASS3;

	if (pd->prli_svc_param_word_3[0] & BIT_7) {
		fcport->flags |= FCF_CONF_COMP_SUPPORTED;
		fcport->conf_compl_supported = 1;
	}

gpd_error_out:
	return rval;
}

/*
 * qla24xx_gidlist_wait
 * NOTE: Do not call this routine from DPC thread
 */
int qla24xx_gidlist_wait(struct scsi_qla_host *vha,
    void *id_list, dma_addr_t id_list_dma, uint16_t *entries)
{
	int rval = QLA_FUNCTION_FAILED;
	mbx_cmd_t mc;

	if (!vha->hw->flags.fw_started)
		goto done;

	memset(&mc, 0, sizeof(mc));
	mc.mb[0] = MBC_GET_ID_LIST;
	mc.mb[2] = MSW(id_list_dma);
	mc.mb[3] = LSW(id_list_dma);
	mc.mb[6] = MSW(MSD(id_list_dma));
	mc.mb[7] = LSW(MSD(id_list_dma));
	mc.mb[8] = 0;
	mc.mb[9] = cpu_to_le16(vha->vp_idx);

	rval = qla24xx_send_mb_cmd(vha, &mc);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x119b,
		    "%s: fail\n", __func__);
	} else {
		*entries = mc.mb[1];
		ql_dbg(ql_dbg_mbx, vha, 0x119c,
		    "%s: done\n", __func__);
	}

done:
	return rval;
}

int qla27xx_set_zio_threshold(scsi_qla_host_t *vha, uint16_t value)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1200,
	    "Entered %s\n", __func__);

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
	mcp->mb[1] = cpu_to_le16(1);
	mcp->mb[2] = cpu_to_le16(value);
	mcp->out_mb = MBX_2 | MBX_1 | MBX_0;
	mcp->in_mb = MBX_2 | MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);

	ql_dbg(ql_dbg_mbx, vha, 0x1201, "%s %x\n",
	    (rval != QLA_SUCCESS) ? "Failed" : "Done", rval);

	return rval;
}

int qla27xx_get_zio_threshold(scsi_qla_host_t *vha, uint16_t *value)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1203,
	    "Entered %s\n", __func__);

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
	mcp->mb[1] = cpu_to_le16(0);
	mcp->out_mb = MBX_1 | MBX_0;
	mcp->in_mb = MBX_2 | MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval == QLA_SUCCESS)
		*value = mc.mb[2];

	ql_dbg(ql_dbg_mbx, vha, 0x1205, "%s %x\n",
	    (rval != QLA_SUCCESS) ? "Failed" : "Done", rval);

	return rval;
}

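/*
 * qla2x00_read_sfp_dev
 *	Reads the full SFP device contents (device addresses 0xa0 and 0xa2)
 *	in SFP_BLOCK_SIZE chunks into ha->sfp_data and copies up to 'count'
 *	bytes into 'buf' when a buffer is supplied.
 */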
int
qla2x00_read_sfp_dev(struct scsi_qla_host *vha, char *buf, int count)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t iter, addr, offset;
	dma_addr_t phys_addr;
	int rval, c;
	u8 *sfp_data;

	memset(ha->sfp_data, 0, SFP_DEV_SIZE);
	addr = 0xa0;
	phys_addr = ha->sfp_data_dma;
	sfp_data = ha->sfp_data;
	offset = c = 0;

	for (iter = 0; iter < SFP_DEV_SIZE / SFP_BLOCK_SIZE; iter++) {
		if (iter == 4) {
			/* Skip to next device address. */
			addr = 0xa2;
			offset = 0;
		}

		rval = qla2x00_read_sfp(vha, phys_addr, sfp_data,
		    addr, offset, SFP_BLOCK_SIZE, BIT_1);
		if (rval != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x706d,
			    "Unable to read SFP data (%x/%x/%x).\n", rval,
			    addr, offset);
			return rval;
		}

		if (buf && (c < count)) {
			u16 sz;

			if ((count - c) >= SFP_BLOCK_SIZE)
				sz = SFP_BLOCK_SIZE;
			else
				sz = count - c;

			memcpy(buf, sfp_data, sz);
			buf += SFP_BLOCK_SIZE;
			c += sz;
		}
		phys_addr += SFP_BLOCK_SIZE;
		sfp_data += SFP_BLOCK_SIZE;
		offset += SFP_BLOCK_SIZE;
	}

	return rval;
}

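/*
 * qla24xx_res_count_wait
 *	Retrieves the firmware resource counts through the MB IOCB interface
 *	and copies up to 'out_mb_sz' bytes of the returned mailbox registers
 *	into 'out_mb'.  Do not call this routine from the DPC thread.
 */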
int qla24xx_res_count_wait(struct scsi_qla_host *vha,
    uint16_t *out_mb, int out_mb_sz)
{
	int rval = QLA_FUNCTION_FAILED;
	mbx_cmd_t mc;

	if (!vha->hw->flags.fw_started)
		goto done;

	memset(&mc, 0, sizeof(mc));
	mc.mb[0] = MBC_GET_RESOURCE_COUNTS;

	rval = qla24xx_send_mb_cmd(vha, &mc);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0xffff,
		    "%s: fail\n", __func__);
	} else {
		if (out_mb_sz <= SIZEOF_IOCB_MB_REG)
			memcpy(out_mb, mc.mb, out_mb_sz);
		else
			memcpy(out_mb, mc.mb, SIZEOF_IOCB_MB_REG);

		ql_dbg(ql_dbg_mbx, vha, 0xffff,
		    "%s: done\n", __func__);
	}

done:
	return rval;
}