smartpqi_init.c

/*
 * driver for Microsemi PQI-based storage controllers
 * Copyright (c) 2016 Microsemi Corporation
 * Copyright (c) 2016 PMC-Sierra, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 * Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/cciss_ioctl.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport_sas.h>
#include <asm/unaligned.h>
#include "smartpqi.h"
#include "smartpqi_sis.h"

#if !defined(BUILD_TIMESTAMP)
#define BUILD_TIMESTAMP
#endif

#define DRIVER_VERSION		"0.9.13-370"
#define DRIVER_MAJOR		0
#define DRIVER_MINOR		9
#define DRIVER_RELEASE		13
#define DRIVER_REVISION		370

#define DRIVER_NAME		"Microsemi PQI Driver (v" DRIVER_VERSION ")"
#define DRIVER_NAME_SHORT	"smartpqi"

MODULE_AUTHOR("Microsemi");
MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version "
	DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("Microsemi Smart Family Controllers");
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");

#define PQI_ENABLE_MULTI_QUEUE_SUPPORT	0

static char *hpe_branded_controller = "HPE Smart Array Controller";
static char *microsemi_branded_controller = "Microsemi Smart Family Controller";

static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info);
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
static void pqi_scan_start(struct Scsi_Host *shost);
static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_queue_group *queue_group, enum pqi_io_path path,
	struct pqi_io_request *io_request);
static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
	struct pqi_iu_header *request, unsigned int flags,
	struct pqi_raid_error_info *error_info, unsigned long timeout_msecs);
static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
	unsigned int cdb_length, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info);

/* for flags argument to pqi_submit_raid_request_synchronous() */
#define PQI_SYNC_FLAGS_INTERRUPTABLE	0x1

static struct scsi_transport_template *pqi_sas_transport_template;

static atomic_t pqi_controller_count = ATOMIC_INIT(0);

static int pqi_disable_device_id_wildcards;
module_param_named(disable_device_id_wildcards,
	pqi_disable_device_id_wildcards, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(disable_device_id_wildcards,
	"Disable device ID wildcards.");

static char *raid_levels[] = {
	"RAID-0",
	"RAID-4",
	"RAID-1(1+0)",
	"RAID-5",
	"RAID-5+1",
	"RAID-ADG",
	"RAID-1(ADM)",
};

static char *pqi_raid_level_to_string(u8 raid_level)
{
	if (raid_level < ARRAY_SIZE(raid_levels))
		return raid_levels[raid_level];

	return "";
}

#define SA_RAID_0		0
#define SA_RAID_4		1
#define SA_RAID_1		2	/* also used for RAID 10 */
#define SA_RAID_5		3	/* also used for RAID 50 */
#define SA_RAID_51		4
#define SA_RAID_6		5	/* also used for RAID 60 */
#define SA_RAID_ADM		6	/* also used for RAID 1+0 ADM */
#define SA_RAID_MAX		SA_RAID_ADM
#define SA_RAID_UNKNOWN		0xff

static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
{
	scmd->scsi_done(scmd);
}

static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
{
	return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
}

static inline struct pqi_ctrl_info *shost_to_hba(struct Scsi_Host *shost)
{
	void *hostdata = shost_priv(shost);

	return *((struct pqi_ctrl_info **)hostdata);
}

static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
{
	return !device->is_physical_device;
}

static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
{
	return !ctrl_info->controller_online;
}

static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
{
	if (ctrl_info->controller_online)
		if (!sis_is_firmware_running(ctrl_info))
			pqi_take_ctrl_offline(ctrl_info);
}

static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
{
	return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
}

static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(
	struct pqi_ctrl_info *ctrl_info)
{
	return sis_read_driver_scratch(ctrl_info);
}

static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
	enum pqi_ctrl_mode mode)
{
	sis_write_driver_scratch(ctrl_info, mode);
}

#define PQI_RESCAN_WORK_INTERVAL	(10 * HZ)

static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	schedule_delayed_work(&ctrl_info->rescan_work,
		PQI_RESCAN_WORK_INTERVAL);
}
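
/*
 * Map a single driver buffer for DMA and describe it with one CISS SG
 * descriptor; pqi_pci_unmap() below undoes the mapping for each descriptor
 * it is handed.
 */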
static int pqi_map_single(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *sg_descriptor, void *buffer,
	size_t buffer_length, int data_direction)
{
	dma_addr_t bus_address;

	if (!buffer || buffer_length == 0 || data_direction == PCI_DMA_NONE)
		return 0;

	bus_address = pci_map_single(pci_dev, buffer, buffer_length,
		data_direction);
	if (pci_dma_mapping_error(pci_dev, bus_address))
		return -ENOMEM;

	put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
	put_unaligned_le32(buffer_length, &sg_descriptor->length);
	put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);

	return 0;
}

static void pqi_pci_unmap(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *descriptors, int num_descriptors,
	int data_direction)
{
	int i;

	if (data_direction == PCI_DMA_NONE)
		return;

	for (i = 0; i < num_descriptors; i++)
		pci_unmap_single(pci_dev,
			(dma_addr_t)get_unaligned_le64(&descriptors[i].address),
			get_unaligned_le32(&descriptors[i].length),
			data_direction);
}
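
/*
 * Build a RAID-path pass-through request (CISS/BMIC command) and map the
 * caller's buffer into the request's first SG descriptor.  The DMA direction
 * chosen for the command is returned through pci_direction so the caller can
 * unmap the buffer once the request completes.
 */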
static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
	struct pqi_raid_path_request *request, u8 cmd,
	u8 *scsi3addr, void *buffer, size_t buffer_length,
	u16 vpd_page, int *pci_direction)
{
	u8 *cdb;
	int pci_dir;

	memset(request, 0, sizeof(*request));

	request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
	put_unaligned_le16(offsetof(struct pqi_raid_path_request,
		sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
		&request->header.iu_length);
	put_unaligned_le32(buffer_length, &request->buffer_length);
	memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
	request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
	request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;

	cdb = request->cdb;

	switch (cmd) {
	case INQUIRY:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = INQUIRY;
		if (vpd_page & VPD_PAGE) {
			cdb[1] = 0x1;
			cdb[2] = (u8)vpd_page;
		}
		cdb[4] = (u8)buffer_length;
		break;
	case CISS_REPORT_LOG:
	case CISS_REPORT_PHYS:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = cmd;
		if (cmd == CISS_REPORT_PHYS)
			cdb[1] = CISS_REPORT_PHYS_EXTENDED;
		else
			cdb[1] = CISS_REPORT_LOG_EXTENDED;
		put_unaligned_be32(buffer_length, &cdb[6]);
		break;
	case CISS_GET_RAID_MAP:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = CISS_READ;
		cdb[1] = CISS_GET_RAID_MAP;
		put_unaligned_be32(buffer_length, &cdb[6]);
		break;
	case SA_CACHE_FLUSH:
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = BMIC_CACHE_FLUSH;
		put_unaligned_be16(buffer_length, &cdb[7]);
		break;
	case BMIC_IDENTIFY_CONTROLLER:
	case BMIC_IDENTIFY_PHYSICAL_DEVICE:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = BMIC_READ;
		cdb[6] = cmd;
		put_unaligned_be16(buffer_length, &cdb[7]);
		break;
	case BMIC_WRITE_HOST_WELLNESS:
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = cmd;
		put_unaligned_be16(buffer_length, &cdb[7]);
		break;
	default:
		dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%x\n",
			cmd);
		WARN_ON(cmd);
		break;
	}

	switch (request->data_direction) {
	case SOP_READ_FLAG:
		pci_dir = PCI_DMA_FROMDEVICE;
		break;
	case SOP_WRITE_FLAG:
		pci_dir = PCI_DMA_TODEVICE;
		break;
	case SOP_NO_DIRECTION_FLAG:
		pci_dir = PCI_DMA_NONE;
		break;
	default:
		pci_dir = PCI_DMA_BIDIRECTIONAL;
		break;
	}

	*pci_direction = pci_dir;

	return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
		buffer, buffer_length, pci_dir);
}
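
/*
 * Grab a free I/O request slot from the pre-allocated pool.  The refcount of
 * each slot doubles as an "in use" flag, so the search can start from a
 * racily read index and simply keep walking until an unused slot is found.
 */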
static struct pqi_io_request *pqi_alloc_io_request(
	struct pqi_ctrl_info *ctrl_info)
{
	struct pqi_io_request *io_request;
	u16 i = ctrl_info->next_io_request_slot;	/* benignly racy */

	while (1) {
		io_request = &ctrl_info->io_request_pool[i];
		if (atomic_inc_return(&io_request->refcount) == 1)
			break;
		atomic_dec(&io_request->refcount);
		i = (i + 1) % ctrl_info->max_io_slots;
	}

	/* benignly racy */
	ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots;

	io_request->scmd = NULL;
	io_request->status = 0;
	io_request->error_info = NULL;

	return io_request;
}

static void pqi_free_io_request(struct pqi_io_request *io_request)
{
	atomic_dec(&io_request->refcount);
}

static int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
	struct bmic_identify_controller *buffer)
{
	int rc;
	int pci_direction;
	struct pqi_raid_path_request request;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		BMIC_IDENTIFY_CONTROLLER, RAID_CTLR_LUNID, buffer,
		sizeof(*buffer), 0, &pci_direction);
	if (rc)
		return rc;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
		NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
		pci_direction);

	return rc;
}

static int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
	u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
{
	int rc;
	int pci_direction;
	struct pqi_raid_path_request request;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		INQUIRY, scsi3addr, buffer, buffer_length, vpd_page,
		&pci_direction);
	if (rc)
		return rc;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
		NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
		pci_direction);

	return rc;
}

static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *buffer,
	size_t buffer_length)
{
	int rc;
	int pci_direction;
	u16 bmic_device_index;
	struct pqi_raid_path_request request;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
		buffer_length, 0, &pci_direction);
	if (rc)
		return rc;

	bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
	request.cdb[2] = (u8)bmic_device_index;
	request.cdb[9] = (u8)(bmic_device_index >> 8);

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
		0, NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
		pci_direction);

	return rc;
}

#define SA_CACHE_FLUSH_BUFFER_LENGTH	4
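
/*
 * Ask the controller to flush its write cache.  Skipped when the controller
 * has already been marked offline, since the request could never complete.
 */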
static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct pqi_raid_path_request request;
	int pci_direction;
	u8 *buffer;

	/*
	 * Don't bother trying to flush the cache if the controller is
	 * locked up.
	 */
	if (pqi_ctrl_offline(ctrl_info))
		return -ENXIO;

	buffer = kzalloc(SA_CACHE_FLUSH_BUFFER_LENGTH, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		SA_CACHE_FLUSH, RAID_CTLR_LUNID, buffer,
		SA_CACHE_FLUSH_BUFFER_LENGTH, 0, &pci_direction);
	if (rc)
		goto out;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
		0, NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
		pci_direction);

out:
	kfree(buffer);

	return rc;
}

static int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
	void *buffer, size_t buffer_length)
{
	int rc;
	struct pqi_raid_path_request request;
	int pci_direction;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		BMIC_WRITE_HOST_WELLNESS, RAID_CTLR_LUNID, buffer,
		buffer_length, 0, &pci_direction);
	if (rc)
		return rc;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
		0, NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
		pci_direction);

	return rc;
}

#pragma pack(1)

struct bmic_host_wellness_driver_version {
	u8	start_tag[4];
	u8	driver_version_tag[2];
	__le16	driver_version_length;
	char	driver_version[32];
	u8	end_tag[2];
};

#pragma pack()

static int pqi_write_driver_version_to_host_wellness(
	struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_host_wellness_driver_version *buffer;
	size_t buffer_length;

	buffer_length = sizeof(*buffer);

	buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->start_tag[0] = '<';
	buffer->start_tag[1] = 'H';
	buffer->start_tag[2] = 'W';
	buffer->start_tag[3] = '>';
	buffer->driver_version_tag[0] = 'D';
	buffer->driver_version_tag[1] = 'V';
	put_unaligned_le16(sizeof(buffer->driver_version),
		&buffer->driver_version_length);
	strncpy(buffer->driver_version, DRIVER_VERSION,
		sizeof(buffer->driver_version) - 1);
	buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
	buffer->end_tag[0] = 'Z';
	buffer->end_tag[1] = 'Z';

	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

	kfree(buffer);

	return rc;
}

#pragma pack(1)

struct bmic_host_wellness_time {
	u8	start_tag[4];
	u8	time_tag[2];
	__le16	time_length;
	u8	time[8];
	u8	dont_write_tag[2];
	u8	end_tag[2];
};

#pragma pack()
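
/*
 * Send the current local time to the controller as a BMIC host wellness
 * update.  The time fields are BCD-encoded; the trailing 'D','W' bytes fill
 * the structure's dont_write_tag field.
 */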
static int pqi_write_current_time_to_host_wellness(
	struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_host_wellness_time *buffer;
	size_t buffer_length;
	time64_t local_time;
	unsigned int year;
	struct tm tm;

	buffer_length = sizeof(*buffer);

	buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->start_tag[0] = '<';
	buffer->start_tag[1] = 'H';
	buffer->start_tag[2] = 'W';
	buffer->start_tag[3] = '>';
	buffer->time_tag[0] = 'T';
	buffer->time_tag[1] = 'D';
	put_unaligned_le16(sizeof(buffer->time),
		&buffer->time_length);

	local_time = ktime_get_real_seconds();
	time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm);
	year = tm.tm_year + 1900;

	buffer->time[0] = bin2bcd(tm.tm_hour);
	buffer->time[1] = bin2bcd(tm.tm_min);
	buffer->time[2] = bin2bcd(tm.tm_sec);
	buffer->time[3] = 0;
	buffer->time[4] = bin2bcd(tm.tm_mon + 1);
	buffer->time[5] = bin2bcd(tm.tm_mday);
	buffer->time[6] = bin2bcd(year / 100);
	buffer->time[7] = bin2bcd(year % 100);

	buffer->dont_write_tag[0] = 'D';
	buffer->dont_write_tag[1] = 'W';
	buffer->end_tag[0] = 'Z';
	buffer->end_tag[1] = 'Z';

	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

	kfree(buffer);

	return rc;
}

#define PQI_UPDATE_TIME_WORK_INTERVAL	(24UL * 60 * 60 * HZ)

static void pqi_update_time_worker(struct work_struct *work)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
		update_time_work);

	rc = pqi_write_current_time_to_host_wellness(ctrl_info);
	if (rc)
		dev_warn(&ctrl_info->pci_dev->dev,
			"error updating time on controller\n");

	schedule_delayed_work(&ctrl_info->update_time_work,
		PQI_UPDATE_TIME_WORK_INTERVAL);
}

static inline void pqi_schedule_update_time_worker(
	struct pqi_ctrl_info *ctrl_info)
{
	schedule_delayed_work(&ctrl_info->update_time_work, 0);
}

static int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
	void *buffer, size_t buffer_length)
{
	int rc;
	int pci_direction;
	struct pqi_raid_path_request request;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		cmd, RAID_CTLR_LUNID, buffer, buffer_length, 0, &pci_direction);
	if (rc)
		return rc;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
		NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
		pci_direction);

	return rc;
}
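
/*
 * Issue a CISS report physical/logical LUNs command.  The list length is
 * queried first with a header-sized buffer, then the full list is fetched;
 * if the list grew in the meantime, the allocation is resized and the read
 * is retried.
 */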
static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
	void **buffer)
{
	int rc;
	size_t lun_list_length;
	size_t lun_data_length;
	size_t new_lun_list_length;
	void *lun_data = NULL;
	struct report_lun_header *report_lun_header;

	report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
	if (!report_lun_header) {
		rc = -ENOMEM;
		goto out;
	}

	rc = pqi_report_luns(ctrl_info, cmd, report_lun_header,
		sizeof(*report_lun_header));
	if (rc)
		goto out;

	lun_list_length = get_unaligned_be32(&report_lun_header->list_length);

again:
	lun_data_length = sizeof(struct report_lun_header) + lun_list_length;

	lun_data = kmalloc(lun_data_length, GFP_KERNEL);
	if (!lun_data) {
		rc = -ENOMEM;
		goto out;
	}

	if (lun_list_length == 0) {
		memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
		goto out;
	}

	rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
	if (rc)
		goto out;

	new_lun_list_length = get_unaligned_be32(
		&((struct report_lun_header *)lun_data)->list_length);

	if (new_lun_list_length > lun_list_length) {
		lun_list_length = new_lun_list_length;
		kfree(lun_data);
		goto again;
	}

out:
	kfree(report_lun_header);

	if (rc) {
		kfree(lun_data);
		lun_data = NULL;
	}

	*buffer = lun_data;

	return rc;
}

static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info,
	void **buffer)
{
	return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS,
		buffer);
}

static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info,
	void **buffer)
{
	return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
}

static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
	struct report_phys_lun_extended **physdev_list,
	struct report_log_lun_extended **logdev_list)
{
	int rc;
	size_t logdev_list_length;
	size_t logdev_data_length;
	struct report_log_lun_extended *internal_logdev_list;
	struct report_log_lun_extended *logdev_data;
	struct report_lun_header report_lun_header;

	rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"report physical LUNs failed\n");

	rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"report logical LUNs failed\n");

	/*
	 * Tack the controller itself onto the end of the logical device list.
	 */

	logdev_data = *logdev_list;

	if (logdev_data) {
		logdev_list_length =
			get_unaligned_be32(&logdev_data->header.list_length);
	} else {
		memset(&report_lun_header, 0, sizeof(report_lun_header));
		logdev_data =
			(struct report_log_lun_extended *)&report_lun_header;
		logdev_list_length = 0;
	}

	logdev_data_length = sizeof(struct report_lun_header) +
		logdev_list_length;

	internal_logdev_list = kmalloc(logdev_data_length +
		sizeof(struct report_log_lun_extended), GFP_KERNEL);
	if (!internal_logdev_list) {
		kfree(*logdev_list);
		*logdev_list = NULL;
		return -ENOMEM;
	}

	memcpy(internal_logdev_list, logdev_data, logdev_data_length);
	memset((u8 *)internal_logdev_list + logdev_data_length, 0,
		sizeof(struct report_log_lun_extended_entry));
	put_unaligned_be32(logdev_list_length +
		sizeof(struct report_log_lun_extended_entry),
		&internal_logdev_list->header.list_length);

	kfree(*logdev_list);
	*logdev_list = internal_logdev_list;

	return 0;
}

static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
	int bus, int target, int lun)
{
	device->bus = bus;
	device->target = target;
	device->lun = lun;
}
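
/*
 * Derive the bus/target/LUN triplet for a device from its 8-byte SCSI
 * address: the controller and logical volumes get fixed bus numbers here,
 * while other physical devices are assigned later by the SAS transport
 * layer.
 */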
static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
{
	u8 *scsi3addr;
	u32 lunid;

	scsi3addr = device->scsi3addr;
	lunid = get_unaligned_le32(scsi3addr);

	if (pqi_is_hba_lunid(scsi3addr)) {
		/* The specified device is the controller. */
		pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
		device->target_lun_valid = true;
		return;
	}

	if (pqi_is_logical_device(device)) {
		pqi_set_bus_target_lun(device, PQI_RAID_VOLUME_BUS, 0,
			lunid & 0x3fff);
		device->target_lun_valid = true;
		return;
	}

	/*
	 * Defer target and LUN assignment for non-controller physical devices
	 * because the SAS transport layer will make these assignments later.
	 */
	pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
}

static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 raid_level;
	u8 *buffer;

	raid_level = SA_RAID_UNKNOWN;

	buffer = kmalloc(64, GFP_KERNEL);
	if (buffer) {
		rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
			VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
		if (rc == 0) {
			raid_level = buffer[8];
			if (raid_level > SA_RAID_MAX)
				raid_level = SA_RAID_UNKNOWN;
		}
		kfree(buffer);
	}

	device->raid_level = raid_level;
}
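
/*
 * Sanity-check a RAID map returned by the controller: the structure size,
 * the number of map entries, and the layout count expected for the volume's
 * RAID level.  A map that fails any of these checks is rejected with
 * -EINVAL.
 */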
static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct raid_map *raid_map)
{
	char *err_msg;
	u32 raid_map_size;
	u32 r5or6_blocks_per_row;
	unsigned int num_phys_disks;
	unsigned int num_raid_map_entries;

	raid_map_size = get_unaligned_le32(&raid_map->structure_size);

	if (raid_map_size < offsetof(struct raid_map, disk_data)) {
		err_msg = "RAID map too small";
		goto bad_raid_map;
	}

	if (raid_map_size > sizeof(*raid_map)) {
		err_msg = "RAID map too large";
		goto bad_raid_map;
	}

	num_phys_disks = get_unaligned_le16(&raid_map->layout_map_count) *
		(get_unaligned_le16(&raid_map->data_disks_per_row) +
		get_unaligned_le16(&raid_map->metadata_disks_per_row));
	num_raid_map_entries = num_phys_disks *
		get_unaligned_le16(&raid_map->row_cnt);

	if (num_raid_map_entries > RAID_MAP_MAX_ENTRIES) {
		err_msg = "invalid number of map entries in RAID map";
		goto bad_raid_map;
	}

	if (device->raid_level == SA_RAID_1) {
		if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
			err_msg = "invalid RAID-1 map";
			goto bad_raid_map;
		}
	} else if (device->raid_level == SA_RAID_ADM) {
		if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
			err_msg = "invalid RAID-1(ADM) map";
			goto bad_raid_map;
		}
	} else if ((device->raid_level == SA_RAID_5 ||
		device->raid_level == SA_RAID_6) &&
		get_unaligned_le16(&raid_map->layout_map_count) > 1) {
		/* RAID 50/60 */
		r5or6_blocks_per_row =
			get_unaligned_le16(&raid_map->strip_size) *
			get_unaligned_le16(&raid_map->data_disks_per_row);
		if (r5or6_blocks_per_row == 0) {
			err_msg = "invalid RAID-5 or RAID-6 map";
			goto bad_raid_map;
		}
	}

	return 0;

bad_raid_map:
	dev_warn(&ctrl_info->pci_dev->dev, "%s\n", err_msg);

	return -EINVAL;
}

static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	int pci_direction;
	struct pqi_raid_path_request request;
	struct raid_map *raid_map;

	raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
	if (!raid_map)
		return -ENOMEM;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		CISS_GET_RAID_MAP, device->scsi3addr, raid_map,
		sizeof(*raid_map), 0, &pci_direction);
	if (rc)
		goto error;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
		NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
		pci_direction);

	if (rc)
		goto error;

	rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
	if (rc)
		goto error;

	device->raid_map = raid_map;

	return 0;

error:
	kfree(raid_map);

	return rc;
}
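
/*
 * Read the vendor-specific offload status VPD page to learn whether offload
 * is configured and pending-enabled for a logical volume.  When offload is
 * configured, the volume's RAID map is fetched as well.
 */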
static void pqi_get_offload_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 *buffer;
	u8 offload_status;

	buffer = kmalloc(64, GFP_KERNEL);
	if (!buffer)
		return;

	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
		VPD_PAGE | CISS_VPD_LV_OFFLOAD_STATUS, buffer, 64);
	if (rc)
		goto out;

#define OFFLOAD_STATUS_BYTE	4
#define OFFLOAD_CONFIGURED_BIT	0x1
#define OFFLOAD_ENABLED_BIT	0x2

	offload_status = buffer[OFFLOAD_STATUS_BYTE];
	device->offload_configured =
		!!(offload_status & OFFLOAD_CONFIGURED_BIT);
	if (device->offload_configured) {
		device->offload_enabled_pending =
			!!(offload_status & OFFLOAD_ENABLED_BIT);
		if (pqi_get_raid_map(ctrl_info, device))
			device->offload_enabled_pending = false;
	}

out:
	kfree(buffer);
}

/*
 * Use vendor-specific VPD to determine online/offline status of a volume.
 */

static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	size_t page_length;
	u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
	bool volume_offline = true;
	u32 volume_flags;
	struct ciss_vpd_logical_volume_status *vpd;

	vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
	if (!vpd)
		goto no_buffer;

	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
		VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
	if (rc)
		goto out;

	page_length = offsetof(struct ciss_vpd_logical_volume_status,
		volume_status) + vpd->page_length;
	if (page_length < sizeof(*vpd))
		goto out;

	volume_status = vpd->volume_status;
	volume_flags = get_unaligned_be32(&vpd->flags);
	volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;

out:
	kfree(vpd);
no_buffer:
	device->volume_status = volume_status;
	device->volume_offline = volume_offline;
}

static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 *buffer;

	buffer = kmalloc(64, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	/* Send an inquiry to the device to see what it is. */
	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64);
	if (rc)
		goto out;

	scsi_sanitize_inquiry_string(&buffer[8], 8);
	scsi_sanitize_inquiry_string(&buffer[16], 16);

	device->devtype = buffer[0] & 0x1f;
	memcpy(device->vendor, &buffer[8],
		sizeof(device->vendor));
	memcpy(device->model, &buffer[16],
		sizeof(device->model));

	if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) {
		pqi_get_raid_level(ctrl_info, device);
		pqi_get_offload_status(ctrl_info, device);
		pqi_get_volume_status(ctrl_info, device);
	}

out:
	kfree(buffer);

	return rc;
}
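
/*
 * Fill in queue depth, device type, and enclosure/path details for a
 * physical disk from a BMIC IDENTIFY PHYSICAL DEVICE response; fall back to
 * a default queue depth if the identify command fails.
 */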
static void pqi_get_physical_disk_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *id_phys)
{
	int rc;

	memset(id_phys, 0, sizeof(*id_phys));

	rc = pqi_identify_physical_device(ctrl_info, device,
		id_phys, sizeof(*id_phys));
	if (rc) {
		device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
		return;
	}

	device->queue_depth =
		get_unaligned_le16(&id_phys->current_queue_depth_limit);
	device->device_type = id_phys->device_type;
	device->active_path_index = id_phys->active_path_number;
	device->path_map = id_phys->redundant_path_present_map;
	memcpy(&device->box,
		&id_phys->alternate_paths_phys_box_on_port,
		sizeof(device->box));
	memcpy(&device->phys_connector,
		&id_phys->alternate_paths_phys_connector,
		sizeof(device->phys_connector));
	device->bay = id_phys->phys_bay_in_box;
}

static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	char *status;
	static const char unknown_state_str[] =
		"Volume is in an unknown state (%u)";
	char unknown_state_buffer[sizeof(unknown_state_str) + 10];

	switch (device->volume_status) {
	case CISS_LV_OK:
		status = "Volume online";
		break;
	case CISS_LV_FAILED:
		status = "Volume failed";
		break;
	case CISS_LV_NOT_CONFIGURED:
		status = "Volume not configured";
		break;
	case CISS_LV_DEGRADED:
		status = "Volume degraded";
		break;
	case CISS_LV_READY_FOR_RECOVERY:
		status = "Volume ready for recovery operation";
		break;
	case CISS_LV_UNDERGOING_RECOVERY:
		status = "Volume undergoing recovery";
		break;
	case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
		status = "Wrong physical drive was replaced";
		break;
	case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
		status = "A physical drive not properly connected";
		break;
	case CISS_LV_HARDWARE_OVERHEATING:
		status = "Hardware is overheating";
		break;
	case CISS_LV_HARDWARE_HAS_OVERHEATED:
		status = "Hardware has overheated";
		break;
	case CISS_LV_UNDERGOING_EXPANSION:
		status = "Volume undergoing expansion";
		break;
	case CISS_LV_NOT_AVAILABLE:
		status = "Volume waiting for transforming volume";
		break;
	case CISS_LV_QUEUED_FOR_EXPANSION:
		status = "Volume queued for expansion";
		break;
	case CISS_LV_DISABLED_SCSI_ID_CONFLICT:
		status = "Volume disabled due to SCSI ID conflict";
		break;
	case CISS_LV_EJECTED:
		status = "Volume has been ejected";
		break;
	case CISS_LV_UNDERGOING_ERASE:
		status = "Volume undergoing background erase";
		break;
	case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD:
		status = "Volume ready for predictive spare rebuild";
		break;
	case CISS_LV_UNDERGOING_RPI:
		status = "Volume undergoing rapid parity initialization";
		break;
	case CISS_LV_PENDING_RPI:
		status = "Volume queued for rapid parity initialization";
		break;
	case CISS_LV_ENCRYPTED_NO_KEY:
		status = "Encrypted volume inaccessible - key not present";
		break;
	case CISS_LV_UNDERGOING_ENCRYPTION:
		status = "Volume undergoing encryption process";
		break;
	case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING:
		status = "Volume undergoing encryption re-keying process";
		break;
	case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		status =
			"Encrypted volume inaccessible - disabled on ctrl";
		break;
	case CISS_LV_PENDING_ENCRYPTION:
		status = "Volume pending migration to encrypted state";
		break;
	case CISS_LV_PENDING_ENCRYPTION_REKEYING:
		status = "Volume pending encryption rekeying";
		break;
	case CISS_LV_NOT_SUPPORTED:
		status = "Volume not supported on this controller";
		break;
	case CISS_LV_STATUS_UNAVAILABLE:
		status = "Volume status not available";
		break;
	default:
		snprintf(unknown_state_buffer, sizeof(unknown_state_buffer),
			unknown_state_str, device->volume_status);
		status = unknown_state_buffer;
		break;
	}

	dev_info(&ctrl_info->pci_dev->dev,
		"scsi %d:%d:%d:%d %s\n",
		ctrl_info->scsi_host->host_no,
		device->bus, device->target, device->lun, status);
}

static struct pqi_scsi_dev *pqi_find_disk_by_aio_handle(
	struct pqi_ctrl_info *ctrl_info, u32 aio_handle)
{
	struct pqi_scsi_dev *device;

	list_for_each_entry(device, &ctrl_info->scsi_device_list,
		scsi_device_list_entry) {
		if (device->devtype != TYPE_DISK && device->devtype != TYPE_ZBC)
			continue;
		if (pqi_is_logical_device(device))
			continue;
		if (device->aio_handle == aio_handle)
			return device;
	}

	return NULL;
}
  982. static void pqi_update_logical_drive_queue_depth(
  983. struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *logical_drive)
  984. {
  985. unsigned int i;
  986. struct raid_map *raid_map;
  987. struct raid_map_disk_data *disk_data;
  988. struct pqi_scsi_dev *phys_disk;
  989. unsigned int num_phys_disks;
  990. unsigned int num_raid_map_entries;
  991. unsigned int queue_depth;
  992. logical_drive->queue_depth = PQI_LOGICAL_DRIVE_DEFAULT_MAX_QUEUE_DEPTH;
  993. raid_map = logical_drive->raid_map;
  994. if (!raid_map)
  995. return;
  996. disk_data = raid_map->disk_data;
  997. num_phys_disks = get_unaligned_le16(&raid_map->layout_map_count) *
  998. (get_unaligned_le16(&raid_map->data_disks_per_row) +
  999. get_unaligned_le16(&raid_map->metadata_disks_per_row));
  1000. num_raid_map_entries = num_phys_disks *
  1001. get_unaligned_le16(&raid_map->row_cnt);
  1002. queue_depth = 0;
  1003. for (i = 0; i < num_raid_map_entries; i++) {
  1004. phys_disk = pqi_find_disk_by_aio_handle(ctrl_info,
  1005. disk_data[i].aio_handle);
  1006. if (!phys_disk) {
  1007. dev_warn(&ctrl_info->pci_dev->dev,
  1008. "failed to find physical disk for logical drive %016llx\n",
  1009. get_unaligned_be64(logical_drive->scsi3addr));
  1010. logical_drive->offload_enabled = false;
  1011. logical_drive->offload_enabled_pending = false;
  1012. kfree(raid_map);
  1013. logical_drive->raid_map = NULL;
  1014. return;
  1015. }
  1016. queue_depth += phys_disk->queue_depth;
  1017. }
  1018. logical_drive->queue_depth = queue_depth;
  1019. }
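/*
 * Worked example for the aggregation above (illustrative numbers only):
 * with layout_map_count = 1, data_disks_per_row = 3 and
 * metadata_disks_per_row = 1, num_phys_disks is 4; with row_cnt = 2 the
 * RAID map holds 8 entries. If every referenced physical disk reports a
 * queue depth of 32, the logical drive's queue depth becomes 8 * 32 = 256.
 * If any entry's aio_handle cannot be matched to a known physical disk,
 * the RAID map is discarded and offload stays disabled for that volume.
 */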
  1020. static void pqi_update_all_logical_drive_queue_depths(
  1021. struct pqi_ctrl_info *ctrl_info)
  1022. {
  1023. struct pqi_scsi_dev *device;
  1024. list_for_each_entry(device, &ctrl_info->scsi_device_list,
  1025. scsi_device_list_entry) {
  1026. if (device->devtype != TYPE_DISK && device->devtype != TYPE_ZBC)
  1027. continue;
  1028. if (!pqi_is_logical_device(device))
  1029. continue;
  1030. pqi_update_logical_drive_queue_depth(ctrl_info, device);
  1031. }
  1032. }
  1033. static void pqi_rescan_worker(struct work_struct *work)
  1034. {
  1035. struct pqi_ctrl_info *ctrl_info;
  1036. ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
  1037. rescan_work);
  1038. pqi_scan_scsi_devices(ctrl_info);
  1039. }
  1040. static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
  1041. struct pqi_scsi_dev *device)
  1042. {
  1043. int rc;
  1044. if (pqi_is_logical_device(device))
  1045. rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
  1046. device->target, device->lun);
  1047. else
  1048. rc = pqi_add_sas_device(ctrl_info->sas_host, device);
  1049. return rc;
  1050. }
  1051. static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info,
  1052. struct pqi_scsi_dev *device)
  1053. {
  1054. if (pqi_is_logical_device(device))
  1055. scsi_remove_device(device->sdev);
  1056. else
  1057. pqi_remove_sas_device(device);
  1058. }
  1059. /* Assumes the SCSI device list lock is held. */
  1060. static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
  1061. int bus, int target, int lun)
  1062. {
  1063. struct pqi_scsi_dev *device;
  1064. list_for_each_entry(device, &ctrl_info->scsi_device_list,
  1065. scsi_device_list_entry)
  1066. if (device->bus == bus && device->target == target &&
  1067. device->lun == lun)
  1068. return device;
  1069. return NULL;
  1070. }
  1071. static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1,
  1072. struct pqi_scsi_dev *dev2)
  1073. {
  1074. if (dev1->is_physical_device != dev2->is_physical_device)
  1075. return false;
  1076. if (dev1->is_physical_device)
  1077. return dev1->wwid == dev2->wwid;
  1078. return memcmp(dev1->volume_id, dev2->volume_id,
  1079. sizeof(dev1->volume_id)) == 0;
  1080. }
  1081. enum pqi_find_result {
  1082. DEVICE_NOT_FOUND,
  1083. DEVICE_CHANGED,
  1084. DEVICE_SAME,
  1085. };
  1086. static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
  1087. struct pqi_scsi_dev *device_to_find,
  1088. struct pqi_scsi_dev **matching_device)
  1089. {
  1090. struct pqi_scsi_dev *device;
  1091. list_for_each_entry(device, &ctrl_info->scsi_device_list,
  1092. scsi_device_list_entry) {
  1093. if (pqi_scsi3addr_equal(device_to_find->scsi3addr,
  1094. device->scsi3addr)) {
  1095. *matching_device = device;
  1096. if (pqi_device_equal(device_to_find, device)) {
  1097. if (device_to_find->volume_offline)
  1098. return DEVICE_CHANGED;
  1099. return DEVICE_SAME;
  1100. }
  1101. return DEVICE_CHANGED;
  1102. }
  1103. }
  1104. return DEVICE_NOT_FOUND;
  1105. }
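/*
 * pqi_scsi_find_entry() reduces matching to three outcomes: DEVICE_NOT_FOUND
 * when no existing entry shares the SCSI3 address, DEVICE_SAME when the
 * address and the device identity (WWID for physical devices, volume ID for
 * logical devices) both match and the reported volume is not offline, and
 * DEVICE_CHANGED otherwise, which leads the caller to replace the old entry
 * with the newly reported one.
 */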
  1106. static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
  1107. char *action, struct pqi_scsi_dev *device)
  1108. {
  1109. dev_info(&ctrl_info->pci_dev->dev,
  1110. "%s scsi %d:%d:%d:%d: %s %.8s %.16s %-12s SSDSmartPathCap%c En%c Exp%c qd=%d\n",
  1111. action,
  1112. ctrl_info->scsi_host->host_no,
  1113. device->bus,
  1114. device->target,
  1115. device->lun,
  1116. scsi_device_type(device->devtype),
  1117. device->vendor,
  1118. device->model,
  1119. pqi_raid_level_to_string(device->raid_level),
  1120. device->offload_configured ? '+' : '-',
  1121. device->offload_enabled_pending ? '+' : '-',
  1122. device->expose_device ? '+' : '-',
  1123. device->queue_depth);
  1124. }
  1125. /* Assumes the SCSI device list lock is held. */
  1126. static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
  1127. struct pqi_scsi_dev *new_device)
  1128. {
  1129. existing_device->devtype = new_device->devtype;
  1130. existing_device->device_type = new_device->device_type;
  1131. existing_device->bus = new_device->bus;
  1132. if (new_device->target_lun_valid) {
  1133. existing_device->target = new_device->target;
  1134. existing_device->lun = new_device->lun;
  1135. existing_device->target_lun_valid = true;
  1136. }
  1137. /* By definition, the scsi3addr and wwid fields are already the same. */
  1138. existing_device->is_physical_device = new_device->is_physical_device;
  1139. existing_device->expose_device = new_device->expose_device;
  1140. existing_device->no_uld_attach = new_device->no_uld_attach;
  1141. existing_device->aio_enabled = new_device->aio_enabled;
  1142. memcpy(existing_device->vendor, new_device->vendor,
  1143. sizeof(existing_device->vendor));
  1144. memcpy(existing_device->model, new_device->model,
  1145. sizeof(existing_device->model));
  1146. existing_device->sas_address = new_device->sas_address;
  1147. existing_device->raid_level = new_device->raid_level;
  1148. existing_device->queue_depth = new_device->queue_depth;
  1149. existing_device->aio_handle = new_device->aio_handle;
  1150. existing_device->volume_status = new_device->volume_status;
  1151. existing_device->active_path_index = new_device->active_path_index;
  1152. existing_device->path_map = new_device->path_map;
  1153. existing_device->bay = new_device->bay;
  1154. memcpy(existing_device->box, new_device->box,
  1155. sizeof(existing_device->box));
  1156. memcpy(existing_device->phys_connector, new_device->phys_connector,
  1157. sizeof(existing_device->phys_connector));
  1158. existing_device->offload_configured = new_device->offload_configured;
  1159. existing_device->offload_enabled = false;
  1160. existing_device->offload_enabled_pending =
  1161. new_device->offload_enabled_pending;
  1162. existing_device->offload_to_mirror = 0;
  1163. kfree(existing_device->raid_map);
  1164. existing_device->raid_map = new_device->raid_map;
  1165. /* To prevent this from being freed later. */
  1166. new_device->raid_map = NULL;
  1167. }
  1168. static inline void pqi_free_device(struct pqi_scsi_dev *device)
  1169. {
  1170. if (device) {
  1171. kfree(device->raid_map);
  1172. kfree(device);
  1173. }
  1174. }
  1175. /*
  1176. * Called when exposing a new device to the OS fails in order to re-adjust
  1177. * our internal SCSI device list to match the SCSI ML's view.
  1178. */
  1179. static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
  1180. struct pqi_scsi_dev *device)
  1181. {
  1182. unsigned long flags;
  1183. spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
  1184. list_del(&device->scsi_device_list_entry);
  1185. spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
  1186. /* Allow the device structure to be freed later. */
  1187. device->keep_device = false;
  1188. }
  1189. static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
  1190. struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
  1191. {
  1192. int rc;
  1193. unsigned int i;
  1194. unsigned long flags;
  1195. enum pqi_find_result find_result;
  1196. struct pqi_scsi_dev *device;
  1197. struct pqi_scsi_dev *next;
  1198. struct pqi_scsi_dev *matching_device;
  1199. struct list_head add_list;
  1200. struct list_head delete_list;
  1201. INIT_LIST_HEAD(&add_list);
  1202. INIT_LIST_HEAD(&delete_list);
  1203. /*
  1204. * The idea here is to do as little work as possible while holding the
  1205. * spinlock. That's why we go to great pains to defer anything other
  1206. * than updating the internal device list until after we release the
  1207. * spinlock.
  1208. */
  1209. spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
  1210. /* Assume that all devices in the existing list have gone away. */
  1211. list_for_each_entry(device, &ctrl_info->scsi_device_list,
  1212. scsi_device_list_entry)
  1213. device->device_gone = true;
  1214. for (i = 0; i < num_new_devices; i++) {
  1215. device = new_device_list[i];
  1216. find_result = pqi_scsi_find_entry(ctrl_info, device,
  1217. &matching_device);
  1218. switch (find_result) {
  1219. case DEVICE_SAME:
  1220. /*
  1221. * The newly found device is already in the existing
  1222. * device list.
  1223. */
  1224. device->new_device = false;
  1225. matching_device->device_gone = false;
  1226. pqi_scsi_update_device(matching_device, device);
  1227. break;
  1228. case DEVICE_NOT_FOUND:
  1229. /*
  1230. * The newly found device is NOT in the existing device
  1231. * list.
  1232. */
  1233. device->new_device = true;
  1234. break;
  1235. case DEVICE_CHANGED:
  1236. /*
  1237. * The original device has gone away and we need to add
  1238. * the new device.
  1239. */
  1240. device->new_device = true;
  1241. break;
  1242. default:
  1243. WARN_ON(find_result);
  1244. break;
  1245. }
  1246. }
  1247. /* Process all devices that have gone away. */
  1248. list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
  1249. scsi_device_list_entry) {
  1250. if (device->device_gone) {
  1251. list_del(&device->scsi_device_list_entry);
  1252. list_add_tail(&device->delete_list_entry, &delete_list);
  1253. }
  1254. }
  1255. /* Process all new devices. */
  1256. for (i = 0; i < num_new_devices; i++) {
  1257. device = new_device_list[i];
  1258. if (!device->new_device)
  1259. continue;
  1260. if (device->volume_offline)
  1261. continue;
  1262. list_add_tail(&device->scsi_device_list_entry,
  1263. &ctrl_info->scsi_device_list);
  1264. list_add_tail(&device->add_list_entry, &add_list);
  1265. /* To prevent this device structure from being freed later. */
  1266. device->keep_device = true;
  1267. }
  1268. pqi_update_all_logical_drive_queue_depths(ctrl_info);
  1269. list_for_each_entry(device, &ctrl_info->scsi_device_list,
  1270. scsi_device_list_entry)
  1271. device->offload_enabled =
  1272. device->offload_enabled_pending;
  1273. spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
  1274. /* Remove all devices that have gone away. */
  1275. list_for_each_entry_safe(device, next, &delete_list,
  1276. delete_list_entry) {
  1277. if (device->sdev)
  1278. pqi_remove_device(ctrl_info, device);
  1279. if (device->volume_offline) {
  1280. pqi_dev_info(ctrl_info, "offline", device);
  1281. pqi_show_volume_status(ctrl_info, device);
  1282. } else {
  1283. pqi_dev_info(ctrl_info, "removed", device);
  1284. }
  1285. list_del(&device->delete_list_entry);
  1286. pqi_free_device(device);
  1287. }
  1288. /*
  1289. * Notify the SCSI ML if the queue depth of any existing device has
  1290. * changed.
  1291. */
  1292. list_for_each_entry(device, &ctrl_info->scsi_device_list,
  1293. scsi_device_list_entry) {
  1294. if (device->sdev && device->queue_depth !=
  1295. device->advertised_queue_depth) {
  1296. device->advertised_queue_depth = device->queue_depth;
  1297. scsi_change_queue_depth(device->sdev,
  1298. device->advertised_queue_depth);
  1299. }
  1300. }
  1301. /* Expose any new devices. */
  1302. list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
  1303. if (device->expose_device && !device->sdev) {
  1304. rc = pqi_add_device(ctrl_info, device);
  1305. if (rc) {
  1306. dev_warn(&ctrl_info->pci_dev->dev,
  1307. "scsi %d:%d:%d:%d addition failed, device not added\n",
  1308. ctrl_info->scsi_host->host_no,
  1309. device->bus, device->target,
  1310. device->lun);
  1311. pqi_fixup_botched_add(ctrl_info, device);
  1312. continue;
  1313. }
  1314. }
  1315. pqi_dev_info(ctrl_info, "added", device);
  1316. }
  1317. }
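/*
 * The update above is a three-phase reconciliation: under the device list
 * spinlock, every existing entry is first presumed gone, the freshly
 * reported devices are then matched against the list (SAME entries are
 * refreshed in place, NOT_FOUND/CHANGED entries are queued for addition
 * unless the volume is offline), and anything still marked gone is moved to
 * a delete list. Only after the lock is dropped are devices removed from or
 * exposed to the SCSI midlayer and queue depth changes propagated, which
 * keeps the locked section short.
 */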
  1318. static bool pqi_is_supported_device(struct pqi_scsi_dev *device)
  1319. {
  1320. bool is_supported = false;
  1321. switch (device->devtype) {
  1322. case TYPE_DISK:
  1323. case TYPE_ZBC:
  1324. case TYPE_TAPE:
  1325. case TYPE_MEDIUM_CHANGER:
  1326. case TYPE_ENCLOSURE:
  1327. is_supported = true;
  1328. break;
  1329. case TYPE_RAID:
  1330. /*
  1331. * Only support the HBA controller itself as a RAID
  1332. * controller. If it's a RAID controller other than
  1333. * the HBA itself (an external RAID controller, MSA500
  1334. * or similar), we don't support it.
  1335. */
  1336. if (pqi_is_hba_lunid(device->scsi3addr))
  1337. is_supported = true;
  1338. break;
  1339. }
  1340. return is_supported;
  1341. }
  1342. static inline bool pqi_skip_device(u8 *scsi3addr,
  1343. struct report_phys_lun_extended_entry *phys_lun_ext_entry)
  1344. {
  1345. u8 device_flags;
  1346. if (!MASKED_DEVICE(scsi3addr))
  1347. return false;
  1348. /* The device is masked. */
  1349. device_flags = phys_lun_ext_entry->device_flags;
  1350. if (device_flags & REPORT_PHYS_LUN_DEV_FLAG_NON_DISK) {
  1351. /*
  1352. * It's a non-disk device. We ignore all devices of this type
  1353. * when they're masked.
  1354. */
  1355. return true;
  1356. }
  1357. return false;
  1358. }
  1359. static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
  1360. {
  1361. /* Expose all devices except for physical devices that are masked. */
  1362. if (device->is_physical_device && MASKED_DEVICE(device->scsi3addr))
  1363. return false;
  1364. return true;
  1365. }
  1366. static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
  1367. {
  1368. int i;
  1369. int rc;
  1370. struct list_head new_device_list_head;
  1371. struct report_phys_lun_extended *physdev_list = NULL;
  1372. struct report_log_lun_extended *logdev_list = NULL;
  1373. struct report_phys_lun_extended_entry *phys_lun_ext_entry;
  1374. struct report_log_lun_extended_entry *log_lun_ext_entry;
  1375. struct bmic_identify_physical_device *id_phys = NULL;
  1376. u32 num_physicals;
  1377. u32 num_logicals;
  1378. struct pqi_scsi_dev **new_device_list = NULL;
  1379. struct pqi_scsi_dev *device;
  1380. struct pqi_scsi_dev *next;
  1381. unsigned int num_new_devices;
  1382. unsigned int num_valid_devices;
  1383. bool is_physical_device;
  1384. u8 *scsi3addr;
  1385. static char *out_of_memory_msg =
  1386. "out of memory, device discovery stopped";
  1387. INIT_LIST_HEAD(&new_device_list_head);
  1388. rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list);
  1389. if (rc)
  1390. goto out;
  1391. if (physdev_list)
  1392. num_physicals =
  1393. get_unaligned_be32(&physdev_list->header.list_length)
  1394. / sizeof(physdev_list->lun_entries[0]);
  1395. else
  1396. num_physicals = 0;
  1397. if (logdev_list)
  1398. num_logicals =
  1399. get_unaligned_be32(&logdev_list->header.list_length)
  1400. / sizeof(logdev_list->lun_entries[0]);
  1401. else
  1402. num_logicals = 0;
  1403. if (num_physicals) {
  1404. /*
  1405. * We need this buffer for calls to pqi_get_physical_disk_info()
  1406. * below. We allocate it here instead of inside
  1407. * pqi_get_physical_disk_info() because it's a fairly large
  1408. * buffer.
  1409. */
  1410. id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL);
  1411. if (!id_phys) {
  1412. dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
  1413. out_of_memory_msg);
  1414. rc = -ENOMEM;
  1415. goto out;
  1416. }
  1417. }
  1418. num_new_devices = num_physicals + num_logicals;
  1419. new_device_list = kmalloc(sizeof(*new_device_list) *
  1420. num_new_devices, GFP_KERNEL);
  1421. if (!new_device_list) {
  1422. dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg);
  1423. rc = -ENOMEM;
  1424. goto out;
  1425. }
  1426. for (i = 0; i < num_new_devices; i++) {
  1427. device = kzalloc(sizeof(*device), GFP_KERNEL);
  1428. if (!device) {
  1429. dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
  1430. out_of_memory_msg);
  1431. rc = -ENOMEM;
  1432. goto out;
  1433. }
  1434. list_add_tail(&device->new_device_list_entry,
  1435. &new_device_list_head);
  1436. }
  1437. device = NULL;
  1438. num_valid_devices = 0;
  1439. for (i = 0; i < num_new_devices; i++) {
  1440. if (i < num_physicals) {
  1441. is_physical_device = true;
  1442. phys_lun_ext_entry = &physdev_list->lun_entries[i];
  1443. log_lun_ext_entry = NULL;
  1444. scsi3addr = phys_lun_ext_entry->lunid;
  1445. } else {
  1446. is_physical_device = false;
  1447. phys_lun_ext_entry = NULL;
  1448. log_lun_ext_entry =
  1449. &logdev_list->lun_entries[i - num_physicals];
  1450. scsi3addr = log_lun_ext_entry->lunid;
  1451. }
  1452. if (is_physical_device &&
  1453. pqi_skip_device(scsi3addr, phys_lun_ext_entry))
  1454. continue;
  1455. if (device)
  1456. device = list_next_entry(device, new_device_list_entry);
  1457. else
  1458. device = list_first_entry(&new_device_list_head,
  1459. struct pqi_scsi_dev, new_device_list_entry);
  1460. memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
  1461. device->is_physical_device = is_physical_device;
  1462. device->raid_level = SA_RAID_UNKNOWN;
  1463. /* Gather information about the device. */
  1464. rc = pqi_get_device_info(ctrl_info, device);
  1465. if (rc == -ENOMEM) {
  1466. dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
  1467. out_of_memory_msg);
  1468. goto out;
  1469. }
  1470. if (rc) {
  1471. dev_warn(&ctrl_info->pci_dev->dev,
  1472. "obtaining device info failed, skipping device %016llx\n",
  1473. get_unaligned_be64(device->scsi3addr));
  1474. rc = 0;
  1475. continue;
  1476. }
  1477. if (!pqi_is_supported_device(device))
  1478. continue;
  1479. pqi_assign_bus_target_lun(device);
  1480. device->expose_device = pqi_expose_device(device);
  1481. if (device->is_physical_device) {
  1482. device->wwid = phys_lun_ext_entry->wwid;
  1483. if ((phys_lun_ext_entry->device_flags &
  1484. REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED) &&
  1485. phys_lun_ext_entry->aio_handle)
  1486. device->aio_enabled = true;
  1487. } else {
  1488. memcpy(device->volume_id, log_lun_ext_entry->volume_id,
  1489. sizeof(device->volume_id));
  1490. }
  1491. switch (device->devtype) {
  1492. case TYPE_DISK:
  1493. case TYPE_ZBC:
  1494. case TYPE_ENCLOSURE:
  1495. if (device->is_physical_device) {
  1496. device->sas_address =
  1497. get_unaligned_be64(&device->wwid);
  1498. if (device->devtype == TYPE_DISK ||
  1499. device->devtype == TYPE_ZBC) {
  1500. device->aio_handle =
  1501. phys_lun_ext_entry->aio_handle;
  1502. pqi_get_physical_disk_info(ctrl_info,
  1503. device, id_phys);
  1504. }
  1505. }
  1506. break;
  1507. }
  1508. new_device_list[num_valid_devices++] = device;
  1509. }
  1510. pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices);
  1511. out:
  1512. list_for_each_entry_safe(device, next, &new_device_list_head,
  1513. new_device_list_entry) {
  1514. if (device->keep_device)
  1515. continue;
  1516. list_del(&device->new_device_list_entry);
  1517. pqi_free_device(device);
  1518. }
  1519. kfree(new_device_list);
  1520. kfree(physdev_list);
  1521. kfree(logdev_list);
  1522. kfree(id_phys);
  1523. return rc;
  1524. }
  1525. static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info)
  1526. {
  1527. unsigned long flags;
  1528. struct pqi_scsi_dev *device;
  1529. struct pqi_scsi_dev *next;
  1530. spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
  1531. list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
  1532. scsi_device_list_entry) {
  1533. if (device->sdev)
  1534. pqi_remove_device(ctrl_info, device);
  1535. list_del(&device->scsi_device_list_entry);
  1536. pqi_free_device(device);
  1537. }
  1538. spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
  1539. }
  1540. static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
  1541. {
  1542. int rc;
  1543. if (pqi_ctrl_offline(ctrl_info))
  1544. return -ENXIO;
  1545. mutex_lock(&ctrl_info->scan_mutex);
  1546. rc = pqi_update_scsi_devices(ctrl_info);
  1547. if (rc)
  1548. pqi_schedule_rescan_worker(ctrl_info);
  1549. mutex_unlock(&ctrl_info->scan_mutex);
  1550. return rc;
  1551. }
  1552. static void pqi_scan_start(struct Scsi_Host *shost)
  1553. {
  1554. pqi_scan_scsi_devices(shost_to_hba(shost));
  1555. }
  1556. /* Returns TRUE if scan is finished. */
  1557. static int pqi_scan_finished(struct Scsi_Host *shost,
  1558. unsigned long elapsed_time)
  1559. {
  1560. struct pqi_ctrl_info *ctrl_info;
  1561. ctrl_info = shost_priv(shost);
  1562. return !mutex_is_locked(&ctrl_info->scan_mutex);
  1563. }
  1564. static inline void pqi_set_encryption_info(
  1565. struct pqi_encryption_info *encryption_info, struct raid_map *raid_map,
  1566. u64 first_block)
  1567. {
  1568. u32 volume_blk_size;
  1569. /*
  1570. * Set the encryption tweak values based on logical block address.
  1571. * If the block size is 512, the tweak value is equal to the LBA.
1572. * For other block sizes, the tweak value is (LBA * block size) / 512.
  1573. */
  1574. volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size);
  1575. if (volume_blk_size != 512)
  1576. first_block = (first_block * volume_blk_size) / 512;
  1577. encryption_info->data_encryption_key_index =
  1578. get_unaligned_le16(&raid_map->data_encryption_key_index);
  1579. encryption_info->encrypt_tweak_lower = lower_32_bits(first_block);
  1580. encryption_info->encrypt_tweak_upper = upper_32_bits(first_block);
  1581. }
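/*
 * The tweak derivation above scales the starting LBA so the tweak is always
 * expressed in 512-byte units: for a 512-byte volume block size the tweak
 * equals the LBA, while e.g. a 4096-byte block size maps LBA 10 to
 * 10 * 4096 / 512 = 80. A minimal sketch of that scaling (the helper name
 * is illustrative; the real routine also copies the key index and splits
 * the result into 32-bit halves):
 */
#if 0	/* illustrative sketch, not compiled */
static u64 example_encryption_tweak(u64 first_block, u32 volume_blk_size)
{
	if (volume_blk_size != 512)
		first_block = (first_block * volume_blk_size) / 512;
	return first_block;
}
#endif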
  1582. /*
  1583. * Attempt to perform offload RAID mapping for a logical volume I/O.
  1584. */
  1585. #define PQI_RAID_BYPASS_INELIGIBLE 1
  1586. static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
  1587. struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
  1588. struct pqi_queue_group *queue_group)
  1589. {
  1590. struct raid_map *raid_map;
  1591. bool is_write = false;
  1592. u32 map_index;
  1593. u64 first_block;
  1594. u64 last_block;
  1595. u32 block_cnt;
  1596. u32 blocks_per_row;
  1597. u64 first_row;
  1598. u64 last_row;
  1599. u32 first_row_offset;
  1600. u32 last_row_offset;
  1601. u32 first_column;
  1602. u32 last_column;
  1603. u64 r0_first_row;
  1604. u64 r0_last_row;
  1605. u32 r5or6_blocks_per_row;
  1606. u64 r5or6_first_row;
  1607. u64 r5or6_last_row;
  1608. u32 r5or6_first_row_offset;
  1609. u32 r5or6_last_row_offset;
  1610. u32 r5or6_first_column;
  1611. u32 r5or6_last_column;
  1612. u16 data_disks_per_row;
  1613. u32 total_disks_per_row;
  1614. u16 layout_map_count;
  1615. u32 stripesize;
  1616. u16 strip_size;
  1617. u32 first_group;
  1618. u32 last_group;
  1619. u32 current_group;
  1620. u32 map_row;
  1621. u32 aio_handle;
  1622. u64 disk_block;
  1623. u32 disk_block_cnt;
  1624. u8 cdb[16];
  1625. u8 cdb_length;
  1626. int offload_to_mirror;
  1627. struct pqi_encryption_info *encryption_info_ptr;
  1628. struct pqi_encryption_info encryption_info;
  1629. #if BITS_PER_LONG == 32
  1630. u64 tmpdiv;
  1631. #endif
  1632. /* Check for valid opcode, get LBA and block count. */
  1633. switch (scmd->cmnd[0]) {
  1634. case WRITE_6:
  1635. is_write = true;
  1636. /* fall through */
  1637. case READ_6:
  1638. first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
  1639. (scmd->cmnd[2] << 8) | scmd->cmnd[3]);
  1640. block_cnt = (u32)scmd->cmnd[4];
  1641. if (block_cnt == 0)
  1642. block_cnt = 256;
  1643. break;
  1644. case WRITE_10:
  1645. is_write = true;
  1646. /* fall through */
  1647. case READ_10:
  1648. first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
  1649. block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
  1650. break;
  1651. case WRITE_12:
  1652. is_write = true;
  1653. /* fall through */
  1654. case READ_12:
  1655. first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
  1656. block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
  1657. break;
  1658. case WRITE_16:
  1659. is_write = true;
  1660. /* fall through */
  1661. case READ_16:
  1662. first_block = get_unaligned_be64(&scmd->cmnd[2]);
  1663. block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
  1664. break;
  1665. default:
  1666. /* Process via normal I/O path. */
  1667. return PQI_RAID_BYPASS_INELIGIBLE;
  1668. }
  1669. /* Check for write to non-RAID-0. */
  1670. if (is_write && device->raid_level != SA_RAID_0)
  1671. return PQI_RAID_BYPASS_INELIGIBLE;
  1672. if (unlikely(block_cnt == 0))
  1673. return PQI_RAID_BYPASS_INELIGIBLE;
  1674. last_block = first_block + block_cnt - 1;
  1675. raid_map = device->raid_map;
  1676. /* Check for invalid block or wraparound. */
  1677. if (last_block >= get_unaligned_le64(&raid_map->volume_blk_cnt) ||
  1678. last_block < first_block)
  1679. return PQI_RAID_BYPASS_INELIGIBLE;
  1680. data_disks_per_row = get_unaligned_le16(&raid_map->data_disks_per_row);
  1681. strip_size = get_unaligned_le16(&raid_map->strip_size);
  1682. layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);
  1683. /* Calculate stripe information for the request. */
  1684. blocks_per_row = data_disks_per_row * strip_size;
  1685. #if BITS_PER_LONG == 32
  1686. tmpdiv = first_block;
  1687. do_div(tmpdiv, blocks_per_row);
  1688. first_row = tmpdiv;
  1689. tmpdiv = last_block;
  1690. do_div(tmpdiv, blocks_per_row);
  1691. last_row = tmpdiv;
  1692. first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
  1693. last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
  1694. tmpdiv = first_row_offset;
  1695. do_div(tmpdiv, strip_size);
  1696. first_column = tmpdiv;
  1697. tmpdiv = last_row_offset;
  1698. do_div(tmpdiv, strip_size);
  1699. last_column = tmpdiv;
  1700. #else
  1701. first_row = first_block / blocks_per_row;
  1702. last_row = last_block / blocks_per_row;
  1703. first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
  1704. last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
  1705. first_column = first_row_offset / strip_size;
  1706. last_column = last_row_offset / strip_size;
  1707. #endif
1708. /* If this isn't a single row/column, give the request to the controller. */
  1709. if (first_row != last_row || first_column != last_column)
  1710. return PQI_RAID_BYPASS_INELIGIBLE;
  1711. /* Proceeding with driver mapping. */
  1712. total_disks_per_row = data_disks_per_row +
  1713. get_unaligned_le16(&raid_map->metadata_disks_per_row);
  1714. map_row = ((u32)(first_row >> raid_map->parity_rotation_shift)) %
  1715. get_unaligned_le16(&raid_map->row_cnt);
  1716. map_index = (map_row * total_disks_per_row) + first_column;
  1717. /* RAID 1 */
  1718. if (device->raid_level == SA_RAID_1) {
  1719. if (device->offload_to_mirror)
  1720. map_index += data_disks_per_row;
  1721. device->offload_to_mirror = !device->offload_to_mirror;
  1722. } else if (device->raid_level == SA_RAID_ADM) {
  1723. /* RAID ADM */
  1724. /*
  1725. * Handles N-way mirrors (R1-ADM) and R10 with # of drives
  1726. * divisible by 3.
  1727. */
  1728. offload_to_mirror = device->offload_to_mirror;
  1729. if (offload_to_mirror == 0) {
  1730. /* use physical disk in the first mirrored group. */
  1731. map_index %= data_disks_per_row;
  1732. } else {
  1733. do {
  1734. /*
1735. * Determine the mirror group that map_index
  1736. * indicates.
  1737. */
  1738. current_group = map_index / data_disks_per_row;
  1739. if (offload_to_mirror != current_group) {
  1740. if (current_group <
  1741. layout_map_count - 1) {
  1742. /*
  1743. * Select raid index from
  1744. * next group.
  1745. */
  1746. map_index += data_disks_per_row;
  1747. current_group++;
  1748. } else {
  1749. /*
  1750. * Select raid index from first
  1751. * group.
  1752. */
  1753. map_index %= data_disks_per_row;
  1754. current_group = 0;
  1755. }
  1756. }
  1757. } while (offload_to_mirror != current_group);
  1758. }
  1759. /* Set mirror group to use next time. */
  1760. offload_to_mirror =
  1761. (offload_to_mirror >= layout_map_count - 1) ?
  1762. 0 : offload_to_mirror + 1;
  1763. WARN_ON(offload_to_mirror >= layout_map_count);
  1764. device->offload_to_mirror = offload_to_mirror;
  1765. /*
  1766. * Avoid direct use of device->offload_to_mirror within this
  1767. * function since multiple threads might simultaneously
1768. * increment it beyond the range of the RAID map's layout_map_count - 1.
  1769. */
  1770. } else if ((device->raid_level == SA_RAID_5 ||
  1771. device->raid_level == SA_RAID_6) && layout_map_count > 1) {
  1772. /* RAID 50/60 */
  1773. /* Verify first and last block are in same RAID group */
  1774. r5or6_blocks_per_row = strip_size * data_disks_per_row;
  1775. stripesize = r5or6_blocks_per_row * layout_map_count;
  1776. #if BITS_PER_LONG == 32
  1777. tmpdiv = first_block;
  1778. first_group = do_div(tmpdiv, stripesize);
  1779. tmpdiv = first_group;
  1780. do_div(tmpdiv, r5or6_blocks_per_row);
  1781. first_group = tmpdiv;
  1782. tmpdiv = last_block;
  1783. last_group = do_div(tmpdiv, stripesize);
  1784. tmpdiv = last_group;
  1785. do_div(tmpdiv, r5or6_blocks_per_row);
  1786. last_group = tmpdiv;
  1787. #else
  1788. first_group = (first_block % stripesize) / r5or6_blocks_per_row;
  1789. last_group = (last_block % stripesize) / r5or6_blocks_per_row;
  1790. #endif
  1791. if (first_group != last_group)
  1792. return PQI_RAID_BYPASS_INELIGIBLE;
  1793. /* Verify request is in a single row of RAID 5/6 */
  1794. #if BITS_PER_LONG == 32
  1795. tmpdiv = first_block;
  1796. do_div(tmpdiv, stripesize);
  1797. first_row = r5or6_first_row = r0_first_row = tmpdiv;
  1798. tmpdiv = last_block;
  1799. do_div(tmpdiv, stripesize);
  1800. r5or6_last_row = r0_last_row = tmpdiv;
  1801. #else
  1802. first_row = r5or6_first_row = r0_first_row =
  1803. first_block / stripesize;
  1804. r5or6_last_row = r0_last_row = last_block / stripesize;
  1805. #endif
  1806. if (r5or6_first_row != r5or6_last_row)
  1807. return PQI_RAID_BYPASS_INELIGIBLE;
  1808. /* Verify request is in a single column */
  1809. #if BITS_PER_LONG == 32
  1810. tmpdiv = first_block;
  1811. first_row_offset = do_div(tmpdiv, stripesize);
  1812. tmpdiv = first_row_offset;
  1813. first_row_offset = (u32)do_div(tmpdiv, r5or6_blocks_per_row);
  1814. r5or6_first_row_offset = first_row_offset;
  1815. tmpdiv = last_block;
  1816. r5or6_last_row_offset = do_div(tmpdiv, stripesize);
  1817. tmpdiv = r5or6_last_row_offset;
  1818. r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
  1819. tmpdiv = r5or6_first_row_offset;
  1820. do_div(tmpdiv, strip_size);
  1821. first_column = r5or6_first_column = tmpdiv;
  1822. tmpdiv = r5or6_last_row_offset;
  1823. do_div(tmpdiv, strip_size);
  1824. r5or6_last_column = tmpdiv;
  1825. #else
  1826. first_row_offset = r5or6_first_row_offset =
  1827. (u32)((first_block % stripesize) %
  1828. r5or6_blocks_per_row);
  1829. r5or6_last_row_offset =
  1830. (u32)((last_block % stripesize) %
  1831. r5or6_blocks_per_row);
  1832. first_column = r5or6_first_row_offset / strip_size;
  1833. r5or6_first_column = first_column;
  1834. r5or6_last_column = r5or6_last_row_offset / strip_size;
  1835. #endif
  1836. if (r5or6_first_column != r5or6_last_column)
  1837. return PQI_RAID_BYPASS_INELIGIBLE;
  1838. /* Request is eligible */
  1839. map_row =
  1840. ((u32)(first_row >> raid_map->parity_rotation_shift)) %
  1841. get_unaligned_le16(&raid_map->row_cnt);
  1842. map_index = (first_group *
  1843. (get_unaligned_le16(&raid_map->row_cnt) *
  1844. total_disks_per_row)) +
  1845. (map_row * total_disks_per_row) + first_column;
  1846. }
  1847. if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
  1848. return PQI_RAID_BYPASS_INELIGIBLE;
  1849. aio_handle = raid_map->disk_data[map_index].aio_handle;
  1850. disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
  1851. first_row * strip_size +
  1852. (first_row_offset - first_column * strip_size);
  1853. disk_block_cnt = block_cnt;
  1854. /* Handle differing logical/physical block sizes. */
  1855. if (raid_map->phys_blk_shift) {
  1856. disk_block <<= raid_map->phys_blk_shift;
  1857. disk_block_cnt <<= raid_map->phys_blk_shift;
  1858. }
  1859. if (unlikely(disk_block_cnt > 0xffff))
  1860. return PQI_RAID_BYPASS_INELIGIBLE;
  1861. /* Build the new CDB for the physical disk I/O. */
  1862. if (disk_block > 0xffffffff) {
  1863. cdb[0] = is_write ? WRITE_16 : READ_16;
  1864. cdb[1] = 0;
  1865. put_unaligned_be64(disk_block, &cdb[2]);
  1866. put_unaligned_be32(disk_block_cnt, &cdb[10]);
  1867. cdb[14] = 0;
  1868. cdb[15] = 0;
  1869. cdb_length = 16;
  1870. } else {
  1871. cdb[0] = is_write ? WRITE_10 : READ_10;
  1872. cdb[1] = 0;
  1873. put_unaligned_be32((u32)disk_block, &cdb[2]);
  1874. cdb[6] = 0;
  1875. put_unaligned_be16((u16)disk_block_cnt, &cdb[7]);
  1876. cdb[9] = 0;
  1877. cdb_length = 10;
  1878. }
  1879. if (get_unaligned_le16(&raid_map->flags) &
  1880. RAID_MAP_ENCRYPTION_ENABLED) {
  1881. pqi_set_encryption_info(&encryption_info, raid_map,
  1882. first_block);
  1883. encryption_info_ptr = &encryption_info;
  1884. } else {
  1885. encryption_info_ptr = NULL;
  1886. }
  1887. return pqi_aio_submit_io(ctrl_info, scmd, aio_handle,
  1888. cdb, cdb_length, queue_group, encryption_info_ptr);
  1889. }
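/*
 * A minimal sketch of the member-disk block calculation used above,
 * restricted to the simple single-row/single-column case (RAID-0 style
 * mapping, 64-bit division, no encryption); the helper name and the numbers
 * in the example are illustrative only. With strip_size = 128 and
 * data_disks_per_row = 3 (blocks_per_row = 384), first_block = 500 falls in
 * row 1, row offset 116, column 0, so the member-disk block is
 * disk_starting_blk + 1 * 128 + 116 = disk_starting_blk + 244.
 */
#if 0	/* illustrative sketch, not compiled */
static u64 example_raid0_disk_block(u64 first_block, u16 strip_size,
	u16 data_disks_per_row, u64 disk_starting_blk)
{
	u32 blocks_per_row = data_disks_per_row * strip_size;
	u64 first_row = first_block / blocks_per_row;
	u32 first_row_offset = (u32)(first_block - first_row * blocks_per_row);
	u32 first_column = first_row_offset / strip_size;

	return disk_starting_blk + first_row * strip_size +
		(first_row_offset - first_column * strip_size);
}
#endif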
  1890. #define PQI_STATUS_IDLE 0x0
  1891. #define PQI_CREATE_ADMIN_QUEUE_PAIR 1
  1892. #define PQI_DELETE_ADMIN_QUEUE_PAIR 2
  1893. #define PQI_DEVICE_STATE_POWER_ON_AND_RESET 0x0
  1894. #define PQI_DEVICE_STATE_STATUS_AVAILABLE 0x1
  1895. #define PQI_DEVICE_STATE_ALL_REGISTERS_READY 0x2
  1896. #define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY 0x3
  1897. #define PQI_DEVICE_STATE_ERROR 0x4
  1898. #define PQI_MODE_READY_TIMEOUT_SECS 30
  1899. #define PQI_MODE_READY_POLL_INTERVAL_MSECS 1
  1900. static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
  1901. {
  1902. struct pqi_device_registers __iomem *pqi_registers;
  1903. unsigned long timeout;
  1904. u64 signature;
  1905. u8 status;
  1906. pqi_registers = ctrl_info->pqi_registers;
  1907. timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies;
  1908. while (1) {
  1909. signature = readq(&pqi_registers->signature);
  1910. if (memcmp(&signature, PQI_DEVICE_SIGNATURE,
  1911. sizeof(signature)) == 0)
  1912. break;
  1913. if (time_after(jiffies, timeout)) {
  1914. dev_err(&ctrl_info->pci_dev->dev,
  1915. "timed out waiting for PQI signature\n");
  1916. return -ETIMEDOUT;
  1917. }
  1918. msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
  1919. }
  1920. while (1) {
  1921. status = readb(&pqi_registers->function_and_status_code);
  1922. if (status == PQI_STATUS_IDLE)
  1923. break;
  1924. if (time_after(jiffies, timeout)) {
  1925. dev_err(&ctrl_info->pci_dev->dev,
  1926. "timed out waiting for PQI IDLE\n");
  1927. return -ETIMEDOUT;
  1928. }
  1929. msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
  1930. }
  1931. while (1) {
  1932. if (readl(&pqi_registers->device_status) ==
  1933. PQI_DEVICE_STATE_ALL_REGISTERS_READY)
  1934. break;
  1935. if (time_after(jiffies, timeout)) {
  1936. dev_err(&ctrl_info->pci_dev->dev,
  1937. "timed out waiting for PQI all registers ready\n");
  1938. return -ETIMEDOUT;
  1939. }
  1940. msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
  1941. }
  1942. return 0;
  1943. }
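/*
 * The readiness wait above polls three conditions in sequence against a
 * single shared deadline of PQI_MODE_READY_TIMEOUT_SECS: the PQI signature
 * in the device registers, an idle function-and-status code, and the
 * "all registers ready" device state, sleeping
 * PQI_MODE_READY_POLL_INTERVAL_MSECS between checks.
 */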
  1944. static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request)
  1945. {
  1946. struct pqi_scsi_dev *device;
  1947. device = io_request->scmd->device->hostdata;
  1948. device->offload_enabled = false;
  1949. }
  1950. static inline void pqi_take_device_offline(struct scsi_device *sdev)
  1951. {
  1952. struct pqi_ctrl_info *ctrl_info;
  1953. struct pqi_scsi_dev *device;
  1954. if (scsi_device_online(sdev)) {
  1955. scsi_device_set_state(sdev, SDEV_OFFLINE);
  1956. ctrl_info = shost_to_hba(sdev->host);
  1957. schedule_delayed_work(&ctrl_info->rescan_work, 0);
  1958. device = sdev->hostdata;
  1959. dev_err(&ctrl_info->pci_dev->dev, "offlined scsi %d:%d:%d:%d\n",
  1960. ctrl_info->scsi_host->host_no, device->bus,
  1961. device->target, device->lun);
  1962. }
  1963. }
  1964. static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
  1965. {
  1966. u8 scsi_status;
  1967. u8 host_byte;
  1968. struct scsi_cmnd *scmd;
  1969. struct pqi_raid_error_info *error_info;
  1970. size_t sense_data_length;
  1971. int residual_count;
  1972. int xfer_count;
  1973. struct scsi_sense_hdr sshdr;
  1974. scmd = io_request->scmd;
  1975. if (!scmd)
  1976. return;
  1977. error_info = io_request->error_info;
  1978. scsi_status = error_info->status;
  1979. host_byte = DID_OK;
  1980. if (error_info->data_out_result == PQI_DATA_IN_OUT_UNDERFLOW) {
  1981. xfer_count =
  1982. get_unaligned_le32(&error_info->data_out_transferred);
  1983. residual_count = scsi_bufflen(scmd) - xfer_count;
  1984. scsi_set_resid(scmd, residual_count);
  1985. if (xfer_count < scmd->underflow)
  1986. host_byte = DID_SOFT_ERROR;
  1987. }
  1988. sense_data_length = get_unaligned_le16(&error_info->sense_data_length);
  1989. if (sense_data_length == 0)
  1990. sense_data_length =
  1991. get_unaligned_le16(&error_info->response_data_length);
  1992. if (sense_data_length) {
  1993. if (sense_data_length > sizeof(error_info->data))
  1994. sense_data_length = sizeof(error_info->data);
  1995. if (scsi_status == SAM_STAT_CHECK_CONDITION &&
  1996. scsi_normalize_sense(error_info->data,
  1997. sense_data_length, &sshdr) &&
  1998. sshdr.sense_key == HARDWARE_ERROR &&
  1999. sshdr.asc == 0x3e &&
  2000. sshdr.ascq == 0x1) {
  2001. pqi_take_device_offline(scmd->device);
  2002. host_byte = DID_NO_CONNECT;
  2003. }
  2004. if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
  2005. sense_data_length = SCSI_SENSE_BUFFERSIZE;
  2006. memcpy(scmd->sense_buffer, error_info->data,
  2007. sense_data_length);
  2008. }
  2009. scmd->result = scsi_status;
  2010. set_host_byte(scmd, host_byte);
  2011. }
  2012. static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
  2013. {
  2014. u8 scsi_status;
  2015. u8 host_byte;
  2016. struct scsi_cmnd *scmd;
  2017. struct pqi_aio_error_info *error_info;
  2018. size_t sense_data_length;
  2019. int residual_count;
  2020. int xfer_count;
  2021. bool device_offline;
  2022. scmd = io_request->scmd;
  2023. error_info = io_request->error_info;
  2024. host_byte = DID_OK;
  2025. sense_data_length = 0;
  2026. device_offline = false;
  2027. switch (error_info->service_response) {
  2028. case PQI_AIO_SERV_RESPONSE_COMPLETE:
  2029. scsi_status = error_info->status;
  2030. break;
  2031. case PQI_AIO_SERV_RESPONSE_FAILURE:
  2032. switch (error_info->status) {
  2033. case PQI_AIO_STATUS_IO_ABORTED:
  2034. scsi_status = SAM_STAT_TASK_ABORTED;
  2035. break;
  2036. case PQI_AIO_STATUS_UNDERRUN:
  2037. scsi_status = SAM_STAT_GOOD;
  2038. residual_count = get_unaligned_le32(
  2039. &error_info->residual_count);
  2040. scsi_set_resid(scmd, residual_count);
  2041. xfer_count = scsi_bufflen(scmd) - residual_count;
  2042. if (xfer_count < scmd->underflow)
  2043. host_byte = DID_SOFT_ERROR;
  2044. break;
  2045. case PQI_AIO_STATUS_OVERRUN:
  2046. scsi_status = SAM_STAT_GOOD;
  2047. break;
  2048. case PQI_AIO_STATUS_AIO_PATH_DISABLED:
  2049. pqi_aio_path_disabled(io_request);
  2050. scsi_status = SAM_STAT_GOOD;
  2051. io_request->status = -EAGAIN;
  2052. break;
  2053. case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
  2054. case PQI_AIO_STATUS_INVALID_DEVICE:
  2055. device_offline = true;
  2056. pqi_take_device_offline(scmd->device);
  2057. host_byte = DID_NO_CONNECT;
  2058. scsi_status = SAM_STAT_CHECK_CONDITION;
  2059. break;
  2060. case PQI_AIO_STATUS_IO_ERROR:
  2061. default:
  2062. scsi_status = SAM_STAT_CHECK_CONDITION;
  2063. break;
  2064. }
  2065. break;
  2066. case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
  2067. case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
  2068. scsi_status = SAM_STAT_GOOD;
  2069. break;
  2070. case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
  2071. case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
  2072. default:
  2073. scsi_status = SAM_STAT_CHECK_CONDITION;
  2074. break;
  2075. }
  2076. if (error_info->data_present) {
  2077. sense_data_length =
  2078. get_unaligned_le16(&error_info->data_length);
  2079. if (sense_data_length) {
  2080. if (sense_data_length > sizeof(error_info->data))
  2081. sense_data_length = sizeof(error_info->data);
  2082. if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
  2083. sense_data_length = SCSI_SENSE_BUFFERSIZE;
  2084. memcpy(scmd->sense_buffer, error_info->data,
  2085. sense_data_length);
  2086. }
  2087. }
  2088. if (device_offline && sense_data_length == 0)
  2089. scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR,
  2090. 0x3e, 0x1);
  2091. scmd->result = scsi_status;
  2092. set_host_byte(scmd, host_byte);
  2093. }
  2094. static void pqi_process_io_error(unsigned int iu_type,
  2095. struct pqi_io_request *io_request)
  2096. {
  2097. switch (iu_type) {
  2098. case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
  2099. pqi_process_raid_io_error(io_request);
  2100. break;
  2101. case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
  2102. pqi_process_aio_io_error(io_request);
  2103. break;
  2104. }
  2105. }
  2106. static int pqi_interpret_task_management_response(
  2107. struct pqi_task_management_response *response)
  2108. {
  2109. int rc;
  2110. switch (response->response_code) {
  2111. case SOP_TMF_COMPLETE:
  2112. case SOP_TMF_FUNCTION_SUCCEEDED:
  2113. rc = 0;
  2114. break;
  2115. default:
  2116. rc = -EIO;
  2117. break;
  2118. }
  2119. return rc;
  2120. }
  2121. static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
  2122. struct pqi_queue_group *queue_group)
  2123. {
  2124. unsigned int num_responses;
  2125. pqi_index_t oq_pi;
  2126. pqi_index_t oq_ci;
  2127. struct pqi_io_request *io_request;
  2128. struct pqi_io_response *response;
  2129. u16 request_id;
  2130. num_responses = 0;
  2131. oq_ci = queue_group->oq_ci_copy;
  2132. while (1) {
  2133. oq_pi = *queue_group->oq_pi;
  2134. if (oq_pi == oq_ci)
  2135. break;
  2136. num_responses++;
  2137. response = queue_group->oq_element_array +
  2138. (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
  2139. request_id = get_unaligned_le16(&response->request_id);
  2140. WARN_ON(request_id >= ctrl_info->max_io_slots);
  2141. io_request = &ctrl_info->io_request_pool[request_id];
  2142. WARN_ON(atomic_read(&io_request->refcount) == 0);
  2143. switch (response->header.iu_type) {
  2144. case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
  2145. case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
  2146. case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
  2147. break;
  2148. case PQI_RESPONSE_IU_TASK_MANAGEMENT:
  2149. io_request->status =
  2150. pqi_interpret_task_management_response(
  2151. (void *)response);
  2152. break;
  2153. case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
  2154. pqi_aio_path_disabled(io_request);
  2155. io_request->status = -EAGAIN;
  2156. break;
  2157. case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
  2158. case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
  2159. io_request->error_info = ctrl_info->error_buffer +
  2160. (get_unaligned_le16(&response->error_index) *
  2161. PQI_ERROR_BUFFER_ELEMENT_LENGTH);
  2162. pqi_process_io_error(response->header.iu_type,
  2163. io_request);
  2164. break;
  2165. default:
  2166. dev_err(&ctrl_info->pci_dev->dev,
  2167. "unexpected IU type: 0x%x\n",
  2168. response->header.iu_type);
  2169. WARN_ON(response->header.iu_type);
  2170. break;
  2171. }
  2172. io_request->io_complete_callback(io_request,
  2173. io_request->context);
  2174. /*
  2175. * Note that the I/O request structure CANNOT BE TOUCHED after
  2176. * returning from the I/O completion callback!
  2177. */
  2178. oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
  2179. }
  2180. if (num_responses) {
  2181. queue_group->oq_ci_copy = oq_ci;
  2182. writel(oq_ci, queue_group->oq_ci);
  2183. }
  2184. return num_responses;
  2185. }
  2186. static inline unsigned int pqi_num_elements_free(unsigned int pi,
  2187. unsigned int ci, unsigned int elements_in_queue)
  2188. {
  2189. unsigned int num_elements_used;
  2190. if (pi >= ci)
  2191. num_elements_used = pi - ci;
  2192. else
  2193. num_elements_used = elements_in_queue - ci + pi;
  2194. return elements_in_queue - num_elements_used - 1;
  2195. }
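/*
 * Worked example for the circular-queue math above (illustrative numbers):
 * with elements_in_queue = 16, pi = 5 and ci = 9, the queue holds
 * 16 - 9 + 5 = 12 used elements, leaving 16 - 12 - 1 = 3 free; one slot is
 * always kept unused so that pi == ci unambiguously means "empty".
 */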
  2196. #define PQI_EVENT_ACK_TIMEOUT 30
  2197. static void pqi_start_event_ack(struct pqi_ctrl_info *ctrl_info,
  2198. struct pqi_event_acknowledge_request *iu, size_t iu_length)
  2199. {
  2200. pqi_index_t iq_pi;
  2201. pqi_index_t iq_ci;
  2202. unsigned long flags;
  2203. void *next_element;
  2204. unsigned long timeout;
  2205. struct pqi_queue_group *queue_group;
  2206. queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP];
  2207. put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id);
  2208. timeout = (PQI_EVENT_ACK_TIMEOUT * HZ) + jiffies;
  2209. while (1) {
  2210. spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags);
  2211. iq_pi = queue_group->iq_pi_copy[RAID_PATH];
  2212. iq_ci = *queue_group->iq_ci[RAID_PATH];
  2213. if (pqi_num_elements_free(iq_pi, iq_ci,
  2214. ctrl_info->num_elements_per_iq))
  2215. break;
  2216. spin_unlock_irqrestore(
  2217. &queue_group->submit_lock[RAID_PATH], flags);
  2218. if (time_after(jiffies, timeout)) {
  2219. dev_err(&ctrl_info->pci_dev->dev,
  2220. "sending event acknowledge timed out\n");
  2221. return;
  2222. }
  2223. }
  2224. next_element = queue_group->iq_element_array[RAID_PATH] +
  2225. (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
  2226. memcpy(next_element, iu, iu_length);
  2227. iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq;
  2228. queue_group->iq_pi_copy[RAID_PATH] = iq_pi;
  2229. /*
  2230. * This write notifies the controller that an IU is available to be
  2231. * processed.
  2232. */
  2233. writel(iq_pi, queue_group->iq_pi[RAID_PATH]);
  2234. spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags);
  2235. }
  2236. static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
  2237. struct pqi_event *event)
  2238. {
  2239. struct pqi_event_acknowledge_request request;
  2240. memset(&request, 0, sizeof(request));
  2241. request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
  2242. put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
  2243. &request.header.iu_length);
  2244. request.event_type = event->event_type;
  2245. request.event_id = event->event_id;
  2246. request.additional_event_id = event->additional_event_id;
  2247. pqi_start_event_ack(ctrl_info, &request, sizeof(request));
  2248. }
  2249. static void pqi_event_worker(struct work_struct *work)
  2250. {
  2251. unsigned int i;
  2252. struct pqi_ctrl_info *ctrl_info;
  2253. struct pqi_event *pending_event;
  2254. bool got_non_heartbeat_event = false;
  2255. ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);
  2256. pending_event = ctrl_info->pending_events;
  2257. for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
  2258. if (pending_event->pending) {
  2259. pending_event->pending = false;
  2260. pqi_acknowledge_event(ctrl_info, pending_event);
  2261. if (i != PQI_EVENT_HEARTBEAT)
  2262. got_non_heartbeat_event = true;
  2263. }
  2264. pending_event++;
  2265. }
  2266. if (got_non_heartbeat_event)
  2267. pqi_schedule_rescan_worker(ctrl_info);
  2268. }
  2269. static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
  2270. {
  2271. unsigned int i;
  2272. unsigned int path;
  2273. struct pqi_queue_group *queue_group;
  2274. unsigned long flags;
  2275. struct pqi_io_request *io_request;
  2276. struct pqi_io_request *next;
  2277. struct scsi_cmnd *scmd;
  2278. ctrl_info->controller_online = false;
  2279. dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");
  2280. for (i = 0; i < ctrl_info->num_queue_groups; i++) {
  2281. queue_group = &ctrl_info->queue_groups[i];
  2282. for (path = 0; path < 2; path++) {
  2283. spin_lock_irqsave(
  2284. &queue_group->submit_lock[path], flags);
  2285. list_for_each_entry_safe(io_request, next,
  2286. &queue_group->request_list[path],
  2287. request_list_entry) {
  2288. scmd = io_request->scmd;
  2289. if (scmd) {
  2290. set_host_byte(scmd, DID_NO_CONNECT);
  2291. pqi_scsi_done(scmd);
  2292. }
  2293. list_del(&io_request->request_list_entry);
  2294. }
  2295. spin_unlock_irqrestore(
  2296. &queue_group->submit_lock[path], flags);
  2297. }
  2298. }
  2299. }
  2300. #define PQI_HEARTBEAT_TIMER_INTERVAL (5 * HZ)
  2301. #define PQI_MAX_HEARTBEAT_REQUESTS 5
  2302. static void pqi_heartbeat_timer_handler(unsigned long data)
  2303. {
  2304. int num_interrupts;
  2305. struct pqi_ctrl_info *ctrl_info = (struct pqi_ctrl_info *)data;
  2306. num_interrupts = atomic_read(&ctrl_info->num_interrupts);
  2307. if (num_interrupts == ctrl_info->previous_num_interrupts) {
  2308. ctrl_info->num_heartbeats_requested++;
  2309. if (ctrl_info->num_heartbeats_requested >
  2310. PQI_MAX_HEARTBEAT_REQUESTS) {
  2311. pqi_take_ctrl_offline(ctrl_info);
  2312. return;
  2313. }
  2314. ctrl_info->pending_events[PQI_EVENT_HEARTBEAT].pending = true;
  2315. schedule_work(&ctrl_info->event_work);
  2316. } else {
  2317. ctrl_info->num_heartbeats_requested = 0;
  2318. }
  2319. ctrl_info->previous_num_interrupts = num_interrupts;
  2320. mod_timer(&ctrl_info->heartbeat_timer,
  2321. jiffies + PQI_HEARTBEAT_TIMER_INTERVAL);
  2322. }
  2323. static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
  2324. {
  2325. ctrl_info->previous_num_interrupts =
  2326. atomic_read(&ctrl_info->num_interrupts);
  2327. init_timer(&ctrl_info->heartbeat_timer);
  2328. ctrl_info->heartbeat_timer.expires =
  2329. jiffies + PQI_HEARTBEAT_TIMER_INTERVAL;
  2330. ctrl_info->heartbeat_timer.data = (unsigned long)ctrl_info;
  2331. ctrl_info->heartbeat_timer.function = pqi_heartbeat_timer_handler;
  2332. add_timer(&ctrl_info->heartbeat_timer);
  2333. ctrl_info->heartbeat_timer_started = true;
  2334. }
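/*
 * Heartbeat monitoring above: the timer fires every
 * PQI_HEARTBEAT_TIMER_INTERVAL (5 seconds). If no new interrupts have been
 * counted since the previous tick, the heartbeat event is marked pending so
 * the event worker re-acknowledges it; once more than
 * PQI_MAX_HEARTBEAT_REQUESTS consecutive silent intervals accumulate, the
 * controller is taken offline. Any interrupt activity resets the count.
 */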
  2335. static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
  2336. {
  2337. if (ctrl_info->heartbeat_timer_started)
  2338. del_timer_sync(&ctrl_info->heartbeat_timer);
  2339. }
  2340. static int pqi_event_type_to_event_index(unsigned int event_type)
  2341. {
  2342. int index;
  2343. switch (event_type) {
  2344. case PQI_EVENT_TYPE_HEARTBEAT:
  2345. index = PQI_EVENT_HEARTBEAT;
  2346. break;
  2347. case PQI_EVENT_TYPE_HOTPLUG:
  2348. index = PQI_EVENT_HOTPLUG;
  2349. break;
  2350. case PQI_EVENT_TYPE_HARDWARE:
  2351. index = PQI_EVENT_HARDWARE;
  2352. break;
  2353. case PQI_EVENT_TYPE_PHYSICAL_DEVICE:
  2354. index = PQI_EVENT_PHYSICAL_DEVICE;
  2355. break;
  2356. case PQI_EVENT_TYPE_LOGICAL_DEVICE:
  2357. index = PQI_EVENT_LOGICAL_DEVICE;
  2358. break;
  2359. case PQI_EVENT_TYPE_AIO_STATE_CHANGE:
  2360. index = PQI_EVENT_AIO_STATE_CHANGE;
  2361. break;
  2362. case PQI_EVENT_TYPE_AIO_CONFIG_CHANGE:
  2363. index = PQI_EVENT_AIO_CONFIG_CHANGE;
  2364. break;
  2365. default:
  2366. index = -1;
  2367. break;
  2368. }
  2369. return index;
  2370. }
  2371. static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
  2372. {
  2373. unsigned int num_events;
  2374. pqi_index_t oq_pi;
  2375. pqi_index_t oq_ci;
  2376. struct pqi_event_queue *event_queue;
  2377. struct pqi_event_response *response;
  2378. struct pqi_event *pending_event;
  2379. bool need_delayed_work;
  2380. int event_index;
  2381. event_queue = &ctrl_info->event_queue;
  2382. num_events = 0;
  2383. need_delayed_work = false;
  2384. oq_ci = event_queue->oq_ci_copy;
  2385. while (1) {
  2386. oq_pi = *event_queue->oq_pi;
  2387. if (oq_pi == oq_ci)
  2388. break;
  2389. num_events++;
  2390. response = event_queue->oq_element_array +
  2391. (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
  2392. event_index =
  2393. pqi_event_type_to_event_index(response->event_type);
  2394. if (event_index >= 0) {
  2395. if (response->request_acknowlege) {
  2396. pending_event =
  2397. &ctrl_info->pending_events[event_index];
  2398. pending_event->event_type =
  2399. response->event_type;
  2400. pending_event->event_id = response->event_id;
  2401. pending_event->additional_event_id =
  2402. response->additional_event_id;
  2403. if (event_index != PQI_EVENT_HEARTBEAT) {
  2404. pending_event->pending = true;
  2405. need_delayed_work = true;
  2406. }
  2407. }
  2408. }
  2409. oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
  2410. }
  2411. if (num_events) {
  2412. event_queue->oq_ci_copy = oq_ci;
  2413. writel(oq_ci, event_queue->oq_ci);
  2414. if (need_delayed_work)
  2415. schedule_work(&ctrl_info->event_work);
  2416. }
  2417. return num_events;
  2418. }
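/*
 * One handler instance runs per MSI-X vector, with the owning queue group
 * as dev_id. The vector registered as event_irq (vector 0) also drains
 * the event queue, and every invocation kicks pqi_start_io() on both paths
 * to flush requests that were queued while the inbound queue was full.
 */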
  2419. static irqreturn_t pqi_irq_handler(int irq, void *data)
  2420. {
  2421. struct pqi_ctrl_info *ctrl_info;
  2422. struct pqi_queue_group *queue_group;
  2423. unsigned int num_responses_handled;
  2424. queue_group = data;
  2425. ctrl_info = queue_group->ctrl_info;
  2426. if (!ctrl_info || !queue_group->oq_ci)
  2427. return IRQ_NONE;
  2428. num_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
  2429. if (irq == ctrl_info->event_irq)
  2430. num_responses_handled += pqi_process_event_intr(ctrl_info);
  2431. if (num_responses_handled)
  2432. atomic_inc(&ctrl_info->num_interrupts);
  2433. pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
  2434. pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);
  2435. return IRQ_HANDLED;
  2436. }
  2437. static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
  2438. {
  2439. int i;
  2440. int rc;
  2441. ctrl_info->event_irq = ctrl_info->msix_vectors[0];
  2442. for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
  2443. rc = request_irq(ctrl_info->msix_vectors[i],
  2444. pqi_irq_handler, 0,
  2445. DRIVER_NAME_SHORT, ctrl_info->intr_data[i]);
  2446. if (rc) {
  2447. dev_err(&ctrl_info->pci_dev->dev,
  2448. "irq %u init failed with error %d\n",
  2449. ctrl_info->msix_vectors[i], rc);
  2450. return rc;
  2451. }
  2452. ctrl_info->num_msix_vectors_initialized++;
  2453. }
  2454. return 0;
  2455. }
  2456. static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
  2457. {
  2458. int i;
  2459. for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
  2460. free_irq(ctrl_info->msix_vectors[i],
  2461. ctrl_info->intr_data[i]);
  2462. }
  2463. static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
  2464. {
  2465. unsigned int i;
  2466. int max_vectors;
  2467. int num_vectors_enabled;
  2468. struct msix_entry msix_entries[PQI_MAX_MSIX_VECTORS];
  2469. max_vectors = ctrl_info->num_queue_groups;
  2470. for (i = 0; i < max_vectors; i++)
  2471. msix_entries[i].entry = i;
  2472. num_vectors_enabled = pci_enable_msix_range(ctrl_info->pci_dev,
  2473. msix_entries, PQI_MIN_MSIX_VECTORS, max_vectors);
  2474. if (num_vectors_enabled < 0) {
  2475. dev_err(&ctrl_info->pci_dev->dev,
  2476. "MSI-X init failed with error %d\n",
  2477. num_vectors_enabled);
  2478. return num_vectors_enabled;
  2479. }
  2480. ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
  2481. for (i = 0; i < num_vectors_enabled; i++) {
  2482. ctrl_info->msix_vectors[i] = msix_entries[i].vector;
  2483. ctrl_info->intr_data[i] = &ctrl_info->queue_groups[i];
  2484. }
  2485. return 0;
  2486. }
  2487. static void pqi_irq_set_affinity_hint(struct pqi_ctrl_info *ctrl_info)
  2488. {
  2489. int i;
  2490. int rc;
  2491. int cpu;
  2492. cpu = cpumask_first(cpu_online_mask);
  2493. for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++) {
  2494. rc = irq_set_affinity_hint(ctrl_info->msix_vectors[i],
  2495. get_cpu_mask(cpu));
  2496. if (rc)
  2497. dev_err(&ctrl_info->pci_dev->dev,
  2498. "error %d setting affinity hint for irq vector %u\n",
  2499. rc, ctrl_info->msix_vectors[i]);
  2500. cpu = cpumask_next(cpu, cpu_online_mask);
  2501. }
  2502. }
  2503. static void pqi_irq_unset_affinity_hint(struct pqi_ctrl_info *ctrl_info)
  2504. {
  2505. int i;
  2506. for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
  2507. irq_set_affinity_hint(ctrl_info->msix_vectors[i], NULL);
  2508. }
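/*
 * All operational-queue memory comes from a single dma_zalloc_coherent()
 * allocation. The first pass below computes the worst-case size by walking
 * a NULL "aligned_pointer" through the same alignment steps later used to
 * carve the region: two IQs (RAID + AIO) and one OQ per queue group, one
 * event OQ, and (3 * num_queue_groups) + 1 queue indexes. For example,
 * with num_queue_groups == 4 that is 8 IQ element arrays, 4 OQ element
 * arrays, 1 event OQ element array, and 13 pqi_index_t slots, each rounded
 * up to its alignment requirement.
 */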
  2509. static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
  2510. {
  2511. unsigned int i;
  2512. size_t alloc_length;
  2513. size_t element_array_length_per_iq;
  2514. size_t element_array_length_per_oq;
  2515. void *element_array;
  2516. void *next_queue_index;
  2517. void *aligned_pointer;
  2518. unsigned int num_inbound_queues;
  2519. unsigned int num_outbound_queues;
  2520. unsigned int num_queue_indexes;
  2521. struct pqi_queue_group *queue_group;
  2522. element_array_length_per_iq =
  2523. PQI_OPERATIONAL_IQ_ELEMENT_LENGTH *
  2524. ctrl_info->num_elements_per_iq;
  2525. element_array_length_per_oq =
  2526. PQI_OPERATIONAL_OQ_ELEMENT_LENGTH *
  2527. ctrl_info->num_elements_per_oq;
  2528. num_inbound_queues = ctrl_info->num_queue_groups * 2;
  2529. num_outbound_queues = ctrl_info->num_queue_groups;
  2530. num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1;
  2531. aligned_pointer = NULL;
  2532. for (i = 0; i < num_inbound_queues; i++) {
  2533. aligned_pointer = PTR_ALIGN(aligned_pointer,
  2534. PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
  2535. aligned_pointer += element_array_length_per_iq;
  2536. }
  2537. for (i = 0; i < num_outbound_queues; i++) {
  2538. aligned_pointer = PTR_ALIGN(aligned_pointer,
  2539. PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
  2540. aligned_pointer += element_array_length_per_oq;
  2541. }
  2542. aligned_pointer = PTR_ALIGN(aligned_pointer,
  2543. PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
  2544. aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
  2545. PQI_EVENT_OQ_ELEMENT_LENGTH;
  2546. for (i = 0; i < num_queue_indexes; i++) {
  2547. aligned_pointer = PTR_ALIGN(aligned_pointer,
  2548. PQI_OPERATIONAL_INDEX_ALIGNMENT);
  2549. aligned_pointer += sizeof(pqi_index_t);
  2550. }
  2551. alloc_length = (size_t)aligned_pointer +
  2552. PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
  2553. ctrl_info->queue_memory_base =
  2554. dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
  2555. alloc_length,
  2556. &ctrl_info->queue_memory_base_dma_handle, GFP_KERNEL);
  2557. if (!ctrl_info->queue_memory_base) {
  2558. dev_err(&ctrl_info->pci_dev->dev,
  2559. "failed to allocate memory for PQI admin queues\n");
  2560. return -ENOMEM;
  2561. }
  2562. ctrl_info->queue_memory_length = alloc_length;
  2563. element_array = PTR_ALIGN(ctrl_info->queue_memory_base,
  2564. PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
  2565. for (i = 0; i < ctrl_info->num_queue_groups; i++) {
  2566. queue_group = &ctrl_info->queue_groups[i];
  2567. queue_group->iq_element_array[RAID_PATH] = element_array;
  2568. queue_group->iq_element_array_bus_addr[RAID_PATH] =
  2569. ctrl_info->queue_memory_base_dma_handle +
  2570. (element_array - ctrl_info->queue_memory_base);
  2571. element_array += element_array_length_per_iq;
  2572. element_array = PTR_ALIGN(element_array,
  2573. PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
  2574. queue_group->iq_element_array[AIO_PATH] = element_array;
  2575. queue_group->iq_element_array_bus_addr[AIO_PATH] =
  2576. ctrl_info->queue_memory_base_dma_handle +
  2577. (element_array - ctrl_info->queue_memory_base);
  2578. element_array += element_array_length_per_iq;
  2579. element_array = PTR_ALIGN(element_array,
  2580. PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
  2581. }
  2582. for (i = 0; i < ctrl_info->num_queue_groups; i++) {
  2583. queue_group = &ctrl_info->queue_groups[i];
  2584. queue_group->oq_element_array = element_array;
  2585. queue_group->oq_element_array_bus_addr =
  2586. ctrl_info->queue_memory_base_dma_handle +
  2587. (element_array - ctrl_info->queue_memory_base);
  2588. element_array += element_array_length_per_oq;
  2589. element_array = PTR_ALIGN(element_array,
  2590. PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
  2591. }
  2592. ctrl_info->event_queue.oq_element_array = element_array;
  2593. ctrl_info->event_queue.oq_element_array_bus_addr =
  2594. ctrl_info->queue_memory_base_dma_handle +
  2595. (element_array - ctrl_info->queue_memory_base);
  2596. element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS *
  2597. PQI_EVENT_OQ_ELEMENT_LENGTH;
  2598. next_queue_index = PTR_ALIGN(element_array,
  2599. PQI_OPERATIONAL_INDEX_ALIGNMENT);
  2600. for (i = 0; i < ctrl_info->num_queue_groups; i++) {
  2601. queue_group = &ctrl_info->queue_groups[i];
  2602. queue_group->iq_ci[RAID_PATH] = next_queue_index;
  2603. queue_group->iq_ci_bus_addr[RAID_PATH] =
  2604. ctrl_info->queue_memory_base_dma_handle +
  2605. (next_queue_index - ctrl_info->queue_memory_base);
  2606. next_queue_index += sizeof(pqi_index_t);
  2607. next_queue_index = PTR_ALIGN(next_queue_index,
  2608. PQI_OPERATIONAL_INDEX_ALIGNMENT);
  2609. queue_group->iq_ci[AIO_PATH] = next_queue_index;
  2610. queue_group->iq_ci_bus_addr[AIO_PATH] =
  2611. ctrl_info->queue_memory_base_dma_handle +
  2612. (next_queue_index - ctrl_info->queue_memory_base);
  2613. next_queue_index += sizeof(pqi_index_t);
  2614. next_queue_index = PTR_ALIGN(next_queue_index,
  2615. PQI_OPERATIONAL_INDEX_ALIGNMENT);
  2616. queue_group->oq_pi = next_queue_index;
  2617. queue_group->oq_pi_bus_addr =
  2618. ctrl_info->queue_memory_base_dma_handle +
  2619. (next_queue_index - ctrl_info->queue_memory_base);
  2620. next_queue_index += sizeof(pqi_index_t);
  2621. next_queue_index = PTR_ALIGN(next_queue_index,
  2622. PQI_OPERATIONAL_INDEX_ALIGNMENT);
  2623. }
  2624. ctrl_info->event_queue.oq_pi = next_queue_index;
  2625. ctrl_info->event_queue.oq_pi_bus_addr =
  2626. ctrl_info->queue_memory_base_dma_handle +
  2627. (next_queue_index - ctrl_info->queue_memory_base);
  2628. return 0;
  2629. }
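/*
 * Software-side initialization done before any queue is created on the
 * controller: backpointers, queue IDs, MSI-X message numbers, and the
 * per-path submit locks and request lists used by pqi_start_io().
 */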
  2630. static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info)
  2631. {
  2632. unsigned int i;
  2633. u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
  2634. u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
  2635. /*
  2636. * Initialize the backpointers to the controller structure in
  2637. * each operational queue group structure.
  2638. */
  2639. for (i = 0; i < ctrl_info->num_queue_groups; i++)
  2640. ctrl_info->queue_groups[i].ctrl_info = ctrl_info;
  2641. /*
  2642. * Assign IDs to all operational queues. Note that the IDs
  2643. * assigned to operational IQs are independent of the IDs
  2644. * assigned to operational OQs.
  2645. */
  2646. ctrl_info->event_queue.oq_id = next_oq_id++;
  2647. for (i = 0; i < ctrl_info->num_queue_groups; i++) {
  2648. ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++;
  2649. ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++;
  2650. ctrl_info->queue_groups[i].oq_id = next_oq_id++;
  2651. }
  2652. /*
  2653. * Assign MSI-X table entry indexes to all queues. Note that the
  2654. * interrupt for the event queue is shared with the first queue group.
  2655. */
  2656. ctrl_info->event_queue.int_msg_num = 0;
  2657. for (i = 0; i < ctrl_info->num_queue_groups; i++)
  2658. ctrl_info->queue_groups[i].int_msg_num = i;
  2659. for (i = 0; i < ctrl_info->num_queue_groups; i++) {
  2660. spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]);
  2661. spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]);
  2662. INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]);
  2663. INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]);
  2664. }
  2665. }
  2666. static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
  2667. {
  2668. size_t alloc_length;
  2669. struct pqi_admin_queues_aligned *admin_queues_aligned;
  2670. struct pqi_admin_queues *admin_queues;
  2671. alloc_length = sizeof(struct pqi_admin_queues_aligned) +
  2672. PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
  2673. ctrl_info->admin_queue_memory_base =
  2674. dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
  2675. alloc_length,
  2676. &ctrl_info->admin_queue_memory_base_dma_handle,
  2677. GFP_KERNEL);
  2678. if (!ctrl_info->admin_queue_memory_base)
  2679. return -ENOMEM;
  2680. ctrl_info->admin_queue_memory_length = alloc_length;
  2681. admin_queues = &ctrl_info->admin_queues;
  2682. admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base,
  2683. PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
  2684. admin_queues->iq_element_array =
  2685. &admin_queues_aligned->iq_element_array;
  2686. admin_queues->oq_element_array =
  2687. &admin_queues_aligned->oq_element_array;
  2688. admin_queues->iq_ci = &admin_queues_aligned->iq_ci;
  2689. admin_queues->oq_pi = &admin_queues_aligned->oq_pi;
  2690. admin_queues->iq_element_array_bus_addr =
  2691. ctrl_info->admin_queue_memory_base_dma_handle +
  2692. (admin_queues->iq_element_array -
  2693. ctrl_info->admin_queue_memory_base);
  2694. admin_queues->oq_element_array_bus_addr =
  2695. ctrl_info->admin_queue_memory_base_dma_handle +
  2696. (admin_queues->oq_element_array -
  2697. ctrl_info->admin_queue_memory_base);
  2698. admin_queues->iq_ci_bus_addr =
  2699. ctrl_info->admin_queue_memory_base_dma_handle +
  2700. ((void *)admin_queues->iq_ci -
  2701. ctrl_info->admin_queue_memory_base);
  2702. admin_queues->oq_pi_bus_addr =
  2703. ctrl_info->admin_queue_memory_base_dma_handle +
  2704. ((void *)admin_queues->oq_pi -
  2705. ctrl_info->admin_queue_memory_base);
  2706. return 0;
  2707. }
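/*
 * Creating the admin queue pair is a register-level handshake: program the
 * element-array and index addresses, write the element counts and MSI-X
 * message number, issue PQI_CREATE_ADMIN_QUEUE_PAIR, then poll
 * function_and_status_code until the controller reports PQI_STATUS_IDLE or
 * the timeout below expires. Only after that do the doorbell offset
 * registers read back valid offsets.
 */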
  2708. #define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES HZ
  2709. #define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1
  2710. static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
  2711. {
  2712. struct pqi_device_registers __iomem *pqi_registers;
  2713. struct pqi_admin_queues *admin_queues;
  2714. unsigned long timeout;
  2715. u8 status;
  2716. u32 reg;
  2717. pqi_registers = ctrl_info->pqi_registers;
  2718. admin_queues = &ctrl_info->admin_queues;
  2719. writeq((u64)admin_queues->iq_element_array_bus_addr,
  2720. &pqi_registers->admin_iq_element_array_addr);
  2721. writeq((u64)admin_queues->oq_element_array_bus_addr,
  2722. &pqi_registers->admin_oq_element_array_addr);
  2723. writeq((u64)admin_queues->iq_ci_bus_addr,
  2724. &pqi_registers->admin_iq_ci_addr);
  2725. writeq((u64)admin_queues->oq_pi_bus_addr,
  2726. &pqi_registers->admin_oq_pi_addr);
  2727. reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
  2728. (PQI_ADMIN_OQ_NUM_ELEMENTS) << 8 |
  2729. (admin_queues->int_msg_num << 16);
  2730. writel(reg, &pqi_registers->admin_iq_num_elements);
  2731. writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
  2732. &pqi_registers->function_and_status_code);
  2733. timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies;
  2734. while (1) {
  2735. status = readb(&pqi_registers->function_and_status_code);
  2736. if (status == PQI_STATUS_IDLE)
  2737. break;
  2738. if (time_after(jiffies, timeout))
  2739. return -ETIMEDOUT;
  2740. msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS);
  2741. }
  2742. /*
  2743. * The offset registers are not initialized to the correct
  2744. * offsets until *after* the create admin queue pair command
  2745. * completes successfully.
  2746. */
  2747. admin_queues->iq_pi = ctrl_info->iomem_base +
  2748. PQI_DEVICE_REGISTERS_OFFSET +
  2749. readq(&pqi_registers->admin_iq_pi_offset);
  2750. admin_queues->oq_ci = ctrl_info->iomem_base +
  2751. PQI_DEVICE_REGISTERS_OFFSET +
  2752. readq(&pqi_registers->admin_oq_ci_offset);
  2753. return 0;
  2754. }
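/*
 * Admin IUs occupy exactly one element: the request is copied into the
 * slot at the cached producer index, which then advances modulo
 * PQI_ADMIN_IQ_NUM_ELEMENTS. Responses are polled synchronously rather
 * than delivered by interrupt.
 */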
  2755. static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info,
  2756. struct pqi_general_admin_request *request)
  2757. {
  2758. struct pqi_admin_queues *admin_queues;
  2759. void *next_element;
  2760. pqi_index_t iq_pi;
  2761. admin_queues = &ctrl_info->admin_queues;
  2762. iq_pi = admin_queues->iq_pi_copy;
  2763. next_element = admin_queues->iq_element_array +
  2764. (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH);
  2765. memcpy(next_element, request, sizeof(*request));
  2766. iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS;
  2767. admin_queues->iq_pi_copy = iq_pi;
  2768. /*
  2769. * This write notifies the controller that an IU is available to be
  2770. * processed.
  2771. */
  2772. writel(iq_pi, admin_queues->iq_pi);
  2773. }
  2774. static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
  2775. struct pqi_general_admin_response *response)
  2776. {
  2777. struct pqi_admin_queues *admin_queues;
  2778. pqi_index_t oq_pi;
  2779. pqi_index_t oq_ci;
  2780. unsigned long timeout;
  2781. admin_queues = &ctrl_info->admin_queues;
  2782. oq_ci = admin_queues->oq_ci_copy;
  2783. timeout = (3 * HZ) + jiffies;
  2784. while (1) {
  2785. oq_pi = *admin_queues->oq_pi;
  2786. if (oq_pi != oq_ci)
  2787. break;
  2788. if (time_after(jiffies, timeout)) {
  2789. dev_err(&ctrl_info->pci_dev->dev,
  2790. "timed out waiting for admin response\n");
  2791. return -ETIMEDOUT;
  2792. }
  2793. usleep_range(1000, 2000);
  2794. }
  2795. memcpy(response, admin_queues->oq_element_array +
  2796. (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response));
  2797. oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS;
  2798. admin_queues->oq_ci_copy = oq_ci;
  2799. writel(oq_ci, admin_queues->oq_ci);
  2800. return 0;
  2801. }
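/*
 * pqi_start_io() drains the per-path request list under submit_lock. An IU
 * may span several IQ elements; when it would run past the end of the
 * element array, the copy is split into a tail piece and a piece at the
 * start of the array. With hypothetical numbers: num_elements_per_iq = 32,
 * iq_pi = 30, num_elements_needed = 4 copies two elements into slots 30-31
 * and the remaining bytes at slot 0, leaving iq_pi = 2. Requests that do
 * not fit (per pqi_num_elements_free()) stay on the list for a later pass.
 */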
  2802. static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
  2803. struct pqi_queue_group *queue_group, enum pqi_io_path path,
  2804. struct pqi_io_request *io_request)
  2805. {
  2806. struct pqi_io_request *next;
  2807. void *next_element;
  2808. pqi_index_t iq_pi;
  2809. pqi_index_t iq_ci;
  2810. size_t iu_length;
  2811. unsigned long flags;
  2812. unsigned int num_elements_needed;
  2813. unsigned int num_elements_to_end_of_queue;
  2814. size_t copy_count;
  2815. struct pqi_iu_header *request;
  2816. spin_lock_irqsave(&queue_group->submit_lock[path], flags);
  2817. if (io_request)
  2818. list_add_tail(&io_request->request_list_entry,
  2819. &queue_group->request_list[path]);
  2820. iq_pi = queue_group->iq_pi_copy[path];
  2821. list_for_each_entry_safe(io_request, next,
  2822. &queue_group->request_list[path], request_list_entry) {
  2823. request = io_request->iu;
  2824. iu_length = get_unaligned_le16(&request->iu_length) +
  2825. PQI_REQUEST_HEADER_LENGTH;
  2826. num_elements_needed =
  2827. DIV_ROUND_UP(iu_length,
  2828. PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
  2829. iq_ci = *queue_group->iq_ci[path];
  2830. if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci,
  2831. ctrl_info->num_elements_per_iq))
  2832. break;
  2833. put_unaligned_le16(queue_group->oq_id,
  2834. &request->response_queue_id);
  2835. next_element = queue_group->iq_element_array[path] +
  2836. (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
  2837. num_elements_to_end_of_queue =
  2838. ctrl_info->num_elements_per_iq - iq_pi;
  2839. if (num_elements_needed <= num_elements_to_end_of_queue) {
  2840. memcpy(next_element, request, iu_length);
  2841. } else {
  2842. copy_count = num_elements_to_end_of_queue *
  2843. PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
  2844. memcpy(next_element, request, copy_count);
  2845. memcpy(queue_group->iq_element_array[path],
  2846. (u8 *)request + copy_count,
  2847. iu_length - copy_count);
  2848. }
  2849. iq_pi = (iq_pi + num_elements_needed) %
  2850. ctrl_info->num_elements_per_iq;
  2851. list_del(&io_request->request_list_entry);
  2852. }
  2853. if (iq_pi != queue_group->iq_pi_copy[path]) {
  2854. queue_group->iq_pi_copy[path] = iq_pi;
  2855. /*
  2856. * This write notifies the controller that one or more IUs are
  2857. * available to be processed.
  2858. */
  2859. writel(iq_pi, queue_group->iq_pi[path]);
  2860. }
  2861. spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
  2862. }
  2863. static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
  2864. void *context)
  2865. {
  2866. struct completion *waiting = context;
  2867. complete(waiting);
  2868. }
  2869. static int pqi_submit_raid_request_synchronous_with_io_request(
  2870. struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
  2871. unsigned long timeout_msecs)
  2872. {
  2873. int rc = 0;
  2874. DECLARE_COMPLETION_ONSTACK(wait);
  2875. io_request->io_complete_callback = pqi_raid_synchronous_complete;
  2876. io_request->context = &wait;
  2877. pqi_start_io(ctrl_info,
  2878. &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
  2879. io_request);
  2880. if (timeout_msecs == NO_TIMEOUT) {
  2881. wait_for_completion_io(&wait);
  2882. } else {
  2883. if (!wait_for_completion_io_timeout(&wait,
  2884. msecs_to_jiffies(timeout_msecs))) {
  2885. dev_warn(&ctrl_info->pci_dev->dev,
  2886. "command timed out\n");
  2887. rc = -ETIMEDOUT;
  2888. }
  2889. }
  2890. return rc;
  2891. }
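/*
 * Synchronous RAID-path submissions are throttled by sync_request_sem.
 * When a timeout is supplied, the time spent blocked on the semaphore is
 * subtracted from the budget passed to the completion wait, so the
 * caller's deadline covers queuing as well as execution.
 */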
  2892. static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
  2893. struct pqi_iu_header *request, unsigned int flags,
  2894. struct pqi_raid_error_info *error_info, unsigned long timeout_msecs)
  2895. {
  2896. int rc;
  2897. struct pqi_io_request *io_request;
  2898. unsigned long start_jiffies;
  2899. unsigned long msecs_blocked;
  2900. size_t iu_length;
  2901. /*
  2902. * Note that specifying PQI_SYNC_FLAGS_INTERRUPTABLE and a timeout value
  2903. * are mutually exclusive.
  2904. */
  2905. if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) {
  2906. if (down_interruptible(&ctrl_info->sync_request_sem))
  2907. return -ERESTARTSYS;
  2908. } else {
  2909. if (timeout_msecs == NO_TIMEOUT) {
  2910. down(&ctrl_info->sync_request_sem);
  2911. } else {
  2912. start_jiffies = jiffies;
  2913. if (down_timeout(&ctrl_info->sync_request_sem,
  2914. msecs_to_jiffies(timeout_msecs)))
  2915. return -ETIMEDOUT;
  2916. msecs_blocked =
  2917. jiffies_to_msecs(jiffies - start_jiffies);
  2918. if (msecs_blocked >= timeout_msecs)
  2919. return -ETIMEDOUT;
  2920. timeout_msecs -= msecs_blocked;
  2921. }
  2922. }
  2923. io_request = pqi_alloc_io_request(ctrl_info);
  2924. put_unaligned_le16(io_request->index,
  2925. &(((struct pqi_raid_path_request *)request)->request_id));
  2926. if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO)
  2927. ((struct pqi_raid_path_request *)request)->error_index =
  2928. ((struct pqi_raid_path_request *)request)->request_id;
  2929. iu_length = get_unaligned_le16(&request->iu_length) +
  2930. PQI_REQUEST_HEADER_LENGTH;
  2931. memcpy(io_request->iu, request, iu_length);
  2932. rc = pqi_submit_raid_request_synchronous_with_io_request(ctrl_info,
  2933. io_request, timeout_msecs);
  2934. if (error_info) {
  2935. if (io_request->error_info)
  2936. memcpy(error_info, io_request->error_info,
  2937. sizeof(*error_info));
  2938. else
  2939. memset(error_info, 0, sizeof(*error_info));
  2940. } else if (rc == 0 && io_request->error_info) {
  2941. u8 scsi_status;
  2942. struct pqi_raid_error_info *raid_error_info;
  2943. raid_error_info = io_request->error_info;
  2944. scsi_status = raid_error_info->status;
  2945. if (scsi_status == SAM_STAT_CHECK_CONDITION &&
  2946. raid_error_info->data_out_result ==
  2947. PQI_DATA_IN_OUT_UNDERFLOW)
  2948. scsi_status = SAM_STAT_GOOD;
  2949. if (scsi_status != SAM_STAT_GOOD)
  2950. rc = -EIO;
  2951. }
  2952. pqi_free_io_request(io_request);
  2953. up(&ctrl_info->sync_request_sem);
  2954. return rc;
  2955. }
  2956. static int pqi_validate_admin_response(
  2957. struct pqi_general_admin_response *response, u8 expected_function_code)
  2958. {
  2959. if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN)
  2960. return -EINVAL;
  2961. if (get_unaligned_le16(&response->header.iu_length) !=
  2962. PQI_GENERAL_ADMIN_IU_LENGTH)
  2963. return -EINVAL;
  2964. if (response->function_code != expected_function_code)
  2965. return -EINVAL;
  2966. if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS)
  2967. return -EINVAL;
  2968. return 0;
  2969. }
  2970. static int pqi_submit_admin_request_synchronous(
  2971. struct pqi_ctrl_info *ctrl_info,
  2972. struct pqi_general_admin_request *request,
  2973. struct pqi_general_admin_response *response)
  2974. {
  2975. int rc;
  2976. pqi_submit_admin_request(ctrl_info, request);
  2977. rc = pqi_poll_for_admin_response(ctrl_info, response);
  2978. if (rc == 0)
  2979. rc = pqi_validate_admin_response(response,
  2980. request->function_code);
  2981. return rc;
  2982. }
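/*
 * REPORT DEVICE CAPABILITY retrieves the controller limits (queue counts,
 * element lengths expressed in 16-byte units, SOP IU-layer limits) that
 * the later queue-sizing code depends on.
 */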
  2983. static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
  2984. {
  2985. int rc;
  2986. struct pqi_general_admin_request request;
  2987. struct pqi_general_admin_response response;
  2988. struct pqi_device_capability *capability;
  2989. struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor;
  2990. capability = kmalloc(sizeof(*capability), GFP_KERNEL);
  2991. if (!capability)
  2992. return -ENOMEM;
  2993. memset(&request, 0, sizeof(request));
  2994. request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
  2995. put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
  2996. &request.header.iu_length);
  2997. request.function_code =
  2998. PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY;
  2999. put_unaligned_le32(sizeof(*capability),
  3000. &request.data.report_device_capability.buffer_length);
  3001. rc = pqi_map_single(ctrl_info->pci_dev,
  3002. &request.data.report_device_capability.sg_descriptor,
  3003. capability, sizeof(*capability),
  3004. PCI_DMA_FROMDEVICE);
  3005. if (rc)
  3006. goto out;
  3007. rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
  3008. &response);
  3009. pqi_pci_unmap(ctrl_info->pci_dev,
  3010. &request.data.report_device_capability.sg_descriptor, 1,
  3011. PCI_DMA_FROMDEVICE);
  3012. if (rc)
  3013. goto out;
  3014. if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) {
  3015. rc = -EIO;
  3016. goto out;
  3017. }
  3018. ctrl_info->max_inbound_queues =
  3019. get_unaligned_le16(&capability->max_inbound_queues);
  3020. ctrl_info->max_elements_per_iq =
  3021. get_unaligned_le16(&capability->max_elements_per_iq);
  3022. ctrl_info->max_iq_element_length =
  3023. get_unaligned_le16(&capability->max_iq_element_length)
  3024. * 16;
  3025. ctrl_info->max_outbound_queues =
  3026. get_unaligned_le16(&capability->max_outbound_queues);
  3027. ctrl_info->max_elements_per_oq =
  3028. get_unaligned_le16(&capability->max_elements_per_oq);
  3029. ctrl_info->max_oq_element_length =
  3030. get_unaligned_le16(&capability->max_oq_element_length)
  3031. * 16;
  3032. sop_iu_layer_descriptor =
  3033. &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP];
  3034. ctrl_info->max_inbound_iu_length_per_firmware =
  3035. get_unaligned_le16(
  3036. &sop_iu_layer_descriptor->max_inbound_iu_length);
  3037. ctrl_info->inbound_spanning_supported =
  3038. sop_iu_layer_descriptor->inbound_spanning_supported;
  3039. ctrl_info->outbound_spanning_supported =
  3040. sop_iu_layer_descriptor->outbound_spanning_supported;
  3041. out:
  3042. kfree(capability);
  3043. return rc;
  3044. }
  3045. static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info)
  3046. {
  3047. if (ctrl_info->max_iq_element_length <
  3048. PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
  3049. dev_err(&ctrl_info->pci_dev->dev,
  3050. "max. inbound queue element length of %d is less than the required length of %d\n",
  3051. ctrl_info->max_iq_element_length,
  3052. PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
  3053. return -EINVAL;
  3054. }
  3055. if (ctrl_info->max_oq_element_length <
  3056. PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) {
  3057. dev_err(&ctrl_info->pci_dev->dev,
  3058. "max. outbound queue element length of %d is less than the required length of %d\n",
  3059. ctrl_info->max_oq_element_length,
  3060. PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
  3061. return -EINVAL;
  3062. }
  3063. if (ctrl_info->max_inbound_iu_length_per_firmware <
  3064. PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
  3065. dev_err(&ctrl_info->pci_dev->dev,
  3066. "max. inbound IU length of %u is less than the min. required length of %d\n",
  3067. ctrl_info->max_inbound_iu_length_per_firmware,
  3068. PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
  3069. return -EINVAL;
  3070. }
  3071. if (!ctrl_info->inbound_spanning_supported) {
  3072. dev_err(&ctrl_info->pci_dev->dev,
  3073. "the controller does not support inbound spanning\n");
  3074. return -EINVAL;
  3075. }
  3076. if (ctrl_info->outbound_spanning_supported) {
  3077. dev_err(&ctrl_info->pci_dev->dev,
  3078. "the controller supports outbound spanning but this driver does not\n");
  3079. return -EINVAL;
  3080. }
  3081. return 0;
  3082. }
  3083. static int pqi_delete_operational_queue(struct pqi_ctrl_info *ctrl_info,
  3084. bool inbound_queue, u16 queue_id)
  3085. {
  3086. struct pqi_general_admin_request request;
  3087. struct pqi_general_admin_response response;
  3088. memset(&request, 0, sizeof(request));
  3089. request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
  3090. put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
  3091. &request.header.iu_length);
  3092. if (inbound_queue)
  3093. request.function_code =
  3094. PQI_GENERAL_ADMIN_FUNCTION_DELETE_IQ;
  3095. else
  3096. request.function_code =
  3097. PQI_GENERAL_ADMIN_FUNCTION_DELETE_OQ;
  3098. put_unaligned_le16(queue_id,
  3099. &request.data.delete_operational_queue.queue_id);
  3100. return pqi_submit_admin_request_synchronous(ctrl_info, &request,
  3101. &response);
  3102. }
  3103. static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info)
  3104. {
  3105. int rc;
  3106. struct pqi_event_queue *event_queue;
  3107. struct pqi_general_admin_request request;
  3108. struct pqi_general_admin_response response;
  3109. event_queue = &ctrl_info->event_queue;
  3110. /*
3111. * Create OQ (Outbound Queue - device to host queue) dedicated
3112. * to events.
  3113. */
  3114. memset(&request, 0, sizeof(request));
  3115. request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
  3116. put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
  3117. &request.header.iu_length);
  3118. request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
  3119. put_unaligned_le16(event_queue->oq_id,
  3120. &request.data.create_operational_oq.queue_id);
  3121. put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr,
  3122. &request.data.create_operational_oq.element_array_addr);
  3123. put_unaligned_le64((u64)event_queue->oq_pi_bus_addr,
  3124. &request.data.create_operational_oq.pi_addr);
  3125. put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS,
  3126. &request.data.create_operational_oq.num_elements);
  3127. put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16,
  3128. &request.data.create_operational_oq.element_length);
  3129. request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
  3130. put_unaligned_le16(event_queue->int_msg_num,
  3131. &request.data.create_operational_oq.int_msg_num);
  3132. rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
  3133. &response);
  3134. if (rc)
  3135. return rc;
  3136. event_queue->oq_ci = ctrl_info->iomem_base +
  3137. PQI_DEVICE_REGISTERS_OFFSET +
  3138. get_unaligned_le64(
  3139. &response.data.create_operational_oq.oq_ci_offset);
  3140. return 0;
  3141. }
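/*
 * A queue group is built with four admin commands: create the RAID-path
 * IQ, create the AIO-path IQ, switch the second IQ's vendor-specific
 * property to PQI_IQ_PROPERTY_IS_AIO_QUEUE, then create the shared OQ.
 * Failures unwind by deleting whichever inbound queues already exist.
 */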
  3142. static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info)
  3143. {
  3144. unsigned int i;
  3145. int rc;
  3146. struct pqi_queue_group *queue_group;
  3147. struct pqi_general_admin_request request;
  3148. struct pqi_general_admin_response response;
  3149. i = ctrl_info->num_active_queue_groups;
  3150. queue_group = &ctrl_info->queue_groups[i];
  3151. /*
  3152. * Create IQ (Inbound Queue - host to device queue) for
  3153. * RAID path.
  3154. */
  3155. memset(&request, 0, sizeof(request));
  3156. request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
  3157. put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
  3158. &request.header.iu_length);
  3159. request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
  3160. put_unaligned_le16(queue_group->iq_id[RAID_PATH],
  3161. &request.data.create_operational_iq.queue_id);
  3162. put_unaligned_le64(
  3163. (u64)queue_group->iq_element_array_bus_addr[RAID_PATH],
  3164. &request.data.create_operational_iq.element_array_addr);
  3165. put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH],
  3166. &request.data.create_operational_iq.ci_addr);
  3167. put_unaligned_le16(ctrl_info->num_elements_per_iq,
  3168. &request.data.create_operational_iq.num_elements);
  3169. put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
  3170. &request.data.create_operational_iq.element_length);
  3171. request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
  3172. rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
  3173. &response);
  3174. if (rc) {
  3175. dev_err(&ctrl_info->pci_dev->dev,
  3176. "error creating inbound RAID queue\n");
  3177. return rc;
  3178. }
  3179. queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base +
  3180. PQI_DEVICE_REGISTERS_OFFSET +
  3181. get_unaligned_le64(
  3182. &response.data.create_operational_iq.iq_pi_offset);
  3183. /*
  3184. * Create IQ (Inbound Queue - host to device queue) for
  3185. * Advanced I/O (AIO) path.
  3186. */
  3187. memset(&request, 0, sizeof(request));
  3188. request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
  3189. put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
  3190. &request.header.iu_length);
  3191. request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
  3192. put_unaligned_le16(queue_group->iq_id[AIO_PATH],
  3193. &request.data.create_operational_iq.queue_id);
  3194. put_unaligned_le64((u64)queue_group->
  3195. iq_element_array_bus_addr[AIO_PATH],
  3196. &request.data.create_operational_iq.element_array_addr);
  3197. put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH],
  3198. &request.data.create_operational_iq.ci_addr);
  3199. put_unaligned_le16(ctrl_info->num_elements_per_iq,
  3200. &request.data.create_operational_iq.num_elements);
  3201. put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
  3202. &request.data.create_operational_iq.element_length);
  3203. request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
  3204. rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
  3205. &response);
  3206. if (rc) {
  3207. dev_err(&ctrl_info->pci_dev->dev,
  3208. "error creating inbound AIO queue\n");
  3209. goto delete_inbound_queue_raid;
  3210. }
  3211. queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base +
  3212. PQI_DEVICE_REGISTERS_OFFSET +
  3213. get_unaligned_le64(
  3214. &response.data.create_operational_iq.iq_pi_offset);
  3215. /*
  3216. * Designate the 2nd IQ as the AIO path. By default, all IQs are
  3217. * assumed to be for RAID path I/O unless we change the queue's
  3218. * property.
  3219. */
  3220. memset(&request, 0, sizeof(request));
  3221. request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
  3222. put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
  3223. &request.header.iu_length);
  3224. request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY;
  3225. put_unaligned_le16(queue_group->iq_id[AIO_PATH],
  3226. &request.data.change_operational_iq_properties.queue_id);
  3227. put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE,
  3228. &request.data.change_operational_iq_properties.vendor_specific);
  3229. rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
  3230. &response);
  3231. if (rc) {
  3232. dev_err(&ctrl_info->pci_dev->dev,
  3233. "error changing queue property\n");
  3234. goto delete_inbound_queue_aio;
  3235. }
  3236. /*
  3237. * Create OQ (Outbound Queue - device to host queue).
  3238. */
  3239. memset(&request, 0, sizeof(request));
  3240. request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
  3241. put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
  3242. &request.header.iu_length);
  3243. request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
  3244. put_unaligned_le16(queue_group->oq_id,
  3245. &request.data.create_operational_oq.queue_id);
  3246. put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr,
  3247. &request.data.create_operational_oq.element_array_addr);
  3248. put_unaligned_le64((u64)queue_group->oq_pi_bus_addr,
  3249. &request.data.create_operational_oq.pi_addr);
  3250. put_unaligned_le16(ctrl_info->num_elements_per_oq,
  3251. &request.data.create_operational_oq.num_elements);
  3252. put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16,
  3253. &request.data.create_operational_oq.element_length);
  3254. request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
  3255. put_unaligned_le16(queue_group->int_msg_num,
  3256. &request.data.create_operational_oq.int_msg_num);
  3257. rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
  3258. &response);
  3259. if (rc) {
  3260. dev_err(&ctrl_info->pci_dev->dev,
  3261. "error creating outbound queue\n");
  3262. goto delete_inbound_queue_aio;
  3263. }
  3264. queue_group->oq_ci = ctrl_info->iomem_base +
  3265. PQI_DEVICE_REGISTERS_OFFSET +
  3266. get_unaligned_le64(
  3267. &response.data.create_operational_oq.oq_ci_offset);
  3268. ctrl_info->num_active_queue_groups++;
  3269. return 0;
  3270. delete_inbound_queue_aio:
  3271. pqi_delete_operational_queue(ctrl_info, true,
  3272. queue_group->iq_id[AIO_PATH]);
  3273. delete_inbound_queue_raid:
  3274. pqi_delete_operational_queue(ctrl_info, true,
  3275. queue_group->iq_id[RAID_PATH]);
  3276. return rc;
  3277. }
  3278. static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
  3279. {
  3280. int rc;
  3281. unsigned int i;
  3282. rc = pqi_create_event_queue(ctrl_info);
  3283. if (rc) {
  3284. dev_err(&ctrl_info->pci_dev->dev,
  3285. "error creating event queue\n");
  3286. return rc;
  3287. }
  3288. for (i = 0; i < ctrl_info->num_queue_groups; i++) {
  3289. rc = pqi_create_queue_group(ctrl_info);
  3290. if (rc) {
  3291. dev_err(&ctrl_info->pci_dev->dev,
  3292. "error creating queue group number %u/%u\n",
  3293. i, ctrl_info->num_queue_groups);
  3294. return rc;
  3295. }
  3296. }
  3297. return 0;
  3298. }
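/*
 * Event configuration is a read-modify-write cycle: report the current
 * configuration into a host buffer, retarget every descriptor's oq_id at
 * the dedicated event queue, then write the buffer back with the
 * SET_VENDOR_EVENT_CONFIG IU.
 */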
  3299. #define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \
  3300. (offsetof(struct pqi_event_config, descriptors) + \
  3301. (PQI_MAX_EVENT_DESCRIPTORS * sizeof(struct pqi_event_descriptor)))
  3302. static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info)
  3303. {
  3304. int rc;
  3305. unsigned int i;
  3306. struct pqi_event_config *event_config;
  3307. struct pqi_general_management_request request;
  3308. event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
  3309. GFP_KERNEL);
  3310. if (!event_config)
  3311. return -ENOMEM;
  3312. memset(&request, 0, sizeof(request));
  3313. request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
  3314. put_unaligned_le16(offsetof(struct pqi_general_management_request,
  3315. data.report_event_configuration.sg_descriptors[1]) -
  3316. PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
  3317. put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
  3318. &request.data.report_event_configuration.buffer_length);
  3319. rc = pqi_map_single(ctrl_info->pci_dev,
  3320. request.data.report_event_configuration.sg_descriptors,
  3321. event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
  3322. PCI_DMA_FROMDEVICE);
  3323. if (rc)
  3324. goto out;
  3325. rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
  3326. 0, NULL, NO_TIMEOUT);
  3327. pqi_pci_unmap(ctrl_info->pci_dev,
  3328. request.data.report_event_configuration.sg_descriptors, 1,
  3329. PCI_DMA_FROMDEVICE);
  3330. if (rc)
  3331. goto out;
  3332. for (i = 0; i < event_config->num_event_descriptors; i++)
  3333. put_unaligned_le16(ctrl_info->event_queue.oq_id,
  3334. &event_config->descriptors[i].oq_id);
  3335. memset(&request, 0, sizeof(request));
  3336. request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG;
  3337. put_unaligned_le16(offsetof(struct pqi_general_management_request,
  3338. data.report_event_configuration.sg_descriptors[1]) -
  3339. PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
  3340. put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
  3341. &request.data.report_event_configuration.buffer_length);
  3342. rc = pqi_map_single(ctrl_info->pci_dev,
  3343. request.data.report_event_configuration.sg_descriptors,
  3344. event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
  3345. PCI_DMA_TODEVICE);
  3346. if (rc)
  3347. goto out;
  3348. rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
  3349. NULL, NO_TIMEOUT);
  3350. pqi_pci_unmap(ctrl_info->pci_dev,
  3351. request.data.report_event_configuration.sg_descriptors, 1,
  3352. PCI_DMA_TODEVICE);
  3353. out:
  3354. kfree(event_config);
  3355. return rc;
  3356. }
  3357. static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
  3358. {
  3359. unsigned int i;
  3360. struct device *dev;
  3361. size_t sg_chain_buffer_length;
  3362. struct pqi_io_request *io_request;
  3363. if (!ctrl_info->io_request_pool)
  3364. return;
  3365. dev = &ctrl_info->pci_dev->dev;
  3366. sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
  3367. io_request = ctrl_info->io_request_pool;
  3368. for (i = 0; i < ctrl_info->max_io_slots; i++) {
  3369. kfree(io_request->iu);
  3370. if (!io_request->sg_chain_buffer)
  3371. break;
  3372. dma_free_coherent(dev, sg_chain_buffer_length,
  3373. io_request->sg_chain_buffer,
  3374. io_request->sg_chain_buffer_dma_handle);
  3375. io_request++;
  3376. }
  3377. kfree(ctrl_info->io_request_pool);
  3378. ctrl_info->io_request_pool = NULL;
  3379. }
  3380. static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
  3381. {
  3382. ctrl_info->error_buffer = dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
  3383. ctrl_info->error_buffer_length,
  3384. &ctrl_info->error_buffer_dma_handle, GFP_KERNEL);
  3385. if (!ctrl_info->error_buffer)
  3386. return -ENOMEM;
  3387. return 0;
  3388. }
  3389. static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
  3390. {
  3391. unsigned int i;
  3392. void *sg_chain_buffer;
  3393. size_t sg_chain_buffer_length;
  3394. dma_addr_t sg_chain_buffer_dma_handle;
  3395. struct device *dev;
  3396. struct pqi_io_request *io_request;
  3397. ctrl_info->io_request_pool = kzalloc(ctrl_info->max_io_slots *
  3398. sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);
  3399. if (!ctrl_info->io_request_pool) {
  3400. dev_err(&ctrl_info->pci_dev->dev,
  3401. "failed to allocate I/O request pool\n");
  3402. goto error;
  3403. }
  3404. dev = &ctrl_info->pci_dev->dev;
  3405. sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
  3406. io_request = ctrl_info->io_request_pool;
  3407. for (i = 0; i < ctrl_info->max_io_slots; i++) {
  3408. io_request->iu =
  3409. kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);
  3410. if (!io_request->iu) {
  3411. dev_err(&ctrl_info->pci_dev->dev,
  3412. "failed to allocate IU buffers\n");
  3413. goto error;
  3414. }
  3415. sg_chain_buffer = dma_alloc_coherent(dev,
  3416. sg_chain_buffer_length, &sg_chain_buffer_dma_handle,
  3417. GFP_KERNEL);
  3418. if (!sg_chain_buffer) {
  3419. dev_err(&ctrl_info->pci_dev->dev,
  3420. "failed to allocate PQI scatter-gather chain buffers\n");
  3421. goto error;
  3422. }
  3423. io_request->index = i;
  3424. io_request->sg_chain_buffer = sg_chain_buffer;
  3425. io_request->sg_chain_buffer_dma_handle =
  3426. sg_chain_buffer_dma_handle;
  3427. io_request++;
  3428. }
  3429. return 0;
  3430. error:
  3431. pqi_free_all_io_requests(ctrl_info);
  3432. return -ENOMEM;
  3433. }
  3434. /*
  3435. * Calculate required resources that are sized based on max. outstanding
  3436. * requests and max. transfer size.
  3437. */
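/*
 * Worked example with illustrative values: assuming 4 KiB pages and
 * max_sg_entries clamped to 65, max_transfer_size becomes
 * (65 - 1) * 4096 = 256 KiB, so max_sectors = 512 and the per-request
 * chain buffer holds 65 pqi_sg_descriptor entries.
 */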
  3438. static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
  3439. {
  3440. u32 max_transfer_size;
  3441. u32 max_sg_entries;
  3442. ctrl_info->scsi_ml_can_queue =
  3443. ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS;
  3444. ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests;
  3445. ctrl_info->error_buffer_length =
  3446. ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;
  3447. max_transfer_size =
  3448. min(ctrl_info->max_transfer_size, PQI_MAX_TRANSFER_SIZE);
  3449. max_sg_entries = max_transfer_size / PAGE_SIZE;
  3450. /* +1 to cover when the buffer is not page-aligned. */
  3451. max_sg_entries++;
  3452. max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries);
  3453. max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE;
  3454. ctrl_info->sg_chain_buffer_length =
  3455. max_sg_entries * sizeof(struct pqi_sg_descriptor);
  3456. ctrl_info->sg_tablesize = max_sg_entries;
  3457. ctrl_info->max_sectors = max_transfer_size / 512;
  3458. }
  3459. static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
  3460. {
  3461. int num_cpus;
  3462. int max_queue_groups;
  3463. int num_queue_groups;
  3464. u16 num_elements_per_iq;
  3465. u16 num_elements_per_oq;
  3466. max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
  3467. ctrl_info->max_outbound_queues - 1);
  3468. max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);
  3469. num_cpus = num_online_cpus();
  3470. num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
  3471. num_queue_groups = min(num_queue_groups, max_queue_groups);
  3472. ctrl_info->num_queue_groups = num_queue_groups;
  3473. /*
  3474. * Make sure that the max. inbound IU length is an even multiple
  3475. * of our inbound element length.
  3476. */
  3477. ctrl_info->max_inbound_iu_length =
  3478. (ctrl_info->max_inbound_iu_length_per_firmware /
  3479. PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) *
  3480. PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
  3481. num_elements_per_iq =
  3482. (ctrl_info->max_inbound_iu_length /
  3483. PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
  3484. /* Add one because one element in each queue is unusable. */
  3485. num_elements_per_iq++;
  3486. num_elements_per_iq = min(num_elements_per_iq,
  3487. ctrl_info->max_elements_per_iq);
  3488. num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1;
  3489. num_elements_per_oq = min(num_elements_per_oq,
  3490. ctrl_info->max_elements_per_oq);
  3491. ctrl_info->num_elements_per_iq = num_elements_per_iq;
  3492. ctrl_info->num_elements_per_oq = num_elements_per_oq;
  3493. ctrl_info->max_sg_per_iu =
  3494. ((ctrl_info->max_inbound_iu_length -
  3495. PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
  3496. sizeof(struct pqi_sg_descriptor)) +
  3497. PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
  3498. }
  3499. static inline void pqi_set_sg_descriptor(
  3500. struct pqi_sg_descriptor *sg_descriptor, struct scatterlist *sg)
  3501. {
  3502. u64 address = (u64)sg_dma_address(sg);
  3503. unsigned int length = sg_dma_len(sg);
  3504. put_unaligned_le64(address, &sg_descriptor->address);
  3505. put_unaligned_le32(length, &sg_descriptor->length);
  3506. put_unaligned_le32(0, &sg_descriptor->flags);
  3507. }
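/*
 * SG-list building works the same way for both paths: up to
 * max_sg_per_iu - 1 descriptors are embedded in the IU; if more are
 * needed, the last embedded slot becomes a CISS_SG_CHAIN descriptor that
 * points at the request's preallocated chain buffer and the remaining
 * entries are written there. The final descriptor is flagged CISS_SG_LAST.
 */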
  3508. static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
  3509. struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
  3510. struct pqi_io_request *io_request)
  3511. {
  3512. int i;
  3513. u16 iu_length;
  3514. int sg_count;
  3515. bool chained;
  3516. unsigned int num_sg_in_iu;
  3517. unsigned int max_sg_per_iu;
  3518. struct scatterlist *sg;
  3519. struct pqi_sg_descriptor *sg_descriptor;
  3520. sg_count = scsi_dma_map(scmd);
  3521. if (sg_count < 0)
  3522. return sg_count;
  3523. iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
  3524. PQI_REQUEST_HEADER_LENGTH;
  3525. if (sg_count == 0)
  3526. goto out;
  3527. sg = scsi_sglist(scmd);
  3528. sg_descriptor = request->sg_descriptors;
  3529. max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
  3530. chained = false;
  3531. num_sg_in_iu = 0;
  3532. i = 0;
  3533. while (1) {
  3534. pqi_set_sg_descriptor(sg_descriptor, sg);
  3535. if (!chained)
  3536. num_sg_in_iu++;
  3537. i++;
  3538. if (i == sg_count)
  3539. break;
  3540. sg_descriptor++;
  3541. if (i == max_sg_per_iu) {
  3542. put_unaligned_le64(
  3543. (u64)io_request->sg_chain_buffer_dma_handle,
  3544. &sg_descriptor->address);
  3545. put_unaligned_le32((sg_count - num_sg_in_iu)
  3546. * sizeof(*sg_descriptor),
  3547. &sg_descriptor->length);
  3548. put_unaligned_le32(CISS_SG_CHAIN,
  3549. &sg_descriptor->flags);
  3550. chained = true;
  3551. num_sg_in_iu++;
  3552. sg_descriptor = io_request->sg_chain_buffer;
  3553. }
  3554. sg = sg_next(sg);
  3555. }
  3556. put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
  3557. request->partial = chained;
  3558. iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
  3559. out:
  3560. put_unaligned_le16(iu_length, &request->header.iu_length);
  3561. return 0;
  3562. }
  3563. static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
  3564. struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
  3565. struct pqi_io_request *io_request)
  3566. {
  3567. int i;
  3568. u16 iu_length;
  3569. int sg_count;
  3570. bool chained;
  3571. unsigned int num_sg_in_iu;
  3572. unsigned int max_sg_per_iu;
  3573. struct scatterlist *sg;
  3574. struct pqi_sg_descriptor *sg_descriptor;
  3575. sg_count = scsi_dma_map(scmd);
  3576. if (sg_count < 0)
  3577. return sg_count;
  3578. iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) -
  3579. PQI_REQUEST_HEADER_LENGTH;
  3580. num_sg_in_iu = 0;
  3581. if (sg_count == 0)
  3582. goto out;
  3583. sg = scsi_sglist(scmd);
  3584. sg_descriptor = request->sg_descriptors;
  3585. max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
  3586. chained = false;
  3587. i = 0;
  3588. while (1) {
  3589. pqi_set_sg_descriptor(sg_descriptor, sg);
  3590. if (!chained)
  3591. num_sg_in_iu++;
  3592. i++;
  3593. if (i == sg_count)
  3594. break;
  3595. sg_descriptor++;
  3596. if (i == max_sg_per_iu) {
  3597. put_unaligned_le64(
  3598. (u64)io_request->sg_chain_buffer_dma_handle,
  3599. &sg_descriptor->address);
  3600. put_unaligned_le32((sg_count - num_sg_in_iu)
  3601. * sizeof(*sg_descriptor),
  3602. &sg_descriptor->length);
  3603. put_unaligned_le32(CISS_SG_CHAIN,
  3604. &sg_descriptor->flags);
  3605. chained = true;
  3606. num_sg_in_iu++;
  3607. sg_descriptor = io_request->sg_chain_buffer;
  3608. }
  3609. sg = sg_next(sg);
  3610. }
  3611. put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
  3612. request->partial = chained;
  3613. iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
  3614. out:
  3615. put_unaligned_le16(iu_length, &request->header.iu_length);
  3616. request->num_sg_descriptors = num_sg_in_iu;
  3617. return 0;
  3618. }
  3619. static void pqi_raid_io_complete(struct pqi_io_request *io_request,
  3620. void *context)
  3621. {
  3622. struct scsi_cmnd *scmd;
  3623. scmd = io_request->scmd;
  3624. pqi_free_io_request(io_request);
  3625. scsi_dma_unmap(scmd);
  3626. pqi_scsi_done(scmd);
  3627. }
  3628. static int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
  3629. struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
  3630. struct pqi_queue_group *queue_group)
  3631. {
  3632. int rc;
  3633. size_t cdb_length;
  3634. struct pqi_io_request *io_request;
  3635. struct pqi_raid_path_request *request;
  3636. io_request = pqi_alloc_io_request(ctrl_info);
  3637. io_request->io_complete_callback = pqi_raid_io_complete;
  3638. io_request->scmd = scmd;
  3639. scmd->host_scribble = (unsigned char *)io_request;
  3640. request = io_request->iu;
  3641. memset(request, 0,
  3642. offsetof(struct pqi_raid_path_request, sg_descriptors));
  3643. request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
  3644. put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
  3645. request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
  3646. put_unaligned_le16(io_request->index, &request->request_id);
  3647. request->error_index = request->request_id;
  3648. memcpy(request->lun_number, device->scsi3addr,
  3649. sizeof(request->lun_number));
  3650. cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
  3651. memcpy(request->cdb, scmd->cmnd, cdb_length);
  3652. switch (cdb_length) {
  3653. case 6:
  3654. case 10:
  3655. case 12:
  3656. case 16:
  3657. /* No bytes in the Additional CDB bytes field */
  3658. request->additional_cdb_bytes_usage =
  3659. SOP_ADDITIONAL_CDB_BYTES_0;
  3660. break;
  3661. case 20:
3662. /* 4 bytes in the Additional CDB bytes field */
  3663. request->additional_cdb_bytes_usage =
  3664. SOP_ADDITIONAL_CDB_BYTES_4;
  3665. break;
  3666. case 24:
3667. /* 8 bytes in the Additional CDB bytes field */
  3668. request->additional_cdb_bytes_usage =
  3669. SOP_ADDITIONAL_CDB_BYTES_8;
  3670. break;
  3671. case 28:
3672. /* 12 bytes in the Additional CDB bytes field */
  3673. request->additional_cdb_bytes_usage =
  3674. SOP_ADDITIONAL_CDB_BYTES_12;
  3675. break;
  3676. case 32:
  3677. default:
3678. /* 16 bytes in the Additional CDB bytes field */
  3679. request->additional_cdb_bytes_usage =
  3680. SOP_ADDITIONAL_CDB_BYTES_16;
  3681. break;
  3682. }
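/*
 * data_direction is expressed from the controller's point of view, so a
 * host write (DMA_TO_DEVICE) maps to SOP_READ_FLAG (the controller reads
 * the buffer) and a host read maps to SOP_WRITE_FLAG.
 */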
  3683. switch (scmd->sc_data_direction) {
  3684. case DMA_TO_DEVICE:
  3685. request->data_direction = SOP_READ_FLAG;
  3686. break;
  3687. case DMA_FROM_DEVICE:
  3688. request->data_direction = SOP_WRITE_FLAG;
  3689. break;
  3690. case DMA_NONE:
  3691. request->data_direction = SOP_NO_DIRECTION_FLAG;
  3692. break;
  3693. case DMA_BIDIRECTIONAL:
  3694. request->data_direction = SOP_BIDIRECTIONAL;
  3695. break;
  3696. default:
  3697. dev_err(&ctrl_info->pci_dev->dev,
  3698. "unknown data direction: %d\n",
  3699. scmd->sc_data_direction);
  3700. WARN_ON(scmd->sc_data_direction);
  3701. break;
  3702. }
  3703. rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request);
  3704. if (rc) {
  3705. pqi_free_io_request(io_request);
  3706. return SCSI_MLQUEUE_HOST_BUSY;
  3707. }
  3708. pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);
  3709. return 0;
  3710. }
  3711. static void pqi_aio_io_complete(struct pqi_io_request *io_request,
  3712. void *context)
  3713. {
  3714. struct scsi_cmnd *scmd;
  3715. scmd = io_request->scmd;
  3716. scsi_dma_unmap(scmd);
  3717. if (io_request->status == -EAGAIN)
  3718. set_host_byte(scmd, DID_IMM_RETRY);
  3719. pqi_free_io_request(io_request);
  3720. pqi_scsi_done(scmd);
  3721. }
  3722. static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
  3723. struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
  3724. struct pqi_queue_group *queue_group)
  3725. {
  3726. return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
  3727. scmd->cmnd, scmd->cmd_len, queue_group, NULL);
  3728. }
  3729. static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
  3730. struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
  3731. unsigned int cdb_length, struct pqi_queue_group *queue_group,
  3732. struct pqi_encryption_info *encryption_info)
  3733. {
  3734. int rc;
  3735. struct pqi_io_request *io_request;
  3736. struct pqi_aio_path_request *request;
  3737. io_request = pqi_alloc_io_request(ctrl_info);
  3738. io_request->io_complete_callback = pqi_aio_io_complete;
  3739. io_request->scmd = scmd;
  3740. scmd->host_scribble = (unsigned char *)io_request;
  3741. request = io_request->iu;
  3742. memset(request, 0,
3743. offsetof(struct pqi_aio_path_request, sg_descriptors));
  3744. request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
  3745. put_unaligned_le32(aio_handle, &request->nexus_id);
  3746. put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
  3747. request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
  3748. put_unaligned_le16(io_request->index, &request->request_id);
  3749. request->error_index = request->request_id;
  3750. if (cdb_length > sizeof(request->cdb))
  3751. cdb_length = sizeof(request->cdb);
  3752. request->cdb_length = cdb_length;
  3753. memcpy(request->cdb, cdb, cdb_length);
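/* Same controller-relative data-direction mapping as the RAID path. */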
  3754. switch (scmd->sc_data_direction) {
  3755. case DMA_TO_DEVICE:
  3756. request->data_direction = SOP_READ_FLAG;
  3757. break;
  3758. case DMA_FROM_DEVICE:
  3759. request->data_direction = SOP_WRITE_FLAG;
  3760. break;
  3761. case DMA_NONE:
  3762. request->data_direction = SOP_NO_DIRECTION_FLAG;
  3763. break;
  3764. case DMA_BIDIRECTIONAL:
  3765. request->data_direction = SOP_BIDIRECTIONAL;
  3766. break;
  3767. default:
  3768. dev_err(&ctrl_info->pci_dev->dev,
  3769. "unknown data direction: %d\n",
  3770. scmd->sc_data_direction);
  3771. WARN_ON(scmd->sc_data_direction);
  3772. break;
  3773. }
  3774. if (encryption_info) {
  3775. request->encryption_enable = true;
  3776. put_unaligned_le16(encryption_info->data_encryption_key_index,
  3777. &request->data_encryption_key_index);
  3778. put_unaligned_le32(encryption_info->encrypt_tweak_lower,
  3779. &request->encrypt_tweak_lower);
  3780. put_unaligned_le32(encryption_info->encrypt_tweak_upper,
  3781. &request->encrypt_tweak_upper);
  3782. }
  3783. rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request);
  3784. if (rc) {
  3785. pqi_free_io_request(io_request);
  3786. return SCSI_MLQUEUE_HOST_BUSY;
  3787. }
  3788. pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
  3789. return 0;
  3790. }
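/*
 * Main .queuecommand entry point.  Commands are completed immediately
 * with DID_NO_CONNECT if the controller has been taken offline.  The
 * hardware queue is derived from the blk-mq unique tag; for logical
 * devices with offload enabled, a RAID bypass (AIO) submission is
 * attempted first and the command falls back to the RAID path if the
 * bypass attempt is not accepted.  Physical devices use the AIO path
 * when aio_enabled is set, otherwise the RAID path.
 */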
  3791. static int pqi_scsi_queue_command(struct Scsi_Host *shost,
  3792. struct scsi_cmnd *scmd)
  3793. {
  3794. int rc;
  3795. struct pqi_ctrl_info *ctrl_info;
  3796. struct pqi_scsi_dev *device;
  3797. u16 hwq;
  3798. struct pqi_queue_group *queue_group;
  3799. bool raid_bypassed;
  3800. device = scmd->device->hostdata;
  3801. ctrl_info = shost_to_hba(shost);
  3802. if (pqi_ctrl_offline(ctrl_info)) {
  3803. set_host_byte(scmd, DID_NO_CONNECT);
  3804. pqi_scsi_done(scmd);
  3805. return 0;
  3806. }
3807. /*
3808. * This is necessary because the SCSI midlayer (SML) doesn't zero out
3809. * this field during error recovery.
3810. */

  3811. scmd->result = 0;
  3812. hwq = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request));
  3813. if (hwq >= ctrl_info->num_queue_groups)
  3814. hwq = 0;
  3815. queue_group = &ctrl_info->queue_groups[hwq];
  3816. if (pqi_is_logical_device(device)) {
  3817. raid_bypassed = false;
  3818. if (device->offload_enabled &&
  3819. scmd->request->cmd_type == REQ_TYPE_FS) {
  3820. rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device,
  3821. scmd, queue_group);
  3822. if (rc == 0 ||
  3823. rc == SCSI_MLQUEUE_HOST_BUSY ||
  3824. rc == SAM_STAT_CHECK_CONDITION ||
  3825. rc == SAM_STAT_RESERVATION_CONFLICT)
  3826. raid_bypassed = true;
  3827. }
  3828. if (!raid_bypassed)
  3829. rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
  3830. queue_group);
  3831. } else {
  3832. if (device->aio_enabled)
  3833. rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd,
  3834. queue_group);
  3835. else
  3836. rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
  3837. queue_group);
  3838. }
  3839. return rc;
  3840. }
  3841. static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
  3842. void *context)
  3843. {
  3844. struct completion *waiting = context;
  3845. complete(waiting);
  3846. }
  3847. #define PQI_LUN_RESET_TIMEOUT_SECS 10
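/*
 * Wait for a LUN reset TMF to complete.  The wait is broken into
 * PQI_LUN_RESET_TIMEOUT_SECS chunks so that controller health can be
 * re-checked between chunks; if the controller goes offline the wait
 * is abandoned with -ETIMEDOUT, otherwise a progress message is logged
 * and the wait continues.
 */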
  3848. static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
  3849. struct pqi_scsi_dev *device, struct completion *wait)
  3850. {
  3851. int rc;
  3852. unsigned int wait_secs = 0;
  3853. while (1) {
  3854. if (wait_for_completion_io_timeout(wait,
  3855. PQI_LUN_RESET_TIMEOUT_SECS * HZ)) {
  3856. rc = 0;
  3857. break;
  3858. }
  3859. pqi_check_ctrl_health(ctrl_info);
  3860. if (pqi_ctrl_offline(ctrl_info)) {
  3861. rc = -ETIMEDOUT;
  3862. break;
  3863. }
  3864. wait_secs += PQI_LUN_RESET_TIMEOUT_SECS;
  3865. dev_err(&ctrl_info->pci_dev->dev,
  3866. "resetting scsi %d:%d:%d:%d - waiting %u seconds\n",
  3867. ctrl_info->scsi_host->host_no, device->bus,
  3868. device->target, device->lun, wait_secs);
  3869. }
  3870. return rc;
  3871. }
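/*
 * Issue a SOP LUN reset task management request on the default queue
 * group's RAID path and wait for it to complete.  lun_reset_sem bounds
 * the number of concurrent LUN resets to the I/O slots reserved for
 * them.
 */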
  3872. static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info,
  3873. struct pqi_scsi_dev *device)
  3874. {
  3875. int rc;
  3876. struct pqi_io_request *io_request;
  3877. DECLARE_COMPLETION_ONSTACK(wait);
  3878. struct pqi_task_management_request *request;
  3879. down(&ctrl_info->lun_reset_sem);
  3880. io_request = pqi_alloc_io_request(ctrl_info);
  3881. io_request->io_complete_callback = pqi_lun_reset_complete;
  3882. io_request->context = &wait;
  3883. request = io_request->iu;
  3884. memset(request, 0, sizeof(*request));
  3885. request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
  3886. put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH,
  3887. &request->header.iu_length);
  3888. put_unaligned_le16(io_request->index, &request->request_id);
  3889. memcpy(request->lun_number, device->scsi3addr,
  3890. sizeof(request->lun_number));
  3891. request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
  3892. pqi_start_io(ctrl_info,
  3893. &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
  3894. io_request);
  3895. rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait);
  3896. if (rc == 0)
  3897. rc = io_request->status;
  3898. pqi_free_io_request(io_request);
  3899. up(&ctrl_info->lun_reset_sem);
  3900. return rc;
  3901. }
  3902. /* Performs a reset at the LUN level. */
  3903. static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
  3904. struct pqi_scsi_dev *device)
  3905. {
  3906. int rc;
  3907. pqi_check_ctrl_health(ctrl_info);
  3908. if (pqi_ctrl_offline(ctrl_info))
  3909. return FAILED;
  3910. rc = pqi_lun_reset(ctrl_info, device);
  3911. return rc == 0 ? SUCCESS : FAILED;
  3912. }
  3913. static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
  3914. {
  3915. int rc;
  3916. struct pqi_ctrl_info *ctrl_info;
  3917. struct pqi_scsi_dev *device;
  3918. ctrl_info = shost_to_hba(scmd->device->host);
  3919. device = scmd->device->hostdata;
  3920. dev_err(&ctrl_info->pci_dev->dev,
  3921. "resetting scsi %d:%d:%d:%d\n",
  3922. ctrl_info->scsi_host->host_no,
  3923. device->bus, device->target, device->lun);
  3924. rc = pqi_device_reset(ctrl_info, device);
  3925. dev_err(&ctrl_info->pci_dev->dev,
  3926. "reset of scsi %d:%d:%d:%d: %s\n",
  3927. ctrl_info->scsi_host->host_no,
  3928. device->bus, device->target, device->lun,
  3929. rc == SUCCESS ? "SUCCESS" : "FAILED");
  3930. return rc;
  3931. }
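/*
 * .slave_alloc: bind the scsi_device to the driver's pqi_scsi_dev.
 * Physical devices (PQI_PHYSICAL_DEVICE_BUS) are matched through their
 * SAS rphy and have their target/LUN numbers recorded once known;
 * logical devices are looked up by bus/target/LUN.  Exposed devices
 * also have their advertised queue depth applied here.
 */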
  3932. static int pqi_slave_alloc(struct scsi_device *sdev)
  3933. {
  3934. struct pqi_scsi_dev *device;
  3935. unsigned long flags;
  3936. struct pqi_ctrl_info *ctrl_info;
  3937. struct scsi_target *starget;
  3938. struct sas_rphy *rphy;
  3939. ctrl_info = shost_to_hba(sdev->host);
  3940. spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
  3941. if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) {
  3942. starget = scsi_target(sdev);
  3943. rphy = target_to_rphy(starget);
  3944. device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
  3945. if (device) {
  3946. device->target = sdev_id(sdev);
  3947. device->lun = sdev->lun;
  3948. device->target_lun_valid = true;
  3949. }
  3950. } else {
  3951. device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev),
  3952. sdev_id(sdev), sdev->lun);
  3953. }
  3954. if (device && device->expose_device) {
  3955. sdev->hostdata = device;
  3956. device->sdev = sdev;
  3957. if (device->queue_depth) {
  3958. device->advertised_queue_depth = device->queue_depth;
  3959. scsi_change_queue_depth(sdev,
  3960. device->advertised_queue_depth);
  3961. }
  3962. }
  3963. spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
  3964. return 0;
  3965. }
  3966. static int pqi_slave_configure(struct scsi_device *sdev)
  3967. {
  3968. struct pqi_scsi_dev *device;
  3969. device = sdev->hostdata;
  3970. if (!device->expose_device)
  3971. sdev->no_uld_attach = true;
  3972. return 0;
  3973. }
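/*
 * CCISS_GETPCIINFO compatibility ioctl.  The board_id packs the PCI
 * subsystem device ID into the upper 16 bits and the subsystem vendor
 * ID into the lower 16 bits; for example, a controller with subsystem
 * device 0x0110 and subsystem vendor 0x9005 (PCI_VENDOR_ID_ADAPTEC2)
 * would report a board_id of 0x01109005.
 */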
  3974. static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info,
  3975. void __user *arg)
  3976. {
  3977. struct pci_dev *pci_dev;
  3978. u32 subsystem_vendor;
  3979. u32 subsystem_device;
  3980. cciss_pci_info_struct pciinfo;
  3981. if (!arg)
  3982. return -EINVAL;
  3983. pci_dev = ctrl_info->pci_dev;
  3984. pciinfo.domain = pci_domain_nr(pci_dev->bus);
  3985. pciinfo.bus = pci_dev->bus->number;
  3986. pciinfo.dev_fn = pci_dev->devfn;
  3987. subsystem_vendor = pci_dev->subsystem_vendor;
  3988. subsystem_device = pci_dev->subsystem_device;
  3989. pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) |
  3990. subsystem_vendor;
  3991. if (copy_to_user(arg, &pciinfo, sizeof(pciinfo)))
  3992. return -EFAULT;
  3993. return 0;
  3994. }
  3995. static int pqi_getdrivver_ioctl(void __user *arg)
  3996. {
  3997. u32 version;
  3998. if (!arg)
  3999. return -EINVAL;
  4000. version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) |
  4001. (DRIVER_RELEASE << 16) | DRIVER_REVISION;
  4002. if (copy_to_user(arg, &version, sizeof(version)))
  4003. return -EFAULT;
  4004. return 0;
  4005. }
  4006. struct ciss_error_info {
  4007. u8 scsi_status;
  4008. int command_status;
  4009. size_t sense_data_length;
  4010. };
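/*
 * Translate PQI RAID-path error information into the legacy CISS error
 * fields expected by the CCISS_PASSTHRU ioctl: the PQI data-out result
 * is mapped onto a CISS command status, and the sense data length falls
 * back to the response data length when no sense length was reported,
 * capped at the size of the error data buffer.
 */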
  4011. static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info,
  4012. struct ciss_error_info *ciss_error_info)
  4013. {
  4014. int ciss_cmd_status;
  4015. size_t sense_data_length;
  4016. switch (pqi_error_info->data_out_result) {
  4017. case PQI_DATA_IN_OUT_GOOD:
  4018. ciss_cmd_status = CISS_CMD_STATUS_SUCCESS;
  4019. break;
  4020. case PQI_DATA_IN_OUT_UNDERFLOW:
  4021. ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN;
  4022. break;
  4023. case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
  4024. ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN;
  4025. break;
  4026. case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
  4027. case PQI_DATA_IN_OUT_BUFFER_ERROR:
  4028. case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
  4029. case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
  4030. case PQI_DATA_IN_OUT_ERROR:
  4031. ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR;
  4032. break;
  4033. case PQI_DATA_IN_OUT_HARDWARE_ERROR:
  4034. case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
  4035. case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
  4036. case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
  4037. case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
  4038. case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
  4039. case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
  4040. case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
  4041. case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
  4042. case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
  4043. ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR;
  4044. break;
  4045. case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
  4046. ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT;
  4047. break;
  4048. case PQI_DATA_IN_OUT_ABORTED:
  4049. ciss_cmd_status = CISS_CMD_STATUS_ABORTED;
  4050. break;
  4051. case PQI_DATA_IN_OUT_TIMEOUT:
  4052. ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT;
  4053. break;
  4054. default:
  4055. ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS;
  4056. break;
  4057. }
  4058. sense_data_length =
  4059. get_unaligned_le16(&pqi_error_info->sense_data_length);
  4060. if (sense_data_length == 0)
  4061. sense_data_length =
  4062. get_unaligned_le16(&pqi_error_info->response_data_length);
  4063. if (sense_data_length)
  4064. if (sense_data_length > sizeof(pqi_error_info->data))
  4065. sense_data_length = sizeof(pqi_error_info->data);
  4066. ciss_error_info->scsi_status = pqi_error_info->status;
  4067. ciss_error_info->command_status = ciss_cmd_status;
  4068. ciss_error_info->sense_data_length = sense_data_length;
  4069. }
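/*
 * CCISS_PASSTHRU ioctl: validate the user-supplied command
 * (CAP_SYS_RAWIO required), copy any data-out payload into a kernel
 * bounce buffer, build a RAID path IU with a single bidirectionally
 * mapped SG descriptor, and submit it synchronously with no timeout.
 * On completion the PQI error information is converted to CISS form
 * and, for reads, the bounce buffer is copied back to user space.
 */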
  4070. static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
  4071. {
  4072. int rc;
  4073. char *kernel_buffer = NULL;
  4074. u16 iu_length;
  4075. size_t sense_data_length;
  4076. IOCTL_Command_struct iocommand;
  4077. struct pqi_raid_path_request request;
  4078. struct pqi_raid_error_info pqi_error_info;
  4079. struct ciss_error_info ciss_error_info;
  4080. if (pqi_ctrl_offline(ctrl_info))
  4081. return -ENXIO;
  4082. if (!arg)
  4083. return -EINVAL;
  4084. if (!capable(CAP_SYS_RAWIO))
  4085. return -EPERM;
  4086. if (copy_from_user(&iocommand, arg, sizeof(iocommand)))
  4087. return -EFAULT;
  4088. if (iocommand.buf_size < 1 &&
  4089. iocommand.Request.Type.Direction != XFER_NONE)
  4090. return -EINVAL;
  4091. if (iocommand.Request.CDBLen > sizeof(request.cdb))
  4092. return -EINVAL;
  4093. if (iocommand.Request.Type.Type != TYPE_CMD)
  4094. return -EINVAL;
  4095. switch (iocommand.Request.Type.Direction) {
  4096. case XFER_NONE:
  4097. case XFER_WRITE:
  4098. case XFER_READ:
  4099. break;
  4100. default:
  4101. return -EINVAL;
  4102. }
  4103. if (iocommand.buf_size > 0) {
  4104. kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL);
  4105. if (!kernel_buffer)
  4106. return -ENOMEM;
  4107. if (iocommand.Request.Type.Direction & XFER_WRITE) {
  4108. if (copy_from_user(kernel_buffer, iocommand.buf,
  4109. iocommand.buf_size)) {
  4110. rc = -EFAULT;
  4111. goto out;
  4112. }
  4113. } else {
  4114. memset(kernel_buffer, 0, iocommand.buf_size);
  4115. }
  4116. }
  4117. memset(&request, 0, sizeof(request));
  4118. request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
  4119. iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
  4120. PQI_REQUEST_HEADER_LENGTH;
  4121. memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes,
  4122. sizeof(request.lun_number));
  4123. memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen);
  4124. request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
  4125. switch (iocommand.Request.Type.Direction) {
  4126. case XFER_NONE:
  4127. request.data_direction = SOP_NO_DIRECTION_FLAG;
  4128. break;
  4129. case XFER_WRITE:
  4130. request.data_direction = SOP_WRITE_FLAG;
  4131. break;
  4132. case XFER_READ:
  4133. request.data_direction = SOP_READ_FLAG;
  4134. break;
  4135. }
  4136. request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
  4137. if (iocommand.buf_size > 0) {
  4138. put_unaligned_le32(iocommand.buf_size, &request.buffer_length);
  4139. rc = pqi_map_single(ctrl_info->pci_dev,
  4140. &request.sg_descriptors[0], kernel_buffer,
  4141. iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
  4142. if (rc)
  4143. goto out;
  4144. iu_length += sizeof(request.sg_descriptors[0]);
  4145. }
  4146. put_unaligned_le16(iu_length, &request.header.iu_length);
  4147. rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
  4148. PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info, NO_TIMEOUT);
  4149. if (iocommand.buf_size > 0)
  4150. pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
  4151. PCI_DMA_BIDIRECTIONAL);
  4152. memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));
  4153. if (rc == 0) {
  4154. pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info);
  4155. iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status;
  4156. iocommand.error_info.CommandStatus =
  4157. ciss_error_info.command_status;
  4158. sense_data_length = ciss_error_info.sense_data_length;
  4159. if (sense_data_length) {
  4160. if (sense_data_length >
  4161. sizeof(iocommand.error_info.SenseInfo))
  4162. sense_data_length =
  4163. sizeof(iocommand.error_info.SenseInfo);
  4164. memcpy(iocommand.error_info.SenseInfo,
  4165. pqi_error_info.data, sense_data_length);
  4166. iocommand.error_info.SenseLen = sense_data_length;
  4167. }
  4168. }
  4169. if (copy_to_user(arg, &iocommand, sizeof(iocommand))) {
  4170. rc = -EFAULT;
  4171. goto out;
  4172. }
  4173. if (rc == 0 && iocommand.buf_size > 0 &&
  4174. (iocommand.Request.Type.Direction & XFER_READ)) {
  4175. if (copy_to_user(iocommand.buf, kernel_buffer,
  4176. iocommand.buf_size)) {
  4177. rc = -EFAULT;
  4178. }
  4179. }
  4180. out:
  4181. kfree(kernel_buffer);
  4182. return rc;
  4183. }
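/*
 * Dispatch the legacy CCISS-compatible ioctls: CCISS_DEREGDISK,
 * CCISS_REGNEWDISK and CCISS_REGNEWD simply trigger a device rescan,
 * while the PCI-info, driver-version and passthrough ioctls are handled
 * by the helpers above.
 */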
  4184. static int pqi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
  4185. {
  4186. int rc;
  4187. struct pqi_ctrl_info *ctrl_info;
  4188. ctrl_info = shost_to_hba(sdev->host);
  4189. switch (cmd) {
  4190. case CCISS_DEREGDISK:
  4191. case CCISS_REGNEWDISK:
  4192. case CCISS_REGNEWD:
  4193. rc = pqi_scan_scsi_devices(ctrl_info);
  4194. break;
  4195. case CCISS_GETPCIINFO:
  4196. rc = pqi_getpciinfo_ioctl(ctrl_info, arg);
  4197. break;
  4198. case CCISS_GETDRIVVER:
  4199. rc = pqi_getdrivver_ioctl(arg);
  4200. break;
  4201. case CCISS_PASSTHRU:
  4202. rc = pqi_passthru_ioctl(ctrl_info, arg);
  4203. break;
  4204. default:
  4205. rc = -EINVAL;
  4206. break;
  4207. }
  4208. return rc;
  4209. }
  4210. static ssize_t pqi_version_show(struct device *dev,
  4211. struct device_attribute *attr, char *buffer)
  4212. {
  4213. ssize_t count = 0;
  4214. struct Scsi_Host *shost;
  4215. struct pqi_ctrl_info *ctrl_info;
  4216. shost = class_to_shost(dev);
  4217. ctrl_info = shost_to_hba(shost);
  4218. count += snprintf(buffer + count, PAGE_SIZE - count,
  4219. " driver: %s\n", DRIVER_VERSION BUILD_TIMESTAMP);
  4220. count += snprintf(buffer + count, PAGE_SIZE - count,
  4221. "firmware: %s\n", ctrl_info->firmware_version);
  4222. return count;
  4223. }
  4224. static ssize_t pqi_host_rescan_store(struct device *dev,
  4225. struct device_attribute *attr, const char *buffer, size_t count)
  4226. {
  4227. struct Scsi_Host *shost = class_to_shost(dev);
  4228. pqi_scan_start(shost);
  4229. return count;
  4230. }
  4231. static DEVICE_ATTR(version, S_IRUGO, pqi_version_show, NULL);
  4232. static DEVICE_ATTR(rescan, S_IWUSR, NULL, pqi_host_rescan_store);
  4233. static struct device_attribute *pqi_shost_attrs[] = {
  4234. &dev_attr_version,
  4235. &dev_attr_rescan,
  4236. NULL
  4237. };
  4238. static ssize_t pqi_sas_address_show(struct device *dev,
  4239. struct device_attribute *attr, char *buffer)
  4240. {
  4241. struct pqi_ctrl_info *ctrl_info;
  4242. struct scsi_device *sdev;
  4243. struct pqi_scsi_dev *device;
  4244. unsigned long flags;
  4245. u64 sas_address;
  4246. sdev = to_scsi_device(dev);
  4247. ctrl_info = shost_to_hba(sdev->host);
  4248. spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
  4249. device = sdev->hostdata;
  4250. if (pqi_is_logical_device(device)) {
  4251. spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
  4252. flags);
  4253. return -ENODEV;
  4254. }
  4255. sas_address = device->sas_address;
  4256. spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
  4257. return snprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
  4258. }
  4259. static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
  4260. struct device_attribute *attr, char *buffer)
  4261. {
  4262. struct pqi_ctrl_info *ctrl_info;
  4263. struct scsi_device *sdev;
  4264. struct pqi_scsi_dev *device;
  4265. unsigned long flags;
  4266. sdev = to_scsi_device(dev);
  4267. ctrl_info = shost_to_hba(sdev->host);
  4268. spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
  4269. device = sdev->hostdata;
  4270. buffer[0] = device->offload_enabled ? '1' : '0';
  4271. buffer[1] = '\n';
  4272. buffer[2] = '\0';
  4273. spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
  4274. return 2;
  4275. }
  4276. static DEVICE_ATTR(sas_address, S_IRUGO, pqi_sas_address_show, NULL);
  4277. static DEVICE_ATTR(ssd_smart_path_enabled, S_IRUGO,
  4278. pqi_ssd_smart_path_enabled_show, NULL);
  4279. static struct device_attribute *pqi_sdev_attrs[] = {
  4280. &dev_attr_sas_address,
  4281. &dev_attr_ssd_smart_path_enabled,
  4282. NULL
  4283. };
  4284. static struct scsi_host_template pqi_driver_template = {
  4285. .module = THIS_MODULE,
  4286. .name = DRIVER_NAME_SHORT,
  4287. .proc_name = DRIVER_NAME_SHORT,
  4288. .queuecommand = pqi_scsi_queue_command,
  4289. .scan_start = pqi_scan_start,
  4290. .scan_finished = pqi_scan_finished,
  4291. .this_id = -1,
  4292. .use_clustering = ENABLE_CLUSTERING,
  4293. .eh_device_reset_handler = pqi_eh_device_reset_handler,
  4294. .ioctl = pqi_ioctl,
  4295. .slave_alloc = pqi_slave_alloc,
  4296. .slave_configure = pqi_slave_configure,
  4297. .sdev_attrs = pqi_sdev_attrs,
  4298. .shost_attrs = pqi_shost_attrs,
  4299. };
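/*
 * Allocate and register the Scsi_Host for this controller.  Only a
 * pointer to the pqi_ctrl_info is stored in hostdata (hence the
 * sizeof(ctrl_info) private area passed to scsi_host_alloc), the queue
 * limits come from the values calculated earlier, and the SAS transport
 * host is added last so that a failure there unwinds through
 * scsi_remove_host().
 */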
  4300. static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
  4301. {
  4302. int rc;
  4303. struct Scsi_Host *shost;
  4304. shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
  4305. if (!shost) {
  4306. dev_err(&ctrl_info->pci_dev->dev,
  4307. "scsi_host_alloc failed for controller %u\n",
  4308. ctrl_info->ctrl_id);
  4309. return -ENOMEM;
  4310. }
  4311. shost->io_port = 0;
  4312. shost->n_io_port = 0;
  4313. shost->this_id = -1;
  4314. shost->max_channel = PQI_MAX_BUS;
  4315. shost->max_cmd_len = MAX_COMMAND_SIZE;
  4316. shost->max_lun = ~0;
  4317. shost->max_id = ~0;
  4318. shost->max_sectors = ctrl_info->max_sectors;
  4319. shost->can_queue = ctrl_info->scsi_ml_can_queue;
  4320. shost->cmd_per_lun = shost->can_queue;
  4321. shost->sg_tablesize = ctrl_info->sg_tablesize;
  4322. shost->transportt = pqi_sas_transport_template;
  4323. shost->irq = ctrl_info->msix_vectors[0];
  4324. shost->unique_id = shost->irq;
  4325. shost->nr_hw_queues = ctrl_info->num_queue_groups;
  4326. shost->hostdata[0] = (unsigned long)ctrl_info;
  4327. rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
  4328. if (rc) {
  4329. dev_err(&ctrl_info->pci_dev->dev,
  4330. "scsi_add_host failed for controller %u\n",
  4331. ctrl_info->ctrl_id);
  4332. goto free_host;
  4333. }
  4334. rc = pqi_add_sas_host(shost, ctrl_info);
  4335. if (rc) {
  4336. dev_err(&ctrl_info->pci_dev->dev,
  4337. "add SAS host failed for controller %u\n",
  4338. ctrl_info->ctrl_id);
  4339. goto remove_host;
  4340. }
  4341. ctrl_info->scsi_host = shost;
  4342. return 0;
  4343. remove_host:
  4344. scsi_remove_host(shost);
  4345. free_host:
  4346. scsi_host_put(shost);
  4347. return rc;
  4348. }
  4349. static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info)
  4350. {
  4351. struct Scsi_Host *shost;
  4352. pqi_delete_sas_host(ctrl_info);
  4353. shost = ctrl_info->scsi_host;
  4354. if (!shost)
  4355. return;
  4356. scsi_remove_host(shost);
  4357. scsi_host_put(shost);
  4358. }
  4359. #define PQI_RESET_ACTION_RESET 0x1
  4360. #define PQI_RESET_TYPE_NO_RESET 0x0
  4361. #define PQI_RESET_TYPE_SOFT_RESET 0x1
  4362. #define PQI_RESET_TYPE_FIRM_RESET 0x2
  4363. #define PQI_RESET_TYPE_HARD_RESET 0x3
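/*
 * Request a controller reset through the PQI device_reset register.
 * As encoded here, the reset action occupies bits 7:5 (action << 5) and
 * the reset type the low-order bits, so this issues the "reset" action
 * with the hard reset type and then waits via
 * pqi_wait_for_pqi_mode_ready() for the device to become ready again.
 */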
  4364. static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
  4365. {
  4366. int rc;
  4367. u32 reset_params;
  4368. reset_params = (PQI_RESET_ACTION_RESET << 5) |
  4369. PQI_RESET_TYPE_HARD_RESET;
  4370. writel(reset_params,
  4371. &ctrl_info->pqi_registers->device_reset);
  4372. rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
  4373. if (rc)
  4374. dev_err(&ctrl_info->pci_dev->dev,
  4375. "PQI reset failed\n");
  4376. return rc;
  4377. }
  4378. static int pqi_get_ctrl_firmware_version(struct pqi_ctrl_info *ctrl_info)
  4379. {
  4380. int rc;
  4381. struct bmic_identify_controller *identify;
  4382. identify = kmalloc(sizeof(*identify), GFP_KERNEL);
  4383. if (!identify)
  4384. return -ENOMEM;
  4385. rc = pqi_identify_controller(ctrl_info, identify);
  4386. if (rc)
  4387. goto out;
  4388. memcpy(ctrl_info->firmware_version, identify->firmware_version,
  4389. sizeof(identify->firmware_version));
  4390. ctrl_info->firmware_version[sizeof(identify->firmware_version)] = '\0';
  4391. snprintf(ctrl_info->firmware_version +
  4392. strlen(ctrl_info->firmware_version),
4393. sizeof(ctrl_info->firmware_version) - strlen(ctrl_info->firmware_version),
  4394. "-%u", get_unaligned_le16(&identify->firmware_build_number));
  4395. out:
  4396. kfree(identify);
  4397. return rc;
  4398. }
  4399. static int pqi_kdump_init(struct pqi_ctrl_info *ctrl_info)
  4400. {
  4401. if (!sis_is_firmware_running(ctrl_info))
  4402. return -ENXIO;
  4403. if (pqi_get_ctrl_mode(ctrl_info) == PQI_MODE) {
  4404. sis_disable_msix(ctrl_info);
  4405. if (pqi_reset(ctrl_info) == 0)
  4406. sis_reenable_sis_mode(ctrl_info);
  4407. }
  4408. return 0;
  4409. }
  4410. static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
  4411. {
  4412. int rc;
  4413. if (reset_devices) {
  4414. rc = pqi_kdump_init(ctrl_info);
  4415. if (rc)
  4416. return rc;
  4417. }
4418. /*
4419. * When the controller comes out of reset, it is always running
4420. * in legacy SIS mode. This keeps it compatible with legacy
4421. * drivers shipped with OSes, so we have to talk to it using
4422. * SIS commands at first. Once we are satisfied
4423. * that the controller supports PQI, we transition it into PQI
4424. * mode.
4425. */
  4426. /*
  4427. * Wait until the controller is ready to start accepting SIS
  4428. * commands.
  4429. */
  4430. rc = sis_wait_for_ctrl_ready(ctrl_info);
  4431. if (rc) {
  4432. dev_err(&ctrl_info->pci_dev->dev,
  4433. "error initializing SIS interface\n");
  4434. return rc;
  4435. }
  4436. /*
  4437. * Get the controller properties. This allows us to determine
  4438. * whether or not it supports PQI mode.
  4439. */
  4440. rc = sis_get_ctrl_properties(ctrl_info);
  4441. if (rc) {
  4442. dev_err(&ctrl_info->pci_dev->dev,
  4443. "error obtaining controller properties\n");
  4444. return rc;
  4445. }
  4446. rc = sis_get_pqi_capabilities(ctrl_info);
  4447. if (rc) {
  4448. dev_err(&ctrl_info->pci_dev->dev,
  4449. "error obtaining controller capabilities\n");
  4450. return rc;
  4451. }
  4452. if (ctrl_info->max_outstanding_requests > PQI_MAX_OUTSTANDING_REQUESTS)
  4453. ctrl_info->max_outstanding_requests =
  4454. PQI_MAX_OUTSTANDING_REQUESTS;
  4455. pqi_calculate_io_resources(ctrl_info);
  4456. rc = pqi_alloc_error_buffer(ctrl_info);
  4457. if (rc) {
  4458. dev_err(&ctrl_info->pci_dev->dev,
  4459. "failed to allocate PQI error buffer\n");
  4460. return rc;
  4461. }
  4462. /*
  4463. * If the function we are about to call succeeds, the
  4464. * controller will transition from legacy SIS mode
  4465. * into PQI mode.
  4466. */
  4467. rc = sis_init_base_struct_addr(ctrl_info);
  4468. if (rc) {
  4469. dev_err(&ctrl_info->pci_dev->dev,
  4470. "error initializing PQI mode\n");
  4471. return rc;
  4472. }
  4473. /* Wait for the controller to complete the SIS -> PQI transition. */
  4474. rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
  4475. if (rc) {
  4476. dev_err(&ctrl_info->pci_dev->dev,
  4477. "transition to PQI mode failed\n");
  4478. return rc;
  4479. }
  4480. /* From here on, we are running in PQI mode. */
  4481. ctrl_info->pqi_mode_enabled = true;
  4482. pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
  4483. rc = pqi_alloc_admin_queues(ctrl_info);
  4484. if (rc) {
  4485. dev_err(&ctrl_info->pci_dev->dev,
  4486. "error allocating admin queues\n");
  4487. return rc;
  4488. }
  4489. rc = pqi_create_admin_queues(ctrl_info);
  4490. if (rc) {
  4491. dev_err(&ctrl_info->pci_dev->dev,
  4492. "error creating admin queues\n");
  4493. return rc;
  4494. }
  4495. rc = pqi_report_device_capability(ctrl_info);
  4496. if (rc) {
  4497. dev_err(&ctrl_info->pci_dev->dev,
  4498. "obtaining device capability failed\n");
  4499. return rc;
  4500. }
  4501. rc = pqi_validate_device_capability(ctrl_info);
  4502. if (rc)
  4503. return rc;
  4504. pqi_calculate_queue_resources(ctrl_info);
  4505. rc = pqi_enable_msix_interrupts(ctrl_info);
  4506. if (rc)
  4507. return rc;
  4508. if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) {
  4509. ctrl_info->max_msix_vectors =
  4510. ctrl_info->num_msix_vectors_enabled;
  4511. pqi_calculate_queue_resources(ctrl_info);
  4512. }
  4513. rc = pqi_alloc_io_resources(ctrl_info);
  4514. if (rc)
  4515. return rc;
  4516. rc = pqi_alloc_operational_queues(ctrl_info);
  4517. if (rc)
  4518. return rc;
  4519. pqi_init_operational_queues(ctrl_info);
  4520. rc = pqi_request_irqs(ctrl_info);
  4521. if (rc)
  4522. return rc;
  4523. pqi_irq_set_affinity_hint(ctrl_info);
  4524. rc = pqi_create_queues(ctrl_info);
  4525. if (rc)
  4526. return rc;
  4527. sis_enable_msix(ctrl_info);
  4528. rc = pqi_configure_events(ctrl_info);
  4529. if (rc) {
  4530. dev_err(&ctrl_info->pci_dev->dev,
  4531. "error configuring events\n");
  4532. return rc;
  4533. }
  4534. pqi_start_heartbeat_timer(ctrl_info);
  4535. ctrl_info->controller_online = true;
  4536. /* Register with the SCSI subsystem. */
  4537. rc = pqi_register_scsi(ctrl_info);
  4538. if (rc)
  4539. return rc;
  4540. rc = pqi_get_ctrl_firmware_version(ctrl_info);
  4541. if (rc) {
  4542. dev_err(&ctrl_info->pci_dev->dev,
  4543. "error obtaining firmware version\n");
  4544. return rc;
  4545. }
  4546. rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
  4547. if (rc) {
  4548. dev_err(&ctrl_info->pci_dev->dev,
  4549. "error updating host wellness\n");
  4550. return rc;
  4551. }
  4552. pqi_schedule_update_time_worker(ctrl_info);
  4553. pqi_scan_scsi_devices(ctrl_info);
  4554. return 0;
  4555. }
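/*
 * PCI-level bring-up: enable the device, select a 64-bit DMA mask when
 * dma_addr_t is wide enough (32-bit otherwise), claim the BAR regions,
 * and map just enough of BAR 0 to cover the controller register block
 * before enabling bus mastering and stashing the ctrl_info in drvdata.
 */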
  4556. static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
  4557. {
  4558. int rc;
  4559. u64 mask;
  4560. rc = pci_enable_device(ctrl_info->pci_dev);
  4561. if (rc) {
  4562. dev_err(&ctrl_info->pci_dev->dev,
  4563. "failed to enable PCI device\n");
  4564. return rc;
  4565. }
  4566. if (sizeof(dma_addr_t) > 4)
  4567. mask = DMA_BIT_MASK(64);
  4568. else
  4569. mask = DMA_BIT_MASK(32);
  4570. rc = dma_set_mask(&ctrl_info->pci_dev->dev, mask);
  4571. if (rc) {
  4572. dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
  4573. goto disable_device;
  4574. }
  4575. rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT);
  4576. if (rc) {
  4577. dev_err(&ctrl_info->pci_dev->dev,
  4578. "failed to obtain PCI resources\n");
  4579. goto disable_device;
  4580. }
  4581. ctrl_info->iomem_base = ioremap_nocache(pci_resource_start(
  4582. ctrl_info->pci_dev, 0),
  4583. sizeof(struct pqi_ctrl_registers));
  4584. if (!ctrl_info->iomem_base) {
  4585. dev_err(&ctrl_info->pci_dev->dev,
  4586. "failed to map memory for controller registers\n");
  4587. rc = -ENOMEM;
  4588. goto release_regions;
  4589. }
  4590. ctrl_info->registers = ctrl_info->iomem_base;
  4591. ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers;
  4592. /* Enable bus mastering. */
  4593. pci_set_master(ctrl_info->pci_dev);
  4594. pci_set_drvdata(ctrl_info->pci_dev, ctrl_info);
  4595. return 0;
  4596. release_regions:
  4597. pci_release_regions(ctrl_info->pci_dev);
  4598. disable_device:
  4599. pci_disable_device(ctrl_info->pci_dev);
  4600. return rc;
  4601. }
  4602. static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info)
  4603. {
  4604. iounmap(ctrl_info->iomem_base);
  4605. pci_release_regions(ctrl_info->pci_dev);
  4606. pci_disable_device(ctrl_info->pci_dev);
  4607. pci_set_drvdata(ctrl_info->pci_dev, NULL);
  4608. }
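/*
 * Allocate the per-controller state on the requested NUMA node and set
 * up its locks, work items and semaphores.  The synchronous-request and
 * LUN-reset semaphores are sized from the I/O slots reserved for those
 * purposes, presumably so those paths can never starve normal I/O.
 */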
  4609. static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
  4610. {
  4611. struct pqi_ctrl_info *ctrl_info;
  4612. ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info),
  4613. GFP_KERNEL, numa_node);
  4614. if (!ctrl_info)
  4615. return NULL;
  4616. mutex_init(&ctrl_info->scan_mutex);
  4617. INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
  4618. spin_lock_init(&ctrl_info->scsi_device_list_lock);
  4619. INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
  4620. atomic_set(&ctrl_info->num_interrupts, 0);
  4621. INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
  4622. INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
  4623. sema_init(&ctrl_info->sync_request_sem,
  4624. PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
  4625. sema_init(&ctrl_info->lun_reset_sem, PQI_RESERVED_IO_SLOTS_LUN_RESET);
  4626. ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
  4627. ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;
  4628. return ctrl_info;
  4629. }
  4630. static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
  4631. {
  4632. kfree(ctrl_info);
  4633. }
  4634. static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
  4635. {
  4636. pqi_irq_unset_affinity_hint(ctrl_info);
  4637. pqi_free_irqs(ctrl_info);
  4638. if (ctrl_info->num_msix_vectors_enabled)
  4639. pci_disable_msix(ctrl_info->pci_dev);
  4640. }
  4641. static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
  4642. {
  4643. pqi_stop_heartbeat_timer(ctrl_info);
  4644. pqi_free_interrupts(ctrl_info);
  4645. if (ctrl_info->queue_memory_base)
  4646. dma_free_coherent(&ctrl_info->pci_dev->dev,
  4647. ctrl_info->queue_memory_length,
  4648. ctrl_info->queue_memory_base,
  4649. ctrl_info->queue_memory_base_dma_handle);
  4650. if (ctrl_info->admin_queue_memory_base)
  4651. dma_free_coherent(&ctrl_info->pci_dev->dev,
  4652. ctrl_info->admin_queue_memory_length,
  4653. ctrl_info->admin_queue_memory_base,
  4654. ctrl_info->admin_queue_memory_base_dma_handle);
  4655. pqi_free_all_io_requests(ctrl_info);
  4656. if (ctrl_info->error_buffer)
  4657. dma_free_coherent(&ctrl_info->pci_dev->dev,
  4658. ctrl_info->error_buffer_length,
  4659. ctrl_info->error_buffer,
  4660. ctrl_info->error_buffer_dma_handle);
  4661. if (ctrl_info->iomem_base)
  4662. pqi_cleanup_pci_init(ctrl_info);
  4663. pqi_free_ctrl_info(ctrl_info);
  4664. }
  4665. static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
  4666. {
  4667. cancel_delayed_work_sync(&ctrl_info->rescan_work);
  4668. cancel_delayed_work_sync(&ctrl_info->update_time_work);
  4669. pqi_remove_all_scsi_devices(ctrl_info);
  4670. pqi_unregister_scsi(ctrl_info);
  4671. if (ctrl_info->pqi_mode_enabled) {
  4672. sis_disable_msix(ctrl_info);
  4673. if (pqi_reset(ctrl_info) == 0)
  4674. sis_reenable_sis_mode(ctrl_info);
  4675. }
  4676. pqi_free_ctrl_resources(ctrl_info);
  4677. }
  4678. static void pqi_print_ctrl_info(struct pci_dev *pdev,
  4679. const struct pci_device_id *id)
  4680. {
  4681. char *ctrl_description;
  4682. if (id->driver_data) {
  4683. ctrl_description = (char *)id->driver_data;
  4684. } else {
  4685. switch (id->subvendor) {
  4686. case PCI_VENDOR_ID_HP:
  4687. ctrl_description = hpe_branded_controller;
  4688. break;
  4689. case PCI_VENDOR_ID_ADAPTEC2:
  4690. default:
  4691. ctrl_description = microsemi_branded_controller;
  4692. break;
  4693. }
  4694. }
  4695. dev_info(&pdev->dev, "%s found\n", ctrl_description);
  4696. }
  4697. static int pqi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
  4698. {
  4699. int rc;
  4700. int node;
  4701. struct pqi_ctrl_info *ctrl_info;
  4702. pqi_print_ctrl_info(pdev, id);
  4703. if (pqi_disable_device_id_wildcards &&
  4704. id->subvendor == PCI_ANY_ID &&
  4705. id->subdevice == PCI_ANY_ID) {
  4706. dev_warn(&pdev->dev,
  4707. "controller not probed because device ID wildcards are disabled\n");
  4708. return -ENODEV;
  4709. }
  4710. if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID)
  4711. dev_warn(&pdev->dev,
  4712. "controller device ID matched using wildcards\n");
  4713. node = dev_to_node(&pdev->dev);
  4714. if (node == NUMA_NO_NODE)
  4715. set_dev_node(&pdev->dev, 0);
  4716. ctrl_info = pqi_alloc_ctrl_info(node);
  4717. if (!ctrl_info) {
  4718. dev_err(&pdev->dev,
  4719. "failed to allocate controller info block\n");
  4720. return -ENOMEM;
  4721. }
  4722. ctrl_info->pci_dev = pdev;
  4723. rc = pqi_pci_init(ctrl_info);
  4724. if (rc)
  4725. goto error;
  4726. rc = pqi_ctrl_init(ctrl_info);
  4727. if (rc)
  4728. goto error;
  4729. return 0;
  4730. error:
  4731. pqi_remove_ctrl(ctrl_info);
  4732. return rc;
  4733. }
  4734. static void pqi_pci_remove(struct pci_dev *pdev)
  4735. {
  4736. struct pqi_ctrl_info *ctrl_info;
  4737. ctrl_info = pci_get_drvdata(pdev);
  4738. if (!ctrl_info)
  4739. return;
  4740. pqi_remove_ctrl(ctrl_info);
  4741. }
  4742. static void pqi_shutdown(struct pci_dev *pdev)
  4743. {
  4744. int rc;
  4745. struct pqi_ctrl_info *ctrl_info;
  4746. ctrl_info = pci_get_drvdata(pdev);
  4747. if (!ctrl_info)
  4748. goto error;
  4749. /*
  4750. * Write all data in the controller's battery-backed cache to
  4751. * storage.
  4752. */
  4753. rc = pqi_flush_cache(ctrl_info);
  4754. if (rc == 0)
  4755. return;
  4756. error:
  4757. dev_warn(&pdev->dev,
  4758. "unable to flush controller cache\n");
  4759. }
  4760. /* Define the PCI IDs for the controllers that we support. */
  4761. static const struct pci_device_id pqi_pci_id_table[] = {
  4762. {
  4763. PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
  4764. PCI_VENDOR_ID_ADAPTEC2, 0x0110)
  4765. },
  4766. {
  4767. PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
  4768. PCI_VENDOR_ID_HP, 0x0600)
  4769. },
  4770. {
  4771. PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
  4772. PCI_VENDOR_ID_HP, 0x0601)
  4773. },
  4774. {
  4775. PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
  4776. PCI_VENDOR_ID_HP, 0x0602)
  4777. },
  4778. {
  4779. PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
  4780. PCI_VENDOR_ID_HP, 0x0603)
  4781. },
  4782. {
  4783. PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
  4784. PCI_VENDOR_ID_HP, 0x0650)
  4785. },
  4786. {
  4787. PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
  4788. PCI_VENDOR_ID_HP, 0x0651)
  4789. },
  4790. {
  4791. PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
  4792. PCI_VENDOR_ID_HP, 0x0652)
  4793. },
  4794. {
  4795. PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
  4796. PCI_VENDOR_ID_HP, 0x0653)
  4797. },
  4798. {
  4799. PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
  4800. PCI_VENDOR_ID_HP, 0x0654)
  4801. },
  4802. {
  4803. PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
  4804. PCI_VENDOR_ID_HP, 0x0655)
  4805. },
  4806. {
  4807. PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
  4808. PCI_VENDOR_ID_HP, 0x0700)
  4809. },
  4810. {
  4811. PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
  4812. PCI_VENDOR_ID_HP, 0x0701)
  4813. },
  4814. {
  4815. PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
  4816. PCI_VENDOR_ID_ADAPTEC2, 0x0800)
  4817. },
  4818. {
  4819. PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
  4820. PCI_VENDOR_ID_ADAPTEC2, 0x0801)
  4821. },
  4822. {
  4823. PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
  4824. PCI_VENDOR_ID_ADAPTEC2, 0x0802)
  4825. },
  4826. {
  4827. PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
  4828. PCI_VENDOR_ID_ADAPTEC2, 0x0803)
  4829. },
  4830. {
  4831. PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
  4832. PCI_VENDOR_ID_ADAPTEC2, 0x0804)
  4833. },
  4834. {
  4835. PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
  4836. PCI_VENDOR_ID_ADAPTEC2, 0x0805)
  4837. },
  4838. {
  4839. PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
  4840. PCI_VENDOR_ID_ADAPTEC2, 0x0900)
  4841. },
  4842. {
  4843. PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
  4844. PCI_VENDOR_ID_ADAPTEC2, 0x0901)
  4845. },
  4846. {
  4847. PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
  4848. PCI_VENDOR_ID_ADAPTEC2, 0x0902)
  4849. },
  4850. {
  4851. PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
  4852. PCI_VENDOR_ID_ADAPTEC2, 0x0903)
  4853. },
  4854. {
  4855. PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
  4856. PCI_VENDOR_ID_ADAPTEC2, 0x0904)
  4857. },
  4858. {
  4859. PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
  4860. PCI_VENDOR_ID_ADAPTEC2, 0x0905)
  4861. },
  4862. {
  4863. PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
  4864. PCI_VENDOR_ID_ADAPTEC2, 0x0906)
  4865. },
  4866. {
  4867. PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
  4868. PCI_VENDOR_ID_HP, 0x1001)
  4869. },
  4870. {
  4871. PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
  4872. PCI_VENDOR_ID_HP, 0x1100)
  4873. },
  4874. {
  4875. PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
  4876. PCI_VENDOR_ID_HP, 0x1101)
  4877. },
  4878. {
  4879. PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
  4880. PCI_VENDOR_ID_HP, 0x1102)
  4881. },
  4882. {
  4883. PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
  4884. PCI_VENDOR_ID_HP, 0x1150)
  4885. },
  4886. {
  4887. PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
  4888. PCI_ANY_ID, PCI_ANY_ID)
  4889. },
  4890. { 0 }
  4891. };
  4892. MODULE_DEVICE_TABLE(pci, pqi_pci_id_table);
  4893. static struct pci_driver pqi_pci_driver = {
  4894. .name = DRIVER_NAME_SHORT,
  4895. .id_table = pqi_pci_id_table,
  4896. .probe = pqi_pci_probe,
  4897. .remove = pqi_pci_remove,
  4898. .shutdown = pqi_shutdown,
  4899. };
  4900. static int __init pqi_init(void)
  4901. {
  4902. int rc;
  4903. pr_info(DRIVER_NAME "\n");
  4904. pqi_sas_transport_template =
  4905. sas_attach_transport(&pqi_sas_transport_functions);
  4906. if (!pqi_sas_transport_template)
  4907. return -ENODEV;
  4908. rc = pci_register_driver(&pqi_pci_driver);
  4909. if (rc)
  4910. sas_release_transport(pqi_sas_transport_template);
  4911. return rc;
  4912. }
  4913. static void __exit pqi_cleanup(void)
  4914. {
  4915. pci_unregister_driver(&pqi_pci_driver);
  4916. sas_release_transport(pqi_sas_transport_template);
  4917. }
  4918. module_init(pqi_init);
  4919. module_exit(pqi_cleanup);
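/*
 * Compile-time layout checks.  This function is never called (note the
 * unused attribute); it exists purely so that the BUILD_BUG_ON()s below
 * fail the build if any of the on-the-wire PQI/SIS structure offsets or
 * sizes drift from what the firmware interface expects.
 */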
  4920. static void __attribute__((unused)) verify_structures(void)
  4921. {
  4922. BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
  4923. sis_host_to_ctrl_doorbell) != 0x20);
  4924. BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
  4925. sis_interrupt_mask) != 0x34);
  4926. BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
  4927. sis_ctrl_to_host_doorbell) != 0x9c);
  4928. BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
  4929. sis_ctrl_to_host_doorbell_clear) != 0xa0);
  4930. BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
  4931. sis_driver_scratch) != 0xb0);
  4932. BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
  4933. sis_firmware_status) != 0xbc);
  4934. BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
  4935. sis_mailbox) != 0x1000);
  4936. BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
  4937. pqi_registers) != 0x4000);
  4938. BUILD_BUG_ON(offsetof(struct pqi_iu_header,
  4939. iu_type) != 0x0);
  4940. BUILD_BUG_ON(offsetof(struct pqi_iu_header,
  4941. iu_length) != 0x2);
  4942. BUILD_BUG_ON(offsetof(struct pqi_iu_header,
  4943. response_queue_id) != 0x4);
  4944. BUILD_BUG_ON(offsetof(struct pqi_iu_header,
  4945. work_area) != 0x6);
  4946. BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);
  4947. BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
  4948. status) != 0x0);
  4949. BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
  4950. service_response) != 0x1);
  4951. BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
  4952. data_present) != 0x2);
  4953. BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
  4954. reserved) != 0x3);
  4955. BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
  4956. residual_count) != 0x4);
  4957. BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
  4958. data_length) != 0x8);
  4959. BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
  4960. reserved1) != 0xa);
  4961. BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
  4962. data) != 0xc);
  4963. BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c);
  4964. BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
  4965. data_in_result) != 0x0);
  4966. BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
  4967. data_out_result) != 0x1);
  4968. BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
  4969. reserved) != 0x2);
  4970. BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
  4971. status) != 0x5);
  4972. BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
  4973. status_qualifier) != 0x6);
  4974. BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
  4975. sense_data_length) != 0x8);
  4976. BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
  4977. response_data_length) != 0xa);
  4978. BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
  4979. data_in_transferred) != 0xc);
  4980. BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
  4981. data_out_transferred) != 0x10);
  4982. BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
  4983. data) != 0x14);
  4984. BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114);
  4985. BUILD_BUG_ON(offsetof(struct pqi_device_registers,
  4986. signature) != 0x0);
  4987. BUILD_BUG_ON(offsetof(struct pqi_device_registers,
  4988. function_and_status_code) != 0x8);
  4989. BUILD_BUG_ON(offsetof(struct pqi_device_registers,
  4990. max_admin_iq_elements) != 0x10);
  4991. BUILD_BUG_ON(offsetof(struct pqi_device_registers,
  4992. max_admin_oq_elements) != 0x11);
  4993. BUILD_BUG_ON(offsetof(struct pqi_device_registers,
  4994. admin_iq_element_length) != 0x12);
  4995. BUILD_BUG_ON(offsetof(struct pqi_device_registers,
  4996. admin_oq_element_length) != 0x13);
  4997. BUILD_BUG_ON(offsetof(struct pqi_device_registers,
  4998. max_reset_timeout) != 0x14);
  4999. BUILD_BUG_ON(offsetof(struct pqi_device_registers,
  5000. legacy_intx_status) != 0x18);
  5001. BUILD_BUG_ON(offsetof(struct pqi_device_registers,
  5002. legacy_intx_mask_set) != 0x1c);
  5003. BUILD_BUG_ON(offsetof(struct pqi_device_registers,
  5004. legacy_intx_mask_clear) != 0x20);
  5005. BUILD_BUG_ON(offsetof(struct pqi_device_registers,
  5006. device_status) != 0x40);
  5007. BUILD_BUG_ON(offsetof(struct pqi_device_registers,
  5008. admin_iq_pi_offset) != 0x48);
  5009. BUILD_BUG_ON(offsetof(struct pqi_device_registers,
  5010. admin_oq_ci_offset) != 0x50);
  5011. BUILD_BUG_ON(offsetof(struct pqi_device_registers,
  5012. admin_iq_element_array_addr) != 0x58);
  5013. BUILD_BUG_ON(offsetof(struct pqi_device_registers,
  5014. admin_oq_element_array_addr) != 0x60);
  5015. BUILD_BUG_ON(offsetof(struct pqi_device_registers,
  5016. admin_iq_ci_addr) != 0x68);
  5017. BUILD_BUG_ON(offsetof(struct pqi_device_registers,
  5018. admin_oq_pi_addr) != 0x70);
  5019. BUILD_BUG_ON(offsetof(struct pqi_device_registers,
  5020. admin_iq_num_elements) != 0x78);
  5021. BUILD_BUG_ON(offsetof(struct pqi_device_registers,
  5022. admin_oq_num_elements) != 0x79);
  5023. BUILD_BUG_ON(offsetof(struct pqi_device_registers,
  5024. admin_queue_int_msg_num) != 0x7a);
  5025. BUILD_BUG_ON(offsetof(struct pqi_device_registers,
  5026. device_error) != 0x80);
  5027. BUILD_BUG_ON(offsetof(struct pqi_device_registers,
  5028. error_details) != 0x88);
  5029. BUILD_BUG_ON(offsetof(struct pqi_device_registers,
  5030. device_reset) != 0x90);
  5031. BUILD_BUG_ON(offsetof(struct pqi_device_registers,
  5032. power_action) != 0x94);
  5033. BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100);
  5034. BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
  5035. header.iu_type) != 0);
  5036. BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
  5037. header.iu_length) != 2);
  5038. BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
  5039. header.work_area) != 6);
  5040. BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
  5041. request_id) != 8);
  5042. BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
  5043. function_code) != 10);
  5044. BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
  5045. data.report_device_capability.buffer_length) != 44);
  5046. BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
  5047. data.report_device_capability.sg_descriptor) != 48);
  5048. BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
  5049. data.create_operational_iq.queue_id) != 12);
  5050. BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
  5051. data.create_operational_iq.element_array_addr) != 16);
  5052. BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
  5053. data.create_operational_iq.ci_addr) != 24);
  5054. BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
  5055. data.create_operational_iq.num_elements) != 32);
  5056. BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
  5057. data.create_operational_iq.element_length) != 34);
  5058. BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
  5059. data.create_operational_iq.queue_protocol) != 36);
  5060. BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
  5061. data.create_operational_oq.queue_id) != 12);
  5062. BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
  5063. data.create_operational_oq.element_array_addr) != 16);
  5064. BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
  5065. data.create_operational_oq.pi_addr) != 24);
  5066. BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
  5067. data.create_operational_oq.num_elements) != 32);
  5068. BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
  5069. data.create_operational_oq.element_length) != 34);
  5070. BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
  5071. data.create_operational_oq.queue_protocol) != 36);
  5072. BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
  5073. data.create_operational_oq.int_msg_num) != 40);
  5074. BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
  5075. data.create_operational_oq.coalescing_count) != 42);
  5076. BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
  5077. data.create_operational_oq.min_coalescing_time) != 44);
  5078. BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
  5079. data.create_operational_oq.max_coalescing_time) != 48);
  5080. BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
  5081. data.delete_operational_queue.queue_id) != 12);
  5082. BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
  5083. BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
  5084. data.create_operational_iq) != 64 - 11);
  5085. BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
  5086. data.create_operational_oq) != 64 - 11);
  5087. BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
  5088. data.delete_operational_queue) != 64 - 11);
  5089. BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
  5090. header.iu_type) != 0);
  5091. BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
  5092. header.iu_length) != 2);
  5093. BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
  5094. header.work_area) != 6);
  5095. BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
  5096. request_id) != 8);
  5097. BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
  5098. function_code) != 10);
  5099. BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
  5100. status) != 11);
  5101. BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
  5102. data.create_operational_iq.status_descriptor) != 12);
  5103. BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
  5104. data.create_operational_iq.iq_pi_offset) != 16);
  5105. BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
  5106. data.create_operational_oq.status_descriptor) != 12);
  5107. BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
  5108. data.create_operational_oq.oq_ci_offset) != 16);
  5109. BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64);
  5110. BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
  5111. header.iu_type) != 0);
  5112. BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
  5113. header.iu_length) != 2);
  5114. BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
  5115. header.response_queue_id) != 4);
  5116. BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
  5117. header.work_area) != 6);
  5118. BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
  5119. request_id) != 8);
  5120. BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
  5121. nexus_id) != 10);
  5122. BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
  5123. buffer_length) != 12);
  5124. BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
  5125. lun_number) != 16);
  5126. BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
  5127. protocol_specific) != 24);
  5128. BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
  5129. error_index) != 27);
  5130. BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
  5131. cdb) != 32);
  5132. BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
  5133. sg_descriptors) != 64);
  5134. BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
  5135. PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
  5136. BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
  5137. header.iu_type) != 0);
  5138. BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
  5139. header.iu_length) != 2);
  5140. BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
  5141. header.response_queue_id) != 4);
  5142. BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
  5143. header.work_area) != 6);
  5144. BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
  5145. request_id) != 8);
  5146. BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
  5147. nexus_id) != 12);
  5148. BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
  5149. buffer_length) != 16);
  5150. BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
  5151. data_encryption_key_index) != 22);
  5152. BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
  5153. encrypt_tweak_lower) != 24);
  5154. BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
  5155. encrypt_tweak_upper) != 28);
  5156. BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
  5157. cdb) != 32);
  5158. BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
  5159. error_index) != 48);
  5160. BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
  5161. num_sg_descriptors) != 50);
  5162. BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
  5163. cdb_length) != 51);
  5164. BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
  5165. lun_number) != 52);
  5166. BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
  5167. sg_descriptors) != 64);
  5168. BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) !=
  5169. PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
  5170. BUILD_BUG_ON(offsetof(struct pqi_io_response,
  5171. header.iu_type) != 0);
  5172. BUILD_BUG_ON(offsetof(struct pqi_io_response,
  5173. header.iu_length) != 2);
  5174. BUILD_BUG_ON(offsetof(struct pqi_io_response,
  5175. request_id) != 8);
  5176. BUILD_BUG_ON(offsetof(struct pqi_io_response,
  5177. error_index) != 10);
  5178. BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
  5179. header.iu_type) != 0);
  5180. BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
  5181. header.iu_length) != 2);
  5182. BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
  5183. header.response_queue_id) != 4);
  5184. BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
  5185. request_id) != 8);
  5186. BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
  5187. data.report_event_configuration.buffer_length) != 12);
  5188. BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
  5189. data.report_event_configuration.sg_descriptors) != 16);
  5190. BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
  5191. data.set_event_configuration.global_event_oq_id) != 10);
  5192. BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
  5193. data.set_event_configuration.buffer_length) != 12);
  5194. BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
  5195. data.set_event_configuration.sg_descriptors) != 16);
  5196. BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
  5197. max_inbound_iu_length) != 6);
  5198. BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
  5199. max_outbound_iu_length) != 14);
  5200. BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16);
  5201. BUILD_BUG_ON(offsetof(struct pqi_device_capability,
  5202. data_length) != 0);
  5203. BUILD_BUG_ON(offsetof(struct pqi_device_capability,
  5204. iq_arbitration_priority_support_bitmask) != 8);
  5205. BUILD_BUG_ON(offsetof(struct pqi_device_capability,
  5206. maximum_aw_a) != 9);
  5207. BUILD_BUG_ON(offsetof(struct pqi_device_capability,
  5208. maximum_aw_b) != 10);
  5209. BUILD_BUG_ON(offsetof(struct pqi_device_capability,
  5210. maximum_aw_c) != 11);
  5211. BUILD_BUG_ON(offsetof(struct pqi_device_capability,
  5212. max_inbound_queues) != 16);
  5213. BUILD_BUG_ON(offsetof(struct pqi_device_capability,
  5214. max_elements_per_iq) != 18);
  5215. BUILD_BUG_ON(offsetof(struct pqi_device_capability,
  5216. max_iq_element_length) != 24);
  5217. BUILD_BUG_ON(offsetof(struct pqi_device_capability,
  5218. min_iq_element_length) != 26);
  5219. BUILD_BUG_ON(offsetof(struct pqi_device_capability,
  5220. max_outbound_queues) != 30);
  5221. BUILD_BUG_ON(offsetof(struct pqi_device_capability,
  5222. max_elements_per_oq) != 32);
  5223. BUILD_BUG_ON(offsetof(struct pqi_device_capability,
  5224. intr_coalescing_time_granularity) != 34);
  5225. BUILD_BUG_ON(offsetof(struct pqi_device_capability,
  5226. max_oq_element_length) != 36);
  5227. BUILD_BUG_ON(offsetof(struct pqi_device_capability,
  5228. min_oq_element_length) != 38);
  5229. BUILD_BUG_ON(offsetof(struct pqi_device_capability,
  5230. iu_layer_descriptors) != 64);
  5231. BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576);
	BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
		event_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
		oq_id) != 2);
	BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4);
	BUILD_BUG_ON(offsetof(struct pqi_event_config,
		num_event_descriptors) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_config,
		descriptors) != 4);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		event_type) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		event_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		additional_event_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		data) != 16);
	BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		event_type) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		event_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		additional_event_id) != 12);
	BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16);
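
	/* PQI task management request/response layout */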
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		nexus_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		lun_number) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		protocol_specific) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		outbound_queue_id_to_manage) != 26);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		request_id_to_manage) != 28);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		task_management_function) != 30);
	BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		nexus_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		additional_response_info) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		response_code) != 15);
	BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16);
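
	/* BMIC identify controller buffer layout */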
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		configured_logical_drive_count) != 0);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		configuration_signature) != 1);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		firmware_version) != 5);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		extended_logical_unit_count) != 154);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		firmware_build_number) != 190);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		controller_mode) != 292);
	BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
	BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
	BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560);
	BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560);
	BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);

	BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);
}