- /* Copyright 2012 STEC, Inc.
- *
- * This file is licensed under the terms of the 3-clause
- * BSD License (http://opensource.org/licenses/BSD-3-Clause)
- * or the GNU GPL-2.0 (http://www.gnu.org/licenses/gpl-2.0.html),
- * at your option. Both licenses are also available in the LICENSE file
- * distributed with this project. This file may not be copied, modified,
- * or distributed except in accordance with those terms.
- * Gordoni Waidhofer <gwaidhofer@stec-inc.com>
- * Initial Driver Design!
- * Thomas Swann <tswann@stec-inc.com>
- * Interrupt handling.
- * Ramprasad Chinthekindi <rchinthekindi@stec-inc.com>
- * biomode implementation.
- * Akhil Bhansali <abhansali@stec-inc.com>
- * Added support for DISCARD / FLUSH and FUA.
- */
- #include <linux/kernel.h>
- #include <linux/module.h>
- #include <linux/init.h>
- #include <linux/pci.h>
- #include <linux/slab.h>
- #include <linux/spinlock.h>
- #include <linux/blkdev.h>
- #include <linux/sched.h>
- #include <linux/interrupt.h>
- #include <linux/compiler.h>
- #include <linux/workqueue.h>
- #include <linux/bitops.h>
- #include <linux/delay.h>
- #include <linux/time.h>
- #include <linux/hdreg.h>
- #include <linux/dma-mapping.h>
- #include <linux/completion.h>
- #include <linux/scatterlist.h>
- #include <linux/version.h>
- #include <linux/err.h>
- #include <linux/aer.h>
- #include <linux/ctype.h>
- #include <linux/wait.h>
- #include <linux/uio.h>
- #include <scsi/scsi.h>
- #include <scsi/sg.h>
- #include <linux/io.h>
- #include <linux/uaccess.h>
- #include <asm/unaligned.h>
- #include "skd_s1120.h"
- static int skd_dbg_level;
- static int skd_isr_comp_limit = 4;
- enum {
- STEC_LINK_2_5GTS = 0,
- STEC_LINK_5GTS = 1,
- STEC_LINK_8GTS = 2,
- STEC_LINK_UNKNOWN = 0xFF
- };
- enum {
- SKD_FLUSH_INITIALIZER,
- SKD_FLUSH_ZERO_SIZE_FIRST,
- SKD_FLUSH_DATA_SECOND,
- };
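- /* Flushes are staged: a zero-size SYNCHRONIZE CACHE is issued first,
- * then any data portion as a second command.
- */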
- #define SKD_ASSERT(expr) \
- do { \
- if (unlikely(!(expr))) { \
- pr_err("Assertion failed! %s,%s,%s,line=%d\n", \
- # expr, __FILE__, __func__, __LINE__); \
- } \
- } while (0)
- #define DRV_NAME "skd"
- #define DRV_VERSION "2.2.1"
- #define DRV_BUILD_ID "0260"
- #define PFX DRV_NAME ": "
- #define DRV_BIN_VERSION 0x100
- #define DRV_VER_COMPL "2.2.1." DRV_BUILD_ID
- MODULE_AUTHOR("bug-reports: support@stec-inc.com");
- MODULE_LICENSE("Dual BSD/GPL");
- MODULE_DESCRIPTION("STEC s1120 PCIe SSD block driver (b" DRV_BUILD_ID ")");
- MODULE_VERSION(DRV_VERSION "-" DRV_BUILD_ID);
- #define PCI_VENDOR_ID_STEC 0x1B39
- #define PCI_DEVICE_ID_S1120 0x0001
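- /* FUA_NV bit in CDB byte 1; set when the block layer requests REQ_FUA. */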
- #define SKD_FUA_NV (1 << 1)
- #define SKD_MINORS_PER_DEVICE 16
- #define SKD_MAX_QUEUE_DEPTH 200u
- #define SKD_PAUSE_TIMEOUT (5 * 1000)
- #define SKD_N_FITMSG_BYTES (512u)
- #define SKD_N_SPECIAL_CONTEXT 32u
- #define SKD_N_SPECIAL_FITMSG_BYTES (128u)
- /* SG elements are 32 bytes, so we can make this 4096 and still be under the
- * 128KB limit. That allows 4096*4K = 16M xfer size
- */
- #define SKD_N_SG_PER_REQ_DEFAULT 256u
- #define SKD_N_SG_PER_SPECIAL 256u
- #define SKD_N_COMPLETION_ENTRY 256u
- #define SKD_N_READ_CAP_BYTES (8u)
- #define SKD_N_INTERNAL_BYTES (512u)
- /* 5 bits of uniquifier, 0xF800 */
- #define SKD_ID_INCR (0x400)
- #define SKD_ID_TABLE_MASK (3u << 8u)
- #define SKD_ID_RW_REQUEST (0u << 8u)
- #define SKD_ID_INTERNAL (1u << 8u)
- #define SKD_ID_SPECIAL_REQUEST (2u << 8u)
- #define SKD_ID_FIT_MSG (3u << 8u)
- #define SKD_ID_SLOT_MASK 0x00FFu
- #define SKD_ID_SLOT_AND_TABLE_MASK 0x03FFu
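- /* A context id is composite: low byte = slot, bits 8-9 = table
- * (r/w request, internal, special, FIT msg), upper bits = a
- * uniquifier bumped by SKD_ID_INCR each time a context is reused.
- */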
- #define SKD_N_TIMEOUT_SLOT 4u
- #define SKD_TIMEOUT_SLOT_MASK 3u
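- /* In-flight requests are counted in a ring of SKD_N_TIMEOUT_SLOT
- * buckets indexed by timeout_stamp; the one-second timer tick
- * advances the stamp and treats a non-empty bucket that comes back
- * into use as holding overdue requests (see skd_timer_tick()).
- */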
- #define SKD_N_MAX_SECTORS 2048u
- #define SKD_MAX_RETRIES 2u
- #define SKD_TIMER_SECONDS(seconds) (seconds)
- #define SKD_TIMER_MINUTES(minutes) ((minutes) * (60))
- #define INQ_STD_NBYTES 36
- #define SKD_DISCARD_CDB_LENGTH 24
- enum skd_drvr_state {
- SKD_DRVR_STATE_LOAD,
- SKD_DRVR_STATE_IDLE,
- SKD_DRVR_STATE_BUSY,
- SKD_DRVR_STATE_STARTING,
- SKD_DRVR_STATE_ONLINE,
- SKD_DRVR_STATE_PAUSING,
- SKD_DRVR_STATE_PAUSED,
- SKD_DRVR_STATE_DRAINING_TIMEOUT,
- SKD_DRVR_STATE_RESTARTING,
- SKD_DRVR_STATE_RESUMING,
- SKD_DRVR_STATE_STOPPING,
- SKD_DRVR_STATE_FAULT,
- SKD_DRVR_STATE_DISAPPEARED,
- SKD_DRVR_STATE_PROTOCOL_MISMATCH,
- SKD_DRVR_STATE_BUSY_ERASE,
- SKD_DRVR_STATE_BUSY_SANITIZE,
- SKD_DRVR_STATE_BUSY_IMMINENT,
- SKD_DRVR_STATE_WAIT_BOOT,
- SKD_DRVR_STATE_SYNCING,
- };
- #define SKD_WAIT_BOOT_TIMO SKD_TIMER_SECONDS(90u)
- #define SKD_STARTING_TIMO SKD_TIMER_SECONDS(8u)
- #define SKD_RESTARTING_TIMO SKD_TIMER_MINUTES(4u)
- #define SKD_DRAINING_TIMO SKD_TIMER_SECONDS(6u)
- #define SKD_BUSY_TIMO SKD_TIMER_MINUTES(20u)
- #define SKD_STARTED_BUSY_TIMO SKD_TIMER_SECONDS(60u)
- #define SKD_START_WAIT_SECONDS 90u
- enum skd_req_state {
- SKD_REQ_STATE_IDLE,
- SKD_REQ_STATE_SETUP,
- SKD_REQ_STATE_BUSY,
- SKD_REQ_STATE_COMPLETED,
- SKD_REQ_STATE_TIMEOUT,
- SKD_REQ_STATE_ABORTED,
- };
- enum skd_fit_msg_state {
- SKD_MSG_STATE_IDLE,
- SKD_MSG_STATE_BUSY,
- };
- enum skd_check_status_action {
- SKD_CHECK_STATUS_REPORT_GOOD,
- SKD_CHECK_STATUS_REPORT_SMART_ALERT,
- SKD_CHECK_STATUS_REQUEUE_REQUEST,
- SKD_CHECK_STATUS_REPORT_ERROR,
- SKD_CHECK_STATUS_BUSY_IMMINENT,
- };
- struct skd_fitmsg_context {
- enum skd_fit_msg_state state;
- struct skd_fitmsg_context *next;
- u32 id;
- u16 outstanding;
- u32 length;
- u32 offset;
- u8 *msg_buf;
- dma_addr_t mb_dma_address;
- };
- struct skd_request_context {
- enum skd_req_state state;
- struct skd_request_context *next;
- u16 id;
- u32 fitmsg_id;
- struct request *req;
- u8 flush_cmd;
- u8 discard_page;
- u32 timeout_stamp;
- u8 sg_data_dir;
- struct scatterlist *sg;
- u32 n_sg;
- u32 sg_byte_count;
- struct fit_sg_descriptor *sksg_list;
- dma_addr_t sksg_dma_address;
- struct fit_completion_entry_v1 completion;
- struct fit_comp_error_info err_info;
- };
- #define SKD_DATA_DIR_HOST_TO_CARD 1
- #define SKD_DATA_DIR_CARD_TO_HOST 2
- #define SKD_DATA_DIR_NONE 3 /* especially for DISCARD requests. */
- struct skd_special_context {
- struct skd_request_context req;
- u8 orphaned;
- void *data_buf;
- dma_addr_t db_dma_address;
- u8 *msg_buf;
- dma_addr_t mb_dma_address;
- };
- struct skd_sg_io {
- fmode_t mode;
- void __user *argp;
- struct sg_io_hdr sg;
- u8 cdb[16];
- u32 dxfer_len;
- u32 iovcnt;
- struct sg_iovec *iov;
- struct sg_iovec no_iov_iov;
- struct skd_special_context *skspcl;
- };
- typedef enum skd_irq_type {
- SKD_IRQ_LEGACY,
- SKD_IRQ_MSI,
- SKD_IRQ_MSIX
- } skd_irq_type_t;
- #define SKD_MAX_BARS 2
- struct skd_device {
- volatile void __iomem *mem_map[SKD_MAX_BARS];
- resource_size_t mem_phys[SKD_MAX_BARS];
- u32 mem_size[SKD_MAX_BARS];
- skd_irq_type_t irq_type;
- u32 msix_count;
- struct skd_msix_entry *msix_entries;
- struct pci_dev *pdev;
- int pcie_error_reporting_is_enabled;
- spinlock_t lock;
- struct gendisk *disk;
- struct request_queue *queue;
- struct device *class_dev;
- int gendisk_on;
- int sync_done;
- atomic_t device_count;
- u32 devno;
- u32 major;
- char name[32];
- char isr_name[30];
- enum skd_drvr_state state;
- u32 drive_state;
- u32 in_flight;
- u32 cur_max_queue_depth;
- u32 queue_low_water_mark;
- u32 dev_max_queue_depth;
- u32 num_fitmsg_context;
- u32 num_req_context;
- u32 timeout_slot[SKD_N_TIMEOUT_SLOT];
- u32 timeout_stamp;
- struct skd_fitmsg_context *skmsg_free_list;
- struct skd_fitmsg_context *skmsg_table;
- struct skd_request_context *skreq_free_list;
- struct skd_request_context *skreq_table;
- struct skd_special_context *skspcl_free_list;
- struct skd_special_context *skspcl_table;
- struct skd_special_context internal_skspcl;
- u32 read_cap_blocksize;
- u32 read_cap_last_lba;
- int read_cap_is_valid;
- int inquiry_is_valid;
- u8 inq_serial_num[13]; /* 12 chars plus null term */
- u8 id_str[80]; /* holds a composite name (pci + sernum) */
- u8 skcomp_cycle;
- u32 skcomp_ix;
- struct fit_completion_entry_v1 *skcomp_table;
- struct fit_comp_error_info *skerr_table;
- dma_addr_t cq_dma_address;
- wait_queue_head_t waitq;
- struct timer_list timer;
- u32 timer_countdown;
- u32 timer_substate;
- int n_special;
- int sgs_per_request;
- u32 last_mtd;
- u32 proto_ver;
- int dbg_level;
- u32 connect_time_stamp;
- int connect_retries;
- #define SKD_MAX_CONNECT_RETRIES 16
- u32 drive_jiffies;
- u32 timo_slot;
- struct work_struct completion_worker;
- };
- #define SKD_WRITEL(DEV, VAL, OFF) skd_reg_write32(DEV, VAL, OFF)
- #define SKD_READL(DEV, OFF) skd_reg_read32(DEV, OFF)
- #define SKD_WRITEQ(DEV, VAL, OFF) skd_reg_write64(DEV, VAL, OFF)
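- /* All chip registers are accessed through BAR 1 (mem_map[1]); at
- * dbg_level >= 2 each access is bracketed with barriers and logged.
- */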
- static inline u32 skd_reg_read32(struct skd_device *skdev, u32 offset)
- {
- u32 val;
- if (likely(skdev->dbg_level < 2))
- return readl(skdev->mem_map[1] + offset);
- else {
- barrier();
- val = readl(skdev->mem_map[1] + offset);
- barrier();
- pr_debug("%s:%s:%d offset %x = %x\n",
- skdev->name, __func__, __LINE__, offset, val);
- return val;
- }
- }
- static inline void skd_reg_write32(struct skd_device *skdev, u32 val,
- u32 offset)
- {
- if (likely(skdev->dbg_level < 2)) {
- writel(val, skdev->mem_map[1] + offset);
- barrier();
- } else {
- barrier();
- writel(val, skdev->mem_map[1] + offset);
- barrier();
- pr_debug("%s:%s:%d offset %x = %x\n",
- skdev->name, __func__, __LINE__, offset, val);
- }
- }
- static inline void skd_reg_write64(struct skd_device *skdev, u64 val,
- u32 offset)
- {
- if (likely(skdev->dbg_level < 2)) {
- writeq(val, skdev->mem_map[1] + offset);
- barrier();
- } else {
- barrier();
- writeq(val, skdev->mem_map[1] + offset);
- barrier();
- pr_debug("%s:%s:%d offset %x = %016llx\n",
- skdev->name, __func__, __LINE__, offset, val);
- }
- }
- #define SKD_IRQ_DEFAULT SKD_IRQ_MSI
- static int skd_isr_type = SKD_IRQ_DEFAULT;
- module_param(skd_isr_type, int, 0444);
- MODULE_PARM_DESC(skd_isr_type, "Interrupt type capability."
- " (0==legacy, 1==MSI, 2==MSI-X, default==1)");
- #define SKD_MAX_REQ_PER_MSG_DEFAULT 1
- static int skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
- module_param(skd_max_req_per_msg, int, 0444);
- MODULE_PARM_DESC(skd_max_req_per_msg,
- "Maximum SCSI requests packed in a single message."
- " (1-14, default==1)");
- #define SKD_MAX_QUEUE_DEPTH_DEFAULT 64
- #define SKD_MAX_QUEUE_DEPTH_DEFAULT_STR "64"
- static int skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
- module_param(skd_max_queue_depth, int, 0444);
- MODULE_PARM_DESC(skd_max_queue_depth,
- "Maximum SCSI requests issued to s1120."
- " (1-200, default==" SKD_MAX_QUEUE_DEPTH_DEFAULT_STR ")");
- static int skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
- module_param(skd_sgs_per_request, int, 0444);
- MODULE_PARM_DESC(skd_sgs_per_request,
- "Maximum SG elements per block request."
- " (1-4096, default==256)");
- static int skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
- module_param(skd_max_pass_thru, int, 0444);
- MODULE_PARM_DESC(skd_max_pass_thru,
- "Maximum SCSI pass-thru at a time." " (1-50, default==32)");
- module_param(skd_dbg_level, int, 0444);
- MODULE_PARM_DESC(skd_dbg_level, "s1120 debug level (0,1,2)");
- module_param(skd_isr_comp_limit, int, 0444);
- MODULE_PARM_DESC(skd_isr_comp_limit, "s1120 isr comp limit (0=none) default=4");
- /* Major device number dynamically assigned. */
- static u32 skd_major;
- static void skd_destruct(struct skd_device *skdev);
- static const struct block_device_operations skd_blockdev_ops;
- static void skd_send_fitmsg(struct skd_device *skdev,
- struct skd_fitmsg_context *skmsg);
- static void skd_send_special_fitmsg(struct skd_device *skdev,
- struct skd_special_context *skspcl);
- static void skd_request_fn(struct request_queue *rq);
- static void skd_end_request(struct skd_device *skdev,
- struct skd_request_context *skreq, int error);
- static int skd_preop_sg_list(struct skd_device *skdev,
- struct skd_request_context *skreq);
- static void skd_postop_sg_list(struct skd_device *skdev,
- struct skd_request_context *skreq);
- static void skd_restart_device(struct skd_device *skdev);
- static int skd_quiesce_dev(struct skd_device *skdev);
- static int skd_unquiesce_dev(struct skd_device *skdev);
- static void skd_release_special(struct skd_device *skdev,
- struct skd_special_context *skspcl);
- static void skd_disable_interrupts(struct skd_device *skdev);
- static void skd_isr_fwstate(struct skd_device *skdev);
- static void skd_recover_requests(struct skd_device *skdev, int requeue);
- static void skd_soft_reset(struct skd_device *skdev);
- static const char *skd_name(struct skd_device *skdev);
- const char *skd_drive_state_to_str(int state);
- const char *skd_skdev_state_to_str(enum skd_drvr_state state);
- static void skd_log_skdev(struct skd_device *skdev, const char *event);
- static void skd_log_skmsg(struct skd_device *skdev,
- struct skd_fitmsg_context *skmsg, const char *event);
- static void skd_log_skreq(struct skd_device *skdev,
- struct skd_request_context *skreq, const char *event);
- /*
- *****************************************************************************
- * READ/WRITE REQUESTS
- *****************************************************************************
- */
- static void skd_fail_all_pending(struct skd_device *skdev)
- {
- struct request_queue *q = skdev->queue;
- struct request *req;
- for (;;) {
- req = blk_peek_request(q);
- if (req == NULL)
- break;
- blk_start_request(req);
- __blk_end_request_all(req, -EIO);
- }
- }
- static void
- skd_prep_rw_cdb(struct skd_scsi_request *scsi_req,
- int data_dir, unsigned lba,
- unsigned count)
- {
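- /* Build a 10-byte READ(10)/WRITE(10) CDB: opcode 0x28 or 0x2a,
- * big-endian LBA in bytes 2-5, big-endian count in bytes 7-8.
- */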
- if (data_dir == READ)
- scsi_req->cdb[0] = 0x28;
- else
- scsi_req->cdb[0] = 0x2a;
- scsi_req->cdb[1] = 0;
- scsi_req->cdb[2] = (lba & 0xff000000) >> 24;
- scsi_req->cdb[3] = (lba & 0xff0000) >> 16;
- scsi_req->cdb[4] = (lba & 0xff00) >> 8;
- scsi_req->cdb[5] = (lba & 0xff);
- scsi_req->cdb[6] = 0;
- scsi_req->cdb[7] = (count & 0xff00) >> 8;
- scsi_req->cdb[8] = count & 0xff;
- scsi_req->cdb[9] = 0;
- }
- static void
- skd_prep_zerosize_flush_cdb(struct skd_scsi_request *scsi_req,
- struct skd_request_context *skreq)
- {
- skreq->flush_cmd = 1;
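- /* 0x35 is SYNCHRONIZE CACHE(10); zero LBA and count flush the whole cache. */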
- scsi_req->cdb[0] = 0x35;
- scsi_req->cdb[1] = 0;
- scsi_req->cdb[2] = 0;
- scsi_req->cdb[3] = 0;
- scsi_req->cdb[4] = 0;
- scsi_req->cdb[5] = 0;
- scsi_req->cdb[6] = 0;
- scsi_req->cdb[7] = 0;
- scsi_req->cdb[8] = 0;
- scsi_req->cdb[9] = 0;
- }
- static void
- skd_prep_discard_cdb(struct skd_scsi_request *scsi_req,
- struct skd_request_context *skreq,
- struct page *page,
- u32 lba, u32 count)
- {
- char *buf;
- unsigned long len;
- struct request *req;
- buf = page_address(page);
- len = SKD_DISCARD_CDB_LENGTH;
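- /* Build a one-descriptor UNMAP parameter list in the payload page:
- * bytes 0-1 data length (6 + 16), bytes 2-3 block descriptor data
- * length (16), then an 8-byte LBA and a 4-byte block count.
- */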
- scsi_req->cdb[0] = UNMAP;
- scsi_req->cdb[8] = len;
- put_unaligned_be16(6 + 16, &buf[0]);
- put_unaligned_be16(16, &buf[2]);
- put_unaligned_be64(lba, &buf[8]);
- put_unaligned_be32(count, &buf[16]);
- req = skreq->req;
- blk_add_request_payload(req, page, len);
- }
- static void skd_request_fn_not_online(struct request_queue *q);
- static void skd_request_fn(struct request_queue *q)
- {
- struct skd_device *skdev = q->queuedata;
- struct skd_fitmsg_context *skmsg = NULL;
- struct fit_msg_hdr *fmh = NULL;
- struct skd_request_context *skreq;
- struct request *req = NULL;
- struct skd_scsi_request *scsi_req;
- struct page *page;
- unsigned long io_flags;
- int error;
- u32 lba;
- u32 count;
- int data_dir;
- u32 be_lba;
- u32 be_count;
- u64 be_dmaa;
- u64 cmdctxt;
- u32 timo_slot;
- void *cmd_ptr;
- int flush, fua;
- if (skdev->state != SKD_DRVR_STATE_ONLINE) {
- skd_request_fn_not_online(q);
- return;
- }
- if (blk_queue_stopped(skdev->queue)) {
- if (skdev->skmsg_free_list == NULL ||
- skdev->skreq_free_list == NULL ||
- skdev->in_flight >= skdev->queue_low_water_mark)
- /* There is still some kind of shortage */
- return;
- queue_flag_clear(QUEUE_FLAG_STOPPED, skdev->queue);
- }
- /*
- * Stop conditions:
- * - There are no more native requests
- * - There are already the maximum number of requests in progress
- * - There are no more skd_request_context entries
- * - There are no more FIT msg buffers
- */
- for (;;) {
- flush = fua = 0;
- req = blk_peek_request(q);
- /* Are there any native requests to start? */
- if (req == NULL)
- break;
- lba = (u32)blk_rq_pos(req);
- count = blk_rq_sectors(req);
- data_dir = rq_data_dir(req);
- io_flags = req->cmd_flags;
- if (io_flags & REQ_FLUSH)
- flush++;
- if (io_flags & REQ_FUA)
- fua++;
- pr_debug("%s:%s:%d new req=%p lba=%u(0x%x) "
- "count=%u(0x%x) dir=%d\n",
- skdev->name, __func__, __LINE__,
- req, lba, lba, count, count, data_dir);
- /* At this point we know there is a request */
- /* Are too many requests already in progress? */
- if (skdev->in_flight >= skdev->cur_max_queue_depth) {
- pr_debug("%s:%s:%d qdepth %d, limit %d\n",
- skdev->name, __func__, __LINE__,
- skdev->in_flight, skdev->cur_max_queue_depth);
- break;
- }
- /* Is a skd_request_context available? */
- skreq = skdev->skreq_free_list;
- if (skreq == NULL) {
- pr_debug("%s:%s:%d Out of req=%p\n",
- skdev->name, __func__, __LINE__, q);
- break;
- }
- SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE);
- SKD_ASSERT((skreq->id & SKD_ID_INCR) == 0);
- /* Now we check to see if we can get a fit msg */
- if (skmsg == NULL) {
- if (skdev->skmsg_free_list == NULL) {
- pr_debug("%s:%s:%d Out of msg\n",
- skdev->name, __func__, __LINE__);
- break;
- }
- }
- skreq->flush_cmd = 0;
- skreq->n_sg = 0;
- skreq->sg_byte_count = 0;
- skreq->discard_page = 0;
- /*
- * OK to now dequeue request from q.
- *
- * At this point we are committed to either start or reject
- * the native request. Note that skd_request_context is
- * available but is still at the head of the free list.
- */
- blk_start_request(req);
- skreq->req = req;
- skreq->fitmsg_id = 0;
- /* Either a FIT msg is in progress or we have to start one. */
- if (skmsg == NULL) {
- /* Are there any FIT msg buffers available? */
- skmsg = skdev->skmsg_free_list;
- if (skmsg == NULL) {
- pr_debug("%s:%s:%d Out of msg skdev=%p\n",
- skdev->name, __func__, __LINE__,
- skdev);
- break;
- }
- SKD_ASSERT(skmsg->state == SKD_MSG_STATE_IDLE);
- SKD_ASSERT((skmsg->id & SKD_ID_INCR) == 0);
- skdev->skmsg_free_list = skmsg->next;
- skmsg->state = SKD_MSG_STATE_BUSY;
- skmsg->id += SKD_ID_INCR;
- /* Initialize the FIT msg header */
- fmh = (struct fit_msg_hdr *)skmsg->msg_buf;
- memset(fmh, 0, sizeof(*fmh));
- fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
- skmsg->length = sizeof(*fmh);
- }
- skreq->fitmsg_id = skmsg->id;
- /*
- * Note that a FIT msg may have just been started
- * but contains no SoFIT requests yet.
- */
- /*
- * Transcode the request, checking as we go. The outcome of
- * the transcoding is represented by the error variable.
- */
- cmd_ptr = &skmsg->msg_buf[skmsg->length];
- memset(cmd_ptr, 0, 32);
- be_lba = cpu_to_be32(lba);
- be_count = cpu_to_be32(count);
- be_dmaa = cpu_to_be64((u64)skreq->sksg_dma_address);
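- /* Build the tag from the request id with the uniquifier pre-bumped
- * to match the SKD_ID_INCR applied when the request is committed;
- * the completion echoes this tag so the skreq can be looked up.
- */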
- cmdctxt = skreq->id + SKD_ID_INCR;
- scsi_req = cmd_ptr;
- scsi_req->hdr.tag = cmdctxt;
- scsi_req->hdr.sg_list_dma_address = be_dmaa;
- if (data_dir == READ)
- skreq->sg_data_dir = SKD_DATA_DIR_CARD_TO_HOST;
- else
- skreq->sg_data_dir = SKD_DATA_DIR_HOST_TO_CARD;
- if (io_flags & REQ_DISCARD) {
- page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
- if (!page) {
- pr_err("request_fn:Page allocation failed.\n");
- skd_end_request(skdev, skreq, -ENOMEM);
- break;
- }
- skreq->discard_page = 1;
- req->completion_data = page;
- skd_prep_discard_cdb(scsi_req, skreq, page, lba, count);
- } else if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) {
- skd_prep_zerosize_flush_cdb(scsi_req, skreq);
- SKD_ASSERT(skreq->flush_cmd == 1);
- } else {
- skd_prep_rw_cdb(scsi_req, data_dir, lba, count);
- }
- if (fua)
- scsi_req->cdb[1] |= SKD_FUA_NV;
- if (!req->bio)
- goto skip_sg;
- error = skd_preop_sg_list(skdev, skreq);
- if (error != 0) {
- /*
- * Complete the native request with error.
- * Note that the request context is still at the
- * head of the free list, and that the SoFIT request
- * was encoded into the FIT msg buffer but the FIT
- * msg length has not been updated. In short, the
- * only resource that has been allocated but might
- * not be used is that the FIT msg could be empty.
- */
- pr_debug("%s:%s:%d error Out\n",
- skdev->name, __func__, __LINE__);
- skd_end_request(skdev, skreq, error);
- continue;
- }
- skip_sg:
- scsi_req->hdr.sg_list_len_bytes =
- cpu_to_be32(skreq->sg_byte_count);
- /* Complete resource allocations. */
- skdev->skreq_free_list = skreq->next;
- skreq->state = SKD_REQ_STATE_BUSY;
- skreq->id += SKD_ID_INCR;
- skmsg->length += sizeof(struct skd_scsi_request);
- fmh->num_protocol_cmds_coalesced++;
- /*
- * Update the active request counts.
- * Capture the timeout timestamp.
- */
- skreq->timeout_stamp = skdev->timeout_stamp;
- timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
- skdev->timeout_slot[timo_slot]++;
- skdev->in_flight++;
- pr_debug("%s:%s:%d req=0x%x busy=%d\n",
- skdev->name, __func__, __LINE__,
- skreq->id, skdev->in_flight);
- /*
- * If the FIT msg buffer is full send it.
- */
- if (skmsg->length >= SKD_N_FITMSG_BYTES ||
- fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) {
- skd_send_fitmsg(skdev, skmsg);
- skmsg = NULL;
- fmh = NULL;
- }
- }
- /*
- * Is a FIT msg in progress? If it is empty put the buffer back
- * on the free list. If it is non-empty send what we got.
- * This minimizes latency when there are fewer requests than
- * what fits in a FIT msg.
- */
- if (skmsg != NULL) {
- /* Bigger than just a FIT msg header? */
- if (skmsg->length > sizeof(struct fit_msg_hdr)) {
- pr_debug("%s:%s:%d sending msg=%p, len %d\n",
- skdev->name, __func__, __LINE__,
- skmsg, skmsg->length);
- skd_send_fitmsg(skdev, skmsg);
- } else {
- /*
- * The FIT msg is empty. It means we got started
- * on the msg, but the requests were rejected.
- */
- skmsg->state = SKD_MSG_STATE_IDLE;
- skmsg->id += SKD_ID_INCR;
- skmsg->next = skdev->skmsg_free_list;
- skdev->skmsg_free_list = skmsg;
- }
- skmsg = NULL;
- fmh = NULL;
- }
- /*
- * If req is non-NULL it means there is something to do but
- * we are out of a resource.
- */
- if (req)
- blk_stop_queue(skdev->queue);
- }
- static void skd_end_request(struct skd_device *skdev,
- struct skd_request_context *skreq, int error)
- {
- struct request *req = skreq->req;
- unsigned int io_flags = req->cmd_flags;
- if ((io_flags & REQ_DISCARD) &&
- (skreq->discard_page == 1)) {
- pr_debug("%s:%s:%d, free the page!",
- skdev->name, __func__, __LINE__);
- __free_page(req->completion_data);
- }
- if (unlikely(error)) {
- struct request *req = skreq->req;
- char *cmd = (rq_data_dir(req) == READ) ? "read" : "write";
- u32 lba = (u32)blk_rq_pos(req);
- u32 count = blk_rq_sectors(req);
- pr_err("(%s): Error cmd=%s sect=%u count=%u id=0x%x\n",
- skd_name(skdev), cmd, lba, count, skreq->id);
- } else
- pr_debug("%s:%s:%d id=0x%x error=%d\n",
- skdev->name, __func__, __LINE__, skreq->id, error);
- __blk_end_request_all(skreq->req, error);
- }
- static int skd_preop_sg_list(struct skd_device *skdev,
- struct skd_request_context *skreq)
- {
- struct request *req = skreq->req;
- int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
- int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
- struct scatterlist *sg = &skreq->sg[0];
- int n_sg;
- int i;
- skreq->sg_byte_count = 0;
- /* SKD_ASSERT(skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD ||
- skreq->sg_data_dir == SKD_DATA_DIR_CARD_TO_HOST); */
- n_sg = blk_rq_map_sg(skdev->queue, req, sg);
- if (n_sg <= 0)
- return -EINVAL;
- /*
- * Map scatterlist to PCI bus addresses.
- * Note PCI might change the number of entries.
- */
- n_sg = pci_map_sg(skdev->pdev, sg, n_sg, pci_dir);
- if (n_sg <= 0)
- return -EINVAL;
- SKD_ASSERT(n_sg <= skdev->sgs_per_request);
- skreq->n_sg = n_sg;
- for (i = 0; i < n_sg; i++) {
- struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
- u32 cnt = sg_dma_len(&sg[i]);
- uint64_t dma_addr = sg_dma_address(&sg[i]);
- sgd->control = FIT_SGD_CONTROL_NOT_LAST;
- sgd->byte_count = cnt;
- skreq->sg_byte_count += cnt;
- sgd->host_side_addr = dma_addr;
- sgd->dev_side_addr = 0;
- }
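- /* Terminate the chain here; skd_postop_sg_list() restores the
- * next ptr so the pre-linked descriptor list can be reused.
- */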
- skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL;
- skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST;
- if (unlikely(skdev->dbg_level > 1)) {
- pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n",
- skdev->name, __func__, __LINE__,
- skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
- for (i = 0; i < n_sg; i++) {
- struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
- pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x "
- "addr=0x%llx next=0x%llx\n",
- skdev->name, __func__, __LINE__,
- i, sgd->byte_count, sgd->control,
- sgd->host_side_addr, sgd->next_desc_ptr);
- }
- }
- return 0;
- }
- static void skd_postop_sg_list(struct skd_device *skdev,
- struct skd_request_context *skreq)
- {
- int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
- int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
- /*
- * restore the next ptr for next IO request so we
- * don't have to set it every time.
- */
- skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr =
- skreq->sksg_dma_address +
- ((skreq->n_sg) * sizeof(struct fit_sg_descriptor));
- pci_unmap_sg(skdev->pdev, &skreq->sg[0], skreq->n_sg, pci_dir);
- }
- static void skd_request_fn_not_online(struct request_queue *q)
- {
- struct skd_device *skdev = q->queuedata;
- int error;
- SKD_ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE);
- skd_log_skdev(skdev, "req_not_online");
- switch (skdev->state) {
- case SKD_DRVR_STATE_PAUSING:
- case SKD_DRVR_STATE_PAUSED:
- case SKD_DRVR_STATE_STARTING:
- case SKD_DRVR_STATE_RESTARTING:
- case SKD_DRVR_STATE_WAIT_BOOT:
- /* In case of starting, we haven't started the queue,
- * so we can't get here... but requests are
- * possibly hanging out waiting for us because we
- * reported the dev/skd0 already. They'll wait
- * forever if connect doesn't complete.
- * What to do??? delay dev/skd0 ??
- */
- case SKD_DRVR_STATE_BUSY:
- case SKD_DRVR_STATE_BUSY_IMMINENT:
- case SKD_DRVR_STATE_BUSY_ERASE:
- case SKD_DRVR_STATE_DRAINING_TIMEOUT:
- return;
- case SKD_DRVR_STATE_BUSY_SANITIZE:
- case SKD_DRVR_STATE_STOPPING:
- case SKD_DRVR_STATE_SYNCING:
- case SKD_DRVR_STATE_FAULT:
- case SKD_DRVR_STATE_DISAPPEARED:
- default:
- error = -EIO;
- break;
- }
- /* If we get here, terminate all pending block requests
- * with EIO and any scsi pass thru with appropriate sense
- */
- skd_fail_all_pending(skdev);
- }
- /*
- *****************************************************************************
- * TIMER
- *****************************************************************************
- */
- static void skd_timer_tick_not_online(struct skd_device *skdev);
- static void skd_timer_tick(ulong arg)
- {
- struct skd_device *skdev = (struct skd_device *)arg;
- u32 timo_slot;
- u32 overdue_timestamp;
- unsigned long reqflags;
- u32 state;
- if (skdev->state == SKD_DRVR_STATE_FAULT)
- /* The driver has declared fault, and we want it to
- * stay that way until driver is reloaded.
- */
- return;
- spin_lock_irqsave(&skdev->lock, reqflags);
- state = SKD_READL(skdev, FIT_STATUS);
- state &= FIT_SR_DRIVE_STATE_MASK;
- if (state != skdev->drive_state)
- skd_isr_fwstate(skdev);
- if (skdev->state != SKD_DRVR_STATE_ONLINE) {
- skd_timer_tick_not_online(skdev);
- goto timer_func_out;
- }
- skdev->timeout_stamp++;
- timo_slot = skdev->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
- /*
- * All requests that happened during the previous use of
- * this slot should be done by now. The slot was last used
- * SKD_N_TIMEOUT_SLOT one-second ticks ago.
- */
- if (skdev->timeout_slot[timo_slot] == 0)
- goto timer_func_out;
- /* Something is overdue */
- overdue_timestamp = skdev->timeout_stamp - SKD_N_TIMEOUT_SLOT;
- pr_debug("%s:%s:%d found %d timeouts, draining busy=%d\n",
- skdev->name, __func__, __LINE__,
- skdev->timeout_slot[timo_slot], skdev->in_flight);
- pr_err("(%s): Overdue IOs (%d), busy %d\n",
- skd_name(skdev), skdev->timeout_slot[timo_slot],
- skdev->in_flight);
- skdev->timer_countdown = SKD_DRAINING_TIMO;
- skdev->state = SKD_DRVR_STATE_DRAINING_TIMEOUT;
- skdev->timo_slot = timo_slot;
- blk_stop_queue(skdev->queue);
- timer_func_out:
- mod_timer(&skdev->timer, (jiffies + HZ));
- spin_unlock_irqrestore(&skdev->lock, reqflags);
- }
- static void skd_timer_tick_not_online(struct skd_device *skdev)
- {
- switch (skdev->state) {
- case SKD_DRVR_STATE_IDLE:
- case SKD_DRVR_STATE_LOAD:
- break;
- case SKD_DRVR_STATE_BUSY_SANITIZE:
- pr_debug("%s:%s:%d drive busy sanitize[%x], driver[%x]\n",
- skdev->name, __func__, __LINE__,
- skdev->drive_state, skdev->state);
- /* If we've been in sanitize for 3 seconds, we figure we're not
- * going to get any more completions, so recover requests now
- */
- if (skdev->timer_countdown > 0) {
- skdev->timer_countdown--;
- return;
- }
- skd_recover_requests(skdev, 0);
- break;
- case SKD_DRVR_STATE_BUSY:
- case SKD_DRVR_STATE_BUSY_IMMINENT:
- case SKD_DRVR_STATE_BUSY_ERASE:
- pr_debug("%s:%s:%d busy[%x], countdown=%d\n",
- skdev->name, __func__, __LINE__,
- skdev->state, skdev->timer_countdown);
- if (skdev->timer_countdown > 0) {
- skdev->timer_countdown--;
- return;
- }
- pr_debug("%s:%s:%d busy[%x], timedout=%d, restarting device.",
- skdev->name, __func__, __LINE__,
- skdev->state, skdev->timer_countdown);
- skd_restart_device(skdev);
- break;
- case SKD_DRVR_STATE_WAIT_BOOT:
- case SKD_DRVR_STATE_STARTING:
- if (skdev->timer_countdown > 0) {
- skdev->timer_countdown--;
- return;
- }
- /* For now, we fault the drive. Could attempt resets to
- * recover at some point. */
- skdev->state = SKD_DRVR_STATE_FAULT;
- pr_err("(%s): DriveFault Connect Timeout (%x)\n",
- skd_name(skdev), skdev->drive_state);
- /* start the queue so we can respond with error to requests */
- /* wakeup anyone waiting for startup complete */
- blk_start_queue(skdev->queue);
- skdev->gendisk_on = -1;
- wake_up_interruptible(&skdev->waitq);
- break;
- case SKD_DRVR_STATE_ONLINE:
- /* shouldn't get here. */
- break;
- case SKD_DRVR_STATE_PAUSING:
- case SKD_DRVR_STATE_PAUSED:
- break;
- case SKD_DRVR_STATE_DRAINING_TIMEOUT:
- pr_debug("%s:%s:%d "
- "draining busy [%d] tick[%d] qdb[%d] tmls[%d]\n",
- skdev->name, __func__, __LINE__,
- skdev->timo_slot,
- skdev->timer_countdown,
- skdev->in_flight,
- skdev->timeout_slot[skdev->timo_slot]);
- /* if the slot has cleared we can let the I/O continue */
- if (skdev->timeout_slot[skdev->timo_slot] == 0) {
- pr_debug("%s:%s:%d Slot drained, starting queue.\n",
- skdev->name, __func__, __LINE__);
- skdev->state = SKD_DRVR_STATE_ONLINE;
- blk_start_queue(skdev->queue);
- return;
- }
- if (skdev->timer_countdown > 0) {
- skdev->timer_countdown--;
- return;
- }
- skd_restart_device(skdev);
- break;
- case SKD_DRVR_STATE_RESTARTING:
- if (skdev->timer_countdown > 0) {
- skdev->timer_countdown--;
- return;
- }
- /* For now, we fault the drive. Could attempt resets to
- * recover at some point. */
- skdev->state = SKD_DRVR_STATE_FAULT;
- pr_err("(%s): DriveFault Reconnect Timeout (%x)\n",
- skd_name(skdev), skdev->drive_state);
- /*
- * Recovering does two things:
- * 1. completes IO with error
- * 2. reclaims dma resources
- * When is it safe to recover requests?
- * - if the drive state is faulted
- * - if the state is still soft reset after our timeout
- * - if the drive registers are dead (state = FF)
- * If it is "unsafe", we still need to recover, so we will
- * disable pci bus mastering and disable our interrupts.
- */
- if ((skdev->drive_state == FIT_SR_DRIVE_SOFT_RESET) ||
- (skdev->drive_state == FIT_SR_DRIVE_FAULT) ||
- (skdev->drive_state == FIT_SR_DRIVE_STATE_MASK))
- /* It never came out of soft reset. Try to
- * recover the requests and then let them
- * fail. This is to mitigate hung processes. */
- skd_recover_requests(skdev, 0);
- else {
- pr_err("(%s): Disable BusMaster (%x)\n",
- skd_name(skdev), skdev->drive_state);
- pci_disable_device(skdev->pdev);
- skd_disable_interrupts(skdev);
- skd_recover_requests(skdev, 0);
- }
- /* start the queue so we can respond with error to requests */
- /* wakeup anyone waiting for startup complete */
- blk_start_queue(skdev->queue);
- skdev->gendisk_on = -1;
- wake_up_interruptible(&skdev->waitq);
- break;
- case SKD_DRVR_STATE_RESUMING:
- case SKD_DRVR_STATE_STOPPING:
- case SKD_DRVR_STATE_SYNCING:
- case SKD_DRVR_STATE_FAULT:
- case SKD_DRVR_STATE_DISAPPEARED:
- default:
- break;
- }
- }
- static int skd_start_timer(struct skd_device *skdev)
- {
- int rc;
- setup_timer(&skdev->timer, skd_timer_tick, (ulong)skdev);
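- /* skd_timer_tick() re-arms the timer once a second via mod_timer(). */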
- rc = mod_timer(&skdev->timer, (jiffies + HZ));
- if (rc)
- pr_err("%s: failed to start timer %d\n",
- __func__, rc);
- return rc;
- }
- static void skd_kill_timer(struct skd_device *skdev)
- {
- del_timer_sync(&skdev->timer);
- }
- /*
- *****************************************************************************
- * IOCTL
- *****************************************************************************
- */
- static int skd_ioctl_sg_io(struct skd_device *skdev,
- fmode_t mode, void __user *argp);
- static int skd_sg_io_get_and_check_args(struct skd_device *skdev,
- struct skd_sg_io *sksgio);
- static int skd_sg_io_obtain_skspcl(struct skd_device *skdev,
- struct skd_sg_io *sksgio);
- static int skd_sg_io_prep_buffering(struct skd_device *skdev,
- struct skd_sg_io *sksgio);
- static int skd_sg_io_copy_buffer(struct skd_device *skdev,
- struct skd_sg_io *sksgio, int dxfer_dir);
- static int skd_sg_io_send_fitmsg(struct skd_device *skdev,
- struct skd_sg_io *sksgio);
- static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio);
- static int skd_sg_io_release_skspcl(struct skd_device *skdev,
- struct skd_sg_io *sksgio);
- static int skd_sg_io_put_status(struct skd_device *skdev,
- struct skd_sg_io *sksgio);
- static void skd_complete_special(struct skd_device *skdev,
- volatile struct fit_completion_entry_v1
- *skcomp,
- volatile struct fit_comp_error_info *skerr,
- struct skd_special_context *skspcl);
- static int skd_bdev_ioctl(struct block_device *bdev, fmode_t mode,
- uint cmd_in, ulong arg)
- {
- int rc = 0;
- struct gendisk *disk = bdev->bd_disk;
- struct skd_device *skdev = disk->private_data;
- void __user *p = (void __user *)arg;
- pr_debug("%s:%s:%d %s: CMD[%s] ioctl mode 0x%x, cmd 0x%x arg %0lx\n",
- skdev->name, __func__, __LINE__,
- disk->disk_name, current->comm, mode, cmd_in, arg);
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- switch (cmd_in) {
- case SG_SET_TIMEOUT:
- case SG_GET_TIMEOUT:
- case SG_GET_VERSION_NUM:
- rc = scsi_cmd_ioctl(disk->queue, disk, mode, cmd_in, p);
- break;
- case SG_IO:
- rc = skd_ioctl_sg_io(skdev, mode, p);
- break;
- default:
- rc = -ENOTTY;
- break;
- }
- pr_debug("%s:%s:%d %s: completion rc %d\n",
- skdev->name, __func__, __LINE__, disk->disk_name, rc);
- return rc;
- }
- static int skd_ioctl_sg_io(struct skd_device *skdev, fmode_t mode,
- void __user *argp)
- {
- int rc;
- struct skd_sg_io sksgio;
- memset(&sksgio, 0, sizeof(sksgio));
- sksgio.mode = mode;
- sksgio.argp = argp;
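- /* Default to the embedded single-element iovec; replaced in
- * skd_sg_io_get_and_check_args() if the caller passed an iovec array.
- */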
- sksgio.iov = &sksgio.no_iov_iov;
- switch (skdev->state) {
- case SKD_DRVR_STATE_ONLINE:
- case SKD_DRVR_STATE_BUSY_IMMINENT:
- break;
- default:
- pr_debug("%s:%s:%d drive not online\n",
- skdev->name, __func__, __LINE__);
- rc = -ENXIO;
- goto out;
- }
- rc = skd_sg_io_get_and_check_args(skdev, &sksgio);
- if (rc)
- goto out;
- rc = skd_sg_io_obtain_skspcl(skdev, &sksgio);
- if (rc)
- goto out;
- rc = skd_sg_io_prep_buffering(skdev, &sksgio);
- if (rc)
- goto out;
- rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_TO_DEV);
- if (rc)
- goto out;
- rc = skd_sg_io_send_fitmsg(skdev, &sksgio);
- if (rc)
- goto out;
- rc = skd_sg_io_await(skdev, &sksgio);
- if (rc)
- goto out;
- rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_FROM_DEV);
- if (rc)
- goto out;
- rc = skd_sg_io_put_status(skdev, &sksgio);
- if (rc)
- goto out;
- rc = 0;
- out:
- skd_sg_io_release_skspcl(skdev, &sksgio);
- if (sksgio.iov != NULL && sksgio.iov != &sksgio.no_iov_iov)
- kfree(sksgio.iov);
- return rc;
- }
- static int skd_sg_io_get_and_check_args(struct skd_device *skdev,
- struct skd_sg_io *sksgio)
- {
- struct sg_io_hdr *sgp = &sksgio->sg;
- int i, acc;
- if (!access_ok(VERIFY_WRITE, sksgio->argp, sizeof(sg_io_hdr_t))) {
- pr_debug("%s:%s:%d access sg failed %p\n",
- skdev->name, __func__, __LINE__, sksgio->argp);
- return -EFAULT;
- }
- if (__copy_from_user(sgp, sksgio->argp, sizeof(sg_io_hdr_t))) {
- pr_debug("%s:%s:%d copy_from_user sg failed %p\n",
- skdev->name, __func__, __LINE__, sksgio->argp);
- return -EFAULT;
- }
- if (sgp->interface_id != SG_INTERFACE_ID_ORIG) {
- pr_debug("%s:%s:%d interface_id invalid 0x%x\n",
- skdev->name, __func__, __LINE__, sgp->interface_id);
- return -EINVAL;
- }
- if (sgp->cmd_len > sizeof(sksgio->cdb)) {
- pr_debug("%s:%s:%d cmd_len invalid %d\n",
- skdev->name, __func__, __LINE__, sgp->cmd_len);
- return -EINVAL;
- }
- if (sgp->iovec_count > 256) {
- pr_debug("%s:%s:%d iovec_count invalid %d\n",
- skdev->name, __func__, __LINE__, sgp->iovec_count);
- return -EINVAL;
- }
- if (sgp->dxfer_len > (PAGE_SIZE * SKD_N_SG_PER_SPECIAL)) {
- pr_debug("%s:%s:%d dxfer_len invalid %d\n",
- skdev->name, __func__, __LINE__, sgp->dxfer_len);
- return -EINVAL;
- }
- switch (sgp->dxfer_direction) {
- case SG_DXFER_NONE:
- acc = -1;
- break;
- case SG_DXFER_TO_DEV:
- acc = VERIFY_READ;
- break;
- case SG_DXFER_FROM_DEV:
- case SG_DXFER_TO_FROM_DEV:
- acc = VERIFY_WRITE;
- break;
- default:
- pr_debug("%s:%s:%d dxfer_dir invalid %d\n",
- skdev->name, __func__, __LINE__, sgp->dxfer_direction);
- return -EINVAL;
- }
- if (copy_from_user(sksgio->cdb, sgp->cmdp, sgp->cmd_len)) {
- pr_debug("%s:%s:%d copy_from_user cmdp failed %p\n",
- skdev->name, __func__, __LINE__, sgp->cmdp);
- return -EFAULT;
- }
- if (sgp->mx_sb_len != 0) {
- if (!access_ok(VERIFY_WRITE, sgp->sbp, sgp->mx_sb_len)) {
- pr_debug("%s:%s:%d access sbp failed %p\n",
- skdev->name, __func__, __LINE__, sgp->sbp);
- return -EFAULT;
- }
- }
- if (sgp->iovec_count == 0) {
- sksgio->iov[0].iov_base = sgp->dxferp;
- sksgio->iov[0].iov_len = sgp->dxfer_len;
- sksgio->iovcnt = 1;
- sksgio->dxfer_len = sgp->dxfer_len;
- } else {
- struct sg_iovec *iov;
- uint nbytes = sizeof(*iov) * sgp->iovec_count;
- size_t iov_data_len;
- iov = kmalloc(nbytes, GFP_KERNEL);
- if (iov == NULL) {
- pr_debug("%s:%s:%d alloc iovec failed %d\n",
- skdev->name, __func__, __LINE__,
- sgp->iovec_count);
- return -ENOMEM;
- }
- sksgio->iov = iov;
- sksgio->iovcnt = sgp->iovec_count;
- if (copy_from_user(iov, sgp->dxferp, nbytes)) {
- pr_debug("%s:%s:%d copy_from_user iovec failed %p\n",
- skdev->name, __func__, __LINE__, sgp->dxferp);
- return -EFAULT;
- }
- /*
- * Sum up the vecs, making sure they don't overflow
- */
- iov_data_len = 0;
- for (i = 0; i < sgp->iovec_count; i++) {
- if (iov_data_len + iov[i].iov_len < iov_data_len)
- return -EINVAL;
- iov_data_len += iov[i].iov_len;
- }
- /* The SG_IO HOWTO says that the shorter of the two wins */
- if (sgp->dxfer_len < iov_data_len) {
- sksgio->iovcnt = iov_shorten((struct iovec *)iov,
- sgp->iovec_count,
- sgp->dxfer_len);
- sksgio->dxfer_len = sgp->dxfer_len;
- } else
- sksgio->dxfer_len = iov_data_len;
- }
- if (sgp->dxfer_direction != SG_DXFER_NONE) {
- struct sg_iovec *iov = sksgio->iov;
- for (i = 0; i < sksgio->iovcnt; i++, iov++) {
- if (!access_ok(acc, iov->iov_base, iov->iov_len)) {
- pr_debug("%s:%s:%d access data failed %p/%d\n",
- skdev->name, __func__, __LINE__,
- iov->iov_base, (int)iov->iov_len);
- return -EFAULT;
- }
- }
- }
- return 0;
- }
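- /*
-  * A worked example of the length rules above, with assumed values:
-  * given dxfer_len = 4096 and three iovecs of 2048 bytes each
-  * (iov_data_len = 6144), dxfer_len is the shorter, so iov_shorten()
-  * trims the list to the first two iovecs and dxfer_len stays 4096.
-  * Conversely, if iov_data_len were smaller, it would become the
-  * effective dxfer_len.
-  */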
- static int skd_sg_io_obtain_skspcl(struct skd_device *skdev,
- struct skd_sg_io *sksgio)
- {
- struct skd_special_context *skspcl = NULL;
- int rc;
- for (;;) {
- ulong flags;
- spin_lock_irqsave(&skdev->lock, flags);
- skspcl = skdev->skspcl_free_list;
- if (skspcl != NULL) {
- skdev->skspcl_free_list =
- (struct skd_special_context *)skspcl->req.next;
- skspcl->req.id += SKD_ID_INCR;
- skspcl->req.state = SKD_REQ_STATE_SETUP;
- skspcl->orphaned = 0;
- skspcl->req.n_sg = 0;
- }
- spin_unlock_irqrestore(&skdev->lock, flags);
- if (skspcl != NULL) {
- rc = 0;
- break;
- }
- pr_debug("%s:%s:%d blocking\n",
- skdev->name, __func__, __LINE__);
- rc = wait_event_interruptible_timeout(
- skdev->waitq,
- (skdev->skspcl_free_list != NULL),
- msecs_to_jiffies(sksgio->sg.timeout));
- pr_debug("%s:%s:%d unblocking, rc=%d\n",
- skdev->name, __func__, __LINE__, rc);
- if (rc <= 0) {
- if (rc == 0)
- rc = -ETIMEDOUT;
- else
- rc = -EINTR;
- break;
- }
- /*
-  * If we get here, rc > 0: wait_event_interruptible_timeout()
-  * returned with time to spare, so the awaited event -- a
-  * non-empty free list -- occurred. Retry the allocation.
-  */
- }
- sksgio->skspcl = skspcl;
- return rc;
- }
- static int skd_skreq_prep_buffering(struct skd_device *skdev,
- struct skd_request_context *skreq,
- u32 dxfer_len)
- {
- u32 resid = dxfer_len;
- /*
-  * The DMA engine must have aligned addresses and byte counts.
-  * (-resid) & 3 adds zero to three bytes, rounding resid up to
-  * the next multiple of four.
-  */
- resid += (-resid) & 3;
- skreq->sg_byte_count = resid;
- skreq->n_sg = 0;
- while (resid > 0) {
- u32 nbytes = PAGE_SIZE;
- u32 ix = skreq->n_sg;
- struct scatterlist *sg = &skreq->sg[ix];
- struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix];
- struct page *page;
- if (nbytes > resid)
- nbytes = resid;
- page = alloc_page(GFP_KERNEL);
- if (page == NULL)
- return -ENOMEM;
- sg_set_page(sg, page, nbytes, 0);
- /* TODO: This should be going through a pci_???()
- * routine to do proper mapping. */
- sksg->control = FIT_SGD_CONTROL_NOT_LAST;
- sksg->byte_count = nbytes;
- sksg->host_side_addr = sg_phys(sg);
- sksg->dev_side_addr = 0;
- sksg->next_desc_ptr = skreq->sksg_dma_address +
- (ix + 1) * sizeof(*sksg);
- skreq->n_sg++;
- resid -= nbytes;
- }
- if (skreq->n_sg > 0) {
- u32 ix = skreq->n_sg - 1;
- struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix];
- sksg->control = FIT_SGD_CONTROL_LAST;
- sksg->next_desc_ptr = 0;
- }
- if (unlikely(skdev->dbg_level > 1)) {
- u32 i;
- pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n",
- skdev->name, __func__, __LINE__,
- skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
- for (i = 0; i < skreq->n_sg; i++) {
- struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
- pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x "
- "addr=0x%llx next=0x%llx\n",
- skdev->name, __func__, __LINE__,
- i, sgd->byte_count, sgd->control,
- sgd->host_side_addr, sgd->next_desc_ptr);
- }
- }
- return 0;
- }
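- /*
-  * The loop above builds two parallel lists: a kernel scatterlist
-  * for the driver's own bounce pages, and the device-visible FIT
-  * descriptor chain. Each fit_sg_descriptor links to its successor
-  * by DMA address, so the chain the card walks looks like:
-  *
-  *   sksg_list[0] --next_desc_ptr--> sksg_list[1] --> ... --> [n-1]
-  *   (NOT_LAST)                      (NOT_LAST)      (LAST, next=0)
-  */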
- static int skd_sg_io_prep_buffering(struct skd_device *skdev,
- struct skd_sg_io *sksgio)
- {
- struct skd_special_context *skspcl = sksgio->skspcl;
- struct skd_request_context *skreq = &skspcl->req;
- u32 dxfer_len = sksgio->dxfer_len;
- int rc;
- rc = skd_skreq_prep_buffering(skdev, skreq, dxfer_len);
- /*
- * Eventually, errors or not, skd_release_special() is called
- * to recover allocations including partial allocations.
- */
- return rc;
- }
- static int skd_sg_io_copy_buffer(struct skd_device *skdev,
- struct skd_sg_io *sksgio, int dxfer_dir)
- {
- struct skd_special_context *skspcl = sksgio->skspcl;
- u32 iov_ix = 0;
- struct sg_iovec curiov;
- u32 sksg_ix = 0;
- u8 *bufp = NULL;
- u32 buf_len = 0;
- u32 resid = sksgio->dxfer_len;
- int rc;
- curiov.iov_len = 0;
- curiov.iov_base = NULL;
- /*
-  * Copy only when this pass's direction matches the request, with
-  * one exception: a TO_FROM_DEV request also needs the TO_DEV
-  * copy pass.
-  */
- if (dxfer_dir != sksgio->sg.dxfer_direction) {
- if (dxfer_dir != SG_DXFER_TO_DEV ||
- sksgio->sg.dxfer_direction != SG_DXFER_TO_FROM_DEV)
- return 0;
- }
- while (resid > 0) {
- u32 nbytes = PAGE_SIZE;
- if (curiov.iov_len == 0) {
- curiov = sksgio->iov[iov_ix++];
- continue;
- }
- if (buf_len == 0) {
- struct page *page;
- page = sg_page(&skspcl->req.sg[sksg_ix++]);
- bufp = page_address(page);
- buf_len = PAGE_SIZE;
- }
- nbytes = min_t(u32, nbytes, resid);
- nbytes = min_t(u32, nbytes, curiov.iov_len);
- nbytes = min_t(u32, nbytes, buf_len);
- if (dxfer_dir == SG_DXFER_TO_DEV)
- rc = __copy_from_user(bufp, curiov.iov_base, nbytes);
- else
- rc = __copy_to_user(curiov.iov_base, bufp, nbytes);
- if (rc)
- return -EFAULT;
- resid -= nbytes;
- curiov.iov_len -= nbytes;
- curiov.iov_base += nbytes;
- buf_len -= nbytes;
- }
- return 0;
- }
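- /*
-  * The copy loop above advances two independent cursors: the user's
-  * iovec list (curiov) and the driver's page list (bufp/buf_len).
-  * Each pass moves min(resid, bytes left in the current iovec, bytes
-  * left in the current page); e.g. a 3000-byte iovec against
-  * 4096-byte pages copies 3000 bytes, then continues in the same
-  * page with the next iovec.
-  */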
- static int skd_sg_io_send_fitmsg(struct skd_device *skdev,
- struct skd_sg_io *sksgio)
- {
- struct skd_special_context *skspcl = sksgio->skspcl;
- struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf;
- struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1];
- memset(skspcl->msg_buf, 0, SKD_N_SPECIAL_FITMSG_BYTES);
- /* Initialize the FIT msg header */
- fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
- fmh->num_protocol_cmds_coalesced = 1;
- /* Initialize the SCSI request */
- if (sksgio->sg.dxfer_direction != SG_DXFER_NONE)
- scsi_req->hdr.sg_list_dma_address =
- cpu_to_be64(skspcl->req.sksg_dma_address);
- scsi_req->hdr.tag = skspcl->req.id;
- scsi_req->hdr.sg_list_len_bytes =
- cpu_to_be32(skspcl->req.sg_byte_count);
- memcpy(scsi_req->cdb, sksgio->cdb, sizeof(scsi_req->cdb));
- skspcl->req.state = SKD_REQ_STATE_BUSY;
- skd_send_special_fitmsg(skdev, skspcl);
- return 0;
- }
- static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio)
- {
- unsigned long flags;
- int rc;
- rc = wait_event_interruptible_timeout(skdev->waitq,
- (sksgio->skspcl->req.state != SKD_REQ_STATE_BUSY),
- msecs_to_jiffies(sksgio->sg.timeout));
- spin_lock_irqsave(&skdev->lock, flags);
- if (sksgio->skspcl->req.state == SKD_REQ_STATE_ABORTED) {
- pr_debug("%s:%s:%d skspcl %p aborted\n",
- skdev->name, __func__, __LINE__, sksgio->skspcl);
- /* Build a check condition with sense data and let the command
-  * finish. For a timeout, we must fabricate the completion and
-  * sense data ourselves.
-  */
- sksgio->skspcl->req.completion.status =
- SAM_STAT_CHECK_CONDITION;
- memset(&sksgio->skspcl->req.err_info, 0,
- sizeof(sksgio->skspcl->req.err_info));
- sksgio->skspcl->req.err_info.type = 0x70;
- sksgio->skspcl->req.err_info.key = ABORTED_COMMAND;
- sksgio->skspcl->req.err_info.code = 0x44;
- sksgio->skspcl->req.err_info.qual = 0;
- rc = 0;
- } else if (sksgio->skspcl->req.state != SKD_REQ_STATE_BUSY)
- /* No longer on the adapter. We finish. */
- rc = 0;
- else {
- /* Something's gone wrong. Still busy. Timeout or
- * user interrupted (control-C). Mark as an orphan
- * so it will be disposed when completed. */
- sksgio->skspcl->orphaned = 1;
- sksgio->skspcl = NULL;
- if (rc == 0) {
- pr_debug("%s:%s:%d timed out %p (%u ms)\n",
- skdev->name, __func__, __LINE__,
- sksgio, sksgio->sg.timeout);
- rc = -ETIMEDOUT;
- } else {
- pr_debug("%s:%s:%d cntlc %p\n",
- skdev->name, __func__, __LINE__, sksgio);
- rc = -EINTR;
- }
- }
- spin_unlock_irqrestore(&skdev->lock, flags);
- return rc;
- }
- static int skd_sg_io_put_status(struct skd_device *skdev,
- struct skd_sg_io *sksgio)
- {
- struct sg_io_hdr *sgp = &sksgio->sg;
- struct skd_special_context *skspcl = sksgio->skspcl;
- int resid = 0;
- u32 nb = be32_to_cpu(skspcl->req.completion.num_returned_bytes);
- sgp->status = skspcl->req.completion.status;
- resid = sksgio->dxfer_len - nb;
- sgp->masked_status = sgp->status & STATUS_MASK;
- sgp->msg_status = 0;
- sgp->host_status = 0;
- sgp->driver_status = 0;
- sgp->resid = resid;
- if (sgp->masked_status || sgp->host_status || sgp->driver_status)
- sgp->info |= SG_INFO_CHECK;
- pr_debug("%s:%s:%d status %x masked %x resid 0x%x\n",
- skdev->name, __func__, __LINE__,
- sgp->status, sgp->masked_status, sgp->resid);
- if (sgp->masked_status == SAM_STAT_CHECK_CONDITION) {
- if (sgp->mx_sb_len > 0) {
- struct fit_comp_error_info *ei = &skspcl->req.err_info;
- u32 nbytes = sizeof(*ei);
- nbytes = min_t(u32, nbytes, sgp->mx_sb_len);
- sgp->sb_len_wr = nbytes;
- if (__copy_to_user(sgp->sbp, ei, nbytes)) {
- pr_debug("%s:%s:%d copy_to_user sense failed %p\n",
- skdev->name, __func__, __LINE__,
- sgp->sbp);
- return -EFAULT;
- }
- }
- }
- if (__copy_to_user(sksgio->argp, sgp, sizeof(sg_io_hdr_t))) {
- pr_debug("%s:%s:%d copy_to_user sg failed %p\n",
- skdev->name, __func__, __LINE__, sksgio->argp);
- return -EFAULT;
- }
- return 0;
- }
- static int skd_sg_io_release_skspcl(struct skd_device *skdev,
- struct skd_sg_io *sksgio)
- {
- struct skd_special_context *skspcl = sksgio->skspcl;
- if (skspcl != NULL) {
- ulong flags;
- sksgio->skspcl = NULL;
- spin_lock_irqsave(&skdev->lock, flags);
- skd_release_special(skdev, skspcl);
- spin_unlock_irqrestore(&skdev->lock, flags);
- }
- return 0;
- }
- /*
- *****************************************************************************
- * INTERNAL REQUESTS -- generated by driver itself
- *****************************************************************************
- */
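- /*
-  * The internal request path below implements a simple device
-  * bring-up state machine, driven by skd_complete_internal():
-  *
-  *   TEST_UNIT_READY -> WRITE_BUFFER -> READ_BUFFER (pattern verify)
-  *     -> READ_CAPACITY -> INQUIRY (serial number) -> online
-  *
-  * Most failures retry from TEST_UNIT_READY; a WRITE/READ buffer
-  * pattern mismatch soft-resets the card instead.
-  */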
- static int skd_format_internal_skspcl(struct skd_device *skdev)
- {
- struct skd_special_context *skspcl = &skdev->internal_skspcl;
- struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
- struct fit_msg_hdr *fmh;
- uint64_t dma_address;
- struct skd_scsi_request *scsi;
- fmh = (struct fit_msg_hdr *)&skspcl->msg_buf[0];
- fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
- fmh->num_protocol_cmds_coalesced = 1;
- scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64];
- memset(scsi, 0, sizeof(*scsi));
- dma_address = skspcl->req.sksg_dma_address;
- scsi->hdr.sg_list_dma_address = cpu_to_be64(dma_address);
- sgd->control = FIT_SGD_CONTROL_LAST;
- sgd->byte_count = 0;
- sgd->host_side_addr = skspcl->db_dma_address;
- sgd->dev_side_addr = 0;
- sgd->next_desc_ptr = 0LL;
- return 1;
- }
- #define WR_BUF_SIZE SKD_N_INTERNAL_BYTES
- static void skd_send_internal_skspcl(struct skd_device *skdev,
- struct skd_special_context *skspcl,
- u8 opcode)
- {
- struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
- struct skd_scsi_request *scsi;
- unsigned char *buf = skspcl->data_buf;
- int i;
- if (skspcl->req.state != SKD_REQ_STATE_IDLE)
- /*
- * A refresh is already in progress.
- * Just wait for it to finish.
- */
- return;
- SKD_ASSERT((skspcl->req.id & SKD_ID_INCR) == 0);
- skspcl->req.state = SKD_REQ_STATE_BUSY;
- skspcl->req.id += SKD_ID_INCR;
- scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64];
- scsi->hdr.tag = skspcl->req.id;
- memset(scsi->cdb, 0, sizeof(scsi->cdb));
- switch (opcode) {
- case TEST_UNIT_READY:
- scsi->cdb[0] = TEST_UNIT_READY;
- sgd->byte_count = 0;
- scsi->hdr.sg_list_len_bytes = 0;
- break;
- case READ_CAPACITY:
- scsi->cdb[0] = READ_CAPACITY;
- sgd->byte_count = SKD_N_READ_CAP_BYTES;
- scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
- break;
- case INQUIRY:
- scsi->cdb[0] = INQUIRY;
- scsi->cdb[1] = 0x01; /* evpd */
- scsi->cdb[2] = 0x80; /* serial number page */
- scsi->cdb[4] = 0x10;
- sgd->byte_count = 16;
- scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
- break;
- case SYNCHRONIZE_CACHE:
- scsi->cdb[0] = SYNCHRONIZE_CACHE;
- sgd->byte_count = 0;
- scsi->hdr.sg_list_len_bytes = 0;
- break;
- case WRITE_BUFFER:
- scsi->cdb[0] = WRITE_BUFFER;
- scsi->cdb[1] = 0x02;
- scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
- scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
- sgd->byte_count = WR_BUF_SIZE;
- scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
- /* fill incrementing byte pattern */
- for (i = 0; i < sgd->byte_count; i++)
- buf[i] = i & 0xFF;
- break;
- case READ_BUFFER:
- scsi->cdb[0] = READ_BUFFER;
- scsi->cdb[1] = 0x02;
- scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
- scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
- sgd->byte_count = WR_BUF_SIZE;
- scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
- memset(skspcl->data_buf, 0, sgd->byte_count);
- break;
- default:
- SKD_ASSERT("Don't know what to send");
- return;
- }
- skd_send_special_fitmsg(skdev, skspcl);
- }
- static void skd_refresh_device_data(struct skd_device *skdev)
- {
- struct skd_special_context *skspcl = &skdev->internal_skspcl;
- skd_send_internal_skspcl(skdev, skspcl, TEST_UNIT_READY);
- }
- static int skd_chk_read_buf(struct skd_device *skdev,
- struct skd_special_context *skspcl)
- {
- unsigned char *buf = skspcl->data_buf;
- int i;
- /* check for incrementing byte pattern */
- for (i = 0; i < WR_BUF_SIZE; i++)
- if (buf[i] != (i & 0xFF))
- return 1;
- return 0;
- }
- static void skd_log_check_status(struct skd_device *skdev, u8 status, u8 key,
- u8 code, u8 qual, u8 fruc)
- {
- /* If the check condition is of special interest, log a message */
- if ((status == SAM_STAT_CHECK_CONDITION) && (key == 0x02)
- && (code == 0x04) && (qual == 0x06)) {
- pr_err("(%s): *** LOST_WRITE_DATA ERROR *** key/asc/"
- "ascq/fruc %02x/%02x/%02x/%02x\n",
- skd_name(skdev), key, code, qual, fruc);
- }
- }
- static void skd_complete_internal(struct skd_device *skdev,
- volatile struct fit_completion_entry_v1
- *skcomp,
- volatile struct fit_comp_error_info *skerr,
- struct skd_special_context *skspcl)
- {
- u8 *buf = skspcl->data_buf;
- u8 status;
- int i;
- struct skd_scsi_request *scsi =
- (struct skd_scsi_request *)&skspcl->msg_buf[64];
- SKD_ASSERT(skspcl == &skdev->internal_skspcl);
- pr_debug("%s:%s:%d complete internal %x\n",
- skdev->name, __func__, __LINE__, scsi->cdb[0]);
- skspcl->req.completion = *skcomp;
- skspcl->req.state = SKD_REQ_STATE_IDLE;
- skspcl->req.id += SKD_ID_INCR;
- status = skspcl->req.completion.status;
- skd_log_check_status(skdev, status, skerr->key, skerr->code,
- skerr->qual, skerr->fruc);
- switch (scsi->cdb[0]) {
- case TEST_UNIT_READY:
- if (status == SAM_STAT_GOOD)
- skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
- else if ((status == SAM_STAT_CHECK_CONDITION) &&
- (skerr->key == MEDIUM_ERROR))
- skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
- else {
- if (skdev->state == SKD_DRVR_STATE_STOPPING) {
- pr_debug("%s:%s:%d TUR failed, don't send anymore state 0x%x\n",
- skdev->name, __func__, __LINE__,
- skdev->state);
- return;
- }
- pr_debug("%s:%s:%d **** TUR failed, retry skerr\n",
- skdev->name, __func__, __LINE__);
- skd_send_internal_skspcl(skdev, skspcl, 0x00);
- }
- break;
- case WRITE_BUFFER:
- if (status == SAM_STAT_GOOD)
- skd_send_internal_skspcl(skdev, skspcl, READ_BUFFER);
- else {
- if (skdev->state == SKD_DRVR_STATE_STOPPING) {
- pr_debug("%s:%s:%d write buffer failed, don't send anymore state 0x%x\n",
- skdev->name, __func__, __LINE__,
- skdev->state);
- return;
- }
- pr_debug("%s:%s:%d **** write buffer failed, retry skerr\n",
- skdev->name, __func__, __LINE__);
- skd_send_internal_skspcl(skdev, skspcl, 0x00);
- }
- break;
- case READ_BUFFER:
- if (status == SAM_STAT_GOOD) {
- if (skd_chk_read_buf(skdev, skspcl) == 0)
- skd_send_internal_skspcl(skdev, skspcl,
- READ_CAPACITY);
- else {
- pr_err(
- "(%s):*** W/R Buffer mismatch %d ***\n",
- skd_name(skdev), skdev->connect_retries);
- if (skdev->connect_retries <
- SKD_MAX_CONNECT_RETRIES) {
- skdev->connect_retries++;
- skd_soft_reset(skdev);
- } else {
- pr_err(
- "(%s): W/R Buffer Connect Error\n",
- skd_name(skdev));
- return;
- }
- }
- } else {
- if (skdev->state == SKD_DRVR_STATE_STOPPING) {
- pr_debug("%s:%s:%d "
- "read buffer failed, don't send anymore state 0x%x\n",
- skdev->name, __func__, __LINE__,
- skdev->state);
- return;
- }
- pr_debug("%s:%s:%d "
- "**** read buffer failed, retry skerr\n",
- skdev->name, __func__, __LINE__);
- skd_send_internal_skspcl(skdev, skspcl, 0x00);
- }
- break;
- case READ_CAPACITY:
- skdev->read_cap_is_valid = 0;
- if (status == SAM_STAT_GOOD) {
- skdev->read_cap_last_lba =
- (buf[0] << 24) | (buf[1] << 16) |
- (buf[2] << 8) | buf[3];
- skdev->read_cap_blocksize =
- (buf[4] << 24) | (buf[5] << 16) |
- (buf[6] << 8) | buf[7];
- pr_debug("%s:%s:%d last lba %d, bs %d\n",
- skdev->name, __func__, __LINE__,
- skdev->read_cap_last_lba,
- skdev->read_cap_blocksize);
- set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
- skdev->read_cap_is_valid = 1;
- skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
- } else if ((status == SAM_STAT_CHECK_CONDITION) &&
- (skerr->key == MEDIUM_ERROR)) {
- skdev->read_cap_last_lba = ~0;
- set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
- pr_debug("%s:%s:%d "
- "**** MEDIUM ERROR caused READCAP to fail, ignore failure and continue to inquiry\n",
- skdev->name, __func__, __LINE__);
- skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
- } else {
- pr_debug("%s:%s:%d **** READCAP failed, retry TUR\n",
- skdev->name, __func__, __LINE__);
- skd_send_internal_skspcl(skdev, skspcl,
- TEST_UNIT_READY);
- }
- break;
- case INQUIRY:
- skdev->inquiry_is_valid = 0;
- if (status == SAM_STAT_GOOD) {
- skdev->inquiry_is_valid = 1;
- for (i = 0; i < 12; i++)
- skdev->inq_serial_num[i] = buf[i + 4];
- skdev->inq_serial_num[12] = 0;
- }
- if (skd_unquiesce_dev(skdev) < 0)
- pr_debug("%s:%s:%d **** failed, to ONLINE device\n",
- skdev->name, __func__, __LINE__);
- /* connection is complete */
- skdev->connect_retries = 0;
- break;
- case SYNCHRONIZE_CACHE:
- if (status == SAM_STAT_GOOD)
- skdev->sync_done = 1;
- else
- skdev->sync_done = -1;
- wake_up_interruptible(&skdev->waitq);
- break;
- default:
- SKD_ASSERT("we didn't send this");
- }
- }
- /*
- *****************************************************************************
- * FIT MESSAGES
- *****************************************************************************
- */
- static void skd_send_fitmsg(struct skd_device *skdev,
- struct skd_fitmsg_context *skmsg)
- {
- u64 qcmd;
- struct fit_msg_hdr *fmh;
- pr_debug("%s:%s:%d dma address 0x%llx, busy=%d\n",
- skdev->name, __func__, __LINE__,
- skmsg->mb_dma_address, skdev->in_flight);
- pr_debug("%s:%s:%d msg_buf 0x%p, offset %x\n",
- skdev->name, __func__, __LINE__,
- skmsg->msg_buf, skmsg->offset);
- qcmd = skmsg->mb_dma_address;
- qcmd |= FIT_QCMD_QID_NORMAL;
- fmh = (struct fit_msg_hdr *)skmsg->msg_buf;
- skmsg->outstanding = fmh->num_protocol_cmds_coalesced;
- if (unlikely(skdev->dbg_level > 1)) {
- u8 *bp = (u8 *)skmsg->msg_buf;
- int i;
- for (i = 0; i < skmsg->length; i += 8) {
- pr_debug("%s:%s:%d msg[%2d] %02x %02x %02x %02x "
- "%02x %02x %02x %02x\n",
- skdev->name, __func__, __LINE__,
- i, bp[i + 0], bp[i + 1], bp[i + 2],
- bp[i + 3], bp[i + 4], bp[i + 5],
- bp[i + 6], bp[i + 7]);
- if (i == 0)
- i = 64 - 8;
- }
- }
- if (skmsg->length > 256)
- qcmd |= FIT_QCMD_MSGSIZE_512;
- else if (skmsg->length > 128)
- qcmd |= FIT_QCMD_MSGSIZE_256;
- else if (skmsg->length > 64)
- qcmd |= FIT_QCMD_MSGSIZE_128;
- else
- /*
- * This makes no sense because the FIT msg header is
- * 64 bytes. If the msg is only 64 bytes long it has
- * no payload.
- */
- qcmd |= FIT_QCMD_MSGSIZE_64;
- SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
- }
- static void skd_send_special_fitmsg(struct skd_device *skdev,
- struct skd_special_context *skspcl)
- {
- u64 qcmd;
- if (unlikely(skdev->dbg_level > 1)) {
- u8 *bp = (u8 *)skspcl->msg_buf;
- int i;
- for (i = 0; i < SKD_N_SPECIAL_FITMSG_BYTES; i += 8) {
- pr_debug("%s:%s:%d spcl[%2d] %02x %02x %02x %02x "
- "%02x %02x %02x %02x\n",
- skdev->name, __func__, __LINE__, i,
- bp[i + 0], bp[i + 1], bp[i + 2], bp[i + 3],
- bp[i + 4], bp[i + 5], bp[i + 6], bp[i + 7]);
- if (i == 0)
- i = 64 - 8;
- }
- pr_debug("%s:%s:%d skspcl=%p id=%04x sksg_list=%p sksg_dma=%llx\n",
- skdev->name, __func__, __LINE__,
- skspcl, skspcl->req.id, skspcl->req.sksg_list,
- skspcl->req.sksg_dma_address);
- for (i = 0; i < skspcl->req.n_sg; i++) {
- struct fit_sg_descriptor *sgd =
- &skspcl->req.sksg_list[i];
- pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x "
- "addr=0x%llx next=0x%llx\n",
- skdev->name, __func__, __LINE__,
- i, sgd->byte_count, sgd->control,
- sgd->host_side_addr, sgd->next_desc_ptr);
- }
- }
- /*
- * Special FIT msgs are always 128 bytes: a 64-byte FIT hdr
- * and one 64-byte SSDI command.
- */
- qcmd = skspcl->mb_dma_address;
- qcmd |= FIT_QCMD_QID_NORMAL + FIT_QCMD_MSGSIZE_128;
- SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
- }
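- /*
-  * Note on the queue-command encoding used above and in
-  * skd_send_fitmsg(): the 64-bit value written to FIT_Q_COMMAND is
-  * the message's DMA address with FIT_QCMD_QID_* and
-  * FIT_QCMD_MSGSIZE_* flags OR'd into what are presumably
-  * alignment-guaranteed low address bits.
-  */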
- /*
- *****************************************************************************
- * COMPLETION QUEUE
- *****************************************************************************
- */
- static void skd_complete_other(struct skd_device *skdev,
- volatile struct fit_completion_entry_v1 *skcomp,
- volatile struct fit_comp_error_info *skerr);
- struct sns_info {
- u8 type;
- u8 stat;
- u8 key;
- u8 asc;
- u8 ascq;
- u8 mask;
- enum skd_check_status_action action;
- };
- static struct sns_info skd_chkstat_table[] = {
- /* Good */
- { 0x70, 0x02, RECOVERED_ERROR, 0, 0, 0x1c,
- SKD_CHECK_STATUS_REPORT_GOOD },
- /* Smart alerts */
- { 0x70, 0x02, NO_SENSE, 0x0B, 0x00, 0x1E, /* warnings */
- SKD_CHECK_STATUS_REPORT_SMART_ALERT },
- { 0x70, 0x02, NO_SENSE, 0x5D, 0x00, 0x1E, /* thresholds */
- SKD_CHECK_STATUS_REPORT_SMART_ALERT },
- { 0x70, 0x02, RECOVERED_ERROR, 0x0B, 0x01, 0x1F, /* temperature over trigger */
- SKD_CHECK_STATUS_REPORT_SMART_ALERT },
- /* Retry (with limits) */
- { 0x70, 0x02, 0x0B, 0, 0, 0x1C, /* This one is for DMA ERROR */
- SKD_CHECK_STATUS_REQUEUE_REQUEST },
- { 0x70, 0x02, 0x06, 0x0B, 0x00, 0x1E, /* warnings */
- SKD_CHECK_STATUS_REQUEUE_REQUEST },
- { 0x70, 0x02, 0x06, 0x5D, 0x00, 0x1E, /* thresholds */
- SKD_CHECK_STATUS_REQUEUE_REQUEST },
- { 0x70, 0x02, 0x06, 0x80, 0x30, 0x1F, /* backup power */
- SKD_CHECK_STATUS_REQUEUE_REQUEST },
- /* Busy (or about to be) */
- { 0x70, 0x02, 0x06, 0x3f, 0x01, 0x1F, /* fw changed */
- SKD_CHECK_STATUS_BUSY_IMMINENT },
- };
- /*
-  * Look up status and sense data to decide how to handle the error
-  * from the device. The mask says which fields must match, one bit
-  * per field: 0x10 type, 0x08 stat, 0x04 key, 0x02 asc, 0x01 ascq.
-  * E.g., mask=0x18 means check type and stat; ignore key, asc, ascq.
-  */
- static enum skd_check_status_action
- skd_check_status(struct skd_device *skdev,
- u8 cmp_status, volatile struct fit_comp_error_info *skerr)
- {
- int i, n;
- pr_err("(%s): key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
- skd_name(skdev), skerr->key, skerr->code, skerr->qual,
- skerr->fruc);
- pr_debug("%s:%s:%d stat: t=%02x stat=%02x k=%02x c=%02x q=%02x fruc=%02x\n",
- skdev->name, __func__, __LINE__, skerr->type, cmp_status,
- skerr->key, skerr->code, skerr->qual, skerr->fruc);
- /* Does the info match an entry in the good category? */
- n = sizeof(skd_chkstat_table) / sizeof(skd_chkstat_table[0]);
- for (i = 0; i < n; i++) {
- struct sns_info *sns = &skd_chkstat_table[i];
- if (sns->mask & 0x10)
- if (skerr->type != sns->type)
- continue;
- if (sns->mask & 0x08)
- if (cmp_status != sns->stat)
- continue;
- if (sns->mask & 0x04)
- if (skerr->key != sns->key)
- continue;
- if (sns->mask & 0x02)
- if (skerr->code != sns->asc)
- continue;
- if (sns->mask & 0x01)
- if (skerr->qual != sns->ascq)
- continue;
- if (sns->action == SKD_CHECK_STATUS_REPORT_SMART_ALERT) {
- pr_err("(%s): SMART Alert: sense key/asc/ascq "
- "%02x/%02x/%02x\n",
- skd_name(skdev), skerr->key,
- skerr->code, skerr->qual);
- }
- return sns->action;
- }
- /* No other match, so nonzero status means error,
- * zero status means good
- */
- if (cmp_status) {
- pr_debug("%s:%s:%d status check: error\n",
- skdev->name, __func__, __LINE__);
- return SKD_CHECK_STATUS_REPORT_ERROR;
- }
- pr_debug("%s:%s:%d status check good default\n",
- skdev->name, __func__, __LINE__);
- return SKD_CHECK_STATUS_REPORT_GOOD;
- }
- static void skd_resolve_req_exception(struct skd_device *skdev,
- struct skd_request_context *skreq)
- {
- u8 cmp_status = skreq->completion.status;
- switch (skd_check_status(skdev, cmp_status, &skreq->err_info)) {
- case SKD_CHECK_STATUS_REPORT_GOOD:
- case SKD_CHECK_STATUS_REPORT_SMART_ALERT:
- skd_end_request(skdev, skreq, 0);
- break;
- case SKD_CHECK_STATUS_BUSY_IMMINENT:
- skd_log_skreq(skdev, skreq, "retry(busy)");
- blk_requeue_request(skdev->queue, skreq->req);
- pr_info("(%s) drive BUSY imminent\n", skd_name(skdev));
- skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT;
- skdev->timer_countdown = SKD_TIMER_MINUTES(20);
- skd_quiesce_dev(skdev);
- break;
- case SKD_CHECK_STATUS_REQUEUE_REQUEST:
- if ((unsigned long) ++skreq->req->special < SKD_MAX_RETRIES) {
- skd_log_skreq(skdev, skreq, "retry");
- blk_requeue_request(skdev->queue, skreq->req);
- break;
- }
- /* fall through to report error */
- case SKD_CHECK_STATUS_REPORT_ERROR:
- default:
- skd_end_request(skdev, skreq, -EIO);
- break;
- }
- }
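- /*
-  * Note: the requeue path above reuses the block request's ->special
-  * pointer as a per-request retry counter, incrementing it and
-  * comparing against SKD_MAX_RETRIES before giving up with -EIO.
-  */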
- /* assume spinlock is already held */
- static void skd_release_skreq(struct skd_device *skdev,
- struct skd_request_context *skreq)
- {
- u32 msg_slot;
- struct skd_fitmsg_context *skmsg;
- u32 timo_slot;
- /*
- * Reclaim the FIT msg buffer if this is
- * the first of the requests it carried to
- * be completed. The FIT msg buffer used to
- * send this request cannot be reused until
- * we are sure the s1120 card has copied
- * it to its memory. The FIT msg might have
- * contained several requests. As soon as
- * any of them are completed we know that
- * the entire FIT msg was transferred.
- * Only the first completed request will
- * match the FIT msg buffer id. The FIT
- * msg buffer id is immediately updated.
- * When subsequent requests complete the FIT
- * msg buffer id won't match, so we know
- * quite cheaply that it is already done.
- */
- msg_slot = skreq->fitmsg_id & SKD_ID_SLOT_MASK;
- SKD_ASSERT(msg_slot < skdev->num_fitmsg_context);
- skmsg = &skdev->skmsg_table[msg_slot];
- if (skmsg->id == skreq->fitmsg_id) {
- SKD_ASSERT(skmsg->state == SKD_MSG_STATE_BUSY);
- SKD_ASSERT(skmsg->outstanding > 0);
- skmsg->outstanding--;
- if (skmsg->outstanding == 0) {
- skmsg->state = SKD_MSG_STATE_IDLE;
- skmsg->id += SKD_ID_INCR;
- skmsg->next = skdev->skmsg_free_list;
- skdev->skmsg_free_list = skmsg;
- }
- }
- /*
- * Decrease the number of active requests.
- * Also decrements the count in the timeout slot.
- */
- SKD_ASSERT(skdev->in_flight > 0);
- skdev->in_flight -= 1;
- timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
- SKD_ASSERT(skdev->timeout_slot[timo_slot] > 0);
- skdev->timeout_slot[timo_slot] -= 1;
- /*
- * Reset backpointer
- */
- skreq->req = NULL;
- /*
- * Reclaim the skd_request_context
- */
- skreq->state = SKD_REQ_STATE_IDLE;
- skreq->id += SKD_ID_INCR;
- skreq->next = skdev->skreq_free_list;
- skdev->skreq_free_list = skreq;
- }
- #define DRIVER_INQ_EVPD_PAGE_CODE 0xDA
- static void skd_do_inq_page_00(struct skd_device *skdev,
- volatile struct fit_completion_entry_v1 *skcomp,
- volatile struct fit_comp_error_info *skerr,
- uint8_t *cdb, uint8_t *buf)
- {
- uint16_t insert_pt, max_bytes, drive_pages, drive_bytes, new_size;
- /* Caller requested "supported pages". The driver needs to insert
- * its page.
- */
- pr_debug("%s:%s:%d skd_do_driver_inquiry: modify supported pages.\n",
- skdev->name, __func__, __LINE__);
- /* If the device rejected the request because the CDB was
-  * improperly formed (asc 0x24, INVALID FIELD IN CDB), then just
-  * leave.
-  */
- if (skcomp->status == SAM_STAT_CHECK_CONDITION &&
- skerr->key == ILLEGAL_REQUEST && skerr->code == 0x24)
- return;
- /* Get the amount of space the caller allocated */
- max_bytes = (cdb[3] << 8) | cdb[4];
- /* Get the number of pages actually returned by the device */
- drive_pages = (buf[2] << 8) | buf[3];
- drive_bytes = drive_pages + 4;
- new_size = drive_pages + 1;
- /* Supported pages must be in numerical order, so find where
- * the driver page needs to be inserted into the list of
- * pages returned by the device.
- */
- for (insert_pt = 4; insert_pt < drive_bytes; insert_pt++) {
- if (buf[insert_pt] == DRIVER_INQ_EVPD_PAGE_CODE)
- return; /* Device is already using this page code; abort */
- else if (buf[insert_pt] > DRIVER_INQ_EVPD_PAGE_CODE)
- break;
- }
- if (insert_pt < max_bytes) {
- uint16_t u;
- /* Shift everything up one byte to make room. */
- for (u = new_size + 3; u > insert_pt; u--)
- buf[u] = buf[u - 1];
- buf[insert_pt] = DRIVER_INQ_EVPD_PAGE_CODE;
- /* Increment num_returned_bytes by 1, preserving its SCSI
-  * (big-endian) byte order.
-  */
- skcomp->num_returned_bytes =
- cpu_to_be32(be32_to_cpu(skcomp->num_returned_bytes) + 1);
- }
- /* update page length field to reflect the driver's page too */
- buf[2] = (uint8_t)((new_size >> 8) & 0xFF);
- buf[3] = (uint8_t)((new_size >> 0) & 0xFF);
- }
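- /*
-  * A worked example of the insertion above, with assumed values: if
-  * the device returns supported pages 0x00, 0x80, 0x83 (drive_pages
-  * = 3), the scan runs off the end of the list since every code is
-  * below 0xDA, the driver's 0xDA page is appended at buf[7], and
-  * buf[2..3] is rewritten to the new count of 4.
-  */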
- static void skd_get_link_info(struct pci_dev *pdev, u8 *speed, u8 *width)
- {
- int pcie_reg;
- u16 pci_bus_speed;
- u8 pci_lanes;
- pcie_reg = pci_find_capability(pdev, PCI_CAP_ID_EXP);
- if (pcie_reg) {
- u16 linksta;
- pci_read_config_word(pdev, pcie_reg + PCI_EXP_LNKSTA, &linksta);
- pci_bus_speed = linksta & 0xF; /* LNKSTA[3:0] current link speed */
- pci_lanes = (linksta & 0x3F0) >> 4; /* LNKSTA[9:4] link width */
- } else {
- *speed = STEC_LINK_UNKNOWN;
- *width = 0xFF;
- return;
- }
- switch (pci_bus_speed) {
- case 1:
- *speed = STEC_LINK_2_5GTS;
- break;
- case 2:
- *speed = STEC_LINK_5GTS;
- break;
- case 3:
- *speed = STEC_LINK_8GTS;
- break;
- default:
- *speed = STEC_LINK_UNKNOWN;
- break;
- }
- if (pci_lanes <= 0x20)
- *width = pci_lanes;
- else
- *width = 0xFF;
- }
- static void skd_do_inq_page_da(struct skd_device *skdev,
- volatile struct fit_completion_entry_v1 *skcomp,
- volatile struct fit_comp_error_info *skerr,
- uint8_t *cdb, uint8_t *buf)
- {
- struct pci_dev *pdev = skdev->pdev;
- unsigned max_bytes;
- struct driver_inquiry_data inq;
- u16 val;
- pr_debug("%s:%s:%d skd_do_driver_inquiry: return driver page\n",
- skdev->name, __func__, __LINE__);
- memset(&inq, 0, sizeof(inq));
- inq.page_code = DRIVER_INQ_EVPD_PAGE_CODE;
- skd_get_link_info(pdev, &inq.pcie_link_speed, &inq.pcie_link_lanes);
- inq.pcie_bus_number = cpu_to_be16(pdev->bus->number);
- inq.pcie_device_number = PCI_SLOT(pdev->devfn);
- inq.pcie_function_number = PCI_FUNC(pdev->devfn);
- pci_read_config_word(pdev, PCI_VENDOR_ID, &val);
- inq.pcie_vendor_id = cpu_to_be16(val);
- pci_read_config_word(pdev, PCI_DEVICE_ID, &val);
- inq.pcie_device_id = cpu_to_be16(val);
- pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &val);
- inq.pcie_subsystem_vendor_id = cpu_to_be16(val);
- pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &val);
- inq.pcie_subsystem_device_id = cpu_to_be16(val);
- /* Driver version, fixed length, padded with spaces on the right */
- inq.driver_version_length = sizeof(inq.driver_version);
- memset(&inq.driver_version, ' ', sizeof(inq.driver_version));
- memcpy(inq.driver_version, DRV_VER_COMPL,
- min(sizeof(inq.driver_version), strlen(DRV_VER_COMPL)));
- inq.page_length = cpu_to_be16((sizeof(inq) - 4));
- /* Clear the error set by the device */
- skcomp->status = SAM_STAT_GOOD;
- memset((void *)skerr, 0, sizeof(*skerr));
- /* copy response into output buffer */
- max_bytes = (cdb[3] << 8) | cdb[4];
- memcpy(buf, &inq, min_t(unsigned, max_bytes, sizeof(inq)));
- skcomp->num_returned_bytes =
- cpu_to_be32(min_t(uint16_t, max_bytes, sizeof(inq)));
- }
- static void skd_do_driver_inq(struct skd_device *skdev,
- volatile struct fit_completion_entry_v1 *skcomp,
- volatile struct fit_comp_error_info *skerr,
- uint8_t *cdb, uint8_t *buf)
- {
- if (!buf)
- return;
- else if (cdb[0] != INQUIRY)
- return; /* Not an INQUIRY */
- else if ((cdb[1] & 1) == 0)
- return; /* EVPD not set */
- else if (cdb[2] == 0)
- /* Need to add driver's page to supported pages list */
- skd_do_inq_page_00(skdev, skcomp, skerr, cdb, buf);
- else if (cdb[2] == DRIVER_INQ_EVPD_PAGE_CODE)
- /* Caller requested driver's page */
- skd_do_inq_page_da(skdev, skcomp, skerr, cdb, buf);
- }
- static unsigned char *skd_sg_1st_page_ptr(struct scatterlist *sg)
- {
- if (!sg)
- return NULL;
- if (!sg_page(sg))
- return NULL;
- return sg_virt(sg);
- }
- static void skd_process_scsi_inq(struct skd_device *skdev,
- volatile struct fit_completion_entry_v1
- *skcomp,
- volatile struct fit_comp_error_info *skerr,
- struct skd_special_context *skspcl)
- {
- uint8_t *buf;
- struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf;
- struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1];
- dma_sync_sg_for_cpu(skdev->class_dev, skspcl->req.sg, skspcl->req.n_sg,
- skspcl->req.sg_data_dir);
- buf = skd_sg_1st_page_ptr(skspcl->req.sg);
- if (buf)
- skd_do_driver_inq(skdev, skcomp, skerr, scsi_req->cdb, buf);
- }
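- /*
-  * The completion queue below uses a cycle-bit convention: the card
-  * stamps each entry it produces with the current pass's cycle
-  * value, so the driver consumes entries while skcmp->cycle matches
-  * its own skcomp_cycle and stops at the first stale entry. When the
-  * head index wraps past SKD_N_COMPLETION_ENTRY, skcomp_cycle is
-  * bumped (8-bit wrap-around) so entries from the previous pass no
-  * longer match.
-  */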
- static int skd_isr_completion_posted(struct skd_device *skdev,
- int limit, int *enqueued)
- {
- volatile struct fit_completion_entry_v1 *skcmp = NULL;
- volatile struct fit_comp_error_info *skerr;
- u16 req_id;
- u32 req_slot;
- struct skd_request_context *skreq;
- u16 cmp_cntxt = 0;
- u8 cmp_status = 0;
- u8 cmp_cycle = 0;
- u32 cmp_bytes = 0;
- int rc = 0;
- int processed = 0;
- for (;;) {
- SKD_ASSERT(skdev->skcomp_ix < SKD_N_COMPLETION_ENTRY);
- skcmp = &skdev->skcomp_table[skdev->skcomp_ix];
- cmp_cycle = skcmp->cycle;
- cmp_cntxt = skcmp->tag;
- cmp_status = skcmp->status;
- cmp_bytes = be32_to_cpu(skcmp->num_returned_bytes);
- skerr = &skdev->skerr_table[skdev->skcomp_ix];
- pr_debug("%s:%s:%d "
- "cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d "
- "busy=%d rbytes=0x%x proto=%d\n",
- skdev->name, __func__, __LINE__, skdev->skcomp_cycle,
- skdev->skcomp_ix, cmp_cycle, cmp_cntxt, cmp_status,
- skdev->in_flight, cmp_bytes, skdev->proto_ver);
- if (cmp_cycle != skdev->skcomp_cycle) {
- pr_debug("%s:%s:%d end of completions\n",
- skdev->name, __func__, __LINE__);
- break;
- }
- /*
- * Update the completion queue head index and possibly
- * the completion cycle count. 8-bit wrap-around.
- */
- skdev->skcomp_ix++;
- if (skdev->skcomp_ix >= SKD_N_COMPLETION_ENTRY) {
- skdev->skcomp_ix = 0;
- skdev->skcomp_cycle++;
- }
- /*
- * The command context is a unique 32-bit ID. The low order
- * bits help locate the request. The request is usually a
- * r/w request (see skd_start() above) or a special request.
- */
- req_id = cmp_cntxt;
- req_slot = req_id & SKD_ID_SLOT_AND_TABLE_MASK;
- /* Is this other than a r/w request? */
- if (req_slot >= skdev->num_req_context) {
- /*
- * This is not a completion for a r/w request.
- */
- skd_complete_other(skdev, skcmp, skerr);
- continue;
- }
- skreq = &skdev->skreq_table[req_slot];
- /*
- * Make sure the request ID for the slot matches.
- */
- if (skreq->id != req_id) {
- pr_debug("%s:%s:%d mismatch comp_id=0x%x req_id=0x%x\n",
- skdev->name, __func__, __LINE__,
- req_id, skreq->id);
- {
- u16 new_id = cmp_cntxt;
- pr_err("(%s): Completion mismatch "
- "comp_id=0x%04x skreq=0x%04x new=0x%04x\n",
- skd_name(skdev), req_id,
- skreq->id, new_id);
- continue;
- }
- }
- SKD_ASSERT(skreq->state == SKD_REQ_STATE_BUSY);
- if (skreq->state == SKD_REQ_STATE_ABORTED) {
- pr_debug("%s:%s:%d reclaim req %p id=%04x\n",
- skdev->name, __func__, __LINE__,
- skreq, skreq->id);
- /* a previously timed out command can
- * now be cleaned up */
- skd_release_skreq(skdev, skreq);
- continue;
- }
- skreq->completion = *skcmp;
- if (unlikely(cmp_status == SAM_STAT_CHECK_CONDITION)) {
- skreq->err_info = *skerr;
- skd_log_check_status(skdev, cmp_status, skerr->key,
- skerr->code, skerr->qual,
- skerr->fruc);
- }
- /* Release DMA resources for the request. */
- if (skreq->n_sg > 0)
- skd_postop_sg_list(skdev, skreq);
- if (!skreq->req) {
- pr_debug("%s:%s:%d NULL backptr skdreq %p, "
- "req=0x%x req_id=0x%x\n",
- skdev->name, __func__, __LINE__,
- skreq, skreq->id, req_id);
- } else {
- /*
- * Capture the outcome and post it back to the
- * native request.
- */
- if (likely(cmp_status == SAM_STAT_GOOD))
- skd_end_request(skdev, skreq, 0);
- else
- skd_resolve_req_exception(skdev, skreq);
- }
- /*
- * Release the skreq, its FIT msg (if one), timeout slot,
- * and queue depth.
- */
- skd_release_skreq(skdev, skreq);
- /* skd_isr_comp_limit equal zero means no limit */
- if (limit) {
- if (++processed >= limit) {
- rc = 1;
- break;
- }
- }
- }
- if ((skdev->state == SKD_DRVR_STATE_PAUSING) &&
- (skdev->in_flight == 0)) {
- skdev->state = SKD_DRVR_STATE_PAUSED;
- wake_up_interruptible(&skdev->waitq);
- }
- return rc;
- }
- static void skd_complete_other(struct skd_device *skdev,
- volatile struct fit_completion_entry_v1 *skcomp,
- volatile struct fit_comp_error_info *skerr)
- {
- u32 req_id = 0;
- u32 req_table;
- u32 req_slot;
- struct skd_special_context *skspcl;
- req_id = skcomp->tag;
- req_table = req_id & SKD_ID_TABLE_MASK;
- req_slot = req_id & SKD_ID_SLOT_MASK;
- pr_debug("%s:%s:%d table=0x%x id=0x%x slot=%d\n",
- skdev->name, __func__, __LINE__,
- req_table, req_id, req_slot);
- /*
- * Based on the request id, determine how to dispatch this completion.
- * This switch/case finds the good cases and forwards the
- * completion entry. Errors are reported below the switch.
- */
- switch (req_table) {
- case SKD_ID_RW_REQUEST:
- /*
- * The caller, skd_completion_posted_isr() above,
- * handles r/w requests. The only way we get here
- * is if the req_slot is out of bounds.
- */
- break;
- case SKD_ID_SPECIAL_REQUEST:
- /*
- * Make sure the req_slot is in bounds and that the id
- * matches.
- */
- if (req_slot < skdev->n_special) {
- skspcl = &skdev->skspcl_table[req_slot];
- if (skspcl->req.id == req_id &&
- skspcl->req.state == SKD_REQ_STATE_BUSY) {
- skd_complete_special(skdev,
- skcomp, skerr, skspcl);
- return;
- }
- }
- break;
- case SKD_ID_INTERNAL:
- if (req_slot == 0) {
- skspcl = &skdev->internal_skspcl;
- if (skspcl->req.id == req_id &&
- skspcl->req.state == SKD_REQ_STATE_BUSY) {
- skd_complete_internal(skdev,
- skcomp, skerr, skspcl);
- return;
- }
- }
- break;
- case SKD_ID_FIT_MSG:
- /*
- * These IDs should never appear in a completion record.
- */
- break;
- default:
- /*
- * These IDs should never appear anywhere.
- */
- break;
- }
- /*
- * If we get here it is a bad or stale id.
- */
- }
- static void skd_complete_special(struct skd_device *skdev,
- volatile struct fit_completion_entry_v1
- *skcomp,
- volatile struct fit_comp_error_info *skerr,
- struct skd_special_context *skspcl)
- {
- pr_debug("%s:%s:%d completing special request %p\n",
- skdev->name, __func__, __LINE__, skspcl);
- if (skspcl->orphaned) {
- /* Discard orphaned request */
- /* ?: Can this release directly or does it need
- * to use a worker? */
- pr_debug("%s:%s:%d release orphaned %p\n",
- skdev->name, __func__, __LINE__, skspcl);
- skd_release_special(skdev, skspcl);
- return;
- }
- skd_process_scsi_inq(skdev, skcomp, skerr, skspcl);
- skspcl->req.state = SKD_REQ_STATE_COMPLETED;
- skspcl->req.completion = *skcomp;
- skspcl->req.err_info = *skerr;
- skd_log_check_status(skdev, skspcl->req.completion.status, skerr->key,
- skerr->code, skerr->qual, skerr->fruc);
- wake_up_interruptible(&skdev->waitq);
- }
- /* assume spinlock is already held */
- static void skd_release_special(struct skd_device *skdev,
- struct skd_special_context *skspcl)
- {
- int i, was_depleted;
- for (i = 0; i < skspcl->req.n_sg; i++) {
- struct page *page = sg_page(&skspcl->req.sg[i]);
- __free_page(page);
- }
- was_depleted = (skdev->skspcl_free_list == NULL);
- skspcl->req.state = SKD_REQ_STATE_IDLE;
- skspcl->req.id += SKD_ID_INCR;
- skspcl->req.next =
- (struct skd_request_context *)skdev->skspcl_free_list;
- skdev->skspcl_free_list = (struct skd_special_context *)skspcl;
- if (was_depleted) {
- pr_debug("%s:%s:%d skspcl was depleted\n",
- skdev->name, __func__, __LINE__);
- /* Free list was depleted. There might be waiters. */
- wake_up_interruptible(&skdev->waitq);
- }
- }
- static void skd_reset_skcomp(struct skd_device *skdev)
- {
- u32 nbytes;
- struct fit_completion_entry_v1 *skcomp;
- nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY;
- nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY;
- memset(skdev->skcomp_table, 0, nbytes);
- skdev->skcomp_ix = 0;
- skdev->skcomp_cycle = 1;
- }
- /*
- *****************************************************************************
- * INTERRUPTS
- *****************************************************************************
- */
- static void skd_completion_worker(struct work_struct *work)
- {
- struct skd_device *skdev =
- container_of(work, struct skd_device, completion_worker);
- unsigned long flags;
- int flush_enqueued = 0;
- spin_lock_irqsave(&skdev->lock, flags);
- /*
-  * Pass in limit=0, which means no limit:
-  * process everything in the completion queue.
-  */
- skd_isr_completion_posted(skdev, 0, &flush_enqueued);
- skd_request_fn(skdev->queue);
- spin_unlock_irqrestore(&skdev->lock, flags);
- }
- static void skd_isr_msg_from_dev(struct skd_device *skdev);
- static irqreturn_t skd_isr(int irq, void *ptr)
- {
- struct skd_device *skdev;
- u32 intstat;
- u32 ack;
- int rc = 0;
- int deferred = 0;
- int flush_enqueued = 0;
- skdev = (struct skd_device *)ptr;
- spin_lock(&skdev->lock);
- for (;;) {
- intstat = SKD_READL(skdev, FIT_INT_STATUS_HOST);
- ack = FIT_INT_DEF_MASK;
- ack &= intstat;
- pr_debug("%s:%s:%d intstat=0x%x ack=0x%x\n",
- skdev->name, __func__, __LINE__, intstat, ack);
- /* As long as there is an interrupt pending on the device, keep
-  * looping. When none remain, get out; if this pass handled
-  * nothing, still defer completion processing to the worker.
-  */
- if (ack == 0) {
- /* No interrupts pending, but run the completion
-  * processor anyway.
-  */
- if (rc == 0 &&
- likely(skdev->state == SKD_DRVR_STATE_ONLINE))
- deferred = 1;
- break;
- }
- rc = IRQ_HANDLED;
- SKD_WRITEL(skdev, ack, FIT_INT_STATUS_HOST);
- if (likely((skdev->state != SKD_DRVR_STATE_LOAD) &&
- (skdev->state != SKD_DRVR_STATE_STOPPING))) {
- if (intstat & FIT_ISH_COMPLETION_POSTED) {
- /*
- * If we have already deferred completion
- * processing, don't bother running it again
- */
- if (deferred == 0)
- deferred =
- skd_isr_completion_posted(skdev,
- skd_isr_comp_limit, &flush_enqueued);
- }
- if (intstat & FIT_ISH_FW_STATE_CHANGE) {
- skd_isr_fwstate(skdev);
- if (skdev->state == SKD_DRVR_STATE_FAULT ||
- skdev->state ==
- SKD_DRVR_STATE_DISAPPEARED) {
- spin_unlock(&skdev->lock);
- return rc;
- }
- }
- if (intstat & FIT_ISH_MSG_FROM_DEV)
- skd_isr_msg_from_dev(skdev);
- }
- }
- if (unlikely(flush_enqueued))
- skd_request_fn(skdev->queue);
- if (deferred)
- schedule_work(&skdev->completion_worker);
- else if (!flush_enqueued)
- skd_request_fn(skdev->queue);
- spin_unlock(&skdev->lock);
- return rc;
- }
- static void skd_drive_fault(struct skd_device *skdev)
- {
- skdev->state = SKD_DRVR_STATE_FAULT;
- pr_err("(%s): Drive FAULT\n", skd_name(skdev));
- }
- static void skd_drive_disappeared(struct skd_device *skdev)
- {
- skdev->state = SKD_DRVR_STATE_DISAPPEARED;
- pr_err("(%s): Drive DISAPPEARED\n", skd_name(skdev));
- }
- static void skd_isr_fwstate(struct skd_device *skdev)
- {
- u32 sense;
- u32 state;
- u32 mtd;
- int prev_driver_state = skdev->state;
- sense = SKD_READL(skdev, FIT_STATUS);
- state = sense & FIT_SR_DRIVE_STATE_MASK;
- pr_err("(%s): s1120 state %s(%d)=>%s(%d)\n",
- skd_name(skdev),
- skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
- skd_drive_state_to_str(state), state);
- skdev->drive_state = state;
- switch (skdev->drive_state) {
- case FIT_SR_DRIVE_INIT:
- if (skdev->state == SKD_DRVR_STATE_PROTOCOL_MISMATCH) {
- skd_disable_interrupts(skdev);
- break;
- }
- if (skdev->state == SKD_DRVR_STATE_RESTARTING)
- skd_recover_requests(skdev, 0);
- if (skdev->state == SKD_DRVR_STATE_WAIT_BOOT) {
- skdev->timer_countdown = SKD_STARTING_TIMO;
- skdev->state = SKD_DRVR_STATE_STARTING;
- skd_soft_reset(skdev);
- break;
- }
- mtd = FIT_MXD_CONS(FIT_MTD_FITFW_INIT, 0, 0);
- SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
- skdev->last_mtd = mtd;
- break;
- case FIT_SR_DRIVE_ONLINE:
- skdev->cur_max_queue_depth = skd_max_queue_depth;
- if (skdev->cur_max_queue_depth > skdev->dev_max_queue_depth)
- skdev->cur_max_queue_depth = skdev->dev_max_queue_depth;
- skdev->queue_low_water_mark =
- skdev->cur_max_queue_depth * 2 / 3 + 1;
- if (skdev->queue_low_water_mark < 1)
- skdev->queue_low_water_mark = 1;
- pr_info(
- "(%s): Queue depth limit=%d dev=%d lowat=%d\n",
- skd_name(skdev),
- skdev->cur_max_queue_depth,
- skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
- skd_refresh_device_data(skdev);
- break;
- case FIT_SR_DRIVE_BUSY:
- skdev->state = SKD_DRVR_STATE_BUSY;
- skdev->timer_countdown = SKD_BUSY_TIMO;
- skd_quiesce_dev(skdev);
- break;
- case FIT_SR_DRIVE_BUSY_SANITIZE:
- /* set timer for 3 seconds; we'll abort any unfinished
-  * commands after it expires
-  */
- skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
- skdev->timer_countdown = SKD_TIMER_SECONDS(3);
- blk_start_queue(skdev->queue);
- break;
- case FIT_SR_DRIVE_BUSY_ERASE:
- skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
- skdev->timer_countdown = SKD_BUSY_TIMO;
- break;
- case FIT_SR_DRIVE_OFFLINE:
- skdev->state = SKD_DRVR_STATE_IDLE;
- break;
- case FIT_SR_DRIVE_SOFT_RESET:
- switch (skdev->state) {
- case SKD_DRVR_STATE_STARTING:
- case SKD_DRVR_STATE_RESTARTING:
- /* Expected by a caller of skd_soft_reset() */
- break;
- default:
- skdev->state = SKD_DRVR_STATE_RESTARTING;
- break;
- }
- break;
- case FIT_SR_DRIVE_FW_BOOTING:
- pr_debug("%s:%s:%d ISR FIT_SR_DRIVE_FW_BOOTING %s\n",
- skdev->name, __func__, __LINE__, skdev->name);
- skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
- skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
- break;
- case FIT_SR_DRIVE_DEGRADED:
- case FIT_SR_PCIE_LINK_DOWN:
- case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
- break;
- case FIT_SR_DRIVE_FAULT:
- skd_drive_fault(skdev);
- skd_recover_requests(skdev, 0);
- blk_start_queue(skdev->queue);
- break;
- /* PCIe bus returned all Fs? */
- case 0xFF:
- pr_info("(%s): state=0x%x sense=0x%x\n",
- skd_name(skdev), state, sense);
- skd_drive_disappeared(skdev);
- skd_recover_requests(skdev, 0);
- blk_start_queue(skdev->queue);
- break;
- default:
- /*
- * Unknown FW state. Wait for a state we recognize.
- */
- break;
- }
- pr_err("(%s): Driver state %s(%d)=>%s(%d)\n",
- skd_name(skdev),
- skd_skdev_state_to_str(prev_driver_state), prev_driver_state,
- skd_skdev_state_to_str(skdev->state), skdev->state);
- }
- static void skd_recover_requests(struct skd_device *skdev, int requeue)
- {
- int i;
- for (i = 0; i < skdev->num_req_context; i++) {
- struct skd_request_context *skreq = &skdev->skreq_table[i];
- if (skreq->state == SKD_REQ_STATE_BUSY) {
- skd_log_skreq(skdev, skreq, "recover");
- SKD_ASSERT((skreq->id & SKD_ID_INCR) != 0);
- SKD_ASSERT(skreq->req != NULL);
- /* Release DMA resources for the request. */
- if (skreq->n_sg > 0)
- skd_postop_sg_list(skdev, skreq);
- if (requeue &&
- (unsigned long) ++skreq->req->special <
- SKD_MAX_RETRIES)
- blk_requeue_request(skdev->queue, skreq->req);
- else
- skd_end_request(skdev, skreq, -EIO);
- skreq->req = NULL;
- skreq->state = SKD_REQ_STATE_IDLE;
- skreq->id += SKD_ID_INCR;
- }
- if (i > 0)
- skreq[-1].next = skreq;
- skreq->next = NULL;
- }
- skdev->skreq_free_list = skdev->skreq_table;
- for (i = 0; i < skdev->num_fitmsg_context; i++) {
- struct skd_fitmsg_context *skmsg = &skdev->skmsg_table[i];
- if (skmsg->state == SKD_MSG_STATE_BUSY) {
- skd_log_skmsg(skdev, skmsg, "salvaged");
- SKD_ASSERT((skmsg->id & SKD_ID_INCR) != 0);
- skmsg->state = SKD_MSG_STATE_IDLE;
- skmsg->id += SKD_ID_INCR;
- }
- if (i > 0)
- skmsg[-1].next = skmsg;
- skmsg->next = NULL;
- }
- skdev->skmsg_free_list = skdev->skmsg_table;
- for (i = 0; i < skdev->n_special; i++) {
- struct skd_special_context *skspcl = &skdev->skspcl_table[i];
- /* If orphaned, reclaim it: it has already been reported to
-  * the process as an error (it was just waiting for a
-  * completion that didn't come, and now it never will).
-  * If busy, change to a state that will cause it to error
-  * out in the wait routine and let it do the normal
-  * reporting and reclaiming.
-  */
- if (skspcl->req.state == SKD_REQ_STATE_BUSY) {
- if (skspcl->orphaned) {
- pr_debug("%s:%s:%d orphaned %p\n",
- skdev->name, __func__, __LINE__,
- skspcl);
- skd_release_special(skdev, skspcl);
- } else {
- pr_debug("%s:%s:%d not orphaned %p\n",
- skdev->name, __func__, __LINE__,
- skspcl);
- skspcl->req.state = SKD_REQ_STATE_ABORTED;
- }
- }
- }
- skdev->skspcl_free_list = skdev->skspcl_table;
- for (i = 0; i < SKD_N_TIMEOUT_SLOT; i++)
- skdev->timeout_slot[i] = 0;
- skdev->in_flight = 0;
- }
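- /*
-  * skd_isr_msg_from_dev() below advances a message-to-device (MTD)
-  * handshake one step per acknowledgment, in this order:
-  *
-  *   FITFW_INIT -> GET_CMDQ_DEPTH -> SET_COMPQ_DEPTH ->
-  *     SET_COMPQ_ADDR -> CMD_LOG_HOST_ID -> CMD_LOG_TIME_STAMP_LO ->
-  *     CMD_LOG_TIME_STAMP_HI -> ARM_QUEUE
-  *
-  * Each case writes the next request and records it in last_mtd;
-  * replies whose type does not match last_mtd are ignored.
-  */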
- static void skd_isr_msg_from_dev(struct skd_device *skdev)
- {
- u32 mfd;
- u32 mtd;
- u32 data;
- mfd = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
- pr_debug("%s:%s:%d mfd=0x%x last_mtd=0x%x\n",
- skdev->name, __func__, __LINE__, mfd, skdev->last_mtd);
- /* ignore any mtd that is an ack for something we didn't send */
- if (FIT_MXD_TYPE(mfd) != FIT_MXD_TYPE(skdev->last_mtd))
- return;
- switch (FIT_MXD_TYPE(mfd)) {
- case FIT_MTD_FITFW_INIT:
- skdev->proto_ver = FIT_PROTOCOL_MAJOR_VER(mfd);
- if (skdev->proto_ver != FIT_PROTOCOL_VERSION_1) {
- pr_err("(%s): protocol mismatch\n",
- skdev->name);
- pr_err("(%s): got=%d support=%d\n",
- skdev->name, skdev->proto_ver,
- FIT_PROTOCOL_VERSION_1);
- pr_err("(%s): please upgrade driver\n",
- skdev->name);
- skdev->state = SKD_DRVR_STATE_PROTOCOL_MISMATCH;
- skd_soft_reset(skdev);
- break;
- }
- mtd = FIT_MXD_CONS(FIT_MTD_GET_CMDQ_DEPTH, 0, 0);
- SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
- skdev->last_mtd = mtd;
- break;
- case FIT_MTD_GET_CMDQ_DEPTH:
- skdev->dev_max_queue_depth = FIT_MXD_DATA(mfd);
- mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_DEPTH, 0,
- SKD_N_COMPLETION_ENTRY);
- SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
- skdev->last_mtd = mtd;
- break;
- case FIT_MTD_SET_COMPQ_DEPTH:
- SKD_WRITEQ(skdev, skdev->cq_dma_address, FIT_MSG_TO_DEVICE_ARG);
- mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_ADDR, 0, 0);
- SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
- skdev->last_mtd = mtd;
- break;
- case FIT_MTD_SET_COMPQ_ADDR:
- skd_reset_skcomp(skdev);
- mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_HOST_ID, 0, skdev->devno);
- SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
- skdev->last_mtd = mtd;
- break;
- case FIT_MTD_CMD_LOG_HOST_ID:
- skdev->connect_time_stamp = get_seconds();
- data = skdev->connect_time_stamp & 0xFFFF;
- mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_LO, 0, data);
- SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
- skdev->last_mtd = mtd;
- break;
- case FIT_MTD_CMD_LOG_TIME_STAMP_LO:
- skdev->drive_jiffies = FIT_MXD_DATA(mfd);
- data = (skdev->connect_time_stamp >> 16) & 0xFFFF;
- mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_HI, 0, data);
- SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
- skdev->last_mtd = mtd;
- break;
- case FIT_MTD_CMD_LOG_TIME_STAMP_HI:
- skdev->drive_jiffies |= (FIT_MXD_DATA(mfd) << 16);
- mtd = FIT_MXD_CONS(FIT_MTD_ARM_QUEUE, 0, 0);
- SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
- skdev->last_mtd = mtd;
- pr_err("(%s): Time sync driver=0x%x device=0x%x\n",
- skd_name(skdev),
- skdev->connect_time_stamp, skdev->drive_jiffies);
- break;
- case FIT_MTD_ARM_QUEUE:
- skdev->last_mtd = 0;
- /*
- * State should be, or soon will be, FIT_SR_DRIVE_ONLINE.
- */
- break;
- default:
- break;
- }
- }
- static void skd_disable_interrupts(struct skd_device *skdev)
- {
- u32 sense;
- sense = SKD_READL(skdev, FIT_CONTROL);
- sense &= ~FIT_CR_ENABLE_INTERRUPTS;
- SKD_WRITEL(skdev, sense, FIT_CONTROL);
- pr_debug("%s:%s:%d sense 0x%x\n",
- skdev->name, __func__, __LINE__, sense);
- /* Note that all 1s are written. A 1 bit means
-  * disable, a 0 bit means enable.
-  */
- SKD_WRITEL(skdev, ~0, FIT_INT_MASK_HOST);
- }
- static void skd_enable_interrupts(struct skd_device *skdev)
- {
- u32 val;
- /* unmask interrupts first */
- val = FIT_ISH_FW_STATE_CHANGE +
- FIT_ISH_COMPLETION_POSTED + FIT_ISH_MSG_FROM_DEV;
- /* Note that the complement of the mask is written. A 1 bit means
-  * disable, a 0 bit means enable. */
- SKD_WRITEL(skdev, ~val, FIT_INT_MASK_HOST);
- pr_debug("%s:%s:%d interrupt mask=0x%x\n",
- skdev->name, __func__, __LINE__, ~val);
- val = SKD_READL(skdev, FIT_CONTROL);
- val |= FIT_CR_ENABLE_INTERRUPTS;
- pr_debug("%s:%s:%d control=0x%x\n",
- skdev->name, __func__, __LINE__, val);
- SKD_WRITEL(skdev, val, FIT_CONTROL);
- }
- /*
- *****************************************************************************
- * START, STOP, RESTART, QUIESCE, UNQUIESCE
- *****************************************************************************
- */
- static void skd_soft_reset(struct skd_device *skdev)
- {
- u32 val;
- val = SKD_READL(skdev, FIT_CONTROL);
- val |= (FIT_CR_SOFT_RESET);
- pr_debug("%s:%s:%d control=0x%x\n",
- skdev->name, __func__, __LINE__, val);
- SKD_WRITEL(skdev, val, FIT_CONTROL);
- }
- static void skd_start_device(struct skd_device *skdev)
- {
- unsigned long flags;
- u32 sense;
- u32 state;
- spin_lock_irqsave(&skdev->lock, flags);
- /* ack all ghost interrupts */
- SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
- sense = SKD_READL(skdev, FIT_STATUS);
- pr_debug("%s:%s:%d initial status=0x%x\n",
- skdev->name, __func__, __LINE__, sense);
- state = sense & FIT_SR_DRIVE_STATE_MASK;
- skdev->drive_state = state;
- skdev->last_mtd = 0;
- skdev->state = SKD_DRVR_STATE_STARTING;
- skdev->timer_countdown = SKD_STARTING_TIMO;
- skd_enable_interrupts(skdev);
- switch (skdev->drive_state) {
- case FIT_SR_DRIVE_OFFLINE:
- pr_err("(%s): Drive offline...\n", skd_name(skdev));
- break;
- case FIT_SR_DRIVE_FW_BOOTING:
- pr_debug("%s:%s:%d FIT_SR_DRIVE_FW_BOOTING %s\n",
- skdev->name, __func__, __LINE__, skdev->name);
- skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
- skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
- break;
- case FIT_SR_DRIVE_BUSY_SANITIZE:
- pr_info("(%s): Start: BUSY_SANITIZE\n",
- skd_name(skdev));
- skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
- skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
- break;
- case FIT_SR_DRIVE_BUSY_ERASE:
- pr_info("(%s): Start: BUSY_ERASE\n", skd_name(skdev));
- skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
- skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
- break;
- case FIT_SR_DRIVE_INIT:
- case FIT_SR_DRIVE_ONLINE:
- skd_soft_reset(skdev);
- break;
- case FIT_SR_DRIVE_BUSY:
- pr_err("(%s): Drive Busy...\n", skd_name(skdev));
- skdev->state = SKD_DRVR_STATE_BUSY;
- skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
- break;
- case FIT_SR_DRIVE_SOFT_RESET:
- pr_err("(%s) drive soft reset in prog\n",
- skd_name(skdev));
- break;
- case FIT_SR_DRIVE_FAULT:
- /* Fault state is bad: a soft reset won't clear it.
- * A hard reset might, but it's unclear whether the
- * device supports one. For now, just fault so the
- * system doesn't hang.
- */
- skd_drive_fault(skdev);
- /* start the queue so we can respond with errors to requests */
- pr_debug("%s:%s:%d starting %s queue\n",
- skdev->name, __func__, __LINE__, skdev->name);
- blk_start_queue(skdev->queue);
- skdev->gendisk_on = -1;
- wake_up_interruptible(&skdev->waitq);
- break;
- case 0xFF:
- /* Most likely the device isn't there or isn't responding
- * to the BAR1 addresses. */
- skd_drive_disappeared(skdev);
- /* start the queue so we can respond with errors to requests */
- pr_debug("%s:%s:%d starting %s queue to error-out reqs\n",
- skdev->name, __func__, __LINE__, skdev->name);
- blk_start_queue(skdev->queue);
- skdev->gendisk_on = -1;
- wake_up_interruptible(&skdev->waitq);
- break;
- default:
- pr_err("(%s) Start: unknown state %x\n",
- skd_name(skdev), skdev->drive_state);
- break;
- }
- state = SKD_READL(skdev, FIT_CONTROL);
- pr_debug("%s:%s:%d FIT Control Status=0x%x\n",
- skdev->name, __func__, __LINE__, state);
- state = SKD_READL(skdev, FIT_INT_STATUS_HOST);
- pr_debug("%s:%s:%d Intr Status=0x%x\n",
- skdev->name, __func__, __LINE__, state);
- state = SKD_READL(skdev, FIT_INT_MASK_HOST);
- pr_debug("%s:%s:%d Intr Mask=0x%x\n",
- skdev->name, __func__, __LINE__, state);
- state = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
- pr_debug("%s:%s:%d Msg from Dev=0x%x\n",
- skdev->name, __func__, __LINE__, state);
- state = SKD_READL(skdev, FIT_HW_VERSION);
- pr_debug("%s:%s:%d HW version=0x%x\n",
- skdev->name, __func__, __LINE__, state);
- spin_unlock_irqrestore(&skdev->lock, flags);
- }
- static void skd_stop_device(struct skd_device *skdev)
- {
- unsigned long flags;
- struct skd_special_context *skspcl = &skdev->internal_skspcl;
- u32 dev_state;
- int i;
- spin_lock_irqsave(&skdev->lock, flags);
- if (skdev->state != SKD_DRVR_STATE_ONLINE) {
- pr_err("(%s): skd_stop_device not online no sync\n",
- skd_name(skdev));
- goto stop_out;
- }
- if (skspcl->req.state != SKD_REQ_STATE_IDLE) {
- pr_err("(%s): skd_stop_device no special\n",
- skd_name(skdev));
- goto stop_out;
- }
- skdev->state = SKD_DRVR_STATE_SYNCING;
- skdev->sync_done = 0;
- skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE);
- spin_unlock_irqrestore(&skdev->lock, flags);
- wait_event_interruptible_timeout(skdev->waitq,
- (skdev->sync_done), (10 * HZ));
- spin_lock_irqsave(&skdev->lock, flags);
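- /* sync_done: 0 = cache flush timed out, 1 = flush completed,
- * anything else = flush error. */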
- switch (skdev->sync_done) {
- case 0:
- pr_err("(%s): skd_stop_device no sync\n",
- skd_name(skdev));
- break;
- case 1:
- pr_err("(%s): skd_stop_device sync done\n",
- skd_name(skdev));
- break;
- default:
- pr_err("(%s): skd_stop_device sync error\n",
- skd_name(skdev));
- }
- stop_out:
- skdev->state = SKD_DRVR_STATE_STOPPING;
- spin_unlock_irqrestore(&skdev->lock, flags);
- skd_kill_timer(skdev);
- spin_lock_irqsave(&skdev->lock, flags);
- skd_disable_interrupts(skdev);
- /* ensure all ints on device are cleared */
- /* soft reset the device to unload with a clean slate */
- SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
- SKD_WRITEL(skdev, FIT_CR_SOFT_RESET, FIT_CONTROL);
- spin_unlock_irqrestore(&skdev->lock, flags);
- /* poll every 100ms, 1 second timeout */
- for (i = 0; i < 10; i++) {
- dev_state =
- SKD_READL(skdev, FIT_STATUS) & FIT_SR_DRIVE_STATE_MASK;
- if (dev_state == FIT_SR_DRIVE_INIT)
- break;
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(msecs_to_jiffies(100));
- }
- if (dev_state != FIT_SR_DRIVE_INIT)
- pr_err("(%s): skd_stop_device state error 0x%02x\n",
- skd_name(skdev), dev_state);
- }
- /* assume spinlock is held */
- static void skd_restart_device(struct skd_device *skdev)
- {
- u32 state;
- /* ack all ghost interrupts */
- SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
- state = SKD_READL(skdev, FIT_STATUS);
- pr_debug("%s:%s:%d drive status=0x%x\n",
- skdev->name, __func__, __LINE__, state);
- state &= FIT_SR_DRIVE_STATE_MASK;
- skdev->drive_state = state;
- skdev->last_mtd = 0;
- skdev->state = SKD_DRVR_STATE_RESTARTING;
- skdev->timer_countdown = SKD_RESTARTING_TIMO;
- skd_soft_reset(skdev);
- }
- /* assume spinlock is held */
- static int skd_quiesce_dev(struct skd_device *skdev)
- {
- int rc = 0;
- switch (skdev->state) {
- case SKD_DRVR_STATE_BUSY:
- case SKD_DRVR_STATE_BUSY_IMMINENT:
- pr_debug("%s:%s:%d stopping %s queue\n",
- skdev->name, __func__, __LINE__, skdev->name);
- blk_stop_queue(skdev->queue);
- break;
- case SKD_DRVR_STATE_ONLINE:
- case SKD_DRVR_STATE_STOPPING:
- case SKD_DRVR_STATE_SYNCING:
- case SKD_DRVR_STATE_PAUSING:
- case SKD_DRVR_STATE_PAUSED:
- case SKD_DRVR_STATE_STARTING:
- case SKD_DRVR_STATE_RESTARTING:
- case SKD_DRVR_STATE_RESUMING:
- default:
- rc = -EINVAL;
- pr_debug("%s:%s:%d state [%d] not implemented\n",
- skdev->name, __func__, __LINE__, skdev->state);
- }
- return rc;
- }
- /* assume spinlock is held */
- static int skd_unquiesce_dev(struct skd_device *skdev)
- {
- int prev_driver_state = skdev->state;
- skd_log_skdev(skdev, "unquiesce");
- if (skdev->state == SKD_DRVR_STATE_ONLINE) {
- pr_debug("%s:%s:%d **** device already ONLINE\n",
- skdev->name, __func__, __LINE__);
- return 0;
- }
- if (skdev->drive_state != FIT_SR_DRIVE_ONLINE) {
- /*
- * If there has been a state change to other than
- * ONLINE, we will rely on the controller state change
- * to come back online and restart the queue.
- * The BUSY state means the driver is ready to
- * continue normal processing but is waiting for the
- * controller to become available.
- */
- skdev->state = SKD_DRVR_STATE_BUSY;
- pr_debug("%s:%s:%d drive BUSY state\n",
- skdev->name, __func__, __LINE__);
- return 0;
- }
- /*
- * Drive has just come online. The driver is either in startup,
- * paused performing a task, or busy waiting for hardware.
- */
- switch (skdev->state) {
- case SKD_DRVR_STATE_PAUSED:
- case SKD_DRVR_STATE_BUSY:
- case SKD_DRVR_STATE_BUSY_IMMINENT:
- case SKD_DRVR_STATE_BUSY_ERASE:
- case SKD_DRVR_STATE_STARTING:
- case SKD_DRVR_STATE_RESTARTING:
- case SKD_DRVR_STATE_FAULT:
- case SKD_DRVR_STATE_IDLE:
- case SKD_DRVR_STATE_LOAD:
- skdev->state = SKD_DRVR_STATE_ONLINE;
- pr_err("(%s): Driver state %s(%d)=>%s(%d)\n",
- skd_name(skdev),
- skd_skdev_state_to_str(prev_driver_state),
- prev_driver_state, skd_skdev_state_to_str(skdev->state),
- skdev->state);
- pr_debug("%s:%s:%d **** device ONLINE...starting block queue\n",
- skdev->name, __func__, __LINE__);
- pr_debug("%s:%s:%d starting %s queue\n",
- skdev->name, __func__, __LINE__, skdev->name);
- pr_info("(%s): STEC s1120 ONLINE\n", skd_name(skdev));
- blk_start_queue(skdev->queue);
- skdev->gendisk_on = 1;
- wake_up_interruptible(&skdev->waitq);
- break;
- case SKD_DRVR_STATE_DISAPPEARED:
- default:
- pr_debug("%s:%s:%d **** driver state %d, not implemented \n",
- skdev->name, __func__, __LINE__,
- skdev->state);
- return -EBUSY;
- }
- return 0;
- }
- /*
- *****************************************************************************
- * PCIe MSI/MSI-X INTERRUPT HANDLERS
- *****************************************************************************
- */
- static irqreturn_t skd_reserved_isr(int irq, void *skd_host_data)
- {
- struct skd_device *skdev = skd_host_data;
- unsigned long flags;
- spin_lock_irqsave(&skdev->lock, flags);
- pr_debug("%s:%s:%d MSIX = 0x%x\n",
- skdev->name, __func__, __LINE__,
- SKD_READL(skdev, FIT_INT_STATUS_HOST));
- pr_err("(%s): MSIX reserved irq %d = 0x%x\n", skd_name(skdev),
- irq, SKD_READL(skdev, FIT_INT_STATUS_HOST));
- SKD_WRITEL(skdev, FIT_INT_RESERVED_MASK, FIT_INT_STATUS_HOST);
- spin_unlock_irqrestore(&skdev->lock, flags);
- return IRQ_HANDLED;
- }
- static irqreturn_t skd_statec_isr(int irq, void *skd_host_data)
- {
- struct skd_device *skdev = skd_host_data;
- unsigned long flags;
- spin_lock_irqsave(&skdev->lock, flags);
- pr_debug("%s:%s:%d MSIX = 0x%x\n",
- skdev->name, __func__, __LINE__,
- SKD_READL(skdev, FIT_INT_STATUS_HOST));
- SKD_WRITEL(skdev, FIT_ISH_FW_STATE_CHANGE, FIT_INT_STATUS_HOST);
- skd_isr_fwstate(skdev);
- spin_unlock_irqrestore(&skdev->lock, flags);
- return IRQ_HANDLED;
- }
- static irqreturn_t skd_comp_q(int irq, void *skd_host_data)
- {
- struct skd_device *skdev = skd_host_data;
- unsigned long flags;
- int flush_enqueued = 0;
- int deferred;
- spin_lock_irqsave(&skdev->lock, flags);
- pr_debug("%s:%s:%d MSIX = 0x%x\n",
- skdev->name, __func__, __LINE__,
- SKD_READL(skdev, FIT_INT_STATUS_HOST));
- SKD_WRITEL(skdev, FIT_ISH_COMPLETION_POSTED, FIT_INT_STATUS_HOST);
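- /* Drain up to skd_isr_comp_limit completions inline;
- * skd_isr_completion_posted() returns nonzero if more remain, and those
- * are deferred to the completion worker scheduled below. */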
- deferred = skd_isr_completion_posted(skdev, skd_isr_comp_limit,
- &flush_enqueued);
- if (flush_enqueued)
- skd_request_fn(skdev->queue);
- if (deferred)
- schedule_work(&skdev->completion_worker);
- else if (!flush_enqueued)
- skd_request_fn(skdev->queue);
- spin_unlock_irqrestore(&skdev->lock, flags);
- return IRQ_HANDLED;
- }
- static irqreturn_t skd_msg_isr(int irq, void *skd_host_data)
- {
- struct skd_device *skdev = skd_host_data;
- unsigned long flags;
- spin_lock_irqsave(&skdev->lock, flags);
- pr_debug("%s:%s:%d MSIX = 0x%x\n",
- skdev->name, __func__, __LINE__,
- SKD_READL(skdev, FIT_INT_STATUS_HOST));
- SKD_WRITEL(skdev, FIT_ISH_MSG_FROM_DEV, FIT_INT_STATUS_HOST);
- skd_isr_msg_from_dev(skdev);
- spin_unlock_irqrestore(&skdev->lock, flags);
- return IRQ_HANDLED;
- }
- static irqreturn_t skd_qfull_isr(int irq, void *skd_host_data)
- {
- struct skd_device *skdev = skd_host_data;
- unsigned long flags;
- spin_lock_irqsave(&skdev->lock, flags);
- pr_debug("%s:%s:%d MSIX = 0x%x\n",
- skdev->name, __func__, __LINE__,
- SKD_READL(skdev, FIT_INT_STATUS_HOST));
- SKD_WRITEL(skdev, FIT_INT_QUEUE_FULL, FIT_INT_STATUS_HOST);
- spin_unlock_irqrestore(&skdev->lock, flags);
- return IRQ_HANDLED;
- }
- /*
- *****************************************************************************
- * PCIe MSI/MSI-X SETUP
- *****************************************************************************
- */
- struct skd_msix_entry {
- int have_irq;
- u32 vector;
- u32 entry;
- struct skd_device *rsp;
- char isr_name[30];
- };
- struct skd_init_msix_entry {
- const char *name;
- irq_handler_t handler;
- };
- #define SKD_MAX_MSIX_COUNT 13
- #define SKD_MIN_MSIX_COUNT 7
- #define SKD_BASE_MSIX_IRQ 4
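- /* Per-vector handler table: entries 0-3 are DMA vectors (unused, routed
- * to the reserved ISR), 4 is state change, 5 is the completion queue,
- * 6 is message-from-device, 7-8 are reserved, and 9-12 are queue-full
- * notifications. */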
- static struct skd_init_msix_entry msix_entries[SKD_MAX_MSIX_COUNT] = {
- { "(DMA 0)", skd_reserved_isr },
- { "(DMA 1)", skd_reserved_isr },
- { "(DMA 2)", skd_reserved_isr },
- { "(DMA 3)", skd_reserved_isr },
- { "(State Change)", skd_statec_isr },
- { "(COMPL_Q)", skd_comp_q },
- { "(MSG)", skd_msg_isr },
- { "(Reserved)", skd_reserved_isr },
- { "(Reserved)", skd_reserved_isr },
- { "(Queue Full 0)", skd_qfull_isr },
- { "(Queue Full 1)", skd_qfull_isr },
- { "(Queue Full 2)", skd_qfull_isr },
- { "(Queue Full 3)", skd_qfull_isr },
- };
- static void skd_release_msix(struct skd_device *skdev)
- {
- struct skd_msix_entry *qentry;
- int i;
- if (skdev->msix_entries) {
- for (i = 0; i < skdev->msix_count; i++) {
- qentry = &skdev->msix_entries[i];
- skdev = qentry->rsp;
- if (qentry->have_irq)
- devm_free_irq(&skdev->pdev->dev,
- qentry->vector, qentry->rsp);
- }
- kfree(skdev->msix_entries);
- }
- if (skdev->msix_count)
- pci_disable_msix(skdev->pdev);
- skdev->msix_count = 0;
- skdev->msix_entries = NULL;
- }
- static int skd_acquire_msix(struct skd_device *skdev)
- {
- int i, rc;
- struct pci_dev *pdev = skdev->pdev;
- struct msix_entry *entries;
- struct skd_msix_entry *qentry;
- entries = kzalloc(sizeof(struct msix_entry) * SKD_MAX_MSIX_COUNT,
- GFP_KERNEL);
- if (!entries)
- return -ENOMEM;
- for (i = 0; i < SKD_MAX_MSIX_COUNT; i++)
- entries[i].entry = i;
- rc = pci_enable_msix_exact(pdev, entries, SKD_MAX_MSIX_COUNT);
- if (rc) {
- pr_err("(%s): failed to enable MSI-X %d\n",
- skd_name(skdev), rc);
- goto msix_out;
- }
- skdev->msix_count = SKD_MAX_MSIX_COUNT;
- skdev->msix_entries = kzalloc(sizeof(struct skd_msix_entry) *
- skdev->msix_count, GFP_KERNEL);
- if (!skdev->msix_entries) {
- rc = -ENOMEM;
- pr_err("(%s): msix table allocation error\n",
- skd_name(skdev));
- goto msix_out;
- }
- for (i = 0; i < skdev->msix_count; i++) {
- qentry = &skdev->msix_entries[i];
- qentry->vector = entries[i].vector;
- qentry->entry = entries[i].entry;
- qentry->rsp = NULL;
- qentry->have_irq = 0;
- pr_debug("%s:%s:%d %s: <%s> msix (%d) vec %d, entry %x\n",
- skdev->name, __func__, __LINE__,
- pci_name(pdev), skdev->name,
- i, qentry->vector, qentry->entry);
- }
- /* Enable MSI-X vectors for the base queue */
- for (i = 0; i < skdev->msix_count; i++) {
- qentry = &skdev->msix_entries[i];
- snprintf(qentry->isr_name, sizeof(qentry->isr_name),
- "%s%d-msix %s", DRV_NAME, skdev->devno,
- msix_entries[i].name);
- rc = devm_request_irq(&skdev->pdev->dev, qentry->vector,
- msix_entries[i].handler, 0,
- qentry->isr_name, skdev);
- if (rc) {
- pr_err("(%s): Unable to register(%d) MSI-X "
- "handler %d: %s\n",
- skd_name(skdev), rc, i, qentry->isr_name);
- goto msix_out;
- } else {
- qentry->have_irq = 1;
- qentry->rsp = skdev;
- }
- }
- pr_debug("%s:%s:%d %s: <%s> msix %d irq(s) enabled\n",
- skdev->name, __func__, __LINE__,
- pci_name(pdev), skdev->name, skdev->msix_count);
- return 0;
- msix_out:
- kfree(entries);
- skd_release_msix(skdev);
- return rc;
- }
- static int skd_acquire_irq(struct skd_device *skdev)
- {
- int rc;
- struct pci_dev *pdev;
- pdev = skdev->pdev;
- skdev->msix_count = 0;
- RETRY_IRQ_TYPE:
- switch (skdev->irq_type) {
- case SKD_IRQ_MSIX:
- rc = skd_acquire_msix(skdev);
- if (!rc)
- pr_info("(%s): MSI-X %d irqs enabled\n",
- skd_name(skdev), skdev->msix_count);
- else {
- pr_err(
- "(%s): failed to enable MSI-X, re-trying with MSI %d\n",
- skd_name(skdev), rc);
- skdev->irq_type = SKD_IRQ_MSI;
- goto RETRY_IRQ_TYPE;
- }
- break;
- case SKD_IRQ_MSI:
- snprintf(skdev->isr_name, sizeof(skdev->isr_name), "%s%d-msi",
- DRV_NAME, skdev->devno);
- rc = pci_enable_msi_range(pdev, 1, 1);
- if (rc > 0) {
- rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr, 0,
- skdev->isr_name, skdev);
- if (rc) {
- pci_disable_msi(pdev);
- pr_err(
- "(%s): failed to allocate the MSI interrupt %d\n",
- skd_name(skdev), rc);
- goto RETRY_IRQ_LEGACY;
- }
- pr_info("(%s): MSI irq %d enabled\n",
- skd_name(skdev), pdev->irq);
- } else {
- RETRY_IRQ_LEGACY:
- pr_err(
- "(%s): failed to enable MSI, re-trying with LEGACY %d\n",
- skd_name(skdev), rc);
- skdev->irq_type = SKD_IRQ_LEGACY;
- goto RETRY_IRQ_TYPE;
- }
- break;
- case SKD_IRQ_LEGACY:
- snprintf(skdev->isr_name, sizeof(skdev->isr_name),
- "%s%d-legacy", DRV_NAME, skdev->devno);
- rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr,
- IRQF_SHARED, skdev->isr_name, skdev);
- if (!rc)
- pr_info("(%s): LEGACY irq %d enabled\n",
- skd_name(skdev), pdev->irq);
- else
- pr_err("(%s): request LEGACY irq error %d\n",
- skd_name(skdev), rc);
- break;
- default:
- pr_info("(%s): irq_type %d invalid, re-set to %d\n",
- skd_name(skdev), skdev->irq_type, SKD_IRQ_DEFAULT);
- skdev->irq_type = SKD_IRQ_LEGACY;
- goto RETRY_IRQ_TYPE;
- }
- return rc;
- }
- static void skd_release_irq(struct skd_device *skdev)
- {
- switch (skdev->irq_type) {
- case SKD_IRQ_MSIX:
- skd_release_msix(skdev);
- break;
- case SKD_IRQ_MSI:
- devm_free_irq(&skdev->pdev->dev, skdev->pdev->irq, skdev);
- pci_disable_msi(skdev->pdev);
- break;
- case SKD_IRQ_LEGACY:
- devm_free_irq(&skdev->pdev->dev, skdev->pdev->irq, skdev);
- break;
- default:
- pr_err("(%s): wrong irq type %d!",
- skd_name(skdev), skdev->irq_type);
- break;
- }
- }
- /*
- *****************************************************************************
- * CONSTRUCT
- *****************************************************************************
- */
- static int skd_cons_skcomp(struct skd_device *skdev)
- {
- int rc = 0;
- struct fit_completion_entry_v1 *skcomp;
- u32 nbytes;
- nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY;
- nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY;
- pr_debug("%s:%s:%d comp pci_alloc, total bytes %d entries %d\n",
- skdev->name, __func__, __LINE__,
- nbytes, SKD_N_COMPLETION_ENTRY);
- skcomp = pci_zalloc_consistent(skdev->pdev, nbytes,
- &skdev->cq_dma_address);
- if (skcomp == NULL) {
- rc = -ENOMEM;
- goto err_out;
- }
- skdev->skcomp_table = skcomp;
- skdev->skerr_table = (struct fit_comp_error_info *)((char *)skcomp +
- sizeof(*skcomp) *
- SKD_N_COMPLETION_ENTRY);
- err_out:
- return rc;
- }
- static int skd_cons_skmsg(struct skd_device *skdev)
- {
- int rc = 0;
- u32 i;
- pr_debug("%s:%s:%d skmsg_table kzalloc, struct %lu, count %u total %lu\n",
- skdev->name, __func__, __LINE__,
- sizeof(struct skd_fitmsg_context),
- skdev->num_fitmsg_context,
- sizeof(struct skd_fitmsg_context) * skdev->num_fitmsg_context);
- skdev->skmsg_table = kzalloc(sizeof(struct skd_fitmsg_context)
- *skdev->num_fitmsg_context, GFP_KERNEL);
- if (skdev->skmsg_table == NULL) {
- rc = -ENOMEM;
- goto err_out;
- }
- for (i = 0; i < skdev->num_fitmsg_context; i++) {
- struct skd_fitmsg_context *skmsg;
- skmsg = &skdev->skmsg_table[i];
- skmsg->id = i + SKD_ID_FIT_MSG;
- skmsg->state = SKD_MSG_STATE_IDLE;
- skmsg->msg_buf = pci_alloc_consistent(skdev->pdev,
- SKD_N_FITMSG_BYTES + 64,
- &skmsg->mb_dma_address);
- if (skmsg->msg_buf == NULL) {
- rc = -ENOMEM;
- goto err_out;
- }
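- /* Round msg_buf and mb_dma_address up to the FIT queue-command
- * base-address alignment (the allocation reserved 64 extra bytes for
- * this); 'offset' records the low-order bits of the raw address, which
- * skd_free_skmsg() adds back before freeing. */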
- skmsg->offset = (u32)((u64)skmsg->msg_buf &
- (~FIT_QCMD_BASE_ADDRESS_MASK));
- skmsg->msg_buf += ~FIT_QCMD_BASE_ADDRESS_MASK;
- skmsg->msg_buf = (u8 *)((u64)skmsg->msg_buf &
- FIT_QCMD_BASE_ADDRESS_MASK);
- skmsg->mb_dma_address += ~FIT_QCMD_BASE_ADDRESS_MASK;
- skmsg->mb_dma_address &= FIT_QCMD_BASE_ADDRESS_MASK;
- memset(skmsg->msg_buf, 0, SKD_N_FITMSG_BYTES);
- skmsg->next = &skmsg[1];
- }
- /* Free list is in order starting with the 0th entry. */
- skdev->skmsg_table[i - 1].next = NULL;
- skdev->skmsg_free_list = skdev->skmsg_table;
- err_out:
- return rc;
- }
- static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev,
- u32 n_sg,
- dma_addr_t *ret_dma_addr)
- {
- struct fit_sg_descriptor *sg_list;
- u32 nbytes;
- nbytes = sizeof(*sg_list) * n_sg;
- sg_list = pci_alloc_consistent(skdev->pdev, nbytes, ret_dma_addr);
- if (sg_list != NULL) {
- uint64_t dma_address = *ret_dma_addr;
- u32 i;
- memset(sg_list, 0, nbytes);
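- /* Chain the descriptors: each next_desc_ptr holds the bus address of
- * the following descriptor; the final entry is terminated with 0. */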
- for (i = 0; i < n_sg - 1; i++) {
- uint64_t ndp_off;
- ndp_off = (i + 1) * sizeof(struct fit_sg_descriptor);
- sg_list[i].next_desc_ptr = dma_address + ndp_off;
- }
- sg_list[i].next_desc_ptr = 0LL;
- }
- return sg_list;
- }
- static int skd_cons_skreq(struct skd_device *skdev)
- {
- int rc = 0;
- u32 i;
- pr_debug("%s:%s:%d skreq_table kzalloc, struct %lu, count %u total %lu\n",
- skdev->name, __func__, __LINE__,
- sizeof(struct skd_request_context),
- skdev->num_req_context,
- sizeof(struct skd_request_context) * skdev->num_req_context);
- skdev->skreq_table = kzalloc(sizeof(struct skd_request_context)
- * skdev->num_req_context, GFP_KERNEL);
- if (skdev->skreq_table == NULL) {
- rc = -ENOMEM;
- goto err_out;
- }
- pr_debug("%s:%s:%d alloc sg_table sg_per_req %u scatlist %lu total %lu\n",
- skdev->name, __func__, __LINE__,
- skdev->sgs_per_request, sizeof(struct scatterlist),
- skdev->sgs_per_request * sizeof(struct scatterlist));
- for (i = 0; i < skdev->num_req_context; i++) {
- struct skd_request_context *skreq;
- skreq = &skdev->skreq_table[i];
- skreq->id = i + SKD_ID_RW_REQUEST;
- skreq->state = SKD_REQ_STATE_IDLE;
- skreq->sg = kzalloc(sizeof(struct scatterlist) *
- skdev->sgs_per_request, GFP_KERNEL);
- if (skreq->sg == NULL) {
- rc = -ENOMEM;
- goto err_out;
- }
- sg_init_table(skreq->sg, skdev->sgs_per_request);
- skreq->sksg_list = skd_cons_sg_list(skdev,
- skdev->sgs_per_request,
- &skreq->sksg_dma_address);
- if (skreq->sksg_list == NULL) {
- rc = -ENOMEM;
- goto err_out;
- }
- skreq->next = &skreq[1];
- }
- /* Free list is in order starting with the 0th entry. */
- skdev->skreq_table[i - 1].next = NULL;
- skdev->skreq_free_list = skdev->skreq_table;
- err_out:
- return rc;
- }
- static int skd_cons_skspcl(struct skd_device *skdev)
- {
- int rc = 0;
- u32 i, nbytes;
- pr_debug("%s:%s:%d skspcl_table kzalloc, struct %lu, count %u total %lu\n",
- skdev->name, __func__, __LINE__,
- sizeof(struct skd_special_context),
- skdev->n_special,
- sizeof(struct skd_special_context) * skdev->n_special);
- skdev->skspcl_table = kzalloc(sizeof(struct skd_special_context)
- * skdev->n_special, GFP_KERNEL);
- if (skdev->skspcl_table == NULL) {
- rc = -ENOMEM;
- goto err_out;
- }
- for (i = 0; i < skdev->n_special; i++) {
- struct skd_special_context *skspcl;
- skspcl = &skdev->skspcl_table[i];
- skspcl->req.id = i + SKD_ID_SPECIAL_REQUEST;
- skspcl->req.state = SKD_REQ_STATE_IDLE;
- skspcl->req.next = &skspcl[1].req;
- nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
- skspcl->msg_buf =
- pci_zalloc_consistent(skdev->pdev, nbytes,
- &skspcl->mb_dma_address);
- if (skspcl->msg_buf == NULL) {
- rc = -ENOMEM;
- goto err_out;
- }
- skspcl->req.sg = kzalloc(sizeof(struct scatterlist) *
- SKD_N_SG_PER_SPECIAL, GFP_KERNEL);
- if (skspcl->req.sg == NULL) {
- rc = -ENOMEM;
- goto err_out;
- }
- skspcl->req.sksg_list = skd_cons_sg_list(skdev,
- SKD_N_SG_PER_SPECIAL,
- &skspcl->req.
- sksg_dma_address);
- if (skspcl->req.sksg_list == NULL) {
- rc = -ENOMEM;
- goto err_out;
- }
- }
- /* Free list is in order starting with the 0th entry. */
- skdev->skspcl_table[i - 1].req.next = NULL;
- skdev->skspcl_free_list = skdev->skspcl_table;
- return rc;
- err_out:
- return rc;
- }
- static int skd_cons_sksb(struct skd_device *skdev)
- {
- int rc = 0;
- struct skd_special_context *skspcl;
- u32 nbytes;
- skspcl = &skdev->internal_skspcl;
- skspcl->req.id = 0 + SKD_ID_INTERNAL;
- skspcl->req.state = SKD_REQ_STATE_IDLE;
- nbytes = SKD_N_INTERNAL_BYTES;
- skspcl->data_buf = pci_zalloc_consistent(skdev->pdev, nbytes,
- &skspcl->db_dma_address);
- if (skspcl->data_buf == NULL) {
- rc = -ENOMEM;
- goto err_out;
- }
- nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
- skspcl->msg_buf = pci_zalloc_consistent(skdev->pdev, nbytes,
- &skspcl->mb_dma_address);
- if (skspcl->msg_buf == NULL) {
- rc = -ENOMEM;
- goto err_out;
- }
- skspcl->req.sksg_list = skd_cons_sg_list(skdev, 1,
- &skspcl->req.sksg_dma_address);
- if (skspcl->req.sksg_list == NULL) {
- rc = -ENOMEM;
- goto err_out;
- }
- if (!skd_format_internal_skspcl(skdev)) {
- rc = -EINVAL;
- goto err_out;
- }
- err_out:
- return rc;
- }
- static int skd_cons_disk(struct skd_device *skdev)
- {
- int rc = 0;
- struct gendisk *disk;
- struct request_queue *q;
- unsigned long flags;
- disk = alloc_disk(SKD_MINORS_PER_DEVICE);
- if (!disk) {
- rc = -ENOMEM;
- goto err_out;
- }
- skdev->disk = disk;
- sprintf(disk->disk_name, DRV_NAME "%u", skdev->devno);
- disk->major = skdev->major;
- disk->first_minor = skdev->devno * SKD_MINORS_PER_DEVICE;
- disk->fops = &skd_blockdev_ops;
- disk->private_data = skdev;
- q = blk_init_queue(skd_request_fn, &skdev->lock);
- if (!q) {
- rc = -ENOMEM;
- goto err_out;
- }
- skdev->queue = q;
- disk->queue = q;
- q->queuedata = skdev;
- blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
- blk_queue_max_segments(q, skdev->sgs_per_request);
- blk_queue_max_hw_sectors(q, SKD_N_MAX_SECTORS);
- /* set sysfs optimal_io_size to 8K */
- blk_queue_io_opt(q, 8192);
- /* DISCARD Flag initialization. */
- q->limits.discard_granularity = 8192;
- q->limits.discard_alignment = 0;
- q->limits.max_discard_sectors = UINT_MAX >> 9;
- q->limits.discard_zeroes_data = 1;
- queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
- queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
- queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
- spin_lock_irqsave(&skdev->lock, flags);
- pr_debug("%s:%s:%d stopping %s queue\n",
- skdev->name, __func__, __LINE__, skdev->name);
- blk_stop_queue(skdev->queue);
- spin_unlock_irqrestore(&skdev->lock, flags);
- err_out:
- return rc;
- }
- #define SKD_N_DEV_TABLE 16u
- static u32 skd_next_devno;
- static struct skd_device *skd_construct(struct pci_dev *pdev)
- {
- struct skd_device *skdev;
- int blk_major = skd_major;
- int rc;
- skdev = kzalloc(sizeof(*skdev), GFP_KERNEL);
- if (!skdev) {
- pr_err(PFX "(%s): memory alloc failure\n",
- pci_name(pdev));
- return NULL;
- }
- skdev->state = SKD_DRVR_STATE_LOAD;
- skdev->pdev = pdev;
- skdev->devno = skd_next_devno++;
- skdev->major = blk_major;
- skdev->irq_type = skd_isr_type;
- sprintf(skdev->name, DRV_NAME "%d", skdev->devno);
- skdev->dev_max_queue_depth = 0;
- skdev->num_req_context = skd_max_queue_depth;
- skdev->num_fitmsg_context = skd_max_queue_depth;
- skdev->n_special = skd_max_pass_thru;
- skdev->cur_max_queue_depth = 1;
- skdev->queue_low_water_mark = 1;
- skdev->proto_ver = 99;
- skdev->sgs_per_request = skd_sgs_per_request;
- skdev->dbg_level = skd_dbg_level;
- atomic_set(&skdev->device_count, 0);
- spin_lock_init(&skdev->lock);
- INIT_WORK(&skdev->completion_worker, skd_completion_worker);
- pr_debug("%s:%s:%d skcomp\n", skdev->name, __func__, __LINE__);
- rc = skd_cons_skcomp(skdev);
- if (rc < 0)
- goto err_out;
- pr_debug("%s:%s:%d skmsg\n", skdev->name, __func__, __LINE__);
- rc = skd_cons_skmsg(skdev);
- if (rc < 0)
- goto err_out;
- pr_debug("%s:%s:%d skreq\n", skdev->name, __func__, __LINE__);
- rc = skd_cons_skreq(skdev);
- if (rc < 0)
- goto err_out;
- pr_debug("%s:%s:%d skspcl\n", skdev->name, __func__, __LINE__);
- rc = skd_cons_skspcl(skdev);
- if (rc < 0)
- goto err_out;
- pr_debug("%s:%s:%d sksb\n", skdev->name, __func__, __LINE__);
- rc = skd_cons_sksb(skdev);
- if (rc < 0)
- goto err_out;
- pr_debug("%s:%s:%d disk\n", skdev->name, __func__, __LINE__);
- rc = skd_cons_disk(skdev);
- if (rc < 0)
- goto err_out;
- pr_debug("%s:%s:%d VICTORY\n", skdev->name, __func__, __LINE__);
- return skdev;
- err_out:
- pr_debug("%s:%s:%d construct failed\n",
- skdev->name, __func__, __LINE__);
- skd_destruct(skdev);
- return NULL;
- }
- /*
- *****************************************************************************
- * DESTRUCT (FREE)
- *****************************************************************************
- */
- static void skd_free_skcomp(struct skd_device *skdev)
- {
- if (skdev->skcomp_table != NULL) {
- u32 nbytes;
- nbytes = sizeof(skdev->skcomp_table[0]) *
- SKD_N_COMPLETION_ENTRY;
- pci_free_consistent(skdev->pdev, nbytes,
- skdev->skcomp_table, skdev->cq_dma_address);
- }
- skdev->skcomp_table = NULL;
- skdev->cq_dma_address = 0;
- }
- static void skd_free_skmsg(struct skd_device *skdev)
- {
- u32 i;
- if (skdev->skmsg_table == NULL)
- return;
- for (i = 0; i < skdev->num_fitmsg_context; i++) {
- struct skd_fitmsg_context *skmsg;
- skmsg = &skdev->skmsg_table[i];
- if (skmsg->msg_buf != NULL) {
- skmsg->msg_buf += skmsg->offset;
- skmsg->mb_dma_address += skmsg->offset;
- pci_free_consistent(skdev->pdev, SKD_N_FITMSG_BYTES,
- skmsg->msg_buf,
- skmsg->mb_dma_address);
- }
- skmsg->msg_buf = NULL;
- skmsg->mb_dma_address = 0;
- }
- kfree(skdev->skmsg_table);
- skdev->skmsg_table = NULL;
- }
- static void skd_free_sg_list(struct skd_device *skdev,
- struct fit_sg_descriptor *sg_list,
- u32 n_sg, dma_addr_t dma_addr)
- {
- if (sg_list != NULL) {
- u32 nbytes;
- nbytes = sizeof(*sg_list) * n_sg;
- pci_free_consistent(skdev->pdev, nbytes, sg_list, dma_addr);
- }
- }
- static void skd_free_skreq(struct skd_device *skdev)
- {
- u32 i;
- if (skdev->skreq_table == NULL)
- return;
- for (i = 0; i < skdev->num_req_context; i++) {
- struct skd_request_context *skreq;
- skreq = &skdev->skreq_table[i];
- skd_free_sg_list(skdev, skreq->sksg_list,
- skdev->sgs_per_request,
- skreq->sksg_dma_address);
- skreq->sksg_list = NULL;
- skreq->sksg_dma_address = 0;
- kfree(skreq->sg);
- }
- kfree(skdev->skreq_table);
- skdev->skreq_table = NULL;
- }
- static void skd_free_skspcl(struct skd_device *skdev)
- {
- u32 i;
- u32 nbytes;
- if (skdev->skspcl_table == NULL)
- return;
- for (i = 0; i < skdev->n_special; i++) {
- struct skd_special_context *skspcl;
- skspcl = &skdev->skspcl_table[i];
- if (skspcl->msg_buf != NULL) {
- nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
- pci_free_consistent(skdev->pdev, nbytes,
- skspcl->msg_buf,
- skspcl->mb_dma_address);
- }
- skspcl->msg_buf = NULL;
- skspcl->mb_dma_address = 0;
- skd_free_sg_list(skdev, skspcl->req.sksg_list,
- SKD_N_SG_PER_SPECIAL,
- skspcl->req.sksg_dma_address);
- skspcl->req.sksg_list = NULL;
- skspcl->req.sksg_dma_address = 0;
- kfree(skspcl->req.sg);
- }
- kfree(skdev->skspcl_table);
- skdev->skspcl_table = NULL;
- }
- static void skd_free_sksb(struct skd_device *skdev)
- {
- struct skd_special_context *skspcl;
- u32 nbytes;
- skspcl = &skdev->internal_skspcl;
- if (skspcl->data_buf != NULL) {
- nbytes = SKD_N_INTERNAL_BYTES;
- pci_free_consistent(skdev->pdev, nbytes,
- skspcl->data_buf, skspcl->db_dma_address);
- }
- skspcl->data_buf = NULL;
- skspcl->db_dma_address = 0;
- if (skspcl->msg_buf != NULL) {
- nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
- pci_free_consistent(skdev->pdev, nbytes,
- skspcl->msg_buf, skspcl->mb_dma_address);
- }
- skspcl->msg_buf = NULL;
- skspcl->mb_dma_address = 0;
- skd_free_sg_list(skdev, skspcl->req.sksg_list, 1,
- skspcl->req.sksg_dma_address);
- skspcl->req.sksg_list = NULL;
- skspcl->req.sksg_dma_address = 0;
- }
- static void skd_free_disk(struct skd_device *skdev)
- {
- struct gendisk *disk = skdev->disk;
- if (disk != NULL) {
- struct request_queue *q = disk->queue;
- if (disk->flags & GENHD_FL_UP)
- del_gendisk(disk);
- if (q)
- blk_cleanup_queue(q);
- put_disk(disk);
- }
- skdev->disk = NULL;
- }
- static void skd_destruct(struct skd_device *skdev)
- {
- if (skdev == NULL)
- return;
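- /* Free everything in the reverse order of skd_construct(). */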
- pr_debug("%s:%s:%d disk\n", skdev->name, __func__, __LINE__);
- skd_free_disk(skdev);
- pr_debug("%s:%s:%d sksb\n", skdev->name, __func__, __LINE__);
- skd_free_sksb(skdev);
- pr_debug("%s:%s:%d skspcl\n", skdev->name, __func__, __LINE__);
- skd_free_skspcl(skdev);
- pr_debug("%s:%s:%d skreq\n", skdev->name, __func__, __LINE__);
- skd_free_skreq(skdev);
- pr_debug("%s:%s:%d skmsg\n", skdev->name, __func__, __LINE__);
- skd_free_skmsg(skdev);
- pr_debug("%s:%s:%d skcomp\n", skdev->name, __func__, __LINE__);
- skd_free_skcomp(skdev);
- pr_debug("%s:%s:%d skdev\n", skdev->name, __func__, __LINE__);
- kfree(skdev);
- }
- /*
- *****************************************************************************
- * BLOCK DEVICE (BDEV) GLUE
- *****************************************************************************
- */
- static int skd_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
- {
- struct skd_device *skdev;
- u64 capacity;
- skdev = bdev->bd_disk->private_data;
- pr_debug("%s:%s:%d %s: CMD[%s] getgeo device\n",
- skdev->name, __func__, __LINE__,
- bdev->bd_disk->disk_name, current->comm);
- if (skdev->read_cap_is_valid) {
- capacity = get_capacity(skdev->disk);
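- /* Report a conventional fake geometry: 64 heads, 255 sectors per
- * track, cylinders derived from the capacity. */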
- geo->heads = 64;
- geo->sectors = 255;
- geo->cylinders = (capacity) / (255 * 64);
- return 0;
- }
- return -EIO;
- }
- static int skd_bdev_attach(struct skd_device *skdev)
- {
- pr_debug("%s:%s:%d add_disk\n", skdev->name, __func__, __LINE__);
- add_disk(skdev->disk);
- return 0;
- }
- static const struct block_device_operations skd_blockdev_ops = {
- .owner = THIS_MODULE,
- .ioctl = skd_bdev_ioctl,
- .getgeo = skd_bdev_getgeo,
- };
- /*
- *****************************************************************************
- * PCIe DRIVER GLUE
- *****************************************************************************
- */
- static const struct pci_device_id skd_pci_tbl[] = {
- { PCI_VENDOR_ID_STEC, PCI_DEVICE_ID_S1120,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
- { 0 } /* terminate list */
- };
- MODULE_DEVICE_TABLE(pci, skd_pci_tbl);
- static char *skd_pci_info(struct skd_device *skdev, char *str)
- {
- int pcie_reg;
- strcpy(str, "PCIe (");
- pcie_reg = pci_find_capability(skdev->pdev, PCI_CAP_ID_EXP);
- if (pcie_reg) {
- char lwstr[6];
- uint16_t pcie_lstat, lspeed, lwidth;
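- /* 0x12 is the Link Status register offset within the PCIe
- * capability (PCI_EXP_LNKSTA). */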
- pcie_reg += 0x12;
- pci_read_config_word(skdev->pdev, pcie_reg, &pcie_lstat);
- lspeed = pcie_lstat & (0xF);
- lwidth = (pcie_lstat & 0x3F0) >> 4;
- if (lspeed == 1)
- strcat(str, "2.5GT/s ");
- else if (lspeed == 2)
- strcat(str, "5.0GT/s ");
- else
- strcat(str, "<unknown> ");
- snprintf(lwstr, sizeof(lwstr), "%dX)", lwidth);
- strcat(str, lwstr);
- }
- return str;
- }
- static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
- {
- int i;
- int rc = 0;
- char pci_str[32];
- struct skd_device *skdev;
- pr_info("STEC s1120 Driver(%s) version %s-b%s\n",
- DRV_NAME, DRV_VERSION, DRV_BUILD_ID);
- pr_info("(skd?:??:[%s]): vendor=%04X device=%04x\n",
- pci_name(pdev), pdev->vendor, pdev->device);
- rc = pci_enable_device(pdev);
- if (rc)
- return rc;
- rc = pci_request_regions(pdev, DRV_NAME);
- if (rc)
- goto err_out;
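- /* Prefer a 64-bit DMA mask; fall back to 32-bit if the platform
- * can't provide it. */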
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
- if (!rc) {
- if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
- pr_err("(%s): consistent DMA mask error %d\n",
- pci_name(pdev), rc);
- }
- } else {
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (rc) {
- pr_err("(%s): DMA mask error %d\n",
- pci_name(pdev), rc);
- goto err_out_regions;
- }
- }
- if (!skd_major) {
- rc = register_blkdev(0, DRV_NAME);
- if (rc < 0)
- goto err_out_regions;
- BUG_ON(!rc);
- skd_major = rc;
- }
- skdev = skd_construct(pdev);
- if (skdev == NULL) {
- rc = -ENOMEM;
- goto err_out_regions;
- }
- skd_pci_info(skdev, pci_str);
- pr_info("(%s): %s 64bit\n", skd_name(skdev), pci_str);
- pci_set_master(pdev);
- rc = pci_enable_pcie_error_reporting(pdev);
- if (rc) {
- pr_err(
- "(%s): bad enable of PCIe error reporting rc=%d\n",
- skd_name(skdev), rc);
- skdev->pcie_error_reporting_is_enabled = 0;
- } else
- skdev->pcie_error_reporting_is_enabled = 1;
- pci_set_drvdata(pdev, skdev);
- skdev->disk->driverfs_dev = &pdev->dev;
- for (i = 0; i < SKD_MAX_BARS; i++) {
- skdev->mem_phys[i] = pci_resource_start(pdev, i);
- skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
- skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
- skdev->mem_size[i]);
- if (!skdev->mem_map[i]) {
- pr_err("(%s): Unable to map adapter memory!\n",
- skd_name(skdev));
- rc = -ENODEV;
- goto err_out_iounmap;
- }
- pr_debug("%s:%s:%d mem_map=%p, phyd=%016llx, size=%d\n",
- skdev->name, __func__, __LINE__,
- skdev->mem_map[i],
- (uint64_t)skdev->mem_phys[i], skdev->mem_size[i]);
- }
- rc = skd_acquire_irq(skdev);
- if (rc) {
- pr_err("(%s): interrupt resource error %d\n",
- skd_name(skdev), rc);
- goto err_out_iounmap;
- }
- rc = skd_start_timer(skdev);
- if (rc)
- goto err_out_timer;
- init_waitqueue_head(&skdev->waitq);
- skd_start_device(skdev);
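- /* skd_start_device() completes asynchronously: gendisk_on becomes 1
- * once the drive reaches ONLINE and -1 if it faults or disappears. */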
- rc = wait_event_interruptible_timeout(skdev->waitq,
- (skdev->gendisk_on),
- (SKD_START_WAIT_SECONDS * HZ));
- if (skdev->gendisk_on > 0) {
- /* device came on-line after reset */
- skd_bdev_attach(skdev);
- rc = 0;
- } else {
- /* we timed out; something is wrong with the device,
- * so don't add the disk structure */
- pr_err(
- "(%s): error: waiting for s1120 timed out %d!\n",
- skd_name(skdev), rc);
- /* if no other error was reported, treat the timeout as ENXIO */
- if (!rc)
- rc = -ENXIO;
- goto err_out_timer;
- }
- #ifdef SKD_VMK_POLL_HANDLER
- if (skdev->irq_type == SKD_IRQ_MSIX) {
- /* MSIX completion handler is being used for coredump */
- vmklnx_scsi_register_poll_handler(skdev->scsi_host,
- skdev->msix_entries[5].vector,
- skd_comp_q, skdev);
- } else {
- vmklnx_scsi_register_poll_handler(skdev->scsi_host,
- skdev->pdev->irq, skd_isr,
- skdev);
- }
- #endif /* SKD_VMK_POLL_HANDLER */
- return rc;
- err_out_timer:
- skd_stop_device(skdev);
- skd_release_irq(skdev);
- err_out_iounmap:
- for (i = 0; i < SKD_MAX_BARS; i++)
- if (skdev->mem_map[i])
- iounmap(skdev->mem_map[i]);
- if (skdev->pcie_error_reporting_is_enabled)
- pci_disable_pcie_error_reporting(pdev);
- skd_destruct(skdev);
- err_out_regions:
- pci_release_regions(pdev);
- err_out:
- pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
- return rc;
- }
- static void skd_pci_remove(struct pci_dev *pdev)
- {
- int i;
- struct skd_device *skdev;
- skdev = pci_get_drvdata(pdev);
- if (!skdev) {
- pr_err("%s: no device data for PCI\n", pci_name(pdev));
- return;
- }
- skd_stop_device(skdev);
- skd_release_irq(skdev);
- for (i = 0; i < SKD_MAX_BARS; i++)
- if (skdev->mem_map[i])
- iounmap((u32 *)skdev->mem_map[i]);
- if (skdev->pcie_error_reporting_is_enabled)
- pci_disable_pcie_error_reporting(pdev);
- skd_destruct(skdev);
- pci_release_regions(pdev);
- pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
- }
- static int skd_pci_suspend(struct pci_dev *pdev, pm_message_t state)
- {
- int i;
- struct skd_device *skdev;
- skdev = pci_get_drvdata(pdev);
- if (!skdev) {
- pr_err("%s: no device data for PCI\n", pci_name(pdev));
- return -EIO;
- }
- skd_stop_device(skdev);
- skd_release_irq(skdev);
- for (i = 0; i < SKD_MAX_BARS; i++)
- if (skdev->mem_map[i])
- iounmap((u32 *)skdev->mem_map[i]);
- if (skdev->pcie_error_reporting_is_enabled)
- pci_disable_pcie_error_reporting(pdev);
- pci_release_regions(pdev);
- pci_save_state(pdev);
- pci_disable_device(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
- return 0;
- }
- static int skd_pci_resume(struct pci_dev *pdev)
- {
- int i;
- int rc = 0;
- struct skd_device *skdev;
- skdev = pci_get_drvdata(pdev);
- if (!skdev) {
- pr_err("%s: no device data for PCI\n", pci_name(pdev));
- return -1;
- }
- pci_set_power_state(pdev, PCI_D0);
- pci_enable_wake(pdev, PCI_D0, 0);
- pci_restore_state(pdev);
- rc = pci_enable_device(pdev);
- if (rc)
- return rc;
- rc = pci_request_regions(pdev, DRV_NAME);
- if (rc)
- goto err_out;
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
- if (!rc) {
- if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
- pr_err("(%s): consistent DMA mask error %d\n",
- pci_name(pdev), rc);
- }
- } else {
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (rc) {
- pr_err("(%s): DMA mask error %d\n",
- pci_name(pdev), rc);
- goto err_out_regions;
- }
- }
- pci_set_master(pdev);
- rc = pci_enable_pcie_error_reporting(pdev);
- if (rc) {
- pr_err("(%s): bad enable of PCIe error reporting rc=%d\n",
- skdev->name, rc);
- skdev->pcie_error_reporting_is_enabled = 0;
- } else
- skdev->pcie_error_reporting_is_enabled = 1;
- for (i = 0; i < SKD_MAX_BARS; i++) {
- skdev->mem_phys[i] = pci_resource_start(pdev, i);
- skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
- skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
- skdev->mem_size[i]);
- if (!skdev->mem_map[i]) {
- pr_err("(%s): Unable to map adapter memory!\n",
- skd_name(skdev));
- rc = -ENODEV;
- goto err_out_iounmap;
- }
- pr_debug("%s:%s:%d mem_map=%p, phyd=%016llx, size=%d\n",
- skdev->name, __func__, __LINE__,
- skdev->mem_map[i],
- (uint64_t)skdev->mem_phys[i], skdev->mem_size[i]);
- }
- rc = skd_acquire_irq(skdev);
- if (rc) {
- pr_err("(%s): interrupt resource error %d\n",
- pci_name(pdev), rc);
- goto err_out_iounmap;
- }
- rc = skd_start_timer(skdev);
- if (rc)
- goto err_out_timer;
- init_waitqueue_head(&skdev->waitq);
- skd_start_device(skdev);
- return rc;
- err_out_timer:
- skd_stop_device(skdev);
- skd_release_irq(skdev);
- err_out_iounmap:
- for (i = 0; i < SKD_MAX_BARS; i++)
- if (skdev->mem_map[i])
- iounmap(skdev->mem_map[i]);
- if (skdev->pcie_error_reporting_is_enabled)
- pci_disable_pcie_error_reporting(pdev);
- err_out_regions:
- pci_release_regions(pdev);
- err_out:
- pci_disable_device(pdev);
- return rc;
- }
- static void skd_pci_shutdown(struct pci_dev *pdev)
- {
- struct skd_device *skdev;
- pr_err("skd_pci_shutdown called\n");
- skdev = pci_get_drvdata(pdev);
- if (!skdev) {
- pr_err("%s: no device data for PCI\n", pci_name(pdev));
- return;
- }
- pr_err("%s: calling stop\n", skd_name(skdev));
- skd_stop_device(skdev);
- }
- static struct pci_driver skd_driver = {
- .name = DRV_NAME,
- .id_table = skd_pci_tbl,
- .probe = skd_pci_probe,
- .remove = skd_pci_remove,
- .suspend = skd_pci_suspend,
- .resume = skd_pci_resume,
- .shutdown = skd_pci_shutdown,
- };
- /*
- *****************************************************************************
- * LOGGING SUPPORT
- *****************************************************************************
- */
- static const char *skd_name(struct skd_device *skdev)
- {
- memset(skdev->id_str, 0, sizeof(skdev->id_str));
- if (skdev->inquiry_is_valid)
- snprintf(skdev->id_str, sizeof(skdev->id_str), "%s:%s:[%s]",
- skdev->name, skdev->inq_serial_num,
- pci_name(skdev->pdev));
- else
- snprintf(skdev->id_str, sizeof(skdev->id_str), "%s:??:[%s]",
- skdev->name, pci_name(skdev->pdev));
- return skdev->id_str;
- }
- const char *skd_drive_state_to_str(int state)
- {
- switch (state) {
- case FIT_SR_DRIVE_OFFLINE:
- return "OFFLINE";
- case FIT_SR_DRIVE_INIT:
- return "INIT";
- case FIT_SR_DRIVE_ONLINE:
- return "ONLINE";
- case FIT_SR_DRIVE_BUSY:
- return "BUSY";
- case FIT_SR_DRIVE_FAULT:
- return "FAULT";
- case FIT_SR_DRIVE_DEGRADED:
- return "DEGRADED";
- case FIT_SR_PCIE_LINK_DOWN:
- return "INK_DOWN";
- case FIT_SR_DRIVE_SOFT_RESET:
- return "SOFT_RESET";
- case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
- return "NEED_FW";
- case FIT_SR_DRIVE_INIT_FAULT:
- return "INIT_FAULT";
- case FIT_SR_DRIVE_BUSY_SANITIZE:
- return "BUSY_SANITIZE";
- case FIT_SR_DRIVE_BUSY_ERASE:
- return "BUSY_ERASE";
- case FIT_SR_DRIVE_FW_BOOTING:
- return "FW_BOOTING";
- default:
- return "???";
- }
- }
- const char *skd_skdev_state_to_str(enum skd_drvr_state state)
- {
- switch (state) {
- case SKD_DRVR_STATE_LOAD:
- return "LOAD";
- case SKD_DRVR_STATE_IDLE:
- return "IDLE";
- case SKD_DRVR_STATE_BUSY:
- return "BUSY";
- case SKD_DRVR_STATE_STARTING:
- return "STARTING";
- case SKD_DRVR_STATE_ONLINE:
- return "ONLINE";
- case SKD_DRVR_STATE_PAUSING:
- return "PAUSING";
- case SKD_DRVR_STATE_PAUSED:
- return "PAUSED";
- case SKD_DRVR_STATE_DRAINING_TIMEOUT:
- return "DRAINING_TIMEOUT";
- case SKD_DRVR_STATE_RESTARTING:
- return "RESTARTING";
- case SKD_DRVR_STATE_RESUMING:
- return "RESUMING";
- case SKD_DRVR_STATE_STOPPING:
- return "STOPPING";
- case SKD_DRVR_STATE_SYNCING:
- return "SYNCING";
- case SKD_DRVR_STATE_FAULT:
- return "FAULT";
- case SKD_DRVR_STATE_DISAPPEARED:
- return "DISAPPEARED";
- case SKD_DRVR_STATE_BUSY_ERASE:
- return "BUSY_ERASE";
- case SKD_DRVR_STATE_BUSY_SANITIZE:
- return "BUSY_SANITIZE";
- case SKD_DRVR_STATE_BUSY_IMMINENT:
- return "BUSY_IMMINENT";
- case SKD_DRVR_STATE_WAIT_BOOT:
- return "WAIT_BOOT";
- default:
- return "???";
- }
- }
- static const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
- {
- switch (state) {
- case SKD_MSG_STATE_IDLE:
- return "IDLE";
- case SKD_MSG_STATE_BUSY:
- return "BUSY";
- default:
- return "???";
- }
- }
- static const char *skd_skreq_state_to_str(enum skd_req_state state)
- {
- switch (state) {
- case SKD_REQ_STATE_IDLE:
- return "IDLE";
- case SKD_REQ_STATE_SETUP:
- return "SETUP";
- case SKD_REQ_STATE_BUSY:
- return "BUSY";
- case SKD_REQ_STATE_COMPLETED:
- return "COMPLETED";
- case SKD_REQ_STATE_TIMEOUT:
- return "TIMEOUT";
- case SKD_REQ_STATE_ABORTED:
- return "ABORTED";
- default:
- return "???";
- }
- }
- static void skd_log_skdev(struct skd_device *skdev, const char *event)
- {
- pr_debug("%s:%s:%d (%s) skdev=%p event='%s'\n",
- skdev->name, __func__, __LINE__, skdev->name, skdev, event);
- pr_debug("%s:%s:%d drive_state=%s(%d) driver_state=%s(%d)\n",
- skdev->name, __func__, __LINE__,
- skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
- skd_skdev_state_to_str(skdev->state), skdev->state);
- pr_debug("%s:%s:%d busy=%d limit=%d dev=%d lowat=%d\n",
- skdev->name, __func__, __LINE__,
- skdev->in_flight, skdev->cur_max_queue_depth,
- skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
- pr_debug("%s:%s:%d timestamp=0x%x cycle=%d cycle_ix=%d\n",
- skdev->name, __func__, __LINE__,
- skdev->timeout_stamp, skdev->skcomp_cycle, skdev->skcomp_ix);
- }
- static void skd_log_skmsg(struct skd_device *skdev,
- struct skd_fitmsg_context *skmsg, const char *event)
- {
- pr_debug("%s:%s:%d (%s) skmsg=%p event='%s'\n",
- skdev->name, __func__, __LINE__, skdev->name, skmsg, event);
- pr_debug("%s:%s:%d state=%s(%d) id=0x%04x length=%d\n",
- skdev->name, __func__, __LINE__,
- skd_skmsg_state_to_str(skmsg->state), skmsg->state,
- skmsg->id, skmsg->length);
- }
- static void skd_log_skreq(struct skd_device *skdev,
- struct skd_request_context *skreq, const char *event)
- {
- pr_debug("%s:%s:%d (%s) skreq=%p event='%s'\n",
- skdev->name, __func__, __LINE__, skdev->name, skreq, event);
- pr_debug("%s:%s:%d state=%s(%d) id=0x%04x fitmsg=0x%04x\n",
- skdev->name, __func__, __LINE__,
- skd_skreq_state_to_str(skreq->state), skreq->state,
- skreq->id, skreq->fitmsg_id);
- pr_debug("%s:%s:%d timo=0x%x sg_dir=%d n_sg=%d\n",
- skdev->name, __func__, __LINE__,
- skreq->timeout_stamp, skreq->sg_data_dir, skreq->n_sg);
- if (skreq->req != NULL) {
- struct request *req = skreq->req;
- u32 lba = (u32)blk_rq_pos(req);
- u32 count = blk_rq_sectors(req);
- pr_debug("%s:%s:%d "
- "req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n",
- skdev->name, __func__, __LINE__,
- req, lba, lba, count, count,
- (int)rq_data_dir(req));
- } else
- pr_debug("%s:%s:%d req=NULL\n",
- skdev->name, __func__, __LINE__);
- }
- /*
- *****************************************************************************
- * MODULE GLUE
- *****************************************************************************
- */
- static int __init skd_init(void)
- {
- pr_info(PFX " v%s-b%s loaded\n", DRV_VERSION, DRV_BUILD_ID);
- switch (skd_isr_type) {
- case SKD_IRQ_LEGACY:
- case SKD_IRQ_MSI:
- case SKD_IRQ_MSIX:
- break;
- default:
- pr_err(PFX "skd_isr_type %d invalid, re-set to %d\n",
- skd_isr_type, SKD_IRQ_DEFAULT);
- skd_isr_type = SKD_IRQ_DEFAULT;
- }
- if (skd_max_queue_depth < 1 ||
- skd_max_queue_depth > SKD_MAX_QUEUE_DEPTH) {
- pr_err(PFX "skd_max_queue_depth %d invalid, re-set to %d\n",
- skd_max_queue_depth, SKD_MAX_QUEUE_DEPTH_DEFAULT);
- skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
- }
- if (skd_max_req_per_msg < 1 || skd_max_req_per_msg > 14) {
- pr_err(PFX "skd_max_req_per_msg %d invalid, re-set to %d\n",
- skd_max_req_per_msg, SKD_MAX_REQ_PER_MSG_DEFAULT);
- skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
- }
- if (skd_sgs_per_request < 1 || skd_sgs_per_request > 4096) {
- pr_err(PFX "skd_sg_per_request %d invalid, re-set to %d\n",
- skd_sgs_per_request, SKD_N_SG_PER_REQ_DEFAULT);
- skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
- }
- if (skd_dbg_level < 0 || skd_dbg_level > 2) {
- pr_err(PFX "skd_dbg_level %d invalid, re-set to %d\n",
- skd_dbg_level, 0);
- skd_dbg_level = 0;
- }
- if (skd_isr_comp_limit < 0) {
- pr_err(PFX "skd_isr_comp_limit %d invalid, set to %d\n",
- skd_isr_comp_limit, 0);
- skd_isr_comp_limit = 0;
- }
- if (skd_max_pass_thru < 1 || skd_max_pass_thru > 50) {
- pr_err(PFX "skd_max_pass_thru %d invalid, re-set to %d\n",
- skd_max_pass_thru, SKD_N_SPECIAL_CONTEXT);
- skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
- }
- return pci_register_driver(&skd_driver);
- }
- static void __exit skd_exit(void)
- {
- pr_info(PFX " v%s-b%s unloading\n", DRV_VERSION, DRV_BUILD_ID);
- pci_unregister_driver(&skd_driver);
- if (skd_major)
- unregister_blkdev(skd_major, DRV_NAME);
- }
- module_init(skd_init);
- module_exit(skd_exit);
|