skd_main.c

/* Copyright 2012 STEC, Inc.
 *
 * This file is licensed under the terms of the 3-clause
 * BSD License (http://opensource.org/licenses/BSD-3-Clause)
 * or the GNU GPL-2.0 (http://www.gnu.org/licenses/gpl-2.0.html),
 * at your option. Both licenses are also available in the LICENSE file
 * distributed with this project. This file may not be copied, modified,
 * or distributed except in accordance with those terms.
 * Gordoni Waidhofer <gwaidhofer@stec-inc.com>
 *	Initial Driver Design!
 * Thomas Swann <tswann@stec-inc.com>
 *	Interrupt handling.
 * Ramprasad Chinthekindi <rchinthekindi@stec-inc.com>
 *	biomode implementation.
 * Akhil Bhansali <abhansali@stec-inc.com>
 *	Added support for DISCARD / FLUSH and FUA.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/compiler.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/hdreg.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/scatterlist.h>
#include <linux/version.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/aer.h>
#include <linux/ctype.h>
#include <linux/wait.h>
#include <linux/uio.h>
#include <scsi/scsi.h>
#include <scsi/sg.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include "skd_s1120.h"
static int skd_dbg_level;
static int skd_isr_comp_limit = 4;

enum {
	STEC_LINK_2_5GTS = 0,
	STEC_LINK_5GTS = 1,
	STEC_LINK_8GTS = 2,
	STEC_LINK_UNKNOWN = 0xFF
};

enum {
	SKD_FLUSH_INITIALIZER,
	SKD_FLUSH_ZERO_SIZE_FIRST,
	SKD_FLUSH_DATA_SECOND,
};

#define SKD_ASSERT(expr) \
	do { \
		if (unlikely(!(expr))) { \
			pr_err("Assertion failed! %s,%s,%s,line=%d\n", \
			       # expr, __FILE__, __func__, __LINE__); \
		} \
	} while (0)

#define DRV_NAME "skd"
#define DRV_VERSION "2.2.1"
#define DRV_BUILD_ID "0260"
#define PFX DRV_NAME ": "
#define DRV_BIN_VERSION 0x100
#define DRV_VER_COMPL "2.2.1." DRV_BUILD_ID

MODULE_AUTHOR("bug-reports: support@stec-inc.com");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("STEC s1120 PCIe SSD block driver (b" DRV_BUILD_ID ")");
MODULE_VERSION(DRV_VERSION "-" DRV_BUILD_ID);

#define PCI_VENDOR_ID_STEC 0x1B39
#define PCI_DEVICE_ID_S1120 0x0001

#define SKD_FUA_NV (1 << 1)
#define SKD_MINORS_PER_DEVICE 16

#define SKD_MAX_QUEUE_DEPTH 200u
#define SKD_PAUSE_TIMEOUT (5 * 1000)

#define SKD_N_FITMSG_BYTES (512u)
#define SKD_N_SPECIAL_CONTEXT 32u
#define SKD_N_SPECIAL_FITMSG_BYTES (128u)

/* SG elements are 32 bytes, so we can make this 4096 and still be under the
 * 128KB limit. That allows 4096*4K = 16M xfer size
 */
#define SKD_N_SG_PER_REQ_DEFAULT 256u
#define SKD_N_SG_PER_SPECIAL 256u

#define SKD_N_COMPLETION_ENTRY 256u
#define SKD_N_READ_CAP_BYTES (8u)
#define SKD_N_INTERNAL_BYTES (512u)

/* 5 bits of uniqifier, 0xF800 */
#define SKD_ID_INCR (0x400)
#define SKD_ID_TABLE_MASK (3u << 8u)
#define SKD_ID_RW_REQUEST (0u << 8u)
#define SKD_ID_INTERNAL (1u << 8u)
#define SKD_ID_SPECIAL_REQUEST (2u << 8u)
#define SKD_ID_FIT_MSG (3u << 8u)
#define SKD_ID_SLOT_MASK 0x00FFu
#define SKD_ID_SLOT_AND_TABLE_MASK 0x03FFu

#define SKD_N_TIMEOUT_SLOT 4u
#define SKD_TIMEOUT_SLOT_MASK 3u

#define SKD_N_MAX_SECTORS 2048u
#define SKD_MAX_RETRIES 2u

#define SKD_TIMER_SECONDS(seconds) (seconds)
#define SKD_TIMER_MINUTES(minutes) ((minutes) * (60))

#define INQ_STD_NBYTES 36
#define SKD_DISCARD_CDB_LENGTH 24
enum skd_drvr_state {
	SKD_DRVR_STATE_LOAD,
	SKD_DRVR_STATE_IDLE,
	SKD_DRVR_STATE_BUSY,
	SKD_DRVR_STATE_STARTING,
	SKD_DRVR_STATE_ONLINE,
	SKD_DRVR_STATE_PAUSING,
	SKD_DRVR_STATE_PAUSED,
	SKD_DRVR_STATE_DRAINING_TIMEOUT,
	SKD_DRVR_STATE_RESTARTING,
	SKD_DRVR_STATE_RESUMING,
	SKD_DRVR_STATE_STOPPING,
	SKD_DRVR_STATE_FAULT,
	SKD_DRVR_STATE_DISAPPEARED,
	SKD_DRVR_STATE_PROTOCOL_MISMATCH,
	SKD_DRVR_STATE_BUSY_ERASE,
	SKD_DRVR_STATE_BUSY_SANITIZE,
	SKD_DRVR_STATE_BUSY_IMMINENT,
	SKD_DRVR_STATE_WAIT_BOOT,
	SKD_DRVR_STATE_SYNCING,
};

#define SKD_WAIT_BOOT_TIMO SKD_TIMER_SECONDS(90u)
#define SKD_STARTING_TIMO SKD_TIMER_SECONDS(8u)
#define SKD_RESTARTING_TIMO SKD_TIMER_MINUTES(4u)
#define SKD_DRAINING_TIMO SKD_TIMER_SECONDS(6u)
#define SKD_BUSY_TIMO SKD_TIMER_MINUTES(20u)
#define SKD_STARTED_BUSY_TIMO SKD_TIMER_SECONDS(60u)
#define SKD_START_WAIT_SECONDS 90u

enum skd_req_state {
	SKD_REQ_STATE_IDLE,
	SKD_REQ_STATE_SETUP,
	SKD_REQ_STATE_BUSY,
	SKD_REQ_STATE_COMPLETED,
	SKD_REQ_STATE_TIMEOUT,
	SKD_REQ_STATE_ABORTED,
};

enum skd_fit_msg_state {
	SKD_MSG_STATE_IDLE,
	SKD_MSG_STATE_BUSY,
};

enum skd_check_status_action {
	SKD_CHECK_STATUS_REPORT_GOOD,
	SKD_CHECK_STATUS_REPORT_SMART_ALERT,
	SKD_CHECK_STATUS_REQUEUE_REQUEST,
	SKD_CHECK_STATUS_REPORT_ERROR,
	SKD_CHECK_STATUS_BUSY_IMMINENT,
};
struct skd_fitmsg_context {
	enum skd_fit_msg_state state;
	struct skd_fitmsg_context *next;
	u32 id;
	u16 outstanding;
	u32 length;
	u32 offset;
	u8 *msg_buf;
	dma_addr_t mb_dma_address;
};

struct skd_request_context {
	enum skd_req_state state;
	struct skd_request_context *next;
	u16 id;
	u32 fitmsg_id;
	struct request *req;
	u8 flush_cmd;
	u8 discard_page;
	u32 timeout_stamp;
	u8 sg_data_dir;
	struct scatterlist *sg;
	u32 n_sg;
	u32 sg_byte_count;
	struct fit_sg_descriptor *sksg_list;
	dma_addr_t sksg_dma_address;
	struct fit_completion_entry_v1 completion;
	struct fit_comp_error_info err_info;
};

#define SKD_DATA_DIR_HOST_TO_CARD 1
#define SKD_DATA_DIR_CARD_TO_HOST 2
#define SKD_DATA_DIR_NONE 3	/* especially for DISCARD requests. */

struct skd_special_context {
	struct skd_request_context req;
	u8 orphaned;
	void *data_buf;
	dma_addr_t db_dma_address;
	u8 *msg_buf;
	dma_addr_t mb_dma_address;
};

struct skd_sg_io {
	fmode_t mode;
	void __user *argp;
	struct sg_io_hdr sg;
	u8 cdb[16];
	u32 dxfer_len;
	u32 iovcnt;
	struct sg_iovec *iov;
	struct sg_iovec no_iov_iov;
	struct skd_special_context *skspcl;
};

typedef enum skd_irq_type {
	SKD_IRQ_LEGACY,
	SKD_IRQ_MSI,
	SKD_IRQ_MSIX
} skd_irq_type_t;

#define SKD_MAX_BARS 2
struct skd_device {
	volatile void __iomem *mem_map[SKD_MAX_BARS];
	resource_size_t mem_phys[SKD_MAX_BARS];
	u32 mem_size[SKD_MAX_BARS];

	skd_irq_type_t irq_type;
	u32 msix_count;
	struct skd_msix_entry *msix_entries;

	struct pci_dev *pdev;
	int pcie_error_reporting_is_enabled;

	spinlock_t lock;
	struct gendisk *disk;
	struct request_queue *queue;
	struct device *class_dev;
	int gendisk_on;
	int sync_done;

	atomic_t device_count;
	u32 devno;
	u32 major;
	char name[32];
	char isr_name[30];

	enum skd_drvr_state state;
	u32 drive_state;

	u32 in_flight;
	u32 cur_max_queue_depth;
	u32 queue_low_water_mark;
	u32 dev_max_queue_depth;

	u32 num_fitmsg_context;
	u32 num_req_context;

	u32 timeout_slot[SKD_N_TIMEOUT_SLOT];
	u32 timeout_stamp;
	struct skd_fitmsg_context *skmsg_free_list;
	struct skd_fitmsg_context *skmsg_table;

	struct skd_request_context *skreq_free_list;
	struct skd_request_context *skreq_table;

	struct skd_special_context *skspcl_free_list;
	struct skd_special_context *skspcl_table;

	struct skd_special_context internal_skspcl;
	u32 read_cap_blocksize;
	u32 read_cap_last_lba;
	int read_cap_is_valid;
	int inquiry_is_valid;
	u8 inq_serial_num[13];	/* 12 chars plus null term */
	u8 id_str[80];		/* holds a composite name (pci + sernum) */

	u8 skcomp_cycle;
	u32 skcomp_ix;
	struct fit_completion_entry_v1 *skcomp_table;
	struct fit_comp_error_info *skerr_table;
	dma_addr_t cq_dma_address;

	wait_queue_head_t waitq;

	struct timer_list timer;
	u32 timer_countdown;
	u32 timer_substate;

	int n_special;
	int sgs_per_request;
	u32 last_mtd;

	u32 proto_ver;

	int dbg_level;
	u32 connect_time_stamp;
	int connect_retries;
#define SKD_MAX_CONNECT_RETRIES 16
	u32 drive_jiffies;

	u32 timo_slot;

	struct work_struct completion_worker;
};

#define SKD_WRITEL(DEV, VAL, OFF) skd_reg_write32(DEV, VAL, OFF)
#define SKD_READL(DEV, OFF) skd_reg_read32(DEV, OFF)
#define SKD_WRITEQ(DEV, VAL, OFF) skd_reg_write64(DEV, VAL, OFF)
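/*
 * Register access helpers. All accesses below go through BAR 1
 * (mem_map[1]); when skdev->dbg_level is 2 or higher every read and
 * write is bracketed by barriers and traced with pr_debug().
 */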
static inline u32 skd_reg_read32(struct skd_device *skdev, u32 offset)
{
	u32 val;

	if (likely(skdev->dbg_level < 2))
		return readl(skdev->mem_map[1] + offset);
	else {
		barrier();
		val = readl(skdev->mem_map[1] + offset);
		barrier();
		pr_debug("%s:%s:%d offset %x = %x\n",
			 skdev->name, __func__, __LINE__, offset, val);
		return val;
	}
}

static inline void skd_reg_write32(struct skd_device *skdev, u32 val,
				   u32 offset)
{
	if (likely(skdev->dbg_level < 2)) {
		writel(val, skdev->mem_map[1] + offset);
		barrier();
	} else {
		barrier();
		writel(val, skdev->mem_map[1] + offset);
		barrier();
		pr_debug("%s:%s:%d offset %x = %x\n",
			 skdev->name, __func__, __LINE__, offset, val);
	}
}

static inline void skd_reg_write64(struct skd_device *skdev, u64 val,
				   u32 offset)
{
	if (likely(skdev->dbg_level < 2)) {
		writeq(val, skdev->mem_map[1] + offset);
		barrier();
	} else {
		barrier();
		writeq(val, skdev->mem_map[1] + offset);
		barrier();
		pr_debug("%s:%s:%d offset %x = %016llx\n",
			 skdev->name, __func__, __LINE__, offset, val);
	}
}
#define SKD_IRQ_DEFAULT SKD_IRQ_MSI
static int skd_isr_type = SKD_IRQ_DEFAULT;
module_param(skd_isr_type, int, 0444);
MODULE_PARM_DESC(skd_isr_type, "Interrupt type capability."
		 " (0==legacy, 1==MSI, 2==MSI-X, default==1)");

#define SKD_MAX_REQ_PER_MSG_DEFAULT 1
static int skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
module_param(skd_max_req_per_msg, int, 0444);
MODULE_PARM_DESC(skd_max_req_per_msg,
		 "Maximum SCSI requests packed in a single message."
		 " (1-14, default==1)");

#define SKD_MAX_QUEUE_DEPTH_DEFAULT 64
#define SKD_MAX_QUEUE_DEPTH_DEFAULT_STR "64"
static int skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
module_param(skd_max_queue_depth, int, 0444);
MODULE_PARM_DESC(skd_max_queue_depth,
		 "Maximum SCSI requests issued to s1120."
		 " (1-200, default==" SKD_MAX_QUEUE_DEPTH_DEFAULT_STR ")");

static int skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
module_param(skd_sgs_per_request, int, 0444);
MODULE_PARM_DESC(skd_sgs_per_request,
		 "Maximum SG elements per block request."
		 " (1-4096, default==256)");

static int skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
module_param(skd_max_pass_thru, int, 0444);
MODULE_PARM_DESC(skd_max_pass_thru,
		 "Maximum SCSI pass-thru at a time." " (1-50, default==32)");

module_param(skd_dbg_level, int, 0444);
MODULE_PARM_DESC(skd_dbg_level, "s1120 debug level (0,1,2)");

module_param(skd_isr_comp_limit, int, 0444);
MODULE_PARM_DESC(skd_isr_comp_limit, "s1120 isr comp limit (0=none) default=4");
/* Major device number dynamically assigned. */
static u32 skd_major;

static void skd_destruct(struct skd_device *skdev);
static const struct block_device_operations skd_blockdev_ops;
static void skd_send_fitmsg(struct skd_device *skdev,
			    struct skd_fitmsg_context *skmsg);
static void skd_send_special_fitmsg(struct skd_device *skdev,
				    struct skd_special_context *skspcl);
static void skd_request_fn(struct request_queue *rq);
static void skd_end_request(struct skd_device *skdev,
			    struct skd_request_context *skreq, int error);
static int skd_preop_sg_list(struct skd_device *skdev,
			     struct skd_request_context *skreq);
static void skd_postop_sg_list(struct skd_device *skdev,
			       struct skd_request_context *skreq);
static void skd_restart_device(struct skd_device *skdev);
static int skd_quiesce_dev(struct skd_device *skdev);
static int skd_unquiesce_dev(struct skd_device *skdev);
static void skd_release_special(struct skd_device *skdev,
				struct skd_special_context *skspcl);
static void skd_disable_interrupts(struct skd_device *skdev);
static void skd_isr_fwstate(struct skd_device *skdev);
static void skd_recover_requests(struct skd_device *skdev, int requeue);
static void skd_soft_reset(struct skd_device *skdev);

static const char *skd_name(struct skd_device *skdev);
const char *skd_drive_state_to_str(int state);
const char *skd_skdev_state_to_str(enum skd_drvr_state state);
static void skd_log_skdev(struct skd_device *skdev, const char *event);
static void skd_log_skmsg(struct skd_device *skdev,
			  struct skd_fitmsg_context *skmsg, const char *event);
static void skd_log_skreq(struct skd_device *skdev,
			  struct skd_request_context *skreq, const char *event);

/*
 *****************************************************************************
 * READ/WRITE REQUESTS
 *****************************************************************************
 */
static void skd_fail_all_pending(struct skd_device *skdev)
{
	struct request_queue *q = skdev->queue;
	struct request *req;

	for (;;) {
		req = blk_peek_request(q);
		if (req == NULL)
			break;
		blk_start_request(req);
		__blk_end_request_all(req, -EIO);
	}
}
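/*
 * Build a 10-byte READ(10)/WRITE(10) CDB: opcode 0x28 for reads or
 * 0x2a for writes, the LBA in big-endian order in bytes 2-5, and the
 * sector count in bytes 7-8.
 */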
static void
skd_prep_rw_cdb(struct skd_scsi_request *scsi_req,
		int data_dir, unsigned lba,
		unsigned count)
{
	if (data_dir == READ)
		scsi_req->cdb[0] = 0x28;
	else
		scsi_req->cdb[0] = 0x2a;

	scsi_req->cdb[1] = 0;
	scsi_req->cdb[2] = (lba & 0xff000000) >> 24;
	scsi_req->cdb[3] = (lba & 0xff0000) >> 16;
	scsi_req->cdb[4] = (lba & 0xff00) >> 8;
	scsi_req->cdb[5] = (lba & 0xff);
	scsi_req->cdb[6] = 0;
	scsi_req->cdb[7] = (count & 0xff00) >> 8;
	scsi_req->cdb[8] = count & 0xff;
	scsi_req->cdb[9] = 0;
}

static void
skd_prep_zerosize_flush_cdb(struct skd_scsi_request *scsi_req,
			    struct skd_request_context *skreq)
{
	skreq->flush_cmd = 1;

	scsi_req->cdb[0] = 0x35;
	scsi_req->cdb[1] = 0;
	scsi_req->cdb[2] = 0;
	scsi_req->cdb[3] = 0;
	scsi_req->cdb[4] = 0;
	scsi_req->cdb[5] = 0;
	scsi_req->cdb[6] = 0;
	scsi_req->cdb[7] = 0;
	scsi_req->cdb[8] = 0;
	scsi_req->cdb[9] = 0;
}
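/*
 * Build an UNMAP command for a DISCARD request. The page passed in is
 * used as the UNMAP parameter list: bytes 0-1 carry the data length
 * (6 + 16), bytes 2-3 the block descriptor length (16), and the single
 * descriptor at offset 8 holds the 8-byte starting LBA followed by the
 * 4-byte block count. The page is then attached to the block request
 * as its payload.
 */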
static void
skd_prep_discard_cdb(struct skd_scsi_request *scsi_req,
		     struct skd_request_context *skreq,
		     struct page *page,
		     u32 lba, u32 count)
{
	char *buf;
	unsigned long len;
	struct request *req;

	buf = page_address(page);
	len = SKD_DISCARD_CDB_LENGTH;

	scsi_req->cdb[0] = UNMAP;
	scsi_req->cdb[8] = len;

	put_unaligned_be16(6 + 16, &buf[0]);
	put_unaligned_be16(16, &buf[2]);
	put_unaligned_be64(lba, &buf[8]);
	put_unaligned_be32(count, &buf[16]);

	req = skreq->req;
	blk_add_request_payload(req, page, len);
}
static void skd_request_fn_not_online(struct request_queue *q);

static void skd_request_fn(struct request_queue *q)
{
	struct skd_device *skdev = q->queuedata;
	struct skd_fitmsg_context *skmsg = NULL;
	struct fit_msg_hdr *fmh = NULL;
	struct skd_request_context *skreq;
	struct request *req = NULL;
	struct skd_scsi_request *scsi_req;
	struct page *page;
	unsigned long io_flags;
	int error;
	u32 lba;
	u32 count;
	int data_dir;
	u32 be_lba;
	u32 be_count;
	u64 be_dmaa;
	u64 cmdctxt;
	u32 timo_slot;
	void *cmd_ptr;
	int flush, fua;

	if (skdev->state != SKD_DRVR_STATE_ONLINE) {
		skd_request_fn_not_online(q);
		return;
	}

	if (blk_queue_stopped(skdev->queue)) {
		if (skdev->skmsg_free_list == NULL ||
		    skdev->skreq_free_list == NULL ||
		    skdev->in_flight >= skdev->queue_low_water_mark)
			/* There is still some kind of shortage */
			return;

		queue_flag_clear(QUEUE_FLAG_STOPPED, skdev->queue);
	}

	/*
	 * Stop conditions:
	 *  - There are no more native requests
	 *  - There are already the maximum number of requests in progress
	 *  - There are no more skd_request_context entries
	 *  - There are no more FIT msg buffers
	 */
	for (;;) {

		flush = fua = 0;

		req = blk_peek_request(q);

		/* Are there any native requests to start? */
		if (req == NULL)
			break;

		lba = (u32)blk_rq_pos(req);
		count = blk_rq_sectors(req);
		data_dir = rq_data_dir(req);
		io_flags = req->cmd_flags;

		if (io_flags & REQ_FLUSH)
			flush++;

		if (io_flags & REQ_FUA)
			fua++;

		pr_debug("%s:%s:%d new req=%p lba=%u(0x%x) "
			 "count=%u(0x%x) dir=%d\n",
			 skdev->name, __func__, __LINE__,
			 req, lba, lba, count, count, data_dir);

		/* At this point we know there is a request */

		/* Are too many requests already in progress? */
		if (skdev->in_flight >= skdev->cur_max_queue_depth) {
			pr_debug("%s:%s:%d qdepth %d, limit %d\n",
				 skdev->name, __func__, __LINE__,
				 skdev->in_flight, skdev->cur_max_queue_depth);
			break;
		}

		/* Is a skd_request_context available? */
		skreq = skdev->skreq_free_list;
		if (skreq == NULL) {
			pr_debug("%s:%s:%d Out of req=%p\n",
				 skdev->name, __func__, __LINE__, q);
			break;
		}
		SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE);
		SKD_ASSERT((skreq->id & SKD_ID_INCR) == 0);

		/* Now we check to see if we can get a fit msg */
		if (skmsg == NULL) {
			if (skdev->skmsg_free_list == NULL) {
				pr_debug("%s:%s:%d Out of msg\n",
					 skdev->name, __func__, __LINE__);
				break;
			}
		}

		skreq->flush_cmd = 0;
		skreq->n_sg = 0;
		skreq->sg_byte_count = 0;
		skreq->discard_page = 0;

		/*
		 * OK to now dequeue request from q.
		 *
		 * At this point we are committed to either start or reject
		 * the native request. Note that skd_request_context is
		 * available but is still at the head of the free list.
		 */
		blk_start_request(req);
		skreq->req = req;
		skreq->fitmsg_id = 0;

		/* Either a FIT msg is in progress or we have to start one. */
		if (skmsg == NULL) {
			/* Are there any FIT msg buffers available? */
			skmsg = skdev->skmsg_free_list;
			if (skmsg == NULL) {
				pr_debug("%s:%s:%d Out of msg skdev=%p\n",
					 skdev->name, __func__, __LINE__,
					 skdev);
				break;
			}
			SKD_ASSERT(skmsg->state == SKD_MSG_STATE_IDLE);
			SKD_ASSERT((skmsg->id & SKD_ID_INCR) == 0);

			skdev->skmsg_free_list = skmsg->next;

			skmsg->state = SKD_MSG_STATE_BUSY;
			skmsg->id += SKD_ID_INCR;

			/* Initialize the FIT msg header */
			fmh = (struct fit_msg_hdr *)skmsg->msg_buf;
			memset(fmh, 0, sizeof(*fmh));
			fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
			skmsg->length = sizeof(*fmh);
		}

		skreq->fitmsg_id = skmsg->id;

		/*
		 * Note that a FIT msg may have just been started
		 * but contains no SoFIT requests yet.
		 */

		/*
		 * Transcode the request, checking as we go. The outcome of
		 * the transcoding is represented by the error variable.
		 */
		cmd_ptr = &skmsg->msg_buf[skmsg->length];
		memset(cmd_ptr, 0, 32);

		be_lba = cpu_to_be32(lba);
		be_count = cpu_to_be32(count);
		be_dmaa = cpu_to_be64((u64)skreq->sksg_dma_address);
		cmdctxt = skreq->id + SKD_ID_INCR;

		scsi_req = cmd_ptr;
		scsi_req->hdr.tag = cmdctxt;
		scsi_req->hdr.sg_list_dma_address = be_dmaa;

		if (data_dir == READ)
			skreq->sg_data_dir = SKD_DATA_DIR_CARD_TO_HOST;
		else
			skreq->sg_data_dir = SKD_DATA_DIR_HOST_TO_CARD;

		if (io_flags & REQ_DISCARD) {
			page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
			if (!page) {
				pr_err("request_fn:Page allocation failed.\n");
				skd_end_request(skdev, skreq, -ENOMEM);
				break;
			}
			skreq->discard_page = 1;
			req->completion_data = page;
			skd_prep_discard_cdb(scsi_req, skreq, page, lba, count);

		} else if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) {
			skd_prep_zerosize_flush_cdb(scsi_req, skreq);
			SKD_ASSERT(skreq->flush_cmd == 1);

		} else {
			skd_prep_rw_cdb(scsi_req, data_dir, lba, count);
		}

		if (fua)
			scsi_req->cdb[1] |= SKD_FUA_NV;

		if (!req->bio)
			goto skip_sg;

		error = skd_preop_sg_list(skdev, skreq);

		if (error != 0) {
			/*
			 * Complete the native request with error.
			 * Note that the request context is still at the
			 * head of the free list, and that the SoFIT request
			 * was encoded into the FIT msg buffer but the FIT
			 * msg length has not been updated. In short, the
			 * only resource that has been allocated but might
			 * not be used is that the FIT msg could be empty.
			 */
			pr_debug("%s:%s:%d error Out\n",
				 skdev->name, __func__, __LINE__);
			skd_end_request(skdev, skreq, error);
			continue;
		}

skip_sg:
		scsi_req->hdr.sg_list_len_bytes =
			cpu_to_be32(skreq->sg_byte_count);

		/* Complete resource allocations. */
		skdev->skreq_free_list = skreq->next;
		skreq->state = SKD_REQ_STATE_BUSY;
		skreq->id += SKD_ID_INCR;

		skmsg->length += sizeof(struct skd_scsi_request);
		fmh->num_protocol_cmds_coalesced++;

		/*
		 * Update the active request counts.
		 * Capture the timeout timestamp.
		 */
		skreq->timeout_stamp = skdev->timeout_stamp;
		timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
		skdev->timeout_slot[timo_slot]++;
		skdev->in_flight++;
		pr_debug("%s:%s:%d req=0x%x busy=%d\n",
			 skdev->name, __func__, __LINE__,
			 skreq->id, skdev->in_flight);

		/*
		 * If the FIT msg buffer is full send it.
		 */
		if (skmsg->length >= SKD_N_FITMSG_BYTES ||
		    fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) {
			skd_send_fitmsg(skdev, skmsg);
			skmsg = NULL;
			fmh = NULL;
		}
	}

	/*
	 * Is a FIT msg in progress? If it is empty put the buffer back
	 * on the free list. If it is non-empty send what we got.
	 * This minimizes latency when there are fewer requests than
	 * what fits in a FIT msg.
	 */
	if (skmsg != NULL) {
		/* Bigger than just a FIT msg header? */
		if (skmsg->length > sizeof(struct fit_msg_hdr)) {
			pr_debug("%s:%s:%d sending msg=%p, len %d\n",
				 skdev->name, __func__, __LINE__,
				 skmsg, skmsg->length);
			skd_send_fitmsg(skdev, skmsg);
		} else {
			/*
			 * The FIT msg is empty. It means we got started
			 * on the msg, but the requests were rejected.
			 */
			skmsg->state = SKD_MSG_STATE_IDLE;
			skmsg->id += SKD_ID_INCR;
			skmsg->next = skdev->skmsg_free_list;
			skdev->skmsg_free_list = skmsg;
		}
		skmsg = NULL;
		fmh = NULL;
	}

	/*
	 * If req is non-NULL it means there is something to do but
	 * we are out of a resource.
	 */
	if (req)
		blk_stop_queue(skdev->queue);
}
static void skd_end_request(struct skd_device *skdev,
			    struct skd_request_context *skreq, int error)
{
	struct request *req = skreq->req;
	unsigned int io_flags = req->cmd_flags;

	if ((io_flags & REQ_DISCARD) &&
	    (skreq->discard_page == 1)) {
		pr_debug("%s:%s:%d, free the page!",
			 skdev->name, __func__, __LINE__);
		__free_page(req->completion_data);
	}

	if (unlikely(error)) {
		struct request *req = skreq->req;
		char *cmd = (rq_data_dir(req) == READ) ? "read" : "write";
		u32 lba = (u32)blk_rq_pos(req);
		u32 count = blk_rq_sectors(req);

		pr_err("(%s): Error cmd=%s sect=%u count=%u id=0x%x\n",
		       skd_name(skdev), cmd, lba, count, skreq->id);
	} else
		pr_debug("%s:%s:%d id=0x%x error=%d\n",
			 skdev->name, __func__, __LINE__, skreq->id, error);

	__blk_end_request_all(skreq->req, error);
}
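/*
 * Map the block request's data segments for DMA: blk_rq_map_sg() fills
 * the per-request scatterlist, pci_map_sg() maps it in the chosen
 * direction, and each mapped segment is copied into the FIT SG
 * descriptor list (sksg_list) handed to the device.
 */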
static int skd_preop_sg_list(struct skd_device *skdev,
			     struct skd_request_context *skreq)
{
	struct request *req = skreq->req;
	int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
	int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
	struct scatterlist *sg = &skreq->sg[0];
	int n_sg;
	int i;

	skreq->sg_byte_count = 0;

	/* SKD_ASSERT(skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD ||
		      skreq->sg_data_dir == SKD_DATA_DIR_CARD_TO_HOST); */

	n_sg = blk_rq_map_sg(skdev->queue, req, sg);
	if (n_sg <= 0)
		return -EINVAL;

	/*
	 * Map scatterlist to PCI bus addresses.
	 * Note PCI might change the number of entries.
	 */
	n_sg = pci_map_sg(skdev->pdev, sg, n_sg, pci_dir);
	if (n_sg <= 0)
		return -EINVAL;

	SKD_ASSERT(n_sg <= skdev->sgs_per_request);

	skreq->n_sg = n_sg;

	for (i = 0; i < n_sg; i++) {
		struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
		u32 cnt = sg_dma_len(&sg[i]);
		uint64_t dma_addr = sg_dma_address(&sg[i]);

		sgd->control = FIT_SGD_CONTROL_NOT_LAST;
		sgd->byte_count = cnt;
		skreq->sg_byte_count += cnt;
		sgd->host_side_addr = dma_addr;
		sgd->dev_side_addr = 0;
	}

	skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL;
	skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST;

	if (unlikely(skdev->dbg_level > 1)) {
		pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n",
			 skdev->name, __func__, __LINE__,
			 skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
		for (i = 0; i < n_sg; i++) {
			struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
			pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x "
				 "addr=0x%llx next=0x%llx\n",
				 skdev->name, __func__, __LINE__,
				 i, sgd->byte_count, sgd->control,
				 sgd->host_side_addr, sgd->next_desc_ptr);
		}
	}

	return 0;
}

static void skd_postop_sg_list(struct skd_device *skdev,
			       struct skd_request_context *skreq)
{
	int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
	int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;

	/*
	 * restore the next ptr for next IO request so we
	 * don't have to set it every time.
	 */
	skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr =
		skreq->sksg_dma_address +
		((skreq->n_sg) * sizeof(struct fit_sg_descriptor));

	pci_unmap_sg(skdev->pdev, &skreq->sg[0], skreq->n_sg, pci_dir);
}
static void skd_request_fn_not_online(struct request_queue *q)
{
	struct skd_device *skdev = q->queuedata;
	int error;

	SKD_ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE);

	skd_log_skdev(skdev, "req_not_online");
	switch (skdev->state) {
	case SKD_DRVR_STATE_PAUSING:
	case SKD_DRVR_STATE_PAUSED:
	case SKD_DRVR_STATE_STARTING:
	case SKD_DRVR_STATE_RESTARTING:
	case SKD_DRVR_STATE_WAIT_BOOT:
		/* In case of starting, we haven't started the queue,
		 * so we can't get here... but requests are
		 * possibly hanging out waiting for us because we
		 * reported the dev/skd0 already. They'll wait
		 * forever if connect doesn't complete.
		 * What to do??? delay dev/skd0 ??
		 */
	case SKD_DRVR_STATE_BUSY:
	case SKD_DRVR_STATE_BUSY_IMMINENT:
	case SKD_DRVR_STATE_BUSY_ERASE:
	case SKD_DRVR_STATE_DRAINING_TIMEOUT:
		return;

	case SKD_DRVR_STATE_BUSY_SANITIZE:
	case SKD_DRVR_STATE_STOPPING:
	case SKD_DRVR_STATE_SYNCING:
	case SKD_DRVR_STATE_FAULT:
	case SKD_DRVR_STATE_DISAPPEARED:
	default:
		error = -EIO;
		break;
	}

	/* If we get here, terminate all pending block requests
	 * with EIO and any scsi pass thru with appropriate sense
	 */
	skd_fail_all_pending(skdev);
}
/*
 *****************************************************************************
 * TIMER
 *****************************************************************************
 */
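/*
 * The timer fires once a second. In-flight requests are tracked in a
 * small ring of timeout slots (SKD_N_TIMEOUT_SLOT): each new request is
 * stamped with the current timeout_stamp and counted in the matching
 * slot. When a tick comes back around to a slot whose count is still
 * nonzero, those requests are overdue; the driver logs it, enters
 * SKD_DRVR_STATE_DRAINING_TIMEOUT and stops the queue.
 */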
static void skd_timer_tick_not_online(struct skd_device *skdev);

static void skd_timer_tick(ulong arg)
{
	struct skd_device *skdev = (struct skd_device *)arg;

	u32 timo_slot;
	u32 overdue_timestamp;
	unsigned long reqflags;
	u32 state;

	if (skdev->state == SKD_DRVR_STATE_FAULT)
		/* The driver has declared fault, and we want it to
		 * stay that way until driver is reloaded.
		 */
		return;

	spin_lock_irqsave(&skdev->lock, reqflags);

	state = SKD_READL(skdev, FIT_STATUS);
	state &= FIT_SR_DRIVE_STATE_MASK;
	if (state != skdev->drive_state)
		skd_isr_fwstate(skdev);

	if (skdev->state != SKD_DRVR_STATE_ONLINE) {
		skd_timer_tick_not_online(skdev);
		goto timer_func_out;
	}
	skdev->timeout_stamp++;
	timo_slot = skdev->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;

	/*
	 * All requests that happened during the previous use of
	 * this slot should be done by now. The previous use was
	 * over 7 seconds ago.
	 */
	if (skdev->timeout_slot[timo_slot] == 0)
		goto timer_func_out;

	/* Something is overdue */
	overdue_timestamp = skdev->timeout_stamp - SKD_N_TIMEOUT_SLOT;

	pr_debug("%s:%s:%d found %d timeouts, draining busy=%d\n",
		 skdev->name, __func__, __LINE__,
		 skdev->timeout_slot[timo_slot], skdev->in_flight);
	pr_err("(%s): Overdue IOs (%d), busy %d\n",
	       skd_name(skdev), skdev->timeout_slot[timo_slot],
	       skdev->in_flight);

	skdev->timer_countdown = SKD_DRAINING_TIMO;
	skdev->state = SKD_DRVR_STATE_DRAINING_TIMEOUT;
	skdev->timo_slot = timo_slot;
	blk_stop_queue(skdev->queue);

timer_func_out:
	mod_timer(&skdev->timer, (jiffies + HZ));

	spin_unlock_irqrestore(&skdev->lock, reqflags);
}
  878. static void skd_timer_tick_not_online(struct skd_device *skdev)
  879. {
  880. switch (skdev->state) {
  881. case SKD_DRVR_STATE_IDLE:
  882. case SKD_DRVR_STATE_LOAD:
  883. break;
  884. case SKD_DRVR_STATE_BUSY_SANITIZE:
  885. pr_debug("%s:%s:%d drive busy sanitize[%x], driver[%x]\n",
  886. skdev->name, __func__, __LINE__,
  887. skdev->drive_state, skdev->state);
  888. /* If we've been in sanitize for 3 seconds, we figure we're not
  889. * going to get anymore completions, so recover requests now
  890. */
  891. if (skdev->timer_countdown > 0) {
  892. skdev->timer_countdown--;
  893. return;
  894. }
  895. skd_recover_requests(skdev, 0);
  896. break;
  897. case SKD_DRVR_STATE_BUSY:
  898. case SKD_DRVR_STATE_BUSY_IMMINENT:
  899. case SKD_DRVR_STATE_BUSY_ERASE:
  900. pr_debug("%s:%s:%d busy[%x], countdown=%d\n",
  901. skdev->name, __func__, __LINE__,
  902. skdev->state, skdev->timer_countdown);
  903. if (skdev->timer_countdown > 0) {
  904. skdev->timer_countdown--;
  905. return;
  906. }
  907. pr_debug("%s:%s:%d busy[%x], timedout=%d, restarting device.",
  908. skdev->name, __func__, __LINE__,
  909. skdev->state, skdev->timer_countdown);
  910. skd_restart_device(skdev);
  911. break;
  912. case SKD_DRVR_STATE_WAIT_BOOT:
  913. case SKD_DRVR_STATE_STARTING:
  914. if (skdev->timer_countdown > 0) {
  915. skdev->timer_countdown--;
  916. return;
  917. }
  918. /* For now, we fault the drive. Could attempt resets to
  919. * revcover at some point. */
  920. skdev->state = SKD_DRVR_STATE_FAULT;
  921. pr_err("(%s): DriveFault Connect Timeout (%x)\n",
  922. skd_name(skdev), skdev->drive_state);
  923. /*start the queue so we can respond with error to requests */
  924. /* wakeup anyone waiting for startup complete */
  925. blk_start_queue(skdev->queue);
  926. skdev->gendisk_on = -1;
  927. wake_up_interruptible(&skdev->waitq);
  928. break;
  929. case SKD_DRVR_STATE_ONLINE:
  930. /* shouldn't get here. */
  931. break;
  932. case SKD_DRVR_STATE_PAUSING:
  933. case SKD_DRVR_STATE_PAUSED:
  934. break;
  935. case SKD_DRVR_STATE_DRAINING_TIMEOUT:
  936. pr_debug("%s:%s:%d "
  937. "draining busy [%d] tick[%d] qdb[%d] tmls[%d]\n",
  938. skdev->name, __func__, __LINE__,
  939. skdev->timo_slot,
  940. skdev->timer_countdown,
  941. skdev->in_flight,
  942. skdev->timeout_slot[skdev->timo_slot]);
  943. /* if the slot has cleared we can let the I/O continue */
  944. if (skdev->timeout_slot[skdev->timo_slot] == 0) {
  945. pr_debug("%s:%s:%d Slot drained, starting queue.\n",
  946. skdev->name, __func__, __LINE__);
  947. skdev->state = SKD_DRVR_STATE_ONLINE;
  948. blk_start_queue(skdev->queue);
  949. return;
  950. }
  951. if (skdev->timer_countdown > 0) {
  952. skdev->timer_countdown--;
  953. return;
  954. }
  955. skd_restart_device(skdev);
  956. break;
  957. case SKD_DRVR_STATE_RESTARTING:
  958. if (skdev->timer_countdown > 0) {
  959. skdev->timer_countdown--;
  960. return;
  961. }
  962. /* For now, we fault the drive. Could attempt resets to
  963. * revcover at some point. */
  964. skdev->state = SKD_DRVR_STATE_FAULT;
  965. pr_err("(%s): DriveFault Reconnect Timeout (%x)\n",
  966. skd_name(skdev), skdev->drive_state);
  967. /*
  968. * Recovering does two things:
  969. * 1. completes IO with error
  970. * 2. reclaims dma resources
  971. * When is it safe to recover requests?
  972. * - if the drive state is faulted
  973. * - if the state is still soft reset after out timeout
  974. * - if the drive registers are dead (state = FF)
  975. * If it is "unsafe", we still need to recover, so we will
  976. * disable pci bus mastering and disable our interrupts.
  977. */
  978. if ((skdev->drive_state == FIT_SR_DRIVE_SOFT_RESET) ||
  979. (skdev->drive_state == FIT_SR_DRIVE_FAULT) ||
  980. (skdev->drive_state == FIT_SR_DRIVE_STATE_MASK))
  981. /* It never came out of soft reset. Try to
  982. * recover the requests and then let them
  983. * fail. This is to mitigate hung processes. */
  984. skd_recover_requests(skdev, 0);
  985. else {
  986. pr_err("(%s): Disable BusMaster (%x)\n",
  987. skd_name(skdev), skdev->drive_state);
  988. pci_disable_device(skdev->pdev);
  989. skd_disable_interrupts(skdev);
  990. skd_recover_requests(skdev, 0);
  991. }
992. /* start the queue so we can respond with errors to requests */
  993. /* wakeup anyone waiting for startup complete */
  994. blk_start_queue(skdev->queue);
  995. skdev->gendisk_on = -1;
  996. wake_up_interruptible(&skdev->waitq);
  997. break;
  998. case SKD_DRVR_STATE_RESUMING:
  999. case SKD_DRVR_STATE_STOPPING:
  1000. case SKD_DRVR_STATE_SYNCING:
  1001. case SKD_DRVR_STATE_FAULT:
  1002. case SKD_DRVR_STATE_DISAPPEARED:
  1003. default:
  1004. break;
  1005. }
  1006. }
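/*
 * Summary of the tick handler above: states that are waiting on the
 * device (BUSY*, WAIT_BOOT, STARTING, DRAINING_TIMEOUT, RESTARTING)
 * burn down timer_countdown one tick at a time; when it reaches zero
 * the driver either restarts the device or declares a drive fault and
 * fails the outstanding requests, per the case handling above.
 */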
  1007. static int skd_start_timer(struct skd_device *skdev)
  1008. {
  1009. int rc;
  1010. init_timer(&skdev->timer);
  1011. setup_timer(&skdev->timer, skd_timer_tick, (ulong)skdev);
  1012. rc = mod_timer(&skdev->timer, (jiffies + HZ));
  1013. if (rc)
  1014. pr_err("%s: failed to start timer %d\n",
  1015. __func__, rc);
  1016. return rc;
  1017. }
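/*
 * The timer above is armed one second out (jiffies + HZ); the tick
 * handler is expected to re-arm it each pass so skd_timer_tick() runs
 * at roughly 1 Hz while the device is bound.
 */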
  1018. static void skd_kill_timer(struct skd_device *skdev)
  1019. {
  1020. del_timer_sync(&skdev->timer);
  1021. }
  1022. /*
  1023. *****************************************************************************
  1024. * IOCTL
  1025. *****************************************************************************
  1026. */
  1027. static int skd_ioctl_sg_io(struct skd_device *skdev,
  1028. fmode_t mode, void __user *argp);
  1029. static int skd_sg_io_get_and_check_args(struct skd_device *skdev,
  1030. struct skd_sg_io *sksgio);
  1031. static int skd_sg_io_obtain_skspcl(struct skd_device *skdev,
  1032. struct skd_sg_io *sksgio);
  1033. static int skd_sg_io_prep_buffering(struct skd_device *skdev,
  1034. struct skd_sg_io *sksgio);
  1035. static int skd_sg_io_copy_buffer(struct skd_device *skdev,
  1036. struct skd_sg_io *sksgio, int dxfer_dir);
  1037. static int skd_sg_io_send_fitmsg(struct skd_device *skdev,
  1038. struct skd_sg_io *sksgio);
  1039. static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio);
  1040. static int skd_sg_io_release_skspcl(struct skd_device *skdev,
  1041. struct skd_sg_io *sksgio);
  1042. static int skd_sg_io_put_status(struct skd_device *skdev,
  1043. struct skd_sg_io *sksgio);
  1044. static void skd_complete_special(struct skd_device *skdev,
  1045. volatile struct fit_completion_entry_v1
  1046. *skcomp,
  1047. volatile struct fit_comp_error_info *skerr,
  1048. struct skd_special_context *skspcl);
  1049. static int skd_bdev_ioctl(struct block_device *bdev, fmode_t mode,
  1050. uint cmd_in, ulong arg)
  1051. {
  1052. int rc = 0;
  1053. struct gendisk *disk = bdev->bd_disk;
  1054. struct skd_device *skdev = disk->private_data;
1055. void __user *p = (void __user *)arg;
  1056. pr_debug("%s:%s:%d %s: CMD[%s] ioctl mode 0x%x, cmd 0x%x arg %0lx\n",
  1057. skdev->name, __func__, __LINE__,
  1058. disk->disk_name, current->comm, mode, cmd_in, arg);
  1059. if (!capable(CAP_SYS_ADMIN))
  1060. return -EPERM;
  1061. switch (cmd_in) {
  1062. case SG_SET_TIMEOUT:
  1063. case SG_GET_TIMEOUT:
  1064. case SG_GET_VERSION_NUM:
  1065. rc = scsi_cmd_ioctl(disk->queue, disk, mode, cmd_in, p);
  1066. break;
  1067. case SG_IO:
  1068. rc = skd_ioctl_sg_io(skdev, mode, p);
  1069. break;
  1070. default:
  1071. rc = -ENOTTY;
  1072. break;
  1073. }
  1074. pr_debug("%s:%s:%d %s: completion rc %d\n",
  1075. skdev->name, __func__, __LINE__, disk->disk_name, rc);
  1076. return rc;
  1077. }
  1078. static int skd_ioctl_sg_io(struct skd_device *skdev, fmode_t mode,
  1079. void __user *argp)
  1080. {
  1081. int rc;
  1082. struct skd_sg_io sksgio;
  1083. memset(&sksgio, 0, sizeof(sksgio));
  1084. sksgio.mode = mode;
  1085. sksgio.argp = argp;
  1086. sksgio.iov = &sksgio.no_iov_iov;
  1087. switch (skdev->state) {
  1088. case SKD_DRVR_STATE_ONLINE:
  1089. case SKD_DRVR_STATE_BUSY_IMMINENT:
  1090. break;
  1091. default:
  1092. pr_debug("%s:%s:%d drive not online\n",
  1093. skdev->name, __func__, __LINE__);
  1094. rc = -ENXIO;
  1095. goto out;
  1096. }
  1097. rc = skd_sg_io_get_and_check_args(skdev, &sksgio);
  1098. if (rc)
  1099. goto out;
  1100. rc = skd_sg_io_obtain_skspcl(skdev, &sksgio);
  1101. if (rc)
  1102. goto out;
  1103. rc = skd_sg_io_prep_buffering(skdev, &sksgio);
  1104. if (rc)
  1105. goto out;
  1106. rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_TO_DEV);
  1107. if (rc)
  1108. goto out;
  1109. rc = skd_sg_io_send_fitmsg(skdev, &sksgio);
  1110. if (rc)
  1111. goto out;
  1112. rc = skd_sg_io_await(skdev, &sksgio);
  1113. if (rc)
  1114. goto out;
  1115. rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_FROM_DEV);
  1116. if (rc)
  1117. goto out;
  1118. rc = skd_sg_io_put_status(skdev, &sksgio);
  1119. if (rc)
  1120. goto out;
  1121. rc = 0;
  1122. out:
  1123. skd_sg_io_release_skspcl(skdev, &sksgio);
  1124. if (sksgio.iov != NULL && sksgio.iov != &sksgio.no_iov_iov)
  1125. kfree(sksgio.iov);
  1126. return rc;
  1127. }
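/*
 * The SG_IO path above takes a standard sg_io_hdr from user space.
 * Illustrative call only (not part of this driver), assuming an open
 * fd on the block device and a 6-byte CDB in cdb[]:
 *
 *   struct sg_io_hdr hdr = { 0 };
 *   hdr.interface_id    = 'S';               SG_INTERFACE_ID_ORIG
 *   hdr.dxfer_direction = SG_DXFER_FROM_DEV;
 *   hdr.cmd_len         = 6;
 *   hdr.cmdp            = cdb;
 *   hdr.dxfer_len       = 512;
 *   hdr.dxferp          = buf;
 *   hdr.mx_sb_len       = sizeof(sense);
 *   hdr.sbp             = sense;
 *   hdr.timeout         = 20000;             timeout in milliseconds
 *   ioctl(fd, SG_IO, &hdr);
 */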
  1128. static int skd_sg_io_get_and_check_args(struct skd_device *skdev,
  1129. struct skd_sg_io *sksgio)
  1130. {
  1131. struct sg_io_hdr *sgp = &sksgio->sg;
  1132. int i, acc;
  1133. if (!access_ok(VERIFY_WRITE, sksgio->argp, sizeof(sg_io_hdr_t))) {
  1134. pr_debug("%s:%s:%d access sg failed %p\n",
  1135. skdev->name, __func__, __LINE__, sksgio->argp);
  1136. return -EFAULT;
  1137. }
  1138. if (__copy_from_user(sgp, sksgio->argp, sizeof(sg_io_hdr_t))) {
  1139. pr_debug("%s:%s:%d copy_from_user sg failed %p\n",
  1140. skdev->name, __func__, __LINE__, sksgio->argp);
  1141. return -EFAULT;
  1142. }
  1143. if (sgp->interface_id != SG_INTERFACE_ID_ORIG) {
  1144. pr_debug("%s:%s:%d interface_id invalid 0x%x\n",
  1145. skdev->name, __func__, __LINE__, sgp->interface_id);
  1146. return -EINVAL;
  1147. }
  1148. if (sgp->cmd_len > sizeof(sksgio->cdb)) {
  1149. pr_debug("%s:%s:%d cmd_len invalid %d\n",
  1150. skdev->name, __func__, __LINE__, sgp->cmd_len);
  1151. return -EINVAL;
  1152. }
  1153. if (sgp->iovec_count > 256) {
  1154. pr_debug("%s:%s:%d iovec_count invalid %d\n",
  1155. skdev->name, __func__, __LINE__, sgp->iovec_count);
  1156. return -EINVAL;
  1157. }
  1158. if (sgp->dxfer_len > (PAGE_SIZE * SKD_N_SG_PER_SPECIAL)) {
  1159. pr_debug("%s:%s:%d dxfer_len invalid %d\n",
  1160. skdev->name, __func__, __LINE__, sgp->dxfer_len);
  1161. return -EINVAL;
  1162. }
  1163. switch (sgp->dxfer_direction) {
  1164. case SG_DXFER_NONE:
  1165. acc = -1;
  1166. break;
  1167. case SG_DXFER_TO_DEV:
  1168. acc = VERIFY_READ;
  1169. break;
  1170. case SG_DXFER_FROM_DEV:
  1171. case SG_DXFER_TO_FROM_DEV:
  1172. acc = VERIFY_WRITE;
  1173. break;
  1174. default:
  1175. pr_debug("%s:%s:%d dxfer_dir invalid %d\n",
  1176. skdev->name, __func__, __LINE__, sgp->dxfer_direction);
  1177. return -EINVAL;
  1178. }
  1179. if (copy_from_user(sksgio->cdb, sgp->cmdp, sgp->cmd_len)) {
  1180. pr_debug("%s:%s:%d copy_from_user cmdp failed %p\n",
  1181. skdev->name, __func__, __LINE__, sgp->cmdp);
  1182. return -EFAULT;
  1183. }
  1184. if (sgp->mx_sb_len != 0) {
  1185. if (!access_ok(VERIFY_WRITE, sgp->sbp, sgp->mx_sb_len)) {
  1186. pr_debug("%s:%s:%d access sbp failed %p\n",
  1187. skdev->name, __func__, __LINE__, sgp->sbp);
  1188. return -EFAULT;
  1189. }
  1190. }
  1191. if (sgp->iovec_count == 0) {
  1192. sksgio->iov[0].iov_base = sgp->dxferp;
  1193. sksgio->iov[0].iov_len = sgp->dxfer_len;
  1194. sksgio->iovcnt = 1;
  1195. sksgio->dxfer_len = sgp->dxfer_len;
  1196. } else {
  1197. struct sg_iovec *iov;
  1198. uint nbytes = sizeof(*iov) * sgp->iovec_count;
  1199. size_t iov_data_len;
  1200. iov = kmalloc(nbytes, GFP_KERNEL);
  1201. if (iov == NULL) {
  1202. pr_debug("%s:%s:%d alloc iovec failed %d\n",
  1203. skdev->name, __func__, __LINE__,
  1204. sgp->iovec_count);
  1205. return -ENOMEM;
  1206. }
  1207. sksgio->iov = iov;
  1208. sksgio->iovcnt = sgp->iovec_count;
  1209. if (copy_from_user(iov, sgp->dxferp, nbytes)) {
  1210. pr_debug("%s:%s:%d copy_from_user iovec failed %p\n",
  1211. skdev->name, __func__, __LINE__, sgp->dxferp);
  1212. return -EFAULT;
  1213. }
  1214. /*
  1215. * Sum up the vecs, making sure they don't overflow
  1216. */
  1217. iov_data_len = 0;
  1218. for (i = 0; i < sgp->iovec_count; i++) {
  1219. if (iov_data_len + iov[i].iov_len < iov_data_len)
  1220. return -EINVAL;
  1221. iov_data_len += iov[i].iov_len;
  1222. }
  1223. /* SG_IO howto says that the shorter of the two wins */
  1224. if (sgp->dxfer_len < iov_data_len) {
  1225. sksgio->iovcnt = iov_shorten((struct iovec *)iov,
  1226. sgp->iovec_count,
  1227. sgp->dxfer_len);
  1228. sksgio->dxfer_len = sgp->dxfer_len;
  1229. } else
  1230. sksgio->dxfer_len = iov_data_len;
  1231. }
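/*
 * e.g. dxfer_len = 4096 with two 4 KiB iovecs (8 KiB total): the
 * transfer is clipped to 4096 bytes and iov_shorten() drops the part
 * of the iovec list that is no longer needed.
 */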
  1232. if (sgp->dxfer_direction != SG_DXFER_NONE) {
  1233. struct sg_iovec *iov = sksgio->iov;
  1234. for (i = 0; i < sksgio->iovcnt; i++, iov++) {
  1235. if (!access_ok(acc, iov->iov_base, iov->iov_len)) {
  1236. pr_debug("%s:%s:%d access data failed %p/%d\n",
  1237. skdev->name, __func__, __LINE__,
  1238. iov->iov_base, (int)iov->iov_len);
  1239. return -EFAULT;
  1240. }
  1241. }
  1242. }
  1243. return 0;
  1244. }
  1245. static int skd_sg_io_obtain_skspcl(struct skd_device *skdev,
  1246. struct skd_sg_io *sksgio)
  1247. {
  1248. struct skd_special_context *skspcl = NULL;
  1249. int rc;
  1250. for (;;) {
  1251. ulong flags;
  1252. spin_lock_irqsave(&skdev->lock, flags);
  1253. skspcl = skdev->skspcl_free_list;
  1254. if (skspcl != NULL) {
  1255. skdev->skspcl_free_list =
  1256. (struct skd_special_context *)skspcl->req.next;
  1257. skspcl->req.id += SKD_ID_INCR;
  1258. skspcl->req.state = SKD_REQ_STATE_SETUP;
  1259. skspcl->orphaned = 0;
  1260. skspcl->req.n_sg = 0;
  1261. }
  1262. spin_unlock_irqrestore(&skdev->lock, flags);
  1263. if (skspcl != NULL) {
  1264. rc = 0;
  1265. break;
  1266. }
  1267. pr_debug("%s:%s:%d blocking\n",
  1268. skdev->name, __func__, __LINE__);
  1269. rc = wait_event_interruptible_timeout(
  1270. skdev->waitq,
  1271. (skdev->skspcl_free_list != NULL),
  1272. msecs_to_jiffies(sksgio->sg.timeout));
  1273. pr_debug("%s:%s:%d unblocking, rc=%d\n",
  1274. skdev->name, __func__, __LINE__, rc);
  1275. if (rc <= 0) {
  1276. if (rc == 0)
  1277. rc = -ETIMEDOUT;
  1278. else
  1279. rc = -EINTR;
  1280. break;
  1281. }
  1282. /*
  1283. * If we get here rc > 0 meaning the timeout to
  1284. * wait_event_interruptible_timeout() had time left, hence the
  1285. * sought event -- non-empty free list -- happened.
  1286. * Retry the allocation.
  1287. */
  1288. }
  1289. sksgio->skspcl = skspcl;
  1290. return rc;
  1291. }
  1292. static int skd_skreq_prep_buffering(struct skd_device *skdev,
  1293. struct skd_request_context *skreq,
  1294. u32 dxfer_len)
  1295. {
  1296. u32 resid = dxfer_len;
  1297. /*
  1298. * The DMA engine must have aligned addresses and byte counts.
  1299. */
  1300. resid += (-resid) & 3;
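/* e.g. a 13-byte transfer is padded to 16 bytes, the next multiple of 4 */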
  1301. skreq->sg_byte_count = resid;
  1302. skreq->n_sg = 0;
  1303. while (resid > 0) {
  1304. u32 nbytes = PAGE_SIZE;
  1305. u32 ix = skreq->n_sg;
  1306. struct scatterlist *sg = &skreq->sg[ix];
  1307. struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix];
  1308. struct page *page;
  1309. if (nbytes > resid)
  1310. nbytes = resid;
  1311. page = alloc_page(GFP_KERNEL);
  1312. if (page == NULL)
  1313. return -ENOMEM;
  1314. sg_set_page(sg, page, nbytes, 0);
  1315. /* TODO: This should be going through a pci_???()
  1316. * routine to do proper mapping. */
  1317. sksg->control = FIT_SGD_CONTROL_NOT_LAST;
  1318. sksg->byte_count = nbytes;
  1319. sksg->host_side_addr = sg_phys(sg);
  1320. sksg->dev_side_addr = 0;
  1321. sksg->next_desc_ptr = skreq->sksg_dma_address +
  1322. (ix + 1) * sizeof(*sksg);
  1323. skreq->n_sg++;
  1324. resid -= nbytes;
  1325. }
  1326. if (skreq->n_sg > 0) {
  1327. u32 ix = skreq->n_sg - 1;
  1328. struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix];
  1329. sksg->control = FIT_SGD_CONTROL_LAST;
  1330. sksg->next_desc_ptr = 0;
  1331. }
  1332. if (unlikely(skdev->dbg_level > 1)) {
  1333. u32 i;
  1334. pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n",
  1335. skdev->name, __func__, __LINE__,
  1336. skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
  1337. for (i = 0; i < skreq->n_sg; i++) {
  1338. struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
  1339. pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x "
  1340. "addr=0x%llx next=0x%llx\n",
  1341. skdev->name, __func__, __LINE__,
  1342. i, sgd->byte_count, sgd->control,
  1343. sgd->host_side_addr, sgd->next_desc_ptr);
  1344. }
  1345. }
  1346. return 0;
  1347. }
  1348. static int skd_sg_io_prep_buffering(struct skd_device *skdev,
  1349. struct skd_sg_io *sksgio)
  1350. {
  1351. struct skd_special_context *skspcl = sksgio->skspcl;
  1352. struct skd_request_context *skreq = &skspcl->req;
  1353. u32 dxfer_len = sksgio->dxfer_len;
  1354. int rc;
  1355. rc = skd_skreq_prep_buffering(skdev, skreq, dxfer_len);
  1356. /*
  1357. * Eventually, errors or not, skd_release_special() is called
  1358. * to recover allocations including partial allocations.
  1359. */
  1360. return rc;
  1361. }
  1362. static int skd_sg_io_copy_buffer(struct skd_device *skdev,
  1363. struct skd_sg_io *sksgio, int dxfer_dir)
  1364. {
  1365. struct skd_special_context *skspcl = sksgio->skspcl;
  1366. u32 iov_ix = 0;
  1367. struct sg_iovec curiov;
  1368. u32 sksg_ix = 0;
  1369. u8 *bufp = NULL;
  1370. u32 buf_len = 0;
  1371. u32 resid = sksgio->dxfer_len;
  1372. int rc;
  1373. curiov.iov_len = 0;
  1374. curiov.iov_base = NULL;
  1375. if (dxfer_dir != sksgio->sg.dxfer_direction) {
  1376. if (dxfer_dir != SG_DXFER_TO_DEV ||
  1377. sksgio->sg.dxfer_direction != SG_DXFER_TO_FROM_DEV)
  1378. return 0;
  1379. }
  1380. while (resid > 0) {
  1381. u32 nbytes = PAGE_SIZE;
  1382. if (curiov.iov_len == 0) {
  1383. curiov = sksgio->iov[iov_ix++];
  1384. continue;
  1385. }
  1386. if (buf_len == 0) {
  1387. struct page *page;
  1388. page = sg_page(&skspcl->req.sg[sksg_ix++]);
  1389. bufp = page_address(page);
  1390. buf_len = PAGE_SIZE;
  1391. }
  1392. nbytes = min_t(u32, nbytes, resid);
  1393. nbytes = min_t(u32, nbytes, curiov.iov_len);
  1394. nbytes = min_t(u32, nbytes, buf_len);
  1395. if (dxfer_dir == SG_DXFER_TO_DEV)
  1396. rc = __copy_from_user(bufp, curiov.iov_base, nbytes);
  1397. else
  1398. rc = __copy_to_user(curiov.iov_base, bufp, nbytes);
  1399. if (rc)
  1400. return -EFAULT;
  1401. resid -= nbytes;
  1402. curiov.iov_len -= nbytes;
  1403. curiov.iov_base += nbytes;
  1404. buf_len -= nbytes;
  1405. }
  1406. return 0;
  1407. }
  1408. static int skd_sg_io_send_fitmsg(struct skd_device *skdev,
  1409. struct skd_sg_io *sksgio)
  1410. {
  1411. struct skd_special_context *skspcl = sksgio->skspcl;
  1412. struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf;
  1413. struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1];
  1414. memset(skspcl->msg_buf, 0, SKD_N_SPECIAL_FITMSG_BYTES);
  1415. /* Initialize the FIT msg header */
  1416. fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
  1417. fmh->num_protocol_cmds_coalesced = 1;
  1418. /* Initialize the SCSI request */
  1419. if (sksgio->sg.dxfer_direction != SG_DXFER_NONE)
  1420. scsi_req->hdr.sg_list_dma_address =
  1421. cpu_to_be64(skspcl->req.sksg_dma_address);
  1422. scsi_req->hdr.tag = skspcl->req.id;
  1423. scsi_req->hdr.sg_list_len_bytes =
  1424. cpu_to_be32(skspcl->req.sg_byte_count);
  1425. memcpy(scsi_req->cdb, sksgio->cdb, sizeof(scsi_req->cdb));
  1426. skspcl->req.state = SKD_REQ_STATE_BUSY;
  1427. skd_send_special_fitmsg(skdev, skspcl);
  1428. return 0;
  1429. }
  1430. static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio)
  1431. {
  1432. unsigned long flags;
  1433. int rc;
  1434. rc = wait_event_interruptible_timeout(skdev->waitq,
  1435. (sksgio->skspcl->req.state !=
  1436. SKD_REQ_STATE_BUSY),
  1437. msecs_to_jiffies(sksgio->sg.
  1438. timeout));
  1439. spin_lock_irqsave(&skdev->lock, flags);
  1440. if (sksgio->skspcl->req.state == SKD_REQ_STATE_ABORTED) {
  1441. pr_debug("%s:%s:%d skspcl %p aborted\n",
  1442. skdev->name, __func__, __LINE__, sksgio->skspcl);
  1443. /* Build check cond, sense and let command finish. */
  1444. /* For a timeout, we must fabricate completion and sense
  1445. * data to complete the command */
  1446. sksgio->skspcl->req.completion.status =
  1447. SAM_STAT_CHECK_CONDITION;
  1448. memset(&sksgio->skspcl->req.err_info, 0,
  1449. sizeof(sksgio->skspcl->req.err_info));
  1450. sksgio->skspcl->req.err_info.type = 0x70;
  1451. sksgio->skspcl->req.err_info.key = ABORTED_COMMAND;
  1452. sksgio->skspcl->req.err_info.code = 0x44;
  1453. sksgio->skspcl->req.err_info.qual = 0;
  1454. rc = 0;
  1455. } else if (sksgio->skspcl->req.state != SKD_REQ_STATE_BUSY)
  1456. /* No longer on the adapter. We finish. */
  1457. rc = 0;
  1458. else {
  1459. /* Something's gone wrong. Still busy. Timeout or
  1460. * user interrupted (control-C). Mark as an orphan
  1461. * so it will be disposed when completed. */
  1462. sksgio->skspcl->orphaned = 1;
  1463. sksgio->skspcl = NULL;
  1464. if (rc == 0) {
  1465. pr_debug("%s:%s:%d timed out %p (%u ms)\n",
  1466. skdev->name, __func__, __LINE__,
  1467. sksgio, sksgio->sg.timeout);
  1468. rc = -ETIMEDOUT;
  1469. } else {
  1470. pr_debug("%s:%s:%d cntlc %p\n",
  1471. skdev->name, __func__, __LINE__, sksgio);
  1472. rc = -EINTR;
  1473. }
  1474. }
  1475. spin_unlock_irqrestore(&skdev->lock, flags);
  1476. return rc;
  1477. }
  1478. static int skd_sg_io_put_status(struct skd_device *skdev,
  1479. struct skd_sg_io *sksgio)
  1480. {
  1481. struct sg_io_hdr *sgp = &sksgio->sg;
  1482. struct skd_special_context *skspcl = sksgio->skspcl;
  1483. int resid = 0;
  1484. u32 nb = be32_to_cpu(skspcl->req.completion.num_returned_bytes);
  1485. sgp->status = skspcl->req.completion.status;
  1486. resid = sksgio->dxfer_len - nb;
  1487. sgp->masked_status = sgp->status & STATUS_MASK;
  1488. sgp->msg_status = 0;
  1489. sgp->host_status = 0;
  1490. sgp->driver_status = 0;
  1491. sgp->resid = resid;
  1492. if (sgp->masked_status || sgp->host_status || sgp->driver_status)
  1493. sgp->info |= SG_INFO_CHECK;
  1494. pr_debug("%s:%s:%d status %x masked %x resid 0x%x\n",
  1495. skdev->name, __func__, __LINE__,
  1496. sgp->status, sgp->masked_status, sgp->resid);
  1497. if (sgp->masked_status == SAM_STAT_CHECK_CONDITION) {
  1498. if (sgp->mx_sb_len > 0) {
  1499. struct fit_comp_error_info *ei = &skspcl->req.err_info;
  1500. u32 nbytes = sizeof(*ei);
  1501. nbytes = min_t(u32, nbytes, sgp->mx_sb_len);
  1502. sgp->sb_len_wr = nbytes;
  1503. if (__copy_to_user(sgp->sbp, ei, nbytes)) {
  1504. pr_debug("%s:%s:%d copy_to_user sense failed %p\n",
  1505. skdev->name, __func__, __LINE__,
  1506. sgp->sbp);
  1507. return -EFAULT;
  1508. }
  1509. }
  1510. }
  1511. if (__copy_to_user(sksgio->argp, sgp, sizeof(sg_io_hdr_t))) {
  1512. pr_debug("%s:%s:%d copy_to_user sg failed %p\n",
  1513. skdev->name, __func__, __LINE__, sksgio->argp);
  1514. return -EFAULT;
  1515. }
  1516. return 0;
  1517. }
  1518. static int skd_sg_io_release_skspcl(struct skd_device *skdev,
  1519. struct skd_sg_io *sksgio)
  1520. {
  1521. struct skd_special_context *skspcl = sksgio->skspcl;
  1522. if (skspcl != NULL) {
  1523. ulong flags;
  1524. sksgio->skspcl = NULL;
  1525. spin_lock_irqsave(&skdev->lock, flags);
  1526. skd_release_special(skdev, skspcl);
  1527. spin_unlock_irqrestore(&skdev->lock, flags);
  1528. }
  1529. return 0;
  1530. }
  1531. /*
  1532. *****************************************************************************
  1533. * INTERNAL REQUESTS -- generated by driver itself
  1534. *****************************************************************************
  1535. */
  1536. static int skd_format_internal_skspcl(struct skd_device *skdev)
  1537. {
  1538. struct skd_special_context *skspcl = &skdev->internal_skspcl;
  1539. struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
  1540. struct fit_msg_hdr *fmh;
  1541. uint64_t dma_address;
  1542. struct skd_scsi_request *scsi;
  1543. fmh = (struct fit_msg_hdr *)&skspcl->msg_buf[0];
  1544. fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
  1545. fmh->num_protocol_cmds_coalesced = 1;
  1546. scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64];
  1547. memset(scsi, 0, sizeof(*scsi));
  1548. dma_address = skspcl->req.sksg_dma_address;
  1549. scsi->hdr.sg_list_dma_address = cpu_to_be64(dma_address);
  1550. sgd->control = FIT_SGD_CONTROL_LAST;
  1551. sgd->byte_count = 0;
  1552. sgd->host_side_addr = skspcl->db_dma_address;
  1553. sgd->dev_side_addr = 0;
  1554. sgd->next_desc_ptr = 0LL;
  1555. return 1;
  1556. }
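/*
 * Layout used by the internal special context: a 64-byte FIT header at
 * msg_buf[0], one 64-byte SCSI request at msg_buf[64], and a single SG
 * descriptor pointing at the context's data buffer (db_dma_address).
 */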
  1557. #define WR_BUF_SIZE SKD_N_INTERNAL_BYTES
  1558. static void skd_send_internal_skspcl(struct skd_device *skdev,
  1559. struct skd_special_context *skspcl,
  1560. u8 opcode)
  1561. {
  1562. struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
  1563. struct skd_scsi_request *scsi;
  1564. unsigned char *buf = skspcl->data_buf;
  1565. int i;
  1566. if (skspcl->req.state != SKD_REQ_STATE_IDLE)
  1567. /*
  1568. * A refresh is already in progress.
  1569. * Just wait for it to finish.
  1570. */
  1571. return;
  1572. SKD_ASSERT((skspcl->req.id & SKD_ID_INCR) == 0);
  1573. skspcl->req.state = SKD_REQ_STATE_BUSY;
  1574. skspcl->req.id += SKD_ID_INCR;
  1575. scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64];
  1576. scsi->hdr.tag = skspcl->req.id;
  1577. memset(scsi->cdb, 0, sizeof(scsi->cdb));
  1578. switch (opcode) {
  1579. case TEST_UNIT_READY:
  1580. scsi->cdb[0] = TEST_UNIT_READY;
  1581. sgd->byte_count = 0;
  1582. scsi->hdr.sg_list_len_bytes = 0;
  1583. break;
  1584. case READ_CAPACITY:
  1585. scsi->cdb[0] = READ_CAPACITY;
  1586. sgd->byte_count = SKD_N_READ_CAP_BYTES;
  1587. scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
  1588. break;
  1589. case INQUIRY:
  1590. scsi->cdb[0] = INQUIRY;
  1591. scsi->cdb[1] = 0x01; /* evpd */
  1592. scsi->cdb[2] = 0x80; /* serial number page */
  1593. scsi->cdb[4] = 0x10;
  1594. sgd->byte_count = 16;
  1595. scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
  1596. break;
  1597. case SYNCHRONIZE_CACHE:
  1598. scsi->cdb[0] = SYNCHRONIZE_CACHE;
  1599. sgd->byte_count = 0;
  1600. scsi->hdr.sg_list_len_bytes = 0;
  1601. break;
  1602. case WRITE_BUFFER:
  1603. scsi->cdb[0] = WRITE_BUFFER;
  1604. scsi->cdb[1] = 0x02;
  1605. scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
  1606. scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
  1607. sgd->byte_count = WR_BUF_SIZE;
  1608. scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
  1609. /* fill incrementing byte pattern */
  1610. for (i = 0; i < sgd->byte_count; i++)
  1611. buf[i] = i & 0xFF;
  1612. break;
  1613. case READ_BUFFER:
  1614. scsi->cdb[0] = READ_BUFFER;
  1615. scsi->cdb[1] = 0x02;
  1616. scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
  1617. scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
  1618. sgd->byte_count = WR_BUF_SIZE;
  1619. scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
  1620. memset(skspcl->data_buf, 0, sgd->byte_count);
  1621. break;
  1622. default:
  1623. SKD_ASSERT("Don't know what to send");
  1624. return;
  1625. }
  1626. skd_send_special_fitmsg(skdev, skspcl);
  1627. }
  1628. static void skd_refresh_device_data(struct skd_device *skdev)
  1629. {
  1630. struct skd_special_context *skspcl = &skdev->internal_skspcl;
  1631. skd_send_internal_skspcl(skdev, skspcl, TEST_UNIT_READY);
  1632. }
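/*
 * The TEST_UNIT_READY above kicks off the internal probe chain handled
 * by skd_complete_internal() below:
 *   TEST_UNIT_READY -> WRITE_BUFFER -> READ_BUFFER (pattern check)
 *     -> READ_CAPACITY -> INQUIRY (serial number) -> device online.
 */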
  1633. static int skd_chk_read_buf(struct skd_device *skdev,
  1634. struct skd_special_context *skspcl)
  1635. {
  1636. unsigned char *buf = skspcl->data_buf;
  1637. int i;
  1638. /* check for incrementing byte pattern */
  1639. for (i = 0; i < WR_BUF_SIZE; i++)
  1640. if (buf[i] != (i & 0xFF))
  1641. return 1;
  1642. return 0;
  1643. }
  1644. static void skd_log_check_status(struct skd_device *skdev, u8 status, u8 key,
  1645. u8 code, u8 qual, u8 fruc)
  1646. {
  1647. /* If the check condition is of special interest, log a message */
  1648. if ((status == SAM_STAT_CHECK_CONDITION) && (key == 0x02)
  1649. && (code == 0x04) && (qual == 0x06)) {
  1650. pr_err("(%s): *** LOST_WRITE_DATA ERROR *** key/asc/"
  1651. "ascq/fruc %02x/%02x/%02x/%02x\n",
  1652. skd_name(skdev), key, code, qual, fruc);
  1653. }
  1654. }
  1655. static void skd_complete_internal(struct skd_device *skdev,
  1656. volatile struct fit_completion_entry_v1
  1657. *skcomp,
  1658. volatile struct fit_comp_error_info *skerr,
  1659. struct skd_special_context *skspcl)
  1660. {
  1661. u8 *buf = skspcl->data_buf;
  1662. u8 status;
  1663. int i;
  1664. struct skd_scsi_request *scsi =
  1665. (struct skd_scsi_request *)&skspcl->msg_buf[64];
  1666. SKD_ASSERT(skspcl == &skdev->internal_skspcl);
  1667. pr_debug("%s:%s:%d complete internal %x\n",
  1668. skdev->name, __func__, __LINE__, scsi->cdb[0]);
  1669. skspcl->req.completion = *skcomp;
  1670. skspcl->req.state = SKD_REQ_STATE_IDLE;
  1671. skspcl->req.id += SKD_ID_INCR;
  1672. status = skspcl->req.completion.status;
  1673. skd_log_check_status(skdev, status, skerr->key, skerr->code,
  1674. skerr->qual, skerr->fruc);
  1675. switch (scsi->cdb[0]) {
  1676. case TEST_UNIT_READY:
  1677. if (status == SAM_STAT_GOOD)
  1678. skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
  1679. else if ((status == SAM_STAT_CHECK_CONDITION) &&
  1680. (skerr->key == MEDIUM_ERROR))
  1681. skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
  1682. else {
  1683. if (skdev->state == SKD_DRVR_STATE_STOPPING) {
1684. pr_debug("%s:%s:%d TUR failed, don't send any more, state 0x%x\n",
  1685. skdev->name, __func__, __LINE__,
  1686. skdev->state);
  1687. return;
  1688. }
  1689. pr_debug("%s:%s:%d **** TUR failed, retry skerr\n",
  1690. skdev->name, __func__, __LINE__);
  1691. skd_send_internal_skspcl(skdev, skspcl, 0x00);
  1692. }
  1693. break;
  1694. case WRITE_BUFFER:
  1695. if (status == SAM_STAT_GOOD)
  1696. skd_send_internal_skspcl(skdev, skspcl, READ_BUFFER);
  1697. else {
  1698. if (skdev->state == SKD_DRVR_STATE_STOPPING) {
1699. pr_debug("%s:%s:%d write buffer failed, don't send any more, state 0x%x\n",
  1700. skdev->name, __func__, __LINE__,
  1701. skdev->state);
  1702. return;
  1703. }
  1704. pr_debug("%s:%s:%d **** write buffer failed, retry skerr\n",
  1705. skdev->name, __func__, __LINE__);
  1706. skd_send_internal_skspcl(skdev, skspcl, 0x00);
  1707. }
  1708. break;
  1709. case READ_BUFFER:
  1710. if (status == SAM_STAT_GOOD) {
  1711. if (skd_chk_read_buf(skdev, skspcl) == 0)
  1712. skd_send_internal_skspcl(skdev, skspcl,
  1713. READ_CAPACITY);
  1714. else {
  1715. pr_err(
  1716. "(%s):*** W/R Buffer mismatch %d ***\n",
  1717. skd_name(skdev), skdev->connect_retries);
  1718. if (skdev->connect_retries <
  1719. SKD_MAX_CONNECT_RETRIES) {
  1720. skdev->connect_retries++;
  1721. skd_soft_reset(skdev);
  1722. } else {
  1723. pr_err(
  1724. "(%s): W/R Buffer Connect Error\n",
  1725. skd_name(skdev));
  1726. return;
  1727. }
  1728. }
  1729. } else {
  1730. if (skdev->state == SKD_DRVR_STATE_STOPPING) {
  1731. pr_debug("%s:%s:%d "
  1732. "read buffer failed, don't send anymore state 0x%x\n",
  1733. skdev->name, __func__, __LINE__,
  1734. skdev->state);
  1735. return;
  1736. }
  1737. pr_debug("%s:%s:%d "
  1738. "**** read buffer failed, retry skerr\n",
  1739. skdev->name, __func__, __LINE__);
  1740. skd_send_internal_skspcl(skdev, skspcl, 0x00);
  1741. }
  1742. break;
  1743. case READ_CAPACITY:
  1744. skdev->read_cap_is_valid = 0;
  1745. if (status == SAM_STAT_GOOD) {
  1746. skdev->read_cap_last_lba =
  1747. (buf[0] << 24) | (buf[1] << 16) |
  1748. (buf[2] << 8) | buf[3];
  1749. skdev->read_cap_blocksize =
  1750. (buf[4] << 24) | (buf[5] << 16) |
  1751. (buf[6] << 8) | buf[7];
  1752. pr_debug("%s:%s:%d last lba %d, bs %d\n",
  1753. skdev->name, __func__, __LINE__,
  1754. skdev->read_cap_last_lba,
  1755. skdev->read_cap_blocksize);
  1756. set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
  1757. skdev->read_cap_is_valid = 1;
  1758. skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
  1759. } else if ((status == SAM_STAT_CHECK_CONDITION) &&
  1760. (skerr->key == MEDIUM_ERROR)) {
  1761. skdev->read_cap_last_lba = ~0;
  1762. set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
  1763. pr_debug("%s:%s:%d "
  1764. "**** MEDIUM ERROR caused READCAP to fail, ignore failure and continue to inquiry\n",
  1765. skdev->name, __func__, __LINE__);
  1766. skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
  1767. } else {
  1768. pr_debug("%s:%s:%d **** READCAP failed, retry TUR\n",
  1769. skdev->name, __func__, __LINE__);
  1770. skd_send_internal_skspcl(skdev, skspcl,
  1771. TEST_UNIT_READY);
  1772. }
  1773. break;
  1774. case INQUIRY:
  1775. skdev->inquiry_is_valid = 0;
  1776. if (status == SAM_STAT_GOOD) {
  1777. skdev->inquiry_is_valid = 1;
  1778. for (i = 0; i < 12; i++)
  1779. skdev->inq_serial_num[i] = buf[i + 4];
  1780. skdev->inq_serial_num[12] = 0;
  1781. }
  1782. if (skd_unquiesce_dev(skdev) < 0)
  1783. pr_debug("%s:%s:%d **** failed, to ONLINE device\n",
  1784. skdev->name, __func__, __LINE__);
  1785. /* connection is complete */
  1786. skdev->connect_retries = 0;
  1787. break;
  1788. case SYNCHRONIZE_CACHE:
  1789. if (status == SAM_STAT_GOOD)
  1790. skdev->sync_done = 1;
  1791. else
  1792. skdev->sync_done = -1;
  1793. wake_up_interruptible(&skdev->waitq);
  1794. break;
  1795. default:
  1796. SKD_ASSERT("we didn't send this");
  1797. }
  1798. }
  1799. /*
  1800. *****************************************************************************
  1801. * FIT MESSAGES
  1802. *****************************************************************************
  1803. */
  1804. static void skd_send_fitmsg(struct skd_device *skdev,
  1805. struct skd_fitmsg_context *skmsg)
  1806. {
  1807. u64 qcmd;
  1808. struct fit_msg_hdr *fmh;
  1809. pr_debug("%s:%s:%d dma address 0x%llx, busy=%d\n",
  1810. skdev->name, __func__, __LINE__,
  1811. skmsg->mb_dma_address, skdev->in_flight);
  1812. pr_debug("%s:%s:%d msg_buf 0x%p, offset %x\n",
  1813. skdev->name, __func__, __LINE__,
  1814. skmsg->msg_buf, skmsg->offset);
  1815. qcmd = skmsg->mb_dma_address;
  1816. qcmd |= FIT_QCMD_QID_NORMAL;
  1817. fmh = (struct fit_msg_hdr *)skmsg->msg_buf;
  1818. skmsg->outstanding = fmh->num_protocol_cmds_coalesced;
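/* One completion is expected per coalesced command; skd_release_skreq()
 * only recycles this msg buffer once outstanding drops back to zero.
 */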
  1819. if (unlikely(skdev->dbg_level > 1)) {
  1820. u8 *bp = (u8 *)skmsg->msg_buf;
  1821. int i;
  1822. for (i = 0; i < skmsg->length; i += 8) {
  1823. pr_debug("%s:%s:%d msg[%2d] %02x %02x %02x %02x "
  1824. "%02x %02x %02x %02x\n",
  1825. skdev->name, __func__, __LINE__,
  1826. i, bp[i + 0], bp[i + 1], bp[i + 2],
  1827. bp[i + 3], bp[i + 4], bp[i + 5],
  1828. bp[i + 6], bp[i + 7]);
  1829. if (i == 0)
  1830. i = 64 - 8;
  1831. }
  1832. }
  1833. if (skmsg->length > 256)
  1834. qcmd |= FIT_QCMD_MSGSIZE_512;
  1835. else if (skmsg->length > 128)
  1836. qcmd |= FIT_QCMD_MSGSIZE_256;
  1837. else if (skmsg->length > 64)
  1838. qcmd |= FIT_QCMD_MSGSIZE_128;
  1839. else
  1840. /*
  1841. * This makes no sense because the FIT msg header is
  1842. * 64 bytes. If the msg is only 64 bytes long it has
  1843. * no payload.
  1844. */
  1845. qcmd |= FIT_QCMD_MSGSIZE_64;
  1846. SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
  1847. }
  1848. static void skd_send_special_fitmsg(struct skd_device *skdev,
  1849. struct skd_special_context *skspcl)
  1850. {
  1851. u64 qcmd;
  1852. if (unlikely(skdev->dbg_level > 1)) {
  1853. u8 *bp = (u8 *)skspcl->msg_buf;
  1854. int i;
  1855. for (i = 0; i < SKD_N_SPECIAL_FITMSG_BYTES; i += 8) {
  1856. pr_debug("%s:%s:%d spcl[%2d] %02x %02x %02x %02x "
  1857. "%02x %02x %02x %02x\n",
  1858. skdev->name, __func__, __LINE__, i,
  1859. bp[i + 0], bp[i + 1], bp[i + 2], bp[i + 3],
  1860. bp[i + 4], bp[i + 5], bp[i + 6], bp[i + 7]);
  1861. if (i == 0)
  1862. i = 64 - 8;
  1863. }
  1864. pr_debug("%s:%s:%d skspcl=%p id=%04x sksg_list=%p sksg_dma=%llx\n",
  1865. skdev->name, __func__, __LINE__,
  1866. skspcl, skspcl->req.id, skspcl->req.sksg_list,
  1867. skspcl->req.sksg_dma_address);
  1868. for (i = 0; i < skspcl->req.n_sg; i++) {
  1869. struct fit_sg_descriptor *sgd =
  1870. &skspcl->req.sksg_list[i];
  1871. pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x "
  1872. "addr=0x%llx next=0x%llx\n",
  1873. skdev->name, __func__, __LINE__,
  1874. i, sgd->byte_count, sgd->control,
  1875. sgd->host_side_addr, sgd->next_desc_ptr);
  1876. }
  1877. }
  1878. /*
  1879. * Special FIT msgs are always 128 bytes: a 64-byte FIT hdr
  1880. * and one 64-byte SSDI command.
  1881. */
  1882. qcmd = skspcl->mb_dma_address;
  1883. qcmd |= FIT_QCMD_QID_NORMAL + FIT_QCMD_MSGSIZE_128;
  1884. SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
  1885. }
  1886. /*
  1887. *****************************************************************************
  1888. * COMPLETION QUEUE
  1889. *****************************************************************************
  1890. */
  1891. static void skd_complete_other(struct skd_device *skdev,
  1892. volatile struct fit_completion_entry_v1 *skcomp,
  1893. volatile struct fit_comp_error_info *skerr);
  1894. struct sns_info {
  1895. u8 type;
  1896. u8 stat;
  1897. u8 key;
  1898. u8 asc;
  1899. u8 ascq;
  1900. u8 mask;
  1901. enum skd_check_status_action action;
  1902. };
  1903. static struct sns_info skd_chkstat_table[] = {
  1904. /* Good */
  1905. { 0x70, 0x02, RECOVERED_ERROR, 0, 0, 0x1c,
  1906. SKD_CHECK_STATUS_REPORT_GOOD },
  1907. /* Smart alerts */
  1908. { 0x70, 0x02, NO_SENSE, 0x0B, 0x00, 0x1E, /* warnings */
  1909. SKD_CHECK_STATUS_REPORT_SMART_ALERT },
  1910. { 0x70, 0x02, NO_SENSE, 0x5D, 0x00, 0x1E, /* thresholds */
  1911. SKD_CHECK_STATUS_REPORT_SMART_ALERT },
  1912. { 0x70, 0x02, RECOVERED_ERROR, 0x0B, 0x01, 0x1F, /* temperature over trigger */
  1913. SKD_CHECK_STATUS_REPORT_SMART_ALERT },
  1914. /* Retry (with limits) */
  1915. { 0x70, 0x02, 0x0B, 0, 0, 0x1C, /* This one is for DMA ERROR */
  1916. SKD_CHECK_STATUS_REQUEUE_REQUEST },
  1917. { 0x70, 0x02, 0x06, 0x0B, 0x00, 0x1E, /* warnings */
  1918. SKD_CHECK_STATUS_REQUEUE_REQUEST },
  1919. { 0x70, 0x02, 0x06, 0x5D, 0x00, 0x1E, /* thresholds */
  1920. SKD_CHECK_STATUS_REQUEUE_REQUEST },
  1921. { 0x70, 0x02, 0x06, 0x80, 0x30, 0x1F, /* backup power */
  1922. SKD_CHECK_STATUS_REQUEUE_REQUEST },
  1923. /* Busy (or about to be) */
  1924. { 0x70, 0x02, 0x06, 0x3f, 0x01, 0x1F, /* fw changed */
  1925. SKD_CHECK_STATUS_BUSY_IMMINENT },
  1926. };
  1927. /*
  1928. * Look up status and sense data to decide how to handle the error
  1929. * from the device.
  1930. * mask says which fields must match e.g., mask=0x18 means check
  1931. * type and stat, ignore key, asc, ascq.
  1932. */
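/*
 * Mask bits, as decoded by the loop below:
 *   0x10 type, 0x08 stat, 0x04 key, 0x02 asc, 0x01 ascq.
 * e.g. mask 0x1C matches on type, stat and key only (the first table
 * entry above uses exactly that).
 */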
  1933. static enum skd_check_status_action
  1934. skd_check_status(struct skd_device *skdev,
  1935. u8 cmp_status, volatile struct fit_comp_error_info *skerr)
  1936. {
  1937. int i, n;
  1938. pr_err("(%s): key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
  1939. skd_name(skdev), skerr->key, skerr->code, skerr->qual,
  1940. skerr->fruc);
  1941. pr_debug("%s:%s:%d stat: t=%02x stat=%02x k=%02x c=%02x q=%02x fruc=%02x\n",
  1942. skdev->name, __func__, __LINE__, skerr->type, cmp_status,
  1943. skerr->key, skerr->code, skerr->qual, skerr->fruc);
1944. /* Does the info match an entry in the table of special cases? */
  1945. n = sizeof(skd_chkstat_table) / sizeof(skd_chkstat_table[0]);
  1946. for (i = 0; i < n; i++) {
  1947. struct sns_info *sns = &skd_chkstat_table[i];
  1948. if (sns->mask & 0x10)
  1949. if (skerr->type != sns->type)
  1950. continue;
  1951. if (sns->mask & 0x08)
  1952. if (cmp_status != sns->stat)
  1953. continue;
  1954. if (sns->mask & 0x04)
  1955. if (skerr->key != sns->key)
  1956. continue;
  1957. if (sns->mask & 0x02)
  1958. if (skerr->code != sns->asc)
  1959. continue;
  1960. if (sns->mask & 0x01)
  1961. if (skerr->qual != sns->ascq)
  1962. continue;
  1963. if (sns->action == SKD_CHECK_STATUS_REPORT_SMART_ALERT) {
  1964. pr_err("(%s): SMART Alert: sense key/asc/ascq "
  1965. "%02x/%02x/%02x\n",
  1966. skd_name(skdev), skerr->key,
  1967. skerr->code, skerr->qual);
  1968. }
  1969. return sns->action;
  1970. }
  1971. /* No other match, so nonzero status means error,
  1972. * zero status means good
  1973. */
  1974. if (cmp_status) {
  1975. pr_debug("%s:%s:%d status check: error\n",
  1976. skdev->name, __func__, __LINE__);
  1977. return SKD_CHECK_STATUS_REPORT_ERROR;
  1978. }
  1979. pr_debug("%s:%s:%d status check good default\n",
  1980. skdev->name, __func__, __LINE__);
  1981. return SKD_CHECK_STATUS_REPORT_GOOD;
  1982. }
  1983. static void skd_resolve_req_exception(struct skd_device *skdev,
  1984. struct skd_request_context *skreq)
  1985. {
  1986. u8 cmp_status = skreq->completion.status;
  1987. switch (skd_check_status(skdev, cmp_status, &skreq->err_info)) {
  1988. case SKD_CHECK_STATUS_REPORT_GOOD:
  1989. case SKD_CHECK_STATUS_REPORT_SMART_ALERT:
  1990. skd_end_request(skdev, skreq, 0);
  1991. break;
  1992. case SKD_CHECK_STATUS_BUSY_IMMINENT:
  1993. skd_log_skreq(skdev, skreq, "retry(busy)");
  1994. blk_requeue_request(skdev->queue, skreq->req);
  1995. pr_info("(%s) drive BUSY imminent\n", skd_name(skdev));
  1996. skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT;
  1997. skdev->timer_countdown = SKD_TIMER_MINUTES(20);
  1998. skd_quiesce_dev(skdev);
  1999. break;
  2000. case SKD_CHECK_STATUS_REQUEUE_REQUEST:
  2001. if ((unsigned long) ++skreq->req->special < SKD_MAX_RETRIES) {
  2002. skd_log_skreq(skdev, skreq, "retry");
  2003. blk_requeue_request(skdev->queue, skreq->req);
  2004. break;
  2005. }
  2006. /* fall through to report error */
  2007. case SKD_CHECK_STATUS_REPORT_ERROR:
  2008. default:
  2009. skd_end_request(skdev, skreq, -EIO);
  2010. break;
  2011. }
  2012. }
  2013. /* assume spinlock is already held */
  2014. static void skd_release_skreq(struct skd_device *skdev,
  2015. struct skd_request_context *skreq)
  2016. {
  2017. u32 msg_slot;
  2018. struct skd_fitmsg_context *skmsg;
  2019. u32 timo_slot;
  2020. /*
  2021. * Reclaim the FIT msg buffer if this is
  2022. * the first of the requests it carried to
  2023. * be completed. The FIT msg buffer used to
  2024. * send this request cannot be reused until
  2025. * we are sure the s1120 card has copied
  2026. * it to its memory. The FIT msg might have
  2027. * contained several requests. As soon as
  2028. * any of them are completed we know that
  2029. * the entire FIT msg was transferred.
  2030. * Only the first completed request will
  2031. * match the FIT msg buffer id. The FIT
  2032. * msg buffer id is immediately updated.
  2033. * When subsequent requests complete the FIT
  2034. * msg buffer id won't match, so we know
  2035. * quite cheaply that it is already done.
  2036. */
  2037. msg_slot = skreq->fitmsg_id & SKD_ID_SLOT_MASK;
  2038. SKD_ASSERT(msg_slot < skdev->num_fitmsg_context);
  2039. skmsg = &skdev->skmsg_table[msg_slot];
  2040. if (skmsg->id == skreq->fitmsg_id) {
  2041. SKD_ASSERT(skmsg->state == SKD_MSG_STATE_BUSY);
  2042. SKD_ASSERT(skmsg->outstanding > 0);
  2043. skmsg->outstanding--;
  2044. if (skmsg->outstanding == 0) {
  2045. skmsg->state = SKD_MSG_STATE_IDLE;
  2046. skmsg->id += SKD_ID_INCR;
  2047. skmsg->next = skdev->skmsg_free_list;
  2048. skdev->skmsg_free_list = skmsg;
  2049. }
  2050. }
  2051. /*
  2052. * Decrease the number of active requests.
  2053. * Also decrements the count in the timeout slot.
  2054. */
  2055. SKD_ASSERT(skdev->in_flight > 0);
  2056. skdev->in_flight -= 1;
  2057. timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
  2058. SKD_ASSERT(skdev->timeout_slot[timo_slot] > 0);
  2059. skdev->timeout_slot[timo_slot] -= 1;
  2060. /*
  2061. * Reset backpointer
  2062. */
  2063. skreq->req = NULL;
  2064. /*
  2065. * Reclaim the skd_request_context
  2066. */
  2067. skreq->state = SKD_REQ_STATE_IDLE;
  2068. skreq->id += SKD_ID_INCR;
  2069. skreq->next = skdev->skreq_free_list;
  2070. skdev->skreq_free_list = skreq;
  2071. }
  2072. #define DRIVER_INQ_EVPD_PAGE_CODE 0xDA
  2073. static void skd_do_inq_page_00(struct skd_device *skdev,
  2074. volatile struct fit_completion_entry_v1 *skcomp,
  2075. volatile struct fit_comp_error_info *skerr,
  2076. uint8_t *cdb, uint8_t *buf)
  2077. {
  2078. uint16_t insert_pt, max_bytes, drive_pages, drive_bytes, new_size;
  2079. /* Caller requested "supported pages". The driver needs to insert
  2080. * its page.
  2081. */
  2082. pr_debug("%s:%s:%d skd_do_driver_inquiry: modify supported pages.\n",
  2083. skdev->name, __func__, __LINE__);
  2084. /* If the device rejected the request because the CDB was
  2085. * improperly formed, then just leave.
  2086. */
  2087. if (skcomp->status == SAM_STAT_CHECK_CONDITION &&
  2088. skerr->key == ILLEGAL_REQUEST && skerr->code == 0x24)
  2089. return;
  2090. /* Get the amount of space the caller allocated */
  2091. max_bytes = (cdb[3] << 8) | cdb[4];
  2092. /* Get the number of pages actually returned by the device */
  2093. drive_pages = (buf[2] << 8) | buf[3];
  2094. drive_bytes = drive_pages + 4;
  2095. new_size = drive_pages + 1;
  2096. /* Supported pages must be in numerical order, so find where
  2097. * the driver page needs to be inserted into the list of
  2098. * pages returned by the device.
  2099. */
  2100. for (insert_pt = 4; insert_pt < drive_bytes; insert_pt++) {
  2101. if (buf[insert_pt] == DRIVER_INQ_EVPD_PAGE_CODE)
2102. return; /* Device is already using this page code, abort. */
  2103. else if (buf[insert_pt] > DRIVER_INQ_EVPD_PAGE_CODE)
  2104. break;
  2105. }
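/*
 * e.g. if the device reports pages 00h, 80h and 83h, the driver page
 * (DAh) sorts after all of them, so insert_pt ends up at the tail of
 * the list and the page code is appended there.
 */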
  2106. if (insert_pt < max_bytes) {
  2107. uint16_t u;
  2108. /* Shift everything up one byte to make room. */
  2109. for (u = new_size + 3; u > insert_pt; u--)
  2110. buf[u] = buf[u - 1];
  2111. buf[insert_pt] = DRIVER_INQ_EVPD_PAGE_CODE;
  2112. /* SCSI byte order increment of num_returned_bytes by 1 */
  2113. skcomp->num_returned_bytes =
  2114. be32_to_cpu(skcomp->num_returned_bytes) + 1;
  2115. skcomp->num_returned_bytes =
2116. cpu_to_be32(skcomp->num_returned_bytes);
  2117. }
  2118. /* update page length field to reflect the driver's page too */
  2119. buf[2] = (uint8_t)((new_size >> 8) & 0xFF);
  2120. buf[3] = (uint8_t)((new_size >> 0) & 0xFF);
  2121. }
  2122. static void skd_get_link_info(struct pci_dev *pdev, u8 *speed, u8 *width)
  2123. {
  2124. int pcie_reg;
  2125. u16 pci_bus_speed;
  2126. u8 pci_lanes;
  2127. pcie_reg = pci_find_capability(pdev, PCI_CAP_ID_EXP);
  2128. if (pcie_reg) {
  2129. u16 linksta;
  2130. pci_read_config_word(pdev, pcie_reg + PCI_EXP_LNKSTA, &linksta);
  2131. pci_bus_speed = linksta & 0xF;
  2132. pci_lanes = (linksta & 0x3F0) >> 4;
  2133. } else {
  2134. *speed = STEC_LINK_UNKNOWN;
  2135. *width = 0xFF;
  2136. return;
  2137. }
  2138. switch (pci_bus_speed) {
  2139. case 1:
  2140. *speed = STEC_LINK_2_5GTS;
  2141. break;
  2142. case 2:
  2143. *speed = STEC_LINK_5GTS;
  2144. break;
  2145. case 3:
  2146. *speed = STEC_LINK_8GTS;
  2147. break;
  2148. default:
  2149. *speed = STEC_LINK_UNKNOWN;
  2150. break;
  2151. }
  2152. if (pci_lanes <= 0x20)
  2153. *width = pci_lanes;
  2154. else
  2155. *width = 0xFF;
  2156. }
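/*
 * PCI_EXP_LNKSTA encodes the current link speed in bits [3:0]
 * (1 = 2.5 GT/s, 2 = 5 GT/s, 3 = 8 GT/s) and the negotiated width in
 * bits [9:4], which is what the masks above extract.
 */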
  2157. static void skd_do_inq_page_da(struct skd_device *skdev,
  2158. volatile struct fit_completion_entry_v1 *skcomp,
  2159. volatile struct fit_comp_error_info *skerr,
  2160. uint8_t *cdb, uint8_t *buf)
  2161. {
  2162. struct pci_dev *pdev = skdev->pdev;
  2163. unsigned max_bytes;
  2164. struct driver_inquiry_data inq;
  2165. u16 val;
  2166. pr_debug("%s:%s:%d skd_do_driver_inquiry: return driver page\n",
  2167. skdev->name, __func__, __LINE__);
  2168. memset(&inq, 0, sizeof(inq));
  2169. inq.page_code = DRIVER_INQ_EVPD_PAGE_CODE;
  2170. skd_get_link_info(pdev, &inq.pcie_link_speed, &inq.pcie_link_lanes);
  2171. inq.pcie_bus_number = cpu_to_be16(pdev->bus->number);
  2172. inq.pcie_device_number = PCI_SLOT(pdev->devfn);
  2173. inq.pcie_function_number = PCI_FUNC(pdev->devfn);
  2174. pci_read_config_word(pdev, PCI_VENDOR_ID, &val);
  2175. inq.pcie_vendor_id = cpu_to_be16(val);
  2176. pci_read_config_word(pdev, PCI_DEVICE_ID, &val);
  2177. inq.pcie_device_id = cpu_to_be16(val);
  2178. pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &val);
  2179. inq.pcie_subsystem_vendor_id = cpu_to_be16(val);
  2180. pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &val);
  2181. inq.pcie_subsystem_device_id = cpu_to_be16(val);
2182. /* Driver version, fixed length, padded with spaces on the right */
  2183. inq.driver_version_length = sizeof(inq.driver_version);
  2184. memset(&inq.driver_version, ' ', sizeof(inq.driver_version));
  2185. memcpy(inq.driver_version, DRV_VER_COMPL,
  2186. min(sizeof(inq.driver_version), strlen(DRV_VER_COMPL)));
  2187. inq.page_length = cpu_to_be16((sizeof(inq) - 4));
  2188. /* Clear the error set by the device */
  2189. skcomp->status = SAM_STAT_GOOD;
  2190. memset((void *)skerr, 0, sizeof(*skerr));
  2191. /* copy response into output buffer */
  2192. max_bytes = (cdb[3] << 8) | cdb[4];
  2193. memcpy(buf, &inq, min_t(unsigned, max_bytes, sizeof(inq)));
  2194. skcomp->num_returned_bytes =
2195. cpu_to_be32(min_t(uint16_t, max_bytes, sizeof(inq)));
  2196. }
  2197. static void skd_do_driver_inq(struct skd_device *skdev,
  2198. volatile struct fit_completion_entry_v1 *skcomp,
  2199. volatile struct fit_comp_error_info *skerr,
  2200. uint8_t *cdb, uint8_t *buf)
  2201. {
  2202. if (!buf)
  2203. return;
  2204. else if (cdb[0] != INQUIRY)
  2205. return; /* Not an INQUIRY */
  2206. else if ((cdb[1] & 1) == 0)
  2207. return; /* EVPD not set */
  2208. else if (cdb[2] == 0)
  2209. /* Need to add driver's page to supported pages list */
  2210. skd_do_inq_page_00(skdev, skcomp, skerr, cdb, buf);
  2211. else if (cdb[2] == DRIVER_INQ_EVPD_PAGE_CODE)
  2212. /* Caller requested driver's page */
  2213. skd_do_inq_page_da(skdev, skcomp, skerr, cdb, buf);
  2214. }
  2215. static unsigned char *skd_sg_1st_page_ptr(struct scatterlist *sg)
  2216. {
  2217. if (!sg)
  2218. return NULL;
  2219. if (!sg_page(sg))
  2220. return NULL;
  2221. return sg_virt(sg);
  2222. }
  2223. static void skd_process_scsi_inq(struct skd_device *skdev,
  2224. volatile struct fit_completion_entry_v1
  2225. *skcomp,
  2226. volatile struct fit_comp_error_info *skerr,
  2227. struct skd_special_context *skspcl)
  2228. {
  2229. uint8_t *buf;
  2230. struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf;
  2231. struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1];
  2232. dma_sync_sg_for_cpu(skdev->class_dev, skspcl->req.sg, skspcl->req.n_sg,
  2233. skspcl->req.sg_data_dir);
  2234. buf = skd_sg_1st_page_ptr(skspcl->req.sg);
  2235. if (buf)
  2236. skd_do_driver_inq(skdev, skcomp, skerr, scsi_req->cdb, buf);
  2237. }
  2238. static int skd_isr_completion_posted(struct skd_device *skdev,
  2239. int limit, int *enqueued)
  2240. {
  2241. volatile struct fit_completion_entry_v1 *skcmp = NULL;
  2242. volatile struct fit_comp_error_info *skerr;
  2243. u16 req_id;
  2244. u32 req_slot;
  2245. struct skd_request_context *skreq;
  2246. u16 cmp_cntxt = 0;
  2247. u8 cmp_status = 0;
  2248. u8 cmp_cycle = 0;
  2249. u32 cmp_bytes = 0;
  2250. int rc = 0;
  2251. int processed = 0;
  2252. for (;; ) {
  2253. SKD_ASSERT(skdev->skcomp_ix < SKD_N_COMPLETION_ENTRY);
  2254. skcmp = &skdev->skcomp_table[skdev->skcomp_ix];
  2255. cmp_cycle = skcmp->cycle;
  2256. cmp_cntxt = skcmp->tag;
  2257. cmp_status = skcmp->status;
  2258. cmp_bytes = be32_to_cpu(skcmp->num_returned_bytes);
  2259. skerr = &skdev->skerr_table[skdev->skcomp_ix];
  2260. pr_debug("%s:%s:%d "
  2261. "cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d "
  2262. "busy=%d rbytes=0x%x proto=%d\n",
  2263. skdev->name, __func__, __LINE__, skdev->skcomp_cycle,
  2264. skdev->skcomp_ix, cmp_cycle, cmp_cntxt, cmp_status,
  2265. skdev->in_flight, cmp_bytes, skdev->proto_ver);
  2266. if (cmp_cycle != skdev->skcomp_cycle) {
  2267. pr_debug("%s:%s:%d end of completions\n",
  2268. skdev->name, __func__, __LINE__);
  2269. break;
  2270. }
  2271. /*
  2272. * Update the completion queue head index and possibly
  2273. * the completion cycle count. 8-bit wrap-around.
  2274. */
  2275. skdev->skcomp_ix++;
  2276. if (skdev->skcomp_ix >= SKD_N_COMPLETION_ENTRY) {
  2277. skdev->skcomp_ix = 0;
  2278. skdev->skcomp_cycle++;
  2279. }
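/* e.g. once the index wraps back to 0 the 8-bit cycle value steps
 * forward, so entries left over from the previous pass fail the
 * cmp_cycle check above and are not consumed twice.
 */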
  2280. /*
  2281. * The command context is a unique 32-bit ID. The low order
  2282. * bits help locate the request. The request is usually a
  2283. * r/w request (see skd_start() above) or a special request.
  2284. */
  2285. req_id = cmp_cntxt;
  2286. req_slot = req_id & SKD_ID_SLOT_AND_TABLE_MASK;
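/* The low bits of the tag select the request slot; the table bits
 * distinguish r/w requests from special/internal ones and are decoded
 * in skd_complete_other() for the non-r/w cases.
 */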
  2287. /* Is this other than a r/w request? */
  2288. if (req_slot >= skdev->num_req_context) {
  2289. /*
  2290. * This is not a completion for a r/w request.
  2291. */
  2292. skd_complete_other(skdev, skcmp, skerr);
  2293. continue;
  2294. }
  2295. skreq = &skdev->skreq_table[req_slot];
  2296. /*
  2297. * Make sure the request ID for the slot matches.
  2298. */
  2299. if (skreq->id != req_id) {
  2300. pr_debug("%s:%s:%d mismatch comp_id=0x%x req_id=0x%x\n",
  2301. skdev->name, __func__, __LINE__,
  2302. req_id, skreq->id);
  2303. {
  2304. u16 new_id = cmp_cntxt;
  2305. pr_err("(%s): Completion mismatch "
  2306. "comp_id=0x%04x skreq=0x%04x new=0x%04x\n",
  2307. skd_name(skdev), req_id,
  2308. skreq->id, new_id);
  2309. continue;
  2310. }
  2311. }
  2312. SKD_ASSERT(skreq->state == SKD_REQ_STATE_BUSY);
  2313. if (skreq->state == SKD_REQ_STATE_ABORTED) {
  2314. pr_debug("%s:%s:%d reclaim req %p id=%04x\n",
  2315. skdev->name, __func__, __LINE__,
  2316. skreq, skreq->id);
  2317. /* a previously timed out command can
  2318. * now be cleaned up */
  2319. skd_release_skreq(skdev, skreq);
  2320. continue;
  2321. }
  2322. skreq->completion = *skcmp;
  2323. if (unlikely(cmp_status == SAM_STAT_CHECK_CONDITION)) {
  2324. skreq->err_info = *skerr;
  2325. skd_log_check_status(skdev, cmp_status, skerr->key,
  2326. skerr->code, skerr->qual,
  2327. skerr->fruc);
  2328. }
  2329. /* Release DMA resources for the request. */
  2330. if (skreq->n_sg > 0)
  2331. skd_postop_sg_list(skdev, skreq);
  2332. if (!skreq->req) {
  2333. pr_debug("%s:%s:%d NULL backptr skdreq %p, "
  2334. "req=0x%x req_id=0x%x\n",
  2335. skdev->name, __func__, __LINE__,
  2336. skreq, skreq->id, req_id);
  2337. } else {
  2338. /*
  2339. * Capture the outcome and post it back to the
  2340. * native request.
  2341. */
  2342. if (likely(cmp_status == SAM_STAT_GOOD))
  2343. skd_end_request(skdev, skreq, 0);
  2344. else
  2345. skd_resolve_req_exception(skdev, skreq);
  2346. }
  2347. /*
  2348. * Release the skreq, its FIT msg (if one), timeout slot,
  2349. * and queue depth.
  2350. */
  2351. skd_release_skreq(skdev, skreq);
2352. /* skd_isr_comp_limit equal to zero means no limit */
  2353. if (limit) {
  2354. if (++processed >= limit) {
  2355. rc = 1;
  2356. break;
  2357. }
  2358. }
  2359. }
  2360. if ((skdev->state == SKD_DRVR_STATE_PAUSING)
  2361. && (skdev->in_flight) == 0) {
  2362. skdev->state = SKD_DRVR_STATE_PAUSED;
  2363. wake_up_interruptible(&skdev->waitq);
  2364. }
  2365. return rc;
  2366. }
  2367. static void skd_complete_other(struct skd_device *skdev,
  2368. volatile struct fit_completion_entry_v1 *skcomp,
  2369. volatile struct fit_comp_error_info *skerr)
  2370. {
  2371. u32 req_id = 0;
  2372. u32 req_table;
  2373. u32 req_slot;
  2374. struct skd_special_context *skspcl;
  2375. req_id = skcomp->tag;
  2376. req_table = req_id & SKD_ID_TABLE_MASK;
  2377. req_slot = req_id & SKD_ID_SLOT_MASK;
  2378. pr_debug("%s:%s:%d table=0x%x id=0x%x slot=%d\n",
  2379. skdev->name, __func__, __LINE__,
  2380. req_table, req_id, req_slot);
  2381. /*
  2382. * Based on the request id, determine how to dispatch this completion.
2383. * This switch/case finds the good cases and forwards the
  2384. * completion entry. Errors are reported below the switch.
  2385. */
  2386. switch (req_table) {
  2387. case SKD_ID_RW_REQUEST:
  2388. /*
2389. * The caller, skd_isr_completion_posted() above,
  2390. * handles r/w requests. The only way we get here
  2391. * is if the req_slot is out of bounds.
  2392. */
  2393. break;
  2394. case SKD_ID_SPECIAL_REQUEST:
  2395. /*
  2396. * Make sure the req_slot is in bounds and that the id
  2397. * matches.
  2398. */
  2399. if (req_slot < skdev->n_special) {
  2400. skspcl = &skdev->skspcl_table[req_slot];
  2401. if (skspcl->req.id == req_id &&
  2402. skspcl->req.state == SKD_REQ_STATE_BUSY) {
  2403. skd_complete_special(skdev,
  2404. skcomp, skerr, skspcl);
  2405. return;
  2406. }
  2407. }
  2408. break;
  2409. case SKD_ID_INTERNAL:
  2410. if (req_slot == 0) {
  2411. skspcl = &skdev->internal_skspcl;
  2412. if (skspcl->req.id == req_id &&
  2413. skspcl->req.state == SKD_REQ_STATE_BUSY) {
  2414. skd_complete_internal(skdev,
  2415. skcomp, skerr, skspcl);
  2416. return;
  2417. }
  2418. }
  2419. break;
  2420. case SKD_ID_FIT_MSG:
  2421. /*
  2422. * These id's should never appear in a completion record.
  2423. */
  2424. break;
  2425. default:
  2426. /*
2427. * These id's should never appear anywhere.
  2428. */
  2429. break;
  2430. }
  2431. /*
  2432. * If we get here it is a bad or stale id.
  2433. */
  2434. }
  2435. static void skd_complete_special(struct skd_device *skdev,
  2436. volatile struct fit_completion_entry_v1
  2437. *skcomp,
  2438. volatile struct fit_comp_error_info *skerr,
  2439. struct skd_special_context *skspcl)
  2440. {
  2441. pr_debug("%s:%s:%d completing special request %p\n",
  2442. skdev->name, __func__, __LINE__, skspcl);
  2443. if (skspcl->orphaned) {
  2444. /* Discard orphaned request */
  2445. /* ?: Can this release directly or does it need
  2446. * to use a worker? */
  2447. pr_debug("%s:%s:%d release orphaned %p\n",
  2448. skdev->name, __func__, __LINE__, skspcl);
  2449. skd_release_special(skdev, skspcl);
  2450. return;
  2451. }
  2452. skd_process_scsi_inq(skdev, skcomp, skerr, skspcl);
  2453. skspcl->req.state = SKD_REQ_STATE_COMPLETED;
  2454. skspcl->req.completion = *skcomp;
  2455. skspcl->req.err_info = *skerr;
  2456. skd_log_check_status(skdev, skspcl->req.completion.status, skerr->key,
  2457. skerr->code, skerr->qual, skerr->fruc);
  2458. wake_up_interruptible(&skdev->waitq);
  2459. }
  2460. /* assume spinlock is already held */
  2461. static void skd_release_special(struct skd_device *skdev,
  2462. struct skd_special_context *skspcl)
  2463. {
  2464. int i, was_depleted;
  2465. for (i = 0; i < skspcl->req.n_sg; i++) {
  2466. struct page *page = sg_page(&skspcl->req.sg[i]);
  2467. __free_page(page);
  2468. }
  2469. was_depleted = (skdev->skspcl_free_list == NULL);
  2470. skspcl->req.state = SKD_REQ_STATE_IDLE;
  2471. skspcl->req.id += SKD_ID_INCR;
  2472. skspcl->req.next =
  2473. (struct skd_request_context *)skdev->skspcl_free_list;
  2474. skdev->skspcl_free_list = (struct skd_special_context *)skspcl;
  2475. if (was_depleted) {
  2476. pr_debug("%s:%s:%d skspcl was depleted\n",
  2477. skdev->name, __func__, __LINE__);
2478. /* Free list was depleted. There might be waiters. */
  2479. wake_up_interruptible(&skdev->waitq);
  2480. }
  2481. }
  2482. static void skd_reset_skcomp(struct skd_device *skdev)
  2483. {
  2484. u32 nbytes;
  2485. struct fit_completion_entry_v1 *skcomp;
  2486. nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY;
  2487. nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY;
  2488. memset(skdev->skcomp_table, 0, nbytes);
  2489. skdev->skcomp_ix = 0;
  2490. skdev->skcomp_cycle = 1;
  2491. }
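/* Starting skcomp_cycle at 1 means the freshly zeroed table (cycle 0)
 * can never satisfy the cmp_cycle check in skd_isr_completion_posted()
 * until the device actually writes an entry.
 */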
  2492. /*
  2493. *****************************************************************************
  2494. * INTERRUPTS
  2495. *****************************************************************************
  2496. */
  2497. static void skd_completion_worker(struct work_struct *work)
  2498. {
  2499. struct skd_device *skdev =
  2500. container_of(work, struct skd_device, completion_worker);
  2501. unsigned long flags;
  2502. int flush_enqueued = 0;
  2503. spin_lock_irqsave(&skdev->lock, flags);
  2504. /*
  2505. * pass in limit=0, which means no limit..
  2506. * process everything in compq
  2507. */
  2508. skd_isr_completion_posted(skdev, 0, &flush_enqueued);
  2509. skd_request_fn(skdev->queue);
  2510. spin_unlock_irqrestore(&skdev->lock, flags);
  2511. }
  2512. static void skd_isr_msg_from_dev(struct skd_device *skdev);
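/*
 * Legacy/MSI interrupt handler. Loops while the device has any FIT
 * interrupt bits asserted, acking each pass, and dispatches to the
 * completion, firmware-state and message-from-device sub-handlers.
 * Completion work beyond the per-interrupt limit is deferred to
 * skd_completion_worker.
 */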
  2513. static irqreturn_t
  2514. skd_isr(int irq, void *ptr)
  2515. {
  2516. struct skd_device *skdev;
  2517. u32 intstat;
  2518. u32 ack;
  2519. int rc = 0;
  2520. int deferred = 0;
  2521. int flush_enqueued = 0;
  2522. skdev = (struct skd_device *)ptr;
  2523. spin_lock(&skdev->lock);
  2524. for (;;) {
  2525. intstat = SKD_READL(skdev, FIT_INT_STATUS_HOST);
  2526. ack = FIT_INT_DEF_MASK;
  2527. ack &= intstat;
  2528. pr_debug("%s:%s:%d intstat=0x%x ack=0x%x\n",
  2529. skdev->name, __func__, __LINE__, intstat, ack);
  2530. /* As long as there is an int pending on device, keep
  2531. * running loop. When none, get out, but if we've never
  2532. * done any processing, call completion handler?
  2533. */
  2534. if (ack == 0) {
  2535. /* No interrupts on device, but run the completion
  2536. * processor anyway?
  2537. */
  2538. if (rc == 0)
  2539. if (likely(skdev->state
  2540. == SKD_DRVR_STATE_ONLINE))
  2541. deferred = 1;
  2542. break;
  2543. }
  2544. rc = IRQ_HANDLED;
  2545. SKD_WRITEL(skdev, ack, FIT_INT_STATUS_HOST);
  2546. if (likely((skdev->state != SKD_DRVR_STATE_LOAD) &&
  2547. (skdev->state != SKD_DRVR_STATE_STOPPING))) {
  2548. if (intstat & FIT_ISH_COMPLETION_POSTED) {
  2549. /*
  2550. * If we have already deferred completion
  2551. * processing, don't bother running it again
  2552. */
  2553. if (deferred == 0)
  2554. deferred =
  2555. skd_isr_completion_posted(skdev,
  2556. skd_isr_comp_limit, &flush_enqueued);
  2557. }
  2558. if (intstat & FIT_ISH_FW_STATE_CHANGE) {
  2559. skd_isr_fwstate(skdev);
  2560. if (skdev->state == SKD_DRVR_STATE_FAULT ||
  2561. skdev->state ==
  2562. SKD_DRVR_STATE_DISAPPEARED) {
  2563. spin_unlock(&skdev->lock);
  2564. return rc;
  2565. }
  2566. }
  2567. if (intstat & FIT_ISH_MSG_FROM_DEV)
  2568. skd_isr_msg_from_dev(skdev);
  2569. }
  2570. }
  2571. if (unlikely(flush_enqueued))
  2572. skd_request_fn(skdev->queue);
  2573. if (deferred)
  2574. schedule_work(&skdev->completion_worker);
  2575. else if (!flush_enqueued)
  2576. skd_request_fn(skdev->queue);
  2577. spin_unlock(&skdev->lock);
  2578. return rc;
  2579. }
  2580. static void skd_drive_fault(struct skd_device *skdev)
  2581. {
  2582. skdev->state = SKD_DRVR_STATE_FAULT;
  2583. pr_err("(%s): Drive FAULT\n", skd_name(skdev));
  2584. }
  2585. static void skd_drive_disappeared(struct skd_device *skdev)
  2586. {
  2587. skdev->state = SKD_DRVR_STATE_DISAPPEARED;
  2588. pr_err("(%s): Drive DISAPPEARED\n", skd_name(skdev));
  2589. }
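/*
 * Handle a firmware/drive state change reported in FIT_STATUS and move
 * the driver state machine accordingly (online setup, busy/erase timers,
 * soft reset, fault or disappearance handling).
 */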
  2590. static void skd_isr_fwstate(struct skd_device *skdev)
  2591. {
  2592. u32 sense;
  2593. u32 state;
  2594. u32 mtd;
  2595. int prev_driver_state = skdev->state;
  2596. sense = SKD_READL(skdev, FIT_STATUS);
  2597. state = sense & FIT_SR_DRIVE_STATE_MASK;
  2598. pr_err("(%s): s1120 state %s(%d)=>%s(%d)\n",
  2599. skd_name(skdev),
  2600. skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
  2601. skd_drive_state_to_str(state), state);
  2602. skdev->drive_state = state;
  2603. switch (skdev->drive_state) {
  2604. case FIT_SR_DRIVE_INIT:
  2605. if (skdev->state == SKD_DRVR_STATE_PROTOCOL_MISMATCH) {
  2606. skd_disable_interrupts(skdev);
  2607. break;
  2608. }
  2609. if (skdev->state == SKD_DRVR_STATE_RESTARTING)
  2610. skd_recover_requests(skdev, 0);
  2611. if (skdev->state == SKD_DRVR_STATE_WAIT_BOOT) {
  2612. skdev->timer_countdown = SKD_STARTING_TIMO;
  2613. skdev->state = SKD_DRVR_STATE_STARTING;
  2614. skd_soft_reset(skdev);
  2615. break;
  2616. }
  2617. mtd = FIT_MXD_CONS(FIT_MTD_FITFW_INIT, 0, 0);
  2618. SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
  2619. skdev->last_mtd = mtd;
  2620. break;
  2621. case FIT_SR_DRIVE_ONLINE:
  2622. skdev->cur_max_queue_depth = skd_max_queue_depth;
  2623. if (skdev->cur_max_queue_depth > skdev->dev_max_queue_depth)
  2624. skdev->cur_max_queue_depth = skdev->dev_max_queue_depth;
  2625. skdev->queue_low_water_mark =
  2626. skdev->cur_max_queue_depth * 2 / 3 + 1;
  2627. if (skdev->queue_low_water_mark < 1)
  2628. skdev->queue_low_water_mark = 1;
  2629. pr_info(
  2630. "(%s): Queue depth limit=%d dev=%d lowat=%d\n",
  2631. skd_name(skdev),
  2632. skdev->cur_max_queue_depth,
  2633. skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
  2634. skd_refresh_device_data(skdev);
  2635. break;
  2636. case FIT_SR_DRIVE_BUSY:
  2637. skdev->state = SKD_DRVR_STATE_BUSY;
  2638. skdev->timer_countdown = SKD_BUSY_TIMO;
  2639. skd_quiesce_dev(skdev);
  2640. break;
  2641. case FIT_SR_DRIVE_BUSY_SANITIZE:
  2642. /* set timer for 3 seconds, we'll abort any unfinished
  2643. * commands after that expires
  2644. */
  2645. skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
  2646. skdev->timer_countdown = SKD_TIMER_SECONDS(3);
  2647. blk_start_queue(skdev->queue);
  2648. break;
  2649. case FIT_SR_DRIVE_BUSY_ERASE:
  2650. skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
  2651. skdev->timer_countdown = SKD_BUSY_TIMO;
  2652. break;
  2653. case FIT_SR_DRIVE_OFFLINE:
  2654. skdev->state = SKD_DRVR_STATE_IDLE;
  2655. break;
  2656. case FIT_SR_DRIVE_SOFT_RESET:
  2657. switch (skdev->state) {
  2658. case SKD_DRVR_STATE_STARTING:
  2659. case SKD_DRVR_STATE_RESTARTING:
  2660. /* Expected by a caller of skd_soft_reset() */
  2661. break;
  2662. default:
  2663. skdev->state = SKD_DRVR_STATE_RESTARTING;
  2664. break;
  2665. }
  2666. break;
  2667. case FIT_SR_DRIVE_FW_BOOTING:
  2668. pr_debug("%s:%s:%d ISR FIT_SR_DRIVE_FW_BOOTING %s\n",
  2669. skdev->name, __func__, __LINE__, skdev->name);
  2670. skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
  2671. skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
  2672. break;
  2673. case FIT_SR_DRIVE_DEGRADED:
  2674. case FIT_SR_PCIE_LINK_DOWN:
  2675. case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
  2676. break;
  2677. case FIT_SR_DRIVE_FAULT:
  2678. skd_drive_fault(skdev);
  2679. skd_recover_requests(skdev, 0);
  2680. blk_start_queue(skdev->queue);
  2681. break;
  2682. /* PCIe bus returned all Fs? */
  2683. case 0xFF:
  2684. pr_info("(%s): state=0x%x sense=0x%x\n",
  2685. skd_name(skdev), state, sense);
  2686. skd_drive_disappeared(skdev);
  2687. skd_recover_requests(skdev, 0);
  2688. blk_start_queue(skdev->queue);
  2689. break;
  2690. default:
  2691. /*
  2692. * Unknown FW state. Wait for a state we recognize.
  2693. */
  2694. break;
  2695. }
  2696. pr_err("(%s): Driver state %s(%d)=>%s(%d)\n",
  2697. skd_name(skdev),
  2698. skd_skdev_state_to_str(prev_driver_state), prev_driver_state,
  2699. skd_skdev_state_to_str(skdev->state), skdev->state);
  2700. }
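/*
 * Walk the request, FIT message and special-context tables: release DMA
 * resources, requeue or fail busy block requests, salvage busy messages,
 * recycle or abort busy special requests, and rebuild every free list
 * plus the timeout/in-flight accounting.
 */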
  2701. static void skd_recover_requests(struct skd_device *skdev, int requeue)
  2702. {
  2703. int i;
  2704. for (i = 0; i < skdev->num_req_context; i++) {
  2705. struct skd_request_context *skreq = &skdev->skreq_table[i];
  2706. if (skreq->state == SKD_REQ_STATE_BUSY) {
  2707. skd_log_skreq(skdev, skreq, "recover");
  2708. SKD_ASSERT((skreq->id & SKD_ID_INCR) != 0);
  2709. SKD_ASSERT(skreq->req != NULL);
  2710. /* Release DMA resources for the request. */
  2711. if (skreq->n_sg > 0)
  2712. skd_postop_sg_list(skdev, skreq);
  2713. if (requeue &&
  2714. (unsigned long) ++skreq->req->special <
  2715. SKD_MAX_RETRIES)
  2716. blk_requeue_request(skdev->queue, skreq->req);
  2717. else
  2718. skd_end_request(skdev, skreq, -EIO);
  2719. skreq->req = NULL;
  2720. skreq->state = SKD_REQ_STATE_IDLE;
  2721. skreq->id += SKD_ID_INCR;
  2722. }
  2723. if (i > 0)
  2724. skreq[-1].next = skreq;
  2725. skreq->next = NULL;
  2726. }
  2727. skdev->skreq_free_list = skdev->skreq_table;
  2728. for (i = 0; i < skdev->num_fitmsg_context; i++) {
  2729. struct skd_fitmsg_context *skmsg = &skdev->skmsg_table[i];
  2730. if (skmsg->state == SKD_MSG_STATE_BUSY) {
  2731. skd_log_skmsg(skdev, skmsg, "salvaged");
  2732. SKD_ASSERT((skmsg->id & SKD_ID_INCR) != 0);
  2733. skmsg->state = SKD_MSG_STATE_IDLE;
  2734. skmsg->id += SKD_ID_INCR;
  2735. }
  2736. if (i > 0)
  2737. skmsg[-1].next = skmsg;
  2738. skmsg->next = NULL;
  2739. }
  2740. skdev->skmsg_free_list = skdev->skmsg_table;
  2741. for (i = 0; i < skdev->n_special; i++) {
  2742. struct skd_special_context *skspcl = &skdev->skspcl_table[i];
  2743. /* If orphaned, reclaim it because it has already been reported
  2744. * to the process as an error (it was just waiting for
  2745. * a completion that didn't come, and now it will never come).
  2746. * If busy, change to a state that will cause it to error
  2747. * out in the wait routine and let it do the normal
  2748. * reporting and reclaiming.
  2749. */
  2750. if (skspcl->req.state == SKD_REQ_STATE_BUSY) {
  2751. if (skspcl->orphaned) {
  2752. pr_debug("%s:%s:%d orphaned %p\n",
  2753. skdev->name, __func__, __LINE__,
  2754. skspcl);
  2755. skd_release_special(skdev, skspcl);
  2756. } else {
  2757. pr_debug("%s:%s:%d not orphaned %p\n",
  2758. skdev->name, __func__, __LINE__,
  2759. skspcl);
  2760. skspcl->req.state = SKD_REQ_STATE_ABORTED;
  2761. }
  2762. }
  2763. }
  2764. skdev->skspcl_free_list = skdev->skspcl_table;
  2765. for (i = 0; i < SKD_N_TIMEOUT_SLOT; i++)
  2766. skdev->timeout_slot[i] = 0;
  2767. skdev->in_flight = 0;
  2768. }
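/*
 * Process an acknowledgement from the device (FIT_MSG_FROM_DEVICE),
 * ignoring acks that don't match the last message we sent, and drive the
 * init handshake: protocol check, queue depths, completion queue address,
 * host ID, time-stamp sync, and finally queue arming.
 */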
  2769. static void skd_isr_msg_from_dev(struct skd_device *skdev)
  2770. {
  2771. u32 mfd;
  2772. u32 mtd;
  2773. u32 data;
  2774. mfd = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
  2775. pr_debug("%s:%s:%d mfd=0x%x last_mtd=0x%x\n",
  2776. skdev->name, __func__, __LINE__, mfd, skdev->last_mtd);
  2777. /* ignore any mtd that is an ack for something we didn't send */
  2778. if (FIT_MXD_TYPE(mfd) != FIT_MXD_TYPE(skdev->last_mtd))
  2779. return;
  2780. switch (FIT_MXD_TYPE(mfd)) {
  2781. case FIT_MTD_FITFW_INIT:
  2782. skdev->proto_ver = FIT_PROTOCOL_MAJOR_VER(mfd);
  2783. if (skdev->proto_ver != FIT_PROTOCOL_VERSION_1) {
  2784. pr_err("(%s): protocol mismatch\n",
  2785. skdev->name);
  2786. pr_err("(%s): got=%d support=%d\n",
  2787. skdev->name, skdev->proto_ver,
  2788. FIT_PROTOCOL_VERSION_1);
  2789. pr_err("(%s): please upgrade driver\n",
  2790. skdev->name);
  2791. skdev->state = SKD_DRVR_STATE_PROTOCOL_MISMATCH;
  2792. skd_soft_reset(skdev);
  2793. break;
  2794. }
  2795. mtd = FIT_MXD_CONS(FIT_MTD_GET_CMDQ_DEPTH, 0, 0);
  2796. SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
  2797. skdev->last_mtd = mtd;
  2798. break;
  2799. case FIT_MTD_GET_CMDQ_DEPTH:
  2800. skdev->dev_max_queue_depth = FIT_MXD_DATA(mfd);
  2801. mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_DEPTH, 0,
  2802. SKD_N_COMPLETION_ENTRY);
  2803. SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
  2804. skdev->last_mtd = mtd;
  2805. break;
  2806. case FIT_MTD_SET_COMPQ_DEPTH:
  2807. SKD_WRITEQ(skdev, skdev->cq_dma_address, FIT_MSG_TO_DEVICE_ARG);
  2808. mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_ADDR, 0, 0);
  2809. SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
  2810. skdev->last_mtd = mtd;
  2811. break;
  2812. case FIT_MTD_SET_COMPQ_ADDR:
  2813. skd_reset_skcomp(skdev);
  2814. mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_HOST_ID, 0, skdev->devno);
  2815. SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
  2816. skdev->last_mtd = mtd;
  2817. break;
  2818. case FIT_MTD_CMD_LOG_HOST_ID:
  2819. skdev->connect_time_stamp = get_seconds();
  2820. data = skdev->connect_time_stamp & 0xFFFF;
  2821. mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_LO, 0, data);
  2822. SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
  2823. skdev->last_mtd = mtd;
  2824. break;
  2825. case FIT_MTD_CMD_LOG_TIME_STAMP_LO:
  2826. skdev->drive_jiffies = FIT_MXD_DATA(mfd);
  2827. data = (skdev->connect_time_stamp >> 16) & 0xFFFF;
  2828. mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_HI, 0, data);
  2829. SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
  2830. skdev->last_mtd = mtd;
  2831. break;
  2832. case FIT_MTD_CMD_LOG_TIME_STAMP_HI:
  2833. skdev->drive_jiffies |= (FIT_MXD_DATA(mfd) << 16);
  2834. mtd = FIT_MXD_CONS(FIT_MTD_ARM_QUEUE, 0, 0);
  2835. SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
  2836. skdev->last_mtd = mtd;
  2837. pr_err("(%s): Time sync driver=0x%x device=0x%x\n",
  2838. skd_name(skdev),
  2839. skdev->connect_time_stamp, skdev->drive_jiffies);
  2840. break;
  2841. case FIT_MTD_ARM_QUEUE:
  2842. skdev->last_mtd = 0;
  2843. /*
  2844. * State should be, or soon will be, FIT_SR_DRIVE_ONLINE.
  2845. */
  2846. break;
  2847. default:
  2848. break;
  2849. }
  2850. }
  2851. static void skd_disable_interrupts(struct skd_device *skdev)
  2852. {
  2853. u32 sense;
  2854. sense = SKD_READL(skdev, FIT_CONTROL);
  2855. sense &= ~FIT_CR_ENABLE_INTERRUPTS;
  2856. SKD_WRITEL(skdev, sense, FIT_CONTROL);
  2857. pr_debug("%s:%s:%d sense 0x%x\n",
  2858. skdev->name, __func__, __LINE__, sense);
  2859. /* Note that all ones are written. A 1 bit means
  2860. * disable, a 0 means enable.
  2861. */
  2862. SKD_WRITEL(skdev, ~0, FIT_INT_MASK_HOST);
  2863. }
  2864. static void skd_enable_interrupts(struct skd_device *skdev)
  2865. {
  2866. u32 val;
  2867. /* unmask interrupts first */
  2868. val = FIT_ISH_FW_STATE_CHANGE +
  2869. FIT_ISH_COMPLETION_POSTED + FIT_ISH_MSG_FROM_DEV;
  2870. /* Note that the complement of mask is written. A 1 bit means
  2871. * disable, a 0 means enable. */
  2872. SKD_WRITEL(skdev, ~val, FIT_INT_MASK_HOST);
  2873. pr_debug("%s:%s:%d interrupt mask=0x%x\n",
  2874. skdev->name, __func__, __LINE__, ~val);
  2875. val = SKD_READL(skdev, FIT_CONTROL);
  2876. val |= FIT_CR_ENABLE_INTERRUPTS;
  2877. pr_debug("%s:%s:%d control=0x%x\n",
  2878. skdev->name, __func__, __LINE__, val);
  2879. SKD_WRITEL(skdev, val, FIT_CONTROL);
  2880. }
  2881. /*
  2882. *****************************************************************************
  2883. * START, STOP, RESTART, QUIESCE, UNQUIESCE
  2884. *****************************************************************************
  2885. */
  2886. static void skd_soft_reset(struct skd_device *skdev)
  2887. {
  2888. u32 val;
  2889. val = SKD_READL(skdev, FIT_CONTROL);
  2890. val |= (FIT_CR_SOFT_RESET);
  2891. pr_debug("%s:%s:%d control=0x%x\n",
  2892. skdev->name, __func__, __LINE__, val);
  2893. SKD_WRITEL(skdev, val, FIT_CONTROL);
  2894. }
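/*
 * Bring the device up: ack stale interrupts, sample the drive state,
 * enable interrupts, then take the action the current state calls for
 * (wait for boot, time out a busy/sanitize/erase phase, soft reset, or
 * fault out so pending requests can be errored).
 */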
  2895. static void skd_start_device(struct skd_device *skdev)
  2896. {
  2897. unsigned long flags;
  2898. u32 sense;
  2899. u32 state;
  2900. spin_lock_irqsave(&skdev->lock, flags);
  2901. /* ack all ghost interrupts */
  2902. SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
  2903. sense = SKD_READL(skdev, FIT_STATUS);
  2904. pr_debug("%s:%s:%d initial status=0x%x\n",
  2905. skdev->name, __func__, __LINE__, sense);
  2906. state = sense & FIT_SR_DRIVE_STATE_MASK;
  2907. skdev->drive_state = state;
  2908. skdev->last_mtd = 0;
  2909. skdev->state = SKD_DRVR_STATE_STARTING;
  2910. skdev->timer_countdown = SKD_STARTING_TIMO;
  2911. skd_enable_interrupts(skdev);
  2912. switch (skdev->drive_state) {
  2913. case FIT_SR_DRIVE_OFFLINE:
  2914. pr_err("(%s): Drive offline...\n", skd_name(skdev));
  2915. break;
  2916. case FIT_SR_DRIVE_FW_BOOTING:
  2917. pr_debug("%s:%s:%d FIT_SR_DRIVE_FW_BOOTING %s\n",
  2918. skdev->name, __func__, __LINE__, skdev->name);
  2919. skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
  2920. skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
  2921. break;
  2922. case FIT_SR_DRIVE_BUSY_SANITIZE:
  2923. pr_info("(%s): Start: BUSY_SANITIZE\n",
  2924. skd_name(skdev));
  2925. skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
  2926. skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
  2927. break;
  2928. case FIT_SR_DRIVE_BUSY_ERASE:
  2929. pr_info("(%s): Start: BUSY_ERASE\n", skd_name(skdev));
  2930. skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
  2931. skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
  2932. break;
  2933. case FIT_SR_DRIVE_INIT:
  2934. case FIT_SR_DRIVE_ONLINE:
  2935. skd_soft_reset(skdev);
  2936. break;
  2937. case FIT_SR_DRIVE_BUSY:
  2938. pr_err("(%s): Drive Busy...\n", skd_name(skdev));
  2939. skdev->state = SKD_DRVR_STATE_BUSY;
  2940. skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
  2941. break;
  2942. case FIT_SR_DRIVE_SOFT_RESET:
  2943. pr_err("(%s) drive soft reset in prog\n",
  2944. skd_name(skdev));
  2945. break;
  2946. case FIT_SR_DRIVE_FAULT:
  2947. /* Fault state is bad...soft reset won't do it...
  2948. * Hard reset, maybe, but does it work on device?
  2949. * For now, just fault so the system doesn't hang.
  2950. */
  2951. skd_drive_fault(skdev);
  2952. /* start the queue so we can respond with error to requests */
  2953. pr_debug("%s:%s:%d starting %s queue\n",
  2954. skdev->name, __func__, __LINE__, skdev->name);
  2955. blk_start_queue(skdev->queue);
  2956. skdev->gendisk_on = -1;
  2957. wake_up_interruptible(&skdev->waitq);
  2958. break;
  2959. case 0xFF:
  2960. /* Most likely the device isn't there or isn't responding
  2961. * to the BAR1 addresses. */
  2962. skd_drive_disappeared(skdev);
  2963. /* start the queue so we can respond with error to requests */
  2964. pr_debug("%s:%s:%d starting %s queue to error-out reqs\n",
  2965. skdev->name, __func__, __LINE__, skdev->name);
  2966. blk_start_queue(skdev->queue);
  2967. skdev->gendisk_on = -1;
  2968. wake_up_interruptible(&skdev->waitq);
  2969. break;
  2970. default:
  2971. pr_err("(%s) Start: unknown state %x\n",
  2972. skd_name(skdev), skdev->drive_state);
  2973. break;
  2974. }
  2975. state = SKD_READL(skdev, FIT_CONTROL);
  2976. pr_debug("%s:%s:%d FIT Control Status=0x%x\n",
  2977. skdev->name, __func__, __LINE__, state);
  2978. state = SKD_READL(skdev, FIT_INT_STATUS_HOST);
  2979. pr_debug("%s:%s:%d Intr Status=0x%x\n",
  2980. skdev->name, __func__, __LINE__, state);
  2981. state = SKD_READL(skdev, FIT_INT_MASK_HOST);
  2982. pr_debug("%s:%s:%d Intr Mask=0x%x\n",
  2983. skdev->name, __func__, __LINE__, state);
  2984. state = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
  2985. pr_debug("%s:%s:%d Msg from Dev=0x%x\n",
  2986. skdev->name, __func__, __LINE__, state);
  2987. state = SKD_READL(skdev, FIT_HW_VERSION);
  2988. pr_debug("%s:%s:%d HW version=0x%x\n",
  2989. skdev->name, __func__, __LINE__, state);
  2990. spin_unlock_irqrestore(&skdev->lock, flags);
  2991. }
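/*
 * Orderly shutdown: if online, issue SYNCHRONIZE CACHE through the
 * internal special request and wait up to 10 seconds for it, then stop
 * the timer, mask interrupts and soft-reset the device, polling briefly
 * for it to return to the INIT state.
 */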
  2992. static void skd_stop_device(struct skd_device *skdev)
  2993. {
  2994. unsigned long flags;
  2995. struct skd_special_context *skspcl = &skdev->internal_skspcl;
  2996. u32 dev_state;
  2997. int i;
  2998. spin_lock_irqsave(&skdev->lock, flags);
  2999. if (skdev->state != SKD_DRVR_STATE_ONLINE) {
  3000. pr_err("(%s): skd_stop_device not online no sync\n",
  3001. skd_name(skdev));
  3002. goto stop_out;
  3003. }
  3004. if (skspcl->req.state != SKD_REQ_STATE_IDLE) {
  3005. pr_err("(%s): skd_stop_device no special\n",
  3006. skd_name(skdev));
  3007. goto stop_out;
  3008. }
  3009. skdev->state = SKD_DRVR_STATE_SYNCING;
  3010. skdev->sync_done = 0;
  3011. skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE);
  3012. spin_unlock_irqrestore(&skdev->lock, flags);
  3013. wait_event_interruptible_timeout(skdev->waitq,
  3014. (skdev->sync_done), (10 * HZ));
  3015. spin_lock_irqsave(&skdev->lock, flags);
  3016. switch (skdev->sync_done) {
  3017. case 0:
  3018. pr_err("(%s): skd_stop_device no sync\n",
  3019. skd_name(skdev));
  3020. break;
  3021. case 1:
  3022. pr_err("(%s): skd_stop_device sync done\n",
  3023. skd_name(skdev));
  3024. break;
  3025. default:
  3026. pr_err("(%s): skd_stop_device sync error\n",
  3027. skd_name(skdev));
  3028. }
  3029. stop_out:
  3030. skdev->state = SKD_DRVR_STATE_STOPPING;
  3031. spin_unlock_irqrestore(&skdev->lock, flags);
  3032. skd_kill_timer(skdev);
  3033. spin_lock_irqsave(&skdev->lock, flags);
  3034. skd_disable_interrupts(skdev);
  3035. /* ensure all ints on device are cleared */
  3036. /* soft reset the device to unload with a clean slate */
  3037. SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
  3038. SKD_WRITEL(skdev, FIT_CR_SOFT_RESET, FIT_CONTROL);
  3039. spin_unlock_irqrestore(&skdev->lock, flags);
  3040. /* poll every 100ms, 1 second timeout */
  3041. for (i = 0; i < 10; i++) {
  3042. dev_state =
  3043. SKD_READL(skdev, FIT_STATUS) & FIT_SR_DRIVE_STATE_MASK;
  3044. if (dev_state == FIT_SR_DRIVE_INIT)
  3045. break;
  3046. set_current_state(TASK_INTERRUPTIBLE);
  3047. schedule_timeout(msecs_to_jiffies(100));
  3048. }
  3049. if (dev_state != FIT_SR_DRIVE_INIT)
  3050. pr_err("(%s): skd_stop_device state error 0x%02x\n",
  3051. skd_name(skdev), dev_state);
  3052. }
  3053. /* assume spinlock is held */
  3054. static void skd_restart_device(struct skd_device *skdev)
  3055. {
  3056. u32 state;
  3057. /* ack all ghost interrupts */
  3058. SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
  3059. state = SKD_READL(skdev, FIT_STATUS);
  3060. pr_debug("%s:%s:%d drive status=0x%x\n",
  3061. skdev->name, __func__, __LINE__, state);
  3062. state &= FIT_SR_DRIVE_STATE_MASK;
  3063. skdev->drive_state = state;
  3064. skdev->last_mtd = 0;
  3065. skdev->state = SKD_DRVR_STATE_RESTARTING;
  3066. skdev->timer_countdown = SKD_RESTARTING_TIMO;
  3067. skd_soft_reset(skdev);
  3068. }
  3069. /* assume spinlock is held */
  3070. static int skd_quiesce_dev(struct skd_device *skdev)
  3071. {
  3072. int rc = 0;
  3073. switch (skdev->state) {
  3074. case SKD_DRVR_STATE_BUSY:
  3075. case SKD_DRVR_STATE_BUSY_IMMINENT:
  3076. pr_debug("%s:%s:%d stopping %s queue\n",
  3077. skdev->name, __func__, __LINE__, skdev->name);
  3078. blk_stop_queue(skdev->queue);
  3079. break;
  3080. case SKD_DRVR_STATE_ONLINE:
  3081. case SKD_DRVR_STATE_STOPPING:
  3082. case SKD_DRVR_STATE_SYNCING:
  3083. case SKD_DRVR_STATE_PAUSING:
  3084. case SKD_DRVR_STATE_PAUSED:
  3085. case SKD_DRVR_STATE_STARTING:
  3086. case SKD_DRVR_STATE_RESTARTING:
  3087. case SKD_DRVR_STATE_RESUMING:
  3088. default:
  3089. rc = -EINVAL;
  3090. pr_debug("%s:%s:%d state [%d] not implemented\n",
  3091. skdev->name, __func__, __LINE__, skdev->state);
  3092. }
  3093. return rc;
  3094. }
  3095. /* assume spinlock is held */
  3096. static int skd_unquiesce_dev(struct skd_device *skdev)
  3097. {
  3098. int prev_driver_state = skdev->state;
  3099. skd_log_skdev(skdev, "unquiesce");
  3100. if (skdev->state == SKD_DRVR_STATE_ONLINE) {
  3101. pr_debug("%s:%s:%d **** device already ONLINE\n",
  3102. skdev->name, __func__, __LINE__);
  3103. return 0;
  3104. }
  3105. if (skdev->drive_state != FIT_SR_DRIVE_ONLINE) {
  3106. /*
  3107. * If there has been a state change to other than
  3108. * ONLINE, we will rely on controller state change
  3109. * to come back online and restart the queue.
  3110. * The BUSY state means that driver is ready to
  3111. * continue normal processing but waiting for controller
  3112. * to become available.
  3113. */
  3114. skdev->state = SKD_DRVR_STATE_BUSY;
  3115. pr_debug("%s:%s:%d drive BUSY state\n",
  3116. skdev->name, __func__, __LINE__);
  3117. return 0;
  3118. }
  3119. /*
  3120. * Drive has just come online, driver is either in startup,
  3121. * paused performing a task, or busy waiting for hardware.
  3122. */
  3123. switch (skdev->state) {
  3124. case SKD_DRVR_STATE_PAUSED:
  3125. case SKD_DRVR_STATE_BUSY:
  3126. case SKD_DRVR_STATE_BUSY_IMMINENT:
  3127. case SKD_DRVR_STATE_BUSY_ERASE:
  3128. case SKD_DRVR_STATE_STARTING:
  3129. case SKD_DRVR_STATE_RESTARTING:
  3130. case SKD_DRVR_STATE_FAULT:
  3131. case SKD_DRVR_STATE_IDLE:
  3132. case SKD_DRVR_STATE_LOAD:
  3133. skdev->state = SKD_DRVR_STATE_ONLINE;
  3134. pr_err("(%s): Driver state %s(%d)=>%s(%d)\n",
  3135. skd_name(skdev),
  3136. skd_skdev_state_to_str(prev_driver_state),
  3137. prev_driver_state, skd_skdev_state_to_str(skdev->state),
  3138. skdev->state);
  3139. pr_debug("%s:%s:%d **** device ONLINE...starting block queue\n",
  3140. skdev->name, __func__, __LINE__);
  3141. pr_debug("%s:%s:%d starting %s queue\n",
  3142. skdev->name, __func__, __LINE__, skdev->name);
  3143. pr_info("(%s): STEC s1120 ONLINE\n", skd_name(skdev));
  3144. blk_start_queue(skdev->queue);
  3145. skdev->gendisk_on = 1;
  3146. wake_up_interruptible(&skdev->waitq);
  3147. break;
  3148. case SKD_DRVR_STATE_DISAPPEARED:
  3149. default:
  3150. pr_debug("%s:%s:%d **** driver state %d, not implemented\n",
  3151. skdev->name, __func__, __LINE__,
  3152. skdev->state);
  3153. return -EBUSY;
  3154. }
  3155. return 0;
  3156. }
  3157. /*
  3158. *****************************************************************************
  3159. * PCIe MSI/MSI-X INTERRUPT HANDLERS
  3160. *****************************************************************************
  3161. */
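/*
 * Per-vector MSI-X handlers. Each one acks only its own bit(s) in
 * FIT_INT_STATUS_HOST and runs the matching piece of the legacy ISR.
 */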
  3162. static irqreturn_t skd_reserved_isr(int irq, void *skd_host_data)
  3163. {
  3164. struct skd_device *skdev = skd_host_data;
  3165. unsigned long flags;
  3166. spin_lock_irqsave(&skdev->lock, flags);
  3167. pr_debug("%s:%s:%d MSIX = 0x%x\n",
  3168. skdev->name, __func__, __LINE__,
  3169. SKD_READL(skdev, FIT_INT_STATUS_HOST));
  3170. pr_err("(%s): MSIX reserved irq %d = 0x%x\n", skd_name(skdev),
  3171. irq, SKD_READL(skdev, FIT_INT_STATUS_HOST));
  3172. SKD_WRITEL(skdev, FIT_INT_RESERVED_MASK, FIT_INT_STATUS_HOST);
  3173. spin_unlock_irqrestore(&skdev->lock, flags);
  3174. return IRQ_HANDLED;
  3175. }
  3176. static irqreturn_t skd_statec_isr(int irq, void *skd_host_data)
  3177. {
  3178. struct skd_device *skdev = skd_host_data;
  3179. unsigned long flags;
  3180. spin_lock_irqsave(&skdev->lock, flags);
  3181. pr_debug("%s:%s:%d MSIX = 0x%x\n",
  3182. skdev->name, __func__, __LINE__,
  3183. SKD_READL(skdev, FIT_INT_STATUS_HOST));
  3184. SKD_WRITEL(skdev, FIT_ISH_FW_STATE_CHANGE, FIT_INT_STATUS_HOST);
  3185. skd_isr_fwstate(skdev);
  3186. spin_unlock_irqrestore(&skdev->lock, flags);
  3187. return IRQ_HANDLED;
  3188. }
  3189. static irqreturn_t skd_comp_q(int irq, void *skd_host_data)
  3190. {
  3191. struct skd_device *skdev = skd_host_data;
  3192. unsigned long flags;
  3193. int flush_enqueued = 0;
  3194. int deferred;
  3195. spin_lock_irqsave(&skdev->lock, flags);
  3196. pr_debug("%s:%s:%d MSIX = 0x%x\n",
  3197. skdev->name, __func__, __LINE__,
  3198. SKD_READL(skdev, FIT_INT_STATUS_HOST));
  3199. SKD_WRITEL(skdev, FIT_ISH_COMPLETION_POSTED, FIT_INT_STATUS_HOST);
  3200. deferred = skd_isr_completion_posted(skdev, skd_isr_comp_limit,
  3201. &flush_enqueued);
  3202. if (flush_enqueued)
  3203. skd_request_fn(skdev->queue);
  3204. if (deferred)
  3205. schedule_work(&skdev->completion_worker);
  3206. else if (!flush_enqueued)
  3207. skd_request_fn(skdev->queue);
  3208. spin_unlock_irqrestore(&skdev->lock, flags);
  3209. return IRQ_HANDLED;
  3210. }
  3211. static irqreturn_t skd_msg_isr(int irq, void *skd_host_data)
  3212. {
  3213. struct skd_device *skdev = skd_host_data;
  3214. unsigned long flags;
  3215. spin_lock_irqsave(&skdev->lock, flags);
  3216. pr_debug("%s:%s:%d MSIX = 0x%x\n",
  3217. skdev->name, __func__, __LINE__,
  3218. SKD_READL(skdev, FIT_INT_STATUS_HOST));
  3219. SKD_WRITEL(skdev, FIT_ISH_MSG_FROM_DEV, FIT_INT_STATUS_HOST);
  3220. skd_isr_msg_from_dev(skdev);
  3221. spin_unlock_irqrestore(&skdev->lock, flags);
  3222. return IRQ_HANDLED;
  3223. }
  3224. static irqreturn_t skd_qfull_isr(int irq, void *skd_host_data)
  3225. {
  3226. struct skd_device *skdev = skd_host_data;
  3227. unsigned long flags;
  3228. spin_lock_irqsave(&skdev->lock, flags);
  3229. pr_debug("%s:%s:%d MSIX = 0x%x\n",
  3230. skdev->name, __func__, __LINE__,
  3231. SKD_READL(skdev, FIT_INT_STATUS_HOST));
  3232. SKD_WRITEL(skdev, FIT_INT_QUEUE_FULL, FIT_INT_STATUS_HOST);
  3233. spin_unlock_irqrestore(&skdev->lock, flags);
  3234. return IRQ_HANDLED;
  3235. }
  3236. /*
  3237. *****************************************************************************
  3238. * PCIe MSI/MSI-X SETUP
  3239. *****************************************************************************
  3240. */
  3241. struct skd_msix_entry {
  3242. int have_irq;
  3243. u32 vector;
  3244. u32 entry;
  3245. struct skd_device *rsp;
  3246. char isr_name[30];
  3247. };
  3248. struct skd_init_msix_entry {
  3249. const char *name;
  3250. irq_handler_t handler;
  3251. };
  3252. #define SKD_MAX_MSIX_COUNT 13
  3253. #define SKD_MIN_MSIX_COUNT 7
  3254. #define SKD_BASE_MSIX_IRQ 4
  3255. static struct skd_init_msix_entry msix_entries[SKD_MAX_MSIX_COUNT] = {
  3256. { "(DMA 0)", skd_reserved_isr },
  3257. { "(DMA 1)", skd_reserved_isr },
  3258. { "(DMA 2)", skd_reserved_isr },
  3259. { "(DMA 3)", skd_reserved_isr },
  3260. { "(State Change)", skd_statec_isr },
  3261. { "(COMPL_Q)", skd_comp_q },
  3262. { "(MSG)", skd_msg_isr },
  3263. { "(Reserved)", skd_reserved_isr },
  3264. { "(Reserved)", skd_reserved_isr },
  3265. { "(Queue Full 0)", skd_qfull_isr },
  3266. { "(Queue Full 1)", skd_qfull_isr },
  3267. { "(Queue Full 2)", skd_qfull_isr },
  3268. { "(Queue Full 3)", skd_qfull_isr },
  3269. };
  3270. static void skd_release_msix(struct skd_device *skdev)
  3271. {
  3272. struct skd_msix_entry *qentry;
  3273. int i;
  3274. if (skdev->msix_entries) {
  3275. for (i = 0; i < skdev->msix_count; i++) {
  3276. qentry = &skdev->msix_entries[i];
  3277. skdev = qentry->rsp;
  3278. if (qentry->have_irq)
  3279. devm_free_irq(&skdev->pdev->dev,
  3280. qentry->vector, qentry->rsp);
  3281. }
  3282. kfree(skdev->msix_entries);
  3283. }
  3284. if (skdev->msix_count)
  3285. pci_disable_msix(skdev->pdev);
  3286. skdev->msix_count = 0;
  3287. skdev->msix_entries = NULL;
  3288. }
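/*
 * Enable MSI-X on the PCI device and request one IRQ for each of the
 * SKD_MAX_MSIX_COUNT entries in msix_entries[], releasing everything
 * via skd_release_msix() on any failure.
 */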
  3289. static int skd_acquire_msix(struct skd_device *skdev)
  3290. {
  3291. int i, rc;
  3292. struct pci_dev *pdev = skdev->pdev;
  3293. struct msix_entry *entries;
  3294. struct skd_msix_entry *qentry;
  3295. entries = kzalloc(sizeof(struct msix_entry) * SKD_MAX_MSIX_COUNT,
  3296. GFP_KERNEL);
  3297. if (!entries)
  3298. return -ENOMEM;
  3299. for (i = 0; i < SKD_MAX_MSIX_COUNT; i++)
  3300. entries[i].entry = i;
  3301. rc = pci_enable_msix_exact(pdev, entries, SKD_MAX_MSIX_COUNT);
  3302. if (rc) {
  3303. pr_err("(%s): failed to enable MSI-X %d\n",
  3304. skd_name(skdev), rc);
  3305. goto msix_out;
  3306. }
  3307. skdev->msix_count = SKD_MAX_MSIX_COUNT;
  3308. skdev->msix_entries = kzalloc(sizeof(struct skd_msix_entry) *
  3309. skdev->msix_count, GFP_KERNEL);
  3310. if (!skdev->msix_entries) {
  3311. rc = -ENOMEM;
  3312. pr_err("(%s): msix table allocation error\n",
  3313. skd_name(skdev));
  3314. goto msix_out;
  3315. }
  3316. for (i = 0; i < skdev->msix_count; i++) {
  3317. qentry = &skdev->msix_entries[i];
  3318. qentry->vector = entries[i].vector;
  3319. qentry->entry = entries[i].entry;
  3320. qentry->rsp = NULL;
  3321. qentry->have_irq = 0;
  3322. pr_debug("%s:%s:%d %s: <%s> msix (%d) vec %d, entry %x\n",
  3323. skdev->name, __func__, __LINE__,
  3324. pci_name(pdev), skdev->name,
  3325. i, qentry->vector, qentry->entry);
  3326. }
  3327. /* Enable MSI-X vectors for the base queue */
  3328. for (i = 0; i < skdev->msix_count; i++) {
  3329. qentry = &skdev->msix_entries[i];
  3330. snprintf(qentry->isr_name, sizeof(qentry->isr_name),
  3331. "%s%d-msix %s", DRV_NAME, skdev->devno,
  3332. msix_entries[i].name);
  3333. rc = devm_request_irq(&skdev->pdev->dev, qentry->vector,
  3334. msix_entries[i].handler, 0,
  3335. qentry->isr_name, skdev);
  3336. if (rc) {
  3337. pr_err("(%s): Unable to register(%d) MSI-X "
  3338. "handler %d: %s\n",
  3339. skd_name(skdev), rc, i, qentry->isr_name);
  3340. goto msix_out;
  3341. } else {
  3342. qentry->have_irq = 1;
  3343. qentry->rsp = skdev;
  3344. }
  3345. }
  3346. pr_debug("%s:%s:%d %s: <%s> msix %d irq(s) enabled\n",
  3347. skdev->name, __func__, __LINE__,
  3348. pci_name(pdev), skdev->name, skdev->msix_count);
  3349. return 0;
  3350. msix_out:
  3351. if (entries)
  3352. kfree(entries);
  3353. skd_release_msix(skdev);
  3354. return rc;
  3355. }
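/*
 * Interrupt setup with fallback: try MSI-X first, then a single MSI,
 * then a shared legacy INTx line, downgrading skdev->irq_type and
 * retrying on each failure.
 */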
  3356. static int skd_acquire_irq(struct skd_device *skdev)
  3357. {
  3358. int rc;
  3359. struct pci_dev *pdev;
  3360. pdev = skdev->pdev;
  3361. skdev->msix_count = 0;
  3362. RETRY_IRQ_TYPE:
  3363. switch (skdev->irq_type) {
  3364. case SKD_IRQ_MSIX:
  3365. rc = skd_acquire_msix(skdev);
  3366. if (!rc)
  3367. pr_info("(%s): MSI-X %d irqs enabled\n",
  3368. skd_name(skdev), skdev->msix_count);
  3369. else {
  3370. pr_err(
  3371. "(%s): failed to enable MSI-X, re-trying with MSI %d\n",
  3372. skd_name(skdev), rc);
  3373. skdev->irq_type = SKD_IRQ_MSI;
  3374. goto RETRY_IRQ_TYPE;
  3375. }
  3376. break;
  3377. case SKD_IRQ_MSI:
  3378. snprintf(skdev->isr_name, sizeof(skdev->isr_name), "%s%d-msi",
  3379. DRV_NAME, skdev->devno);
  3380. rc = pci_enable_msi_range(pdev, 1, 1);
  3381. if (rc > 0) {
  3382. rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr, 0,
  3383. skdev->isr_name, skdev);
  3384. if (rc) {
  3385. pci_disable_msi(pdev);
  3386. pr_err(
  3387. "(%s): failed to allocate the MSI interrupt %d\n",
  3388. skd_name(skdev), rc);
  3389. goto RETRY_IRQ_LEGACY;
  3390. }
  3391. pr_info("(%s): MSI irq %d enabled\n",
  3392. skd_name(skdev), pdev->irq);
  3393. } else {
  3394. RETRY_IRQ_LEGACY:
  3395. pr_err(
  3396. "(%s): failed to enable MSI, re-trying with LEGACY %d\n",
  3397. skd_name(skdev), rc);
  3398. skdev->irq_type = SKD_IRQ_LEGACY;
  3399. goto RETRY_IRQ_TYPE;
  3400. }
  3401. break;
  3402. case SKD_IRQ_LEGACY:
  3403. snprintf(skdev->isr_name, sizeof(skdev->isr_name),
  3404. "%s%d-legacy", DRV_NAME, skdev->devno);
  3405. rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr,
  3406. IRQF_SHARED, skdev->isr_name, skdev);
  3407. if (!rc)
  3408. pr_info("(%s): LEGACY irq %d enabled\n",
  3409. skd_name(skdev), pdev->irq);
  3410. else
  3411. pr_err("(%s): request LEGACY irq error %d\n",
  3412. skd_name(skdev), rc);
  3413. break;
  3414. default:
  3415. pr_info("(%s): irq_type %d invalid, re-set to %d\n",
  3416. skd_name(skdev), skdev->irq_type, SKD_IRQ_DEFAULT);
  3417. skdev->irq_type = SKD_IRQ_LEGACY;
  3418. goto RETRY_IRQ_TYPE;
  3419. }
  3420. return rc;
  3421. }
  3422. static void skd_release_irq(struct skd_device *skdev)
  3423. {
  3424. switch (skdev->irq_type) {
  3425. case SKD_IRQ_MSIX:
  3426. skd_release_msix(skdev);
  3427. break;
  3428. case SKD_IRQ_MSI:
  3429. devm_free_irq(&skdev->pdev->dev, skdev->pdev->irq, skdev);
  3430. pci_disable_msi(skdev->pdev);
  3431. break;
  3432. case SKD_IRQ_LEGACY:
  3433. devm_free_irq(&skdev->pdev->dev, skdev->pdev->irq, skdev);
  3434. break;
  3435. default:
  3436. pr_err("(%s): wrong irq type %d!",
  3437. skd_name(skdev), skdev->irq_type);
  3438. break;
  3439. }
  3440. }
  3441. /*
  3442. *****************************************************************************
  3443. * CONSTRUCT
  3444. *****************************************************************************
  3445. */
  3446. static int skd_cons_skcomp(struct skd_device *skdev)
  3447. {
  3448. int rc = 0;
  3449. struct fit_completion_entry_v1 *skcomp;
  3450. u32 nbytes;
  3451. nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY;
  3452. nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY;
  3453. pr_debug("%s:%s:%d comp pci_alloc, total bytes %d entries %d\n",
  3454. skdev->name, __func__, __LINE__,
  3455. nbytes, SKD_N_COMPLETION_ENTRY);
  3456. skcomp = pci_zalloc_consistent(skdev->pdev, nbytes,
  3457. &skdev->cq_dma_address);
  3458. if (skcomp == NULL) {
  3459. rc = -ENOMEM;
  3460. goto err_out;
  3461. }
  3462. skdev->skcomp_table = skcomp;
  3463. skdev->skerr_table = (struct fit_comp_error_info *)((char *)skcomp +
  3464. sizeof(*skcomp) *
  3465. SKD_N_COMPLETION_ENTRY);
  3466. err_out:
  3467. return rc;
  3468. }
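/*
 * Allocate the FIT message contexts. Each DMA buffer is over-allocated
 * by 64 bytes and the pointer/DMA address rounded up so the address
 * handed to the device satisfies FIT_QCMD_BASE_ADDRESS_MASK; the
 * low-order bits are remembered in skmsg->offset.
 */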
  3469. static int skd_cons_skmsg(struct skd_device *skdev)
  3470. {
  3471. int rc = 0;
  3472. u32 i;
  3473. pr_debug("%s:%s:%d skmsg_table kzalloc, struct %lu, count %u total %lu\n",
  3474. skdev->name, __func__, __LINE__,
  3475. sizeof(struct skd_fitmsg_context),
  3476. skdev->num_fitmsg_context,
  3477. sizeof(struct skd_fitmsg_context) * skdev->num_fitmsg_context);
  3478. skdev->skmsg_table = kzalloc(sizeof(struct skd_fitmsg_context)
  3479. *skdev->num_fitmsg_context, GFP_KERNEL);
  3480. if (skdev->skmsg_table == NULL) {
  3481. rc = -ENOMEM;
  3482. goto err_out;
  3483. }
  3484. for (i = 0; i < skdev->num_fitmsg_context; i++) {
  3485. struct skd_fitmsg_context *skmsg;
  3486. skmsg = &skdev->skmsg_table[i];
  3487. skmsg->id = i + SKD_ID_FIT_MSG;
  3488. skmsg->state = SKD_MSG_STATE_IDLE;
  3489. skmsg->msg_buf = pci_alloc_consistent(skdev->pdev,
  3490. SKD_N_FITMSG_BYTES + 64,
  3491. &skmsg->mb_dma_address);
  3492. if (skmsg->msg_buf == NULL) {
  3493. rc = -ENOMEM;
  3494. goto err_out;
  3495. }
  3496. skmsg->offset = (u32)((u64)skmsg->msg_buf &
  3497. (~FIT_QCMD_BASE_ADDRESS_MASK));
  3498. skmsg->msg_buf += ~FIT_QCMD_BASE_ADDRESS_MASK;
  3499. skmsg->msg_buf = (u8 *)((u64)skmsg->msg_buf &
  3500. FIT_QCMD_BASE_ADDRESS_MASK);
  3501. skmsg->mb_dma_address += ~FIT_QCMD_BASE_ADDRESS_MASK;
  3502. skmsg->mb_dma_address &= FIT_QCMD_BASE_ADDRESS_MASK;
  3503. memset(skmsg->msg_buf, 0, SKD_N_FITMSG_BYTES);
  3504. skmsg->next = &skmsg[1];
  3505. }
  3506. /* Free list is in order starting with the 0th entry. */
  3507. skdev->skmsg_table[i - 1].next = NULL;
  3508. skdev->skmsg_free_list = skdev->skmsg_table;
  3509. err_out:
  3510. return rc;
  3511. }
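/*
 * Build a DMA-coherent array of FIT SG descriptors pre-linked into a
 * chain via next_desc_ptr, with the last descriptor terminating the list.
 */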
  3512. static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev,
  3513. u32 n_sg,
  3514. dma_addr_t *ret_dma_addr)
  3515. {
  3516. struct fit_sg_descriptor *sg_list;
  3517. u32 nbytes;
  3518. nbytes = sizeof(*sg_list) * n_sg;
  3519. sg_list = pci_alloc_consistent(skdev->pdev, nbytes, ret_dma_addr);
  3520. if (sg_list != NULL) {
  3521. uint64_t dma_address = *ret_dma_addr;
  3522. u32 i;
  3523. memset(sg_list, 0, nbytes);
  3524. for (i = 0; i < n_sg - 1; i++) {
  3525. uint64_t ndp_off;
  3526. ndp_off = (i + 1) * sizeof(struct fit_sg_descriptor);
  3527. sg_list[i].next_desc_ptr = dma_address + ndp_off;
  3528. }
  3529. sg_list[i].next_desc_ptr = 0LL;
  3530. }
  3531. return sg_list;
  3532. }
  3533. static int skd_cons_skreq(struct skd_device *skdev)
  3534. {
  3535. int rc = 0;
  3536. u32 i;
  3537. pr_debug("%s:%s:%d skreq_table kzalloc, struct %lu, count %u total %lu\n",
  3538. skdev->name, __func__, __LINE__,
  3539. sizeof(struct skd_request_context),
  3540. skdev->num_req_context,
  3541. sizeof(struct skd_request_context) * skdev->num_req_context);
  3542. skdev->skreq_table = kzalloc(sizeof(struct skd_request_context)
  3543. * skdev->num_req_context, GFP_KERNEL);
  3544. if (skdev->skreq_table == NULL) {
  3545. rc = -ENOMEM;
  3546. goto err_out;
  3547. }
  3548. pr_debug("%s:%s:%d alloc sg_table sg_per_req %u scatlist %lu total %lu\n",
  3549. skdev->name, __func__, __LINE__,
  3550. skdev->sgs_per_request, sizeof(struct scatterlist),
  3551. skdev->sgs_per_request * sizeof(struct scatterlist));
  3552. for (i = 0; i < skdev->num_req_context; i++) {
  3553. struct skd_request_context *skreq;
  3554. skreq = &skdev->skreq_table[i];
  3555. skreq->id = i + SKD_ID_RW_REQUEST;
  3556. skreq->state = SKD_REQ_STATE_IDLE;
  3557. skreq->sg = kzalloc(sizeof(struct scatterlist) *
  3558. skdev->sgs_per_request, GFP_KERNEL);
  3559. if (skreq->sg == NULL) {
  3560. rc = -ENOMEM;
  3561. goto err_out;
  3562. }
  3563. sg_init_table(skreq->sg, skdev->sgs_per_request);
  3564. skreq->sksg_list = skd_cons_sg_list(skdev,
  3565. skdev->sgs_per_request,
  3566. &skreq->sksg_dma_address);
  3567. if (skreq->sksg_list == NULL) {
  3568. rc = -ENOMEM;
  3569. goto err_out;
  3570. }
  3571. skreq->next = &skreq[1];
  3572. }
  3573. /* Free list is in order starting with the 0th entry. */
  3574. skdev->skreq_table[i - 1].next = NULL;
  3575. skdev->skreq_free_list = skdev->skreq_table;
  3576. err_out:
  3577. return rc;
  3578. }
  3579. static int skd_cons_skspcl(struct skd_device *skdev)
  3580. {
  3581. int rc = 0;
  3582. u32 i, nbytes;
  3583. pr_debug("%s:%s:%d skspcl_table kzalloc, struct %lu, count %u total %lu\n",
  3584. skdev->name, __func__, __LINE__,
  3585. sizeof(struct skd_special_context),
  3586. skdev->n_special,
  3587. sizeof(struct skd_special_context) * skdev->n_special);
  3588. skdev->skspcl_table = kzalloc(sizeof(struct skd_special_context)
  3589. * skdev->n_special, GFP_KERNEL);
  3590. if (skdev->skspcl_table == NULL) {
  3591. rc = -ENOMEM;
  3592. goto err_out;
  3593. }
  3594. for (i = 0; i < skdev->n_special; i++) {
  3595. struct skd_special_context *skspcl;
  3596. skspcl = &skdev->skspcl_table[i];
  3597. skspcl->req.id = i + SKD_ID_SPECIAL_REQUEST;
  3598. skspcl->req.state = SKD_REQ_STATE_IDLE;
  3599. skspcl->req.next = &skspcl[1].req;
  3600. nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
  3601. skspcl->msg_buf =
  3602. pci_zalloc_consistent(skdev->pdev, nbytes,
  3603. &skspcl->mb_dma_address);
  3604. if (skspcl->msg_buf == NULL) {
  3605. rc = -ENOMEM;
  3606. goto err_out;
  3607. }
  3608. skspcl->req.sg = kzalloc(sizeof(struct scatterlist) *
  3609. SKD_N_SG_PER_SPECIAL, GFP_KERNEL);
  3610. if (skspcl->req.sg == NULL) {
  3611. rc = -ENOMEM;
  3612. goto err_out;
  3613. }
  3614. skspcl->req.sksg_list = skd_cons_sg_list(skdev,
  3615. SKD_N_SG_PER_SPECIAL,
  3616. &skspcl->req.
  3617. sksg_dma_address);
  3618. if (skspcl->req.sksg_list == NULL) {
  3619. rc = -ENOMEM;
  3620. goto err_out;
  3621. }
  3622. }
  3623. /* Free list is in order starting with the 0th entry. */
  3624. skdev->skspcl_table[i - 1].req.next = NULL;
  3625. skdev->skspcl_free_list = skdev->skspcl_table;
  3626. return rc;
  3627. err_out:
  3628. return rc;
  3629. }
  3630. static int skd_cons_sksb(struct skd_device *skdev)
  3631. {
  3632. int rc = 0;
  3633. struct skd_special_context *skspcl;
  3634. u32 nbytes;
  3635. skspcl = &skdev->internal_skspcl;
  3636. skspcl->req.id = 0 + SKD_ID_INTERNAL;
  3637. skspcl->req.state = SKD_REQ_STATE_IDLE;
  3638. nbytes = SKD_N_INTERNAL_BYTES;
  3639. skspcl->data_buf = pci_zalloc_consistent(skdev->pdev, nbytes,
  3640. &skspcl->db_dma_address);
  3641. if (skspcl->data_buf == NULL) {
  3642. rc = -ENOMEM;
  3643. goto err_out;
  3644. }
  3645. nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
  3646. skspcl->msg_buf = pci_zalloc_consistent(skdev->pdev, nbytes,
  3647. &skspcl->mb_dma_address);
  3648. if (skspcl->msg_buf == NULL) {
  3649. rc = -ENOMEM;
  3650. goto err_out;
  3651. }
  3652. skspcl->req.sksg_list = skd_cons_sg_list(skdev, 1,
  3653. &skspcl->req.sksg_dma_address);
  3654. if (skspcl->req.sksg_list == NULL) {
  3655. rc = -ENOMEM;
  3656. goto err_out;
  3657. }
  3658. if (!skd_format_internal_skspcl(skdev)) {
  3659. rc = -EINVAL;
  3660. goto err_out;
  3661. }
  3662. err_out:
  3663. return rc;
  3664. }
  3665. static int skd_cons_disk(struct skd_device *skdev)
  3666. {
  3667. int rc = 0;
  3668. struct gendisk *disk;
  3669. struct request_queue *q;
  3670. unsigned long flags;
  3671. disk = alloc_disk(SKD_MINORS_PER_DEVICE);
  3672. if (!disk) {
  3673. rc = -ENOMEM;
  3674. goto err_out;
  3675. }
  3676. skdev->disk = disk;
  3677. sprintf(disk->disk_name, DRV_NAME "%u", skdev->devno);
  3678. disk->major = skdev->major;
  3679. disk->first_minor = skdev->devno * SKD_MINORS_PER_DEVICE;
  3680. disk->fops = &skd_blockdev_ops;
  3681. disk->private_data = skdev;
  3682. q = blk_init_queue(skd_request_fn, &skdev->lock);
  3683. if (!q) {
  3684. rc = -ENOMEM;
  3685. goto err_out;
  3686. }
  3687. skdev->queue = q;
  3688. disk->queue = q;
  3689. q->queuedata = skdev;
  3690. blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
  3691. blk_queue_max_segments(q, skdev->sgs_per_request);
  3692. blk_queue_max_hw_sectors(q, SKD_N_MAX_SECTORS);
  3693. /* set sysfs optimal_io_size to 8K */
  3694. blk_queue_io_opt(q, 8192);
  3695. /* DISCARD Flag initialization. */
  3696. q->limits.discard_granularity = 8192;
  3697. q->limits.discard_alignment = 0;
  3698. q->limits.max_discard_sectors = UINT_MAX >> 9;
  3699. q->limits.discard_zeroes_data = 1;
  3700. queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
  3701. queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
  3702. queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
  3703. spin_lock_irqsave(&skdev->lock, flags);
  3704. pr_debug("%s:%s:%d stopping %s queue\n",
  3705. skdev->name, __func__, __LINE__, skdev->name);
  3706. blk_stop_queue(skdev->queue);
  3707. spin_unlock_irqrestore(&skdev->lock, flags);
  3708. err_out:
  3709. return rc;
  3710. }
  3711. #define SKD_N_DEV_TABLE 16u
  3712. static u32 skd_next_devno;
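/*
 * Top-level constructor: allocate the skd_device, seed its tunables from
 * the skd_* module parameters, and build the completion queue, FIT
 * message, request, special-request, internal-command and disk resources
 * in order, tearing everything down via skd_destruct() on any failure.
 */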
  3713. static struct skd_device *skd_construct(struct pci_dev *pdev)
  3714. {
  3715. struct skd_device *skdev;
  3716. int blk_major = skd_major;
  3717. int rc;
  3718. skdev = kzalloc(sizeof(*skdev), GFP_KERNEL);
  3719. if (!skdev) {
  3720. pr_err(PFX "(%s): memory alloc failure\n",
  3721. pci_name(pdev));
  3722. return NULL;
  3723. }
  3724. skdev->state = SKD_DRVR_STATE_LOAD;
  3725. skdev->pdev = pdev;
  3726. skdev->devno = skd_next_devno++;
  3727. skdev->major = blk_major;
  3728. skdev->irq_type = skd_isr_type;
  3729. sprintf(skdev->name, DRV_NAME "%d", skdev->devno);
  3730. skdev->dev_max_queue_depth = 0;
  3731. skdev->num_req_context = skd_max_queue_depth;
  3732. skdev->num_fitmsg_context = skd_max_queue_depth;
  3733. skdev->n_special = skd_max_pass_thru;
  3734. skdev->cur_max_queue_depth = 1;
  3735. skdev->queue_low_water_mark = 1;
  3736. skdev->proto_ver = 99;
  3737. skdev->sgs_per_request = skd_sgs_per_request;
  3738. skdev->dbg_level = skd_dbg_level;
  3739. atomic_set(&skdev->device_count, 0);
  3740. spin_lock_init(&skdev->lock);
  3741. INIT_WORK(&skdev->completion_worker, skd_completion_worker);
  3742. pr_debug("%s:%s:%d skcomp\n", skdev->name, __func__, __LINE__);
  3743. rc = skd_cons_skcomp(skdev);
  3744. if (rc < 0)
  3745. goto err_out;
  3746. pr_debug("%s:%s:%d skmsg\n", skdev->name, __func__, __LINE__);
  3747. rc = skd_cons_skmsg(skdev);
  3748. if (rc < 0)
  3749. goto err_out;
  3750. pr_debug("%s:%s:%d skreq\n", skdev->name, __func__, __LINE__);
  3751. rc = skd_cons_skreq(skdev);
  3752. if (rc < 0)
  3753. goto err_out;
  3754. pr_debug("%s:%s:%d skspcl\n", skdev->name, __func__, __LINE__);
  3755. rc = skd_cons_skspcl(skdev);
  3756. if (rc < 0)
  3757. goto err_out;
  3758. pr_debug("%s:%s:%d sksb\n", skdev->name, __func__, __LINE__);
  3759. rc = skd_cons_sksb(skdev);
  3760. if (rc < 0)
  3761. goto err_out;
  3762. pr_debug("%s:%s:%d disk\n", skdev->name, __func__, __LINE__);
  3763. rc = skd_cons_disk(skdev);
  3764. if (rc < 0)
  3765. goto err_out;
  3766. pr_debug("%s:%s:%d VICTORY\n", skdev->name, __func__, __LINE__);
  3767. return skdev;
  3768. err_out:
  3769. pr_debug("%s:%s:%d construct failed\n",
  3770. skdev->name, __func__, __LINE__);
  3771. skd_destruct(skdev);
  3772. return NULL;
  3773. }
  3774. /*
  3775. *****************************************************************************
  3776. * DESTRUCT (FREE)
  3777. *****************************************************************************
  3778. */
  3779. static void skd_free_skcomp(struct skd_device *skdev)
  3780. {
  3781. if (skdev->skcomp_table != NULL) {
  3782. u32 nbytes;
  3783. nbytes = sizeof(skdev->skcomp_table[0]) *
  3784. SKD_N_COMPLETION_ENTRY;
  3785. pci_free_consistent(skdev->pdev, nbytes,
  3786. skdev->skcomp_table, skdev->cq_dma_address);
  3787. }
  3788. skdev->skcomp_table = NULL;
  3789. skdev->cq_dma_address = 0;
  3790. }
  3791. static void skd_free_skmsg(struct skd_device *skdev)
  3792. {
  3793. u32 i;
  3794. if (skdev->skmsg_table == NULL)
  3795. return;
  3796. for (i = 0; i < skdev->num_fitmsg_context; i++) {
  3797. struct skd_fitmsg_context *skmsg;
  3798. skmsg = &skdev->skmsg_table[i];
  3799. if (skmsg->msg_buf != NULL) {
  3800. skmsg->msg_buf += skmsg->offset;
  3801. skmsg->mb_dma_address += skmsg->offset;
  3802. pci_free_consistent(skdev->pdev, SKD_N_FITMSG_BYTES,
  3803. skmsg->msg_buf,
  3804. skmsg->mb_dma_address);
  3805. }
  3806. skmsg->msg_buf = NULL;
  3807. skmsg->mb_dma_address = 0;
  3808. }
  3809. kfree(skdev->skmsg_table);
  3810. skdev->skmsg_table = NULL;
  3811. }
  3812. static void skd_free_sg_list(struct skd_device *skdev,
  3813. struct fit_sg_descriptor *sg_list,
  3814. u32 n_sg, dma_addr_t dma_addr)
  3815. {
  3816. if (sg_list != NULL) {
  3817. u32 nbytes;
  3818. nbytes = sizeof(*sg_list) * n_sg;
  3819. pci_free_consistent(skdev->pdev, nbytes, sg_list, dma_addr);
  3820. }
  3821. }
  3822. static void skd_free_skreq(struct skd_device *skdev)
  3823. {
  3824. u32 i;
  3825. if (skdev->skreq_table == NULL)
  3826. return;
  3827. for (i = 0; i < skdev->num_req_context; i++) {
  3828. struct skd_request_context *skreq;
  3829. skreq = &skdev->skreq_table[i];
  3830. skd_free_sg_list(skdev, skreq->sksg_list,
  3831. skdev->sgs_per_request,
  3832. skreq->sksg_dma_address);
  3833. skreq->sksg_list = NULL;
  3834. skreq->sksg_dma_address = 0;
  3835. kfree(skreq->sg);
  3836. }
  3837. kfree(skdev->skreq_table);
  3838. skdev->skreq_table = NULL;
  3839. }
  3840. static void skd_free_skspcl(struct skd_device *skdev)
  3841. {
  3842. u32 i;
  3843. u32 nbytes;
  3844. if (skdev->skspcl_table == NULL)
  3845. return;
  3846. for (i = 0; i < skdev->n_special; i++) {
  3847. struct skd_special_context *skspcl;
  3848. skspcl = &skdev->skspcl_table[i];
  3849. if (skspcl->msg_buf != NULL) {
  3850. nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
  3851. pci_free_consistent(skdev->pdev, nbytes,
  3852. skspcl->msg_buf,
  3853. skspcl->mb_dma_address);
  3854. }
  3855. skspcl->msg_buf = NULL;
  3856. skspcl->mb_dma_address = 0;
  3857. skd_free_sg_list(skdev, skspcl->req.sksg_list,
  3858. SKD_N_SG_PER_SPECIAL,
  3859. skspcl->req.sksg_dma_address);
  3860. skspcl->req.sksg_list = NULL;
  3861. skspcl->req.sksg_dma_address = 0;
  3862. kfree(skspcl->req.sg);
  3863. }
  3864. kfree(skdev->skspcl_table);
  3865. skdev->skspcl_table = NULL;
  3866. }
  3867. static void skd_free_sksb(struct skd_device *skdev)
  3868. {
  3869. struct skd_special_context *skspcl;
  3870. u32 nbytes;
  3871. skspcl = &skdev->internal_skspcl;
  3872. if (skspcl->data_buf != NULL) {
  3873. nbytes = SKD_N_INTERNAL_BYTES;
  3874. pci_free_consistent(skdev->pdev, nbytes,
  3875. skspcl->data_buf, skspcl->db_dma_address);
  3876. }
  3877. skspcl->data_buf = NULL;
  3878. skspcl->db_dma_address = 0;
  3879. if (skspcl->msg_buf != NULL) {
  3880. nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
  3881. pci_free_consistent(skdev->pdev, nbytes,
  3882. skspcl->msg_buf, skspcl->mb_dma_address);
  3883. }
  3884. skspcl->msg_buf = NULL;
  3885. skspcl->mb_dma_address = 0;
  3886. skd_free_sg_list(skdev, skspcl->req.sksg_list, 1,
  3887. skspcl->req.sksg_dma_address);
  3888. skspcl->req.sksg_list = NULL;
  3889. skspcl->req.sksg_dma_address = 0;
  3890. }
  3891. static void skd_free_disk(struct skd_device *skdev)
  3892. {
  3893. struct gendisk *disk = skdev->disk;
  3894. if (disk != NULL) {
  3895. struct request_queue *q = disk->queue;
  3896. if (disk->flags & GENHD_FL_UP)
  3897. del_gendisk(disk);
  3898. if (q)
  3899. blk_cleanup_queue(q);
  3900. put_disk(disk);
  3901. }
  3902. skdev->disk = NULL;
  3903. }
  3904. static void skd_destruct(struct skd_device *skdev)
  3905. {
  3906. if (skdev == NULL)
  3907. return;
  3908. pr_debug("%s:%s:%d disk\n", skdev->name, __func__, __LINE__);
  3909. skd_free_disk(skdev);
  3910. pr_debug("%s:%s:%d sksb\n", skdev->name, __func__, __LINE__);
  3911. skd_free_sksb(skdev);
  3912. pr_debug("%s:%s:%d skspcl\n", skdev->name, __func__, __LINE__);
  3913. skd_free_skspcl(skdev);
  3914. pr_debug("%s:%s:%d skreq\n", skdev->name, __func__, __LINE__);
  3915. skd_free_skreq(skdev);
  3916. pr_debug("%s:%s:%d skmsg\n", skdev->name, __func__, __LINE__);
  3917. skd_free_skmsg(skdev);
  3918. pr_debug("%s:%s:%d skcomp\n", skdev->name, __func__, __LINE__);
  3919. skd_free_skcomp(skdev);
  3920. pr_debug("%s:%s:%d skdev\n", skdev->name, __func__, __LINE__);
  3921. kfree(skdev);
  3922. }
  3923. /*
  3924. *****************************************************************************
  3925. * BLOCK DEVICE (BDEV) GLUE
  3926. *****************************************************************************
  3927. */
  3928. static int skd_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
  3929. {
  3930. struct skd_device *skdev;
  3931. u64 capacity;
  3932. skdev = bdev->bd_disk->private_data;
  3933. pr_debug("%s:%s:%d %s: CMD[%s] getgeo device\n",
  3934. skdev->name, __func__, __LINE__,
  3935. bdev->bd_disk->disk_name, current->comm);
  3936. if (skdev->read_cap_is_valid) {
  3937. capacity = get_capacity(skdev->disk);
  3938. geo->heads = 64;
  3939. geo->sectors = 255;
  3940. geo->cylinders = (capacity) / (255 * 64);
  3941. return 0;
  3942. }
  3943. return -EIO;
  3944. }
  3945. static int skd_bdev_attach(struct skd_device *skdev)
  3946. {
  3947. pr_debug("%s:%s:%d add_disk\n", skdev->name, __func__, __LINE__);
  3948. add_disk(skdev->disk);
  3949. return 0;
  3950. }
  3951. static const struct block_device_operations skd_blockdev_ops = {
  3952. .owner = THIS_MODULE,
  3953. .ioctl = skd_bdev_ioctl,
  3954. .getgeo = skd_bdev_getgeo,
  3955. };
  3956. /*
  3957. *****************************************************************************
  3958. * PCIe DRIVER GLUE
  3959. *****************************************************************************
  3960. */
  3961. static const struct pci_device_id skd_pci_tbl[] = {
  3962. { PCI_VENDOR_ID_STEC, PCI_DEVICE_ID_S1120,
  3963. PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
  3964. { 0 } /* terminate list */
  3965. };
  3966. MODULE_DEVICE_TABLE(pci, skd_pci_tbl);
  3967. static char *skd_pci_info(struct skd_device *skdev, char *str)
  3968. {
  3969. int pcie_reg;
  3970. strcpy(str, "PCIe (");
  3971. pcie_reg = pci_find_capability(skdev->pdev, PCI_CAP_ID_EXP);
  3972. if (pcie_reg) {
  3973. char lwstr[6];
  3974. uint16_t pcie_lstat, lspeed, lwidth;
  3975. pcie_reg += 0x12;
  3976. pci_read_config_word(skdev->pdev, pcie_reg, &pcie_lstat);
  3977. lspeed = pcie_lstat & (0xF);
  3978. lwidth = (pcie_lstat & 0x3F0) >> 4;
  3979. if (lspeed == 1)
  3980. strcat(str, "2.5GT/s ");
  3981. else if (lspeed == 2)
  3982. strcat(str, "5.0GT/s ");
  3983. else
  3984. strcat(str, "<unknown> ");
  3985. snprintf(lwstr, sizeof(lwstr), "%dX)", lwidth);
  3986. strcat(str, lwstr);
  3987. }
  3988. return str;
  3989. }
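/*
 * PCI probe: enable the device, set a 64-bit DMA mask (falling back to
 * 32-bit), construct the per-device state, map the BARs, hook up
 * interrupts and the timer, start the device, and add the gendisk only
 * once the device reports itself on-line.
 */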
  3990. static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
  3991. {
  3992. int i;
  3993. int rc = 0;
  3994. char pci_str[32];
  3995. struct skd_device *skdev;
  3996. pr_info("STEC s1120 Driver(%s) version %s-b%s\n",
  3997. DRV_NAME, DRV_VERSION, DRV_BUILD_ID);
  3998. pr_info("(skd?:??:[%s]): vendor=%04X device=%04x\n",
  3999. pci_name(pdev), pdev->vendor, pdev->device);
  4000. rc = pci_enable_device(pdev);
  4001. if (rc)
  4002. return rc;
  4003. rc = pci_request_regions(pdev, DRV_NAME);
  4004. if (rc)
  4005. goto err_out;
  4006. rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
  4007. if (!rc) {
  4008. if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
  4009. pr_err("(%s): consistent DMA mask error %d\n",
  4010. pci_name(pdev), rc);
  4011. }
  4012. } else {
  4013. rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
  4014. if (rc) {
  4015. pr_err("(%s): DMA mask error %d\n",
  4016. pci_name(pdev), rc);
  4017. goto err_out_regions;
  4018. }
  4019. }

	if (!skd_major) {
		rc = register_blkdev(0, DRV_NAME);
		if (rc < 0)
			goto err_out_regions;
		BUG_ON(!rc);
		skd_major = rc;
	}

	skdev = skd_construct(pdev);
	if (skdev == NULL) {
		rc = -ENOMEM;
		goto err_out_regions;
	}

	skd_pci_info(skdev, pci_str);
	pr_info("(%s): %s 64bit\n", skd_name(skdev), pci_str);

	pci_set_master(pdev);
	rc = pci_enable_pcie_error_reporting(pdev);
	if (rc) {
		pr_err("(%s): bad enable of PCIe error reporting rc=%d\n",
		       skd_name(skdev), rc);
		skdev->pcie_error_reporting_is_enabled = 0;
	} else
		skdev->pcie_error_reporting_is_enabled = 1;

	pci_set_drvdata(pdev, skdev);
	skdev->disk->driverfs_dev = &pdev->dev;

	for (i = 0; i < SKD_MAX_BARS; i++) {
		skdev->mem_phys[i] = pci_resource_start(pdev, i);
		skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
		skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
					    skdev->mem_size[i]);
		if (!skdev->mem_map[i]) {
			pr_err("(%s): Unable to map adapter memory!\n",
			       skd_name(skdev));
			rc = -ENODEV;
			goto err_out_iounmap;
		}
		pr_debug("%s:%s:%d mem_map=%p, phyd=%016llx, size=%d\n",
			 skdev->name, __func__, __LINE__,
			 skdev->mem_map[i],
			 (uint64_t)skdev->mem_phys[i], skdev->mem_size[i]);
	}

	rc = skd_acquire_irq(skdev);
	if (rc) {
		pr_err("(%s): interrupt resource error %d\n",
		       skd_name(skdev), rc);
		goto err_out_iounmap;
	}

	rc = skd_start_timer(skdev);
	if (rc)
		goto err_out_timer;

	init_waitqueue_head(&skdev->waitq);

	skd_start_device(skdev);

	rc = wait_event_interruptible_timeout(skdev->waitq,
					      (skdev->gendisk_on),
					      (SKD_START_WAIT_SECONDS * HZ));
	if (skdev->gendisk_on > 0) {
		/* device came on-line after reset */
		skd_bdev_attach(skdev);
		rc = 0;
	} else {
		/*
		 * We timed out; something is wrong with the device,
		 * so don't add the disk structure.
		 */
		pr_err("(%s): error: waiting for s1120 timed out %d!\n",
		       skd_name(skdev), rc);
		/* no error was recorded, so report the timeout as ENXIO */
		if (!rc)
			rc = -ENXIO;
		goto err_out_timer;
	}

#ifdef SKD_VMK_POLL_HANDLER
	if (skdev->irq_type == SKD_IRQ_MSIX) {
		/* MSIX completion handler is being used for coredump */
		vmklnx_scsi_register_poll_handler(skdev->scsi_host,
						  skdev->msix_entries[5].vector,
						  skd_comp_q, skdev);
	} else {
		vmklnx_scsi_register_poll_handler(skdev->scsi_host,
						  skdev->pdev->irq, skd_isr,
						  skdev);
	}
#endif	/* SKD_VMK_POLL_HANDLER */

	return rc;

err_out_timer:
	skd_stop_device(skdev);
	skd_release_irq(skdev);

err_out_iounmap:
	for (i = 0; i < SKD_MAX_BARS; i++)
		if (skdev->mem_map[i])
			iounmap(skdev->mem_map[i]);

	if (skdev->pcie_error_reporting_is_enabled)
		pci_disable_pcie_error_reporting(pdev);

	skd_destruct(skdev);

err_out_regions:
	pci_release_regions(pdev);

err_out:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return rc;
}
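
/*
 * Undo skd_pci_probe() for one device: stop the hardware, release the
 * interrupt, unmap the BARs and give back the PCI resources.
 */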
static void skd_pci_remove(struct pci_dev *pdev)
{
	int i;
	struct skd_device *skdev;

	skdev = pci_get_drvdata(pdev);
	if (!skdev) {
		pr_err("%s: no device data for PCI\n", pci_name(pdev));
		return;
	}
	skd_stop_device(skdev);
	skd_release_irq(skdev);

	for (i = 0; i < SKD_MAX_BARS; i++)
		if (skdev->mem_map[i])
			iounmap(skdev->mem_map[i]);

	if (skdev->pcie_error_reporting_is_enabled)
		pci_disable_pcie_error_reporting(pdev);

	skd_destruct(skdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
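
/*
 * Power management hooks.  Suspend quiesces the device and releases all
 * of its resources so the controller can be powered down; resume
 * re-acquires them and restarts the device, mirroring the probe path.
 */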
static int skd_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int i;
	struct skd_device *skdev;

	skdev = pci_get_drvdata(pdev);
	if (!skdev) {
		pr_err("%s: no device data for PCI\n", pci_name(pdev));
		return -EIO;
	}

	skd_stop_device(skdev);
	skd_release_irq(skdev);

	for (i = 0; i < SKD_MAX_BARS; i++)
		if (skdev->mem_map[i])
			iounmap(skdev->mem_map[i]);

	if (skdev->pcie_error_reporting_is_enabled)
		pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int skd_pci_resume(struct pci_dev *pdev)
{
	int i;
	int rc = 0;
	struct skd_device *skdev;

	skdev = pci_get_drvdata(pdev);
	if (!skdev) {
		pr_err("%s: no device data for PCI\n", pci_name(pdev));
		return -EIO;
	}

	pci_set_power_state(pdev, PCI_D0);
	pci_enable_wake(pdev, PCI_D0, 0);
	pci_restore_state(pdev);

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;
	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out;
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!rc) {
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
			pr_err("(%s): consistent DMA mask error %d\n",
			       pci_name(pdev), rc);
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			pr_err("(%s): DMA mask error %d\n",
			       pci_name(pdev), rc);
			goto err_out_regions;
		}
	}

	pci_set_master(pdev);
	rc = pci_enable_pcie_error_reporting(pdev);
	if (rc) {
		pr_err("(%s): bad enable of PCIe error reporting rc=%d\n",
		       skdev->name, rc);
		skdev->pcie_error_reporting_is_enabled = 0;
	} else
		skdev->pcie_error_reporting_is_enabled = 1;

	for (i = 0; i < SKD_MAX_BARS; i++) {
		skdev->mem_phys[i] = pci_resource_start(pdev, i);
		skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
		skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
					    skdev->mem_size[i]);
		if (!skdev->mem_map[i]) {
			pr_err("(%s): Unable to map adapter memory!\n",
			       skd_name(skdev));
			rc = -ENODEV;
			goto err_out_iounmap;
		}
		pr_debug("%s:%s:%d mem_map=%p, phyd=%016llx, size=%d\n",
			 skdev->name, __func__, __LINE__,
			 skdev->mem_map[i],
			 (uint64_t)skdev->mem_phys[i], skdev->mem_size[i]);
	}
	rc = skd_acquire_irq(skdev);
	if (rc) {
		pr_err("(%s): interrupt resource error %d\n",
		       pci_name(pdev), rc);
		goto err_out_iounmap;
	}

	rc = skd_start_timer(skdev);
	if (rc)
		goto err_out_timer;

	init_waitqueue_head(&skdev->waitq);

	skd_start_device(skdev);

	return rc;

err_out_timer:
	skd_stop_device(skdev);
	skd_release_irq(skdev);

err_out_iounmap:
	for (i = 0; i < SKD_MAX_BARS; i++)
		if (skdev->mem_map[i])
			iounmap(skdev->mem_map[i]);

	if (skdev->pcie_error_reporting_is_enabled)
		pci_disable_pcie_error_reporting(pdev);

err_out_regions:
	pci_release_regions(pdev);

err_out:
	pci_disable_device(pdev);
	return rc;
}
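
/*
 * Shutdown callback: quiesce the device so no DMA is left in flight
 * when the system powers off or kexecs into a new kernel.
 */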
static void skd_pci_shutdown(struct pci_dev *pdev)
{
	struct skd_device *skdev;

	pr_err("skd_pci_shutdown called\n");

	skdev = pci_get_drvdata(pdev);
	if (!skdev) {
		pr_err("%s: no device data for PCI\n", pci_name(pdev));
		return;
	}

	pr_err("%s: calling stop\n", skd_name(skdev));
	skd_stop_device(skdev);
}
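
/* PCI driver glue: device table plus the probe/remove/PM/shutdown hooks. */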
static struct pci_driver skd_driver = {
	.name		= DRV_NAME,
	.id_table	= skd_pci_tbl,
	.probe		= skd_pci_probe,
	.remove		= skd_pci_remove,
	.suspend	= skd_pci_suspend,
	.resume		= skd_pci_resume,
	.shutdown	= skd_pci_shutdown,
};

/*
 *****************************************************************************
 * LOGGING SUPPORT
 *****************************************************************************
 */
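
/*
 * Build the "name:serial:[pci address]" identifier used as the prefix
 * in log messages; falls back to "??" until valid INQUIRY data has
 * been read from the drive.
 */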
static const char *skd_name(struct skd_device *skdev)
{
	memset(skdev->id_str, 0, sizeof(skdev->id_str));

	if (skdev->inquiry_is_valid)
		snprintf(skdev->id_str, sizeof(skdev->id_str), "%s:%s:[%s]",
			 skdev->name, skdev->inq_serial_num,
			 pci_name(skdev->pdev));
	else
		snprintf(skdev->id_str, sizeof(skdev->id_str), "%s:??:[%s]",
			 skdev->name, pci_name(skdev->pdev));

	return skdev->id_str;
}
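
/*
 * Translate the firmware, driver, FIT message and request state codes
 * into short strings for the debug log.
 */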
const char *skd_drive_state_to_str(int state)
{
	switch (state) {
	case FIT_SR_DRIVE_OFFLINE:
		return "OFFLINE";
	case FIT_SR_DRIVE_INIT:
		return "INIT";
	case FIT_SR_DRIVE_ONLINE:
		return "ONLINE";
	case FIT_SR_DRIVE_BUSY:
		return "BUSY";
	case FIT_SR_DRIVE_FAULT:
		return "FAULT";
	case FIT_SR_DRIVE_DEGRADED:
		return "DEGRADED";
	case FIT_SR_PCIE_LINK_DOWN:
  4302. return "INK_DOWN";
	case FIT_SR_DRIVE_SOFT_RESET:
		return "SOFT_RESET";
	case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
		return "NEED_FW";
	case FIT_SR_DRIVE_INIT_FAULT:
		return "INIT_FAULT";
	case FIT_SR_DRIVE_BUSY_SANITIZE:
		return "BUSY_SANITIZE";
	case FIT_SR_DRIVE_BUSY_ERASE:
		return "BUSY_ERASE";
	case FIT_SR_DRIVE_FW_BOOTING:
		return "FW_BOOTING";
	default:
		return "???";
	}
}

const char *skd_skdev_state_to_str(enum skd_drvr_state state)
{
	switch (state) {
	case SKD_DRVR_STATE_LOAD:
		return "LOAD";
	case SKD_DRVR_STATE_IDLE:
		return "IDLE";
	case SKD_DRVR_STATE_BUSY:
		return "BUSY";
	case SKD_DRVR_STATE_STARTING:
		return "STARTING";
	case SKD_DRVR_STATE_ONLINE:
		return "ONLINE";
	case SKD_DRVR_STATE_PAUSING:
		return "PAUSING";
	case SKD_DRVR_STATE_PAUSED:
		return "PAUSED";
	case SKD_DRVR_STATE_DRAINING_TIMEOUT:
		return "DRAINING_TIMEOUT";
	case SKD_DRVR_STATE_RESTARTING:
		return "RESTARTING";
	case SKD_DRVR_STATE_RESUMING:
		return "RESUMING";
	case SKD_DRVR_STATE_STOPPING:
		return "STOPPING";
	case SKD_DRVR_STATE_SYNCING:
		return "SYNCING";
	case SKD_DRVR_STATE_FAULT:
		return "FAULT";
	case SKD_DRVR_STATE_DISAPPEARED:
		return "DISAPPEARED";
	case SKD_DRVR_STATE_BUSY_ERASE:
		return "BUSY_ERASE";
	case SKD_DRVR_STATE_BUSY_SANITIZE:
		return "BUSY_SANITIZE";
	case SKD_DRVR_STATE_BUSY_IMMINENT:
		return "BUSY_IMMINENT";
	case SKD_DRVR_STATE_WAIT_BOOT:
		return "WAIT_BOOT";
	default:
		return "???";
	}
}

static const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
{
	switch (state) {
	case SKD_MSG_STATE_IDLE:
		return "IDLE";
	case SKD_MSG_STATE_BUSY:
		return "BUSY";
	default:
		return "???";
	}
}

static const char *skd_skreq_state_to_str(enum skd_req_state state)
{
	switch (state) {
	case SKD_REQ_STATE_IDLE:
		return "IDLE";
	case SKD_REQ_STATE_SETUP:
		return "SETUP";
	case SKD_REQ_STATE_BUSY:
		return "BUSY";
	case SKD_REQ_STATE_COMPLETED:
		return "COMPLETED";
	case SKD_REQ_STATE_TIMEOUT:
		return "TIMEOUT";
	case SKD_REQ_STATE_ABORTED:
		return "ABORTED";
	default:
		return "???";
	}
}
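
/*
 * Debug-log snapshots of the device, FIT message and request contexts;
 * called when a state transition or error needs the bookkeeping fields
 * dumped for diagnosis.
 */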
static void skd_log_skdev(struct skd_device *skdev, const char *event)
{
	pr_debug("%s:%s:%d (%s) skdev=%p event='%s'\n",
		 skdev->name, __func__, __LINE__, skdev->name, skdev, event);
	pr_debug("%s:%s:%d drive_state=%s(%d) driver_state=%s(%d)\n",
		 skdev->name, __func__, __LINE__,
		 skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
		 skd_skdev_state_to_str(skdev->state), skdev->state);
	pr_debug("%s:%s:%d busy=%d limit=%d dev=%d lowat=%d\n",
		 skdev->name, __func__, __LINE__,
		 skdev->in_flight, skdev->cur_max_queue_depth,
		 skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
	pr_debug("%s:%s:%d timestamp=0x%x cycle=%d cycle_ix=%d\n",
		 skdev->name, __func__, __LINE__,
		 skdev->timeout_stamp, skdev->skcomp_cycle, skdev->skcomp_ix);
}

static void skd_log_skmsg(struct skd_device *skdev,
			  struct skd_fitmsg_context *skmsg, const char *event)
{
	pr_debug("%s:%s:%d (%s) skmsg=%p event='%s'\n",
		 skdev->name, __func__, __LINE__, skdev->name, skmsg, event);
	pr_debug("%s:%s:%d state=%s(%d) id=0x%04x length=%d\n",
		 skdev->name, __func__, __LINE__,
		 skd_skmsg_state_to_str(skmsg->state), skmsg->state,
		 skmsg->id, skmsg->length);
}

static void skd_log_skreq(struct skd_device *skdev,
			  struct skd_request_context *skreq, const char *event)
{
	pr_debug("%s:%s:%d (%s) skreq=%p event='%s'\n",
		 skdev->name, __func__, __LINE__, skdev->name, skreq, event);
	pr_debug("%s:%s:%d state=%s(%d) id=0x%04x fitmsg=0x%04x\n",
		 skdev->name, __func__, __LINE__,
		 skd_skreq_state_to_str(skreq->state), skreq->state,
		 skreq->id, skreq->fitmsg_id);
	pr_debug("%s:%s:%d timo=0x%x sg_dir=%d n_sg=%d\n",
		 skdev->name, __func__, __LINE__,
		 skreq->timeout_stamp, skreq->sg_data_dir, skreq->n_sg);

	if (skreq->req != NULL) {
		struct request *req = skreq->req;
		u32 lba = (u32)blk_rq_pos(req);
		u32 count = blk_rq_sectors(req);

		pr_debug("%s:%s:%d "
			 "req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n",
			 skdev->name, __func__, __LINE__,
			 req, lba, lba, count, count,
			 (int)rq_data_dir(req));
	} else
		pr_debug("%s:%s:%d req=NULL\n",
			 skdev->name, __func__, __LINE__);
}

/*
 *****************************************************************************
 * MODULE GLUE
 *****************************************************************************
 */
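
/*
 * Module entry point: validate the tunables, falling back to the
 * defaults for any out-of-range value, then register the PCI driver.
 * Assuming the tunables are exposed via module_param() earlier in this
 * file and the module is named after DRV_NAME, they can be set at load
 * time, e.g.:
 *
 *	modprobe skd skd_isr_type=1 skd_max_queue_depth=64
 */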
static int __init skd_init(void)
{
	pr_info(PFX " v%s-b%s loaded\n", DRV_VERSION, DRV_BUILD_ID);

	switch (skd_isr_type) {
	case SKD_IRQ_LEGACY:
	case SKD_IRQ_MSI:
	case SKD_IRQ_MSIX:
		break;
	default:
		pr_err(PFX "skd_isr_type %d invalid, re-set to %d\n",
		       skd_isr_type, SKD_IRQ_DEFAULT);
		skd_isr_type = SKD_IRQ_DEFAULT;
	}

	if (skd_max_queue_depth < 1 ||
	    skd_max_queue_depth > SKD_MAX_QUEUE_DEPTH) {
		pr_err(PFX "skd_max_queue_depth %d invalid, re-set to %d\n",
		       skd_max_queue_depth, SKD_MAX_QUEUE_DEPTH_DEFAULT);
		skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
	}

	if (skd_max_req_per_msg < 1 || skd_max_req_per_msg > 14) {
		pr_err(PFX "skd_max_req_per_msg %d invalid, re-set to %d\n",
		       skd_max_req_per_msg, SKD_MAX_REQ_PER_MSG_DEFAULT);
		skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
	}

	if (skd_sgs_per_request < 1 || skd_sgs_per_request > 4096) {
  4473. pr_err(PFX "skd_sg_per_request %d invalid, re-set to %d\n",
		       skd_sgs_per_request, SKD_N_SG_PER_REQ_DEFAULT);
		skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
	}

	if (skd_dbg_level < 0 || skd_dbg_level > 2) {
		pr_err(PFX "skd_dbg_level %d invalid, re-set to %d\n",
		       skd_dbg_level, 0);
		skd_dbg_level = 0;
	}

	if (skd_isr_comp_limit < 0) {
		pr_err(PFX "skd_isr_comp_limit %d invalid, set to %d\n",
		       skd_isr_comp_limit, 0);
		skd_isr_comp_limit = 0;
	}

	if (skd_max_pass_thru < 1 || skd_max_pass_thru > 50) {
		pr_err(PFX "skd_max_pass_thru %d invalid, re-set to %d\n",
		       skd_max_pass_thru, SKD_N_SPECIAL_CONTEXT);
		skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
	}

	return pci_register_driver(&skd_driver);
}
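
/*
 * Module exit: unregister the PCI driver and release the block major
 * number if one was dynamically allocated during probe.
 */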
static void __exit skd_exit(void)
{
	pr_info(PFX " v%s-b%s unloading\n", DRV_VERSION, DRV_BUILD_ID);

	pci_unregister_driver(&skd_driver);

	if (skd_major)
		unregister_blkdev(skd_major, DRV_NAME);
}

module_init(skd_init);
module_exit(skd_exit);