  1. /*
  2. * dc395x.c
  3. *
  4. * Device Driver for Tekram DC395(U/UW/F), DC315(U)
  5. * PCI SCSI Bus Master Host Adapter
  6. * (SCSI chip set used Tekram ASIC TRM-S1040)
  7. *
  8. * Authors:
  9. * C.L. Huang <ching@tekram.com.tw>
  10. * Erich Chen <erich@tekram.com.tw>
  11. * (C) Copyright 1995-1999 Tekram Technology Co., Ltd.
  12. *
  13. * Kurt Garloff <garloff@suse.de>
  14. * (C) 1999-2000 Kurt Garloff
  15. *
  16. * Oliver Neukum <oliver@neukum.name>
  17. * Ali Akcaagac <aliakc@web.de>
  18. * Jamie Lenehan <lenehan@twibble.org>
  19. * (C) 2003
  20. *
  21. * License: GNU GPL
  22. *
  23. *************************************************************************
  24. *
  25. * Redistribution and use in source and binary forms, with or without
  26. * modification, are permitted provided that the following conditions
  27. * are met:
  28. * 1. Redistributions of source code must retain the above copyright
  29. * notice, this list of conditions and the following disclaimer.
  30. * 2. Redistributions in binary form must reproduce the above copyright
  31. * notice, this list of conditions and the following disclaimer in the
  32. * documentation and/or other materials provided with the distribution.
  33. * 3. The name of the author may not be used to endorse or promote products
  34. * derived from this software without specific prior written permission.
  35. *
  36. * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  37. * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  38. * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  39. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  40. * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  41. * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  42. * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  43. * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  44. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  45. * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  46. *
  47. ************************************************************************
  48. */
  49. #include <linux/module.h>
  50. #include <linux/moduleparam.h>
  51. #include <linux/delay.h>
  52. #include <linux/ctype.h>
  53. #include <linux/blkdev.h>
  54. #include <linux/interrupt.h>
  55. #include <linux/init.h>
  56. #include <linux/spinlock.h>
  57. #include <linux/pci.h>
  58. #include <linux/list.h>
  59. #include <linux/vmalloc.h>
  60. #include <linux/slab.h>
  61. #include <asm/io.h>
  62. #include <scsi/scsi.h>
  63. #include <scsi/scsicam.h> /* needed for scsicam_bios_param */
  64. #include <scsi/scsi_cmnd.h>
  65. #include <scsi/scsi_device.h>
  66. #include <scsi/scsi_host.h>
  67. #include "dc395x.h"
/* Driver identification strings used in the banner and log messages. */
#define DC395X_NAME "dc395x"
#define DC395X_BANNER "Tekram DC395(U/UW/F), DC315(U) - ASIC TRM-S1040"
#define DC395X_VERSION "v2.05, 2004/03/08"

/*---------------------------------------------------------------------------
                                  Features
 ---------------------------------------------------------------------------*/
/*
 * Uncomment any of these to compile out the corresponding part of the
 * driver (disconnect/reselect, tagged queueing, sync or wide negotiation).
 */
/*#define DC395x_NO_DISCONNECT*/
/*#define DC395x_NO_TAGQ*/
/*#define DC395x_NO_SYNC*/
/*#define DC395x_NO_WIDE*/

/*---------------------------------------------------------------------------
                                 Debugging
 ---------------------------------------------------------------------------*/
/*
 * Types of debugging that can be enabled and disabled
 */
#define DBG_KG 0x0001
#define DBG_0 0x0002
#define DBG_1 0x0004
#define DBG_SG 0x0020
#define DBG_FIFO 0x0040
#define DBG_PIO 0x0080

/*
 * Set of things to output debugging for (a bitwise OR of the DBG_*
 * flags above). Leave DEBUG_MASK undefined to remove all debugging.
 */
/*#define DEBUG_MASK (DBG_0|DBG_1|DBG_SG|DBG_FIFO|DBG_PIO)*/
/*#define DEBUG_MASK DBG_0*/
/*
 * Output a kernel message at the specified level and prepend the
 * driver name and a ": " to the start of the message.
 */
#define dprintkl(level, format, arg...) \
	printk(level DC395X_NAME ": " format , ## arg)

#ifdef DEBUG_MASK
/*
 * Print a debug message - this is formatted with KERN_DEBUG, then the
 * driver name followed by a ": " and then the message is output.
 * This also checks that the specified debug level is enabled in
 * DEBUG_MASK before outputting the message.
 */
#define dprintkdbg(type, format, arg...) \
	do { \
		if ((type) & (DEBUG_MASK)) \
			dprintkl(KERN_DEBUG , format , ## arg); \
	} while (0)

/*
 * Check if the specified type of debugging is enabled
 */
#define debug_enabled(type) ((DEBUG_MASK) & (type))

#else
/*
 * No debugging. Do nothing
 */
#define dprintkdbg(type, format, arg...) \
	do {} while (0)
#define debug_enabled(type) (0)
#endif
/* Fallback PCI IDs for kernels whose pci_ids.h lacks the Tekram entries. */
#ifndef PCI_VENDOR_ID_TEKRAM
#define PCI_VENDOR_ID_TEKRAM 0x1DE1 /* Vendor ID */
#endif
#ifndef PCI_DEVICE_ID_TEKRAM_TRMS1040
#define PCI_DEVICE_ID_TEKRAM_TRMS1040 0x0391 /* Device ID */
#endif

/* Take/release the Scsi_Host lock with local interrupts disabled. */
#define DC395x_LOCK_IO(dev,flags) spin_lock_irqsave(((struct Scsi_Host *)dev)->host_lock, flags)
#define DC395x_UNLOCK_IO(dev,flags) spin_unlock_irqrestore(((struct Scsi_Host *)dev)->host_lock, flags)

/*
 * Port I/O accessors for the TRM-S1040 register bank; 'address' is an
 * offset relative to the adapter's io_port_base.
 */
#define DC395x_read8(acb,address) (u8)(inb(acb->io_port_base + (address)))
#define DC395x_read16(acb,address) (u16)(inw(acb->io_port_base + (address)))
#define DC395x_read32(acb,address) (u32)(inl(acb->io_port_base + (address)))
#define DC395x_write8(acb,address,value) outb((value), acb->io_port_base + (address))
#define DC395x_write16(acb,address,value) outw((value), acb->io_port_base + (address))
#define DC395x_write32(acb,address,value) outl((value), acb->io_port_base + (address))
  143. /* cmd->result */
  144. #define RES_TARGET 0x000000FF /* Target State */
  145. #define RES_TARGET_LNX STATUS_MASK /* Only official ... */
  146. #define RES_ENDMSG 0x0000FF00 /* End Message */
  147. #define RES_DID 0x00FF0000 /* DID_ codes */
  148. #define RES_DRV 0xFF000000 /* DRIVER_ codes */
  149. #define MK_RES(drv,did,msg,tgt) ((int)(drv)<<24 | (int)(did)<<16 | (int)(msg)<<8 | (int)(tgt))
  150. #define MK_RES_LNX(drv,did,msg,tgt) ((int)(drv)<<24 | (int)(did)<<16 | (int)(msg)<<8 | (int)(tgt)<<1)
  151. #define SET_RES_TARGET(who,tgt) { who &= ~RES_TARGET; who |= (int)(tgt); }
  152. #define SET_RES_TARGET_LNX(who,tgt) { who &= ~RES_TARGET_LNX; who |= (int)(tgt) << 1; }
  153. #define SET_RES_MSG(who,msg) { who &= ~RES_ENDMSG; who |= (int)(msg) << 8; }
  154. #define SET_RES_DID(who,did) { who &= ~RES_DID; who |= (int)(did) << 16; }
  155. #define SET_RES_DRV(who,drv) { who &= ~RES_DRV; who |= (int)(drv) << 24; }
  156. #define TAG_NONE 255
/*
 * srb->segment_x is the hw sg list. It is always allocated as
 * DC395x_MAX_SG_LISTENTRY entries in a linear block which does not
 * cross a page boundary.
 */
#define SEGMENTX_LEN (sizeof(struct SGentry)*DC395x_MAX_SG_LISTENTRY)
/* One hardware scatter/gather list entry as consumed by the TRM-S1040. */
struct SGentry {
	u32 address;		/* bus! address */
	u32 length;
};
/* The SEEPROM structure for TRM_S1040 */

/* Per-target configuration bytes as stored in the serial EEPROM. */
struct NVRamTarget {
	u8 cfg0;		/* Target configuration byte 0 */
	u8 period;		/* Target period */
	u8 cfg2;		/* Target configuration byte 2 */
	u8 cfg3;		/* Target configuration byte 3 */
};
/*
 * In-memory image of the adapter's 128-byte serial EEPROM; the byte
 * offsets within the EEPROM are noted in the field comments.
 */
struct NvRamType {
	u8 sub_vendor_id[2];		/* 0,1 Sub Vendor ID */
	u8 sub_sys_id[2];		/* 2,3 Sub System ID */
	u8 sub_class;			/* 4 Sub Class */
	u8 vendor_id[2];		/* 5,6 Vendor ID */
	u8 device_id[2];		/* 7,8 Device ID */
	u8 reserved;			/* 9 Reserved */
	struct NVRamTarget target[DC395x_MAX_SCSI_ID];
					/** 10,11,12,13
					 ** 14,15,16,17
					 ** ....
					 ** ....
					 ** 70,71,72,73
					 */
	u8 scsi_id;			/* 74 Host Adapter SCSI ID */
	u8 channel_cfg;			/* 75 Channel configuration */
	u8 delay_time;			/* 76 Power on delay time */
	u8 max_tag;			/* 77 Maximum tags */
	u8 reserved0;			/* 78 */
	u8 boot_target;			/* 79 */
	u8 boot_lun;			/* 80 */
	u8 reserved1;			/* 81 */
	u16 reserved2[22];		/* 82,..125 */
	u16 cksum;			/* 126,127 */
};
/*
 * Per-command request state (SRB). One of these tracks each scsi_cmnd
 * handed to the driver; SRBs are kept on the adapter's free list and on
 * each device's waiting/going lists (see DeviceCtlBlk/AdapterCtlBlk).
 */
struct ScsiReqBlk {
	struct list_head list;		/* next/prev ptrs for srb lists */
	struct DeviceCtlBlk *dcb;	/* device this request is for */
	struct scsi_cmnd *cmd;		/* midlayer command being serviced */
	struct SGentry *segment_x;	/* Linear array of hw sg entries (up to 64 entries) */
	dma_addr_t sg_bus_addr;		/* Bus address of sg list (ie, of segment_x) */
	u8 sg_count;			/* No of HW sg entries for this request */
	u8 sg_index;			/* Index of HW sg entry for this request */
	size_t total_xfer_length;	/* Total number of bytes remaining to be transferred */
	size_t request_length;		/* Total number of bytes in this request */
	/*
	 * The sense buffer handling function, request_sense, uses
	 * the first hw sg entry (segment_x[0]) and the transfer
	 * length (total_xfer_length). While doing this it stores the
	 * original values into the last sg hw list
	 * (srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1] and the
	 * total_xfer_length in xferred. These values are restored in
	 * pci_unmap_srb_sense. This is the only place xferred is used.
	 */
	size_t xferred;			/* Saved copy of total_xfer_length */
	u16 state;
	u8 msgin_buf[6];		/* incoming SCSI message bytes */
	u8 msgout_buf[6];		/* outgoing SCSI message bytes */
	u8 adapter_status;
	u8 target_status;
	u8 msg_count;
	u8 end_message;
	u8 tag_number;
	u8 status;
	u8 retry_count;
	u8 flag;
	u8 scsi_phase;
};
/*
 * Per target/LUN control block: request queues and negotiated transfer
 * settings for one SCSI device on the bus.
 */
struct DeviceCtlBlk {
	struct list_head list;		/* next/prev ptrs for the dcb list */
	struct AdapterCtlBlk *acb;	/* owning adapter */
	struct list_head srb_going_list;	/* head of going srb list */
	struct list_head srb_waiting_list;	/* head of waiting srb list */
	struct ScsiReqBlk *active_srb;	/* srb currently being processed, if any */
	u32 tag_mask;
	u16 max_command;
	u8 target_id;			/* SCSI Target ID (SCSI Only) */
	u8 target_lun;			/* SCSI Log. Unit (SCSI Only) */
	u8 identify_msg;
	u8 dev_mode;
	u8 inquiry7;			/* To store Inquiry flags */
	u8 sync_mode;			/* 0:async mode */
	u8 min_nego_period;		/* for nego. */
	u8 sync_period;			/* for reg. */
	u8 sync_offset;			/* for reg. and nego.(low nibble) */
	u8 flag;
	u8 dev_type;
	u8 init_tcq_flag;
};
/*
 * Per-adapter (host) control block: chip I/O window, the list of known
 * devices (dcbs), the srb pool, timers and cached EEPROM settings.
 */
struct AdapterCtlBlk {
	struct Scsi_Host *scsi_host;
	unsigned long io_port_base;	/* base of the chip's port I/O window */
	unsigned long io_port_len;
	struct list_head dcb_list;	/* head of going dcb list */
	struct DeviceCtlBlk *dcb_run_robin;	/* next dcb for round-robin scheduling */
	struct DeviceCtlBlk *active_dcb;	/* device currently being serviced */
	struct list_head srb_free_list;	/* head of free srb list */
	struct ScsiReqBlk *tmp_srb;
	struct timer_list waiting_timer;
	struct timer_list selto_timer;	/* selection timeout timer */
	unsigned long last_reset;
	u16 srb_count;
	u8 sel_timeout;
	unsigned int irq_level;
	u8 tag_max_num;
	u8 acb_flag;
	u8 gmode2;
	u8 config;
	u8 lun_chk;
	u8 scan_devices;
	u8 hostid_bit;
	u8 dcb_map[DC395x_MAX_SCSI_ID];
	/* NOTE(review): presumably dcb lookup indexed [target_id][lun] - confirm */
	struct DeviceCtlBlk *children[DC395x_MAX_SCSI_ID][32];
	struct pci_dev *dev;
	u8 msg_len;
	struct ScsiReqBlk srb_array[DC395x_MAX_SRB_CNT];	/* statically allocated srb pool */
	struct ScsiReqBlk srb;
	struct NvRamType eeprom;	/* eeprom settings for this adapter */
};
  283. /*---------------------------------------------------------------------------
  284. Forward declarations
  285. ---------------------------------------------------------------------------*/
  286. static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
  287. u16 *pscsi_status);
  288. static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
  289. u16 *pscsi_status);
  290. static void command_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
  291. u16 *pscsi_status);
  292. static void status_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
  293. u16 *pscsi_status);
  294. static void msgout_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
  295. u16 *pscsi_status);
  296. static void msgin_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
  297. u16 *pscsi_status);
  298. static void data_out_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
  299. u16 *pscsi_status);
  300. static void data_in_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
  301. u16 *pscsi_status);
  302. static void command_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
  303. u16 *pscsi_status);
  304. static void status_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
  305. u16 *pscsi_status);
  306. static void msgout_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
  307. u16 *pscsi_status);
  308. static void msgin_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
  309. u16 *pscsi_status);
  310. static void nop0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
  311. u16 *pscsi_status);
  312. static void nop1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
  313. u16 *pscsi_status);
  314. static void set_basic_config(struct AdapterCtlBlk *acb);
  315. static void cleanup_after_transfer(struct AdapterCtlBlk *acb,
  316. struct ScsiReqBlk *srb);
  317. static void reset_scsi_bus(struct AdapterCtlBlk *acb);
  318. static void data_io_transfer(struct AdapterCtlBlk *acb,
  319. struct ScsiReqBlk *srb, u16 io_dir);
  320. static void disconnect(struct AdapterCtlBlk *acb);
  321. static void reselect(struct AdapterCtlBlk *acb);
  322. static u8 start_scsi(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
  323. struct ScsiReqBlk *srb);
  324. static inline void enable_msgout_abort(struct AdapterCtlBlk *acb,
  325. struct ScsiReqBlk *srb);
  326. static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
  327. struct ScsiReqBlk *srb);
  328. static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_code,
  329. struct scsi_cmnd *cmd, u8 force);
  330. static void scsi_reset_detect(struct AdapterCtlBlk *acb);
  331. static void pci_unmap_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb);
  332. static void pci_unmap_srb_sense(struct AdapterCtlBlk *acb,
  333. struct ScsiReqBlk *srb);
  334. static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
  335. struct ScsiReqBlk *srb);
  336. static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
  337. struct ScsiReqBlk *srb);
  338. static void set_xfer_rate(struct AdapterCtlBlk *acb,
  339. struct DeviceCtlBlk *dcb);
  340. static void waiting_timeout(struct timer_list *t);
  341. /*---------------------------------------------------------------------------
  342. Static Data
  343. ---------------------------------------------------------------------------*/
/* Current sync offset used while a negotiation is in progress
 * (module-wide, shared across adapter instances). */
static u16 current_sync_offset = 0;

/* Jump tables of bus-phase handlers, indexed by SCSI phase code (0-7).
 * NOTE(review): phase0/phase1 appear to be the leave-phase/enter-phase
 * handler pairs used by the interrupt path -- confirm against the
 * interrupt handler. */
static void *dc395x_scsi_phase0[] = {
	data_out_phase0,/* phase:0 */
	data_in_phase0,	/* phase:1 */
	command_phase0,	/* phase:2 */
	status_phase0,	/* phase:3 */
	nop0,		/* phase:4 PH_BUS_FREE .. initial phase */
	nop0,		/* phase:5 PH_BUS_FREE .. initial phase */
	msgout_phase0,	/* phase:6 */
	msgin_phase0,	/* phase:7 */
};

static void *dc395x_scsi_phase1[] = {
	data_out_phase1,/* phase:0 */
	data_in_phase1,	/* phase:1 */
	command_phase1,	/* phase:2 */
	status_phase1,	/* phase:3 */
	nop1,		/* phase:4 PH_BUS_FREE .. initial phase */
	nop1,		/* phase:5 PH_BUS_FREE .. initial phase */
	msgout_phase1,	/* phase:6 */
	msgin_phase1,	/* phase:7 */
};

/*
 *Fast20:	000	 50ns, 20.0 MHz
 *		001	 75ns, 13.3 MHz
 *		010	100ns, 10.0 MHz
 *		011	125ns,  8.0 MHz
 *		100	150ns,  6.6 MHz
 *		101	175ns,  5.7 MHz
 *		110	200ns,  5.0 MHz
 *		111	250ns,  4.0 MHz
 *
 *Fast40(LVDS):	000	 25ns, 40.0 MHz
 *		001	 50ns, 20.0 MHz
 *		010	 75ns, 13.3 MHz
 *		011	100ns, 10.0 MHz
 *		100	125ns,  8.0 MHz
 *		101	150ns,  6.6 MHz
 *		110	175ns,  5.7 MHz
 *		111	200ns,  5.0 MHz
 */
/*static u8 clock_period[] = {12,19,25,31,37,44,50,62};*/
/* real period:48ns,76ns,100ns,124ns,148ns,176ns,200ns,248ns */
/* Sync period factor (units of 4ns) and speed (in 100kHz) for each of
 * the eight chip clock-select values above. */
static u8 clock_period[] = { 12, 18, 25, 31, 37, 43, 50, 62 };
static u16 clock_speed[] = { 200, 133, 100, 80, 67, 58, 50, 40 };
  388. /*---------------------------------------------------------------------------
  389. Configuration
  390. ---------------------------------------------------------------------------*/
  391. /*
  392. * Module/boot parameters currently effect *all* instances of the
  393. * card in the system.
  394. */
  395. /*
  396. * Command line parameters are stored in a structure below.
  397. * These are the index's into the structure for the various
  398. * command line options.
  399. */
  400. #define CFG_ADAPTER_ID 0
  401. #define CFG_MAX_SPEED 1
  402. #define CFG_DEV_MODE 2
  403. #define CFG_ADAPTER_MODE 3
  404. #define CFG_TAGS 4
  405. #define CFG_RESET_DELAY 5
  406. #define CFG_NUM 6 /* number of configuration items */
  407. /*
  408. * Value used to indicate that a command line override
  409. * hasn't been used to modify the value.
  410. */
  411. #define CFG_PARAM_UNSET -1
  412. /*
  413. * Hold command line parameters.
  414. */
  415. struct ParameterData {
  416. int value; /* value of this setting */
  417. int min; /* minimum value */
  418. int max; /* maximum value */
  419. int def; /* default value */
  420. int safe; /* safe value */
  421. };
/*
 * Command line override values, indexed by the CFG_* defines above.
 * Each entry starts at CFG_PARAM_UNSET; fix_settings() clamps any
 * out-of-range value back to .def, and set_safe_settings() may force
 * every entry to its .safe value.
 */
static struct ParameterData cfg_data[] = {
	{ /* adapter id */
		CFG_PARAM_UNSET,
		0,
		15,
		7,
		7
	},
	{ /* max speed */
		CFG_PARAM_UNSET,
		0,
		7,
		1,	/* 13.3Mhz */
		4,	/* 6.7Mhz */
	},
	{ /* dev mode */
		CFG_PARAM_UNSET,
		0,
		0x3f,
		NTC_DO_PARITY_CHK | NTC_DO_DISCONNECT | NTC_DO_SYNC_NEGO |
			NTC_DO_WIDE_NEGO | NTC_DO_TAG_QUEUEING |
			NTC_DO_SEND_START,
		NTC_DO_PARITY_CHK | NTC_DO_SEND_START
	},
	{ /* adapter mode */
		CFG_PARAM_UNSET,
		0,
		0x2f,
		NAC_SCANLUN |
			NAC_GT2DRIVES | NAC_GREATER_1G | NAC_POWERON_SCSI_RESET
			/*| NAC_ACTIVE_NEG*/,
		NAC_GT2DRIVES | NAC_GREATER_1G | NAC_POWERON_SCSI_RESET | 0x08
	},
	{ /* tags */
		CFG_PARAM_UNSET,
		0,
		5,
		3,	/* 16 tags (??) */
		2,
	},
	{ /* reset delay */
		CFG_PARAM_UNSET,
		0,
		180,
		1,	/* 1 second */
		10,	/* 10 seconds */
	}
};
/*
 * Safe settings. If set to zero the BIOS/default values with
 * command line overrides will be used. If set to 1 then safe and
 * slow settings will be used.
 */
static bool use_safe_settings = 0;
module_param_named(safe, use_safe_settings, bool, 0);
MODULE_PARM_DESC(safe, "Use safe and slow settings only. Default: false");

/* Each of the following parameters writes straight into cfg_data[];
 * values are range-checked later by fix_settings(). */
module_param_named(adapter_id, cfg_data[CFG_ADAPTER_ID].value, int, 0);
MODULE_PARM_DESC(adapter_id, "Adapter SCSI ID. Default 7 (0-15)");

module_param_named(max_speed, cfg_data[CFG_MAX_SPEED].value, int, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed. Default 1 (0-7) Speeds: 0=20, 1=13.3, 2=10, 3=8, 4=6.7, 5=5.8, 6=5, 7=4 Mhz");

module_param_named(dev_mode, cfg_data[CFG_DEV_MODE].value, int, 0);
MODULE_PARM_DESC(dev_mode, "Device mode.");

module_param_named(adapter_mode, cfg_data[CFG_ADAPTER_MODE].value, int, 0);
MODULE_PARM_DESC(adapter_mode, "Adapter mode.");

module_param_named(tags, cfg_data[CFG_TAGS].value, int, 0);
MODULE_PARM_DESC(tags, "Number of tags (1<<x). Default 3 (0-5)");

module_param_named(reset_delay, cfg_data[CFG_RESET_DELAY].value, int, 0);
MODULE_PARM_DESC(reset_delay, "Reset delay in seconds. Default 1 (0-180)");
  490. /**
  491. * set_safe_settings - if the use_safe_settings option is set then
  492. * set all values to the safe and slow values.
  493. **/
  494. static void set_safe_settings(void)
  495. {
  496. if (use_safe_settings)
  497. {
  498. int i;
  499. dprintkl(KERN_INFO, "Using safe settings.\n");
  500. for (i = 0; i < CFG_NUM; i++)
  501. {
  502. cfg_data[i].value = cfg_data[i].safe;
  503. }
  504. }
  505. }
  506. /**
  507. * fix_settings - reset any boot parameters which are out of range
  508. * back to the default values.
  509. **/
  510. static void fix_settings(void)
  511. {
  512. int i;
  513. dprintkdbg(DBG_1,
  514. "setup: AdapterId=%08x MaxSpeed=%08x DevMode=%08x "
  515. "AdapterMode=%08x Tags=%08x ResetDelay=%08x\n",
  516. cfg_data[CFG_ADAPTER_ID].value,
  517. cfg_data[CFG_MAX_SPEED].value,
  518. cfg_data[CFG_DEV_MODE].value,
  519. cfg_data[CFG_ADAPTER_MODE].value,
  520. cfg_data[CFG_TAGS].value,
  521. cfg_data[CFG_RESET_DELAY].value);
  522. for (i = 0; i < CFG_NUM; i++)
  523. {
  524. if (cfg_data[i].value < cfg_data[i].min
  525. || cfg_data[i].value > cfg_data[i].max)
  526. cfg_data[i].value = cfg_data[i].def;
  527. }
  528. }
/*
 * Mapping from the eeprom delay index value (index into this array)
 * to the number of actual seconds that the delay should be for.
 */
static char eeprom_index_to_delay_map[] =
	{ 1, 3, 5, 10, 16, 30, 60, 120 };
  535. /**
  536. * eeprom_index_to_delay - Take the eeprom delay setting and convert it
  537. * into a number of seconds.
  538. *
  539. * @eeprom: The eeprom structure in which we find the delay index to map.
  540. **/
  541. static void eeprom_index_to_delay(struct NvRamType *eeprom)
  542. {
  543. eeprom->delay_time = eeprom_index_to_delay_map[eeprom->delay_time];
  544. }
  545. /**
  546. * delay_to_eeprom_index - Take a delay in seconds and return the
  547. * closest eeprom index which will delay for at least that amount of
  548. * seconds.
  549. *
  550. * @delay: The delay, in seconds, to find the eeprom index for.
  551. **/
  552. static int delay_to_eeprom_index(int delay)
  553. {
  554. u8 idx = 0;
  555. while (idx < 7 && eeprom_index_to_delay_map[idx] < delay)
  556. idx++;
  557. return idx;
  558. }
  559. /**
  560. * eeprom_override - Override the eeprom settings, in the provided
  561. * eeprom structure, with values that have been set on the command
  562. * line.
  563. *
  564. * @eeprom: The eeprom data to override with command line options.
  565. **/
  566. static void eeprom_override(struct NvRamType *eeprom)
  567. {
  568. u8 id;
  569. /* Adapter Settings */
  570. if (cfg_data[CFG_ADAPTER_ID].value != CFG_PARAM_UNSET)
  571. eeprom->scsi_id = (u8)cfg_data[CFG_ADAPTER_ID].value;
  572. if (cfg_data[CFG_ADAPTER_MODE].value != CFG_PARAM_UNSET)
  573. eeprom->channel_cfg = (u8)cfg_data[CFG_ADAPTER_MODE].value;
  574. if (cfg_data[CFG_RESET_DELAY].value != CFG_PARAM_UNSET)
  575. eeprom->delay_time = delay_to_eeprom_index(
  576. cfg_data[CFG_RESET_DELAY].value);
  577. if (cfg_data[CFG_TAGS].value != CFG_PARAM_UNSET)
  578. eeprom->max_tag = (u8)cfg_data[CFG_TAGS].value;
  579. /* Device Settings */
  580. for (id = 0; id < DC395x_MAX_SCSI_ID; id++) {
  581. if (cfg_data[CFG_DEV_MODE].value != CFG_PARAM_UNSET)
  582. eeprom->target[id].cfg0 =
  583. (u8)cfg_data[CFG_DEV_MODE].value;
  584. if (cfg_data[CFG_MAX_SPEED].value != CFG_PARAM_UNSET)
  585. eeprom->target[id].period =
  586. (u8)cfg_data[CFG_MAX_SPEED].value;
  587. }
  588. }
  589. /*---------------------------------------------------------------------------
  590. ---------------------------------------------------------------------------*/
  591. static unsigned int list_size(struct list_head *head)
  592. {
  593. unsigned int count = 0;
  594. struct list_head *pos;
  595. list_for_each(pos, head)
  596. count++;
  597. return count;
  598. }
  599. static struct DeviceCtlBlk *dcb_get_next(struct list_head *head,
  600. struct DeviceCtlBlk *pos)
  601. {
  602. int use_next = 0;
  603. struct DeviceCtlBlk* next = NULL;
  604. struct DeviceCtlBlk* i;
  605. if (list_empty(head))
  606. return NULL;
  607. /* find supplied dcb and then select the next one */
  608. list_for_each_entry(i, head, list)
  609. if (use_next) {
  610. next = i;
  611. break;
  612. } else if (i == pos) {
  613. use_next = 1;
  614. }
  615. /* if no next one take the head one (ie, wraparound) */
  616. if (!next)
  617. list_for_each_entry(i, head, list) {
  618. next = i;
  619. break;
  620. }
  621. return next;
  622. }
  623. static void free_tag(struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
  624. {
  625. if (srb->tag_number < 255) {
  626. dcb->tag_mask &= ~(1 << srb->tag_number); /* free tag mask */
  627. srb->tag_number = 255;
  628. }
  629. }
  630. /* Find cmd in SRB list */
  631. static inline struct ScsiReqBlk *find_cmd(struct scsi_cmnd *cmd,
  632. struct list_head *head)
  633. {
  634. struct ScsiReqBlk *i;
  635. list_for_each_entry(i, head, list)
  636. if (i->cmd == cmd)
  637. return i;
  638. return NULL;
  639. }
  640. static struct ScsiReqBlk *srb_get_free(struct AdapterCtlBlk *acb)
  641. {
  642. struct list_head *head = &acb->srb_free_list;
  643. struct ScsiReqBlk *srb = NULL;
  644. if (!list_empty(head)) {
  645. srb = list_entry(head->next, struct ScsiReqBlk, list);
  646. list_del(head->next);
  647. dprintkdbg(DBG_0, "srb_get_free: srb=%p\n", srb);
  648. }
  649. return srb;
  650. }
/* Return a finished srb to the adapter-wide free pool. */
static void srb_free_insert(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
{
	dprintkdbg(DBG_0, "srb_free_insert: srb=%p\n", srb);
	list_add_tail(&srb->list, &acb->srb_free_list);
}
/* Queue @srb at the HEAD of the device's waiting list, ahead of any
 * already-waiting requests (used when a start attempt must be retried). */
static void srb_waiting_insert(struct DeviceCtlBlk *dcb,
		struct ScsiReqBlk *srb)
{
	dprintkdbg(DBG_0, "srb_waiting_insert: (0x%p) <%02i-%i> srb=%p\n",
		srb->cmd, dcb->target_id, dcb->target_lun, srb);
	list_add(&srb->list, &dcb->srb_waiting_list);
}
/* Queue @srb at the TAIL of the device's waiting list, preserving the
 * order in which commands arrived. */
static void srb_waiting_append(struct DeviceCtlBlk *dcb,
		struct ScsiReqBlk *srb)
{
	dprintkdbg(DBG_0, "srb_waiting_append: (0x%p) <%02i-%i> srb=%p\n",
		srb->cmd, dcb->target_id, dcb->target_lun, srb);
	list_add_tail(&srb->list, &dcb->srb_waiting_list);
}
/* Add @srb to the device's "going" list of commands in flight on the bus. */
static void srb_going_append(struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
{
	dprintkdbg(DBG_0, "srb_going_append: (0x%p) <%02i-%i> srb=%p\n",
		srb->cmd, dcb->target_id, dcb->target_lun, srb);
	list_add_tail(&srb->list, &dcb->srb_going_list);
}
  676. static void srb_going_remove(struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
  677. {
  678. struct ScsiReqBlk *i;
  679. struct ScsiReqBlk *tmp;
  680. dprintkdbg(DBG_0, "srb_going_remove: (0x%p) <%02i-%i> srb=%p\n",
  681. srb->cmd, dcb->target_id, dcb->target_lun, srb);
  682. list_for_each_entry_safe(i, tmp, &dcb->srb_going_list, list)
  683. if (i == srb) {
  684. list_del(&srb->list);
  685. break;
  686. }
  687. }
  688. static void srb_waiting_remove(struct DeviceCtlBlk *dcb,
  689. struct ScsiReqBlk *srb)
  690. {
  691. struct ScsiReqBlk *i;
  692. struct ScsiReqBlk *tmp;
  693. dprintkdbg(DBG_0, "srb_waiting_remove: (0x%p) <%02i-%i> srb=%p\n",
  694. srb->cmd, dcb->target_id, dcb->target_lun, srb);
  695. list_for_each_entry_safe(i, tmp, &dcb->srb_waiting_list, list)
  696. if (i == srb) {
  697. list_del(&srb->list);
  698. break;
  699. }
  700. }
/* Move @srb from the going list back to the head of the waiting list
 * (e.g. after a disconnect or failed start). */
static void srb_going_to_waiting_move(struct DeviceCtlBlk *dcb,
		struct ScsiReqBlk *srb)
{
	dprintkdbg(DBG_0,
		"srb_going_to_waiting_move: (0x%p) <%02i-%i> srb=%p\n",
		srb->cmd, dcb->target_id, dcb->target_lun, srb);
	list_move(&srb->list, &dcb->srb_waiting_list);
}
/* Move @srb from the waiting list to the head of the going list once it
 * has been successfully started on the bus. */
static void srb_waiting_to_going_move(struct DeviceCtlBlk *dcb,
		struct ScsiReqBlk *srb)
{
	dprintkdbg(DBG_0,
		"srb_waiting_to_going_move: (0x%p) <%02i-%i> srb=%p\n",
		srb->cmd, dcb->target_id, dcb->target_lun, srb);
	list_move(&srb->list, &dcb->srb_going_list);
}
/* Sets the timer to wake us up */
static void waiting_set_timer(struct AdapterCtlBlk *acb, unsigned long to)
{
	/* a wakeup is already scheduled -- do not rearm */
	if (timer_pending(&acb->waiting_timer))
		return;
	/* NOTE(review): last_reset holds a future jiffies value set after a
	 * bus reset; this clamp appears intended to avoid firing while the
	 * bus is still settling -- confirm against the reset paths. */
	if (time_before(jiffies + to, acb->last_reset - HZ / 2))
		acb->waiting_timer.expires =
		    acb->last_reset - HZ / 2 + 1;
	else
		acb->waiting_timer.expires = jiffies + to + 1;
	add_timer(&acb->waiting_timer);
}
/* Send the next command from the waiting list to the bus.
 * Walks the dcb list round-robin starting at dcb_run_robin and starts
 * the first waiting srb of the first device that is below its command
 * limit.  Does nothing while the bus is owned or a reset is pending. */
static void waiting_process_next(struct AdapterCtlBlk *acb)
{
	struct DeviceCtlBlk *start = NULL;
	struct DeviceCtlBlk *pos;
	struct DeviceCtlBlk *dcb;
	struct ScsiReqBlk *srb;
	struct list_head *dcb_list_head = &acb->dcb_list;

	/* bus busy or reset in progress: nothing can be started now */
	if (acb->active_dcb
	    || (acb->acb_flag & (RESET_DETECT + RESET_DONE + RESET_DEV)))
		return;

	if (timer_pending(&acb->waiting_timer))
		del_timer(&acb->waiting_timer);

	if (list_empty(dcb_list_head))
		return;

	/*
	 * Find the starting dcb. Need to find it again in the list
	 * since the list may have changed since we set the ptr to it
	 */
	list_for_each_entry(dcb, dcb_list_head, list)
		if (dcb == acb->dcb_run_robin) {
			start = dcb;
			break;
		}
	if (!start) {
		/* This can happen! */
		start = list_entry(dcb_list_head->next, typeof(*start), list);
		acb->dcb_run_robin = start;
	}

	/*
	 * Loop over the dcb, but we start somewhere (potentially) in
	 * the middle of the loop so we need to manually do this.
	 */
	pos = start;
	do {
		struct list_head *waiting_list_head = &pos->srb_waiting_list;

		/* advance the round-robin pointer so the next device gets
		 * a chance on the following call */
		acb->dcb_run_robin = dcb_get_next(dcb_list_head,
						  acb->dcb_run_robin);

		if (list_empty(waiting_list_head) ||
		    pos->max_command <= list_size(&pos->srb_going_list)) {
			/* move to next dcb */
			pos = dcb_get_next(dcb_list_head, pos);
		} else {
			srb = list_entry(waiting_list_head->next,
					 struct ScsiReqBlk, list);

			/* Try to send to the bus */
			if (!start_scsi(acb, pos, srb))
				srb_waiting_to_going_move(pos, srb);
			else
				/* chip was busy; retry shortly */
				waiting_set_timer(acb, HZ/50);
			break;
		}
	} while (pos != start);
}
/* Wake up waiting queue: timer callback that retries starting waiting
 * commands, taking the host lock as the interrupt path would. */
static void waiting_timeout(struct timer_list *t)
{
	unsigned long flags;
	struct AdapterCtlBlk *acb = from_timer(acb, t, waiting_timer);
	dprintkdbg(DBG_1,
		"waiting_timeout: Queue woken up by timer. acb=%p\n", acb);
	DC395x_LOCK_IO(acb->scsi_host, flags);
	waiting_process_next(acb);
	DC395x_UNLOCK_IO(acb->scsi_host, flags);
}
/* Get the DCB for a given ID/LUN combination.  May return NULL for an
 * unknown device; callers check for that. */
static struct DeviceCtlBlk *find_dcb(struct AdapterCtlBlk *acb, u8 id, u8 lun)
{
	return acb->children[id][lun];
}
/* Send SCSI Request Block (srb) to adapter (acb) */
static void send_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
{
	struct DeviceCtlBlk *dcb = srb->dcb;

	/* Queue instead of starting when the device is at its command
	 * limit, another device owns the bus, or a reset is in progress. */
	if (dcb->max_command <= list_size(&dcb->srb_going_list) ||
	    acb->active_dcb ||
	    (acb->acb_flag & (RESET_DETECT + RESET_DONE + RESET_DEV))) {
		srb_waiting_append(dcb, srb);
		waiting_process_next(acb);
		return;
	}

	/* Try to start immediately; on failure park the srb at the head of
	 * the waiting list and retry shortly via the waiting timer. */
	if (!start_scsi(acb, dcb, srb))
		srb_going_append(dcb, srb);
	else {
		srb_waiting_insert(dcb, srb);
		waiting_set_timer(acb, HZ / 50);
	}
}
/* Prepare SRB for being sent to Device DCB w/ command *cmd.
 * Resets all per-request state, DMA-maps the command's scatterlist into
 * the driver's SGentry table and maps that table itself for the chip. */
static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
		struct ScsiReqBlk *srb)
{
	int nseg;
	enum dma_data_direction dir = cmd->sc_data_direction;
	dprintkdbg(DBG_0, "build_srb: (0x%p) <%02i-%i>\n",
		cmd, dcb->target_id, dcb->target_lun);

	/* reset every per-request field before (re)use */
	srb->dcb = dcb;
	srb->cmd = cmd;
	srb->sg_count = 0;
	srb->total_xfer_length = 0;
	srb->sg_bus_addr = 0;
	srb->sg_index = 0;
	srb->adapter_status = 0;
	srb->target_status = 0;
	srb->msg_count = 0;
	srb->status = 0;
	srb->flag = 0;
	srb->state = 0;
	srb->retry_count = 0;
	srb->tag_number = TAG_NONE;
	srb->scsi_phase = PH_BUS_FREE;	/* initial phase */
	srb->end_message = 0;

	nseg = scsi_dma_map(cmd);
	BUG_ON(nseg < 0);

	if (dir == PCI_DMA_NONE || !nseg) {
		/* no data transfer for this command */
		dprintkdbg(DBG_0,
			"build_srb: [0] len=%d buf=%p use_sg=%d !MAP=%08x\n",
			cmd->bufflen, scsi_sglist(cmd), scsi_sg_count(cmd),
			srb->segment_x[0].address);
	} else {
		int i;
		u32 reqlen = scsi_bufflen(cmd);
		struct scatterlist *sg;
		struct SGentry *sgp = srb->segment_x;
		srb->sg_count = nseg;
		dprintkdbg(DBG_0,
			"build_srb: [n] len=%d buf=%p use_sg=%d segs=%d\n",
			reqlen, scsi_sglist(cmd), scsi_sg_count(cmd),
			srb->sg_count);

		/* copy the mapped scatterlist into the driver's own SG
		 * table, accumulating the total transfer length */
		scsi_for_each_sg(cmd, sg, srb->sg_count, i) {
			u32 busaddr = (u32)sg_dma_address(sg);
			u32 seglen = (u32)sg->length;
			sgp[i].address = busaddr;
			sgp[i].length = seglen;
			srb->total_xfer_length += seglen;
		}
		sgp += srb->sg_count - 1;

		/*
		 * adjust last page if too big as it is allocated
		 * on even page boundaries
		 */
		if (srb->total_xfer_length > reqlen) {
			sgp->length -= (srb->total_xfer_length - reqlen);
			srb->total_xfer_length = reqlen;
		}

		/* Fixup for WIDE padding - make sure length is even */
		if (dcb->sync_period & WIDE_SYNC &&
		    srb->total_xfer_length % 2) {
			srb->total_xfer_length++;
			sgp->length++;
		}

		/* map the SG table itself so the chip can fetch it by DMA */
		srb->sg_bus_addr = pci_map_single(dcb->acb->dev,
				srb->segment_x,
				SEGMENTX_LEN,
				PCI_DMA_TODEVICE);

		dprintkdbg(DBG_SG, "build_srb: [n] map sg %p->%08x(%05x)\n",
			srb->segment_x, srb->sg_bus_addr, SEGMENTX_LEN);
	}

	srb->request_length = srb->total_xfer_length;
}
/**
 * dc395x_queue_command_lck - queue scsi command passed from the mid
 * layer, invoke 'done' on completion
 *
 * @cmd: pointer to scsi command object
 * @done: function pointer to be invoked on completion
 *
 * Returns 1 if the adapter (host) is busy, else returns 0. One
 * reason for an adapter to be busy is that the number
 * of outstanding queued commands is already equal to
 * struct Scsi_Host::can_queue .
 *
 * Required: if struct Scsi_Host::can_queue is ever non-zero
 * then this function is required.
 *
 * Locks: struct Scsi_Host::host_lock held on entry (with "irqsave")
 * and is expected to be held on return.
 *
 **/
static int dc395x_queue_command_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
	struct DeviceCtlBlk *dcb;
	struct ScsiReqBlk *srb;
	struct AdapterCtlBlk *acb =
	    (struct AdapterCtlBlk *)cmd->device->host->hostdata;
	dprintkdbg(DBG_0, "queue_command: (0x%p) <%02i-%i> cmnd=0x%02x\n",
		cmd, cmd->device->id, (u8)cmd->device->lun, cmd->cmnd[0]);

	/* Assume BAD_TARGET; will be cleared later */
	cmd->result = DID_BAD_TARGET << 16;

	/* ignore invalid targets */
	if (cmd->device->id >= acb->scsi_host->max_id ||
	    cmd->device->lun >= acb->scsi_host->max_lun ||
	    cmd->device->lun >31) {
		goto complete;
	}

	/* does the specified lun on the specified device exist */
	if (!(acb->dcb_map[cmd->device->id] & (1 << cmd->device->lun))) {
		dprintkl(KERN_INFO, "queue_command: Ignore target <%02i-%i>\n",
			cmd->device->id, (u8)cmd->device->lun);
		goto complete;
	}

	/* do we have a DCB for the device */
	dcb = find_dcb(acb, cmd->device->id, cmd->device->lun);
	if (!dcb) {
		/* should never happen */
		dprintkl(KERN_ERR, "queue_command: No such device <%02i-%i>",
			cmd->device->id, (u8)cmd->device->lun);
		goto complete;
	}

	/* set callback and clear result in the command */
	cmd->scsi_done = done;
	cmd->result = 0;

	srb = srb_get_free(acb);
	if (!srb)
	{
		/*
		 * Return 1 since we are unable to queue this command at this
		 * point in time.
		 */
		dprintkdbg(DBG_0, "queue_command: No free srb's\n");
		return 1;
	}

	build_srb(cmd, dcb, srb);

	if (!list_empty(&dcb->srb_waiting_list)) {
		/* append to waiting queue so earlier commands keep order */
		srb_waiting_append(dcb, srb);
		waiting_process_next(acb);
	} else {
		/* process immediately */
		send_srb(acb, srb);
	}
	dprintkdbg(DBG_1, "queue_command: (0x%p) done\n", cmd);

	return 0;

complete:
	/*
	 * Complete the command immediately, and then return 0 to
	 * indicate that we have handled the command. This is usually
	 * done when the command is for things like non existent
	 * devices.
	 */
	done(cmd);
	return 0;
}
  973. static DEF_SCSI_QCMD(dc395x_queue_command)
  974. /*
  975. * Return the disk geometry for the given SCSI device.
  976. */
  977. static int dc395x_bios_param(struct scsi_device *sdev,
  978. struct block_device *bdev, sector_t capacity, int *info)
  979. {
  980. #ifdef CONFIG_SCSI_DC395x_TRMS1040_TRADMAP
  981. int heads, sectors, cylinders;
  982. struct AdapterCtlBlk *acb;
  983. int size = capacity;
  984. dprintkdbg(DBG_0, "dc395x_bios_param..............\n");
  985. acb = (struct AdapterCtlBlk *)sdev->host->hostdata;
  986. heads = 64;
  987. sectors = 32;
  988. cylinders = size / (heads * sectors);
  989. if ((acb->gmode2 & NAC_GREATER_1G) && (cylinders > 1024)) {
  990. heads = 255;
  991. sectors = 63;
  992. cylinders = size / (heads * sectors);
  993. }
  994. geom[0] = heads;
  995. geom[1] = sectors;
  996. geom[2] = cylinders;
  997. return 0;
  998. #else
  999. return scsicam_bios_param(bdev, capacity, info);
  1000. #endif
  1001. }
/* Debug helper: dump the state of @srb (falling back to the active
 * dcb/srb when NULL is passed) plus the chip's SCSI, DMA and general
 * register banks and the PCI status word. */
static void dump_register_info(struct AdapterCtlBlk *acb,
		struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
{
	u16 pstat;
	struct pci_dev *dev = acb->dev;
	pci_read_config_word(dev, PCI_STATUS, &pstat);
	if (!dcb)
		dcb = acb->active_dcb;
	if (!srb && dcb)
		srb = dcb->active_srb;
	if (srb) {
		if (!srb->cmd)
			/* an srb without a command indicates corrupt state */
			dprintkl(KERN_INFO, "dump: srb=%p cmd=%p OOOPS!\n",
				srb, srb->cmd);
		else
			dprintkl(KERN_INFO, "dump: srb=%p cmd=%p "
				"cmnd=0x%02x <%02i-%i>\n",
				srb, srb->cmd,
				srb->cmd->cmnd[0], srb->cmd->device->id,
				(u8)srb->cmd->device->lun);
		printk(" sglist=%p cnt=%i idx=%i len=%zu\n",
		       srb->segment_x, srb->sg_count, srb->sg_index,
		       srb->total_xfer_length);
		printk(" state=0x%04x status=0x%02x phase=0x%02x (%sconn.)\n",
		       srb->state, srb->status, srb->scsi_phase,
		       (acb->active_dcb) ? "" : "not");
	}
	/* SCSI core register bank */
	dprintkl(KERN_INFO, "dump: SCSI{status=0x%04x fifocnt=0x%02x "
		"signals=0x%02x irqstat=0x%02x sync=0x%02x target=0x%02x "
		"rselid=0x%02x ctr=0x%08x irqen=0x%02x config=0x%04x "
		"config2=0x%02x cmd=0x%02x selto=0x%02x}\n",
		DC395x_read16(acb, TRM_S1040_SCSI_STATUS),
		DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT),
		DC395x_read8(acb, TRM_S1040_SCSI_SIGNAL),
		DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS),
		DC395x_read8(acb, TRM_S1040_SCSI_SYNC),
		DC395x_read8(acb, TRM_S1040_SCSI_TARGETID),
		DC395x_read8(acb, TRM_S1040_SCSI_IDMSG),
		DC395x_read32(acb, TRM_S1040_SCSI_COUNTER),
		DC395x_read8(acb, TRM_S1040_SCSI_INTEN),
		DC395x_read16(acb, TRM_S1040_SCSI_CONFIG0),
		DC395x_read8(acb, TRM_S1040_SCSI_CONFIG2),
		DC395x_read8(acb, TRM_S1040_SCSI_COMMAND),
		DC395x_read8(acb, TRM_S1040_SCSI_TIMEOUT));
	/* DMA engine register bank */
	dprintkl(KERN_INFO, "dump: DMA{cmd=0x%04x fifocnt=0x%02x fstat=0x%02x "
		"irqstat=0x%02x irqen=0x%02x cfg=0x%04x tctr=0x%08x "
		"ctctr=0x%08x addr=0x%08x:0x%08x}\n",
		DC395x_read16(acb, TRM_S1040_DMA_COMMAND),
		DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT),
		DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT),
		DC395x_read8(acb, TRM_S1040_DMA_STATUS),
		DC395x_read8(acb, TRM_S1040_DMA_INTEN),
		DC395x_read16(acb, TRM_S1040_DMA_CONFIG),
		DC395x_read32(acb, TRM_S1040_DMA_XCNT),
		DC395x_read32(acb, TRM_S1040_DMA_CXCNT),
		DC395x_read32(acb, TRM_S1040_DMA_XHIGHADDR),
		DC395x_read32(acb, TRM_S1040_DMA_XLOWADDR));
	/* general control/status registers and PCI status */
	dprintkl(KERN_INFO, "dump: gen{gctrl=0x%02x gstat=0x%02x gtmr=0x%02x} "
		"pci{status=0x%04x}\n",
		DC395x_read8(acb, TRM_S1040_GEN_CONTROL),
		DC395x_read8(acb, TRM_S1040_GEN_STATUS),
		DC395x_read8(acb, TRM_S1040_GEN_TIMER),
		pstat);
}
/* Clear the chip's SCSI FIFO.  With DBG_FIFO debugging enabled, first
 * report how many bytes are being discarded and in which phase.
 * @txt identifies the caller in the debug output. */
static inline void clear_fifo(struct AdapterCtlBlk *acb, char *txt)
{
#if debug_enabled(DBG_FIFO)
	u8 lines = DC395x_read8(acb, TRM_S1040_SCSI_SIGNAL);
	u8 fifocnt = DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT);
	/* NOTE(review): bit 0x40 appears to flag an empty FIFO and the low
	 * 6 bits the byte count -- confirm against the TRM-S1040 datasheet */
	if (!(fifocnt & 0x40))
		dprintkdbg(DBG_FIFO,
			"clear_fifo: (%i bytes) on phase %02x in %s\n",
			fifocnt & 0x3f, lines, txt);
#endif
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_CLRFIFO);
}
/* Reset the negotiation state of every known device back to the eeprom
 * defaults; after a bus reset all sync/wide agreements are void and
 * must be renegotiated. */
static void reset_dev_param(struct AdapterCtlBlk *acb)
{
	struct DeviceCtlBlk *dcb;
	struct NvRamType *eeprom = &acb->eeprom;
	dprintkdbg(DBG_0, "reset_dev_param: acb=%p\n", acb);

	list_for_each_entry(dcb, &acb->dcb_list, list) {
		u8 period_index;
		/* forget completed negotiations so they are redone */
		dcb->sync_mode &= ~(SYNC_NEGO_DONE + WIDE_NEGO_DONE);
		dcb->sync_period = 0;
		dcb->sync_offset = 0;

		dcb->dev_mode = eeprom->target[dcb->target_id].cfg0;
		period_index = eeprom->target[dcb->target_id].period & 0x07;
		dcb->min_nego_period = clock_period[period_index];
		/* wide only when both the device config and the card allow it */
		if (!(dcb->dev_mode & NTC_DO_WIDE_NEGO)
		    || !(acb->config & HCC_WIDE_CARD))
			dcb->sync_mode &= ~WIDE_NEGO_ENABLE;
	}
}
/*
 * perform a hard reset on the SCSI bus
 * @cmd - some command for this host (for fetching hooks)
 * Returns: SUCCESS (0x2002) on success, else FAILED (0x2003).
 *
 * Caller must hold the host lock (see dc395x_eh_bus_reset).  The exact
 * ordering of the register writes below follows the chip's reset
 * sequence and must not be changed casually.
 */
static int __dc395x_eh_bus_reset(struct scsi_cmnd *cmd)
{
	struct AdapterCtlBlk *acb =
		(struct AdapterCtlBlk *)cmd->device->host->hostdata;
	dprintkl(KERN_INFO,
		"eh_bus_reset: (0%p) target=<%02i-%i> cmd=%p\n",
		cmd, cmd->device->id, (u8)cmd->device->lun, cmd);

	if (timer_pending(&acb->waiting_timer))
		del_timer(&acb->waiting_timer);

	/*
	 * disable interrupt
	 */
	DC395x_write8(acb, TRM_S1040_DMA_INTEN, 0x00);
	DC395x_write8(acb, TRM_S1040_SCSI_INTEN, 0x00);
	DC395x_write8(acb, TRM_S1040_SCSI_CONTROL, DO_RSTMODULE);
	DC395x_write8(acb, TRM_S1040_DMA_CONTROL, DMARESETMODULE);

	reset_scsi_bus(acb);
	udelay(500);

	/* We may be in serious trouble. Wait some seconds */
	acb->last_reset =
	    jiffies + 3 * HZ / 2 +
	    HZ * acb->eeprom.delay_time;

	/*
	 * re-enable interrupt
	 */
	/* Clear SCSI FIFO */
	DC395x_write8(acb, TRM_S1040_DMA_CONTROL, CLRXFIFO);
	clear_fifo(acb, "eh_bus_reset");
	/* Delete pending IRQ */
	DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS);
	set_basic_config(acb);

	/* negotiations are void after the reset; fail outstanding srbs
	 * back to the midlayer and restart the waiting queue */
	reset_dev_param(acb);
	doing_srb_done(acb, DID_RESET, cmd, 0);
	acb->active_dcb = NULL;
	acb->acb_flag = 0;	/* RESET_DETECT, RESET_DONE ,RESET_DEV */
	waiting_process_next(acb);

	return SUCCESS;
}
  1139. static int dc395x_eh_bus_reset(struct scsi_cmnd *cmd)
  1140. {
  1141. int rc;
  1142. spin_lock_irq(cmd->device->host->host_lock);
  1143. rc = __dc395x_eh_bus_reset(cmd);
  1144. spin_unlock_irq(cmd->device->host->host_lock);
  1145. return rc;
  1146. }
/*
 * abort an errant SCSI command
 * @cmd - command to be aborted
 * Returns: SUCCESS (0x2002) on success, else FAILED (0x2003).
 */
static int dc395x_eh_abort(struct scsi_cmnd *cmd)
{
	/*
	 * Look into our command queues: If it has not been sent already,
	 * we remove it and return success. Otherwise fail.
	 */
	struct AdapterCtlBlk *acb =
	    (struct AdapterCtlBlk *)cmd->device->host->hostdata;
	struct DeviceCtlBlk *dcb;
	struct ScsiReqBlk *srb;
	dprintkl(KERN_INFO, "eh_abort: (0x%p) target=<%02i-%i> cmd=%p\n",
		cmd, cmd->device->id, (u8)cmd->device->lun, cmd);

	dcb = find_dcb(acb, cmd->device->id, cmd->device->lun);
	if (!dcb) {
		dprintkl(KERN_DEBUG, "eh_abort: No such device\n");
		return FAILED;
	}

	/* still waiting: unmap, untag and recycle the srb -- easy case */
	srb = find_cmd(cmd, &dcb->srb_waiting_list);
	if (srb) {
		srb_waiting_remove(dcb, srb);
		pci_unmap_srb_sense(acb, srb);
		pci_unmap_srb(acb, srb);
		free_tag(dcb, srb);
		srb_free_insert(acb, srb);
		dprintkl(KERN_DEBUG, "eh_abort: Command was waiting\n");
		cmd->result = DID_ABORT << 16;
		return SUCCESS;
	}
	/* already on the bus: aborting in-flight commands is unimplemented */
	srb = find_cmd(cmd, &dcb->srb_going_list);
	if (srb) {
		dprintkl(KERN_DEBUG, "eh_abort: Command in progress\n");
		/* XXX: Should abort the command here */
	} else {
		dprintkl(KERN_DEBUG, "eh_abort: Command not found\n");
	}
	return FAILED;
}
  1189. /* SDTR */
/*
 * build_sdtr - append a Synchronous Data Transfer Request (SDTR)
 * extended message to the SRB's message-out buffer and flag that a
 * sync negotiation is in flight.
 */
static void build_sdtr(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
		struct ScsiReqBlk *srb)
{
	u8 *ptr = srb->msgout_buf + srb->msg_count;
	/* More than the IDENTIFY byte queued already: no room for a
	 * 5-byte SDTR, so skip the negotiation for now. */
	if (srb->msg_count > 1) {
		dprintkl(KERN_INFO,
			"build_sdtr: msgout_buf BUSY (%i: %02x %02x)\n",
			srb->msg_count, srb->msgout_buf[0],
			srb->msgout_buf[1]);
		return;
	}
	if (!(dcb->dev_mode & NTC_DO_SYNC_NEGO)) {
		/* Sync disabled: offer offset 0 (async) at a 200ns period. */
		dcb->sync_offset = 0;
		dcb->min_nego_period = 200 >> 2;
	} else if (dcb->sync_offset == 0)
		dcb->sync_offset = SYNC_NEGO_OFFSET;
	*ptr++ = MSG_EXTENDED;	/* (01h) */
	*ptr++ = 3;		/* length */
	*ptr++ = EXTENDED_SDTR;	/* (01h) */
	*ptr++ = dcb->min_nego_period;	/* Transfer period (in 4ns) */
	*ptr++ = dcb->sync_offset;	/* Transfer offset (max. REQ/ACK dist) */
	srb->msg_count += 5;
	srb->state |= SRB_DO_SYNC_NEGO;
}
  1214. /* WDTR */
  1215. static void build_wdtr(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
  1216. struct ScsiReqBlk *srb)
  1217. {
  1218. u8 wide = ((dcb->dev_mode & NTC_DO_WIDE_NEGO) &
  1219. (acb->config & HCC_WIDE_CARD)) ? 1 : 0;
  1220. u8 *ptr = srb->msgout_buf + srb->msg_count;
  1221. if (srb->msg_count > 1) {
  1222. dprintkl(KERN_INFO,
  1223. "build_wdtr: msgout_buf BUSY (%i: %02x %02x)\n",
  1224. srb->msg_count, srb->msgout_buf[0],
  1225. srb->msgout_buf[1]);
  1226. return;
  1227. }
  1228. *ptr++ = MSG_EXTENDED; /* (01h) */
  1229. *ptr++ = 2; /* length */
  1230. *ptr++ = EXTENDED_WDTR; /* (03h) */
  1231. *ptr++ = wide;
  1232. srb->msg_count += 4;
  1233. srb->state |= SRB_DO_WIDE_NEGO;
  1234. }
#if 0
/* Timer to work around chip flaw: When selecting and the bus is
 * busy, we sometimes miss a Selection timeout IRQ */
void selection_timeout_missed(unsigned long ptr);
/* Sets the timer to wake us up */
static void selto_timer(struct AdapterCtlBlk *acb)
{
	/* Only one workaround timer may be pending at a time. */
	if (timer_pending(&acb->selto_timer))
		return;
	acb->selto_timer.function = selection_timeout_missed;
	acb->selto_timer.data = (unsigned long) acb;
	/* Don't let the timer fire inside the post-reset quiet window;
	 * push the expiry past last_reset + HZ/2 in that case. */
	if (time_before
	    (jiffies + HZ, acb->last_reset + HZ / 2))
		acb->selto_timer.expires =
		    acb->last_reset + HZ / 2 + 1;
	else
		acb->selto_timer.expires = jiffies + HZ + 1;
	add_timer(&acb->selto_timer);
}
/* Timer callback: fake the selection-timeout interrupt the chip
 * forgot to deliver by forcing a disconnect of the active command.
 * NOTE(review): this disabled code uses the pre-4.15 timer API
 * (timer.data / unsigned long argument); it would need conversion to
 * timer_setup()/from_timer() before being re-enabled. */
void selection_timeout_missed(unsigned long ptr)
{
	unsigned long flags;
	struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)ptr;
	struct ScsiReqBlk *srb;
	dprintkl(KERN_DEBUG, "Chip forgot to produce SelTO IRQ!\n");
	if (!acb->active_dcb || !acb->active_dcb->active_srb) {
		dprintkl(KERN_DEBUG, "... but no cmd pending? Oops!\n");
		return;
	}
	DC395x_LOCK_IO(acb->scsi_host, flags);
	srb = acb->active_dcb->active_srb;
	disconnect(acb);
	DC395x_UNLOCK_IO(acb->scsi_host, flags);
}
#endif
  1270. static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb,
  1271. struct ScsiReqBlk* srb)
  1272. {
  1273. u16 s_stat2, return_code;
  1274. u8 s_stat, scsicommand, i, identify_message;
  1275. u8 *ptr;
  1276. dprintkdbg(DBG_0, "start_scsi: (0x%p) <%02i-%i> srb=%p\n",
  1277. dcb->target_id, dcb->target_lun, srb);
  1278. srb->tag_number = TAG_NONE; /* acb->tag_max_num: had error read in eeprom */
  1279. s_stat = DC395x_read8(acb, TRM_S1040_SCSI_SIGNAL);
  1280. s_stat2 = 0;
  1281. s_stat2 = DC395x_read16(acb, TRM_S1040_SCSI_STATUS);
  1282. #if 1
  1283. if (s_stat & 0x20 /* s_stat2 & 0x02000 */ ) {
  1284. dprintkdbg(DBG_KG, "start_scsi: (0x%p) BUSY %02x %04x\n",
  1285. s_stat, s_stat2);
  1286. /*
  1287. * Try anyway?
  1288. *
  1289. * We could, BUT: Sometimes the TRM_S1040 misses to produce a Selection
  1290. * Timeout, a Disconnect or a Reselection IRQ, so we would be screwed!
  1291. * (This is likely to be a bug in the hardware. Obviously, most people
  1292. * only have one initiator per SCSI bus.)
  1293. * Instead let this fail and have the timer make sure the command is
  1294. * tried again after a short time
  1295. */
  1296. /*selto_timer (acb); */
  1297. return 1;
  1298. }
  1299. #endif
  1300. if (acb->active_dcb) {
  1301. dprintkl(KERN_DEBUG, "start_scsi: (0x%p) Attempt to start a"
  1302. "command while another command (0x%p) is active.",
  1303. srb->cmd,
  1304. acb->active_dcb->active_srb ?
  1305. acb->active_dcb->active_srb->cmd : 0);
  1306. return 1;
  1307. }
  1308. if (DC395x_read16(acb, TRM_S1040_SCSI_STATUS) & SCSIINTERRUPT) {
  1309. dprintkdbg(DBG_KG, "start_scsi: (0x%p) Failed (busy)\n", srb->cmd);
  1310. return 1;
  1311. }
  1312. /* Allow starting of SCSI commands half a second before we allow the mid-level
  1313. * to queue them again after a reset */
  1314. if (time_before(jiffies, acb->last_reset - HZ / 2)) {
  1315. dprintkdbg(DBG_KG, "start_scsi: Refuse cmds (reset wait)\n");
  1316. return 1;
  1317. }
  1318. /* Flush FIFO */
  1319. clear_fifo(acb, "start_scsi");
  1320. DC395x_write8(acb, TRM_S1040_SCSI_HOSTID, acb->scsi_host->this_id);
  1321. DC395x_write8(acb, TRM_S1040_SCSI_TARGETID, dcb->target_id);
  1322. DC395x_write8(acb, TRM_S1040_SCSI_SYNC, dcb->sync_period);
  1323. DC395x_write8(acb, TRM_S1040_SCSI_OFFSET, dcb->sync_offset);
  1324. srb->scsi_phase = PH_BUS_FREE; /* initial phase */
  1325. identify_message = dcb->identify_msg;
  1326. /*DC395x_TRM_write8(TRM_S1040_SCSI_IDMSG, identify_message); */
  1327. /* Don't allow disconnection for AUTO_REQSENSE: Cont.All.Cond.! */
  1328. if (srb->flag & AUTO_REQSENSE)
  1329. identify_message &= 0xBF;
  1330. if (((srb->cmd->cmnd[0] == INQUIRY)
  1331. || (srb->cmd->cmnd[0] == REQUEST_SENSE)
  1332. || (srb->flag & AUTO_REQSENSE))
  1333. && (((dcb->sync_mode & WIDE_NEGO_ENABLE)
  1334. && !(dcb->sync_mode & WIDE_NEGO_DONE))
  1335. || ((dcb->sync_mode & SYNC_NEGO_ENABLE)
  1336. && !(dcb->sync_mode & SYNC_NEGO_DONE)))
  1337. && (dcb->target_lun == 0)) {
  1338. srb->msgout_buf[0] = identify_message;
  1339. srb->msg_count = 1;
  1340. scsicommand = SCMD_SEL_ATNSTOP;
  1341. srb->state = SRB_MSGOUT;
  1342. #ifndef SYNC_FIRST
  1343. if (dcb->sync_mode & WIDE_NEGO_ENABLE
  1344. && dcb->inquiry7 & SCSI_INQ_WBUS16) {
  1345. build_wdtr(acb, dcb, srb);
  1346. goto no_cmd;
  1347. }
  1348. #endif
  1349. if (dcb->sync_mode & SYNC_NEGO_ENABLE
  1350. && dcb->inquiry7 & SCSI_INQ_SYNC) {
  1351. build_sdtr(acb, dcb, srb);
  1352. goto no_cmd;
  1353. }
  1354. if (dcb->sync_mode & WIDE_NEGO_ENABLE
  1355. && dcb->inquiry7 & SCSI_INQ_WBUS16) {
  1356. build_wdtr(acb, dcb, srb);
  1357. goto no_cmd;
  1358. }
  1359. srb->msg_count = 0;
  1360. }
  1361. /* Send identify message */
  1362. DC395x_write8(acb, TRM_S1040_SCSI_FIFO, identify_message);
  1363. scsicommand = SCMD_SEL_ATN;
  1364. srb->state = SRB_START_;
  1365. #ifndef DC395x_NO_TAGQ
  1366. if ((dcb->sync_mode & EN_TAG_QUEUEING)
  1367. && (identify_message & 0xC0)) {
  1368. /* Send Tag message */
  1369. u32 tag_mask = 1;
  1370. u8 tag_number = 0;
  1371. while (tag_mask & dcb->tag_mask
  1372. && tag_number < dcb->max_command) {
  1373. tag_mask = tag_mask << 1;
  1374. tag_number++;
  1375. }
  1376. if (tag_number >= dcb->max_command) {
  1377. dprintkl(KERN_WARNING, "start_scsi: (0x%p) "
  1378. "Out of tags target=<%02i-%i>)\n",
  1379. srb->cmd, srb->cmd->device->id,
  1380. (u8)srb->cmd->device->lun);
  1381. srb->state = SRB_READY;
  1382. DC395x_write16(acb, TRM_S1040_SCSI_CONTROL,
  1383. DO_HWRESELECT);
  1384. return 1;
  1385. }
  1386. /* Send Tag id */
  1387. DC395x_write8(acb, TRM_S1040_SCSI_FIFO, MSG_SIMPLE_QTAG);
  1388. DC395x_write8(acb, TRM_S1040_SCSI_FIFO, tag_number);
  1389. dcb->tag_mask |= tag_mask;
  1390. srb->tag_number = tag_number;
  1391. scsicommand = SCMD_SEL_ATN3;
  1392. srb->state = SRB_START_;
  1393. }
  1394. #endif
  1395. /*polling:*/
  1396. /* Send CDB ..command block ......... */
  1397. dprintkdbg(DBG_KG, "start_scsi: (0x%p) <%02i-%i> cmnd=0x%02x tag=%i\n",
  1398. srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun,
  1399. srb->cmd->cmnd[0], srb->tag_number);
  1400. if (srb->flag & AUTO_REQSENSE) {
  1401. DC395x_write8(acb, TRM_S1040_SCSI_FIFO, REQUEST_SENSE);
  1402. DC395x_write8(acb, TRM_S1040_SCSI_FIFO, (dcb->target_lun << 5));
  1403. DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
  1404. DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
  1405. DC395x_write8(acb, TRM_S1040_SCSI_FIFO, SCSI_SENSE_BUFFERSIZE);
  1406. DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
  1407. } else {
  1408. ptr = (u8 *)srb->cmd->cmnd;
  1409. for (i = 0; i < srb->cmd->cmd_len; i++)
  1410. DC395x_write8(acb, TRM_S1040_SCSI_FIFO, *ptr++);
  1411. }
  1412. no_cmd:
  1413. DC395x_write16(acb, TRM_S1040_SCSI_CONTROL,
  1414. DO_HWRESELECT | DO_DATALATCH);
  1415. if (DC395x_read16(acb, TRM_S1040_SCSI_STATUS) & SCSIINTERRUPT) {
  1416. /*
  1417. * If start_scsi return 1:
  1418. * we caught an interrupt (must be reset or reselection ... )
  1419. * : Let's process it first!
  1420. */
  1421. dprintkdbg(DBG_0, "start_scsi: (0x%p) <%02i-%i> Failed - busy\n",
  1422. srb->cmd, dcb->target_id, dcb->target_lun);
  1423. srb->state = SRB_READY;
  1424. free_tag(dcb, srb);
  1425. srb->msg_count = 0;
  1426. return_code = 1;
  1427. /* This IRQ should NOT get lost, as we did not acknowledge it */
  1428. } else {
  1429. /*
  1430. * If start_scsi returns 0:
  1431. * we know that the SCSI processor is free
  1432. */
  1433. srb->scsi_phase = PH_BUS_FREE; /* initial phase */
  1434. dcb->active_srb = srb;
  1435. acb->active_dcb = dcb;
  1436. return_code = 0;
  1437. /* it's important for atn stop */
  1438. DC395x_write16(acb, TRM_S1040_SCSI_CONTROL,
  1439. DO_DATALATCH | DO_HWRESELECT);
  1440. /* SCSI command */
  1441. DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, scsicommand);
  1442. }
  1443. return return_code;
  1444. }
  1445. #define DC395x_ENABLE_MSGOUT \
  1446. DC395x_write16 (acb, TRM_S1040_SCSI_CONTROL, DO_SETATN); \
  1447. srb->state |= SRB_MSGOUT
  1448. /* abort command */
  1449. static inline void enable_msgout_abort(struct AdapterCtlBlk *acb,
  1450. struct ScsiReqBlk *srb)
  1451. {
  1452. srb->msgout_buf[0] = ABORT;
  1453. srb->msg_count = 1;
  1454. DC395x_ENABLE_MSGOUT;
  1455. srb->state &= ~SRB_MSGIN;
  1456. srb->state |= SRB_MSGOUT;
  1457. }
/**
 * dc395x_handle_interrupt - Handle an interrupt that has been confirmed to
 * have been triggered for this card.
 *
 * @acb: a pointer to the adapter control block
 * @scsi_status: the status return when we checked the card
 **/
static void dc395x_handle_interrupt(struct AdapterCtlBlk *acb,
		u16 scsi_status)
{
	struct DeviceCtlBlk *dcb;
	struct ScsiReqBlk *srb;
	u16 phase;
	u8 scsi_intstatus;
	unsigned long flags;
	/* phase-handler slot looked up from the two dispatch tables below */
	void (*dc395x_statev)(struct AdapterCtlBlk *, struct ScsiReqBlk *,
			      u16 *);
	DC395x_LOCK_IO(acb->scsi_host, flags);
	/* This acknowledges the IRQ */
	scsi_intstatus = DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS);
	if ((scsi_status & 0x2007) == 0x2002)
		dprintkl(KERN_DEBUG,
			"COP after COP completed? %04x\n", scsi_status);
	if (debug_enabled(DBG_KG)) {
		if (scsi_intstatus & INT_SELTIMEOUT)
			dprintkdbg(DBG_KG, "handle_interrupt: Selection timeout\n");
	}
	/*dprintkl(KERN_DEBUG, "handle_interrupt: intstatus = 0x%02x ", scsi_intstatus); */
	/* A real interrupt arrived, so the missed-SelTO workaround timer
	 * (if armed) is no longer needed. */
	if (timer_pending(&acb->selto_timer))
		del_timer(&acb->selto_timer);
	/* Bus-level events are handled whole-sale and end the cycle. */
	if (scsi_intstatus & (INT_SELTIMEOUT | INT_DISCONNECT)) {
		disconnect(acb);	/* bus free interrupt */
		goto out_unlock;
	}
	if (scsi_intstatus & INT_RESELECTED) {
		reselect(acb);
		goto out_unlock;
	}
	if (scsi_intstatus & INT_SELECT) {
		dprintkl(KERN_INFO, "Host does not support target mode!\n");
		goto out_unlock;
	}
	if (scsi_intstatus & INT_SCSIRESET) {
		scsi_reset_detect(acb);
		goto out_unlock;
	}
	if (scsi_intstatus & (INT_BUSSERVICE | INT_CMDDONE)) {
		dcb = acb->active_dcb;
		if (!dcb) {
			dprintkl(KERN_DEBUG,
				"Oops: BusService (%04x %02x) w/o ActiveDCB!\n",
				scsi_status, scsi_intstatus);
			goto out_unlock;
		}
		srb = dcb->active_srb;
		if (dcb->flag & ABORT_DEV_) {
			dprintkdbg(DBG_0, "MsgOut Abort Device.....\n");
			enable_msgout_abort(acb, srb);
		}
		/* software sequential machine */
		phase = (u16)srb->scsi_phase;
		/*
		 * 62037 or 62137
		 * call dc395x_scsi_phase0[]... "phase entry"
		 * handle every phase before start transfer
		 */
		/* data_out_phase0,	phase:0 */
		/* data_in_phase0,	phase:1 */
		/* command_phase0,	phase:2 */
		/* status_phase0,	phase:3 */
		/* nop0,		phase:4 PH_BUS_FREE .. initial phase */
		/* nop0,		phase:5 PH_BUS_FREE .. initial phase */
		/* msgout_phase0,	phase:6 */
		/* msgin_phase0,	phase:7 */
		dc395x_statev = dc395x_scsi_phase0[phase];
		dc395x_statev(acb, srb, &scsi_status);
		/*
		 * if there were any exception occurred scsi_status
		 * will be modify to bus free phase new scsi_status
		 * transfer out from ... previous dc395x_statev
		 */
		srb->scsi_phase = scsi_status & PHASEMASK;
		phase = (u16)scsi_status & PHASEMASK;
		/*
		 * call dc395x_scsi_phase1[]... "phase entry" handle
		 * every phase to do transfer
		 */
		/* data_out_phase1,	phase:0 */
		/* data_in_phase1,	phase:1 */
		/* command_phase1,	phase:2 */
		/* status_phase1,	phase:3 */
		/* nop1,		phase:4 PH_BUS_FREE .. initial phase */
		/* nop1,		phase:5 PH_BUS_FREE .. initial phase */
		/* msgout_phase1,	phase:6 */
		/* msgin_phase1,	phase:7 */
		dc395x_statev = dc395x_scsi_phase1[phase];
		dc395x_statev(acb, srb, &scsi_status);
	}
      out_unlock:
	DC395x_UNLOCK_IO(acb->scsi_host, flags);
}
/*
 * dc395x_interrupt - top-level (possibly shared) IRQ handler.
 * Returns IRQ_HANDLED only when this card raised the interrupt.
 */
static irqreturn_t dc395x_interrupt(int irq, void *dev_id)
{
	struct AdapterCtlBlk *acb = dev_id;
	u16 scsi_status;
	u8 dma_status;
	irqreturn_t handled = IRQ_NONE;
	/*
	 * Check for pending interrupt
	 */
	scsi_status = DC395x_read16(acb, TRM_S1040_SCSI_STATUS);
	dma_status = DC395x_read8(acb, TRM_S1040_DMA_STATUS);
	if (scsi_status & SCSIINTERRUPT) {
		/* interrupt pending - let's process it! */
		dc395x_handle_interrupt(acb, scsi_status);
		handled = IRQ_HANDLED;
	}
	else if (dma_status & 0x20) {
		/* Error from the DMA engine */
		dprintkl(KERN_INFO, "Interrupt from DMA engine: 0x%02x!\n", dma_status);
#if 0
		dprintkl(KERN_INFO, "This means DMA error! Try to handle ...\n");
		if (acb->active_dcb) {
			acb->active_dcb-> flag |= ABORT_DEV_;
			if (acb->active_dcb->active_srb)
				enable_msgout_abort(acb, acb->active_dcb->active_srb);
		}
		DC395x_write8(acb, TRM_S1040_DMA_CONTROL, ABORTXFER | CLRXFIFO);
#else
		dprintkl(KERN_INFO, "Ignoring DMA error (probably a bad thing) ...\n");
		/* Dead store: acb is not used again before return.
		 * NOTE(review): presumably left to catch accidental later
		 * use of acb in this branch -- could be removed. */
		acb = NULL;
#endif
		handled = IRQ_HANDLED;
	}
	return handled;
}
/*
 * msgout_phase0 - message-out "phase exit" handler: called once the
 * chip leaves the message-out phase.
 */
static void msgout_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	dprintkdbg(DBG_0, "msgout_phase0: (0x%p)\n", srb->cmd);
	/* After an unexpected reselect or a sent ABORT, force the state
	 * machine back to the bus-free phase.
	 * NOTE(review): '+' is used here as a flag union; equivalent to
	 * '|' only while SRB_UNEXPECT_RESEL and SRB_ABORT_SENT are
	 * distinct single bits -- confirm against the flag definitions. */
	if (srb->state & (SRB_UNEXPECT_RESEL + SRB_ABORT_SENT))
		*pscsi_status = PH_BUS_FREE;	/*.. initial phase */
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);	/* it's important for atn stop */
	srb->state &= ~SRB_MSGOUT;
}
/*
 * msgout_phase1 - message-out "phase entry" handler: push the queued
 * message bytes (or a NOP if none) into the SCSI FIFO and start the
 * FIFO-out transfer.
 */
static void msgout_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	u16 i;
	u8 *ptr;
	dprintkdbg(DBG_0, "msgout_phase1: (0x%p)\n", srb->cmd);
	clear_fifo(acb, "msgout_phase1");
	if (!(srb->state & SRB_MSGOUT)) {
		srb->state |= SRB_MSGOUT;
		dprintkl(KERN_DEBUG,
			"msgout_phase1: (0x%p) Phase unexpected\n",
			srb->cmd);	/* So what ? */
	}
	/* Target wants a message but we have none queued: answer NOP. */
	if (!srb->msg_count) {
		dprintkdbg(DBG_0, "msgout_phase1: (0x%p) NOP msg\n",
			srb->cmd);
		DC395x_write8(acb, TRM_S1040_SCSI_FIFO, MSG_NOP);
		DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);	/* it's important for atn stop */
		DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_OUT);
		return;
	}
	ptr = (u8 *)srb->msgout_buf;
	for (i = 0; i < srb->msg_count; i++)
		DC395x_write8(acb, TRM_S1040_SCSI_FIFO, *ptr++);
	srb->msg_count = 0;
	/* Remember that an ABORT went out so phase0 can go bus-free. */
	if (srb->msgout_buf[0] == MSG_ABORT)
		srb->state = SRB_ABORT_SENT;
	DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_OUT);
}
/*
 * command_phase0 - command "phase exit" handler: nothing to account
 * for; just latch the data bus.
 */
static void command_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	dprintkdbg(DBG_0, "command_phase0: (0x%p)\n", srb->cmd);
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
}
/*
 * command_phase1 - command "phase entry" handler: load the CDB into
 * the SCSI FIFO (either the midlayer's CDB, or a hand-built REQUEST
 * SENSE for auto-sense) and start the FIFO-out transfer.
 */
static void command_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	struct DeviceCtlBlk *dcb;
	u8 *ptr;
	u16 i;
	dprintkdbg(DBG_0, "command_phase1: (0x%p)\n", srb->cmd);
	clear_fifo(acb, "command_phase1");
	/* Message phase is over: drop ATN before sending the CDB. */
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_CLRATN);
	if (!(srb->flag & AUTO_REQSENSE)) {
		ptr = (u8 *)srb->cmd->cmnd;
		for (i = 0; i < srb->cmd->cmd_len; i++) {
			DC395x_write8(acb, TRM_S1040_SCSI_FIFO, *ptr);
			ptr++;
		}
	} else {
		/* Build the 6-byte REQUEST SENSE CDB in place. */
		DC395x_write8(acb, TRM_S1040_SCSI_FIFO, REQUEST_SENSE);
		dcb = acb->active_dcb;
		/* target id */
		DC395x_write8(acb, TRM_S1040_SCSI_FIFO, (dcb->target_lun << 5));
		DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
		DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
		DC395x_write8(acb, TRM_S1040_SCSI_FIFO, SCSI_SENSE_BUFFERSIZE);
		DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
	}
	srb->state |= SRB_COMMAND;
	/* it's important for atn stop */
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
	/* SCSI command */
	DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_OUT);
}
  1669. /*
  1670. * Verify that the remaining space in the hw sg lists is the same as
  1671. * the count of remaining bytes in srb->total_xfer_length
  1672. */
  1673. static void sg_verify_length(struct ScsiReqBlk *srb)
  1674. {
  1675. if (debug_enabled(DBG_SG)) {
  1676. unsigned len = 0;
  1677. unsigned idx = srb->sg_index;
  1678. struct SGentry *psge = srb->segment_x + idx;
  1679. for (; idx < srb->sg_count; psge++, idx++)
  1680. len += psge->length;
  1681. if (len != srb->total_xfer_length)
  1682. dprintkdbg(DBG_SG,
  1683. "Inconsistent SRB S/G lengths (Tot=%i, Count=%i) !!\n",
  1684. srb->total_xfer_length, len);
  1685. }
  1686. }
  1687. /*
  1688. * Compute the next Scatter Gather list index and adjust its length
  1689. * and address if necessary
  1690. */
/*
 * sg_update_list - account @srb->total_xfer_length - @left transferred
 * bytes against the scatter/gather list: skip fully consumed entries
 * and trim the partially consumed one, syncing the in-memory SG table
 * with the device around the modification.
 */
static void sg_update_list(struct ScsiReqBlk *srb, u32 left)
{
	u8 idx;
	u32 xferred = srb->total_xfer_length - left;	/* bytes transferred */
	struct SGentry *psge = srb->segment_x + srb->sg_index;
	dprintkdbg(DBG_0,
		"sg_update_list: Transferred %i of %i bytes, %i remain\n",
		xferred, srb->total_xfer_length, left);
	if (xferred == 0) {
		/* nothing to update since we did not transfer any data */
		return;
	}
	sg_verify_length(srb);
	srb->total_xfer_length = left;	/* update remaining count */
	for (idx = srb->sg_index; idx < srb->sg_count; idx++) {
		if (xferred >= psge->length) {
			/* Complete SG entries done */
			xferred -= psge->length;
		} else {
			/* Partial SG entry done: the entry is rewritten in
			 * place, so hand it to the CPU, patch it, then give
			 * it back to the device. */
			pci_dma_sync_single_for_cpu(srb->dcb->
					    acb->dev,
					    srb->sg_bus_addr,
					    SEGMENTX_LEN,
					    PCI_DMA_TODEVICE);
			psge->length -= xferred;
			psge->address += xferred;
			srb->sg_index = idx;
			pci_dma_sync_single_for_device(srb->dcb->
					       acb->dev,
					       srb->sg_bus_addr,
					       SEGMENTX_LEN,
					       PCI_DMA_TODEVICE);
			break;
		}
		psge++;
	}
	sg_verify_length(srb);
}
  1730. /*
  1731. * We have transferred a single byte (PIO mode?) and need to update
  1732. * the count of bytes remaining (total_xfer_length) and update the sg
  1733. * entry to either point to next byte in the current sg entry, or of
  1734. * already at the end to point to the start of the next sg entry
  1735. */
static void sg_subtract_one(struct ScsiReqBlk *srb)
{
	/* Account a single transferred byte via the generic updater. */
	sg_update_list(srb, srb->total_xfer_length - 1);
}
/*
 * cleanup_after_transfer
 *
 * Makes sure the DMA and SCSI engines are empty after the transfer has
 * finished.
 * KG: Currently called from StatusPhase1 ()
 * Should probably also be called from other places
 * Best might be to call it in DataXXPhase0, if new phase will differ
 */
static void cleanup_after_transfer(struct AdapterCtlBlk *acb,
		struct ScsiReqBlk *srb)
{
	/*DC395x_write8 (TRM_S1040_DMA_STATUS, FORCEDMACOMP); */
	/* Drain whichever FIFO is not already empty; the order differs
	 * for reads (SCSI FIFO first) and writes (DMA FIFO first). */
	if (DC395x_read16(acb, TRM_S1040_DMA_COMMAND) & 0x0001) {	/* read */
		if (!(DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT) & 0x40))
			clear_fifo(acb, "cleanup/in");
		if (!(DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT) & 0x80))
			DC395x_write8(acb, TRM_S1040_DMA_CONTROL, CLRXFIFO);
	} else {	/* write */
		if (!(DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT) & 0x80))
			DC395x_write8(acb, TRM_S1040_DMA_CONTROL, CLRXFIFO);
		if (!(DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT) & 0x40))
			clear_fifo(acb, "cleanup/out");
	}
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
}
/*
 * This number of bytes will be transferred with PIO through the SCSI FIFO.
 * Seems to be needed for unknown reasons; could be a hardware bug :-(
 */
  1769. #define DC395x_LASTPIO 4
/*
 * data_out_phase0 - data-out "phase exit" handler: stop the DMA engine
 * and account for any bytes that were staged in the SCSI transfer
 * counter / FIFO but never made it to the device, so they can be
 * retransmitted.
 */
static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	struct DeviceCtlBlk *dcb = srb->dcb;
	u16 scsi_status = *pscsi_status;
	u32 d_left_counter = 0;
	dprintkdbg(DBG_0, "data_out_phase0: (0x%p) <%02i-%i>\n",
		srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);
	/*
	 * KG: We need to drain the buffers before we draw any conclusions!
	 * This means telling the DMA to push the rest into SCSI, telling
	 * SCSI to push the rest to the bus.
	 * However, the device might have been the one to stop us (phase
	 * change), and the data in transit just needs to be accounted so
	 * it can be retransmitted.)
	 */
	/*
	 * KG: Stop DMA engine pushing more data into the SCSI FIFO
	 * If we need more data, the DMA SG list will be freshly set up, anyway
	 */
	dprintkdbg(DBG_PIO, "data_out_phase0: "
		"DMA{fifocnt=0x%02x fifostat=0x%02x} "
		"SCSI{fifocnt=0x%02x cnt=0x%06x status=0x%04x} total=0x%06x\n",
		DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT),
		DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT),
		DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT),
		DC395x_read32(acb, TRM_S1040_SCSI_COUNTER), scsi_status,
		srb->total_xfer_length);
	DC395x_write8(acb, TRM_S1040_DMA_CONTROL, STOPDMAXFER | CLRXFIFO);
	if (!(srb->state & SRB_XFERPAD)) {
		if (scsi_status & PARITYERROR)
			srb->status |= PARITY_ERROR;
		/*
		 * KG: Right, we can't just rely on the SCSI_COUNTER, because this
		 * is the no of bytes it got from the DMA engine not the no it
		 * transferred successfully to the device. (And the difference could
		 * be as much as the FIFO size, I guess ...)
		 */
		if (!(scsi_status & SCSIXFERDONE)) {
			/*
			 * when data transfer from DMA FIFO to SCSI FIFO
			 * if there was some data left in SCSI FIFO
			 */
			d_left_counter =
			    (u32)(DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT) &
				  0x1F);
			/* On a wide bus the FIFO count is in words. */
			if (dcb->sync_period & WIDE_SYNC)
				d_left_counter <<= 1;
			dprintkdbg(DBG_KG, "data_out_phase0: FIFO contains %i %s\n"
				"SCSI{fifocnt=0x%02x cnt=0x%08x} "
				"DMA{fifocnt=0x%04x cnt=0x%02x ctr=0x%08x}\n",
				DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT),
				(dcb->sync_period & WIDE_SYNC) ? "words" : "bytes",
				DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT),
				DC395x_read32(acb, TRM_S1040_SCSI_COUNTER),
				DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT),
				DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT),
				DC395x_read32(acb, TRM_S1040_DMA_CXCNT));
		}
		/*
		 * calculate all the residue data that not yet tranfered
		 * SCSI transfer counter + left in SCSI FIFO data
		 *
		 * .....TRM_S1040_SCSI_COUNTER (24bits)
		 * The counter always decrement by one for every SCSI byte transfer.
		 * .....TRM_S1040_SCSI_FIFOCNT ( 5bits)
		 * The counter is SCSI FIFO offset counter (in units of bytes or! words)
		 */
		if (srb->total_xfer_length > DC395x_LASTPIO)
			d_left_counter +=
			    DC395x_read32(acb, TRM_S1040_SCSI_COUNTER);
		/* Is this a good idea? */
		/*clear_fifo(acb, "DOP1"); */
		/* KG: What is this supposed to be useful for? WIDE padding stuff? */
		if (d_left_counter == 1 && dcb->sync_period & WIDE_SYNC
		    && scsi_bufflen(srb->cmd) % 2) {
			d_left_counter = 0;
			dprintkl(KERN_INFO,
				"data_out_phase0: Discard 1 byte (0x%02x)\n",
				scsi_status);
		}
		/*
		 * KG: Oops again. Same thinko as above: The SCSI might have been
		 * faster than the DMA engine, so that it ran out of data.
		 * In that case, we have to do just nothing!
		 * But: Why the interrupt: No phase change. No XFERCNT_2_ZERO. Or?
		 */
		/*
		 * KG: This is nonsense: We have been WRITING data to the bus
		 * If the SCSI engine has no bytes left, how should the DMA engine?
		 */
		if (d_left_counter == 0) {
			srb->total_xfer_length = 0;
		} else {
			/*
			 * if transfer not yet complete
			 * there were some data residue in SCSI FIFO or
			 * SCSI transfer counter not empty
			 */
			long oldxferred =
			    srb->total_xfer_length - d_left_counter;
			const int diff =
			    (dcb->sync_period & WIDE_SYNC) ? 2 : 1;
			sg_update_list(srb, d_left_counter);
			/* KG: Most ugly hack! Apparently, this works around a chip bug */
			if ((srb->segment_x[srb->sg_index].length ==
			     diff && scsi_sg_count(srb->cmd))
			    || ((oldxferred & ~PAGE_MASK) ==
				(PAGE_SIZE - diff))
			    ) {
				dprintkl(KERN_INFO, "data_out_phase0: "
					"Work around chip bug (%i)?\n", diff);
				d_left_counter =
				    srb->total_xfer_length - diff;
				sg_update_list(srb, d_left_counter);
				/*srb->total_xfer_length -= diff; */
				/*srb->virt_addr += diff; */
				/*if (srb->cmd->use_sg) */
				/*	srb->sg_index++; */
			}
		}
	}
	/* Leaving the data-out phase: make sure both engines are empty. */
	if ((*pscsi_status & PHASEMASK) != PH_DATA_OUT) {
		cleanup_after_transfer(acb, srb);
	}
}
/*
 * data_out_phase1 - data-out "phase entry" handler: clear the FIFO and
 * kick off the actual outbound transfer.
 */
static void data_out_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	dprintkdbg(DBG_0, "data_out_phase1: (0x%p) <%02i-%i>\n",
		srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);
	clear_fifo(acb, "data_out_phase1");
	/* do prepare before transfer when data out phase */
	data_io_transfer(acb, srb, XFERDATAOUT);
}
  1905. static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
  1906. u16 *pscsi_status)
  1907. {
  1908. u16 scsi_status = *pscsi_status;
  1909. dprintkdbg(DBG_0, "data_in_phase0: (0x%p) <%02i-%i>\n",
  1910. srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);
  1911. /*
  1912. * KG: DataIn is much more tricky than DataOut. When the device is finished
  1913. * and switches to another phase, the SCSI engine should be finished too.
  1914. * But: There might still be bytes left in its FIFO to be fetched by the DMA
  1915. * engine and transferred to memory.
  1916. * We should wait for the FIFOs to be emptied by that (is there any way to
  1917. * enforce this?) and then stop the DMA engine, because it might think, that
  1918. * there are more bytes to follow. Yes, the device might disconnect prior to
  1919. * having all bytes transferred!
  1920. * Also we should make sure that all data from the DMA engine buffer's really
  1921. * made its way to the system memory! Some documentation on this would not
  1922. * seem to be a bad idea, actually.
  1923. */
  1924. if (!(srb->state & SRB_XFERPAD)) {
  1925. u32 d_left_counter;
  1926. unsigned int sc, fc;
  1927. if (scsi_status & PARITYERROR) {
  1928. dprintkl(KERN_INFO, "data_in_phase0: (0x%p) "
  1929. "Parity Error\n", srb->cmd);
  1930. srb->status |= PARITY_ERROR;
  1931. }
  1932. /*
  1933. * KG: We should wait for the DMA FIFO to be empty ...
  1934. * but: it would be better to wait first for the SCSI FIFO and then the
  1935. * the DMA FIFO to become empty? How do we know, that the device not already
  1936. * sent data to the FIFO in a MsgIn phase, eg.?
  1937. */
  1938. if (!(DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT) & 0x80)) {
  1939. #if 0
  1940. int ctr = 6000000;
  1941. dprintkl(KERN_DEBUG,
  1942. "DIP0: Wait for DMA FIFO to flush ...\n");
  1943. /*DC395x_write8 (TRM_S1040_DMA_CONTROL, STOPDMAXFER); */
  1944. /*DC395x_write32 (TRM_S1040_SCSI_COUNTER, 7); */
  1945. /*DC395x_write8 (TRM_S1040_SCSI_COMMAND, SCMD_DMA_IN); */
  1946. while (!
  1947. (DC395x_read16(acb, TRM_S1040_DMA_FIFOSTAT) &
  1948. 0x80) && --ctr);
  1949. if (ctr < 6000000 - 1)
  1950. dprintkl(KERN_DEBUG
  1951. "DIP0: Had to wait for DMA ...\n");
  1952. if (!ctr)
  1953. dprintkl(KERN_ERR,
  1954. "Deadlock in DIP0 waiting for DMA FIFO empty!!\n");
  1955. /*DC395x_write32 (TRM_S1040_SCSI_COUNTER, 0); */
  1956. #endif
  1957. dprintkdbg(DBG_KG, "data_in_phase0: "
  1958. "DMA{fifocnt=0x%02x fifostat=0x%02x}\n",
  1959. DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT),
  1960. DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT));
  1961. }
  1962. /* Now: Check remainig data: The SCSI counters should tell us ... */
  1963. sc = DC395x_read32(acb, TRM_S1040_SCSI_COUNTER);
  1964. fc = DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT);
  1965. d_left_counter = sc + ((fc & 0x1f)
  1966. << ((srb->dcb->sync_period & WIDE_SYNC) ? 1 :
  1967. 0));
  1968. dprintkdbg(DBG_KG, "data_in_phase0: "
  1969. "SCSI{fifocnt=0x%02x%s ctr=0x%08x} "
  1970. "DMA{fifocnt=0x%02x fifostat=0x%02x ctr=0x%08x} "
  1971. "Remain{totxfer=%i scsi_fifo+ctr=%i}\n",
  1972. fc,
  1973. (srb->dcb->sync_period & WIDE_SYNC) ? "words" : "bytes",
  1974. sc,
  1975. fc,
  1976. DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT),
  1977. DC395x_read32(acb, TRM_S1040_DMA_CXCNT),
  1978. srb->total_xfer_length, d_left_counter);
  1979. #if DC395x_LASTPIO
  1980. /* KG: Less than or equal to 4 bytes can not be transferred via DMA, it seems. */
  1981. if (d_left_counter
  1982. && srb->total_xfer_length <= DC395x_LASTPIO) {
  1983. size_t left_io = srb->total_xfer_length;
  1984. /*u32 addr = (srb->segment_x[srb->sg_index].address); */
  1985. /*sg_update_list (srb, d_left_counter); */
  1986. dprintkdbg(DBG_PIO, "data_in_phase0: PIO (%i %s) "
  1987. "for remaining %i bytes:",
  1988. fc & 0x1f,
  1989. (srb->dcb->sync_period & WIDE_SYNC) ?
  1990. "words" : "bytes",
  1991. srb->total_xfer_length);
  1992. if (srb->dcb->sync_period & WIDE_SYNC)
  1993. DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2,
  1994. CFG2_WIDEFIFO);
  1995. while (left_io) {
  1996. unsigned char *virt, *base = NULL;
  1997. unsigned long flags = 0;
  1998. size_t len = left_io;
  1999. size_t offset = srb->request_length - left_io;
  2000. local_irq_save(flags);
  2001. /* Assumption: it's inside one page as it's at most 4 bytes and
  2002. I just assume it's on a 4-byte boundary */
  2003. base = scsi_kmap_atomic_sg(scsi_sglist(srb->cmd),
  2004. srb->sg_count, &offset, &len);
  2005. virt = base + offset;
  2006. left_io -= len;
  2007. while (len) {
  2008. u8 byte;
  2009. byte = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
  2010. *virt++ = byte;
  2011. if (debug_enabled(DBG_PIO))
  2012. printk(" %02x", byte);
  2013. d_left_counter--;
  2014. sg_subtract_one(srb);
  2015. len--;
  2016. fc = DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT);
  2017. if (fc == 0x40) {
  2018. left_io = 0;
  2019. break;
  2020. }
  2021. }
  2022. WARN_ON((fc != 0x40) == !d_left_counter);
  2023. if (fc == 0x40 && (srb->dcb->sync_period & WIDE_SYNC)) {
  2024. /* Read the last byte ... */
  2025. if (srb->total_xfer_length > 0) {
  2026. u8 byte = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
  2027. *virt++ = byte;
  2028. srb->total_xfer_length--;
  2029. if (debug_enabled(DBG_PIO))
  2030. printk(" %02x", byte);
  2031. }
  2032. DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2, 0);
  2033. }
  2034. scsi_kunmap_atomic_sg(base);
  2035. local_irq_restore(flags);
  2036. }
  2037. /*printk(" %08x", *(u32*)(bus_to_virt (addr))); */
  2038. /*srb->total_xfer_length = 0; */
  2039. if (debug_enabled(DBG_PIO))
  2040. printk("\n");
  2041. }
  2042. #endif /* DC395x_LASTPIO */
  2043. #if 0
  2044. /*
  2045. * KG: This was in DATAOUT. Does it also belong here?
  2046. * Nobody seems to know what counter and fifo_cnt count exactly ...
  2047. */
  2048. if (!(scsi_status & SCSIXFERDONE)) {
  2049. /*
  2050. * when data transfer from DMA FIFO to SCSI FIFO
  2051. * if there was some data left in SCSI FIFO
  2052. */
  2053. d_left_counter =
  2054. (u32)(DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT) &
  2055. 0x1F);
  2056. if (srb->dcb->sync_period & WIDE_SYNC)
  2057. d_left_counter <<= 1;
  2058. /*
  2059. * if WIDE scsi SCSI FIFOCNT unit is word !!!
  2060. * so need to *= 2
  2061. * KG: Seems to be correct ...
  2062. */
  2063. }
  2064. #endif
  2065. /* KG: This should not be needed any more! */
  2066. if (d_left_counter == 0
  2067. || (scsi_status & SCSIXFERCNT_2_ZERO)) {
  2068. #if 0
  2069. int ctr = 6000000;
  2070. u8 TempDMAstatus;
  2071. do {
  2072. TempDMAstatus =
  2073. DC395x_read8(acb, TRM_S1040_DMA_STATUS);
  2074. } while (!(TempDMAstatus & DMAXFERCOMP) && --ctr);
  2075. if (!ctr)
  2076. dprintkl(KERN_ERR,
  2077. "Deadlock in DataInPhase0 waiting for DMA!!\n");
  2078. srb->total_xfer_length = 0;
  2079. #endif
  2080. srb->total_xfer_length = d_left_counter;
  2081. } else { /* phase changed */
  2082. /*
  2083. * parsing the case:
  2084. * when a transfer not yet complete
  2085. * but be disconnected by target
  2086. * if transfer not yet complete
  2087. * there were some data residue in SCSI FIFO or
  2088. * SCSI transfer counter not empty
  2089. */
  2090. sg_update_list(srb, d_left_counter);
  2091. }
  2092. }
  2093. /* KG: The target may decide to disconnect: Empty FIFO before! */
  2094. if ((*pscsi_status & PHASEMASK) != PH_DATA_IN) {
  2095. cleanup_after_transfer(acb, srb);
  2096. }
  2097. }
/*
 * Data-in phase, part 1: the bus (re)entered DATA IN.
 * Logs the request and hands off to data_io_transfer() with the
 * direction flag set to "read" (target -> host).  *pscsi_status is
 * not consulted here; the handler signature is fixed by the phase
 * dispatch table.
 */
static void data_in_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	dprintkdbg(DBG_0, "data_in_phase1: (0x%p) <%02i-%i>\n",
		srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);
	data_io_transfer(acb, srb, XFERDATAIN);
}
/*
 * Set up and start a data transfer (direction given by @io_dir's
 * DMACMD_DIR bit: set = read, clear = write) for @srb.
 *
 * Strategy is chosen by the remaining transfer length:
 *  - length > DC395x_LASTPIO: program the DMA engine (scatter/gather
 *    or single segment) and issue SCMD_DMA_IN/SCMD_DMA_OUT;
 *  - 0 < length <= DC395x_LASTPIO: do PIO through the SCSI FIFO
 *    (writes are pushed here; reads are drained in data_in_phase0);
 *  - length == 0: transfer pad bytes to satisfy an unexpected data
 *    phase, flagging an overrun when S/G entries remain.
 */
static void data_io_transfer(struct AdapterCtlBlk *acb,
		struct ScsiReqBlk *srb, u16 io_dir)
{
	struct DeviceCtlBlk *dcb = srb->dcb;
	u8 bval;
	dprintkdbg(DBG_0,
		"data_io_transfer: (0x%p) <%02i-%i> %c len=%i, sg=(%i/%i)\n",
		srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun,
		((io_dir & DMACMD_DIR) ? 'r' : 'w'),
		srb->total_xfer_length, srb->sg_index, srb->sg_count);
	if (srb == acb->tmp_srb)
		dprintkl(KERN_ERR, "data_io_transfer: Using tmp_srb!\n");
	if (srb->sg_index >= srb->sg_count) {
		/* can't happen? out of bounds error */
		return;
	}

	if (srb->total_xfer_length > DC395x_LASTPIO) {
		u8 dma_status = DC395x_read8(acb, TRM_S1040_DMA_STATUS);
		/*
		 * KG: What should we do: Use SCSI Cmd 0x90/0x92?
		 * Maybe, even ABORTXFER would be appropriate
		 */
		if (dma_status & XFERPENDING) {
			/* A previous DMA transfer is still pending; clearing
			 * the FIFO is a best-effort recovery attempt. */
			dprintkl(KERN_DEBUG, "data_io_transfer: Xfer pending! "
				"Expect trouble!\n");
			dump_register_info(acb, dcb, srb);
			DC395x_write8(acb, TRM_S1040_DMA_CONTROL, CLRXFIFO);
		}
		/* clear_fifo(acb, "IO"); */
		/*
		 * load what physical address of Scatter/Gather list table
		 * want to be transfer
		 */
		srb->state |= SRB_DATA_XFER;
		DC395x_write32(acb, TRM_S1040_DMA_XHIGHADDR, 0);
		if (scsi_sg_count(srb->cmd)) {	/* with S/G */
			io_dir |= DMACMD_SG;
			/* Bus address of the first S/G entry still to do */
			DC395x_write32(acb, TRM_S1040_DMA_XLOWADDR,
				srb->sg_bus_addr +
				sizeof(struct SGentry) *
				srb->sg_index);
			/* load how many bytes in the sg list table */
			DC395x_write32(acb, TRM_S1040_DMA_XCNT,
				((u32)(srb->sg_count -
				srb->sg_index) << 3));
		} else {	/* without S/G */
			io_dir &= ~DMACMD_SG;
			DC395x_write32(acb, TRM_S1040_DMA_XLOWADDR,
				srb->segment_x[0].address);
			DC395x_write32(acb, TRM_S1040_DMA_XCNT,
				srb->segment_x[0].length);
		}
		/* load total transfer length (24bits) max value 16Mbyte */
		DC395x_write32(acb, TRM_S1040_SCSI_COUNTER,
			srb->total_xfer_length);
		DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);	/* it's important for atn stop */
		/* NOTE: start order differs by direction: for reads the SCSI
		 * side is armed first, for writes the DMA side is. */
		if (io_dir & DMACMD_DIR) {	/* read */
			DC395x_write8(acb, TRM_S1040_SCSI_COMMAND,
				SCMD_DMA_IN);
			DC395x_write16(acb, TRM_S1040_DMA_COMMAND, io_dir);
		} else {
			DC395x_write16(acb, TRM_S1040_DMA_COMMAND, io_dir);
			DC395x_write8(acb, TRM_S1040_SCSI_COMMAND,
				SCMD_DMA_OUT);
		}
	}
#if DC395x_LASTPIO
	else if (srb->total_xfer_length > 0) {	/* The last four bytes: Do PIO */
		/*
		 * load what physical address of Scatter/Gather list table
		 * want to be transfer
		 */
		srb->state |= SRB_DATA_XFER;
		/* load total transfer length (24bits) max value 16Mbyte */
		DC395x_write32(acb, TRM_S1040_SCSI_COUNTER,
			srb->total_xfer_length);
		DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);	/* it's important for atn stop */
		if (io_dir & DMACMD_DIR) {	/* read */
			/* Reads: just start FIFO input; the bytes are pulled
			 * out of the FIFO later in data_in_phase0. */
			DC395x_write8(acb, TRM_S1040_SCSI_COMMAND,
				SCMD_FIFO_IN);
		} else {	/* write */
			int ln = srb->total_xfer_length;
			size_t left_io = srb->total_xfer_length;

			if (srb->dcb->sync_period & WIDE_SYNC)
				DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2,
				     CFG2_WIDEFIFO);

			/* Push all remaining bytes into the SCSI FIFO,
			 * mapping each S/G chunk atomically. */
			while (left_io) {
				unsigned char *virt, *base = NULL;
				unsigned long flags = 0;
				size_t len = left_io;
				size_t offset = srb->request_length - left_io;

				local_irq_save(flags);
				/* Again, max 4 bytes */
				base = scsi_kmap_atomic_sg(scsi_sglist(srb->cmd),
						srb->sg_count, &offset, &len);
				virt = base + offset;

				left_io -= len;

				while (len--) {
					if (debug_enabled(DBG_PIO))
						printk(" %02x", *virt);

					DC395x_write8(acb, TRM_S1040_SCSI_FIFO, *virt++);

					sg_subtract_one(srb);
				}

				scsi_kunmap_atomic_sg(base);
				local_irq_restore(flags);
			}
			if (srb->dcb->sync_period & WIDE_SYNC) {
				/* Wide FIFO works in 16-bit units: pad odd
				 * lengths with a zero byte, then leave wide
				 * FIFO mode again. */
				if (ln % 2) {
					DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
					if (debug_enabled(DBG_PIO))
						printk(" |00");
				}
				DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2, 0);
			}
			/*DC395x_write32(acb, TRM_S1040_SCSI_COUNTER, ln); */
			if (debug_enabled(DBG_PIO))
				printk("\n");
			DC395x_write8(acb, TRM_S1040_SCSI_COMMAND,
					  SCMD_FIFO_OUT);
		}
	}
#endif				/* DC395x_LASTPIO */
	else {		/* xfer pad */
		/* No data expected but the target entered a data phase:
		 * exchange dummy bytes so the phase can complete. */
		u8 data = 0, data2 = 0;
		if (srb->sg_count) {
			srb->adapter_status = H_OVER_UNDER_RUN;
			srb->status |= OVER_RUN;
		}
		/*
		 * KG: despite the fact that we are using 16 bits I/O ops
		 * the SCSI FIFO is only 8 bits according to the docs
		 * (we can set bit 1 in 0x8f to serialize FIFO access ...)
		 */
		if (dcb->sync_period & WIDE_SYNC) {
			DC395x_write32(acb, TRM_S1040_SCSI_COUNTER, 2);
			DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2,
				CFG2_WIDEFIFO);
			if (io_dir & DMACMD_DIR) {
				data = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
				data2 = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
			} else {
				/* Danger, Robinson: If you find KGs
				 * scattered over the wide disk, the driver
				 * or chip is to blame :-( */
				DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 'K');
				DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 'G');
			}
			DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2, 0);
		} else {
			DC395x_write32(acb, TRM_S1040_SCSI_COUNTER, 1);
			/* Danger, Robinson: If you find a collection of Ks on your disk
			 * something broke :-( */
			if (io_dir & DMACMD_DIR)
				data = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
			else
				DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 'K');
		}
		srb->state |= SRB_XFERPAD;
		DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);	/* it's important for atn stop */
		/* SCSI command */
		bval = (io_dir & DMACMD_DIR) ? SCMD_FIFO_IN : SCMD_FIFO_OUT;
		DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, bval);
	}
}
/*
 * Status phase, part 0: the status byte and the following message byte
 * are already in the SCSI FIFO (read order matters).  Record both,
 * mark the request SRB_COMPLETED, force the phase variable to
 * PH_BUS_FREE so the phase loop terminates, and acknowledge the
 * message (SCMD_MSGACCEPT).
 */
static void status_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	dprintkdbg(DBG_0, "status_phase0: (0x%p) <%02i-%i>\n",
		srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);
	srb->target_status = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
	srb->end_message = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);	/* get message */
	srb->state = SRB_COMPLETED;
	*pscsi_status = PH_BUS_FREE;	/*.. initial phase */
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);	/* it's important for atn stop */
	DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_MSGACCEPT);
}
/*
 * Status phase, part 1: the bus entered STATUS.  Flag the SRB as being
 * in status phase and issue SCMD_COMP, which lets the chip collect the
 * status byte and the following message byte (picked up afterwards in
 * status_phase0).
 */
static void status_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	dprintkdbg(DBG_0, "status_phase1: (0x%p) <%02i-%i>\n",
		srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);
	srb->state = SRB_STATUS;
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);	/* it's important for atn stop */
	DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_COMP);
}
  2290. /* Check if the message is complete */
  2291. static inline u8 msgin_completed(u8 * msgbuf, u32 len)
  2292. {
  2293. if (*msgbuf == EXTENDED_MESSAGE) {
  2294. if (len < 2)
  2295. return 0;
  2296. if (len < msgbuf[1] + 2)
  2297. return 0;
  2298. } else if (*msgbuf >= 0x20 && *msgbuf <= 0x2f) /* two byte messages */
  2299. if (len < 2)
  2300. return 0;
  2301. return 1;
  2302. }
/* reject_msg: queue a MESSAGE REJECT reply for the message just
 * received and flip the SRB from message-in to message-out state.
 * DC395x_ENABLE_MSGOUT asserts ATN so the target switches to the
 * message-out phase. */
static inline void msgin_reject(struct AdapterCtlBlk *acb,
		struct ScsiReqBlk *srb)
{
	srb->msgout_buf[0] = MESSAGE_REJECT;
	srb->msg_count = 1;
	DC395x_ENABLE_MSGOUT;
	srb->state &= ~SRB_MSGIN;
	srb->state |= SRB_MSGOUT;
	dprintkl(KERN_INFO, "msgin_reject: 0x%02x <%02i-%i>\n",
		srb->msgin_buf[0],
		srb->dcb->target_id, srb->dcb->target_lun);
}
  2316. static struct ScsiReqBlk *msgin_qtag(struct AdapterCtlBlk *acb,
  2317. struct DeviceCtlBlk *dcb, u8 tag)
  2318. {
  2319. struct ScsiReqBlk *srb = NULL;
  2320. struct ScsiReqBlk *i;
  2321. dprintkdbg(DBG_0, "msgin_qtag: (0x%p) tag=%i srb=%p\n",
  2322. srb->cmd, tag, srb);
  2323. if (!(dcb->tag_mask & (1 << tag)))
  2324. dprintkl(KERN_DEBUG,
  2325. "msgin_qtag: tag_mask=0x%08x does not reserve tag %i!\n",
  2326. dcb->tag_mask, tag);
  2327. if (list_empty(&dcb->srb_going_list))
  2328. goto mingx0;
  2329. list_for_each_entry(i, &dcb->srb_going_list, list) {
  2330. if (i->tag_number == tag) {
  2331. srb = i;
  2332. break;
  2333. }
  2334. }
  2335. if (!srb)
  2336. goto mingx0;
  2337. dprintkdbg(DBG_0, "msgin_qtag: (0x%p) <%02i-%i>\n",
  2338. srb->cmd, srb->dcb->target_id, srb->dcb->target_lun);
  2339. if (dcb->flag & ABORT_DEV_) {
  2340. /*srb->state = SRB_ABORT_SENT; */
  2341. enable_msgout_abort(acb, srb);
  2342. }
  2343. if (!(srb->state & SRB_DISCONNECT))
  2344. goto mingx0;
  2345. memcpy(srb->msgin_buf, dcb->active_srb->msgin_buf, acb->msg_len);
  2346. srb->state |= dcb->active_srb->state;
  2347. srb->state |= SRB_DATA_XFER;
  2348. dcb->active_srb = srb;
  2349. /* How can we make the DORS happy? */
  2350. return srb;
  2351. mingx0:
  2352. srb = acb->tmp_srb;
  2353. srb->state = SRB_UNEXPECT_RESEL;
  2354. dcb->active_srb = srb;
  2355. srb->msgout_buf[0] = MSG_ABORT_TAG;
  2356. srb->msg_count = 1;
  2357. DC395x_ENABLE_MSGOUT;
  2358. dprintkl(KERN_DEBUG, "msgin_qtag: Unknown tag %i - abort\n", tag);
  2359. return srb;
  2360. }
/* Reload the chip's per-target registers (target ID, sync period and
 * sync offset) from @dcb, then propagate the negotiated transfer rate
 * to the other LUNs of the same target via set_xfer_rate(). */
static inline void reprogram_regs(struct AdapterCtlBlk *acb,
		struct DeviceCtlBlk *dcb)
{
	DC395x_write8(acb, TRM_S1040_SCSI_TARGETID, dcb->target_id);
	DC395x_write8(acb, TRM_S1040_SCSI_SYNC, dcb->sync_period);
	DC395x_write8(acb, TRM_S1040_SCSI_OFFSET, dcb->sync_offset);
	set_xfer_rate(acb, dcb);
}
/* set async transfer mode: the target rejected our SDTR (or requested
 * async); fall back to asynchronous transfers (offset 0, 200ns minimum
 * period), mark sync negotiation done, and — if wide negotiation is
 * still pending — queue a WDTR message as the next attempt. */
static void msgin_set_async(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
{
	struct DeviceCtlBlk *dcb = srb->dcb;
	dprintkl(KERN_DEBUG, "msgin_set_async: No sync transfers <%02i-%i>\n",
		dcb->target_id, dcb->target_lun);
	dcb->sync_mode &= ~(SYNC_NEGO_ENABLE);
	dcb->sync_mode |= SYNC_NEGO_DONE;
	/*dcb->sync_period &= 0; */
	dcb->sync_offset = 0;
	dcb->min_nego_period = 200 >> 2;	/* 200ns <=> 5 MHz */
	srb->state &= ~SRB_DO_SYNC_NEGO;
	reprogram_regs(acb, dcb);
	if ((dcb->sync_mode & WIDE_NEGO_ENABLE)
	    && !(dcb->sync_mode & WIDE_NEGO_DONE)) {
		build_wdtr(acb, dcb, srb);
		DC395x_ENABLE_MSGOUT;
		dprintkdbg(DBG_0, "msgin_set_async(rej): Try WDTR anyway\n");
	}
}
/* set sync transfer mode: process an incoming SDTR message
 * (msgin_buf[3] = period in 4ns units, msgin_buf[4] = offset).
 * Clamps the offset to the chip maximum (15) and to our configured
 * offset, rounds the period up to the nearest supported clock divisor,
 * programs the chip, and — if we did not initiate the negotiation —
 * answers with a corrected SDTR message. */
static void msgin_set_sync(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
{
	struct DeviceCtlBlk *dcb = srb->dcb;
	u8 bval;
	int fact;
	dprintkdbg(DBG_1, "msgin_set_sync: <%02i> Sync: %ins "
		"(%02i.%01i MHz) Offset %i\n",
		dcb->target_id, srb->msgin_buf[3] << 2,
		(250 / srb->msgin_buf[3]),
		((250 % srb->msgin_buf[3]) * 10) / srb->msgin_buf[3],
		srb->msgin_buf[4]);

	/* Chip supports a maximum sync offset of 15 */
	if (srb->msgin_buf[4] > 15)
		srb->msgin_buf[4] = 15;
	if (!(dcb->dev_mode & NTC_DO_SYNC_NEGO))
		dcb->sync_offset = 0;
	else if (dcb->sync_offset == 0)
		dcb->sync_offset = srb->msgin_buf[4];
	/* Use the smaller of the target's and our offset */
	if (srb->msgin_buf[4] > dcb->sync_offset)
		srb->msgin_buf[4] = dcb->sync_offset;
	else
		dcb->sync_offset = srb->msgin_buf[4];
	/* Find the smallest supported clock divisor whose period covers
	 * both the requested and our minimum negotiation period */
	bval = 0;
	while (bval < 7 && (srb->msgin_buf[3] > clock_period[bval]
			    || dcb->min_nego_period >
			    clock_period[bval]))
		bval++;
	if (srb->msgin_buf[3] < clock_period[bval])
		dprintkl(KERN_INFO,
			"msgin_set_sync: Increase sync nego period to %ins\n",
			clock_period[bval] << 2);
	srb->msgin_buf[3] = clock_period[bval];
	dcb->sync_period &= 0xf0;
	dcb->sync_period |= ALT_SYNC | bval;
	dcb->min_nego_period = srb->msgin_buf[3];

	/* Throughput factor: 500 for 16-bit wide, 250 for narrow */
	if (dcb->sync_period & WIDE_SYNC)
		fact = 500;
	else
		fact = 250;

	dprintkl(KERN_INFO,
		"Target %02i: %s Sync: %ins Offset %i (%02i.%01i MB/s)\n",
		dcb->target_id, (fact == 500) ? "Wide16" : "",
		dcb->min_nego_period << 2, dcb->sync_offset,
		(fact / dcb->min_nego_period),
		((fact % dcb->min_nego_period) * 10 +
		dcb->min_nego_period / 2) / dcb->min_nego_period);

	if (!(srb->state & SRB_DO_SYNC_NEGO)) {
		/* Reply with corrected SDTR Message */
		dprintkl(KERN_DEBUG, "msgin_set_sync: answer w/%ins %i\n",
			srb->msgin_buf[3] << 2, srb->msgin_buf[4]);

		memcpy(srb->msgout_buf, srb->msgin_buf, 5);
		srb->msg_count = 5;
		DC395x_ENABLE_MSGOUT;
		dcb->sync_mode |= SYNC_NEGO_DONE;
	} else {
		/* We initiated; if wide nego is still pending, queue WDTR */
		if ((dcb->sync_mode & WIDE_NEGO_ENABLE)
		    && !(dcb->sync_mode & WIDE_NEGO_DONE)) {
			build_wdtr(acb, dcb, srb);
			DC395x_ENABLE_MSGOUT;
			dprintkdbg(DBG_0, "msgin_set_sync: Also try WDTR\n");
		}
	}
	srb->state &= ~SRB_DO_SYNC_NEGO;
	dcb->sync_mode |= SYNC_NEGO_DONE | SYNC_NEGO_ENABLE;

	reprogram_regs(acb, dcb);
}
/* The target rejected our WDTR: fall back to narrow transfers, mark
 * wide negotiation done, and — if sync negotiation is still pending —
 * queue an SDTR message as the next attempt. */
static inline void msgin_set_nowide(struct AdapterCtlBlk *acb,
		struct ScsiReqBlk *srb)
{
	struct DeviceCtlBlk *dcb = srb->dcb;
	dprintkdbg(DBG_1, "msgin_set_nowide: <%02i>\n", dcb->target_id);
	dcb->sync_period &= ~WIDE_SYNC;
	dcb->sync_mode &= ~(WIDE_NEGO_ENABLE);
	dcb->sync_mode |= WIDE_NEGO_DONE;
	srb->state &= ~SRB_DO_WIDE_NEGO;
	reprogram_regs(acb, dcb);
	if ((dcb->sync_mode & SYNC_NEGO_ENABLE)
	    && !(dcb->sync_mode & SYNC_NEGO_DONE)) {
		build_sdtr(acb, dcb, srb);
		DC395x_ENABLE_MSGOUT;
		dprintkdbg(DBG_0, "msgin_set_nowide: Rejected. Try SDTR anyway\n");
	}
}
/* Process an incoming WDTR message (msgin_buf[3]: 0 = 8-bit, 1 =
 * 16-bit).  Clamps the width to what we support, answers with a WDTR
 * if the target initiated, programs wide/narrow mode, and — if sync
 * negotiation is still pending — queues an SDTR message. */
static void msgin_set_wide(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
{
	struct DeviceCtlBlk *dcb = srb->dcb;
	/* Our supported width: 1 (16-bit) only if both the device config
	 * and the adapter allow it */
	u8 wide = (dcb->dev_mode & NTC_DO_WIDE_NEGO
		   && acb->config & HCC_WIDE_CARD) ? 1 : 0;
	dprintkdbg(DBG_1, "msgin_set_wide: <%02i>\n", dcb->target_id);
	if (srb->msgin_buf[3] > wide)
		srb->msgin_buf[3] = wide;
	/* Completed */
	if (!(srb->state & SRB_DO_WIDE_NEGO)) {
		/* Target initiated: reply with our (possibly clamped) WDTR */
		dprintkl(KERN_DEBUG,
			"msgin_set_wide: Wide nego initiated <%02i>\n",
			dcb->target_id);
		memcpy(srb->msgout_buf, srb->msgin_buf, 4);
		srb->msg_count = 4;
		srb->state |= SRB_DO_WIDE_NEGO;
		DC395x_ENABLE_MSGOUT;
	}

	dcb->sync_mode |= (WIDE_NEGO_ENABLE | WIDE_NEGO_DONE);
	if (srb->msgin_buf[3] > 0)
		dcb->sync_period |= WIDE_SYNC;
	else
		dcb->sync_period &= ~WIDE_SYNC;
	srb->state &= ~SRB_DO_WIDE_NEGO;
	/*dcb->sync_mode &= ~(WIDE_NEGO_ENABLE+WIDE_NEGO_DONE); */
	dprintkdbg(DBG_1,
		"msgin_set_wide: Wide (%i bit) negotiated <%02i>\n",
		(8 << srb->msgin_buf[3]), dcb->target_id);
	reprogram_regs(acb, dcb);
	if ((dcb->sync_mode & SYNC_NEGO_ENABLE)
	    && !(dcb->sync_mode & SYNC_NEGO_DONE)) {
		build_sdtr(acb, dcb, srb);
		DC395x_ENABLE_MSGOUT;
		dprintkdbg(DBG_0, "msgin_set_wide: Also try SDTR.\n");
	}
}
/*
 * extended message codes:
 *
 * code	description
 *
 * 00h	MODIFY DATA POINTER
 * 01h	SYNCHRONOUS DATA TRANSFER REQUEST
 * 02h	Reserved
 * 03h	WIDE DATA TRANSFER REQUEST
 * 04h - 7Fh	Reserved
 * 80h - FFh	Vendor specific
 */
/*
 * Message-in phase, part 0: one more message byte is in the FIFO.
 * Append it to msgin_buf; once the message is complete (see
 * msgin_completed()) dispatch on the first byte, then clear the
 * message-in state and acknowledge the message so the target can
 * continue (or disconnect).
 */
static void msgin_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	struct DeviceCtlBlk *dcb = acb->active_dcb;
	dprintkdbg(DBG_0, "msgin_phase0: (0x%p)\n", srb->cmd);

	srb->msgin_buf[acb->msg_len++] = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
	if (msgin_completed(srb->msgin_buf, acb->msg_len)) {
		/* Now eval the msg */
		switch (srb->msgin_buf[0]) {
		case DISCONNECT:
			srb->state = SRB_DISCONNECT;
			break;

		case SIMPLE_QUEUE_TAG:
		case HEAD_OF_QUEUE_TAG:
		case ORDERED_QUEUE_TAG:
			/* Reselection with a tag: switch srb to the
			 * request that carries this tag */
			srb =
			    msgin_qtag(acb, dcb,
					      srb->msgin_buf[1]);
			break;

		case MESSAGE_REJECT:
			DC395x_write16(acb, TRM_S1040_SCSI_CONTROL,
				       DO_CLRATN | DO_DATALATCH);
			/* A sync nego message was rejected ! */
			if (srb->state & SRB_DO_SYNC_NEGO) {
				msgin_set_async(acb, srb);
				break;
			}
			/* A wide nego message was rejected ! */
			if (srb->state & SRB_DO_WIDE_NEGO) {
				msgin_set_nowide(acb, srb);
				break;
			}
			/* Reject of anything else: abort the command */
			enable_msgout_abort(acb, srb);
			/*srb->state |= SRB_ABORT_SENT */
			break;

		case EXTENDED_MESSAGE:
			/* SDTR */
			if (srb->msgin_buf[1] == 3
			    && srb->msgin_buf[2] == EXTENDED_SDTR) {
				msgin_set_sync(acb, srb);
				break;
			}
			/* WDTR */
			if (srb->msgin_buf[1] == 2
			    && srb->msgin_buf[2] == EXTENDED_WDTR
			    && srb->msgin_buf[3] <= 2) {	/* sanity check ... */
				msgin_set_wide(acb, srb);
				break;
			}
			msgin_reject(acb, srb);
			break;

		case MSG_IGNOREWIDE:
			/* Discard  wide residual */
			dprintkdbg(DBG_0, "msgin_phase0: Ignore Wide Residual!\n");
			break;

		case COMMAND_COMPLETE:
			/* nothing has to be done */
			break;

		case SAVE_POINTERS:
			/*
			 * SAVE POINTER may be ignored as we have the struct
			 * ScsiReqBlk* associated with the scsi command.
			 */
			dprintkdbg(DBG_0, "msgin_phase0: (0x%p) "
				"SAVE POINTER rem=%i Ignore\n",
				srb->cmd, srb->total_xfer_length);
			break;

		case RESTORE_POINTERS:
			dprintkdbg(DBG_0, "msgin_phase0: RESTORE POINTER. Ignore\n");
			break;

		case ABORT:
			dprintkdbg(DBG_0, "msgin_phase0: (0x%p) "
				"<%02i-%i> ABORT msg\n",
				srb->cmd, dcb->target_id,
				dcb->target_lun);
			dcb->flag |= ABORT_DEV_;
			enable_msgout_abort(acb, srb);
			break;

		default:
			/* reject unknown messages */
			if (srb->msgin_buf[0] & IDENTIFY_BASE) {
				/* Identify msg: answer with our identify,
				 * then (note: no break) still reject */
				dprintkdbg(DBG_0, "msgin_phase0: Identify msg\n");
				srb->msg_count = 1;
				srb->msgout_buf[0] = dcb->identify_msg;
				DC395x_ENABLE_MSGOUT;
				srb->state |= SRB_MSGOUT;
				/*break; */
			}
			msgin_reject(acb, srb);
		}

		/* Clear counter and MsgIn state */
		srb->state &= ~SRB_MSGIN;
		acb->msg_len = 0;
	}
	*pscsi_status = PH_BUS_FREE;
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);	/* it's important ... you know! */
	DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_MSGACCEPT);
}
/*
 * Message-in phase, part 1: the bus entered MESSAGE IN.  Clear the
 * FIFO, program a one-byte transfer and start SCMD_FIFO_IN so the next
 * message byte lands in the FIFO (consumed later by msgin_phase0).
 * On the first byte of a message, leave disconnected state and enter
 * SRB_MSGIN.
 */
static void msgin_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	dprintkdbg(DBG_0, "msgin_phase1: (0x%p)\n", srb->cmd);
	clear_fifo(acb, "msgin_phase1");
	DC395x_write32(acb, TRM_S1040_SCSI_COUNTER, 1);
	if (!(srb->state & SRB_MSGIN)) {
		srb->state &= ~SRB_DISCONNECT;
		srb->state |= SRB_MSGIN;
	}
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);	/* it's important for atn stop */
	/* SCSI command */
	DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_IN);
}
/* No-op phase handler: used in the dispatch table for phases that need
 * no part-0 processing. */
static void nop0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
}
/* No-op phase handler: used in the dispatch table for phases that need
 * no part-1 processing. */
static void nop1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
}
/*
 * Copy @dcb's negotiated transfer parameters (period, offset, mode) to
 * every DCB with the same target ID, i.e. the target's other LUNs.
 * Only done from the LUN-0 identify (low 3 bits of identify_msg clear
 * — assumption: those bits carry the LUN; confirm against callers).
 * During device scan only the current offset is recorded.
 */
static void set_xfer_rate(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb)
{
	struct DeviceCtlBlk *i;

	/* set all lun device's  period, offset */
	if (dcb->identify_msg & 0x07)
		return;

	if (acb->scan_devices) {
		current_sync_offset = dcb->sync_offset;
		return;
	}

	list_for_each_entry(i, &acb->dcb_list, list)
		if (i->target_id == dcb->target_id) {
			i->sync_period = dcb->sync_period;
			i->sync_offset = dcb->sync_offset;
			i->sync_mode = dcb->sync_mode;
			i->min_nego_period = dcb->min_nego_period;
		}
}
/*
 * Handle a bus-free (disconnect) interrupt.  Depending on the active
 * SRB's state this is:
 *  - an unexpected reselection that was aborted (SRB_UNEXPECT_RESEL);
 *  - the completion of an ABORT we sent (SRB_ABORT_SENT);
 *  - a selection timeout (SRB_START_/SRB_MSGOUT, retried up to
 *    DC395x_MAX_RETRIES);
 *  - the expected disconnect of a command (SRB_DISCONNECT); or
 *  - normal command completion (SRB_COMPLETED -> srb_done()).
 */
static void disconnect(struct AdapterCtlBlk *acb)
{
	struct DeviceCtlBlk *dcb = acb->active_dcb;
	struct ScsiReqBlk *srb;

	if (!dcb) {
		/* Bus free without an active device: suspend the queue
		 * briefly and re-arm hardware reselection. */
		dprintkl(KERN_ERR, "disconnect: No such device\n");
		udelay(500);
		/* Suspend queue for a while */
		acb->last_reset =
		    jiffies + HZ / 2 +
		    HZ * acb->eeprom.delay_time;
		clear_fifo(acb, "disconnectEx");
		DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_HWRESELECT);
		return;
	}
	srb = dcb->active_srb;
	acb->active_dcb = NULL;
	dprintkdbg(DBG_0, "disconnect: (0x%p)\n", srb->cmd);

	srb->scsi_phase = PH_BUS_FREE;	/* initial phase */
	clear_fifo(acb, "disconnect");
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_HWRESELECT);
	if (srb->state & SRB_UNEXPECT_RESEL) {
		dprintkl(KERN_ERR,
			"disconnect: Unexpected reselection <%02i-%i>\n",
			dcb->target_id, dcb->target_lun);
		srb->state = 0;
		waiting_process_next(acb);
	} else if (srb->state & SRB_ABORT_SENT) {
		/* Our ABORT went out; finish the command with DID_ABORT */
		dcb->flag &= ~ABORT_DEV_;
		acb->last_reset = jiffies + HZ / 2 + 1;
		dprintkl(KERN_ERR, "disconnect: SRB_ABORT_SENT\n");
		doing_srb_done(acb, DID_ABORT, srb->cmd, 1);
		waiting_process_next(acb);
	} else {
		if ((srb->state & (SRB_START_ + SRB_MSGOUT))
		    || !(srb->
			 state & (SRB_DISCONNECT + SRB_COMPLETED))) {
			/*
			 * Selection time out
			 * SRB_START_ || SRB_MSGOUT || (!SRB_DISCONNECT && !SRB_COMPLETED)
			 */
			/* Unexp. Disc / Sel Timeout */
			if (srb->state != SRB_START_
			    && srb->state != SRB_MSGOUT) {
				srb->state = SRB_READY;
				dprintkl(KERN_DEBUG,
					"disconnect: (0x%p) Unexpected\n",
					srb->cmd);
				srb->target_status = SCSI_STAT_SEL_TIMEOUT;
				/* Completes via the SRB_COMPLETED branch */
				goto disc1;
			} else {
				/* Normal selection timeout */
				dprintkdbg(DBG_KG, "disconnect: (0x%p) "
					"<%02i-%i> SelTO\n", srb->cmd,
					dcb->target_id, dcb->target_lun);
				if (srb->retry_count++ > DC395x_MAX_RETRIES
				    || acb->scan_devices) {
					srb->target_status =
					    SCSI_STAT_SEL_TIMEOUT;
					goto disc1;
				}
				/* Requeue the command and retry shortly */
				free_tag(dcb, srb);
				srb_going_to_waiting_move(dcb, srb);
				dprintkdbg(DBG_KG,
					"disconnect: (0x%p) Retry\n",
					srb->cmd);
				waiting_set_timer(acb, HZ / 20);
			}
		} else if (srb->state & SRB_DISCONNECT) {
			u8 bval = DC395x_read8(acb, TRM_S1040_SCSI_SIGNAL);
			/*
			 * SRB_DISCONNECT (This is what we expect!)
			 */
			if (bval & 0x40) {
				dprintkdbg(DBG_0, "disconnect: SCSI bus stat "
					" 0x%02x: ACK set! Other controllers?\n",
					bval);
				/* It could come from another initiator, therefore don't do much ! */
			} else
				waiting_process_next(acb);
		} else if (srb->state & SRB_COMPLETED) {
	      disc1:
			/*
			 ** SRB_COMPLETED
			 */
			free_tag(dcb, srb);
			dcb->active_srb = NULL;
			srb->state = SRB_FREE;
			srb_done(acb, dcb, srb);
		}
	}
}
/*
 * Handle a reselection interrupt: a target reconnects to resume a
 * previously disconnected command.  Reads the reselecting target's ID
 * and LUN from the chip, requeues any command that lost arbitration to
 * this reselection, locates the matching DCB/SRB (the per-tag SRB is
 * resolved later via msgin_qtag() when tagged queueing is on), and
 * reprograms the chip's per-target registers before accepting the
 * identify message.
 */
static void reselect(struct AdapterCtlBlk *acb)
{
	struct DeviceCtlBlk *dcb = acb->active_dcb;
	struct ScsiReqBlk *srb = NULL;
	u16 rsel_tar_lun_id;
	u8 id, lun;
	u8 arblostflag = 0;	/* set on arb-lost; only used in commented-out code */
	dprintkdbg(DBG_0, "reselect: acb=%p\n", acb);

	clear_fifo(acb, "reselect");
	/*DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_HWRESELECT | DO_DATALATCH); */
	/* Read Reselected Target ID and LUN */
	rsel_tar_lun_id = DC395x_read16(acb, TRM_S1040_SCSI_TARGETID);
	if (dcb) {		/* Arbitration lost but Reselection win */
		srb = dcb->active_srb;
		if (!srb) {
			dprintkl(KERN_DEBUG, "reselect: Arb lost Resel won, "
				"but active_srb == NULL\n");
			DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);	/* it's important for atn stop */
			return;
		}
		/* Why the if ? */
		if (!acb->scan_devices) {
			/* Requeue the command that lost arbitration so it is
			 * retried after the reselection is served. */
			dprintkdbg(DBG_KG, "reselect: (0x%p) <%02i-%i> "
				"Arb lost but Resel win rsel=%i stat=0x%04x\n",
				srb->cmd, dcb->target_id,
				dcb->target_lun, rsel_tar_lun_id,
				DC395x_read16(acb, TRM_S1040_SCSI_STATUS));
			arblostflag = 1;
			/*srb->state |= SRB_DISCONNECT; */

			srb->state = SRB_READY;
			free_tag(dcb, srb);
			srb_going_to_waiting_move(dcb, srb);
			waiting_set_timer(acb, HZ / 20);

			/* return; */
		}
	}
	/* Read Reselected Target Id and LUN */
	if (!(rsel_tar_lun_id & (IDENTIFY_BASE << 8)))
		dprintkl(KERN_DEBUG, "reselect: Expects identify msg. "
			"Got %i!\n", rsel_tar_lun_id);
	id = rsel_tar_lun_id & 0xff;
	lun = (rsel_tar_lun_id >> 8) & 7;
	dcb = find_dcb(acb, id, lun);
	if (!dcb) {
		dprintkl(KERN_ERR, "reselect: From non existent device "
			"<%02i-%i>\n", id, lun);
		DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);	/* it's important for atn stop */
		return;
	}
	acb->active_dcb = dcb;

	if (!(dcb->dev_mode & NTC_DO_DISCONNECT))
		dprintkl(KERN_DEBUG, "reselect: in spite of forbidden "
			"disconnection? <%02i-%i>\n",
			dcb->target_id, dcb->target_lun);

	if (dcb->sync_mode & EN_TAG_QUEUEING /*&& !arblostflag */) {
		/* Tagged: real SRB is identified later by the QUEUE TAG
		 * message (msgin_qtag); use tmp_srb until then. */
		srb = acb->tmp_srb;
		dcb->active_srb = srb;
	} else {
		/* There can be only one! */
		srb = dcb->active_srb;
		if (!srb || !(srb->state & SRB_DISCONNECT)) {
			/*
			 * abort command
			 */
			dprintkl(KERN_DEBUG,
				"reselect: w/o disconnected cmds <%02i-%i>\n",
				dcb->target_id, dcb->target_lun);
			srb = acb->tmp_srb;
			srb->state = SRB_UNEXPECT_RESEL;
			dcb->active_srb = srb;
			enable_msgout_abort(acb, srb);
		} else {
			if (dcb->flag & ABORT_DEV_) {
				/*srb->state = SRB_ABORT_SENT; */
				enable_msgout_abort(acb, srb);
			} else
				srb->state = SRB_DATA_XFER;

		}
	}
	srb->scsi_phase = PH_BUS_FREE;	/* initial phase */

	/* Program HA ID, target ID, period and offset */
	dprintkdbg(DBG_0, "reselect: select <%i>\n", dcb->target_id);
	DC395x_write8(acb, TRM_S1040_SCSI_HOSTID, acb->scsi_host->this_id);	/* host   ID */
	DC395x_write8(acb, TRM_S1040_SCSI_TARGETID, dcb->target_id);		/* target ID */
	DC395x_write8(acb, TRM_S1040_SCSI_OFFSET, dcb->sync_offset);		/* offset    */
	DC395x_write8(acb, TRM_S1040_SCSI_SYNC, dcb->sync_period);		/* sync period, wide */
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);		/* it's important for atn stop */
	/* SCSI command */
	DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_MSGACCEPT);
}
  2840. static inline u8 tagq_blacklist(char *name)
  2841. {
  2842. #ifndef DC395x_NO_TAGQ
  2843. #if 0
  2844. u8 i;
  2845. for (i = 0; i < BADDEVCNT; i++)
  2846. if (memcmp(name, DC395x_baddevname1[i], 28) == 0)
  2847. return 1;
  2848. #endif
  2849. return 0;
  2850. #else
  2851. return 1;
  2852. #endif
  2853. }
  2854. static void disc_tagq_set(struct DeviceCtlBlk *dcb, struct ScsiInqData *ptr)
  2855. {
  2856. /* Check for SCSI format (ANSI and Response data format) */
  2857. if ((ptr->Vers & 0x07) >= 2 || (ptr->RDF & 0x0F) == 2) {
  2858. if ((ptr->Flags & SCSI_INQ_CMDQUEUE)
  2859. && (dcb->dev_mode & NTC_DO_TAG_QUEUEING) &&
  2860. /*(dcb->dev_mode & NTC_DO_DISCONNECT) */
  2861. /* ((dcb->dev_type == TYPE_DISK)
  2862. || (dcb->dev_type == TYPE_MOD)) && */
  2863. !tagq_blacklist(((char *)ptr) + 8)) {
  2864. if (dcb->max_command == 1)
  2865. dcb->max_command =
  2866. dcb->acb->tag_max_num;
  2867. dcb->sync_mode |= EN_TAG_QUEUEING;
  2868. /*dcb->tag_mask = 0; */
  2869. } else
  2870. dcb->max_command = 1;
  2871. }
  2872. }
  2873. static void add_dev(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
  2874. struct ScsiInqData *ptr)
  2875. {
  2876. u8 bval1 = ptr->DevType & SCSI_DEVTYPE;
  2877. dcb->dev_type = bval1;
  2878. /* if (bval1 == TYPE_DISK || bval1 == TYPE_MOD) */
  2879. disc_tagq_set(dcb, ptr);
  2880. }
  2881. /* unmap mapped pci regions from SRB */
  2882. static void pci_unmap_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
  2883. {
  2884. struct scsi_cmnd *cmd = srb->cmd;
  2885. enum dma_data_direction dir = cmd->sc_data_direction;
  2886. if (scsi_sg_count(cmd) && dir != PCI_DMA_NONE) {
  2887. /* unmap DC395x SG list */
  2888. dprintkdbg(DBG_SG, "pci_unmap_srb: list=%08x(%05x)\n",
  2889. srb->sg_bus_addr, SEGMENTX_LEN);
  2890. pci_unmap_single(acb->dev, srb->sg_bus_addr,
  2891. SEGMENTX_LEN,
  2892. PCI_DMA_TODEVICE);
  2893. dprintkdbg(DBG_SG, "pci_unmap_srb: segs=%i buffer=%p\n",
  2894. scsi_sg_count(cmd), scsi_bufflen(cmd));
  2895. /* unmap the sg segments */
  2896. scsi_dma_unmap(cmd);
  2897. }
  2898. }
/* unmap mapped pci sense buffer from SRB */
static void pci_unmap_srb_sense(struct AdapterCtlBlk *acb,
		struct ScsiReqBlk *srb)
{
	/* Only auto request-sense commands have a mapped sense buffer. */
	if (!(srb->flag & AUTO_REQSENSE))
		return;
	/* Unmap sense buffer */
	dprintkdbg(DBG_SG, "pci_unmap_srb_sense: buffer=%08x\n",
		srb->segment_x[0].address);
	pci_unmap_single(acb->dev, srb->segment_x[0].address,
			srb->segment_x[0].length, PCI_DMA_FROMDEVICE);
	/* Restore SG stuff */
	/* request_sense() parked the original first SG entry in the last
	 * slot of segment_x[] and the transferred byte count in
	 * srb->xferred; undo that swap here so the SRB describes the
	 * original command again. */
	srb->total_xfer_length = srb->xferred;
	srb->segment_x[0].address =
		srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1].address;
	srb->segment_x[0].length =
		srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1].length;
}
  2917. /*
  2918. * Complete execution of a SCSI command
  2919. * Signal completion to the generic SCSI driver
  2920. */
  2921. static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
  2922. struct ScsiReqBlk *srb)
  2923. {
  2924. u8 tempcnt, status;
  2925. struct scsi_cmnd *cmd = srb->cmd;
  2926. enum dma_data_direction dir = cmd->sc_data_direction;
  2927. int ckc_only = 1;
  2928. dprintkdbg(DBG_1, "srb_done: (0x%p) <%02i-%i>\n", srb->cmd,
  2929. srb->cmd->device->id, (u8)srb->cmd->device->lun);
  2930. dprintkdbg(DBG_SG, "srb_done: srb=%p sg=%i(%i/%i) buf=%p\n",
  2931. srb, scsi_sg_count(cmd), srb->sg_index, srb->sg_count,
  2932. scsi_sgtalbe(cmd));
  2933. status = srb->target_status;
  2934. if (srb->flag & AUTO_REQSENSE) {
  2935. dprintkdbg(DBG_0, "srb_done: AUTO_REQSENSE1\n");
  2936. pci_unmap_srb_sense(acb, srb);
  2937. /*
  2938. ** target status..........................
  2939. */
  2940. srb->flag &= ~AUTO_REQSENSE;
  2941. srb->adapter_status = 0;
  2942. srb->target_status = CHECK_CONDITION << 1;
  2943. if (debug_enabled(DBG_1)) {
  2944. switch (cmd->sense_buffer[2] & 0x0f) {
  2945. case NOT_READY:
  2946. dprintkl(KERN_DEBUG,
  2947. "ReqSense: NOT_READY cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
  2948. cmd->cmnd[0], dcb->target_id,
  2949. dcb->target_lun, status, acb->scan_devices);
  2950. break;
  2951. case UNIT_ATTENTION:
  2952. dprintkl(KERN_DEBUG,
  2953. "ReqSense: UNIT_ATTENTION cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
  2954. cmd->cmnd[0], dcb->target_id,
  2955. dcb->target_lun, status, acb->scan_devices);
  2956. break;
  2957. case ILLEGAL_REQUEST:
  2958. dprintkl(KERN_DEBUG,
  2959. "ReqSense: ILLEGAL_REQUEST cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
  2960. cmd->cmnd[0], dcb->target_id,
  2961. dcb->target_lun, status, acb->scan_devices);
  2962. break;
  2963. case MEDIUM_ERROR:
  2964. dprintkl(KERN_DEBUG,
  2965. "ReqSense: MEDIUM_ERROR cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
  2966. cmd->cmnd[0], dcb->target_id,
  2967. dcb->target_lun, status, acb->scan_devices);
  2968. break;
  2969. case HARDWARE_ERROR:
  2970. dprintkl(KERN_DEBUG,
  2971. "ReqSense: HARDWARE_ERROR cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
  2972. cmd->cmnd[0], dcb->target_id,
  2973. dcb->target_lun, status, acb->scan_devices);
  2974. break;
  2975. }
  2976. if (cmd->sense_buffer[7] >= 6)
  2977. printk("sense=0x%02x ASC=0x%02x ASCQ=0x%02x "
  2978. "(0x%08x 0x%08x)\n",
  2979. cmd->sense_buffer[2], cmd->sense_buffer[12],
  2980. cmd->sense_buffer[13],
  2981. *((unsigned int *)(cmd->sense_buffer + 3)),
  2982. *((unsigned int *)(cmd->sense_buffer + 8)));
  2983. else
  2984. printk("sense=0x%02x No ASC/ASCQ (0x%08x)\n",
  2985. cmd->sense_buffer[2],
  2986. *((unsigned int *)(cmd->sense_buffer + 3)));
  2987. }
  2988. if (status == (CHECK_CONDITION << 1)) {
  2989. cmd->result = DID_BAD_TARGET << 16;
  2990. goto ckc_e;
  2991. }
  2992. dprintkdbg(DBG_0, "srb_done: AUTO_REQSENSE2\n");
  2993. if (srb->total_xfer_length
  2994. && srb->total_xfer_length >= cmd->underflow)
  2995. cmd->result =
  2996. MK_RES_LNX(DRIVER_SENSE, DID_OK,
  2997. srb->end_message, CHECK_CONDITION);
  2998. /*SET_RES_DID(cmd->result,DID_OK) */
  2999. else
  3000. cmd->result =
  3001. MK_RES_LNX(DRIVER_SENSE, DID_OK,
  3002. srb->end_message, CHECK_CONDITION);
  3003. goto ckc_e;
  3004. }
  3005. /*************************************************************/
  3006. if (status) {
  3007. /*
  3008. * target status..........................
  3009. */
  3010. if (status_byte(status) == CHECK_CONDITION) {
  3011. request_sense(acb, dcb, srb);
  3012. return;
  3013. } else if (status_byte(status) == QUEUE_FULL) {
  3014. tempcnt = (u8)list_size(&dcb->srb_going_list);
  3015. dprintkl(KERN_INFO, "QUEUE_FULL for dev <%02i-%i> with %i cmnds\n",
  3016. dcb->target_id, dcb->target_lun, tempcnt);
  3017. if (tempcnt > 1)
  3018. tempcnt--;
  3019. dcb->max_command = tempcnt;
  3020. free_tag(dcb, srb);
  3021. srb_going_to_waiting_move(dcb, srb);
  3022. waiting_set_timer(acb, HZ / 20);
  3023. srb->adapter_status = 0;
  3024. srb->target_status = 0;
  3025. return;
  3026. } else if (status == SCSI_STAT_SEL_TIMEOUT) {
  3027. srb->adapter_status = H_SEL_TIMEOUT;
  3028. srb->target_status = 0;
  3029. cmd->result = DID_NO_CONNECT << 16;
  3030. } else {
  3031. srb->adapter_status = 0;
  3032. SET_RES_DID(cmd->result, DID_ERROR);
  3033. SET_RES_MSG(cmd->result, srb->end_message);
  3034. SET_RES_TARGET(cmd->result, status);
  3035. }
  3036. } else {
  3037. /*
  3038. ** process initiator status..........................
  3039. */
  3040. status = srb->adapter_status;
  3041. if (status & H_OVER_UNDER_RUN) {
  3042. srb->target_status = 0;
  3043. SET_RES_DID(cmd->result, DID_OK);
  3044. SET_RES_MSG(cmd->result, srb->end_message);
  3045. } else if (srb->status & PARITY_ERROR) {
  3046. SET_RES_DID(cmd->result, DID_PARITY);
  3047. SET_RES_MSG(cmd->result, srb->end_message);
  3048. } else { /* No error */
  3049. srb->adapter_status = 0;
  3050. srb->target_status = 0;
  3051. SET_RES_DID(cmd->result, DID_OK);
  3052. }
  3053. }
  3054. ckc_only = 0;
  3055. /* Check Error Conditions */
  3056. ckc_e:
  3057. pci_unmap_srb(acb, srb);
  3058. if (cmd->cmnd[0] == INQUIRY) {
  3059. unsigned char *base = NULL;
  3060. struct ScsiInqData *ptr;
  3061. unsigned long flags = 0;
  3062. struct scatterlist* sg = scsi_sglist(cmd);
  3063. size_t offset = 0, len = sizeof(struct ScsiInqData);
  3064. local_irq_save(flags);
  3065. base = scsi_kmap_atomic_sg(sg, scsi_sg_count(cmd), &offset, &len);
  3066. ptr = (struct ScsiInqData *)(base + offset);
  3067. if (!ckc_only && (cmd->result & RES_DID) == 0
  3068. && cmd->cmnd[2] == 0 && scsi_bufflen(cmd) >= 8
  3069. && dir != PCI_DMA_NONE && ptr && (ptr->Vers & 0x07) >= 2)
  3070. dcb->inquiry7 = ptr->Flags;
  3071. /*if( srb->cmd->cmnd[0] == INQUIRY && */
  3072. /* (host_byte(cmd->result) == DID_OK || status_byte(cmd->result) & CHECK_CONDITION) ) */
  3073. if ((cmd->result == (DID_OK << 16) ||
  3074. status_byte(cmd->result) == CHECK_CONDITION)) {
  3075. if (!dcb->init_tcq_flag) {
  3076. add_dev(acb, dcb, ptr);
  3077. dcb->init_tcq_flag = 1;
  3078. }
  3079. }
  3080. scsi_kunmap_atomic_sg(base);
  3081. local_irq_restore(flags);
  3082. }
  3083. /* Here is the info for Doug Gilbert's sg3 ... */
  3084. scsi_set_resid(cmd, srb->total_xfer_length);
  3085. /* This may be interpreted by sb. or not ... */
  3086. cmd->SCp.this_residual = srb->total_xfer_length;
  3087. cmd->SCp.buffers_residual = 0;
  3088. if (debug_enabled(DBG_KG)) {
  3089. if (srb->total_xfer_length)
  3090. dprintkdbg(DBG_KG, "srb_done: (0x%p) <%02i-%i> "
  3091. "cmnd=0x%02x Missed %i bytes\n",
  3092. cmd, cmd->device->id, (u8)cmd->device->lun,
  3093. cmd->cmnd[0], srb->total_xfer_length);
  3094. }
  3095. srb_going_remove(dcb, srb);
  3096. /* Add to free list */
  3097. if (srb == acb->tmp_srb)
  3098. dprintkl(KERN_ERR, "srb_done: ERROR! Completed cmd with tmp_srb\n");
  3099. else {
  3100. dprintkdbg(DBG_0, "srb_done: (0x%p) done result=0x%08x\n",
  3101. cmd, cmd->result);
  3102. srb_free_insert(acb, srb);
  3103. }
  3104. cmd->scsi_done(cmd);
  3105. waiting_process_next(acb);
  3106. }
  3107. /* abort all cmds in our queues */
  3108. static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_flag,
  3109. struct scsi_cmnd *cmd, u8 force)
  3110. {
  3111. struct DeviceCtlBlk *dcb;
  3112. dprintkl(KERN_INFO, "doing_srb_done: pids ");
  3113. list_for_each_entry(dcb, &acb->dcb_list, list) {
  3114. struct ScsiReqBlk *srb;
  3115. struct ScsiReqBlk *tmp;
  3116. struct scsi_cmnd *p;
  3117. list_for_each_entry_safe(srb, tmp, &dcb->srb_going_list, list) {
  3118. enum dma_data_direction dir;
  3119. int result;
  3120. p = srb->cmd;
  3121. dir = p->sc_data_direction;
  3122. result = MK_RES(0, did_flag, 0, 0);
  3123. printk("G:%p(%02i-%i) ", p,
  3124. p->device->id, (u8)p->device->lun);
  3125. srb_going_remove(dcb, srb);
  3126. free_tag(dcb, srb);
  3127. srb_free_insert(acb, srb);
  3128. p->result = result;
  3129. pci_unmap_srb_sense(acb, srb);
  3130. pci_unmap_srb(acb, srb);
  3131. if (force) {
  3132. /* For new EH, we normally don't need to give commands back,
  3133. * as they all complete or all time out */
  3134. p->scsi_done(p);
  3135. }
  3136. }
  3137. if (!list_empty(&dcb->srb_going_list))
  3138. dprintkl(KERN_DEBUG,
  3139. "How could the ML send cmnds to the Going queue? <%02i-%i>\n",
  3140. dcb->target_id, dcb->target_lun);
  3141. if (dcb->tag_mask)
  3142. dprintkl(KERN_DEBUG,
  3143. "tag_mask for <%02i-%i> should be empty, is %08x!\n",
  3144. dcb->target_id, dcb->target_lun,
  3145. dcb->tag_mask);
  3146. /* Waiting queue */
  3147. list_for_each_entry_safe(srb, tmp, &dcb->srb_waiting_list, list) {
  3148. int result;
  3149. p = srb->cmd;
  3150. result = MK_RES(0, did_flag, 0, 0);
  3151. printk("W:%p<%02i-%i>", p, p->device->id,
  3152. (u8)p->device->lun);
  3153. srb_waiting_remove(dcb, srb);
  3154. srb_free_insert(acb, srb);
  3155. p->result = result;
  3156. pci_unmap_srb_sense(acb, srb);
  3157. pci_unmap_srb(acb, srb);
  3158. if (force) {
  3159. /* For new EH, we normally don't need to give commands back,
  3160. * as they all complete or all time out */
  3161. cmd->scsi_done(cmd);
  3162. }
  3163. }
  3164. if (!list_empty(&dcb->srb_waiting_list))
  3165. dprintkl(KERN_DEBUG, "ML queued %i cmnds again to <%02i-%i>\n",
  3166. list_size(&dcb->srb_waiting_list), dcb->target_id,
  3167. dcb->target_lun);
  3168. dcb->flag &= ~ABORT_DEV_;
  3169. }
  3170. printk("\n");
  3171. }
  3172. static void reset_scsi_bus(struct AdapterCtlBlk *acb)
  3173. {
  3174. dprintkdbg(DBG_0, "reset_scsi_bus: acb=%p\n", acb);
  3175. acb->acb_flag |= RESET_DEV; /* RESET_DETECT, RESET_DONE, RESET_DEV */
  3176. DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_RSTSCSI);
  3177. while (!(DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS) & INT_SCSIRESET))
  3178. /* nothing */;
  3179. }
  3180. static void set_basic_config(struct AdapterCtlBlk *acb)
  3181. {
  3182. u8 bval;
  3183. u16 wval;
  3184. DC395x_write8(acb, TRM_S1040_SCSI_TIMEOUT, acb->sel_timeout);
  3185. if (acb->config & HCC_PARITY)
  3186. bval = PHASELATCH | INITIATOR | BLOCKRST | PARITYCHECK;
  3187. else
  3188. bval = PHASELATCH | INITIATOR | BLOCKRST;
  3189. DC395x_write8(acb, TRM_S1040_SCSI_CONFIG0, bval);
  3190. /* program configuration 1: Act_Neg (+ Act_Neg_Enh? + Fast_Filter? + DataDis?) */
  3191. DC395x_write8(acb, TRM_S1040_SCSI_CONFIG1, 0x03); /* was 0x13: default */
  3192. /* program Host ID */
  3193. DC395x_write8(acb, TRM_S1040_SCSI_HOSTID, acb->scsi_host->this_id);
  3194. /* set ansynchronous transfer */
  3195. DC395x_write8(acb, TRM_S1040_SCSI_OFFSET, 0x00);
  3196. /* Turn LED control off */
  3197. wval = DC395x_read16(acb, TRM_S1040_GEN_CONTROL) & 0x7F;
  3198. DC395x_write16(acb, TRM_S1040_GEN_CONTROL, wval);
  3199. /* DMA config */
  3200. wval = DC395x_read16(acb, TRM_S1040_DMA_CONFIG) & ~DMA_FIFO_CTRL;
  3201. wval |=
  3202. DMA_FIFO_HALF_HALF | DMA_ENHANCE /*| DMA_MEM_MULTI_READ */ ;
  3203. DC395x_write16(acb, TRM_S1040_DMA_CONFIG, wval);
  3204. /* Clear pending interrupt status */
  3205. DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS);
  3206. /* Enable SCSI interrupt */
  3207. DC395x_write8(acb, TRM_S1040_SCSI_INTEN, 0x7F);
  3208. DC395x_write8(acb, TRM_S1040_DMA_INTEN, EN_SCSIINTR | EN_DMAXFERERROR
  3209. /*| EN_DMAXFERABORT | EN_DMAXFERCOMP | EN_FORCEDMACOMP */
  3210. );
  3211. }
/*
 * scsi_reset_detect - handle a SCSI bus reset seen on the bus.
 *
 * Resets the chip's SCSI and DMA modules, reprograms the basic
 * configuration and either acknowledges a reset we initiated
 * ourselves (RESET_DEV set) or, for an externally caused reset,
 * fails all queued commands with DID_RESET and restarts processing.
 */
static void scsi_reset_detect(struct AdapterCtlBlk *acb)
{
	dprintkl(KERN_INFO, "scsi_reset_detect: acb=%p\n", acb);
	/* delay half a second */
	if (timer_pending(&acb->waiting_timer))
		del_timer(&acb->waiting_timer);

	/* reset the SCSI and DMA modules on the chip */
	DC395x_write8(acb, TRM_S1040_SCSI_CONTROL, DO_RSTMODULE);
	DC395x_write8(acb, TRM_S1040_DMA_CONTROL, DMARESETMODULE);
	/*DC395x_write8(acb, TRM_S1040_DMA_CONTROL,STOPDMAXFER); */
	udelay(500);
	/* Maybe we locked up the bus? Then lets wait even longer ... */
	acb->last_reset =
	    jiffies + 5 * HZ / 2 +
	    HZ * acb->eeprom.delay_time;

	clear_fifo(acb, "scsi_reset_detect");
	set_basic_config(acb);
	/*1.25 */
	/*DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_HWRESELECT); */

	if (acb->acb_flag & RESET_DEV) {	/* RESET_DETECT, RESET_DONE, RESET_DEV */
		/* reset was requested by us: just note that it has now
		 * been observed on the bus */
		acb->acb_flag |= RESET_DONE;
	} else {
		/* reset originated elsewhere: fail everything that was
		 * in flight and start over */
		acb->acb_flag |= RESET_DETECT;
		reset_dev_param(acb);
		doing_srb_done(acb, DID_RESET, NULL, 1);
		/*DC395x_RecoverSRB( acb ); */
		acb->active_dcb = NULL;
		acb->acb_flag = 0;
		waiting_process_next(acb);
	}
}
/*
 * request_sense - issue an automatic REQUEST SENSE for a command that
 * returned CHECK CONDITION.
 *
 * The SRB's first SG entry and transfer length are parked (in the
 * last segment_x[] slot and srb->xferred) so the SRB can be reused to
 * transfer the sense data; pci_unmap_srb_sense() restores them when
 * the sense transfer completes.
 */
static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
		struct ScsiReqBlk *srb)
{
	struct scsi_cmnd *cmd = srb->cmd;
	dprintkdbg(DBG_1, "request_sense: (0x%p) <%02i-%i>\n",
		cmd, cmd->device->id, (u8)cmd->device->lun);

	srb->flag |= AUTO_REQSENSE;
	srb->adapter_status = 0;
	srb->target_status = 0;

	/* KG: Can this prevent crap sense data ? */
	memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);

	/* Save some data */
	/* park the original first SG entry in the (otherwise unused)
	 * last slot; restored by pci_unmap_srb_sense() */
	srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1].address =
	    srb->segment_x[0].address;
	srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1].length =
	    srb->segment_x[0].length;
	srb->xferred = srb->total_xfer_length;
	/* srb->segment_x : a one entry of S/G list table */
	srb->total_xfer_length = SCSI_SENSE_BUFFERSIZE;
	srb->segment_x[0].length = SCSI_SENSE_BUFFERSIZE;
	/* Map sense buffer */
	srb->segment_x[0].address =
	    pci_map_single(acb->dev, cmd->sense_buffer,
			   SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
	dprintkdbg(DBG_SG, "request_sense: map buffer %p->%08x(%05x)\n",
	       cmd->sense_buffer, srb->segment_x[0].address,
	       SCSI_SENSE_BUFFERSIZE);
	srb->sg_count = 1;
	srb->sg_index = 0;

	if (start_scsi(acb, dcb, srb)) {	/* Should only happen, if sb. else grabs the bus */
		dprintkl(KERN_DEBUG,
			"request_sense: (0x%p) failed <%02i-%i>\n",
			srb->cmd, dcb->target_id, dcb->target_lun);
		/* bus was busy: queue the SRB again and retry shortly */
		srb_going_to_waiting_move(dcb, srb);
		waiting_set_timer(acb, HZ / 100);
	}
}
  3279. /**
  3280. * device_alloc - Allocate a new device instance. This create the
  3281. * devices instance and sets up all the data items. The adapter
  3282. * instance is required to obtain confiuration information for this
  3283. * device. This does *not* add this device to the adapters device
  3284. * list.
  3285. *
  3286. * @acb: The adapter to obtain configuration information from.
  3287. * @target: The target for the new device.
  3288. * @lun: The lun for the new device.
  3289. *
  3290. * Return the new device if successful or NULL on failure.
  3291. **/
  3292. static struct DeviceCtlBlk *device_alloc(struct AdapterCtlBlk *acb,
  3293. u8 target, u8 lun)
  3294. {
  3295. struct NvRamType *eeprom = &acb->eeprom;
  3296. u8 period_index = eeprom->target[target].period & 0x07;
  3297. struct DeviceCtlBlk *dcb;
  3298. dcb = kmalloc(sizeof(struct DeviceCtlBlk), GFP_ATOMIC);
  3299. dprintkdbg(DBG_0, "device_alloc: <%02i-%i>\n", target, lun);
  3300. if (!dcb)
  3301. return NULL;
  3302. dcb->acb = NULL;
  3303. INIT_LIST_HEAD(&dcb->srb_going_list);
  3304. INIT_LIST_HEAD(&dcb->srb_waiting_list);
  3305. dcb->active_srb = NULL;
  3306. dcb->tag_mask = 0;
  3307. dcb->max_command = 1;
  3308. dcb->target_id = target;
  3309. dcb->target_lun = lun;
  3310. dcb->dev_mode = eeprom->target[target].cfg0;
  3311. #ifndef DC395x_NO_DISCONNECT
  3312. dcb->identify_msg =
  3313. IDENTIFY(dcb->dev_mode & NTC_DO_DISCONNECT, lun);
  3314. #else
  3315. dcb->identify_msg = IDENTIFY(0, lun);
  3316. #endif
  3317. dcb->inquiry7 = 0;
  3318. dcb->sync_mode = 0;
  3319. dcb->min_nego_period = clock_period[period_index];
  3320. dcb->sync_period = 0;
  3321. dcb->sync_offset = 0;
  3322. dcb->flag = 0;
  3323. #ifndef DC395x_NO_WIDE
  3324. if ((dcb->dev_mode & NTC_DO_WIDE_NEGO)
  3325. && (acb->config & HCC_WIDE_CARD))
  3326. dcb->sync_mode |= WIDE_NEGO_ENABLE;
  3327. #endif
  3328. #ifndef DC395x_NO_SYNC
  3329. if (dcb->dev_mode & NTC_DO_SYNC_NEGO)
  3330. if (!(lun) || current_sync_offset)
  3331. dcb->sync_mode |= SYNC_NEGO_ENABLE;
  3332. #endif
  3333. if (dcb->target_lun != 0) {
  3334. /* Copy settings */
  3335. struct DeviceCtlBlk *p;
  3336. list_for_each_entry(p, &acb->dcb_list, list)
  3337. if (p->target_id == dcb->target_id)
  3338. break;
  3339. dprintkdbg(DBG_1,
  3340. "device_alloc: <%02i-%i> copy from <%02i-%i>\n",
  3341. dcb->target_id, dcb->target_lun,
  3342. p->target_id, p->target_lun);
  3343. dcb->sync_mode = p->sync_mode;
  3344. dcb->sync_period = p->sync_period;
  3345. dcb->min_nego_period = p->min_nego_period;
  3346. dcb->sync_offset = p->sync_offset;
  3347. dcb->inquiry7 = p->inquiry7;
  3348. }
  3349. return dcb;
  3350. }
  3351. /**
  3352. * adapter_add_device - Adds the device instance to the adaptor instance.
  3353. *
  3354. * @acb: The adapter device to be updated
  3355. * @dcb: A newly created and initialised device instance to add.
  3356. **/
  3357. static void adapter_add_device(struct AdapterCtlBlk *acb,
  3358. struct DeviceCtlBlk *dcb)
  3359. {
  3360. /* backpointer to adapter */
  3361. dcb->acb = acb;
  3362. /* set run_robin to this device if it is currently empty */
  3363. if (list_empty(&acb->dcb_list))
  3364. acb->dcb_run_robin = dcb;
  3365. /* add device to list */
  3366. list_add_tail(&dcb->list, &acb->dcb_list);
  3367. /* update device maps */
  3368. acb->dcb_map[dcb->target_id] |= (1 << dcb->target_lun);
  3369. acb->children[dcb->target_id][dcb->target_lun] = dcb;
  3370. }
  3371. /**
  3372. * adapter_remove_device - Removes the device instance from the adaptor
  3373. * instance. The device instance is not check in any way or freed by this.
  3374. * The caller is expected to take care of that. This will simply remove the
  3375. * device from the adapters data strcutures.
  3376. *
  3377. * @acb: The adapter device to be updated
  3378. * @dcb: A device that has previously been added to the adapter.
  3379. **/
  3380. static void adapter_remove_device(struct AdapterCtlBlk *acb,
  3381. struct DeviceCtlBlk *dcb)
  3382. {
  3383. struct DeviceCtlBlk *i;
  3384. struct DeviceCtlBlk *tmp;
  3385. dprintkdbg(DBG_0, "adapter_remove_device: <%02i-%i>\n",
  3386. dcb->target_id, dcb->target_lun);
  3387. /* fix up any pointers to this device that we have in the adapter */
  3388. if (acb->active_dcb == dcb)
  3389. acb->active_dcb = NULL;
  3390. if (acb->dcb_run_robin == dcb)
  3391. acb->dcb_run_robin = dcb_get_next(&acb->dcb_list, dcb);
  3392. /* unlink from list */
  3393. list_for_each_entry_safe(i, tmp, &acb->dcb_list, list)
  3394. if (dcb == i) {
  3395. list_del(&i->list);
  3396. break;
  3397. }
  3398. /* clear map and children */
  3399. acb->dcb_map[dcb->target_id] &= ~(1 << dcb->target_lun);
  3400. acb->children[dcb->target_id][dcb->target_lun] = NULL;
  3401. dcb->acb = NULL;
  3402. }
  3403. /**
  3404. * adapter_remove_and_free_device - Removes a single device from the adapter
  3405. * and then frees the device information.
  3406. *
  3407. * @acb: The adapter device to be updated
  3408. * @dcb: A device that has previously been added to the adapter.
  3409. */
  3410. static void adapter_remove_and_free_device(struct AdapterCtlBlk *acb,
  3411. struct DeviceCtlBlk *dcb)
  3412. {
  3413. if (list_size(&dcb->srb_going_list) > 1) {
  3414. dprintkdbg(DBG_1, "adapter_remove_and_free_device: <%02i-%i> "
  3415. "Won't remove because of %i active requests.\n",
  3416. dcb->target_id, dcb->target_lun,
  3417. list_size(&dcb->srb_going_list));
  3418. return;
  3419. }
  3420. adapter_remove_device(acb, dcb);
  3421. kfree(dcb);
  3422. }
  3423. /**
  3424. * adapter_remove_and_free_all_devices - Removes and frees all of the
  3425. * devices associated with the specified adapter.
  3426. *
  3427. * @acb: The adapter from which all devices should be removed.
  3428. **/
  3429. static void adapter_remove_and_free_all_devices(struct AdapterCtlBlk* acb)
  3430. {
  3431. struct DeviceCtlBlk *dcb;
  3432. struct DeviceCtlBlk *tmp;
  3433. dprintkdbg(DBG_1, "adapter_remove_and_free_all_devices: num=%i\n",
  3434. list_size(&acb->dcb_list));
  3435. list_for_each_entry_safe(dcb, tmp, &acb->dcb_list, list)
  3436. adapter_remove_and_free_device(acb, dcb);
  3437. }
  3438. /**
  3439. * dc395x_slave_alloc - Called by the scsi mid layer to tell us about a new
  3440. * scsi device that we need to deal with. We allocate a new device and then
  3441. * insert that device into the adapters device list.
  3442. *
  3443. * @scsi_device: The new scsi device that we need to handle.
  3444. **/
  3445. static int dc395x_slave_alloc(struct scsi_device *scsi_device)
  3446. {
  3447. struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)scsi_device->host->hostdata;
  3448. struct DeviceCtlBlk *dcb;
  3449. dcb = device_alloc(acb, scsi_device->id, scsi_device->lun);
  3450. if (!dcb)
  3451. return -ENOMEM;
  3452. adapter_add_device(acb, dcb);
  3453. return 0;
  3454. }
  3455. /**
  3456. * dc395x_slave_destroy - Called by the scsi mid layer to tell us about a
  3457. * device that is going away.
  3458. *
  3459. * @scsi_device: The new scsi device that we need to handle.
  3460. **/
  3461. static void dc395x_slave_destroy(struct scsi_device *scsi_device)
  3462. {
  3463. struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)scsi_device->host->hostdata;
  3464. struct DeviceCtlBlk *dcb = find_dcb(acb, scsi_device->id, scsi_device->lun);
  3465. if (dcb)
  3466. adapter_remove_and_free_device(acb, dcb);
  3467. }
  3468. /**
  3469. * trms1040_wait_30us: wait for 30 us
  3470. *
  3471. * Waits for 30us (using the chip by the looks of it..)
  3472. *
  3473. * @io_port: base I/O address
  3474. **/
  3475. static void trms1040_wait_30us(unsigned long io_port)
  3476. {
  3477. /* ScsiPortStallExecution(30); wait 30 us */
  3478. outb(5, io_port + TRM_S1040_GEN_TIMER);
  3479. while (!(inb(io_port + TRM_S1040_GEN_STATUS) & GTIMEOUT))
  3480. /* nothing */ ;
  3481. }
  3482. /**
  3483. * trms1040_write_cmd - write the secified command and address to
  3484. * chip
  3485. *
  3486. * @io_port: base I/O address
  3487. * @cmd: SB + op code (command) to send
  3488. * @addr: address to send
  3489. **/
  3490. static void trms1040_write_cmd(unsigned long io_port, u8 cmd, u8 addr)
  3491. {
  3492. int i;
  3493. u8 send_data;
  3494. /* program SB + OP code */
  3495. for (i = 0; i < 3; i++, cmd <<= 1) {
  3496. send_data = NVR_SELECT;
  3497. if (cmd & 0x04) /* Start from bit 2 */
  3498. send_data |= NVR_BITOUT;
  3499. outb(send_data, io_port + TRM_S1040_GEN_NVRAM);
  3500. trms1040_wait_30us(io_port);
  3501. outb((send_data | NVR_CLOCK),
  3502. io_port + TRM_S1040_GEN_NVRAM);
  3503. trms1040_wait_30us(io_port);
  3504. }
  3505. /* send address */
  3506. for (i = 0; i < 7; i++, addr <<= 1) {
  3507. send_data = NVR_SELECT;
  3508. if (addr & 0x40) /* Start from bit 6 */
  3509. send_data |= NVR_BITOUT;
  3510. outb(send_data, io_port + TRM_S1040_GEN_NVRAM);
  3511. trms1040_wait_30us(io_port);
  3512. outb((send_data | NVR_CLOCK),
  3513. io_port + TRM_S1040_GEN_NVRAM);
  3514. trms1040_wait_30us(io_port);
  3515. }
  3516. outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM);
  3517. trms1040_wait_30us(io_port);
  3518. }
  3519. /**
  3520. * trms1040_set_data - store a single byte in the eeprom
  3521. *
  3522. * Called from write all to write a single byte into the SSEEPROM
  3523. * Which is done one bit at a time.
  3524. *
  3525. * @io_port: base I/O address
  3526. * @addr: offset into EEPROM
  3527. * @byte: bytes to write
  3528. **/
  3529. static void trms1040_set_data(unsigned long io_port, u8 addr, u8 byte)
  3530. {
  3531. int i;
  3532. u8 send_data;
  3533. /* Send write command & address */
  3534. trms1040_write_cmd(io_port, 0x05, addr);
  3535. /* Write data */
  3536. for (i = 0; i < 8; i++, byte <<= 1) {
  3537. send_data = NVR_SELECT;
  3538. if (byte & 0x80) /* Start from bit 7 */
  3539. send_data |= NVR_BITOUT;
  3540. outb(send_data, io_port + TRM_S1040_GEN_NVRAM);
  3541. trms1040_wait_30us(io_port);
  3542. outb((send_data | NVR_CLOCK), io_port + TRM_S1040_GEN_NVRAM);
  3543. trms1040_wait_30us(io_port);
  3544. }
  3545. outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM);
  3546. trms1040_wait_30us(io_port);
  3547. /* Disable chip select */
  3548. outb(0, io_port + TRM_S1040_GEN_NVRAM);
  3549. trms1040_wait_30us(io_port);
  3550. outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM);
  3551. trms1040_wait_30us(io_port);
  3552. /* Wait for write ready */
  3553. while (1) {
  3554. outb((NVR_SELECT | NVR_CLOCK), io_port + TRM_S1040_GEN_NVRAM);
  3555. trms1040_wait_30us(io_port);
  3556. outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM);
  3557. trms1040_wait_30us(io_port);
  3558. if (inb(io_port + TRM_S1040_GEN_NVRAM) & NVR_BITIN)
  3559. break;
  3560. }
  3561. /* Disable chip select */
  3562. outb(0, io_port + TRM_S1040_GEN_NVRAM);
  3563. }
  3564. /**
  3565. * trms1040_write_all - write 128 bytes to the eeprom
  3566. *
  3567. * Write the supplied 128 bytes to the chips SEEPROM
  3568. *
  3569. * @eeprom: the data to write
  3570. * @io_port: the base io port
  3571. **/
  3572. static void trms1040_write_all(struct NvRamType *eeprom, unsigned long io_port)
  3573. {
  3574. u8 *b_eeprom = (u8 *)eeprom;
  3575. u8 addr;
  3576. /* Enable SEEPROM */
  3577. outb((inb(io_port + TRM_S1040_GEN_CONTROL) | EN_EEPROM),
  3578. io_port + TRM_S1040_GEN_CONTROL);
  3579. /* write enable */
  3580. trms1040_write_cmd(io_port, 0x04, 0xFF);
  3581. outb(0, io_port + TRM_S1040_GEN_NVRAM);
  3582. trms1040_wait_30us(io_port);
  3583. /* write */
  3584. for (addr = 0; addr < 128; addr++, b_eeprom++)
  3585. trms1040_set_data(io_port, addr, *b_eeprom);
  3586. /* write disable */
  3587. trms1040_write_cmd(io_port, 0x04, 0x00);
  3588. outb(0, io_port + TRM_S1040_GEN_NVRAM);
  3589. trms1040_wait_30us(io_port);
  3590. /* Disable SEEPROM */
  3591. outb((inb(io_port + TRM_S1040_GEN_CONTROL) & ~EN_EEPROM),
  3592. io_port + TRM_S1040_GEN_CONTROL);
  3593. }
  3594. /**
  3595. * trms1040_get_data - get a single byte from the eeprom
  3596. *
  3597. * Called from read all to read a single byte into the SSEEPROM
  3598. * Which is done one bit at a time.
  3599. *
  3600. * @io_port: base I/O address
  3601. * @addr: offset into SEEPROM
  3602. *
  3603. * Returns the byte read.
  3604. **/
  3605. static u8 trms1040_get_data(unsigned long io_port, u8 addr)
  3606. {
  3607. int i;
  3608. u8 read_byte;
  3609. u8 result = 0;
  3610. /* Send read command & address */
  3611. trms1040_write_cmd(io_port, 0x06, addr);
  3612. /* read data */
  3613. for (i = 0; i < 8; i++) {
  3614. outb((NVR_SELECT | NVR_CLOCK), io_port + TRM_S1040_GEN_NVRAM);
  3615. trms1040_wait_30us(io_port);
  3616. outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM);
  3617. /* Get data bit while falling edge */
  3618. read_byte = inb(io_port + TRM_S1040_GEN_NVRAM);
  3619. result <<= 1;
  3620. if (read_byte & NVR_BITIN)
  3621. result |= 1;
  3622. trms1040_wait_30us(io_port);
  3623. }
  3624. /* Disable chip select */
  3625. outb(0, io_port + TRM_S1040_GEN_NVRAM);
  3626. return result;
  3627. }
  3628. /**
  3629. * trms1040_read_all - read all bytes from the eeprom
  3630. *
  3631. * Read the 128 bytes from the SEEPROM.
  3632. *
  3633. * @eeprom: where to store the data
  3634. * @io_port: the base io port
  3635. **/
  3636. static void trms1040_read_all(struct NvRamType *eeprom, unsigned long io_port)
  3637. {
  3638. u8 *b_eeprom = (u8 *)eeprom;
  3639. u8 addr;
  3640. /* Enable SEEPROM */
  3641. outb((inb(io_port + TRM_S1040_GEN_CONTROL) | EN_EEPROM),
  3642. io_port + TRM_S1040_GEN_CONTROL);
  3643. /* read details */
  3644. for (addr = 0; addr < 128; addr++, b_eeprom++)
  3645. *b_eeprom = trms1040_get_data(io_port, addr);
  3646. /* Disable SEEPROM */
  3647. outb((inb(io_port + TRM_S1040_GEN_CONTROL) & ~EN_EEPROM),
  3648. io_port + TRM_S1040_GEN_CONTROL);
  3649. }
  3650. /**
  3651. * check_eeprom - get and check contents of the eeprom
  3652. *
  3653. * Read seeprom 128 bytes into the memory provider in eeprom.
  3654. * Checks the checksum and if it's not correct it uses a set of default
  3655. * values.
  3656. *
  3657. * @eeprom: caller allocated strcuture to read the eeprom data into
  3658. * @io_port: io port to read from
  3659. **/
  3660. static void check_eeprom(struct NvRamType *eeprom, unsigned long io_port)
  3661. {
  3662. u16 *w_eeprom = (u16 *)eeprom;
  3663. u16 w_addr;
  3664. u16 cksum;
  3665. u32 d_addr;
  3666. u32 *d_eeprom;
  3667. trms1040_read_all(eeprom, io_port); /* read eeprom */
  3668. cksum = 0;
  3669. for (w_addr = 0, w_eeprom = (u16 *)eeprom; w_addr < 64;
  3670. w_addr++, w_eeprom++)
  3671. cksum += *w_eeprom;
  3672. if (cksum != 0x1234) {
  3673. /*
  3674. * Checksum is wrong.
  3675. * Load a set of defaults into the eeprom buffer
  3676. */
  3677. dprintkl(KERN_WARNING,
  3678. "EEProm checksum error: using default values and options.\n");
  3679. eeprom->sub_vendor_id[0] = (u8)PCI_VENDOR_ID_TEKRAM;
  3680. eeprom->sub_vendor_id[1] = (u8)(PCI_VENDOR_ID_TEKRAM >> 8);
  3681. eeprom->sub_sys_id[0] = (u8)PCI_DEVICE_ID_TEKRAM_TRMS1040;
  3682. eeprom->sub_sys_id[1] =
  3683. (u8)(PCI_DEVICE_ID_TEKRAM_TRMS1040 >> 8);
  3684. eeprom->sub_class = 0x00;
  3685. eeprom->vendor_id[0] = (u8)PCI_VENDOR_ID_TEKRAM;
  3686. eeprom->vendor_id[1] = (u8)(PCI_VENDOR_ID_TEKRAM >> 8);
  3687. eeprom->device_id[0] = (u8)PCI_DEVICE_ID_TEKRAM_TRMS1040;
  3688. eeprom->device_id[1] =
  3689. (u8)(PCI_DEVICE_ID_TEKRAM_TRMS1040 >> 8);
  3690. eeprom->reserved = 0x00;
  3691. for (d_addr = 0, d_eeprom = (u32 *)eeprom->target;
  3692. d_addr < 16; d_addr++, d_eeprom++)
  3693. *d_eeprom = 0x00000077; /* cfg3,cfg2,period,cfg0 */
  3694. *d_eeprom++ = 0x04000F07; /* max_tag,delay_time,channel_cfg,scsi_id */
  3695. *d_eeprom++ = 0x00000015; /* reserved1,boot_lun,boot_target,reserved0 */
  3696. for (d_addr = 0; d_addr < 12; d_addr++, d_eeprom++)
  3697. *d_eeprom = 0x00;
  3698. /* Now load defaults (maybe set by boot/module params) */
  3699. set_safe_settings();
  3700. fix_settings();
  3701. eeprom_override(eeprom);
  3702. eeprom->cksum = 0x00;
  3703. for (w_addr = 0, cksum = 0, w_eeprom = (u16 *)eeprom;
  3704. w_addr < 63; w_addr++, w_eeprom++)
  3705. cksum += *w_eeprom;
  3706. *w_eeprom = 0x1234 - cksum;
  3707. trms1040_write_all(eeprom, io_port);
  3708. eeprom->delay_time = cfg_data[CFG_RESET_DELAY].value;
  3709. } else {
  3710. set_safe_settings();
  3711. eeprom_index_to_delay(eeprom);
  3712. eeprom_override(eeprom);
  3713. }
  3714. }
  3715. /**
  3716. * print_eeprom_settings - output the eeprom settings
  3717. * to the kernel log so people can see what they were.
  3718. *
  3719. * @eeprom: The eeprom data strucutre to show details for.
  3720. **/
  3721. static void print_eeprom_settings(struct NvRamType *eeprom)
  3722. {
  3723. dprintkl(KERN_INFO, "Used settings: AdapterID=%02i, Speed=%i(%02i.%01iMHz), dev_mode=0x%02x\n",
  3724. eeprom->scsi_id,
  3725. eeprom->target[0].period,
  3726. clock_speed[eeprom->target[0].period] / 10,
  3727. clock_speed[eeprom->target[0].period] % 10,
  3728. eeprom->target[0].cfg0);
  3729. dprintkl(KERN_INFO, " AdaptMode=0x%02x, Tags=%i(%02i), DelayReset=%is\n",
  3730. eeprom->channel_cfg, eeprom->max_tag,
  3731. 1 << eeprom->max_tag, eeprom->delay_time);
  3732. }
  3733. /* Free SG tables */
  3734. static void adapter_sg_tables_free(struct AdapterCtlBlk *acb)
  3735. {
  3736. int i;
  3737. const unsigned srbs_per_page = PAGE_SIZE/SEGMENTX_LEN;
  3738. for (i = 0; i < DC395x_MAX_SRB_CNT; i += srbs_per_page)
  3739. kfree(acb->srb_array[i].segment_x);
  3740. }
  3741. /*
  3742. * Allocate SG tables; as we have to pci_map them, an SG list (struct SGentry*)
  3743. * should never cross a page boundary */
  3744. static int adapter_sg_tables_alloc(struct AdapterCtlBlk *acb)
  3745. {
  3746. const unsigned mem_needed = (DC395x_MAX_SRB_CNT+1)
  3747. *SEGMENTX_LEN;
  3748. int pages = (mem_needed+(PAGE_SIZE-1))/PAGE_SIZE;
  3749. const unsigned srbs_per_page = PAGE_SIZE/SEGMENTX_LEN;
  3750. int srb_idx = 0;
  3751. unsigned i = 0;
  3752. struct SGentry *uninitialized_var(ptr);
  3753. for (i = 0; i < DC395x_MAX_SRB_CNT; i++)
  3754. acb->srb_array[i].segment_x = NULL;
  3755. dprintkdbg(DBG_1, "Allocate %i pages for SG tables\n", pages);
  3756. while (pages--) {
  3757. ptr = kmalloc(PAGE_SIZE, GFP_KERNEL);
  3758. if (!ptr) {
  3759. adapter_sg_tables_free(acb);
  3760. return 1;
  3761. }
  3762. dprintkdbg(DBG_1, "Allocate %li bytes at %p for SG segments %i\n",
  3763. PAGE_SIZE, ptr, srb_idx);
  3764. i = 0;
  3765. while (i < srbs_per_page && srb_idx < DC395x_MAX_SRB_CNT)
  3766. acb->srb_array[srb_idx++].segment_x =
  3767. ptr + (i++ * DC395x_MAX_SG_LISTENTRY);
  3768. }
  3769. if (i < srbs_per_page)
  3770. acb->srb.segment_x =
  3771. ptr + (i * DC395x_MAX_SG_LISTENTRY);
  3772. else
  3773. dprintkl(KERN_DEBUG, "No space for tmsrb SG table reserved?!\n");
  3774. return 0;
  3775. }
  3776. /**
  3777. * adapter_print_config - print adapter connection and termination
  3778. * config
  3779. *
  3780. * The io port in the adapter needs to have been set before calling
  3781. * this function.
  3782. *
  3783. * @acb: The adapter to print the information for.
  3784. **/
  3785. static void adapter_print_config(struct AdapterCtlBlk *acb)
  3786. {
  3787. u8 bval;
  3788. bval = DC395x_read8(acb, TRM_S1040_GEN_STATUS);
  3789. dprintkl(KERN_INFO, "%sConnectors: ",
  3790. ((bval & WIDESCSI) ? "(Wide) " : ""));
  3791. if (!(bval & CON5068))
  3792. printk("ext%s ", !(bval & EXT68HIGH) ? "68" : "50");
  3793. if (!(bval & CON68))
  3794. printk("int68%s ", !(bval & INT68HIGH) ? "" : "(50)");
  3795. if (!(bval & CON50))
  3796. printk("int50 ");
  3797. if ((bval & (CON5068 | CON50 | CON68)) ==
  3798. 0 /*(CON5068 | CON50 | CON68) */ )
  3799. printk(" Oops! (All 3?) ");
  3800. bval = DC395x_read8(acb, TRM_S1040_GEN_CONTROL);
  3801. printk(" Termination: ");
  3802. if (bval & DIS_TERM)
  3803. printk("Disabled\n");
  3804. else {
  3805. if (bval & AUTOTERM)
  3806. printk("Auto ");
  3807. if (bval & LOW8TERM)
  3808. printk("Low ");
  3809. if (bval & UP8TERM)
  3810. printk("High ");
  3811. printk("\n");
  3812. }
  3813. }
  3814. /**
  3815. * adapter_init_params - Initialize the various parameters in the
  3816. * adapter structure. Note that the pointer to the scsi_host is set
  3817. * early (when this instance is created) and the io_port and irq
  3818. * values are set later after they have been reserved. This just gets
  3819. * everything set to a good starting position.
  3820. *
  3821. * The eeprom structure in the adapter needs to have been set before
  3822. * calling this function.
  3823. *
  3824. * @acb: The adapter to initialize.
  3825. **/
  3826. static void adapter_init_params(struct AdapterCtlBlk *acb)
  3827. {
  3828. struct NvRamType *eeprom = &acb->eeprom;
  3829. int i;
  3830. /* NOTE: acb->scsi_host is set at scsi_host/acb creation time */
  3831. /* NOTE: acb->io_port_base is set at port registration time */
  3832. /* NOTE: acb->io_port_len is set at port registration time */
  3833. INIT_LIST_HEAD(&acb->dcb_list);
  3834. acb->dcb_run_robin = NULL;
  3835. acb->active_dcb = NULL;
  3836. INIT_LIST_HEAD(&acb->srb_free_list);
  3837. /* temp SRB for Q tag used or abort command used */
  3838. acb->tmp_srb = &acb->srb;
  3839. timer_setup(&acb->waiting_timer, waiting_timeout, 0);
  3840. timer_setup(&acb->selto_timer, NULL, 0);
  3841. acb->srb_count = DC395x_MAX_SRB_CNT;
  3842. acb->sel_timeout = DC395x_SEL_TIMEOUT; /* timeout=250ms */
  3843. /* NOTE: acb->irq_level is set at IRQ registration time */
  3844. acb->tag_max_num = 1 << eeprom->max_tag;
  3845. if (acb->tag_max_num > 30)
  3846. acb->tag_max_num = 30;
  3847. acb->acb_flag = 0; /* RESET_DETECT, RESET_DONE, RESET_DEV */
  3848. acb->gmode2 = eeprom->channel_cfg;
  3849. acb->config = 0; /* NOTE: actually set in adapter_init_chip */
  3850. if (eeprom->channel_cfg & NAC_SCANLUN)
  3851. acb->lun_chk = 1;
  3852. acb->scan_devices = 1;
  3853. acb->scsi_host->this_id = eeprom->scsi_id;
  3854. acb->hostid_bit = (1 << acb->scsi_host->this_id);
  3855. for (i = 0; i < DC395x_MAX_SCSI_ID; i++)
  3856. acb->dcb_map[i] = 0;
  3857. acb->msg_len = 0;
  3858. /* link static array of srbs into the srb free list */
  3859. for (i = 0; i < acb->srb_count - 1; i++)
  3860. srb_free_insert(acb, &acb->srb_array[i]);
  3861. }
  3862. /**
  3863. * adapter_init_host - Initialize the scsi host instance based on
  3864. * values that we have already stored in the adapter instance. There's
  3865. * some mention that a lot of these are deprecated, so we won't use
  3866. * them (we'll use the ones in the adapter instance) but we'll fill
  3867. * them in in case something else needs them.
  3868. *
  3869. * The eeprom structure, irq and io ports in the adapter need to have
  3870. * been set before calling this function.
  3871. *
  3872. * @host: The scsi host instance to fill in the values for.
  3873. **/
  3874. static void adapter_init_scsi_host(struct Scsi_Host *host)
  3875. {
  3876. struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)host->hostdata;
  3877. struct NvRamType *eeprom = &acb->eeprom;
  3878. host->max_cmd_len = 24;
  3879. host->can_queue = DC395x_MAX_CMD_QUEUE;
  3880. host->cmd_per_lun = DC395x_MAX_CMD_PER_LUN;
  3881. host->this_id = (int)eeprom->scsi_id;
  3882. host->io_port = acb->io_port_base;
  3883. host->n_io_port = acb->io_port_len;
  3884. host->dma_channel = -1;
  3885. host->unique_id = acb->io_port_base;
  3886. host->irq = acb->irq_level;
  3887. acb->last_reset = jiffies;
  3888. host->max_id = 16;
  3889. if (host->max_id - 1 == eeprom->scsi_id)
  3890. host->max_id--;
  3891. if (eeprom->channel_cfg & NAC_SCANLUN)
  3892. host->max_lun = 8;
  3893. else
  3894. host->max_lun = 1;
  3895. }
  3896. /**
  3897. * adapter_init_chip - Get the chip into a know state and figure out
  3898. * some of the settings that apply to this adapter.
  3899. *
  3900. * The io port in the adapter needs to have been set before calling
  3901. * this function. The config will be configured correctly on return.
  3902. *
  3903. * @acb: The adapter which we are to init.
  3904. **/
  3905. static void adapter_init_chip(struct AdapterCtlBlk *acb)
  3906. {
  3907. struct NvRamType *eeprom = &acb->eeprom;
  3908. /* Mask all the interrupt */
  3909. DC395x_write8(acb, TRM_S1040_DMA_INTEN, 0x00);
  3910. DC395x_write8(acb, TRM_S1040_SCSI_INTEN, 0x00);
  3911. /* Reset SCSI module */
  3912. DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_RSTMODULE);
  3913. /* Reset PCI/DMA module */
  3914. DC395x_write8(acb, TRM_S1040_DMA_CONTROL, DMARESETMODULE);
  3915. udelay(20);
  3916. /* program configuration 0 */
  3917. acb->config = HCC_AUTOTERM | HCC_PARITY;
  3918. if (DC395x_read8(acb, TRM_S1040_GEN_STATUS) & WIDESCSI)
  3919. acb->config |= HCC_WIDE_CARD;
  3920. if (eeprom->channel_cfg & NAC_POWERON_SCSI_RESET)
  3921. acb->config |= HCC_SCSI_RESET;
  3922. if (acb->config & HCC_SCSI_RESET) {
  3923. dprintkl(KERN_INFO, "Performing initial SCSI bus reset\n");
  3924. DC395x_write8(acb, TRM_S1040_SCSI_CONTROL, DO_RSTSCSI);
  3925. /*while (!( DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS) & INT_SCSIRESET )); */
  3926. /*spin_unlock_irq (&io_request_lock); */
  3927. udelay(500);
  3928. acb->last_reset =
  3929. jiffies + HZ / 2 +
  3930. HZ * acb->eeprom.delay_time;
  3931. /*spin_lock_irq (&io_request_lock); */
  3932. }
  3933. }
  3934. /**
  3935. * init_adapter - Grab the resource for the card, setup the adapter
  3936. * information, set the card into a known state, create the various
  3937. * tables etc etc. This basically gets all adapter information all up
  3938. * to date, initialised and gets the chip in sync with it.
  3939. *
  3940. * @host: This hosts adapter structure
  3941. * @io_port: The base I/O port
  3942. * @irq: IRQ
  3943. *
  3944. * Returns 0 if the initialization succeeds, any other value on
  3945. * failure.
  3946. **/
  3947. static int adapter_init(struct AdapterCtlBlk *acb, unsigned long io_port,
  3948. u32 io_port_len, unsigned int irq)
  3949. {
  3950. if (!request_region(io_port, io_port_len, DC395X_NAME)) {
  3951. dprintkl(KERN_ERR, "Failed to reserve IO region 0x%lx\n", io_port);
  3952. goto failed;
  3953. }
  3954. /* store port base to indicate we have registered it */
  3955. acb->io_port_base = io_port;
  3956. acb->io_port_len = io_port_len;
  3957. if (request_irq(irq, dc395x_interrupt, IRQF_SHARED, DC395X_NAME, acb)) {
  3958. /* release the region we just claimed */
  3959. dprintkl(KERN_INFO, "Failed to register IRQ\n");
  3960. goto failed;
  3961. }
  3962. /* store irq to indicate we have registered it */
  3963. acb->irq_level = irq;
  3964. /* get eeprom configuration information and command line settings etc */
  3965. check_eeprom(&acb->eeprom, io_port);
  3966. print_eeprom_settings(&acb->eeprom);
  3967. /* setup adapter control block */
  3968. adapter_init_params(acb);
  3969. /* display card connectors/termination settings */
  3970. adapter_print_config(acb);
  3971. if (adapter_sg_tables_alloc(acb)) {
  3972. dprintkl(KERN_DEBUG, "Memory allocation for SG tables failed\n");
  3973. goto failed;
  3974. }
  3975. adapter_init_scsi_host(acb->scsi_host);
  3976. adapter_init_chip(acb);
  3977. set_basic_config(acb);
  3978. dprintkdbg(DBG_0,
  3979. "adapter_init: acb=%p, pdcb_map=%p psrb_array=%p "
  3980. "size{acb=0x%04x dcb=0x%04x srb=0x%04x}\n",
  3981. acb, acb->dcb_map, acb->srb_array, sizeof(struct AdapterCtlBlk),
  3982. sizeof(struct DeviceCtlBlk), sizeof(struct ScsiReqBlk));
  3983. return 0;
  3984. failed:
  3985. if (acb->irq_level)
  3986. free_irq(acb->irq_level, acb);
  3987. if (acb->io_port_base)
  3988. release_region(acb->io_port_base, acb->io_port_len);
  3989. adapter_sg_tables_free(acb);
  3990. return 1;
  3991. }
  3992. /**
  3993. * adapter_uninit_chip - cleanly shut down the scsi controller chip,
  3994. * stopping all operations and disabling interrupt generation on the
  3995. * card.
  3996. *
  3997. * @acb: The adapter which we are to shutdown.
  3998. **/
  3999. static void adapter_uninit_chip(struct AdapterCtlBlk *acb)
  4000. {
  4001. /* disable interrupts */
  4002. DC395x_write8(acb, TRM_S1040_DMA_INTEN, 0);
  4003. DC395x_write8(acb, TRM_S1040_SCSI_INTEN, 0);
  4004. /* reset the scsi bus */
  4005. if (acb->config & HCC_SCSI_RESET)
  4006. reset_scsi_bus(acb);
  4007. /* clear any pending interrupt state */
  4008. DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS);
  4009. }
  4010. /**
  4011. * adapter_uninit - Shut down the chip and release any resources that
  4012. * we had allocated. Once this returns the adapter should not be used
  4013. * anymore.
  4014. *
  4015. * @acb: The adapter which we are to un-initialize.
  4016. **/
  4017. static void adapter_uninit(struct AdapterCtlBlk *acb)
  4018. {
  4019. unsigned long flags;
  4020. DC395x_LOCK_IO(acb->scsi_host, flags);
  4021. /* remove timers */
  4022. if (timer_pending(&acb->waiting_timer))
  4023. del_timer(&acb->waiting_timer);
  4024. if (timer_pending(&acb->selto_timer))
  4025. del_timer(&acb->selto_timer);
  4026. adapter_uninit_chip(acb);
  4027. adapter_remove_and_free_all_devices(acb);
  4028. DC395x_UNLOCK_IO(acb->scsi_host, flags);
  4029. if (acb->irq_level)
  4030. free_irq(acb->irq_level, acb);
  4031. if (acb->io_port_base)
  4032. release_region(acb->io_port_base, acb->io_port_len);
  4033. adapter_sg_tables_free(acb);
  4034. }
  4035. #undef YESNO
  4036. #define YESNO(YN) \
  4037. if (YN) seq_printf(m, " Yes ");\
  4038. else seq_printf(m, " No ")
/*
 * dc395x_show_info - /proc/scsi show_info handler: dump adapter and
 * per-device state into the given seq_file.  Takes the host I/O lock
 * for the duration so the DCB lists cannot change underneath us.
 * Always returns 0.
 */
static int dc395x_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)host->hostdata;
	int spd, spd1;
	struct DeviceCtlBlk *dcb;
	unsigned long flags;
	int dev;

	seq_puts(m, DC395X_BANNER " PCI SCSI Host Adapter\n"
		" Driver Version " DC395X_VERSION "\n");

	DC395x_LOCK_IO(acb->scsi_host, flags);

	/* adapter-wide settings */
	seq_printf(m, "SCSI Host Nr %i, ", host->host_no);
	seq_printf(m, "DC395U/UW/F DC315/U %s\n",
		(acb->config & HCC_WIDE_CARD) ? "Wide" : "");
	seq_printf(m, "io_port_base 0x%04lx, ", acb->io_port_base);
	seq_printf(m, "irq_level 0x%04x, ", acb->irq_level);
	seq_printf(m, " SelTimeout %ims\n", (1638 * acb->sel_timeout) / 1000);

	seq_printf(m, "MaxID %i, MaxLUN %llu, ", host->max_id, host->max_lun);
	seq_printf(m, "AdapterID %i\n", host->this_id);

	seq_printf(m, "tag_max_num %i", acb->tag_max_num);
	/*seq_printf(m, ", DMA_Status %i\n", DC395x_read8(acb, TRM_S1040_DMA_STATUS)); */
	seq_printf(m, ", FilterCfg 0x%02x",
		DC395x_read8(acb, TRM_S1040_SCSI_CONFIG1));
	seq_printf(m, ", DelayReset %is\n", acb->eeprom.delay_time);
	/*seq_printf(m, "\n"); */

	/* bitmap of LUNs seen per target id */
	seq_printf(m, "Nr of DCBs: %i\n", list_size(&acb->dcb_list));
	seq_printf(m, "Map of attached LUNs: %02x %02x %02x %02x %02x %02x %02x %02x\n",
		acb->dcb_map[0], acb->dcb_map[1], acb->dcb_map[2],
		acb->dcb_map[3], acb->dcb_map[4], acb->dcb_map[5],
		acb->dcb_map[6], acb->dcb_map[7]);
	seq_printf(m, " %02x %02x %02x %02x %02x %02x %02x %02x\n",
		acb->dcb_map[8], acb->dcb_map[9], acb->dcb_map[10],
		acb->dcb_map[11], acb->dcb_map[12], acb->dcb_map[13],
		acb->dcb_map[14], acb->dcb_map[15]);

	/* one row per device control block */
	seq_puts(m,
		"Un ID LUN Prty Sync Wide DsCn SndS TagQ nego_period SyncFreq SyncOffs MaxCmd\n");

	dev = 0;
	list_for_each_entry(dcb, &acb->dcb_list, list) {
		int nego_period;
		seq_printf(m, "%02i %02i %02i ", dev, dcb->target_id,
			dcb->target_lun);
		YESNO(dcb->dev_mode & NTC_DO_PARITY_CHK);
		YESNO(dcb->sync_offset);
		YESNO(dcb->sync_period & WIDE_SYNC);
		YESNO(dcb->dev_mode & NTC_DO_DISCONNECT);
		YESNO(dcb->dev_mode & NTC_DO_SEND_START);
		YESNO(dcb->sync_mode & EN_TAG_QUEUEING);
		/* negotiated period in ns (clock_period table is in
		 * units of 4ns, hence the << 2) */
		nego_period = clock_period[dcb->sync_period & 0x07] << 2;
		if (dcb->sync_offset)
			seq_printf(m, " %03i ns ", nego_period);
		else
			seq_printf(m, " (%03i ns)", (dcb->min_nego_period << 2));

		if (dcb->sync_offset & 0x0f) {
			/* sync transfer rate in MHz, rounded to one
			 * decimal place */
			spd = 1000 / (nego_period);
			spd1 = 1000 % (nego_period);
			spd1 = (spd1 * 10 + nego_period / 2) / (nego_period);
			seq_printf(m, " %2i.%1i M %02i ", spd, spd1,
				(dcb->sync_offset & 0x0f));
		} else
			seq_puts(m, " ");

		/* Add more info ... */
		seq_printf(m, " %02i\n", dcb->max_command);
		dev++;
	}

	if (timer_pending(&acb->waiting_timer))
		seq_puts(m, "Waiting queue timer running\n");
	else
		seq_putc(m, '\n');

	/* pending commands, per device */
	list_for_each_entry(dcb, &acb->dcb_list, list) {
		struct ScsiReqBlk *srb;
		if (!list_empty(&dcb->srb_waiting_list))
			seq_printf(m, "DCB (%02i-%i): Waiting: %i:",
				dcb->target_id, dcb->target_lun,
				list_size(&dcb->srb_waiting_list));
		list_for_each_entry(srb, &dcb->srb_waiting_list, list)
			seq_printf(m, " %p", srb->cmd);
		if (!list_empty(&dcb->srb_going_list))
			seq_printf(m, "\nDCB (%02i-%i): Going : %i:",
				dcb->target_id, dcb->target_lun,
				list_size(&dcb->srb_going_list));
		list_for_each_entry(srb, &dcb->srb_going_list, list)
			seq_printf(m, " %p", srb->cmd);
		if (!list_empty(&dcb->srb_waiting_list) || !list_empty(&dcb->srb_going_list))
			seq_putc(m, '\n');
	}

	if (debug_enabled(DBG_1)) {
		seq_printf(m, "DCB list for ACB %p:\n", acb);
		list_for_each_entry(dcb, &acb->dcb_list, list) {
			seq_printf(m, "%p -> ", dcb);
		}
		seq_puts(m, "END\n");
	}

	DC395x_UNLOCK_IO(acb->scsi_host, flags);
	return 0;
}
/*
 * SCSI mid-layer host template; per-instance values (can_queue,
 * this_id, ...) are overridden from the eeprom in
 * adapter_init_scsi_host().
 */
static struct scsi_host_template dc395x_driver_template = {
	.module = THIS_MODULE,
	.proc_name = DC395X_NAME,
	.show_info = dc395x_show_info,
	.name = DC395X_BANNER " " DC395X_VERSION,
	.queuecommand = dc395x_queue_command,
	.bios_param = dc395x_bios_param,
	.slave_alloc = dc395x_slave_alloc,
	.slave_destroy = dc395x_slave_destroy,
	.can_queue = DC395x_MAX_CAN_QUEUE,
	.this_id = 7,
	.sg_tablesize = DC395x_MAX_SG_TABLESIZE,
	.cmd_per_lun = DC395x_MAX_CMD_PER_LUN,
	.eh_abort_handler = dc395x_eh_abort,
	.eh_bus_reset_handler = dc395x_eh_bus_reset,
	.use_clustering = DISABLE_CLUSTERING,
};
  4150. /**
  4151. * banner_display - Display banner on first instance of driver
  4152. * initialized.
  4153. **/
  4154. static void banner_display(void)
  4155. {
  4156. static int banner_done = 0;
  4157. if (!banner_done)
  4158. {
  4159. dprintkl(KERN_INFO, "%s %s\n", DC395X_BANNER, DC395X_VERSION);
  4160. banner_done = 1;
  4161. }
  4162. }
  4163. /**
  4164. * dc395x_init_one - Initialise a single instance of the adapter.
  4165. *
  4166. * The PCI layer will call this once for each instance of the adapter
  4167. * that it finds in the system. The pci_dev strcuture indicates which
  4168. * instance we are being called from.
  4169. *
  4170. * @dev: The PCI device to initialize.
  4171. * @id: Looks like a pointer to the entry in our pci device table
  4172. * that was actually matched by the PCI subsystem.
  4173. *
  4174. * Returns 0 on success, or an error code (-ve) on failure.
  4175. **/
  4176. static int dc395x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
  4177. {
  4178. struct Scsi_Host *scsi_host = NULL;
  4179. struct AdapterCtlBlk *acb = NULL;
  4180. unsigned long io_port_base;
  4181. unsigned int io_port_len;
  4182. unsigned int irq;
  4183. dprintkdbg(DBG_0, "Init one instance (%s)\n", pci_name(dev));
  4184. banner_display();
  4185. if (pci_enable_device(dev))
  4186. {
  4187. dprintkl(KERN_INFO, "PCI Enable device failed.\n");
  4188. return -ENODEV;
  4189. }
  4190. io_port_base = pci_resource_start(dev, 0) & PCI_BASE_ADDRESS_IO_MASK;
  4191. io_port_len = pci_resource_len(dev, 0);
  4192. irq = dev->irq;
  4193. dprintkdbg(DBG_0, "IO_PORT=0x%04lx, IRQ=0x%x\n", io_port_base, dev->irq);
  4194. /* allocate scsi host information (includes out adapter) */
  4195. scsi_host = scsi_host_alloc(&dc395x_driver_template,
  4196. sizeof(struct AdapterCtlBlk));
  4197. if (!scsi_host) {
  4198. dprintkl(KERN_INFO, "scsi_host_alloc failed\n");
  4199. goto fail;
  4200. }
  4201. acb = (struct AdapterCtlBlk*)scsi_host->hostdata;
  4202. acb->scsi_host = scsi_host;
  4203. acb->dev = dev;
  4204. /* initialise the adapter and everything we need */
  4205. if (adapter_init(acb, io_port_base, io_port_len, irq)) {
  4206. dprintkl(KERN_INFO, "adapter init failed\n");
  4207. goto fail;
  4208. }
  4209. pci_set_master(dev);
  4210. /* get the scsi mid level to scan for new devices on the bus */
  4211. if (scsi_add_host(scsi_host, &dev->dev)) {
  4212. dprintkl(KERN_ERR, "scsi_add_host failed\n");
  4213. goto fail;
  4214. }
  4215. pci_set_drvdata(dev, scsi_host);
  4216. scsi_scan_host(scsi_host);
  4217. return 0;
  4218. fail:
  4219. if (acb != NULL)
  4220. adapter_uninit(acb);
  4221. if (scsi_host != NULL)
  4222. scsi_host_put(scsi_host);
  4223. pci_disable_device(dev);
  4224. return -ENODEV;
  4225. }
  4226. /**
  4227. * dc395x_remove_one - Called to remove a single instance of the
  4228. * adapter.
  4229. *
  4230. * @dev: The PCI device to initialize.
  4231. **/
  4232. static void dc395x_remove_one(struct pci_dev *dev)
  4233. {
  4234. struct Scsi_Host *scsi_host = pci_get_drvdata(dev);
  4235. struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)(scsi_host->hostdata);
  4236. dprintkdbg(DBG_0, "dc395x_remove_one: acb=%p\n", acb);
  4237. scsi_remove_host(scsi_host);
  4238. adapter_uninit(acb);
  4239. pci_disable_device(dev);
  4240. scsi_host_put(scsi_host);
  4241. }
  4242. static struct pci_device_id dc395x_pci_table[] = {
  4243. {
  4244. .vendor = PCI_VENDOR_ID_TEKRAM,
  4245. .device = PCI_DEVICE_ID_TEKRAM_TRMS1040,
  4246. .subvendor = PCI_ANY_ID,
  4247. .subdevice = PCI_ANY_ID,
  4248. },
  4249. {} /* Terminating entry */
  4250. };
  4251. MODULE_DEVICE_TABLE(pci, dc395x_pci_table);
/* PCI driver glue: one probe/remove pair per matched device */
static struct pci_driver dc395x_driver = {
	.name = DC395X_NAME,
	.id_table = dc395x_pci_table,
	.probe = dc395x_init_one,
	.remove = dc395x_remove_one,
};
  4258. /**
  4259. * dc395x_module_init - Module initialization function
  4260. *
  4261. * Used by both module and built-in driver to initialise this driver.
  4262. **/
  4263. static int __init dc395x_module_init(void)
  4264. {
  4265. return pci_register_driver(&dc395x_driver);
  4266. }
  4267. /**
  4268. * dc395x_module_exit - Module cleanup function.
  4269. **/
  4270. static void __exit dc395x_module_exit(void)
  4271. {
  4272. pci_unregister_driver(&dc395x_driver);
  4273. }
/* module entry/exit points and metadata */
module_init(dc395x_module_init);
module_exit(dc395x_module_exit);

MODULE_AUTHOR("C.L. Huang / Erich Chen / Kurt Garloff");
MODULE_DESCRIPTION("SCSI host adapter driver for Tekram TRM-S1040 based adapters: Tekram DC395 and DC315 series");
MODULE_LICENSE("GPL");