bfa_fcpim.c

/*
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014- QLogic Corporation.
 * All rights reserved
 * www.qlogic.com
 *
 * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#include "bfad_drv.h"
#include "bfa_modules.h"

BFA_TRC_FILE(HAL, FCPIM);

/*
 * BFA ITNIM Related definitions
 */
static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
static void bfa_ioim_lm_init(struct bfa_s *bfa);

#define BFA_ITNIM_FROM_TAG(_fcpim, _tag) \
	(((_fcpim)->itnim_arr + ((_tag) & ((_fcpim)->num_itnims - 1))))

#define bfa_fcpim_additn(__itnim) \
	list_add_tail(&(__itnim)->qe, &(__itnim)->fcpim->itnim_q)

#define bfa_fcpim_delitn(__itnim) do { \
	WARN_ON(!bfa_q_is_on_q(&(__itnim)->fcpim->itnim_q, __itnim)); \
	bfa_itnim_update_del_itn_stats(__itnim); \
	list_del(&(__itnim)->qe); \
	WARN_ON(!list_empty(&(__itnim)->io_q)); \
	WARN_ON(!list_empty(&(__itnim)->io_cleanup_q)); \
	WARN_ON(!list_empty(&(__itnim)->pending_q)); \
} while (0)

#define bfa_itnim_online_cb(__itnim) do { \
	if ((__itnim)->bfa->fcs) \
		bfa_cb_itnim_online((__itnim)->ditn); \
	else { \
		bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe, \
			     __bfa_cb_itnim_online, (__itnim)); \
	} \
} while (0)

#define bfa_itnim_offline_cb(__itnim) do { \
	if ((__itnim)->bfa->fcs) \
		bfa_cb_itnim_offline((__itnim)->ditn); \
	else { \
		bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe, \
			     __bfa_cb_itnim_offline, (__itnim)); \
	} \
} while (0)

#define bfa_itnim_sler_cb(__itnim) do { \
	if ((__itnim)->bfa->fcs) \
		bfa_cb_itnim_sler((__itnim)->ditn); \
	else { \
		bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe, \
			     __bfa_cb_itnim_sler, (__itnim)); \
	} \
} while (0)
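
/*
 * Each of the three callback macros above follows the same pattern: when
 * the FCS instance is attached ((__itnim)->bfa->fcs is non-NULL) the FCS
 * callback is invoked directly; otherwise it is deferred through
 * bfa_cb_queue() and delivered from the completion-queue context.
 */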
enum bfa_ioim_lm_ua_status {
	BFA_IOIM_LM_UA_RESET = 0,
	BFA_IOIM_LM_UA_SET = 1,
};

/*
 * itnim state machine event
 */
enum bfa_itnim_event {
	BFA_ITNIM_SM_CREATE = 1,	/* itnim is created */
	BFA_ITNIM_SM_ONLINE = 2,	/* itnim is online */
	BFA_ITNIM_SM_OFFLINE = 3,	/* itnim is offline */
	BFA_ITNIM_SM_FWRSP = 4,		/* firmware response */
	BFA_ITNIM_SM_DELETE = 5,	/* deleting an existing itnim */
	BFA_ITNIM_SM_CLEANUP = 6,	/* IO cleanup completion */
	BFA_ITNIM_SM_SLER = 7,		/* second level error recovery */
	BFA_ITNIM_SM_HWFAIL = 8,	/* IOC h/w failure event */
	BFA_ITNIM_SM_QRESUME = 9,	/* queue space available */
};
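
/*
 * A typical itnim lifecycle, in terms of the events above, looks roughly
 * like:
 *
 *	CREATE -> ONLINE -> FWRSP (create response)
 *		-> ... I/O traffic ...
 *		-> OFFLINE -> CLEANUP -> FWRSP (delete response) -> DELETE
 *
 * with QRESUME injected whenever a firmware request had to wait for
 * request-queue space, and HWFAIL cutting any state short on an IOC
 * hardware failure. See the state machine functions below for the
 * exact transitions.
 */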
/*
 * BFA IOIM related definitions
 */
#define bfa_ioim_move_to_comp_q(__ioim) do { \
	list_del(&(__ioim)->qe); \
	list_add_tail(&(__ioim)->qe, &(__ioim)->fcpim->ioim_comp_q); \
} while (0)

#define bfa_ioim_cb_profile_comp(__fcpim, __ioim) do { \
	if ((__fcpim)->profile_comp) \
		(__fcpim)->profile_comp(__ioim); \
} while (0)

#define bfa_ioim_cb_profile_start(__fcpim, __ioim) do { \
	if ((__fcpim)->profile_start) \
		(__fcpim)->profile_start(__ioim); \
} while (0)

/*
 * IO state machine events
 */
enum bfa_ioim_event {
	BFA_IOIM_SM_START = 1,		/* io start request from host */
	BFA_IOIM_SM_COMP_GOOD = 2,	/* io good comp, resource free */
	BFA_IOIM_SM_COMP = 3,		/* io comp, resource is free */
	BFA_IOIM_SM_COMP_UTAG = 4,	/* io comp, resource is free */
	BFA_IOIM_SM_DONE = 5,		/* io comp, resource not free */
	BFA_IOIM_SM_FREE = 6,		/* io resource is freed */
	BFA_IOIM_SM_ABORT = 7,		/* abort request from scsi stack */
	BFA_IOIM_SM_ABORT_COMP = 8,	/* abort from f/w */
	BFA_IOIM_SM_ABORT_DONE = 9,	/* abort completion from f/w */
	BFA_IOIM_SM_QRESUME = 10,	/* CQ space available to queue IO */
	BFA_IOIM_SM_SGALLOCED = 11,	/* SG page allocation successful */
	BFA_IOIM_SM_SQRETRY = 12,	/* sequence recovery retry */
	BFA_IOIM_SM_HCB = 13,		/* bfa callback complete */
	BFA_IOIM_SM_CLEANUP = 14,	/* IO cleanup from itnim */
	BFA_IOIM_SM_TMSTART = 15,	/* IO cleanup from tskim */
	BFA_IOIM_SM_TMDONE = 16,	/* IO cleanup from tskim */
	BFA_IOIM_SM_HWFAIL = 17,	/* IOC h/w failure event */
	BFA_IOIM_SM_IOTOV = 18,		/* ITN offline TOV */
};
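
/*
 * Note the flavors of normal completion above: COMP_GOOD, COMP and
 * COMP_UTAG arrive with the IO tag resource already free, while DONE
 * means the firmware completed the IO but the resource is returned
 * later via a separate FREE event.
 */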
/*
 * BFA TSKIM related definitions
 */

/*
 * task management completion handling
 */
#define bfa_tskim_qcomp(__tskim, __cbfn) do { \
	bfa_cb_queue((__tskim)->bfa, &(__tskim)->hcb_qe, __cbfn, (__tskim)); \
	bfa_tskim_notify_comp(__tskim); \
} while (0)

#define bfa_tskim_notify_comp(__tskim) do { \
	if ((__tskim)->notify) \
		bfa_itnim_tskdone((__tskim)->itnim); \
} while (0)

enum bfa_tskim_event {
	BFA_TSKIM_SM_START = 1,		/* TM command start */
	BFA_TSKIM_SM_DONE = 2,		/* TM completion */
	BFA_TSKIM_SM_QRESUME = 3,	/* resume after qfull */
	BFA_TSKIM_SM_HWFAIL = 5,	/* IOC h/w failure event */
	BFA_TSKIM_SM_HCB = 6,		/* BFA callback completion */
	BFA_TSKIM_SM_IOS_DONE = 7,	/* IO and sub TM completions */
	BFA_TSKIM_SM_CLEANUP = 8,	/* TM cleanup on ITN offline */
	BFA_TSKIM_SM_CLEANUP_DONE = 9,	/* TM abort completion */
	BFA_TSKIM_SM_UTAG = 10,		/* TM completion unknown tag */
};
/*
 * forward declaration for BFA ITNIM functions
 */
static void bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim);
static bfa_boolean_t bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim);
static bfa_boolean_t bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim);
static void bfa_itnim_cleanp_comp(void *itnim_cbarg);
static void bfa_itnim_cleanup(struct bfa_itnim_s *itnim);
static void __bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete);
static void bfa_itnim_iotov_online(struct bfa_itnim_s *itnim);
static void bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim);
static void bfa_itnim_iotov(void *itnim_arg);
static void bfa_itnim_iotov_start(struct bfa_itnim_s *itnim);
static void bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim);
static void bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim);

/*
 * forward declaration of ITNIM state machine
 */
static void bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_created(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_online(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_sler(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_offline(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);

/*
 * forward declaration for BFA IOIM functions
 */
static bfa_boolean_t bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim);
static bfa_boolean_t bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim);
static bfa_boolean_t bfa_ioim_send_abort(struct bfa_ioim_s *ioim);
static void bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim);
static void __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);
static bfa_boolean_t bfa_ioim_is_abortable(struct bfa_ioim_s *ioim);

/*
 * forward declaration of BFA IO state machine
 */
static void bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);
static void bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);
static void bfa_ioim_sm_active(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);
static void bfa_ioim_sm_abort(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);
static void bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);
static void bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);
static void bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);
static void bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);
static void bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);
static void bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);
static void bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);
static void bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);

/*
 * forward declaration for BFA TSKIM functions
 */
static void __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete);
static bfa_boolean_t bfa_tskim_match_scope(struct bfa_tskim_s *tskim,
		struct scsi_lun lun);
static void bfa_tskim_gather_ios(struct bfa_tskim_s *tskim);
static void bfa_tskim_cleanp_comp(void *tskim_cbarg);
static void bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim);
static bfa_boolean_t bfa_tskim_send(struct bfa_tskim_s *tskim);
static bfa_boolean_t bfa_tskim_send_abort(struct bfa_tskim_s *tskim);
static void bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim);

/*
 * forward declaration of BFA TSKIM state machine
 */
static void bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim,
		enum bfa_tskim_event event);
static void bfa_tskim_sm_active(struct bfa_tskim_s *tskim,
		enum bfa_tskim_event event);
static void bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim,
		enum bfa_tskim_event event);
static void bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim,
		enum bfa_tskim_event event);
static void bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim,
		enum bfa_tskim_event event);
static void bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
		enum bfa_tskim_event event);
static void bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim,
		enum bfa_tskim_event event);
/*
 * BFA FCP Initiator Mode module
 */

/*
 * Compute and return memory needed by FCP(im) module.
 */
static void
bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len)
{
	bfa_itnim_meminfo(cfg, km_len);

	/*
	 * IO memory
	 */
	*km_len += cfg->fwcfg.num_ioim_reqs *
		(sizeof(struct bfa_ioim_s) + sizeof(struct bfa_ioim_sp_s));

	/*
	 * task management command memory
	 */
	if (cfg->fwcfg.num_tskim_reqs < BFA_TSKIM_MIN)
		cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN;
	*km_len += cfg->fwcfg.num_tskim_reqs * sizeof(struct bfa_tskim_s);
}
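
/*
 * Illustration: with, say, 256 ioim requests configured (an example value
 * only), the kva requirement grows by
 * 256 * (sizeof(struct bfa_ioim_s) + sizeof(struct bfa_ioim_sp_s)),
 * plus at least BFA_TSKIM_MIN * sizeof(struct bfa_tskim_s) for task
 * management, on top of the itnim memory added by bfa_itnim_meminfo().
 */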
static void
bfa_fcpim_attach(struct bfa_fcp_mod_s *fcp, void *bfad,
		struct bfa_iocfc_cfg_s *cfg, struct bfa_pcidev_s *pcidev)
{
	struct bfa_fcpim_s *fcpim = &fcp->fcpim;
	struct bfa_s *bfa = fcp->bfa;

	bfa_trc(bfa, cfg->drvcfg.path_tov);
	bfa_trc(bfa, cfg->fwcfg.num_rports);
	bfa_trc(bfa, cfg->fwcfg.num_ioim_reqs);
	bfa_trc(bfa, cfg->fwcfg.num_tskim_reqs);

	fcpim->fcp = fcp;
	fcpim->bfa = bfa;
	fcpim->num_itnims = cfg->fwcfg.num_rports;
	fcpim->num_tskim_reqs = cfg->fwcfg.num_tskim_reqs;
	fcpim->path_tov = cfg->drvcfg.path_tov;
	fcpim->delay_comp = cfg->drvcfg.delay_comp;
	fcpim->profile_comp = NULL;
	fcpim->profile_start = NULL;

	bfa_itnim_attach(fcpim);
	bfa_tskim_attach(fcpim);
	bfa_ioim_attach(fcpim);
}

static void
bfa_fcpim_iocdisable(struct bfa_fcp_mod_s *fcp)
{
	struct bfa_fcpim_s *fcpim = &fcp->fcpim;
	struct bfa_itnim_s *itnim;
	struct list_head *qe, *qen;

	/* Enqueue unused tskim resources to free_q */
	list_splice_tail_init(&fcpim->tskim_unused_q, &fcpim->tskim_free_q);

	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
		itnim = (struct bfa_itnim_s *) qe;
		bfa_itnim_iocdisable(itnim);
	}
}

void
bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov)
{
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);

	fcpim->path_tov = path_tov * 1000;
	if (fcpim->path_tov > BFA_FCPIM_PATHTOV_MAX)
		fcpim->path_tov = BFA_FCPIM_PATHTOV_MAX;
}
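
/*
 * Note on units: the path TOV is passed in and reported back in seconds,
 * but kept internally in milliseconds (hence the "* 1000" above and the
 * "/ 1000" in bfa_fcpim_path_tov_get() below), capped at
 * BFA_FCPIM_PATHTOV_MAX.
 */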
u16
bfa_fcpim_path_tov_get(struct bfa_s *bfa)
{
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);

	return fcpim->path_tov / 1000;
}

#define bfa_fcpim_add_iostats(__l, __r, __stats) \
	(__l->__stats += __r->__stats)

void
bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *lstats,
		struct bfa_itnim_iostats_s *rstats)
{
	bfa_fcpim_add_iostats(lstats, rstats, total_ios);
	bfa_fcpim_add_iostats(lstats, rstats, qresumes);
	bfa_fcpim_add_iostats(lstats, rstats, no_iotags);
	bfa_fcpim_add_iostats(lstats, rstats, io_aborts);
	bfa_fcpim_add_iostats(lstats, rstats, no_tskims);
	bfa_fcpim_add_iostats(lstats, rstats, iocomp_ok);
	bfa_fcpim_add_iostats(lstats, rstats, iocomp_underrun);
	bfa_fcpim_add_iostats(lstats, rstats, iocomp_overrun);
	bfa_fcpim_add_iostats(lstats, rstats, iocomp_aborted);
	bfa_fcpim_add_iostats(lstats, rstats, iocomp_timedout);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_nexus_abort);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_proto_err);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_dif_err);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_sqer_needed);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_res_free);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_hostabrts);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_utags);
	bfa_fcpim_add_iostats(lstats, rstats, io_cleanups);
	bfa_fcpim_add_iostats(lstats, rstats, io_tmaborts);
	bfa_fcpim_add_iostats(lstats, rstats, onlines);
	bfa_fcpim_add_iostats(lstats, rstats, offlines);
	bfa_fcpim_add_iostats(lstats, rstats, creates);
	bfa_fcpim_add_iostats(lstats, rstats, deletes);
	bfa_fcpim_add_iostats(lstats, rstats, create_comps);
	bfa_fcpim_add_iostats(lstats, rstats, delete_comps);
	bfa_fcpim_add_iostats(lstats, rstats, sler_events);
	bfa_fcpim_add_iostats(lstats, rstats, fw_create);
	bfa_fcpim_add_iostats(lstats, rstats, fw_delete);
	bfa_fcpim_add_iostats(lstats, rstats, ioc_disabled);
	bfa_fcpim_add_iostats(lstats, rstats, cleanup_comps);
	bfa_fcpim_add_iostats(lstats, rstats, tm_cmnds);
	bfa_fcpim_add_iostats(lstats, rstats, tm_fw_rsps);
	bfa_fcpim_add_iostats(lstats, rstats, tm_success);
	bfa_fcpim_add_iostats(lstats, rstats, tm_failures);
	bfa_fcpim_add_iostats(lstats, rstats, tm_io_comps);
	bfa_fcpim_add_iostats(lstats, rstats, tm_qresumes);
	bfa_fcpim_add_iostats(lstats, rstats, tm_iocdowns);
	bfa_fcpim_add_iostats(lstats, rstats, tm_cleanups);
	bfa_fcpim_add_iostats(lstats, rstats, tm_cleanup_comps);
	bfa_fcpim_add_iostats(lstats, rstats, io_comps);
	bfa_fcpim_add_iostats(lstats, rstats, input_reqs);
	bfa_fcpim_add_iostats(lstats, rstats, output_reqs);
	bfa_fcpim_add_iostats(lstats, rstats, rd_throughput);
	bfa_fcpim_add_iostats(lstats, rstats, wr_throughput);
}
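
/*
 * bfa_fcpim_add_iostats() is a plain field-wise accumulator; e.g.
 *
 *	bfa_fcpim_add_iostats(lstats, rstats, total_ios);
 *
 * expands to
 *
 *	(lstats->total_ios += rstats->total_ios);
 *
 * which keeps the long list above mechanical and typo-resistant.
 */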
bfa_status_t
bfa_fcpim_port_iostats(struct bfa_s *bfa,
		struct bfa_itnim_iostats_s *stats, u8 lp_tag)
{
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
	struct list_head *qe, *qen;
	struct bfa_itnim_s *itnim;

	/* accumulate IO stats from itnim */
	memset(stats, 0, sizeof(struct bfa_itnim_iostats_s));
	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
		itnim = (struct bfa_itnim_s *) qe;
		if (itnim->rport->rport_info.lp_tag != lp_tag)
			continue;
		bfa_fcpim_add_stats(stats, &(itnim->stats));
	}
	return BFA_STATUS_OK;
}

void
bfa_ioim_profile_comp(struct bfa_ioim_s *ioim)
{
	struct bfa_itnim_latency_s *io_lat =
			&(ioim->itnim->ioprofile.io_latency);
	u32 val, idx;

	val = (u32)(jiffies - ioim->start_time);
	idx = bfa_ioim_get_index(scsi_bufflen((struct scsi_cmnd *)ioim->dio));
	bfa_itnim_ioprofile_update(ioim->itnim, idx);

	io_lat->count[idx]++;
	io_lat->min[idx] = (io_lat->min[idx] < val) ? io_lat->min[idx] : val;
	io_lat->max[idx] = (io_lat->max[idx] > val) ? io_lat->max[idx] : val;
	io_lat->avg[idx] += val;
}

void
bfa_ioim_profile_start(struct bfa_ioim_s *ioim)
{
	ioim->start_time = jiffies;
}
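
/*
 * The latency sample above is measured in jiffies (val = jiffies -
 * start_time), bucketed by transfer size via bfa_ioim_get_index().
 * avg[idx] accumulates a running sum, so consumers presumably divide by
 * count[idx] and by bfa_io_lat_clock_res_div (defined as HZ at the
 * bottom of this file) to report per-bucket averages in seconds.
 */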
bfa_status_t
bfa_fcpim_profile_on(struct bfa_s *bfa, u32 time)
{
	struct bfa_itnim_s *itnim;
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
	struct list_head *qe, *qen;

	/* clear IO stats for all itnims */
	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
		itnim = (struct bfa_itnim_s *) qe;
		bfa_itnim_clear_stats(itnim);
	}

	fcpim->io_profile = BFA_TRUE;
	fcpim->io_profile_start_time = time;
	fcpim->profile_comp = bfa_ioim_profile_comp;
	fcpim->profile_start = bfa_ioim_profile_start;
	return BFA_STATUS_OK;
}

bfa_status_t
bfa_fcpim_profile_off(struct bfa_s *bfa)
{
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);

	fcpim->io_profile = BFA_FALSE;
	fcpim->io_profile_start_time = 0;
	fcpim->profile_comp = NULL;
	fcpim->profile_start = NULL;
	return BFA_STATUS_OK;
}

u16
bfa_fcpim_qdepth_get(struct bfa_s *bfa)
{
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);

	return fcpim->q_depth;
}
/*
 * BFA ITNIM module state machine functions
 */

/*
 * Beginning/unallocated state - no events expected.
 */
static void
bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_CREATE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_created);
		itnim->is_online = BFA_FALSE;
		bfa_fcpim_additn(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Beginning state, only online event expected.
 */
static void
bfa_itnim_sm_created(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_ONLINE:
		if (bfa_itnim_send_fwcreate(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_fcpim_delitn(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Waiting for itnim create response from firmware.
 */
static void
bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_FWRSP:
		bfa_sm_set_state(itnim, bfa_itnim_sm_online);
		itnim->is_online = BFA_TRUE;
		bfa_itnim_iotov_online(itnim);
		bfa_itnim_online_cb(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_delete_pending);
		break;

	case BFA_ITNIM_SM_OFFLINE:
		if (bfa_itnim_send_fwdelete(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

static void
bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
			enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_QRESUME:
		bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
		bfa_itnim_send_fwcreate(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		bfa_fcpim_delitn(itnim);
		break;

	case BFA_ITNIM_SM_OFFLINE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		bfa_itnim_offline_cb(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
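
/*
 * The *_qfull states implement a common pattern: when
 * bfa_itnim_send_fwcreate()/bfa_itnim_send_fwdelete() find no room in the
 * request queue they register itnim->reqq_wait and return BFA_FALSE; the
 * queue-resume callback later posts BFA_ITNIM_SM_QRESUME, which re-issues
 * the firmware request. Any other exit from a qfull state must cancel the
 * wait with bfa_reqq_wcancel(), as seen above.
 */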
/*
 * Waiting for itnim create response from firmware, a delete is pending.
 */
static void
bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
				enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_FWRSP:
		if (bfa_itnim_send_fwdelete(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_fcpim_delitn(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Online state - normal parking state.
 */
static void
bfa_itnim_sm_online(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_OFFLINE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
		itnim->is_online = BFA_FALSE;
		bfa_itnim_iotov_start(itnim);
		bfa_itnim_cleanup(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
		itnim->is_online = BFA_FALSE;
		bfa_itnim_cleanup(itnim);
		break;

	case BFA_ITNIM_SM_SLER:
		bfa_sm_set_state(itnim, bfa_itnim_sm_sler);
		itnim->is_online = BFA_FALSE;
		bfa_itnim_iotov_start(itnim);
		bfa_itnim_sler_cb(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		itnim->is_online = BFA_FALSE;
		bfa_itnim_iotov_start(itnim);
		bfa_itnim_iocdisable_cleanup(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Second level error recovery needed.
 */
static void
bfa_itnim_sm_sler(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_OFFLINE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
		bfa_itnim_cleanup(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
		bfa_itnim_cleanup(itnim);
		bfa_itnim_iotov_delete(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_iocdisable_cleanup(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Going offline. Waiting for active IO cleanup.
 */
static void
bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
				enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_CLEANUP:
		if (bfa_itnim_send_fwdelete(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
		bfa_itnim_iotov_delete(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_iocdisable_cleanup(itnim);
		bfa_itnim_offline_cb(itnim);
		break;

	case BFA_ITNIM_SM_SLER:
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Deleting itnim. Waiting for active IO cleanup.
 */
static void
bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
				enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_CLEANUP:
		if (bfa_itnim_send_fwdelete(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_iocdisable_cleanup(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
/*
 * Rport offline. Firmware itnim is being deleted - awaiting f/w response.
 */
static void
bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_FWRSP:
		bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
		bfa_itnim_offline_cb(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_offline_cb(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

static void
bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
			enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_QRESUME:
		bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
		bfa_itnim_send_fwdelete(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		bfa_itnim_offline_cb(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Offline state.
 */
static void
bfa_itnim_sm_offline(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_itnim_iotov_delete(itnim);
		bfa_fcpim_delitn(itnim);
		break;

	case BFA_ITNIM_SM_ONLINE:
		if (bfa_itnim_send_fwcreate(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

static void
bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
				enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_itnim_iotov_delete(itnim);
		bfa_fcpim_delitn(itnim);
		break;

	case BFA_ITNIM_SM_OFFLINE:
		bfa_itnim_offline_cb(itnim);
		break;

	case BFA_ITNIM_SM_ONLINE:
		if (bfa_itnim_send_fwcreate(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Itnim is deleted, waiting for firmware response to delete.
 */
static void
bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_FWRSP:
	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_fcpim_delitn(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

static void
bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
			enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_QRESUME:
		bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
		bfa_itnim_send_fwdelete(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		bfa_fcpim_delitn(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
/*
 * Initiate cleanup of all IOs on an IOC failure.
 */
static void
bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim)
{
	struct bfa_tskim_s *tskim;
	struct bfa_ioim_s *ioim;
	struct list_head *qe, *qen;

	list_for_each_safe(qe, qen, &itnim->tsk_q) {
		tskim = (struct bfa_tskim_s *) qe;
		bfa_tskim_iocdisable(tskim);
	}

	list_for_each_safe(qe, qen, &itnim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_ioim_iocdisable(ioim);
	}

	/*
	 * For IO requests in the pending queue, pretend an early timeout.
	 */
	list_for_each_safe(qe, qen, &itnim->pending_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_ioim_tov(ioim);
	}

	list_for_each_safe(qe, qen, &itnim->io_cleanup_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_ioim_iocdisable(ioim);
	}
}
/*
 * IO cleanup completion
 */
static void
bfa_itnim_cleanp_comp(void *itnim_cbarg)
{
	struct bfa_itnim_s *itnim = itnim_cbarg;

	bfa_stats(itnim, cleanup_comps);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_CLEANUP);
}

/*
 * Initiate cleanup of all IOs.
 */
static void
bfa_itnim_cleanup(struct bfa_itnim_s *itnim)
{
	struct bfa_ioim_s *ioim;
	struct bfa_tskim_s *tskim;
	struct list_head *qe, *qen;

	bfa_wc_init(&itnim->wc, bfa_itnim_cleanp_comp, itnim);

	list_for_each_safe(qe, qen, &itnim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;

		/*
		 * Move the IO from the active queue to a cleanup queue so
		 * that a later TM will not pick it up.
		 */
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &itnim->io_cleanup_q);

		bfa_wc_up(&itnim->wc);
		bfa_ioim_cleanup(ioim);
	}

	list_for_each_safe(qe, qen, &itnim->tsk_q) {
		tskim = (struct bfa_tskim_s *) qe;
		bfa_wc_up(&itnim->wc);
		bfa_tskim_cleanup(tskim);
	}

	bfa_wc_wait(&itnim->wc);
}
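
/*
 * The cleanup above is coordinated through a wait counter: bfa_wc_up()
 * is called once per outstanding IO and TM, and bfa_itnim_iodone()/
 * bfa_itnim_tskdone() below drop the count as each one finishes. When
 * the count reaches zero, bfa_itnim_cleanp_comp() fires
 * BFA_ITNIM_SM_CLEANUP to advance the itnim state machine.
 */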
static void
__bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_itnim_s *itnim = cbarg;

	if (complete)
		bfa_cb_itnim_online(itnim->ditn);
}

static void
__bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_itnim_s *itnim = cbarg;

	if (complete)
		bfa_cb_itnim_offline(itnim->ditn);
}

static void
__bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_itnim_s *itnim = cbarg;

	if (complete)
		bfa_cb_itnim_sler(itnim->ditn);
}

/*
 * Call to resume any I/O requests waiting for room in request queue.
 */
static void
bfa_itnim_qresume(void *cbarg)
{
	struct bfa_itnim_s *itnim = cbarg;

	bfa_sm_send_event(itnim, BFA_ITNIM_SM_QRESUME);
}

/*
 * bfa_itnim_public
 */

void
bfa_itnim_iodone(struct bfa_itnim_s *itnim)
{
	bfa_wc_down(&itnim->wc);
}

void
bfa_itnim_tskdone(struct bfa_itnim_s *itnim)
{
	bfa_wc_down(&itnim->wc);
}

void
bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len)
{
	/*
	 * ITN memory
	 */
	*km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itnim_s);
}

void
bfa_itnim_attach(struct bfa_fcpim_s *fcpim)
{
	struct bfa_s *bfa = fcpim->bfa;
	struct bfa_fcp_mod_s *fcp = fcpim->fcp;
	struct bfa_itnim_s *itnim;
	int i, j;

	INIT_LIST_HEAD(&fcpim->itnim_q);

	itnim = (struct bfa_itnim_s *) bfa_mem_kva_curp(fcp);
	fcpim->itnim_arr = itnim;

	for (i = 0; i < fcpim->num_itnims; i++, itnim++) {
		memset(itnim, 0, sizeof(struct bfa_itnim_s));
		itnim->bfa = bfa;
		itnim->fcpim = fcpim;
		itnim->reqq = BFA_REQQ_QOS_LO;
		itnim->rport = BFA_RPORT_FROM_TAG(bfa, i);
		itnim->iotov_active = BFA_FALSE;
		bfa_reqq_winit(&itnim->reqq_wait, bfa_itnim_qresume, itnim);

		INIT_LIST_HEAD(&itnim->io_q);
		INIT_LIST_HEAD(&itnim->io_cleanup_q);
		INIT_LIST_HEAD(&itnim->pending_q);
		INIT_LIST_HEAD(&itnim->tsk_q);
		INIT_LIST_HEAD(&itnim->delay_comp_q);
		for (j = 0; j < BFA_IOBUCKET_MAX; j++)
			itnim->ioprofile.io_latency.min[j] = ~0;
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
	}

	bfa_mem_kva_curp(fcp) = (u8 *) itnim;
}

void
bfa_itnim_iocdisable(struct bfa_itnim_s *itnim)
{
	bfa_stats(itnim, ioc_disabled);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_HWFAIL);
}
static bfa_boolean_t
bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim)
{
	struct bfi_itn_create_req_s *m;

	itnim->msg_no++;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(itnim->bfa, itnim->reqq);
	if (!m) {
		bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_ITN, BFI_ITN_H2I_CREATE_REQ,
			bfa_fn_lpu(itnim->bfa));
	m->fw_handle = itnim->rport->fw_handle;
	m->class = FC_CLASS_3;
	m->seq_rec = itnim->seq_rec;
	m->msg_no = itnim->msg_no;
	bfa_stats(itnim, fw_create);

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(itnim->bfa, itnim->reqq, m->mh);
	return BFA_TRUE;
}

static bfa_boolean_t
bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim)
{
	struct bfi_itn_delete_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(itnim->bfa, itnim->reqq);
	if (!m) {
		bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_ITN, BFI_ITN_H2I_DELETE_REQ,
			bfa_fn_lpu(itnim->bfa));
	m->fw_handle = itnim->rport->fw_handle;
	bfa_stats(itnim, fw_delete);

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(itnim->bfa, itnim->reqq, m->mh);
	return BFA_TRUE;
}
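
/*
 * Both senders above follow the same request-queue handshake:
 * bfa_reqq_next() returns a pointer to a free message slot, or NULL when
 * the queue is full, in which case the caller parks on bfa_reqq_wait()
 * and retries on QRESUME; otherwise the message is filled in and posted
 * to the firmware with bfa_reqq_produce().
 */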
/*
 * Cleanup all pending failed inflight requests.
 */
static void
bfa_itnim_delayed_comp(struct bfa_itnim_s *itnim, bfa_boolean_t iotov)
{
	struct bfa_ioim_s *ioim;
	struct list_head *qe, *qen;

	list_for_each_safe(qe, qen, &itnim->delay_comp_q) {
		ioim = (struct bfa_ioim_s *)qe;
		bfa_ioim_delayed_comp(ioim, iotov);
	}
}

/*
 * Start all pending IO requests.
 */
static void
bfa_itnim_iotov_online(struct bfa_itnim_s *itnim)
{
	struct bfa_ioim_s *ioim;

	bfa_itnim_iotov_stop(itnim);

	/*
	 * Abort all inflight IO requests in the queue
	 */
	bfa_itnim_delayed_comp(itnim, BFA_FALSE);

	/*
	 * Start all pending IO requests.
	 */
	while (!list_empty(&itnim->pending_q)) {
		bfa_q_deq(&itnim->pending_q, &ioim);
		list_add_tail(&ioim->qe, &itnim->io_q);
		bfa_ioim_start(ioim);
	}
}

/*
 * Fail all pending IO requests
 */
static void
bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim)
{
	struct bfa_ioim_s *ioim;

	/*
	 * Fail all inflight IO requests in the queue
	 */
	bfa_itnim_delayed_comp(itnim, BFA_TRUE);

	/*
	 * Fail any pending IO requests.
	 */
	while (!list_empty(&itnim->pending_q)) {
		bfa_q_deq(&itnim->pending_q, &ioim);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
		bfa_ioim_tov(ioim);
	}
}

/*
 * IO TOV timer callback. Fail any pending IO requests.
 */
static void
bfa_itnim_iotov(void *itnim_arg)
{
	struct bfa_itnim_s *itnim = itnim_arg;

	itnim->iotov_active = BFA_FALSE;

	bfa_cb_itnim_tov_begin(itnim->ditn);
	bfa_itnim_iotov_cleanup(itnim);
	bfa_cb_itnim_tov(itnim->ditn);
}

/*
 * Start IO TOV timer for failing back pending IO requests in offline state.
 */
static void
bfa_itnim_iotov_start(struct bfa_itnim_s *itnim)
{
	if (itnim->fcpim->path_tov > 0) {
		itnim->iotov_active = BFA_TRUE;
		WARN_ON(!bfa_itnim_hold_io(itnim));
		bfa_timer_start(itnim->bfa, &itnim->timer,
			bfa_itnim_iotov, itnim, itnim->fcpim->path_tov);
	}
}

/*
 * Stop IO TOV timer.
 */
static void
bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim)
{
	if (itnim->iotov_active) {
		itnim->iotov_active = BFA_FALSE;
		bfa_timer_stop(&itnim->timer);
	}
}

/*
 * Stop the IO TOV timer and fail back any pending IO requests.
 */
static void
bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim)
{
	bfa_boolean_t pathtov_active = BFA_FALSE;

	if (itnim->iotov_active)
		pathtov_active = BFA_TRUE;

	bfa_itnim_iotov_stop(itnim);
	if (pathtov_active)
		bfa_cb_itnim_tov_begin(itnim->ditn);
	bfa_itnim_iotov_cleanup(itnim);
	if (pathtov_active)
		bfa_cb_itnim_tov(itnim->ditn);
}
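
/*
 * Taken together, the iotov helpers above implement the path TOV window:
 * while an itnim is offline, new IOs sit on pending_q. If the itnim comes
 * back online before the timer fires, bfa_itnim_iotov_online() restarts
 * them; otherwise bfa_itnim_iotov() fails them back to the SCSI stack.
 */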
static void
bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim)
{
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(itnim->bfa);

	fcpim->del_itn_stats.del_itn_iocomp_aborted +=
		itnim->stats.iocomp_aborted;
	fcpim->del_itn_stats.del_itn_iocomp_timedout +=
		itnim->stats.iocomp_timedout;
	fcpim->del_itn_stats.del_itn_iocom_sqer_needed +=
		itnim->stats.iocom_sqer_needed;
	fcpim->del_itn_stats.del_itn_iocom_res_free +=
		itnim->stats.iocom_res_free;
	fcpim->del_itn_stats.del_itn_iocom_hostabrts +=
		itnim->stats.iocom_hostabrts;
	fcpim->del_itn_stats.del_itn_total_ios += itnim->stats.total_ios;
	fcpim->del_itn_stats.del_io_iocdowns += itnim->stats.io_iocdowns;
	fcpim->del_itn_stats.del_tm_iocdowns += itnim->stats.tm_iocdowns;
}

/*
 * bfa_itnim_public
 */

/*
 * Itnim interrupt processing.
 */
void
bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
	union bfi_itn_i2h_msg_u msg;
	struct bfa_itnim_s *itnim;

	bfa_trc(bfa, m->mhdr.msg_id);

	msg.msg = m;

	switch (m->mhdr.msg_id) {
	case BFI_ITN_I2H_CREATE_RSP:
		itnim = BFA_ITNIM_FROM_TAG(fcpim,
						msg.create_rsp->bfa_handle);
		WARN_ON(msg.create_rsp->status != BFA_STATUS_OK);
		bfa_stats(itnim, create_comps);
		bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
		break;

	case BFI_ITN_I2H_DELETE_RSP:
		itnim = BFA_ITNIM_FROM_TAG(fcpim,
						msg.delete_rsp->bfa_handle);
		WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK);
		bfa_stats(itnim, delete_comps);
		bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
		break;

	case BFI_ITN_I2H_SLER_EVENT:
		itnim = BFA_ITNIM_FROM_TAG(fcpim,
						msg.sler_event->bfa_handle);
		bfa_stats(itnim, sler_events);
		bfa_sm_send_event(itnim, BFA_ITNIM_SM_SLER);
		break;

	default:
		bfa_trc(bfa, m->mhdr.msg_id);
		WARN_ON(1);
	}
}
  1206. /*
  1207. * bfa_itnim_api
  1208. */
  1209. struct bfa_itnim_s *
  1210. bfa_itnim_create(struct bfa_s *bfa, struct bfa_rport_s *rport, void *ditn)
  1211. {
  1212. struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
  1213. struct bfa_itnim_s *itnim;
  1214. bfa_itn_create(bfa, rport, bfa_itnim_isr);
  1215. itnim = BFA_ITNIM_FROM_TAG(fcpim, rport->rport_tag);
  1216. WARN_ON(itnim->rport != rport);
  1217. itnim->ditn = ditn;
  1218. bfa_stats(itnim, creates);
  1219. bfa_sm_send_event(itnim, BFA_ITNIM_SM_CREATE);
  1220. return itnim;
  1221. }
  1222. void
  1223. bfa_itnim_delete(struct bfa_itnim_s *itnim)
  1224. {
  1225. bfa_stats(itnim, deletes);
  1226. bfa_sm_send_event(itnim, BFA_ITNIM_SM_DELETE);
  1227. }
  1228. void
  1229. bfa_itnim_online(struct bfa_itnim_s *itnim, bfa_boolean_t seq_rec)
  1230. {
  1231. itnim->seq_rec = seq_rec;
  1232. bfa_stats(itnim, onlines);
  1233. bfa_sm_send_event(itnim, BFA_ITNIM_SM_ONLINE);
  1234. }
  1235. void
  1236. bfa_itnim_offline(struct bfa_itnim_s *itnim)
  1237. {
  1238. bfa_stats(itnim, offlines);
  1239. bfa_sm_send_event(itnim, BFA_ITNIM_SM_OFFLINE);
  1240. }
  1241. /*
  1242. * Return true if itnim is considered offline for holding off IO request.
  1243. * IO is not held if itnim is being deleted.
  1244. */
  1245. bfa_boolean_t
  1246. bfa_itnim_hold_io(struct bfa_itnim_s *itnim)
  1247. {
  1248. return itnim->fcpim->path_tov && itnim->iotov_active &&
  1249. (bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwcreate) ||
  1250. bfa_sm_cmp_state(itnim, bfa_itnim_sm_sler) ||
  1251. bfa_sm_cmp_state(itnim, bfa_itnim_sm_cleanup_offline) ||
  1252. bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwdelete) ||
  1253. bfa_sm_cmp_state(itnim, bfa_itnim_sm_offline) ||
  1254. bfa_sm_cmp_state(itnim, bfa_itnim_sm_iocdisable));
  1255. }
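
/*
 * Note (summary of the hold-IO path above): while bfa_itnim_hold_io()
 * returns true, new IO requests are parked on itnim->pending_q instead
 * of being failed immediately; they are restarted by
 * bfa_itnim_iotov_online() or timed out by bfa_itnim_iotov().
 */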
#define bfa_io_lat_clock_res_div	HZ
#define bfa_io_lat_clock_res_mul	1000

bfa_status_t
bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim,
			struct bfa_itnim_ioprofile_s *ioprofile)
{
	struct bfa_fcpim_s *fcpim;

	if (!itnim)
		return BFA_STATUS_NO_FCPIM_NEXUS;
	fcpim = BFA_FCPIM(itnim->bfa);
	if (!fcpim->io_profile)
		return BFA_STATUS_IOPROFILE_OFF;

	itnim->ioprofile.index = BFA_IOBUCKET_MAX;
	itnim->ioprofile.io_profile_start_time =
		bfa_io_profile_start_time(itnim->bfa);
	itnim->ioprofile.clock_res_mul = bfa_io_lat_clock_res_mul;
	itnim->ioprofile.clock_res_div = bfa_io_lat_clock_res_div;
	*ioprofile = itnim->ioprofile;

	return BFA_STATUS_OK;
}
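
/*
 * With clock_res_mul/clock_res_div published as 1000/HZ, a raw latency
 * sample of 'ticks' jiffies converts to milliseconds as:
 *
 *	latency_ms = ticks * clock_res_mul / clock_res_div
 *		   = ticks * 1000 / HZ
 *
 * e.g. 25 jiffies at HZ=250 is 100 ms.
 */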
void
bfa_itnim_clear_stats(struct bfa_itnim_s *itnim)
{
	int j;

	if (!itnim)
		return;

	memset(&itnim->stats, 0, sizeof(itnim->stats));
	memset(&itnim->ioprofile, 0, sizeof(itnim->ioprofile));
	for (j = 0; j < BFA_IOBUCKET_MAX; j++)
		itnim->ioprofile.io_latency.min[j] = ~0;
}

/*
 * BFA IO module state machine functions
 */

/*
 * IO is not started (unallocated).
 */
static void
bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	switch (event) {
	case BFA_IOIM_SM_START:
		if (!bfa_itnim_is_online(ioim->itnim)) {
			if (!bfa_itnim_hold_io(ioim->itnim)) {
				bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
				list_del(&ioim->qe);
				list_add_tail(&ioim->qe,
					&ioim->fcpim->ioim_comp_q);
				bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
						__bfa_cb_ioim_pathtov, ioim);
			} else {
				list_del(&ioim->qe);
				list_add_tail(&ioim->qe,
					&ioim->itnim->pending_q);
			}
			break;
		}

		if (ioim->nsges > BFI_SGE_INLINE) {
			if (!bfa_ioim_sgpg_alloc(ioim)) {
				bfa_sm_set_state(ioim, bfa_ioim_sm_sgalloc);
				return;
			}
		}

		if (!bfa_ioim_send_ioreq(ioim)) {
			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
			break;
		}

		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		break;

	case BFA_IOIM_SM_IOTOV:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
				__bfa_cb_ioim_pathtov, ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/*
		 * IO in pending queue can get abort requests. Complete abort
		 * requests immediately.
		 */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		WARN_ON(!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim));
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
				__bfa_cb_ioim_abort, ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
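
/*
 * Summary of the BFA_IOIM_SM_START handling above:
 *  - itnim offline, IO not held:  complete with PATHTOV status
 *  - itnim offline, IO held:      park on itnim->pending_q
 *  - more than BFI_SGE_INLINE SG entries: wait in sgalloc for SG pages
 *  - request CQ full:             wait in qfull for queue space
 *  - otherwise:                   request sent to firmware, go active
 */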
/*
 * IO is waiting for SG pages.
 */
static void
bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_SGALLOCED:
		if (!bfa_ioim_send_ioreq(ioim)) {
			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
			break;
		}
		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/*
 * IO is active.
 */
static void
bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	switch (event) {
	case BFA_IOIM_SM_COMP_GOOD:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
			      __bfa_cb_ioim_good_comp, ioim);
		break;

	case BFA_IOIM_SM_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
			      ioim);
		break;

	case BFA_IOIM_SM_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
			      ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		ioim->iosp->abort_explicit = BFA_TRUE;
		ioim->io_cbfn = __bfa_cb_ioim_abort;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_abort_qfull);
			bfa_stats(ioim->itnim, qwait);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
					&ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_CLEANUP:
		ioim->iosp->abort_explicit = BFA_FALSE;
		ioim->io_cbfn = __bfa_cb_ioim_failed;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
			bfa_stats(ioim->itnim, qwait);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
					&ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	case BFA_IOIM_SM_SQRETRY:
		if (bfa_ioim_maxretry_reached(ioim)) {
			/* max retry reached, free IO */
			bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
			bfa_ioim_move_to_comp_q(ioim);
			bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
					__bfa_cb_ioim_failed, ioim);
			break;
		}
		/* waiting for IO tag resource free */
		bfa_sm_set_state(ioim, bfa_ioim_sm_cmnd_retry);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/*
 * IO is retried with new tag.
 */
static void
bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	switch (event) {
	case BFA_IOIM_SM_FREE:
		/* abts and rrq done. Now retry the IO with new tag */
		bfa_ioim_update_iotag(ioim);
		if (!bfa_ioim_send_ioreq(ioim)) {
			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
			break;
		}
		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		break;

	case BFA_IOIM_SM_CLEANUP:
		ioim->iosp->abort_explicit = BFA_FALSE;
		ioim->io_cbfn = __bfa_cb_ioim_failed;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
			bfa_stats(ioim->itnim, qwait);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
					&ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
				__bfa_cb_ioim_failed, ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/*
		 * IO abort is already done in this state; we are only
		 * waiting for the IO tag resource to be freed.
		 */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
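
/*
 * Note: cmnd_retry is entered when firmware asks for a sequence-level
 * retry (BFI_IOIM_STS_SQER_NEEDED). Once the old tag's resources are
 * released (BFA_IOIM_SM_FREE), bfa_ioim_update_iotag() - defined
 * elsewhere in this file - refreshes the tag and the request is
 * reissued via bfa_ioim_send_ioreq().
 */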
/*
 * IO is being aborted, waiting for completion from firmware.
 */
static void
bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
	case BFA_IOIM_SM_DONE:
	case BFA_IOIM_SM_FREE:
		break;

	case BFA_IOIM_SM_ABORT_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_ABORT_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_COMP_UTAG:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		WARN_ON(ioim->iosp->abort_explicit != BFA_TRUE);
		ioim->iosp->abort_explicit = BFA_FALSE;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
			bfa_stats(ioim->itnim, qwait);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
					&ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/*
 * IO is being cleaned up (implicit abort), waiting for completion from
 * firmware.
 */
static void
bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
	case BFA_IOIM_SM_DONE:
	case BFA_IOIM_SM_FREE:
		break;

	case BFA_IOIM_SM_ABORT:
		/*
		 * IO is already being aborted implicitly
		 */
		ioim->io_cbfn = __bfa_cb_ioim_abort;
		break;

	case BFA_IOIM_SM_ABORT_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_ABORT_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_COMP_UTAG:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		/*
		 * IO can be in cleanup state already due to TM command.
		 * 2nd cleanup request comes from ITN offline event.
		 */
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/*
 * IO is waiting for room in request CQ
 */
static void
bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_QRESUME:
		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		bfa_ioim_send_ioreq(ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/*
 * Active IO is being aborted, waiting for room in request CQ.
 */
static void
bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_QRESUME:
		bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
		bfa_ioim_send_abort(ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		WARN_ON(ioim->iosp->abort_explicit != BFA_TRUE);
		ioim->iosp->abort_explicit = BFA_FALSE;
		bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
		break;

	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/*
 * Active IO is being cleaned up, waiting for room in request CQ.
 */
static void
bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_QRESUME:
		bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		bfa_ioim_send_abort(ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/*
		 * IO is already being cleaned up implicitly
		 */
		ioim->io_cbfn = __bfa_cb_ioim_abort;
		break;

	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/*
 * IO bfa callback is pending.
 */
static void
bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	switch (event) {
	case BFA_IOIM_SM_HCB:
		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
		bfa_ioim_free(ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/*
 * IO bfa callback is pending. IO resource cannot be freed.
 */
static void
bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_HCB:
		bfa_sm_set_state(ioim, bfa_ioim_sm_resfree);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_resfree_q);
		break;

	case BFA_IOIM_SM_FREE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/*
 * IO is completed, waiting for resource free from firmware.
 */
static void
bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_FREE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
		bfa_ioim_free(ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/*
 * This is called from bfa_fcpim_start after the driver has completed
 * bfa_init() with the flash read. Now invalidate the stale content of
 * the lun mask, such as unit attention, rp tag and lp tag.
 */
static void
bfa_ioim_lm_init(struct bfa_s *bfa)
{
	struct bfa_lun_mask_s *lunm_list;
	int i;

	if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
		return;

	lunm_list = bfa_get_lun_mask_list(bfa);
	for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
		lunm_list[i].ua = BFA_IOIM_LM_UA_RESET;
		lunm_list[i].lp_tag = BFA_LP_TAG_INVALID;
		lunm_list[i].rp_tag = BFA_RPORT_TAG_INVALID;
	}
}

static void
__bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_ioim_s *ioim = cbarg;

	if (!complete) {
		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
		return;
	}

	bfa_cb_ioim_good_comp(ioim->bfa->bfad, ioim->dio);
}

static void
__bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_ioim_s *ioim = cbarg;
	struct bfi_ioim_rsp_s *m;
	u8 *snsinfo = NULL;
	u8 sns_len = 0;
	s32 residue = 0;

	if (!complete) {
		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
		return;
	}

	m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
	if (m->io_status == BFI_IOIM_STS_OK) {
		/*
		 * setup sense information, if present
		 */
		if ((m->scsi_status == SCSI_STATUS_CHECK_CONDITION) &&
					m->sns_len) {
			sns_len = m->sns_len;
			snsinfo = BFA_SNSINFO_FROM_TAG(ioim->fcpim->fcp,
						ioim->iotag);
		}

		/*
		 * setup residue value correctly for normal completions
		 */
		if (m->resid_flags == FCP_RESID_UNDER) {
			residue = be32_to_cpu(m->residue);
			bfa_stats(ioim->itnim, iocomp_underrun);
		}
		if (m->resid_flags == FCP_RESID_OVER) {
			residue = be32_to_cpu(m->residue);
			residue = -residue;
			bfa_stats(ioim->itnim, iocomp_overrun);
		}
	}

	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, m->io_status,
			  m->scsi_status, sns_len, snsinfo, residue);
}
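
/*
 * Residue convention above: FCP_RESID_UNDER yields a positive residue
 * (bytes not transferred), while FCP_RESID_OVER is reported as a
 * negative value so upper layers can tell overrun from underrun.
 */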
void
bfa_fcpim_lunmask_rp_update(struct bfa_s *bfa, wwn_t lp_wwn, wwn_t rp_wwn,
			u16 rp_tag, u8 lp_tag)
{
	struct bfa_lun_mask_s *lun_list;
	u8 i;

	if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
		return;

	lun_list = bfa_get_lun_mask_list(bfa);
	for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
		if (lun_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE) {
			if ((lun_list[i].lp_wwn == lp_wwn) &&
			    (lun_list[i].rp_wwn == rp_wwn)) {
				lun_list[i].rp_tag = rp_tag;
				lun_list[i].lp_tag = lp_tag;
			}
		}
	}
}

/*
 * set UA for all active luns in LM DB
 */
static void
bfa_ioim_lm_set_ua(struct bfa_s *bfa)
{
	struct bfa_lun_mask_s *lunm_list;
	int i;

	lunm_list = bfa_get_lun_mask_list(bfa);
	for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
		if (lunm_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
			continue;
		lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
	}
}

bfa_status_t
bfa_fcpim_lunmask_update(struct bfa_s *bfa, u32 update)
{
	struct bfa_lunmask_cfg_s *lun_mask;

	bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
	if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
		return BFA_STATUS_FAILED;

	if (bfa_get_lun_mask_status(bfa) == update)
		return BFA_STATUS_NO_CHANGE;

	lun_mask = bfa_get_lun_mask(bfa);
	lun_mask->status = update;

	if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_ENABLED)
		bfa_ioim_lm_set_ua(bfa);

	return bfa_dconf_update(bfa);
}

bfa_status_t
bfa_fcpim_lunmask_clear(struct bfa_s *bfa)
{
	int i;
	struct bfa_lun_mask_s *lunm_list;

	bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
	if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
		return BFA_STATUS_FAILED;

	lunm_list = bfa_get_lun_mask_list(bfa);
	for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
		if (lunm_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE) {
			if (lunm_list[i].rp_tag != BFA_RPORT_TAG_INVALID)
				bfa_rport_unset_lunmask(bfa,
				  BFA_RPORT_FROM_TAG(bfa, lunm_list[i].rp_tag));
		}
	}

	memset(lunm_list, 0, sizeof(struct bfa_lun_mask_s) * MAX_LUN_MASK_CFG);
	return bfa_dconf_update(bfa);
}

bfa_status_t
bfa_fcpim_lunmask_query(struct bfa_s *bfa, void *buf)
{
	struct bfa_lunmask_cfg_s *lun_mask;

	bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
	if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
		return BFA_STATUS_FAILED;

	lun_mask = bfa_get_lun_mask(bfa);
	memcpy(buf, lun_mask, sizeof(struct bfa_lunmask_cfg_s));
	return BFA_STATUS_OK;
}

bfa_status_t
bfa_fcpim_lunmask_add(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn,
		      wwn_t rpwwn, struct scsi_lun lun)
{
	struct bfa_lun_mask_s *lunm_list;
	struct bfa_rport_s *rp = NULL;
	int i, free_index = MAX_LUN_MASK_CFG + 1;
	struct bfa_fcs_lport_s *port = NULL;
	struct bfa_fcs_rport_s *rp_fcs;

	bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
	if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
		return BFA_STATUS_FAILED;

	port = bfa_fcs_lookup_port(&((struct bfad_s *)bfa->bfad)->bfa_fcs,
				   vf_id, *pwwn);
	if (port) {
		*pwwn = port->port_cfg.pwwn;
		rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
		if (rp_fcs)
			rp = rp_fcs->bfa_rport;
	}

	lunm_list = bfa_get_lun_mask_list(bfa);
	/* if entry exists */
	for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
		if (lunm_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
			free_index = i;
		if ((lunm_list[i].lp_wwn == *pwwn) &&
		    (lunm_list[i].rp_wwn == rpwwn) &&
		    (scsilun_to_int((struct scsi_lun *)&lunm_list[i].lun) ==
		     scsilun_to_int((struct scsi_lun *)&lun)))
			return BFA_STATUS_ENTRY_EXISTS;
	}
	if (free_index > MAX_LUN_MASK_CFG)
		return BFA_STATUS_MAX_ENTRY_REACHED;

	if (rp) {
		lunm_list[free_index].lp_tag = bfa_lps_get_tag_from_pid(bfa,
						   rp->rport_info.local_pid);
		lunm_list[free_index].rp_tag = rp->rport_tag;
	} else {
		lunm_list[free_index].lp_tag = BFA_LP_TAG_INVALID;
		lunm_list[free_index].rp_tag = BFA_RPORT_TAG_INVALID;
	}

	lunm_list[free_index].lp_wwn = *pwwn;
	lunm_list[free_index].rp_wwn = rpwwn;
	lunm_list[free_index].lun = lun;
	lunm_list[free_index].state = BFA_IOIM_LUN_MASK_ACTIVE;

	/* set UA for all luns in this rp */
	for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
		if ((lunm_list[i].lp_wwn == *pwwn) &&
		    (lunm_list[i].rp_wwn == rpwwn))
			lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
	}

	return bfa_dconf_update(bfa);
}

bfa_status_t
bfa_fcpim_lunmask_delete(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn,
			 wwn_t rpwwn, struct scsi_lun lun)
{
	struct bfa_lun_mask_s *lunm_list;
	struct bfa_rport_s *rp = NULL;
	struct bfa_fcs_lport_s *port = NULL;
	struct bfa_fcs_rport_s *rp_fcs;
	int i;

	/* in min cfg lunm_list could be NULL but no commands should run. */
	if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
		return BFA_STATUS_FAILED;

	bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
	bfa_trc(bfa, *pwwn);
	bfa_trc(bfa, rpwwn);
	bfa_trc(bfa, scsilun_to_int((struct scsi_lun *)&lun));

	if (*pwwn == 0) {
		port = bfa_fcs_lookup_port(
				&((struct bfad_s *)bfa->bfad)->bfa_fcs,
				vf_id, *pwwn);
		if (port) {
			*pwwn = port->port_cfg.pwwn;
			rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
			if (rp_fcs)
				rp = rp_fcs->bfa_rport;
		}
	}

	lunm_list = bfa_get_lun_mask_list(bfa);
	for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
		if ((lunm_list[i].lp_wwn == *pwwn) &&
		    (lunm_list[i].rp_wwn == rpwwn) &&
		    (scsilun_to_int((struct scsi_lun *)&lunm_list[i].lun) ==
		     scsilun_to_int((struct scsi_lun *)&lun))) {
			lunm_list[i].lp_wwn = 0;
			lunm_list[i].rp_wwn = 0;
			int_to_scsilun(0, &lunm_list[i].lun);
			lunm_list[i].state = BFA_IOIM_LUN_MASK_INACTIVE;
			if (lunm_list[i].rp_tag != BFA_RPORT_TAG_INVALID) {
				lunm_list[i].rp_tag = BFA_RPORT_TAG_INVALID;
				lunm_list[i].lp_tag = BFA_LP_TAG_INVALID;
			}
			return bfa_dconf_update(bfa);
		}
	}

	/* set UA for all luns in this rp */
	for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
		if ((lunm_list[i].lp_wwn == *pwwn) &&
		    (lunm_list[i].rp_wwn == rpwwn))
			lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
	}

	return BFA_STATUS_ENTRY_NOT_EXISTS;
}

static void
__bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_ioim_s *ioim = cbarg;

	if (!complete) {
		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
		return;
	}

	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED,
			  0, 0, NULL, 0);
}

static void
__bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_ioim_s *ioim = cbarg;

	bfa_stats(ioim->itnim, path_tov_expired);
	if (!complete) {
		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
		return;
	}

	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV,
			  0, 0, NULL, 0);
}

static void
__bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_ioim_s *ioim = cbarg;

	if (!complete) {
		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
		return;
	}

	bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio);
}
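
/*
 * SG page wait callback: invoked when the pages requested by
 * bfa_ioim_sgpg_alloc() become available; splices them onto
 * ioim->sgpg_q and resumes the IO state machine with SM_SGALLOCED.
 */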
static void
bfa_ioim_sgpg_alloced(void *cbarg)
{
	struct bfa_ioim_s *ioim = cbarg;

	ioim->nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
	list_splice_tail_init(&ioim->iosp->sgpg_wqe.sgpg_q, &ioim->sgpg_q);
	ioim->sgpg = bfa_q_first(&ioim->sgpg_q);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_SGALLOCED);
}

/*
 * Send I/O request to firmware.
 */
static bfa_boolean_t
bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
{
	struct bfa_itnim_s *itnim = ioim->itnim;
	struct bfi_ioim_req_s *m;
	static struct fcp_cmnd_s cmnd_z0 = { { { 0 } } };
	struct bfi_sge_s *sge, *sgpge;
	u32 pgdlen = 0;
	u32 fcp_dl;
	u64 addr;
	struct scatterlist *sg;
	struct bfa_sgpg_s *sgpg;
	struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;
	u32 i, sge_id, pgcumsz;
	enum dma_data_direction dmadir;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(ioim->bfa, ioim->reqq);
	if (!m) {
		bfa_stats(ioim->itnim, qwait);
		bfa_reqq_wait(ioim->bfa, ioim->reqq,
				  &ioim->iosp->reqq_wait);
		return BFA_FALSE;
	}

	/*
	 * build i/o request message next
	 */
	m->io_tag = cpu_to_be16(ioim->iotag);
	m->rport_hdl = ioim->itnim->rport->fw_handle;
	m->io_timeout = 0;

	sge = &m->sges[0];
	sgpg = ioim->sgpg;
	sge_id = 0;
	sgpge = NULL;
	pgcumsz = 0;
	scsi_for_each_sg(cmnd, sg, ioim->nsges, i) {
		if (i == 0) {
			/* build inline IO SG element */
			addr = bfa_sgaddr_le(sg_dma_address(sg));
			sge->sga = *(union bfi_addr_u *) &addr;
			pgdlen = sg_dma_len(sg);
			sge->sg_len = pgdlen;
			sge->flags = (ioim->nsges > BFI_SGE_INLINE) ?
					BFI_SGE_DATA_CPL : BFI_SGE_DATA_LAST;
			bfa_sge_to_be(sge);
			sge++;
		} else {
			if (sge_id == 0)
				sgpge = sgpg->sgpg->sges;

			addr = bfa_sgaddr_le(sg_dma_address(sg));
			sgpge->sga = *(union bfi_addr_u *) &addr;
			sgpge->sg_len = sg_dma_len(sg);
			pgcumsz += sgpge->sg_len;

			/* set flags */
			if (i < (ioim->nsges - 1) &&
					sge_id < (BFI_SGPG_DATA_SGES - 1))
				sgpge->flags = BFI_SGE_DATA;
			else if (i < (ioim->nsges - 1))
				sgpge->flags = BFI_SGE_DATA_CPL;
			else
				sgpge->flags = BFI_SGE_DATA_LAST;

			bfa_sge_to_le(sgpge);

			sgpge++;
			if (i == (ioim->nsges - 1)) {
				sgpge->flags = BFI_SGE_PGDLEN;
				sgpge->sga.a32.addr_lo = 0;
				sgpge->sga.a32.addr_hi = 0;
				sgpge->sg_len = pgcumsz;
				bfa_sge_to_le(sgpge);
			} else if (++sge_id == BFI_SGPG_DATA_SGES) {
				sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg);
				sgpge->flags = BFI_SGE_LINK;
				sgpge->sga = sgpg->sgpg_pa;
				sgpge->sg_len = pgcumsz;
				bfa_sge_to_le(sgpge);
				sge_id = 0;
				pgcumsz = 0;
			}
		}
	}

	if (ioim->nsges > BFI_SGE_INLINE) {
		sge->sga = ioim->sgpg->sgpg_pa;
	} else {
		sge->sga.a32.addr_lo = 0;
		sge->sga.a32.addr_hi = 0;
	}
	sge->sg_len = pgdlen;
	sge->flags = BFI_SGE_PGDLEN;
	bfa_sge_to_be(sge);

	/*
	 * set up I/O command parameters
	 */
	m->cmnd = cmnd_z0;
	int_to_scsilun(cmnd->device->lun, &m->cmnd.lun);
	dmadir = cmnd->sc_data_direction;
	if (dmadir == DMA_TO_DEVICE)
		m->cmnd.iodir = FCP_IODIR_WRITE;
	else if (dmadir == DMA_FROM_DEVICE)
		m->cmnd.iodir = FCP_IODIR_READ;
	else
		m->cmnd.iodir = FCP_IODIR_NONE;

	m->cmnd.cdb = *(struct scsi_cdb_s *) cmnd->cmnd;
	fcp_dl = scsi_bufflen(cmnd);
	m->cmnd.fcp_dl = cpu_to_be32(fcp_dl);

	/*
	 * set up I/O message header
	 */
	switch (m->cmnd.iodir) {
	case FCP_IODIR_READ:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_READ, 0, bfa_fn_lpu(ioim->bfa));
		bfa_stats(itnim, input_reqs);
		ioim->itnim->stats.rd_throughput += fcp_dl;
		break;
	case FCP_IODIR_WRITE:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_WRITE, 0, bfa_fn_lpu(ioim->bfa));
		bfa_stats(itnim, output_reqs);
		ioim->itnim->stats.wr_throughput += fcp_dl;
		break;
	case FCP_IODIR_RW:
		bfa_stats(itnim, input_reqs);
		bfa_stats(itnim, output_reqs);
		/* fall through */
	default:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_fn_lpu(ioim->bfa));
	}
	if (itnim->seq_rec ||
	    (scsi_bufflen(cmnd) & (sizeof(u32) - 1)))
		bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_fn_lpu(ioim->bfa));

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(ioim->bfa, ioim->reqq, m->mh);
	return BFA_TRUE;
}
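
/*
 * SG layout produced above: the first scatterlist entry goes into the
 * inline SGE of the request itself; the rest are written into SG pages,
 * BFI_SGPG_DATA_SGES data elements per page, where the last element of
 * a full page becomes a BFI_SGE_LINK to the next page and a trailing
 * BFI_SGE_PGDLEN element carries the cumulative page length.
 */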
/*
 * Set up any additional SG pages needed. The inline SG element is set up
 * at queuing time.
 */
static bfa_boolean_t
bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim)
{
	u16 nsgpgs;

	WARN_ON(ioim->nsges <= BFI_SGE_INLINE);

	/*
	 * allocate SG pages needed
	 */
	nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
	if (!nsgpgs)
		return BFA_TRUE;

	if (bfa_sgpg_malloc(ioim->bfa, &ioim->sgpg_q, nsgpgs)
	    != BFA_STATUS_OK) {
		bfa_sgpg_wait(ioim->bfa, &ioim->iosp->sgpg_wqe, nsgpgs);
		return BFA_FALSE;
	}

	ioim->nsgpgs = nsgpgs;
	ioim->sgpg = bfa_q_first(&ioim->sgpg_q);

	return BFA_TRUE;
}

/*
 * Send I/O abort request to firmware.
 */
static bfa_boolean_t
bfa_ioim_send_abort(struct bfa_ioim_s *ioim)
{
	struct bfi_ioim_abort_req_s *m;
	enum bfi_ioim_h2i msgop;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(ioim->bfa, ioim->reqq);
	if (!m)
		return BFA_FALSE;

	/*
	 * build abort request message next
	 */
	if (ioim->iosp->abort_explicit)
		msgop = BFI_IOIM_H2I_IOABORT_REQ;
	else
		msgop = BFI_IOIM_H2I_IOCLEANUP_REQ;

	bfi_h2i_set(m->mh, BFI_MC_IOIM, msgop, bfa_fn_lpu(ioim->bfa));
	m->io_tag    = cpu_to_be16(ioim->iotag);
	m->abort_tag = ++ioim->abort_tag;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(ioim->bfa, ioim->reqq, m->mh);
	return BFA_TRUE;
}
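
/*
 * Note: abort_tag is bumped on every abort/cleanup request so that a
 * stale BFI_IOIM_STS_HOST_ABORTED response can be detected in
 * bfa_ioim_isr() (abort_tag mismatch) and dropped.
 */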
/*
 * Call to resume any I/O requests waiting for room in request queue.
 */
static void
bfa_ioim_qresume(void *cbarg)
{
	struct bfa_ioim_s *ioim = cbarg;

	bfa_stats(ioim->itnim, qresumes);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_QRESUME);
}

static void
bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim)
{
	/*
	 * Move IO from itnim queue to fcpim global queue since itnim will be
	 * freed.
	 */
	list_del(&ioim->qe);
	list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);

	if (!ioim->iosp->tskim) {
		if (ioim->fcpim->delay_comp && ioim->itnim->iotov_active) {
			bfa_cb_dequeue(&ioim->hcb_qe);
			list_del(&ioim->qe);
			list_add_tail(&ioim->qe, &ioim->itnim->delay_comp_q);
		}
		bfa_itnim_iodone(ioim->itnim);
	} else
		bfa_wc_down(&ioim->iosp->tskim->wc);
}

static bfa_boolean_t
bfa_ioim_is_abortable(struct bfa_ioim_s *ioim)
{
	if ((bfa_sm_cmp_state(ioim, bfa_ioim_sm_uninit) &&
	    (!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim))) ||
	    (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort)) ||
	    (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort_qfull)) ||
	    (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb)) ||
	    (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb_free)) ||
	    (bfa_sm_cmp_state(ioim, bfa_ioim_sm_resfree)))
		return BFA_FALSE;

	return BFA_TRUE;
}
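
/*
 * An IO is not abortable when it was never started (uninit and not on
 * the pending queue), when an abort is already in progress, or when
 * only host-callback/resource-free bookkeeping remains; bfa_ioim_abort()
 * returns BFA_STATUS_FAILED in those cases.
 */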
void
bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov)
{
	/*
	 * If path tov timer expired, failback with PATHTOV status - these
	 * IO requests are not normally retried by IO stack.
	 *
	 * Otherwise the device came back online; fail it with normal failed
	 * status so that the IO stack retries these failed IO requests.
	 */
	if (iotov)
		ioim->io_cbfn = __bfa_cb_ioim_pathtov;
	else {
		ioim->io_cbfn = __bfa_cb_ioim_failed;
		bfa_stats(ioim->itnim, iocom_nexus_abort);
	}
	bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);

	/*
	 * Move IO to fcpim global queue since itnim will be
	 * freed.
	 */
	list_del(&ioim->qe);
	list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
}

/*
 * Memory allocation and initialization.
 */
void
bfa_ioim_attach(struct bfa_fcpim_s *fcpim)
{
	struct bfa_ioim_s	*ioim;
	struct bfa_fcp_mod_s	*fcp = fcpim->fcp;
	struct bfa_ioim_sp_s	*iosp;
	u16			i;

	/*
	 * claim memory first
	 */
	ioim = (struct bfa_ioim_s *) bfa_mem_kva_curp(fcp);
	fcpim->ioim_arr = ioim;
	bfa_mem_kva_curp(fcp) = (u8 *) (ioim + fcpim->fcp->num_ioim_reqs);

	iosp = (struct bfa_ioim_sp_s *) bfa_mem_kva_curp(fcp);
	fcpim->ioim_sp_arr = iosp;
	bfa_mem_kva_curp(fcp) = (u8 *) (iosp + fcpim->fcp->num_ioim_reqs);

	/*
	 * Initialize ioim free queues
	 */
	INIT_LIST_HEAD(&fcpim->ioim_resfree_q);
	INIT_LIST_HEAD(&fcpim->ioim_comp_q);

	for (i = 0; i < fcpim->fcp->num_ioim_reqs;
	     i++, ioim++, iosp++) {
		/*
		 * initialize IOIM
		 */
		memset(ioim, 0, sizeof(struct bfa_ioim_s));
		ioim->iotag = i;
		ioim->bfa = fcpim->bfa;
		ioim->fcpim = fcpim;
		ioim->iosp = iosp;
		INIT_LIST_HEAD(&ioim->sgpg_q);
		bfa_reqq_winit(&ioim->iosp->reqq_wait,
				   bfa_ioim_qresume, ioim);
		bfa_sgpg_winit(&ioim->iosp->sgpg_wqe,
				   bfa_ioim_sgpg_alloced, ioim);
		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
	}
}

void
bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
	struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
	struct bfa_ioim_s *ioim;
	u16 iotag;
	enum bfa_ioim_event evt = BFA_IOIM_SM_COMP;

	iotag = be16_to_cpu(rsp->io_tag);

	ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
	WARN_ON(ioim->iotag != iotag);

	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, rsp->io_status);
	bfa_trc(ioim->bfa, rsp->reuse_io_tag);

	if (bfa_sm_cmp_state(ioim, bfa_ioim_sm_active))
		ioim->iosp->comp_rspmsg = *m;

	switch (rsp->io_status) {
	case BFI_IOIM_STS_OK:
		bfa_stats(ioim->itnim, iocomp_ok);
		if (rsp->reuse_io_tag == 0)
			evt = BFA_IOIM_SM_DONE;
		else
			evt = BFA_IOIM_SM_COMP;
		break;

	case BFI_IOIM_STS_TIMEDOUT:
		bfa_stats(ioim->itnim, iocomp_timedout);
		/* fall through */
	case BFI_IOIM_STS_ABORTED:
		rsp->io_status = BFI_IOIM_STS_ABORTED;
		bfa_stats(ioim->itnim, iocomp_aborted);
		if (rsp->reuse_io_tag == 0)
			evt = BFA_IOIM_SM_DONE;
		else
			evt = BFA_IOIM_SM_COMP;
		break;

	case BFI_IOIM_STS_PROTO_ERR:
		bfa_stats(ioim->itnim, iocom_proto_err);
		WARN_ON(!rsp->reuse_io_tag);
		evt = BFA_IOIM_SM_COMP;
		break;

	case BFI_IOIM_STS_SQER_NEEDED:
		bfa_stats(ioim->itnim, iocom_sqer_needed);
		WARN_ON(rsp->reuse_io_tag != 0);
		evt = BFA_IOIM_SM_SQRETRY;
		break;

	case BFI_IOIM_STS_RES_FREE:
		bfa_stats(ioim->itnim, iocom_res_free);
		evt = BFA_IOIM_SM_FREE;
		break;

	case BFI_IOIM_STS_HOST_ABORTED:
		bfa_stats(ioim->itnim, iocom_hostabrts);
		if (rsp->abort_tag != ioim->abort_tag) {
			bfa_trc(ioim->bfa, rsp->abort_tag);
			bfa_trc(ioim->bfa, ioim->abort_tag);
			return;
		}

		if (rsp->reuse_io_tag)
			evt = BFA_IOIM_SM_ABORT_COMP;
		else
			evt = BFA_IOIM_SM_ABORT_DONE;
		break;

	case BFI_IOIM_STS_UTAG:
		bfa_stats(ioim->itnim, iocom_utags);
		evt = BFA_IOIM_SM_COMP_UTAG;
		break;

	default:
		WARN_ON(1);
	}

	bfa_sm_send_event(ioim, evt);
}
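
/*
 * reuse_io_tag selects the completion event above: zero means firmware
 * still owns the IO tag and will release it with a later
 * BFI_IOIM_STS_RES_FREE (SM_DONE path through hcb_free/resfree), while
 * non-zero means the tag may be recycled as soon as the host callback
 * runs (SM_COMP path through hcb).
 */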
void
bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
	struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
	struct bfa_ioim_s *ioim;
	u16 iotag;

	iotag = be16_to_cpu(rsp->io_tag);

	ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
	WARN_ON(ioim->iotag != iotag);

	bfa_ioim_cb_profile_comp(fcpim, ioim);

	bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
}

/*
 * Called by itnim to clean up IO while going offline.
 */
void
bfa_ioim_cleanup(struct bfa_ioim_s *ioim)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_stats(ioim->itnim, io_cleanups);

	ioim->iosp->tskim = NULL;
	bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
}

void
bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim, struct bfa_tskim_s *tskim)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_stats(ioim->itnim, io_tmaborts);

	ioim->iosp->tskim = tskim;
	bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
}

/*
 * IOC failure handling.
 */
void
bfa_ioim_iocdisable(struct bfa_ioim_s *ioim)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_stats(ioim->itnim, io_iocdowns);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_HWFAIL);
}

/*
 * IO offline TOV popped. Fail the pending IO.
 */
void
bfa_ioim_tov(struct bfa_ioim_s *ioim)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_IOTOV);
}

/*
 * Allocate IOIM resource for initiator mode I/O request.
 */
struct bfa_ioim_s *
bfa_ioim_alloc(struct bfa_s *bfa, struct bfad_ioim_s *dio,
		struct bfa_itnim_s *itnim, u16 nsges)
{
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
	struct bfa_ioim_s *ioim;
	struct bfa_iotag_s *iotag = NULL;

	/*
	 * allocate IOIM resource
	 */
	bfa_q_deq(&fcpim->fcp->iotag_ioim_free_q, &iotag);
	if (!iotag) {
		bfa_stats(itnim, no_iotags);
		return NULL;
	}

	ioim = BFA_IOIM_FROM_TAG(fcpim, iotag->tag);

	ioim->dio = dio;
	ioim->itnim = itnim;
	ioim->nsges = nsges;
	ioim->nsgpgs = 0;

	bfa_stats(itnim, total_ios);
	fcpim->ios_active++;

	list_add_tail(&ioim->qe, &itnim->io_q);

	return ioim;
}

void
bfa_ioim_free(struct bfa_ioim_s *ioim)
{
	struct bfa_fcpim_s *fcpim = ioim->fcpim;
	struct bfa_iotag_s *iotag;

	if (ioim->nsgpgs > 0)
		bfa_sgpg_mfree(ioim->bfa, &ioim->sgpg_q, ioim->nsgpgs);

	bfa_stats(ioim->itnim, io_comps);
	fcpim->ios_active--;

	ioim->iotag &= BFA_IOIM_IOTAG_MASK;

	WARN_ON(!(ioim->iotag <
		  (fcpim->fcp->num_ioim_reqs + fcpim->fcp->num_fwtio_reqs)));
	iotag = BFA_IOTAG_FROM_TAG(fcpim->fcp, ioim->iotag);

	if (ioim->iotag < fcpim->fcp->num_ioim_reqs)
		list_add_tail(&iotag->qe, &fcpim->fcp->iotag_ioim_free_q);
	else
		list_add_tail(&iotag->qe, &fcpim->fcp->iotag_tio_free_q);

	list_del(&ioim->qe);
}
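
/*
 * Note: masking with BFA_IOIM_IOTAG_MASK suggests retry state is kept
 * in the upper bits of iotag (see bfa_ioim_update_iotag(), defined
 * elsewhere in this file); after masking, tags below num_ioim_reqs
 * return to the initiator IO pool and the rest to the target IO pool.
 */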
void
bfa_ioim_start(struct bfa_ioim_s *ioim)
{
	bfa_ioim_cb_profile_start(ioim->fcpim, ioim);

	/*
	 * Obtain the queue over which this request has to be issued
	 */
	ioim->reqq = bfa_fcpim_ioredirect_enabled(ioim->bfa) ?
			BFA_FALSE : bfa_itnim_get_reqq(ioim);

	bfa_sm_send_event(ioim, BFA_IOIM_SM_START);
}

/*
 * Driver I/O abort request.
 */
bfa_status_t
bfa_ioim_abort(struct bfa_ioim_s *ioim)
{
	bfa_trc(ioim->bfa, ioim->iotag);

	if (!bfa_ioim_is_abortable(ioim))
		return BFA_STATUS_FAILED;

	bfa_stats(ioim->itnim, io_aborts);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_ABORT);

	return BFA_STATUS_OK;
}

/*
 * BFA TSKIM state machine functions
 */

/*
 * Task management command beginning state.
 */
static void
bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);

	switch (event) {
	case BFA_TSKIM_SM_START:
		bfa_sm_set_state(tskim, bfa_tskim_sm_active);
		bfa_tskim_gather_ios(tskim);

		/*
		 * If device is offline, do not send TM on wire. Just cleanup
		 * any pending IO requests and complete TM request.
		 */
		if (!bfa_itnim_is_online(tskim->itnim)) {
			bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
			tskim->tsk_status = BFI_TSKIM_STS_OK;
			bfa_tskim_cleanup_ios(tskim);
			return;
		}

		if (!bfa_tskim_send(tskim)) {
			bfa_sm_set_state(tskim, bfa_tskim_sm_qfull);
			bfa_stats(tskim->itnim, tm_qwait);
			bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
					  &tskim->reqq_wait);
		}
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}

/*
 * TM command is active, awaiting completion from firmware to
 * cleanup IO requests in TM scope.
 */
static void
bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);

	switch (event) {
	case BFA_TSKIM_SM_DONE:
		bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
		bfa_tskim_cleanup_ios(tskim);
		break;

	case BFA_TSKIM_SM_CLEANUP:
		bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
		if (!bfa_tskim_send_abort(tskim)) {
			bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup_qfull);
			bfa_stats(tskim->itnim, tm_qwait);
			bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
				&tskim->reqq_wait);
		}
		break;

	case BFA_TSKIM_SM_HWFAIL:
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}

/*
 * An active TM is being cleaned up since ITN is offline. Awaiting cleanup
 * completion event from firmware.
 */
static void
bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);

	switch (event) {
	case BFA_TSKIM_SM_DONE:
		/*
		 * Ignore and wait for ABORT completion from firmware.
		 */
		break;

	case BFA_TSKIM_SM_UTAG:
	case BFA_TSKIM_SM_CLEANUP_DONE:
		bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
		bfa_tskim_cleanup_ios(tskim);
		break;

	case BFA_TSKIM_SM_HWFAIL:
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}

static void
bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);

	switch (event) {
	case BFA_TSKIM_SM_IOS_DONE:
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_done);
		break;

	case BFA_TSKIM_SM_CLEANUP:
		/*
		 * Ignore, TM command completed on wire.
		 * Notify TM completion on IO cleanup completion.
		 */
		break;

	case BFA_TSKIM_SM_HWFAIL:
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}

/*
 * Task management command is waiting for room in request CQ
 */
static void
bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);

	switch (event) {
	case BFA_TSKIM_SM_QRESUME:
		bfa_sm_set_state(tskim, bfa_tskim_sm_active);
		bfa_tskim_send(tskim);
		break;

	case BFA_TSKIM_SM_CLEANUP:
		/*
		 * No need to send TM on wire since ITN is offline.
		 */
		bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
		bfa_reqq_wcancel(&tskim->reqq_wait);
		bfa_tskim_cleanup_ios(tskim);
		break;

	case BFA_TSKIM_SM_HWFAIL:
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_reqq_wcancel(&tskim->reqq_wait);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}

/*
 * Task management command is active, awaiting room in the request CQ
 * to send a cleanup request.
 */
static void
bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
		enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);

	switch (event) {
	case BFA_TSKIM_SM_DONE:
		bfa_reqq_wcancel(&tskim->reqq_wait);
		/*
		 * Fall through !!!
		 */
	case BFA_TSKIM_SM_QRESUME:
		bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
		bfa_tskim_send_abort(tskim);
		break;

	case BFA_TSKIM_SM_HWFAIL:
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_reqq_wcancel(&tskim->reqq_wait);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}

/*
 * BFA callback is pending
 */
static void
bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);

	switch (event) {
	case BFA_TSKIM_SM_HCB:
		bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);
		bfa_tskim_free(tskim);
		break;

	case BFA_TSKIM_SM_CLEANUP:
		bfa_tskim_notify_comp(tskim);
		break;

	case BFA_TSKIM_SM_HWFAIL:
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}

static void
__bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_tskim_s *tskim = cbarg;

	if (!complete) {
		bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
		return;
	}

	bfa_stats(tskim->itnim, tm_success);
	bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk, tskim->tsk_status);
}

static void
__bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_tskim_s *tskim = cbarg;

	if (!complete) {
		bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
		return;
	}

	bfa_stats(tskim->itnim, tm_failures);
	bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk,
				BFI_TSKIM_STS_FAILED);
}

static bfa_boolean_t
bfa_tskim_match_scope(struct bfa_tskim_s *tskim, struct scsi_lun lun)
{
	switch (tskim->tm_cmnd) {
	case FCP_TM_TARGET_RESET:
		return BFA_TRUE;

	case FCP_TM_ABORT_TASK_SET:
	case FCP_TM_CLEAR_TASK_SET:
	case FCP_TM_LUN_RESET:
	case FCP_TM_CLEAR_ACA:
		return !memcmp(&tskim->lun, &lun, sizeof(lun));

	default:
		WARN_ON(1);
	}

	return BFA_FALSE;
}
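
/*
 * TM scope: a target reset covers every IO on the ITN, while task-set,
 * LUN-reset and clear-ACA TMs only match IO requests addressed to the
 * same LUN the TM was issued against.
 */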
/*
 * Gather affected IO requests and task management commands.
 */
static void
bfa_tskim_gather_ios(struct bfa_tskim_s *tskim)
{
	struct bfa_itnim_s *itnim = tskim->itnim;
	struct bfa_ioim_s *ioim;
	struct list_head *qe, *qen;
	struct scsi_cmnd *cmnd;
	struct scsi_lun scsilun;

	INIT_LIST_HEAD(&tskim->io_q);

	/*
	 * Gather any active IO requests first.
	 */
	list_for_each_safe(qe, qen, &itnim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;
		cmnd = (struct scsi_cmnd *) ioim->dio;
		int_to_scsilun(cmnd->device->lun, &scsilun);
		if (bfa_tskim_match_scope(tskim, scsilun)) {
			list_del(&ioim->qe);
			list_add_tail(&ioim->qe, &tskim->io_q);
		}
	}

	/*
	 * Fail back any pending IO requests immediately.
	 */
	list_for_each_safe(qe, qen, &itnim->pending_q) {
		ioim = (struct bfa_ioim_s *) qe;
		cmnd = (struct scsi_cmnd *) ioim->dio;
		int_to_scsilun(cmnd->device->lun, &scsilun);
		if (bfa_tskim_match_scope(tskim, scsilun)) {
			list_del(&ioim->qe);
			list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
			bfa_ioim_tov(ioim);
		}
	}
}

/*
 * IO cleanup completion
 */
static void
bfa_tskim_cleanp_comp(void *tskim_cbarg)
{
	struct bfa_tskim_s *tskim = tskim_cbarg;

	bfa_stats(tskim->itnim, tm_io_comps);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_IOS_DONE);
}

/*
 * Clean up all IO requests gathered for this task management command.
 */
static void
bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim)
{
	struct bfa_ioim_s *ioim;
	struct list_head *qe, *qen;

	bfa_wc_init(&tskim->wc, bfa_tskim_cleanp_comp, tskim);

	list_for_each_safe(qe, qen, &tskim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_wc_up(&tskim->wc);
		bfa_ioim_cleanup_tm(ioim, tskim);
	}

	bfa_wc_wait(&tskim->wc);
}

/*
 * Send task management request to firmware.
 */
static bfa_boolean_t
bfa_tskim_send(struct bfa_tskim_s *tskim)
{
	struct bfa_itnim_s *itnim = tskim->itnim;
	struct bfi_tskim_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(tskim->bfa, itnim->reqq);
	if (!m)
		return BFA_FALSE;

	/*
	 * build i/o request message next
	 */
	bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_TM_REQ,
			bfa_fn_lpu(tskim->bfa));

	m->tsk_tag = cpu_to_be16(tskim->tsk_tag);
	m->itn_fhdl = tskim->itnim->rport->fw_handle;
	m->t_secs = tskim->tsecs;
	m->lun = tskim->lun;
	m->tm_flags = tskim->tm_cmnd;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(tskim->bfa, itnim->reqq, m->mh);
	return BFA_TRUE;
}

/*
 * Send abort request to clean up an active TM in firmware.
 */
static bfa_boolean_t
bfa_tskim_send_abort(struct bfa_tskim_s *tskim)
{
	struct bfa_itnim_s *itnim = tskim->itnim;
	struct bfi_tskim_abortreq_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(tskim->bfa, itnim->reqq);
	if (!m)
		return BFA_FALSE;

	/*
	 * build abort request message next
	 */
	bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_ABORT_REQ,
			bfa_fn_lpu(tskim->bfa));

	m->tsk_tag = cpu_to_be16(tskim->tsk_tag);

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(tskim->bfa, itnim->reqq, m->mh);
	return BFA_TRUE;
}

/*
 * Resume a task management command that was waiting for room in the
 * request queue.
 */
static void
bfa_tskim_qresume(void *cbarg)
{
	struct bfa_tskim_s *tskim = cbarg;

	bfa_stats(tskim->itnim, tm_qresumes);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_QRESUME);
}

/*
 * Clean up IOs associated with a task management command on IOC failures.
 */
static void
bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim)
{
	struct bfa_ioim_s *ioim;
	struct list_head *qe, *qen;

	list_for_each_safe(qe, qen, &tskim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_ioim_iocdisable(ioim);
	}
}

/*
 * Notification on completions from related ioim.
 */
void
bfa_tskim_iodone(struct bfa_tskim_s *tskim)
{
	bfa_wc_down(&tskim->wc);
}

/*
 * Handle IOC h/w failure notification from itnim.
 */
void
bfa_tskim_iocdisable(struct bfa_tskim_s *tskim)
{
	tskim->notify = BFA_FALSE;
	bfa_stats(tskim->itnim, tm_iocdowns);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_HWFAIL);
}

/*
 * Cleanup TM command and associated IOs as part of ITNIM offline.
 */
void
bfa_tskim_cleanup(struct bfa_tskim_s *tskim)
{
	tskim->notify = BFA_TRUE;
	bfa_stats(tskim->itnim, tm_cleanups);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP);
}

/*
 * Memory allocation and initialization.
 */
void
bfa_tskim_attach(struct bfa_fcpim_s *fcpim)
{
	struct bfa_tskim_s *tskim;
	struct bfa_fcp_mod_s *fcp = fcpim->fcp;
	u16 i;

	INIT_LIST_HEAD(&fcpim->tskim_free_q);
	INIT_LIST_HEAD(&fcpim->tskim_unused_q);

	tskim = (struct bfa_tskim_s *) bfa_mem_kva_curp(fcp);
	fcpim->tskim_arr = tskim;

	for (i = 0; i < fcpim->num_tskim_reqs; i++, tskim++) {
		/*
		 * initialize TSKIM
		 */
		memset(tskim, 0, sizeof(struct bfa_tskim_s));
		tskim->tsk_tag = i;
		tskim->bfa = fcpim->bfa;
		tskim->fcpim = fcpim;
		tskim->notify = BFA_FALSE;
		bfa_reqq_winit(&tskim->reqq_wait, bfa_tskim_qresume,
				tskim);
		bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);

		list_add_tail(&tskim->qe, &fcpim->tskim_free_q);
	}

	bfa_mem_kva_curp(fcp) = (u8 *) tskim;
}

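/*
 * Handle task management response messages from firmware.
 */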
void
bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
	struct bfi_tskim_rsp_s *rsp = (struct bfi_tskim_rsp_s *) m;
	struct bfa_tskim_s *tskim;
	u16 tsk_tag = be16_to_cpu(rsp->tsk_tag);

	tskim = BFA_TSKIM_FROM_TAG(fcpim, tsk_tag);
	WARN_ON(tskim->tsk_tag != tsk_tag);

	tskim->tsk_status = rsp->tsk_status;

	/*
	 * Firmware sends BFI_TSKIM_STS_ABORTED status for abort
	 * requests. All other statuses are for normal completions.
	 */
	if (rsp->tsk_status == BFI_TSKIM_STS_ABORTED) {
		bfa_stats(tskim->itnim, tm_cleanup_comps);
		bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP_DONE);
	} else if (rsp->tsk_status == BFI_TSKIM_STS_UTAG) {
		bfa_sm_send_event(tskim, BFA_TSKIM_SM_UTAG);
	} else {
		bfa_stats(tskim->itnim, tm_fw_rsps);
		bfa_sm_send_event(tskim, BFA_TSKIM_SM_DONE);
	}
}

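/*
 * Allocate a task management command instance from the free queue;
 * returns NULL when all TM resources are in use.
 *
 * Illustrative call sequence (hypothetical caller, not part of this
 * file): allocate a TSKIM, then start it against an i-t nexus:
 *
 *	tskim = bfa_tskim_alloc(bfa, dtsk);
 *	if (tskim)
 *		bfa_tskim_start(tskim, itnim, scsilun,
 *				FCP_TM_LUN_RESET, tsecs);
 */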
struct bfa_tskim_s *
bfa_tskim_alloc(struct bfa_s *bfa, struct bfad_tskim_s *dtsk)
{
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
	struct bfa_tskim_s *tskim;

	bfa_q_deq(&fcpim->tskim_free_q, &tskim);

	if (tskim)
		tskim->dtsk = dtsk;

	return tskim;
}

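/*
 * Return a task management command instance to the free queue. The
 * command must still be linked on its itnim's tsk_q.
 */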
void
bfa_tskim_free(struct bfa_tskim_s *tskim)
{
	WARN_ON(!bfa_q_is_on_q_func(&tskim->itnim->tsk_q, &tskim->qe));
	list_del(&tskim->qe);
	list_add_tail(&tskim->qe, &tskim->fcpim->tskim_free_q);
}

/*
 * Start a task management command.
 *
 * @param[in]	tskim	BFA task management command instance
 * @param[in]	itnim	i-t nexus for the task management command
 * @param[in]	lun	lun, if applicable
 * @param[in]	tm_cmnd	Task management command code.
 * @param[in]	tsecs	Timeout in seconds
 *
 * @return None.
 */
void
bfa_tskim_start(struct bfa_tskim_s *tskim, struct bfa_itnim_s *itnim,
		struct scsi_lun lun,
		enum fcp_tm_cmnd tm_cmnd, u8 tsecs)
{
	tskim->itnim = itnim;
	tskim->lun = lun;
	tskim->tm_cmnd = tm_cmnd;
	tskim->tsecs = tsecs;
	tskim->notify = BFA_FALSE;
	bfa_stats(itnim, tm_cmnds);

	list_add_tail(&tskim->qe, &itnim->tsk_q);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_START);
}

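/*
 * Reconfigure TM resources: park any TSKIMs beyond the count configured
 * in firmware onto the unused queue.
 */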
void
bfa_tskim_res_recfg(struct bfa_s *bfa, u16 num_tskim_fw)
{
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
	struct list_head *qe;
	int i;

	for (i = 0; i < (fcpim->num_tskim_reqs - num_tskim_fw); i++) {
		bfa_q_deq_tail(&fcpim->tskim_free_q, &qe);
		list_add_tail(qe, &fcpim->tskim_unused_q);
	}
}

/* BFA FCP module - parent module for fcpim */

BFA_MODULE(fcp);

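/*
 * Compute the KVA and DMA (SCSI sense buffer) memory required by the
 * FCP module for the requested IO configuration.
 */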
static void
bfa_fcp_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
		struct bfa_s *bfa)
{
	struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
	struct bfa_mem_kva_s *fcp_kva = BFA_MEM_FCP_KVA(bfa);
	struct bfa_mem_dma_s *seg_ptr;
	u16 nsegs, idx, per_seg_ios, num_io_req;
	u32 km_len = 0;

	/*
	 * ZERO is an allowed config value for num_ioim_reqs and
	 * num_fwtio_reqs. If the values are non-zero, clamp them to the
	 * supported range.
	 */
	if (cfg->fwcfg.num_ioim_reqs &&
	    cfg->fwcfg.num_ioim_reqs < BFA_IOIM_MIN)
		cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
	else if (cfg->fwcfg.num_ioim_reqs > BFA_IOIM_MAX)
		cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX;

	if (cfg->fwcfg.num_fwtio_reqs > BFA_FWTIO_MAX)
		cfg->fwcfg.num_fwtio_reqs = BFA_FWTIO_MAX;

	num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
	if (num_io_req > BFA_IO_MAX) {
		if (cfg->fwcfg.num_ioim_reqs && cfg->fwcfg.num_fwtio_reqs) {
			cfg->fwcfg.num_ioim_reqs = BFA_IO_MAX/2;
			cfg->fwcfg.num_fwtio_reqs = BFA_IO_MAX/2;
		} else if (cfg->fwcfg.num_fwtio_reqs)
			cfg->fwcfg.num_fwtio_reqs = BFA_FWTIO_MAX;
		else
			cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX;
	}

	bfa_fcpim_meminfo(cfg, &km_len);

	num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
	km_len += num_io_req * sizeof(struct bfa_iotag_s);
	km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itn_s);

	/* dma memory */
	nsegs = BFI_MEM_DMA_NSEGS(num_io_req, BFI_IOIM_SNSLEN);
	per_seg_ios = BFI_MEM_NREQS_SEG(BFI_IOIM_SNSLEN);

	bfa_mem_dma_seg_iter(fcp, seg_ptr, nsegs, idx) {
		if (num_io_req >= per_seg_ios) {
			num_io_req -= per_seg_ios;
			bfa_mem_dma_setup(minfo, seg_ptr,
				per_seg_ios * BFI_IOIM_SNSLEN);
		} else
			bfa_mem_dma_setup(minfo, seg_ptr,
				num_io_req * BFI_IOIM_SNSLEN);
	}

	/* kva memory */
	bfa_mem_kva_setup(minfo, fcp_kva, km_len);
}

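/*
 * Attach-time initialization: claim the DMA segments used for SCSI
 * sense buffers, program their base addresses into the IOC config,
 * and carve KVA for the iotag and ITN arrays.
 */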
static void
bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
	struct bfa_mem_dma_s *seg_ptr;
	u16 idx, nsegs, num_io_req;

	fcp->max_ioim_reqs = cfg->fwcfg.num_ioim_reqs;
	fcp->num_ioim_reqs = cfg->fwcfg.num_ioim_reqs;
	fcp->num_fwtio_reqs = cfg->fwcfg.num_fwtio_reqs;
	fcp->num_itns = cfg->fwcfg.num_rports;
	fcp->bfa = bfa;

	/*
	 * Setup the pool of snsbase addresses that is passed to fw as
	 * part of bfi_iocfc_cfg_s.
	 */
	num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
	nsegs = BFI_MEM_DMA_NSEGS(num_io_req, BFI_IOIM_SNSLEN);

	bfa_mem_dma_seg_iter(fcp, seg_ptr, nsegs, idx) {
		if (!bfa_mem_dma_virt(seg_ptr))
			break;

		fcp->snsbase[idx].pa = bfa_mem_dma_phys(seg_ptr);
		fcp->snsbase[idx].kva = bfa_mem_dma_virt(seg_ptr);
		bfa_iocfc_set_snsbase(bfa, idx, fcp->snsbase[idx].pa);
	}

	fcp->throttle_update_required = 1;
	bfa_fcpim_attach(fcp, bfad, cfg, pcidev);

	bfa_iotag_attach(fcp);

	fcp->itn_arr = (struct bfa_itn_s *) bfa_mem_kva_curp(fcp);
	bfa_mem_kva_curp(fcp) = (u8 *)fcp->itn_arr +
			(fcp->num_itns * sizeof(struct bfa_itn_s));
	memset(fcp->itn_arr, 0,
			(fcp->num_itns * sizeof(struct bfa_itn_s)));
}

static void
bfa_fcp_detach(struct bfa_s *bfa)
{
}

static void
bfa_fcp_start(struct bfa_s *bfa)
{
	struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);

	/*
	 * bfa_init() with flash read is complete. Now invalidate the
	 * stale content of the lun mask (unit attention, rp tag and
	 * lp tag).
	 */
	bfa_ioim_lm_init(fcp->bfa);
}

static void
bfa_fcp_stop(struct bfa_s *bfa)
{
}

static void
bfa_fcp_iocdisable(struct bfa_s *bfa)
{
	struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);

	bfa_fcpim_iocdisable(fcp);
}

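/*
 * Reconcile the driver's IO tag resources with the IO count actually
 * configured in firmware; any excess iotags are parked on the unused
 * queue. The throttle bookkeeping is updated only once, at driver load.
 */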
void
bfa_fcp_res_recfg(struct bfa_s *bfa, u16 num_ioim_fw, u16 max_ioim_fw)
{
	struct bfa_fcp_mod_s *mod = BFA_FCP_MOD(bfa);
	struct list_head *qe;
	int i;

	/* Update io throttle value only once during driver load time */
	if (!mod->throttle_update_required)
		return;

	for (i = 0; i < (mod->num_ioim_reqs - num_ioim_fw); i++) {
		bfa_q_deq_tail(&mod->iotag_ioim_free_q, &qe);
		list_add_tail(qe, &mod->iotag_unused_q);
	}

	if (mod->num_ioim_reqs != num_ioim_fw) {
		bfa_trc(bfa, mod->num_ioim_reqs);
		bfa_trc(bfa, num_ioim_fw);
	}

	mod->max_ioim_reqs = max_ioim_fw;
	mod->num_ioim_reqs = num_ioim_fw;
	mod->throttle_update_required = 0;
}

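/*
 * Register the interrupt handler for the ITN associated with a remote
 * port.
 */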
void
bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
		void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
{
	struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
	struct bfa_itn_s *itn;

	itn = BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
	itn->isr = isr;
}

/*
 * Itn interrupt processing.
 */
void
bfa_itn_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
	union bfi_itn_i2h_msg_u msg;
	struct bfa_itn_s *itn;

	msg.msg = m;
	itn = BFA_ITN_FROM_TAG(fcp, msg.create_rsp->bfa_handle);

	if (itn->isr)
		itn->isr(bfa, m);
	else
		WARN_ON(1);
}

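/*
 * Carve the IO tag array out of KVA memory: the first num_ioim_reqs
 * tags go on the initiator-mode IO free list, the remainder on the
 * target IO free list.
 */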
void
bfa_iotag_attach(struct bfa_fcp_mod_s *fcp)
{
	struct bfa_iotag_s *iotag;
	u16 num_io_req, i;

	iotag = (struct bfa_iotag_s *) bfa_mem_kva_curp(fcp);
	fcp->iotag_arr = iotag;

	INIT_LIST_HEAD(&fcp->iotag_ioim_free_q);
	INIT_LIST_HEAD(&fcp->iotag_tio_free_q);
	INIT_LIST_HEAD(&fcp->iotag_unused_q);

	num_io_req = fcp->num_ioim_reqs + fcp->num_fwtio_reqs;
	for (i = 0; i < num_io_req; i++, iotag++) {
		memset(iotag, 0, sizeof(struct bfa_iotag_s));
		iotag->tag = i;
		if (i < fcp->num_ioim_reqs)
			list_add_tail(&iotag->qe, &fcp->iotag_ioim_free_q);
		else
			list_add_tail(&iotag->qe, &fcp->iotag_tio_free_q);
	}

	bfa_mem_kva_curp(fcp) = (u8 *) iotag;
}

/*
 * To send the config request, first try the throttle value from flash.
 * If it is 0, use the driver parameter. min(flash_val, drv_val) must be
 * used because memory allocation was done based on the driver-configured
 * value.
 */
u16
bfa_fcpim_get_throttle_cfg(struct bfa_s *bfa, u16 drv_cfg_param)
{
	u16 tmp;
	struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);

	/*
	 * If the throttle value from flash is already in effect after the
	 * driver is loaded, then until the next load always return the
	 * current value instead of the actual flash value.
	 */
	if (!fcp->throttle_update_required)
		return (u16)fcp->num_ioim_reqs;

	tmp = bfa_dconf_read_data_valid(bfa) ? bfa_fcpim_read_throttle(bfa) : 0;
	if (!tmp || (tmp > drv_cfg_param))
		tmp = drv_cfg_param;

	return tmp;
}

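/*
 * Persist a new throttle value into dconf; refused in min-cfg mode.
 */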
bfa_status_t
bfa_fcpim_write_throttle(struct bfa_s *bfa, u16 value)
{
	if (!bfa_dconf_get_min_cfg(bfa)) {
		BFA_DCONF_MOD(bfa)->dconf->throttle_cfg.value = value;
		BFA_DCONF_MOD(bfa)->dconf->throttle_cfg.is_valid = 1;
		return BFA_STATUS_OK;
	}

	return BFA_STATUS_FAILED;
}

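/*
 * Return the throttle value configured in dconf, or 0 if none is valid
 * or the driver is in min-cfg mode.
 */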
u16
bfa_fcpim_read_throttle(struct bfa_s *bfa)
{
	struct bfa_throttle_cfg_s *throttle_cfg =
			&(BFA_DCONF_MOD(bfa)->dconf->throttle_cfg);

	return ((!bfa_dconf_get_min_cfg(bfa)) ?
	       ((throttle_cfg->is_valid == 1) ? (throttle_cfg->value) : 0) : 0);
}

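/*
 * Set and persist a new IO throttle value. Since the running value is
 * only updated at driver load, the new setting takes effect on the
 * next load.
 */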
bfa_status_t
bfa_fcpim_throttle_set(struct bfa_s *bfa, u16 value)
{
	/* in min cfg no commands should run. */
	if ((bfa_dconf_get_min_cfg(bfa) == BFA_TRUE) ||
	    (!bfa_dconf_read_data_valid(bfa)))
		return BFA_STATUS_FAILED;

	bfa_fcpim_write_throttle(bfa, value);

	return bfa_dconf_update(bfa);
}

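/*
 * Report the current, configured and maximum IO throttle values into
 * the caller-supplied buffer.
 */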
bfa_status_t
bfa_fcpim_throttle_get(struct bfa_s *bfa, void *buf)
{
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
	struct bfa_defs_fcpim_throttle_s throttle;

	if ((bfa_dconf_get_min_cfg(bfa) == BFA_TRUE) ||
	    (!bfa_dconf_read_data_valid(bfa)))
		return BFA_STATUS_FAILED;

	memset(&throttle, 0, sizeof(struct bfa_defs_fcpim_throttle_s));

	throttle.cur_value = (u16)(fcpim->fcp->num_ioim_reqs);
	throttle.cfg_value = bfa_fcpim_read_throttle(bfa);
	if (!throttle.cfg_value)
		throttle.cfg_value = throttle.cur_value;
	throttle.max_value = (u16)(fcpim->fcp->max_ioim_reqs);
	memcpy(buf, &throttle, sizeof(struct bfa_defs_fcpim_throttle_s));

	return BFA_STATUS_OK;
}