/*
 * Adaptec AAC series RAID controller driver
 * (c) Copyright 2001 Red Hat Inc.
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000-2010 Adaptec, Inc.
 *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
 *               2016-2017 Microsemi Corp. (aacraid@microsemi.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Module Name:
 *  commsup.c
 *
 * Abstract: Contain all routines that are required for FSA host/adapter
 *    communication.
 *
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/crash_dump.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/semaphore.h>
#include <linux/bcd.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>

#include "aacraid.h"

/**
 * fib_map_alloc - allocate the fib objects
 * @dev: Adapter to allocate for
 *
 * Allocate and map the shared PCI space for the FIB blocks used to
 * talk to the Adaptec firmware.
 */
static int fib_map_alloc(struct aac_dev *dev)
{
	if (dev->max_fib_size > AAC_MAX_NATIVE_SIZE)
		dev->max_cmd_size = AAC_MAX_NATIVE_SIZE;
	else
		dev->max_cmd_size = dev->max_fib_size;

	dprintk((KERN_INFO
	  "allocate hardware fibs dma_alloc_coherent(%p, %d * (%d + %d), %p)\n",
	  &dev->pdev->dev, dev->max_cmd_size, dev->scsi_host_ptr->can_queue,
	  AAC_NUM_MGT_FIB, &dev->hw_fib_pa));
	dev->hw_fib_va = dma_alloc_coherent(&dev->pdev->dev,
		(dev->max_cmd_size + sizeof(struct aac_fib_xporthdr))
		* (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) + (ALIGN32 - 1),
		&dev->hw_fib_pa, GFP_KERNEL);
	if (dev->hw_fib_va == NULL)
		return -ENOMEM;
	return 0;
}
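
/*
 * Layout note (informational): the coherent buffer allocated above packs
 * one aac_fib_xporthdr immediately in front of each hardware FIB, one
 * pair per (can_queue + AAC_NUM_MGT_FIB) slot, plus ALIGN32 - 1 slack
 * bytes so aac_fib_setup() can round the first FIB up to a 32-byte
 * boundary.
 */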

/**
 * aac_fib_map_free - free the fib objects
 * @dev: Adapter to free
 *
 * Free the PCI mappings and the memory allocated for FIB blocks
 * on this adapter.
 */
void aac_fib_map_free(struct aac_dev *dev)
{
	size_t alloc_size;
	size_t fib_size;
	int num_fibs;

	if (!dev->hw_fib_va || !dev->max_cmd_size)
		return;

	num_fibs = dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB;
	fib_size = dev->max_fib_size + sizeof(struct aac_fib_xporthdr);
	alloc_size = fib_size * num_fibs + ALIGN32 - 1;

	dma_free_coherent(&dev->pdev->dev, alloc_size, dev->hw_fib_va,
			  dev->hw_fib_pa);

	dev->hw_fib_va = NULL;
	dev->hw_fib_pa = 0;
}

/**
 * aac_fib_vector_assign - assign MSI-X vectors to fibs
 * @dev: Adapter
 *
 * Spread the FIBs across the available MSI-X vectors, keeping vector 0
 * for the single-vector case and for the FIBs beyond the vector capacity.
 */
void aac_fib_vector_assign(struct aac_dev *dev)
{
	u32 i = 0;
	u32 vector = 1;
	struct fib *fibptr = NULL;

	for (i = 0, fibptr = &dev->fibs[i];
		i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
		i++, fibptr++) {
		if ((dev->max_msix == 1) ||
		    (i > ((dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1)
			- dev->vector_cap))) {
			fibptr->vector_no = 0;
		} else {
			fibptr->vector_no = vector;
			vector++;
			if (vector == dev->max_msix)
				vector = 1;
		}
	}
}
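
/*
 * Illustrative example: with max_msix = 4 the eligible FIBs are tagged
 * with vectors 1, 2, 3, 1, 2, 3, ... round-robin; vector 0 is never
 * handed out by the loop above, it is reserved for the last vector_cap
 * FIBs and for the single-vector (max_msix == 1) case.
 */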

/**
 * aac_fib_setup - setup the fibs
 * @dev: Adapter to set up
 *
 * Allocate the PCI space for the fibs, map it and then initialise the
 * fib area, the unmapped fib data and also the free list
 */
int aac_fib_setup(struct aac_dev * dev)
{
	struct fib *fibptr;
	struct hw_fib *hw_fib;
	dma_addr_t hw_fib_pa;
	int i;
	u32 max_cmds;

	while (((i = fib_map_alloc(dev)) == -ENOMEM)
	 && (dev->scsi_host_ptr->can_queue > (64 - AAC_NUM_MGT_FIB))) {
		max_cmds = (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) >> 1;
		dev->scsi_host_ptr->can_queue = max_cmds - AAC_NUM_MGT_FIB;
		if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE3)
			dev->init->r7.max_io_commands = cpu_to_le32(max_cmds);
	}
	if (i < 0)
		return -ENOMEM;

	memset(dev->hw_fib_va, 0,
		(dev->max_cmd_size + sizeof(struct aac_fib_xporthdr)) *
		(dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB));

	/* 32 byte alignment for PMC */
	hw_fib_pa = (dev->hw_fib_pa + (ALIGN32 - 1)) & ~(ALIGN32 - 1);
	hw_fib = (struct hw_fib *)((unsigned char *)dev->hw_fib_va +
					(hw_fib_pa - dev->hw_fib_pa));

	/* add Xport header */
	hw_fib = (struct hw_fib *)((unsigned char *)hw_fib +
					sizeof(struct aac_fib_xporthdr));
	hw_fib_pa += sizeof(struct aac_fib_xporthdr);

	/*
	 * Initialise the fibs
	 */
	for (i = 0, fibptr = &dev->fibs[i];
		i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
		i++, fibptr++)
	{
		fibptr->flags = 0;
		fibptr->size = sizeof(struct fib);
		fibptr->dev = dev;
		fibptr->hw_fib_va = hw_fib;
		fibptr->data = (void *) fibptr->hw_fib_va->data;
		fibptr->next = fibptr + 1;	/* Forward chain the fibs */
		sema_init(&fibptr->event_wait, 0);
		spin_lock_init(&fibptr->event_lock);
		hw_fib->header.XferState = cpu_to_le32(0xffffffff);
		hw_fib->header.SenderSize =
			cpu_to_le16(dev->max_fib_size);	/* ?? max_cmd_size */
		fibptr->hw_fib_pa = hw_fib_pa;
		fibptr->hw_sgl_pa = hw_fib_pa +
			offsetof(struct aac_hba_cmd_req, sge[2]);
		/*
		 * one element is for the ptr to the separate sg list,
		 * second element for 32 byte alignment
		 */
		fibptr->hw_error_pa = hw_fib_pa +
			offsetof(struct aac_native_hba, resp.resp_bytes[0]);

		hw_fib = (struct hw_fib *)((unsigned char *)hw_fib +
			dev->max_cmd_size + sizeof(struct aac_fib_xporthdr));
		hw_fib_pa = hw_fib_pa +
			dev->max_cmd_size + sizeof(struct aac_fib_xporthdr);
	}

	/*
	 * Assign vector numbers to fibs
	 */
	aac_fib_vector_assign(dev);

	/*
	 * Add the fib chain to the free list
	 */
	dev->fibs[dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1].next = NULL;
	/*
	 * Set 8 fibs aside for management tools
	 */
	dev->free_fib = &dev->fibs[dev->scsi_host_ptr->can_queue];
	return 0;
}
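
/*
 * Resulting layout of the coherent region initialised above (sketch):
 *
 *	[pad to 32 bytes][xporthdr][hw_fib 0][xporthdr][hw_fib 1]...
 *
 * Each dev->fibs[i].hw_fib_va / hw_fib_pa points at the hw_fib itself,
 * just past the transport header that precedes it.
 */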

/**
 * aac_fib_alloc_tag - allocate a fib using tags
 * @dev: Adapter to allocate the fib for
 * @scmd: SCSI command whose blk layer tag selects the fib
 *
 * Allocate a fib from the adapter fib pool using tags
 * from the blk layer.
 */
struct fib *aac_fib_alloc_tag(struct aac_dev *dev, struct scsi_cmnd *scmd)
{
	struct fib *fibptr;

	fibptr = &dev->fibs[scmd->request->tag];
	/*
	 * Null out fields that depend on being zero at the start of
	 * each I/O
	 */
	fibptr->hw_fib_va->header.XferState = 0;
	fibptr->type = FSAFS_NTC_FIB_CONTEXT;
	fibptr->callback_data = NULL;
	fibptr->callback = NULL;

	return fibptr;
}

/**
 * aac_fib_alloc - allocate a fib
 * @dev: Adapter to allocate the fib for
 *
 * Allocate a fib from the adapter fib pool. If the pool is empty we
 * return NULL.
 */
struct fib *aac_fib_alloc(struct aac_dev *dev)
{
	struct fib * fibptr;
	unsigned long flags;

	spin_lock_irqsave(&dev->fib_lock, flags);
	fibptr = dev->free_fib;
	if (!fibptr) {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		return fibptr;
	}
	dev->free_fib = fibptr->next;
	spin_unlock_irqrestore(&dev->fib_lock, flags);

	/*
	 * Set the proper node type code and node byte size
	 */
	fibptr->type = FSAFS_NTC_FIB_CONTEXT;
	fibptr->size = sizeof(struct fib);
	/*
	 * Null out fields that depend on being zero at the start of
	 * each I/O
	 */
	fibptr->hw_fib_va->header.XferState = 0;
	fibptr->flags = 0;
	fibptr->callback = NULL;
	fibptr->callback_data = NULL;

	return fibptr;
}

/**
 * aac_fib_free - free a fib
 * @fibptr: fib to free up
 *
 * Frees up a fib and places it on the appropriate queue
 */
void aac_fib_free(struct fib *fibptr)
{
	unsigned long flags;

	if (fibptr->done == 2)
		return;

	spin_lock_irqsave(&fibptr->dev->fib_lock, flags);
	if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
		aac_config.fib_timeouts++;
	if (!(fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) &&
	    fibptr->hw_fib_va->header.XferState != 0) {
		printk(KERN_WARNING "aac_fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n",
			(void *)fibptr,
			le32_to_cpu(fibptr->hw_fib_va->header.XferState));
	}
	fibptr->next = fibptr->dev->free_fib;
	fibptr->dev->free_fib = fibptr;
	spin_unlock_irqrestore(&fibptr->dev->fib_lock, flags);
}

/**
 * aac_fib_init - initialise a fib
 * @fibptr: The fib to initialize
 *
 * Set up the generic fib fields ready for use
 */
void aac_fib_init(struct fib *fibptr)
{
	struct hw_fib *hw_fib = fibptr->hw_fib_va;

	memset(&hw_fib->header, 0, sizeof(struct aac_fibhdr));
	hw_fib->header.StructType = FIB_MAGIC;
	hw_fib->header.Size = cpu_to_le16(fibptr->dev->max_fib_size);
	hw_fib->header.XferState = cpu_to_le32(HostOwned | FibInitialized | FibEmpty | FastResponseCapable);
	hw_fib->header.u.ReceiverFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
	hw_fib->header.SenderSize = cpu_to_le16(fibptr->dev->max_fib_size);
}
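
/*
 * Typical single-FIB round trip as used by callers elsewhere in the
 * driver (illustrative sketch, error handling omitted):
 *
 *	struct fib *fibptr = aac_fib_alloc(dev);
 *
 *	aac_fib_init(fibptr);
 *	... build the request in fib_data(fibptr) ...
 *	aac_fib_send(ContainerCommand, fibptr, size, FsaNormal,
 *		     1, 1, NULL, NULL);
 *	aac_fib_complete(fibptr);
 *	aac_fib_free(fibptr);
 */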

/**
 * fib_dealloc - deallocate a fib
 * @fibptr: fib to deallocate
 *
 * Will deallocate and return to the free pool the FIB pointed to by the
 * caller.
 */
static void fib_dealloc(struct fib * fibptr)
{
	struct hw_fib *hw_fib = fibptr->hw_fib_va;
	hw_fib->header.XferState = 0;
}

/*
 * Communication primitives define and support the queuing method we use
 * to support host to adapter communication. All queue accesses happen
 * through these routines and they are the only routines which have
 * knowledge of how these queues are implemented.
 */

/**
 * aac_get_entry - get a queue entry
 * @dev: Adapter
 * @qid: Queue Number
 * @entry: Entry return
 * @index: Index return
 * @nonotify: notification control
 *
 * With a priority the routine returns a queue entry if the queue has free
 * entries. If the queue is full (no free entries) then no entry is
 * returned and the function returns 0, otherwise 1 is returned.
 */
static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entry, u32 * index, unsigned long *nonotify)
{
	struct aac_queue * q;
	unsigned long idx;

	/*
	 * All of the queues wrap when they reach the end, so we check
	 * to see if they have reached the end and if they have we just
	 * set the index back to zero. This is a wrap. You could or off
	 * the high bits in all updates but this is a bit faster I think.
	 */
	q = &dev->queues->queue[qid];

	idx = *index = le32_to_cpu(*(q->headers.producer));
	/* Interrupt Moderation, only interrupt for first two entries */
	if (idx != le32_to_cpu(*(q->headers.consumer))) {
		if (--idx == 0) {
			if (qid == AdapNormCmdQueue)
				idx = ADAP_NORM_CMD_ENTRIES;
			else
				idx = ADAP_NORM_RESP_ENTRIES;
		}
		if (idx != le32_to_cpu(*(q->headers.consumer)))
			*nonotify = 1;
	}

	if (qid == AdapNormCmdQueue) {
		if (*index >= ADAP_NORM_CMD_ENTRIES)
			*index = 0; /* Wrap to front of the Producer Queue. */
	} else {
		if (*index >= ADAP_NORM_RESP_ENTRIES)
			*index = 0; /* Wrap to front of the Producer Queue. */
	}

	/* Queue is full */
	if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) {
		printk(KERN_WARNING "Queue %d full, %u outstanding.\n",
				qid, atomic_read(&q->numpending));
		return 0;
	} else {
		*entry = q->base + *index;
		return 1;
	}
}
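
/*
 * Illustrative wrap behaviour: with ADAP_NORM_CMD_ENTRIES slots, a
 * producer index that reaches the entry count is wrapped back to slot 0,
 * and the queue is reported full when advancing the producer would land
 * it on the consumer, so one slot always stays unused.
 */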

/**
 * aac_queue_get - get the next free QE
 * @dev: Adapter
 * @index: Returned index
 * @qid: Queue number
 * @hw_fib: Fib to associate with the queue entry
 * @wait: Wait if queue full
 * @fibptr: Driver fib object to go with fib
 * @nonotify: Don't notify the adapter
 *
 * Gets the next free QE off the requested priority adapter command
 * queue and associates the Fib with the QE. The QE represented by
 * index is ready to insert on the queue when this routine returns
 * success.
 */
int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify)
{
	struct aac_entry * entry = NULL;
	int map = 0;

	if (qid == AdapNormCmdQueue) {
		/* if no entries wait for some if caller wants to */
		while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
			printk(KERN_ERR "GetEntries failed\n");
		}
		/*
		 * Setup queue entry with a command, status and fib mapped
		 */
		entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
		map = 1;
	} else {
		while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
			/* if no entries wait for some if caller wants to */
		}
		/*
		 * Setup queue entry with command, status and fib mapped
		 */
		entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
		entry->addr = hw_fib->header.SenderFibAddress;
		/* Restore adapters pointer to the FIB */
		hw_fib->header.u.ReceiverFibAddress = hw_fib->header.SenderFibAddress;	/* Let the adapter know where to find its data */
		map = 0;
	}
	/*
	 * If MapFib is true then we need to map the Fib and put pointers
	 * in the queue entry.
	 */
	if (map)
		entry->addr = cpu_to_le32(fibptr->hw_fib_pa);
	return 0;
}

/*
 * Define the highest level of host to adapter communication routines.
 * These routines will support host to adapter FS communication. These
 * routines have no knowledge of the communication method used. This level
 * sends and receives FIBs. This level has no knowledge of how these FIBs
 * get passed back and forth.
 */

/**
 * aac_fib_send - send a fib to the adapter
 * @command: Command to send
 * @fibptr: The fib
 * @size: Size of fib data area
 * @priority: Priority of Fib
 * @wait: Async/sync select
 * @reply: True if a reply is wanted
 * @callback: Called with reply
 * @callback_data: Passed to callback
 *
 * Sends the requested FIB to the adapter and optionally will wait for a
 * response FIB. If the caller does not wish to wait for a response then
 * an event to wait on must be supplied. This event will be set when a
 * response FIB is received from the adapter.
 */
int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
		int priority, int wait, int reply, fib_callback callback,
		void *callback_data)
{
	struct aac_dev * dev = fibptr->dev;
	struct hw_fib * hw_fib = fibptr->hw_fib_va;
	unsigned long flags = 0;
	unsigned long mflags = 0;
	unsigned long sflags = 0;

	if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned)))
		return -EBUSY;

	if (hw_fib->header.XferState & cpu_to_le32(AdapterProcessed))
		return -EINVAL;

	/*
	 * There are 5 cases with the wait and response requested flags.
	 * The only invalid cases are if the caller requests to wait and
	 * does not request a response and if the caller does not want a
	 * response and the Fib is not allocated from pool. If a response
	 * is not requested the Fib will just be deallocated by the DPC
	 * routine when the response comes back from the adapter. No
	 * further processing will be done besides deleting the Fib. We
	 * will have a debug mode where the adapter can notify the host
	 * it had a problem and the host can log that fact.
	 */
	fibptr->flags = 0;
	if (wait && !reply) {
		return -EINVAL;
	} else if (!wait && reply) {
		hw_fib->header.XferState |= cpu_to_le32(Async | ResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.AsyncSent);
	} else if (!wait && !reply) {
		hw_fib->header.XferState |= cpu_to_le32(NoResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.NoResponseSent);
	} else if (wait && reply) {
		hw_fib->header.XferState |= cpu_to_le32(ResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.NormalSent);
	}
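
	/*
	 * Recap of the (wait, reply) combinations handled above:
	 *	wait != 0, reply == 0  ->  -EINVAL (never valid)
	 *	wait == 0, reply != 0  ->  Async | ResponseExpected
	 *	wait == 0, reply == 0  ->  NoResponseExpected
	 *	wait != 0, reply != 0  ->  ResponseExpected, caller sleeps
	 */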

	/*
	 * Map the fib into 32bits by using the fib number
	 */
	hw_fib->header.SenderFibAddress =
		cpu_to_le32(((u32)(fibptr - dev->fibs)) << 2);

	/* use the same shifted value for handle to be compatible
	 * with the new native hba command handle
	 */
	hw_fib->header.Handle =
		cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1);

	/*
	 * Set FIB state to indicate where it came from and if we want a
	 * response from the adapter. Also load the command from the
	 * caller.
	 *
	 * Map the hw fib pointer as a 32bit value
	 */
	hw_fib->header.Command = cpu_to_le16(command);
	hw_fib->header.XferState |= cpu_to_le32(SentFromHost);
	/*
	 * Set the size of the Fib we want to send to the adapter
	 */
	hw_fib->header.Size = cpu_to_le16(sizeof(struct aac_fibhdr) + size);
	if (le16_to_cpu(hw_fib->header.Size) > le16_to_cpu(hw_fib->header.SenderSize)) {
		return -EMSGSIZE;
	}
	/*
	 * Get a queue entry, connect the FIB to it and notify
	 * the adapter that a command is ready.
	 */
	hw_fib->header.XferState |= cpu_to_le32(NormalPriority);

	/*
	 * Fill in the Callback and CallbackContext if we are not
	 * going to wait.
	 */
	if (!wait) {
		fibptr->callback = callback;
		fibptr->callback_data = callback_data;
		fibptr->flags = FIB_CONTEXT_FLAG;
	}

	fibptr->done = 0;

	FIB_COUNTER_INCREMENT(aac_config.FibsSent);

	dprintk((KERN_DEBUG "Fib contents:.\n"));
	dprintk((KERN_DEBUG "  Command = %d.\n", le32_to_cpu(hw_fib->header.Command)));
	dprintk((KERN_DEBUG "  SubCommand = %d.\n", le32_to_cpu(((struct aac_query_mount *)fib_data(fibptr))->command)));
	dprintk((KERN_DEBUG "  XferState = %x.\n", le32_to_cpu(hw_fib->header.XferState)));
	dprintk((KERN_DEBUG "  hw_fib va being sent=%p\n", fibptr->hw_fib_va));
	dprintk((KERN_DEBUG "  hw_fib pa being sent=%lx\n", (ulong)fibptr->hw_fib_pa));
	dprintk((KERN_DEBUG "  fib being sent=%p\n", fibptr));

	if (!dev->queues)
		return -EBUSY;

	if (wait) {
		spin_lock_irqsave(&dev->manage_lock, mflags);
		if (dev->management_fib_count >= AAC_NUM_MGT_FIB) {
			printk(KERN_INFO "No management Fibs Available:%d\n",
						dev->management_fib_count);
			spin_unlock_irqrestore(&dev->manage_lock, mflags);
			return -EBUSY;
		}
		dev->management_fib_count++;
		spin_unlock_irqrestore(&dev->manage_lock, mflags);
		spin_lock_irqsave(&fibptr->event_lock, flags);
	}

	if (dev->sync_mode) {
		if (wait)
			spin_unlock_irqrestore(&fibptr->event_lock, flags);
		spin_lock_irqsave(&dev->sync_lock, sflags);
		if (dev->sync_fib) {
			list_add_tail(&fibptr->fiblink, &dev->sync_fib_list);
			spin_unlock_irqrestore(&dev->sync_lock, sflags);
		} else {
			dev->sync_fib = fibptr;
			spin_unlock_irqrestore(&dev->sync_lock, sflags);
			aac_adapter_sync_cmd(dev, SEND_SYNCHRONOUS_FIB,
				(u32)fibptr->hw_fib_pa, 0, 0, 0, 0, 0,
				NULL, NULL, NULL, NULL, NULL);
		}
		if (wait) {
			fibptr->flags |= FIB_CONTEXT_FLAG_WAIT;
			if (down_interruptible(&fibptr->event_wait)) {
				fibptr->flags &= ~FIB_CONTEXT_FLAG_WAIT;
				return -EFAULT;
			}
			return 0;
		}
		return -EINPROGRESS;
	}

	if (aac_adapter_deliver(fibptr) != 0) {
		printk(KERN_ERR "aac_fib_send: returned -EBUSY\n");
		if (wait) {
			spin_unlock_irqrestore(&fibptr->event_lock, flags);
			spin_lock_irqsave(&dev->manage_lock, mflags);
			dev->management_fib_count--;
			spin_unlock_irqrestore(&dev->manage_lock, mflags);
		}
		return -EBUSY;
	}

	/*
	 * If the caller wanted us to wait for response wait now.
	 */

	if (wait) {
		spin_unlock_irqrestore(&fibptr->event_lock, flags);
		/* Only set for first known interruptible command */
		if (wait < 0) {
			/*
			 * *VERY* Dangerous to time out a command, the
			 * assumption is made that we have no hope of
			 * functioning because an interrupt routing or other
			 * hardware failure has occurred.
			 */
			unsigned long timeout = jiffies + (180 * HZ); /* 3 minutes */
			while (down_trylock(&fibptr->event_wait)) {
				int blink;
				if (time_is_before_eq_jiffies(timeout)) {
					struct aac_queue * q = &dev->queues->queue[AdapNormCmdQueue];
					atomic_dec(&q->numpending);
					if (wait == -1) {
						printk(KERN_ERR "aacraid: aac_fib_send: first asynchronous command timed out.\n"
						  "Usually a result of a PCI interrupt routing problem;\n"
						  "update mother board BIOS or consider utilizing one of\n"
						  "the SAFE mode kernel options (acpi, apic etc)\n");
					}
					return -ETIMEDOUT;
				}

				if (unlikely(aac_pci_offline(dev)))
					return -EFAULT;

				if ((blink = aac_adapter_check_health(dev)) > 0) {
					if (wait == -1) {
						printk(KERN_ERR "aacraid: aac_fib_send: adapter blinkLED 0x%x.\n"
						  "Usually a result of a serious unrecoverable hardware problem\n",
						  blink);
					}
					return -EFAULT;
				}
				/*
				 * Allow other processes / CPUS to use core
				 */
				schedule();
			}
		} else if (down_interruptible(&fibptr->event_wait)) {
			/* Do nothing ... satisfy
			 * down_interruptible must_check */
		}

		spin_lock_irqsave(&fibptr->event_lock, flags);
		if (fibptr->done == 0) {
			fibptr->done = 2; /* Tell interrupt we aborted */
			spin_unlock_irqrestore(&fibptr->event_lock, flags);
			return -ERESTARTSYS;
		}
		spin_unlock_irqrestore(&fibptr->event_lock, flags);
		BUG_ON(fibptr->done == 0);

		if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
			return -ETIMEDOUT;
		return 0;
	}
	/*
	 * If the user does not want a response then return success,
	 * otherwise return pending
	 */
	if (reply)
		return -EINPROGRESS;
	else
		return 0;
}
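
/*
 * Asynchronous use of aac_fib_send() (illustrative): with wait == 0 and
 * a callback, the call returns -EINPROGRESS and the callback later runs
 * from the response path, e.g.
 *
 *	aac_fib_send(ContainerCommand, fibptr, size, FsaNormal,
 *		     0, 1, my_callback, my_context);
 *
 * where my_callback / my_context are caller-supplied names.
 */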

int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
		void *callback_data)
{
	struct aac_dev *dev = fibptr->dev;
	int wait;
	unsigned long flags = 0;
	unsigned long mflags = 0;
	struct aac_hba_cmd_req *hbacmd = (struct aac_hba_cmd_req *)
			fibptr->hw_fib_va;

	fibptr->flags = (FIB_CONTEXT_FLAG | FIB_CONTEXT_FLAG_NATIVE_HBA);
	if (callback) {
		wait = 0;
		fibptr->callback = callback;
		fibptr->callback_data = callback_data;
	} else
		wait = 1;

	hbacmd->iu_type = command;

	if (command == HBA_IU_TYPE_SCSI_CMD_REQ) {
		/* bit1 of request_id must be 0 */
		hbacmd->request_id =
			cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1);
		fibptr->flags |= FIB_CONTEXT_FLAG_SCSI_CMD;
	} else if (command != HBA_IU_TYPE_SCSI_TM_REQ)
		return -EINVAL;

	if (wait) {
		spin_lock_irqsave(&dev->manage_lock, mflags);
		if (dev->management_fib_count >= AAC_NUM_MGT_FIB) {
			spin_unlock_irqrestore(&dev->manage_lock, mflags);
			return -EBUSY;
		}
		dev->management_fib_count++;
		spin_unlock_irqrestore(&dev->manage_lock, mflags);
		spin_lock_irqsave(&fibptr->event_lock, flags);
	}

	if (aac_adapter_deliver(fibptr) != 0) {
		if (wait) {
			spin_unlock_irqrestore(&fibptr->event_lock, flags);
			spin_lock_irqsave(&dev->manage_lock, mflags);
			dev->management_fib_count--;
			spin_unlock_irqrestore(&dev->manage_lock, mflags);
		}
		return -EBUSY;
	}
	FIB_COUNTER_INCREMENT(aac_config.NativeSent);

	if (wait) {
		spin_unlock_irqrestore(&fibptr->event_lock, flags);

		if (unlikely(aac_pci_offline(dev)))
			return -EFAULT;

		fibptr->flags |= FIB_CONTEXT_FLAG_WAIT;
		if (down_interruptible(&fibptr->event_wait))
			fibptr->done = 2;
		fibptr->flags &= ~(FIB_CONTEXT_FLAG_WAIT);

		spin_lock_irqsave(&fibptr->event_lock, flags);
		if ((fibptr->done == 0) || (fibptr->done == 2)) {
			fibptr->done = 2; /* Tell interrupt we aborted */
			spin_unlock_irqrestore(&fibptr->event_lock, flags);
			return -ERESTARTSYS;
		}
		spin_unlock_irqrestore(&fibptr->event_lock, flags);
		WARN_ON(fibptr->done == 0);

		if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
			return -ETIMEDOUT;

		return 0;
	}
	return -EINPROGRESS;
}
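
/*
 * Note: the request_id used above is the FIB index shifted left by two
 * with bit 0 set, the same encoding aac_fib_send() stores in
 * hw_fib->header.Handle; that shift keeps bit 1 clear, as the firmware
 * requires for SCSI command IUs.
 */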

/**
 * aac_consumer_get - get the top of the queue
 * @dev: Adapter
 * @q: Queue
 * @entry: Return entry
 *
 * Will return a pointer to the entry on the top of the queue requested that
 * we are a consumer of, and return the address of the queue entry. It does
 * not change the state of the queue.
 */
int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry)
{
	u32 index;
	int status;

	if (le32_to_cpu(*q->headers.producer) == le32_to_cpu(*q->headers.consumer)) {
		status = 0;
	} else {
		/*
		 * The consumer index must be wrapped if we have reached
		 * the end of the queue, else we just use the entry
		 * pointed to by the header index
		 */
		if (le32_to_cpu(*q->headers.consumer) >= q->entries)
			index = 0;
		else
			index = le32_to_cpu(*q->headers.consumer);
		*entry = q->base + index;
		status = 1;
	}
	return(status);
}
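
/*
 * The producer and consumer indices live in shared headers that the
 * adapter also updates; a queue is empty when they are equal, which is
 * why aac_consumer_get() only returns an entry while they differ.
 */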

/**
 * aac_consumer_free - free consumer entry
 * @dev: Adapter
 * @q: Queue
 * @qid: Queue ident
 *
 * Frees up the current top of the queue we are a consumer of. If the
 * queue was full notify the producer that the queue is no longer full.
 */
void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
{
	int wasfull = 0;
	u32 notify;

	if ((le32_to_cpu(*q->headers.producer) + 1) == le32_to_cpu(*q->headers.consumer))
		wasfull = 1;

	if (le32_to_cpu(*q->headers.consumer) >= q->entries)
		*q->headers.consumer = cpu_to_le32(1);
	else
		le32_add_cpu(q->headers.consumer, 1);

	if (wasfull) {
		switch (qid) {
		case HostNormCmdQueue:
			notify = HostNormCmdNotFull;
			break;
		case HostNormRespQueue:
			notify = HostNormRespNotFull;
			break;
		default:
			BUG();
			return;
		}
		aac_adapter_notify(dev, notify);
	}
}

/**
 * aac_fib_adapter_complete - complete adapter issued fib
 * @fibptr: fib to complete
 * @size: size of fib
 *
 * Will do all necessary work to complete a FIB that was sent from
 * the adapter.
 */
int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
{
	struct hw_fib * hw_fib = fibptr->hw_fib_va;
	struct aac_dev * dev = fibptr->dev;
	struct aac_queue * q;
	unsigned long nointr = 0;
	unsigned long qflags;

	if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1 ||
	    dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 ||
	    dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) {
		kfree(hw_fib);
		return 0;
	}

	if (hw_fib->header.XferState == 0) {
		if (dev->comm_interface == AAC_COMM_MESSAGE)
			kfree(hw_fib);
		return 0;
	}
	/*
	 * If we plan to do anything check the structure type first.
	 */
	if (hw_fib->header.StructType != FIB_MAGIC &&
	    hw_fib->header.StructType != FIB_MAGIC2 &&
	    hw_fib->header.StructType != FIB_MAGIC2_64) {
		if (dev->comm_interface == AAC_COMM_MESSAGE)
			kfree(hw_fib);
		return -EINVAL;
	}
	/*
	 * This block handles the case where the adapter had sent us a
	 * command and we have finished processing the command. We
	 * call completeFib when we are done processing the command
	 * and want to send a response back to the adapter. This will
	 * send the completed cdb to the adapter.
	 */
	if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) {
		if (dev->comm_interface == AAC_COMM_MESSAGE) {
			kfree(hw_fib);
		} else {
			u32 index;
			hw_fib->header.XferState |= cpu_to_le32(HostProcessed);
			if (size) {
				size += sizeof(struct aac_fibhdr);
				if (size > le16_to_cpu(hw_fib->header.SenderSize))
					return -EMSGSIZE;
				hw_fib->header.Size = cpu_to_le16(size);
			}
			q = &dev->queues->queue[AdapNormRespQueue];
			spin_lock_irqsave(q->lock, qflags);
			aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr);
			*(q->headers.producer) = cpu_to_le32(index + 1);
			spin_unlock_irqrestore(q->lock, qflags);
			if (!(nointr & (int)aac_config.irq_mod))
				aac_adapter_notify(dev, AdapNormRespQueue);
		}
	} else {
		printk(KERN_WARNING "aac_fib_adapter_complete: "
			"Unknown xferstate detected.\n");
		BUG();
	}
	return 0;
}

/**
 * aac_fib_complete - fib completion handler
 * @fibptr: FIB to complete
 *
 * Will do all necessary work to complete a FIB.
 */
int aac_fib_complete(struct fib *fibptr)
{
	struct hw_fib * hw_fib = fibptr->hw_fib_va;

	if (fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) {
		fib_dealloc(fibptr);
		return 0;
	}

	/*
	 * Check for a fib which has already been completed or with a
	 * status wait timeout
	 */
	if (hw_fib->header.XferState == 0 || fibptr->done == 2)
		return 0;
	/*
	 * If we plan to do anything check the structure type first.
	 */
	if (hw_fib->header.StructType != FIB_MAGIC &&
	    hw_fib->header.StructType != FIB_MAGIC2 &&
	    hw_fib->header.StructType != FIB_MAGIC2_64)
		return -EINVAL;

	/*
	 * This block completes a cdb which originated on the host and we
	 * just need to deallocate the cdb or reinit it. At this point the
	 * command is complete that we had sent to the adapter and this
	 * cdb could be reused.
	 */
	if ((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) &&
	    (hw_fib->header.XferState & cpu_to_le32(AdapterProcessed)))
	{
		fib_dealloc(fibptr);
	}
	else if (hw_fib->header.XferState & cpu_to_le32(SentFromHost))
	{
		/*
		 * This handles the case when the host has aborted the I/O
		 * to the adapter because the adapter is not responding
		 */
		fib_dealloc(fibptr);
	} else if (hw_fib->header.XferState & cpu_to_le32(HostOwned)) {
		fib_dealloc(fibptr);
	} else {
		BUG();
	}
	return 0;
}

/**
 * aac_printf - handle printf from firmware
 * @dev: Adapter
 * @val: Message info
 *
 * Print a message passed to us by the controller firmware on the
 * Adaptec board
 */
void aac_printf(struct aac_dev *dev, u32 val)
{
	char *cp = dev->printfbuf;
	if (dev->printf_enabled)
	{
		int length = val & 0xffff;
		int level = (val >> 16) & 0xffff;

		/*
		 * The size of the printfbuf is set in port.c
		 * There is no variable or define for it
		 */
		if (length > 255)
			length = 255;
		if (cp[length] != 0)
			cp[length] = 0;
		if (level == LOG_AAC_HIGH_ERROR)
			printk(KERN_WARNING "%s:%s", dev->name, cp);
		else
			printk(KERN_INFO "%s:%s", dev->name, cp);
	}
	memset(cp, 0, 256);
}

static inline int aac_aif_data(struct aac_aifcmd *aifcmd, uint32_t index)
{
	return le32_to_cpu(((__le32 *)aifcmd->data)[index]);
}

static void aac_handle_aif_bu(struct aac_dev *dev, struct aac_aifcmd *aifcmd)
{
	switch (aac_aif_data(aifcmd, 1)) {
	case AifBuCacheDataLoss:
		if (aac_aif_data(aifcmd, 2))
			dev_info(&dev->pdev->dev, "Backup unit had cache data loss - [%d]\n",
					aac_aif_data(aifcmd, 2));
		else
			dev_info(&dev->pdev->dev, "Backup Unit had cache data loss\n");
		break;
	case AifBuCacheDataRecover:
		if (aac_aif_data(aifcmd, 2))
			dev_info(&dev->pdev->dev, "DDR cache data recovered successfully - [%d]\n",
					aac_aif_data(aifcmd, 2));
		else
			dev_info(&dev->pdev->dev, "DDR cache data recovered successfully\n");
		break;
	}
}
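
/*
 * Several AIF events below carry a packed 32-bit physical device
 * address; from the decode logic that follows, the layout is:
 *
 *	bits 31-28: flags (must be zero for a valid device address)
 *	bits 27-24: physical channel
 *	bits 23-16: lun
 *	bits 15-0 : target id
 */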
  949. /**
  950. * aac_handle_aif - Handle a message from the firmware
  951. * @dev: Which adapter this fib is from
  952. * @fibptr: Pointer to fibptr from adapter
  953. *
  954. * This routine handles a driver notify fib from the adapter and
  955. * dispatches it to the appropriate routine for handling.
  956. */
  957. #define AIF_SNIFF_TIMEOUT (500*HZ)
  958. static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
  959. {
  960. struct hw_fib * hw_fib = fibptr->hw_fib_va;
  961. struct aac_aifcmd * aifcmd = (struct aac_aifcmd *)hw_fib->data;
  962. u32 channel, id, lun, container;
  963. struct scsi_device *device;
  964. enum {
  965. NOTHING,
  966. DELETE,
  967. ADD,
  968. CHANGE
  969. } device_config_needed = NOTHING;
  970. /* Sniff for container changes */
  971. if (!dev || !dev->fsa_dev)
  972. return;
  973. container = channel = id = lun = (u32)-1;
  974. /*
  975. * We have set this up to try and minimize the number of
  976. * re-configures that take place. As a result of this when
  977. * certain AIF's come in we will set a flag waiting for another
  978. * type of AIF before setting the re-config flag.
  979. */
  980. switch (le32_to_cpu(aifcmd->command)) {
  981. case AifCmdDriverNotify:
  982. switch (le32_to_cpu(((__le32 *)aifcmd->data)[0])) {
  983. case AifRawDeviceRemove:
  984. container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
  985. if ((container >> 28)) {
  986. container = (u32)-1;
  987. break;
  988. }
  989. channel = (container >> 24) & 0xF;
  990. if (channel >= dev->maximum_num_channels) {
  991. container = (u32)-1;
  992. break;
  993. }
  994. id = container & 0xFFFF;
  995. if (id >= dev->maximum_num_physicals) {
  996. container = (u32)-1;
  997. break;
  998. }
  999. lun = (container >> 16) & 0xFF;
  1000. container = (u32)-1;
  1001. channel = aac_phys_to_logical(channel);
  1002. device_config_needed = DELETE;
  1003. break;
  1004. /*
  1005. * Morph or Expand complete
  1006. */
  1007. case AifDenMorphComplete:
  1008. case AifDenVolumeExtendComplete:
  1009. container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
  1010. if (container >= dev->maximum_num_containers)
  1011. break;
  1012. /*
  1013. * Find the scsi_device associated with the SCSI
  1014. * address. Make sure we have the right array, and if
  1015. * so set the flag to initiate a new re-config once we
  1016. * see an AifEnConfigChange AIF come through.
  1017. */
  1018. if ((dev != NULL) && (dev->scsi_host_ptr != NULL)) {
  1019. device = scsi_device_lookup(dev->scsi_host_ptr,
  1020. CONTAINER_TO_CHANNEL(container),
  1021. CONTAINER_TO_ID(container),
  1022. CONTAINER_TO_LUN(container));
  1023. if (device) {
  1024. dev->fsa_dev[container].config_needed = CHANGE;
  1025. dev->fsa_dev[container].config_waiting_on = AifEnConfigChange;
  1026. dev->fsa_dev[container].config_waiting_stamp = jiffies;
  1027. scsi_device_put(device);
  1028. }
  1029. }
  1030. }
  1031. /*
  1032. * If we are waiting on something and this happens to be
  1033. * that thing then set the re-configure flag.
  1034. */
  1035. if (container != (u32)-1) {
  1036. if (container >= dev->maximum_num_containers)
  1037. break;
  1038. if ((dev->fsa_dev[container].config_waiting_on ==
  1039. le32_to_cpu(*(__le32 *)aifcmd->data)) &&
  1040. time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
  1041. dev->fsa_dev[container].config_waiting_on = 0;
  1042. } else for (container = 0;
  1043. container < dev->maximum_num_containers; ++container) {
  1044. if ((dev->fsa_dev[container].config_waiting_on ==
  1045. le32_to_cpu(*(__le32 *)aifcmd->data)) &&
  1046. time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
  1047. dev->fsa_dev[container].config_waiting_on = 0;
  1048. }
  1049. break;
  1050. case AifCmdEventNotify:
  1051. switch (le32_to_cpu(((__le32 *)aifcmd->data)[0])) {
  1052. case AifEnBatteryEvent:
  1053. dev->cache_protected =
  1054. (((__le32 *)aifcmd->data)[1] == cpu_to_le32(3));
  1055. break;
  1056. /*
  1057. * Add an Array.
  1058. */
  1059. case AifEnAddContainer:
  1060. container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
  1061. if (container >= dev->maximum_num_containers)
  1062. break;
  1063. dev->fsa_dev[container].config_needed = ADD;
  1064. dev->fsa_dev[container].config_waiting_on =
  1065. AifEnConfigChange;
  1066. dev->fsa_dev[container].config_waiting_stamp = jiffies;
  1067. break;
  1068. /*
  1069. * Delete an Array.
  1070. */
  1071. case AifEnDeleteContainer:
  1072. container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
  1073. if (container >= dev->maximum_num_containers)
  1074. break;
  1075. dev->fsa_dev[container].config_needed = DELETE;
  1076. dev->fsa_dev[container].config_waiting_on =
  1077. AifEnConfigChange;
  1078. dev->fsa_dev[container].config_waiting_stamp = jiffies;
  1079. break;
  1080. /*
  1081. * Container change detected. If we currently are not
  1082. * waiting on something else, setup to wait on a Config Change.
  1083. */
  1084. case AifEnContainerChange:
  1085. container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
  1086. if (container >= dev->maximum_num_containers)
  1087. break;
  1088. if (dev->fsa_dev[container].config_waiting_on &&
  1089. time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
  1090. break;
  1091. dev->fsa_dev[container].config_needed = CHANGE;
  1092. dev->fsa_dev[container].config_waiting_on =
  1093. AifEnConfigChange;
  1094. dev->fsa_dev[container].config_waiting_stamp = jiffies;
  1095. break;
  1096. case AifEnConfigChange:
  1097. break;
  1098. case AifEnAddJBOD:
  1099. case AifEnDeleteJBOD:
  1100. container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
  1101. if ((container >> 28)) {
  1102. container = (u32)-1;
  1103. break;
  1104. }
  1105. channel = (container >> 24) & 0xF;
  1106. if (channel >= dev->maximum_num_channels) {
  1107. container = (u32)-1;
  1108. break;
  1109. }
  1110. id = container & 0xFFFF;
  1111. if (id >= dev->maximum_num_physicals) {
  1112. container = (u32)-1;
  1113. break;
  1114. }
  1115. lun = (container >> 16) & 0xFF;
  1116. container = (u32)-1;
  1117. channel = aac_phys_to_logical(channel);
  1118. device_config_needed =
  1119. (((__le32 *)aifcmd->data)[0] ==
  1120. cpu_to_le32(AifEnAddJBOD)) ? ADD : DELETE;
  1121. if (device_config_needed == ADD) {
  1122. device = scsi_device_lookup(dev->scsi_host_ptr,
  1123. channel,
  1124. id,
  1125. lun);
  1126. if (device) {
  1127. scsi_remove_device(device);
  1128. scsi_device_put(device);
  1129. }
  1130. }
  1131. break;
  1132. case AifEnEnclosureManagement:
  1133. /*
  1134. * If in JBOD mode, automatic exposure of new
  1135. * physical target to be suppressed until configured.
  1136. */
  1137. if (dev->jbod)
  1138. break;
  1139. switch (le32_to_cpu(((__le32 *)aifcmd->data)[3])) {
  1140. case EM_DRIVE_INSERTION:
  1141. case EM_DRIVE_REMOVAL:
  1142. case EM_SES_DRIVE_INSERTION:
  1143. case EM_SES_DRIVE_REMOVAL:
  1144. container = le32_to_cpu(
  1145. ((__le32 *)aifcmd->data)[2]);
  1146. if ((container >> 28)) {
  1147. container = (u32)-1;
  1148. break;
  1149. }
  1150. channel = (container >> 24) & 0xF;
  1151. if (channel >= dev->maximum_num_channels) {
  1152. container = (u32)-1;
  1153. break;
  1154. }
  1155. id = container & 0xFFFF;
  1156. lun = (container >> 16) & 0xFF;
  1157. container = (u32)-1;
  1158. if (id >= dev->maximum_num_physicals) {
  1159. /* legacy dev_t ? */
  1160. if ((0x2000 <= id) || lun || channel ||
  1161. ((channel = (id >> 7) & 0x3F) >=
  1162. dev->maximum_num_channels))
  1163. break;
  1164. lun = (id >> 4) & 7;
  1165. id &= 0xF;
  1166. }
  1167. channel = aac_phys_to_logical(channel);
  1168. device_config_needed =
  1169. ((((__le32 *)aifcmd->data)[3]
  1170. == cpu_to_le32(EM_DRIVE_INSERTION)) ||
  1171. (((__le32 *)aifcmd->data)[3]
  1172. == cpu_to_le32(EM_SES_DRIVE_INSERTION))) ?
  1173. ADD : DELETE;
  1174. break;
  1175. }
  1176. break;
  1177. case AifBuManagerEvent:
  1178. aac_handle_aif_bu(dev, aifcmd);
  1179. break;
  1180. }
  1181. /*
  1182. * If we are waiting on something and this happens to be
  1183. * that thing then set the re-configure flag.
  1184. */
  1185. if (container != (u32)-1) {
  1186. if (container >= dev->maximum_num_containers)
  1187. break;
  1188. if ((dev->fsa_dev[container].config_waiting_on ==
  1189. le32_to_cpu(*(__le32 *)aifcmd->data)) &&
  1190. time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
  1191. dev->fsa_dev[container].config_waiting_on = 0;
  1192. } else for (container = 0;
  1193. container < dev->maximum_num_containers; ++container) {
  1194. if ((dev->fsa_dev[container].config_waiting_on ==
  1195. le32_to_cpu(*(__le32 *)aifcmd->data)) &&
  1196. time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
  1197. dev->fsa_dev[container].config_waiting_on = 0;
  1198. }
  1199. break;
  1200. case AifCmdJobProgress:
  1201. /*
  1202. * These are job progress AIF's. When a Clear is being
  1203. * done on a container it is initially created then hidden from
  1204. * the OS. When the clear completes we don't get a config
  1205. * change so we monitor the job status complete on a clear then
  1206. * wait for a container change.
  1207. */
  1208. if (((__le32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero) &&
  1209. (((__le32 *)aifcmd->data)[6] == ((__le32 *)aifcmd->data)[5] ||
  1210. ((__le32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsSuccess))) {
  1211. for (container = 0;
  1212. container < dev->maximum_num_containers;
  1213. ++container) {
  1214. /*
  1215. * Stomp on all config sequencing for all
  1216. * containers?
  1217. */
  1218. dev->fsa_dev[container].config_waiting_on =
  1219. AifEnContainerChange;
  1220. dev->fsa_dev[container].config_needed = ADD;
  1221. dev->fsa_dev[container].config_waiting_stamp =
  1222. jiffies;
  1223. }
  1224. }
  1225. if (((__le32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero) &&
  1226. ((__le32 *)aifcmd->data)[6] == 0 &&
  1227. ((__le32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsRunning)) {
  1228. for (container = 0;
  1229. container < dev->maximum_num_containers;
  1230. ++container) {
  1231. /*
  1232. * Stomp on all config sequencing for all
  1233. * containers?
  1234. */
  1235. dev->fsa_dev[container].config_waiting_on =
  1236. AifEnContainerChange;
  1237. dev->fsa_dev[container].config_needed = DELETE;
  1238. dev->fsa_dev[container].config_waiting_stamp =
  1239. jiffies;
  1240. }
  1241. }
  1242. break;
  1243. }
	container = 0;
retry_next:
	if (device_config_needed == NOTHING)
		for (; container < dev->maximum_num_containers; ++container) {
			if ((dev->fsa_dev[container].config_waiting_on == 0) &&
			    (dev->fsa_dev[container].config_needed != NOTHING) &&
			    time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) {
				device_config_needed =
					dev->fsa_dev[container].config_needed;
				dev->fsa_dev[container].config_needed = NOTHING;
				channel = CONTAINER_TO_CHANNEL(container);
				id = CONTAINER_TO_ID(container);
				lun = CONTAINER_TO_LUN(container);
				break;
			}
		}
	if (device_config_needed == NOTHING)
		return;

	/*
	 * If we decided that a re-configuration needs to be done,
	 * schedule it here on the way out the door, please close the door
	 * behind you.
	 */

	/*
	 * Find the scsi_device associated with the SCSI address,
	 * and mark it as changed, invalidating the cache. This deals
	 * with changes to existing device IDs.
	 */

	if (!dev || !dev->scsi_host_ptr)
		return;
	/*
	 * force reload of disk info via aac_probe_container
	 */
	if ((channel == CONTAINER_CHANNEL) &&
	    (device_config_needed != NOTHING)) {
		if (dev->fsa_dev[container].valid == 1)
			dev->fsa_dev[container].valid = 2;
		aac_probe_container(dev, container);
	}
	device = scsi_device_lookup(dev->scsi_host_ptr, channel, id, lun);
	if (device) {
		switch (device_config_needed) {
		case DELETE:
#if (defined(AAC_DEBUG_INSTRUMENT_AIF_DELETE))
			scsi_remove_device(device);
#else
			if (scsi_device_online(device)) {
				scsi_device_set_state(device, SDEV_OFFLINE);
				sdev_printk(KERN_INFO, device,
					    "Device offlined - %s\n",
					    (channel == CONTAINER_CHANNEL) ?
					    "array deleted" :
					    "enclosure services event");
			}
#endif
			break;
		case ADD:
			if (!scsi_device_online(device)) {
				sdev_printk(KERN_INFO, device,
					    "Device online - %s\n",
					    (channel == CONTAINER_CHANNEL) ?
					    "array created" :
					    "enclosure services event");
				scsi_device_set_state(device, SDEV_RUNNING);
			}
			/* FALLTHRU */
		case CHANGE:
			if ((channel == CONTAINER_CHANNEL)
			    && (!dev->fsa_dev[container].valid)) {
#if (defined(AAC_DEBUG_INSTRUMENT_AIF_DELETE))
				scsi_remove_device(device);
#else
				if (!scsi_device_online(device))
					break;
				scsi_device_set_state(device, SDEV_OFFLINE);
				sdev_printk(KERN_INFO, device,
					    "Device offlined - %s\n",
					    "array failed");
#endif
				break;
			}
			scsi_rescan_device(&device->sdev_gendev);
			break;
		default:
			break;
		}
		scsi_device_put(device);
		device_config_needed = NOTHING;
	}
	if (device_config_needed == ADD)
		scsi_add_device(dev->scsi_host_ptr, channel, id, lun);
	if (channel == CONTAINER_CHANNEL) {
		container++;
		device_config_needed = NOTHING;
		goto retry_next;
	}
}
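
/*
 * _aac_reset_adapter - low level adapter restart
 *
 * Summary of the sequence below: stop the AIF thread if some other
 * thread called us, ask the adapter to restart, wake any FIBs still
 * waiting for a response, then tear down and rebuild the comm area,
 * DMA masks and IRQs before rescanning the host. Commands still owned
 * by the firmware are completed back to the midlayer for retry.
 */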
static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
{
	int index, quirks;
	int retval;
	struct Scsi_Host *host;
	struct scsi_device *dev;
	struct scsi_cmnd *command;
	struct scsi_cmnd *command_list;
	int jafo = 0;
	int bled;
	u64 dmamask;
	int num_of_fibs = 0;

	/*
	 * Assumptions:
	 *	- host is locked, unless called by the aacraid thread.
	 *	  (a matter of convenience, due to legacy issues surrounding
	 *	  eh_host_adapter_reset).
	 *	- in_reset is asserted, so no new i/o is getting to the
	 *	  card.
	 *	- The card is dead, or will be very shortly ;-/ so no new
	 *	  commands are completing in the interrupt service.
	 */
	host = aac->scsi_host_ptr;
	scsi_block_requests(host);
	aac_adapter_disable_int(aac);
	if (aac->thread && aac->thread->pid != current->pid) {
		spin_unlock_irq(host->host_lock);
		kthread_stop(aac->thread);
		aac->thread = NULL;
		jafo = 1;
	}

	/*
	 * A positive health status means the adapter is in a known
	 * DEAD PANIC state and could be reset to `try again'.
	 */
	bled = forced ? 0 : aac_adapter_check_health(aac);
	retval = aac_adapter_restart(aac, bled, reset_type);

	if (retval)
		goto out;

	/*
	 * Loop through the fibs, close the synchronous FIBS
	 */
	retval = 1;
	num_of_fibs = aac->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB;
	for (index = 0; index < num_of_fibs; index++) {

		struct fib *fib = &aac->fibs[index];
		__le32 XferState = fib->hw_fib_va->header.XferState;
		bool is_response_expected = false;

		if (!(XferState & cpu_to_le32(NoResponseExpected | Async)) &&
		    (XferState & cpu_to_le32(ResponseExpected)))
			is_response_expected = true;

		if (is_response_expected
		    || fib->flags & FIB_CONTEXT_FLAG_WAIT) {
			unsigned long flagv;

			spin_lock_irqsave(&fib->event_lock, flagv);
			up(&fib->event_wait);
			spin_unlock_irqrestore(&fib->event_lock, flagv);
			schedule();
			retval = 0;
		}
	}
	/* Give some extra time for ioctls to complete. */
	if (retval == 0)
		ssleep(2);
	index = aac->cardtype;

	/*
	 * Re-initialize the adapter, first free resources, then carefully
	 * apply the initialization sequence to come back again. Only risk
	 * is a change in Firmware dropping cache, it is assumed the caller
	 * will ensure that i/o is quiesced and the card is flushed in that
	 * case.
	 */
	aac_free_irq(aac);
	aac_fib_map_free(aac);
	dma_free_coherent(&aac->pdev->dev, aac->comm_size, aac->comm_addr,
			  aac->comm_phys);
	aac->comm_addr = NULL;
	aac->comm_phys = 0;
	kfree(aac->queues);
	aac->queues = NULL;
	kfree(aac->fsa_dev);
	aac->fsa_dev = NULL;
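
	/*
	 * Quirk-driven DMA mask selection: 31-bit and non-SRC parts
	 * reprogram the streaming mask to 32 bits, SRC parts only touch
	 * the coherent mask, and 31-bit parts then narrow the coherent
	 * mask to 31 bits in a second step.
	 */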
	dmamask = DMA_BIT_MASK(32);
	quirks = aac_get_driver_ident(index)->quirks;

	if (quirks & AAC_QUIRK_31BIT)
		retval = pci_set_dma_mask(aac->pdev, dmamask);
	else if (!(quirks & AAC_QUIRK_SRC))
		retval = pci_set_dma_mask(aac->pdev, dmamask);
	else
		retval = pci_set_consistent_dma_mask(aac->pdev, dmamask);

	if (quirks & AAC_QUIRK_31BIT && !retval) {
		dmamask = DMA_BIT_MASK(31);
		retval = pci_set_consistent_dma_mask(aac->pdev, dmamask);
	}

	if (retval)
		goto out;

	if ((retval = (*(aac_get_driver_ident(index)->init))(aac)))
		goto out;

	if (jafo) {
		aac->thread = kthread_run(aac_command_thread, aac, "%s",
					  aac->name);
		if (IS_ERR(aac->thread)) {
			retval = PTR_ERR(aac->thread);
			aac->thread = NULL;
			goto out;
		}
	}
	(void)aac_get_adapter_info(aac);
	if ((quirks & AAC_QUIRK_34SG) && (host->sg_tablesize > 34)) {
		host->sg_tablesize = 34;
		host->max_sectors = (host->sg_tablesize * 8) + 112;
	}
	if ((quirks & AAC_QUIRK_17SG) && (host->sg_tablesize > 17)) {
		host->sg_tablesize = 17;
		host->max_sectors = (host->sg_tablesize * 8) + 112;
	}
	aac_get_config_status(aac, 1);
	aac_get_containers(aac);
	/*
	 * This is where the assumption that the Adapter is quiesced
	 * is important.
	 */
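	/*
	 * Commands still owned by the firmware were never going to be
	 * completed by the (now reset) adapter. Chain them through the
	 * otherwise unused SCp.buffer pointer and finish each one with
	 * TASK SET FULL so the midlayer requeues it.
	 */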
	command_list = NULL;
	__shost_for_each_device(dev, host) {
		unsigned long flags;

		spin_lock_irqsave(&dev->list_lock, flags);
		list_for_each_entry(command, &dev->cmd_list, list)
			if (command->SCp.phase == AAC_OWNER_FIRMWARE) {
				command->SCp.buffer = (struct scatterlist *)command_list;
				command_list = command;
			}
		spin_unlock_irqrestore(&dev->list_lock, flags);
	}
	while ((command = command_list)) {
		command_list = (struct scsi_cmnd *)command->SCp.buffer;
		command->SCp.buffer = NULL;
		command->result = DID_OK << 16
				| COMMAND_COMPLETE << 8
				| SAM_STAT_TASK_SET_FULL;
		command->SCp.phase = AAC_OWNER_ERROR_HANDLER;
		command->scsi_done(command);
	}
	/*
	 * Any Device that was already marked offline needs to be marked
	 * running
	 */
	__shost_for_each_device(dev, host) {
		if (!scsi_device_online(dev))
			scsi_device_set_state(dev, SDEV_RUNNING);
	}
	retval = 0;

out:
	aac->in_reset = 0;
	scsi_unblock_requests(host);

	/*
	 * Issue bus rescan to catch any configuration that might have
	 * occurred
	 */
	if (!retval && !is_kdump_kernel()) {
		dev_info(&aac->pdev->dev, "Scheduling bus rescan\n");
		aac_schedule_safw_scan_worker(aac);
	}

	if (jafo) {
		spin_lock_irq(host->host_lock);
	}

	return retval;
}
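
/*
 * aac_reset_adapter - serialized adapter reset entry point
 *
 * Takes the in_reset flag under fib_lock so only one reset runs at a
 * time, quiesces outstanding i/o, optionally sends a shutdown to flush
 * the cache, then performs the reset under the host lock. If the IOP
 * reset turns out to be unsupported (-ENODEV), the earlier shutdown is
 * unwound with a CT_PAUSE_IO container command.
 */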
int aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
{
	unsigned long flagv = 0;
	int retval;
	struct Scsi_Host *host;
	int bled;

	if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
		return -EBUSY;

	if (aac->in_reset) {
		spin_unlock_irqrestore(&aac->fib_lock, flagv);
		return -EBUSY;
	}
	aac->in_reset = 1;
	spin_unlock_irqrestore(&aac->fib_lock, flagv);

	/*
	 * Wait for all commands to complete to this specific
	 * target (block maximum 60 seconds). Although not necessary,
	 * it does make us a good storage citizen.
	 */
	host = aac->scsi_host_ptr;
	scsi_block_requests(host);

	/* Quiesce build, flush cache, write through mode */
	if (forced < 2)
		aac_send_shutdown(aac);
	spin_lock_irqsave(host->host_lock, flagv);
	bled = forced ? forced :
			(aac_check_reset != 0 && aac_check_reset != 1);
	retval = _aac_reset_adapter(aac, bled, reset_type);
	spin_unlock_irqrestore(host->host_lock, flagv);

	if ((forced < 2) && (retval == -ENODEV)) {
		/* Unwind aac_send_shutdown() IOP_RESET unsupported/disabled */
		struct fib *fibctx = aac_fib_alloc(aac);

		if (fibctx) {
			struct aac_pause *cmd;
			int status;

			aac_fib_init(fibctx);

			cmd = (struct aac_pause *) fib_data(fibctx);

			cmd->command = cpu_to_le32(VM_ContainerConfig);
			cmd->type = cpu_to_le32(CT_PAUSE_IO);
			cmd->timeout = cpu_to_le32(1);
			cmd->min = cpu_to_le32(1);
			cmd->noRescan = cpu_to_le32(1);
			cmd->count = cpu_to_le32(0);

			status = aac_fib_send(ContainerCommand,
					      fibctx,
					      sizeof(struct aac_pause),
					      FsaNormal,
					      -2 /* Timeout silently */, 1,
					      NULL, NULL);

			if (status >= 0)
				aac_fib_complete(fibctx);
			/* FIB should be freed only after getting
			 * the response from the F/W */
			if (status != -ERESTARTSYS)
				aac_fib_free(fibctx);
		}
	}

	return retval;
}
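
/*
 * aac_check_health - poll adapter health and fake a panic AIF
 *
 * If the firmware reports a BlinkLED (panic) state, fabricate an
 * AifExeFirmwarePanic event and queue a copy of it on every open fib
 * context so user space pollers see the failure, then return the
 * BlinkLED code to the caller.
 */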
int aac_check_health(struct aac_dev *aac)
{
	int BlinkLED;
	unsigned long time_now, flagv = 0;
	struct list_head *entry;

	/* Extending the scope of fib_lock slightly to protect aac->in_reset */
	if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
		return 0;

	if (aac->in_reset || !(BlinkLED = aac_adapter_check_health(aac))) {
		spin_unlock_irqrestore(&aac->fib_lock, flagv);
		return 0; /* OK */
	}

	aac->in_reset = 1;

	/* Fake up an AIF:
	 *	aac_aifcmd.command = AifCmdEventNotify = 1
	 *	aac_aifcmd.seqnum = 0xFFFFFFFF
	 *	aac_aifcmd.data[0] = AifEnExpEvent = 23
	 *	aac_aifcmd.data[1] = AifExeFirmwarePanic = 3
	 *	aac_aifcmd.data[2] = AifHighPriority = 3
	 *	aac_aifcmd.data[3] = BlinkLED
	 */

	time_now = jiffies/HZ;
	entry = aac->fib_list.next;

	/*
	 * For each Context that is on the
	 * fibctxList, make a copy of the
	 * fib, and then set the event to wake up the
	 * thread that is waiting for it.
	 */
	while (entry != &aac->fib_list) {
		/*
		 * Extract the fibctx
		 */
		struct aac_fib_context *fibctx = list_entry(entry, struct aac_fib_context, next);
		struct hw_fib *hw_fib;
		struct fib *fib;
		/*
		 * Check if the queue is getting
		 * backlogged
		 */
		if (fibctx->count > 20) {
			/*
			 * It's *not* jiffies folks,
			 * but jiffies / HZ, so do not
			 * panic ...
			 */
			u32 time_last = fibctx->jiffies;
			/*
			 * Has it been > 2 minutes
			 * since the last read off
			 * the queue?
			 */
			if ((time_now - time_last) > aif_timeout) {
				entry = entry->next;
				aac_close_fib_context(aac, fibctx);
				continue;
			}
		}
		/*
		 * Warning: no sleep allowed while
		 * holding spinlock
		 */
		hw_fib = kzalloc(sizeof(struct hw_fib), GFP_ATOMIC);
		fib = kzalloc(sizeof(struct fib), GFP_ATOMIC);
		if (fib && hw_fib) {
			struct aac_aifcmd *aif;

			fib->hw_fib_va = hw_fib;
			fib->dev = aac;
			aac_fib_init(fib);
			fib->type = FSAFS_NTC_FIB_CONTEXT;
			fib->size = sizeof(struct fib);
			fib->data = hw_fib->data;
			aif = (struct aac_aifcmd *)hw_fib->data;
			aif->command = cpu_to_le32(AifCmdEventNotify);
			aif->seqnum = cpu_to_le32(0xFFFFFFFF);
			((__le32 *)aif->data)[0] = cpu_to_le32(AifEnExpEvent);
			((__le32 *)aif->data)[1] = cpu_to_le32(AifExeFirmwarePanic);
			((__le32 *)aif->data)[2] = cpu_to_le32(AifHighPriority);
			((__le32 *)aif->data)[3] = cpu_to_le32(BlinkLED);

			/*
			 * Put the FIB onto the
			 * fibctx's fibs
			 */
			list_add_tail(&fib->fiblink, &fibctx->fib_list);
			fibctx->count++;
			/*
			 * Set the event to wake up the
			 * thread that is waiting.
			 */
			up(&fibctx->wait_sem);
		} else {
			printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
			kfree(fib);
			kfree(hw_fib);
		}
		entry = entry->next;
	}

	spin_unlock_irqrestore(&aac->fib_lock, flagv);

	if (BlinkLED < 0) {
		printk(KERN_ERR "%s: Host adapter is dead (or got a PCI error) %d\n",
		       aac->name, BlinkLED);
		goto out;
	}

	printk(KERN_ERR "%s: Host adapter BLINK LED 0x%x\n", aac->name, BlinkLED);

out:
	aac->in_reset = 0;
	return BlinkLED;
}
static inline int is_safw_raid_volume(struct aac_dev *aac, int bus, int target)
{
	return bus == CONTAINER_CHANNEL && target < aac->maximum_num_containers;
}

static struct scsi_device *aac_lookup_safw_scsi_device(struct aac_dev *dev,
						       int bus, int target)
{
	if (bus != CONTAINER_CHANNEL)
		bus = aac_phys_to_logical(bus);

	return scsi_device_lookup(dev->scsi_host_ptr, bus, target, 0);
}

static int aac_add_safw_device(struct aac_dev *dev, int bus, int target)
{
	if (bus != CONTAINER_CHANNEL)
		bus = aac_phys_to_logical(bus);

	return scsi_add_device(dev->scsi_host_ptr, bus, target, 0);
}

static void aac_put_safw_scsi_device(struct scsi_device *sdev)
{
	if (sdev)
		scsi_device_put(sdev);
}

static void aac_remove_safw_device(struct aac_dev *dev, int bus, int target)
{
	struct scsi_device *sdev;

	sdev = aac_lookup_safw_scsi_device(dev, bus, target);
	scsi_remove_device(sdev);
	aac_put_safw_scsi_device(sdev);
}

static inline int aac_is_safw_scan_count_equal(struct aac_dev *dev,
					       int bus, int target)
{
	return dev->hba_map[bus][target].scan_counter == dev->scan_counter;
}

static int aac_is_safw_target_valid(struct aac_dev *dev, int bus, int target)
{
	if (is_safw_raid_volume(dev, bus, target))
		return dev->fsa_dev[target].valid;
	else
		return aac_is_safw_scan_count_equal(dev, bus, target);
}

static int aac_is_safw_device_exposed(struct aac_dev *dev, int bus, int target)
{
	int is_exposed = 0;
	struct scsi_device *sdev;

	sdev = aac_lookup_safw_scsi_device(dev, bus, target);
	if (sdev)
		is_exposed = 1;

	aac_put_safw_scsi_device(sdev);

	return is_exposed;
}
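
/*
 * aac_update_safw_host_devices - sync the SCSI midlayer with firmware
 *
 * Walks every (bus, target) pair the firmware can report and reconciles
 * it with what the midlayer currently exposes: targets the firmware says
 * are valid but are not yet visible get added, targets that disappeared
 * from the latest scan get removed.
 */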
static int aac_update_safw_host_devices(struct aac_dev *dev)
{
	int i;
	int bus;
	int target;
	int is_exposed = 0;
	int rcode = 0;

	rcode = aac_setup_safw_adapter(dev);
	if (unlikely(rcode < 0))
		goto out;

	for (i = 0; i < AAC_BUS_TARGET_LOOP; i++) {

		bus = get_bus_number(i);
		target = get_target_number(i);

		is_exposed = aac_is_safw_device_exposed(dev, bus, target);

		if (aac_is_safw_target_valid(dev, bus, target) && !is_exposed)
			aac_add_safw_device(dev, bus, target);
		else if (!aac_is_safw_target_valid(dev, bus, target) &&
			 is_exposed)
			aac_remove_safw_device(dev, bus, target);
	}
out:
	return rcode;
}

static int aac_scan_safw_host(struct aac_dev *dev)
{
	int rcode = 0;

	rcode = aac_update_safw_host_devices(dev);
	if (rcode)
		aac_schedule_safw_scan_worker(dev);

	return rcode;
}

int aac_scan_host(struct aac_dev *dev)
{
	int rcode = 0;

	mutex_lock(&dev->scan_mutex);
	if (dev->sa_firmware)
		rcode = aac_scan_safw_host(dev);
	else
		scsi_scan_host(dev->scsi_host_ptr);
	mutex_unlock(&dev->scan_mutex);

	return rcode;
}
/**
 * aac_handle_sa_aif - Handle a message from the firmware
 * @dev: Which adapter this fib is from
 * @fibptr: Pointer to fibptr from adapter
 *
 * This routine handles a driver notify fib from the adapter and
 * dispatches it to the appropriate routine for handling.
 */
static void aac_handle_sa_aif(struct aac_dev *dev, struct fib *fibptr)
{
	int i;
	u32 events = 0;

	if (fibptr->hbacmd_size & SA_AIF_HOTPLUG)
		events = SA_AIF_HOTPLUG;
	else if (fibptr->hbacmd_size & SA_AIF_HARDWARE)
		events = SA_AIF_HARDWARE;
	else if (fibptr->hbacmd_size & SA_AIF_PDEV_CHANGE)
		events = SA_AIF_PDEV_CHANGE;
	else if (fibptr->hbacmd_size & SA_AIF_LDEV_CHANGE)
		events = SA_AIF_LDEV_CHANGE;
	else if (fibptr->hbacmd_size & SA_AIF_BPSTAT_CHANGE)
		events = SA_AIF_BPSTAT_CHANGE;
	else if (fibptr->hbacmd_size & SA_AIF_BPCFG_CHANGE)
		events = SA_AIF_BPCFG_CHANGE;

	switch (events) {
	case SA_AIF_HOTPLUG:
	case SA_AIF_HARDWARE:
	case SA_AIF_PDEV_CHANGE:
	case SA_AIF_LDEV_CHANGE:
	case SA_AIF_BPCFG_CHANGE:

		aac_scan_host(dev);

		break;

	case SA_AIF_BPSTAT_CHANGE:
		/* currently do nothing */
		break;
	}

	for (i = 1; i <= 10; ++i) {
		events = src_readl(dev, MUnit.IDR);
		if (events & (1<<23)) {
			pr_warn(" AIF not cleared by firmware - %d/%d\n",
				i, 10);
			ssleep(1);
		}
	}
}
static int get_fib_count(struct aac_dev *dev)
{
	unsigned int num = 0;
	struct list_head *entry;
	unsigned long flagv;

	/*
	 * Warning: no sleep allowed while
	 * holding spinlock. We take the estimate
	 * and pre-allocate a set of fibs outside the
	 * lock.
	 */
	num = le32_to_cpu(dev->init->r7.adapter_fibs_size)
			/ sizeof(struct hw_fib); /* some extra */
	spin_lock_irqsave(&dev->fib_lock, flagv);
	entry = dev->fib_list.next;
	while (entry != &dev->fib_list) {
		entry = entry->next;
		++num;
	}
	spin_unlock_irqrestore(&dev->fib_lock, flagv);

	return num;
}
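
/*
 * fillup_pools - pre-allocate fib/hw_fib pairs outside the spinlock
 *
 * Allocations come in pairs; on the first failure the pool is truncated
 * at the last complete pair and the number of pairs actually allocated
 * is returned, which may be less than requested (or zero).
 */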
static int fillup_pools(struct aac_dev *dev, struct hw_fib **hw_fib_pool,
			struct fib **fib_pool,
			unsigned int num)
{
	struct hw_fib **hw_fib_p;
	struct fib **fib_p;

	hw_fib_p = hw_fib_pool;
	fib_p = fib_pool;
	while (hw_fib_p < &hw_fib_pool[num]) {
		*(hw_fib_p) = kmalloc(sizeof(struct hw_fib), GFP_KERNEL);
		if (!(*(hw_fib_p++))) {
			--hw_fib_p;
			break;
		}

		*(fib_p) = kmalloc(sizeof(struct fib), GFP_KERNEL);
		if (!(*(fib_p++))) {
			kfree(*(--hw_fib_p));
			break;
		}
	}

	/*
	 * Get the actual number of allocated fibs
	 */
	num = hw_fib_p - hw_fib_pool;
	return num;
}
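
/*
 * wakeup_fibctx_threads - fan an AIF out to user space listeners
 *
 * Hands one pre-allocated fib/hw_fib pair to each registered fib
 * context: the incoming AIF is copied into the pair, queued on the
 * context's list, and the waiter is woken through wait_sem. Contexts
 * whose queues have gone unread for too long are closed instead.
 */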
static void wakeup_fibctx_threads(struct aac_dev *dev,
				  struct hw_fib **hw_fib_pool,
				  struct fib **fib_pool,
				  struct fib *fib,
				  struct hw_fib *hw_fib,
				  unsigned int num)
{
	unsigned long flagv;
	struct list_head *entry;
	struct hw_fib **hw_fib_p;
	struct fib **fib_p;
	u32 time_now, time_last;
	struct hw_fib *hw_newfib;
	struct fib *newfib;
	struct aac_fib_context *fibctx;

	time_now = jiffies/HZ;
	spin_lock_irqsave(&dev->fib_lock, flagv);
	entry = dev->fib_list.next;

	/*
	 * For each Context that is on the
	 * fibctxList, make a copy of the
	 * fib, and then set the event to wake up the
	 * thread that is waiting for it.
	 */
	hw_fib_p = hw_fib_pool;
	fib_p = fib_pool;
	while (entry != &dev->fib_list) {
		/*
		 * Extract the fibctx
		 */
		fibctx = list_entry(entry, struct aac_fib_context,
				    next);
		/*
		 * Check if the queue is getting
		 * backlogged
		 */
		if (fibctx->count > 20) {
			/*
			 * It's *not* jiffies folks,
			 * but jiffies / HZ so do not
			 * panic ...
			 */
			time_last = fibctx->jiffies;
			/*
			 * Has it been > 2 minutes
			 * since the last read off
			 * the queue?
			 */
			if ((time_now - time_last) > aif_timeout) {
				entry = entry->next;
				aac_close_fib_context(dev, fibctx);
				continue;
			}
		}
		/*
		 * Warning: no sleep allowed while
		 * holding spinlock
		 */
		if (hw_fib_p >= &hw_fib_pool[num]) {
			pr_warn("aifd: didn't allocate NewFib\n");
			entry = entry->next;
			continue;
		}

		hw_newfib = *hw_fib_p;
		*(hw_fib_p++) = NULL;
		newfib = *fib_p;
		*(fib_p++) = NULL;
		/*
		 * Make the copy of the FIB
		 */
		memcpy(hw_newfib, hw_fib, sizeof(struct hw_fib));
		memcpy(newfib, fib, sizeof(struct fib));
		newfib->hw_fib_va = hw_newfib;
		/*
		 * Put the FIB onto the
		 * fibctx's fibs
		 */
		list_add_tail(&newfib->fiblink, &fibctx->fib_list);
		fibctx->count++;
		/*
		 * Set the event to wake up the
		 * thread that is waiting.
		 */
		up(&fibctx->wait_sem);

		entry = entry->next;
	}
	/*
	 * Set the status of this FIB
	 */
	*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
	aac_fib_adapter_complete(fib, sizeof(u32));
	spin_unlock_irqrestore(&dev->fib_lock, flagv);
}
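
/*
 * aac_process_events - drain the adapter's normal command queue
 *
 * Pops AIF fibs off HostNormCmdQueue one at a time, dropping the queue
 * lock while each is handled. SA firmware AIFs are dispatched directly;
 * legacy AIFs are sniffed for config changes and then broadcast to any
 * waiting ioctl contexts via wakeup_fibctx_threads().
 */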
static void aac_process_events(struct aac_dev *dev)
{
	struct hw_fib *hw_fib;
	struct fib *fib;
	unsigned long flags;
	spinlock_t *t_lock;

	t_lock = dev->queues->queue[HostNormCmdQueue].lock;
	spin_lock_irqsave(t_lock, flags);

	while (!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) {
		struct list_head *entry;
		struct aac_aifcmd *aifcmd;
		unsigned int num;
		struct hw_fib **hw_fib_pool, **hw_fib_p;
		struct fib **fib_pool, **fib_p;

		set_current_state(TASK_RUNNING);

		entry = dev->queues->queue[HostNormCmdQueue].cmdq.next;
		list_del(entry);

		t_lock = dev->queues->queue[HostNormCmdQueue].lock;
		spin_unlock_irqrestore(t_lock, flags);

		fib = list_entry(entry, struct fib, fiblink);
		hw_fib = fib->hw_fib_va;
		if (dev->sa_firmware) {
			/* Thor AIF */
			aac_handle_sa_aif(dev, fib);
			aac_fib_adapter_complete(fib, (u16)sizeof(u32));
			goto free_fib;
		}
		/*
		 * We will process the FIB here or pass it to a
		 * worker thread that is TBD. We really can't
		 * do anything at this point since we don't have
		 * anything defined for this thread to do.
		 */
		memset(fib, 0, sizeof(struct fib));
		fib->type = FSAFS_NTC_FIB_CONTEXT;
		fib->size = sizeof(struct fib);
		fib->hw_fib_va = hw_fib;
		fib->data = hw_fib->data;
		fib->dev = dev;
		/*
		 * We only handle AifRequest fibs from the adapter.
		 */

		aifcmd = (struct aac_aifcmd *) hw_fib->data;
		if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) {
			/* Handle Driver Notify Events */
			aac_handle_aif(dev, fib);
			*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
			aac_fib_adapter_complete(fib, (u16)sizeof(u32));
			goto free_fib;
		}
		/*
		 * The u32 here is important and intended. We are using
		 * 32bit wrapping time to fit the adapter field
		 */

		/* Sniff events */
		if (aifcmd->command == cpu_to_le32(AifCmdEventNotify)
		    || aifcmd->command == cpu_to_le32(AifCmdJobProgress)) {
			aac_handle_aif(dev, fib);
		}

		/*
		 * get number of fibs to process
		 */
		num = get_fib_count(dev);
		if (!num)
			goto free_fib;

		hw_fib_pool = kmalloc_array(num, sizeof(struct hw_fib *),
					    GFP_KERNEL);
		if (!hw_fib_pool)
			goto free_fib;

		fib_pool = kmalloc_array(num, sizeof(struct fib *), GFP_KERNEL);
		if (!fib_pool)
			goto free_hw_fib_pool;

		/*
		 * Fill up fib pointer pools with actual fibs
		 * and hw_fibs
		 */
		num = fillup_pools(dev, hw_fib_pool, fib_pool, num);
		if (!num)
			goto free_mem;

		/*
		 * wakeup the thread that is waiting for
		 * the response from fw (ioctl)
		 */
		wakeup_fibctx_threads(dev, hw_fib_pool, fib_pool,
				      fib, hw_fib, num);

free_mem:
		/* Free up the remaining resources */
		hw_fib_p = hw_fib_pool;
		fib_p = fib_pool;
		while (hw_fib_p < &hw_fib_pool[num]) {
			kfree(*hw_fib_p);
			kfree(*fib_p);
			++fib_p;
			++hw_fib_p;
		}
		kfree(fib_pool);
free_hw_fib_pool:
		kfree(hw_fib_pool);
free_fib:
		kfree(fib);
		t_lock = dev->queues->queue[HostNormCmdQueue].lock;
		spin_lock_irqsave(t_lock, flags);
	}
	/*
	 * There are no more AIF's
	 */
	t_lock = dev->queues->queue[HostNormCmdQueue].lock;
	spin_unlock_irqrestore(t_lock, flags);
}
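
/*
 * aac_send_wellness_command - push a wellness string to SA firmware
 *
 * Builds a BMIC_OUT / WRITE_HOST_WELLNESS SRB addressed to the virtual
 * device bus/target from the supplement adapter info, with the string
 * placed in a single 64-bit scatter-gather element, and sends it as a
 * ScsiPortCommand64 FIB.
 */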
static int aac_send_wellness_command(struct aac_dev *dev, char *wellness_str,
				     u32 datasize)
{
	struct aac_srb *srbcmd;
	struct sgmap64 *sg64;
	dma_addr_t addr;
	char *dma_buf;
	struct fib *fibptr;
	int ret = -ENOMEM;
	u32 vbus, vid;

	fibptr = aac_fib_alloc(dev);
	if (!fibptr)
		goto out;

	dma_buf = dma_alloc_coherent(&dev->pdev->dev, datasize, &addr,
				     GFP_KERNEL);
	if (!dma_buf)
		goto fib_free_out;

	aac_fib_init(fibptr);

	vbus = (u32)le16_to_cpu(dev->supplement_adapter_info.virt_device_bus);
	vid = (u32)le16_to_cpu(dev->supplement_adapter_info.virt_device_target);

	srbcmd = (struct aac_srb *)fib_data(fibptr);

	srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
	srbcmd->channel = cpu_to_le32(vbus);
	srbcmd->id = cpu_to_le32(vid);
	srbcmd->lun = 0;
	srbcmd->flags = cpu_to_le32(SRB_DataOut);
	srbcmd->timeout = cpu_to_le32(10);
	srbcmd->retry_limit = 0;
	srbcmd->cdb_size = cpu_to_le32(12);
	srbcmd->count = cpu_to_le32(datasize);

	memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
	srbcmd->cdb[0] = BMIC_OUT;
	srbcmd->cdb[6] = WRITE_HOST_WELLNESS;
	memcpy(dma_buf, (char *)wellness_str, datasize);

	sg64 = (struct sgmap64 *)&srbcmd->sg;
	sg64->count = cpu_to_le32(1);
	sg64->sg[0].addr[1] = cpu_to_le32((u32)(((addr) >> 16) >> 16));
	sg64->sg[0].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff));
	sg64->sg[0].count = cpu_to_le32(datasize);

	ret = aac_fib_send(ScsiPortCommand64, fibptr, sizeof(struct aac_srb),
			   FsaNormal, 1, 1, NULL, NULL);

	dma_free_coherent(&dev->pdev->dev, datasize, dma_buf, addr);

	/*
	 * Do not set XferState to zero unless
	 * receives a response from F/W
	 */
	if (ret >= 0)
		aac_fib_complete(fibptr);

	/*
	 * FIB should be freed only after
	 * getting the response from the F/W
	 */
	if (ret != -ERESTARTSYS)
		goto fib_free_out;

out:
	return ret;

fib_free_out:
	aac_fib_free(fibptr);
	goto out;
}
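
/*
 * The wellness time-of-day string below is a fixed template whose BCD
 * fields are patched in before sending: bytes 8-10 carry HH/MM/SS and
 * bytes 12-15 carry month, day, century and year. The intervening NUL
 * bytes appear to be field separators expected by the firmware.
 */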
int aac_send_safw_hostttime(struct aac_dev *dev, struct timespec64 *now)
{
	struct tm cur_tm;
	char wellness_str[] = "<HW>TD\010\0\0\0\0\0\0\0\0\0DW\0\0ZZ";
	u32 datasize = sizeof(wellness_str);
	time64_t local_time;
	int ret = -ENODEV;

	if (!dev->sa_firmware)
		goto out;

	local_time = (now->tv_sec - (sys_tz.tz_minuteswest * 60));
	time64_to_tm(local_time, 0, &cur_tm);
	cur_tm.tm_mon += 1;
	cur_tm.tm_year += 1900;
	wellness_str[8] = bin2bcd(cur_tm.tm_hour);
	wellness_str[9] = bin2bcd(cur_tm.tm_min);
	wellness_str[10] = bin2bcd(cur_tm.tm_sec);
	wellness_str[12] = bin2bcd(cur_tm.tm_mon);
	wellness_str[13] = bin2bcd(cur_tm.tm_mday);
	wellness_str[14] = bin2bcd(cur_tm.tm_year / 100);
	wellness_str[15] = bin2bcd(cur_tm.tm_year % 100);

	ret = aac_send_wellness_command(dev, wellness_str, datasize);

out:
	return ret;
}
int aac_send_hosttime(struct aac_dev *dev, struct timespec64 *now)
{
	int ret = -ENOMEM;
	struct fib *fibptr;
	__le32 *info;

	fibptr = aac_fib_alloc(dev);
	if (!fibptr)
		goto out;

	aac_fib_init(fibptr);
	info = (__le32 *)fib_data(fibptr);
	*info = cpu_to_le32(now->tv_sec); /* overflow in y2106 */
	ret = aac_fib_send(SendHostTime, fibptr, sizeof(*info), FsaNormal,
			   1, 1, NULL, NULL);

	/*
	 * Do not set XferState to zero unless
	 * receives a response from F/W
	 */
	if (ret >= 0)
		aac_fib_complete(fibptr);

	/*
	 * FIB should be freed only after
	 * getting the response from the F/W
	 */
	if (ret != -ERESTARTSYS)
		aac_fib_free(fibptr);

out:
	return ret;
}
/**
 * aac_command_thread - command processing thread
 * @data: Adapter to monitor
 *
 * Waits on the commandready event in its queue. When the event gets set
 * it will pull FIBs off its queue. It will continue to pull FIBs off
 * until the queue is empty. When the queue is empty it will wait for
 * more FIBs.
 */
int aac_command_thread(void *data)
{
	struct aac_dev *dev = data;
	DECLARE_WAITQUEUE(wait, current);
	unsigned long next_jiffies = jiffies + HZ;
	unsigned long next_check_jiffies = next_jiffies;
	long difference = HZ;

	/*
	 * We can only have one thread per adapter for AIF's.
	 */
	if (dev->aif_thread)
		return -EINVAL;

	/*
	 * Let the DPC know it has a place to send the AIF's to.
	 */
	dev->aif_thread = 1;
	add_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	dprintk((KERN_INFO "aac_command_thread start\n"));
	while (1) {

		aac_process_events(dev);

		/*
		 * Background activity
		 */
		if ((time_before(next_check_jiffies, next_jiffies))
		    && ((difference = next_check_jiffies - jiffies) <= 0)) {
			next_check_jiffies = next_jiffies;
			if (aac_adapter_check_health(dev) == 0) {
				difference = ((long)(unsigned)check_interval)
					     * HZ;
				next_check_jiffies = jiffies + difference;
			} else if (!dev->queues)
				break;
		}

		if (!time_before(next_check_jiffies, next_jiffies)
		    && ((difference = next_jiffies - jiffies) <= 0)) {
			struct timespec64 now;
			int ret;

			/* Don't even try to talk to adapter if it's sick */
			ret = aac_adapter_check_health(dev);
			if (ret || !dev->queues)
				break;
			next_check_jiffies = jiffies
					     + ((long)(unsigned)check_interval)
					     * HZ;
			ktime_get_real_ts64(&now);

			/* Synchronize our watches */
			if (((NSEC_PER_SEC - (NSEC_PER_SEC / HZ)) > now.tv_nsec)
			    && (now.tv_nsec > (NSEC_PER_SEC / HZ)))
				difference = HZ + HZ / 2 -
					     now.tv_nsec / (NSEC_PER_SEC / HZ);
			else {
				if (now.tv_nsec > NSEC_PER_SEC / 2)
					++now.tv_sec;

				if (dev->sa_firmware)
					ret = aac_send_safw_hostttime(dev,
								      &now);
				else
					ret = aac_send_hosttime(dev, &now);

				difference = (long)(unsigned)update_interval
					     * HZ;
			}
			next_jiffies = jiffies + difference;
			if (time_before(next_check_jiffies, next_jiffies))
				difference = next_check_jiffies - jiffies;
		}
		if (difference <= 0)
			difference = 1;
		set_current_state(TASK_INTERRUPTIBLE);

		if (kthread_should_stop())
			break;

		/*
		 * we probably want usleep_range() here instead of the
		 * jiffies computation
		 */
		schedule_timeout(difference);

		if (kthread_should_stop())
			break;
	}
	if (dev->queues)
		remove_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
	dev->aif_thread = 0;
	return 0;
}
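
/*
 * aac_acquire_irq - register interrupt handlers
 *
 * With MSI-X enabled and more than one vector available, a handler is
 * registered per vector; a failure unwinds the vectors registered so
 * far and disables MSI-X. Otherwise a single (possibly shared) handler
 * is registered on the legacy/MSI vector.
 */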
int aac_acquire_irq(struct aac_dev *dev)
{
	int i;
	int j;
	int ret = 0;

	if (!dev->sync_mode && dev->msi_enabled && dev->max_msix > 1) {
		for (i = 0; i < dev->max_msix; i++) {
			dev->aac_msix[i].vector_no = i;
			dev->aac_msix[i].dev = dev;
			if (request_irq(pci_irq_vector(dev->pdev, i),
					dev->a_ops.adapter_intr,
					0, "aacraid", &(dev->aac_msix[i]))) {
				printk(KERN_ERR "%s%d: Failed to register IRQ for vector %d.\n",
				       dev->name, dev->id, i);
				for (j = 0; j < i; j++)
					free_irq(pci_irq_vector(dev->pdev, j),
						 &(dev->aac_msix[j]));
				pci_disable_msix(dev->pdev);
				ret = -1;
			}
		}
	} else {
		dev->aac_msix[0].vector_no = 0;
		dev->aac_msix[0].dev = dev;

		if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
				IRQF_SHARED, "aacraid",
				&(dev->aac_msix[0])) < 0) {
			if (dev->msi)
				pci_disable_msi(dev->pdev);
			printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
			       dev->name, dev->id);
			ret = -1;
		}
	}
	return ret;
}
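
/*
 * aac_free_irq - release interrupt handlers
 *
 * Mirrors aac_acquire_irq(): SRC-family adapters registered their
 * handlers against aac_msix[] entries, everything else registered the
 * device itself, so the matching cookie is passed to free_irq() before
 * MSI/MSI-X is disabled.
 */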
void aac_free_irq(struct aac_dev *dev)
{
	int i;
	int cpu;

	cpu = cpumask_first(cpu_online_mask);
	if (aac_is_src(dev)) {
		if (dev->max_msix > 1) {
			for (i = 0; i < dev->max_msix; i++)
				free_irq(pci_irq_vector(dev->pdev, i),
					 &(dev->aac_msix[i]));
		} else {
			free_irq(dev->pdev->irq, &(dev->aac_msix[0]));
		}
	} else {
		free_irq(dev->pdev->irq, dev);
	}
	if (dev->msi)
		pci_disable_msi(dev->pdev);
	else if (dev->max_msix > 1)
		pci_disable_msix(dev->pdev);
}