ql4_mbx.c

  1. /*
  2. * QLogic iSCSI HBA Driver
  3. * Copyright (c) 2003-2013 QLogic Corporation
  4. *
  5. * See LICENSE.qla4xxx for copyright and licensing details.
  6. */
  7. #include <linux/ctype.h>
  8. #include "ql4_def.h"
  9. #include "ql4_glbl.h"
  10. #include "ql4_dbg.h"
  11. #include "ql4_inline.h"
  12. #include "ql4_version.h"
  13. void qla4xxx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd,
  14. int in_count)
  15. {
  16. int i;
  17. /* Load all mailbox registers, except mailbox 0. */
  18. for (i = 1; i < in_count; i++)
  19. writel(mbx_cmd[i], &ha->reg->mailbox[i]);
  20. /* Wakeup firmware */
  21. writel(mbx_cmd[0], &ha->reg->mailbox[0]);
  22. readl(&ha->reg->mailbox[0]);
  23. writel(set_rmask(CSR_INTR_RISC), &ha->reg->ctrl_status);
  24. readl(&ha->reg->ctrl_status);
  25. }
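Note on the ordering above: mailboxes 1..in_count-1 are loaded first and mailbox 0, which carries the command opcode, is written last; the readl() calls appear to be there to flush the posted writes before CSR_INTR_RISC is raised to tell the RISC that a new command is waiting.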
  26. void qla4xxx_process_mbox_intr(struct scsi_qla_host *ha, int out_count)
  27. {
  28. int intr_status;
  29. intr_status = readl(&ha->reg->ctrl_status);
  30. if (intr_status & INTR_PENDING) {
  31. /*
  32. * Service the interrupt.
  33. * The ISR will save the mailbox status registers
  34. * to a temporary storage location in the adapter structure.
  35. */
  36. ha->mbox_status_count = out_count;
  37. ha->isp_ops->interrupt_service_routine(ha, intr_status);
  38. }
  39. }
  40. /**
  41. * qla4xxx_is_intr_poll_mode - Are we allowed to poll for interrupts?
  42. * @ha: Pointer to host adapter structure.
  43. * Returns 1 when in polling mode, 0 when in non-polling (interrupt) mode.
  44. **/
  45. static int qla4xxx_is_intr_poll_mode(struct scsi_qla_host *ha)
  46. {
  47. int rval = 1;
  48. if (is_qla8032(ha) || is_qla8042(ha)) {
  49. if (test_bit(AF_IRQ_ATTACHED, &ha->flags) &&
  50. test_bit(AF_83XX_MBOX_INTR_ON, &ha->flags))
  51. rval = 0;
  52. } else {
  53. if (test_bit(AF_IRQ_ATTACHED, &ha->flags) &&
  54. test_bit(AF_INTERRUPTS_ON, &ha->flags) &&
  55. test_bit(AF_ONLINE, &ha->flags) &&
  56. !test_bit(AF_HA_REMOVAL, &ha->flags))
  57. rval = 0;
  58. }
  59. return rval;
  60. }
  61. /**
  62. * qla4xxx_mailbox_command - issues mailbox commands
  63. * @ha: Pointer to host adapter structure.
  64. * @inCount: number of mailbox registers to load.
  65. * @outCount: number of mailbox registers to return.
  66. * @mbx_cmd: data pointer for mailbox in registers.
  67. * @mbx_sts: data pointer for mailbox out registers.
  68. *
  69. * This routine issues mailbox commands and waits for completion.
  70. * If outCount is 0, this routine completes successfully WITHOUT waiting
  71. * for the mailbox command to complete.
  72. **/
  73. int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
  74. uint8_t outCount, uint32_t *mbx_cmd,
  75. uint32_t *mbx_sts)
  76. {
  77. int status = QLA_ERROR;
  78. uint8_t i;
  79. u_long wait_count;
  80. unsigned long flags = 0;
  81. uint32_t dev_state;
  82. /* Make sure that pointers are valid */
  83. if (!mbx_cmd || !mbx_sts) {
  84. DEBUG2(printk("scsi%ld: %s: Invalid mbx_cmd or mbx_sts "
  85. "pointer\n", ha->host_no, __func__));
  86. return status;
  87. }
  88. if (is_qla40XX(ha)) {
  89. if (test_bit(AF_HA_REMOVAL, &ha->flags)) {
  90. DEBUG2(ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: "
  91. "prematurely completing mbx cmd as "
  92. "adapter removal detected\n",
  93. ha->host_no, __func__));
  94. return status;
  95. }
  96. }
  97. if ((is_aer_supported(ha)) &&
  98. (test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags))) {
  99. DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Perm failure on EEH, "
  100. "timeout MBX Exiting.\n", ha->host_no, __func__));
  101. return status;
  102. }
  103. /* Mailbox code active */
  104. wait_count = MBOX_TOV * 100;
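/*
 * MBOX_TOV * 100 attempts, combined with the msleep(10) per iteration below,
 * give roughly MBOX_TOV seconds to acquire ownership of the mailbox
 * interface (the AF_MBOX_COMMAND flag) before giving up.
 */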
  105. while (wait_count--) {
  106. mutex_lock(&ha->mbox_sem);
  107. if (!test_bit(AF_MBOX_COMMAND, &ha->flags)) {
  108. set_bit(AF_MBOX_COMMAND, &ha->flags);
  109. mutex_unlock(&ha->mbox_sem);
  110. break;
  111. }
  112. mutex_unlock(&ha->mbox_sem);
  113. if (!wait_count) {
  114. DEBUG2(printk("scsi%ld: %s: mbox_sem failed\n",
  115. ha->host_no, __func__));
  116. return status;
  117. }
  118. msleep(10);
  119. }
  120. if (is_qla80XX(ha)) {
  121. if (test_bit(AF_FW_RECOVERY, &ha->flags)) {
  122. DEBUG2(ql4_printk(KERN_WARNING, ha,
  123. "scsi%ld: %s: prematurely completing mbx cmd as firmware recovery detected\n",
  124. ha->host_no, __func__));
  125. goto mbox_exit;
  126. }
  127. /* Do not send any mbx cmd if h/w is in failed state*/
  128. ha->isp_ops->idc_lock(ha);
  129. dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);
  130. ha->isp_ops->idc_unlock(ha);
  131. if (dev_state == QLA8XXX_DEV_FAILED) {
  132. ql4_printk(KERN_WARNING, ha,
  133. "scsi%ld: %s: H/W is in failed state, do not send any mailbox commands\n",
  134. ha->host_no, __func__);
  135. goto mbox_exit;
  136. }
  137. }
  138. spin_lock_irqsave(&ha->hardware_lock, flags);
  139. ha->mbox_status_count = outCount;
  140. for (i = 0; i < outCount; i++)
  141. ha->mbox_status[i] = 0;
  142. /* Queue the mailbox command to the firmware */
  143. ha->isp_ops->queue_mailbox_command(ha, mbx_cmd, inCount);
  144. spin_unlock_irqrestore(&ha->hardware_lock, flags);
  145. /* Wait for completion */
  146. /*
  147. * If we don't want status, don't wait for the mailbox command to
  148. * complete. For example, MBOX_CMD_RESET_FW doesn't return status;
  149. * the caller must poll the inbound Interrupt Mask for completion.
  150. */
  151. if (outCount == 0) {
  152. status = QLA_SUCCESS;
  153. goto mbox_exit;
  154. }
  155. /*
  156. * Wait for completion: Poll or completion queue
  157. */
  158. if (qla4xxx_is_intr_poll_mode(ha)) {
  159. /* Poll for command to complete */
  160. wait_count = jiffies + MBOX_TOV * HZ;
  161. while (test_bit(AF_MBOX_COMMAND_DONE, &ha->flags) == 0) {
  162. if (time_after_eq(jiffies, wait_count))
  163. break;
  164. /*
  165. * Service the interrupt.
  166. * The ISR will save the mailbox status registers
  167. * to a temporary storage location in the adapter
  168. * structure.
  169. */
  170. spin_lock_irqsave(&ha->hardware_lock, flags);
  171. ha->isp_ops->process_mailbox_interrupt(ha, outCount);
  172. spin_unlock_irqrestore(&ha->hardware_lock, flags);
  173. msleep(10);
  174. }
  175. } else {
  176. /* Do not poll for completion. Use completion queue */
  177. set_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags);
  178. wait_for_completion_timeout(&ha->mbx_intr_comp, MBOX_TOV * HZ);
  179. clear_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags);
  180. }
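/* In either wait path a timeout leaves AF_MBOX_COMMAND_DONE unset; the check below keys off that flag. */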
  181. /* Check for mailbox timeout. */
  182. if (!test_bit(AF_MBOX_COMMAND_DONE, &ha->flags)) {
  183. if (is_qla80XX(ha) &&
  184. test_bit(AF_FW_RECOVERY, &ha->flags)) {
  185. DEBUG2(ql4_printk(KERN_INFO, ha,
  186. "scsi%ld: %s: prematurely completing mbx cmd as "
  187. "firmware recovery detected\n",
  188. ha->host_no, __func__));
  189. goto mbox_exit;
  190. }
  191. ql4_printk(KERN_WARNING, ha, "scsi%ld: Mailbox Cmd 0x%08X timed out, Scheduling Adapter Reset\n",
  192. ha->host_no, mbx_cmd[0]);
  193. ha->mailbox_timeout_count++;
  194. mbx_sts[0] = (-1);
  195. set_bit(DPC_RESET_HA, &ha->dpc_flags);
  196. if (is_qla8022(ha)) {
  197. ql4_printk(KERN_INFO, ha,
  198. "disabling pause transmit on port 0 & 1.\n");
  199. qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
  200. CRB_NIU_XG_PAUSE_CTL_P0 |
  201. CRB_NIU_XG_PAUSE_CTL_P1);
  202. } else if (is_qla8032(ha) || is_qla8042(ha)) {
  203. ql4_printk(KERN_INFO, ha, " %s: disabling pause transmit on port 0 & 1.\n",
  204. __func__);
  205. qla4_83xx_disable_pause(ha);
  206. }
  207. goto mbox_exit;
  208. }
  209. /*
  210. * Copy the mailbox out registers to the caller's mailbox in/out
  211. * structure.
  212. */
  213. spin_lock_irqsave(&ha->hardware_lock, flags);
  214. for (i = 0; i < outCount; i++)
  215. mbx_sts[i] = ha->mbox_status[i];
  216. /* Set return status and error flags (if applicable). */
  217. switch (ha->mbox_status[0]) {
  218. case MBOX_STS_COMMAND_COMPLETE:
  219. status = QLA_SUCCESS;
  220. break;
  221. case MBOX_STS_INTERMEDIATE_COMPLETION:
  222. status = QLA_SUCCESS;
  223. break;
  224. case MBOX_STS_BUSY:
  225. ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Cmd = %08X, ISP BUSY\n",
  226. ha->host_no, __func__, mbx_cmd[0]);
  227. ha->mailbox_timeout_count++;
  228. break;
  229. default:
  230. ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: FAILED, MBOX CMD = %08X, MBOX STS = %08X %08X %08X %08X %08X %08X %08X %08X\n",
  231. ha->host_no, __func__, mbx_cmd[0], mbx_sts[0],
  232. mbx_sts[1], mbx_sts[2], mbx_sts[3], mbx_sts[4],
  233. mbx_sts[5], mbx_sts[6], mbx_sts[7]);
  234. break;
  235. }
  236. spin_unlock_irqrestore(&ha->hardware_lock, flags);
  237. mbox_exit:
  238. mutex_lock(&ha->mbox_sem);
  239. clear_bit(AF_MBOX_COMMAND, &ha->flags);
  240. mutex_unlock(&ha->mbox_sem);
  241. clear_bit(AF_MBOX_COMMAND_DONE, &ha->flags);
  242. return status;
  243. }
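For reference, a minimal sketch of the calling convention used by the wrapper routines in the rest of this file (the helper name qla4xxx_example_get_fw_state is hypothetical; it mirrors qla4xxx_get_firmware_state further down): zero both register arrays, place the opcode in mbox_cmd[0], and treat mbox_sts[0] as the firmware completion status.

static int qla4xxx_example_get_fw_state(struct scsi_qla_host *ha)
{
        uint32_t mbox_cmd[MBOX_REG_COUNT];
        uint32_t mbox_sts[MBOX_REG_COUNT];

        memset(&mbox_cmd, 0, sizeof(mbox_cmd));
        memset(&mbox_sts, 0, sizeof(mbox_sts));
        mbox_cmd[0] = MBOX_CMD_GET_FW_STATE;    /* opcode always goes in mailbox 0 */

        /* Load MBOX_REG_COUNT registers, ask for 4 back; QLA_SUCCESS is only
         * returned when mbox_sts[0] reports (intermediate) command completion. */
        if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 4,
                                    &mbox_cmd[0], &mbox_sts[0]) != QLA_SUCCESS)
                return QLA_ERROR;

        return QLA_SUCCESS;
}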
  244. /**
  245. * qla4xxx_get_minidump_template - Get the firmware template
  246. * @ha: Pointer to host adapter structure.
  247. * @phys_addr: dma address for template
  248. *
  249. * Obtain the minidump template from firmware during initialization
  250. * as it may not be available when minidump is desired.
  251. **/
  252. int qla4xxx_get_minidump_template(struct scsi_qla_host *ha,
  253. dma_addr_t phys_addr)
  254. {
  255. uint32_t mbox_cmd[MBOX_REG_COUNT];
  256. uint32_t mbox_sts[MBOX_REG_COUNT];
  257. int status;
  258. memset(&mbox_cmd, 0, sizeof(mbox_cmd));
  259. memset(&mbox_sts, 0, sizeof(mbox_sts));
  260. mbox_cmd[0] = MBOX_CMD_MINIDUMP;
  261. mbox_cmd[1] = MINIDUMP_GET_TMPLT_SUBCOMMAND;
  262. mbox_cmd[2] = LSDW(phys_addr);
  263. mbox_cmd[3] = MSDW(phys_addr);
  264. mbox_cmd[4] = ha->fw_dump_tmplt_size;
  265. mbox_cmd[5] = 0;
  266. status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 2, &mbox_cmd[0],
  267. &mbox_sts[0]);
  268. if (status != QLA_SUCCESS) {
  269. DEBUG2(ql4_printk(KERN_INFO, ha,
  270. "scsi%ld: %s: Cmd = %08X, mbx[0] = 0x%04x, mbx[1] = 0x%04x\n",
  271. ha->host_no, __func__, mbox_cmd[0],
  272. mbox_sts[0], mbox_sts[1]));
  273. }
  274. return status;
  275. }
  276. /**
  277. * qla4xxx_req_template_size - Get minidump template size from firmware.
  278. * @ha: Pointer to host adapter structure.
  279. **/
  280. int qla4xxx_req_template_size(struct scsi_qla_host *ha)
  281. {
  282. uint32_t mbox_cmd[MBOX_REG_COUNT];
  283. uint32_t mbox_sts[MBOX_REG_COUNT];
  284. int status;
  285. memset(&mbox_cmd, 0, sizeof(mbox_cmd));
  286. memset(&mbox_sts, 0, sizeof(mbox_sts));
  287. mbox_cmd[0] = MBOX_CMD_MINIDUMP;
  288. mbox_cmd[1] = MINIDUMP_GET_SIZE_SUBCOMMAND;
  289. status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 8, &mbox_cmd[0],
  290. &mbox_sts[0]);
  291. if (status == QLA_SUCCESS) {
  292. ha->fw_dump_tmplt_size = mbox_sts[1];
  293. DEBUG2(ql4_printk(KERN_INFO, ha,
  294. "%s: sts[0]=0x%04x, template size=0x%04x, size_cm_02=0x%04x, size_cm_04=0x%04x, size_cm_08=0x%04x, size_cm_10=0x%04x, size_cm_FF=0x%04x, version=0x%04x\n",
  295. __func__, mbox_sts[0], mbox_sts[1],
  296. mbox_sts[2], mbox_sts[3], mbox_sts[4],
  297. mbox_sts[5], mbox_sts[6], mbox_sts[7]));
  298. if (ha->fw_dump_tmplt_size == 0)
  299. status = QLA_ERROR;
  300. } else {
  301. ql4_printk(KERN_WARNING, ha,
  302. "%s: Error sts[0]=0x%04x, mbx[1]=0x%04x\n",
  303. __func__, mbox_sts[0], mbox_sts[1]);
  304. status = QLA_ERROR;
  305. }
  306. return status;
  307. }
  308. void qla4xxx_mailbox_premature_completion(struct scsi_qla_host *ha)
  309. {
  310. set_bit(AF_FW_RECOVERY, &ha->flags);
  311. ql4_printk(KERN_INFO, ha, "scsi%ld: %s: set FW RECOVERY!\n",
  312. ha->host_no, __func__);
  313. if (test_bit(AF_MBOX_COMMAND, &ha->flags)) {
  314. if (test_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags)) {
  315. complete(&ha->mbx_intr_comp);
  316. ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Due to fw "
  317. "recovery, doing premature completion of "
  318. "mbx cmd\n", ha->host_no, __func__);
  319. } else {
  320. set_bit(AF_MBOX_COMMAND_DONE, &ha->flags);
  321. ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Due to fw "
  322. "recovery, doing premature completion of "
  323. "polling mbx cmd\n", ha->host_no, __func__);
  324. }
  325. }
  326. }
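Note: the premature-completion path above wakes both possible waiters in qla4xxx_mailbox_command: complete() releases a caller blocked on the mbx_intr_comp completion queue (the AF_MBOX_COMMAND_NOPOLL case), while setting AF_MBOX_COMMAND_DONE breaks a caller out of the polling loop.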
  327. static uint8_t
  328. qla4xxx_set_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
  329. uint32_t *mbox_sts, dma_addr_t init_fw_cb_dma)
  330. {
  331. memset(mbox_cmd, 0, sizeof(mbox_cmd[0]) * MBOX_REG_COUNT);
  332. memset(mbox_sts, 0, sizeof(mbox_sts[0]) * MBOX_REG_COUNT);
  333. if (is_qla8022(ha))
  334. qla4_82xx_wr_32(ha, ha->nx_db_wr_ptr, 0);
  335. mbox_cmd[0] = MBOX_CMD_INITIALIZE_FIRMWARE;
  336. mbox_cmd[1] = 0;
  337. mbox_cmd[2] = LSDW(init_fw_cb_dma);
  338. mbox_cmd[3] = MSDW(init_fw_cb_dma);
  339. mbox_cmd[4] = sizeof(struct addr_ctrl_blk);
  340. if (qla4xxx_mailbox_command(ha, 6, 6, mbox_cmd, mbox_sts) !=
  341. QLA_SUCCESS) {
  342. DEBUG2(printk(KERN_WARNING "scsi%ld: %s: "
  343. "MBOX_CMD_INITIALIZE_FIRMWARE"
  344. " failed w/ status %04X\n",
  345. ha->host_no, __func__, mbox_sts[0]));
  346. return QLA_ERROR;
  347. }
  348. return QLA_SUCCESS;
  349. }
  350. uint8_t
  351. qla4xxx_get_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
  352. uint32_t *mbox_sts, dma_addr_t init_fw_cb_dma)
  353. {
  354. memset(mbox_cmd, 0, sizeof(mbox_cmd[0]) * MBOX_REG_COUNT);
  355. memset(mbox_sts, 0, sizeof(mbox_sts[0]) * MBOX_REG_COUNT);
  356. mbox_cmd[0] = MBOX_CMD_GET_INIT_FW_CTRL_BLOCK;
  357. mbox_cmd[2] = LSDW(init_fw_cb_dma);
  358. mbox_cmd[3] = MSDW(init_fw_cb_dma);
  359. mbox_cmd[4] = sizeof(struct addr_ctrl_blk);
  360. if (qla4xxx_mailbox_command(ha, 5, 5, mbox_cmd, mbox_sts) !=
  361. QLA_SUCCESS) {
  362. DEBUG2(printk(KERN_WARNING "scsi%ld: %s: "
  363. "MBOX_CMD_GET_INIT_FW_CTRL_BLOCK"
  364. " failed w/ status %04X\n",
  365. ha->host_no, __func__, mbox_sts[0]));
  366. return QLA_ERROR;
  367. }
  368. return QLA_SUCCESS;
  369. }
  370. uint8_t qla4xxx_set_ipaddr_state(uint8_t fw_ipaddr_state)
  371. {
  372. uint8_t ipaddr_state;
  373. switch (fw_ipaddr_state) {
  374. case IP_ADDRSTATE_UNCONFIGURED:
  375. ipaddr_state = ISCSI_IPDDRESS_STATE_UNCONFIGURED;
  376. break;
  377. case IP_ADDRSTATE_INVALID:
  378. ipaddr_state = ISCSI_IPDDRESS_STATE_INVALID;
  379. break;
  380. case IP_ADDRSTATE_ACQUIRING:
  381. ipaddr_state = ISCSI_IPDDRESS_STATE_ACQUIRING;
  382. break;
  383. case IP_ADDRSTATE_TENTATIVE:
  384. ipaddr_state = ISCSI_IPDDRESS_STATE_TENTATIVE;
  385. break;
  386. case IP_ADDRSTATE_DEPRICATED:
  387. ipaddr_state = ISCSI_IPDDRESS_STATE_DEPRECATED;
  388. break;
  389. case IP_ADDRSTATE_PREFERRED:
  390. ipaddr_state = ISCSI_IPDDRESS_STATE_VALID;
  391. break;
  392. case IP_ADDRSTATE_DISABLING:
  393. ipaddr_state = ISCSI_IPDDRESS_STATE_DISABLING;
  394. break;
  395. default:
  396. ipaddr_state = ISCSI_IPDDRESS_STATE_UNCONFIGURED;
  397. }
  398. return ipaddr_state;
  399. }
  400. static void
  401. qla4xxx_update_local_ip(struct scsi_qla_host *ha,
  402. struct addr_ctrl_blk *init_fw_cb)
  403. {
  404. ha->ip_config.tcp_options = le16_to_cpu(init_fw_cb->ipv4_tcp_opts);
  405. ha->ip_config.ipv4_options = le16_to_cpu(init_fw_cb->ipv4_ip_opts);
  406. ha->ip_config.ipv4_addr_state =
  407. qla4xxx_set_ipaddr_state(init_fw_cb->ipv4_addr_state);
  408. ha->ip_config.eth_mtu_size =
  409. le16_to_cpu(init_fw_cb->eth_mtu_size);
  410. ha->ip_config.ipv4_port = le16_to_cpu(init_fw_cb->ipv4_port);
  411. if (ha->acb_version == ACB_SUPPORTED) {
  412. ha->ip_config.ipv6_options = le16_to_cpu(init_fw_cb->ipv6_opts);
  413. ha->ip_config.ipv6_addl_options =
  414. le16_to_cpu(init_fw_cb->ipv6_addtl_opts);
  415. ha->ip_config.ipv6_tcp_options =
  416. le16_to_cpu(init_fw_cb->ipv6_tcp_opts);
  417. }
  418. /* Save IPv4 Address Info */
  419. memcpy(ha->ip_config.ip_address, init_fw_cb->ipv4_addr,
  420. min(sizeof(ha->ip_config.ip_address),
  421. sizeof(init_fw_cb->ipv4_addr)));
  422. memcpy(ha->ip_config.subnet_mask, init_fw_cb->ipv4_subnet,
  423. min(sizeof(ha->ip_config.subnet_mask),
  424. sizeof(init_fw_cb->ipv4_subnet)));
  425. memcpy(ha->ip_config.gateway, init_fw_cb->ipv4_gw_addr,
  426. min(sizeof(ha->ip_config.gateway),
  427. sizeof(init_fw_cb->ipv4_gw_addr)));
  428. ha->ip_config.ipv4_vlan_tag = be16_to_cpu(init_fw_cb->ipv4_vlan_tag);
  429. ha->ip_config.control = init_fw_cb->control;
  430. ha->ip_config.tcp_wsf = init_fw_cb->ipv4_tcp_wsf;
  431. ha->ip_config.ipv4_tos = init_fw_cb->ipv4_tos;
  432. ha->ip_config.ipv4_cache_id = init_fw_cb->ipv4_cacheid;
  433. ha->ip_config.ipv4_alt_cid_len = init_fw_cb->ipv4_dhcp_alt_cid_len;
  434. memcpy(ha->ip_config.ipv4_alt_cid, init_fw_cb->ipv4_dhcp_alt_cid,
  435. min(sizeof(ha->ip_config.ipv4_alt_cid),
  436. sizeof(init_fw_cb->ipv4_dhcp_alt_cid)));
  437. ha->ip_config.ipv4_vid_len = init_fw_cb->ipv4_dhcp_vid_len;
  438. memcpy(ha->ip_config.ipv4_vid, init_fw_cb->ipv4_dhcp_vid,
  439. min(sizeof(ha->ip_config.ipv4_vid),
  440. sizeof(init_fw_cb->ipv4_dhcp_vid)));
  441. ha->ip_config.ipv4_ttl = init_fw_cb->ipv4_ttl;
  442. ha->ip_config.def_timeout = le16_to_cpu(init_fw_cb->def_timeout);
  443. ha->ip_config.abort_timer = init_fw_cb->abort_timer;
  444. ha->ip_config.iscsi_options = le16_to_cpu(init_fw_cb->iscsi_opts);
  445. ha->ip_config.iscsi_max_pdu_size =
  446. le16_to_cpu(init_fw_cb->iscsi_max_pdu_size);
  447. ha->ip_config.iscsi_first_burst_len =
  448. le16_to_cpu(init_fw_cb->iscsi_fburst_len);
  449. ha->ip_config.iscsi_max_outstnd_r2t =
  450. le16_to_cpu(init_fw_cb->iscsi_max_outstnd_r2t);
  451. ha->ip_config.iscsi_max_burst_len =
  452. le16_to_cpu(init_fw_cb->iscsi_max_burst_len);
  453. memcpy(ha->ip_config.iscsi_name, init_fw_cb->iscsi_name,
  454. min(sizeof(ha->ip_config.iscsi_name),
  455. sizeof(init_fw_cb->iscsi_name)));
  456. if (is_ipv6_enabled(ha)) {
  457. /* Save IPv6 Address */
  458. ha->ip_config.ipv6_link_local_state =
  459. qla4xxx_set_ipaddr_state(init_fw_cb->ipv6_lnk_lcl_addr_state);
  460. ha->ip_config.ipv6_addr0_state =
  461. qla4xxx_set_ipaddr_state(init_fw_cb->ipv6_addr0_state);
  462. ha->ip_config.ipv6_addr1_state =
  463. qla4xxx_set_ipaddr_state(init_fw_cb->ipv6_addr1_state);
  464. switch (le16_to_cpu(init_fw_cb->ipv6_dflt_rtr_state)) {
  465. case IPV6_RTRSTATE_UNKNOWN:
  466. ha->ip_config.ipv6_default_router_state =
  467. ISCSI_ROUTER_STATE_UNKNOWN;
  468. break;
  469. case IPV6_RTRSTATE_MANUAL:
  470. ha->ip_config.ipv6_default_router_state =
  471. ISCSI_ROUTER_STATE_MANUAL;
  472. break;
  473. case IPV6_RTRSTATE_ADVERTISED:
  474. ha->ip_config.ipv6_default_router_state =
  475. ISCSI_ROUTER_STATE_ADVERTISED;
  476. break;
  477. case IPV6_RTRSTATE_STALE:
  478. ha->ip_config.ipv6_default_router_state =
  479. ISCSI_ROUTER_STATE_STALE;
  480. break;
  481. default:
  482. ha->ip_config.ipv6_default_router_state =
  483. ISCSI_ROUTER_STATE_UNKNOWN;
  484. }
  485. ha->ip_config.ipv6_link_local_addr.in6_u.u6_addr8[0] = 0xFE;
  486. ha->ip_config.ipv6_link_local_addr.in6_u.u6_addr8[1] = 0x80;
  487. memcpy(&ha->ip_config.ipv6_link_local_addr.in6_u.u6_addr8[8],
  488. init_fw_cb->ipv6_if_id,
  489. min(sizeof(ha->ip_config.ipv6_link_local_addr)/2,
  490. sizeof(init_fw_cb->ipv6_if_id)));
  491. memcpy(&ha->ip_config.ipv6_addr0, init_fw_cb->ipv6_addr0,
  492. min(sizeof(ha->ip_config.ipv6_addr0),
  493. sizeof(init_fw_cb->ipv6_addr0)));
  494. memcpy(&ha->ip_config.ipv6_addr1, init_fw_cb->ipv6_addr1,
  495. min(sizeof(ha->ip_config.ipv6_addr1),
  496. sizeof(init_fw_cb->ipv6_addr1)));
  497. memcpy(&ha->ip_config.ipv6_default_router_addr,
  498. init_fw_cb->ipv6_dflt_rtr_addr,
  499. min(sizeof(ha->ip_config.ipv6_default_router_addr),
  500. sizeof(init_fw_cb->ipv6_dflt_rtr_addr)));
  501. ha->ip_config.ipv6_vlan_tag =
  502. be16_to_cpu(init_fw_cb->ipv6_vlan_tag);
  503. ha->ip_config.ipv6_port = le16_to_cpu(init_fw_cb->ipv6_port);
  504. ha->ip_config.ipv6_cache_id = init_fw_cb->ipv6_cache_id;
  505. ha->ip_config.ipv6_flow_lbl =
  506. le16_to_cpu(init_fw_cb->ipv6_flow_lbl);
  507. ha->ip_config.ipv6_traffic_class =
  508. init_fw_cb->ipv6_traffic_class;
  509. ha->ip_config.ipv6_hop_limit = init_fw_cb->ipv6_hop_limit;
  510. ha->ip_config.ipv6_nd_reach_time =
  511. le32_to_cpu(init_fw_cb->ipv6_nd_reach_time);
  512. ha->ip_config.ipv6_nd_rexmit_timer =
  513. le32_to_cpu(init_fw_cb->ipv6_nd_rexmit_timer);
  514. ha->ip_config.ipv6_nd_stale_timeout =
  515. le32_to_cpu(init_fw_cb->ipv6_nd_stale_timeout);
  516. ha->ip_config.ipv6_dup_addr_detect_count =
  517. init_fw_cb->ipv6_dup_addr_detect_count;
  518. ha->ip_config.ipv6_gw_advrt_mtu =
  519. le32_to_cpu(init_fw_cb->ipv6_gw_advrt_mtu);
  520. ha->ip_config.ipv6_tcp_wsf = init_fw_cb->ipv6_tcp_wsf;
  521. }
  522. }
  523. uint8_t
  524. qla4xxx_update_local_ifcb(struct scsi_qla_host *ha,
  525. uint32_t *mbox_cmd,
  526. uint32_t *mbox_sts,
  527. struct addr_ctrl_blk *init_fw_cb,
  528. dma_addr_t init_fw_cb_dma)
  529. {
  530. if (qla4xxx_get_ifcb(ha, mbox_cmd, mbox_sts, init_fw_cb_dma)
  531. != QLA_SUCCESS) {
  532. DEBUG2(printk(KERN_WARNING
  533. "scsi%ld: %s: Failed to get init_fw_ctrl_blk\n",
  534. ha->host_no, __func__));
  535. return QLA_ERROR;
  536. }
  537. DEBUG2(qla4xxx_dump_buffer(init_fw_cb, sizeof(struct addr_ctrl_blk)));
  538. /* Save some info in adapter structure. */
  539. ha->acb_version = init_fw_cb->acb_version;
  540. ha->firmware_options = le16_to_cpu(init_fw_cb->fw_options);
  541. ha->heartbeat_interval = init_fw_cb->hb_interval;
  542. memcpy(ha->name_string, init_fw_cb->iscsi_name,
  543. min(sizeof(ha->name_string),
  544. sizeof(init_fw_cb->iscsi_name)));
  545. ha->def_timeout = le16_to_cpu(init_fw_cb->def_timeout);
  546. /*memcpy(ha->alias, init_fw_cb->Alias,
  547. min(sizeof(ha->alias), sizeof(init_fw_cb->Alias)));*/
  548. qla4xxx_update_local_ip(ha, init_fw_cb);
  549. return QLA_SUCCESS;
  550. }
  551. /**
  552. * qla4xxx_initialize_fw_cb - initializes firmware control block.
  553. * @ha: Pointer to host adapter structure.
  554. **/
  555. int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)
  556. {
  557. struct addr_ctrl_blk *init_fw_cb;
  558. dma_addr_t init_fw_cb_dma;
  559. uint32_t mbox_cmd[MBOX_REG_COUNT];
  560. uint32_t mbox_sts[MBOX_REG_COUNT];
  561. int status = QLA_ERROR;
  562. init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
  563. sizeof(struct addr_ctrl_blk),
  564. &init_fw_cb_dma, GFP_KERNEL);
  565. if (init_fw_cb == NULL) {
  566. DEBUG2(printk("scsi%ld: %s: Unable to alloc init_cb\n",
  567. ha->host_no, __func__));
  568. goto exit_init_fw_cb_no_free;
  569. }
  570. memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
  571. /* Get Initialize Firmware Control Block. */
  572. memset(&mbox_cmd, 0, sizeof(mbox_cmd));
  573. memset(&mbox_sts, 0, sizeof(mbox_sts));
  574. if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma) !=
  575. QLA_SUCCESS) {
  576. dma_free_coherent(&ha->pdev->dev,
  577. sizeof(struct addr_ctrl_blk),
  578. init_fw_cb, init_fw_cb_dma);
  579. goto exit_init_fw_cb;
  580. }
  581. /* Fill in the request and response queue information. */
  582. init_fw_cb->rqq_consumer_idx = cpu_to_le16(ha->request_out);
  583. init_fw_cb->compq_producer_idx = cpu_to_le16(ha->response_in);
  584. init_fw_cb->rqq_len = __constant_cpu_to_le16(REQUEST_QUEUE_DEPTH);
  585. init_fw_cb->compq_len = __constant_cpu_to_le16(RESPONSE_QUEUE_DEPTH);
  586. init_fw_cb->rqq_addr_lo = cpu_to_le32(LSDW(ha->request_dma));
  587. init_fw_cb->rqq_addr_hi = cpu_to_le32(MSDW(ha->request_dma));
  588. init_fw_cb->compq_addr_lo = cpu_to_le32(LSDW(ha->response_dma));
  589. init_fw_cb->compq_addr_hi = cpu_to_le32(MSDW(ha->response_dma));
  590. init_fw_cb->shdwreg_addr_lo = cpu_to_le32(LSDW(ha->shadow_regs_dma));
  591. init_fw_cb->shdwreg_addr_hi = cpu_to_le32(MSDW(ha->shadow_regs_dma));
  592. /* Set up required options. */
  593. init_fw_cb->fw_options |=
  594. __constant_cpu_to_le16(FWOPT_SESSION_MODE |
  595. FWOPT_INITIATOR_MODE);
  596. if (is_qla80XX(ha))
  597. init_fw_cb->fw_options |=
  598. __constant_cpu_to_le16(FWOPT_ENABLE_CRBDB);
  599. init_fw_cb->fw_options &= __constant_cpu_to_le16(~FWOPT_TARGET_MODE);
  600. init_fw_cb->add_fw_options = 0;
  601. init_fw_cb->add_fw_options |=
  602. __constant_cpu_to_le16(ADFWOPT_SERIALIZE_TASK_MGMT);
  603. init_fw_cb->add_fw_options |=
  604. __constant_cpu_to_le16(ADFWOPT_AUTOCONN_DISABLE);
  605. if (qla4xxx_set_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma)
  606. != QLA_SUCCESS) {
  607. DEBUG2(printk(KERN_WARNING
  608. "scsi%ld: %s: Failed to set init_fw_ctrl_blk\n",
  609. ha->host_no, __func__));
  610. goto exit_init_fw_cb;
  611. }
  612. if (qla4xxx_update_local_ifcb(ha, &mbox_cmd[0], &mbox_sts[0],
  613. init_fw_cb, init_fw_cb_dma) != QLA_SUCCESS) {
  614. DEBUG2(printk("scsi%ld: %s: Failed to update local ifcb\n",
  615. ha->host_no, __func__));
  616. goto exit_init_fw_cb;
  617. }
  618. status = QLA_SUCCESS;
  619. exit_init_fw_cb:
  620. dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk),
  621. init_fw_cb, init_fw_cb_dma);
  622. exit_init_fw_cb_no_free:
  623. return status;
  624. }
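The routine above follows a get/modify/set sequence: the current init firmware control block is read into a DMA-coherent buffer (qla4xxx_get_ifcb), the request/response queue addresses and firmware options are patched in place, the block is written back with qla4xxx_set_ifcb, and the driver's cached copy is refreshed via qla4xxx_update_local_ifcb.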
  625. /**
  626. * qla4xxx_get_dhcp_ip_address - gets HBA ip address via DHCP
  627. * @ha: Pointer to host adapter structure.
  628. **/
  629. int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host * ha)
  630. {
  631. struct addr_ctrl_blk *init_fw_cb;
  632. dma_addr_t init_fw_cb_dma;
  633. uint32_t mbox_cmd[MBOX_REG_COUNT];
  634. uint32_t mbox_sts[MBOX_REG_COUNT];
  635. init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
  636. sizeof(struct addr_ctrl_blk),
  637. &init_fw_cb_dma, GFP_KERNEL);
  638. if (init_fw_cb == NULL) {
  639. printk("scsi%ld: %s: Unable to alloc init_cb\n", ha->host_no,
  640. __func__);
  641. return QLA_ERROR;
  642. }
  643. /* Get Initialize Firmware Control Block. */
  644. memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
  645. if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma) !=
  646. QLA_SUCCESS) {
  647. DEBUG2(printk("scsi%ld: %s: Failed to get init_fw_ctrl_blk\n",
  648. ha->host_no, __func__));
  649. dma_free_coherent(&ha->pdev->dev,
  650. sizeof(struct addr_ctrl_blk),
  651. init_fw_cb, init_fw_cb_dma);
  652. return QLA_ERROR;
  653. }
  654. /* Save IP Address. */
  655. qla4xxx_update_local_ip(ha, init_fw_cb);
  656. dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk),
  657. init_fw_cb, init_fw_cb_dma);
  658. return QLA_SUCCESS;
  659. }
  660. /**
  661. * qla4xxx_get_firmware_state - gets firmware state of HBA
  662. * @ha: Pointer to host adapter structure.
  663. **/
  664. int qla4xxx_get_firmware_state(struct scsi_qla_host * ha)
  665. {
  666. uint32_t mbox_cmd[MBOX_REG_COUNT];
  667. uint32_t mbox_sts[MBOX_REG_COUNT];
  668. /* Get firmware version */
  669. memset(&mbox_cmd, 0, sizeof(mbox_cmd));
  670. memset(&mbox_sts, 0, sizeof(mbox_sts));
  671. mbox_cmd[0] = MBOX_CMD_GET_FW_STATE;
  672. if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 4, &mbox_cmd[0], &mbox_sts[0]) !=
  673. QLA_SUCCESS) {
  674. DEBUG2(printk("scsi%ld: %s: MBOX_CMD_GET_FW_STATE failed w/ "
  675. "status %04X\n", ha->host_no, __func__,
  676. mbox_sts[0]));
  677. return QLA_ERROR;
  678. }
  679. ha->firmware_state = mbox_sts[1];
  680. ha->board_id = mbox_sts[2];
  681. ha->addl_fw_state = mbox_sts[3];
  682. DEBUG2(printk("scsi%ld: %s firmware_state=0x%x\n",
  683. ha->host_no, __func__, ha->firmware_state);)
  684. return QLA_SUCCESS;
  685. }
  686. /**
  687. * qla4xxx_get_firmware_status - retrieves firmware status
  688. * @ha: Pointer to host adapter structure.
  689. **/
  690. int qla4xxx_get_firmware_status(struct scsi_qla_host * ha)
  691. {
  692. uint32_t mbox_cmd[MBOX_REG_COUNT];
  693. uint32_t mbox_sts[MBOX_REG_COUNT];
  694. /* Get firmware version */
  695. memset(&mbox_cmd, 0, sizeof(mbox_cmd));
  696. memset(&mbox_sts, 0, sizeof(mbox_sts));
  697. mbox_cmd[0] = MBOX_CMD_GET_FW_STATUS;
  698. if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 3, &mbox_cmd[0], &mbox_sts[0]) !=
  699. QLA_SUCCESS) {
  700. DEBUG2(printk("scsi%ld: %s: MBOX_CMD_GET_FW_STATUS failed w/ "
  701. "status %04X\n", ha->host_no, __func__,
  702. mbox_sts[0]));
  703. return QLA_ERROR;
  704. }
  705. /* High-water mark of IOCBs */
  706. ha->iocb_hiwat = mbox_sts[2];
  707. DEBUG2(ql4_printk(KERN_INFO, ha,
  708. "%s: firmware IOCBs available = %d\n", __func__,
  709. ha->iocb_hiwat));
  710. if (ha->iocb_hiwat > IOCB_HIWAT_CUSHION)
  711. ha->iocb_hiwat -= IOCB_HIWAT_CUSHION;
  712. /* Ideally, we should not enter this code, as the # of firmware
  713. * IOCBs is hard-coded in the firmware. We set a default
  714. * iocb_hiwat here just in case */
  715. if (ha->iocb_hiwat == 0) {
  716. ha->iocb_hiwat = REQUEST_QUEUE_DEPTH / 4;
  717. DEBUG2(ql4_printk(KERN_WARNING, ha,
  718. "%s: Setting IOCB's to = %d\n", __func__,
  719. ha->iocb_hiwat));
  720. }
  721. return QLA_SUCCESS;
  722. }
  723. /**
  724. * qla4xxx_get_fwddb_entry - retrieves firmware ddb entry
  725. * @ha: Pointer to host adapter structure.
  726. * @fw_ddb_index: Firmware's device database index
  727. * @fw_ddb_entry: Pointer to firmware's device database entry structure
  728. * @num_valid_ddb_entries: Pointer to number of valid ddb entries
  729. * @next_ddb_index: Pointer to next valid device database index
  730. * @fw_ddb_device_state: Pointer to device state
  731. **/
  732. int qla4xxx_get_fwddb_entry(struct scsi_qla_host *ha,
  733. uint16_t fw_ddb_index,
  734. struct dev_db_entry *fw_ddb_entry,
  735. dma_addr_t fw_ddb_entry_dma,
  736. uint32_t *num_valid_ddb_entries,
  737. uint32_t *next_ddb_index,
  738. uint32_t *fw_ddb_device_state,
  739. uint32_t *conn_err_detail,
  740. uint16_t *tcp_source_port_num,
  741. uint16_t *connection_id)
  742. {
  743. int status = QLA_ERROR;
  744. uint16_t options;
  745. uint32_t mbox_cmd[MBOX_REG_COUNT];
  746. uint32_t mbox_sts[MBOX_REG_COUNT];
  747. /* Make sure the device index is valid */
  748. if (fw_ddb_index >= MAX_DDB_ENTRIES) {
  749. DEBUG2(printk("scsi%ld: %s: ddb [%d] out of range.\n",
  750. ha->host_no, __func__, fw_ddb_index));
  751. goto exit_get_fwddb;
  752. }
  753. memset(&mbox_cmd, 0, sizeof(mbox_cmd));
  754. memset(&mbox_sts, 0, sizeof(mbox_sts));
  755. if (fw_ddb_entry)
  756. memset(fw_ddb_entry, 0, sizeof(struct dev_db_entry));
  757. mbox_cmd[0] = MBOX_CMD_GET_DATABASE_ENTRY;
  758. mbox_cmd[1] = (uint32_t) fw_ddb_index;
  759. mbox_cmd[2] = LSDW(fw_ddb_entry_dma);
  760. mbox_cmd[3] = MSDW(fw_ddb_entry_dma);
  761. mbox_cmd[4] = sizeof(struct dev_db_entry);
  762. if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 7, &mbox_cmd[0], &mbox_sts[0]) ==
  763. QLA_ERROR) {
  764. DEBUG2(printk("scsi%ld: %s: MBOX_CMD_GET_DATABASE_ENTRY failed"
  765. " with status 0x%04X\n", ha->host_no, __func__,
  766. mbox_sts[0]));
  767. goto exit_get_fwddb;
  768. }
  769. if (fw_ddb_index != mbox_sts[1]) {
  770. DEBUG2(printk("scsi%ld: %s: ddb mismatch [%d] != [%d].\n",
  771. ha->host_no, __func__, fw_ddb_index,
  772. mbox_sts[1]));
  773. goto exit_get_fwddb;
  774. }
  775. if (fw_ddb_entry) {
  776. options = le16_to_cpu(fw_ddb_entry->options);
  777. if (options & DDB_OPT_IPV6_DEVICE) {
  778. ql4_printk(KERN_INFO, ha, "%s: DDB[%d] MB0 %04x Tot %d "
  779. "Next %d State %04x ConnErr %08x %pI6 "
  780. ":%04d \"%s\"\n", __func__, fw_ddb_index,
  781. mbox_sts[0], mbox_sts[2], mbox_sts[3],
  782. mbox_sts[4], mbox_sts[5],
  783. fw_ddb_entry->ip_addr,
  784. le16_to_cpu(fw_ddb_entry->port),
  785. fw_ddb_entry->iscsi_name);
  786. } else {
  787. ql4_printk(KERN_INFO, ha, "%s: DDB[%d] MB0 %04x Tot %d "
  788. "Next %d State %04x ConnErr %08x %pI4 "
  789. ":%04d \"%s\"\n", __func__, fw_ddb_index,
  790. mbox_sts[0], mbox_sts[2], mbox_sts[3],
  791. mbox_sts[4], mbox_sts[5],
  792. fw_ddb_entry->ip_addr,
  793. le16_to_cpu(fw_ddb_entry->port),
  794. fw_ddb_entry->iscsi_name);
  795. }
  796. }
  797. if (num_valid_ddb_entries)
  798. *num_valid_ddb_entries = mbox_sts[2];
  799. if (next_ddb_index)
  800. *next_ddb_index = mbox_sts[3];
  801. if (fw_ddb_device_state)
  802. *fw_ddb_device_state = mbox_sts[4];
  803. /*
  804. * RA: This mailbox has been changed to pass connection error and
  805. * details. This is true for ISP4010 as per firmware Version E; it is
  806. * not clear when it changed. Get the time2wait from the fw_ddb_entry
  807. * field default_time2wait (referred to here as minTime2Wait) in the
  808. * DEV_DB_ENTRY struct.
  809. */
  810. if (conn_err_detail)
  811. *conn_err_detail = mbox_sts[5];
  812. if (tcp_source_port_num)
  813. *tcp_source_port_num = (uint16_t) (mbox_sts[6] >> 16);
  814. if (connection_id)
  815. *connection_id = (uint16_t) mbox_sts[6] & 0x00FF;
  816. status = QLA_SUCCESS;
  817. exit_get_fwddb:
  818. return status;
  819. }
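As the code above shows, MBOX_CMD_GET_DATABASE_ENTRY packs its results into the status registers: mbox_sts[1] echoes the DDB index, [2] holds the number of valid entries, [3] the next valid index, [4] the device state, [5] the connection error detail, and [6] carries the TCP source port in its upper 16 bits and the connection id in the bits masked by 0x00FF.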
  820. int qla4xxx_conn_open(struct scsi_qla_host *ha, uint16_t fw_ddb_index)
  821. {
  822. uint32_t mbox_cmd[MBOX_REG_COUNT];
  823. uint32_t mbox_sts[MBOX_REG_COUNT];
  824. int status;
  825. memset(&mbox_cmd, 0, sizeof(mbox_cmd));
  826. memset(&mbox_sts, 0, sizeof(mbox_sts));
  827. mbox_cmd[0] = MBOX_CMD_CONN_OPEN;
  828. mbox_cmd[1] = fw_ddb_index;
  829. status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 2, &mbox_cmd[0],
  830. &mbox_sts[0]);
  831. DEBUG2(ql4_printk(KERN_INFO, ha,
  832. "%s: status = %d mbx0 = 0x%x mbx1 = 0x%x\n",
  833. __func__, status, mbox_sts[0], mbox_sts[1]));
  834. return status;
  835. }
  836. /**
  837. * qla4xxx_set_ddb_entry - sets a ddb entry.
  838. * @ha: Pointer to host adapter structure.
  839. * @fw_ddb_index: Firmware's device database index
  840. * @fw_ddb_entry_dma: dma address of ddb entry
  841. * @mbx_sts: mailbox 0 to be returned or NULL
  842. *
  843. * This routine initializes or updates the adapter's device database
  844. * entry for the specified device.
  845. **/
  846. int qla4xxx_set_ddb_entry(struct scsi_qla_host * ha, uint16_t fw_ddb_index,
  847. dma_addr_t fw_ddb_entry_dma, uint32_t *mbx_sts)
  848. {
  849. uint32_t mbox_cmd[MBOX_REG_COUNT];
  850. uint32_t mbox_sts[MBOX_REG_COUNT];
  851. int status;
  852. /* Do not wait for completion. The firmware will send us an
  853. * ASTS_DATABASE_CHANGED (0x8014) to notify us of the login status.
  854. */
  855. memset(&mbox_cmd, 0, sizeof(mbox_cmd));
  856. memset(&mbox_sts, 0, sizeof(mbox_sts));
  857. mbox_cmd[0] = MBOX_CMD_SET_DATABASE_ENTRY;
  858. mbox_cmd[1] = (uint32_t) fw_ddb_index;
  859. mbox_cmd[2] = LSDW(fw_ddb_entry_dma);
  860. mbox_cmd[3] = MSDW(fw_ddb_entry_dma);
  861. mbox_cmd[4] = sizeof(struct dev_db_entry);
  862. status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0],
  863. &mbox_sts[0]);
  864. if (mbx_sts)
  865. *mbx_sts = mbox_sts[0];
  866. DEBUG2(printk("scsi%ld: %s: status=%d mbx0=0x%x mbx4=0x%x\n",
  867. ha->host_no, __func__, status, mbox_sts[0], mbox_sts[4]);)
  868. return status;
  869. }
  870. int qla4xxx_session_logout_ddb(struct scsi_qla_host *ha,
  871. struct ddb_entry *ddb_entry, int options)
  872. {
  873. int status;
  874. uint32_t mbox_cmd[MBOX_REG_COUNT];
  875. uint32_t mbox_sts[MBOX_REG_COUNT];
  876. memset(&mbox_cmd, 0, sizeof(mbox_cmd));
  877. memset(&mbox_sts, 0, sizeof(mbox_sts));
  878. mbox_cmd[0] = MBOX_CMD_CONN_CLOSE_SESS_LOGOUT;
  879. mbox_cmd[1] = ddb_entry->fw_ddb_index;
  880. mbox_cmd[3] = options;
  881. status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 2, &mbox_cmd[0],
  882. &mbox_sts[0]);
  883. if (status != QLA_SUCCESS) {
  884. DEBUG2(ql4_printk(KERN_INFO, ha,
  885. "%s: MBOX_CMD_CONN_CLOSE_SESS_LOGOUT "
  886. "failed sts %04X %04X", __func__,
  887. mbox_sts[0], mbox_sts[1]));
  888. if ((mbox_sts[0] == MBOX_STS_COMMAND_ERROR) &&
  889. (mbox_sts[1] == DDB_NOT_LOGGED_IN)) {
  890. set_bit(DDB_CONN_CLOSE_FAILURE, &ddb_entry->flags);
  891. }
  892. }
  893. return status;
  894. }
  895. /**
  896. * qla4xxx_get_crash_record - retrieves crash record.
  897. * @ha: Pointer to host adapter structure.
  898. *
  899. * This routine retrieves a crash record from the QLA4010 after an 8002h AEN.
  900. **/
  901. void qla4xxx_get_crash_record(struct scsi_qla_host * ha)
  902. {
  903. uint32_t mbox_cmd[MBOX_REG_COUNT];
  904. uint32_t mbox_sts[MBOX_REG_COUNT];
  905. struct crash_record *crash_record = NULL;
  906. dma_addr_t crash_record_dma = 0;
  907. uint32_t crash_record_size = 0;
  908. memset(&mbox_cmd, 0, sizeof(mbox_cmd));
  909. memset(&mbox_sts, 0, sizeof(mbox_sts));
  910. /* Get size of crash record. */
  911. mbox_cmd[0] = MBOX_CMD_GET_CRASH_RECORD;
  912. if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0], &mbox_sts[0]) !=
  913. QLA_SUCCESS) {
  914. DEBUG2(printk("scsi%ld: %s: ERROR: Unable to retrieve size!\n",
  915. ha->host_no, __func__));
  916. goto exit_get_crash_record;
  917. }
  918. crash_record_size = mbox_sts[4];
  919. if (crash_record_size == 0) {
  920. DEBUG2(printk("scsi%ld: %s: ERROR: Crash record size is 0!\n",
  921. ha->host_no, __func__));
  922. goto exit_get_crash_record;
  923. }
  924. /* Alloc Memory for Crash Record. */
  925. crash_record = dma_alloc_coherent(&ha->pdev->dev, crash_record_size,
  926. &crash_record_dma, GFP_KERNEL);
  927. if (crash_record == NULL)
  928. goto exit_get_crash_record;
  929. /* Get Crash Record. */
  930. memset(&mbox_cmd, 0, sizeof(mbox_cmd));
  931. memset(&mbox_sts, 0, sizeof(mbox_sts));
  932. mbox_cmd[0] = MBOX_CMD_GET_CRASH_RECORD;
  933. mbox_cmd[2] = LSDW(crash_record_dma);
  934. mbox_cmd[3] = MSDW(crash_record_dma);
  935. mbox_cmd[4] = crash_record_size;
  936. if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0], &mbox_sts[0]) !=
  937. QLA_SUCCESS)
  938. goto exit_get_crash_record;
  939. /* Dump Crash Record. */
  940. exit_get_crash_record:
  941. if (crash_record)
  942. dma_free_coherent(&ha->pdev->dev, crash_record_size,
  943. crash_record, crash_record_dma);
  944. }
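Both this routine and qla4xxx_get_conn_event_log below use the same two-pass pattern: a first mailbox call with no buffer returns the record size in mbox_sts[4], a DMA-coherent buffer of that size is then allocated, and the command is reissued with the buffer address split across mailboxes 2 and 3.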
  945. /**
  946. * qla4xxx_get_conn_event_log - retrieves connection event log
  947. * @ha: Pointer to host adapter structure.
  948. **/
  949. void qla4xxx_get_conn_event_log(struct scsi_qla_host * ha)
  950. {
  951. uint32_t mbox_cmd[MBOX_REG_COUNT];
  952. uint32_t mbox_sts[MBOX_REG_COUNT];
  953. struct conn_event_log_entry *event_log = NULL;
  954. dma_addr_t event_log_dma = 0;
  955. uint32_t event_log_size = 0;
  956. uint32_t num_valid_entries;
  957. uint32_t oldest_entry = 0;
  958. uint32_t max_event_log_entries;
  959. uint8_t i;
  960. memset(&mbox_cmd, 0, sizeof(mbox_cmd));
  961. memset(&mbox_sts, 0, sizeof(mbox_sts));
  962. /* Get size of the connection event log. */
  963. mbox_cmd[0] = MBOX_CMD_GET_CONN_EVENT_LOG;
  964. if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0], &mbox_sts[0]) !=
  965. QLA_SUCCESS)
  966. goto exit_get_event_log;
  967. event_log_size = mbox_sts[4];
  968. if (event_log_size == 0)
  969. goto exit_get_event_log;
  970. /* Alloc memory for the connection event log. */
  971. event_log = dma_alloc_coherent(&ha->pdev->dev, event_log_size,
  972. &event_log_dma, GFP_KERNEL);
  973. if (event_log == NULL)
  974. goto exit_get_event_log;
  975. /* Get the connection event log. */
  976. memset(&mbox_cmd, 0, sizeof(mbox_cmd));
  977. memset(&mbox_sts, 0, sizeof(mbox_sts));
  978. mbox_cmd[0] = MBOX_CMD_GET_CONN_EVENT_LOG;
  979. mbox_cmd[2] = LSDW(event_log_dma);
  980. mbox_cmd[3] = MSDW(event_log_dma);
  981. if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0], &mbox_sts[0]) !=
  982. QLA_SUCCESS) {
  983. DEBUG2(printk("scsi%ld: %s: ERROR: Unable to retrieve event "
  984. "log!\n", ha->host_no, __func__));
  985. goto exit_get_event_log;
  986. }
  987. /* Dump Event Log. */
  988. num_valid_entries = mbox_sts[1];
  989. max_event_log_entries = event_log_size /
  990. sizeof(struct conn_event_log_entry);
  991. if (num_valid_entries > max_event_log_entries)
  992. oldest_entry = num_valid_entries % max_event_log_entries;
  993. DEBUG3(printk("scsi%ld: Connection Event Log Dump (%d entries):\n",
  994. ha->host_no, num_valid_entries));
  995. if (ql4xextended_error_logging == 3) {
  996. if (oldest_entry == 0) {
  997. /* Circular Buffer has not wrapped around */
  998. for (i = 0; i < num_valid_entries; i++) {
  999. qla4xxx_dump_buffer((uint8_t *)event_log+
  1000. (i*sizeof(*event_log)),
  1001. sizeof(*event_log));
  1002. }
  1003. }
  1004. else {
  1005. /* Circular Buffer has wrapped around -
  1006. * display accordingly */
  1007. for (i = oldest_entry; i < max_event_log_entries; i++) {
  1008. qla4xxx_dump_buffer((uint8_t *)event_log+
  1009. (i*sizeof(*event_log)),
  1010. sizeof(*event_log));
  1011. }
  1012. for (i = 0; i < oldest_entry; i++) {
  1013. qla4xxx_dump_buffer((uint8_t *)event_log+
  1014. (i*sizeof(*event_log)),
  1015. sizeof(*event_log));
  1016. }
  1017. }
  1018. }
  1019. exit_get_event_log:
  1020. if (event_log)
  1021. dma_free_coherent(&ha->pdev->dev, event_log_size, event_log,
  1022. event_log_dma);
  1023. }
  1024. /**
  1025. * qla4xxx_abort_task - issues Abort Task
  1026. * @ha: Pointer to host adapter structure.
  1027. * @srb: Pointer to srb entry
  1028. *
  1029. * This routine issues an Abort Task TMF for the command referenced by
  1030. * the specified srb. The caller must ensure that the srb and its ddb
  1031. * pointer are valid before calling this routine.
  1032. **/
  1033. int qla4xxx_abort_task(struct scsi_qla_host *ha, struct srb *srb)
  1034. {
  1035. uint32_t mbox_cmd[MBOX_REG_COUNT];
  1036. uint32_t mbox_sts[MBOX_REG_COUNT];
  1037. struct scsi_cmnd *cmd = srb->cmd;
  1038. int status = QLA_SUCCESS;
  1039. unsigned long flags = 0;
  1040. uint32_t index;
  1041. /*
  1042. * Send abort task command to ISP, so that the ISP will return
  1043. * request with ABORT status
  1044. */
  1045. memset(&mbox_cmd, 0, sizeof(mbox_cmd));
  1046. memset(&mbox_sts, 0, sizeof(mbox_sts));
  1047. spin_lock_irqsave(&ha->hardware_lock, flags);
  1048. index = (unsigned long)(unsigned char *)cmd->host_scribble;
  1049. spin_unlock_irqrestore(&ha->hardware_lock, flags);
  1050. /* Firmware already posted completion on response queue */
  1051. if (index == MAX_SRBS)
  1052. return status;
  1053. mbox_cmd[0] = MBOX_CMD_ABORT_TASK;
  1054. mbox_cmd[1] = srb->ddb->fw_ddb_index;
  1055. mbox_cmd[2] = index;
  1056. /* Immediate Command Enable */
  1057. mbox_cmd[5] = 0x01;
  1058. qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0],
  1059. &mbox_sts[0]);
  1060. if (mbox_sts[0] != MBOX_STS_COMMAND_COMPLETE) {
  1061. status = QLA_ERROR;
  1062. DEBUG2(printk(KERN_WARNING "scsi%ld:%d:%llu: abort task FAILED: "
  1063. "mbx0=%04X, mb1=%04X, mb2=%04X, mb3=%04X, mb4=%04X\n",
  1064. ha->host_no, cmd->device->id, cmd->device->lun, mbox_sts[0],
  1065. mbox_sts[1], mbox_sts[2], mbox_sts[3], mbox_sts[4]));
  1066. }
  1067. return status;
  1068. }
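Note: the SRB index recovered from cmd->host_scribble identifies the outstanding request to the firmware; a value of MAX_SRBS means the firmware has already posted a completion for the command, so there is nothing left to abort.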
  1069. /**
  1070. * qla4xxx_reset_lun - issues LUN Reset
  1071. * @ha: Pointer to host adapter structure.
  1072. * @ddb_entry: Pointer to device database entry
  1073. * @lun: lun number
  1074. *
  1075. * This routine performs a LUN RESET on the specified target/lun.
  1076. * The caller must ensure that the ddb_entry pointer is valid
  1077. * before calling this routine.
  1078. **/
  1079. int qla4xxx_reset_lun(struct scsi_qla_host * ha, struct ddb_entry * ddb_entry,
  1080. uint64_t lun)
  1081. {
  1082. uint32_t mbox_cmd[MBOX_REG_COUNT];
  1083. uint32_t mbox_sts[MBOX_REG_COUNT];
  1084. uint32_t scsi_lun[2];
  1085. int status = QLA_SUCCESS;
  1086. DEBUG2(printk("scsi%ld:%d:%llu: lun reset issued\n", ha->host_no,
  1087. ddb_entry->fw_ddb_index, lun));
  1088. /*
  1089. * Send lun reset command to ISP, so that the ISP will return all
  1090. * outstanding requests with RESET status
  1091. */
  1092. memset(&mbox_cmd, 0, sizeof(mbox_cmd));
  1093. memset(&mbox_sts, 0, sizeof(mbox_sts));
  1094. int_to_scsilun(lun, (struct scsi_lun *) scsi_lun);
  1095. mbox_cmd[0] = MBOX_CMD_LUN_RESET;
  1096. mbox_cmd[1] = ddb_entry->fw_ddb_index;
  1097. /* FW expects LUN bytes 0-3 in Incoming Mailbox 2
  1098. * (LUN byte 0 is LSByte, byte 3 is MSByte) */
  1099. mbox_cmd[2] = cpu_to_le32(scsi_lun[0]);
  1100. /* FW expects LUN bytes 4-7 in Incoming Mailbox 3
  1101. * (LUN byte 4 is LSByte, byte 7 is MSByte) */
  1102. mbox_cmd[3] = cpu_to_le32(scsi_lun[1]);
  1103. mbox_cmd[5] = 0x01; /* Immediate Command Enable */
  1104. qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]);
  1105. if (mbox_sts[0] != MBOX_STS_COMMAND_COMPLETE &&
  1106. mbox_sts[0] != MBOX_STS_COMMAND_ERROR)
  1107. status = QLA_ERROR;
  1108. return status;
  1109. }
  1110. /**
  1111. * qla4xxx_reset_target - issues target Reset
  1112. * @ha: Pointer to host adapter structure.
  1113. * @ddb_entry: Pointer to device database entry
  1114. *
  1115. *
  1116. * This routine performs a TARGET RESET on the specified target.
  1117. * The caller must ensure that the ddb_entry pointer is valid
  1118. * before calling this routine.
  1119. **/
  1120. int qla4xxx_reset_target(struct scsi_qla_host *ha,
  1121. struct ddb_entry *ddb_entry)
  1122. {
  1123. uint32_t mbox_cmd[MBOX_REG_COUNT];
  1124. uint32_t mbox_sts[MBOX_REG_COUNT];
  1125. int status = QLA_SUCCESS;
  1126. DEBUG2(printk("scsi%ld:%d: target reset issued\n", ha->host_no,
  1127. ddb_entry->fw_ddb_index));
  1128. /*
  1129. * Send target reset command to ISP, so that the ISP will return all
  1130. * outstanding requests with RESET status
  1131. */
  1132. memset(&mbox_cmd, 0, sizeof(mbox_cmd));
  1133. memset(&mbox_sts, 0, sizeof(mbox_sts));
  1134. mbox_cmd[0] = MBOX_CMD_TARGET_WARM_RESET;
  1135. mbox_cmd[1] = ddb_entry->fw_ddb_index;
  1136. mbox_cmd[5] = 0x01; /* Immediate Command Enable */
  1137. qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0],
  1138. &mbox_sts[0]);
  1139. if (mbox_sts[0] != MBOX_STS_COMMAND_COMPLETE &&
  1140. mbox_sts[0] != MBOX_STS_COMMAND_ERROR)
  1141. status = QLA_ERROR;
  1142. return status;
  1143. }
  1144. int qla4xxx_get_flash(struct scsi_qla_host * ha, dma_addr_t dma_addr,
  1145. uint32_t offset, uint32_t len)
  1146. {
  1147. uint32_t mbox_cmd[MBOX_REG_COUNT];
  1148. uint32_t mbox_sts[MBOX_REG_COUNT];
  1149. memset(&mbox_cmd, 0, sizeof(mbox_cmd));
  1150. memset(&mbox_sts, 0, sizeof(mbox_sts));
  1151. mbox_cmd[0] = MBOX_CMD_READ_FLASH;
  1152. mbox_cmd[1] = LSDW(dma_addr);
  1153. mbox_cmd[2] = MSDW(dma_addr);
  1154. mbox_cmd[3] = offset;
  1155. mbox_cmd[4] = len;
  1156. if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 2, &mbox_cmd[0], &mbox_sts[0]) !=
  1157. QLA_SUCCESS) {
  1158. DEBUG2(printk("scsi%ld: %s: MBOX_CMD_READ_FLASH, failed w/ "
  1159. "status %04X %04X, offset %08x, len %08x\n", ha->host_no,
  1160. __func__, mbox_sts[0], mbox_sts[1], offset, len));
  1161. return QLA_ERROR;
  1162. }
  1163. return QLA_SUCCESS;
  1164. }
  1165. /**
  1166. * qla4xxx_about_firmware - gets FW, iscsi draft and boot loader version
  1167. * @ha: Pointer to host adapter structure.
  1168. *
  1169. * Retrieves the FW version, iSCSI draft version & bootloader version of HBA.
  1170. * Mailboxes 2 & 3 may hold an address for data. Make sure that we write 0 to
  1171. * those mailboxes, if unused.
  1172. **/
int qla4xxx_about_firmware(struct scsi_qla_host *ha)
{
	struct about_fw_info *about_fw = NULL;
	dma_addr_t about_fw_dma;
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	int status = QLA_ERROR;

	about_fw = dma_alloc_coherent(&ha->pdev->dev,
				      sizeof(struct about_fw_info),
				      &about_fw_dma, GFP_KERNEL);
	if (!about_fw) {
		DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Unable to alloc memory "
				  "for about_fw\n", __func__));
		return status;
	}

	memset(about_fw, 0, sizeof(struct about_fw_info));
	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_ABOUT_FW;
	mbox_cmd[2] = LSDW(about_fw_dma);
	mbox_cmd[3] = MSDW(about_fw_dma);
	mbox_cmd[4] = sizeof(struct about_fw_info);

	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, MBOX_REG_COUNT,
					 &mbox_cmd[0], &mbox_sts[0]);
	if (status != QLA_SUCCESS) {
		DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_ABOUT_FW "
				  "failed w/ status %04X\n", __func__,
				  mbox_sts[0]));
		goto exit_about_fw;
	}

	/* Save version information. */
	ha->fw_info.fw_major = le16_to_cpu(about_fw->fw_major);
	ha->fw_info.fw_minor = le16_to_cpu(about_fw->fw_minor);
	ha->fw_info.fw_patch = le16_to_cpu(about_fw->fw_patch);
	ha->fw_info.fw_build = le16_to_cpu(about_fw->fw_build);
	memcpy(ha->fw_info.fw_build_date, about_fw->fw_build_date,
	       sizeof(about_fw->fw_build_date));
	memcpy(ha->fw_info.fw_build_time, about_fw->fw_build_time,
	       sizeof(about_fw->fw_build_time));
	strcpy((char *)ha->fw_info.fw_build_user,
	       skip_spaces((char *)about_fw->fw_build_user));
	ha->fw_info.fw_load_source = le16_to_cpu(about_fw->fw_load_source);
	ha->fw_info.iscsi_major = le16_to_cpu(about_fw->iscsi_major);
	ha->fw_info.iscsi_minor = le16_to_cpu(about_fw->iscsi_minor);
	ha->fw_info.bootload_major = le16_to_cpu(about_fw->bootload_major);
	ha->fw_info.bootload_minor = le16_to_cpu(about_fw->bootload_minor);
	ha->fw_info.bootload_patch = le16_to_cpu(about_fw->bootload_patch);
	ha->fw_info.bootload_build = le16_to_cpu(about_fw->bootload_build);
	strcpy((char *)ha->fw_info.extended_timestamp,
	       skip_spaces((char *)about_fw->extended_timestamp));

	ha->fw_uptime_secs = le32_to_cpu(mbox_sts[5]);
	ha->fw_uptime_msecs = le32_to_cpu(mbox_sts[6]);
	status = QLA_SUCCESS;

exit_about_fw:
	dma_free_coherent(&ha->pdev->dev, sizeof(struct about_fw_info),
			  about_fw, about_fw_dma);
	return status;
}

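/*
 * Usage sketch (hedged, not from the original source): once
 * qla4xxx_about_firmware() succeeds, the version data is cached in
 * ha->fw_info and can be reported directly; the message format below is
 * illustrative only.
 *
 *	if (qla4xxx_about_firmware(ha) == QLA_SUCCESS)
 *		ql4_printk(KERN_INFO, ha, "FW %d.%02d.%02d build %d\n",
 *			   ha->fw_info.fw_major, ha->fw_info.fw_minor,
 *			   ha->fw_info.fw_patch, ha->fw_info.fw_build);
 */
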
int qla4xxx_get_default_ddb(struct scsi_qla_host *ha, uint32_t options,
			    dma_addr_t dma_addr)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_GET_DATABASE_ENTRY_DEFAULTS;
	mbox_cmd[1] = options;
	mbox_cmd[2] = LSDW(dma_addr);
	mbox_cmd[3] = MSDW(dma_addr);

	if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0],
				    &mbox_sts[0]) != QLA_SUCCESS) {
		DEBUG2(printk("scsi%ld: %s: failed status %04X\n",
			      ha->host_no, __func__, mbox_sts[0]));
		return QLA_ERROR;
	}
	return QLA_SUCCESS;
}

int qla4xxx_req_ddb_entry(struct scsi_qla_host *ha, uint32_t ddb_index,
			  uint32_t *mbx_sts)
{
	int status;
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_REQUEST_DATABASE_ENTRY;
	mbox_cmd[1] = ddb_index;

	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0],
					 &mbox_sts[0]);
	if (status != QLA_SUCCESS) {
		DEBUG2(ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n",
				  __func__, mbox_sts[0]));
	}

	*mbx_sts = mbox_sts[0];
	return status;
}

int qla4xxx_clear_ddb_entry(struct scsi_qla_host *ha, uint32_t ddb_index)
{
	int status;
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_CLEAR_DATABASE_ENTRY;
	mbox_cmd[1] = ddb_index;

	status = qla4xxx_mailbox_command(ha, 2, 1, &mbox_cmd[0],
					 &mbox_sts[0]);
	if (status != QLA_SUCCESS) {
		DEBUG2(ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n",
				  __func__, mbox_sts[0]));
	}
	return status;
}

int qla4xxx_set_flash(struct scsi_qla_host *ha, dma_addr_t dma_addr,
		      uint32_t offset, uint32_t length, uint32_t options)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	int status = QLA_SUCCESS;

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_WRITE_FLASH;
	mbox_cmd[1] = LSDW(dma_addr);
	mbox_cmd[2] = MSDW(dma_addr);
	mbox_cmd[3] = offset;
	mbox_cmd[4] = length;
	mbox_cmd[5] = options;

	status = qla4xxx_mailbox_command(ha, 6, 2, &mbox_cmd[0], &mbox_sts[0]);
	if (status != QLA_SUCCESS) {
		DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_WRITE_FLASH "
				  "failed w/ status %04X, mbx1 %04X\n",
				  __func__, mbox_sts[0], mbox_sts[1]));
	}
	return status;
}

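/*
 * Usage sketch (hedged): qla4xxx_set_flash() writes a caller-prepared DMA
 * buffer to flash.  Passing FLASH_OPT_RMW_COMMIT asks the firmware for a
 * read-modify-write of the affected region followed by a commit (as the
 * option name suggests), which is how the CHAP code below uses it.  The
 * variable names here are illustrative only.
 *
 *	rval = qla4xxx_set_flash(ha, entry_dma, flash_offset,
 *				 sizeof(struct ql4_chap_table),
 *				 FLASH_OPT_RMW_COMMIT);
 *	if (rval != QLA_SUCCESS)
 *		return -EINVAL;
 */
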
int qla4xxx_bootdb_by_index(struct scsi_qla_host *ha,
			    struct dev_db_entry *fw_ddb_entry,
			    dma_addr_t fw_ddb_entry_dma, uint16_t ddb_index)
{
	uint32_t dev_db_start_offset = FLASH_OFFSET_DB_INFO;
	uint32_t dev_db_end_offset;
	int status = QLA_ERROR;

	memset(fw_ddb_entry, 0, sizeof(*fw_ddb_entry));

	dev_db_start_offset += (ddb_index * sizeof(*fw_ddb_entry));
	dev_db_end_offset = FLASH_OFFSET_DB_END;

	if (dev_db_start_offset > dev_db_end_offset) {
		DEBUG2(ql4_printk(KERN_ERR, ha,
				  "%s: Invalid DDB index %d", __func__,
				  ddb_index));
		goto exit_bootdb_failed;
	}

	if (qla4xxx_get_flash(ha, fw_ddb_entry_dma, dev_db_start_offset,
			      sizeof(*fw_ddb_entry)) != QLA_SUCCESS) {
		ql4_printk(KERN_ERR, ha, "scsi%ld: %s: Get Flash "
			   "failed\n", ha->host_no, __func__);
		goto exit_bootdb_failed;
	}

	if (fw_ddb_entry->cookie == DDB_VALID_COOKIE)
		status = QLA_SUCCESS;

exit_bootdb_failed:
	return status;
}

int qla4xxx_flashdb_by_index(struct scsi_qla_host *ha,
			     struct dev_db_entry *fw_ddb_entry,
			     dma_addr_t fw_ddb_entry_dma, uint16_t ddb_index)
{
	uint32_t dev_db_start_offset;
	uint32_t dev_db_end_offset;
	int status = QLA_ERROR;

	memset(fw_ddb_entry, 0, sizeof(*fw_ddb_entry));

	if (is_qla40XX(ha)) {
		dev_db_start_offset = FLASH_OFFSET_DB_INFO;
		dev_db_end_offset = FLASH_OFFSET_DB_END;
	} else {
		dev_db_start_offset = FLASH_RAW_ACCESS_ADDR +
				      (ha->hw.flt_region_ddb << 2);
		/* flt_ddb_size is DDB table size for both ports
		 * so divide it by 2 to calculate the offset for second port
		 */
		if (ha->port_num == 1)
			dev_db_start_offset += (ha->hw.flt_ddb_size / 2);

		dev_db_end_offset = dev_db_start_offset +
				    (ha->hw.flt_ddb_size / 2);
	}

	dev_db_start_offset += (ddb_index * sizeof(*fw_ddb_entry));

	if (dev_db_start_offset > dev_db_end_offset) {
		DEBUG2(ql4_printk(KERN_ERR, ha,
				  "%s: Invalid DDB index %d", __func__,
				  ddb_index));
		goto exit_fdb_failed;
	}

	if (qla4xxx_get_flash(ha, fw_ddb_entry_dma, dev_db_start_offset,
			      sizeof(*fw_ddb_entry)) != QLA_SUCCESS) {
		ql4_printk(KERN_ERR, ha, "scsi%ld: %s: Get Flash failed\n",
			   ha->host_no, __func__);
		goto exit_fdb_failed;
	}

	if (fw_ddb_entry->cookie == DDB_VALID_COOKIE)
		status = QLA_SUCCESS;

exit_fdb_failed:
	return status;
}

int qla4xxx_get_chap(struct scsi_qla_host *ha, char *username, char *password,
		     uint16_t idx)
{
	int ret = 0;
	int rval = QLA_ERROR;
	uint32_t offset = 0, chap_size;
	struct ql4_chap_table *chap_table;
	dma_addr_t chap_dma;

	chap_table = dma_pool_alloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma);
	if (chap_table == NULL)
		return -ENOMEM;

	chap_size = sizeof(struct ql4_chap_table);
	memset(chap_table, 0, chap_size);

	if (is_qla40XX(ha))
		offset = FLASH_CHAP_OFFSET | (idx * chap_size);
	else {
		offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
		/* flt_chap_size is CHAP table size for both ports
		 * so divide it by 2 to calculate the offset for second port
		 */
		if (ha->port_num == 1)
			offset += (ha->hw.flt_chap_size / 2);
		offset += (idx * chap_size);
	}

	rval = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
	if (rval != QLA_SUCCESS) {
		ret = -EINVAL;
		goto exit_get_chap;
	}

	DEBUG2(ql4_printk(KERN_INFO, ha, "Chap Cookie: x%x\n",
			  __le16_to_cpu(chap_table->cookie)));

	if (__le16_to_cpu(chap_table->cookie) != CHAP_VALID_COOKIE) {
		ql4_printk(KERN_ERR, ha, "No valid chap entry found\n");
		goto exit_get_chap;
	}

	strlcpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN);
	strlcpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN);
	chap_table->cookie = __constant_cpu_to_le16(CHAP_VALID_COOKIE);

exit_get_chap:
	dma_pool_free(ha->chap_dma_pool, chap_table, chap_dma);
	return ret;
}

/**
 * qla4xxx_set_chap - Make a chap entry at the given index
 * @ha: pointer to adapter structure
 * @username: CHAP username to set
 * @password: CHAP password to set
 * @idx: CHAP index at which to make the entry
 * @bidi: type of chap entry (chap_in or chap_out)
 *
 * Create chap entry at the given index with the information provided.
 *
 * Note: Caller should acquire the chap lock before getting here.
 **/
int qla4xxx_set_chap(struct scsi_qla_host *ha, char *username, char *password,
		     uint16_t idx, int bidi)
{
	int ret = 0;
	int rval = QLA_ERROR;
	uint32_t offset = 0;
	struct ql4_chap_table *chap_table;
	uint32_t chap_size = 0;
	dma_addr_t chap_dma;

	chap_table = dma_pool_alloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma);
	if (chap_table == NULL) {
		ret = -ENOMEM;
		goto exit_set_chap;
	}

	memset(chap_table, 0, sizeof(struct ql4_chap_table));
	if (bidi)
		chap_table->flags |= BIT_6; /* peer */
	else
		chap_table->flags |= BIT_7; /* local */
	chap_table->secret_len = strlen(password);
	strncpy(chap_table->secret, password, MAX_CHAP_SECRET_LEN - 1);
	strncpy(chap_table->name, username, MAX_CHAP_NAME_LEN - 1);
	chap_table->cookie = __constant_cpu_to_le16(CHAP_VALID_COOKIE);

	if (is_qla40XX(ha)) {
		chap_size = MAX_CHAP_ENTRIES_40XX * sizeof(*chap_table);
		offset = FLASH_CHAP_OFFSET;
	} else { /* Single region contains CHAP info for both ports which is
		  * divided into half for each port.
		  */
		chap_size = ha->hw.flt_chap_size / 2;
		offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
		if (ha->port_num == 1)
			offset += chap_size;
	}

	offset += (idx * sizeof(struct ql4_chap_table));
	rval = qla4xxx_set_flash(ha, chap_dma, offset,
				 sizeof(struct ql4_chap_table),
				 FLASH_OPT_RMW_COMMIT);

	if (rval == QLA_SUCCESS && ha->chap_list) {
		/* Update ha chap_list cache */
		memcpy((struct ql4_chap_table *)ha->chap_list + idx,
		       chap_table, sizeof(struct ql4_chap_table));
	}
	dma_pool_free(ha->chap_dma_pool, chap_table, chap_dma);
	if (rval != QLA_SUCCESS)
		ret = -EINVAL;

exit_set_chap:
	return ret;
}

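/*
 * Usage sketch (hedged): per the kernel-doc above, callers are expected to
 * hold the CHAP lock (ha->chap_sem in this driver) around
 * qla4xxx_set_chap().  A minimal caller might look like this; the index
 * value is illustrative.
 *
 *	mutex_lock(&ha->chap_sem);
 *	ret = qla4xxx_set_chap(ha, username, password, idx, 0);
 *	mutex_unlock(&ha->chap_sem);
 *	if (ret)
 *		return ret;
 */
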
int qla4xxx_get_uni_chap_at_index(struct scsi_qla_host *ha, char *username,
				  char *password, uint16_t chap_index)
{
	int rval = QLA_ERROR;
	struct ql4_chap_table *chap_table = NULL;
	int max_chap_entries;

	if (!ha->chap_list) {
		ql4_printk(KERN_ERR, ha, "Do not have CHAP table cache\n");
		rval = QLA_ERROR;
		goto exit_uni_chap;
	}

	if (!username || !password) {
		ql4_printk(KERN_ERR, ha, "No memory for username & secret\n");
		rval = QLA_ERROR;
		goto exit_uni_chap;
	}

	if (is_qla80XX(ha))
		max_chap_entries = (ha->hw.flt_chap_size / 2) /
				   sizeof(struct ql4_chap_table);
	else
		max_chap_entries = MAX_CHAP_ENTRIES_40XX;

	if (chap_index > max_chap_entries) {
		ql4_printk(KERN_ERR, ha, "Invalid Chap index\n");
		rval = QLA_ERROR;
		goto exit_uni_chap;
	}

	mutex_lock(&ha->chap_sem);
	chap_table = (struct ql4_chap_table *)ha->chap_list + chap_index;
	if (chap_table->cookie != __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
		rval = QLA_ERROR;
		goto exit_unlock_uni_chap;
	}

	if (!(chap_table->flags & BIT_7)) {
		ql4_printk(KERN_ERR, ha, "Unidirectional entry not set\n");
		rval = QLA_ERROR;
		goto exit_unlock_uni_chap;
	}

	strlcpy(password, chap_table->secret, MAX_CHAP_SECRET_LEN);
	strlcpy(username, chap_table->name, MAX_CHAP_NAME_LEN);

	rval = QLA_SUCCESS;

exit_unlock_uni_chap:
	mutex_unlock(&ha->chap_sem);
exit_uni_chap:
	return rval;
}

/**
 * qla4xxx_get_chap_index - Get chap index given username and secret
 * @ha: pointer to adapter structure
 * @username: CHAP username to be searched
 * @password: CHAP password to be searched
 * @bidi: Is this a BIDI CHAP
 * @chap_index: CHAP index to be returned
 *
 * Match the username and password in the chap_list, return the index if a
 * match is found. If a match is not found then add the entry in FLASH and
 * return the index at which entry is written in the FLASH.
 **/
int qla4xxx_get_chap_index(struct scsi_qla_host *ha, char *username,
			   char *password, int bidi, uint16_t *chap_index)
{
	int i, rval;
	int free_index = -1;
	int found_index = 0;
	int max_chap_entries = 0;
	struct ql4_chap_table *chap_table;

	if (is_qla80XX(ha))
		max_chap_entries = (ha->hw.flt_chap_size / 2) /
				   sizeof(struct ql4_chap_table);
	else
		max_chap_entries = MAX_CHAP_ENTRIES_40XX;

	if (!ha->chap_list) {
		ql4_printk(KERN_ERR, ha, "Do not have CHAP table cache\n");
		return QLA_ERROR;
	}

	if (!username || !password) {
		ql4_printk(KERN_ERR, ha, "Do not have username and psw\n");
		return QLA_ERROR;
	}

	mutex_lock(&ha->chap_sem);
	for (i = 0; i < max_chap_entries; i++) {
		chap_table = (struct ql4_chap_table *)ha->chap_list + i;
		if (chap_table->cookie !=
		    __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
			if (i > MAX_RESRV_CHAP_IDX && free_index == -1)
				free_index = i;
			continue;
		}
		if (bidi) {
			if (chap_table->flags & BIT_7)
				continue;
		} else {
			if (chap_table->flags & BIT_6)
				continue;
		}
		if (!strncmp(chap_table->secret, password,
			     MAX_CHAP_SECRET_LEN) &&
		    !strncmp(chap_table->name, username,
			     MAX_CHAP_NAME_LEN)) {
			*chap_index = i;
			found_index = 1;
			break;
		}
	}

	/* If chap entry is not present and a free index is available then
	 * write the entry in flash
	 */
	if (!found_index && free_index != -1) {
		rval = qla4xxx_set_chap(ha, username, password,
					free_index, bidi);
		if (!rval) {
			*chap_index = free_index;
			found_index = 1;
		}
	}

	mutex_unlock(&ha->chap_sem);

	if (found_index)
		return QLA_SUCCESS;
	return QLA_ERROR;
}

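/*
 * Usage sketch (hedged): qla4xxx_get_chap_index() both looks up an existing
 * CHAP entry and, when a free slot is available, creates one, so a typical
 * caller only checks the return status before programming the returned
 * index into a DDB.  The variable names below are illustrative.
 *
 *	uint16_t chap_idx;
 *
 *	if (qla4xxx_get_chap_index(ha, sess->username, sess->password,
 *				   LOCAL_CHAP, &chap_idx) == QLA_SUCCESS)
 *		fw_ddb_entry->chap_tbl_idx = cpu_to_le16(chap_idx);
 */
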
int qla4xxx_conn_close_sess_logout(struct scsi_qla_host *ha,
				   uint16_t fw_ddb_index,
				   uint16_t connection_id,
				   uint16_t option)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	int status = QLA_SUCCESS;

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_CONN_CLOSE_SESS_LOGOUT;
	mbox_cmd[1] = fw_ddb_index;
	mbox_cmd[2] = connection_id;
	mbox_cmd[3] = option;

	status = qla4xxx_mailbox_command(ha, 4, 2, &mbox_cmd[0], &mbox_sts[0]);
	if (status != QLA_SUCCESS) {
		DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_CONN_CLOSE "
				  "option %04x failed w/ status %04X %04X\n",
				  __func__, option, mbox_sts[0], mbox_sts[1]));
	}
	return status;
}

/**
 * qla4_84xx_extend_idc_tmo - Extend IDC Timeout.
 * @ha: Pointer to host adapter structure.
 * @ext_tmo: idc timeout value
 *
 * Requests firmware to extend the idc timeout value.
 **/
static int qla4_84xx_extend_idc_tmo(struct scsi_qla_host *ha, uint32_t ext_tmo)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	int status;

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));
	ext_tmo &= 0xf;

	mbox_cmd[0] = MBOX_CMD_IDC_TIME_EXTEND;
	mbox_cmd[1] = ((ha->idc_info.request_desc & 0xfffff0ff) |
		       (ext_tmo << 8));		/* new timeout */
	mbox_cmd[2] = ha->idc_info.info1;
	mbox_cmd[3] = ha->idc_info.info2;
	mbox_cmd[4] = ha->idc_info.info3;

	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, MBOX_REG_COUNT,
					 mbox_cmd, mbox_sts);
	if (status != QLA_SUCCESS) {
		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "scsi%ld: %s: failed status %04X\n",
				  ha->host_no, __func__, mbox_sts[0]));
		return QLA_ERROR;
	} else {
		ql4_printk(KERN_INFO, ha, "%s: IDC timeout extended by %d secs\n",
			   __func__, ext_tmo);
	}

	return QLA_SUCCESS;
}

int qla4xxx_disable_acb(struct scsi_qla_host *ha)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	int status = QLA_SUCCESS;

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_DISABLE_ACB;

	status = qla4xxx_mailbox_command(ha, 8, 5, &mbox_cmd[0], &mbox_sts[0]);
	if (status != QLA_SUCCESS) {
		DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_DISABLE_ACB "
				  "failed w/ status %04X %04X %04X", __func__,
				  mbox_sts[0], mbox_sts[1], mbox_sts[2]));
	} else {
		if (is_qla8042(ha) &&
		    test_bit(DPC_POST_IDC_ACK, &ha->dpc_flags) &&
		    (mbox_sts[0] != MBOX_STS_COMMAND_COMPLETE)) {
			/*
			 * Disable ACB mailbox command takes time to complete
			 * based on the total number of targets connected.
			 * For 512 targets, it took approximately 5 secs to
			 * complete. Setting the timeout value to 8, with the 3
			 * secs buffer.
			 */
			qla4_84xx_extend_idc_tmo(ha, IDC_EXTEND_TOV);
			if (!wait_for_completion_timeout(&ha->disable_acb_comp,
							 IDC_EXTEND_TOV * HZ)) {
				ql4_printk(KERN_WARNING, ha, "%s: Disable ACB Completion not received\n",
					   __func__);
			}
		}
	}
	return status;
}

int qla4xxx_get_acb(struct scsi_qla_host *ha, dma_addr_t acb_dma,
		    uint32_t acb_type, uint32_t len)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	int status = QLA_SUCCESS;

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_GET_ACB;
	mbox_cmd[1] = acb_type;
	mbox_cmd[2] = LSDW(acb_dma);
	mbox_cmd[3] = MSDW(acb_dma);
	mbox_cmd[4] = len;

	status = qla4xxx_mailbox_command(ha, 5, 5, &mbox_cmd[0], &mbox_sts[0]);
	if (status != QLA_SUCCESS) {
		DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_GET_ACB "
				  "failed w/ status %04X\n", __func__,
				  mbox_sts[0]));
	}
	return status;
}

int qla4xxx_set_acb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
		    uint32_t *mbox_sts, dma_addr_t acb_dma)
{
	int status = QLA_SUCCESS;

	memset(mbox_cmd, 0, sizeof(mbox_cmd[0]) * MBOX_REG_COUNT);
	memset(mbox_sts, 0, sizeof(mbox_sts[0]) * MBOX_REG_COUNT);
	mbox_cmd[0] = MBOX_CMD_SET_ACB;
	mbox_cmd[1] = 0; /* Primary ACB */
	mbox_cmd[2] = LSDW(acb_dma);
	mbox_cmd[3] = MSDW(acb_dma);
	mbox_cmd[4] = sizeof(struct addr_ctrl_blk);

	status = qla4xxx_mailbox_command(ha, 5, 5, &mbox_cmd[0], &mbox_sts[0]);
	if (status != QLA_SUCCESS) {
		DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_SET_ACB "
				  "failed w/ status %04X\n", __func__,
				  mbox_sts[0]));
	}
	return status;
}

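/*
 * Usage sketch (hedged): unlike most helpers in this file, qla4xxx_set_acb()
 * takes the mailbox arrays from the caller (it zeroes and fills them itself),
 * so a caller only provides the storage plus the DMA address of a populated
 * struct addr_ctrl_blk:
 *
 *	uint32_t mbox_cmd[MBOX_REG_COUNT];
 *	uint32_t mbox_sts[MBOX_REG_COUNT];
 *
 *	if (qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], acb_dma) !=
 *	    QLA_SUCCESS)
 *		return QLA_ERROR;
 */
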
int qla4xxx_set_param_ddbentry(struct scsi_qla_host *ha,
			       struct ddb_entry *ddb_entry,
			       struct iscsi_cls_conn *cls_conn,
			       uint32_t *mbx_sts)
{
	struct dev_db_entry *fw_ddb_entry;
	struct iscsi_conn *conn;
	struct iscsi_session *sess;
	struct qla_conn *qla_conn;
	struct sockaddr *dst_addr;
	dma_addr_t fw_ddb_entry_dma;
	int status = QLA_SUCCESS;
	int rval = 0;
	struct sockaddr_in *addr;
	struct sockaddr_in6 *addr6;
	char *ip;
	uint16_t iscsi_opts = 0;
	uint32_t options = 0;
	uint16_t idx, *ptid;

	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
					  &fw_ddb_entry_dma, GFP_KERNEL);
	if (!fw_ddb_entry) {
		DEBUG2(ql4_printk(KERN_ERR, ha,
				  "%s: Unable to allocate dma buffer.\n",
				  __func__));
		rval = -ENOMEM;
		goto exit_set_param_no_free;
	}

	conn = cls_conn->dd_data;
	qla_conn = conn->dd_data;
	sess = conn->session;
	dst_addr = (struct sockaddr *)&qla_conn->qla_ep->dst_addr;

	if (dst_addr->sa_family == AF_INET6)
		options |= IPV6_DEFAULT_DDB_ENTRY;

	status = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma);
	if (status == QLA_ERROR) {
		rval = -EINVAL;
		goto exit_set_param;
	}

	ptid = (uint16_t *)&fw_ddb_entry->isid[1];
	*ptid = cpu_to_le16((uint16_t)ddb_entry->sess->target_id);

	DEBUG2(ql4_printk(KERN_INFO, ha, "ISID [%02x%02x%02x%02x%02x%02x]\n",
			  fw_ddb_entry->isid[5], fw_ddb_entry->isid[4],
			  fw_ddb_entry->isid[3], fw_ddb_entry->isid[2],
			  fw_ddb_entry->isid[1], fw_ddb_entry->isid[0]));

	iscsi_opts = le16_to_cpu(fw_ddb_entry->iscsi_options);
	memset(fw_ddb_entry->iscsi_alias, 0, sizeof(fw_ddb_entry->iscsi_alias));

	memset(fw_ddb_entry->iscsi_name, 0, sizeof(fw_ddb_entry->iscsi_name));

	if (sess->targetname != NULL) {
		memcpy(fw_ddb_entry->iscsi_name, sess->targetname,
		       min(strlen(sess->targetname),
			   sizeof(fw_ddb_entry->iscsi_name)));
	}

	memset(fw_ddb_entry->ip_addr, 0, sizeof(fw_ddb_entry->ip_addr));
	memset(fw_ddb_entry->tgt_addr, 0, sizeof(fw_ddb_entry->tgt_addr));

	fw_ddb_entry->options = DDB_OPT_TARGET | DDB_OPT_AUTO_SENDTGTS_DISABLE;

	if (dst_addr->sa_family == AF_INET) {
		addr = (struct sockaddr_in *)dst_addr;
		ip = (char *)&addr->sin_addr;
		memcpy(fw_ddb_entry->ip_addr, ip, IP_ADDR_LEN);
		fw_ddb_entry->port = cpu_to_le16(ntohs(addr->sin_port));
		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "%s: Destination Address [%pI4]: index [%d]\n",
				  __func__, fw_ddb_entry->ip_addr,
				  ddb_entry->fw_ddb_index));
	} else if (dst_addr->sa_family == AF_INET6) {
		addr6 = (struct sockaddr_in6 *)dst_addr;
		ip = (char *)&addr6->sin6_addr;
		memcpy(fw_ddb_entry->ip_addr, ip, IPv6_ADDR_LEN);
		fw_ddb_entry->port = cpu_to_le16(ntohs(addr6->sin6_port));
		fw_ddb_entry->options |= DDB_OPT_IPV6_DEVICE;
		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "%s: Destination Address [%pI6]: index [%d]\n",
				  __func__, fw_ddb_entry->ip_addr,
				  ddb_entry->fw_ddb_index));
	} else {
		ql4_printk(KERN_ERR, ha,
			   "%s: Failed to get IP Address\n",
			   __func__);
		rval = -EINVAL;
		goto exit_set_param;
	}

	/* CHAP */
	if (sess->username != NULL && sess->password != NULL) {
		if (strlen(sess->username) && strlen(sess->password)) {
			iscsi_opts |= BIT_7;

			rval = qla4xxx_get_chap_index(ha, sess->username,
						      sess->password,
						      LOCAL_CHAP, &idx);
			if (rval)
				goto exit_set_param;

			fw_ddb_entry->chap_tbl_idx = cpu_to_le16(idx);
		}
	}

	if (sess->username_in != NULL && sess->password_in != NULL) {
		/* Check if BIDI CHAP */
		if (strlen(sess->username_in) && strlen(sess->password_in)) {
			iscsi_opts |= BIT_4;

			rval = qla4xxx_get_chap_index(ha, sess->username_in,
						      sess->password_in,
						      BIDI_CHAP, &idx);
			if (rval)
				goto exit_set_param;
		}
	}

	if (sess->initial_r2t_en)
		iscsi_opts |= BIT_10;

	if (sess->imm_data_en)
		iscsi_opts |= BIT_11;

	fw_ddb_entry->iscsi_options = cpu_to_le16(iscsi_opts);

	if (conn->max_recv_dlength)
		fw_ddb_entry->iscsi_max_rcv_data_seg_len =
		  __constant_cpu_to_le16((conn->max_recv_dlength / BYTE_UNITS));

	if (sess->max_r2t)
		fw_ddb_entry->iscsi_max_outsnd_r2t = cpu_to_le16(sess->max_r2t);

	if (sess->first_burst)
		fw_ddb_entry->iscsi_first_burst_len =
		       __constant_cpu_to_le16((sess->first_burst / BYTE_UNITS));

	if (sess->max_burst)
		fw_ddb_entry->iscsi_max_burst_len =
			__constant_cpu_to_le16((sess->max_burst / BYTE_UNITS));

	if (sess->time2wait)
		fw_ddb_entry->iscsi_def_time2wait =
			cpu_to_le16(sess->time2wait);

	if (sess->time2retain)
		fw_ddb_entry->iscsi_def_time2retain =
			cpu_to_le16(sess->time2retain);

	status = qla4xxx_set_ddb_entry(ha, ddb_entry->fw_ddb_index,
				       fw_ddb_entry_dma, mbx_sts);
	if (status != QLA_SUCCESS)
		rval = -EINVAL;
exit_set_param:
	dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
			  fw_ddb_entry, fw_ddb_entry_dma);
exit_set_param_no_free:
	return rval;
}

int qla4xxx_get_mgmt_data(struct scsi_qla_host *ha, uint16_t fw_ddb_index,
			  uint16_t stats_size, dma_addr_t stats_dma)
{
	int status = QLA_SUCCESS;
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];

	memset(mbox_cmd, 0, sizeof(mbox_cmd[0]) * MBOX_REG_COUNT);
	memset(mbox_sts, 0, sizeof(mbox_sts[0]) * MBOX_REG_COUNT);
	mbox_cmd[0] = MBOX_CMD_GET_MANAGEMENT_DATA;
	mbox_cmd[1] = fw_ddb_index;
	mbox_cmd[2] = LSDW(stats_dma);
	mbox_cmd[3] = MSDW(stats_dma);
	mbox_cmd[4] = stats_size;

	status = qla4xxx_mailbox_command(ha, 5, 1, &mbox_cmd[0], &mbox_sts[0]);
	if (status != QLA_SUCCESS) {
		DEBUG2(ql4_printk(KERN_WARNING, ha,
				  "%s: MBOX_CMD_GET_MANAGEMENT_DATA "
				  "failed w/ status %04X\n", __func__,
				  mbox_sts[0]));
	}
	return status;
}

int qla4xxx_get_ip_state(struct scsi_qla_host *ha, uint32_t acb_idx,
			 uint32_t ip_idx, uint32_t *sts)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	int status = QLA_SUCCESS;

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));
	mbox_cmd[0] = MBOX_CMD_GET_IP_ADDR_STATE;
	mbox_cmd[1] = acb_idx;
	mbox_cmd[2] = ip_idx;

	status = qla4xxx_mailbox_command(ha, 3, 8, &mbox_cmd[0], &mbox_sts[0]);
	if (status != QLA_SUCCESS) {
		DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: "
				  "MBOX_CMD_GET_IP_ADDR_STATE failed w/ "
				  "status %04X\n", __func__, mbox_sts[0]));
	}
	memcpy(sts, mbox_sts, sizeof(mbox_sts));
	return status;
}

int qla4xxx_get_nvram(struct scsi_qla_host *ha, dma_addr_t nvram_dma,
		      uint32_t offset, uint32_t size)
{
	int status = QLA_SUCCESS;
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_GET_NVRAM;
	mbox_cmd[1] = LSDW(nvram_dma);
	mbox_cmd[2] = MSDW(nvram_dma);
	mbox_cmd[3] = offset;
	mbox_cmd[4] = size;

	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0],
					 &mbox_sts[0]);
	if (status != QLA_SUCCESS) {
		DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
				  "status %04X\n", ha->host_no, __func__,
				  mbox_sts[0]));
	}
	return status;
}

int qla4xxx_set_nvram(struct scsi_qla_host *ha, dma_addr_t nvram_dma,
		      uint32_t offset, uint32_t size)
{
	int status = QLA_SUCCESS;
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_SET_NVRAM;
	mbox_cmd[1] = LSDW(nvram_dma);
	mbox_cmd[2] = MSDW(nvram_dma);
	mbox_cmd[3] = offset;
	mbox_cmd[4] = size;

	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0],
					 &mbox_sts[0]);
	if (status != QLA_SUCCESS) {
		DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
				  "status %04X\n", ha->host_no, __func__,
				  mbox_sts[0]));
	}
	return status;
}

int qla4xxx_restore_factory_defaults(struct scsi_qla_host *ha,
				     uint32_t region, uint32_t field0,
				     uint32_t field1)
{
	int status = QLA_SUCCESS;
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_RESTORE_FACTORY_DEFAULTS;
	mbox_cmd[3] = region;
	mbox_cmd[4] = field0;
	mbox_cmd[5] = field1;

	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 3, &mbox_cmd[0],
					 &mbox_sts[0]);
	if (status != QLA_SUCCESS) {
		DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
				  "status %04X\n", ha->host_no, __func__,
				  mbox_sts[0]));
	}
	return status;
}

/**
 * qla4_8xxx_set_param - set driver version in firmware.
 * @ha: Pointer to host adapter structure.
 * @param: Parameter to set, i.e. driver version
 **/
int qla4_8xxx_set_param(struct scsi_qla_host *ha, int param)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	uint32_t status;

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_SET_PARAM;
	if (param == SET_DRVR_VERSION) {
		mbox_cmd[1] = SET_DRVR_VERSION;
		strncpy((char *)&mbox_cmd[2], QLA4XXX_DRIVER_VERSION,
			MAX_DRVR_VER_LEN - 1);
	} else {
		ql4_printk(KERN_ERR, ha, "%s: invalid parameter 0x%x\n",
			   __func__, param);
		status = QLA_ERROR;
		goto exit_set_param;
	}

	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 2, mbox_cmd,
					 mbox_sts);
	if (status == QLA_ERROR)
		ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n",
			   __func__, mbox_sts[0]);

exit_set_param:
	return status;
}

/**
 * qla4_83xx_post_idc_ack - post IDC ACK
 * @ha: Pointer to host adapter structure.
 *
 * Posts IDC ACK for IDC Request Notification AEN.
 **/
int qla4_83xx_post_idc_ack(struct scsi_qla_host *ha)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	int status;

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_IDC_ACK;
	mbox_cmd[1] = ha->idc_info.request_desc;
	mbox_cmd[2] = ha->idc_info.info1;
	mbox_cmd[3] = ha->idc_info.info2;
	mbox_cmd[4] = ha->idc_info.info3;

	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, MBOX_REG_COUNT,
					 mbox_cmd, mbox_sts);
	if (status == QLA_ERROR)
		ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n", __func__,
			   mbox_sts[0]);
	else
		ql4_printk(KERN_INFO, ha, "%s: IDC ACK posted\n", __func__);

	return status;
}

int qla4_84xx_config_acb(struct scsi_qla_host *ha, int acb_config)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	struct addr_ctrl_blk *acb = NULL;
	uint32_t acb_len = sizeof(struct addr_ctrl_blk);
	int rval = QLA_SUCCESS;
	dma_addr_t acb_dma;

	acb = dma_alloc_coherent(&ha->pdev->dev,
				 sizeof(struct addr_ctrl_blk),
				 &acb_dma, GFP_KERNEL);
	if (!acb) {
		ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n", __func__);
		rval = QLA_ERROR;
		goto exit_config_acb;
	}
	memset(acb, 0, acb_len);

	switch (acb_config) {
	case ACB_CONFIG_DISABLE:
		rval = qla4xxx_get_acb(ha, acb_dma, 0, acb_len);
		if (rval != QLA_SUCCESS)
			goto exit_free_acb;

		rval = qla4xxx_disable_acb(ha);
		if (rval != QLA_SUCCESS)
			goto exit_free_acb;

		if (!ha->saved_acb)
			ha->saved_acb = kzalloc(acb_len, GFP_KERNEL);

		if (!ha->saved_acb) {
			ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n",
				   __func__);
			rval = QLA_ERROR;
			goto exit_free_acb;
		}

		memcpy(ha->saved_acb, acb, acb_len);
		break;
	case ACB_CONFIG_SET:
		if (!ha->saved_acb) {
			ql4_printk(KERN_ERR, ha, "%s: Can't set ACB, Saved ACB not available\n",
				   __func__);
			rval = QLA_ERROR;
			goto exit_free_acb;
		}

		memcpy(acb, ha->saved_acb, acb_len);

		rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], acb_dma);
		if (rval != QLA_SUCCESS)
			goto exit_free_acb;

		break;
	default:
		ql4_printk(KERN_ERR, ha, "%s: Invalid ACB Configuration\n",
			   __func__);
	}

exit_free_acb:
	dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk), acb,
			  acb_dma);
exit_config_acb:
	if ((acb_config == ACB_CONFIG_SET) && ha->saved_acb) {
		kfree(ha->saved_acb);
		ha->saved_acb = NULL;
	}
	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "%s %s\n", __func__,
			  rval == QLA_SUCCESS ? "SUCCEEDED" : "FAILED"));
	return rval;
}

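/*
 * Usage sketch (hedged): qla4_84xx_config_acb() is intended to be called in
 * pairs - ACB_CONFIG_DISABLE saves the current ACB and disables it, and a
 * later ACB_CONFIG_SET restores the saved copy.  A caller quiescing the IP
 * stack around some operation might do:
 *
 *	if (qla4_84xx_config_acb(ha, ACB_CONFIG_DISABLE) != QLA_SUCCESS)
 *		return QLA_ERROR;
 *	... perform the operation that requires the ACB to be down ...
 *	if (qla4_84xx_config_acb(ha, ACB_CONFIG_SET) != QLA_SUCCESS)
 *		return QLA_ERROR;
 */
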
int qla4_83xx_get_port_config(struct scsi_qla_host *ha, uint32_t *config)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	int status;

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_GET_PORT_CONFIG;

	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, MBOX_REG_COUNT,
					 mbox_cmd, mbox_sts);
	if (status == QLA_SUCCESS)
		*config = mbox_sts[1];
	else
		ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n", __func__,
			   mbox_sts[0]);

	return status;
}

int qla4_83xx_set_port_config(struct scsi_qla_host *ha, uint32_t *config)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	int status;

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_SET_PORT_CONFIG;
	mbox_cmd[1] = *config;

	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, MBOX_REG_COUNT,
					 mbox_cmd, mbox_sts);
	if (status != QLA_SUCCESS)
		ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n", __func__,
			   mbox_sts[0]);

	return status;
}

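/*
 * Usage sketch (hedged): the get/set port-config helpers are designed for a
 * read-modify-write sequence.  The ENABLE_INTERNAL_LOOPBACK flag below is
 * illustrative; the real flag names live in the driver headers.
 *
 *	uint32_t config;
 *
 *	if (qla4_83xx_get_port_config(ha, &config) == QLA_SUCCESS) {
 *		config |= ENABLE_INTERNAL_LOOPBACK;
 *		if (qla4_83xx_set_port_config(ha, &config) != QLA_SUCCESS)
 *			return QLA_ERROR;
 *	}
 */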