ql4_mbx.c

/*
 * QLogic iSCSI HBA Driver
 * Copyright (c) 2003-2013 QLogic Corporation
 *
 * See LICENSE.qla4xxx for copyright and licensing details.
 */

#include <linux/ctype.h>
#include "ql4_def.h"
#include "ql4_glbl.h"
#include "ql4_dbg.h"
#include "ql4_inline.h"
#include "ql4_version.h"

void qla4xxx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd,
			    int in_count)
{
	int i;

	/* Load all mailbox registers, except mailbox 0. */
	for (i = 1; i < in_count; i++)
		writel(mbx_cmd[i], &ha->reg->mailbox[i]);

	/* Wakeup firmware */
	writel(mbx_cmd[0], &ha->reg->mailbox[0]);
	readl(&ha->reg->mailbox[0]);
	writel(set_rmask(CSR_INTR_RISC), &ha->reg->ctrl_status);
	readl(&ha->reg->ctrl_status);
}

void qla4xxx_process_mbox_intr(struct scsi_qla_host *ha, int out_count)
{
	int intr_status;

	intr_status = readl(&ha->reg->ctrl_status);
	if (intr_status & INTR_PENDING) {
		/*
		 * Service the interrupt.
		 * The ISR will save the mailbox status registers
		 * to a temporary storage location in the adapter structure.
		 */
		ha->mbox_status_count = out_count;
		ha->isp_ops->interrupt_service_routine(ha, intr_status);
	}
}

/**
 * qla4xxx_is_intr_poll_mode - Are we allowed to poll for interrupts?
 * @ha: Pointer to host adapter structure.
 * @ret: 1=polling mode, 0=non-polling mode
 **/
static int qla4xxx_is_intr_poll_mode(struct scsi_qla_host *ha)
{
	int rval = 1;

	if (is_qla8032(ha) || is_qla8042(ha)) {
		if (test_bit(AF_IRQ_ATTACHED, &ha->flags) &&
		    test_bit(AF_83XX_MBOX_INTR_ON, &ha->flags))
			rval = 0;
	} else {
		if (test_bit(AF_IRQ_ATTACHED, &ha->flags) &&
		    test_bit(AF_INTERRUPTS_ON, &ha->flags) &&
		    test_bit(AF_ONLINE, &ha->flags) &&
		    !test_bit(AF_HA_REMOVAL, &ha->flags))
			rval = 0;
	}

	return rval;
}

/**
 * qla4xxx_mailbox_command - issues mailbox commands
 * @ha: Pointer to host adapter structure.
 * @inCount: number of mailbox registers to load.
 * @outCount: number of mailbox registers to return.
 * @mbx_cmd: data pointer for mailbox in registers.
 * @mbx_sts: data pointer for mailbox out registers.
 *
 * This routine issues mailbox commands and waits for completion.
 * If outCount is 0, this routine completes successfully WITHOUT waiting
 * for the mailbox command to complete.
 **/
int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
			    uint8_t outCount, uint32_t *mbx_cmd,
			    uint32_t *mbx_sts)
{
	int status = QLA_ERROR;
	uint8_t i;
	u_long wait_count;
	unsigned long flags = 0;
	uint32_t dev_state;

	/* Make sure that pointers are valid */
	if (!mbx_cmd || !mbx_sts) {
		DEBUG2(printk("scsi%ld: %s: Invalid mbx_cmd or mbx_sts "
			      "pointer\n", ha->host_no, __func__));
		return status;
	}

	if (is_qla40XX(ha)) {
		if (test_bit(AF_HA_REMOVAL, &ha->flags)) {
			DEBUG2(ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: "
					  "prematurely completing mbx cmd as "
					  "adapter removal detected\n",
					  ha->host_no, __func__));
			return status;
		}
	}

	if ((is_aer_supported(ha)) &&
	    (test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags))) {
		DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Perm failure on EEH, "
			      "timeout MBX Exiting.\n", ha->host_no, __func__));
		return status;
	}

	/* Mailbox code active */
	wait_count = MBOX_TOV * 100;

	while (wait_count--) {
		mutex_lock(&ha->mbox_sem);
		if (!test_bit(AF_MBOX_COMMAND, &ha->flags)) {
			set_bit(AF_MBOX_COMMAND, &ha->flags);
			mutex_unlock(&ha->mbox_sem);
			break;
		}
		mutex_unlock(&ha->mbox_sem);
		if (!wait_count) {
			DEBUG2(printk("scsi%ld: %s: mbox_sem failed\n",
				      ha->host_no, __func__));
			return status;
		}
		msleep(10);
	}

	if (is_qla80XX(ha)) {
		if (test_bit(AF_FW_RECOVERY, &ha->flags)) {
			DEBUG2(ql4_printk(KERN_WARNING, ha,
					  "scsi%ld: %s: prematurely completing mbx cmd as firmware recovery detected\n",
					  ha->host_no, __func__));
			goto mbox_exit;
		}
		/* Do not send any mbx cmd if h/w is in failed state*/
		ha->isp_ops->idc_lock(ha);
		dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);
		ha->isp_ops->idc_unlock(ha);
		if (dev_state == QLA8XXX_DEV_FAILED) {
			ql4_printk(KERN_WARNING, ha,
				   "scsi%ld: %s: H/W is in failed state, do not send any mailbox commands\n",
				   ha->host_no, __func__);
			goto mbox_exit;
		}
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);

	ha->mbox_status_count = outCount;
	for (i = 0; i < outCount; i++)
		ha->mbox_status[i] = 0;

	/* Queue the mailbox command to the firmware */
	ha->isp_ops->queue_mailbox_command(ha, mbx_cmd, inCount);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	/* Wait for completion */

	/*
	 * If we don't want status, don't wait for the mailbox command to
	 * complete. For example, MBOX_CMD_RESET_FW doesn't return status,
	 * you must poll the inbound Interrupt Mask for completion.
	 */
	if (outCount == 0) {
		status = QLA_SUCCESS;
		goto mbox_exit;
	}

	/*
	 * Wait for completion: Poll or completion queue
	 */
	if (qla4xxx_is_intr_poll_mode(ha)) {
		/* Poll for command to complete */
		wait_count = jiffies + MBOX_TOV * HZ;
		while (test_bit(AF_MBOX_COMMAND_DONE, &ha->flags) == 0) {
			if (time_after_eq(jiffies, wait_count))
				break;
			/*
			 * Service the interrupt.
			 * The ISR will save the mailbox status registers
			 * to a temporary storage location in the adapter
			 * structure.
			 */
			spin_lock_irqsave(&ha->hardware_lock, flags);
			ha->isp_ops->process_mailbox_interrupt(ha, outCount);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			msleep(10);
		}
	} else {
		/* Do not poll for completion. Use completion queue */
		set_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags);
		wait_for_completion_timeout(&ha->mbx_intr_comp, MBOX_TOV * HZ);
		clear_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags);
	}

	/* Check for mailbox timeout. */
	if (!test_bit(AF_MBOX_COMMAND_DONE, &ha->flags)) {
		if (is_qla80XX(ha) &&
		    test_bit(AF_FW_RECOVERY, &ha->flags)) {
			DEBUG2(ql4_printk(KERN_INFO, ha,
					  "scsi%ld: %s: prematurely completing mbx cmd as "
					  "firmware recovery detected\n",
					  ha->host_no, __func__));
			goto mbox_exit;
		}
		ql4_printk(KERN_WARNING, ha, "scsi%ld: Mailbox Cmd 0x%08X timed out, Scheduling Adapter Reset\n",
			   ha->host_no, mbx_cmd[0]);
		ha->mailbox_timeout_count++;
		mbx_sts[0] = (-1);
		set_bit(DPC_RESET_HA, &ha->dpc_flags);
		if (is_qla8022(ha)) {
			ql4_printk(KERN_INFO, ha,
				   "disabling pause transmit on port 0 & 1.\n");
			qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
					CRB_NIU_XG_PAUSE_CTL_P0 |
					CRB_NIU_XG_PAUSE_CTL_P1);
		} else if (is_qla8032(ha) || is_qla8042(ha)) {
			ql4_printk(KERN_INFO, ha, " %s: disabling pause transmit on port 0 & 1.\n",
				   __func__);
			qla4_83xx_disable_pause(ha);
		}
		goto mbox_exit;
	}

	/*
	 * Copy the mailbox out registers to the caller's mailbox in/out
	 * structure.
	 */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (i = 0; i < outCount; i++)
		mbx_sts[i] = ha->mbox_status[i];

	/* Set return status and error flags (if applicable). */
	switch (ha->mbox_status[0]) {
	case MBOX_STS_COMMAND_COMPLETE:
		status = QLA_SUCCESS;
		break;
	case MBOX_STS_INTERMEDIATE_COMPLETION:
		status = QLA_SUCCESS;
		break;
	case MBOX_STS_BUSY:
		ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Cmd = %08X, ISP BUSY\n",
			   ha->host_no, __func__, mbx_cmd[0]);
		ha->mailbox_timeout_count++;
		break;
	default:
		ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: FAILED, MBOX CMD = %08X, MBOX STS = %08X %08X %08X %08X %08X %08X %08X %08X\n",
			   ha->host_no, __func__, mbx_cmd[0], mbx_sts[0],
			   mbx_sts[1], mbx_sts[2], mbx_sts[3], mbx_sts[4],
			   mbx_sts[5], mbx_sts[6], mbx_sts[7]);
		break;
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

mbox_exit:
	mutex_lock(&ha->mbox_sem);
	clear_bit(AF_MBOX_COMMAND, &ha->flags);
	mutex_unlock(&ha->mbox_sem);
	clear_bit(AF_MBOX_COMMAND_DONE, &ha->flags);

	return status;
}
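/*
 * Illustrative sketch (not part of the upstream driver): a typical caller of
 * qla4xxx_mailbox_command() zeroes both register arrays, places the opcode in
 * mbox_cmd[0] plus any parameters, and checks the return value and mbox_sts[0].
 * The helper name below is hypothetical; the pattern mirrors the callers later
 * in this file (e.g. qla4xxx_get_firmware_state()).
 *
 *	static int example_issue_get_fw_state(struct scsi_qla_host *ha)
 *	{
 *		uint32_t mbox_cmd[MBOX_REG_COUNT];
 *		uint32_t mbox_sts[MBOX_REG_COUNT];
 *
 *		memset(&mbox_cmd, 0, sizeof(mbox_cmd));
 *		memset(&mbox_sts, 0, sizeof(mbox_sts));
 *		mbox_cmd[0] = MBOX_CMD_GET_FW_STATE;
 *
 *		if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 4,
 *					    &mbox_cmd[0], &mbox_sts[0]) !=
 *		    QLA_SUCCESS)
 *			return QLA_ERROR;
 *
 *		return QLA_SUCCESS;
 *	}
 */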
/**
 * qla4xxx_get_minidump_template - Get the firmware template
 * @ha: Pointer to host adapter structure.
 * @phys_addr: dma address for template
 *
 * Obtain the minidump template from firmware during initialization
 * as it may not be available when minidump is desired.
 **/
int qla4xxx_get_minidump_template(struct scsi_qla_host *ha,
				  dma_addr_t phys_addr)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	int status;

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_MINIDUMP;
	mbox_cmd[1] = MINIDUMP_GET_TMPLT_SUBCOMMAND;
	mbox_cmd[2] = LSDW(phys_addr);
	mbox_cmd[3] = MSDW(phys_addr);
	mbox_cmd[4] = ha->fw_dump_tmplt_size;
	mbox_cmd[5] = 0;

	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 2, &mbox_cmd[0],
					 &mbox_sts[0]);
	if (status != QLA_SUCCESS) {
		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "scsi%ld: %s: Cmd = %08X, mbx[0] = 0x%04x, mbx[1] = 0x%04x\n",
				  ha->host_no, __func__, mbox_cmd[0],
				  mbox_sts[0], mbox_sts[1]));
	}
	return status;
}

/**
 * qla4xxx_req_template_size - Get minidump template size from firmware.
 * @ha: Pointer to host adapter structure.
 **/
int qla4xxx_req_template_size(struct scsi_qla_host *ha)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	int status;

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_MINIDUMP;
	mbox_cmd[1] = MINIDUMP_GET_SIZE_SUBCOMMAND;

	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 8, &mbox_cmd[0],
					 &mbox_sts[0]);
	if (status == QLA_SUCCESS) {
		ha->fw_dump_tmplt_size = mbox_sts[1];
		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "%s: sts[0]=0x%04x, template size=0x%04x, size_cm_02=0x%04x, size_cm_04=0x%04x, size_cm_08=0x%04x, size_cm_10=0x%04x, size_cm_FF=0x%04x, version=0x%04x\n",
				  __func__, mbox_sts[0], mbox_sts[1],
				  mbox_sts[2], mbox_sts[3], mbox_sts[4],
				  mbox_sts[5], mbox_sts[6], mbox_sts[7]));
		if (ha->fw_dump_tmplt_size == 0)
			status = QLA_ERROR;
	} else {
		ql4_printk(KERN_WARNING, ha,
			   "%s: Error sts[0]=0x%04x, mbx[1]=0x%04x\n",
			   __func__, mbox_sts[0], mbox_sts[1]);
		status = QLA_ERROR;
	}

	return status;
}

void qla4xxx_mailbox_premature_completion(struct scsi_qla_host *ha)
{
	set_bit(AF_FW_RECOVERY, &ha->flags);
	ql4_printk(KERN_INFO, ha, "scsi%ld: %s: set FW RECOVERY!\n",
		   ha->host_no, __func__);

	if (test_bit(AF_MBOX_COMMAND, &ha->flags)) {
		if (test_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags)) {
			complete(&ha->mbx_intr_comp);
			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Due to fw "
				   "recovery, doing premature completion of "
				   "mbx cmd\n", ha->host_no, __func__);
		} else {
			set_bit(AF_MBOX_COMMAND_DONE, &ha->flags);
			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Due to fw "
				   "recovery, doing premature completion of "
				   "polling mbx cmd\n", ha->host_no, __func__);
		}
	}
}

static uint8_t
qla4xxx_set_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
		 uint32_t *mbox_sts, dma_addr_t init_fw_cb_dma)
{
	memset(mbox_cmd, 0, sizeof(mbox_cmd[0]) * MBOX_REG_COUNT);
	memset(mbox_sts, 0, sizeof(mbox_sts[0]) * MBOX_REG_COUNT);

	if (is_qla8022(ha))
		qla4_82xx_wr_32(ha, ha->nx_db_wr_ptr, 0);

	mbox_cmd[0] = MBOX_CMD_INITIALIZE_FIRMWARE;
	mbox_cmd[1] = 0;
	mbox_cmd[2] = LSDW(init_fw_cb_dma);
	mbox_cmd[3] = MSDW(init_fw_cb_dma);
	mbox_cmd[4] = sizeof(struct addr_ctrl_blk);

	if (qla4xxx_mailbox_command(ha, 6, 6, mbox_cmd, mbox_sts) !=
	    QLA_SUCCESS) {
		DEBUG2(printk(KERN_WARNING "scsi%ld: %s: "
			      "MBOX_CMD_INITIALIZE_FIRMWARE"
			      " failed w/ status %04X\n",
			      ha->host_no, __func__, mbox_sts[0]));
		return QLA_ERROR;
	}
	return QLA_SUCCESS;
}

uint8_t
qla4xxx_get_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
		 uint32_t *mbox_sts, dma_addr_t init_fw_cb_dma)
{
	memset(mbox_cmd, 0, sizeof(mbox_cmd[0]) * MBOX_REG_COUNT);
	memset(mbox_sts, 0, sizeof(mbox_sts[0]) * MBOX_REG_COUNT);

	mbox_cmd[0] = MBOX_CMD_GET_INIT_FW_CTRL_BLOCK;
	mbox_cmd[2] = LSDW(init_fw_cb_dma);
	mbox_cmd[3] = MSDW(init_fw_cb_dma);
	mbox_cmd[4] = sizeof(struct addr_ctrl_blk);

	if (qla4xxx_mailbox_command(ha, 5, 5, mbox_cmd, mbox_sts) !=
	    QLA_SUCCESS) {
		DEBUG2(printk(KERN_WARNING "scsi%ld: %s: "
			      "MBOX_CMD_GET_INIT_FW_CTRL_BLOCK"
			      " failed w/ status %04X\n",
			      ha->host_no, __func__, mbox_sts[0]));
		return QLA_ERROR;
	}
	return QLA_SUCCESS;
}

uint8_t qla4xxx_set_ipaddr_state(uint8_t fw_ipaddr_state)
{
	uint8_t ipaddr_state;

	switch (fw_ipaddr_state) {
	case IP_ADDRSTATE_UNCONFIGURED:
		ipaddr_state = ISCSI_IPDDRESS_STATE_UNCONFIGURED;
		break;
	case IP_ADDRSTATE_INVALID:
		ipaddr_state = ISCSI_IPDDRESS_STATE_INVALID;
		break;
	case IP_ADDRSTATE_ACQUIRING:
		ipaddr_state = ISCSI_IPDDRESS_STATE_ACQUIRING;
		break;
	case IP_ADDRSTATE_TENTATIVE:
		ipaddr_state = ISCSI_IPDDRESS_STATE_TENTATIVE;
		break;
	case IP_ADDRSTATE_DEPRICATED:
		ipaddr_state = ISCSI_IPDDRESS_STATE_DEPRECATED;
		break;
	case IP_ADDRSTATE_PREFERRED:
		ipaddr_state = ISCSI_IPDDRESS_STATE_VALID;
		break;
	case IP_ADDRSTATE_DISABLING:
		ipaddr_state = ISCSI_IPDDRESS_STATE_DISABLING;
		break;
	default:
		ipaddr_state = ISCSI_IPDDRESS_STATE_UNCONFIGURED;
	}
	return ipaddr_state;
}

static void
qla4xxx_update_local_ip(struct scsi_qla_host *ha,
			struct addr_ctrl_blk *init_fw_cb)
{
	ha->ip_config.tcp_options = le16_to_cpu(init_fw_cb->ipv4_tcp_opts);
	ha->ip_config.ipv4_options = le16_to_cpu(init_fw_cb->ipv4_ip_opts);
	ha->ip_config.ipv4_addr_state =
		qla4xxx_set_ipaddr_state(init_fw_cb->ipv4_addr_state);
	ha->ip_config.eth_mtu_size =
		le16_to_cpu(init_fw_cb->eth_mtu_size);
	ha->ip_config.ipv4_port = le16_to_cpu(init_fw_cb->ipv4_port);

	if (ha->acb_version == ACB_SUPPORTED) {
		ha->ip_config.ipv6_options = le16_to_cpu(init_fw_cb->ipv6_opts);
		ha->ip_config.ipv6_addl_options =
			le16_to_cpu(init_fw_cb->ipv6_addtl_opts);
		ha->ip_config.ipv6_tcp_options =
			le16_to_cpu(init_fw_cb->ipv6_tcp_opts);
	}

	/* Save IPv4 Address Info */
	memcpy(ha->ip_config.ip_address, init_fw_cb->ipv4_addr,
	       min(sizeof(ha->ip_config.ip_address),
		   sizeof(init_fw_cb->ipv4_addr)));
	memcpy(ha->ip_config.subnet_mask, init_fw_cb->ipv4_subnet,
	       min(sizeof(ha->ip_config.subnet_mask),
		   sizeof(init_fw_cb->ipv4_subnet)));
	memcpy(ha->ip_config.gateway, init_fw_cb->ipv4_gw_addr,
	       min(sizeof(ha->ip_config.gateway),
		   sizeof(init_fw_cb->ipv4_gw_addr)));

	ha->ip_config.ipv4_vlan_tag = be16_to_cpu(init_fw_cb->ipv4_vlan_tag);
	ha->ip_config.control = init_fw_cb->control;
	ha->ip_config.tcp_wsf = init_fw_cb->ipv4_tcp_wsf;
	ha->ip_config.ipv4_tos = init_fw_cb->ipv4_tos;
	ha->ip_config.ipv4_cache_id = init_fw_cb->ipv4_cacheid;
	ha->ip_config.ipv4_alt_cid_len = init_fw_cb->ipv4_dhcp_alt_cid_len;
	memcpy(ha->ip_config.ipv4_alt_cid, init_fw_cb->ipv4_dhcp_alt_cid,
	       min(sizeof(ha->ip_config.ipv4_alt_cid),
		   sizeof(init_fw_cb->ipv4_dhcp_alt_cid)));
	ha->ip_config.ipv4_vid_len = init_fw_cb->ipv4_dhcp_vid_len;
	memcpy(ha->ip_config.ipv4_vid, init_fw_cb->ipv4_dhcp_vid,
	       min(sizeof(ha->ip_config.ipv4_vid),
		   sizeof(init_fw_cb->ipv4_dhcp_vid)));
	ha->ip_config.ipv4_ttl = init_fw_cb->ipv4_ttl;
	ha->ip_config.def_timeout = le16_to_cpu(init_fw_cb->def_timeout);
	ha->ip_config.abort_timer = init_fw_cb->abort_timer;
	ha->ip_config.iscsi_options = le16_to_cpu(init_fw_cb->iscsi_opts);
	ha->ip_config.iscsi_max_pdu_size =
		le16_to_cpu(init_fw_cb->iscsi_max_pdu_size);
	ha->ip_config.iscsi_first_burst_len =
		le16_to_cpu(init_fw_cb->iscsi_fburst_len);
	ha->ip_config.iscsi_max_outstnd_r2t =
		le16_to_cpu(init_fw_cb->iscsi_max_outstnd_r2t);
	ha->ip_config.iscsi_max_burst_len =
		le16_to_cpu(init_fw_cb->iscsi_max_burst_len);
	memcpy(ha->ip_config.iscsi_name, init_fw_cb->iscsi_name,
	       min(sizeof(ha->ip_config.iscsi_name),
		   sizeof(init_fw_cb->iscsi_name)));

	if (is_ipv6_enabled(ha)) {
		/* Save IPv6 Address */
		ha->ip_config.ipv6_link_local_state =
			qla4xxx_set_ipaddr_state(init_fw_cb->ipv6_lnk_lcl_addr_state);
		ha->ip_config.ipv6_addr0_state =
			qla4xxx_set_ipaddr_state(init_fw_cb->ipv6_addr0_state);
		ha->ip_config.ipv6_addr1_state =
			qla4xxx_set_ipaddr_state(init_fw_cb->ipv6_addr1_state);

		switch (le16_to_cpu(init_fw_cb->ipv6_dflt_rtr_state)) {
		case IPV6_RTRSTATE_UNKNOWN:
			ha->ip_config.ipv6_default_router_state =
				ISCSI_ROUTER_STATE_UNKNOWN;
			break;
		case IPV6_RTRSTATE_MANUAL:
			ha->ip_config.ipv6_default_router_state =
				ISCSI_ROUTER_STATE_MANUAL;
			break;
		case IPV6_RTRSTATE_ADVERTISED:
			ha->ip_config.ipv6_default_router_state =
				ISCSI_ROUTER_STATE_ADVERTISED;
			break;
		case IPV6_RTRSTATE_STALE:
			ha->ip_config.ipv6_default_router_state =
				ISCSI_ROUTER_STATE_STALE;
			break;
		default:
			ha->ip_config.ipv6_default_router_state =
				ISCSI_ROUTER_STATE_UNKNOWN;
		}

		ha->ip_config.ipv6_link_local_addr.in6_u.u6_addr8[0] = 0xFE;
		ha->ip_config.ipv6_link_local_addr.in6_u.u6_addr8[1] = 0x80;

		memcpy(&ha->ip_config.ipv6_link_local_addr.in6_u.u6_addr8[8],
		       init_fw_cb->ipv6_if_id,
		       min(sizeof(ha->ip_config.ipv6_link_local_addr)/2,
			   sizeof(init_fw_cb->ipv6_if_id)));
		memcpy(&ha->ip_config.ipv6_addr0, init_fw_cb->ipv6_addr0,
		       min(sizeof(ha->ip_config.ipv6_addr0),
			   sizeof(init_fw_cb->ipv6_addr0)));
		memcpy(&ha->ip_config.ipv6_addr1, init_fw_cb->ipv6_addr1,
		       min(sizeof(ha->ip_config.ipv6_addr1),
			   sizeof(init_fw_cb->ipv6_addr1)));
		memcpy(&ha->ip_config.ipv6_default_router_addr,
		       init_fw_cb->ipv6_dflt_rtr_addr,
		       min(sizeof(ha->ip_config.ipv6_default_router_addr),
			   sizeof(init_fw_cb->ipv6_dflt_rtr_addr)));
		ha->ip_config.ipv6_vlan_tag =
			be16_to_cpu(init_fw_cb->ipv6_vlan_tag);
		ha->ip_config.ipv6_port = le16_to_cpu(init_fw_cb->ipv6_port);
		ha->ip_config.ipv6_cache_id = init_fw_cb->ipv6_cache_id;
		ha->ip_config.ipv6_flow_lbl =
			le16_to_cpu(init_fw_cb->ipv6_flow_lbl);
		ha->ip_config.ipv6_traffic_class =
			init_fw_cb->ipv6_traffic_class;
		ha->ip_config.ipv6_hop_limit = init_fw_cb->ipv6_hop_limit;
		ha->ip_config.ipv6_nd_reach_time =
			le32_to_cpu(init_fw_cb->ipv6_nd_reach_time);
		ha->ip_config.ipv6_nd_rexmit_timer =
			le32_to_cpu(init_fw_cb->ipv6_nd_rexmit_timer);
		ha->ip_config.ipv6_nd_stale_timeout =
			le32_to_cpu(init_fw_cb->ipv6_nd_stale_timeout);
		ha->ip_config.ipv6_dup_addr_detect_count =
			init_fw_cb->ipv6_dup_addr_detect_count;
		ha->ip_config.ipv6_gw_advrt_mtu =
			le32_to_cpu(init_fw_cb->ipv6_gw_advrt_mtu);
		ha->ip_config.ipv6_tcp_wsf = init_fw_cb->ipv6_tcp_wsf;
	}
}

uint8_t
qla4xxx_update_local_ifcb(struct scsi_qla_host *ha,
			  uint32_t *mbox_cmd,
			  uint32_t *mbox_sts,
			  struct addr_ctrl_blk *init_fw_cb,
			  dma_addr_t init_fw_cb_dma)
{
	if (qla4xxx_get_ifcb(ha, mbox_cmd, mbox_sts, init_fw_cb_dma)
	    != QLA_SUCCESS) {
		DEBUG2(printk(KERN_WARNING
			      "scsi%ld: %s: Failed to get init_fw_ctrl_blk\n",
			      ha->host_no, __func__));
		return QLA_ERROR;
	}

	DEBUG2(qla4xxx_dump_buffer(init_fw_cb, sizeof(struct addr_ctrl_blk)));

	/* Save some info in adapter structure. */
	ha->acb_version = init_fw_cb->acb_version;
	ha->firmware_options = le16_to_cpu(init_fw_cb->fw_options);
	ha->heartbeat_interval = init_fw_cb->hb_interval;
	memcpy(ha->name_string, init_fw_cb->iscsi_name,
	       min(sizeof(ha->name_string),
		   sizeof(init_fw_cb->iscsi_name)));
	ha->def_timeout = le16_to_cpu(init_fw_cb->def_timeout);
	/*memcpy(ha->alias, init_fw_cb->Alias,
	       min(sizeof(ha->alias), sizeof(init_fw_cb->Alias)));*/

	qla4xxx_update_local_ip(ha, init_fw_cb);

	return QLA_SUCCESS;
}

/**
 * qla4xxx_initialize_fw_cb - initializes firmware control block.
 * @ha: Pointer to host adapter structure.
 **/
int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)
{
	struct addr_ctrl_blk *init_fw_cb;
	dma_addr_t init_fw_cb_dma;
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	int status = QLA_ERROR;

	init_fw_cb = dma_zalloc_coherent(&ha->pdev->dev,
					 sizeof(struct addr_ctrl_blk),
					 &init_fw_cb_dma, GFP_KERNEL);
	if (init_fw_cb == NULL) {
		DEBUG2(printk("scsi%ld: %s: Unable to alloc init_cb\n",
			      ha->host_no, __func__));
		goto exit_init_fw_cb_no_free;
	}

	/* Get Initialize Firmware Control Block. */
	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma) !=
	    QLA_SUCCESS) {
		goto exit_init_fw_cb;
	}

	/* Fill in the request and response queue information. */
	init_fw_cb->rqq_consumer_idx = cpu_to_le16(ha->request_out);
	init_fw_cb->compq_producer_idx = cpu_to_le16(ha->response_in);
	init_fw_cb->rqq_len = __constant_cpu_to_le16(REQUEST_QUEUE_DEPTH);
	init_fw_cb->compq_len = __constant_cpu_to_le16(RESPONSE_QUEUE_DEPTH);
	init_fw_cb->rqq_addr_lo = cpu_to_le32(LSDW(ha->request_dma));
	init_fw_cb->rqq_addr_hi = cpu_to_le32(MSDW(ha->request_dma));
	init_fw_cb->compq_addr_lo = cpu_to_le32(LSDW(ha->response_dma));
	init_fw_cb->compq_addr_hi = cpu_to_le32(MSDW(ha->response_dma));
	init_fw_cb->shdwreg_addr_lo = cpu_to_le32(LSDW(ha->shadow_regs_dma));
	init_fw_cb->shdwreg_addr_hi = cpu_to_le32(MSDW(ha->shadow_regs_dma));

	/* Set up required options. */
	init_fw_cb->fw_options |=
		__constant_cpu_to_le16(FWOPT_SESSION_MODE |
				       FWOPT_INITIATOR_MODE);

	if (is_qla80XX(ha))
		init_fw_cb->fw_options |=
			__constant_cpu_to_le16(FWOPT_ENABLE_CRBDB);

	init_fw_cb->fw_options &= __constant_cpu_to_le16(~FWOPT_TARGET_MODE);

	init_fw_cb->add_fw_options = 0;
	init_fw_cb->add_fw_options |=
		__constant_cpu_to_le16(ADFWOPT_SERIALIZE_TASK_MGMT);
	init_fw_cb->add_fw_options |=
		__constant_cpu_to_le16(ADFWOPT_AUTOCONN_DISABLE);

	if (qla4xxx_set_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma)
	    != QLA_SUCCESS) {
		DEBUG2(printk(KERN_WARNING
			      "scsi%ld: %s: Failed to set init_fw_ctrl_blk\n",
			      ha->host_no, __func__));
		goto exit_init_fw_cb;
	}

	if (qla4xxx_update_local_ifcb(ha, &mbox_cmd[0], &mbox_sts[0],
				      init_fw_cb, init_fw_cb_dma) != QLA_SUCCESS) {
		DEBUG2(printk("scsi%ld: %s: Failed to update local ifcb\n",
			      ha->host_no, __func__));
		goto exit_init_fw_cb;
	}
	status = QLA_SUCCESS;

exit_init_fw_cb:
	dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk),
			  init_fw_cb, init_fw_cb_dma);
exit_init_fw_cb_no_free:
	return status;
}

/**
 * qla4xxx_get_dhcp_ip_address - gets HBA ip address via DHCP
 * @ha: Pointer to host adapter structure.
 **/
int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host * ha)
{
	struct addr_ctrl_blk *init_fw_cb;
	dma_addr_t init_fw_cb_dma;
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];

	init_fw_cb = dma_zalloc_coherent(&ha->pdev->dev,
					 sizeof(struct addr_ctrl_blk),
					 &init_fw_cb_dma, GFP_KERNEL);
	if (init_fw_cb == NULL) {
		printk("scsi%ld: %s: Unable to alloc init_cb\n", ha->host_no,
		       __func__);
		return QLA_ERROR;
	}

	/* Get Initialize Firmware Control Block. */
	if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma) !=
	    QLA_SUCCESS) {
		DEBUG2(printk("scsi%ld: %s: Failed to get init_fw_ctrl_blk\n",
			      ha->host_no, __func__));
		dma_free_coherent(&ha->pdev->dev,
				  sizeof(struct addr_ctrl_blk),
				  init_fw_cb, init_fw_cb_dma);
		return QLA_ERROR;
	}

	/* Save IP Address. */
	qla4xxx_update_local_ip(ha, init_fw_cb);
	dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk),
			  init_fw_cb, init_fw_cb_dma);

	return QLA_SUCCESS;
}

/**
 * qla4xxx_get_firmware_state - gets firmware state of HBA
 * @ha: Pointer to host adapter structure.
 **/
int qla4xxx_get_firmware_state(struct scsi_qla_host * ha)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];

	/* Get firmware version */
	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_GET_FW_STATE;

	if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 4, &mbox_cmd[0], &mbox_sts[0]) !=
	    QLA_SUCCESS) {
		DEBUG2(printk("scsi%ld: %s: MBOX_CMD_GET_FW_STATE failed w/ "
			      "status %04X\n", ha->host_no, __func__,
			      mbox_sts[0]));
		return QLA_ERROR;
	}
	ha->firmware_state = mbox_sts[1];
	ha->board_id = mbox_sts[2];
	ha->addl_fw_state = mbox_sts[3];
	DEBUG2(printk("scsi%ld: %s firmware_state=0x%x\n",
		      ha->host_no, __func__, ha->firmware_state);)

	return QLA_SUCCESS;
}

/**
 * qla4xxx_get_firmware_status - retrieves firmware status
 * @ha: Pointer to host adapter structure.
 **/
int qla4xxx_get_firmware_status(struct scsi_qla_host * ha)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];

	/* Get firmware version */
	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_GET_FW_STATUS;

	if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 3, &mbox_cmd[0], &mbox_sts[0]) !=
	    QLA_SUCCESS) {
		DEBUG2(printk("scsi%ld: %s: MBOX_CMD_GET_FW_STATUS failed w/ "
			      "status %04X\n", ha->host_no, __func__,
			      mbox_sts[0]));
		return QLA_ERROR;
	}

	/* High-water mark of IOCBs */
	ha->iocb_hiwat = mbox_sts[2];
	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "%s: firmware IOCBs available = %d\n", __func__,
			  ha->iocb_hiwat));

	if (ha->iocb_hiwat > IOCB_HIWAT_CUSHION)
		ha->iocb_hiwat -= IOCB_HIWAT_CUSHION;

	/* Ideally, we should not enter this code, as the # of firmware
	 * IOCBs is hard-coded in the firmware. We set a default
	 * iocb_hiwat here just in case */
	if (ha->iocb_hiwat == 0) {
		ha->iocb_hiwat = REQUEST_QUEUE_DEPTH / 4;
		DEBUG2(ql4_printk(KERN_WARNING, ha,
				  "%s: Setting IOCB's to = %d\n", __func__,
				  ha->iocb_hiwat));
	}

	return QLA_SUCCESS;
}

/**
 * qla4xxx_get_fwddb_entry - retrieves firmware ddb entry
 * @ha: Pointer to host adapter structure.
 * @fw_ddb_index: Firmware's device database index
 * @fw_ddb_entry: Pointer to firmware's device database entry structure
 * @num_valid_ddb_entries: Pointer to number of valid ddb entries
 * @next_ddb_index: Pointer to next valid device database index
 * @fw_ddb_device_state: Pointer to device state
 **/
int qla4xxx_get_fwddb_entry(struct scsi_qla_host *ha,
			    uint16_t fw_ddb_index,
			    struct dev_db_entry *fw_ddb_entry,
			    dma_addr_t fw_ddb_entry_dma,
			    uint32_t *num_valid_ddb_entries,
			    uint32_t *next_ddb_index,
			    uint32_t *fw_ddb_device_state,
			    uint32_t *conn_err_detail,
			    uint16_t *tcp_source_port_num,
			    uint16_t *connection_id)
{
	int status = QLA_ERROR;
	uint16_t options;
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];

	/* Make sure the device index is valid */
	if (fw_ddb_index >= MAX_DDB_ENTRIES) {
		DEBUG2(printk("scsi%ld: %s: ddb [%d] out of range.\n",
			      ha->host_no, __func__, fw_ddb_index));
		goto exit_get_fwddb;
	}
	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));
	if (fw_ddb_entry)
		memset(fw_ddb_entry, 0, sizeof(struct dev_db_entry));

	mbox_cmd[0] = MBOX_CMD_GET_DATABASE_ENTRY;
	mbox_cmd[1] = (uint32_t) fw_ddb_index;
	mbox_cmd[2] = LSDW(fw_ddb_entry_dma);
	mbox_cmd[3] = MSDW(fw_ddb_entry_dma);
	mbox_cmd[4] = sizeof(struct dev_db_entry);

	if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 7, &mbox_cmd[0], &mbox_sts[0]) ==
	    QLA_ERROR) {
		DEBUG2(printk("scsi%ld: %s: MBOX_CMD_GET_DATABASE_ENTRY failed"
			      " with status 0x%04X\n", ha->host_no, __func__,
			      mbox_sts[0]));
		goto exit_get_fwddb;
	}
	if (fw_ddb_index != mbox_sts[1]) {
		DEBUG2(printk("scsi%ld: %s: ddb mismatch [%d] != [%d].\n",
			      ha->host_no, __func__, fw_ddb_index,
			      mbox_sts[1]));
		goto exit_get_fwddb;
	}
	if (fw_ddb_entry) {
		options = le16_to_cpu(fw_ddb_entry->options);
		if (options & DDB_OPT_IPV6_DEVICE) {
			ql4_printk(KERN_INFO, ha, "%s: DDB[%d] MB0 %04x Tot %d "
				   "Next %d State %04x ConnErr %08x %pI6 "
				   ":%04d \"%s\"\n", __func__, fw_ddb_index,
				   mbox_sts[0], mbox_sts[2], mbox_sts[3],
				   mbox_sts[4], mbox_sts[5],
				   fw_ddb_entry->ip_addr,
				   le16_to_cpu(fw_ddb_entry->port),
				   fw_ddb_entry->iscsi_name);
		} else {
			ql4_printk(KERN_INFO, ha, "%s: DDB[%d] MB0 %04x Tot %d "
				   "Next %d State %04x ConnErr %08x %pI4 "
				   ":%04d \"%s\"\n", __func__, fw_ddb_index,
				   mbox_sts[0], mbox_sts[2], mbox_sts[3],
				   mbox_sts[4], mbox_sts[5],
				   fw_ddb_entry->ip_addr,
				   le16_to_cpu(fw_ddb_entry->port),
				   fw_ddb_entry->iscsi_name);
		}
	}
	if (num_valid_ddb_entries)
		*num_valid_ddb_entries = mbox_sts[2];
	if (next_ddb_index)
		*next_ddb_index = mbox_sts[3];
	if (fw_ddb_device_state)
		*fw_ddb_device_state = mbox_sts[4];
	/*
	 * RA: This mailbox has been changed to pass connection error and
	 * details.  It's true for ISP4010 as per Version E - Not sure when it
	 * was changed.  Get the time2wait from the fw_dd_entry field :
	 * default_time2wait which we call it as minTime2Wait DEV_DB_ENTRY
	 * struct.
	 */
	if (conn_err_detail)
		*conn_err_detail = mbox_sts[5];
	if (tcp_source_port_num)
		*tcp_source_port_num = (uint16_t) (mbox_sts[6] >> 16);
	if (connection_id)
		*connection_id = (uint16_t) mbox_sts[6] & 0x00FF;
	status = QLA_SUCCESS;

exit_get_fwddb:
	return status;
}
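/*
 * Illustrative sketch (not part of the upstream driver): callers that walk the
 * firmware DDB table typically start at index 0 and follow the next_ddb_index
 * value returned above until the index runs past MAX_DDB_ENTRIES.  The loop
 * below is a hypothetical outline of that pattern; it assumes fw_ddb_entry and
 * fw_ddb_entry_dma were DMA-allocated by the caller, and the non-advancing
 * index guard is a caller-side safety check, not firmware-defined behaviour.
 *
 *	uint32_t idx, next_idx = 0, state;
 *
 *	for (idx = 0; idx < MAX_DDB_ENTRIES; idx = next_idx) {
 *		if (qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry,
 *					    fw_ddb_entry_dma, NULL, &next_idx,
 *					    &state, NULL, NULL, NULL) !=
 *		    QLA_SUCCESS)
 *			break;
 *		if (next_idx <= idx)	/* no further valid entries */
 *			break;
 *	}
 */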
  815. int qla4xxx_conn_open(struct scsi_qla_host *ha, uint16_t fw_ddb_index)
  816. {
  817. uint32_t mbox_cmd[MBOX_REG_COUNT];
  818. uint32_t mbox_sts[MBOX_REG_COUNT];
  819. int status;
  820. memset(&mbox_cmd, 0, sizeof(mbox_cmd));
  821. memset(&mbox_sts, 0, sizeof(mbox_sts));
  822. mbox_cmd[0] = MBOX_CMD_CONN_OPEN;
  823. mbox_cmd[1] = fw_ddb_index;
  824. status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 2, &mbox_cmd[0],
  825. &mbox_sts[0]);
  826. DEBUG2(ql4_printk(KERN_INFO, ha,
  827. "%s: status = %d mbx0 = 0x%x mbx1 = 0x%x\n",
  828. __func__, status, mbox_sts[0], mbox_sts[1]));
  829. return status;
  830. }
  831. /**
  832. * qla4xxx_set_fwddb_entry - sets a ddb entry.
  833. * @ha: Pointer to host adapter structure.
  834. * @fw_ddb_index: Firmware's device database index
  835. * @fw_ddb_entry_dma: dma address of ddb entry
  836. * @mbx_sts: mailbox 0 to be returned or NULL
  837. *
  838. * This routine initializes or updates the adapter's device database
  839. * entry for the specified device.
  840. **/
  841. int qla4xxx_set_ddb_entry(struct scsi_qla_host * ha, uint16_t fw_ddb_index,
  842. dma_addr_t fw_ddb_entry_dma, uint32_t *mbx_sts)
  843. {
  844. uint32_t mbox_cmd[MBOX_REG_COUNT];
  845. uint32_t mbox_sts[MBOX_REG_COUNT];
  846. int status;
  847. /* Do not wait for completion. The firmware will send us an
  848. * ASTS_DATABASE_CHANGED (0x8014) to notify us of the login status.
  849. */
  850. memset(&mbox_cmd, 0, sizeof(mbox_cmd));
  851. memset(&mbox_sts, 0, sizeof(mbox_sts));
  852. mbox_cmd[0] = MBOX_CMD_SET_DATABASE_ENTRY;
  853. mbox_cmd[1] = (uint32_t) fw_ddb_index;
  854. mbox_cmd[2] = LSDW(fw_ddb_entry_dma);
  855. mbox_cmd[3] = MSDW(fw_ddb_entry_dma);
  856. mbox_cmd[4] = sizeof(struct dev_db_entry);
  857. status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0],
  858. &mbox_sts[0]);
  859. if (mbx_sts)
  860. *mbx_sts = mbox_sts[0];
  861. DEBUG2(printk("scsi%ld: %s: status=%d mbx0=0x%x mbx4=0x%x\n",
  862. ha->host_no, __func__, status, mbox_sts[0], mbox_sts[4]);)
  863. return status;
  864. }
  865. int qla4xxx_session_logout_ddb(struct scsi_qla_host *ha,
  866. struct ddb_entry *ddb_entry, int options)
  867. {
  868. int status;
  869. uint32_t mbox_cmd[MBOX_REG_COUNT];
  870. uint32_t mbox_sts[MBOX_REG_COUNT];
  871. memset(&mbox_cmd, 0, sizeof(mbox_cmd));
  872. memset(&mbox_sts, 0, sizeof(mbox_sts));
  873. mbox_cmd[0] = MBOX_CMD_CONN_CLOSE_SESS_LOGOUT;
  874. mbox_cmd[1] = ddb_entry->fw_ddb_index;
  875. mbox_cmd[3] = options;
  876. status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 2, &mbox_cmd[0],
  877. &mbox_sts[0]);
  878. if (status != QLA_SUCCESS) {
  879. DEBUG2(ql4_printk(KERN_INFO, ha,
  880. "%s: MBOX_CMD_CONN_CLOSE_SESS_LOGOUT "
  881. "failed sts %04X %04X", __func__,
  882. mbox_sts[0], mbox_sts[1]));
  883. if ((mbox_sts[0] == MBOX_STS_COMMAND_ERROR) &&
  884. (mbox_sts[1] == DDB_NOT_LOGGED_IN)) {
  885. set_bit(DDB_CONN_CLOSE_FAILURE, &ddb_entry->flags);
  886. }
  887. }
  888. return status;
  889. }
  890. /**
  891. * qla4xxx_get_crash_record - retrieves crash record.
  892. * @ha: Pointer to host adapter structure.
  893. *
  894. * This routine retrieves a crash record from the QLA4010 after an 8002h aen.
  895. **/
  896. void qla4xxx_get_crash_record(struct scsi_qla_host * ha)
  897. {
  898. uint32_t mbox_cmd[MBOX_REG_COUNT];
  899. uint32_t mbox_sts[MBOX_REG_COUNT];
  900. struct crash_record *crash_record = NULL;
  901. dma_addr_t crash_record_dma = 0;
  902. uint32_t crash_record_size = 0;
  903. memset(&mbox_cmd, 0, sizeof(mbox_cmd));
  904. memset(&mbox_sts, 0, sizeof(mbox_cmd));
  905. /* Get size of crash record. */
  906. mbox_cmd[0] = MBOX_CMD_GET_CRASH_RECORD;
  907. if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0], &mbox_sts[0]) !=
  908. QLA_SUCCESS) {
  909. DEBUG2(printk("scsi%ld: %s: ERROR: Unable to retrieve size!\n",
  910. ha->host_no, __func__));
  911. goto exit_get_crash_record;
  912. }
  913. crash_record_size = mbox_sts[4];
  914. if (crash_record_size == 0) {
  915. DEBUG2(printk("scsi%ld: %s: ERROR: Crash record size is 0!\n",
  916. ha->host_no, __func__));
  917. goto exit_get_crash_record;
  918. }
  919. /* Alloc Memory for Crash Record. */
  920. crash_record = dma_alloc_coherent(&ha->pdev->dev, crash_record_size,
  921. &crash_record_dma, GFP_KERNEL);
  922. if (crash_record == NULL)
  923. goto exit_get_crash_record;
  924. /* Get Crash Record. */
  925. memset(&mbox_cmd, 0, sizeof(mbox_cmd));
  926. memset(&mbox_sts, 0, sizeof(mbox_cmd));
  927. mbox_cmd[0] = MBOX_CMD_GET_CRASH_RECORD;
  928. mbox_cmd[2] = LSDW(crash_record_dma);
  929. mbox_cmd[3] = MSDW(crash_record_dma);
  930. mbox_cmd[4] = crash_record_size;
  931. if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0], &mbox_sts[0]) !=
  932. QLA_SUCCESS)
  933. goto exit_get_crash_record;
  934. /* Dump Crash Record. */
  935. exit_get_crash_record:
  936. if (crash_record)
  937. dma_free_coherent(&ha->pdev->dev, crash_record_size,
  938. crash_record, crash_record_dma);
  939. }
  940. /**
  941. * qla4xxx_get_conn_event_log - retrieves connection event log
  942. * @ha: Pointer to host adapter structure.
  943. **/
  944. void qla4xxx_get_conn_event_log(struct scsi_qla_host * ha)
  945. {
  946. uint32_t mbox_cmd[MBOX_REG_COUNT];
  947. uint32_t mbox_sts[MBOX_REG_COUNT];
  948. struct conn_event_log_entry *event_log = NULL;
  949. dma_addr_t event_log_dma = 0;
  950. uint32_t event_log_size = 0;
  951. uint32_t num_valid_entries;
  952. uint32_t oldest_entry = 0;
  953. uint32_t max_event_log_entries;
  954. uint8_t i;
  955. memset(&mbox_cmd, 0, sizeof(mbox_cmd));
  956. memset(&mbox_sts, 0, sizeof(mbox_cmd));
  957. /* Get size of crash record. */
  958. mbox_cmd[0] = MBOX_CMD_GET_CONN_EVENT_LOG;
  959. if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0], &mbox_sts[0]) !=
  960. QLA_SUCCESS)
  961. goto exit_get_event_log;
  962. event_log_size = mbox_sts[4];
  963. if (event_log_size == 0)
  964. goto exit_get_event_log;
  965. /* Alloc Memory for Crash Record. */
  966. event_log = dma_alloc_coherent(&ha->pdev->dev, event_log_size,
  967. &event_log_dma, GFP_KERNEL);
  968. if (event_log == NULL)
  969. goto exit_get_event_log;
  970. /* Get Crash Record. */
  971. memset(&mbox_cmd, 0, sizeof(mbox_cmd));
  972. memset(&mbox_sts, 0, sizeof(mbox_cmd));
  973. mbox_cmd[0] = MBOX_CMD_GET_CONN_EVENT_LOG;
  974. mbox_cmd[2] = LSDW(event_log_dma);
  975. mbox_cmd[3] = MSDW(event_log_dma);
  976. if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0], &mbox_sts[0]) !=
  977. QLA_SUCCESS) {
  978. DEBUG2(printk("scsi%ld: %s: ERROR: Unable to retrieve event "
  979. "log!\n", ha->host_no, __func__));
  980. goto exit_get_event_log;
  981. }
  982. /* Dump Event Log. */
  983. num_valid_entries = mbox_sts[1];
  984. max_event_log_entries = event_log_size /
  985. sizeof(struct conn_event_log_entry);
  986. if (num_valid_entries > max_event_log_entries)
  987. oldest_entry = num_valid_entries % max_event_log_entries;
  988. DEBUG3(printk("scsi%ld: Connection Event Log Dump (%d entries):\n",
  989. ha->host_no, num_valid_entries));
  990. if (ql4xextended_error_logging == 3) {
  991. if (oldest_entry == 0) {
  992. /* Circular Buffer has not wrapped around */
  993. for (i=0; i < num_valid_entries; i++) {
  994. qla4xxx_dump_buffer((uint8_t *)event_log+
  995. (i*sizeof(*event_log)),
  996. sizeof(*event_log));
  997. }
  998. }
  999. else {
  1000. /* Circular Buffer has wrapped around -
  1001. * display accordingly*/
  1002. for (i=oldest_entry; i < max_event_log_entries; i++) {
  1003. qla4xxx_dump_buffer((uint8_t *)event_log+
  1004. (i*sizeof(*event_log)),
  1005. sizeof(*event_log));
  1006. }
  1007. for (i=0; i < oldest_entry; i++) {
  1008. qla4xxx_dump_buffer((uint8_t *)event_log+
  1009. (i*sizeof(*event_log)),
  1010. sizeof(*event_log));
  1011. }
  1012. }
  1013. }
  1014. exit_get_event_log:
  1015. if (event_log)
  1016. dma_free_coherent(&ha->pdev->dev, event_log_size, event_log,
  1017. event_log_dma);
  1018. }
  1019. /**
  1020. * qla4xxx_abort_task - issues Abort Task
  1021. * @ha: Pointer to host adapter structure.
  1022. * @srb: Pointer to srb entry
  1023. *
  1024. * This routine performs a LUN RESET on the specified target/lun.
  1025. * The caller must ensure that the ddb_entry and lun_entry pointers
  1026. * are valid before calling this routine.
  1027. **/
  1028. int qla4xxx_abort_task(struct scsi_qla_host *ha, struct srb *srb)
  1029. {
  1030. uint32_t mbox_cmd[MBOX_REG_COUNT];
  1031. uint32_t mbox_sts[MBOX_REG_COUNT];
  1032. struct scsi_cmnd *cmd = srb->cmd;
  1033. int status = QLA_SUCCESS;
  1034. unsigned long flags = 0;
  1035. uint32_t index;
  1036. /*
  1037. * Send abort task command to ISP, so that the ISP will return
  1038. * request with ABORT status
  1039. */
  1040. memset(&mbox_cmd, 0, sizeof(mbox_cmd));
  1041. memset(&mbox_sts, 0, sizeof(mbox_sts));
  1042. spin_lock_irqsave(&ha->hardware_lock, flags);
  1043. index = (unsigned long)(unsigned char *)cmd->host_scribble;
  1044. spin_unlock_irqrestore(&ha->hardware_lock, flags);
  1045. /* Firmware already posted completion on response queue */
  1046. if (index == MAX_SRBS)
  1047. return status;
  1048. mbox_cmd[0] = MBOX_CMD_ABORT_TASK;
  1049. mbox_cmd[1] = srb->ddb->fw_ddb_index;
  1050. mbox_cmd[2] = index;
  1051. /* Immediate Command Enable */
  1052. mbox_cmd[5] = 0x01;
  1053. qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0],
  1054. &mbox_sts[0]);
  1055. if (mbox_sts[0] != MBOX_STS_COMMAND_COMPLETE) {
  1056. status = QLA_ERROR;
  1057. DEBUG2(printk(KERN_WARNING "scsi%ld:%d:%llu: abort task FAILED: "
  1058. "mbx0=%04X, mb1=%04X, mb2=%04X, mb3=%04X, mb4=%04X\n",
  1059. ha->host_no, cmd->device->id, cmd->device->lun, mbox_sts[0],
  1060. mbox_sts[1], mbox_sts[2], mbox_sts[3], mbox_sts[4]));
  1061. }
  1062. return status;
  1063. }
  1064. /**
  1065. * qla4xxx_reset_lun - issues LUN Reset
  1066. * @ha: Pointer to host adapter structure.
  1067. * @ddb_entry: Pointer to device database entry
  1068. * @lun: lun number
  1069. *
  1070. * This routine performs a LUN RESET on the specified target/lun.
  1071. * The caller must ensure that the ddb_entry and lun_entry pointers
  1072. * are valid before calling this routine.
  1073. **/
  1074. int qla4xxx_reset_lun(struct scsi_qla_host * ha, struct ddb_entry * ddb_entry,
  1075. uint64_t lun)
  1076. {
  1077. uint32_t mbox_cmd[MBOX_REG_COUNT];
  1078. uint32_t mbox_sts[MBOX_REG_COUNT];
  1079. uint32_t scsi_lun[2];
  1080. int status = QLA_SUCCESS;
  1081. DEBUG2(printk("scsi%ld:%d:%llu: lun reset issued\n", ha->host_no,
  1082. ddb_entry->fw_ddb_index, lun));
  1083. /*
  1084. * Send lun reset command to ISP, so that the ISP will return all
  1085. * outstanding requests with RESET status
  1086. */
  1087. memset(&mbox_cmd, 0, sizeof(mbox_cmd));
  1088. memset(&mbox_sts, 0, sizeof(mbox_sts));
  1089. int_to_scsilun(lun, (struct scsi_lun *) scsi_lun);
  1090. mbox_cmd[0] = MBOX_CMD_LUN_RESET;
  1091. mbox_cmd[1] = ddb_entry->fw_ddb_index;
  1092. /* FW expects LUN bytes 0-3 in Incoming Mailbox 2
  1093. * (LUN byte 0 is LSByte, byte 3 is MSByte) */
  1094. mbox_cmd[2] = cpu_to_le32(scsi_lun[0]);
  1095. /* FW expects LUN bytes 4-7 in Incoming Mailbox 3
  1096. * (LUN byte 4 is LSByte, byte 7 is MSByte) */
  1097. mbox_cmd[3] = cpu_to_le32(scsi_lun[1]);
  1098. mbox_cmd[5] = 0x01; /* Immediate Command Enable */
  1099. qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]);
  1100. if (mbox_sts[0] != MBOX_STS_COMMAND_COMPLETE &&
  1101. mbox_sts[0] != MBOX_STS_COMMAND_ERROR)
  1102. status = QLA_ERROR;
  1103. return status;
  1104. }
  1105. /**
  1106. * qla4xxx_reset_target - issues target Reset
  1107. * @ha: Pointer to host adapter structure.
  1108. * @db_entry: Pointer to device database entry
  1109. * @un_entry: Pointer to lun entry structure
  1110. *
  1111. * This routine performs a TARGET RESET on the specified target.
  1112. * The caller must ensure that the ddb_entry pointers
  1113. * are valid before calling this routine.
  1114. **/
  1115. int qla4xxx_reset_target(struct scsi_qla_host *ha,
  1116. struct ddb_entry *ddb_entry)
  1117. {
  1118. uint32_t mbox_cmd[MBOX_REG_COUNT];
  1119. uint32_t mbox_sts[MBOX_REG_COUNT];
  1120. int status = QLA_SUCCESS;
  1121. DEBUG2(printk("scsi%ld:%d: target reset issued\n", ha->host_no,
  1122. ddb_entry->fw_ddb_index));
  1123. /*
  1124. * Send target reset command to ISP, so that the ISP will return all
  1125. * outstanding requests with RESET status
  1126. */
  1127. memset(&mbox_cmd, 0, sizeof(mbox_cmd));
  1128. memset(&mbox_sts, 0, sizeof(mbox_sts));
  1129. mbox_cmd[0] = MBOX_CMD_TARGET_WARM_RESET;
  1130. mbox_cmd[1] = ddb_entry->fw_ddb_index;
  1131. mbox_cmd[5] = 0x01; /* Immediate Command Enable */
  1132. qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0],
  1133. &mbox_sts[0]);
  1134. if (mbox_sts[0] != MBOX_STS_COMMAND_COMPLETE &&
  1135. mbox_sts[0] != MBOX_STS_COMMAND_ERROR)
  1136. status = QLA_ERROR;
  1137. return status;
  1138. }
  1139. int qla4xxx_get_flash(struct scsi_qla_host * ha, dma_addr_t dma_addr,
  1140. uint32_t offset, uint32_t len)
  1141. {
  1142. uint32_t mbox_cmd[MBOX_REG_COUNT];
  1143. uint32_t mbox_sts[MBOX_REG_COUNT];
  1144. memset(&mbox_cmd, 0, sizeof(mbox_cmd));
  1145. memset(&mbox_sts, 0, sizeof(mbox_sts));
  1146. mbox_cmd[0] = MBOX_CMD_READ_FLASH;
  1147. mbox_cmd[1] = LSDW(dma_addr);
  1148. mbox_cmd[2] = MSDW(dma_addr);
  1149. mbox_cmd[3] = offset;
  1150. mbox_cmd[4] = len;
  1151. if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 2, &mbox_cmd[0], &mbox_sts[0]) !=
  1152. QLA_SUCCESS) {
  1153. DEBUG2(printk("scsi%ld: %s: MBOX_CMD_READ_FLASH, failed w/ "
  1154. "status %04X %04X, offset %08x, len %08x\n", ha->host_no,
  1155. __func__, mbox_sts[0], mbox_sts[1], offset, len));
  1156. return QLA_ERROR;
  1157. }
  1158. return QLA_SUCCESS;
  1159. }
/**
 * qla4xxx_about_firmware - gets FW, iscsi draft and boot loader version
 * @ha: Pointer to host adapter structure.
 *
 * Retrieves the FW version, iSCSI draft version & bootloader version of HBA.
 * Mailboxes 2 & 3 may hold an address for data. Make sure that we write 0 to
 * those mailboxes, if unused.
 **/
int qla4xxx_about_firmware(struct scsi_qla_host *ha)
{
	struct about_fw_info *about_fw = NULL;
	dma_addr_t about_fw_dma;
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	int status = QLA_ERROR;

	about_fw = dma_zalloc_coherent(&ha->pdev->dev,
				       sizeof(struct about_fw_info),
				       &about_fw_dma, GFP_KERNEL);
	if (!about_fw) {
		DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Unable to alloc memory "
				  "for about_fw\n", __func__));
		return status;
	}

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_ABOUT_FW;
	mbox_cmd[2] = LSDW(about_fw_dma);
	mbox_cmd[3] = MSDW(about_fw_dma);
	mbox_cmd[4] = sizeof(struct about_fw_info);

	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, MBOX_REG_COUNT,
					 &mbox_cmd[0], &mbox_sts[0]);
	if (status != QLA_SUCCESS) {
		DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_ABOUT_FW "
				  "failed w/ status %04X\n", __func__,
				  mbox_sts[0]));
		goto exit_about_fw;
	}

	/* Save version information. */
	ha->fw_info.fw_major = le16_to_cpu(about_fw->fw_major);
	ha->fw_info.fw_minor = le16_to_cpu(about_fw->fw_minor);
	ha->fw_info.fw_patch = le16_to_cpu(about_fw->fw_patch);
	ha->fw_info.fw_build = le16_to_cpu(about_fw->fw_build);
	memcpy(ha->fw_info.fw_build_date, about_fw->fw_build_date,
	       sizeof(about_fw->fw_build_date));
	memcpy(ha->fw_info.fw_build_time, about_fw->fw_build_time,
	       sizeof(about_fw->fw_build_time));
	strcpy((char *)ha->fw_info.fw_build_user,
	       skip_spaces((char *)about_fw->fw_build_user));
	ha->fw_info.fw_load_source = le16_to_cpu(about_fw->fw_load_source);
	ha->fw_info.iscsi_major = le16_to_cpu(about_fw->iscsi_major);
	ha->fw_info.iscsi_minor = le16_to_cpu(about_fw->iscsi_minor);
	ha->fw_info.bootload_major = le16_to_cpu(about_fw->bootload_major);
	ha->fw_info.bootload_minor = le16_to_cpu(about_fw->bootload_minor);
	ha->fw_info.bootload_patch = le16_to_cpu(about_fw->bootload_patch);
	ha->fw_info.bootload_build = le16_to_cpu(about_fw->bootload_build);
	strcpy((char *)ha->fw_info.extended_timestamp,
	       skip_spaces((char *)about_fw->extended_timestamp));

	ha->fw_uptime_secs = le32_to_cpu(mbox_sts[5]);
	ha->fw_uptime_msecs = le32_to_cpu(mbox_sts[6]);
	status = QLA_SUCCESS;

exit_about_fw:
	dma_free_coherent(&ha->pdev->dev, sizeof(struct about_fw_info),
			  about_fw, about_fw_dma);
	return status;
}
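
/*
 * Illustrative sketch only (not part of the upstream driver): after a
 * successful qla4xxx_about_firmware() call the version data is cached in
 * ha->fw_info, so a caller could report it like this.
 */
#if 0
static void example_report_fw_version(struct scsi_qla_host *ha)
{
	if (qla4xxx_about_firmware(ha) != QLA_SUCCESS)
		return;

	ql4_printk(KERN_INFO, ha, "FW %d.%d.%d.%d, bootloader %d.%d.%d\n",
		   ha->fw_info.fw_major, ha->fw_info.fw_minor,
		   ha->fw_info.fw_patch, ha->fw_info.fw_build,
		   ha->fw_info.bootload_major, ha->fw_info.bootload_minor,
		   ha->fw_info.bootload_patch);
}
#endif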
int qla4xxx_get_default_ddb(struct scsi_qla_host *ha, uint32_t options,
			    dma_addr_t dma_addr)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_GET_DATABASE_ENTRY_DEFAULTS;
	mbox_cmd[1] = options;
	mbox_cmd[2] = LSDW(dma_addr);
	mbox_cmd[3] = MSDW(dma_addr);

	if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]) !=
	    QLA_SUCCESS) {
		DEBUG2(printk("scsi%ld: %s: failed status %04X\n",
			      ha->host_no, __func__, mbox_sts[0]));
		return QLA_ERROR;
	}
	return QLA_SUCCESS;
}
int qla4xxx_req_ddb_entry(struct scsi_qla_host *ha, uint32_t ddb_index,
			  uint32_t *mbx_sts)
{
	int status;
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_REQUEST_DATABASE_ENTRY;
	mbox_cmd[1] = ddb_index;

	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0],
					 &mbox_sts[0]);
	if (status != QLA_SUCCESS) {
		DEBUG2(ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n",
				  __func__, mbox_sts[0]));
	}

	*mbx_sts = mbox_sts[0];
	return status;
}
int qla4xxx_clear_ddb_entry(struct scsi_qla_host *ha, uint32_t ddb_index)
{
	int status;
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_CLEAR_DATABASE_ENTRY;
	mbox_cmd[1] = ddb_index;

	status = qla4xxx_mailbox_command(ha, 2, 1, &mbox_cmd[0],
					 &mbox_sts[0]);
	if (status != QLA_SUCCESS) {
		DEBUG2(ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n",
				  __func__, mbox_sts[0]));
	}

	return status;
}
int qla4xxx_set_flash(struct scsi_qla_host *ha, dma_addr_t dma_addr,
		      uint32_t offset, uint32_t length, uint32_t options)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	int status = QLA_SUCCESS;

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_WRITE_FLASH;
	mbox_cmd[1] = LSDW(dma_addr);
	mbox_cmd[2] = MSDW(dma_addr);
	mbox_cmd[3] = offset;
	mbox_cmd[4] = length;
	mbox_cmd[5] = options;

	status = qla4xxx_mailbox_command(ha, 6, 2, &mbox_cmd[0], &mbox_sts[0]);
	if (status != QLA_SUCCESS) {
		DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_WRITE_FLASH "
				  "failed w/ status %04X, mbx1 %04X\n",
				  __func__, mbox_sts[0], mbox_sts[1]));
	}
	return status;
}
int qla4xxx_bootdb_by_index(struct scsi_qla_host *ha,
			    struct dev_db_entry *fw_ddb_entry,
			    dma_addr_t fw_ddb_entry_dma, uint16_t ddb_index)
{
	uint32_t dev_db_start_offset = FLASH_OFFSET_DB_INFO;
	uint32_t dev_db_end_offset;
	int status = QLA_ERROR;

	memset(fw_ddb_entry, 0, sizeof(*fw_ddb_entry));

	dev_db_start_offset += (ddb_index * sizeof(*fw_ddb_entry));
	dev_db_end_offset = FLASH_OFFSET_DB_END;

	if (dev_db_start_offset > dev_db_end_offset) {
		DEBUG2(ql4_printk(KERN_ERR, ha,
				  "%s:Invalid DDB index %d", __func__,
				  ddb_index));
		goto exit_bootdb_failed;
	}

	if (qla4xxx_get_flash(ha, fw_ddb_entry_dma, dev_db_start_offset,
			      sizeof(*fw_ddb_entry)) != QLA_SUCCESS) {
		ql4_printk(KERN_ERR, ha, "scsi%ld: %s: Get Flash failed\n",
			   ha->host_no, __func__);
		goto exit_bootdb_failed;
	}

	if (fw_ddb_entry->cookie == DDB_VALID_COOKIE)
		status = QLA_SUCCESS;

exit_bootdb_failed:
	return status;
}
int qla4xxx_flashdb_by_index(struct scsi_qla_host *ha,
			     struct dev_db_entry *fw_ddb_entry,
			     dma_addr_t fw_ddb_entry_dma, uint16_t ddb_index)
{
	uint32_t dev_db_start_offset;
	uint32_t dev_db_end_offset;
	int status = QLA_ERROR;

	memset(fw_ddb_entry, 0, sizeof(*fw_ddb_entry));

	if (is_qla40XX(ha)) {
		dev_db_start_offset = FLASH_OFFSET_DB_INFO;
		dev_db_end_offset = FLASH_OFFSET_DB_END;
	} else {
		dev_db_start_offset = FLASH_RAW_ACCESS_ADDR +
				      (ha->hw.flt_region_ddb << 2);
		/* flt_ddb_size is DDB table size for both ports
		 * so divide it by 2 to calculate the offset for second port
		 */
		if (ha->port_num == 1)
			dev_db_start_offset += (ha->hw.flt_ddb_size / 2);

		dev_db_end_offset = dev_db_start_offset +
				    (ha->hw.flt_ddb_size / 2);
	}

	dev_db_start_offset += (ddb_index * sizeof(*fw_ddb_entry));

	if (dev_db_start_offset > dev_db_end_offset) {
		DEBUG2(ql4_printk(KERN_ERR, ha,
				  "%s:Invalid DDB index %d", __func__,
				  ddb_index));
		goto exit_fdb_failed;
	}

	if (qla4xxx_get_flash(ha, fw_ddb_entry_dma, dev_db_start_offset,
			      sizeof(*fw_ddb_entry)) != QLA_SUCCESS) {
		ql4_printk(KERN_ERR, ha, "scsi%ld: %s: Get Flash failed\n",
			   ha->host_no, __func__);
		goto exit_fdb_failed;
	}

	if (fw_ddb_entry->cookie == DDB_VALID_COOKIE)
		status = QLA_SUCCESS;

exit_fdb_failed:
	return status;
}
int qla4xxx_get_chap(struct scsi_qla_host *ha, char *username, char *password,
		     uint16_t idx)
{
	int ret = 0;
	int rval = QLA_ERROR;
	uint32_t offset = 0, chap_size;
	struct ql4_chap_table *chap_table;
	dma_addr_t chap_dma;

	chap_table = dma_pool_zalloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma);
	if (chap_table == NULL)
		return -ENOMEM;

	chap_size = sizeof(struct ql4_chap_table);

	if (is_qla40XX(ha))
		offset = FLASH_CHAP_OFFSET | (idx * chap_size);
	else {
		offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
		/* flt_chap_size is CHAP table size for both ports
		 * so divide it by 2 to calculate the offset for second port
		 */
		if (ha->port_num == 1)
			offset += (ha->hw.flt_chap_size / 2);
		offset += (idx * chap_size);
	}

	rval = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
	if (rval != QLA_SUCCESS) {
		ret = -EINVAL;
		goto exit_get_chap;
	}

	DEBUG2(ql4_printk(KERN_INFO, ha, "Chap Cookie: x%x\n",
			  __le16_to_cpu(chap_table->cookie)));

	if (__le16_to_cpu(chap_table->cookie) != CHAP_VALID_COOKIE) {
		ql4_printk(KERN_ERR, ha, "No valid chap entry found\n");
		goto exit_get_chap;
	}

	strlcpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN);
	strlcpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN);
	chap_table->cookie = __constant_cpu_to_le16(CHAP_VALID_COOKIE);

exit_get_chap:
	dma_pool_free(ha->chap_dma_pool, chap_table, chap_dma);
	return ret;
}
/**
 * qla4xxx_set_chap - Make a chap entry at the given index
 * @ha: pointer to adapter structure
 * @username: CHAP username to set
 * @password: CHAP password to set
 * @idx: CHAP index at which to make the entry
 * @bidi: type of chap entry (chap_in or chap_out)
 *
 * Create chap entry at the given index with the information provided.
 *
 * Note: Caller should acquire the chap lock before getting here.
 **/
int qla4xxx_set_chap(struct scsi_qla_host *ha, char *username, char *password,
		     uint16_t idx, int bidi)
{
	int ret = 0;
	int rval = QLA_ERROR;
	uint32_t offset = 0;
	struct ql4_chap_table *chap_table;
	uint32_t chap_size = 0;
	dma_addr_t chap_dma;

	chap_table = dma_pool_zalloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma);
	if (chap_table == NULL) {
		ret = -ENOMEM;
		goto exit_set_chap;
	}

	if (bidi)
		chap_table->flags |= BIT_6; /* peer */
	else
		chap_table->flags |= BIT_7; /* local */
	chap_table->secret_len = strlen(password);
	strncpy(chap_table->secret, password, MAX_CHAP_SECRET_LEN - 1);
	strncpy(chap_table->name, username, MAX_CHAP_NAME_LEN - 1);
	chap_table->cookie = __constant_cpu_to_le16(CHAP_VALID_COOKIE);

	if (is_qla40XX(ha)) {
		chap_size = MAX_CHAP_ENTRIES_40XX * sizeof(*chap_table);
		offset = FLASH_CHAP_OFFSET;
	} else { /* Single region contains CHAP info for both ports which is
		  * divided into half for each port.
		  */
		chap_size = ha->hw.flt_chap_size / 2;
		offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
		if (ha->port_num == 1)
			offset += chap_size;
	}

	offset += (idx * sizeof(struct ql4_chap_table));
	rval = qla4xxx_set_flash(ha, chap_dma, offset,
				 sizeof(struct ql4_chap_table),
				 FLASH_OPT_RMW_COMMIT);

	if (rval == QLA_SUCCESS && ha->chap_list) {
		/* Update ha chap_list cache */
		memcpy((struct ql4_chap_table *)ha->chap_list + idx,
		       chap_table, sizeof(struct ql4_chap_table));
	}
	dma_pool_free(ha->chap_dma_pool, chap_table, chap_dma);
	if (rval != QLA_SUCCESS)
		ret = -EINVAL;

exit_set_chap:
	return ret;
}
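
/*
 * Illustrative sketch only (not part of the upstream driver): the
 * kernel-doc above notes that the caller must hold the CHAP lock, so a
 * typical caller brackets qla4xxx_set_chap() with ha->chap_sem.  The
 * credentials below are placeholders.
 */
#if 0
static int example_store_local_chap(struct scsi_qla_host *ha, uint16_t idx)
{
	int ret;

	mutex_lock(&ha->chap_sem);
	ret = qla4xxx_set_chap(ha, "chapuser", "chapsecret123", idx, 0);
	mutex_unlock(&ha->chap_sem);

	return ret;
}
#endif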
int qla4xxx_get_uni_chap_at_index(struct scsi_qla_host *ha, char *username,
				  char *password, uint16_t chap_index)
{
	int rval = QLA_ERROR;
	struct ql4_chap_table *chap_table = NULL;
	int max_chap_entries;

	if (!ha->chap_list) {
		ql4_printk(KERN_ERR, ha, "Do not have CHAP table cache\n");
		rval = QLA_ERROR;
		goto exit_uni_chap;
	}

	if (!username || !password) {
		ql4_printk(KERN_ERR, ha, "No memory for username & secret\n");
		rval = QLA_ERROR;
		goto exit_uni_chap;
	}

	if (is_qla80XX(ha))
		max_chap_entries = (ha->hw.flt_chap_size / 2) /
				   sizeof(struct ql4_chap_table);
	else
		max_chap_entries = MAX_CHAP_ENTRIES_40XX;

	if (chap_index > max_chap_entries) {
		ql4_printk(KERN_ERR, ha, "Invalid Chap index\n");
		rval = QLA_ERROR;
		goto exit_uni_chap;
	}

	mutex_lock(&ha->chap_sem);
	chap_table = (struct ql4_chap_table *)ha->chap_list + chap_index;
	if (chap_table->cookie != __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
		rval = QLA_ERROR;
		goto exit_unlock_uni_chap;
	}

	if (!(chap_table->flags & BIT_7)) {
		ql4_printk(KERN_ERR, ha, "Unidirectional entry not set\n");
		rval = QLA_ERROR;
		goto exit_unlock_uni_chap;
	}

	strlcpy(password, chap_table->secret, MAX_CHAP_SECRET_LEN);
	strlcpy(username, chap_table->name, MAX_CHAP_NAME_LEN);

	rval = QLA_SUCCESS;

exit_unlock_uni_chap:
	mutex_unlock(&ha->chap_sem);
exit_uni_chap:
	return rval;
}
/**
 * qla4xxx_get_chap_index - Get chap index given username and secret
 * @ha: pointer to adapter structure
 * @username: CHAP username to be searched
 * @password: CHAP password to be searched
 * @bidi: Is this a BIDI CHAP
 * @chap_index: CHAP index to be returned
 *
 * Match the username and password in the chap_list, return the index if a
 * match is found. If a match is not found then add the entry in FLASH and
 * return the index at which entry is written in the FLASH.
 **/
int qla4xxx_get_chap_index(struct scsi_qla_host *ha, char *username,
			   char *password, int bidi, uint16_t *chap_index)
{
	int i, rval;
	int free_index = -1;
	int found_index = 0;
	int max_chap_entries = 0;
	struct ql4_chap_table *chap_table;

	if (is_qla80XX(ha))
		max_chap_entries = (ha->hw.flt_chap_size / 2) /
				   sizeof(struct ql4_chap_table);
	else
		max_chap_entries = MAX_CHAP_ENTRIES_40XX;

	if (!ha->chap_list) {
		ql4_printk(KERN_ERR, ha, "Do not have CHAP table cache\n");
		return QLA_ERROR;
	}

	if (!username || !password) {
		ql4_printk(KERN_ERR, ha, "Do not have username and psw\n");
		return QLA_ERROR;
	}

	mutex_lock(&ha->chap_sem);
	for (i = 0; i < max_chap_entries; i++) {
		chap_table = (struct ql4_chap_table *)ha->chap_list + i;
		if (chap_table->cookie !=
		    __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
			if (i > MAX_RESRV_CHAP_IDX && free_index == -1)
				free_index = i;
			continue;
		}
		if (bidi) {
			if (chap_table->flags & BIT_7)
				continue;
		} else {
			if (chap_table->flags & BIT_6)
				continue;
		}
		if (!strncmp(chap_table->secret, password,
			     MAX_CHAP_SECRET_LEN) &&
		    !strncmp(chap_table->name, username,
			     MAX_CHAP_NAME_LEN)) {
			*chap_index = i;
			found_index = 1;
			break;
		}
	}

	/* If chap entry is not present and a free index is available then
	 * write the entry in flash
	 */
	if (!found_index && free_index != -1) {
		rval = qla4xxx_set_chap(ha, username, password,
					free_index, bidi);
		if (!rval) {
			*chap_index = free_index;
			found_index = 1;
		}
	}

	mutex_unlock(&ha->chap_sem);

	if (found_index)
		return QLA_SUCCESS;
	return QLA_ERROR;
}
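
/*
 * Illustrative sketch only (not part of the upstream driver): a caller
 * that needs the FLASH index of a unidirectional CHAP entry, creating it
 * if absent, can rely on the match-or-add behaviour described above.  The
 * credentials are placeholders; LOCAL_CHAP/BIDI_CHAP select the entry type.
 */
#if 0
static int example_lookup_or_add_chap(struct scsi_qla_host *ha,
				      uint16_t *chap_index)
{
	return qla4xxx_get_chap_index(ha, "chapuser", "chapsecret123",
				      LOCAL_CHAP, chap_index);
}
#endif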
int qla4xxx_conn_close_sess_logout(struct scsi_qla_host *ha,
				   uint16_t fw_ddb_index,
				   uint16_t connection_id,
				   uint16_t option)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	int status = QLA_SUCCESS;

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_CONN_CLOSE_SESS_LOGOUT;
	mbox_cmd[1] = fw_ddb_index;
	mbox_cmd[2] = connection_id;
	mbox_cmd[3] = option;

	status = qla4xxx_mailbox_command(ha, 4, 2, &mbox_cmd[0], &mbox_sts[0]);
	if (status != QLA_SUCCESS) {
		DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_CONN_CLOSE "
				  "option %04x failed w/ status %04X %04X\n",
				  __func__, option, mbox_sts[0], mbox_sts[1]));
	}
	return status;
}
/**
 * qla4_84xx_extend_idc_tmo - Extend IDC Timeout.
 * @ha: Pointer to host adapter structure.
 * @ext_tmo: idc timeout value
 *
 * Requests firmware to extend the idc timeout value.
 **/
static int qla4_84xx_extend_idc_tmo(struct scsi_qla_host *ha, uint32_t ext_tmo)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	int status;

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));
	ext_tmo &= 0xf;

	mbox_cmd[0] = MBOX_CMD_IDC_TIME_EXTEND;
	mbox_cmd[1] = ((ha->idc_info.request_desc & 0xfffff0ff) |
		       (ext_tmo << 8));		/* new timeout */
	mbox_cmd[2] = ha->idc_info.info1;
	mbox_cmd[3] = ha->idc_info.info2;
	mbox_cmd[4] = ha->idc_info.info3;

	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, MBOX_REG_COUNT,
					 mbox_cmd, mbox_sts);
	if (status != QLA_SUCCESS) {
		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "scsi%ld: %s: failed status %04X\n",
				  ha->host_no, __func__, mbox_sts[0]));
		return QLA_ERROR;
	} else {
		ql4_printk(KERN_INFO, ha, "%s: IDC timeout extended by %d secs\n",
			   __func__, ext_tmo);
	}

	return QLA_SUCCESS;
}
int qla4xxx_disable_acb(struct scsi_qla_host *ha)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	int status = QLA_SUCCESS;

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_DISABLE_ACB;

	status = qla4xxx_mailbox_command(ha, 8, 5, &mbox_cmd[0], &mbox_sts[0]);
	if (status != QLA_SUCCESS) {
		DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_DISABLE_ACB "
				  "failed w/ status %04X %04X %04X", __func__,
				  mbox_sts[0], mbox_sts[1], mbox_sts[2]));
	} else {
		if (is_qla8042(ha) &&
		    test_bit(DPC_POST_IDC_ACK, &ha->dpc_flags) &&
		    (mbox_sts[0] != MBOX_STS_COMMAND_COMPLETE)) {
			/*
			 * Disable ACB mailbox command takes time to complete
			 * based on the total number of targets connected.
			 * For 512 targets, it took approximately 5 secs to
			 * complete. Setting the timeout value to 8, with the 3
			 * secs buffer.
			 */
			qla4_84xx_extend_idc_tmo(ha, IDC_EXTEND_TOV);
			if (!wait_for_completion_timeout(&ha->disable_acb_comp,
							 IDC_EXTEND_TOV * HZ)) {
				ql4_printk(KERN_WARNING, ha, "%s: Disable ACB Completion not received\n",
					   __func__);
			}
		}
	}
	return status;
}
int qla4xxx_get_acb(struct scsi_qla_host *ha, dma_addr_t acb_dma,
		    uint32_t acb_type, uint32_t len)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	int status = QLA_SUCCESS;

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_GET_ACB;
	mbox_cmd[1] = acb_type;
	mbox_cmd[2] = LSDW(acb_dma);
	mbox_cmd[3] = MSDW(acb_dma);
	mbox_cmd[4] = len;

	status = qla4xxx_mailbox_command(ha, 5, 5, &mbox_cmd[0], &mbox_sts[0]);
	if (status != QLA_SUCCESS) {
		DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_GET_ACB "
				  "failed w/ status %04X\n", __func__,
				  mbox_sts[0]));
	}
	return status;
}
int qla4xxx_set_acb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
		    uint32_t *mbox_sts, dma_addr_t acb_dma)
{
	int status = QLA_SUCCESS;

	memset(mbox_cmd, 0, sizeof(mbox_cmd[0]) * MBOX_REG_COUNT);
	memset(mbox_sts, 0, sizeof(mbox_sts[0]) * MBOX_REG_COUNT);
	mbox_cmd[0] = MBOX_CMD_SET_ACB;
	mbox_cmd[1] = 0; /* Primary ACB */
	mbox_cmd[2] = LSDW(acb_dma);
	mbox_cmd[3] = MSDW(acb_dma);
	mbox_cmd[4] = sizeof(struct addr_ctrl_blk);

	status = qla4xxx_mailbox_command(ha, 5, 5, &mbox_cmd[0], &mbox_sts[0]);
	if (status != QLA_SUCCESS) {
		DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_SET_ACB "
				  "failed w/ status %04X\n", __func__,
				  mbox_sts[0]));
	}
	return status;
}
int qla4xxx_set_param_ddbentry(struct scsi_qla_host *ha,
			       struct ddb_entry *ddb_entry,
			       struct iscsi_cls_conn *cls_conn,
			       uint32_t *mbx_sts)
{
	struct dev_db_entry *fw_ddb_entry;
	struct iscsi_conn *conn;
	struct iscsi_session *sess;
	struct qla_conn *qla_conn;
	struct sockaddr *dst_addr;
	dma_addr_t fw_ddb_entry_dma;
	int status = QLA_SUCCESS;
	int rval = 0;
	struct sockaddr_in *addr;
	struct sockaddr_in6 *addr6;
	char *ip;
	uint16_t iscsi_opts = 0;
	uint32_t options = 0;
	uint16_t idx, *ptid;

	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
					  &fw_ddb_entry_dma, GFP_KERNEL);
	if (!fw_ddb_entry) {
		DEBUG2(ql4_printk(KERN_ERR, ha,
				  "%s: Unable to allocate dma buffer.\n",
				  __func__));
		rval = -ENOMEM;
		goto exit_set_param_no_free;
	}

	conn = cls_conn->dd_data;
	qla_conn = conn->dd_data;
	sess = conn->session;
	dst_addr = (struct sockaddr *)&qla_conn->qla_ep->dst_addr;

	if (dst_addr->sa_family == AF_INET6)
		options |= IPV6_DEFAULT_DDB_ENTRY;

	status = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma);
	if (status == QLA_ERROR) {
		rval = -EINVAL;
		goto exit_set_param;
	}

	ptid = (uint16_t *)&fw_ddb_entry->isid[1];
	*ptid = cpu_to_le16((uint16_t)ddb_entry->sess->target_id);

	DEBUG2(ql4_printk(KERN_INFO, ha, "ISID [%pmR]\n", fw_ddb_entry->isid));

	iscsi_opts = le16_to_cpu(fw_ddb_entry->iscsi_options);
	memset(fw_ddb_entry->iscsi_alias, 0, sizeof(fw_ddb_entry->iscsi_alias));

	memset(fw_ddb_entry->iscsi_name, 0, sizeof(fw_ddb_entry->iscsi_name));

	if (sess->targetname != NULL) {
		memcpy(fw_ddb_entry->iscsi_name, sess->targetname,
		       min(strlen(sess->targetname),
			   sizeof(fw_ddb_entry->iscsi_name)));
	}

	memset(fw_ddb_entry->ip_addr, 0, sizeof(fw_ddb_entry->ip_addr));
	memset(fw_ddb_entry->tgt_addr, 0, sizeof(fw_ddb_entry->tgt_addr));

	fw_ddb_entry->options = DDB_OPT_TARGET | DDB_OPT_AUTO_SENDTGTS_DISABLE;

	if (dst_addr->sa_family == AF_INET) {
		addr = (struct sockaddr_in *)dst_addr;
		ip = (char *)&addr->sin_addr;
		memcpy(fw_ddb_entry->ip_addr, ip, IP_ADDR_LEN);
		fw_ddb_entry->port = cpu_to_le16(ntohs(addr->sin_port));
		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "%s: Destination Address [%pI4]: index [%d]\n",
				  __func__, fw_ddb_entry->ip_addr,
				  ddb_entry->fw_ddb_index));
	} else if (dst_addr->sa_family == AF_INET6) {
		addr6 = (struct sockaddr_in6 *)dst_addr;
		ip = (char *)&addr6->sin6_addr;
		memcpy(fw_ddb_entry->ip_addr, ip, IPv6_ADDR_LEN);
		fw_ddb_entry->port = cpu_to_le16(ntohs(addr6->sin6_port));
		fw_ddb_entry->options |= DDB_OPT_IPV6_DEVICE;
		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "%s: Destination Address [%pI6]: index [%d]\n",
				  __func__, fw_ddb_entry->ip_addr,
				  ddb_entry->fw_ddb_index));
	} else {
		ql4_printk(KERN_ERR, ha,
			   "%s: Failed to get IP Address\n",
			   __func__);
		rval = -EINVAL;
		goto exit_set_param;
	}

	/* CHAP */
	if (sess->username != NULL && sess->password != NULL) {
		if (strlen(sess->username) && strlen(sess->password)) {
			iscsi_opts |= BIT_7;

			rval = qla4xxx_get_chap_index(ha, sess->username,
						      sess->password,
						      LOCAL_CHAP, &idx);
			if (rval)
				goto exit_set_param;

			fw_ddb_entry->chap_tbl_idx = cpu_to_le16(idx);
		}
	}

	if (sess->username_in != NULL && sess->password_in != NULL) {
		/* Check if BIDI CHAP */
		if (strlen(sess->username_in) && strlen(sess->password_in)) {
			iscsi_opts |= BIT_4;

			rval = qla4xxx_get_chap_index(ha, sess->username_in,
						      sess->password_in,
						      BIDI_CHAP, &idx);
			if (rval)
				goto exit_set_param;
		}
	}

	if (sess->initial_r2t_en)
		iscsi_opts |= BIT_10;

	if (sess->imm_data_en)
		iscsi_opts |= BIT_11;

	fw_ddb_entry->iscsi_options = cpu_to_le16(iscsi_opts);

	if (conn->max_recv_dlength)
		fw_ddb_entry->iscsi_max_rcv_data_seg_len =
		  __constant_cpu_to_le16((conn->max_recv_dlength / BYTE_UNITS));

	if (sess->max_r2t)
		fw_ddb_entry->iscsi_max_outsnd_r2t = cpu_to_le16(sess->max_r2t);

	if (sess->first_burst)
		fw_ddb_entry->iscsi_first_burst_len =
		       __constant_cpu_to_le16((sess->first_burst / BYTE_UNITS));

	if (sess->max_burst)
		fw_ddb_entry->iscsi_max_burst_len =
			__constant_cpu_to_le16((sess->max_burst / BYTE_UNITS));

	if (sess->time2wait)
		fw_ddb_entry->iscsi_def_time2wait =
			cpu_to_le16(sess->time2wait);

	if (sess->time2retain)
		fw_ddb_entry->iscsi_def_time2retain =
			cpu_to_le16(sess->time2retain);

	status = qla4xxx_set_ddb_entry(ha, ddb_entry->fw_ddb_index,
				       fw_ddb_entry_dma, mbx_sts);
	if (status != QLA_SUCCESS)
		rval = -EINVAL;

exit_set_param:
	dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
			  fw_ddb_entry, fw_ddb_entry_dma);
exit_set_param_no_free:
	return rval;
}
int qla4xxx_get_mgmt_data(struct scsi_qla_host *ha, uint16_t fw_ddb_index,
			  uint16_t stats_size, dma_addr_t stats_dma)
{
	int status = QLA_SUCCESS;
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];

	memset(mbox_cmd, 0, sizeof(mbox_cmd[0]) * MBOX_REG_COUNT);
	memset(mbox_sts, 0, sizeof(mbox_sts[0]) * MBOX_REG_COUNT);
	mbox_cmd[0] = MBOX_CMD_GET_MANAGEMENT_DATA;
	mbox_cmd[1] = fw_ddb_index;
	mbox_cmd[2] = LSDW(stats_dma);
	mbox_cmd[3] = MSDW(stats_dma);
	mbox_cmd[4] = stats_size;

	status = qla4xxx_mailbox_command(ha, 5, 1, &mbox_cmd[0], &mbox_sts[0]);
	if (status != QLA_SUCCESS) {
		DEBUG2(ql4_printk(KERN_WARNING, ha,
				  "%s: MBOX_CMD_GET_MANAGEMENT_DATA "
				  "failed w/ status %04X\n", __func__,
				  mbox_sts[0]));
	}
	return status;
}
int qla4xxx_get_ip_state(struct scsi_qla_host *ha, uint32_t acb_idx,
			 uint32_t ip_idx, uint32_t *sts)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	int status = QLA_SUCCESS;

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));
	mbox_cmd[0] = MBOX_CMD_GET_IP_ADDR_STATE;
	mbox_cmd[1] = acb_idx;
	mbox_cmd[2] = ip_idx;

	status = qla4xxx_mailbox_command(ha, 3, 8, &mbox_cmd[0], &mbox_sts[0]);
	if (status != QLA_SUCCESS) {
		DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: "
				  "MBOX_CMD_GET_IP_ADDR_STATE failed w/ "
				  "status %04X\n", __func__, mbox_sts[0]));
	}
	memcpy(sts, mbox_sts, sizeof(mbox_sts));
	return status;
}
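
/*
 * Illustrative sketch only (not part of the upstream driver): the sts
 * buffer receives a full copy of the mailbox status registers, so callers
 * should provide MBOX_REG_COUNT words.  ACB index 0 is used here purely
 * for illustration.
 */
#if 0
static void example_query_ip_state(struct scsi_qla_host *ha, uint32_t ip_idx)
{
	uint32_t ip_sts[MBOX_REG_COUNT];

	if (qla4xxx_get_ip_state(ha, 0, ip_idx, ip_sts) == QLA_SUCCESS)
		ql4_printk(KERN_INFO, ha, "ip idx %u: sts1 %08X sts2 %08X\n",
			   ip_idx, ip_sts[1], ip_sts[2]);
}
#endif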
int qla4xxx_get_nvram(struct scsi_qla_host *ha, dma_addr_t nvram_dma,
		      uint32_t offset, uint32_t size)
{
	int status = QLA_SUCCESS;
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_GET_NVRAM;
	mbox_cmd[1] = LSDW(nvram_dma);
	mbox_cmd[2] = MSDW(nvram_dma);
	mbox_cmd[3] = offset;
	mbox_cmd[4] = size;

	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0],
					 &mbox_sts[0]);
	if (status != QLA_SUCCESS) {
		DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
				  "status %04X\n", ha->host_no, __func__,
				  mbox_sts[0]));
	}
	return status;
}
int qla4xxx_set_nvram(struct scsi_qla_host *ha, dma_addr_t nvram_dma,
		      uint32_t offset, uint32_t size)
{
	int status = QLA_SUCCESS;
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_SET_NVRAM;
	mbox_cmd[1] = LSDW(nvram_dma);
	mbox_cmd[2] = MSDW(nvram_dma);
	mbox_cmd[3] = offset;
	mbox_cmd[4] = size;

	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0],
					 &mbox_sts[0]);
	if (status != QLA_SUCCESS) {
		DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
				  "status %04X\n", ha->host_no, __func__,
				  mbox_sts[0]));
	}
	return status;
}
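
/*
 * Illustrative sketch only (not part of the upstream driver): the NVRAM
 * helpers above follow the same DMA-buffer pattern as the flash helpers,
 * so a read-modify-write could look like this.  The offset/size semantics
 * are firmware-defined; values and the modify step are placeholders.
 */
#if 0
static int example_rmw_nvram(struct scsi_qla_host *ha, uint32_t offset,
			     uint32_t size)
{
	dma_addr_t nvram_dma;
	void *nvram;
	int rval;

	nvram = dma_alloc_coherent(&ha->pdev->dev, size, &nvram_dma,
				   GFP_KERNEL);
	if (!nvram)
		return QLA_ERROR;

	rval = qla4xxx_get_nvram(ha, nvram_dma, offset, size);
	if (rval == QLA_SUCCESS) {
		/* ... modify the buffer here ... */
		rval = qla4xxx_set_nvram(ha, nvram_dma, offset, size);
	}

	dma_free_coherent(&ha->pdev->dev, size, nvram, nvram_dma);
	return rval;
}
#endif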
int qla4xxx_restore_factory_defaults(struct scsi_qla_host *ha,
				     uint32_t region, uint32_t field0,
				     uint32_t field1)
{
	int status = QLA_SUCCESS;
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_RESTORE_FACTORY_DEFAULTS;
	mbox_cmd[3] = region;
	mbox_cmd[4] = field0;
	mbox_cmd[5] = field1;

	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 3, &mbox_cmd[0],
					 &mbox_sts[0]);
	if (status != QLA_SUCCESS) {
		DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
				  "status %04X\n", ha->host_no, __func__,
				  mbox_sts[0]));
	}
	return status;
}
/**
 * qla4_8xxx_set_param - set driver version in firmware.
 * @ha: Pointer to host adapter structure.
 * @param: Parameter to set, i.e. the driver version
 **/
int qla4_8xxx_set_param(struct scsi_qla_host *ha, int param)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	uint32_t status;

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_SET_PARAM;
	if (param == SET_DRVR_VERSION) {
		mbox_cmd[1] = SET_DRVR_VERSION;
		strncpy((char *)&mbox_cmd[2], QLA4XXX_DRIVER_VERSION,
			MAX_DRVR_VER_LEN - 1);
	} else {
		ql4_printk(KERN_ERR, ha, "%s: invalid parameter 0x%x\n",
			   __func__, param);
		status = QLA_ERROR;
		goto exit_set_param;
	}

	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 2, mbox_cmd,
					 mbox_sts);
	if (status == QLA_ERROR)
		ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n",
			   __func__, mbox_sts[0]);

exit_set_param:
	return status;
}
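
/*
 * Usage note (illustrative, not part of the upstream driver): since
 * SET_DRVR_VERSION is the only parameter handled above, the typical call
 * is simply:
 *
 *	qla4_8xxx_set_param(ha, SET_DRVR_VERSION);
 */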
/**
 * qla4_83xx_post_idc_ack - post IDC ACK
 * @ha: Pointer to host adapter structure.
 *
 * Posts IDC ACK for IDC Request Notification AEN.
 **/
int qla4_83xx_post_idc_ack(struct scsi_qla_host *ha)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	int status;

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_IDC_ACK;
	mbox_cmd[1] = ha->idc_info.request_desc;
	mbox_cmd[2] = ha->idc_info.info1;
	mbox_cmd[3] = ha->idc_info.info2;
	mbox_cmd[4] = ha->idc_info.info3;

	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, MBOX_REG_COUNT,
					 mbox_cmd, mbox_sts);
	if (status == QLA_ERROR)
		ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n", __func__,
			   mbox_sts[0]);
	else
		ql4_printk(KERN_INFO, ha, "%s: IDC ACK posted\n", __func__);

	return status;
}
int qla4_84xx_config_acb(struct scsi_qla_host *ha, int acb_config)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	struct addr_ctrl_blk *acb = NULL;
	uint32_t acb_len = sizeof(struct addr_ctrl_blk);
	int rval = QLA_SUCCESS;
	dma_addr_t acb_dma;

	acb = dma_alloc_coherent(&ha->pdev->dev,
				 sizeof(struct addr_ctrl_blk),
				 &acb_dma, GFP_KERNEL);
	if (!acb) {
		ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n", __func__);
		rval = QLA_ERROR;
		goto exit_config_acb;
	}
	memset(acb, 0, acb_len);

	switch (acb_config) {
	case ACB_CONFIG_DISABLE:
		rval = qla4xxx_get_acb(ha, acb_dma, 0, acb_len);
		if (rval != QLA_SUCCESS)
			goto exit_free_acb;

		rval = qla4xxx_disable_acb(ha);
		if (rval != QLA_SUCCESS)
			goto exit_free_acb;

		if (!ha->saved_acb)
			ha->saved_acb = kzalloc(acb_len, GFP_KERNEL);
		if (!ha->saved_acb) {
			ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n",
				   __func__);
			rval = QLA_ERROR;
			goto exit_free_acb;
		}
		memcpy(ha->saved_acb, acb, acb_len);
		break;
	case ACB_CONFIG_SET:
		if (!ha->saved_acb) {
			ql4_printk(KERN_ERR, ha, "%s: Can't set ACB, Saved ACB not available\n",
				   __func__);
			rval = QLA_ERROR;
			goto exit_free_acb;
		}

		memcpy(acb, ha->saved_acb, acb_len);

		rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], acb_dma);
		if (rval != QLA_SUCCESS)
			goto exit_free_acb;

		break;
	default:
		ql4_printk(KERN_ERR, ha, "%s: Invalid ACB Configuration\n",
			   __func__);
	}

exit_free_acb:
	dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk), acb,
			  acb_dma);
exit_config_acb:
	if ((acb_config == ACB_CONFIG_SET) && ha->saved_acb) {
		kfree(ha->saved_acb);
		ha->saved_acb = NULL;
	}
	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "%s %s\n", __func__,
			  rval == QLA_SUCCESS ? "SUCCEEDED" : "FAILED"));
	return rval;
}
int qla4_83xx_get_port_config(struct scsi_qla_host *ha, uint32_t *config)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	int status;

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_GET_PORT_CONFIG;

	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, MBOX_REG_COUNT,
					 mbox_cmd, mbox_sts);
	if (status == QLA_SUCCESS)
		*config = mbox_sts[1];
	else
		ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n", __func__,
			   mbox_sts[0]);

	return status;
}
int qla4_83xx_set_port_config(struct scsi_qla_host *ha, uint32_t *config)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	int status;

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_SET_PORT_CONFIG;
	mbox_cmd[1] = *config;

	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, MBOX_REG_COUNT,
					 mbox_cmd, mbox_sts);
	if (status != QLA_SUCCESS)
		ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n", __func__,
			   mbox_sts[0]);

	return status;
}
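
/*
 * Illustrative sketch only (not part of the upstream driver): the get/set
 * pair above lends itself to read-modify-write use.  The bit mask below is
 * a placeholder for whichever port-config bits the caller needs to change.
 */
#if 0
static int example_update_port_config(struct scsi_qla_host *ha,
				      uint32_t set_bits)
{
	uint32_t config = 0;
	int status;

	status = qla4_83xx_get_port_config(ha, &config);
	if (status != QLA_SUCCESS)
		return status;

	config |= set_bits;

	return qla4_83xx_set_port_config(ha, &config);
}
#endif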