/**
 * Copyright (C) 2005 - 2015 Avago Technologies
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@avagotech.com
 *
 * Avago Technologies
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <scsi/iscsi_proto.h>

#include "be_main.h"
#include "be.h"
#include "be_mgmt.h"
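
/**
 * beiscsi_pci_soft_reset()- Perform a soft reset of the function
 * @phba: Driver priv structure
 *
 * Assert the soft reset bit, poll for it to deassert, bring the
 * MPU IRAM back online and repeat the reset handshake.
 *
 * return
 * Success: 0
 * Failure: Non-Zero Value
 **/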
int beiscsi_pci_soft_reset(struct beiscsi_hba *phba)
{
	u32 sreset;
	u8 *pci_reset_offset = 0;
	u8 *pci_online0_offset = 0;
	u8 *pci_online1_offset = 0;
	u32 pconline0 = 0;
	u32 pconline1 = 0;
	u32 i;

	pci_reset_offset = (u8 *)phba->pci_va + BE2_SOFT_RESET;
	pci_online0_offset = (u8 *)phba->pci_va + BE2_PCI_ONLINE0;
	pci_online1_offset = (u8 *)phba->pci_va + BE2_PCI_ONLINE1;
	sreset = readl((void *)pci_reset_offset);
	sreset |= BE2_SET_RESET;
	writel(sreset, (void *)pci_reset_offset);

	i = 0;
	while (sreset & BE2_SET_RESET) {
		if (i > 64)
			break;
		msleep(100);
		sreset = readl((void *)pci_reset_offset);
		i++;
	}

	if (sreset & BE2_SET_RESET) {
		printk(KERN_ERR DRV_NAME
		       " Soft Reset did not deassert\n");
		return -EIO;
	}

	pconline1 = BE2_MPU_IRAM_ONLINE;
	writel(pconline0, (void *)pci_online0_offset);
	writel(pconline1, (void *)pci_online1_offset);

	sreset |= BE2_SET_RESET;
	writel(sreset, (void *)pci_reset_offset);

	i = 0;
	while (sreset & BE2_SET_RESET) {
		if (i > 64)
			break;
		msleep(1);
		sreset = readl((void *)pci_reset_offset);
		i++;
	}
	if (sreset & BE2_SET_RESET) {
		printk(KERN_ERR DRV_NAME
		       " MPU Online Soft Reset did not deassert\n");
		return -EIO;
	}
	return 0;
}
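
/**
 * be_chk_reset_complete()- Check if the FW reset completed
 * @phba: Driver priv structure
 *
 * Poll the MPU EP semaphore register until either the error bit
 * is set or the POST stage reaches 0xC000.
 *
 * return
 * Success: 0
 * Failure: Non-Zero Value
 **/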
int be_chk_reset_complete(struct beiscsi_hba *phba)
{
	unsigned int num_loop;
	u8 *mpu_sem = 0;
	u32 status;

	num_loop = 1000;
	mpu_sem = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE;
	msleep(5000);

	while (num_loop) {
		status = readl((void *)mpu_sem);

		if ((status & 0x80000000) || (status & 0x0000FFFF) == 0xC000)
			break;
		msleep(60);
		num_loop--;
	}

	if ((status & 0x80000000) || (!num_loop)) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BC_%d : Failed in be_chk_reset_complete "
			    "status = 0x%x\n", status);
		return -EIO;
	}

	return 0;
}
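
/**
 * be_mcc_notify()- Ring the MCC queue doorbell
 * @phba: Driver priv structure
 *
 * Inform the adapter that one WRB has been posted on the MCC queue.
 **/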
void be_mcc_notify(struct beiscsi_hba *phba)
{
	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
	u32 val = 0;

	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
	iowrite32(val, phba->db_va + DB_MCCQ_OFFSET);
}
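
/**
 * alloc_mcc_tag()- Allocate a tag for an MCC command
 * @phba: Driver priv structure
 *
 * return
 * Success: the allocated tag
 * Failure: 0, when no tag is available
 **/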
unsigned int alloc_mcc_tag(struct beiscsi_hba *phba)
{
	unsigned int tag = 0;

	if (phba->ctrl.mcc_tag_available) {
		tag = phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index];
		phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index] = 0;
		phba->ctrl.mcc_numtag[tag] = 0;
	}
	if (tag) {
		phba->ctrl.mcc_tag_available--;
		if (phba->ctrl.mcc_alloc_index == (MAX_MCC_CMD - 1))
			phba->ctrl.mcc_alloc_index = 0;
		else
			phba->ctrl.mcc_alloc_index++;
	}
	return tag;
}

/**
 * beiscsi_mccq_compl()- Wait for completion of MBX
 * @phba: Driver private structure
 * @tag: Tag for the MBX Command
 * @wrb: the WRB used for the MBX Command
 * @mbx_cmd_mem: ptr to memory allocated for MBX Cmd
 *
 * Waits for MBX completion with the passed TAG.
 *
 * return
 * Success: 0
 * Failure: Non-Zero
 **/
int beiscsi_mccq_compl(struct beiscsi_hba *phba,
		uint32_t tag, struct be_mcc_wrb **wrb,
		struct be_dma_mem *mbx_cmd_mem)
{
	int rc = 0;
	uint32_t mcc_tag_response;
	uint16_t status = 0, addl_status = 0, wrb_num = 0;
	struct be_mcc_wrb *temp_wrb;
	struct be_cmd_req_hdr *mbx_hdr;
	struct be_cmd_resp_hdr *mbx_resp_hdr;
	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;

	if (beiscsi_error(phba)) {
		free_mcc_tag(&phba->ctrl, tag);
		return -EPERM;
	}

	/* Set MBX Tag state to Active */
	spin_lock(&phba->ctrl.mbox_lock);
	phba->ctrl.ptag_state[tag].tag_state = MCC_TAG_STATE_RUNNING;
	spin_unlock(&phba->ctrl.mbox_lock);

	/* wait for the mccq completion */
	rc = wait_event_interruptible_timeout(
				phba->ctrl.mcc_wait[tag],
				phba->ctrl.mcc_numtag[tag],
				msecs_to_jiffies(
				BEISCSI_HOST_MBX_TIMEOUT));

	if (rc <= 0) {
		struct be_dma_mem *tag_mem;

		/* Set MBX Tag state to timeout */
		spin_lock(&phba->ctrl.mbox_lock);
		phba->ctrl.ptag_state[tag].tag_state = MCC_TAG_STATE_TIMEOUT;
		spin_unlock(&phba->ctrl.mbox_lock);

		/* Store resource addr to be freed later */
		tag_mem = &phba->ctrl.ptag_state[tag].tag_mem_state;
		if (mbx_cmd_mem) {
			tag_mem->size = mbx_cmd_mem->size;
			tag_mem->va = mbx_cmd_mem->va;
			tag_mem->dma = mbx_cmd_mem->dma;
		} else
			tag_mem->size = 0;

		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
			    BEISCSI_LOG_CONFIG,
			    "BC_%d : MBX Cmd Completion timed out\n");
		return -EBUSY;
	} else {
		rc = 0;
		/* Set MBX Tag state to completed */
		spin_lock(&phba->ctrl.mbox_lock);
		phba->ctrl.ptag_state[tag].tag_state = MCC_TAG_STATE_COMPLETED;
		spin_unlock(&phba->ctrl.mbox_lock);
	}

	mcc_tag_response = phba->ctrl.mcc_numtag[tag];
	status = (mcc_tag_response & CQE_STATUS_MASK);
	addl_status = ((mcc_tag_response & CQE_STATUS_ADDL_MASK) >>
			CQE_STATUS_ADDL_SHIFT);

	if (mbx_cmd_mem) {
		mbx_hdr = (struct be_cmd_req_hdr *)mbx_cmd_mem->va;
	} else {
		wrb_num = (mcc_tag_response & CQE_STATUS_WRB_MASK) >>
			   CQE_STATUS_WRB_SHIFT;
		temp_wrb = (struct be_mcc_wrb *)queue_get_wrb(mccq, wrb_num);
		mbx_hdr = embedded_payload(temp_wrb);

		if (wrb)
			*wrb = temp_wrb;
	}

	if (status || addl_status) {
		beiscsi_log(phba, KERN_WARNING,
			    BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
			    BEISCSI_LOG_CONFIG,
			    "BC_%d : MBX Cmd Failed for "
			    "Subsys : %d Opcode : %d with "
			    "Status : %d and Extd_Status : %d\n",
			    mbx_hdr->subsystem,
			    mbx_hdr->opcode,
			    status, addl_status);

		if (status == MCC_STATUS_INSUFFICIENT_BUFFER) {
			mbx_resp_hdr = (struct be_cmd_resp_hdr *) mbx_hdr;
			beiscsi_log(phba, KERN_WARNING,
				    BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
				    BEISCSI_LOG_CONFIG,
				    "BC_%d : Insufficient Buffer Error "
				    "Resp_Len : %d Actual_Resp_Len : %d\n",
				    mbx_resp_hdr->response_length,
				    mbx_resp_hdr->actual_resp_len);
			rc = -EAGAIN;
			goto release_mcc_tag;
		}
		rc = -EIO;
	}

release_mcc_tag:
	/* Release the MCC entry */
	free_mcc_tag(&phba->ctrl, tag);

	return rc;
}
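
/**
 * free_mcc_tag()- Return an MCC tag to the free pool
 * @ctrl: Function specific MBX data structure
 * @tag: Tag to be released
 **/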
void free_mcc_tag(struct be_ctrl_info *ctrl, unsigned int tag)
{
	spin_lock(&ctrl->mbox_lock);
	tag = tag & 0x000000FF;
	ctrl->mcc_tag[ctrl->mcc_free_index] = tag;
	if (ctrl->mcc_free_index == (MAX_MCC_CMD - 1))
		ctrl->mcc_free_index = 0;
	else
		ctrl->mcc_free_index++;
	ctrl->mcc_tag_available++;
	spin_unlock(&ctrl->mbox_lock);
}

bool is_link_state_evt(u32 trailer)
{
	return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
		  ASYNC_TRAILER_EVENT_CODE_MASK) ==
		  ASYNC_EVENT_CODE_LINK_STATE);
}

static bool is_iscsi_evt(u32 trailer)
{
	return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
		 ASYNC_TRAILER_EVENT_CODE_MASK) ==
		 ASYNC_EVENT_CODE_ISCSI;
}

static int iscsi_evt_type(u32 trailer)
{
	return (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
		ASYNC_TRAILER_EVENT_TYPE_MASK;
}

static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
{
	if (compl->flags != 0) {
		compl->flags = le32_to_cpu(compl->flags);
		WARN_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
		return true;
	} else
		return false;
}

static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
{
	compl->flags = 0;
}

/**
 * be_mcc_compl_process()- Check the MBX completion status
 * @ctrl: Function specific MBX data structure
 * @compl: Completion status of MBX Command
 *
 * Check for the MBX completion status when the BMBX method is used.
 *
 * return
 * Success: Zero
 * Failure: Non-Zero
 **/
static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
				struct be_mcc_compl *compl)
{
	u16 compl_status, extd_status;
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	struct be_cmd_req_hdr *hdr = embedded_payload(wrb);
	struct be_cmd_resp_hdr *resp_hdr;

	be_dws_le_to_cpu(compl, 4);

	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
					CQE_STATUS_COMPL_MASK;
	if (compl_status != MCC_STATUS_SUCCESS) {
		extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
						CQE_STATUS_EXTD_MASK;

		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
			    "BC_%d : error in cmd completion: "
			    "Subsystem : %d Opcode : %d "
			    "status(compl/extd)=%d/%d\n",
			    hdr->subsystem, hdr->opcode,
			    compl_status, extd_status);

		if (compl_status == MCC_STATUS_INSUFFICIENT_BUFFER) {
			resp_hdr = (struct be_cmd_resp_hdr *) hdr;
			if (resp_hdr->response_length)
				return 0;
		}
		return -EBUSY;
	}
	return 0;
}
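
/**
 * be_mcc_compl_process_isr()- Process an MCC completion in ISR context
 * @ctrl: Function specific MBX data structure
 * @compl: Completion posted by the adapter
 *
 * Record the completion status against the tag and wake up the
 * waiter, or clean up if the command had already timed out.
 *
 * return
 * Success: Zero
 **/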
int be_mcc_compl_process_isr(struct be_ctrl_info *ctrl,
			     struct be_mcc_compl *compl)
{
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	u16 compl_status, extd_status;
	unsigned short tag;

	be_dws_le_to_cpu(compl, 4);

	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
					CQE_STATUS_COMPL_MASK;
	/* The ctrl.mcc_numtag[tag] is filled with
	 * [31] = valid, [30:24] = Rsvd, [23:16] = wrb, [15:8] = extd_status,
	 * [7:0] = compl_status
	 */
	tag = (compl->tag0 & 0x000000FF);
	extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
					CQE_STATUS_EXTD_MASK;

	ctrl->mcc_numtag[tag]  = 0x80000000;
	ctrl->mcc_numtag[tag] |= (compl->tag0 & 0x00FF0000);
	ctrl->mcc_numtag[tag] |= (extd_status & 0x000000FF) << 8;
	ctrl->mcc_numtag[tag] |= (compl_status & 0x000000FF);

	if (ctrl->ptag_state[tag].tag_state == MCC_TAG_STATE_RUNNING) {
		wake_up_interruptible(&ctrl->mcc_wait[tag]);
	} else if (ctrl->ptag_state[tag].tag_state == MCC_TAG_STATE_TIMEOUT) {
		struct be_dma_mem *tag_mem;
		tag_mem = &ctrl->ptag_state[tag].tag_mem_state;

		beiscsi_log(phba, KERN_WARNING,
			    BEISCSI_LOG_MBOX | BEISCSI_LOG_INIT |
			    BEISCSI_LOG_CONFIG,
			    "BC_%d : MBX Completion for timeout Command "
			    "from FW\n");
		/* Check if memory needs to be freed */
		if (tag_mem->size)
			pci_free_consistent(ctrl->pdev, tag_mem->size,
					    tag_mem->va, tag_mem->dma);

		/* Change tag state */
		spin_lock(&phba->ctrl.mbox_lock);
		ctrl->ptag_state[tag].tag_state = MCC_TAG_STATE_COMPLETED;
		spin_unlock(&phba->ctrl.mbox_lock);

		/* Free MCC Tag */
		free_mcc_tag(ctrl, tag);
	}

	return 0;
}

static struct be_mcc_compl *be_mcc_compl_get(struct beiscsi_hba *phba)
{
	struct be_queue_info *mcc_cq = &phba->ctrl.mcc_obj.cq;
	struct be_mcc_compl *compl = queue_tail_node(mcc_cq);

	if (be_mcc_compl_is_new(compl)) {
		queue_tail_inc(mcc_cq);
		return compl;
	}
	return NULL;
}

/**
 * be2iscsi_fail_session(): Close the session with the appropriate error
 * @cls_session: ptr to session
 *
 * Depending on the adapter state, the appropriate error flag is passed.
 **/
void be2iscsi_fail_session(struct iscsi_cls_session *cls_session)
{
	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
	struct beiscsi_hba *phba = iscsi_host_priv(shost);
	uint32_t iscsi_err_flag;

	if (phba->state & BE_ADAPTER_STATE_SHUTDOWN)
		iscsi_err_flag = ISCSI_ERR_INVALID_HOST;
	else
		iscsi_err_flag = ISCSI_ERR_CONN_FAILED;

	iscsi_session_failure(cls_session->dd_data, iscsi_err_flag);
}
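
/**
 * beiscsi_async_link_state_process()- Process a link state async event
 * @phba: Driver priv structure
 * @evt: Link state event posted by the adapter
 *
 * On link down, fail all active sessions; on link up, flag the
 * adapter so that boot target information is re-checked.
 **/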
void beiscsi_async_link_state_process(struct beiscsi_hba *phba,
		struct be_async_event_link_state *evt)
{
	if ((evt->port_link_status == ASYNC_EVENT_LINK_DOWN) ||
	    ((evt->port_link_status & ASYNC_EVENT_LOGICAL) &&
	     (evt->port_fault != BEISCSI_PHY_LINK_FAULT_NONE))) {
		phba->state = BE_ADAPTER_LINK_DOWN;

		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
			    "BC_%d : Link Down on Port %d\n",
			    evt->physical_port);

		iscsi_host_for_each_session(phba->shost,
					    be2iscsi_fail_session);
	} else if ((evt->port_link_status & ASYNC_EVENT_LINK_UP) ||
		    ((evt->port_link_status & ASYNC_EVENT_LOGICAL) &&
		     (evt->port_fault == BEISCSI_PHY_LINK_FAULT_NONE))) {
		phba->state = BE_ADAPTER_LINK_UP | BE_ADAPTER_CHECK_BOOT;
		phba->get_boot = BE_GET_BOOT_RETRIES;

		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
			    "BC_%d : Link UP on Port %d\n",
			    evt->physical_port);
	}
}
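
/**
 * beiscsi_process_mcc()- Drain the MCC completion queue
 * @phba: Driver priv structure
 *
 * Handle async events and command completions posted on the MCC CQ,
 * then re-arm the CQ doorbell for the number of entries consumed.
 *
 * return
 * Success: 0
 * Failure: Non-Zero
 **/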
int beiscsi_process_mcc(struct beiscsi_hba *phba)
{
	struct be_mcc_compl *compl;
	int num = 0, status = 0;
	struct be_ctrl_info *ctrl = &phba->ctrl;

	spin_lock_bh(&phba->ctrl.mcc_cq_lock);
	while ((compl = be_mcc_compl_get(phba))) {
		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
			/* Interpret flags as an async trailer */
			if (is_link_state_evt(compl->flags))
				/* Interpret compl as an async link evt */
				beiscsi_async_link_state_process(phba,
				   (struct be_async_event_link_state *) compl);
			else if (is_iscsi_evt(compl->flags)) {
				switch (iscsi_evt_type(compl->flags)) {
				case ASYNC_EVENT_NEW_ISCSI_TGT_DISC:
				case ASYNC_EVENT_NEW_ISCSI_CONN:
				case ASYNC_EVENT_NEW_TCP_CONN:
					phba->state |= BE_ADAPTER_CHECK_BOOT;
					phba->get_boot = BE_GET_BOOT_RETRIES;
					beiscsi_log(phba, KERN_ERR,
						    BEISCSI_LOG_CONFIG |
						    BEISCSI_LOG_MBOX,
						    "BC_%d : Async iscsi Event,"
						    " flags handled = 0x%08x\n",
						    compl->flags);
					break;
				default:
					phba->state |= BE_ADAPTER_CHECK_BOOT;
					phba->get_boot = BE_GET_BOOT_RETRIES;
					beiscsi_log(phba, KERN_ERR,
						    BEISCSI_LOG_CONFIG |
						    BEISCSI_LOG_MBOX,
						    "BC_%d : Unsupported Async"
						    " Event, flags = 0x%08x\n",
						    compl->flags);
				}
			} else
				beiscsi_log(phba, KERN_ERR,
					    BEISCSI_LOG_CONFIG |
					    BEISCSI_LOG_MBOX,
					    "BC_%d : Unsupported Async Event, flags"
					    " = 0x%08x\n", compl->flags);
		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
			status = be_mcc_compl_process(ctrl, compl);
			atomic_dec(&phba->ctrl.mcc_obj.q.used);
		}
		be_mcc_compl_use(compl);
		num++;
	}

	if (num)
		hwi_ring_cq_db(phba, phba->ctrl.mcc_obj.cq.id, num, 1, 0);

	spin_unlock_bh(&phba->ctrl.mcc_cq_lock);
	return status;
}

/**
 * be_mcc_wait_compl()- Wait for MBX completion
 * @phba: driver private structure
 *
 * Wait till no more pending mcc requests are present
 *
 * return
 * Success: 0
 * Failure: Non-Zero
 **/
static int be_mcc_wait_compl(struct beiscsi_hba *phba)
{
	int i, status;
	for (i = 0; i < mcc_timeout; i++) {
		if (beiscsi_error(phba))
			return -EIO;

		status = beiscsi_process_mcc(phba);
		if (status)
			return status;

		if (atomic_read(&phba->ctrl.mcc_obj.q.used) == 0)
			break;
		udelay(100);
	}
	if (i == mcc_timeout) {
		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
			    "BC_%d : FW Timed Out\n");
		phba->fw_timeout = true;
		beiscsi_ue_detect(phba);
		return -EBUSY;
	}
	return 0;
}

/**
 * be_mcc_notify_wait()- Notify and wait for Compl
 * @phba: driver private structure
 *
 * Notify MCC requests and wait for completion
 *
 * return
 * Success: 0
 * Failure: Non-Zero
 **/
int be_mcc_notify_wait(struct beiscsi_hba *phba)
{
	be_mcc_notify(phba);
	return be_mcc_wait_compl(phba);
}

/**
 * be_mbox_db_ready_wait()- Check ready status
 * @ctrl: Function specific MBX data structure
 *
 * Check for the ready status of FW to send BMBX
 * commands to adapter.
 *
 * return
 * Success: 0
 * Failure: Non-Zero
 **/
static int be_mbox_db_ready_wait(struct be_ctrl_info *ctrl)
{
#define BEISCSI_MBX_RDY_BIT_TIMEOUT 4000	/* 4sec */
	void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	unsigned long timeout;
	bool read_flag = false;
	int ret = 0, i;
	u32 ready;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(rdybit_check_q);

	if (beiscsi_error(phba))
		return -EIO;

	timeout = jiffies + (HZ * 110);

	do {
		for (i = 0; i < BEISCSI_MBX_RDY_BIT_TIMEOUT; i++) {
			ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK;
			if (ready) {
				read_flag = true;
				break;
			}
			mdelay(1);
		}

		if (!read_flag) {
			wait_event_timeout(rdybit_check_q,
					   (read_flag != true),
					   HZ * 5);
		}
	} while ((time_before(jiffies, timeout)) && !read_flag);

	if (!read_flag) {
		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
			    "BC_%d : FW Timed Out\n");
		phba->fw_timeout = true;
		beiscsi_ue_detect(phba);
		ret = -EBUSY;
	}

	return ret;
}

/**
 * be_mbox_notify()- Notify adapter of new BMBX command
 * @ctrl: Function specific MBX data structure
 *
 * Ring doorbell to inform adapter of a BMBX command
 * to process
 *
 * return
 * Success: 0
 * Failure: Non-Zero
 **/
int be_mbox_notify(struct be_ctrl_info *ctrl)
{
	int status;
	u32 val = 0;
	void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &ctrl->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_compl *compl = &mbox->compl;
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);

	status = be_mbox_db_ready_wait(ctrl);
	if (status)
		return status;

	val &= ~MPU_MAILBOX_DB_RDY_MASK;
	val |= MPU_MAILBOX_DB_HI_MASK;
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(ctrl);
	if (status)
		return status;

	val = 0;
	val &= ~MPU_MAILBOX_DB_RDY_MASK;
	val &= ~MPU_MAILBOX_DB_HI_MASK;
	val |= (u32) (mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(ctrl);
	if (status)
		return status;

	if (be_mcc_compl_is_new(compl)) {
		status = be_mcc_compl_process(ctrl, &mbox->compl);
		be_mcc_compl_use(compl);
		if (status) {
			beiscsi_log(phba, KERN_ERR,
				    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
				    "BC_%d : After be_mcc_compl_process\n");
			return status;
		}
	} else {
		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
			    "BC_%d : Invalid Mailbox Completion\n");
		return -EBUSY;
	}
	return 0;
}

/*
 * Insert the mailbox address into the doorbell in two steps
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
 */
static int be_mbox_notify_wait(struct beiscsi_hba *phba)
{
	int status;
	u32 val = 0;
	void __iomem *db = phba->ctrl.db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &phba->ctrl.mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_compl *compl = &mbox->compl;
	struct be_ctrl_info *ctrl = &phba->ctrl;

	status = be_mbox_db_ready_wait(ctrl);
	if (status)
		return status;

	val |= MPU_MAILBOX_DB_HI_MASK;
	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(ctrl);
	if (status != 0)
		return status;

	val = 0;
	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
	val |= (u32)(mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(ctrl);
	if (status != 0)
		return status;

	/* A cq entry has been made now */
	if (be_mcc_compl_is_new(compl)) {
		status = be_mcc_compl_process(ctrl, &mbox->compl);
		be_mcc_compl_use(compl);
		if (status)
			return status;
	} else {
		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
			    "BC_%d : invalid mailbox completion\n");
		return -EBUSY;
	}
	return 0;
}
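
/**
 * be_wrb_hdr_prepare()- Fill up the WRB header
 * @wrb: WRB to be prepared
 * @payload_len: length of the command payload
 * @embedded: whether the payload is embedded in the WRB
 * @sge_cnt: number of SGEs for a non-embedded payload
 **/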
void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
			bool embedded, u8 sge_cnt)
{
	if (embedded)
		wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
	else
		wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
				 MCC_WRB_SGE_CNT_SHIFT;
	wrb->payload_length = payload_len;
	be_dws_cpu_to_le(wrb, 8);
}

void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
			u8 subsystem, u8 opcode, int cmd_len)
{
	req_hdr->opcode = opcode;
	req_hdr->subsystem = subsystem;
	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
	req_hdr->timeout = BEISCSI_FW_MBX_TIMEOUT;
}

static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
				      struct be_dma_mem *mem)
{
	int i, buf_pages;
	u64 dma = (u64) mem->dma;

	buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
	for (i = 0; i < buf_pages; i++) {
		pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
		pages[i].hi = cpu_to_le32(upper_32_bits(dma));
		dma += PAGE_SIZE_4K;
	}
}
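
/* Convert an EQ interrupt delay in usec into the delay multiplier
 * programmed into the EQ context.
 */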
static u32 eq_delay_to_mult(u32 usec_delay)
{
#define MAX_INTR_RATE 651042
	const u32 round = 10;
	u32 multiplier;

	if (usec_delay == 0)
		multiplier = 0;
	else {
		u32 interrupt_rate = 1000000 / usec_delay;
		if (interrupt_rate == 0)
			multiplier = 1023;
		else {
			multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
			multiplier /= interrupt_rate;
			multiplier = (multiplier + round / 2) / round;
			multiplier = min(multiplier, (u32) 1023);
		}
	}
	return multiplier;
}

struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem)
{
	return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
}
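
/* Take the WRB at the MCC queue head, tagging it with the head index */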
struct be_mcc_wrb *wrb_from_mccq(struct beiscsi_hba *phba)
{
	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
	struct be_mcc_wrb *wrb;

	WARN_ON(atomic_read(&mccq->used) >= mccq->len);
	wrb = queue_head_node(mccq);
	memset(wrb, 0, sizeof(*wrb));
	wrb->tag0 = (mccq->head & 0x000000FF) << 16;
	queue_head_inc(mccq);
	atomic_inc(&mccq->used);
	return wrb;
}
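
/**
 * beiscsi_cmd_eq_create()- Create an Event Queue on the adapter
 * @ctrl: Function specific MBX data structure
 * @eq: Queue info of the EQ to be created
 * @eq_delay: interrupt delay in usec
 *
 * return
 * Success: 0
 * Failure: Non-Zero
 **/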
int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
			  struct be_queue_info *eq, int eq_delay)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_eq_create *req = embedded_payload(wrb);
	struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &eq->dma_mem;
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_EQ_CREATE, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, func, req->context,
		      PCI_FUNC(ctrl->pdev->devfn));
	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
		      __ilog2_u32(eq->len / 256));
	AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
		      eq_delay_to_mult(eq_delay));
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	if (!status) {
		eq->id = le16_to_cpu(resp->eq_id);
		eq->created = true;
	}
	spin_unlock(&ctrl->mbox_lock);
	return status;
}

/**
 * be_cmd_fw_initialize()- Initialize FW
 * @ctrl: Pointer to function control structure
 *
 * Send FW initialize pattern for the function.
 *
 * return
 * Success: 0
 * Failure: Non-Zero value
 **/
int be_cmd_fw_initialize(struct be_ctrl_info *ctrl)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	int status;
	u8 *endian_check;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	endian_check = (u8 *) wrb;
	*endian_check++ = 0xFF;
	*endian_check++ = 0x12;
	*endian_check++ = 0x34;
	*endian_check++ = 0xFF;
	*endian_check++ = 0xFF;
	*endian_check++ = 0x56;
	*endian_check++ = 0x78;
	*endian_check++ = 0xFF;
	be_dws_cpu_to_le(wrb, sizeof(*wrb));

	status = be_mbox_notify(ctrl);
	if (status)
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BC_%d : be_cmd_fw_initialize Failed\n");

	spin_unlock(&ctrl->mbox_lock);
	return status;
}

/**
 * be_cmd_fw_uninit()- Uninitialize FW
 * @ctrl: Pointer to function control structure
 *
 * Send FW uninitialize pattern for the function
 *
 * return
 * Success: 0
 * Failure: Non-Zero value
 **/
int be_cmd_fw_uninit(struct be_ctrl_info *ctrl)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	int status;
	u8 *endian_check;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	endian_check = (u8 *) wrb;
	*endian_check++ = 0xFF;
	*endian_check++ = 0xAA;
	*endian_check++ = 0xBB;
	*endian_check++ = 0xFF;
	*endian_check++ = 0xFF;
	*endian_check++ = 0xCC;
	*endian_check++ = 0xDD;
	*endian_check = 0xFF;
	be_dws_cpu_to_le(wrb, sizeof(*wrb));

	status = be_mbox_notify(ctrl);
	if (status)
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BC_%d : be_cmd_fw_uninit Failed\n");

	spin_unlock(&ctrl->mbox_lock);
	return status;
}
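
/**
 * beiscsi_cmd_cq_create()- Create a Completion Queue on the adapter
 * @ctrl: Function specific MBX data structure
 * @cq: Queue info of the CQ to be created
 * @eq: EQ on which this CQ reports events
 * @sol_evts: generate events for solicited completions
 * @no_delay: disable interrupt coalescing delay
 * @coalesce_wm: coalescing watermark
 *
 * return
 * Success: 0
 * Failure: Non-Zero
 **/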
int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
			  struct be_queue_info *cq, struct be_queue_info *eq,
			  bool sol_evts, bool no_delay, int coalesce_wm)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_cq_create *req = embedded_payload(wrb);
	struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	struct be_dma_mem *q_mem = &cq->dma_mem;
	void *ctxt = &req->context;
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_CQ_CREATE, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
	if (is_chip_be2_be3r(phba)) {
		AMAP_SET_BITS(struct amap_cq_context, coalescwm,
			      ctxt, coalesce_wm);
		AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
		AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
			      __ilog2_u32(cq->len / 256));
		AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
		AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
		AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context, func, ctxt,
			      PCI_FUNC(ctrl->pdev->devfn));
	} else {
		req->hdr.version = MBX_CMD_VER2;
		req->page_size = 1;
		AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
			      ctxt, coalesce_wm);
		AMAP_SET_BITS(struct amap_cq_context_v2, nodelay,
			      ctxt, no_delay);
		AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
			      __ilog2_u32(cq->len / 256));
		AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
		AMAP_SET_BITS(struct amap_cq_context_v2, armed, ctxt, 1);
	}

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	if (!status) {
		cq->id = le16_to_cpu(resp->cq_id);
		cq->created = true;
	} else
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BC_%d : In be_cmd_cq_create, status=0x%08x\n",
			    status);

	spin_unlock(&ctrl->mbox_lock);

	return status;
}

static u32 be_encoded_q_len(int q_len)
{
	u32 len_encoded = fls(q_len);	/* log2(len) + 1 */

	if (len_encoded == 16)
		len_encoded = 0;
	return len_encoded;
}
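
/**
 * beiscsi_cmd_mccq_create()- Create the MCC queue
 * @phba: Driver priv structure
 * @mccq: Queue info of the MCCQ to be created
 * @cq: CQ on which MCC completions are posted
 *
 * return
 * Success: 0
 * Failure: Non-Zero
 **/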
int beiscsi_cmd_mccq_create(struct beiscsi_hba *phba,
			    struct be_queue_info *mccq,
			    struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	struct be_ctrl_info *ctrl;
	void *ctxt;
	int status;

	spin_lock(&phba->ctrl.mbox_lock);
	ctrl = &phba->ctrl;
	wrb = wrb_from_mbox(&ctrl->mbox_mem);
	memset(wrb, 0, sizeof(*wrb));
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_MCC_CREATE, sizeof(*req));

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);

	AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt,
		      PCI_FUNC(phba->pcidev->devfn));
	AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
		      be_encoded_q_len(mccq->len));
	AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(phba);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}
	spin_unlock(&phba->ctrl.mbox_lock);

	return status;
}
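
/**
 * beiscsi_cmd_q_destroy()- Destroy a queue on the adapter
 * @ctrl: Function specific MBX data structure
 * @q: Queue info of the queue to be destroyed, unused for QTYPE_SGL
 * @queue_type: one of QTYPE_EQ/CQ/MCCQ/WRBQ/DPDUQ/SGL
 *
 * return
 * Success: 0
 * Failure: Non-Zero
 **/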
int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
			  int queue_type)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_q_destroy *req = embedded_payload(wrb);
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	u8 subsys = 0, opcode = 0;
	int status;

	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
		    "BC_%d : In beiscsi_cmd_q_destroy "
		    "queue_type : %d\n", queue_type);

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));
	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	switch (queue_type) {
	case QTYPE_EQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_EQ_DESTROY;
		break;
	case QTYPE_CQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_CQ_DESTROY;
		break;
	case QTYPE_MCCQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_MCC_DESTROY;
		break;
	case QTYPE_WRBQ:
		subsys = CMD_SUBSYSTEM_ISCSI;
		opcode = OPCODE_COMMON_ISCSI_WRBQ_DESTROY;
		break;
	case QTYPE_DPDUQ:
		subsys = CMD_SUBSYSTEM_ISCSI;
		opcode = OPCODE_COMMON_ISCSI_DEFQ_DESTROY;
		break;
	case QTYPE_SGL:
		subsys = CMD_SUBSYSTEM_ISCSI;
		opcode = OPCODE_COMMON_ISCSI_CFG_REMOVE_SGL_PAGES;
		break;
	default:
		spin_unlock(&ctrl->mbox_lock);
		BUG();
		return -ENXIO;
	}
	be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
	if (queue_type != QTYPE_SGL)
		req->id = cpu_to_le16(q->id);

	status = be_mbox_notify(ctrl);

	spin_unlock(&ctrl->mbox_lock);
	return status;
}

/**
 * be_cmd_create_default_pdu_queue()- Create DEFQ for the adapter
 * @ctrl: ptr to ctrl_info
 * @cq: Completion Queue
 * @dq: Default Queue
 * @length: ring size
 * @entry_size: size of each entry in DEFQ
 * @is_header: Header or Data DEFQ
 * @ulp_num: Bind to which ULP
 *
 * Create HDR/Data DEFQ for the passed ULP. Unsol PDU are posted
 * on this queue by the FW
 *
 * return
 * Success: 0
 * Failure: Non-Zero Value
 *
 **/
int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
				    struct be_queue_info *cq,
				    struct be_queue_info *dq, int length,
				    int entry_size, uint8_t is_header,
				    uint8_t ulp_num)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_defq_create_req *req = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &dq->dma_mem;
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	void *ctxt = &req->context;
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
			   OPCODE_COMMON_ISCSI_DEFQ_CREATE, sizeof(*req));

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	if (phba->fw_config.dual_ulp_aware) {
		req->ulp_num = ulp_num;
		req->dua_feature |= (1 << BEISCSI_DUAL_ULP_AWARE_BIT);
		req->dua_feature |= (1 << BEISCSI_BIND_Q_TO_ULP_BIT);
	}

	if (is_chip_be2_be3r(phba)) {
		AMAP_SET_BITS(struct amap_be_default_pdu_context,
			      rx_pdid, ctxt, 0);
		AMAP_SET_BITS(struct amap_be_default_pdu_context,
			      rx_pdid_valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_be_default_pdu_context,
			      pci_func_id, ctxt, PCI_FUNC(ctrl->pdev->devfn));
		AMAP_SET_BITS(struct amap_be_default_pdu_context,
			      ring_size, ctxt,
			      be_encoded_q_len(length /
			      sizeof(struct phys_addr)));
		AMAP_SET_BITS(struct amap_be_default_pdu_context,
			      default_buffer_size, ctxt, entry_size);
		AMAP_SET_BITS(struct amap_be_default_pdu_context,
			      cq_id_recv, ctxt, cq->id);
	} else {
		AMAP_SET_BITS(struct amap_default_pdu_context_ext,
			      rx_pdid, ctxt, 0);
		AMAP_SET_BITS(struct amap_default_pdu_context_ext,
			      rx_pdid_valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_default_pdu_context_ext,
			      ring_size, ctxt,
			      be_encoded_q_len(length /
			      sizeof(struct phys_addr)));
		AMAP_SET_BITS(struct amap_default_pdu_context_ext,
			      default_buffer_size, ctxt, entry_size);
		AMAP_SET_BITS(struct amap_default_pdu_context_ext,
			      cq_id_recv, ctxt, cq->id);
	}

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	if (!status) {
		struct be_ring *defq_ring;
		struct be_defq_create_resp *resp = embedded_payload(wrb);

		dq->id = le16_to_cpu(resp->id);
		dq->created = true;
		if (is_header)
			defq_ring = &phba->phwi_ctrlr->default_pdu_hdr[ulp_num];
		else
			defq_ring = &phba->phwi_ctrlr->
				    default_pdu_data[ulp_num];

		defq_ring->id = dq->id;

		if (!phba->fw_config.dual_ulp_aware) {
			defq_ring->ulp_num = BEISCSI_ULP0;
			defq_ring->doorbell_offset = DB_RXULP0_OFFSET;
		} else {
			defq_ring->ulp_num = resp->ulp_num;
			defq_ring->doorbell_offset = resp->doorbell_offset;
		}
	}
	spin_unlock(&ctrl->mbox_lock);

	return status;
}

/**
 * be_cmd_wrbq_create()- Create WRBQ
 * @ctrl: ptr to ctrl_info
 * @q_mem: memory details for the queue
 * @wrbq: queue info
 * @pwrb_context: ptr to wrb_context
 * @ulp_num: ULP on which the WRBQ is to be created
 *
 * Create WRBQ on the passed ULP_NUM.
 *
 **/
int be_cmd_wrbq_create(struct be_ctrl_info *ctrl,
		       struct be_dma_mem *q_mem,
		       struct be_queue_info *wrbq,
		       struct hwi_wrb_context *pwrb_context,
		       uint8_t ulp_num)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_wrbq_create_req *req = embedded_payload(wrb);
	struct be_wrbq_create_resp *resp = embedded_payload(wrb);
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
			   OPCODE_COMMON_ISCSI_WRBQ_CREATE, sizeof(*req));
	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);

	if (phba->fw_config.dual_ulp_aware) {
		req->ulp_num = ulp_num;
		req->dua_feature |= (1 << BEISCSI_DUAL_ULP_AWARE_BIT);
		req->dua_feature |= (1 << BEISCSI_BIND_Q_TO_ULP_BIT);
	}

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	if (!status) {
		wrbq->id = le16_to_cpu(resp->cid);
		wrbq->created = true;

		pwrb_context->cid = wrbq->id;
		if (!phba->fw_config.dual_ulp_aware) {
			pwrb_context->doorbell_offset = DB_TXULP0_OFFSET;
			pwrb_context->ulp_num = BEISCSI_ULP0;
		} else {
			pwrb_context->ulp_num = resp->ulp_num;
			pwrb_context->doorbell_offset = resp->doorbell_offset;
		}
	}
	spin_unlock(&ctrl->mbox_lock);
	return status;
}
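
/**
 * be_cmd_iscsi_post_template_hdr()- Post template header buffers
 * @ctrl: Function specific MBX data structure
 * @q_mem: memory holding the template header pages
 *
 * return
 * Success: 0
 * Failure: Non-Zero
 **/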
int be_cmd_iscsi_post_template_hdr(struct be_ctrl_info *ctrl,
				   struct be_dma_mem *q_mem)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_post_template_pages_req *req = embedded_payload(wrb);
	int status;

	spin_lock(&ctrl->mbox_lock);

	memset(wrb, 0, sizeof(*wrb));
	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_ADD_TEMPLATE_HEADER_BUFFERS,
			   sizeof(*req));

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	req->type = BEISCSI_TEMPLATE_HDR_TYPE_ISCSI;
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	spin_unlock(&ctrl->mbox_lock);
	return status;
}
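
/**
 * be_cmd_iscsi_remove_template_hdr()- Remove template header buffers
 * @ctrl: Function specific MBX data structure
 *
 * return
 * Success: 0
 * Failure: Non-Zero
 **/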
int be_cmd_iscsi_remove_template_hdr(struct be_ctrl_info *ctrl)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_remove_template_pages_req *req = embedded_payload(wrb);
	int status;

	spin_lock(&ctrl->mbox_lock);

	memset(wrb, 0, sizeof(*wrb));
	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_REMOVE_TEMPLATE_HEADER_BUFFERS,
			   sizeof(*req));

	req->type = BEISCSI_TEMPLATE_HDR_TYPE_ISCSI;

	status = be_mbox_notify(ctrl);
	spin_unlock(&ctrl->mbox_lock);
	return status;
}
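
/**
 * be_cmd_iscsi_post_sgl_pages()- Post SGL pages to the adapter
 * @ctrl: Function specific MBX data structure
 * @q_mem: memory holding the SGL pages
 * @page_offset: page offset at which posting starts
 * @num_pages: number of pages to post; the special value 0xff is
 *	       passed through to the FW unchanged
 *
 * Posts the pages in chunks limited by the request's page array,
 * tearing the SGL config down again on failure.
 *
 * return
 * Success: 0
 * Failure: Non-Zero
 **/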
int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
				struct be_dma_mem *q_mem,
				u32 page_offset, u32 num_pages)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_post_sgl_pages_req *req = embedded_payload(wrb);
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	int status;
	unsigned int curr_pages;
	u32 internal_page_offset = 0;
	u32 temp_num_pages = num_pages;

	if (num_pages == 0xff)
		num_pages = 1;

	spin_lock(&ctrl->mbox_lock);
	do {
		memset(wrb, 0, sizeof(*wrb));
		be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
		be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
				   OPCODE_COMMON_ISCSI_CFG_POST_SGL_PAGES,
				   sizeof(*req));
		curr_pages = BE_NUMBER_OF_FIELD(struct be_post_sgl_pages_req,
						pages);
		req->num_pages = min(num_pages, curr_pages);
		req->page_offset = page_offset;
		be_cmd_page_addrs_prepare(req->pages, req->num_pages, q_mem);
		q_mem->dma = q_mem->dma + (req->num_pages * PAGE_SIZE);
		internal_page_offset += req->num_pages;
		page_offset += req->num_pages;
		num_pages -= req->num_pages;

		if (temp_num_pages == 0xff)
			req->num_pages = temp_num_pages;

		status = be_mbox_notify(ctrl);
		if (status) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BC_%d : FW CMD to map iscsi frags failed.\n");
			goto error;
		}
	} while (num_pages > 0);
error:
	spin_unlock(&ctrl->mbox_lock);
	if (status != 0)
		beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
	return status;
}
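
/**
 * beiscsi_cmd_reset_function()- Reset the PCI function
 * @phba: Driver priv structure
 *
 * Issue COMMON_FUNCTION_RESET through the bootstrap mailbox.
 *
 * return
 * Success: 0
 * Failure: Non-Zero
 **/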
int beiscsi_cmd_reset_function(struct beiscsi_hba *phba)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_post_sgl_pages_req *req = embedded_payload(wrb);
	int status;

	spin_lock(&ctrl->mbox_lock);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));
	status = be_mbox_notify_wait(phba);

	spin_unlock(&ctrl->mbox_lock);
	return status;
}

/**
 * be_cmd_set_vlan()- Configure VLAN parameters on the adapter
 * @phba: device priv structure instance
 * @vlan_tag: TAG to be set
 *
 * Set the VLAN_TAG for the adapter or Disable VLAN on adapter
 *
 * returns
 * TAG for the MBX Cmd
 **/
int be_cmd_set_vlan(struct beiscsi_hba *phba,
		    uint16_t vlan_tag)
{
	unsigned int tag = 0;
	struct be_mcc_wrb *wrb;
	struct be_cmd_set_vlan_req *req;
	struct be_ctrl_info *ctrl = &phba->ctrl;

	spin_lock(&ctrl->mbox_lock);
	tag = alloc_mcc_tag(phba);
	if (!tag) {
		spin_unlock(&ctrl->mbox_lock);
		return tag;
	}

	wrb = wrb_from_mccq(phba);
	req = embedded_payload(wrb);
	wrb->tag0 |= tag;
	be_wrb_hdr_prepare(wrb, sizeof(*wrb), true, 0);
	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
			   OPCODE_COMMON_ISCSI_NTWK_SET_VLAN,
			   sizeof(*req));

	req->interface_hndl = phba->interface_handle;
	req->vlan_priority = vlan_tag;

	be_mcc_notify(phba);
	spin_unlock(&ctrl->mbox_lock);

	return tag;
}