ql4_iocb.c

/*
 * QLogic iSCSI HBA Driver
 * Copyright (c) 2003-2013 QLogic Corporation
 *
 * See LICENSE.qla4xxx for copyright and licensing details.
 */

#include "ql4_def.h"
#include "ql4_glbl.h"
#include "ql4_dbg.h"
#include "ql4_inline.h"

#include <scsi/scsi_tcq.h>

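/**
 * qla4xxx_space_in_req_ring - checks for adequate space in request ring
 * @ha: Pointer to host adapter structure.
 * @req_cnt: number of request entries needed
 *
 * Returns 1 if the request ring has room for @req_cnt entries plus a
 * two-entry safety pad, else 0.  The cached free count is refreshed from
 * the firmware's shadow out-pointer only when it looks too small, which
 * keeps register reads off the fast path.
 **/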
static int
qla4xxx_space_in_req_ring(struct scsi_qla_host *ha, uint16_t req_cnt)
{
	uint16_t cnt;

	/* Calculate number of free request entries. */
	if ((req_cnt + 2) >= ha->req_q_count) {
		cnt = (uint16_t) ha->isp_ops->rd_shdw_req_q_out(ha);
		if (ha->request_in < cnt)
			ha->req_q_count = cnt - ha->request_in;
		else
			ha->req_q_count = REQUEST_QUEUE_DEPTH -
					  (ha->request_in - cnt);
	}

	/* Check if room for request in request ring. */
	if ((req_cnt + 2) < ha->req_q_count)
		return 1;
	else
		return 0;
}

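/**
 * qla4xxx_advance_req_ring_ptr - advances the request ring in-pointer
 * @ha: Pointer to host adapter structure.
 *
 * Steps request_in/request_ptr to the next slot, wrapping back to the
 * start of the ring after slot REQUEST_QUEUE_DEPTH - 1.
 **/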
static void qla4xxx_advance_req_ring_ptr(struct scsi_qla_host *ha)
{
	/* Advance request queue pointer */
	if (ha->request_in == (REQUEST_QUEUE_DEPTH - 1)) {
		ha->request_in = 0;
		ha->request_ptr = ha->request_ring;
	} else {
		ha->request_in++;
		ha->request_ptr++;
	}
}

/**
 * qla4xxx_get_req_pkt - returns a valid entry in request queue.
 * @ha: Pointer to host adapter structure.
 * @queue_entry: Pointer to pointer to queue entry structure
 *
 * This routine performs the following tasks:
 *	- returns the current request_in pointer (if queue not full)
 *	- advances the request_in pointer
 *	- checks for queue full
 **/
static int qla4xxx_get_req_pkt(struct scsi_qla_host *ha,
			       struct queue_entry **queue_entry)
{
	uint16_t req_cnt = 1;

	if (qla4xxx_space_in_req_ring(ha, req_cnt)) {
		*queue_entry = ha->request_ptr;
		memset(*queue_entry, 0, sizeof(**queue_entry));

		qla4xxx_advance_req_ring_ptr(ha);
		ha->req_q_count -= req_cnt;
		return QLA_SUCCESS;
	}

	return QLA_ERROR;
}

/**
 * qla4xxx_send_marker_iocb - issues marker iocb to HBA
 * @ha: Pointer to host adapter structure.
 * @ddb_entry: Pointer to device database entry
 * @lun: SCSI LUN
 * @mrkr_mod: marker identifier
 *
 * This routine issues a marker IOCB.
 **/
int qla4xxx_send_marker_iocb(struct scsi_qla_host *ha,
			     struct ddb_entry *ddb_entry, uint64_t lun,
			     uint16_t mrkr_mod)
{
	struct qla4_marker_entry *marker_entry;
	unsigned long flags = 0;
	uint8_t status = QLA_SUCCESS;

	/* Acquire hardware specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Get pointer to the queue entry for the marker */
	if (qla4xxx_get_req_pkt(ha, (struct queue_entry **) &marker_entry) !=
	    QLA_SUCCESS) {
		status = QLA_ERROR;
		goto exit_send_marker;
	}

	/* Put the marker in the request queue */
	marker_entry->hdr.entryType = ET_MARKER;
	marker_entry->hdr.entryCount = 1;
	marker_entry->target = cpu_to_le16(ddb_entry->fw_ddb_index);
	marker_entry->modifier = cpu_to_le16(mrkr_mod);
	int_to_scsilun(lun, &marker_entry->lun);
	wmb();

	/* Tell ISP it's got a new I/O request */
	ha->isp_ops->queue_iocb(ha);

exit_send_marker:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return status;
}

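/**
 * qla4xxx_alloc_cont_entry - claims the next ring slot as a continuation
 * @ha: Pointer to host adapter structure.
 *
 * The caller must already have reserved ring space (see
 * qla4xxx_space_in_req_ring()); this helper only claims the slot and
 * loads the ET_CONTINUE header defaults.
 **/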
static struct continuation_t1_entry *
qla4xxx_alloc_cont_entry(struct scsi_qla_host *ha)
{
	struct continuation_t1_entry *cont_entry;

	cont_entry = (struct continuation_t1_entry *)ha->request_ptr;

	qla4xxx_advance_req_ring_ptr(ha);

	/* Load packet defaults */
	cont_entry->hdr.entryType = ET_CONTINUE;
	cont_entry->hdr.entryCount = 1;
	cont_entry->hdr.systemDefined = (uint8_t) cpu_to_le16(ha->request_in);

	return cont_entry;
}

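/**
 * qla4xxx_calc_request_entries - IOCBs needed for a given DSD count
 * @dsds: number of data segment descriptors
 *
 * One command IOCB holds up to COMMAND_SEG descriptors and each
 * continuation IOCB holds up to CONTINUE_SEG more, so the division
 * below rounds up.  Illustrative sketch only (the real segment counts
 * come from the driver headers): if COMMAND_SEG were 4 and CONTINUE_SEG
 * were 5, 12 DSDs would need 1 command + 2 continuation = 3 entries.
 **/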
static uint16_t qla4xxx_calc_request_entries(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > COMMAND_SEG) {
		iocbs += (dsds - COMMAND_SEG) / CONTINUE_SEG;
		if ((dsds - COMMAND_SEG) % CONTINUE_SEG)
			iocbs++;
	}
	return iocbs;
}

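/**
 * qla4xxx_build_scsi_iocbs - populates data segment descriptors
 * @srb: pointer to SCSI Request Block
 * @cmd_entry: pointer to the command IOCB being built
 * @tot_dsds: total number of data segment descriptors
 *
 * Walks the command's scatter/gather list, filling the command IOCB's
 * own descriptors first and spilling the remainder into continuation
 * IOCBs allocated from the request ring.
 **/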
static void qla4xxx_build_scsi_iocbs(struct srb *srb,
				     struct command_t3_entry *cmd_entry,
				     uint16_t tot_dsds)
{
	struct scsi_qla_host *ha;
	uint16_t avail_dsds;
	struct data_seg_a64 *cur_dsd;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = srb->cmd;
	ha = srb->ha;

	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		/* No data being transferred */
		cmd_entry->ttlByteCnt = __constant_cpu_to_le32(0);
		return;
	}

	avail_dsds = COMMAND_SEG;
	cur_dsd = (struct data_seg_a64 *) &(cmd_entry->dataseg[0]);

	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			struct continuation_t1_entry *cont_entry;

			cont_entry = qla4xxx_alloc_cont_entry(ha);
			cur_dsd = (struct data_seg_a64 *)
				  &cont_entry->dataseg[0];
			avail_dsds = CONTINUE_SEG;
		}

		sle_dma = sg_dma_address(sg);
		cur_dsd->base.addrLow = cpu_to_le32(LSDW(sle_dma));
		cur_dsd->base.addrHigh = cpu_to_le32(MSDW(sle_dma));
		cur_dsd->count = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;

		cur_dsd++;
	}
}

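/**
 * qla4_83xx_queue_iocb - Tell ISP it's got new request(s)
 * @ha: pointer to host adapter structure.
 *
 * ISP83xx variant of the request doorbell: the new request_in index is
 * written to the 83xx register block, and the read-back flushes the
 * posted PCI write before the caller drops the hardware lock.
 **/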
void qla4_83xx_queue_iocb(struct scsi_qla_host *ha)
{
	writel(ha->request_in, &ha->qla4_83xx_reg->req_q_in);
	readl(&ha->qla4_83xx_reg->req_q_in);
}

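/**
 * qla4_83xx_complete_iocb - Tell ISP we're done with response(s)
 * @ha: pointer to host adapter structure.
 *
 * ISP83xx variant of the response doorbell; the read-back flushes the
 * posted PCI write, as above.
 **/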
void qla4_83xx_complete_iocb(struct scsi_qla_host *ha)
{
	writel(ha->response_out, &ha->qla4_83xx_reg->rsp_q_out);
	readl(&ha->qla4_83xx_reg->rsp_q_out);
}

/**
 * qla4_82xx_queue_iocb - Tell ISP it's got new request(s)
 * @ha: pointer to host adapter structure.
 *
 * This routine notifies the ISP that one or more new request
 * queue entries have been placed on the request queue.
 **/
void qla4_82xx_queue_iocb(struct scsi_qla_host *ha)
{
	qla4_82xx_wr_32(ha, ha->nx_db_wr_ptr, ha->request_in);
}

/**
 * qla4_82xx_complete_iocb - Tell ISP we're done with response(s)
 * @ha: pointer to host adapter structure.
 *
 * This routine notifies the ISP that one or more response/completion
 * queue entries have been processed by the driver.
 * This also clears the interrupt.
 **/
void qla4_82xx_complete_iocb(struct scsi_qla_host *ha)
{
	writel(ha->response_out, &ha->qla4_82xx_reg->rsp_q_out);
	readl(&ha->qla4_82xx_reg->rsp_q_out);
}

/**
 * qla4xxx_queue_iocb - Tell ISP it's got new request(s)
 * @ha: pointer to host adapter structure.
 *
 * This routine notifies the ISP that one or more new request
 * queue entries have been placed on the request queue.
 **/
void qla4xxx_queue_iocb(struct scsi_qla_host *ha)
{
	writel(ha->request_in, &ha->reg->req_q_in);
	readl(&ha->reg->req_q_in);
}

/**
 * qla4xxx_complete_iocb - Tell ISP we're done with response(s)
 * @ha: pointer to host adapter structure.
 *
 * This routine notifies the ISP that one or more response/completion
 * queue entries have been processed by the driver.
 * This also clears the interrupt.
 **/
void qla4xxx_complete_iocb(struct scsi_qla_host *ha)
{
	writel(ha->response_out, &ha->reg->rsp_q_out);
	readl(&ha->reg->rsp_q_out);
}

/**
 * qla4xxx_send_command_to_isp - issues command to HBA
 * @ha: pointer to host adapter structure.
 * @srb: pointer to SCSI Request Block to be sent to ISP
 *
 * This routine is called by qla4xxx_queuecommand to build an ISP
 * command and pass it to the ISP for execution.
 **/
int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb *srb)
{
	struct scsi_cmnd *cmd = srb->cmd;
	struct ddb_entry *ddb_entry;
	struct command_t3_entry *cmd_entry;
	int nseg;
	uint16_t tot_dsds;
	uint16_t req_cnt;
	unsigned long flags;
	uint32_t index;

	/* Get real lun and adapter */
	ddb_entry = srb->ddb;

	tot_dsds = 0;

	/* Acquire hardware specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	index = (uint32_t)cmd->request->tag;

	/*
	 * Check to see if adapter is online before placing request on
	 * request queue.  If a reset occurs and a request is in the queue,
	 * the firmware will still attempt to process the request, picking
	 * up garbage for the pointers.
	 */
	if (!test_bit(AF_ONLINE, &ha->flags)) {
		DEBUG2(printk("scsi%ld: %s: Adapter OFFLINE! "
			      "Do not issue command.\n",
			      ha->host_no, __func__));
		goto queuing_error;
	}

	/* Calculate the number of request entries needed. */
	nseg = scsi_dma_map(cmd);
	if (nseg < 0)
		goto queuing_error;
	tot_dsds = nseg;

	req_cnt = qla4xxx_calc_request_entries(tot_dsds);
	if (!qla4xxx_space_in_req_ring(ha, req_cnt))
		goto queuing_error;

	/* total iocbs active */
	if ((ha->iocb_cnt + req_cnt) >= ha->iocb_hiwat)
		goto queuing_error;

	/* Build command packet */
	cmd_entry = (struct command_t3_entry *) ha->request_ptr;
	memset(cmd_entry, 0, sizeof(struct command_t3_entry));
	cmd_entry->hdr.entryType = ET_COMMAND;
	cmd_entry->handle = cpu_to_le32(index);
	cmd_entry->target = cpu_to_le16(ddb_entry->fw_ddb_index);

	int_to_scsilun(cmd->device->lun, &cmd_entry->lun);
	cmd_entry->ttlByteCnt = cpu_to_le32(scsi_bufflen(cmd));
	memcpy(cmd_entry->cdb, cmd->cmnd, cmd->cmd_len);
	cmd_entry->dataSegCnt = cpu_to_le16(tot_dsds);
	cmd_entry->hdr.entryCount = req_cnt;

	/* Set data transfer direction control flags
	 * NOTE: Look at data_direction bits only if there is data to be
	 *	 transferred, as the data direction bit is sometimes filled
	 *	 in when there is no data to be transferred */
	cmd_entry->control_flags = CF_NO_DATA;
	if (scsi_bufflen(cmd)) {
		if (cmd->sc_data_direction == DMA_TO_DEVICE)
			cmd_entry->control_flags = CF_WRITE;
		else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
			cmd_entry->control_flags = CF_READ;

		ha->bytes_xfered += scsi_bufflen(cmd);
		if (ha->bytes_xfered & ~0xFFFFF) {
			ha->total_mbytes_xferred += ha->bytes_xfered >> 20;
			ha->bytes_xfered &= 0xFFFFF;
		}
	}

	/* Set tagged queueing control flags */
	cmd_entry->control_flags |= CF_SIMPLE_TAG;

	qla4xxx_advance_req_ring_ptr(ha);
	qla4xxx_build_scsi_iocbs(srb, cmd_entry, tot_dsds);
	wmb();

	srb->cmd->host_scribble = (unsigned char *)(unsigned long)index;

	/* update counters */
	srb->state = SRB_ACTIVE_STATE;
	srb->flags |= SRB_DMA_VALID;

	/* Track IOCB used */
	ha->iocb_cnt += req_cnt;
	srb->iocb_cnt = req_cnt;
	ha->req_q_count -= req_cnt;

	ha->isp_ops->queue_iocb(ha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_ERROR;
}

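/**
 * qla4xxx_send_passthru0 - issues a PASSTHRU0 IOCB for an iSCSI PDU
 * @task: iSCSI task to be passed through to the firmware
 *
 * Builds a single passthrough IOCB that hands an iSCSI PDU (header plus
 * optional payload) to the firmware; when a response buffer has been
 * set up, the firmware is asked to wait for the reply PDU as well.
 **/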
int qla4xxx_send_passthru0(struct iscsi_task *task)
{
	struct passthru0 *passthru_iocb;
	struct iscsi_session *sess = task->conn->session;
	struct ddb_entry *ddb_entry = sess->dd_data;
	struct scsi_qla_host *ha = ddb_entry->ha;
	struct ql4_task_data *task_data = task->dd_data;
	uint16_t ctrl_flags = 0;
	unsigned long flags;
	int ret = QLA_ERROR;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	task_data->iocb_req_cnt = 1;
	/* Put the IOCB on the request queue */
	if (!qla4xxx_space_in_req_ring(ha, task_data->iocb_req_cnt))
		goto queuing_error;

	passthru_iocb = (struct passthru0 *) ha->request_ptr;

	memset(passthru_iocb, 0, sizeof(struct passthru0));
	passthru_iocb->hdr.entryType = ET_PASSTHRU0;
	passthru_iocb->hdr.systemDefined = SD_ISCSI_PDU;
	passthru_iocb->hdr.entryCount = task_data->iocb_req_cnt;
	passthru_iocb->handle = task->itt;
	passthru_iocb->target = cpu_to_le16(ddb_entry->fw_ddb_index);
	passthru_iocb->timeout = cpu_to_le16(PT_DEFAULT_TIMEOUT);

	/* Setup the out & in DSDs */
	if (task_data->req_len) {
		memcpy((uint8_t *)task_data->req_buffer +
		       sizeof(struct iscsi_hdr), task->data, task->data_count);
		ctrl_flags |= PT_FLAG_SEND_BUFFER;
		passthru_iocb->out_dsd.base.addrLow =
					cpu_to_le32(LSDW(task_data->req_dma));
		passthru_iocb->out_dsd.base.addrHigh =
					cpu_to_le32(MSDW(task_data->req_dma));
		passthru_iocb->out_dsd.count =
					cpu_to_le32(task->data_count +
						    sizeof(struct iscsi_hdr));
	}

	if (task_data->resp_len) {
		passthru_iocb->in_dsd.base.addrLow =
					cpu_to_le32(LSDW(task_data->resp_dma));
		passthru_iocb->in_dsd.base.addrHigh =
					cpu_to_le32(MSDW(task_data->resp_dma));
		passthru_iocb->in_dsd.count =
					cpu_to_le32(task_data->resp_len);
	}

	ctrl_flags |= (PT_FLAG_ISCSI_PDU | PT_FLAG_WAIT_4_RESPONSE);
	passthru_iocb->control_flags = cpu_to_le16(ctrl_flags);

	/* Update the request pointer */
	qla4xxx_advance_req_ring_ptr(ha);
	wmb();

	/* Track IOCB used */
	ha->iocb_cnt += task_data->iocb_req_cnt;
	ha->req_q_count -= task_data->iocb_req_cnt;
	ha->isp_ops->queue_iocb(ha);
	ret = QLA_SUCCESS;

queuing_error:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return ret;
}

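/* Allocates a zeroed mailbox request block (MRB) bound to this adapter. */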
static struct mrb *qla4xxx_get_new_mrb(struct scsi_qla_host *ha)
{
	struct mrb *mrb;

	mrb = kzalloc(sizeof(*mrb), GFP_KERNEL);
	if (!mrb)
		return mrb;

	mrb->ha = ha;
	return mrb;
}

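/**
 * qla4xxx_send_mbox_iocb - issues a mailbox IOCB to the firmware
 * @ha: Pointer to host adapter structure.
 * @mrb: pointer to the mailbox request block to send
 * @in_mbox: eight mailbox register values to pass through (32 bytes)
 *
 * Finds a free slot in active_mrb_array (the search wraps to 1, so
 * index 0 is never handed out), records the MRB there, and queues an
 * ET_MBOX_CMD IOCB carrying the mailbox values.
 **/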
static int qla4xxx_send_mbox_iocb(struct scsi_qla_host *ha, struct mrb *mrb,
				  uint32_t *in_mbox)
{
	int rval = QLA_SUCCESS;
	uint32_t i;
	unsigned long flags;
	uint32_t index = 0;

	/* Acquire hardware specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Get pointer to the queue entry for the mailbox IOCB */
	rval = qla4xxx_get_req_pkt(ha, (struct queue_entry **) &(mrb->mbox));
	if (rval != QLA_SUCCESS)
		goto exit_mbox_iocb;

	index = ha->mrb_index;
	/* get valid mrb index */
	for (i = 0; i < MAX_MRB; i++) {
		index++;
		if (index == MAX_MRB)
			index = 1;
		if (ha->active_mrb_array[index] == NULL) {
			ha->mrb_index = index;
			break;
		}
	}

	mrb->iocb_cnt = 1;
	ha->active_mrb_array[index] = mrb;
	mrb->mbox->handle = index;
	mrb->mbox->hdr.entryType = ET_MBOX_CMD;
	mrb->mbox->hdr.entryCount = mrb->iocb_cnt;
	memcpy(mrb->mbox->in_mbox, in_mbox, 32);
	mrb->mbox_cmd = in_mbox[0];
	wmb();

	ha->iocb_cnt += mrb->iocb_cnt;
	ha->isp_ops->queue_iocb(ha);
exit_mbox_iocb:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return rval;
}

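/**
 * qla4xxx_ping_iocb - sends MBOX_CMD_PING through a mailbox IOCB
 * @ha: Pointer to host adapter structure.
 * @options: ping options passed through to the firmware
 * @payload_size: size of the ping payload
 * @pid: caller-supplied ping id, carried in the MRB
 * @ipaddr: 16-byte destination address (IPv4 or IPv6)
 *
 * On success the MRB is owned by the completion path; it is freed here
 * only when allocation or submission fails.
 **/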
int qla4xxx_ping_iocb(struct scsi_qla_host *ha, uint32_t options,
		      uint32_t payload_size, uint32_t pid, uint8_t *ipaddr)
{
	uint32_t in_mbox[8];
	struct mrb *mrb = NULL;
	int rval = QLA_SUCCESS;

	memset(in_mbox, 0, sizeof(in_mbox));

	mrb = qla4xxx_get_new_mrb(ha);
	if (!mrb) {
		DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: fail to get new mrb\n",
				  __func__));
		rval = QLA_ERROR;
		goto exit_ping;
	}

	in_mbox[0] = MBOX_CMD_PING;
	in_mbox[1] = options;
	memcpy(&in_mbox[2], &ipaddr[0], 4);
	memcpy(&in_mbox[3], &ipaddr[4], 4);
	memcpy(&in_mbox[4], &ipaddr[8], 4);
	memcpy(&in_mbox[5], &ipaddr[12], 4);
	in_mbox[6] = payload_size;

	mrb->pid = pid;
	rval = qla4xxx_send_mbox_iocb(ha, mrb, in_mbox);
	if (rval != QLA_SUCCESS)
		goto exit_ping;

	return rval;

exit_ping:
	kfree(mrb);
	return rval;
}