ql4_iocb.c

/*
 * QLogic iSCSI HBA Driver
 * Copyright (c) 2003-2010 QLogic Corporation
 *
 * See LICENSE.qla4xxx for copyright and licensing details.
 */

#include "ql4_def.h"
#include "ql4_glbl.h"
#include "ql4_dbg.h"
#include "ql4_inline.h"

#include <scsi/scsi_tcq.h>

static int
qla4xxx_space_in_req_ring(struct scsi_qla_host *ha, uint16_t req_cnt)
{
	uint16_t cnt;

	/* Calculate number of free request entries. */
	if ((req_cnt + 2) >= ha->req_q_count) {
		cnt = (uint16_t) ha->isp_ops->rd_shdw_req_q_out(ha);
		if (ha->request_in < cnt)
			ha->req_q_count = cnt - ha->request_in;
		else
			ha->req_q_count = REQUEST_QUEUE_DEPTH -
				(ha->request_in - cnt);
	}

	/* Check if room for request in request ring. */
	if ((req_cnt + 2) < ha->req_q_count)
		return 1;
	else
		return 0;
}
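
/*
 * Worked example of the free-entry arithmetic above (illustrative only:
 * the ring depth and indices below are made-up values, not read from an
 * adapter).  With a 512-entry ring, request_in = 10 and a shadow
 * out-pointer of 500, the free count is 500 - 10 = 490; with request_in =
 * 500 and an out-pointer of 10, it is 512 - (500 - 10) = 22.  The sketch
 * below reproduces just that calculation.
 */
#if 0	/* sketch only, not compiled into the driver */
static uint16_t example_free_req_entries(uint16_t depth, uint16_t request_in,
					 uint16_t shadow_out)
{
	if (request_in < shadow_out)
		return shadow_out - request_in;
	return depth - (request_in - shadow_out);
}
#endif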

static void qla4xxx_advance_req_ring_ptr(struct scsi_qla_host *ha)
{
	/* Advance request queue pointer */
	if (ha->request_in == (REQUEST_QUEUE_DEPTH - 1)) {
		ha->request_in = 0;
		ha->request_ptr = ha->request_ring;
	} else {
		ha->request_in++;
		ha->request_ptr++;
	}
}

/**
 * qla4xxx_get_req_pkt - returns a valid entry in request queue.
 * @ha: Pointer to host adapter structure.
 * @queue_entry: Pointer to pointer to queue entry structure
 *
 * This routine performs the following tasks:
 *	- returns the current request_in pointer (if queue not full)
 *	- advances the request_in pointer
 *	- checks for queue full
 **/
static int qla4xxx_get_req_pkt(struct scsi_qla_host *ha,
			       struct queue_entry **queue_entry)
{
	uint16_t req_cnt = 1;

	if (qla4xxx_space_in_req_ring(ha, req_cnt)) {
		*queue_entry = ha->request_ptr;
		memset(*queue_entry, 0, sizeof(**queue_entry));

		qla4xxx_advance_req_ring_ptr(ha);
		ha->req_q_count -= req_cnt;
		return QLA_SUCCESS;
	}

	return QLA_ERROR;
}

/**
 * qla4xxx_send_marker_iocb - issues marker iocb to HBA
 * @ha: Pointer to host adapter structure.
 * @ddb_entry: Pointer to device database entry
 * @lun: SCSI LUN
 * @mrkr_mod: marker identifier
 *
 * This routine issues a marker IOCB.
 **/
int qla4xxx_send_marker_iocb(struct scsi_qla_host *ha,
			     struct ddb_entry *ddb_entry, int lun,
			     uint16_t mrkr_mod)
{
	struct qla4_marker_entry *marker_entry;
	unsigned long flags = 0;
	uint8_t status = QLA_SUCCESS;

	/* Acquire hardware specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Get pointer to the queue entry for the marker */
	if (qla4xxx_get_req_pkt(ha, (struct queue_entry **) &marker_entry) !=
	    QLA_SUCCESS) {
		status = QLA_ERROR;
		goto exit_send_marker;
	}

	/* Put the marker in the request queue */
	marker_entry->hdr.entryType = ET_MARKER;
	marker_entry->hdr.entryCount = 1;
	marker_entry->target = cpu_to_le16(ddb_entry->fw_ddb_index);
	marker_entry->modifier = cpu_to_le16(mrkr_mod);
	int_to_scsilun(lun, &marker_entry->lun);
	wmb();

	/* Tell ISP it's got a new I/O request */
	ha->isp_ops->queue_iocb(ha);

exit_send_marker:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return status;
}
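
/*
 * Usage sketch (illustrative only): a recovery path might post a LUN-reset
 * marker once a device reset has completed.  MM_LUN_RESET is assumed here
 * to be the matching marker modifier from the firmware interface headers;
 * the wrapper simply propagates the ring-full error.
 */
#if 0	/* sketch only, not compiled into the driver */
static int example_post_lun_reset_marker(struct scsi_qla_host *ha,
					 struct ddb_entry *ddb_entry, int lun)
{
	/* Returns QLA_ERROR when no request-queue entry is available. */
	return qla4xxx_send_marker_iocb(ha, ddb_entry, lun, MM_LUN_RESET);
}
#endif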

static struct continuation_t1_entry *
qla4xxx_alloc_cont_entry(struct scsi_qla_host *ha)
{
	struct continuation_t1_entry *cont_entry;

	cont_entry = (struct continuation_t1_entry *)ha->request_ptr;

	qla4xxx_advance_req_ring_ptr(ha);

	/* Load packet defaults */
	cont_entry->hdr.entryType = ET_CONTINUE;
	cont_entry->hdr.entryCount = 1;
	cont_entry->hdr.systemDefined = (uint8_t) cpu_to_le16(ha->request_in);

	return cont_entry;
}

static uint16_t qla4xxx_calc_request_entries(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > COMMAND_SEG) {
		iocbs += (dsds - COMMAND_SEG) / CONTINUE_SEG;
		if ((dsds - COMMAND_SEG) % CONTINUE_SEG)
			iocbs++;
	}
	return iocbs;
}
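
/*
 * Worked example (illustrative only; the segment counts are assumptions,
 * not quoted from ql4_fw.h): if COMMAND_SEG were 1 and CONTINUE_SEG were 5,
 * then a 12-segment transfer needs 1 command IOCB plus (12 - 1) / 5 = 2
 * full continuation IOCBs, plus one more for the remaining segment, so
 * qla4xxx_calc_request_entries(12) would return 4.
 */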

static void qla4xxx_build_scsi_iocbs(struct srb *srb,
				     struct command_t3_entry *cmd_entry,
				     uint16_t tot_dsds)
{
	struct scsi_qla_host *ha;
	uint16_t avail_dsds;
	struct data_seg_a64 *cur_dsd;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = srb->cmd;
	ha = srb->ha;

	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		/* No data being transferred */
		cmd_entry->ttlByteCnt = __constant_cpu_to_le32(0);
		return;
	}

	avail_dsds = COMMAND_SEG;
	cur_dsd = (struct data_seg_a64 *) &(cmd_entry->dataseg[0]);

	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			struct continuation_t1_entry *cont_entry;

			cont_entry = qla4xxx_alloc_cont_entry(ha);
			cur_dsd = (struct data_seg_a64 *)
					&cont_entry->dataseg[0];
			avail_dsds = CONTINUE_SEG;
		}

		sle_dma = sg_dma_address(sg);
		cur_dsd->base.addrLow = cpu_to_le32(LSDW(sle_dma));
		cur_dsd->base.addrHigh = cpu_to_le32(MSDW(sle_dma));
		cur_dsd->count = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;

		cur_dsd++;
	}
}

/**
 * qla4_8xxx_queue_iocb - Tell ISP it's got new request(s)
 * @ha: pointer to host adapter structure.
 *
 * This routine notifies the ISP that one or more new request
 * queue entries have been placed on the request queue.
 **/
void qla4_8xxx_queue_iocb(struct scsi_qla_host *ha)
{
	uint32_t dbval = 0;

	dbval = 0x14 | (ha->func_num << 5);
	dbval = dbval | (0 << 8) | (ha->request_in << 16);

	qla4_8xxx_wr_32(ha, ha->nx_db_wr_ptr, ha->request_in);
}

/**
 * qla4_8xxx_complete_iocb - Tell ISP we're done with response(s)
 * @ha: pointer to host adapter structure.
 *
 * This routine notifies the ISP that one or more response/completion
 * queue entries have been processed by the driver.
 * This also clears the interrupt.
 **/
void qla4_8xxx_complete_iocb(struct scsi_qla_host *ha)
{
	writel(ha->response_out, &ha->qla4_8xxx_reg->rsp_q_out);
	readl(&ha->qla4_8xxx_reg->rsp_q_out);
}

/**
 * qla4xxx_queue_iocb - Tell ISP it's got new request(s)
 * @ha: pointer to host adapter structure.
 *
 * This routine notifies the ISP that one or more new request
 * queue entries have been placed on the request queue.
 **/
void qla4xxx_queue_iocb(struct scsi_qla_host *ha)
{
	writel(ha->request_in, &ha->reg->req_q_in);
	readl(&ha->reg->req_q_in);
}

/**
 * qla4xxx_complete_iocb - Tell ISP we're done with response(s)
 * @ha: pointer to host adapter structure.
 *
 * This routine notifies the ISP that one or more response/completion
 * queue entries have been processed by the driver.
 * This also clears the interrupt.
 **/
void qla4xxx_complete_iocb(struct scsi_qla_host *ha)
{
	writel(ha->response_out, &ha->reg->rsp_q_out);
	readl(&ha->reg->rsp_q_out);
}
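
/*
 * Usage sketch (illustrative only): callers do not pick a doorbell routine
 * directly; they go through the per-chip isp_ops table, which is assumed
 * here to point at either the qla4xxx_* or the qla4_8xxx_* variants above,
 * depending on the adapter type detected at probe time.
 */
#if 0	/* sketch only, not compiled into the driver */
static void example_ring_doorbells(struct scsi_qla_host *ha)
{
	ha->isp_ops->queue_iocb(ha);	/* new request ring entries posted */
	ha->isp_ops->complete_iocb(ha);	/* response ring entries consumed */
}
#endif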

/**
 * qla4xxx_send_command_to_isp - issues command to HBA
 * @ha: pointer to host adapter structure.
 * @srb: pointer to SCSI Request Block to be sent to ISP
 *
 * This routine is called by qla4xxx_queuecommand to build an ISP
 * command and pass it to the ISP for execution.
 **/
int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
{
	struct scsi_cmnd *cmd = srb->cmd;
	struct ddb_entry *ddb_entry;
	struct command_t3_entry *cmd_entry;
	int nseg;
	uint16_t tot_dsds;
	uint16_t req_cnt;
	unsigned long flags;
	uint32_t index;
	char tag[2];

	/* Get real lun and adapter */
	ddb_entry = srb->ddb;

	tot_dsds = 0;

	/* Acquire hardware specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	index = (uint32_t)cmd->request->tag;

	/*
	 * Check to see if adapter is online before placing request on
	 * request queue. If a reset occurs and a request is in the queue,
	 * the firmware will still attempt to process the request, retrieving
	 * garbage for pointers.
	 */
	if (!test_bit(AF_ONLINE, &ha->flags)) {
		DEBUG2(printk("scsi%ld: %s: Adapter OFFLINE! "
			      "Do not issue command.\n",
			      ha->host_no, __func__));
		goto queuing_error;
	}

	/* Calculate the number of request entries needed. */
	nseg = scsi_dma_map(cmd);
	if (nseg < 0)
		goto queuing_error;
	tot_dsds = nseg;

	req_cnt = qla4xxx_calc_request_entries(tot_dsds);
	if (!qla4xxx_space_in_req_ring(ha, req_cnt))
		goto queuing_error;

	/* total iocbs active */
	if ((ha->iocb_cnt + req_cnt) >= REQUEST_QUEUE_DEPTH)
		goto queuing_error;

	/* Build command packet */
	cmd_entry = (struct command_t3_entry *) ha->request_ptr;
	memset(cmd_entry, 0, sizeof(struct command_t3_entry));
	cmd_entry->hdr.entryType = ET_COMMAND;
	cmd_entry->handle = cpu_to_le32(index);
	cmd_entry->target = cpu_to_le16(ddb_entry->fw_ddb_index);
	cmd_entry->connection_id = cpu_to_le16(ddb_entry->connection_id);

	int_to_scsilun(cmd->device->lun, &cmd_entry->lun);
	cmd_entry->cmdSeqNum = cpu_to_le32(ddb_entry->CmdSn);
	cmd_entry->ttlByteCnt = cpu_to_le32(scsi_bufflen(cmd));
	memcpy(cmd_entry->cdb, cmd->cmnd, cmd->cmd_len);
	cmd_entry->dataSegCnt = cpu_to_le16(tot_dsds);
	cmd_entry->hdr.entryCount = req_cnt;

	/* Set data transfer direction control flags
	 * NOTE: Look at data_direction bits iff there is data to be
	 *	 transferred, as the data direction bit is sometimes filled
	 *	 in when there is no data to be transferred */
	cmd_entry->control_flags = CF_NO_DATA;
	if (scsi_bufflen(cmd)) {
		if (cmd->sc_data_direction == DMA_TO_DEVICE)
			cmd_entry->control_flags = CF_WRITE;
		else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
			cmd_entry->control_flags = CF_READ;

		ha->bytes_xfered += scsi_bufflen(cmd);
		if (ha->bytes_xfered & ~0xFFFFF) {
			ha->total_mbytes_xferred += ha->bytes_xfered >> 20;
			ha->bytes_xfered &= 0xFFFFF;
		}
	}

	/* Set tagged queueing control flags */
	cmd_entry->control_flags |= CF_SIMPLE_TAG;
	if (scsi_populate_tag_msg(cmd, tag))
		switch (tag[0]) {
		case MSG_HEAD_TAG:
			cmd_entry->control_flags |= CF_HEAD_TAG;
			break;
		case MSG_ORDERED_TAG:
			cmd_entry->control_flags |= CF_ORDERED_TAG;
			break;
		}

	qla4xxx_advance_req_ring_ptr(ha);
	qla4xxx_build_scsi_iocbs(srb, cmd_entry, tot_dsds);
	wmb();

	srb->cmd->host_scribble = (unsigned char *)(unsigned long)index;

	/* update counters */
	srb->state = SRB_ACTIVE_STATE;
	srb->flags |= SRB_DMA_VALID;

	/* Track IOCB used */
	ha->iocb_cnt += req_cnt;
	srb->iocb_cnt = req_cnt;
	ha->req_q_count -= req_cnt;

	ha->isp_ops->queue_iocb(ha);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_ERROR;
}
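
/*
 * Worked example of the megabyte accounting in the routine above
 * (illustrative numbers): if ha->bytes_xfered reaches 0x1FFF00, the test
 * against ~0xFFFFF is non-zero, so total_mbytes_xferred is incremented by
 * 0x1FFF00 >> 20 = 1 and bytes_xfered is reduced to 0x1FFF00 & 0xFFFFF =
 * 0xFFF00, keeping the running remainder below 1 MB.
 */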