qla_nvme.c
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2017 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_nvme.h"
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/nvme.h>
#include <linux/nvme-fc.h>

static struct nvme_fc_port_template qla_nvme_fc_transport;

static void qla_nvme_unregister_remote_port(struct work_struct *);

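/*
 * Register an fcport with the FC-NVMe transport as a remote port. Ports
 * that advertise neither the TARGET nor the DISCOVERY PRLI service
 * parameter, and ports that are already registered, are skipped.
 */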
int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
{
    struct qla_nvme_rport *rport;
    struct nvme_fc_port_info req;
    int ret;

    if (!IS_ENABLED(CONFIG_NVME_FC))
        return 0;

    if (!vha->flags.nvme_enabled) {
        ql_log(ql_log_info, vha, 0x2100,
            "%s: Not registering target since Host NVME is not enabled\n",
            __func__);
        return 0;
    }

    if (!vha->nvme_local_port && qla_nvme_register_hba(vha))
        return 0;

    if (!(fcport->nvme_prli_service_param &
        (NVME_PRLI_SP_TARGET | NVME_PRLI_SP_DISCOVERY)) ||
        (fcport->nvme_flag & NVME_FLAG_REGISTERED))
        return 0;

    INIT_WORK(&fcport->nvme_del_work, qla_nvme_unregister_remote_port);
    fcport->nvme_flag &= ~NVME_FLAG_RESETTING;

    memset(&req, 0, sizeof(struct nvme_fc_port_info));
    req.port_name = wwn_to_u64(fcport->port_name);
    req.node_name = wwn_to_u64(fcport->node_name);
    req.port_role = 0;
    req.dev_loss_tmo = NVME_FC_DEV_LOSS_TMO;

    if (fcport->nvme_prli_service_param & NVME_PRLI_SP_INITIATOR)
        req.port_role = FC_PORT_ROLE_NVME_INITIATOR;

    if (fcport->nvme_prli_service_param & NVME_PRLI_SP_TARGET)
        req.port_role |= FC_PORT_ROLE_NVME_TARGET;

    if (fcport->nvme_prli_service_param & NVME_PRLI_SP_DISCOVERY)
        req.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;

    req.port_id = fcport->d_id.b24;

    ql_log(ql_log_info, vha, 0x2102,
        "%s: traddr=nn-0x%016llx:pn-0x%016llx PortID:%06x\n",
        __func__, req.node_name, req.port_name, req.port_id);

    ret = nvme_fc_register_remoteport(vha->nvme_local_port, &req,
        &fcport->nvme_remote_port);
    if (ret) {
        ql_log(ql_log_warn, vha, 0x212e,
            "Failed to register remote port. Transport returned %d\n",
            ret);
        return ret;
    }
    rport = fcport->nvme_remote_port->private;
    rport->fcport = fcport;
    list_add_tail(&rport->list, &vha->nvme_rport_list);

    fcport->nvme_flag |= NVME_FLAG_REGISTERED;
    return 0;
}

/* Allocate a queue for NVMe traffic */
static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
    unsigned int qidx, u16 qsize, void **handle)
{
    struct scsi_qla_host *vha;
    struct qla_hw_data *ha;
    struct qla_qpair *qpair;

    /*
     * qidx 0 (the admin queue) is remapped onto hardware queue 1;
     * queue pair 0 appears to be reserved for the driver's own use.
     */
    if (!qidx)
        qidx++;

    vha = (struct scsi_qla_host *)lport->private;
    ha = vha->hw;

    ql_log(ql_log_info, vha, 0x2104,
        "%s: handle %p, idx =%d, qsize %d\n",
        __func__, handle, qidx, qsize);

    if (qidx > qla_nvme_fc_transport.max_hw_queues) {
        ql_log(ql_log_warn, vha, 0x212f,
            "%s: Illegal qidx=%d. Max=%d\n",
            __func__, qidx, qla_nvme_fc_transport.max_hw_queues);
        return -EINVAL;
    }

    /* Reuse an existing qpair for this index if one was already created. */
    if (ha->queue_pair_map[qidx]) {
        *handle = ha->queue_pair_map[qidx];
        ql_log(ql_log_info, vha, 0x2121,
            "Returning existing qpair of %p for idx=%x\n",
            *handle, qidx);
        return 0;
    }

    qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true);
    if (!qpair) {
        ql_log(ql_log_warn, vha, 0x2122,
            "Failed to allocate qpair\n");
        return -EINVAL;
    }
    *handle = qpair;

    return 0;
}

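/*
 * Completion callback for an NVMe LS (link service) SRB: record the
 * completion status, punt the transport's ->done() call to ls_work, and
 * release the SRB once the final reference drops.
 */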
static void qla_nvme_sp_ls_done(void *ptr, int res)
{
    srb_t *sp = ptr;
    struct srb_iocb *nvme;
    struct nvmefc_ls_req *fd;
    struct nvme_private *priv;

    if (atomic_read(&sp->ref_count) == 0) {
        ql_log(ql_log_warn, sp->fcport->vha, 0x2123,
            "SP reference-count to ZERO on LS_done -- sp=%p.\n", sp);
        return;
    }

    if (!atomic_dec_and_test(&sp->ref_count))
        return;

    if (res)
        res = -EINVAL;

    nvme = &sp->u.iocb_cmd;
    fd = nvme->u.nvme.desc;
    priv = fd->private;
    priv->comp_status = res;
    schedule_work(&priv->ls_work);
    /* work schedule doesn't need the sp */
    qla2x00_rel_sp(sp);
}

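/*
 * Completion callback for an NVMe FCP command SRB: translate the driver
 * result into an NVMe status code, complete the request back to the
 * transport, and return the SRB to its queue pair.
 */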
static void qla_nvme_sp_done(void *ptr, int res)
{
    srb_t *sp = ptr;
    struct srb_iocb *nvme;
    struct nvmefc_fcp_req *fd;

    nvme = &sp->u.iocb_cmd;
    fd = nvme->u.nvme.desc;

    if (!atomic_dec_and_test(&sp->ref_count))
        return;

    if (res == QLA_SUCCESS)
        fd->status = 0;
    else
        fd->status = NVME_SC_INTERNAL;

    fd->rcv_rsplen = nvme->u.nvme.rsp_pyld_len;
    fd->done(fd);
    qla2xxx_rel_qpair_sp(sp->qpair, sp);
}

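/*
 * Workqueue routine that asks the firmware to abort the SRB stashed in
 * nvme_private; both the LS and the FCP abort entry points funnel here.
 */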
static void qla_nvme_abort_work(struct work_struct *work)
{
    struct nvme_private *priv =
        container_of(work, struct nvme_private, abort_work);
    srb_t *sp = priv->sp;
    fc_port_t *fcport = sp->fcport;
    struct qla_hw_data *ha = fcport->vha->hw;
    int rval;

    rval = ha->isp_ops->abort_command(sp);
    ql_dbg(ql_dbg_io, fcport->vha, 0x212b,
        "%s: %s command for sp=%p, handle=%x on fcport=%p rval=%x\n",
        __func__, (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted",
        sp, sp->handle, fcport, rval);
}

static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
    struct nvme_private *priv = fd->private;

    INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
    schedule_work(&priv->abort_work);
}

static void qla_nvme_ls_complete(struct work_struct *work)
{
    struct nvme_private *priv =
        container_of(work, struct nvme_private, ls_work);
    struct nvmefc_ls_req *fd = priv->fd;

    fd->done(fd, priv->comp_status);
}

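/*
 * Transport entry point for issuing an NVMe LS request: build an
 * SRB_NVME_LS SRB, DMA-map the request buffer, and hand the IOCB to the
 * firmware via qla2x00_start_sp().
 */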
static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
    struct qla_nvme_rport *qla_rport = rport->private;
    fc_port_t *fcport = qla_rport->fcport;
    struct srb_iocb *nvme;
    struct nvme_private *priv = fd->private;
    struct scsi_qla_host *vha;
    int rval = QLA_FUNCTION_FAILED;
    struct qla_hw_data *ha;
    srb_t *sp;

    vha = fcport->vha;
    ha = vha->hw;

    /* Alloc SRB structure */
    sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
    if (!sp)
        return rval;

    sp->type = SRB_NVME_LS;
    sp->name = "nvme_ls";
    sp->done = qla_nvme_sp_ls_done;
    atomic_set(&sp->ref_count, 1);
    nvme = &sp->u.iocb_cmd;
    priv->sp = sp;
    priv->fd = fd;
    INIT_WORK(&priv->ls_work, qla_nvme_ls_complete);
    nvme->u.nvme.desc = fd;
    nvme->u.nvme.dir = 0;
    nvme->u.nvme.dl = 0;
    nvme->u.nvme.cmd_len = fd->rqstlen;
    nvme->u.nvme.rsp_len = fd->rsplen;
    nvme->u.nvme.rsp_dma = fd->rspdma;
    nvme->u.nvme.timeout_sec = fd->timeout;
    nvme->u.nvme.cmd_dma = dma_map_single(&ha->pdev->dev, fd->rqstaddr,
        fd->rqstlen, DMA_TO_DEVICE);
    dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
        fd->rqstlen, DMA_TO_DEVICE);

    rval = qla2x00_start_sp(sp);
    if (rval != QLA_SUCCESS) {
        ql_log(ql_log_warn, vha, 0x700e,
            "qla2x00_start_sp failed = %d\n", rval);
        atomic_dec(&sp->ref_count);
        wake_up(&sp->nvme_ls_waitq);
        return rval;
    }

    return rval;
}

static void qla_nvme_fcp_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
    struct nvme_private *priv = fd->private;

    INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
    schedule_work(&priv->abort_work);
}

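/* Transport poll entry point: drain the qpair's response queue under its lock. */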
static void qla_nvme_poll(struct nvme_fc_local_port *lport, void *hw_queue_handle)
{
    struct qla_qpair *qpair = hw_queue_handle;
    unsigned long flags;
    struct scsi_qla_host *vha = lport->private;

    spin_lock_irqsave(&qpair->qp_lock, flags);
    qla24xx_process_response_queue(vha, qpair->rsp);
    spin_unlock_irqrestore(&qpair->qp_lock, flags);
}

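/*
 * Build and ring a Command Type NVME IOCB for the request attached to sp.
 * One data segment descriptor (DSD) fits in the command IOCB itself; each
 * additional Continuation Type 1 IOCB carries up to five more. The whole
 * sequence runs under the qpair lock: claim an outstanding-command slot,
 * verify ring room, fill in the packet, load the scatter/gather DSDs, and
 * finally write the new ring index to the chip.
 */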
static inline int qla2x00_start_nvme_mq(srb_t *sp)
{
    unsigned long flags;
    uint32_t *clr_ptr;
    uint32_t index;
    uint32_t handle;
    struct cmd_nvme *cmd_pkt;
    uint16_t cnt, i;
    uint16_t req_cnt;
    uint16_t tot_dsds;
    uint16_t avail_dsds;
    uint32_t *cur_dsd;
    struct req_que *req = NULL;
    struct scsi_qla_host *vha = sp->fcport->vha;
    struct qla_hw_data *ha = vha->hw;
    struct qla_qpair *qpair = sp->qpair;
    struct srb_iocb *nvme = &sp->u.iocb_cmd;
    struct scatterlist *sgl, *sg;
    struct nvmefc_fcp_req *fd = nvme->u.nvme.desc;
    uint32_t rval = QLA_SUCCESS;

    /* Setup qpair pointers */
    req = qpair->req;
    tot_dsds = fd->sg_cnt;

    /* Acquire qpair specific lock */
    spin_lock_irqsave(&qpair->qp_lock, flags);

    /* Check for room in outstanding command list. */
    handle = req->current_outstanding_cmd;
    for (index = 1; index < req->num_outstanding_cmds; index++) {
        handle++;
        if (handle == req->num_outstanding_cmds)
            handle = 1;
        if (!req->outstanding_cmds[handle])
            break;
    }

    if (index == req->num_outstanding_cmds) {
        rval = -EBUSY;
        goto queuing_error;
    }

    req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
    if (req->cnt < (req_cnt + 2)) {
        cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
            RD_REG_DWORD_RELAXED(req->req_q_out);

        if (req->ring_index < cnt)
            req->cnt = cnt - req->ring_index;
        else
            req->cnt = req->length - (req->ring_index - cnt);

        if (req->cnt < (req_cnt + 2)) {
            rval = -EBUSY;
            goto queuing_error;
        }
    }

    if (unlikely(!fd->sqid)) {
        struct nvme_fc_cmd_iu *cmd = fd->cmdaddr;

        if (cmd->sqe.common.opcode == nvme_admin_async_event) {
            nvme->u.nvme.aen_op = 1;
            atomic_inc(&ha->nvme_active_aen_cnt);
        }
    }

    /* Build command packet. */
    req->current_outstanding_cmd = handle;
    req->outstanding_cmds[handle] = sp;
    sp->handle = handle;
    req->cnt -= req_cnt;

    cmd_pkt = (struct cmd_nvme *)req->ring_ptr;
    cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

    /* Zero out remaining portion of packet. */
    clr_ptr = (uint32_t *)cmd_pkt + 2;
    memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

    cmd_pkt->entry_status = 0;

    /* Update entry type to indicate Command NVME IOCB */
    cmd_pkt->entry_type = COMMAND_NVME;

    /* Set data direction flags; io_dir == 0 means no data phase. */
    if (fd->io_dir == NVMEFC_FCP_READ) {
        cmd_pkt->control_flags =
            cpu_to_le16(CF_READ_DATA | CF_NVME_ENABLE);
        vha->qla_stats.input_bytes += fd->payload_length;
        vha->qla_stats.input_requests++;
    } else if (fd->io_dir == NVMEFC_FCP_WRITE) {
        cmd_pkt->control_flags =
            cpu_to_le16(CF_WRITE_DATA | CF_NVME_ENABLE);
        vha->qla_stats.output_bytes += fd->payload_length;
        vha->qla_stats.output_requests++;
    } else if (fd->io_dir == 0) {
        cmd_pkt->control_flags = cpu_to_le16(CF_NVME_ENABLE);
    }

    /* Set NPORT-ID */
    cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
    cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
    cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
    cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
    cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

    /* NVME RSP IU */
    cmd_pkt->nvme_rsp_dsd_len = cpu_to_le16(fd->rsplen);
    cmd_pkt->nvme_rsp_dseg_address[0] = cpu_to_le32(LSD(fd->rspdma));
    cmd_pkt->nvme_rsp_dseg_address[1] = cpu_to_le32(MSD(fd->rspdma));

    /* NVME CMND IU */
    cmd_pkt->nvme_cmnd_dseg_len = cpu_to_le16(fd->cmdlen);
    cmd_pkt->nvme_cmnd_dseg_address[0] = cpu_to_le32(LSD(fd->cmddma));
    cmd_pkt->nvme_cmnd_dseg_address[1] = cpu_to_le32(MSD(fd->cmddma));

    cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
    cmd_pkt->byte_count = cpu_to_le32(fd->payload_length);

    /* One DSD is available in the Command Type NVME IOCB */
    avail_dsds = 1;
    cur_dsd = (uint32_t *)&cmd_pkt->nvme_data_dseg_address[0];
    sgl = fd->first_sgl;

    /* Load data segments */
    for_each_sg(sgl, sg, tot_dsds, i) {
        dma_addr_t sle_dma;
        cont_a64_entry_t *cont_pkt;

        /* Allocate additional continuation packets? */
        if (avail_dsds == 0) {
            /*
             * Five DSDs are available in the Continuation
             * Type 1 IOCB.
             */

            /* Adjust ring index */
            req->ring_index++;
            if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
            } else {
                req->ring_ptr++;
            }
            cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
            *((uint32_t *)(&cont_pkt->entry_type)) =
                cpu_to_le32(CONTINUE_A64_TYPE);

            cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
            avail_dsds = 5;
        }

        sle_dma = sg_dma_address(sg);
        *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
        *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
        *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
        avail_dsds--;
    }

    /* Set total entry count. */
    cmd_pkt->entry_count = (uint8_t)req_cnt;
    wmb();

    /* Adjust ring index. */
    req->ring_index++;
    if (req->ring_index == req->length) {
        req->ring_index = 0;
        req->ring_ptr = req->ring;
    } else {
        req->ring_ptr++;
    }

    /* Set chip new ring index. */
    WRT_REG_DWORD(req->req_q_in, req->ring_index);

queuing_error:
    spin_unlock_irqrestore(&qpair->qp_lock, flags);
    return rval;
}

/*
 * Post a command: the transport's ->fcp_io() entry point. Allocates an
 * SRB from the queue pair and queues it with qla2x00_start_nvme_mq().
 */
static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
    fc_port_t *fcport;
    struct srb_iocb *nvme;
    struct scsi_qla_host *vha;
    int rval = -ENODEV;
    srb_t *sp;
    struct qla_qpair *qpair = hw_queue_handle;
    struct nvme_private *priv = fd->private;
    struct qla_nvme_rport *qla_rport = rport->private;

    fcport = qla_rport->fcport;
    vha = fcport->vha;

    if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
        return rval;

    /*
     * If we know the dev is going away while the transport is still sending
     * IO's return busy back to stall the IO Q. This happens when the
     * link goes away and fw hasn't notified us yet, but IO's are being
     * returned. If the dev comes back quickly we won't exhaust the IO
     * retry count at the core.
     */
    if (fcport->nvme_flag & NVME_FLAG_RESETTING)
        return -EBUSY;

    /* Alloc SRB structure */
    sp = qla2xxx_get_qpair_sp(qpair, fcport, GFP_ATOMIC);
    if (!sp)
        return -EBUSY;

    atomic_set(&sp->ref_count, 1);
    init_waitqueue_head(&sp->nvme_ls_waitq);
    priv->sp = sp;
    sp->type = SRB_NVME_CMD;
    sp->name = "nvme_cmd";
    sp->done = qla_nvme_sp_done;
    sp->qpair = qpair;
    sp->vha = vha;
    nvme = &sp->u.iocb_cmd;
    nvme->u.nvme.desc = fd;

    rval = qla2x00_start_nvme_mq(sp);
    if (rval != QLA_SUCCESS) {
        ql_log(ql_log_warn, vha, 0x212d,
            "qla2x00_start_nvme_mq failed = %d\n", rval);
        atomic_dec(&sp->ref_count);
        wake_up(&sp->nvme_ls_waitq);
    }

    return rval;
}

static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
{
    struct scsi_qla_host *vha = lport->private;

    ql_log(ql_log_info, vha, 0x210f,
        "localport delete of %p completed.\n", vha->nvme_local_port);
    vha->nvme_local_port = NULL;
    complete(&vha->nvme_del_done);
}

static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
{
    fc_port_t *fcport;
    struct qla_nvme_rport *qla_rport = rport->private, *trport;

    fcport = qla_rport->fcport;
    fcport->nvme_remote_port = NULL;
    fcport->nvme_flag &= ~NVME_FLAG_REGISTERED;

    list_for_each_entry_safe(qla_rport, trport,
        &fcport->vha->nvme_rport_list, list) {
        if (qla_rport->fcport == fcport) {
            list_del(&qla_rport->list);
            break;
        }
    }
    complete(&fcport->nvme_del_done);

    if (!test_bit(UNLOADING, &fcport->vha->dpc_flags)) {
        INIT_WORK(&fcport->free_work, qlt_free_session_done);
        schedule_work(&fcport->free_work);
    }

    fcport->nvme_flag &= ~NVME_FLAG_DELETING;
    ql_log(ql_log_info, fcport->vha, 0x2110,
        "remoteport_delete of %p completed.\n", fcport);
}

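/*
 * Ops and sizing limits advertised to the FC-NVMe transport. The *_priv_sz
 * fields tell the transport how much driver-private space to allocate
 * alongside its own local port, remote port, and request objects.
 */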
static struct nvme_fc_port_template qla_nvme_fc_transport = {
    .module             = THIS_MODULE,
    .localport_delete   = qla_nvme_localport_delete,
    .remoteport_delete  = qla_nvme_remoteport_delete,
    .create_queue       = qla_nvme_alloc_queue,
    .delete_queue       = NULL,
    .ls_req             = qla_nvme_ls_req,
    .ls_abort           = qla_nvme_ls_abort,
    .fcp_io             = qla_nvme_post_cmd,
    .fcp_abort          = qla_nvme_fcp_abort,
    .poll_queue         = qla_nvme_poll,
    .max_hw_queues      = 8,
    .max_sgl_segments   = 128,
    .max_dif_sgl_segments = 64,
    .dma_boundary       = 0xFFFFFFFF,
    .local_priv_sz      = 8,
    .remote_priv_sz     = sizeof(struct qla_nvme_rport),
    .lsrqst_priv_sz     = sizeof(struct nvme_private),
    .fcprqst_priv_sz    = sizeof(struct nvme_private),
};

#define NVME_ABORT_POLLING_PERIOD    2    /* seconds */
static int qla_nvme_wait_on_command(srb_t *sp)
{
    int ret = QLA_SUCCESS;

    wait_event_timeout(sp->nvme_ls_waitq, (atomic_read(&sp->ref_count) > 1),
        NVME_ABORT_POLLING_PERIOD * HZ);

    if (atomic_read(&sp->ref_count) > 1)
        ret = QLA_FUNCTION_FAILED;

    return ret;
}

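/*
 * Abort helper used on teardown paths: if the firmware is running, request
 * an abort and wait briefly via qla_nvme_wait_on_command(); otherwise
 * complete the SRB directly with the supplied result.
 */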
void qla_nvme_abort(struct qla_hw_data *ha, struct srb *sp, int res)
{
    int rval;

    if (ha->flags.fw_started) {
        rval = ha->isp_ops->abort_command(sp);
        if (!rval && !qla_nvme_wait_on_command(sp))
            ql_log(ql_log_warn, NULL, 0x2112,
                "timed out waiting on sp=%p\n", sp);
    } else {
        sp->done(sp, res);
    }
}

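/*
 * Deferred (workqueue) unregistration of an fcport's NVMe remote port;
 * blocks until qla_nvme_remoteport_delete() signals nvme_del_done.
 */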
static void qla_nvme_unregister_remote_port(struct work_struct *work)
{
    struct fc_port *fcport = container_of(work, struct fc_port,
        nvme_del_work);
    struct qla_nvme_rport *qla_rport, *trport;

    if (!IS_ENABLED(CONFIG_NVME_FC))
        return;

    ql_log(ql_log_warn, NULL, 0x2112,
        "%s: unregister remoteport on %p\n", __func__, fcport);

    list_for_each_entry_safe(qla_rport, trport,
        &fcport->vha->nvme_rport_list, list) {
        if (qla_rport->fcport == fcport) {
            ql_log(ql_log_info, fcport->vha, 0x2113,
                "%s: fcport=%p\n", __func__, fcport);
            init_completion(&fcport->nvme_del_done);
            nvme_fc_unregister_remoteport(
                fcport->nvme_remote_port);
            wait_for_completion(&fcport->nvme_del_done);
            break;
        }
    }
}

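/*
 * Tear down all NVMe state for a host: force immediate devloss on every
 * registered remote port, then unregister the local port and wait for the
 * transport to finish deleting it.
 */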
void qla_nvme_delete(struct scsi_qla_host *vha)
{
    struct qla_nvme_rport *qla_rport, *trport;
    fc_port_t *fcport;
    int nv_ret;

    if (!IS_ENABLED(CONFIG_NVME_FC))
        return;

    list_for_each_entry_safe(qla_rport, trport,
        &vha->nvme_rport_list, list) {
        fcport = qla_rport->fcport;

        ql_log(ql_log_info, fcport->vha, 0x2114, "%s: fcport=%p\n",
            __func__, fcport);

        nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0);
    }

    if (vha->nvme_local_port) {
        init_completion(&vha->nvme_del_done);
        ql_log(ql_log_info, vha, 0x2116,
            "unregister localport=%p\n",
            vha->nvme_local_port);
        nv_ret = nvme_fc_unregister_localport(vha->nvme_local_port);
        if (nv_ret)
            ql_log(ql_log_info, vha, 0x2115,
                "Unregister of localport failed\n");
        else
            wait_for_completion(&vha->nvme_del_done);
    }
}

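/*
 * Register the HBA as an FC-NVMe local port. max_hw_queues is clamped to
 * ha->max_req_queues - 2, with a WARN_ON guarding hosts that expose fewer
 * than three request queues.
 */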
int qla_nvme_register_hba(struct scsi_qla_host *vha)
{
    struct nvme_fc_port_template *tmpl;
    struct qla_hw_data *ha;
    struct nvme_fc_port_info pinfo;
    int ret = -EINVAL;

    if (!IS_ENABLED(CONFIG_NVME_FC))
        return ret;

    ha = vha->hw;
    tmpl = &qla_nvme_fc_transport;

    WARN_ON(vha->nvme_local_port);
    WARN_ON(ha->max_req_queues < 3);

    qla_nvme_fc_transport.max_hw_queues =
        min((uint8_t)(qla_nvme_fc_transport.max_hw_queues),
            (uint8_t)(ha->max_req_queues - 2));

    /* Zero pinfo first so fields we don't set below are well defined. */
    memset(&pinfo, 0, sizeof(pinfo));
    pinfo.node_name = wwn_to_u64(vha->node_name);
    pinfo.port_name = wwn_to_u64(vha->port_name);
    pinfo.port_role = FC_PORT_ROLE_NVME_INITIATOR;
    pinfo.port_id = vha->d_id.b24;

    ql_log(ql_log_info, vha, 0xffff,
        "register_localport: host-traddr=nn-0x%llx:pn-0x%llx on portID:%x\n",
        pinfo.node_name, pinfo.port_name, pinfo.port_id);
    qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary;

    ret = nvme_fc_register_localport(&pinfo, tmpl,
        get_device(&ha->pdev->dev), &vha->nvme_local_port);
    if (ret) {
        ql_log(ql_log_warn, vha, 0xffff,
            "register_localport failed: ret=%x\n", ret);
    } else {
        vha->nvme_local_port->private = vha;
    }

    return ret;
}