iser_memory.c

/*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>

#include "iscsi_iser.h"
static
int iser_fast_reg_fmr(struct iscsi_iser_task *iser_task,
                      struct iser_data_buf *mem,
                      struct iser_reg_resources *rsc,
                      struct iser_mem_reg *mem_reg);
static
int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
                     struct iser_data_buf *mem,
                     struct iser_reg_resources *rsc,
                     struct iser_mem_reg *mem_reg);
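
/*
 * Registration strategy tables: one of these is installed as
 * device->reg_ops by iser_assign_reg_ops() below, and the rest of the
 * code dispatches through it without caring whether FastReg or FMR is
 * in use.
 */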
static const struct iser_reg_ops fastreg_ops = {
        .alloc_reg_res  = iser_alloc_fastreg_pool,
        .free_reg_res   = iser_free_fastreg_pool,
        .reg_mem        = iser_fast_reg_mr,
        .unreg_mem      = iser_unreg_mem_fastreg,
        .reg_desc_get   = iser_reg_desc_get_fr,
        .reg_desc_put   = iser_reg_desc_put_fr,
};

static const struct iser_reg_ops fmr_ops = {
        .alloc_reg_res  = iser_alloc_fmr_pool,
        .free_reg_res   = iser_free_fmr_pool,
        .reg_mem        = iser_fast_reg_fmr,
        .unreg_mem      = iser_unreg_mem_fmr,
        .reg_desc_get   = iser_reg_desc_get_fmr,
        .reg_desc_put   = iser_reg_desc_put_fmr,
};
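
/*
 * Completion handler for registration work requests. These appear to
 * be posted unsignaled elsewhere in the driver, so a completion
 * arriving here can only mean an error.
 */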
void iser_reg_comp(struct ib_cq *cq, struct ib_wc *wc)
{
        iser_err_comp(wc, "memreg");
}
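
/**
 * iser_assign_reg_ops - Select how memory will be registered
 * @device: iser device to set up
 *
 * Prefers FMR when the verbs provider implements the whole FMR API,
 * and falls back to FastReg when the device advertises
 * IB_DEVICE_MEM_MGT_EXTENSIONS. Returns 0 on success or -1 when the
 * device supports neither scheme.
 */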
int iser_assign_reg_ops(struct iser_device *device)
{
        struct ib_device *ib_dev = device->ib_device;

        /* Assign function handles - based on FMR support */
        if (ib_dev->alloc_fmr && ib_dev->dealloc_fmr &&
            ib_dev->map_phys_fmr && ib_dev->unmap_fmr) {
                iser_info("FMR supported, using FMR for registration\n");
                device->reg_ops = &fmr_ops;
        } else if (ib_dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
                iser_info("FastReg supported, using FastReg for registration\n");
                device->reg_ops = &fastreg_ops;
                device->remote_inv_sup = iser_always_reg;
        } else {
                iser_err("IB device does not support FMRs nor FastRegs, can't register memory\n");
                return -1;
        }

        return 0;
}
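
/*
 * FastReg descriptors are kept on a spinlock-protected per-connection
 * free list; get pops the head and put pushes the descriptor back.
 */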
struct iser_fr_desc *
iser_reg_desc_get_fr(struct ib_conn *ib_conn)
{
        struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
        struct iser_fr_desc *desc;
        unsigned long flags;

        spin_lock_irqsave(&fr_pool->lock, flags);
        desc = list_first_entry(&fr_pool->list,
                                struct iser_fr_desc, list);
        list_del(&desc->list);
        spin_unlock_irqrestore(&fr_pool->lock, flags);

        return desc;
}
void
iser_reg_desc_put_fr(struct ib_conn *ib_conn,
                     struct iser_fr_desc *desc)
{
        struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
        unsigned long flags;

        spin_lock_irqsave(&fr_pool->lock, flags);
        list_add(&desc->list, &fr_pool->list);
        spin_unlock_irqrestore(&fr_pool->lock, flags);
}
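
/*
 * The FMR "pool" holds a single descriptor shared by all tasks on the
 * connection, so get simply returns the head of the list without
 * taking it off, and put is a no-op.
 */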
struct iser_fr_desc *
iser_reg_desc_get_fmr(struct ib_conn *ib_conn)
{
        struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;

        return list_first_entry(&fr_pool->list,
                                struct iser_fr_desc, list);
}

void
iser_reg_desc_put_fmr(struct ib_conn *ib_conn,
                      struct iser_fr_desc *desc)
{
}
static void iser_data_buf_dump(struct iser_data_buf *data,
                               struct ib_device *ibdev)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(data->sg, sg, data->dma_nents, i)
                iser_dbg("sg[%d] dma_addr:0x%lX page:0x%p "
                         "off:0x%x sz:0x%x dma_len:0x%x\n",
                         i, (unsigned long)ib_sg_dma_address(ibdev, sg),
                         sg_page(sg), sg->offset,
                         sg->length, ib_sg_dma_len(ibdev, sg));
}
static void iser_dump_page_vec(struct iser_page_vec *page_vec)
{
        int i;

        /* fake_mr.length is u64, so use %lld rather than %d */
        iser_err("page vec npages %d data length %lld\n",
                 page_vec->npages, page_vec->fake_mr.length);
        for (i = 0; i < page_vec->npages; i++)
                iser_err("vec[%d]: %llx\n", i, page_vec->pages[i]);
}
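
/**
 * iser_dma_map_task_data - Map task data for DMA
 * @iser_task: iser task that owns the buffer
 * @data: buffer to map
 * @iser_dir: data direction from the iSER point of view
 * @dma_dir: data direction from the DMA API point of view
 *
 * Returns 0 on success, or -EINVAL if the scatterlist could not be
 * mapped.
 */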
int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
                           struct iser_data_buf *data,
                           enum iser_data_dir iser_dir,
                           enum dma_data_direction dma_dir)
{
        struct ib_device *dev;

        iser_task->dir[iser_dir] = 1;
        dev = iser_task->iser_conn->ib_conn.device->ib_device;

        data->dma_nents = ib_dma_map_sg(dev, data->sg, data->size, dma_dir);
        if (data->dma_nents == 0) {
                iser_err("dma_map_sg failed!!!\n");
                return -EINVAL;
        }
        return 0;
}
void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
                              struct iser_data_buf *data,
                              enum dma_data_direction dir)
{
        struct ib_device *dev;

        dev = iser_task->iser_conn->ib_conn.device->ib_device;
        ib_dma_unmap_sg(dev, data->sg, data->size, dir);
}
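
/*
 * Fast path for a buffer that maps to a single DMA entry: no memory
 * registration is needed, the PD's local DMA lkey (and, when the PD
 * was created with IB_PD_UNSAFE_GLOBAL_RKEY, the global rkey) is used
 * directly.
 */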
static int
iser_reg_dma(struct iser_device *device, struct iser_data_buf *mem,
             struct iser_mem_reg *reg)
{
        struct scatterlist *sg = mem->sg;

        reg->sge.lkey = device->pd->local_dma_lkey;
        /*
         * FIXME: rework the registration code path to differentiate
         * rkey/lkey use cases
         */
        if (device->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)
                reg->rkey = device->pd->unsafe_global_rkey;
        else
                reg->rkey = 0;
        reg->sge.addr = ib_sg_dma_address(device->ib_device, &sg[0]);
        reg->sge.length = ib_sg_dma_len(device->ib_device, &sg[0]);

        iser_dbg("Single DMA entry: lkey=0x%x, rkey=0x%x, addr=0x%llx,"
                 " length=0x%x\n", reg->sge.lkey, reg->rkey,
                 reg->sge.addr, reg->sge.length);
        return 0;
}
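
/*
 * ib_sg_to_pages() callback: instead of programming a real MR, collect
 * each page address into the connection's page vector. page_vec embeds
 * a "fake" ib_mr purely so the core SG-to-pages helper can be reused
 * to build the plain page list that FMR consumes.
 */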
static int iser_set_page(struct ib_mr *mr, u64 addr)
{
        struct iser_page_vec *page_vec =
                container_of(mr, struct iser_page_vec, fake_mr);

        page_vec->pages[page_vec->npages++] = addr;

        return 0;
}
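
/**
 * iser_fast_reg_fmr - Register memory through the FMR pool
 * @iser_task: task that owns the buffer
 * @mem: buffer to register
 * @rsc: registration resources (page vector and FMR pool)
 * @reg: resulting registration descriptor
 *
 * Flattens the scatterlist into rsc->page_vec via ib_sg_to_pages() and
 * maps the page list with ib_fmr_pool_map_phys(). Returns 0 on success
 * or a negative errno.
 */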
static
int iser_fast_reg_fmr(struct iscsi_iser_task *iser_task,
                      struct iser_data_buf *mem,
                      struct iser_reg_resources *rsc,
                      struct iser_mem_reg *reg)
{
        struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
        struct iser_device *device = ib_conn->device;
        struct iser_page_vec *page_vec = rsc->page_vec;
        struct ib_fmr_pool *fmr_pool = rsc->fmr_pool;
        struct ib_pool_fmr *fmr;
        int ret, plen;

        page_vec->npages = 0;
        page_vec->fake_mr.page_size = SIZE_4K;
        plen = ib_sg_to_pages(&page_vec->fake_mr, mem->sg,
                              mem->size, NULL, iser_set_page);
        if (unlikely(plen < mem->size)) {
                iser_err("page vec too short to hold this SG\n");
                iser_data_buf_dump(mem, device->ib_device);
                iser_dump_page_vec(page_vec);
                return -EINVAL;
        }

        fmr = ib_fmr_pool_map_phys(fmr_pool, page_vec->pages,
                                   page_vec->npages, page_vec->pages[0]);
        if (IS_ERR(fmr)) {
                ret = PTR_ERR(fmr);
                iser_err("ib_fmr_pool_map_phys failed: %d\n", ret);
                return ret;
        }

        reg->sge.lkey = fmr->fmr->lkey;
        reg->rkey = fmr->fmr->rkey;
        reg->sge.addr = page_vec->fake_mr.iova;
        reg->sge.length = page_vec->fake_mr.length;
        reg->mem_h = fmr;

        iser_dbg("fmr reg: lkey=0x%x, rkey=0x%x, addr=0x%llx,"
                 " length=0x%x\n", reg->sge.lkey, reg->rkey,
                 reg->sge.addr, reg->sge.length);

        return 0;
}
/**
 * Unregister (previously registered using FMR) memory.
 * If memory is non-FMR does nothing.
 */
void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task,
                        enum iser_data_dir cmd_dir)
{
        struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];
        int ret;

        if (!reg->mem_h)
                return;

        iser_dbg("PHYSICAL Mem.Unregister mem_h %p\n", reg->mem_h);

        ret = ib_fmr_pool_unmap((struct ib_pool_fmr *)reg->mem_h);
        if (ret)
                iser_err("ib_fmr_pool_unmap failed %d\n", ret);

        reg->mem_h = NULL;
}
void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
                            enum iser_data_dir cmd_dir)
{
        struct iser_device *device = iser_task->iser_conn->ib_conn.device;
        struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];

        if (!reg->mem_h)
                return;

        device->reg_ops->reg_desc_put(&iser_task->iser_conn->ib_conn,
                                      reg->mem_h);
        reg->mem_h = NULL;
}
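
/*
 * T10-PI (DIF) helpers: fill one signature domain (memory or wire
 * side) from the SCSI command. The application tag check mask and the
 * escape settings are currently hard coded, as the comment inside the
 * function notes.
 */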
static void
iser_set_dif_domain(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs,
                    struct ib_sig_domain *domain)
{
        domain->sig_type = IB_SIG_TYPE_T10_DIF;
        domain->sig.dif.pi_interval = scsi_prot_interval(sc);
        domain->sig.dif.ref_tag = scsi_prot_ref_tag(sc);
        /*
         * At the moment we hard code those, but in the future
         * we will take them from sc.
         */
        domain->sig.dif.apptag_check_mask = 0xffff;
        domain->sig.dif.app_escape = true;
        domain->sig.dif.ref_escape = true;
        if (sc->prot_flags & SCSI_PROT_REF_INCREMENT)
                domain->sig.dif.ref_remap = true;
}
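
/*
 * Translate the SCSI protection operation into signature attributes:
 * INSERT/STRIP operations carry protection information on only one
 * side (the other is IB_SIG_TYPE_NONE), while PASS operations carry it
 * on both the memory and the wire domain.
 */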
static int
iser_set_sig_attrs(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs)
{
        switch (scsi_get_prot_op(sc)) {
        case SCSI_PROT_WRITE_INSERT:
        case SCSI_PROT_READ_STRIP:
                sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
                iser_set_dif_domain(sc, sig_attrs, &sig_attrs->wire);
                sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
                break;
        case SCSI_PROT_READ_INSERT:
        case SCSI_PROT_WRITE_STRIP:
                sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
                iser_set_dif_domain(sc, sig_attrs, &sig_attrs->mem);
                sig_attrs->mem.sig.dif.bg_type = sc->prot_flags & SCSI_PROT_IP_CHECKSUM ?
                                                 IB_T10DIF_CSUM : IB_T10DIF_CRC;
                break;
        case SCSI_PROT_READ_PASS:
        case SCSI_PROT_WRITE_PASS:
                iser_set_dif_domain(sc, sig_attrs, &sig_attrs->wire);
                sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
                iser_set_dif_domain(sc, sig_attrs, &sig_attrs->mem);
                sig_attrs->mem.sig.dif.bg_type = sc->prot_flags & SCSI_PROT_IP_CHECKSUM ?
                                                 IB_T10DIF_CSUM : IB_T10DIF_CRC;
                break;
        default:
                iser_err("Unsupported PI operation %d\n",
                         scsi_get_prot_op(sc));
                return -EINVAL;
        }

        return 0;
}
static inline void
iser_set_prot_checks(struct scsi_cmnd *sc, u8 *mask)
{
        *mask = 0;
        if (sc->prot_flags & SCSI_PROT_REF_CHECK)
                *mask |= ISER_CHECK_REFTAG;
        if (sc->prot_flags & SCSI_PROT_GUARD_CHECK)
                *mask |= ISER_CHECK_GUARD;
}
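
/*
 * Queue a LOCAL_INV work request so that the MR's previous rkey is
 * invalidated before the MR is registered again under a new key.
 */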
static inline void
iser_inv_rkey(struct ib_send_wr *inv_wr,
              struct ib_mr *mr,
              struct ib_cqe *cqe)
{
        inv_wr->opcode = IB_WR_LOCAL_INV;
        inv_wr->wr_cqe = cqe;
        inv_wr->ex.invalidate_rkey = mr->rkey;
        inv_wr->send_flags = 0;
        inv_wr->num_sge = 0;
}
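
/**
 * iser_reg_sig_mr - Register a signature-enabled MR over the data and
 *                   protection registrations
 * @iser_task: task that owns the buffers
 * @pi_ctx: protection information context holding the signature MR
 * @data_reg: registered data buffer
 * @prot_reg: registered protection buffer (unused when the command
 *            carries no protection scatterlist)
 * @sig_reg: resulting registration exposed to the remote side
 *
 * Chains an optional LOCAL_INV and an IB_WR_REG_SIG_MR work request on
 * the task's send chain; they are posted later together with the
 * command itself.
 */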
static int
iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
                struct iser_pi_context *pi_ctx,
                struct iser_mem_reg *data_reg,
                struct iser_mem_reg *prot_reg,
                struct iser_mem_reg *sig_reg)
{
        struct iser_tx_desc *tx_desc = &iser_task->desc;
        struct ib_sig_attrs *sig_attrs = &tx_desc->sig_attrs;
        struct ib_cqe *cqe = &iser_task->iser_conn->ib_conn.reg_cqe;
        struct ib_sig_handover_wr *wr;
        struct ib_mr *mr = pi_ctx->sig_mr;
        int ret;

        memset(sig_attrs, 0, sizeof(*sig_attrs));
        ret = iser_set_sig_attrs(iser_task->sc, sig_attrs);
        if (ret)
                goto err;

        iser_set_prot_checks(iser_task->sc, &sig_attrs->check_mask);

        if (pi_ctx->sig_mr_valid)
                iser_inv_rkey(iser_tx_next_wr(tx_desc), mr, cqe);

        ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));

        wr = sig_handover_wr(iser_tx_next_wr(tx_desc));
        wr->wr.opcode = IB_WR_REG_SIG_MR;
        wr->wr.wr_cqe = cqe;
        wr->wr.sg_list = &data_reg->sge;
        wr->wr.num_sge = 1;
        wr->wr.send_flags = 0;
        wr->sig_attrs = sig_attrs;
        wr->sig_mr = mr;
        if (scsi_prot_sg_count(iser_task->sc))
                wr->prot = &prot_reg->sge;
        else
                wr->prot = NULL;
        wr->access_flags = IB_ACCESS_LOCAL_WRITE |
                           IB_ACCESS_REMOTE_READ |
                           IB_ACCESS_REMOTE_WRITE;
        pi_ctx->sig_mr_valid = 1;

        sig_reg->sge.lkey = mr->lkey;
        sig_reg->rkey = mr->rkey;
        sig_reg->sge.addr = 0;
        sig_reg->sge.length = scsi_transfer_length(iser_task->sc);

        iser_dbg("lkey=0x%x rkey=0x%x addr=0x%llx length=%u\n",
                 sig_reg->sge.lkey, sig_reg->rkey, sig_reg->sge.addr,
                 sig_reg->sge.length);
err:
        return ret;
}
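
/*
 * FastReg path: queue a LOCAL_INV (when the MR is still registered
 * from a previous use) followed by an IB_WR_REG_MR work request on the
 * task's send chain. Error completions for these requests are reported
 * through iser_reg_comp() above.
 */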
static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
                            struct iser_data_buf *mem,
                            struct iser_reg_resources *rsc,
                            struct iser_mem_reg *reg)
{
        struct iser_tx_desc *tx_desc = &iser_task->desc;
        struct ib_cqe *cqe = &iser_task->iser_conn->ib_conn.reg_cqe;
        struct ib_mr *mr = rsc->mr;
        struct ib_reg_wr *wr;
        int n;

        if (rsc->mr_valid)
                iser_inv_rkey(iser_tx_next_wr(tx_desc), mr, cqe);

        ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));

        n = ib_map_mr_sg(mr, mem->sg, mem->size, NULL, SIZE_4K);
        if (unlikely(n != mem->size)) {
                iser_err("failed to map sg (%d/%d)\n",
                         n, mem->size);
                return n < 0 ? n : -EINVAL;
        }

        wr = reg_wr(iser_tx_next_wr(tx_desc));
        wr->wr.opcode = IB_WR_REG_MR;
        wr->wr.wr_cqe = cqe;
        wr->wr.send_flags = 0;
        wr->wr.num_sge = 0;
        wr->mr = mr;
        wr->key = mr->rkey;
        wr->access = IB_ACCESS_LOCAL_WRITE  |
                     IB_ACCESS_REMOTE_WRITE |
                     IB_ACCESS_REMOTE_READ;

        rsc->mr_valid = 1;

        reg->sge.lkey = mr->lkey;
        reg->rkey = mr->rkey;
        reg->sge.addr = mr->iova;
        reg->sge.length = mr->length;

        iser_dbg("lkey=0x%x rkey=0x%x addr=0x%llx length=0x%x\n",
                 reg->sge.lkey, reg->rkey, reg->sge.addr, reg->sge.length);

        return 0;
}
static int
iser_reg_prot_sg(struct iscsi_iser_task *task,
                 struct iser_data_buf *mem,
                 struct iser_fr_desc *desc,
                 bool use_dma_key,
                 struct iser_mem_reg *reg)
{
        struct iser_device *device = task->iser_conn->ib_conn.device;

        if (use_dma_key)
                return iser_reg_dma(device, mem, reg);

        return device->reg_ops->reg_mem(task, mem, &desc->pi_ctx->rsc, reg);
}

static int
iser_reg_data_sg(struct iscsi_iser_task *task,
                 struct iser_data_buf *mem,
                 struct iser_fr_desc *desc,
                 bool use_dma_key,
                 struct iser_mem_reg *reg)
{
        struct iser_device *device = task->iser_conn->ib_conn.device;

        if (use_dma_key)
                return iser_reg_dma(device, mem, reg);

        return device->reg_ops->reg_mem(task, mem, &desc->rsc, reg);
}
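
/**
 * iser_reg_rdma_mem - Register the task's data (and protection) buffers
 * @task: iser task on which the RDMA will be issued
 * @dir: data direction to register
 * @all_imm: all the data is expected to be sent as immediate data, so
 *           a remote key is not strictly required
 *
 * A buffer that maps to a single DMA entry and needs no protection
 * offload is handed out with the local DMA lkey (unless iser_always_reg
 * forces registration); everything else goes through the device's
 * registration ops (FastReg or FMR). When T10-PI is in effect, the
 * data and protection registrations are additionally folded into a
 * signature MR.
 */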
int iser_reg_rdma_mem(struct iscsi_iser_task *task,
                      enum iser_data_dir dir,
                      bool all_imm)
{
        struct ib_conn *ib_conn = &task->iser_conn->ib_conn;
        struct iser_device *device = ib_conn->device;
        struct iser_data_buf *mem = &task->data[dir];
        struct iser_mem_reg *reg = &task->rdma_reg[dir];
        struct iser_mem_reg *data_reg;
        struct iser_fr_desc *desc = NULL;
        bool use_dma_key;
        int err;

        use_dma_key = mem->dma_nents == 1 && (all_imm || !iser_always_reg) &&
                      scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL;

        if (!use_dma_key) {
                desc = device->reg_ops->reg_desc_get(ib_conn);
                reg->mem_h = desc;
        }

        if (scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL)
                data_reg = reg;
        else
                data_reg = &task->desc.data_reg;

        err = iser_reg_data_sg(task, mem, desc, use_dma_key, data_reg);
        if (unlikely(err))
                goto err_reg;

        if (scsi_get_prot_op(task->sc) != SCSI_PROT_NORMAL) {
                struct iser_mem_reg *prot_reg = &task->desc.prot_reg;

                if (scsi_prot_sg_count(task->sc)) {
                        mem = &task->prot[dir];
                        err = iser_reg_prot_sg(task, mem, desc,
                                               use_dma_key, prot_reg);
                        if (unlikely(err))
                                goto err_reg;
                }

                err = iser_reg_sig_mr(task, desc->pi_ctx, data_reg,
                                      prot_reg, reg);
                if (unlikely(err))
                        goto err_reg;

                desc->pi_ctx->sig_protected = 1;
        }

        return 0;

err_reg:
        if (desc)
                device->reg_ops->reg_desc_put(ib_conn, desc);

        return err;
}
void iser_unreg_rdma_mem(struct iscsi_iser_task *task,
                         enum iser_data_dir dir)
{
        struct iser_device *device = task->iser_conn->ib_conn.device;

        device->reg_ops->unreg_mem(task, dir);
}