/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <rdma/ib_umem.h>
#include <linux/atomic.h>
#include <rdma/ib_user_verbs.h>

#include "iw_cxgb4.h"

int use_dsgl = 1;
module_param(use_dsgl, int, 0644);
MODULE_PARM_DESC(use_dsgl, "Use DSGL for PBL/FastReg (default=1) (DEPRECATED)");

#define T4_ULPTX_MIN_IO 32
#define C4IW_MAX_INLINE_SIZE 96
#define T4_ULPTX_MAX_DMA 1024
#define C4IW_INLINE_THRESHOLD 128

static int inline_threshold = C4IW_INLINE_THRESHOLD;
module_param(inline_threshold, int, 0644);
MODULE_PARM_DESC(inline_threshold, "inline vs dsgl threshold (default=128)");
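
/*
 * Memory regions of 8GB or more exceed the limits of T4 and T5
 * hardware, so such registrations are rejected up front on those
 * adapter types.
 */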
static int mr_exceeds_hw_limits(struct c4iw_dev *dev, u64 length)
{
        return (is_t4(dev->rdev.lldi.adapter_type) ||
                is_t5(dev->rdev.lldi.adapter_type)) &&
                length >= 8*1024*1024*1024ULL;
}
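
/*
 * Write a 32-byte-multiple chunk of adapter memory from a DMA-mapped
 * buffer using a single ULP_TX DSGL work request.  'addr' is in
 * 32-byte units.  If 'wr_waitp' is non-NULL the WR requests a
 * completion and is waited on; otherwise it is sent fire-and-forget.
 */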
static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
                                       u32 len, dma_addr_t data,
                                       struct sk_buff *skb,
                                       struct c4iw_wr_wait *wr_waitp)
{
        struct ulp_mem_io *req;
        struct ulptx_sgl *sgl;
        u8 wr_len;
        int ret = 0;

        addr &= 0x7FFFFFF;

        if (wr_waitp)
                c4iw_init_wr_wait(wr_waitp);
        wr_len = roundup(sizeof(*req) + sizeof(*sgl), 16);

        if (!skb) {
                skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
                if (!skb)
                        return -ENOMEM;
        }
        set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

        req = __skb_put_zero(skb, wr_len);
        INIT_ULPTX_WR(req, wr_len, 0, 0);
        req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) |
                        (wr_waitp ? FW_WR_COMPL_F : 0));
        req->wr.wr_lo = wr_waitp ? (__force __be64)(unsigned long)wr_waitp : 0L;
        req->wr.wr_mid = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(wr_len, 16)));
        req->cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
                               T5_ULP_MEMIO_ORDER_V(1) |
                               T5_ULP_MEMIO_FID_V(rdev->lldi.rxq_ids[0]));
        req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN_V(len >> 5));
        req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16));
        req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR_V(addr));

        sgl = (struct ulptx_sgl *)(req + 1);
        sgl->cmd_nsge = cpu_to_be32(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
                                    ULPTX_NSGE_V(1));
        sgl->len0 = cpu_to_be32(len);
        sgl->addr0 = cpu_to_be64(data);

        if (wr_waitp)
                ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0, __func__);
        else
                ret = c4iw_ofld_send(rdev, skb);
        return ret;
}
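
/*
 * Write 'len' bytes to adapter memory as immediate data, in work
 * requests of at most C4IW_MAX_INLINE_SIZE (96) bytes each, padding
 * each WR's payload out to a multiple of T4_ULPTX_MIN_IO (32) bytes.
 * A NULL 'data' pointer zeroes the target memory.  Only the final WR
 * carries FW_WR_COMPL_F and is waited on.
 */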
static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
                                  void *data, struct sk_buff *skb,
                                  struct c4iw_wr_wait *wr_waitp)
{
        struct ulp_mem_io *req;
        struct ulptx_idata *sc;
        u8 wr_len, *to_dp, *from_dp;
        int copy_len, num_wqe, i, ret = 0;
        __be32 cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE));

        if (is_t4(rdev->lldi.adapter_type))
                cmd |= cpu_to_be32(ULP_MEMIO_ORDER_F);
        else
                cmd |= cpu_to_be32(T5_ULP_MEMIO_IMM_F);

        addr &= 0x7FFFFFF;
        pr_debug("addr 0x%x len %u\n", addr, len);
        num_wqe = DIV_ROUND_UP(len, C4IW_MAX_INLINE_SIZE);
        c4iw_init_wr_wait(wr_waitp);
        for (i = 0; i < num_wqe; i++) {

                copy_len = len > C4IW_MAX_INLINE_SIZE ? C4IW_MAX_INLINE_SIZE :
                           len;
                wr_len = roundup(sizeof(*req) + sizeof(*sc) +
                                 roundup(copy_len, T4_ULPTX_MIN_IO), 16);

                if (!skb) {
                        skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
                        if (!skb)
                                return -ENOMEM;
                }
                set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

                req = __skb_put_zero(skb, wr_len);
                INIT_ULPTX_WR(req, wr_len, 0, 0);

                if (i == (num_wqe - 1)) {
                        req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) |
                                                    FW_WR_COMPL_F);
                        req->wr.wr_lo = (__force __be64)(unsigned long)wr_waitp;
                } else
                        req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR));
                req->wr.wr_mid = cpu_to_be32(
                                FW_WR_LEN16_V(DIV_ROUND_UP(wr_len, 16)));

                req->cmd = cmd;
                req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN_V(
                                DIV_ROUND_UP(copy_len, T4_ULPTX_MIN_IO)));
                req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len - sizeof(req->wr),
                                                      16));
                /* addr is in 32B units; each WQE covers 96B, i.e. 3 units. */
                req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR_V(addr + i * 3));

                sc = (struct ulptx_idata *)(req + 1);
                sc->cmd_more = cpu_to_be32(ULPTX_CMD_V(ULP_TX_SC_IMM));
                sc->len = cpu_to_be32(roundup(copy_len, T4_ULPTX_MIN_IO));

                to_dp = (u8 *)(sc + 1);
                from_dp = (u8 *)data + i * C4IW_MAX_INLINE_SIZE;
                if (data)
                        memcpy(to_dp, from_dp, copy_len);
                else
                        memset(to_dp, 0, copy_len);
                if (copy_len % T4_ULPTX_MIN_IO)
                        memset(to_dp + copy_len, 0, T4_ULPTX_MIN_IO -
                               (copy_len % T4_ULPTX_MIN_IO));
                if (i == (num_wqe - 1))
                        ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0,
                                                 __func__);
                else
                        ret = c4iw_ofld_send(rdev, skb);
                if (ret)
                        break;
                skb = NULL;
                len -= C4IW_MAX_INLINE_SIZE;
        }

        return ret;
}
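
/*
 * DMA-map 'data' and write it to adapter memory in chunks of up to
 * T4_ULPTX_MAX_DMA (1024) bytes, each rounded down to a 32-byte
 * multiple.  Any remainder of inline_threshold bytes or less is
 * written via the inline path; the completion rides on whichever WR
 * is last.
 */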
static int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len,
                               void *data, struct sk_buff *skb,
                               struct c4iw_wr_wait *wr_waitp)
{
        u32 remain = len;
        u32 dmalen;
        int ret = 0;
        dma_addr_t daddr;
        dma_addr_t save;

        daddr = dma_map_single(&rdev->lldi.pdev->dev, data, len, DMA_TO_DEVICE);
        if (dma_mapping_error(&rdev->lldi.pdev->dev, daddr))
                return -1;
        save = daddr;

        while (remain > inline_threshold) {
                if (remain < T4_ULPTX_MAX_DMA) {
                        if (remain & ~T4_ULPTX_MIN_IO)
                                dmalen = remain & ~(T4_ULPTX_MIN_IO - 1);
                        else
                                dmalen = remain;
                } else
                        dmalen = T4_ULPTX_MAX_DMA;
                remain -= dmalen;
                ret = _c4iw_write_mem_dma_aligned(rdev, addr, dmalen, daddr,
                                                  skb, remain ? NULL : wr_waitp);
                if (ret)
                        goto out;
                addr += dmalen >> 5;
                data += dmalen;
                daddr += dmalen;
        }
        if (remain)
                ret = _c4iw_write_mem_inline(rdev, addr, remain, data, skb,
                                             wr_waitp);
out:
        dma_unmap_single(&rdev->lldi.pdev->dev, save, len, DMA_TO_DEVICE);
        return ret;
}

/*
 * write len bytes of data into addr (32B aligned address)
 * If data is NULL, clear len bytes of memory to zero.
 */
static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
                             void *data, struct sk_buff *skb,
                             struct c4iw_wr_wait *wr_waitp)
{
        int ret;

        if (!rdev->lldi.ulptx_memwrite_dsgl || !use_dsgl) {
                ret = _c4iw_write_mem_inline(rdev, addr, len, data, skb,
                                             wr_waitp);
                goto out;
        }

        if (len <= inline_threshold) {
                ret = _c4iw_write_mem_inline(rdev, addr, len, data, skb,
                                             wr_waitp);
                goto out;
        }

        ret = _c4iw_write_mem_dma(rdev, addr, len, data, skb, wr_waitp);
        if (ret) {
                pr_warn_ratelimited("%s: dma map failure (non fatal)\n",
                                    pci_name(rdev->lldi.pdev));
                ret = _c4iw_write_mem_inline(rdev, addr, len, data, skb,
                                             wr_waitp);
        }
out:
        return ret;
}

/*
 * Build and write a TPT entry.
 * IN: stag key, pdid, perm, bind_enabled, zbva, to, len, page_size,
 *     pbl_size and pbl_addr
 * OUT: stag index
 */
static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
                           u32 *stag, u8 stag_state, u32 pdid,
                           enum fw_ri_stag_type type, enum fw_ri_mem_perms perm,
                           int bind_enabled, u32 zbva, u64 to,
                           u64 len, u8 page_size, u32 pbl_size, u32 pbl_addr,
                           struct sk_buff *skb, struct c4iw_wr_wait *wr_waitp)
{
        int err;
        struct fw_ri_tpte *tpt;
        u32 stag_idx;
        static atomic_t key;

        if (c4iw_fatal_error(rdev))
                return -EIO;

        tpt = kmalloc(sizeof(*tpt), GFP_KERNEL);
        if (!tpt)
                return -ENOMEM;

        stag_state = stag_state > 0;
        stag_idx = (*stag) >> 8;

        if ((!reset_tpt_entry) && (*stag == T4_STAG_UNSET)) {
                stag_idx = c4iw_get_resource(&rdev->resource.tpt_table);
                if (!stag_idx) {
                        mutex_lock(&rdev->stats.lock);
                        rdev->stats.stag.fail++;
                        mutex_unlock(&rdev->stats.lock);
                        kfree(tpt);
                        return -ENOMEM;
                }
                mutex_lock(&rdev->stats.lock);
                rdev->stats.stag.cur += 32;
                if (rdev->stats.stag.cur > rdev->stats.stag.max)
                        rdev->stats.stag.max = rdev->stats.stag.cur;
                mutex_unlock(&rdev->stats.lock);
                *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
        }
        pr_debug("stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
                 stag_state, type, pdid, stag_idx);

        /* write TPT entry */
        if (reset_tpt_entry)
                memset(tpt, 0, sizeof(*tpt));
        else {
                tpt->valid_to_pdid = cpu_to_be32(FW_RI_TPTE_VALID_F |
                        FW_RI_TPTE_STAGKEY_V((*stag & FW_RI_TPTE_STAGKEY_M)) |
                        FW_RI_TPTE_STAGSTATE_V(stag_state) |
                        FW_RI_TPTE_STAGTYPE_V(type) | FW_RI_TPTE_PDID_V(pdid));
                tpt->locread_to_qpid = cpu_to_be32(FW_RI_TPTE_PERM_V(perm) |
                        (bind_enabled ? FW_RI_TPTE_MWBINDEN_F : 0) |
                        FW_RI_TPTE_ADDRTYPE_V((zbva ? FW_RI_ZERO_BASED_TO :
                                                      FW_RI_VA_BASED_TO)) |
                        FW_RI_TPTE_PS_V(page_size));
                tpt->nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32(
                        FW_RI_TPTE_PBLADDR_V(PBL_OFF(rdev, pbl_addr) >> 3));
                tpt->len_lo = cpu_to_be32((u32)(len & 0xffffffffUL));
                tpt->va_hi = cpu_to_be32((u32)(to >> 32));
                tpt->va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));
                tpt->dca_mwbcnt_pstag = cpu_to_be32(0);
                tpt->len_hi = cpu_to_be32((u32)(len >> 32));
        }
        err = write_adapter_mem(rdev, stag_idx +
                                (rdev->lldi.vr->stag.start >> 5),
                                sizeof(*tpt), tpt, skb, wr_waitp);

        if (reset_tpt_entry) {
                c4iw_put_resource(&rdev->resource.tpt_table, stag_idx);
                mutex_lock(&rdev->stats.lock);
                rdev->stats.stag.cur -= 32;
                mutex_unlock(&rdev->stats.lock);
        }
        kfree(tpt);
        return err;
}
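
/*
 * Write a page list into adapter PBL memory.  'pbl_addr' is a byte
 * address (write_adapter_mem() takes 32-byte units, hence the >> 5)
 * and 'pbl_size' is a count of 8-byte entries (hence the << 3).
 */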
static int write_pbl(struct c4iw_rdev *rdev, __be64 *pbl,
                     u32 pbl_addr, u32 pbl_size, struct c4iw_wr_wait *wr_waitp)
{
        int err;

        pr_debug("*pbl_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
                 pbl_addr, rdev->lldi.vr->pbl.start,
                 pbl_size);

        err = write_adapter_mem(rdev, pbl_addr >> 5, pbl_size << 3, pbl, NULL,
                                wr_waitp);
        return err;
}

static int dereg_mem(struct c4iw_rdev *rdev, u32 stag, u32 pbl_size,
                     u32 pbl_addr, struct sk_buff *skb,
                     struct c4iw_wr_wait *wr_waitp)
{
        return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0,
                               pbl_size, pbl_addr, skb, wr_waitp);
}

static int allocate_window(struct c4iw_rdev *rdev, u32 *stag, u32 pdid,
                           struct c4iw_wr_wait *wr_waitp)
{
        *stag = T4_STAG_UNSET;
        return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_MW, 0, 0, 0,
                               0UL, 0, 0, 0, 0, NULL, wr_waitp);
}

static int deallocate_window(struct c4iw_rdev *rdev, u32 stag,
                             struct sk_buff *skb,
                             struct c4iw_wr_wait *wr_waitp)
{
        return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0, 0,
                               0, skb, wr_waitp);
}

static int allocate_stag(struct c4iw_rdev *rdev, u32 *stag, u32 pdid,
                         u32 pbl_size, u32 pbl_addr,
                         struct c4iw_wr_wait *wr_waitp)
{
        *stag = T4_STAG_UNSET;
        return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_NSMR, 0, 0, 0,
                               0UL, 0, 0, pbl_size, pbl_addr, NULL, wr_waitp);
}
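
/*
 * Complete a successful registration: mark the MR valid, publish the
 * stag as both lkey and rkey, and insert the MR into the device's
 * MMID table keyed by stag >> 8.
 */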
static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag)
{
        u32 mmid;

        mhp->attr.state = 1;
        mhp->attr.stag = stag;
        mmid = stag >> 8;
        mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
        mhp->ibmr.length = mhp->attr.len;
        mhp->ibmr.iova = mhp->attr.va_fbo;
        mhp->ibmr.page_size = 1U << (mhp->attr.page_size + 12);
        pr_debug("mmid 0x%x mhp %p\n", mmid, mhp);
        return insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
}

static int register_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
                        struct c4iw_mr *mhp, int shift)
{
        u32 stag = T4_STAG_UNSET;
        int ret;

        ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
                              FW_RI_STAG_NSMR, mhp->attr.len ?
                              mhp->attr.perms : 0,
                              mhp->attr.mw_bind_enable, mhp->attr.zbva,
                              mhp->attr.va_fbo, mhp->attr.len ?
                              mhp->attr.len : -1, shift - 12,
                              mhp->attr.pbl_size, mhp->attr.pbl_addr, NULL,
                              mhp->wr_waitp);
        if (ret)
                return ret;

        ret = finish_mem_reg(mhp, stag);
        if (ret) {
                dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
                          mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp);
                mhp->dereg_skb = NULL;
        }
        return ret;
}

static int alloc_pbl(struct c4iw_mr *mhp, int npages)
{
        mhp->attr.pbl_addr = c4iw_pblpool_alloc(&mhp->rhp->rdev,
                                                npages << 3);
        if (!mhp->attr.pbl_addr)
                return -ENOMEM;

        mhp->attr.pbl_size = npages;

        return 0;
}
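
/*
 * Allocate a DMA MR: a TPT entry with va_fbo 0 and length ~0ULL,
 * i.e. covering the whole address space, on the given PD.
 */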
struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
{
        struct c4iw_dev *rhp;
        struct c4iw_pd *php;
        struct c4iw_mr *mhp;
        int ret;
        u32 stag = T4_STAG_UNSET;

        pr_debug("ib_pd %p\n", pd);
        php = to_c4iw_pd(pd);
        rhp = php->rhp;

        mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
        if (!mhp)
                return ERR_PTR(-ENOMEM);
        mhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
        if (!mhp->wr_waitp) {
                ret = -ENOMEM;
                goto err_free_mhp;
        }
        c4iw_init_wr_wait(mhp->wr_waitp);

        mhp->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL);
        if (!mhp->dereg_skb) {
                ret = -ENOMEM;
                goto err_free_wr_wait;
        }

        mhp->rhp = rhp;
        mhp->attr.pdid = php->pdid;
        mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
        mhp->attr.mw_bind_enable = (acc & IB_ACCESS_MW_BIND) == IB_ACCESS_MW_BIND;
        mhp->attr.zbva = 0;
        mhp->attr.va_fbo = 0;
        mhp->attr.page_size = 0;
        mhp->attr.len = ~0ULL;
        mhp->attr.pbl_size = 0;

        ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid,
                              FW_RI_STAG_NSMR, mhp->attr.perms,
                              mhp->attr.mw_bind_enable, 0, 0, ~0ULL, 0, 0, 0,
                              NULL, mhp->wr_waitp);
        if (ret)
                goto err_free_skb;

        ret = finish_mem_reg(mhp, stag);
        if (ret)
                goto err_dereg_mem;
        return &mhp->ibmr;
err_dereg_mem:
        dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
                  mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp);
err_free_skb:
        kfree_skb(mhp->dereg_skb);
err_free_wr_wait:
        c4iw_put_wr_wait(mhp->wr_waitp);
err_free_mhp:
        kfree(mhp);
        return ERR_PTR(ret);
}
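
/*
 * Register a user memory region: pin the user pages with
 * ib_umem_get(), copy their DMA addresses into the PBL one page-sized
 * batch at a time via write_pbl(), then write the TPT entry.
 */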
struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                               u64 virt, int acc, struct ib_udata *udata)
{
        __be64 *pages;
        int shift, n, len;
        int i, k, entry;
        int err = -ENOMEM;
        struct scatterlist *sg;
        struct c4iw_dev *rhp;
        struct c4iw_pd *php;
        struct c4iw_mr *mhp;

        pr_debug("ib_pd %p\n", pd);

        if (length == ~0ULL)
                return ERR_PTR(-EINVAL);

        if ((length + start) < start)
                return ERR_PTR(-EINVAL);

        php = to_c4iw_pd(pd);
        rhp = php->rhp;

        if (mr_exceeds_hw_limits(rhp, length))
                return ERR_PTR(-EINVAL);

        mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
        if (!mhp)
                return ERR_PTR(-ENOMEM);
        mhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
        if (!mhp->wr_waitp)
                goto err_free_mhp;

        mhp->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL);
        if (!mhp->dereg_skb)
                goto err_free_wr_wait;

        mhp->rhp = rhp;

        mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
        if (IS_ERR(mhp->umem))
                goto err_free_skb;

        shift = mhp->umem->page_shift;

        n = mhp->umem->nmap;
        err = alloc_pbl(mhp, n);
        if (err)
                goto err_umem_release;

        pages = (__be64 *) __get_free_page(GFP_KERNEL);
        if (!pages) {
                err = -ENOMEM;
                goto err_pbl_free;
        }

        i = n = 0;

        for_each_sg(mhp->umem->sg_head.sgl, sg, mhp->umem->nmap, entry) {
                len = sg_dma_len(sg) >> shift;
                for (k = 0; k < len; ++k) {
                        pages[i++] = cpu_to_be64(sg_dma_address(sg) +
                                                 (k << shift));
                        if (i == PAGE_SIZE / sizeof(*pages)) {
                                err = write_pbl(&mhp->rhp->rdev,
                                                pages,
                                                mhp->attr.pbl_addr + (n << 3), i,
                                                mhp->wr_waitp);
                                if (err)
                                        goto pbl_done;
                                n += i;
                                i = 0;
                        }
                }
        }

        if (i)
                err = write_pbl(&mhp->rhp->rdev, pages,
                                mhp->attr.pbl_addr + (n << 3), i,
                                mhp->wr_waitp);

pbl_done:
        free_page((unsigned long) pages);
        if (err)
                goto err_pbl_free;

        mhp->attr.pdid = php->pdid;
        mhp->attr.zbva = 0;
        mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
        mhp->attr.va_fbo = virt;
        mhp->attr.page_size = shift - 12;
        mhp->attr.len = length;

        err = register_mem(rhp, php, mhp, shift);
        if (err)
                goto err_pbl_free;

        return &mhp->ibmr;

err_pbl_free:
        c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
                          mhp->attr.pbl_size << 3);
err_umem_release:
        ib_umem_release(mhp->umem);
err_free_skb:
        kfree_skb(mhp->dereg_skb);
err_free_wr_wait:
        c4iw_put_wr_wait(mhp->wr_waitp);
err_free_mhp:
        kfree(mhp);
        return ERR_PTR(err);
}

struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
                            struct ib_udata *udata)
{
        struct c4iw_dev *rhp;
        struct c4iw_pd *php;
        struct c4iw_mw *mhp;
        u32 mmid;
        u32 stag = 0;
        int ret;

        if (type != IB_MW_TYPE_1)
                return ERR_PTR(-EINVAL);

        php = to_c4iw_pd(pd);
        rhp = php->rhp;
        mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
        if (!mhp)
                return ERR_PTR(-ENOMEM);

        mhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
        if (!mhp->wr_waitp) {
                ret = -ENOMEM;
                goto free_mhp;
        }

        mhp->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL);
        if (!mhp->dereg_skb) {
                ret = -ENOMEM;
                goto free_wr_wait;
        }

        ret = allocate_window(&rhp->rdev, &stag, php->pdid, mhp->wr_waitp);
        if (ret)
                goto free_skb;
        mhp->rhp = rhp;
        mhp->attr.pdid = php->pdid;
        mhp->attr.type = FW_RI_STAG_MW;
        mhp->attr.stag = stag;
        mmid = (stag) >> 8;
        mhp->ibmw.rkey = stag;
        if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
                ret = -ENOMEM;
                goto dealloc_win;
        }
        pr_debug("mmid 0x%x mhp %p stag 0x%x\n", mmid, mhp, stag);
        return &(mhp->ibmw);

dealloc_win:
        deallocate_window(&rhp->rdev, mhp->attr.stag, mhp->dereg_skb,
                          mhp->wr_waitp);
free_skb:
        kfree_skb(mhp->dereg_skb);
free_wr_wait:
        c4iw_put_wr_wait(mhp->wr_waitp);
free_mhp:
        kfree(mhp);
        return ERR_PTR(ret);
}

int c4iw_dealloc_mw(struct ib_mw *mw)
{
        struct c4iw_dev *rhp;
        struct c4iw_mw *mhp;
        u32 mmid;

        mhp = to_c4iw_mw(mw);
        rhp = mhp->rhp;
        mmid = (mw->rkey) >> 8;
        remove_handle(rhp, &rhp->mmidr, mmid);
        deallocate_window(&rhp->rdev, mhp->attr.stag, mhp->dereg_skb,
                          mhp->wr_waitp);
        kfree_skb(mhp->dereg_skb);
        c4iw_put_wr_wait(mhp->wr_waitp);
        kfree(mhp);
        pr_debug("ib_mw %p mmid 0x%x ptr %p\n", mw, mmid, mhp);
        return 0;
}
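
/*
 * Allocate a fast-register MR: reserve a non-shared MR stag and a PBL,
 * plus a DMA-coherent buffer ('mpl') that c4iw_map_mr_sg() later fills
 * with page addresses.  The MR starts out invalid (state 0).
 */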
struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd,
                            enum ib_mr_type mr_type,
                            u32 max_num_sg)
{
        struct c4iw_dev *rhp;
        struct c4iw_pd *php;
        struct c4iw_mr *mhp;
        u32 mmid;
        u32 stag = 0;
        int ret = 0;
        int length = roundup(max_num_sg * sizeof(u64), 32);

        php = to_c4iw_pd(pd);
        rhp = php->rhp;

        if (mr_type != IB_MR_TYPE_MEM_REG ||
            max_num_sg > t4_max_fr_depth(rhp->rdev.lldi.ulptx_memwrite_dsgl &&
                                         use_dsgl))
                return ERR_PTR(-EINVAL);

        mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
        if (!mhp) {
                ret = -ENOMEM;
                goto err;
        }

        mhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
        if (!mhp->wr_waitp) {
                ret = -ENOMEM;
                goto err_free_mhp;
        }
        c4iw_init_wr_wait(mhp->wr_waitp);

        mhp->mpl = dma_alloc_coherent(&rhp->rdev.lldi.pdev->dev,
                                      length, &mhp->mpl_addr, GFP_KERNEL);
        if (!mhp->mpl) {
                ret = -ENOMEM;
                goto err_free_wr_wait;
        }
        mhp->max_mpl_len = length;

        mhp->rhp = rhp;
        ret = alloc_pbl(mhp, max_num_sg);
        if (ret)
                goto err_free_dma;
        mhp->attr.pbl_size = max_num_sg;
        ret = allocate_stag(&rhp->rdev, &stag, php->pdid,
                            mhp->attr.pbl_size, mhp->attr.pbl_addr,
                            mhp->wr_waitp);
        if (ret)
                goto err_free_pbl;
        mhp->attr.pdid = php->pdid;
        mhp->attr.type = FW_RI_STAG_NSMR;
        mhp->attr.stag = stag;
        mhp->attr.state = 0;
        mmid = (stag) >> 8;
        mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
        if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
                ret = -ENOMEM;
                goto err_dereg;
        }

        pr_debug("mmid 0x%x mhp %p stag 0x%x\n", mmid, mhp, stag);
        return &(mhp->ibmr);
err_dereg:
        dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
                  mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp);
err_free_pbl:
        c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
                          mhp->attr.pbl_size << 3);
err_free_dma:
        dma_free_coherent(&mhp->rhp->rdev.lldi.pdev->dev,
                          mhp->max_mpl_len, mhp->mpl, mhp->mpl_addr);
err_free_wr_wait:
        c4iw_put_wr_wait(mhp->wr_waitp);
err_free_mhp:
        kfree(mhp);
err:
        return ERR_PTR(ret);
}
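
/* Callback for ib_sg_to_pages(): append one page address to the MR's list. */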
static int c4iw_set_page(struct ib_mr *ibmr, u64 addr)
{
        struct c4iw_mr *mhp = to_c4iw_mr(ibmr);

        if (unlikely(mhp->mpl_len == mhp->attr.pbl_size))
                return -ENOMEM;

        mhp->mpl[mhp->mpl_len++] = addr;

        return 0;
}

int c4iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
                   unsigned int *sg_offset)
{
        struct c4iw_mr *mhp = to_c4iw_mr(ibmr);

        mhp->mpl_len = 0;

        return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, c4iw_set_page);
}
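
/*
 * Destroy an MR: unhash its MMID, free the fast-register page list if
 * present, clear the TPT entry, and release the PBL and any pinned
 * user memory.
 */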
int c4iw_dereg_mr(struct ib_mr *ib_mr)
{
        struct c4iw_dev *rhp;
        struct c4iw_mr *mhp;
        u32 mmid;

        pr_debug("ib_mr %p\n", ib_mr);

        mhp = to_c4iw_mr(ib_mr);
        rhp = mhp->rhp;
        mmid = mhp->attr.stag >> 8;
        remove_handle(rhp, &rhp->mmidr, mmid);
        if (mhp->mpl)
                dma_free_coherent(&mhp->rhp->rdev.lldi.pdev->dev,
                                  mhp->max_mpl_len, mhp->mpl, mhp->mpl_addr);
        dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
                  mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp);
        if (mhp->attr.pbl_size)
                c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
                                  mhp->attr.pbl_size << 3);
        if (mhp->kva)
                kfree((void *) (unsigned long) mhp->kva);
        if (mhp->umem)
                ib_umem_release(mhp->umem);
        pr_debug("mmid 0x%x ptr %p\n", mmid, mhp);
        c4iw_put_wr_wait(mhp->wr_waitp);
        kfree(mhp);
        return 0;
}

/* Mark the MR whose stag index matches rkey >> 8 as invalid. */
void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey)
{
        struct c4iw_mr *mhp;
        unsigned long flags;

        spin_lock_irqsave(&rhp->lock, flags);
        mhp = get_mhp(rhp, rkey >> 8);
        if (mhp)
                mhp->attr.state = 0;
        spin_unlock_irqrestore(&rhp->lock, flags);
}