/* drivers/net/ethernet/mellanox/mlx5/core/srq.c */
  1. /*
  2. * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  3. *
  4. * This software is available to you under a choice of one of two
  5. * licenses. You may choose to be licensed under the terms of the GNU
  6. * General Public License (GPL) Version 2, available from the file
  7. * COPYING in the main directory of this source tree, or the
  8. * OpenIB.org BSD license below:
  9. *
  10. * Redistribution and use in source and binary forms, with or
  11. * without modification, are permitted provided that the following
  12. * conditions are met:
  13. *
  14. * - Redistributions of source code must retain the above
  15. * copyright notice, this list of conditions and the following
  16. * disclaimer.
  17. *
  18. * - Redistributions in binary form must reproduce the above
  19. * copyright notice, this list of conditions and the following
  20. * disclaimer in the documentation and/or other materials
  21. * provided with the distribution.
  22. *
  23. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30. * SOFTWARE.
  31. */
  32. #include <linux/kernel.h>
  33. #include <linux/module.h>
  34. #include <linux/mlx5/driver.h>
  35. #include <linux/mlx5/cmd.h>
  36. #include <linux/mlx5/srq.h>
  37. #include <rdma/ib_verbs.h>
  38. #include "mlx5_core.h"
  39. #include <linux/mlx5/transobj.h>
  40. void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type)
  41. {
  42. struct mlx5_srq_table *table = &dev->priv.srq_table;
  43. struct mlx5_core_srq *srq;
  44. spin_lock(&table->lock);
  45. srq = radix_tree_lookup(&table->tree, srqn);
  46. if (srq)
  47. atomic_inc(&srq->refcount);
  48. spin_unlock(&table->lock);
  49. if (!srq) {
  50. mlx5_core_warn(dev, "Async event for bogus SRQ 0x%08x\n", srqn);
  51. return;
  52. }
  53. srq->event(srq, event_type);
  54. if (atomic_dec_and_test(&srq->refcount))
  55. complete(&srq->free);
  56. }
  57. static int get_pas_size(struct mlx5_srq_attr *in)
  58. {
  59. u32 log_page_size = in->log_page_size + 12;
  60. u32 log_srq_size = in->log_size;
  61. u32 log_rq_stride = in->wqe_shift;
  62. u32 page_offset = in->page_offset;
  63. u32 po_quanta = 1 << (log_page_size - 6);
  64. u32 rq_sz = 1 << (log_srq_size + 4 + log_rq_stride);
  65. u32 page_size = 1 << log_page_size;
  66. u32 rq_sz_po = rq_sz + (page_offset * po_quanta);
  67. u32 rq_num_pas = (rq_sz_po + page_size - 1) / page_size;
  68. return rq_num_pas * sizeof(u64);
  69. }
/* Fill a "wq" context (the ISSI>0, RMP-backed SRQ layout) from generic
 * SRQ attributes.  log_wq_stride is programmed relative to the 16-byte
 * minimum WQE stride, hence the +4 bias on wqe_shift; get_wq() reverses
 * it.
 */
static void set_wq(void *wq, struct mlx5_srq_attr *in)
{
	MLX5_SET(wq, wq, wq_signature, !!(in->flags
		 & MLX5_SRQ_FLAG_WQ_SIG));
	MLX5_SET(wq, wq, log_wq_pg_sz, in->log_page_size);
	MLX5_SET(wq, wq, log_wq_stride, in->wqe_shift + 4);
	MLX5_SET(wq, wq, log_wq_sz, in->log_size);
	MLX5_SET(wq, wq, page_offset, in->page_offset);
	MLX5_SET(wq, wq, lwm, in->lwm);
	MLX5_SET(wq, wq, pd, in->pd);
	MLX5_SET64(wq, wq, dbr_addr, in->db_record);
}
/* Fill a legacy "srqc" context (ISSI==0 SRQ / XRC SRQ layout) from
 * generic SRQ attributes.  Unlike set_wq(), log_rq_stride takes
 * wqe_shift directly (no +4 bias), and the srqc additionally carries
 * the XRC domain and CQ number.
 */
static void set_srqc(void *srqc, struct mlx5_srq_attr *in)
{
	MLX5_SET(srqc, srqc, wq_signature, !!(in->flags
		 & MLX5_SRQ_FLAG_WQ_SIG));
	MLX5_SET(srqc, srqc, log_page_size, in->log_page_size);
	MLX5_SET(srqc, srqc, log_rq_stride, in->wqe_shift);
	MLX5_SET(srqc, srqc, log_srq_size, in->log_size);
	MLX5_SET(srqc, srqc, page_offset, in->page_offset);
	MLX5_SET(srqc, srqc, lwm, in->lwm);
	MLX5_SET(srqc, srqc, pd, in->pd);
	MLX5_SET64(srqc, srqc, dbr_addr, in->db_record);
	MLX5_SET(srqc, srqc, xrcd, in->xrcd);
	MLX5_SET(srqc, srqc, cqn, in->cqn);
}
  96. static void get_wq(void *wq, struct mlx5_srq_attr *in)
  97. {
  98. if (MLX5_GET(wq, wq, wq_signature))
  99. in->flags &= MLX5_SRQ_FLAG_WQ_SIG;
  100. in->log_page_size = MLX5_GET(wq, wq, log_wq_pg_sz);
  101. in->wqe_shift = MLX5_GET(wq, wq, log_wq_stride) - 4;
  102. in->log_size = MLX5_GET(wq, wq, log_wq_sz);
  103. in->page_offset = MLX5_GET(wq, wq, page_offset);
  104. in->lwm = MLX5_GET(wq, wq, lwm);
  105. in->pd = MLX5_GET(wq, wq, pd);
  106. in->db_record = MLX5_GET64(wq, wq, dbr_addr);
  107. }
  108. static void get_srqc(void *srqc, struct mlx5_srq_attr *in)
  109. {
  110. if (MLX5_GET(srqc, srqc, wq_signature))
  111. in->flags &= MLX5_SRQ_FLAG_WQ_SIG;
  112. in->log_page_size = MLX5_GET(srqc, srqc, log_page_size);
  113. in->wqe_shift = MLX5_GET(srqc, srqc, log_rq_stride);
  114. in->log_size = MLX5_GET(srqc, srqc, log_srq_size);
  115. in->page_offset = MLX5_GET(srqc, srqc, page_offset);
  116. in->lwm = MLX5_GET(srqc, srqc, lwm);
  117. in->pd = MLX5_GET(srqc, srqc, pd);
  118. in->db_record = MLX5_GET64(srqc, srqc, dbr_addr);
  119. }
  120. struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn)
  121. {
  122. struct mlx5_srq_table *table = &dev->priv.srq_table;
  123. struct mlx5_core_srq *srq;
  124. spin_lock(&table->lock);
  125. srq = radix_tree_lookup(&table->tree, srqn);
  126. if (srq)
  127. atomic_inc(&srq->refcount);
  128. spin_unlock(&table->lock);
  129. return srq;
  130. }
  131. EXPORT_SYMBOL(mlx5_core_get_srq);
  132. static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
  133. struct mlx5_srq_attr *in)
  134. {
  135. u32 create_out[MLX5_ST_SZ_DW(create_srq_out)] = {0};
  136. void *create_in;
  137. void *srqc;
  138. void *pas;
  139. int pas_size;
  140. int inlen;
  141. int err;
  142. pas_size = get_pas_size(in);
  143. inlen = MLX5_ST_SZ_BYTES(create_srq_in) + pas_size;
  144. create_in = mlx5_vzalloc(inlen);
  145. if (!create_in)
  146. return -ENOMEM;
  147. srqc = MLX5_ADDR_OF(create_srq_in, create_in, srq_context_entry);
  148. pas = MLX5_ADDR_OF(create_srq_in, create_in, pas);
  149. set_srqc(srqc, in);
  150. memcpy(pas, in->pas, pas_size);
  151. MLX5_SET(create_srq_in, create_in, opcode,
  152. MLX5_CMD_OP_CREATE_SRQ);
  153. err = mlx5_cmd_exec(dev, create_in, inlen, create_out,
  154. sizeof(create_out));
  155. kvfree(create_in);
  156. if (!err)
  157. srq->srqn = MLX5_GET(create_srq_out, create_out, srqn);
  158. return err;
  159. }
  160. static int destroy_srq_cmd(struct mlx5_core_dev *dev,
  161. struct mlx5_core_srq *srq)
  162. {
  163. u32 srq_in[MLX5_ST_SZ_DW(destroy_srq_in)] = {0};
  164. u32 srq_out[MLX5_ST_SZ_DW(destroy_srq_out)] = {0};
  165. MLX5_SET(destroy_srq_in, srq_in, opcode,
  166. MLX5_CMD_OP_DESTROY_SRQ);
  167. MLX5_SET(destroy_srq_in, srq_in, srqn, srq->srqn);
  168. return mlx5_cmd_exec(dev, srq_in, sizeof(srq_in),
  169. srq_out, sizeof(srq_out));
  170. }
  171. static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
  172. u16 lwm, int is_srq)
  173. {
  174. /* arm_srq structs missing using identical xrc ones */
  175. u32 srq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {0};
  176. u32 srq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)] = {0};
  177. MLX5_SET(arm_xrc_srq_in, srq_in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ);
  178. MLX5_SET(arm_xrc_srq_in, srq_in, xrc_srqn, srq->srqn);
  179. MLX5_SET(arm_xrc_srq_in, srq_in, lwm, lwm);
  180. return mlx5_cmd_exec(dev, srq_in, sizeof(srq_in),
  181. srq_out, sizeof(srq_out));
  182. }
  183. static int query_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
  184. struct mlx5_srq_attr *out)
  185. {
  186. u32 srq_in[MLX5_ST_SZ_DW(query_srq_in)] = {0};
  187. u32 *srq_out;
  188. void *srqc;
  189. int err;
  190. srq_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_srq_out));
  191. if (!srq_out)
  192. return -ENOMEM;
  193. MLX5_SET(query_srq_in, srq_in, opcode,
  194. MLX5_CMD_OP_QUERY_SRQ);
  195. MLX5_SET(query_srq_in, srq_in, srqn, srq->srqn);
  196. err = mlx5_cmd_exec(dev, srq_in, sizeof(srq_in),
  197. srq_out, MLX5_ST_SZ_BYTES(query_srq_out));
  198. if (err)
  199. goto out;
  200. srqc = MLX5_ADDR_OF(query_srq_out, srq_out, srq_context_entry);
  201. get_srqc(srqc, out);
  202. if (MLX5_GET(srqc, srqc, state) != MLX5_SRQC_STATE_GOOD)
  203. out->flags |= MLX5_SRQ_FLAG_ERR;
  204. out:
  205. kvfree(srq_out);
  206. return err;
  207. }
  208. static int create_xrc_srq_cmd(struct mlx5_core_dev *dev,
  209. struct mlx5_core_srq *srq,
  210. struct mlx5_srq_attr *in)
  211. {
  212. u32 create_out[MLX5_ST_SZ_DW(create_xrc_srq_out)];
  213. void *create_in;
  214. void *xrc_srqc;
  215. void *pas;
  216. int pas_size;
  217. int inlen;
  218. int err;
  219. pas_size = get_pas_size(in);
  220. inlen = MLX5_ST_SZ_BYTES(create_xrc_srq_in) + pas_size;
  221. create_in = mlx5_vzalloc(inlen);
  222. if (!create_in)
  223. return -ENOMEM;
  224. xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, create_in,
  225. xrc_srq_context_entry);
  226. pas = MLX5_ADDR_OF(create_xrc_srq_in, create_in, pas);
  227. set_srqc(xrc_srqc, in);
  228. MLX5_SET(xrc_srqc, xrc_srqc, user_index, in->user_index);
  229. memcpy(pas, in->pas, pas_size);
  230. MLX5_SET(create_xrc_srq_in, create_in, opcode,
  231. MLX5_CMD_OP_CREATE_XRC_SRQ);
  232. memset(create_out, 0, sizeof(create_out));
  233. err = mlx5_cmd_exec(dev, create_in, inlen, create_out,
  234. sizeof(create_out));
  235. if (err)
  236. goto out;
  237. srq->srqn = MLX5_GET(create_xrc_srq_out, create_out, xrc_srqn);
  238. out:
  239. kvfree(create_in);
  240. return err;
  241. }
  242. static int destroy_xrc_srq_cmd(struct mlx5_core_dev *dev,
  243. struct mlx5_core_srq *srq)
  244. {
  245. u32 xrcsrq_in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)] = {0};
  246. u32 xrcsrq_out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)] = {0};
  247. MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, opcode,
  248. MLX5_CMD_OP_DESTROY_XRC_SRQ);
  249. MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
  250. return mlx5_cmd_exec(dev, xrcsrq_in, sizeof(xrcsrq_in),
  251. xrcsrq_out, sizeof(xrcsrq_out));
  252. }
  253. static int arm_xrc_srq_cmd(struct mlx5_core_dev *dev,
  254. struct mlx5_core_srq *srq, u16 lwm)
  255. {
  256. u32 xrcsrq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {0};
  257. u32 xrcsrq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)] = {0};
  258. MLX5_SET(arm_xrc_srq_in, xrcsrq_in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ);
  259. MLX5_SET(arm_xrc_srq_in, xrcsrq_in, op_mod, MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ);
  260. MLX5_SET(arm_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
  261. MLX5_SET(arm_xrc_srq_in, xrcsrq_in, lwm, lwm);
  262. return mlx5_cmd_exec(dev, xrcsrq_in, sizeof(xrcsrq_in),
  263. xrcsrq_out, sizeof(xrcsrq_out));
  264. }
  265. static int query_xrc_srq_cmd(struct mlx5_core_dev *dev,
  266. struct mlx5_core_srq *srq,
  267. struct mlx5_srq_attr *out)
  268. {
  269. u32 xrcsrq_in[MLX5_ST_SZ_DW(query_xrc_srq_in)];
  270. u32 *xrcsrq_out;
  271. void *xrc_srqc;
  272. int err;
  273. xrcsrq_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_xrc_srq_out));
  274. if (!xrcsrq_out)
  275. return -ENOMEM;
  276. memset(xrcsrq_in, 0, sizeof(xrcsrq_in));
  277. MLX5_SET(query_xrc_srq_in, xrcsrq_in, opcode,
  278. MLX5_CMD_OP_QUERY_XRC_SRQ);
  279. MLX5_SET(query_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
  280. err = mlx5_cmd_exec(dev, xrcsrq_in, sizeof(xrcsrq_in), xrcsrq_out,
  281. MLX5_ST_SZ_BYTES(query_xrc_srq_out));
  282. if (err)
  283. goto out;
  284. xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, xrcsrq_out,
  285. xrc_srq_context_entry);
  286. get_srqc(xrc_srqc, out);
  287. if (MLX5_GET(xrc_srqc, xrc_srqc, state) != MLX5_XRC_SRQC_STATE_GOOD)
  288. out->flags |= MLX5_SRQ_FLAG_ERR;
  289. out:
  290. kvfree(xrcsrq_out);
  291. return err;
  292. }
  293. static int create_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
  294. struct mlx5_srq_attr *in)
  295. {
  296. void *create_in;
  297. void *rmpc;
  298. void *wq;
  299. int pas_size;
  300. int inlen;
  301. int err;
  302. pas_size = get_pas_size(in);
  303. inlen = MLX5_ST_SZ_BYTES(create_rmp_in) + pas_size;
  304. create_in = mlx5_vzalloc(inlen);
  305. if (!create_in)
  306. return -ENOMEM;
  307. rmpc = MLX5_ADDR_OF(create_rmp_in, create_in, ctx);
  308. wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
  309. MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
  310. set_wq(wq, in);
  311. memcpy(MLX5_ADDR_OF(rmpc, rmpc, wq.pas), in->pas, pas_size);
  312. err = mlx5_core_create_rmp(dev, create_in, inlen, &srq->srqn);
  313. kvfree(create_in);
  314. return err;
  315. }
/* Tear down the RMP backing an ISSI>0 non-XRC SRQ (thin wrapper kept
 * so the *_srq_split() dispatchers have a uniform signature).
 */
static int destroy_rmp_cmd(struct mlx5_core_dev *dev,
			   struct mlx5_core_srq *srq)
{
	return mlx5_core_destroy_rmp(dev, srq->srqn);
}
  321. static int arm_rmp_cmd(struct mlx5_core_dev *dev,
  322. struct mlx5_core_srq *srq,
  323. u16 lwm)
  324. {
  325. void *in;
  326. void *rmpc;
  327. void *wq;
  328. void *bitmask;
  329. int err;
  330. in = mlx5_vzalloc(MLX5_ST_SZ_BYTES(modify_rmp_in));
  331. if (!in)
  332. return -ENOMEM;
  333. rmpc = MLX5_ADDR_OF(modify_rmp_in, in, ctx);
  334. bitmask = MLX5_ADDR_OF(modify_rmp_in, in, bitmask);
  335. wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
  336. MLX5_SET(modify_rmp_in, in, rmp_state, MLX5_RMPC_STATE_RDY);
  337. MLX5_SET(modify_rmp_in, in, rmpn, srq->srqn);
  338. MLX5_SET(wq, wq, lwm, lwm);
  339. MLX5_SET(rmp_bitmask, bitmask, lwm, 1);
  340. MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
  341. err = mlx5_core_modify_rmp(dev, in, MLX5_ST_SZ_BYTES(modify_rmp_in));
  342. kvfree(in);
  343. return err;
  344. }
  345. static int query_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
  346. struct mlx5_srq_attr *out)
  347. {
  348. u32 *rmp_out;
  349. void *rmpc;
  350. int err;
  351. rmp_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_rmp_out));
  352. if (!rmp_out)
  353. return -ENOMEM;
  354. err = mlx5_core_query_rmp(dev, srq->srqn, rmp_out);
  355. if (err)
  356. goto out;
  357. rmpc = MLX5_ADDR_OF(query_rmp_out, rmp_out, rmp_context);
  358. get_wq(MLX5_ADDR_OF(rmpc, rmpc, wq), out);
  359. if (MLX5_GET(rmpc, rmpc, state) != MLX5_RMPC_STATE_RDY)
  360. out->flags |= MLX5_SRQ_FLAG_ERR;
  361. out:
  362. kvfree(rmp_out);
  363. return err;
  364. }
  365. static int create_srq_split(struct mlx5_core_dev *dev,
  366. struct mlx5_core_srq *srq,
  367. struct mlx5_srq_attr *in)
  368. {
  369. if (!dev->issi)
  370. return create_srq_cmd(dev, srq, in);
  371. else if (srq->common.res == MLX5_RES_XSRQ)
  372. return create_xrc_srq_cmd(dev, srq, in);
  373. else
  374. return create_rmp_cmd(dev, srq, in);
  375. }
  376. static int destroy_srq_split(struct mlx5_core_dev *dev,
  377. struct mlx5_core_srq *srq)
  378. {
  379. if (!dev->issi)
  380. return destroy_srq_cmd(dev, srq);
  381. else if (srq->common.res == MLX5_RES_XSRQ)
  382. return destroy_xrc_srq_cmd(dev, srq);
  383. else
  384. return destroy_rmp_cmd(dev, srq);
  385. }
  386. int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
  387. struct mlx5_srq_attr *in)
  388. {
  389. int err;
  390. struct mlx5_srq_table *table = &dev->priv.srq_table;
  391. if (in->type == IB_SRQT_XRC)
  392. srq->common.res = MLX5_RES_XSRQ;
  393. else
  394. srq->common.res = MLX5_RES_SRQ;
  395. err = create_srq_split(dev, srq, in);
  396. if (err)
  397. return err;
  398. atomic_set(&srq->refcount, 1);
  399. init_completion(&srq->free);
  400. spin_lock_irq(&table->lock);
  401. err = radix_tree_insert(&table->tree, srq->srqn, srq);
  402. spin_unlock_irq(&table->lock);
  403. if (err) {
  404. mlx5_core_warn(dev, "err %d, srqn 0x%x\n", err, srq->srqn);
  405. goto err_destroy_srq_split;
  406. }
  407. return 0;
  408. err_destroy_srq_split:
  409. destroy_srq_split(dev, srq);
  410. return err;
  411. }
  412. EXPORT_SYMBOL(mlx5_core_create_srq);
  413. int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)
  414. {
  415. struct mlx5_srq_table *table = &dev->priv.srq_table;
  416. struct mlx5_core_srq *tmp;
  417. int err;
  418. spin_lock_irq(&table->lock);
  419. tmp = radix_tree_delete(&table->tree, srq->srqn);
  420. spin_unlock_irq(&table->lock);
  421. if (!tmp) {
  422. mlx5_core_warn(dev, "srq 0x%x not found in tree\n", srq->srqn);
  423. return -EINVAL;
  424. }
  425. if (tmp != srq) {
  426. mlx5_core_warn(dev, "corruption on srqn 0x%x\n", srq->srqn);
  427. return -EINVAL;
  428. }
  429. err = destroy_srq_split(dev, srq);
  430. if (err)
  431. return err;
  432. if (atomic_dec_and_test(&srq->refcount))
  433. complete(&srq->free);
  434. wait_for_completion(&srq->free);
  435. return 0;
  436. }
  437. EXPORT_SYMBOL(mlx5_core_destroy_srq);
  438. int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
  439. struct mlx5_srq_attr *out)
  440. {
  441. if (!dev->issi)
  442. return query_srq_cmd(dev, srq, out);
  443. else if (srq->common.res == MLX5_RES_XSRQ)
  444. return query_xrc_srq_cmd(dev, srq, out);
  445. else
  446. return query_rmp_cmd(dev, srq, out);
  447. }
  448. EXPORT_SYMBOL(mlx5_core_query_srq);
  449. int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
  450. u16 lwm, int is_srq)
  451. {
  452. if (!dev->issi)
  453. return arm_srq_cmd(dev, srq, lwm, is_srq);
  454. else if (srq->common.res == MLX5_RES_XSRQ)
  455. return arm_xrc_srq_cmd(dev, srq, lwm);
  456. else
  457. return arm_rmp_cmd(dev, srq, lwm);
  458. }
  459. EXPORT_SYMBOL(mlx5_core_arm_srq);
/* Initialize the per-device SRQ table: zero it, set up its lock and the
 * radix tree keyed by SRQ number.  GFP_ATOMIC because insertions happen
 * under a spinlock with IRQs disabled (see mlx5_core_create_srq()).
 */
void mlx5_init_srq_table(struct mlx5_core_dev *dev)
{
	struct mlx5_srq_table *table = &dev->priv.srq_table;
	memset(table, 0, sizeof(*table));
	spin_lock_init(&table->lock);
	INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
}
/* Counterpart to mlx5_init_srq_table(); intentionally empty — the table
 * is embedded in dev->priv and the tree is expected to be empty by the
 * time the device is torn down.
 */
void mlx5_cleanup_srq_table(struct mlx5_core_dev *dev)
{
	/* nothing */
}