nldev.c

/*
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/module.h>
#include <linux/pid.h>
#include <linux/pid_namespace.h>
#include <net/netlink.h>
#include <rdma/rdma_cm.h>
#include <rdma/rdma_netlink.h>

#include "core_priv.h"
#include "cma_priv.h"

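/*
 * Validation policy for the RDMA_NLDEV_ATTR_* attributes carried in
 * RDMA_NL_NLDEV netlink messages: expected type and maximum length.
 */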
static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = {
	[RDMA_NLDEV_ATTR_DEV_INDEX] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING,
				       .len = IB_DEVICE_NAME_MAX - 1},
	[RDMA_NLDEV_ATTR_PORT_INDEX] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_FW_VERSION] = { .type = NLA_NUL_STRING,
					 .len = IB_FW_VERSION_NAME_MAX - 1},
	[RDMA_NLDEV_ATTR_NODE_GUID] = { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_SYS_IMAGE_GUID] = { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_SUBNET_PREFIX] = { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_LID] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_SM_LID] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_LMC] = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_PORT_STATE] = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_PORT_PHYS_STATE] = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_DEV_NODE_TYPE] = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_SUMMARY] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME] = { .type = NLA_NUL_STRING,
						     .len = 16 },
	[RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR] = { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_RES_QP] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_QP_ENTRY] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_LQPN] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_RQPN] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_RQ_PSN] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_SQ_PSN] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE] = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_TYPE] = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_STATE] = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_PID] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_KERN_NAME] = { .type = NLA_NUL_STRING,
					    .len = TASK_COMM_LEN },
	[RDMA_NLDEV_ATTR_RES_CM_ID] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_PS] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_SRC_ADDR] = {
			.len = sizeof(struct __kernel_sockaddr_storage) },
	[RDMA_NLDEV_ATTR_RES_DST_ADDR] = {
			.len = sizeof(struct __kernel_sockaddr_storage) },
	[RDMA_NLDEV_ATTR_RES_CQ] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_CQ_ENTRY] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_CQE] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_USECNT] = { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_RES_POLL_CTX] = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_MR] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_MR_ENTRY] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_RKEY] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_LKEY] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_IOVA] = { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_RES_MRLEN] = { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_RES_PD] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_PD_ENTRY] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_NDEV_INDEX] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_NDEV_NAME] = { .type = NLA_NUL_STRING,
					.len = IFNAMSIZ },
	[RDMA_NLDEV_ATTR_DRIVER] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_DRIVER_ENTRY] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_DRIVER_STRING] = { .type = NLA_NUL_STRING,
					    .len = RDMA_NLDEV_ATTR_ENTRY_STRLEN },
	[RDMA_NLDEV_ATTR_DRIVER_PRINT_TYPE] = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_DRIVER_S32] = { .type = NLA_S32 },
	[RDMA_NLDEV_ATTR_DRIVER_U32] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_DRIVER_S64] = { .type = NLA_S64 },
	[RDMA_NLDEV_ATTR_DRIVER_U64] = { .type = NLA_U64 },
};

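/*
 * Helpers used by the exported rdma_nl_put_driver_*() functions below to
 * append driver-specific name/value pairs inside a RDMA_NLDEV_ATTR_DRIVER
 * nest, together with an optional print-type hint for user space.
 */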
static int put_driver_name_print_type(struct sk_buff *msg, const char *name,
				      enum rdma_nldev_print_type print_type)
{
	if (nla_put_string(msg, RDMA_NLDEV_ATTR_DRIVER_STRING, name))
		return -EMSGSIZE;
	if (print_type != RDMA_NLDEV_PRINT_TYPE_UNSPEC &&
	    nla_put_u8(msg, RDMA_NLDEV_ATTR_DRIVER_PRINT_TYPE, print_type))
		return -EMSGSIZE;

	return 0;
}

static int _rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name,
				   enum rdma_nldev_print_type print_type,
				   u32 value)
{
	if (put_driver_name_print_type(msg, name, print_type))
		return -EMSGSIZE;
	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DRIVER_U32, value))
		return -EMSGSIZE;

	return 0;
}

static int _rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name,
				   enum rdma_nldev_print_type print_type,
				   u64 value)
{
	if (put_driver_name_print_type(msg, name, print_type))
		return -EMSGSIZE;
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_DRIVER_U64, value,
			      RDMA_NLDEV_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}

int rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name, u32 value)
{
	return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC,
				       value);
}
EXPORT_SYMBOL(rdma_nl_put_driver_u32);

int rdma_nl_put_driver_u32_hex(struct sk_buff *msg, const char *name,
			       u32 value)
{
	return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_HEX,
				       value);
}
EXPORT_SYMBOL(rdma_nl_put_driver_u32_hex);

int rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name, u64 value)
{
	return _rdma_nl_put_driver_u64(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC,
				       value);
}
EXPORT_SYMBOL(rdma_nl_put_driver_u64);

int rdma_nl_put_driver_u64_hex(struct sk_buff *msg, const char *name, u64 value)
{
	return _rdma_nl_put_driver_u64(msg, name, RDMA_NLDEV_PRINT_TYPE_HEX,
				       value);
}
EXPORT_SYMBOL(rdma_nl_put_driver_u64_hex);

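/* Put the index and name that identify an ib_device into a reply. */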
static int fill_nldev_handle(struct sk_buff *msg, struct ib_device *device)
{
	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, device->index))
		return -EMSGSIZE;
	if (nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_NAME, device->name))
		return -EMSGSIZE;

	return 0;
}

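/*
 * Fill device-wide attributes: port count, capability flags, firmware
 * version (if any), node/system image GUIDs and node type.
 */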
static int fill_dev_info(struct sk_buff *msg, struct ib_device *device)
{
	char fw[IB_FW_VERSION_NAME_MAX];

	if (fill_nldev_handle(msg, device))
		return -EMSGSIZE;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, rdma_end_port(device)))
		return -EMSGSIZE;

	BUILD_BUG_ON(sizeof(device->attrs.device_cap_flags) != sizeof(u64));
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS,
			      device->attrs.device_cap_flags,
			      RDMA_NLDEV_ATTR_PAD))
		return -EMSGSIZE;

	ib_get_device_fw_str(device, fw);
	/* Device without FW has strlen(fw) = 0 */
	if (strlen(fw) && nla_put_string(msg, RDMA_NLDEV_ATTR_FW_VERSION, fw))
		return -EMSGSIZE;

	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_NODE_GUID,
			      be64_to_cpu(device->node_guid),
			      RDMA_NLDEV_ATTR_PAD))
		return -EMSGSIZE;
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SYS_IMAGE_GUID,
			      be64_to_cpu(device->attrs.sys_image_guid),
			      RDMA_NLDEV_ATTR_PAD))
		return -EMSGSIZE;
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_NODE_TYPE, device->node_type))
		return -EMSGSIZE;
	return 0;
}

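/*
 * Fill per-port attributes. IB-specific fields (LIDs, LMC, subnet prefix)
 * are emitted only for InfiniBand ports, and the associated netdev is
 * reported only when it belongs to the caller's network namespace.
 */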
static int fill_port_info(struct sk_buff *msg,
			  struct ib_device *device, u32 port,
			  const struct net *net)
{
	struct net_device *netdev = NULL;
	struct ib_port_attr attr;
	int ret;

	if (fill_nldev_handle(msg, device))
		return -EMSGSIZE;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port))
		return -EMSGSIZE;

	ret = ib_query_port(device, port, &attr);
	if (ret)
		return ret;

	if (rdma_protocol_ib(device, port)) {
		BUILD_BUG_ON(sizeof(attr.port_cap_flags) > sizeof(u64));
		if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS,
				      (u64)attr.port_cap_flags,
				      RDMA_NLDEV_ATTR_PAD))
			return -EMSGSIZE;
		if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SUBNET_PREFIX,
				      attr.subnet_prefix, RDMA_NLDEV_ATTR_PAD))
			return -EMSGSIZE;
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_LID, attr.lid))
			return -EMSGSIZE;
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_SM_LID, attr.sm_lid))
			return -EMSGSIZE;
		if (nla_put_u8(msg, RDMA_NLDEV_ATTR_LMC, attr.lmc))
			return -EMSGSIZE;
	}
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_STATE, attr.state))
		return -EMSGSIZE;
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_PHYS_STATE, attr.phys_state))
		return -EMSGSIZE;

	if (device->get_netdev)
		netdev = device->get_netdev(device, port);

	if (netdev && net_eq(dev_net(netdev), net)) {
		ret = nla_put_u32(msg,
				  RDMA_NLDEV_ATTR_NDEV_INDEX, netdev->ifindex);
		if (ret)
			goto out;
		ret = nla_put_string(msg,
				     RDMA_NLDEV_ATTR_NDEV_NAME, netdev->name);
	}

out:
	if (netdev)
		dev_put(netdev);
	return ret;
}

static int fill_res_info_entry(struct sk_buff *msg,
			       const char *name, u64 curr)
{
	struct nlattr *entry_attr;

	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY);
	if (!entry_attr)
		return -EMSGSIZE;

	if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME, name))
		goto err;
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR, curr,
			      RDMA_NLDEV_ATTR_PAD))
		goto err;

	nla_nest_end(msg, entry_attr);
	return 0;

err:
	nla_nest_cancel(msg, entry_attr);
	return -EMSGSIZE;
}

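/*
 * Emit the per-device resource summary: one nested entry per tracked
 * resource type (pd, cq, qp, cm_id, mr) with its current count in the
 * caller's PID namespace.
 */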
static int fill_res_info(struct sk_buff *msg, struct ib_device *device)
{
	static const char * const names[RDMA_RESTRACK_MAX] = {
		[RDMA_RESTRACK_PD] = "pd",
		[RDMA_RESTRACK_CQ] = "cq",
		[RDMA_RESTRACK_QP] = "qp",
		[RDMA_RESTRACK_CM_ID] = "cm_id",
		[RDMA_RESTRACK_MR] = "mr",
	};

	struct rdma_restrack_root *res = &device->res;
	struct nlattr *table_attr;
	int ret, i, curr;

	if (fill_nldev_handle(msg, device))
		return -EMSGSIZE;

	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_SUMMARY);
	if (!table_attr)
		return -EMSGSIZE;

	for (i = 0; i < RDMA_RESTRACK_MAX; i++) {
		if (!names[i])
			continue;
		curr = rdma_restrack_count(res, i, task_active_pid_ns(current));
		ret = fill_res_info_entry(msg, names[i], curr);
		if (ret)
			goto err;
	}

	nla_nest_end(msg, table_attr);
	return 0;

err:
	nla_nest_cancel(msg, table_attr);
	return ret;
}

static int fill_res_name_pid(struct sk_buff *msg,
			     struct rdma_restrack_entry *res)
{
	/*
	 * For user resources, only the owning PID is reported; user space
	 * should read /proc/PID/comm to get the name of the task.
	 */
	if (rdma_is_kernel_res(res)) {
		if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_KERN_NAME,
				   res->kern_name))
			return -EMSGSIZE;
	} else {
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PID,
				task_pid_vnr(res->task)))
			return -EMSGSIZE;
	}
	return 0;
}

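/* Fill one RDMA_NLDEV_ATTR_RES_QP_ENTRY nest for a tracked QP. */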
static int fill_res_qp_entry(struct sk_buff *msg, struct netlink_callback *cb,
			     struct rdma_restrack_entry *res, uint32_t port)
{
	struct ib_qp *qp = container_of(res, struct ib_qp, res);
	struct rdma_restrack_root *resroot = &qp->device->res;
	struct ib_qp_init_attr qp_init_attr;
	struct nlattr *entry_attr;
	struct ib_qp_attr qp_attr;
	int ret;

	ret = ib_query_qp(qp, &qp_attr, 0, &qp_init_attr);
	if (ret)
		return ret;

	if (port && port != qp_attr.port_num)
		return 0;

	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP_ENTRY);
	if (!entry_attr)
		goto out;

	/* In create_qp() port is not set yet */
	if (qp_attr.port_num &&
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, qp_attr.port_num))
		goto err;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qp->qp_num))
		goto err;
	if (qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_UC) {
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQPN,
				qp_attr.dest_qp_num))
			goto err;
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQ_PSN,
				qp_attr.rq_psn))
			goto err;
	}

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_SQ_PSN, qp_attr.sq_psn))
		goto err;

	if (qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_UC ||
	    qp->qp_type == IB_QPT_XRC_INI || qp->qp_type == IB_QPT_XRC_TGT) {
		if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE,
			       qp_attr.path_mig_state))
			goto err;
	}
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, qp->qp_type))
		goto err;
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, qp_attr.qp_state))
		goto err;

	if (fill_res_name_pid(msg, res))
		goto err;

	if (resroot->fill_res_entry(msg, res))
		goto err;

	nla_nest_end(msg, entry_attr);
	return 0;

err:
	nla_nest_cancel(msg, entry_attr);
out:
	return -EMSGSIZE;
}

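/* Fill one RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY nest for a tracked CM ID. */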
static int fill_res_cm_id_entry(struct sk_buff *msg,
				struct netlink_callback *cb,
				struct rdma_restrack_entry *res, uint32_t port)
{
	struct rdma_id_private *id_priv =
				container_of(res, struct rdma_id_private, res);
	struct rdma_restrack_root *resroot = &id_priv->id.device->res;
	struct rdma_cm_id *cm_id = &id_priv->id;
	struct nlattr *entry_attr;

	if (port && port != cm_id->port_num)
		return 0;

	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY);
	if (!entry_attr)
		goto out;

	if (cm_id->port_num &&
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, cm_id->port_num))
		goto err;

	if (id_priv->qp_num) {
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, id_priv->qp_num))
			goto err;
		if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, cm_id->qp_type))
			goto err;
	}

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PS, cm_id->ps))
		goto err;

	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, id_priv->state))
		goto err;

	if (cm_id->route.addr.src_addr.ss_family &&
	    nla_put(msg, RDMA_NLDEV_ATTR_RES_SRC_ADDR,
		    sizeof(cm_id->route.addr.src_addr),
		    &cm_id->route.addr.src_addr))
		goto err;
	if (cm_id->route.addr.dst_addr.ss_family &&
	    nla_put(msg, RDMA_NLDEV_ATTR_RES_DST_ADDR,
		    sizeof(cm_id->route.addr.dst_addr),
		    &cm_id->route.addr.dst_addr))
		goto err;

	if (fill_res_name_pid(msg, res))
		goto err;

	if (resroot->fill_res_entry(msg, res))
		goto err;

	nla_nest_end(msg, entry_attr);
	return 0;

err:
	nla_nest_cancel(msg, entry_attr);
out:
	return -EMSGSIZE;
}

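/* Fill one RDMA_NLDEV_ATTR_RES_CQ_ENTRY nest for a tracked CQ. */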
static int fill_res_cq_entry(struct sk_buff *msg, struct netlink_callback *cb,
			     struct rdma_restrack_entry *res, uint32_t port)
{
	struct ib_cq *cq = container_of(res, struct ib_cq, res);
	struct rdma_restrack_root *resroot = &cq->device->res;
	struct nlattr *entry_attr;

	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_CQ_ENTRY);
	if (!entry_attr)
		goto out;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQE, cq->cqe))
		goto err;
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT,
			      atomic_read(&cq->usecnt), RDMA_NLDEV_ATTR_PAD))
		goto err;

	/* Poll context is only valid for kernel CQs */
	if (rdma_is_kernel_res(res) &&
	    nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_POLL_CTX, cq->poll_ctx))
		goto err;

	if (fill_res_name_pid(msg, res))
		goto err;

	if (resroot->fill_res_entry(msg, res))
		goto err;

	nla_nest_end(msg, entry_attr);
	return 0;

err:
	nla_nest_cancel(msg, entry_attr);
out:
	return -EMSGSIZE;
}

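/*
 * Fill one RDMA_NLDEV_ATTR_RES_MR_ENTRY nest for a tracked MR; the rkey and
 * lkey are exposed only to callers with CAP_NET_ADMIN.
 */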
static int fill_res_mr_entry(struct sk_buff *msg, struct netlink_callback *cb,
			     struct rdma_restrack_entry *res, uint32_t port)
{
	struct ib_mr *mr = container_of(res, struct ib_mr, res);
	struct rdma_restrack_root *resroot = &mr->pd->device->res;
	struct nlattr *entry_attr;

	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_MR_ENTRY);
	if (!entry_attr)
		goto out;

	if (netlink_capable(cb->skb, CAP_NET_ADMIN)) {
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RKEY, mr->rkey))
			goto err;
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LKEY, mr->lkey))
			goto err;
	}

	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_MRLEN, mr->length,
			      RDMA_NLDEV_ATTR_PAD))
		goto err;

	if (fill_res_name_pid(msg, res))
		goto err;

	if (resroot->fill_res_entry(msg, res))
		goto err;

	nla_nest_end(msg, entry_attr);
	return 0;

err:
	nla_nest_cancel(msg, entry_attr);
out:
	return -EMSGSIZE;
}

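/*
 * Fill one RDMA_NLDEV_ATTR_RES_PD_ENTRY nest for a tracked PD; the local DMA
 * lkey and the unsafe global rkey are exposed only to CAP_NET_ADMIN callers.
 */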
static int fill_res_pd_entry(struct sk_buff *msg, struct netlink_callback *cb,
			     struct rdma_restrack_entry *res, uint32_t port)
{
	struct ib_pd *pd = container_of(res, struct ib_pd, res);
	struct rdma_restrack_root *resroot = &pd->device->res;
	struct nlattr *entry_attr;

	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_PD_ENTRY);
	if (!entry_attr)
		goto out;

	if (netlink_capable(cb->skb, CAP_NET_ADMIN)) {
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY,
				pd->local_dma_lkey))
			goto err;
		if ((pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) &&
		    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY,
				pd->unsafe_global_rkey))
			goto err;
	}
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT,
			      atomic_read(&pd->usecnt), RDMA_NLDEV_ATTR_PAD))
		goto err;
	if (fill_res_name_pid(msg, res))
		goto err;

	if (resroot->fill_res_entry(msg, res))
		goto err;

	nla_nest_end(msg, entry_attr);
	return 0;

err:
	nla_nest_cancel(msg, entry_attr);
out:
	return -EMSGSIZE;
}

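/*
 * RDMA_NLDEV_CMD_GET (doit): look up the device by
 * RDMA_NLDEV_ATTR_DEV_INDEX and unicast its attributes back to the caller.
 */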
static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	struct sk_buff *msg;
	u32 index;
	int err;

	err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, extack);
	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);

	device = ib_device_get_by_index(index);
	if (!device)
		return -EINVAL;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		err = -ENOMEM;
		goto err;
	}

	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
			0, 0);

	err = fill_dev_info(msg, device);
	if (err)
		goto err_free;

	nlmsg_end(msg, nlh);

	put_device(&device->dev);
	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);

err_free:
	nlmsg_free(msg);
err:
	put_device(&device->dev);
	return err;
}

static int _nldev_get_dumpit(struct ib_device *device,
			     struct sk_buff *skb,
			     struct netlink_callback *cb,
			     unsigned int idx)
{
	int start = cb->args[0];
	struct nlmsghdr *nlh;

	if (idx < start)
		return 0;

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
			0, NLM_F_MULTI);

	if (fill_dev_info(skb, device)) {
		nlmsg_cancel(skb, nlh);
		goto out;
	}

	nlmsg_end(skb, nlh);

	idx++;

out:
	cb->args[0] = idx;
	return skb->len;
}

static int nldev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	/*
	 * There is no need to take a lock here, because
	 * we are relying on ib_core's lists_rwsem.
	 */
	return ib_enum_all_devs(_nldev_get_dumpit, skb, cb);
}

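/*
 * RDMA_NLDEV_CMD_PORT_GET (doit): both DEV_INDEX and PORT_INDEX are
 * mandatory; reply with the attributes of that single port.
 */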
static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			       struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	struct sk_buff *msg;
	u32 index;
	u32 port;
	int err;

	err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, extack);
	if (err ||
	    !tb[RDMA_NLDEV_ATTR_DEV_INDEX] ||
	    !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(index);
	if (!device)
		return -EINVAL;

	port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
	if (!rdma_is_port_valid(device, port)) {
		err = -EINVAL;
		goto err;
	}

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		err = -ENOMEM;
		goto err;
	}

	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
			0, 0);

	err = fill_port_info(msg, device, port, sock_net(skb->sk));
	if (err)
		goto err_free;

	nlmsg_end(msg, nlh);
	put_device(&device->dev);

	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);

err_free:
	nlmsg_free(msg);
err:
	put_device(&device->dev);
	return err;
}

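/*
 * RDMA_NLDEV_CMD_PORT_GET (dumpit): DEV_INDEX is mandatory; walk all ports
 * of that device and emit one NLM_F_MULTI message per port.
 */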
static int nldev_port_get_dumpit(struct sk_buff *skb,
				 struct netlink_callback *cb)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	int start = cb->args[0];
	struct nlmsghdr *nlh;
	u32 idx = 0;
	u32 ifindex;
	int err;
	u32 p;

	err = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, NULL);
	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	ifindex = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(ifindex);
	if (!device)
		return -EINVAL;

	for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
		/*
		 * The dumpit function returns all information starting from
		 * a specific index. That index is taken from the netlink
		 * request sent by the user and is available in cb->args[0].
		 *
		 * Usually, the user doesn't fill this field, which causes
		 * everything to be returned.
		 */
		if (idx < start) {
			idx++;
			continue;
		}

		nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
				cb->nlh->nlmsg_seq,
				RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
						 RDMA_NLDEV_CMD_PORT_GET),
				0, NLM_F_MULTI);

		if (fill_port_info(skb, device, p, sock_net(skb->sk))) {
			nlmsg_cancel(skb, nlh);
			goto out;
		}
		idx++;
		nlmsg_end(skb, nlh);
	}

out:
	put_device(&device->dev);
	cb->args[0] = idx;
	return skb->len;
}

static int nldev_res_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			      struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	struct sk_buff *msg;
	u32 index;
	int ret;

	ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, extack);
	if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(index);
	if (!device)
		return -EINVAL;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		ret = -ENOMEM;
		goto err;
	}

	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET),
			0, 0);

	ret = fill_res_info(msg, device);
	if (ret)
		goto err_free;

	nlmsg_end(msg, nlh);
	put_device(&device->dev);
	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);

err_free:
	nlmsg_free(msg);
err:
	put_device(&device->dev);
	return ret;
}

static int _nldev_res_get_dumpit(struct ib_device *device,
				 struct sk_buff *skb,
				 struct netlink_callback *cb,
				 unsigned int idx)
{
	int start = cb->args[0];
	struct nlmsghdr *nlh;

	if (idx < start)
		return 0;

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET),
			0, NLM_F_MULTI);

	if (fill_res_info(skb, device)) {
		nlmsg_cancel(skb, nlh);
		goto out;
	}

	nlmsg_end(skb, nlh);

	idx++;

out:
	cb->args[0] = idx;
	return skb->len;
}

static int nldev_res_get_dumpit(struct sk_buff *skb,
				struct netlink_callback *cb)
{
	return ib_enum_all_devs(_nldev_res_get_dumpit, skb, cb);
}

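/*
 * Per-resource-type dispatch table: which fill function to call and which
 * netlink command/attribute to use when dumping that resource type.
 */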
struct nldev_fill_res_entry {
	int (*fill_res_func)(struct sk_buff *msg, struct netlink_callback *cb,
			     struct rdma_restrack_entry *res, u32 port);
	enum rdma_nldev_attr nldev_attr;
	enum rdma_nldev_command nldev_cmd;
};

static const struct nldev_fill_res_entry fill_entries[RDMA_RESTRACK_MAX] = {
	[RDMA_RESTRACK_QP] = {
		.fill_res_func = fill_res_qp_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_QP_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_QP,
	},
	[RDMA_RESTRACK_CM_ID] = {
		.fill_res_func = fill_res_cm_id_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_CM_ID_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_CM_ID,
	},
	[RDMA_RESTRACK_CQ] = {
		.fill_res_func = fill_res_cq_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_CQ_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_CQ,
	},
	[RDMA_RESTRACK_MR] = {
		.fill_res_func = fill_res_mr_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_MR_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_MR,
	},
	[RDMA_RESTRACK_PD] = {
		.fill_res_func = fill_res_pd_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_PD_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_PD,
	},
};

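/*
 * Common dumpit for all tracked resource types: walk the restrack hash
 * bucket for @res_type on the requested device and emit one nested entry
 * per resource that is visible to the caller.
 */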
static int res_get_common_dumpit(struct sk_buff *skb,
				 struct netlink_callback *cb,
				 enum rdma_restrack_type res_type)
{
	const struct nldev_fill_res_entry *fe = &fill_entries[res_type];
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct rdma_restrack_entry *res;
	int err, ret = 0, idx = 0;
	struct nlattr *table_attr;
	struct ib_device *device;
	int start = cb->args[0];
	struct nlmsghdr *nlh;
	u32 index, port = 0;
	bool filled = false;

	err = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, NULL);
	/*
	 * Right now, we require the device index to get resource information,
	 * but this code could be extended to return all devices in one shot
	 * by checking for the existence of RDMA_NLDEV_ATTR_DEV_INDEX and,
	 * if it is absent, iterating over all devices.
	 *
	 * That is not needed for now.
	 */
	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(index);
	if (!device)
		return -EINVAL;

	/*
	 * If no PORT_INDEX is supplied, return the resources of all ports
	 * of that device.
	 */
	if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
		port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
		if (!rdma_is_port_valid(device, port)) {
			ret = -EINVAL;
			goto err_index;
		}
	}

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, fe->nldev_cmd),
			0, NLM_F_MULTI);

	if (fill_nldev_handle(skb, device)) {
		ret = -EMSGSIZE;
		goto err;
	}

	table_attr = nla_nest_start(skb, fe->nldev_attr);
	if (!table_attr) {
		ret = -EMSGSIZE;
		goto err;
	}

	down_read(&device->res.rwsem);
	hash_for_each_possible(device->res.hash, res, node, res_type) {
		if (idx < start)
			goto next;

		if ((rdma_is_kernel_res(res) &&
		     task_active_pid_ns(current) != &init_pid_ns) ||
		    (!rdma_is_kernel_res(res) && task_active_pid_ns(current) !=
		     task_active_pid_ns(res->task)))
			/*
			 * 1. Kernel resources should be visible in the init
			 *    namespace only.
			 * 2. Present only resources visible in the current
			 *    namespace.
			 */
			goto next;

		if (!rdma_restrack_get(res))
			/*
			 * The resource is being released, but we are not
			 * releasing the lock now, so it will be freed in
			 * our next pass, once we get the ->next pointer.
			 */
			goto next;

		filled = true;

		up_read(&device->res.rwsem);
		ret = fe->fill_res_func(skb, cb, res, port);
		down_read(&device->res.rwsem);
		/*
		 * Put the resource back; it won't actually be released until
		 * &device->res.rwsem can be taken for write.
		 */
		rdma_restrack_put(res);

		if (ret == -EMSGSIZE)
			/*
			 * There is a chance to optimize here.
			 * It can be done by using list_prepare_entry
			 * and list_for_each_entry_continue afterwards.
			 */
			break;
		if (ret)
			goto res_err;
next:
		idx++;
	}
	up_read(&device->res.rwsem);

	nla_nest_end(skb, table_attr);
	nlmsg_end(skb, nlh);
	cb->args[0] = idx;

	/*
	 * No more entries to fill, cancel the message and
	 * return 0 to mark the end of the dumpit.
	 */
	if (!filled)
		goto err;

	put_device(&device->dev);
	return skb->len;

res_err:
	nla_nest_cancel(skb, table_attr);
	up_read(&device->res.rwsem);

err:
	nlmsg_cancel(skb, nlh);

err_index:
	put_device(&device->dev);
	return ret;
}

static int nldev_res_get_qp_dumpit(struct sk_buff *skb,
				   struct netlink_callback *cb)
{
	return res_get_common_dumpit(skb, cb, RDMA_RESTRACK_QP);
}

static int nldev_res_get_cm_id_dumpit(struct sk_buff *skb,
				      struct netlink_callback *cb)
{
	return res_get_common_dumpit(skb, cb, RDMA_RESTRACK_CM_ID);
}

static int nldev_res_get_cq_dumpit(struct sk_buff *skb,
				   struct netlink_callback *cb)
{
	return res_get_common_dumpit(skb, cb, RDMA_RESTRACK_CQ);
}

static int nldev_res_get_mr_dumpit(struct sk_buff *skb,
				   struct netlink_callback *cb)
{
	return res_get_common_dumpit(skb, cb, RDMA_RESTRACK_MR);
}

static int nldev_res_get_pd_dumpit(struct sk_buff *skb,
				   struct netlink_callback *cb)
{
	return res_get_common_dumpit(skb, cb, RDMA_RESTRACK_PD);
}

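/* Command table registered with the RDMA netlink core for RDMA_NL_NLDEV. */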
static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
	[RDMA_NLDEV_CMD_GET] = {
		.doit = nldev_get_doit,
		.dump = nldev_get_dumpit,
	},
	[RDMA_NLDEV_CMD_PORT_GET] = {
		.doit = nldev_port_get_doit,
		.dump = nldev_port_get_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_GET] = {
		.doit = nldev_res_get_doit,
		.dump = nldev_res_get_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_QP_GET] = {
		.dump = nldev_res_get_qp_dumpit,
		/*
		 * .doit is not implemented yet for two reasons:
		 * 1. It is not needed yet.
		 * 2. There is a need to provide an identifier; while that is
		 *    easy for QPs (device index + port index + LQPN), it is
		 *    not the case for the rest of the resources (PD and CQ).
		 *    Because it is better to provide a similar interface for
		 *    all resources, let's wait until the other resources are
		 *    implemented too.
		 */
	},
	[RDMA_NLDEV_CMD_RES_CM_ID_GET] = {
		.dump = nldev_res_get_cm_id_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_CQ_GET] = {
		.dump = nldev_res_get_cq_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_MR_GET] = {
		.dump = nldev_res_get_mr_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_PD_GET] = {
		.dump = nldev_res_get_pd_dumpit,
	},
};

void __init nldev_init(void)
{
	rdma_nl_register(RDMA_NL_NLDEV, nldev_cb_table);
}

void __exit nldev_exit(void)
{
	rdma_nl_unregister(RDMA_NL_NLDEV);
}

MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_NLDEV, 5);