devx.c 34 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125
  1. // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
  2. /*
  3. * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
  4. */
  5. #include <rdma/ib_user_verbs.h>
  6. #include <rdma/ib_verbs.h>
  7. #include <rdma/uverbs_types.h>
  8. #include <rdma/uverbs_ioctl.h>
  9. #include <rdma/mlx5_user_ioctl_cmds.h>
  10. #include <rdma/ib_umem.h>
  11. #include <linux/mlx5/driver.h>
  12. #include <linux/mlx5/fs.h>
  13. #include "mlx5_ib.h"
  14. #define UVERBS_MODULE_NAME mlx5_ib
  15. #include <rdma/uverbs_named_ioctl.h>
  16. #define MLX5_MAX_DESTROY_INBOX_SIZE_DW MLX5_ST_SZ_DW(delete_fte_in)
/*
 * State kept for each firmware object created on behalf of userspace
 * through the DEVX interface.  The destroy command is prebuilt at
 * creation time so teardown needs no further parsing.
 */
struct devx_obj {
	struct mlx5_core_dev *mdev;
	u32 obj_id;
	u32 dinlen; /* destroy inbox length */
	u32 dinbox[MLX5_MAX_DESTROY_INBOX_SIZE_DW]; /* prebuilt destroy command */
};
/* A pinned user memory region registered through DEVX. */
struct devx_umem {
	struct mlx5_core_dev *mdev;
	struct ib_umem *umem;
	u32 page_offset;
	int page_shift;
	int ncont;
	u32 dinlen; /* destroy inbox length */
	u32 dinbox[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)]; /* prebuilt destroy command */
};
/* Scratch state for building and executing a UMEM registration command. */
struct devx_umem_reg_cmd {
	void *in;   /* command inbox (allocated by the caller) */
	u32 inlen;  /* inbox length in bytes */
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
};
  37. static struct mlx5_ib_ucontext *devx_ufile2uctx(struct ib_uverbs_file *file)
  38. {
  39. return to_mucontext(ib_uverbs_get_ucontext(file));
  40. }
  41. int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context)
  42. {
  43. u32 in[MLX5_ST_SZ_DW(create_uctx_in)] = {0};
  44. u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
  45. u64 general_obj_types;
  46. void *hdr;
  47. int err;
  48. hdr = MLX5_ADDR_OF(create_uctx_in, in, hdr);
  49. general_obj_types = MLX5_CAP_GEN_64(dev->mdev, general_obj_types);
  50. if (!(general_obj_types & MLX5_GENERAL_OBJ_TYPES_CAP_UCTX) ||
  51. !(general_obj_types & MLX5_GENERAL_OBJ_TYPES_CAP_UMEM))
  52. return -EINVAL;
  53. if (!capable(CAP_NET_RAW))
  54. return -EPERM;
  55. MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
  56. MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type, MLX5_OBJ_TYPE_UCTX);
  57. err = mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
  58. if (err)
  59. return err;
  60. context->devx_uid = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
  61. return 0;
  62. }
  63. void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev,
  64. struct mlx5_ib_ucontext *context)
  65. {
  66. u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {0};
  67. u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
  68. MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
  69. MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_OBJ_TYPE_UCTX);
  70. MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, context->devx_uid);
  71. mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
  72. }
  73. bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id, int *dest_type)
  74. {
  75. struct devx_obj *devx_obj = obj;
  76. u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, opcode);
  77. switch (opcode) {
  78. case MLX5_CMD_OP_DESTROY_TIR:
  79. *dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
  80. *dest_id = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox,
  81. obj_id);
  82. return true;
  83. case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
  84. *dest_type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
  85. *dest_id = MLX5_GET(destroy_flow_table_in, devx_obj->dinbox,
  86. table_id);
  87. return true;
  88. default:
  89. return false;
  90. }
  91. }
  92. static int devx_is_valid_obj_id(struct devx_obj *obj, const void *in)
  93. {
  94. u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
  95. u32 obj_id;
  96. switch (opcode) {
  97. case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
  98. case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
  99. obj_id = MLX5_GET(general_obj_in_cmd_hdr, in, obj_id);
  100. break;
  101. case MLX5_CMD_OP_QUERY_MKEY:
  102. obj_id = MLX5_GET(query_mkey_in, in, mkey_index);
  103. break;
  104. case MLX5_CMD_OP_QUERY_CQ:
  105. obj_id = MLX5_GET(query_cq_in, in, cqn);
  106. break;
  107. case MLX5_CMD_OP_MODIFY_CQ:
  108. obj_id = MLX5_GET(modify_cq_in, in, cqn);
  109. break;
  110. case MLX5_CMD_OP_QUERY_SQ:
  111. obj_id = MLX5_GET(query_sq_in, in, sqn);
  112. break;
  113. case MLX5_CMD_OP_MODIFY_SQ:
  114. obj_id = MLX5_GET(modify_sq_in, in, sqn);
  115. break;
  116. case MLX5_CMD_OP_QUERY_RQ:
  117. obj_id = MLX5_GET(query_rq_in, in, rqn);
  118. break;
  119. case MLX5_CMD_OP_MODIFY_RQ:
  120. obj_id = MLX5_GET(modify_rq_in, in, rqn);
  121. break;
  122. case MLX5_CMD_OP_QUERY_RMP:
  123. obj_id = MLX5_GET(query_rmp_in, in, rmpn);
  124. break;
  125. case MLX5_CMD_OP_MODIFY_RMP:
  126. obj_id = MLX5_GET(modify_rmp_in, in, rmpn);
  127. break;
  128. case MLX5_CMD_OP_QUERY_RQT:
  129. obj_id = MLX5_GET(query_rqt_in, in, rqtn);
  130. break;
  131. case MLX5_CMD_OP_MODIFY_RQT:
  132. obj_id = MLX5_GET(modify_rqt_in, in, rqtn);
  133. break;
  134. case MLX5_CMD_OP_QUERY_TIR:
  135. obj_id = MLX5_GET(query_tir_in, in, tirn);
  136. break;
  137. case MLX5_CMD_OP_MODIFY_TIR:
  138. obj_id = MLX5_GET(modify_tir_in, in, tirn);
  139. break;
  140. case MLX5_CMD_OP_QUERY_TIS:
  141. obj_id = MLX5_GET(query_tis_in, in, tisn);
  142. break;
  143. case MLX5_CMD_OP_MODIFY_TIS:
  144. obj_id = MLX5_GET(modify_tis_in, in, tisn);
  145. break;
  146. case MLX5_CMD_OP_QUERY_FLOW_TABLE:
  147. obj_id = MLX5_GET(query_flow_table_in, in, table_id);
  148. break;
  149. case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
  150. obj_id = MLX5_GET(modify_flow_table_in, in, table_id);
  151. break;
  152. case MLX5_CMD_OP_QUERY_FLOW_GROUP:
  153. obj_id = MLX5_GET(query_flow_group_in, in, group_id);
  154. break;
  155. case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
  156. obj_id = MLX5_GET(query_fte_in, in, flow_index);
  157. break;
  158. case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
  159. obj_id = MLX5_GET(set_fte_in, in, flow_index);
  160. break;
  161. case MLX5_CMD_OP_QUERY_Q_COUNTER:
  162. obj_id = MLX5_GET(query_q_counter_in, in, counter_set_id);
  163. break;
  164. case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
  165. obj_id = MLX5_GET(query_flow_counter_in, in, flow_counter_id);
  166. break;
  167. case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
  168. obj_id = MLX5_GET(general_obj_in_cmd_hdr, in, obj_id);
  169. break;
  170. case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
  171. obj_id = MLX5_GET(query_scheduling_element_in, in,
  172. scheduling_element_id);
  173. break;
  174. case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
  175. obj_id = MLX5_GET(modify_scheduling_element_in, in,
  176. scheduling_element_id);
  177. break;
  178. case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
  179. obj_id = MLX5_GET(add_vxlan_udp_dport_in, in, vxlan_udp_port);
  180. break;
  181. case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
  182. obj_id = MLX5_GET(query_l2_table_entry_in, in, table_index);
  183. break;
  184. case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
  185. obj_id = MLX5_GET(set_l2_table_entry_in, in, table_index);
  186. break;
  187. case MLX5_CMD_OP_QUERY_QP:
  188. obj_id = MLX5_GET(query_qp_in, in, qpn);
  189. break;
  190. case MLX5_CMD_OP_RST2INIT_QP:
  191. obj_id = MLX5_GET(rst2init_qp_in, in, qpn);
  192. break;
  193. case MLX5_CMD_OP_INIT2RTR_QP:
  194. obj_id = MLX5_GET(init2rtr_qp_in, in, qpn);
  195. break;
  196. case MLX5_CMD_OP_RTR2RTS_QP:
  197. obj_id = MLX5_GET(rtr2rts_qp_in, in, qpn);
  198. break;
  199. case MLX5_CMD_OP_RTS2RTS_QP:
  200. obj_id = MLX5_GET(rts2rts_qp_in, in, qpn);
  201. break;
  202. case MLX5_CMD_OP_SQERR2RTS_QP:
  203. obj_id = MLX5_GET(sqerr2rts_qp_in, in, qpn);
  204. break;
  205. case MLX5_CMD_OP_2ERR_QP:
  206. obj_id = MLX5_GET(qp_2err_in, in, qpn);
  207. break;
  208. case MLX5_CMD_OP_2RST_QP:
  209. obj_id = MLX5_GET(qp_2rst_in, in, qpn);
  210. break;
  211. case MLX5_CMD_OP_QUERY_DCT:
  212. obj_id = MLX5_GET(query_dct_in, in, dctn);
  213. break;
  214. case MLX5_CMD_OP_QUERY_XRQ:
  215. obj_id = MLX5_GET(query_xrq_in, in, xrqn);
  216. break;
  217. case MLX5_CMD_OP_QUERY_XRC_SRQ:
  218. obj_id = MLX5_GET(query_xrc_srq_in, in, xrc_srqn);
  219. break;
  220. case MLX5_CMD_OP_ARM_XRC_SRQ:
  221. obj_id = MLX5_GET(arm_xrc_srq_in, in, xrc_srqn);
  222. break;
  223. case MLX5_CMD_OP_QUERY_SRQ:
  224. obj_id = MLX5_GET(query_srq_in, in, srqn);
  225. break;
  226. case MLX5_CMD_OP_ARM_RQ:
  227. obj_id = MLX5_GET(arm_rq_in, in, srq_number);
  228. break;
  229. case MLX5_CMD_OP_DRAIN_DCT:
  230. case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
  231. obj_id = MLX5_GET(drain_dct_in, in, dctn);
  232. break;
  233. case MLX5_CMD_OP_ARM_XRQ:
  234. obj_id = MLX5_GET(arm_xrq_in, in, xrqn);
  235. break;
  236. default:
  237. return false;
  238. }
  239. if (obj_id == obj->obj_id)
  240. return true;
  241. return false;
  242. }
  243. static bool devx_is_obj_create_cmd(const void *in)
  244. {
  245. u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
  246. switch (opcode) {
  247. case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
  248. case MLX5_CMD_OP_CREATE_MKEY:
  249. case MLX5_CMD_OP_CREATE_CQ:
  250. case MLX5_CMD_OP_ALLOC_PD:
  251. case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
  252. case MLX5_CMD_OP_CREATE_RMP:
  253. case MLX5_CMD_OP_CREATE_SQ:
  254. case MLX5_CMD_OP_CREATE_RQ:
  255. case MLX5_CMD_OP_CREATE_RQT:
  256. case MLX5_CMD_OP_CREATE_TIR:
  257. case MLX5_CMD_OP_CREATE_TIS:
  258. case MLX5_CMD_OP_ALLOC_Q_COUNTER:
  259. case MLX5_CMD_OP_CREATE_FLOW_TABLE:
  260. case MLX5_CMD_OP_CREATE_FLOW_GROUP:
  261. case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
  262. case MLX5_CMD_OP_ALLOC_ENCAP_HEADER:
  263. case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
  264. case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
  265. case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
  266. case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
  267. case MLX5_CMD_OP_CREATE_QP:
  268. case MLX5_CMD_OP_CREATE_SRQ:
  269. case MLX5_CMD_OP_CREATE_XRC_SRQ:
  270. case MLX5_CMD_OP_CREATE_DCT:
  271. case MLX5_CMD_OP_CREATE_XRQ:
  272. case MLX5_CMD_OP_ATTACH_TO_MCG:
  273. case MLX5_CMD_OP_ALLOC_XRCD:
  274. return true;
  275. case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
  276. {
  277. u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);
  278. if (op_mod == 0)
  279. return true;
  280. return false;
  281. }
  282. default:
  283. return false;
  284. }
  285. }
  286. static bool devx_is_obj_modify_cmd(const void *in)
  287. {
  288. u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
  289. switch (opcode) {
  290. case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
  291. case MLX5_CMD_OP_MODIFY_CQ:
  292. case MLX5_CMD_OP_MODIFY_RMP:
  293. case MLX5_CMD_OP_MODIFY_SQ:
  294. case MLX5_CMD_OP_MODIFY_RQ:
  295. case MLX5_CMD_OP_MODIFY_RQT:
  296. case MLX5_CMD_OP_MODIFY_TIR:
  297. case MLX5_CMD_OP_MODIFY_TIS:
  298. case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
  299. case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
  300. case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
  301. case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
  302. case MLX5_CMD_OP_RST2INIT_QP:
  303. case MLX5_CMD_OP_INIT2RTR_QP:
  304. case MLX5_CMD_OP_RTR2RTS_QP:
  305. case MLX5_CMD_OP_RTS2RTS_QP:
  306. case MLX5_CMD_OP_SQERR2RTS_QP:
  307. case MLX5_CMD_OP_2ERR_QP:
  308. case MLX5_CMD_OP_2RST_QP:
  309. case MLX5_CMD_OP_ARM_XRC_SRQ:
  310. case MLX5_CMD_OP_ARM_RQ:
  311. case MLX5_CMD_OP_DRAIN_DCT:
  312. case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
  313. case MLX5_CMD_OP_ARM_XRQ:
  314. return true;
  315. case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
  316. {
  317. u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);
  318. if (op_mod == 1)
  319. return true;
  320. return false;
  321. }
  322. default:
  323. return false;
  324. }
  325. }
/* Is this a whitelisted object-query command? */
static bool devx_is_obj_query_cmd(const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
	case MLX5_CMD_OP_QUERY_MKEY:
	case MLX5_CMD_OP_QUERY_CQ:
	case MLX5_CMD_OP_QUERY_RMP:
	case MLX5_CMD_OP_QUERY_SQ:
	case MLX5_CMD_OP_QUERY_RQ:
	case MLX5_CMD_OP_QUERY_RQT:
	case MLX5_CMD_OP_QUERY_TIR:
	case MLX5_CMD_OP_QUERY_TIS:
	case MLX5_CMD_OP_QUERY_Q_COUNTER:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
	case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_QP:
	case MLX5_CMD_OP_QUERY_SRQ:
	case MLX5_CMD_OP_QUERY_XRC_SRQ:
	case MLX5_CMD_OP_QUERY_DCT:
	case MLX5_CMD_OP_QUERY_XRQ:
		return true;
	default:
		return false;
	}
}
  357. static bool devx_is_general_cmd(void *in)
  358. {
  359. u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
  360. switch (opcode) {
  361. case MLX5_CMD_OP_QUERY_HCA_CAP:
  362. case MLX5_CMD_OP_QUERY_VPORT_STATE:
  363. case MLX5_CMD_OP_QUERY_ADAPTER:
  364. case MLX5_CMD_OP_QUERY_ISSI:
  365. case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
  366. case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
  367. case MLX5_CMD_OP_QUERY_VNIC_ENV:
  368. case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
  369. case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
  370. case MLX5_CMD_OP_NOP:
  371. case MLX5_CMD_OP_QUERY_CONG_STATUS:
  372. case MLX5_CMD_OP_QUERY_CONG_PARAMS:
  373. case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
  374. return true;
  375. default:
  376. return false;
  377. }
  378. }
/*
 * MLX5_IB_METHOD_DEVX_QUERY_EQN: translate a user-provided completion
 * vector number into the matching device EQ number.
 */
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_EQN)(
	struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_ucontext *c;
	struct mlx5_ib_dev *dev;
	int user_vector;
	int dev_eqn;
	unsigned int irqn; /* filled by mlx5_vector2eqn but not reported */
	int err;

	if (uverbs_copy_from(&user_vector, attrs,
			     MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC))
		return -EFAULT;

	c = devx_ufile2uctx(file);
	if (IS_ERR(c))
		return PTR_ERR(c);
	dev = to_mdev(c->ibucontext.device);

	err = mlx5_vector2eqn(dev->mdev, user_vector, &dev_eqn, &irqn);
	if (err < 0)
		return err;

	if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
			   &dev_eqn, sizeof(dev_eqn)))
		return -EFAULT;

	return 0;
}
/*
 * Security note:
 * The hardware protection mechanism works like this: Each device object that
 * is subject to UAR doorbells (QP/SQ/CQ) gets a UAR ID (called uar_page in
 * the device specification manual) upon its creation. Then upon doorbell,
 * hardware fetches the object context for which the doorbell was rang, and
 * validates that the UAR through which the DB was rang matches the UAR ID
 * of the object.
 * If no match the doorbell is silently ignored by the hardware. Of course,
 * the user cannot ring a doorbell on a UAR that was not mapped to it.
 * Now in devx, as the devx kernel does not manipulate the QP/SQ/CQ command
 * mailboxes (except tagging them with UID), we expose to the user its UAR
 * ID, so it can embed it in these objects in the expected specification
 * format. So the only thing the user can do is hurt itself by creating a
 * QP/SQ/CQ with a UAR ID other than his, and then in this case other users
 * may ring a doorbell on its objects.
 * The consequence of that will be that another user can schedule a QP/SQ
 * of the buggy user for execution (just insert it to the hardware schedule
 * queue or arm its CQ for event generation), no further harm is expected.
 */
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_UAR)(
	struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_ucontext *c;
	struct mlx5_ib_dev *dev;
	u32 user_idx;
	s32 dev_idx;

	c = devx_ufile2uctx(file);
	if (IS_ERR(c))
		return PTR_ERR(c);
	dev = to_mdev(c->ibucontext.device);

	if (uverbs_copy_from(&user_idx, attrs,
			     MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX))
		return -EFAULT;

	/* Map the user's bfreg index to the device UAR index; a negative
	 * value from bfregn_to_uar_index() is an errno.
	 */
	dev_idx = bfregn_to_uar_index(dev, &c->bfregi, user_idx, true);
	if (dev_idx < 0)
		return dev_idx;

	if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
			   &dev_idx, sizeof(dev_idx)))
		return -EFAULT;

	return 0;
}
/*
 * MLX5_IB_METHOD_DEVX_OTHER: execute a general (non object-bound) HCA
 * command on behalf of a devx-enabled user context and copy the firmware
 * outbox back to userspace.
 */
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OTHER)(
	struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_ucontext *c;
	struct mlx5_ib_dev *dev;
	void *cmd_in = uverbs_attr_get_alloced_ptr(
		attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN);
	int cmd_out_len = uverbs_attr_get_len(attrs,
					      MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT);
	void *cmd_out;
	int err;

	c = devx_ufile2uctx(file);
	if (IS_ERR(c))
		return PTR_ERR(c);
	dev = to_mdev(c->ibucontext.device);

	/* A zero devx_uid means the context was not opened for devx */
	if (!c->devx_uid)
		return -EPERM;

	/* Only white list of some general HCA commands are allowed for this method. */
	if (!devx_is_general_cmd(cmd_in))
		return -EINVAL;

	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
	if (IS_ERR(cmd_out))
		return PTR_ERR(cmd_out);

	/* Tag the command with the caller's devx uid before execution */
	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, c->devx_uid);
	err = mlx5_cmd_exec(dev->mdev, cmd_in,
			    uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN),
			    cmd_out, cmd_out_len);
	if (err)
		return err;

	return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT, cmd_out,
			      cmd_out_len);
}
/*
 * Build, into @din, the destroy/dealloc command that undoes the create
 * command @in whose firmware response is @out.  Also reports the created
 * object's id in *obj_id and the destroy inbox length in *dinlen.
 *
 * Most objects destroy via the general object header (obj_id + opcode);
 * the exceptions below need command-specific inboxes with extra fields
 * copied over from the create command.
 */
static void devx_obj_build_destroy_cmd(void *in, void *out, void *din,
				       u32 *dinlen,
				       u32 *obj_id)
{
	u16 obj_type = MLX5_GET(general_obj_in_cmd_hdr, in, obj_type);
	u16 uid = MLX5_GET(general_obj_in_cmd_hdr, in, uid);

	/* Default: general-object-header destroy; specific cases override */
	*obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
	*dinlen = MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr);

	MLX5_SET(general_obj_in_cmd_hdr, din, obj_id, *obj_id);
	MLX5_SET(general_obj_in_cmd_hdr, din, uid, uid);

	switch (MLX5_GET(general_obj_in_cmd_hdr, in, opcode)) {
	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
		MLX5_SET(general_obj_in_cmd_hdr, din, obj_type, obj_type);
		break;
	case MLX5_CMD_OP_CREATE_MKEY:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_MKEY);
		break;
	case MLX5_CMD_OP_CREATE_CQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_CQ);
		break;
	case MLX5_CMD_OP_ALLOC_PD:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DEALLOC_PD);
		break;
	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
		break;
	case MLX5_CMD_OP_CREATE_RMP:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RMP);
		break;
	case MLX5_CMD_OP_CREATE_SQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_SQ);
		break;
	case MLX5_CMD_OP_CREATE_RQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RQ);
		break;
	case MLX5_CMD_OP_CREATE_RQT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RQT);
		break;
	case MLX5_CMD_OP_CREATE_TIR:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_TIR);
		break;
	case MLX5_CMD_OP_CREATE_TIS:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_TIS);
		break;
	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_Q_COUNTER);
		break;
	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
		/* Destroy needs vport/table_type context from the create */
		*dinlen = MLX5_ST_SZ_BYTES(destroy_flow_table_in);
		*obj_id = MLX5_GET(create_flow_table_out, out, table_id);
		MLX5_SET(destroy_flow_table_in, din, other_vport,
			 MLX5_GET(create_flow_table_in, in, other_vport));
		MLX5_SET(destroy_flow_table_in, din, vport_number,
			 MLX5_GET(create_flow_table_in, in, vport_number));
		MLX5_SET(destroy_flow_table_in, din, table_type,
			 MLX5_GET(create_flow_table_in, in, table_type));
		MLX5_SET(destroy_flow_table_in, din, table_id, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
		break;
	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
		*dinlen = MLX5_ST_SZ_BYTES(destroy_flow_group_in);
		*obj_id = MLX5_GET(create_flow_group_out, out, group_id);
		MLX5_SET(destroy_flow_group_in, din, other_vport,
			 MLX5_GET(create_flow_group_in, in, other_vport));
		MLX5_SET(destroy_flow_group_in, din, vport_number,
			 MLX5_GET(create_flow_group_in, in, vport_number));
		MLX5_SET(destroy_flow_group_in, din, table_type,
			 MLX5_GET(create_flow_group_in, in, table_type));
		MLX5_SET(destroy_flow_group_in, din, table_id,
			 MLX5_GET(create_flow_group_in, in, table_id));
		MLX5_SET(destroy_flow_group_in, din, group_id, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_FLOW_GROUP);
		break;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
		/* The FTE id comes from the create inbox, not the outbox */
		*dinlen = MLX5_ST_SZ_BYTES(delete_fte_in);
		*obj_id = MLX5_GET(set_fte_in, in, flow_index);
		MLX5_SET(delete_fte_in, din, other_vport,
			 MLX5_GET(set_fte_in, in, other_vport));
		MLX5_SET(delete_fte_in, din, vport_number,
			 MLX5_GET(set_fte_in, in, vport_number));
		MLX5_SET(delete_fte_in, din, table_type,
			 MLX5_GET(set_fte_in, in, table_type));
		MLX5_SET(delete_fte_in, din, table_id,
			 MLX5_GET(set_fte_in, in, table_id));
		MLX5_SET(delete_fte_in, din, flow_index, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
		break;
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
		break;
	case MLX5_CMD_OP_ALLOC_ENCAP_HEADER:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_ENCAP_HEADER);
		break;
	case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
		break;
	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
		*dinlen = MLX5_ST_SZ_BYTES(destroy_scheduling_element_in);
		*obj_id = MLX5_GET(create_scheduling_element_out, out,
				   scheduling_element_id);
		MLX5_SET(destroy_scheduling_element_in, din,
			 scheduling_hierarchy,
			 MLX5_GET(create_scheduling_element_in, in,
				  scheduling_hierarchy));
		MLX5_SET(destroy_scheduling_element_in, din,
			 scheduling_element_id, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT);
		break;
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
		*dinlen = MLX5_ST_SZ_BYTES(delete_vxlan_udp_dport_in);
		*obj_id = MLX5_GET(add_vxlan_udp_dport_in, in, vxlan_udp_port);
		MLX5_SET(delete_vxlan_udp_dport_in, din, vxlan_udp_port, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
		break;
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
		*dinlen = MLX5_ST_SZ_BYTES(delete_l2_table_entry_in);
		*obj_id = MLX5_GET(set_l2_table_entry_in, in, table_index);
		MLX5_SET(delete_l2_table_entry_in, din, table_index, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY);
		break;
	case MLX5_CMD_OP_CREATE_QP:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_QP);
		break;
	case MLX5_CMD_OP_CREATE_SRQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_SRQ);
		break;
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_XRC_SRQ);
		break;
	case MLX5_CMD_OP_CREATE_DCT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_DCT);
		break;
	case MLX5_CMD_OP_CREATE_XRQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_XRQ);
		break;
	case MLX5_CMD_OP_ATTACH_TO_MCG:
		/* Detach needs the same QPN and multicast GID as the attach */
		*dinlen = MLX5_ST_SZ_BYTES(detach_from_mcg_in);
		MLX5_SET(detach_from_mcg_in, din, qpn,
			 MLX5_GET(attach_to_mcg_in, in, qpn));
		memcpy(MLX5_ADDR_OF(detach_from_mcg_in, din, multicast_gid),
		       MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid),
		       MLX5_FLD_SZ_BYTES(attach_to_mcg_in, multicast_gid));
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DETACH_FROM_MCG);
		break;
	case MLX5_CMD_OP_ALLOC_XRCD:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
		break;
	default:
		/* The entry must match to one of the devx_is_obj_create_cmd */
		WARN_ON(true);
		break;
	}
}
  643. static int devx_obj_cleanup(struct ib_uobject *uobject,
  644. enum rdma_remove_reason why)
  645. {
  646. u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
  647. struct devx_obj *obj = uobject->object;
  648. int ret;
  649. ret = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
  650. if (ib_is_destroy_retryable(ret, why, uobject))
  651. return ret;
  652. kfree(obj);
  653. return ret;
  654. }
/*
 * MLX5_IB_METHOD_DEVX_OBJ_CREATE: run a whitelisted create command,
 * wrap the resulting firmware object in a devx_obj (with its destroy
 * command prebuilt) and return the firmware outbox to userspace.
 */
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
	struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN);
	int cmd_out_len = uverbs_attr_get_len(attrs,
					      MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT);
	void *cmd_out;
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE);
	struct mlx5_ib_ucontext *c = to_mucontext(uobj->context);
	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	struct devx_obj *obj;
	int err;

	/* A zero devx_uid means the context was not opened for devx */
	if (!c->devx_uid)
		return -EPERM;

	if (!devx_is_obj_create_cmd(cmd_in))
		return -EINVAL;

	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
	if (IS_ERR(cmd_out))
		return PTR_ERR(cmd_out);

	obj = kzalloc(sizeof(struct devx_obj), GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	/* Tag the command with the caller's devx uid before execution */
	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, c->devx_uid);
	err = mlx5_cmd_exec(dev->mdev, cmd_in,
			    uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN),
			    cmd_out, cmd_out_len);
	if (err)
		goto obj_free;

	uobj->object = obj;
	obj->mdev = dev->mdev;
	/* Prebuild the matching destroy command for devx_obj_cleanup() */
	devx_obj_build_destroy_cmd(cmd_in, cmd_out, obj->dinbox, &obj->dinlen, &obj->obj_id);
	WARN_ON(obj->dinlen > MLX5_MAX_DESTROY_INBOX_SIZE_DW * sizeof(u32));

	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT, cmd_out, cmd_out_len);
	if (err)
		goto obj_destroy;

	return 0;

obj_destroy:
	/* Copy-out failed: undo the firmware create before freeing */
	mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
obj_free:
	kfree(obj);
	return err;
}
/*
 * MLX5_IB_METHOD_DEVX_OBJ_MODIFY: run a whitelisted modify command
 * against an existing devx object and return the firmware outbox.
 */
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_MODIFY)(
	struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN);
	int cmd_out_len = uverbs_attr_get_len(attrs,
					      MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT);
	struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
							  MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE);
	struct mlx5_ib_ucontext *c = to_mucontext(uobj->context);
	struct devx_obj *obj = uobj->object;
	void *cmd_out;
	int err;

	/* A zero devx_uid means the context was not opened for devx */
	if (!c->devx_uid)
		return -EPERM;

	if (!devx_is_obj_modify_cmd(cmd_in))
		return -EINVAL;

	/* The id inside the command must match the handle's object */
	if (!devx_is_valid_obj_id(obj, cmd_in))
		return -EINVAL;

	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
	if (IS_ERR(cmd_out))
		return PTR_ERR(cmd_out);

	/* Tag the command with the caller's devx uid before execution */
	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, c->devx_uid);
	err = mlx5_cmd_exec(obj->mdev, cmd_in,
			    uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN),
			    cmd_out, cmd_out_len);
	if (err)
		return err;

	return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
			      cmd_out, cmd_out_len);
}
  729. static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_QUERY)(
  730. struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs)
  731. {
  732. void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN);
  733. int cmd_out_len = uverbs_attr_get_len(attrs,
  734. MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT);
  735. struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
  736. MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE);
  737. struct mlx5_ib_ucontext *c = to_mucontext(uobj->context);
  738. struct devx_obj *obj = uobj->object;
  739. void *cmd_out;
  740. int err;
  741. if (!c->devx_uid)
  742. return -EPERM;
  743. if (!devx_is_obj_query_cmd(cmd_in))
  744. return -EINVAL;
  745. if (!devx_is_valid_obj_id(obj, cmd_in))
  746. return -EINVAL;
  747. cmd_out = uverbs_zalloc(attrs, cmd_out_len);
  748. if (IS_ERR(cmd_out))
  749. return PTR_ERR(cmd_out);
  750. MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, c->devx_uid);
  751. err = mlx5_cmd_exec(obj->mdev, cmd_in,
  752. uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN),
  753. cmd_out, cmd_out_len);
  754. if (err)
  755. return err;
  756. return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
  757. cmd_out, cmd_out_len);
  758. }
  759. static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
  760. struct uverbs_attr_bundle *attrs,
  761. struct devx_umem *obj)
  762. {
  763. u64 addr;
  764. size_t size;
  765. u32 access;
  766. int npages;
  767. int err;
  768. u32 page_mask;
  769. if (uverbs_copy_from(&addr, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR) ||
  770. uverbs_copy_from(&size, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_LEN))
  771. return -EFAULT;
  772. err = uverbs_get_flags32(&access, attrs,
  773. MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
  774. IB_ACCESS_LOCAL_WRITE |
  775. IB_ACCESS_REMOTE_WRITE |
  776. IB_ACCESS_REMOTE_READ);
  777. if (err)
  778. return err;
  779. err = ib_check_mr_access(access);
  780. if (err)
  781. return err;
  782. obj->umem = ib_umem_get(ucontext, addr, size, access, 0);
  783. if (IS_ERR(obj->umem))
  784. return PTR_ERR(obj->umem);
  785. mlx5_ib_cont_pages(obj->umem, obj->umem->address,
  786. MLX5_MKEY_PAGE_SHIFT_MASK, &npages,
  787. &obj->page_shift, &obj->ncont, NULL);
  788. if (!npages) {
  789. ib_umem_release(obj->umem);
  790. return -EINVAL;
  791. }
  792. page_mask = (1 << obj->page_shift) - 1;
  793. obj->page_offset = obj->umem->address & page_mask;
  794. return 0;
  795. }
  796. static int devx_umem_reg_cmd_alloc(struct uverbs_attr_bundle *attrs,
  797. struct devx_umem *obj,
  798. struct devx_umem_reg_cmd *cmd)
  799. {
  800. cmd->inlen = MLX5_ST_SZ_BYTES(create_umem_in) +
  801. (MLX5_ST_SZ_BYTES(mtt) * obj->ncont);
  802. cmd->in = uverbs_zalloc(attrs, cmd->inlen);
  803. return PTR_ERR_OR_ZERO(cmd->in);
  804. }
/*
 * Fill the CREATE_UMEM inbox allocated by devx_umem_reg_cmd_alloc():
 * general-object header, umem attributes (MTT count, page size, offset) and
 * the MTT address translation entries for every page of the pinned umem.
 */
static void devx_umem_reg_cmd_build(struct mlx5_ib_dev *dev,
				    struct devx_umem *obj,
				    struct devx_umem_reg_cmd *cmd)
{
	void *umem;
	__be64 *mtt;

	umem = MLX5_ADDR_OF(create_umem_in, cmd->in, umem);
	mtt = (__be64 *)MLX5_ADDR_OF(umem, umem, mtt);

	MLX5_SET(general_obj_in_cmd_hdr, cmd->in, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, cmd->in, obj_type, MLX5_OBJ_TYPE_UMEM);
	MLX5_SET64(umem, umem, num_of_mtt, obj->ncont);
	/* log_page_size is expressed relative to the adapter's base page */
	MLX5_SET(umem, umem, log_page_size, obj->page_shift -
					    MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET(umem, umem, page_offset, obj->page_offset);
	/* MTTs get write permission only if the umem itself is writable */
	mlx5_ib_populate_pas(dev, obj->umem, obj->page_shift, mtt,
			     (obj->umem->writable ? MLX5_IB_MTT_WRITE : 0) |
			     MLX5_IB_MTT_READ);
}
  823. static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_UMEM_REG)(
  824. struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs)
  825. {
  826. struct devx_umem_reg_cmd cmd;
  827. struct devx_umem *obj;
  828. struct ib_uobject *uobj = uverbs_attr_get_uobject(
  829. attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE);
  830. u32 obj_id;
  831. struct mlx5_ib_ucontext *c = to_mucontext(uobj->context);
  832. struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
  833. int err;
  834. if (!c->devx_uid)
  835. return -EPERM;
  836. obj = kzalloc(sizeof(struct devx_umem), GFP_KERNEL);
  837. if (!obj)
  838. return -ENOMEM;
  839. err = devx_umem_get(dev, &c->ibucontext, attrs, obj);
  840. if (err)
  841. goto err_obj_free;
  842. err = devx_umem_reg_cmd_alloc(attrs, obj, &cmd);
  843. if (err)
  844. goto err_umem_release;
  845. devx_umem_reg_cmd_build(dev, obj, &cmd);
  846. MLX5_SET(general_obj_in_cmd_hdr, cmd.in, uid, c->devx_uid);
  847. err = mlx5_cmd_exec(dev->mdev, cmd.in, cmd.inlen, cmd.out,
  848. sizeof(cmd.out));
  849. if (err)
  850. goto err_umem_release;
  851. obj->mdev = dev->mdev;
  852. uobj->object = obj;
  853. devx_obj_build_destroy_cmd(cmd.in, cmd.out, obj->dinbox, &obj->dinlen, &obj_id);
  854. err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID, &obj_id, sizeof(obj_id));
  855. if (err)
  856. goto err_umem_destroy;
  857. return 0;
  858. err_umem_destroy:
  859. mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, cmd.out, sizeof(cmd.out));
  860. err_umem_release:
  861. ib_umem_release(obj->umem);
  862. err_obj_free:
  863. kfree(obj);
  864. return err;
  865. }
  866. static int devx_umem_cleanup(struct ib_uobject *uobject,
  867. enum rdma_remove_reason why)
  868. {
  869. struct devx_umem *obj = uobject->object;
  870. u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
  871. int err;
  872. err = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
  873. if (ib_is_destroy_retryable(err, why, uobject))
  874. return err;
  875. ib_umem_release(obj->umem);
  876. kfree(obj);
  877. return 0;
  878. }
/*
 * uverbs ioctl() interface description for DEVX.  The DECLARE_UVERBS_*
 * macros below build the static attribute/method/object tables that the
 * uverbs layer uses to validate and dispatch DEVX ioctls to the handlers
 * defined above.
 */

/* UMEM_REG: pin user memory and create a firmware UMEM object. */
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_UMEM_REG,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE,
			MLX5_IB_OBJECT_DEVX_UMEM,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR,
			   UVERBS_ATTR_TYPE(u64),
			   UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_LEN,
			   UVERBS_ATTR_TYPE(u64),
			   UA_MANDATORY),
	UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
			     enum ib_access_flags),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID,
			    UVERBS_ATTR_TYPE(u32),
			    UA_MANDATORY));

/* UMEM_DEREG: destroy-only method; cleanup runs via devx_umem_cleanup(). */
DECLARE_UVERBS_NAMED_METHOD_DESTROY(
	MLX5_IB_METHOD_DEVX_UMEM_DEREG,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_DEREG_HANDLE,
			MLX5_IB_OBJECT_DEVX_UMEM,
			UVERBS_ACCESS_DESTROY,
			UA_MANDATORY));

/* QUERY_EQN: translate a user completion vector to a device EQ number. */
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_QUERY_EQN,
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC,
			   UVERBS_ATTR_TYPE(u32),
			   UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
			    UVERBS_ATTR_TYPE(u32),
			    UA_MANDATORY));

/* QUERY_UAR: translate a user UAR index to the device UAR index. */
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_QUERY_UAR,
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX,
			   UVERBS_ATTR_TYPE(u32),
			   UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
			    UVERBS_ATTR_TYPE(u32),
			    UA_MANDATORY));

/* OTHER: pass-through for raw FW commands with a general-object header. */
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OTHER,
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OTHER_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
		UA_MANDATORY));

/* OBJ_CREATE: create a firmware object from a raw user-supplied command. */
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OBJ_CREATE,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE,
			MLX5_IB_OBJECT_DEVX_OBJ,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
		UA_MANDATORY));

/* OBJ_DESTROY: destroy-only method; cleanup runs via devx_obj_cleanup(). */
DECLARE_UVERBS_NAMED_METHOD_DESTROY(
	MLX5_IB_METHOD_DEVX_OBJ_DESTROY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_DESTROY_HANDLE,
			MLX5_IB_OBJECT_DEVX_OBJ,
			UVERBS_ACCESS_DESTROY,
			UA_MANDATORY));

/* OBJ_MODIFY: takes WRITE access on the handle. */
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OBJ_MODIFY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE,
			MLX5_IB_OBJECT_DEVX_OBJ,
			UVERBS_ACCESS_WRITE,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
		UA_MANDATORY));

/* OBJ_QUERY: takes READ access on the handle. */
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OBJ_QUERY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE,
			MLX5_IB_OBJECT_DEVX_OBJ,
			UVERBS_ACCESS_READ,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
		UA_MANDATORY));

/* Handle-less methods grouped under the global DEVX object. */
DECLARE_UVERBS_GLOBAL_METHODS(MLX5_IB_OBJECT_DEVX,
			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OTHER),
			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_UAR),
			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_EQN));

/* IDR-backed DEVX object type; devx_obj_cleanup() runs on removal. */
DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_OBJ,
			    UVERBS_TYPE_ALLOC_IDR(devx_obj_cleanup),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_CREATE),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_DESTROY),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_MODIFY),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_QUERY));

/* IDR-backed UMEM object type; devx_umem_cleanup() runs on removal. */
DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_UMEM,
			    UVERBS_TYPE_ALLOC_IDR(devx_umem_cleanup),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_REG),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_DEREG));

/* Root of the DEVX uverbs object tree exposed by mlx5_ib_get_devx_tree(). */
DECLARE_UVERBS_OBJECT_TREE(devx_objects,
			   &UVERBS_OBJECT(MLX5_IB_OBJECT_DEVX),
			   &UVERBS_OBJECT(MLX5_IB_OBJECT_DEVX_OBJ),
			   &UVERBS_OBJECT(MLX5_IB_OBJECT_DEVX_UMEM));
/*
 * Export the DEVX uverbs object tree so the main mlx5_ib driver can hook it
 * into the device's ioctl method tables.
 */
const struct uverbs_object_tree_def *mlx5_ib_get_devx_tree(void)
{
	return &devx_objects;
}