mlx5_ib.h
/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX5_IB_H
#define MLX5_IB_H

#include <linux/kernel.h>
#include <linux/sched.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/srq.h>
#include <linux/types.h>
#include <linux/mlx5/transobj.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/mlx5-abi.h>
#include <rdma/uverbs_ioctl.h>
#include <rdma/mlx5_user_ioctl_cmds.h>

#define mlx5_ib_dbg(dev, format, arg...)				\
	pr_debug("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__, \
		 __LINE__, current->pid, ##arg)

#define mlx5_ib_err(dev, format, arg...)				\
	pr_err("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__, \
	       __LINE__, current->pid, ##arg)

#define mlx5_ib_warn(dev, format, arg...)				\
	pr_warn("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__, \
		__LINE__, current->pid, ##arg)
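
/*
 * Illustrative use of the logging helpers above (qpn is a hypothetical
 * local variable; dev must be a struct mlx5_ib_dev *, since the macros
 * dereference (dev)->ib_dev):
 *
 *	mlx5_ib_dbg(dev, "created QP 0x%x\n", qpn);
 */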
#define field_avail(type, fld, sz) (offsetof(type, fld) +	\
				    sizeof(((type *)0)->fld) <= (sz))
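
/*
 * field_avail() reports whether a user command copied in with inlen bytes
 * is long enough to contain a given field. Illustrative sketch (the
 * surrounding code is hypothetical; see get_qp_user_index() below for a
 * real caller):
 *
 *	if (field_avail(struct mlx5_ib_create_qp, uidx, inlen))
 *		uidx = ucmd->uidx;
 */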
#define MLX5_IB_DEFAULT_UIDX 0xffffff
#define MLX5_USER_ASSIGNED_UIDX_MASK __mlx5_mask(qpc, user_index)

#define MLX5_MKEY_PAGE_SHIFT_MASK __mlx5_mask(mkc, log_page_size)

enum {
	MLX5_IB_MMAP_CMD_SHIFT	= 8,
	MLX5_IB_MMAP_CMD_MASK	= 0xff,
};

enum {
	MLX5_RES_SCAT_DATA32_CQE	= 0x1,
	MLX5_RES_SCAT_DATA64_CQE	= 0x2,
	MLX5_REQ_SCAT_DATA32_CQE	= 0x11,
	MLX5_REQ_SCAT_DATA64_CQE	= 0x22,
};

enum mlx5_ib_mad_ifc_flags {
	MLX5_MAD_IFC_IGNORE_MKEY	= 1,
	MLX5_MAD_IFC_IGNORE_BKEY	= 2,
	MLX5_MAD_IFC_NET_VIEW		= 4,
};

enum {
	MLX5_CROSS_CHANNEL_BFREG	= 0,
};

enum {
	MLX5_CQE_VERSION_V0,
	MLX5_CQE_VERSION_V1,
};

enum {
	MLX5_TM_MAX_RNDV_MSG_SIZE	= 64,
	MLX5_TM_MAX_SGE			= 1,
};

enum {
	MLX5_IB_INVALID_UAR_INDEX	= BIT(31),
	MLX5_IB_INVALID_BFREG		= BIT(31),
};

enum {
	MLX5_MAX_MEMIC_PAGES		= 0x100,
	MLX5_MEMIC_ALLOC_SIZE_MASK	= 0x3f,
};

enum {
	MLX5_MEMIC_BASE_ALIGN	= 6,
	MLX5_MEMIC_BASE_SIZE	= 1 << MLX5_MEMIC_BASE_ALIGN,
};

struct mlx5_ib_vma_private_data {
	struct list_head list;
	struct vm_area_struct *vma;
	/* protect vma_private_list add/del */
	struct mutex *vma_private_list_mutex;
};

struct mlx5_ib_ucontext {
	struct ib_ucontext	ibucontext;
	struct list_head	db_page_list;

	/* protect doorbell record alloc/free
	 */
	struct mutex		db_page_mutex;
	struct mlx5_bfreg_info	bfregi;
	u8			cqe_version;
	/* Transport Domain number */
	u32			tdn;
	struct list_head	vma_private_list;
	/* protect vma_private_list add/del */
	struct mutex		vma_private_list_mutex;

	u64			lib_caps;
	DECLARE_BITMAP(dm_pages, MLX5_MAX_MEMIC_PAGES);
	u16			devx_uid;
	/* For RoCE LAG TX affinity */
	atomic_t		tx_port_affinity;
};

static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct mlx5_ib_ucontext, ibucontext);
}

struct mlx5_ib_pd {
	struct ib_pd		ibpd;
	u32			pdn;
};

#define MLX5_IB_FLOW_MCAST_PRIO		(MLX5_BY_PASS_NUM_PRIOS - 1)
#define MLX5_IB_FLOW_LAST_PRIO		(MLX5_BY_PASS_NUM_REGULAR_PRIOS - 1)
#if (MLX5_IB_FLOW_LAST_PRIO <= 0)
#error "Invalid number of bypass priorities"
#endif
#define MLX5_IB_FLOW_LEFTOVERS_PRIO	(MLX5_IB_FLOW_MCAST_PRIO + 1)

#define MLX5_IB_NUM_FLOW_FT		(MLX5_IB_FLOW_LEFTOVERS_PRIO + 1)
#define MLX5_IB_NUM_SNIFFER_FTS		2
#define MLX5_IB_NUM_EGRESS_FTS		1

struct mlx5_ib_flow_prio {
	struct mlx5_flow_table		*flow_table;
	unsigned int			refcount;
};

struct mlx5_ib_flow_handler {
	struct list_head		list;
	struct ib_flow			ibflow;
	struct mlx5_ib_flow_prio	*prio;
	struct mlx5_flow_handle		*rule;
	struct ib_counters		*ibcounters;
	struct mlx5_ib_dev		*dev;
	struct mlx5_ib_flow_matcher	*flow_matcher;
};

struct mlx5_ib_flow_matcher {
	struct mlx5_ib_match_params matcher_mask;
	int			mask_len;
	enum mlx5_ib_flow_type	flow_type;
	u16			priority;
	struct mlx5_core_dev	*mdev;
	atomic_t		usecnt;
	u8			match_criteria_enable;
};

struct mlx5_ib_flow_db {
	struct mlx5_ib_flow_prio	prios[MLX5_IB_NUM_FLOW_FT];
	struct mlx5_ib_flow_prio	sniffer[MLX5_IB_NUM_SNIFFER_FTS];
	struct mlx5_ib_flow_prio	egress[MLX5_IB_NUM_EGRESS_FTS];
	struct mlx5_flow_table		*lag_demux_ft;
	/* Protect the flow steering bypass flow tables when adding or
	 * removing flow rules; only a single add/removal of a flow
	 * steering rule can be in flight at a time.
	 */
	struct mutex			lock;
};

/* Use macros here so that we don't have to duplicate
 * enum ib_send_flags and enum ib_qp_type for the low-level driver.
 */

#define MLX5_IB_SEND_UMR_ENABLE_MR	       (IB_SEND_RESERVED_START << 0)
#define MLX5_IB_SEND_UMR_DISABLE_MR	       (IB_SEND_RESERVED_START << 1)
#define MLX5_IB_SEND_UMR_FAIL_IF_FREE	       (IB_SEND_RESERVED_START << 2)
#define MLX5_IB_SEND_UMR_UPDATE_XLT	       (IB_SEND_RESERVED_START << 3)
#define MLX5_IB_SEND_UMR_UPDATE_TRANSLATION    (IB_SEND_RESERVED_START << 4)
#define MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS      IB_SEND_RESERVED_END

#define MLX5_IB_QPT_REG_UMR	IB_QPT_RESERVED1
/*
 * IB_QPT_GSI creates the software wrapper around GSI, and MLX5_IB_QPT_HW_GSI
 * creates the actual hardware QP.
 */
#define MLX5_IB_QPT_HW_GSI	IB_QPT_RESERVED2
#define MLX5_IB_QPT_DCI		IB_QPT_RESERVED3
#define MLX5_IB_QPT_DCT		IB_QPT_RESERVED4
#define MLX5_IB_WR_UMR		IB_WR_RESERVED1

#define MLX5_IB_UMR_OCTOWORD	       16
#define MLX5_IB_UMR_XLT_ALIGNMENT      64

#define MLX5_IB_UPD_XLT_ZAP	      BIT(0)
#define MLX5_IB_UPD_XLT_ENABLE	      BIT(1)
#define MLX5_IB_UPD_XLT_ATOMIC	      BIT(2)
#define MLX5_IB_UPD_XLT_ADDR	      BIT(3)
#define MLX5_IB_UPD_XLT_PD	      BIT(4)
#define MLX5_IB_UPD_XLT_ACCESS	      BIT(5)
#define MLX5_IB_UPD_XLT_INDIRECT      BIT(6)
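
/*
 * The MLX5_IB_UPD_XLT_* flags are OR-ed together when updating a
 * translation table via mlx5_ib_update_xlt() (declared below).
 * Illustrative call, not a verbatim call site; npages and page_shift
 * are hypothetical locals:
 *
 *	mlx5_ib_update_xlt(mr, 0, npages, page_shift,
 *			   MLX5_IB_UPD_XLT_ENABLE | MLX5_IB_UPD_XLT_ATOMIC);
 */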
/* Private QP creation flags to be passed in ib_qp_init_attr.create_flags.
 *
 * These flags are intended for internal use by the mlx5_ib driver, and they
 * rely on the range reserved for that use in the ib_qp_create_flags enum.
 */

/* Create a UD QP whose source QP number is 1 */
static inline enum ib_qp_create_flags mlx5_ib_create_qp_sqpn_qp1(void)
{
	return IB_QP_CREATE_RESERVED_START;
}

struct wr_list {
	u16	opcode;
	u16	next;
};

enum mlx5_ib_rq_flags {
	MLX5_IB_RQ_CVLAN_STRIPPING		= 1 << 0,
	MLX5_IB_RQ_PCI_WRITE_END_PADDING	= 1 << 1,
};

struct mlx5_ib_wq {
	u64		       *wrid;
	u32		       *wr_data;
	struct wr_list	       *w_list;
	unsigned	       *wqe_head;
	u16			unsig_count;

	/* serialize post to the work queue
	 */
	spinlock_t		lock;
	int			wqe_cnt;
	int			max_post;
	int			max_gs;
	int			offset;
	int			wqe_shift;
	unsigned		head;
	unsigned		tail;
	u16			cur_post;
	u16			last_poll;
	void		       *qend;
};

enum mlx5_ib_wq_flags {
	MLX5_IB_WQ_FLAGS_DELAY_DROP	= 0x1,
	MLX5_IB_WQ_FLAGS_STRIDING_RQ	= 0x2,
};

#define MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES 9
#define MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES 16
#define MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES 6
#define MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES 13

struct mlx5_ib_rwq {
	struct ib_wq		ibwq;
	struct mlx5_core_qp	core_qp;
	u32			rq_num_pas;
	u32			log_rq_stride;
	u32			log_rq_size;
	u32			rq_page_offset;
	u32			log_page_size;
	u32			log_num_strides;
	u32			two_byte_shift_en;
	u32			single_stride_log_num_of_bytes;
	struct ib_umem		*umem;
	size_t			buf_size;
	unsigned int		page_shift;
	int			create_type;
	struct mlx5_db		db;
	u32			user_index;
	u32			wqe_count;
	u32			wqe_shift;
	int			wq_sig;
	u32			create_flags; /* Use enum mlx5_ib_wq_flags */
};

enum {
	MLX5_QP_USER,
	MLX5_QP_KERNEL,
	MLX5_QP_EMPTY
};

enum {
	MLX5_WQ_USER,
	MLX5_WQ_KERNEL
};

struct mlx5_ib_rwq_ind_table {
	struct ib_rwq_ind_table ib_rwq_ind_tbl;
	u32			rqtn;
};

struct mlx5_ib_ubuffer {
	struct ib_umem	       *umem;
	int			buf_size;
	u64			buf_addr;
};

struct mlx5_ib_qp_base {
	struct mlx5_ib_qp	*container_mibqp;
	struct mlx5_core_qp	mqp;
	struct mlx5_ib_ubuffer	ubuffer;
};

struct mlx5_ib_qp_trans {
	struct mlx5_ib_qp_base	base;
	u16			xrcdn;
	u8			alt_port;
	u8			atomic_rd_en;
	u8			resp_depth;
};

struct mlx5_ib_rss_qp {
	u32	tirn;
};

struct mlx5_ib_rq {
	struct mlx5_ib_qp_base base;
	struct mlx5_ib_wq	*rq;
	struct mlx5_ib_ubuffer	ubuffer;
	struct mlx5_db		*doorbell;
	u32			tirn;
	u8			state;
	u32			flags;
};

struct mlx5_ib_sq {
	struct mlx5_ib_qp_base base;
	struct mlx5_ib_wq	*sq;
	struct mlx5_ib_ubuffer	ubuffer;
	struct mlx5_db		*doorbell;
	struct mlx5_flow_handle	*flow_rule;
	u32			tisn;
	u8			state;
};

struct mlx5_ib_raw_packet_qp {
	struct mlx5_ib_sq sq;
	struct mlx5_ib_rq rq;
};

struct mlx5_bf {
	int			buf_size;
	unsigned long		offset;
	struct mlx5_sq_bfreg   *bfreg;
};

struct mlx5_ib_dct {
	struct mlx5_core_dct	mdct;
	u32			*in;
};
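
/*
 * One mlx5_ib_qp backs several kinds of hardware objects; which union
 * member below is valid depends on the QP type (transport QP, raw packet
 * QP, RSS QP or DCT).
 */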
struct mlx5_ib_qp {
	struct ib_qp		ibqp;
	union {
		struct mlx5_ib_qp_trans trans_qp;
		struct mlx5_ib_raw_packet_qp raw_packet_qp;
		struct mlx5_ib_rss_qp rss_qp;
		struct mlx5_ib_dct dct;
	};
	struct mlx5_frag_buf	buf;

	struct mlx5_db		db;
	struct mlx5_ib_wq	rq;

	u8			sq_signal_bits;
	u8			next_fence;
	struct mlx5_ib_wq	sq;

	/* serialize qp state modifications
	 */
	struct mutex		mutex;
	u32			flags;
	u8			port;
	u8			state;
	int			wq_sig;
	int			scat_cqe;
	int			max_inline_data;
	struct mlx5_bf		bf;
	int			has_rq;

	/* only for user space QPs. For kernel
	 * we have it from the bf object
	 */
	int			bfregn;

	int			create_type;

	/* Store signature errors */
	bool			signature_en;

	struct list_head	qps_list;
	struct list_head	cq_recv_list;
	struct list_head	cq_send_list;
	struct mlx5_rate_limit	rl;
	u32			underlay_qpn;
	bool			tunnel_offload_en;
	/* storage for qp sub type when core qp type is IB_QPT_DRIVER */
	enum ib_qp_type		qp_sub_type;
};

struct mlx5_ib_cq_buf {
	struct mlx5_frag_buf_ctrl fbc;
	struct ib_umem		*umem;
	int			cqe_size;
	int			nent;
};

enum mlx5_ib_qp_flags {
	MLX5_IB_QP_LSO				= IB_QP_CREATE_IPOIB_UD_LSO,
	MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK	= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
	MLX5_IB_QP_CROSS_CHANNEL		= IB_QP_CREATE_CROSS_CHANNEL,
	MLX5_IB_QP_MANAGED_SEND			= IB_QP_CREATE_MANAGED_SEND,
	MLX5_IB_QP_MANAGED_RECV			= IB_QP_CREATE_MANAGED_RECV,
	MLX5_IB_QP_SIGNATURE_HANDLING		= 1 << 5,
	/* QP uses 1 as its source QP number */
	MLX5_IB_QP_SQPN_QP1			= 1 << 6,
	MLX5_IB_QP_CAP_SCATTER_FCS		= 1 << 7,
	MLX5_IB_QP_RSS				= 1 << 8,
	MLX5_IB_QP_CVLAN_STRIPPING		= 1 << 9,
	MLX5_IB_QP_UNDERLAY			= 1 << 10,
	MLX5_IB_QP_PCI_WRITE_END_PADDING	= 1 << 11,
	MLX5_IB_QP_TUNNEL_OFFLOAD		= 1 << 12,
};

struct mlx5_umr_wr {
	struct ib_send_wr		wr;
	u64				virt_addr;
	u64				offset;
	struct ib_pd		       *pd;
	unsigned int			page_shift;
	unsigned int			xlt_size;
	u64				length;
	int				access_flags;
	u32				mkey;
	u8				ignore_free_state:1;
};

static inline const struct mlx5_umr_wr *umr_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct mlx5_umr_wr, wr);
}
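
/*
 * UMR work requests are posted with the reserved MLX5_IB_WR_UMR opcode;
 * umr_wr() recovers the wrapper from the embedded ib_send_wr.
 */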
struct mlx5_shared_mr_info {
	int mr_id;
	struct ib_umem		*umem;
};

enum mlx5_ib_cq_pr_flags {
	MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD	= 1 << 0,
};

struct mlx5_ib_cq {
	struct ib_cq		ibcq;
	struct mlx5_core_cq	mcq;
	struct mlx5_ib_cq_buf	buf;
	struct mlx5_db		db;

	/* serialize access to the CQ
	 */
	spinlock_t		lock;

	/* protect resize cq
	 */
	struct mutex		resize_mutex;
	struct mlx5_ib_cq_buf  *resize_buf;
	struct ib_umem	       *resize_umem;
	int			cqe_size;
	struct list_head	list_send_qp;
	struct list_head	list_recv_qp;
	u32			create_flags;
	struct list_head	wc_list;
	enum ib_cq_notify_flags notify_flags;
	struct work_struct	notify_work;
	u16			private_flags; /* Use mlx5_ib_cq_pr_flags */
};

struct mlx5_ib_wc {
	struct ib_wc wc;
	struct list_head list;
};

struct mlx5_ib_srq {
	struct ib_srq		ibsrq;
	struct mlx5_core_srq	msrq;
	struct mlx5_frag_buf	buf;
	struct mlx5_db		db;
	u64		       *wrid;
	/* protect SRQ handling
	 */
	spinlock_t		lock;
	int			head;
	int			tail;
	u16			wqe_ctr;
	struct ib_umem	       *umem;
	/* serialize arming a SRQ
	 */
	struct mutex		mutex;
	int			wq_sig;
};

struct mlx5_ib_xrcd {
	struct ib_xrcd		ibxrcd;
	u32			xrcdn;
};

enum mlx5_ib_mtt_access_flags {
	MLX5_IB_MTT_READ  = (1 << 0),
	MLX5_IB_MTT_WRITE = (1 << 1),
};

struct mlx5_ib_dm {
	struct ib_dm		ibdm;
	phys_addr_t		dev_addr;
};

#define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)

#define MLX5_IB_DM_ALLOWED_ACCESS (IB_ACCESS_LOCAL_WRITE   |\
				   IB_ACCESS_REMOTE_WRITE  |\
				   IB_ACCESS_REMOTE_READ   |\
				   IB_ACCESS_REMOTE_ATOMIC |\
				   IB_ZERO_BASED)

struct mlx5_ib_mr {
	struct ib_mr		ibmr;
	void			*descs;
	dma_addr_t		desc_map;
	int			ndescs;
	int			max_descs;
	int			desc_size;
	int			access_mode;
	struct mlx5_core_mkey	mmkey;
	struct ib_umem	       *umem;
	struct mlx5_shared_mr_info	*smr_info;
	struct list_head	list;
	int			order;
	bool			allocated_from_cache;
	int			npages;
	struct mlx5_ib_dev     *dev;
	u32			out[MLX5_ST_SZ_DW(create_mkey_out)];
	struct mlx5_core_sig_ctx    *sig;
	int			live;
	void			*descs_alloc;
	int			access_flags; /* Needed for rereg MR */

	struct mlx5_ib_mr      *parent;
	atomic_t		num_leaf_free;
	wait_queue_head_t	q_leaf_free;
};

struct mlx5_ib_mw {
	struct ib_mw		ibmw;
	struct mlx5_core_mkey	mmkey;
	int			ndescs;
};

struct mlx5_ib_umr_context {
	struct ib_cqe		cqe;
	enum ib_wc_status	status;
	struct completion	done;
};

struct umr_common {
	struct ib_pd	*pd;
	struct ib_cq	*cq;
	struct ib_qp	*qp;
	/* control access to UMR QP
	 */
	struct semaphore	sem;
};

enum {
	MLX5_FMR_INVALID,
	MLX5_FMR_VALID,
	MLX5_FMR_BUSY,
};

struct mlx5_cache_ent {
	struct list_head	head;
	/* sync access to the cache entry
	 */
	spinlock_t		lock;

	struct dentry	       *dir;
	char			name[4];
	u32			order;
	u32			xlt;
	u32			access_mode;
	u32			page;

	u32			size;
	u32			cur;
	u32			miss;
	u32			limit;

	struct dentry	       *fsize;
	struct dentry	       *fcur;
	struct dentry	       *fmiss;
	struct dentry	       *flimit;

	struct mlx5_ib_dev     *dev;
	struct work_struct	work;
	struct delayed_work	dwork;
	int			pending;
	struct completion	compl;
};
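
/*
 * The MR cache keeps pools of pre-created mkeys, one cache entry per
 * size order, so that most registrations can avoid a synchronous
 * firmware command.
 */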
struct mlx5_mr_cache {
	struct workqueue_struct *wq;
	struct mlx5_cache_ent	ent[MAX_MR_CACHE_ENTRIES];
	int			stopped;
	struct dentry		*root;
	unsigned long		last_add;
};

struct mlx5_ib_gsi_qp;

struct mlx5_ib_port_resources {
	struct mlx5_ib_resources *devr;
	struct mlx5_ib_gsi_qp *gsi;
	struct work_struct pkey_change_work;
};

struct mlx5_ib_resources {
	struct ib_cq	*c0;
	struct ib_xrcd	*x0;
	struct ib_xrcd	*x1;
	struct ib_pd	*p0;
	struct ib_srq	*s0;
	struct ib_srq	*s1;
	struct mlx5_ib_port_resources ports[2];
	/* Protects changes to the port resources */
	struct mutex	mutex;
};

struct mlx5_ib_counters {
	const char **names;
	size_t *offsets;
	u32 num_q_counters;
	u32 num_cong_counters;
	u32 num_ext_ppcnt_counters;
	u16 set_id;
	bool set_id_valid;
};

struct mlx5_ib_multiport_info;

struct mlx5_ib_multiport {
	struct mlx5_ib_multiport_info *mpi;
	/* To be held when accessing the multiport info */
	spinlock_t mpi_lock;
};

struct mlx5_ib_port {
	struct mlx5_ib_counters cnts;
	struct mlx5_ib_multiport mp;
	struct mlx5_ib_dbg_cc_params	*dbg_cc_params;
};

struct mlx5_roce {
	/* Protect mlx5_ib_get_netdev from invoking dev_hold() with a NULL
	 * netdev pointer
	 */
	rwlock_t		netdev_lock;
	struct net_device	*netdev;
	struct notifier_block	nb;
	atomic_t		tx_port_affinity;
	enum ib_port_state	last_port_state;
	struct mlx5_ib_dev	*dev;
	u8			native_port_num;
};

struct mlx5_ib_dbg_param {
	int			offset;
	struct mlx5_ib_dev	*dev;
	struct dentry		*dentry;
	u8			port_num;
};

enum mlx5_ib_dbg_cc_types {
	MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE,
	MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE_ATI,
	MLX5_IB_DBG_CC_RP_TIME_RESET,
	MLX5_IB_DBG_CC_RP_BYTE_RESET,
	MLX5_IB_DBG_CC_RP_THRESHOLD,
	MLX5_IB_DBG_CC_RP_AI_RATE,
	MLX5_IB_DBG_CC_RP_HAI_RATE,
	MLX5_IB_DBG_CC_RP_MIN_DEC_FAC,
	MLX5_IB_DBG_CC_RP_MIN_RATE,
	MLX5_IB_DBG_CC_RP_RATE_TO_SET_ON_FIRST_CNP,
	MLX5_IB_DBG_CC_RP_DCE_TCP_G,
	MLX5_IB_DBG_CC_RP_DCE_TCP_RTT,
	MLX5_IB_DBG_CC_RP_RATE_REDUCE_MONITOR_PERIOD,
	MLX5_IB_DBG_CC_RP_INITIAL_ALPHA_VALUE,
	MLX5_IB_DBG_CC_RP_GD,
	MLX5_IB_DBG_CC_NP_CNP_DSCP,
	MLX5_IB_DBG_CC_NP_CNP_PRIO_MODE,
	MLX5_IB_DBG_CC_NP_CNP_PRIO,
	MLX5_IB_DBG_CC_MAX,
};

struct mlx5_ib_dbg_cc_params {
	struct dentry			*root;
	struct mlx5_ib_dbg_param	params[MLX5_IB_DBG_CC_MAX];
};

enum {
	MLX5_MAX_DELAY_DROP_TIMEOUT_MS = 100,
};

struct mlx5_ib_dbg_delay_drop {
	struct dentry		*dir_debugfs;
	struct dentry		*rqs_cnt_debugfs;
	struct dentry		*events_cnt_debugfs;
	struct dentry		*timeout_debugfs;
};

struct mlx5_ib_delay_drop {
	struct mlx5_ib_dev     *dev;
	struct work_struct	delay_drop_work;
	/* serialize setting of delay drop */
	struct mutex		lock;
	u32			timeout;
	bool			activate;
	atomic_t		events_cnt;
	atomic_t		rqs_cnt;
	struct mlx5_ib_dbg_delay_drop *dbg;
};

enum mlx5_ib_stages {
	MLX5_IB_STAGE_INIT,
	MLX5_IB_STAGE_FLOW_DB,
	MLX5_IB_STAGE_CAPS,
	MLX5_IB_STAGE_NON_DEFAULT_CB,
	MLX5_IB_STAGE_ROCE,
	MLX5_IB_STAGE_DEVICE_RESOURCES,
	MLX5_IB_STAGE_ODP,
	MLX5_IB_STAGE_COUNTERS,
	MLX5_IB_STAGE_CONG_DEBUGFS,
	MLX5_IB_STAGE_UAR,
	MLX5_IB_STAGE_BFREG,
	MLX5_IB_STAGE_PRE_IB_REG_UMR,
	MLX5_IB_STAGE_SPECS,
	MLX5_IB_STAGE_IB_REG,
	MLX5_IB_STAGE_POST_IB_REG_UMR,
	MLX5_IB_STAGE_DELAY_DROP,
	MLX5_IB_STAGE_CLASS_ATTR,
	MLX5_IB_STAGE_REP_REG,
	MLX5_IB_STAGE_MAX,
};

struct mlx5_ib_stage {
	int (*init)(struct mlx5_ib_dev *dev);
	void (*cleanup)(struct mlx5_ib_dev *dev);
};

#define STAGE_CREATE(_stage, _init, _cleanup) \
	.stage[_stage] = {.init = _init, .cleanup = _cleanup}

struct mlx5_ib_profile {
	struct mlx5_ib_stage stage[MLX5_IB_STAGE_MAX];
};
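
/*
 * A profile is an ordered table of init/cleanup pairs: stages run in
 * enum order on device add and in reverse on removal. Abridged sketch
 * of how a profile is built with STAGE_CREATE():
 *
 *	static const struct mlx5_ib_profile pf_profile = {
 *		STAGE_CREATE(MLX5_IB_STAGE_INIT,
 *			     mlx5_ib_stage_init_init,
 *			     mlx5_ib_stage_init_cleanup),
 *		...
 *	};
 */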
struct mlx5_ib_multiport_info {
	struct list_head list;
	struct mlx5_ib_dev *ibdev;
	struct mlx5_core_dev *mdev;
	struct completion unref_comp;
	u64 sys_image_guid;
	u32 mdev_refcnt;
	bool is_master;
	bool unaffiliate;
};

struct mlx5_ib_flow_action {
	struct ib_flow_action		ib_action;
	union {
		struct {
			u64			    ib_flags;
			struct mlx5_accel_esp_xfrm *ctx;
		} esp_aes_gcm;
	};
};

struct mlx5_memic {
	struct mlx5_core_dev *dev;
	spinlock_t		memic_lock;
	DECLARE_BITMAP(memic_alloc_pages, MLX5_MAX_MEMIC_PAGES);
};

struct mlx5_read_counters_attr {
	struct mlx5_fc *hw_cntrs_hndl;
	u64 *out;
	u32 flags;
};

enum mlx5_ib_counters_type {
	MLX5_IB_COUNTERS_FLOW,
};

struct mlx5_ib_mcounters {
	struct ib_counters ibcntrs;
	enum mlx5_ib_counters_type type;
	/* number of counters supported for this counters type */
	u32 counters_num;
	struct mlx5_fc *hw_cntrs_hndl;
	/* read function for this counters type */
	int (*read_counters)(struct ib_device *ibdev,
			     struct mlx5_read_counters_attr *read_attr);
	/* max index set as part of create_flow */
	u32 cntrs_max_index;
	/* number of counters data entries (<description,index> pair) */
	u32 ncounters;
	/* counters data array for descriptions and indexes */
	struct mlx5_ib_flow_counters_desc *counters_data;
	/* protects access to mcounters internal data */
	struct mutex mcntrs_mutex;
};

static inline struct mlx5_ib_mcounters *
to_mcounters(struct ib_counters *ibcntrs)
{
	return container_of(ibcntrs, struct mlx5_ib_mcounters, ibcntrs);
}

struct mlx5_ib_dev {
	struct ib_device		ib_dev;
	const struct uverbs_object_tree_def *driver_trees[6];
	struct mlx5_core_dev		*mdev;
	struct mlx5_roce		roce[MLX5_MAX_PORTS];
	int				num_ports;
	/* serialize update of capability mask
	 */
	struct mutex			cap_mask_mutex;
	bool				ib_active;
	struct umr_common		umrc;
	/* sync used page count stats
	 */
	struct mlx5_ib_resources	devr;
	struct mlx5_mr_cache		cache;
	struct timer_list		delay_timer;
	/* Prevents soft lock on massive reg MRs */
	struct mutex			slow_path_mutex;
	int				fill_delay;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	struct ib_odp_caps	odp_caps;
	u64			odp_max_size;
	/*
	 * Sleepable RCU that prevents destruction of MRs while they are still
	 * being used by a page fault handler.
	 */
	struct srcu_struct	mr_srcu;
	u32			null_mkey;
#endif
	struct mlx5_ib_flow_db	*flow_db;
	/* protect resources needed as part of reset flow */
	spinlock_t		reset_flow_resource_lock;
	struct list_head	qp_list;
	/* Array with num_ports elements */
	struct mlx5_ib_port	*port;
	struct mlx5_sq_bfreg	bfreg;
	struct mlx5_sq_bfreg	fp_bfreg;
	struct mlx5_ib_delay_drop	delay_drop;
	const struct mlx5_ib_profile	*profile;
	struct mlx5_eswitch_rep		*rep;

	/* protect the user_td */
	struct mutex		lb_mutex;
	u32			user_td;
	u8			umr_fence;
	struct list_head	ib_dev_list;
	u64			sys_image_guid;
	struct mlx5_memic	memic;
};
static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
{
	return container_of(mcq, struct mlx5_ib_cq, mcq);
}

static inline struct mlx5_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd)
{
	return container_of(ibxrcd, struct mlx5_ib_xrcd, ibxrcd);
}

static inline struct mlx5_ib_dev *to_mdev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct mlx5_ib_dev, ib_dev);
}

static inline struct mlx5_ib_cq *to_mcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct mlx5_ib_cq, ibcq);
}

static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)
{
	return container_of(mqp, struct mlx5_ib_qp_base, mqp)->container_mibqp;
}

static inline struct mlx5_ib_rwq *to_mibrwq(struct mlx5_core_qp *core_qp)
{
	return container_of(core_qp, struct mlx5_ib_rwq, core_qp);
}

static inline struct mlx5_ib_mr *to_mibmr(struct mlx5_core_mkey *mmkey)
{
	return container_of(mmkey, struct mlx5_ib_mr, mmkey);
}

static inline struct mlx5_ib_pd *to_mpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct mlx5_ib_pd, ibpd);
}

static inline struct mlx5_ib_srq *to_msrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct mlx5_ib_srq, ibsrq);
}

static inline struct mlx5_ib_qp *to_mqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct mlx5_ib_qp, ibqp);
}

static inline struct mlx5_ib_rwq *to_mrwq(struct ib_wq *ibwq)
{
	return container_of(ibwq, struct mlx5_ib_rwq, ibwq);
}

static inline struct mlx5_ib_rwq_ind_table *to_mrwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
{
	return container_of(ib_rwq_ind_tbl, struct mlx5_ib_rwq_ind_table, ib_rwq_ind_tbl);
}

static inline struct mlx5_ib_srq *to_mibsrq(struct mlx5_core_srq *msrq)
{
	return container_of(msrq, struct mlx5_ib_srq, msrq);
}

static inline struct mlx5_ib_dm *to_mdm(struct ib_dm *ibdm)
{
	return container_of(ibdm, struct mlx5_ib_dm, ibdm);
}

static inline struct mlx5_ib_mr *to_mmr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct mlx5_ib_mr, ibmr);
}

static inline struct mlx5_ib_mw *to_mmw(struct ib_mw *ibmw)
{
	return container_of(ibmw, struct mlx5_ib_mw, ibmw);
}

static inline struct mlx5_ib_flow_action *
to_mflow_act(struct ib_flow_action *ibact)
{
	return container_of(ibact, struct mlx5_ib_flow_action, ib_action);
}
int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
			struct mlx5_db *db);
void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db);
void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
		 u8 port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
		 const void *in_mad, void *response_mad);
struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
				struct ib_udata *udata);
int mlx5_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
int mlx5_ib_destroy_ah(struct ib_ah *ah);
struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
				  struct ib_srq_init_attr *init_attr,
				  struct ib_udata *udata);
int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr);
int mlx5_ib_destroy_srq(struct ib_srq *srq);
int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
			  const struct ib_recv_wr **bad_wr);
struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata);
int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata);
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		     struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_destroy_qp(struct ib_qp *qp);
void mlx5_ib_drain_sq(struct ib_qp *qp);
void mlx5_ib_drain_rq(struct ib_qp *qp);
int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
		      const struct ib_send_wr **bad_wr);
int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
		      const struct ib_recv_wr **bad_wr);
void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n);
int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index,
			  void *buffer, u32 length,
			  struct mlx5_ib_qp_base *base);
struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
				const struct ib_cq_init_attr *attr,
				struct ib_ucontext *context,
				struct ib_udata *udata);
int mlx5_ib_destroy_cq(struct ib_cq *cq);
int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata);
struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
			       struct ib_udata *udata);
int mlx5_ib_dealloc_mw(struct ib_mw *mw);
int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
		       int page_shift, int flags);
struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
					     int access_flags);
void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *mr);
int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
			  u64 length, u64 virt_addr, int access_flags,
			  struct ib_pd *pd, struct ib_udata *udata);
int mlx5_ib_dereg_mr(struct ib_mr *ibmr);
struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
			       enum ib_mr_type mr_type,
			       u32 max_num_sg);
int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset);
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			const struct ib_wc *in_wc, const struct ib_grh *in_grh,
			const struct ib_mad_hdr *in, size_t in_mad_size,
			struct ib_mad_hdr *out, size_t *out_mad_size,
			u16 *out_mad_pkey_index);
struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
				   struct ib_ucontext *context,
				   struct ib_udata *udata);
int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd);
int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset);
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port);
int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
					  struct ib_smp *out_mad);
int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
					 __be64 *sys_image_guid);
int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
				 u16 *max_pkeys);
int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
				 u32 *vendor_id);
int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc);
int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid);
int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index,
			    u16 *pkey);
int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index,
			    union ib_gid *gid);
int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
			    struct ib_port_attr *props);
int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
		       struct ib_port_attr *props);
int mlx5_ib_init_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cleanup_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
			unsigned long max_page_shift,
			int *count, int *shift,
			int *ncont, int *order);
void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			    int page_shift, size_t offset, size_t num_pages,
			    __be64 *pas, int access_flags);
void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			  int page_shift, __be64 *pas, int access_flags);
void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq);
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);
struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, int entry);
void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status);
struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
				struct ib_wq_init_attr *init_attr,
				struct ib_udata *udata);
int mlx5_ib_destroy_wq(struct ib_wq *wq);
int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
		      u32 wq_attr_mask, struct ib_udata *udata);
struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
						      struct ib_rwq_ind_table_init_attr *init_attr,
						      struct ib_udata *udata);
int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
bool mlx5_ib_dc_atomic_is_supported(struct mlx5_ib_dev *dev);
struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
			       struct ib_ucontext *context,
			       struct ib_dm_alloc_attr *attr,
			       struct uverbs_attr_bundle *attrs);
int mlx5_ib_dealloc_dm(struct ib_dm *ibdm);
struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
				struct ib_dm_mr_attr *attr,
				struct uverbs_attr_bundle *attrs);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev);
void mlx5_ib_pfault(struct mlx5_core_dev *mdev, void *context,
		    struct mlx5_pagefault *pfault);
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
int __init mlx5_ib_odp_init(void);
void mlx5_ib_odp_cleanup(void);
void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
			      unsigned long end);
void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent);
void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t offset,
			   size_t nentries, struct mlx5_ib_mr *mr, int flags);
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
{
	return;
}

static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; }
static inline int mlx5_ib_odp_init(void) { return 0; }
static inline void mlx5_ib_odp_cleanup(void) {}
static inline void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent) {}
static inline void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t offset,
					 size_t nentries, struct mlx5_ib_mr *mr,
					 int flags) {}
#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
/* Needed for rep profile */
int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev);
void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev);
int mlx5_ib_stage_rep_flow_db_init(struct mlx5_ib_dev *dev);
int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev);
int mlx5_ib_stage_rep_non_default_cb(struct mlx5_ib_dev *dev);
int mlx5_ib_stage_rep_roce_init(struct mlx5_ib_dev *dev);
void mlx5_ib_stage_rep_roce_cleanup(struct mlx5_ib_dev *dev);
int mlx5_ib_stage_dev_res_init(struct mlx5_ib_dev *dev);
void mlx5_ib_stage_dev_res_cleanup(struct mlx5_ib_dev *dev);
int mlx5_ib_stage_counters_init(struct mlx5_ib_dev *dev);
void mlx5_ib_stage_counters_cleanup(struct mlx5_ib_dev *dev);
int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev);
void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev);
void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev);
int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev);
void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev);
int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev);
int mlx5_ib_stage_class_attr_init(struct mlx5_ib_dev *dev);
void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
		      const struct mlx5_ib_profile *profile,
		      int stage);
void *__mlx5_ib_add(struct mlx5_ib_dev *dev,
		    const struct mlx5_ib_profile *profile);

int mlx5_ib_get_vf_config(struct ib_device *device, int vf,
			  u8 port, struct ifla_vf_info *info);
int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
			      u8 port, int state);
int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
			 u8 port, struct ifla_vf_stats *stats);
int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
			u64 guid, int type);
__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev,
			       const struct ib_gid_attr *attr);

void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num);
int mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num);

/* GSI QP helper functions */
struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
				    struct ib_qp_init_attr *init_attr);
int mlx5_ib_gsi_destroy_qp(struct ib_qp *qp);
int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
			  int attr_mask);
int mlx5_ib_gsi_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
			 int qp_attr_mask,
			 struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_gsi_post_send(struct ib_qp *qp, const struct ib_send_wr *wr,
			  const struct ib_send_wr **bad_wr);
int mlx5_ib_gsi_post_recv(struct ib_qp *qp, const struct ib_recv_wr *wr,
			  const struct ib_recv_wr **bad_wr);
void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi);

int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc);

void mlx5_ib_free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi,
			int bfregn);
struct mlx5_ib_dev *mlx5_ib_get_ibdev_from_mpi(struct mlx5_ib_multiport_info *mpi);
struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *dev,
						   u8 ib_port_num,
						   u8 *native_port_num);
void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *dev,
				  u8 port_num);

#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
int mlx5_ib_devx_create(struct mlx5_ib_dev *dev,
			struct mlx5_ib_ucontext *context);
void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev,
			  struct mlx5_ib_ucontext *context);
const struct uverbs_object_tree_def *mlx5_ib_get_devx_tree(void);
struct mlx5_ib_flow_handler *mlx5_ib_raw_fs_rule_add(
	struct mlx5_ib_dev *dev, struct mlx5_ib_flow_matcher *fs_matcher,
	void *cmd_in, int inlen, int dest_id, int dest_type);
bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id, int *dest_type);
int mlx5_ib_get_flow_trees(const struct uverbs_object_tree_def **root);
#else
static inline int
mlx5_ib_devx_create(struct mlx5_ib_dev *dev,
		    struct mlx5_ib_ucontext *context) { return -EOPNOTSUPP; }
static inline void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev,
					struct mlx5_ib_ucontext *context) {}
static inline const struct uverbs_object_tree_def *
mlx5_ib_get_devx_tree(void) { return NULL; }
static inline bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id,
					     int *dest_type)
{
	return false;
}
static inline int
mlx5_ib_get_flow_trees(const struct uverbs_object_tree_def **root)
{
	return 0;
}
#endif
static inline void init_query_mad(struct ib_smp *mad)
{
	mad->base_version  = 1;
	mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method	   = IB_MGMT_METHOD_GET;
}

static inline u8 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ? MLX5_PERM_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ? MLX5_PERM_REMOTE_READ  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ? MLX5_PERM_LOCAL_WRITE  : 0) |
	       MLX5_PERM_LOCAL_READ;
}

static inline int is_qp1(enum ib_qp_type qp_type)
{
	return qp_type == MLX5_IB_QPT_HW_GSI;
}

#define MLX5_MAX_UMR_SHIFT 16
#define MLX5_MAX_UMR_PAGES (1 << MLX5_MAX_UMR_SHIFT)

static inline u32 check_cq_create_flags(u32 flags)
{
	/*
	 * Return a non-zero value for unsupported CQ create flags;
	 * otherwise return zero.
	 */
	return (flags & ~(IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN |
			  IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION));
}

static inline int verify_assign_uidx(u8 cqe_version, u32 cmd_uidx,
				     u32 *user_index)
{
	if (cqe_version) {
		if ((cmd_uidx == MLX5_IB_DEFAULT_UIDX) ||
		    (cmd_uidx & ~MLX5_USER_ASSIGNED_UIDX_MASK))
			return -EINVAL;
		*user_index = cmd_uidx;
	} else {
		*user_index = MLX5_IB_DEFAULT_UIDX;
	}

	return 0;
}
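
/*
 * A user command may carry a uidx only when the context negotiated CQE
 * version 1; the helpers below check that the command length and the
 * negotiated CQE version agree before accepting it.
 */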
static inline int get_qp_user_index(struct mlx5_ib_ucontext *ucontext,
				    struct mlx5_ib_create_qp *ucmd,
				    int inlen,
				    u32 *user_index)
{
	u8 cqe_version = ucontext->cqe_version;

	if (field_avail(struct mlx5_ib_create_qp, uidx, inlen) &&
	    !cqe_version && (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
		return 0;

	if (!!(field_avail(struct mlx5_ib_create_qp, uidx, inlen) !=
	       !!cqe_version))
		return -EINVAL;

	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
}

static inline int get_srq_user_index(struct mlx5_ib_ucontext *ucontext,
				     struct mlx5_ib_create_srq *ucmd,
				     int inlen,
				     u32 *user_index)
{
	u8 cqe_version = ucontext->cqe_version;

	if (field_avail(struct mlx5_ib_create_srq, uidx, inlen) &&
	    !cqe_version && (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
		return 0;

	if (!!(field_avail(struct mlx5_ib_create_srq, uidx, inlen) !=
	       !!cqe_version))
		return -EINVAL;

	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
}

static inline int get_uars_per_sys_page(struct mlx5_ib_dev *dev, bool lib_support)
{
	return lib_support && MLX5_CAP_GEN(dev->mdev, uar_4k) ?
				MLX5_UARS_IN_PAGE : 1;
}

static inline int get_num_static_uars(struct mlx5_ib_dev *dev,
				      struct mlx5_bfreg_info *bfregi)
{
	return get_uars_per_sys_page(dev, bfregi->lib_uar_4k) * bfregi->num_static_sys_pages;
}

unsigned long mlx5_ib_get_xlt_emergency_page(void);
void mlx5_ib_put_xlt_emergency_page(void);

int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
			struct mlx5_bfreg_info *bfregi, u32 bfregn,
			bool dyn_bfreg);

#endif /* MLX5_IB_H */