rdma_vt.h
#ifndef DEF_RDMA_VT_H
#define DEF_RDMA_VT_H

/*
 * Copyright(c) 2016 - 2018 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * - Neither the name of Intel Corporation nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
/*
 * Structure that low level drivers will populate in order to register with the
 * rdmavt layer.
 */

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_mad.h>
#include <rdma/rdmavt_mr.h>
#include <rdma/rdmavt_qp.h>

#define RVT_MAX_PKEY_VALUES 16

#define RVT_MAX_TRAP_LEN 100 /* Limit pending trap list */
#define RVT_MAX_TRAP_LISTS 5 /*((IB_NOTICE_TYPE_INFO & 0x0F) + 1)*/
#define RVT_TRAP_TIMEOUT 4096 /* 4.096 usec */

struct trap_list {
        u32 list_len;
        struct list_head list;
};
struct rvt_ibport {
        struct rvt_qp __rcu *qp[2];
        struct ib_mad_agent *send_agent;        /* agent for SMI (traps) */
        struct rb_root mcast_tree;
        spinlock_t lock;                /* protect changes in this struct */

        /* non-zero when timer is set */
        unsigned long mkey_lease_timeout;
        unsigned long trap_timeout;
        __be64 gid_prefix;      /* in network order */
        __be64 mkey;
        u64 tid;
        u32 port_cap_flags;
        u16 port_cap3_flags;
        u32 pma_sample_start;
        u32 pma_sample_interval;
        __be16 pma_counter_select[5];
        u16 pma_tag;
        u16 mkey_lease_period;
        u32 sm_lid;
        u8 sm_sl;
        u8 mkeyprot;
        u8 subnet_timeout;
        u8 vl_high_limit;

        /*
         * Driver is expected to keep these up to date. These
         * counters are informational only and not required to be
         * completely accurate.
         */
        u64 n_rc_resends;
        u64 n_seq_naks;
        u64 n_rdma_seq;
        u64 n_rnr_naks;
        u64 n_other_naks;
        u64 n_loop_pkts;
        u64 n_pkt_drops;
        u64 n_vl15_dropped;
        u64 n_rc_timeouts;
        u64 n_dmawait;
        u64 n_unaligned;
        u64 n_rc_dupreq;
        u64 n_rc_seqnak;
        u16 pkey_violations;
        u16 qkey_violations;
        u16 mkey_violations;

        /* Hot-path per CPU counters to avoid cacheline trading to update */
        u64 z_rc_acks;
        u64 z_rc_qacks;
        u64 z_rc_delayed_comp;
        u64 __percpu *rc_acks;
        u64 __percpu *rc_qacks;
        u64 __percpu *rc_delayed_comp;

        void *priv; /* driver private data */

        /*
         * The pkey table is allocated and maintained by the driver. Drivers
         * need to have access to this before registering with rdmavt. However,
         * rdmavt will need access to it as well, so drivers need to provide
         * this during the attach port API call.
         */
        u16 *pkey_table;

        struct rvt_ah *sm_ah;

        /*
         * Keep a list of traps that have not been repressed. They will be
         * resent based on trap_timer.
         */
        struct trap_list trap_lists[RVT_MAX_TRAP_LISTS];
        struct timer_list trap_timer;
};
#define RVT_CQN_MAX 16 /* maximum length of cq name */

/*
 * Things that are driver specific, module parameters in hfi1 and qib
 */
struct rvt_driver_params {
        struct ib_device_attr props;

        /*
         * Anything driver specific that is not covered by props, for
         * instance special module parameters, goes here.
         */
        unsigned int lkey_table_size;
        unsigned int qp_table_size;
        int qpn_start;
        int qpn_inc;
        int qpn_res_start;
        int qpn_res_end;
        int nports;
        int npkeys;
        int node;
        int psn_mask;
        int psn_shift;
        int psn_modify_mask;
        u32 core_cap_flags;
        u32 max_mad_size;
        u8 qos_shift;
        u8 max_rdma_atomic;
        u8 reserved_operations;
};
/* Protection domain */
struct rvt_pd {
        struct ib_pd ibpd;
        bool user;
};

/* Address handle */
struct rvt_ah {
        struct ib_ah ibah;
        struct rdma_ah_attr attr;
        atomic_t refcount;
        u8 vl;
        u8 log_pmtu;
};

struct rvt_dev_info;
struct rvt_swqe;
struct rvt_driver_provided {
        /*
         * Which functions are required depends on which verbs rdmavt is
         * providing and which verbs the driver is overriding. See
         * check_support() for details.
         */

        /* hot path calldowns in a single cacheline */

        /*
         * Give the driver a notice that there is send work to do. It is up to
         * the driver to generally push the packets out; this just queues the
         * work with the driver. There are two variants here. The no_lock
         * version requires the s_lock not to be held. The other assumes the
         * s_lock is held.
         */
        void (*schedule_send)(struct rvt_qp *qp);
        void (*schedule_send_no_lock)(struct rvt_qp *qp);

        /* Driver specific work request checking */
        int (*check_send_wqe)(struct rvt_qp *qp, struct rvt_swqe *wqe);

        /*
         * Sometimes rdmavt needs to kick the driver's send progress. That is
         * done by this callback.
         */
        void (*do_send)(struct rvt_qp *qp);

        /* Passed to ib core registration. Callback to create sysfs files */
        int (*port_callback)(struct ib_device *, u8, struct kobject *);

        /*
         * Returns a pointer to the underlying hardware's PCI device. This is
         * used to display information as to what hardware is being referenced
         * in an output message.
         */
        struct pci_dev * (*get_pci_dev)(struct rvt_dev_info *rdi);

        /*
         * Allocate a private queue pair data structure for driver specific
         * information which is opaque to rdmavt. Errors are returned via
         * ERR_PTR(err). The driver is free to return NULL or a valid
         * pointer.
         */
        void * (*qp_priv_alloc)(struct rvt_dev_info *rdi, struct rvt_qp *qp);
        /*
         * Free the driver's private qp structure.
         */
        void (*qp_priv_free)(struct rvt_dev_info *rdi, struct rvt_qp *qp);

        /*
         * Inform the driver the particular qp in question has been reset so
         * that it can clean up anything it needs to.
         */
        void (*notify_qp_reset)(struct rvt_qp *qp);

        /*
         * Get a path mtu from the driver based on qp attributes.
         */
        int (*get_pmtu_from_attr)(struct rvt_dev_info *rdi, struct rvt_qp *qp,
                                  struct ib_qp_attr *attr);

        /*
         * Notify driver that it needs to flush any outstanding IO requests that
         * are waiting on a qp.
         */
        void (*flush_qp_waiters)(struct rvt_qp *qp);

        /*
         * Notify driver to stop its queue of sending packets. Nothing else
         * should be posted to the queue pair after this has been called.
         */
        void (*stop_send_queue)(struct rvt_qp *qp);

        /*
         * Have the driver drain any in-progress operations.
         */
        void (*quiesce_qp)(struct rvt_qp *qp);

        /*
         * Inform the driver a qp has gone to the error state.
         */
        void (*notify_error_qp)(struct rvt_qp *qp);

        /*
         * Get an MTU for a qp.
         */
        u32 (*mtu_from_qp)(struct rvt_dev_info *rdi, struct rvt_qp *qp,
                           u32 pmtu);

        /*
         * Convert an mtu to a path mtu.
         */
        int (*mtu_to_path_mtu)(u32 mtu);

        /*
         * Get the guid of a port in big endian byte order.
         */
        int (*get_guid_be)(struct rvt_dev_info *rdi, struct rvt_ibport *rvp,
                           int guid_index, __be64 *guid);

        /*
         * Query driver for the state of the port.
         */
        int (*query_port_state)(struct rvt_dev_info *rdi, u8 port_num,
                                struct ib_port_attr *props);

        /*
         * Tell driver to shut down a port.
         */
        int (*shut_down_port)(struct rvt_dev_info *rdi, u8 port_num);

        /* Tell driver to send a trap for changed port capabilities */
        void (*cap_mask_chg)(struct rvt_dev_info *rdi, u8 port_num);

        /*
         * The following functions can be safely ignored completely. Any use of
         * these is checked for NULL before blindly calling. Rdmavt should also
         * be functional if drivers omit these.
         */

        /* Called to inform the driver that all qps should now be freed. */
        unsigned (*free_all_qps)(struct rvt_dev_info *rdi);

        /* Driver specific AH validation */
        int (*check_ah)(struct ib_device *, struct rdma_ah_attr *);

        /* Inform the driver a new AH has been created */
        void (*notify_new_ah)(struct ib_device *, struct rdma_ah_attr *,
                              struct rvt_ah *);

        /* Let the driver pick the next queue pair number */
        int (*alloc_qpn)(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
                         enum ib_qp_type type, u8 port_num);

        /* Determine if it is safe or allowed to modify the qp */
        int (*check_modify_qp)(struct rvt_qp *qp, struct ib_qp_attr *attr,
                               int attr_mask, struct ib_udata *udata);

        /* Driver specific QP modification/notification-of */
        void (*modify_qp)(struct rvt_qp *qp, struct ib_qp_attr *attr,
                          int attr_mask, struct ib_udata *udata);

        /* Notify driver a mad agent has been created */
        void (*notify_create_mad_agent)(struct rvt_dev_info *rdi, int port_idx);

        /* Notify driver a mad agent has been removed */
        void (*notify_free_mad_agent)(struct rvt_dev_info *rdi, int port_idx);

        /* Notify driver to restart rc */
        void (*notify_restart_rc)(struct rvt_qp *qp, u32 psn, int wait);

        /* Get and return CPU to pin CQ processing thread */
        int (*comp_vect_cpu_lookup)(struct rvt_dev_info *rdi, int comp_vect);
};
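
/*
 * Illustrative sketch only (not part of the rdmavt API): a driver built on
 * rdmavt is expected to wire its own routines into the hot-path calldowns
 * above, for example (the my_drv_* names are hypothetical):
 *
 *        rdi->driver_f.schedule_send = my_drv_schedule_send;
 *        rdi->driver_f.schedule_send_no_lock = my_drv_schedule_send_no_lock;
 *        rdi->driver_f.do_send = my_drv_do_send;
 *
 * where my_drv_schedule_send() may assume qp->s_lock is held by the caller,
 * while my_drv_schedule_send_no_lock() must not be called with it held.
 */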
struct rvt_dev_info {
        struct ib_device ibdev; /* Keep this first. Nothing above here */

        /*
         * Prior to calling for registration the driver will be responsible for
         * allocating space for this structure.
         *
         * The driver will also be responsible for filling in certain members of
         * dparms.props. The driver needs to fill in dparms exactly as it would
         * want values reported to a ULP. This will be returned to the caller
         * in rdmavt's device. The driver should also therefore refrain from
         * modifying this directly after registration with rdmavt.
         */

        /* Driver specific properties */
        struct rvt_driver_params dparms;

        /* post send table */
        const struct rvt_operation_params *post_parms;

        /* Driver specific helper functions */
        struct rvt_driver_provided driver_f;

        struct rvt_mregion __rcu *dma_mr;
        struct rvt_lkey_table lkey_table;

        /* Internal use */
        int n_pds_allocated;
        spinlock_t n_pds_lock; /* Protect pd allocated count */

        int n_ahs_allocated;
        spinlock_t n_ahs_lock; /* Protect ah allocated count */

        u32 n_srqs_allocated;
        spinlock_t n_srqs_lock; /* Protect srqs allocated count */

        int flags;
        struct rvt_ibport **ports;

        /* QP */
        struct rvt_qp_ibdev *qp_dev;
        u32 n_qps_allocated;    /* number of QPs allocated for device */
        u32 n_rc_qps;           /* number of RC QPs allocated for device */
        u32 busy_jiffies;       /* timeout scaling based on RC QP count */
        spinlock_t n_qps_lock;  /* protect qps, rc qps and busy jiffy counts */

        /* memory maps */
        struct list_head pending_mmaps;
        spinlock_t mmap_offset_lock; /* protect mmap_offset */
        u32 mmap_offset;
        spinlock_t pending_lock; /* protect pending mmap list */

        /* CQ */
        u32 n_cqs_allocated;    /* number of CQs allocated for device */
        spinlock_t n_cqs_lock;  /* protect count of in use cqs */

        /* Multicast */
        u32 n_mcast_grps_allocated; /* number of mcast groups allocated */
        spinlock_t n_mcast_grps_lock;
};
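
/*
 * Illustrative sketch only: a driver typically embeds rvt_dev_info as the
 * first member of its own per-device structure (ibdev must remain at the
 * very start, as noted above) so the combined allocation can be sized via
 * rvt_alloc_device(). The my_drv_devdata name and its extra field are
 * hypothetical:
 *
 *        struct my_drv_devdata {
 *                struct rvt_dev_info rdi;
 *                unsigned long my_drv_flags;
 *        };
 */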
/**
 * rvt_set_ibdev_name - Craft an IB device name from client info
 * @rdi: pointer to the client rvt_dev_info structure
 * @fmt: format string used to build the device name
 * @name: client specific name
 * @unit: client specific unit number.
 */
static inline void rvt_set_ibdev_name(struct rvt_dev_info *rdi,
                                      const char *fmt, const char *name,
                                      const int unit)
{
        snprintf(rdi->ibdev.name, sizeof(rdi->ibdev.name), fmt, name, unit);
}
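
/*
 * Illustrative usage only; the "my_drv" base name and unit value are
 * hypothetical:
 *
 *        rvt_set_ibdev_name(rdi, "%s%d", "my_drv", 0);
 *
 * which names the device "my_drv0".
 */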
/**
 * rvt_get_ibdev_name - return the IB name
 * @rdi: rdmavt device
 *
 * Return the registered name of the device.
 */
static inline const char *rvt_get_ibdev_name(const struct rvt_dev_info *rdi)
{
        return rdi->ibdev.name;
}

static inline struct rvt_pd *ibpd_to_rvtpd(struct ib_pd *ibpd)
{
        return container_of(ibpd, struct rvt_pd, ibpd);
}

static inline struct rvt_ah *ibah_to_rvtah(struct ib_ah *ibah)
{
        return container_of(ibah, struct rvt_ah, ibah);
}

static inline struct rvt_dev_info *ib_to_rvt(struct ib_device *ibdev)
{
        return container_of(ibdev, struct rvt_dev_info, ibdev);
}

static inline struct rvt_srq *ibsrq_to_rvtsrq(struct ib_srq *ibsrq)
{
        return container_of(ibsrq, struct rvt_srq, ibsrq);
}

static inline struct rvt_qp *ibqp_to_rvtqp(struct ib_qp *ibqp)
{
        return container_of(ibqp, struct rvt_qp, ibqp);
}
static inline unsigned rvt_get_npkeys(struct rvt_dev_info *rdi)
{
        /*
         * All ports have the same number of pkeys.
         */
        return rdi->dparms.npkeys;
}

/*
 * Return the max atomic suitable for determining
 * the size of the ack ring buffer in a QP.
 */
static inline unsigned int rvt_max_atomic(struct rvt_dev_info *rdi)
{
        return rdi->dparms.max_rdma_atomic + 1;
}

/*
 * Return the indexed PKEY from the port PKEY table.
 */
static inline u16 rvt_get_pkey(struct rvt_dev_info *rdi,
                               int port_index,
                               unsigned index)
{
        if (index >= rvt_get_npkeys(rdi))
                return 0;
        else
                return rdi->ports[port_index]->pkey_table[index];
}

/**
 * rvt_lookup_qpn - return the QP with the given QPN
 * @rdi: the rvt device info
 * @rvp: the ibport
 * @qpn: the QP number to look up
 *
 * The caller must hold the rcu_read_lock(), and keep the lock until
 * the returned qp is no longer in use.
 */
/* TODO: Remove this and put in rdmavt/qp.h when no longer needed by drivers */
static inline struct rvt_qp *rvt_lookup_qpn(struct rvt_dev_info *rdi,
                                            struct rvt_ibport *rvp,
                                            u32 qpn) __must_hold(RCU)
{
        struct rvt_qp *qp = NULL;

        if (unlikely(qpn <= 1)) {
                qp = rcu_dereference(rvp->qp[qpn]);
        } else {
                u32 n = hash_32(qpn, rdi->qp_dev->qp_table_bits);

                for (qp = rcu_dereference(rdi->qp_dev->qp_table[n]); qp;
                     qp = rcu_dereference(qp->next))
                        if (qp->ibqp.qp_num == qpn)
                                break;
        }
        return qp;
}
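
/*
 * Illustrative usage only (the rdi, rvp, qpn and handle_packet() names are
 * hypothetical placeholders). The lookup must be done under rcu_read_lock()
 * and the returned qp must not be used once the lock is dropped:
 *
 *        rcu_read_lock();
 *        qp = rvt_lookup_qpn(rdi, rvp, qpn);
 *        if (qp)
 *                handle_packet(qp);
 *        rcu_read_unlock();
 */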
/**
 * rvt_mod_retry_timer - mod a retry timer
 * @qp: the QP
 *
 * Modify a potentially already running retry timer
 */
static inline void rvt_mod_retry_timer(struct rvt_qp *qp)
{
        struct ib_qp *ibqp = &qp->ibqp;
        struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

        lockdep_assert_held(&qp->s_lock);
        qp->s_flags |= RVT_S_TIMER;
        /* 4.096 usec. * (1 << qp->timeout) */
        mod_timer(&qp->s_timer, jiffies + qp->timeout_jiffies +
                  rdi->busy_jiffies);
}
struct rvt_dev_info *rvt_alloc_device(size_t size, int nports);
void rvt_dealloc_device(struct rvt_dev_info *rdi);
int rvt_register_device(struct rvt_dev_info *rvd, u32 driver_id);
void rvt_unregister_device(struct rvt_dev_info *rvd);
int rvt_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr);
int rvt_init_port(struct rvt_dev_info *rdi, struct rvt_ibport *port,
                  int port_index, u16 *pkey_table);
int rvt_fast_reg_mr(struct rvt_qp *qp, struct ib_mr *ibmr, u32 key,
                    int access);
int rvt_invalidate_rkey(struct rvt_qp *qp, u32 rkey);
int rvt_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge,
                u32 len, u64 vaddr, u32 rkey, int acc);
int rvt_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd,
                struct rvt_sge *isge, struct rvt_sge *last_sge,
                struct ib_sge *sge, int acc);
struct rvt_mcast *rvt_mcast_find(struct rvt_ibport *ibp, union ib_gid *mgid,
                                 u16 lid);
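
/*
 * Illustrative registration sketch only, not a definitive sequence; the
 * my_drv_devdata type, my_ibport, my_pkey_table, nports and driver_id values
 * are hypothetical. A driver typically allocates the device, fills in dparms
 * and driver_f, sets up each port, and then registers:
 *
 *        rdi = rvt_alloc_device(sizeof(struct my_drv_devdata), nports);
 *        if (!rdi)
 *                return -ENOMEM;
 *
 *        rdi->dparms.nports = nports;
 *        ... fill in the rest of dparms, dparms.props and driver_f ...
 *
 *        rvt_init_port(rdi, &my_ibport, 0, my_pkey_table);
 *
 *        ret = rvt_register_device(rdi, driver_id);
 *        if (ret)
 *                rvt_dealloc_device(rdi);
 *
 * Teardown is the reverse: rvt_unregister_device() followed by
 * rvt_dealloc_device().
 */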

#endif /* DEF_RDMA_VT_H */