qib_verbs.h

/*
 * Copyright (c) 2012, 2013 Intel Corporation.  All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef QIB_VERBS_H
#define QIB_VERBS_H

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/kref.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_user_verbs.h>

struct qib_ctxtdata;
struct qib_pportdata;
struct qib_devdata;
struct qib_verbs_txreq;

#define QIB_MAX_RDMA_ATOMIC     16
#define QIB_GUIDS_PER_PORT      5

#define QPN_MAX                 (1 << 24)
#define QPNMAP_ENTRIES          (QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
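/*
 * For example, with 4 KB pages each map page holds PAGE_SIZE *
 * BITS_PER_BYTE = 32768 QPN bits, so QPNMAP_ENTRIES works out to
 * 2^24 / 4096 / 8 = 512 map pages for the full QPN space.
 */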
/*
 * Increment this value if any changes that break userspace ABI
 * compatibility are made.
 */
#define QIB_UVERBS_ABI_VERSION  2

/*
 * Define an ib_cq_notify value that is not valid so we know when CQ
 * notifications are armed.
 */
#define IB_CQ_NONE      (IB_CQ_NEXT_COMP + 1)

#define IB_SEQ_NAK      (3 << 29)

/* AETH NAK opcode values */
#define IB_RNR_NAK                      0x20
#define IB_NAK_PSN_ERROR                0x60
#define IB_NAK_INVALID_REQUEST          0x61
#define IB_NAK_REMOTE_ACCESS_ERROR      0x62
#define IB_NAK_REMOTE_OPERATIONAL_ERROR 0x63
#define IB_NAK_INVALID_RD_REQUEST       0x64

/* Flags for checking QP state (see ib_qib_state_ops[]) */
#define QIB_POST_SEND_OK                0x01
#define QIB_POST_RECV_OK                0x02
#define QIB_PROCESS_RECV_OK             0x04
#define QIB_PROCESS_SEND_OK             0x08
#define QIB_PROCESS_NEXT_SEND_OK        0x10
#define QIB_FLUSH_SEND                  0x20
#define QIB_FLUSH_RECV                  0x40
#define QIB_PROCESS_OR_FLUSH_SEND \
        (QIB_PROCESS_SEND_OK | QIB_FLUSH_SEND)

/* IB Performance Manager status values */
#define IB_PMA_SAMPLE_STATUS_DONE       0x00
#define IB_PMA_SAMPLE_STATUS_STARTED    0x01
#define IB_PMA_SAMPLE_STATUS_RUNNING    0x02

/* Mandatory IB performance counter select values. */
#define IB_PMA_PORT_XMIT_DATA   cpu_to_be16(0x0001)
#define IB_PMA_PORT_RCV_DATA    cpu_to_be16(0x0002)
#define IB_PMA_PORT_XMIT_PKTS   cpu_to_be16(0x0003)
#define IB_PMA_PORT_RCV_PKTS    cpu_to_be16(0x0004)
#define IB_PMA_PORT_XMIT_WAIT   cpu_to_be16(0x0005)

#define QIB_VENDOR_IPG          cpu_to_be16(0xFFA0)

#define IB_BTH_REQ_ACK          (1 << 31)
#define IB_BTH_SOLICITED        (1 << 23)
#define IB_BTH_MIG_REQ          (1 << 22)

/* XXX Should be defined in ib_verbs.h enum ib_port_cap_flags */
#define IB_PORT_OTHER_LOCAL_CHANGES_SUP (1 << 26)

#define IB_GRH_VERSION          6
#define IB_GRH_VERSION_MASK     0xF
#define IB_GRH_VERSION_SHIFT    28
#define IB_GRH_TCLASS_MASK      0xFF
#define IB_GRH_TCLASS_SHIFT     20
#define IB_GRH_FLOW_MASK        0xFFFFF
#define IB_GRH_FLOW_SHIFT       0
#define IB_GRH_NEXT_HDR         0x1B

#define IB_DEFAULT_GID_PREFIX   cpu_to_be64(0xfe80000000000000ULL)

/* Values for set/get portinfo VLCap OperationalVLs */
#define IB_VL_VL0       1
#define IB_VL_VL0_1     2
#define IB_VL_VL0_3     3
#define IB_VL_VL0_7     4
#define IB_VL_VL0_14    5

static inline int qib_num_vls(int vls)
{
        switch (vls) {
        default:
        case IB_VL_VL0:
                return 1;
        case IB_VL_VL0_1:
                return 2;
        case IB_VL_VL0_3:
                return 4;
        case IB_VL_VL0_7:
                return 8;
        case IB_VL_VL0_14:
                return 15;
        }
}
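/*
 * Note that the IB_VL_VL0_n encodings name the highest VL supported,
 * so e.g. qib_num_vls(IB_VL_VL0_3) == 4 (VLs 0-3) and
 * qib_num_vls(IB_VL_VL0_14) == 15 (VLs 0-14).
 */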
struct ib_reth {
        __be64 vaddr;
        __be32 rkey;
        __be32 length;
} __packed;

struct ib_atomic_eth {
        __be32 vaddr[2];        /* unaligned so access as 2 32-bit words */
        __be32 rkey;
        __be64 swap_data;
        __be64 compare_data;
} __packed;

struct qib_other_headers {
        __be32 bth[3];
        union {
                struct {
                        __be32 deth[2];
                        __be32 imm_data;
                } ud;
                struct {
                        struct ib_reth reth;
                        __be32 imm_data;
                } rc;
                struct {
                        __be32 aeth;
                        __be32 atomic_ack_eth[2];
                } at;
                __be32 imm_data;
                __be32 aeth;
                struct ib_atomic_eth atomic_eth;
        } u;
} __packed;

/*
 * Note that UD packets with a GRH header are 8+40+12+8 = 68 bytes
 * long (72 w/ imm_data).  Only the first 56 bytes of the IB header
 * will be in the eager header buffer.  The remaining 12 or 16 bytes
 * are in the data buffer.
 */
struct qib_ib_header {
        __be16 lrh[4];
        union {
                struct {
                        struct ib_grh grh;
                        struct qib_other_headers oth;
                } l;
                struct qib_other_headers oth;
        } u;
} __packed;
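/*
 * Size breakdown for the 68-byte figure above: LRH (4 x __be16 = 8
 * bytes) + GRH (40 bytes) + BTH (3 x __be32 = 12 bytes) + DETH
 * (2 x __be32 = 8 bytes) = 68 bytes, plus 4 bytes of imm_data = 72.
 */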
struct qib_pio_header {
        __le32 pbc[2];
        struct qib_ib_header hdr;
} __packed;

/*
 * There is one struct qib_mcast for each multicast GID.
 * All attached QPs are then stored as a list of
 * struct qib_mcast_qp.
 */
struct qib_mcast_qp {
        struct list_head list;
        struct qib_qp *qp;
};

struct qib_mcast {
        struct rb_node rb_node;
        union ib_gid mgid;
        struct list_head qp_list;
        wait_queue_head_t wait;
        atomic_t refcount;
        int n_attached;
};

/* Protection domain */
struct qib_pd {
        struct ib_pd ibpd;
        int user;               /* non-zero if created from user space */
};

/* Address Handle */
struct qib_ah {
        struct ib_ah ibah;
        struct ib_ah_attr attr;
        atomic_t refcount;
};

/*
 * This structure is used by qib_mmap() to validate an offset
 * when an mmap() request is made.  The vm_area_struct then uses
 * this as its vm_private_data.
 */
struct qib_mmap_info {
        struct list_head pending_mmaps;
        struct ib_ucontext *context;
        void *obj;
        __u64 offset;
        struct kref ref;
        unsigned size;
};

/*
 * This structure is used to contain the head pointer, tail pointer,
 * and completion queue entries as a single memory allocation so
 * it can be mmap'ed into user space.
 */
struct qib_cq_wc {
        u32 head;               /* index of next entry to fill */
        u32 tail;               /* index of next ib_poll_cq() entry */
        union {
                /* these are actually size ibcq.cqe + 1 */
                struct ib_uverbs_wc uqueue[0];
                struct ib_wc kqueue[0];
        };
};
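/*
 * Illustrative sketch (not part of the driver; consume() and cqe are
 * hypothetical): a consumer of the mmap'ed queue drains entries by
 * advancing tail modulo the queue size (ibcq.cqe + 1):
 *
 *      while (wc->tail != wc->head) {
 *              consume(&wc->uqueue[wc->tail]);
 *              if (wc->tail == cqe)
 *                      wc->tail = 0;
 *              else
 *                      wc->tail++;
 *      }
 */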
/*
 * The completion queue structure.
 */
struct qib_cq {
        struct ib_cq ibcq;
        struct kthread_work comptask;
        struct qib_devdata *dd;
        spinlock_t lock;        /* protect changes in this struct */
        u8 notify;
        u8 triggered;
        struct qib_cq_wc *queue;
        struct qib_mmap_info *ip;
};

/*
 * A segment is a linear region of low physical memory.
 * XXX Maybe we should use phys addr here and kmap()/kunmap().
 * Used by the verbs layer.
 */
struct qib_seg {
        void *vaddr;
        size_t length;
};

/* The number of qib_segs that fit in a page. */
#define QIB_SEGSZ       (PAGE_SIZE / sizeof(struct qib_seg))

struct qib_segarray {
        struct qib_seg segs[QIB_SEGSZ];
};

struct qib_mregion {
        struct ib_pd *pd;       /* shares refcnt of ibmr.pd */
        u64 user_base;          /* User's address for this region */
        u64 iova;               /* IB start address of this region */
        size_t length;
        u32 lkey;
        u32 offset;             /* offset (bytes) to start of region */
        int access_flags;
        u32 max_segs;           /* number of qib_segs in all the arrays */
        u32 mapsz;              /* size of the map array */
        u8  page_shift;         /* 0 - non-uniform or non-power-of-2 sizes */
        u8  lkey_published;     /* in global table */
        struct completion comp; /* complete when refcount goes to zero */
        struct rcu_head list;
        atomic_t refcount;
        struct qib_segarray *map[0];    /* the segments */
};

/*
 * These keep track of the copy progress within a memory region.
 * Used by the verbs layer.
 */
struct qib_sge {
        struct qib_mregion *mr;
        void *vaddr;            /* kernel virtual address of segment */
        u32 sge_length;         /* length of the SGE */
        u32 length;             /* remaining length of the segment */
        u16 m;                  /* current index: mr->map[m] */
        u16 n;                  /* current index: mr->map[m]->segs[n] */
};

/* Memory region */
struct qib_mr {
        struct ib_mr ibmr;
        struct ib_umem *umem;
        struct qib_mregion mr;  /* must be last */
};

/*
 * Send work request queue entry.
 * The size of the sg_list is determined when the QP is created and stored
 * in qp->s_max_sge.
 */
struct qib_swqe {
        struct ib_send_wr wr;   /* don't use wr.sg_list */
        u32 psn;                /* first packet sequence number */
        u32 lpsn;               /* last packet sequence number */
        u32 ssn;                /* send sequence number */
        u32 length;             /* total length of data in sg_list */
        struct qib_sge sg_list[0];
};

/*
 * Receive work request queue entry.
 * The size of the sg_list is determined when the QP (or SRQ) is created
 * and stored in qp->r_rq.max_sge (or srq->rq.max_sge).
 */
struct qib_rwqe {
        u64 wr_id;
        u8 num_sge;
        struct ib_sge sg_list[0];
};

/*
 * This structure is used to contain the head pointer, tail pointer,
 * and receive work queue entries as a single memory allocation so
 * it can be mmap'ed into user space.
 * Note that the wq array elements are variable size so you can't
 * just index into the array to get the N'th element;
 * use get_rwqe_ptr() instead.
 */
struct qib_rwq {
        u32 head;               /* new work requests posted to the head */
        u32 tail;               /* receives pull requests from here. */
        struct qib_rwqe wq[0];
};

struct qib_rq {
        struct qib_rwq *wq;
        u32 size;               /* size of RWQE array */
        u8 max_sge;
        spinlock_t lock /* protect changes in this struct */
                ____cacheline_aligned_in_smp;
};

struct qib_srq {
        struct ib_srq ibsrq;
        struct qib_rq rq;
        struct qib_mmap_info *ip;
        /* send signal when number of RWQEs < limit */
        u32 limit;
};

struct qib_sge_state {
        struct qib_sge *sg_list;        /* next SGE to be used if any */
        struct qib_sge sge;     /* progress state for the current SGE */
        u32 total_len;
        u8 num_sge;
};

/*
 * This structure holds the information that the send tasklet needs
 * to send an RDMA read response or atomic operation.
 */
struct qib_ack_entry {
        u8 opcode;
        u8 sent;
        u32 psn;
        u32 lpsn;
        union {
                struct qib_sge rdma_sge;
                u64 atomic_data;
        };
};

/*
 * Variables prefixed with s_ are for the requester (sender).
 * Variables prefixed with r_ are for the responder (receiver).
 * Variables prefixed with ack_ are for responder replies.
 *
 * Common variables are protected by both r_rq.lock and s_lock in that order
 * which only happens in modify_qp() or changing the QP 'state'.
 */
struct qib_qp {
        struct ib_qp ibqp;
        /* read mostly fields above and below */
        struct ib_ah_attr remote_ah_attr;
        struct ib_ah_attr alt_ah_attr;
        struct qib_qp __rcu *next;      /* link list for QPN hash table */
        struct qib_swqe *s_wq;          /* send work queue */
        struct qib_mmap_info *ip;
        struct qib_ib_header *s_hdr;    /* next packet header to send */
        unsigned long timeout_jiffies;  /* computed from timeout */
        enum ib_mtu path_mtu;
        u32 remote_qpn;
        u32 pmtu;               /* decoded from path_mtu */
        u32 qkey;               /* QKEY for this QP (for UD or RD) */
        u32 s_size;             /* send work queue size */
        u32 s_rnr_timeout;      /* number of milliseconds for RNR timeout */
        u8 state;               /* QP state */
        u8 qp_access_flags;
        u8 alt_timeout;         /* Alternate path timeout for this QP */
        u8 timeout;             /* Timeout for this QP */
        u8 s_srate;
        u8 s_mig_state;
        u8 port_num;
        u8 s_pkey_index;        /* PKEY index to use */
        u8 s_alt_pkey_index;    /* Alternate path PKEY index to use */
        u8 r_max_rd_atomic;     /* max number of RDMA read/atomic to receive */
        u8 s_max_rd_atomic;     /* max number of RDMA read/atomic to send */
        u8 s_retry_cnt;         /* number of times to retry */
        u8 s_rnr_retry_cnt;
        u8 r_min_rnr_timer;     /* retry timeout value for RNR NAKs */
        u8 s_max_sge;           /* size of s_wq->sg_list */
        u8 s_draining;

        /* start of read/write fields */

        atomic_t refcount ____cacheline_aligned_in_smp;
        wait_queue_head_t wait;

        struct qib_ack_entry s_ack_queue[QIB_MAX_RDMA_ATOMIC + 1]
                ____cacheline_aligned_in_smp;
        struct qib_sge_state s_rdma_read_sge;

        spinlock_t r_lock ____cacheline_aligned_in_smp; /* used for APM */
        unsigned long r_aflags;
        u64 r_wr_id;            /* ID for current receive WQE */
        u32 r_ack_psn;          /* PSN for next ACK or atomic ACK */
        u32 r_len;              /* total length of r_sge */
        u32 r_rcv_len;          /* receive data len processed */
        u32 r_psn;              /* expected rcv packet sequence number */
        u32 r_msn;              /* message sequence number */
        u8 r_state;             /* opcode of last packet received */
        u8 r_flags;
        u8 r_head_ack_queue;    /* index into s_ack_queue[] */
        struct list_head rspwait;       /* link for waiting to respond */
        struct qib_sge_state r_sge;     /* current receive data */
        struct qib_rq r_rq;             /* receive work queue */

        spinlock_t s_lock ____cacheline_aligned_in_smp;
        struct qib_sge_state *s_cur_sge;
        u32 s_flags;
        struct qib_verbs_txreq *s_tx;
        struct qib_swqe *s_wqe;
        struct qib_sge_state s_sge;     /* current send request data */
        struct qib_mregion *s_rdma_mr;
        atomic_t s_dma_busy;
        u32 s_cur_size;         /* size of send packet in bytes */
        u32 s_len;              /* total length of s_sge */
        u32 s_rdma_read_len;    /* total length of s_rdma_read_sge */
        u32 s_next_psn;         /* PSN for next request */
        u32 s_last_psn;         /* last response PSN processed */
        u32 s_sending_psn;      /* lowest PSN that is being sent */
        u32 s_sending_hpsn;     /* highest PSN that is being sent */
        u32 s_psn;              /* current packet sequence number */
        u32 s_ack_rdma_psn;     /* PSN for sending RDMA read responses */
        u32 s_ack_psn;          /* PSN for acking sends and RDMA writes */
        u32 s_head;             /* new entries added here */
        u32 s_tail;             /* next entry to process */
        u32 s_cur;              /* current work queue entry */
        u32 s_acked;            /* last un-ACK'ed entry */
        u32 s_last;             /* last completed entry */
        u32 s_ssn;              /* SSN of tail entry */
        u32 s_lsn;              /* limit sequence number (credit) */
        u16 s_hdrwords;         /* size of s_hdr in 32 bit words */
        u16 s_rdma_ack_cnt;
        u8 s_state;             /* opcode of last packet sent */
        u8 s_ack_state;         /* opcode of packet to ACK */
        u8 s_nak_state;         /* non-zero if NAK is pending */
        u8 r_nak_state;         /* non-zero if NAK is pending */
        u8 s_retry;             /* requester retry counter */
        u8 s_rnr_retry;         /* requester RNR retry counter */
        u8 s_num_rd_atomic;     /* number of RDMA read/atomic pending */
        u8 s_tail_ack_queue;    /* index into s_ack_queue[] */

        struct qib_sge_state s_ack_rdma_sge;
        struct timer_list s_timer;
        struct list_head iowait;        /* link for wait PIO buf */
        struct work_struct s_work;

        wait_queue_head_t wait_dma;

        struct qib_sge r_sg_list[0] /* verified SGEs */
                ____cacheline_aligned_in_smp;
};
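/*
 * Illustrative sketch (not verbatim driver code) of the lock ordering
 * documented above for code that changes common QP state:
 *
 *      spin_lock_irq(&qp->r_rq.lock);
 *      spin_lock(&qp->s_lock);
 *      ... modify state shared by requester and responder ...
 *      spin_unlock(&qp->s_lock);
 *      spin_unlock_irq(&qp->r_rq.lock);
 */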
/*
 * Atomic bit definitions for r_aflags.
 */
#define QIB_R_WRID_VALID        0
#define QIB_R_REWIND_SGE        1

/*
 * Bit definitions for r_flags.
 */
#define QIB_R_REUSE_SGE 0x01
#define QIB_R_RDMAR_SEQ 0x02
#define QIB_R_RSP_NAK   0x04
#define QIB_R_RSP_SEND  0x08
#define QIB_R_COMM_EST  0x10
/*
 * Bit definitions for s_flags.
 *
 * QIB_S_SIGNAL_REQ_WR - set if send completions are generated only for
 *                       WRs that explicitly request them
 * QIB_S_BUSY - send tasklet is processing the QP
 * QIB_S_TIMER - the RC retry timer is active
 * QIB_S_RESP_PENDING - an RC response is pending
 * QIB_S_ACK_PENDING - an ACK is waiting to be sent after RDMA read/atomics
 * QIB_S_WAIT_FENCE - waiting for all prior RDMA read or atomic SWQEs
 *                    before processing the next SWQE
 * QIB_S_WAIT_RDMAR - waiting for an RDMA read or atomic SWQE to complete
 *                    before processing the next SWQE
 * QIB_S_WAIT_RNR - waiting for RNR timeout
 * QIB_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE
 * QIB_S_WAIT_DMA - waiting for send DMA queue to drain before generating
 *                  the next send completion entry not via send DMA
 * QIB_S_WAIT_PIO - waiting for a send buffer to be available
 * QIB_S_WAIT_TX - waiting for a struct qib_verbs_txreq to be available
 * QIB_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available
 * QIB_S_WAIT_KMEM - waiting for kernel memory to be available
 * QIB_S_WAIT_PSN - waiting for a packet to exit the send DMA queue
 * QIB_S_WAIT_ACK - waiting for an ACK packet before sending more requests
 * QIB_S_SEND_ONE - send one packet, request ACK, then wait for ACK
 */
#define QIB_S_SIGNAL_REQ_WR     0x0001
#define QIB_S_BUSY              0x0002
#define QIB_S_TIMER             0x0004
#define QIB_S_RESP_PENDING      0x0008
#define QIB_S_ACK_PENDING       0x0010
#define QIB_S_WAIT_FENCE        0x0020
#define QIB_S_WAIT_RDMAR        0x0040
#define QIB_S_WAIT_RNR          0x0080
#define QIB_S_WAIT_SSN_CREDIT   0x0100
#define QIB_S_WAIT_DMA          0x0200
#define QIB_S_WAIT_PIO          0x0400
#define QIB_S_WAIT_TX           0x0800
#define QIB_S_WAIT_DMA_DESC     0x1000
#define QIB_S_WAIT_KMEM         0x2000
#define QIB_S_WAIT_PSN          0x4000
#define QIB_S_WAIT_ACK          0x8000
#define QIB_S_SEND_ONE          0x10000
#define QIB_S_UNLIMITED_CREDIT  0x20000

/*
 * Wait flags that would prevent any packet type from being sent.
 */
#define QIB_S_ANY_WAIT_IO (QIB_S_WAIT_PIO | QIB_S_WAIT_TX | \
        QIB_S_WAIT_DMA_DESC | QIB_S_WAIT_KMEM)

/*
 * Wait flags that would prevent send work requests from making progress.
 */
#define QIB_S_ANY_WAIT_SEND (QIB_S_WAIT_FENCE | QIB_S_WAIT_RDMAR | \
        QIB_S_WAIT_RNR | QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_DMA | \
        QIB_S_WAIT_PSN | QIB_S_WAIT_ACK)

#define QIB_S_ANY_WAIT (QIB_S_ANY_WAIT_IO | QIB_S_ANY_WAIT_SEND)

#define QIB_PSN_CREDIT  16
/*
 * Since struct qib_swqe is not a fixed size, we can't simply index into
 * struct qib_qp.s_wq.  This function does the array index computation.
 */
static inline struct qib_swqe *get_swqe_ptr(struct qib_qp *qp,
                                            unsigned n)
{
        return (struct qib_swqe *)((char *)qp->s_wq +
                                   (sizeof(struct qib_swqe) +
                                    qp->s_max_sge *
                                    sizeof(struct qib_sge)) * n);
}
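/*
 * Illustrative sketch: walking the pending send work requests between
 * s_tail and s_head (indices wrap at s_size):
 *
 *      u32 i = qp->s_tail;
 *      while (i != qp->s_head) {
 *              struct qib_swqe *wqe = get_swqe_ptr(qp, i);
 *              ... inspect wqe ...
 *              if (++i == qp->s_size)
 *                      i = 0;
 *      }
 *
 * get_rwqe_ptr() below plays the same role for the receive queue.
 */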
/*
 * Since struct qib_rwqe is not a fixed size, we can't simply index into
 * struct qib_rwq.wq.  This function does the array index computation.
 */
static inline struct qib_rwqe *get_rwqe_ptr(struct qib_rq *rq, unsigned n)
{
        return (struct qib_rwqe *)
                ((char *) rq->wq->wq +
                 (sizeof(struct qib_rwqe) +
                  rq->max_sge * sizeof(struct ib_sge)) * n);
}

/*
 * QPN-map pages start out as NULL, they get allocated upon
 * first use and are never deallocated.  This way,
 * large bitmaps are not allocated unless large numbers of QPs are used.
 */
struct qpn_map {
        void *page;
};

struct qib_qpn_table {
        spinlock_t lock;        /* protect changes in this struct */
        unsigned flags;         /* flags for QP0/1 allocated for each port */
        u32 last;               /* last QP number allocated */
        u32 nmaps;              /* size of the map table */
        u16 limit;
        u16 mask;
        /* bit map of free QP numbers other than 0/1 */
        struct qpn_map map[QPNMAP_ENTRIES];
};

struct qib_lkey_table {
        spinlock_t lock;        /* protect changes in this struct */
        u32 next;               /* next unused index (speeds search) */
        u32 gen;                /* generation count */
        u32 max;                /* size of the table */
        struct qib_mregion __rcu **table;
};

struct qib_opcode_stats {
        u64 n_packets;          /* number of packets */
        u64 n_bytes;            /* total number of bytes */
};

struct qib_opcode_stats_perctx {
        struct qib_opcode_stats stats[128];
};

struct qib_pma_counters {
        u64 n_unicast_xmit;     /* total unicast packets sent */
        u64 n_unicast_rcv;      /* total unicast packets received */
        u64 n_multicast_xmit;   /* total multicast packets sent */
        u64 n_multicast_rcv;    /* total multicast packets received */
};

struct qib_ibport {
        struct qib_qp __rcu *qp0;
        struct qib_qp __rcu *qp1;
        struct ib_mad_agent *send_agent;        /* agent for SMI (traps) */
        struct qib_ah *sm_ah;
        struct qib_ah *smi_ah;
        struct rb_root mcast_tree;
        spinlock_t lock;        /* protect changes in this struct */

        /* non-zero when timer is set */
        unsigned long mkey_lease_timeout;
        unsigned long trap_timeout;
        __be64 gid_prefix;      /* in network order */
        __be64 mkey;
        __be64 guids[QIB_GUIDS_PER_PORT - 1];   /* writable GUIDs */
        u64 tid;                /* TID for traps */
        struct qib_pma_counters __percpu *pmastats;
        u64 z_unicast_xmit;     /* starting count for PMA */
        u64 z_unicast_rcv;      /* starting count for PMA */
        u64 z_multicast_xmit;   /* starting count for PMA */
        u64 z_multicast_rcv;    /* starting count for PMA */
        u64 z_symbol_error_counter;             /* starting count for PMA */
        u64 z_link_error_recovery_counter;      /* starting count for PMA */
        u64 z_link_downed_counter;              /* starting count for PMA */
        u64 z_port_rcv_errors;                  /* starting count for PMA */
        u64 z_port_rcv_remphys_errors;          /* starting count for PMA */
        u64 z_port_xmit_discards;               /* starting count for PMA */
        u64 z_port_xmit_data;                   /* starting count for PMA */
        u64 z_port_rcv_data;                    /* starting count for PMA */
        u64 z_port_xmit_packets;                /* starting count for PMA */
        u64 z_port_rcv_packets;                 /* starting count for PMA */
        u32 z_local_link_integrity_errors;      /* starting count for PMA */
        u32 z_excessive_buffer_overrun_errors;  /* starting count for PMA */
        u32 z_vl15_dropped;                     /* starting count for PMA */
        u32 n_rc_resends;
        u32 n_rc_acks;
        u32 n_rc_qacks;
        u32 n_rc_delayed_comp;
        u32 n_seq_naks;
        u32 n_rdma_seq;
        u32 n_rnr_naks;
        u32 n_other_naks;
        u32 n_loop_pkts;
        u32 n_pkt_drops;
        u32 n_vl15_dropped;
        u32 n_rc_timeouts;
        u32 n_dmawait;
        u32 n_unaligned;
        u32 n_rc_dupreq;
        u32 n_rc_seqnak;
        u32 port_cap_flags;
        u32 pma_sample_start;
        u32 pma_sample_interval;
        __be16 pma_counter_select[5];
        u16 pma_tag;
        u16 pkey_violations;
        u16 qkey_violations;
        u16 mkey_violations;
        u16 mkey_lease_period;
        u16 sm_lid;
        u16 repress_traps;
        u8 sm_sl;
        u8 mkeyprot;
        u8 subnet_timeout;
        u8 vl_high_limit;
        u8 sl_to_vl[16];
};

struct qib_ibdev {
        struct ib_device ibdev;
        struct list_head pending_mmaps;
        spinlock_t mmap_offset_lock;    /* protect mmap_offset */
        u32 mmap_offset;
        struct qib_mregion __rcu *dma_mr;

        /* QP numbers are shared by all IB ports */
        struct qib_qpn_table qpn_table;
        struct qib_lkey_table lk_table;
        struct list_head piowait;       /* list for wait PIO buf */
        struct list_head dmawait;       /* list for wait DMA */
        struct list_head txwait;        /* list for wait qib_verbs_txreq */
        struct list_head memwait;       /* list for wait kernel memory */
        struct list_head txreq_free;
        struct timer_list mem_timer;
        struct qib_qp __rcu **qp_table;
        struct qib_pio_header *pio_hdrs;
        dma_addr_t pio_hdrs_phys;
        /* list of QPs waiting for RNR timer */
        spinlock_t pending_lock; /* protect wait lists, PMA counters, etc. */
        u32 qp_table_size;      /* size of the hash table */
        u32 qp_rnd;             /* random bytes for hash */
        spinlock_t qpt_lock;

        u32 n_piowait;
        u32 n_txwait;

        u32 n_pds_allocated;    /* number of PDs allocated for device */
        spinlock_t n_pds_lock;
        u32 n_ahs_allocated;    /* number of AHs allocated for device */
        spinlock_t n_ahs_lock;
        u32 n_cqs_allocated;    /* number of CQs allocated for device */
        spinlock_t n_cqs_lock;
        u32 n_qps_allocated;    /* number of QPs allocated for device */
        spinlock_t n_qps_lock;
        u32 n_srqs_allocated;   /* number of SRQs allocated for device */
        spinlock_t n_srqs_lock;
        u32 n_mcast_grps_allocated; /* number of mcast groups allocated */
        spinlock_t n_mcast_grps_lock;
#ifdef CONFIG_DEBUG_FS
        /* per HCA debugfs */
        struct dentry *qib_ibdev_dbg;
#endif
};

struct qib_verbs_counters {
        u64 symbol_error_counter;
        u64 link_error_recovery_counter;
        u64 link_downed_counter;
        u64 port_rcv_errors;
        u64 port_rcv_remphys_errors;
        u64 port_xmit_discards;
        u64 port_xmit_data;
        u64 port_rcv_data;
        u64 port_xmit_packets;
        u64 port_rcv_packets;
        u32 local_link_integrity_errors;
        u32 excessive_buffer_overrun_errors;
        u32 vl15_dropped;
};

static inline struct qib_mr *to_imr(struct ib_mr *ibmr)
{
        return container_of(ibmr, struct qib_mr, ibmr);
}

static inline struct qib_pd *to_ipd(struct ib_pd *ibpd)
{
        return container_of(ibpd, struct qib_pd, ibpd);
}

static inline struct qib_ah *to_iah(struct ib_ah *ibah)
{
        return container_of(ibah, struct qib_ah, ibah);
}

static inline struct qib_cq *to_icq(struct ib_cq *ibcq)
{
        return container_of(ibcq, struct qib_cq, ibcq);
}

static inline struct qib_srq *to_isrq(struct ib_srq *ibsrq)
{
        return container_of(ibsrq, struct qib_srq, ibsrq);
}

static inline struct qib_qp *to_iqp(struct ib_qp *ibqp)
{
        return container_of(ibqp, struct qib_qp, ibqp);
}

static inline struct qib_ibdev *to_idev(struct ib_device *ibdev)
{
        return container_of(ibdev, struct qib_ibdev, ibdev);
}
/*
 * Send if not busy or waiting for I/O and either
 * an RC response is pending or we can process send work requests.
 */
static inline int qib_send_ok(struct qib_qp *qp)
{
        return !(qp->s_flags & (QIB_S_BUSY | QIB_S_ANY_WAIT_IO)) &&
                (qp->s_hdrwords || (qp->s_flags & QIB_S_RESP_PENDING) ||
                 !(qp->s_flags & QIB_S_ANY_WAIT_SEND));
}
/*
 * This must be called with s_lock held.
 */
void qib_schedule_send(struct qib_qp *qp);

static inline int qib_pkey_ok(u16 pkey1, u16 pkey2)
{
        u16 p1 = pkey1 & 0x7FFF;
        u16 p2 = pkey2 & 0x7FFF;

        /*
         * Low 15 bits must be non-zero and match, and
         * one of the two must be a full member.
         */
        return p1 && p1 == p2 && ((__s16)pkey1 < 0 || (__s16)pkey2 < 0);
}
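/*
 * For example, qib_pkey_ok(0xFFFF, 0x7FFF) is non-zero (the low 15
 * bits match and 0xFFFF is a full member), while
 * qib_pkey_ok(0x7FFF, 0x7FFF) is zero (both are limited members).
 */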
void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
                   u32 qp1, u32 qp2, __be16 lid1, __be16 lid2);
void qib_cap_mask_chg(struct qib_ibport *ibp);
void qib_sys_guid_chg(struct qib_ibport *ibp);
void qib_node_desc_chg(struct qib_ibport *ibp);
int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
                    const struct ib_wc *in_wc, const struct ib_grh *in_grh,
                    const struct ib_mad_hdr *in, size_t in_mad_size,
                    struct ib_mad_hdr *out, size_t *out_mad_size,
                    u16 *out_mad_pkey_index);
int qib_create_agents(struct qib_ibdev *dev);
void qib_free_agents(struct qib_ibdev *dev);
/*
 * Compare the lower 24 bits of the two values.
 * Returns an integer less than, equal to, or greater than zero.
 */
static inline int qib_cmp24(u32 a, u32 b)
{
        return (((int) a) - ((int) b)) << 8;
}
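/*
 * For example, qib_cmp24(0x000000, 0xFFFFFF) > 0, reflecting 24-bit
 * PSN wraparound (0 follows 0xFFFFFF), and the upper 8 bits are
 * ignored: qib_cmp24(0x01000005, 0x00000006) < 0, i.e. 5 < 6.
 */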
struct qib_mcast *qib_mcast_find(struct qib_ibport *ibp, union ib_gid *mgid);

int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords,
                          u64 *rwords, u64 *spkts, u64 *rpkts,
                          u64 *xmit_wait);
int qib_get_counters(struct qib_pportdata *ppd,
                     struct qib_verbs_counters *cntrs);

int qib_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
int qib_mcast_tree_empty(struct qib_ibport *ibp);

__be32 qib_compute_aeth(struct qib_qp *qp);

struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn);

struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
                            struct ib_qp_init_attr *init_attr,
                            struct ib_udata *udata);
int qib_destroy_qp(struct ib_qp *ibqp);
int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err);
int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                  int attr_mask, struct ib_udata *udata);
int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                 int attr_mask, struct ib_qp_init_attr *init_attr);

unsigned qib_free_all_qps(struct qib_devdata *dd);
void qib_init_qpn_table(struct qib_devdata *dd, struct qib_qpn_table *qpt);
void qib_free_qpn_table(struct qib_qpn_table *qpt);

#ifdef CONFIG_DEBUG_FS
struct qib_qp_iter;
struct qib_qp_iter *qib_qp_iter_init(struct qib_ibdev *dev);
int qib_qp_iter_next(struct qib_qp_iter *iter);
void qib_qp_iter_print(struct seq_file *s, struct qib_qp_iter *iter);
#endif

void qib_get_credit(struct qib_qp *qp, u32 aeth);

unsigned qib_pkt_delay(u32 plen, u8 snd_mult, u8 rcv_mult);

void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail);

void qib_put_txreq(struct qib_verbs_txreq *tx);

int qib_verbs_send(struct qib_qp *qp, struct qib_ib_header *hdr,
                   u32 hdrwords, struct qib_sge_state *ss, u32 len);

void qib_copy_sge(struct qib_sge_state *ss, void *data, u32 length,
                  int release);
void qib_skip_sge(struct qib_sge_state *ss, u32 length, int release);

void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
                int has_grh, void *data, u32 tlen, struct qib_qp *qp);
void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
                int has_grh, void *data, u32 tlen, struct qib_qp *qp);

int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr);

struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid);

void qib_rc_rnr_retry(unsigned long arg);

void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr);

void qib_rc_error(struct qib_qp *qp, enum ib_wc_status err);

int qib_post_ud_send(struct qib_qp *qp, struct ib_send_wr *wr);

void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
                int has_grh, void *data, u32 tlen, struct qib_qp *qp);

int qib_alloc_lkey(struct qib_mregion *mr, int dma_region);
void qib_free_lkey(struct qib_mregion *mr);

int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
                struct qib_sge *isge, struct ib_sge *sge, int acc);
int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
                u32 len, u64 vaddr, u32 rkey, int acc);

int qib_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
                         struct ib_recv_wr **bad_wr);
struct ib_srq *qib_create_srq(struct ib_pd *ibpd,
                              struct ib_srq_init_attr *srq_init_attr,
                              struct ib_udata *udata);
int qib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
                   enum ib_srq_attr_mask attr_mask,
                   struct ib_udata *udata);
int qib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);
int qib_destroy_srq(struct ib_srq *ibsrq);

int qib_cq_init(struct qib_devdata *dd);
void qib_cq_exit(struct qib_devdata *dd);
void qib_cq_enter(struct qib_cq *cq, struct ib_wc *entry, int sig);
int qib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
struct ib_cq *qib_create_cq(struct ib_device *ibdev,
                            const struct ib_cq_init_attr *attr,
                            struct ib_ucontext *context,
                            struct ib_udata *udata);
int qib_destroy_cq(struct ib_cq *ibcq);
int qib_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags);
int qib_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);

struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *qib_reg_phys_mr(struct ib_pd *pd,
                              struct ib_phys_buf *buffer_list,
                              int num_phys_buf, int acc, u64 *iova_start);
struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                              u64 virt_addr, int mr_access_flags,
                              struct ib_udata *udata);
int qib_dereg_mr(struct ib_mr *ibmr);

struct ib_mr *qib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len);
struct ib_fast_reg_page_list *qib_alloc_fast_reg_page_list(
                                struct ib_device *ibdev, int page_list_len);
void qib_free_fast_reg_page_list(struct ib_fast_reg_page_list *pl);
int qib_fast_reg_mr(struct qib_qp *qp, struct ib_send_wr *wr);

struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
                             struct ib_fmr_attr *fmr_attr);
int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
                     int list_len, u64 iova);
int qib_unmap_fmr(struct list_head *fmr_list);
int qib_dealloc_fmr(struct ib_fmr *ibfmr);

static inline void qib_get_mr(struct qib_mregion *mr)
{
        atomic_inc(&mr->refcount);
}

void mr_rcu_callback(struct rcu_head *list);

static inline void qib_put_mr(struct qib_mregion *mr)
{
        if (unlikely(atomic_dec_and_test(&mr->refcount)))
                call_rcu(&mr->list, mr_rcu_callback);
}
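/*
 * Illustrative sketch (the lookup table variable and index here are
 * hypothetical): a reader pins an MR under RCU with qib_get_mr() and
 * drops the reference with qib_put_mr(), which frees the region via
 * call_rcu() once the last reference is gone:
 *
 *      rcu_read_lock();
 *      mr = rcu_dereference(rkt->table[idx]);
 *      if (mr)
 *              qib_get_mr(mr);
 *      rcu_read_unlock();
 *      ...
 *      qib_put_mr(mr);
 */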
static inline void qib_put_ss(struct qib_sge_state *ss)
{
        while (ss->num_sge) {
                qib_put_mr(ss->sge.mr);
                if (--ss->num_sge)
                        ss->sge = *ss->sg_list++;
        }
}

void qib_release_mmap_info(struct kref *ref);

struct qib_mmap_info *qib_create_mmap_info(struct qib_ibdev *dev, u32 size,
                                           struct ib_ucontext *context,
                                           void *obj);
void qib_update_mmap_info(struct qib_ibdev *dev, struct qib_mmap_info *ip,
                          u32 size, void *obj);

int qib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);

int qib_get_rwqe(struct qib_qp *qp, int wr_id_only);

void qib_migrate_qp(struct qib_qp *qp);

int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr,
                      int has_grh, struct qib_qp *qp, u32 bth0);

u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
                 struct ib_global_route *grh, u32 hwords, u32 nwords);

void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr,
                         u32 bth0, u32 bth2);

void qib_do_send(struct work_struct *work);

void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe,
                       enum ib_wc_status status);

void qib_send_rc_ack(struct qib_qp *qp);

int qib_make_rc_req(struct qib_qp *qp);
int qib_make_uc_req(struct qib_qp *qp);
int qib_make_ud_req(struct qib_qp *qp);

int qib_register_ib_device(struct qib_devdata *);
void qib_unregister_ib_device(struct qib_devdata *);

void qib_ib_rcv(struct qib_ctxtdata *, void *, void *, u32);

void qib_ib_piobufavail(struct qib_devdata *);

unsigned qib_get_npkeys(struct qib_devdata *);
unsigned qib_get_pkey(struct qib_ibport *, unsigned);

extern const enum ib_wc_opcode ib_qib_wc_opcode[];

/*
 * Below are HCA-independent IB PhysPortState values, returned
 * by the f_ibphys_portstate() routine.
 */
#define IB_PHYSPORTSTATE_SLEEP                  1
#define IB_PHYSPORTSTATE_POLL                   2
#define IB_PHYSPORTSTATE_DISABLED               3
#define IB_PHYSPORTSTATE_CFG_TRAIN              4
#define IB_PHYSPORTSTATE_LINKUP                 5
#define IB_PHYSPORTSTATE_LINK_ERR_RECOVER       6
#define IB_PHYSPORTSTATE_CFG_DEBOUNCE           8
#define IB_PHYSPORTSTATE_CFG_IDLE               0xB
#define IB_PHYSPORTSTATE_RECOVERY_RETRAIN       0xC
#define IB_PHYSPORTSTATE_RECOVERY_WAITRMT       0xE
#define IB_PHYSPORTSTATE_RECOVERY_IDLE          0xF
#define IB_PHYSPORTSTATE_CFG_ENH                0x10
#define IB_PHYSPORTSTATE_CFG_WAIT_ENH           0x13

extern const int ib_qib_state_ops[];

extern __be64 ib_qib_sys_image_guid;    /* in network order */

extern unsigned int ib_qib_lkey_table_size;
extern unsigned int ib_qib_max_cqes;
extern unsigned int ib_qib_max_cqs;
extern unsigned int ib_qib_max_qp_wrs;
extern unsigned int ib_qib_max_qps;
extern unsigned int ib_qib_max_sges;
extern unsigned int ib_qib_max_mcast_grps;
extern unsigned int ib_qib_max_mcast_qp_attached;
extern unsigned int ib_qib_max_srqs;
extern unsigned int ib_qib_max_srq_sges;
extern unsigned int ib_qib_max_srq_wrs;

extern const u32 ib_qib_rnr_table[];

extern struct ib_dma_mapping_ops qib_dma_mapping_ops;

#endif                          /* QIB_VERBS_H */