/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef RXE_HDR_H
#define RXE_HDR_H

/* Information extracted from a packet carried in an sk_buff. It must
 * fit in the skb cb array, i.e. be at most 48 bytes, and is stored in
 * the control block of the sk_buff for received packets.
 */
struct rxe_pkt_info {
	struct rxe_dev		*rxe;		/* device that owns packet */
	struct rxe_qp		*qp;		/* qp that owns packet */
	struct rxe_send_wqe	*wqe;		/* send wqe */
	u8			*hdr;		/* points to bth */
	u32			mask;		/* useful info about pkt */
	u32			psn;		/* bth psn of packet */
	u16			pkey_index;	/* partition of pkt */
	u16			paylen;		/* length from bth start through icrc */
	u8			port_num;	/* port pkt received on */
	u8			opcode;		/* bth opcode of packet */
	u8			offset;		/* bth offset from pkt->hdr */
};

/* These macros should be used only on a received skb */
#define SKB_TO_PKT(skb) ((struct rxe_pkt_info *)(skb)->cb)
#define PKT_TO_SKB(pkt) container_of((void *)(pkt), struct sk_buff, cb)

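/*
 * Example (illustrative only, not part of the original header): a
 * receive path would typically recover the packet info from a received
 * skb like this, where rxe_rcv_example() is a hypothetical handler:
 *
 *	static void rxe_rcv_example(struct sk_buff *skb)
 *	{
 *		struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
 *
 *		pr_debug("opcode 0x%x psn 0x%x\n", pkt->opcode, pkt->psn);
 *	}
 *
 * PKT_TO_SKB() goes the other way, recovering the enclosing sk_buff
 * from a pkt pointer stored in its cb array.
 */
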
/*
 * IBA header types and methods
 *
 * Some of these are included for reference and completeness only,
 * since rxe does not currently support the RD transport. Most of this
 * could be moved into the IB core; ib_pack.h has part of it but is
 * incomplete.
 *
 * Header-specific routines insert/extract values to/from headers.
 * The routines named __hhh_(set_)fff() take a pointer to an hhh
 * header and get (set) the fff field. The routines named
 * hhh_(set_)fff() take a packet info struct and locate the header
 * and field based on the opcode in the packet. Conversion between
 * network byte order and cpu order is also done here.
 */

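/*
 * For example (illustrative only): given a struct rxe_pkt_info *pkt
 * whose hdr and offset fields locate a valid BTH, the following two
 * calls return the same value:
 *
 *	u32 psn1 = __bth_psn(pkt->hdr + pkt->offset);
 *	u32 psn2 = bth_psn(pkt);
 *
 * The first form operates on a raw header pointer; the second resolves
 * the header location from the packet info struct.
 */
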
#define RXE_ICRC_SIZE		(4)
#define RXE_MAX_HDR_LENGTH	(80)

/******************************************************************************
 * Base Transport Header
 ******************************************************************************/
struct rxe_bth {
	u8	opcode;
	u8	flags;
	__be16	pkey;
	__be32	qpn;
	__be32	apsn;
};

#define BTH_TVER		(0)
#define BTH_DEF_PKEY		(0xffff)

#define BTH_SE_MASK		(0x80)
#define BTH_MIG_MASK		(0x40)
#define BTH_PAD_MASK		(0x30)
#define BTH_TVER_MASK		(0x0f)
#define BTH_FECN_MASK		(0x80000000)
#define BTH_BECN_MASK		(0x40000000)
#define BTH_RESV6A_MASK		(0x3f000000)
#define BTH_QPN_MASK		(0x00ffffff)
#define BTH_ACK_MASK		(0x80000000)
#define BTH_RESV7_MASK		(0x7f000000)
#define BTH_PSN_MASK		(0x00ffffff)

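/*
 * Field layout implied by the masks above (matching the IBA BTH):
 * the flags byte is SE(1) | M(1) | PadCnt(2) | TVer(4); the qpn word
 * is FECN(1) | BECN(1) | resv6a(6) | QPN(24); the apsn word is
 * AckReq(1) | resv7(7) | PSN(24).
 */
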
static inline u8 __bth_opcode(void *arg)
{
	struct rxe_bth *bth = arg;

	return bth->opcode;
}

static inline void __bth_set_opcode(void *arg, u8 opcode)
{
	struct rxe_bth *bth = arg;

	bth->opcode = opcode;
}

static inline u8 __bth_se(void *arg)
{
	struct rxe_bth *bth = arg;

	return 0 != (BTH_SE_MASK & bth->flags);
}

static inline void __bth_set_se(void *arg, int se)
{
	struct rxe_bth *bth = arg;

	if (se)
		bth->flags |= BTH_SE_MASK;
	else
		bth->flags &= ~BTH_SE_MASK;
}

static inline u8 __bth_mig(void *arg)
{
	struct rxe_bth *bth = arg;

	return 0 != (BTH_MIG_MASK & bth->flags);
}

static inline void __bth_set_mig(void *arg, u8 mig)
{
	struct rxe_bth *bth = arg;

	if (mig)
		bth->flags |= BTH_MIG_MASK;
	else
		bth->flags &= ~BTH_MIG_MASK;
}

static inline u8 __bth_pad(void *arg)
{
	struct rxe_bth *bth = arg;

	return (BTH_PAD_MASK & bth->flags) >> 4;
}

static inline void __bth_set_pad(void *arg, u8 pad)
{
	struct rxe_bth *bth = arg;

	bth->flags = (BTH_PAD_MASK & (pad << 4)) |
			(~BTH_PAD_MASK & bth->flags);
}

static inline u8 __bth_tver(void *arg)
{
	struct rxe_bth *bth = arg;

	return BTH_TVER_MASK & bth->flags;
}

static inline void __bth_set_tver(void *arg, u8 tver)
{
	struct rxe_bth *bth = arg;

	bth->flags = (BTH_TVER_MASK & tver) |
			(~BTH_TVER_MASK & bth->flags);
}

static inline u16 __bth_pkey(void *arg)
{
	struct rxe_bth *bth = arg;

	return be16_to_cpu(bth->pkey);
}

static inline void __bth_set_pkey(void *arg, u16 pkey)
{
	struct rxe_bth *bth = arg;

	bth->pkey = cpu_to_be16(pkey);
}

static inline u32 __bth_qpn(void *arg)
{
	struct rxe_bth *bth = arg;

	return BTH_QPN_MASK & be32_to_cpu(bth->qpn);
}

static inline void __bth_set_qpn(void *arg, u32 qpn)
{
	struct rxe_bth *bth = arg;
	u32 resvqpn = be32_to_cpu(bth->qpn);

	bth->qpn = cpu_to_be32((BTH_QPN_MASK & qpn) |
			       (~BTH_QPN_MASK & resvqpn));
}

static inline int __bth_fecn(void *arg)
{
	struct rxe_bth *bth = arg;

	return 0 != (cpu_to_be32(BTH_FECN_MASK) & bth->qpn);
}

static inline void __bth_set_fecn(void *arg, int fecn)
{
	struct rxe_bth *bth = arg;

	if (fecn)
		bth->qpn |= cpu_to_be32(BTH_FECN_MASK);
	else
		bth->qpn &= ~cpu_to_be32(BTH_FECN_MASK);
}

static inline int __bth_becn(void *arg)
{
	struct rxe_bth *bth = arg;

	return 0 != (cpu_to_be32(BTH_BECN_MASK) & bth->qpn);
}

static inline void __bth_set_becn(void *arg, int becn)
{
	struct rxe_bth *bth = arg;

	if (becn)
		bth->qpn |= cpu_to_be32(BTH_BECN_MASK);
	else
		bth->qpn &= ~cpu_to_be32(BTH_BECN_MASK);
}

static inline u8 __bth_resv6a(void *arg)
{
	struct rxe_bth *bth = arg;

	return (BTH_RESV6A_MASK & be32_to_cpu(bth->qpn)) >> 24;
}

static inline void __bth_set_resv6a(void *arg)
{
	struct rxe_bth *bth = arg;

	/* clear only the reserved bits, leaving the rest of the qpn word */
	bth->qpn &= cpu_to_be32(~BTH_RESV6A_MASK);
}

static inline int __bth_ack(void *arg)
{
	struct rxe_bth *bth = arg;

	return 0 != (cpu_to_be32(BTH_ACK_MASK) & bth->apsn);
}

static inline void __bth_set_ack(void *arg, int ack)
{
	struct rxe_bth *bth = arg;

	if (ack)
		bth->apsn |= cpu_to_be32(BTH_ACK_MASK);
	else
		bth->apsn &= ~cpu_to_be32(BTH_ACK_MASK);
}

static inline void __bth_set_resv7(void *arg)
{
	struct rxe_bth *bth = arg;

	bth->apsn &= ~cpu_to_be32(BTH_RESV7_MASK);
}

static inline u32 __bth_psn(void *arg)
{
	struct rxe_bth *bth = arg;

	return BTH_PSN_MASK & be32_to_cpu(bth->apsn);
}

static inline void __bth_set_psn(void *arg, u32 psn)
{
	struct rxe_bth *bth = arg;
	u32 apsn = be32_to_cpu(bth->apsn);

	bth->apsn = cpu_to_be32((BTH_PSN_MASK & psn) |
				(~BTH_PSN_MASK & apsn));
}

static inline u8 bth_opcode(struct rxe_pkt_info *pkt)
{
	return __bth_opcode(pkt->hdr + pkt->offset);
}

static inline void bth_set_opcode(struct rxe_pkt_info *pkt, u8 opcode)
{
	__bth_set_opcode(pkt->hdr + pkt->offset, opcode);
}

static inline u8 bth_se(struct rxe_pkt_info *pkt)
{
	return __bth_se(pkt->hdr + pkt->offset);
}

static inline void bth_set_se(struct rxe_pkt_info *pkt, int se)
{
	__bth_set_se(pkt->hdr + pkt->offset, se);
}

static inline u8 bth_mig(struct rxe_pkt_info *pkt)
{
	return __bth_mig(pkt->hdr + pkt->offset);
}

static inline void bth_set_mig(struct rxe_pkt_info *pkt, u8 mig)
{
	__bth_set_mig(pkt->hdr + pkt->offset, mig);
}

static inline u8 bth_pad(struct rxe_pkt_info *pkt)
{
	return __bth_pad(pkt->hdr + pkt->offset);
}

static inline void bth_set_pad(struct rxe_pkt_info *pkt, u8 pad)
{
	__bth_set_pad(pkt->hdr + pkt->offset, pad);
}

static inline u8 bth_tver(struct rxe_pkt_info *pkt)
{
	return __bth_tver(pkt->hdr + pkt->offset);
}

static inline void bth_set_tver(struct rxe_pkt_info *pkt, u8 tver)
{
	__bth_set_tver(pkt->hdr + pkt->offset, tver);
}

static inline u16 bth_pkey(struct rxe_pkt_info *pkt)
{
	return __bth_pkey(pkt->hdr + pkt->offset);
}

static inline void bth_set_pkey(struct rxe_pkt_info *pkt, u16 pkey)
{
	__bth_set_pkey(pkt->hdr + pkt->offset, pkey);
}

static inline u32 bth_qpn(struct rxe_pkt_info *pkt)
{
	return __bth_qpn(pkt->hdr + pkt->offset);
}

static inline void bth_set_qpn(struct rxe_pkt_info *pkt, u32 qpn)
{
	__bth_set_qpn(pkt->hdr + pkt->offset, qpn);
}

static inline int bth_fecn(struct rxe_pkt_info *pkt)
{
	return __bth_fecn(pkt->hdr + pkt->offset);
}

static inline void bth_set_fecn(struct rxe_pkt_info *pkt, int fecn)
{
	__bth_set_fecn(pkt->hdr + pkt->offset, fecn);
}

static inline int bth_becn(struct rxe_pkt_info *pkt)
{
	return __bth_becn(pkt->hdr + pkt->offset);
}

static inline void bth_set_becn(struct rxe_pkt_info *pkt, int becn)
{
	__bth_set_becn(pkt->hdr + pkt->offset, becn);
}

static inline u8 bth_resv6a(struct rxe_pkt_info *pkt)
{
	return __bth_resv6a(pkt->hdr + pkt->offset);
}

static inline void bth_set_resv6a(struct rxe_pkt_info *pkt)
{
	__bth_set_resv6a(pkt->hdr + pkt->offset);
}

static inline int bth_ack(struct rxe_pkt_info *pkt)
{
	return __bth_ack(pkt->hdr + pkt->offset);
}

static inline void bth_set_ack(struct rxe_pkt_info *pkt, int ack)
{
	__bth_set_ack(pkt->hdr + pkt->offset, ack);
}

static inline void bth_set_resv7(struct rxe_pkt_info *pkt)
{
	__bth_set_resv7(pkt->hdr + pkt->offset);
}

static inline u32 bth_psn(struct rxe_pkt_info *pkt)
{
	return __bth_psn(pkt->hdr + pkt->offset);
}

static inline void bth_set_psn(struct rxe_pkt_info *pkt, u32 psn)
{
	__bth_set_psn(pkt->hdr + pkt->offset, psn);
}

static inline void bth_init(struct rxe_pkt_info *pkt, u8 opcode, int se,
			    int mig, int pad, u16 pkey, u32 qpn, int ack_req,
			    u32 psn)
{
	struct rxe_bth *bth = (struct rxe_bth *)(pkt->hdr + pkt->offset);

	bth->opcode = opcode;
	bth->flags = (pad << 4) & BTH_PAD_MASK;
	if (se)
		bth->flags |= BTH_SE_MASK;
	if (mig)
		bth->flags |= BTH_MIG_MASK;
	bth->pkey = cpu_to_be16(pkey);
	bth->qpn = cpu_to_be32(qpn & BTH_QPN_MASK);
	psn &= BTH_PSN_MASK;
	if (ack_req)
		psn |= BTH_ACK_MASK;
	bth->apsn = cpu_to_be32(psn);
}

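/*
 * Example (illustrative only): building the BTH for an RC SEND ONLY
 * packet on QP 0x17 in the default partition, with no padding and an
 * ack requested. IB_OPCODE_RC_SEND_ONLY comes from rdma/ib_pack.h; the
 * QP number and psn are arbitrary here:
 *
 *	bth_init(pkt, IB_OPCODE_RC_SEND_ONLY, 0, 0, 0, BTH_DEF_PKEY,
 *		 0x17, 1, psn);
 */
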
/******************************************************************************
 * Reliable Datagram Extended Transport Header
 ******************************************************************************/
struct rxe_rdeth {
	__be32 een;
};

#define RDETH_EEN_MASK		(0x00ffffff)

/* The EEN is a 24-bit field, so it is handled as a u32 */
static inline u32 __rdeth_een(void *arg)
{
	struct rxe_rdeth *rdeth = arg;

	return RDETH_EEN_MASK & be32_to_cpu(rdeth->een);
}

static inline void __rdeth_set_een(void *arg, u32 een)
{
	struct rxe_rdeth *rdeth = arg;

	rdeth->een = cpu_to_be32(RDETH_EEN_MASK & een);
}

static inline u32 rdeth_een(struct rxe_pkt_info *pkt)
{
	return __rdeth_een(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_RDETH]);
}

static inline void rdeth_set_een(struct rxe_pkt_info *pkt, u32 een)
{
	__rdeth_set_een(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_RDETH], een);
}

/******************************************************************************
 * Datagram Extended Transport Header
 ******************************************************************************/
struct rxe_deth {
	__be32 qkey;
	__be32 sqp;
};

#define GSI_QKEY		(0x80010000)
#define DETH_SQP_MASK		(0x00ffffff)

static inline u32 __deth_qkey(void *arg)
{
	struct rxe_deth *deth = arg;

	return be32_to_cpu(deth->qkey);
}

static inline void __deth_set_qkey(void *arg, u32 qkey)
{
	struct rxe_deth *deth = arg;

	deth->qkey = cpu_to_be32(qkey);
}

static inline u32 __deth_sqp(void *arg)
{
	struct rxe_deth *deth = arg;

	return DETH_SQP_MASK & be32_to_cpu(deth->sqp);
}

static inline void __deth_set_sqp(void *arg, u32 sqp)
{
	struct rxe_deth *deth = arg;

	deth->sqp = cpu_to_be32(DETH_SQP_MASK & sqp);
}

static inline u32 deth_qkey(struct rxe_pkt_info *pkt)
{
	return __deth_qkey(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_DETH]);
}

static inline void deth_set_qkey(struct rxe_pkt_info *pkt, u32 qkey)
{
	__deth_set_qkey(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_DETH], qkey);
}

static inline u32 deth_sqp(struct rxe_pkt_info *pkt)
{
	return __deth_sqp(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_DETH]);
}

static inline void deth_set_sqp(struct rxe_pkt_info *pkt, u32 sqp)
{
	__deth_set_sqp(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_DETH], sqp);
}

/******************************************************************************
 * RDMA Extended Transport Header
 ******************************************************************************/
struct rxe_reth {
	__be64 va;
	__be32 rkey;
	__be32 len;
};

static inline u64 __reth_va(void *arg)
{
	struct rxe_reth *reth = arg;

	return be64_to_cpu(reth->va);
}

static inline void __reth_set_va(void *arg, u64 va)
{
	struct rxe_reth *reth = arg;

	reth->va = cpu_to_be64(va);
}

static inline u32 __reth_rkey(void *arg)
{
	struct rxe_reth *reth = arg;

	return be32_to_cpu(reth->rkey);
}

static inline void __reth_set_rkey(void *arg, u32 rkey)
{
	struct rxe_reth *reth = arg;

	reth->rkey = cpu_to_be32(rkey);
}

static inline u32 __reth_len(void *arg)
{
	struct rxe_reth *reth = arg;

	return be32_to_cpu(reth->len);
}

static inline void __reth_set_len(void *arg, u32 len)
{
	struct rxe_reth *reth = arg;

	reth->len = cpu_to_be32(len);
}

static inline u64 reth_va(struct rxe_pkt_info *pkt)
{
	return __reth_va(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_RETH]);
}

static inline void reth_set_va(struct rxe_pkt_info *pkt, u64 va)
{
	__reth_set_va(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_RETH], va);
}

static inline u32 reth_rkey(struct rxe_pkt_info *pkt)
{
	return __reth_rkey(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_RETH]);
}

static inline void reth_set_rkey(struct rxe_pkt_info *pkt, u32 rkey)
{
	__reth_set_rkey(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_RETH], rkey);
}

static inline u32 reth_len(struct rxe_pkt_info *pkt)
{
	return __reth_len(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_RETH]);
}

static inline void reth_set_len(struct rxe_pkt_info *pkt, u32 len)
{
	__reth_set_len(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_RETH], len);
}

/******************************************************************************
 * Atomic Extended Transport Header
 ******************************************************************************/
struct rxe_atmeth {
	__be64 va;
	__be32 rkey;
	__be64 swap_add;
	__be64 comp;
} __attribute__((__packed__));

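/*
 * Note: per the IBA definition of the AtomicETH, for compare & swap
 * the swap_add field carries the swap value and comp the compare
 * value; for fetch & add, swap_add carries the value to be added and
 * comp is ignored.
 */
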
static inline u64 __atmeth_va(void *arg)
{
	struct rxe_atmeth *atmeth = arg;

	return be64_to_cpu(atmeth->va);
}

static inline void __atmeth_set_va(void *arg, u64 va)
{
	struct rxe_atmeth *atmeth = arg;

	atmeth->va = cpu_to_be64(va);
}

static inline u32 __atmeth_rkey(void *arg)
{
	struct rxe_atmeth *atmeth = arg;

	return be32_to_cpu(atmeth->rkey);
}

static inline void __atmeth_set_rkey(void *arg, u32 rkey)
{
	struct rxe_atmeth *atmeth = arg;

	atmeth->rkey = cpu_to_be32(rkey);
}

static inline u64 __atmeth_swap_add(void *arg)
{
	struct rxe_atmeth *atmeth = arg;

	return be64_to_cpu(atmeth->swap_add);
}

static inline void __atmeth_set_swap_add(void *arg, u64 swap_add)
{
	struct rxe_atmeth *atmeth = arg;

	atmeth->swap_add = cpu_to_be64(swap_add);
}

static inline u64 __atmeth_comp(void *arg)
{
	struct rxe_atmeth *atmeth = arg;

	return be64_to_cpu(atmeth->comp);
}

static inline void __atmeth_set_comp(void *arg, u64 comp)
{
	struct rxe_atmeth *atmeth = arg;

	atmeth->comp = cpu_to_be64(comp);
}

static inline u64 atmeth_va(struct rxe_pkt_info *pkt)
{
	return __atmeth_va(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
}

static inline void atmeth_set_va(struct rxe_pkt_info *pkt, u64 va)
{
	__atmeth_set_va(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH], va);
}

static inline u32 atmeth_rkey(struct rxe_pkt_info *pkt)
{
	return __atmeth_rkey(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
}

static inline void atmeth_set_rkey(struct rxe_pkt_info *pkt, u32 rkey)
{
	__atmeth_set_rkey(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH], rkey);
}

static inline u64 atmeth_swap_add(struct rxe_pkt_info *pkt)
{
	return __atmeth_swap_add(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
}

static inline void atmeth_set_swap_add(struct rxe_pkt_info *pkt, u64 swap_add)
{
	__atmeth_set_swap_add(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH], swap_add);
}

static inline u64 atmeth_comp(struct rxe_pkt_info *pkt)
{
	return __atmeth_comp(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
}

static inline void atmeth_set_comp(struct rxe_pkt_info *pkt, u64 comp)
{
	__atmeth_set_comp(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH], comp);
}

/******************************************************************************
 * Ack Extended Transport Header
 ******************************************************************************/
struct rxe_aeth {
	__be32 smsn;
};

#define AETH_SYN_MASK		(0xff000000)
#define AETH_MSN_MASK		(0x00ffffff)

enum aeth_syndrome {
	AETH_TYPE_MASK		= 0xe0,
	AETH_ACK		= 0x00,
	AETH_RNR_NAK		= 0x20,
	AETH_RSVD		= 0x40,
	AETH_NAK		= 0x60,
	AETH_ACK_UNLIMITED	= 0x1f,
	AETH_NAK_PSN_SEQ_ERROR	= 0x60,
	AETH_NAK_INVALID_REQ	= 0x61,
	AETH_NAK_REM_ACC_ERR	= 0x62,
	AETH_NAK_REM_OP_ERR	= 0x63,
	AETH_NAK_INV_RD_REQ	= 0x64,
};

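/*
 * Example (illustrative only): the top three bits of the syndrome
 * select the ack type; the low five bits carry credits (for an ACK),
 * the RNR timer value (for an RNR NAK) or the NAK code. A completer
 * might classify an incoming acknowledge like this, where handle_ack(),
 * schedule_rnr_retry() and handle_nak() are hypothetical helpers:
 *
 *	u8 syn = aeth_syn(pkt);
 *
 *	switch (syn & AETH_TYPE_MASK) {
 *	case AETH_ACK:
 *		handle_ack(qp, pkt);
 *		break;
 *	case AETH_RNR_NAK:
 *		schedule_rnr_retry(qp, syn & ~AETH_TYPE_MASK);
 *		break;
 *	case AETH_NAK:
 *		handle_nak(qp, syn);
 *		break;
 *	}
 */
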
static inline u8 __aeth_syn(void *arg)
{
	struct rxe_aeth *aeth = arg;

	return (AETH_SYN_MASK & be32_to_cpu(aeth->smsn)) >> 24;
}

static inline void __aeth_set_syn(void *arg, u8 syn)
{
	struct rxe_aeth *aeth = arg;
	u32 smsn = be32_to_cpu(aeth->smsn);

	aeth->smsn = cpu_to_be32((AETH_SYN_MASK & (syn << 24)) |
				 (~AETH_SYN_MASK & smsn));
}

static inline u32 __aeth_msn(void *arg)
{
	struct rxe_aeth *aeth = arg;

	return AETH_MSN_MASK & be32_to_cpu(aeth->smsn);
}

static inline void __aeth_set_msn(void *arg, u32 msn)
{
	struct rxe_aeth *aeth = arg;
	u32 smsn = be32_to_cpu(aeth->smsn);

	aeth->smsn = cpu_to_be32((AETH_MSN_MASK & msn) |
				 (~AETH_MSN_MASK & smsn));
}

static inline u8 aeth_syn(struct rxe_pkt_info *pkt)
{
	return __aeth_syn(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_AETH]);
}

static inline void aeth_set_syn(struct rxe_pkt_info *pkt, u8 syn)
{
	__aeth_set_syn(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_AETH], syn);
}

static inline u32 aeth_msn(struct rxe_pkt_info *pkt)
{
	return __aeth_msn(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_AETH]);
}

static inline void aeth_set_msn(struct rxe_pkt_info *pkt, u32 msn)
{
	__aeth_set_msn(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_AETH], msn);
}

/******************************************************************************
 * Atomic Ack Extended Transport Header
 ******************************************************************************/
struct rxe_atmack {
	__be64 orig;
};

static inline u64 __atmack_orig(void *arg)
{
	struct rxe_atmack *atmack = arg;

	return be64_to_cpu(atmack->orig);
}

static inline void __atmack_set_orig(void *arg, u64 orig)
{
	struct rxe_atmack *atmack = arg;

	atmack->orig = cpu_to_be64(orig);
}

static inline u64 atmack_orig(struct rxe_pkt_info *pkt)
{
	return __atmack_orig(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMACK]);
}

static inline void atmack_set_orig(struct rxe_pkt_info *pkt, u64 orig)
{
	__atmack_set_orig(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMACK], orig);
}

/******************************************************************************
 * Immediate Extended Transport Header
 ******************************************************************************/
struct rxe_immdt {
	__be32 imm;
};

static inline __be32 __immdt_imm(void *arg)
{
	struct rxe_immdt *immdt = arg;

	return immdt->imm;
}

static inline void __immdt_set_imm(void *arg, __be32 imm)
{
	struct rxe_immdt *immdt = arg;

	immdt->imm = imm;
}

static inline __be32 immdt_imm(struct rxe_pkt_info *pkt)
{
	return __immdt_imm(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_IMMDT]);
}

static inline void immdt_set_imm(struct rxe_pkt_info *pkt, __be32 imm)
{
	__immdt_set_imm(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_IMMDT], imm);
}

/******************************************************************************
 * Invalidate Extended Transport Header
 ******************************************************************************/
struct rxe_ieth {
	__be32 rkey;
};

static inline u32 __ieth_rkey(void *arg)
{
	struct rxe_ieth *ieth = arg;

	return be32_to_cpu(ieth->rkey);
}

static inline void __ieth_set_rkey(void *arg, u32 rkey)
{
	struct rxe_ieth *ieth = arg;

	ieth->rkey = cpu_to_be32(rkey);
}

static inline u32 ieth_rkey(struct rxe_pkt_info *pkt)
{
	return __ieth_rkey(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_IETH]);
}

static inline void ieth_set_rkey(struct rxe_pkt_info *pkt, u32 rkey)
{
	__ieth_set_rkey(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_IETH], rkey);
}

enum rxe_hdr_length {
	RXE_BTH_BYTES		= sizeof(struct rxe_bth),
	RXE_DETH_BYTES		= sizeof(struct rxe_deth),
	RXE_IMMDT_BYTES		= sizeof(struct rxe_immdt),
	RXE_RETH_BYTES		= sizeof(struct rxe_reth),
	RXE_AETH_BYTES		= sizeof(struct rxe_aeth),
	RXE_ATMACK_BYTES	= sizeof(struct rxe_atmack),
	RXE_ATMETH_BYTES	= sizeof(struct rxe_atmeth),
	RXE_IETH_BYTES		= sizeof(struct rxe_ieth),
	RXE_RDETH_BYTES		= sizeof(struct rxe_rdeth),
};

static inline size_t header_size(struct rxe_pkt_info *pkt)
{
	return pkt->offset + rxe_opcode[pkt->opcode].length;
}

static inline void *payload_addr(struct rxe_pkt_info *pkt)
{
	return pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_PAYLOAD];
}

static inline size_t payload_size(struct rxe_pkt_info *pkt)
{
	return pkt->paylen - rxe_opcode[pkt->opcode].offset[RXE_PAYLOAD]
		- bth_pad(pkt) - RXE_ICRC_SIZE;
}

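/*
 * Worked example (illustrative only): for an RC SEND ONLY packet
 * carrying 100 bytes of user data, the payload follows a 12 byte BTH,
 * so rxe_opcode[opcode].offset[RXE_PAYLOAD] is 12. 100 bytes is
 * already 4-byte aligned, so bth_pad() is 0, and the ICRC adds 4:
 *
 *	paylen       = 12 + 100 + 0 + 4 = 116
 *	payload_size = 116 - 12 - 0 - 4 = 100
 */
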
#endif /* RXE_HDR_H */