/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *     - Redistributions of source code must retain the above
 *       copyright notice, this list of conditions and the following
 *       disclaimer.
 *
 *     - Redistributions in binary form must reproduce the above
 *       copyright notice, this list of conditions and the following
 *       disclaimer in the documentation and/or other materials
 *       provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef RXE_HDR_H
#define RXE_HDR_H

/* The extracted information about a packet is carried in the control block
 * (cb) array of its sk_buff, so struct rxe_pkt_info must be at most
 * 48 bytes. It is stored in the cb for received packets.
 */
struct rxe_pkt_info {
        struct rxe_dev          *rxe;           /* device that owns packet */
        struct rxe_qp           *qp;            /* qp that owns packet */
        struct rxe_send_wqe     *wqe;           /* send wqe */
        u8                      *hdr;           /* points to bth */
        u32                     mask;           /* useful info about pkt */
        u32                     psn;            /* bth psn of packet */
        u16                     pkey_index;     /* partition of pkt */
        u16                     paylen;         /* length from bth through icrc */
        u8                      port_num;       /* port pkt received on */
        u8                      opcode;         /* bth opcode of packet */
        u8                      offset;         /* bth offset from pkt->hdr */
};
/* These helpers are valid only for received skbs */
static inline struct rxe_pkt_info *SKB_TO_PKT(struct sk_buff *skb)
{
        BUILD_BUG_ON(sizeof(struct rxe_pkt_info) > sizeof(skb->cb));
        return (void *)skb->cb;
}

static inline struct sk_buff *PKT_TO_SKB(struct rxe_pkt_info *pkt)
{
        return container_of((void *)pkt, struct sk_buff, cb);
}
/*
 * IBA header types and methods
 *
 * Some of these are included for reference and completeness only, since
 * rxe does not currently support the RD transport. Most of this could be
 * moved into the IB core; ib_pack.h has part of it but is incomplete.
 *
 * Header-specific routines insert/extract values to/from headers.
 * The routines named __hhh_fff() and __hhh_set_fff() take a pointer to an
 * hhh header and get (set) the fff field. The routines named hhh_fff()
 * and hhh_set_fff() take a packet info struct and find the header and
 * field based on the opcode in the packet. Conversion between network
 * and cpu byte order is also done here.
 */
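
/* An illustrative sketch of the two accessor styles (assumes an skb whose
 * control block was filled in by the rxe receive path; not a definition
 * from this file):
 *
 *      struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
 *      u32 psn = bth_psn(pkt);               - finds the bth from pkt
 *      __bth_set_psn(pkt->hdr + pkt->offset, - same field, raw pointer form
 *                    (psn + 1) & BTH_PSN_MASK);
 */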

#define RXE_ICRC_SIZE           (4)
#define RXE_MAX_HDR_LENGTH      (80)

/******************************************************************************
 * Base Transport Header
 ******************************************************************************/
struct rxe_bth {
        u8                      opcode;
        u8                      flags;
        __be16                  pkey;
        __be32                  qpn;
        __be32                  apsn;
};

#define BTH_TVER                (0)
#define BTH_DEF_PKEY            (0xffff)

#define BTH_SE_MASK             (0x80)
#define BTH_MIG_MASK            (0x40)
#define BTH_PAD_MASK            (0x30)
#define BTH_TVER_MASK           (0x0f)
#define BTH_FECN_MASK           (0x80000000)
#define BTH_BECN_MASK           (0x40000000)
#define BTH_RESV6A_MASK         (0x3f000000)
#define BTH_QPN_MASK            (0x00ffffff)
#define BTH_ACK_MASK            (0x80000000)
#define BTH_RESV7_MASK          (0x7f000000)
#define BTH_PSN_MASK            (0x00ffffff)

static inline u8 __bth_opcode(void *arg)
{
        struct rxe_bth *bth = arg;
        return bth->opcode;
}

static inline void __bth_set_opcode(void *arg, u8 opcode)
{
        struct rxe_bth *bth = arg;
        bth->opcode = opcode;
}

static inline u8 __bth_se(void *arg)
{
        struct rxe_bth *bth = arg;
        return 0 != (BTH_SE_MASK & bth->flags);
}

static inline void __bth_set_se(void *arg, int se)
{
        struct rxe_bth *bth = arg;
        if (se)
                bth->flags |= BTH_SE_MASK;
        else
                bth->flags &= ~BTH_SE_MASK;
}

static inline u8 __bth_mig(void *arg)
{
        struct rxe_bth *bth = arg;
        return 0 != (BTH_MIG_MASK & bth->flags);
}

static inline void __bth_set_mig(void *arg, u8 mig)
{
        struct rxe_bth *bth = arg;
        if (mig)
                bth->flags |= BTH_MIG_MASK;
        else
                bth->flags &= ~BTH_MIG_MASK;
}

static inline u8 __bth_pad(void *arg)
{
        struct rxe_bth *bth = arg;
        return (BTH_PAD_MASK & bth->flags) >> 4;
}

static inline void __bth_set_pad(void *arg, u8 pad)
{
        struct rxe_bth *bth = arg;
        bth->flags = (BTH_PAD_MASK & (pad << 4)) |
                     (~BTH_PAD_MASK & bth->flags);
}

static inline u8 __bth_tver(void *arg)
{
        struct rxe_bth *bth = arg;
        return BTH_TVER_MASK & bth->flags;
}

static inline void __bth_set_tver(void *arg, u8 tver)
{
        struct rxe_bth *bth = arg;
        bth->flags = (BTH_TVER_MASK & tver) |
                     (~BTH_TVER_MASK & bth->flags);
}

static inline u16 __bth_pkey(void *arg)
{
        struct rxe_bth *bth = arg;
        return be16_to_cpu(bth->pkey);
}

static inline void __bth_set_pkey(void *arg, u16 pkey)
{
        struct rxe_bth *bth = arg;
        bth->pkey = cpu_to_be16(pkey);
}

static inline u32 __bth_qpn(void *arg)
{
        struct rxe_bth *bth = arg;
        return BTH_QPN_MASK & be32_to_cpu(bth->qpn);
}

static inline void __bth_set_qpn(void *arg, u32 qpn)
{
        struct rxe_bth *bth = arg;
        u32 resvqpn = be32_to_cpu(bth->qpn);
        bth->qpn = cpu_to_be32((BTH_QPN_MASK & qpn) |
                               (~BTH_QPN_MASK & resvqpn));
}

static inline int __bth_fecn(void *arg)
{
        struct rxe_bth *bth = arg;
        return 0 != (cpu_to_be32(BTH_FECN_MASK) & bth->qpn);
}

static inline void __bth_set_fecn(void *arg, int fecn)
{
        struct rxe_bth *bth = arg;
        if (fecn)
                bth->qpn |= cpu_to_be32(BTH_FECN_MASK);
        else
                bth->qpn &= ~cpu_to_be32(BTH_FECN_MASK);
}

static inline int __bth_becn(void *arg)
{
        struct rxe_bth *bth = arg;
        return 0 != (cpu_to_be32(BTH_BECN_MASK) & bth->qpn);
}

static inline void __bth_set_becn(void *arg, int becn)
{
        struct rxe_bth *bth = arg;
        if (becn)
                bth->qpn |= cpu_to_be32(BTH_BECN_MASK);
        else
                bth->qpn &= ~cpu_to_be32(BTH_BECN_MASK);
}

static inline u8 __bth_resv6a(void *arg)
{
        struct rxe_bth *bth = arg;
        return (BTH_RESV6A_MASK & be32_to_cpu(bth->qpn)) >> 24;
}

static inline void __bth_set_resv6a(void *arg)
{
        struct rxe_bth *bth = arg;
        /* clear only the reserved bits, preserving the rest of qpn */
        bth->qpn &= cpu_to_be32(~BTH_RESV6A_MASK);
}

static inline int __bth_ack(void *arg)
{
        struct rxe_bth *bth = arg;
        return 0 != (cpu_to_be32(BTH_ACK_MASK) & bth->apsn);
}

static inline void __bth_set_ack(void *arg, int ack)
{
        struct rxe_bth *bth = arg;
        if (ack)
                bth->apsn |= cpu_to_be32(BTH_ACK_MASK);
        else
                bth->apsn &= ~cpu_to_be32(BTH_ACK_MASK);
}

static inline void __bth_set_resv7(void *arg)
{
        struct rxe_bth *bth = arg;
        bth->apsn &= ~cpu_to_be32(BTH_RESV7_MASK);
}

static inline u32 __bth_psn(void *arg)
{
        struct rxe_bth *bth = arg;
        return BTH_PSN_MASK & be32_to_cpu(bth->apsn);
}

static inline void __bth_set_psn(void *arg, u32 psn)
{
        struct rxe_bth *bth = arg;
        u32 apsn = be32_to_cpu(bth->apsn);
        bth->apsn = cpu_to_be32((BTH_PSN_MASK & psn) |
                                (~BTH_PSN_MASK & apsn));
}

static inline u8 bth_opcode(struct rxe_pkt_info *pkt)
{
        return __bth_opcode(pkt->hdr + pkt->offset);
}

static inline void bth_set_opcode(struct rxe_pkt_info *pkt, u8 opcode)
{
        __bth_set_opcode(pkt->hdr + pkt->offset, opcode);
}

static inline u8 bth_se(struct rxe_pkt_info *pkt)
{
        return __bth_se(pkt->hdr + pkt->offset);
}

static inline void bth_set_se(struct rxe_pkt_info *pkt, int se)
{
        __bth_set_se(pkt->hdr + pkt->offset, se);
}

static inline u8 bth_mig(struct rxe_pkt_info *pkt)
{
        return __bth_mig(pkt->hdr + pkt->offset);
}

static inline void bth_set_mig(struct rxe_pkt_info *pkt, u8 mig)
{
        __bth_set_mig(pkt->hdr + pkt->offset, mig);
}

static inline u8 bth_pad(struct rxe_pkt_info *pkt)
{
        return __bth_pad(pkt->hdr + pkt->offset);
}

static inline void bth_set_pad(struct rxe_pkt_info *pkt, u8 pad)
{
        __bth_set_pad(pkt->hdr + pkt->offset, pad);
}

static inline u8 bth_tver(struct rxe_pkt_info *pkt)
{
        return __bth_tver(pkt->hdr + pkt->offset);
}

static inline void bth_set_tver(struct rxe_pkt_info *pkt, u8 tver)
{
        __bth_set_tver(pkt->hdr + pkt->offset, tver);
}

static inline u16 bth_pkey(struct rxe_pkt_info *pkt)
{
        return __bth_pkey(pkt->hdr + pkt->offset);
}

static inline void bth_set_pkey(struct rxe_pkt_info *pkt, u16 pkey)
{
        __bth_set_pkey(pkt->hdr + pkt->offset, pkey);
}

static inline u32 bth_qpn(struct rxe_pkt_info *pkt)
{
        return __bth_qpn(pkt->hdr + pkt->offset);
}

static inline void bth_set_qpn(struct rxe_pkt_info *pkt, u32 qpn)
{
        __bth_set_qpn(pkt->hdr + pkt->offset, qpn);
}

static inline int bth_fecn(struct rxe_pkt_info *pkt)
{
        return __bth_fecn(pkt->hdr + pkt->offset);
}

static inline void bth_set_fecn(struct rxe_pkt_info *pkt, int fecn)
{
        __bth_set_fecn(pkt->hdr + pkt->offset, fecn);
}

static inline int bth_becn(struct rxe_pkt_info *pkt)
{
        return __bth_becn(pkt->hdr + pkt->offset);
}

static inline void bth_set_becn(struct rxe_pkt_info *pkt, int becn)
{
        __bth_set_becn(pkt->hdr + pkt->offset, becn);
}

static inline u8 bth_resv6a(struct rxe_pkt_info *pkt)
{
        return __bth_resv6a(pkt->hdr + pkt->offset);
}

static inline void bth_set_resv6a(struct rxe_pkt_info *pkt)
{
        __bth_set_resv6a(pkt->hdr + pkt->offset);
}

static inline int bth_ack(struct rxe_pkt_info *pkt)
{
        return __bth_ack(pkt->hdr + pkt->offset);
}

static inline void bth_set_ack(struct rxe_pkt_info *pkt, int ack)
{
        __bth_set_ack(pkt->hdr + pkt->offset, ack);
}

static inline void bth_set_resv7(struct rxe_pkt_info *pkt)
{
        __bth_set_resv7(pkt->hdr + pkt->offset);
}

static inline u32 bth_psn(struct rxe_pkt_info *pkt)
{
        return __bth_psn(pkt->hdr + pkt->offset);
}

static inline void bth_set_psn(struct rxe_pkt_info *pkt, u32 psn)
{
        __bth_set_psn(pkt->hdr + pkt->offset, psn);
}

static inline void bth_init(struct rxe_pkt_info *pkt, u8 opcode, int se,
                            int mig, int pad, u16 pkey, u32 qpn, int ack_req,
                            u32 psn)
{
        struct rxe_bth *bth = (struct rxe_bth *)(pkt->hdr + pkt->offset);

        bth->opcode = opcode;
        bth->flags = (pad << 4) & BTH_PAD_MASK;
        if (se)
                bth->flags |= BTH_SE_MASK;
        if (mig)
                bth->flags |= BTH_MIG_MASK;
        bth->pkey = cpu_to_be16(pkey);
        bth->qpn = cpu_to_be32(qpn & BTH_QPN_MASK);
        psn &= BTH_PSN_MASK;
        if (ack_req)
                psn |= BTH_ACK_MASK;
        bth->apsn = cpu_to_be32(psn);
}
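
/* A sketch of typical use when a requester builds a packet (opcode,
 * solicited, pad, qp_num and ack_req are illustrative locals, not
 * definitions from this file):
 *
 *      bth_init(pkt, opcode, solicited, 0, pad, BTH_DEF_PKEY,
 *               qp_num, ack_req, pkt->psn);
 */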

/******************************************************************************
 * Reliable Datagram Extended Transport Header
 ******************************************************************************/
struct rxe_rdeth {
        __be32                  een;
};

#define RDETH_EEN_MASK          (0x00ffffff)

/* een is a 24-bit field, so these return u32 */
static inline u32 __rdeth_een(void *arg)
{
        struct rxe_rdeth *rdeth = arg;
        return RDETH_EEN_MASK & be32_to_cpu(rdeth->een);
}

static inline void __rdeth_set_een(void *arg, u32 een)
{
        struct rxe_rdeth *rdeth = arg;
        rdeth->een = cpu_to_be32(RDETH_EEN_MASK & een);
}

static inline u32 rdeth_een(struct rxe_pkt_info *pkt)
{
        return __rdeth_een(pkt->hdr + pkt->offset +
                           rxe_opcode[pkt->opcode].offset[RXE_RDETH]);
}

static inline void rdeth_set_een(struct rxe_pkt_info *pkt, u32 een)
{
        __rdeth_set_een(pkt->hdr + pkt->offset +
                        rxe_opcode[pkt->opcode].offset[RXE_RDETH], een);
}

/******************************************************************************
 * Datagram Extended Transport Header
 ******************************************************************************/
struct rxe_deth {
        __be32                  qkey;
        __be32                  sqp;
};

#define GSI_QKEY                (0x80010000)
#define DETH_SQP_MASK           (0x00ffffff)

static inline u32 __deth_qkey(void *arg)
{
        struct rxe_deth *deth = arg;
        return be32_to_cpu(deth->qkey);
}

static inline void __deth_set_qkey(void *arg, u32 qkey)
{
        struct rxe_deth *deth = arg;
        deth->qkey = cpu_to_be32(qkey);
}

static inline u32 __deth_sqp(void *arg)
{
        struct rxe_deth *deth = arg;
        return DETH_SQP_MASK & be32_to_cpu(deth->sqp);
}

static inline void __deth_set_sqp(void *arg, u32 sqp)
{
        struct rxe_deth *deth = arg;
        deth->sqp = cpu_to_be32(DETH_SQP_MASK & sqp);
}

static inline u32 deth_qkey(struct rxe_pkt_info *pkt)
{
        return __deth_qkey(pkt->hdr + pkt->offset +
                           rxe_opcode[pkt->opcode].offset[RXE_DETH]);
}

static inline void deth_set_qkey(struct rxe_pkt_info *pkt, u32 qkey)
{
        __deth_set_qkey(pkt->hdr + pkt->offset +
                        rxe_opcode[pkt->opcode].offset[RXE_DETH], qkey);
}

static inline u32 deth_sqp(struct rxe_pkt_info *pkt)
{
        return __deth_sqp(pkt->hdr + pkt->offset +
                          rxe_opcode[pkt->opcode].offset[RXE_DETH]);
}

static inline void deth_set_sqp(struct rxe_pkt_info *pkt, u32 sqp)
{
        __deth_set_sqp(pkt->hdr + pkt->offset +
                       rxe_opcode[pkt->opcode].offset[RXE_DETH], sqp);
}

/******************************************************************************
 * RDMA Extended Transport Header
 ******************************************************************************/
struct rxe_reth {
        __be64                  va;
        __be32                  rkey;
        __be32                  len;
};

static inline u64 __reth_va(void *arg)
{
        struct rxe_reth *reth = arg;
        return be64_to_cpu(reth->va);
}

static inline void __reth_set_va(void *arg, u64 va)
{
        struct rxe_reth *reth = arg;
        reth->va = cpu_to_be64(va);
}

static inline u32 __reth_rkey(void *arg)
{
        struct rxe_reth *reth = arg;
        return be32_to_cpu(reth->rkey);
}

static inline void __reth_set_rkey(void *arg, u32 rkey)
{
        struct rxe_reth *reth = arg;
        reth->rkey = cpu_to_be32(rkey);
}

static inline u32 __reth_len(void *arg)
{
        struct rxe_reth *reth = arg;
        return be32_to_cpu(reth->len);
}

static inline void __reth_set_len(void *arg, u32 len)
{
        struct rxe_reth *reth = arg;
        reth->len = cpu_to_be32(len);
}

static inline u64 reth_va(struct rxe_pkt_info *pkt)
{
        return __reth_va(pkt->hdr + pkt->offset +
                         rxe_opcode[pkt->opcode].offset[RXE_RETH]);
}

static inline void reth_set_va(struct rxe_pkt_info *pkt, u64 va)
{
        __reth_set_va(pkt->hdr + pkt->offset +
                      rxe_opcode[pkt->opcode].offset[RXE_RETH], va);
}

static inline u32 reth_rkey(struct rxe_pkt_info *pkt)
{
        return __reth_rkey(pkt->hdr + pkt->offset +
                           rxe_opcode[pkt->opcode].offset[RXE_RETH]);
}

static inline void reth_set_rkey(struct rxe_pkt_info *pkt, u32 rkey)
{
        __reth_set_rkey(pkt->hdr + pkt->offset +
                        rxe_opcode[pkt->opcode].offset[RXE_RETH], rkey);
}

static inline u32 reth_len(struct rxe_pkt_info *pkt)
{
        return __reth_len(pkt->hdr + pkt->offset +
                          rxe_opcode[pkt->opcode].offset[RXE_RETH]);
}

static inline void reth_set_len(struct rxe_pkt_info *pkt, u32 len)
{
        __reth_set_len(pkt->hdr + pkt->offset +
                       rxe_opcode[pkt->opcode].offset[RXE_RETH], len);
}

/******************************************************************************
 * Atomic Extended Transport Header
 ******************************************************************************/
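/* Without the packed attribute the compiler would insert 4 bytes of padding
 * after rkey to align swap_add, giving a 32-byte struct; on the wire the
 * AtomicETH is exactly 28 bytes (va 8, rkey 4, swap_add 8, comp 8).
 */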
struct rxe_atmeth {
        __be64                  va;
        __be32                  rkey;
        __be64                  swap_add;
        __be64                  comp;
} __attribute__((__packed__));

static inline u64 __atmeth_va(void *arg)
{
        struct rxe_atmeth *atmeth = arg;
        return be64_to_cpu(atmeth->va);
}

static inline void __atmeth_set_va(void *arg, u64 va)
{
        struct rxe_atmeth *atmeth = arg;
        atmeth->va = cpu_to_be64(va);
}

static inline u32 __atmeth_rkey(void *arg)
{
        struct rxe_atmeth *atmeth = arg;
        return be32_to_cpu(atmeth->rkey);
}

static inline void __atmeth_set_rkey(void *arg, u32 rkey)
{
        struct rxe_atmeth *atmeth = arg;
        atmeth->rkey = cpu_to_be32(rkey);
}

static inline u64 __atmeth_swap_add(void *arg)
{
        struct rxe_atmeth *atmeth = arg;
        return be64_to_cpu(atmeth->swap_add);
}

static inline void __atmeth_set_swap_add(void *arg, u64 swap_add)
{
        struct rxe_atmeth *atmeth = arg;
        atmeth->swap_add = cpu_to_be64(swap_add);
}

static inline u64 __atmeth_comp(void *arg)
{
        struct rxe_atmeth *atmeth = arg;
        return be64_to_cpu(atmeth->comp);
}

static inline void __atmeth_set_comp(void *arg, u64 comp)
{
        struct rxe_atmeth *atmeth = arg;
        atmeth->comp = cpu_to_be64(comp);
}

static inline u64 atmeth_va(struct rxe_pkt_info *pkt)
{
        return __atmeth_va(pkt->hdr + pkt->offset +
                           rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
}

static inline void atmeth_set_va(struct rxe_pkt_info *pkt, u64 va)
{
        __atmeth_set_va(pkt->hdr + pkt->offset +
                        rxe_opcode[pkt->opcode].offset[RXE_ATMETH], va);
}

static inline u32 atmeth_rkey(struct rxe_pkt_info *pkt)
{
        return __atmeth_rkey(pkt->hdr + pkt->offset +
                             rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
}

static inline void atmeth_set_rkey(struct rxe_pkt_info *pkt, u32 rkey)
{
        __atmeth_set_rkey(pkt->hdr + pkt->offset +
                          rxe_opcode[pkt->opcode].offset[RXE_ATMETH], rkey);
}

static inline u64 atmeth_swap_add(struct rxe_pkt_info *pkt)
{
        return __atmeth_swap_add(pkt->hdr + pkt->offset +
                                 rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
}

static inline void atmeth_set_swap_add(struct rxe_pkt_info *pkt, u64 swap_add)
{
        __atmeth_set_swap_add(pkt->hdr + pkt->offset +
                              rxe_opcode[pkt->opcode].offset[RXE_ATMETH],
                              swap_add);
}

static inline u64 atmeth_comp(struct rxe_pkt_info *pkt)
{
        return __atmeth_comp(pkt->hdr + pkt->offset +
                             rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
}

static inline void atmeth_set_comp(struct rxe_pkt_info *pkt, u64 comp)
{
        __atmeth_set_comp(pkt->hdr + pkt->offset +
                          rxe_opcode[pkt->opcode].offset[RXE_ATMETH], comp);
}

/******************************************************************************
 * Ack Extended Transport Header
 ******************************************************************************/
struct rxe_aeth {
        __be32                  smsn;
};

#define AETH_SYN_MASK           (0xff000000)
#define AETH_MSN_MASK           (0x00ffffff)

enum aeth_syndrome {
        AETH_TYPE_MASK          = 0xe0,
        AETH_ACK                = 0x00,
        AETH_RNR_NAK            = 0x20,
        AETH_RSVD               = 0x40,
        AETH_NAK                = 0x60,
        AETH_ACK_UNLIMITED      = 0x1f,
        AETH_NAK_PSN_SEQ_ERROR  = 0x60,
        AETH_NAK_INVALID_REQ    = 0x61,
        AETH_NAK_REM_ACC_ERR    = 0x62,
        AETH_NAK_REM_OP_ERR     = 0x63,
        AETH_NAK_INV_RD_REQ     = 0x64,
};
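
/* The top three bits of the syndrome select the ack type and the low five
 * bits carry type-specific data (a credit count for an ack, a timer value
 * for an RNR nak). An illustrative sketch, not code from this file
 * (rnr_timer is a hypothetical local):
 *
 *      aeth_set_syn(pkt, AETH_ACK | AETH_ACK_UNLIMITED);
 *      aeth_set_syn(pkt, AETH_RNR_NAK | rnr_timer);
 */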

static inline u8 __aeth_syn(void *arg)
{
        struct rxe_aeth *aeth = arg;
        return (AETH_SYN_MASK & be32_to_cpu(aeth->smsn)) >> 24;
}

static inline void __aeth_set_syn(void *arg, u8 syn)
{
        struct rxe_aeth *aeth = arg;
        u32 smsn = be32_to_cpu(aeth->smsn);
        aeth->smsn = cpu_to_be32((AETH_SYN_MASK & (syn << 24)) |
                                 (~AETH_SYN_MASK & smsn));
}

static inline u32 __aeth_msn(void *arg)
{
        struct rxe_aeth *aeth = arg;
        return AETH_MSN_MASK & be32_to_cpu(aeth->smsn);
}

static inline void __aeth_set_msn(void *arg, u32 msn)
{
        struct rxe_aeth *aeth = arg;
        u32 smsn = be32_to_cpu(aeth->smsn);
        aeth->smsn = cpu_to_be32((AETH_MSN_MASK & msn) |
                                 (~AETH_MSN_MASK & smsn));
}

static inline u8 aeth_syn(struct rxe_pkt_info *pkt)
{
        return __aeth_syn(pkt->hdr + pkt->offset +
                          rxe_opcode[pkt->opcode].offset[RXE_AETH]);
}

static inline void aeth_set_syn(struct rxe_pkt_info *pkt, u8 syn)
{
        __aeth_set_syn(pkt->hdr + pkt->offset +
                       rxe_opcode[pkt->opcode].offset[RXE_AETH], syn);
}

static inline u32 aeth_msn(struct rxe_pkt_info *pkt)
{
        return __aeth_msn(pkt->hdr + pkt->offset +
                          rxe_opcode[pkt->opcode].offset[RXE_AETH]);
}

static inline void aeth_set_msn(struct rxe_pkt_info *pkt, u32 msn)
{
        __aeth_set_msn(pkt->hdr + pkt->offset +
                       rxe_opcode[pkt->opcode].offset[RXE_AETH], msn);
}

/******************************************************************************
 * Atomic Ack Extended Transport Header
 ******************************************************************************/
struct rxe_atmack {
        __be64                  orig;
};

static inline u64 __atmack_orig(void *arg)
{
        struct rxe_atmack *atmack = arg;
        return be64_to_cpu(atmack->orig);
}

static inline void __atmack_set_orig(void *arg, u64 orig)
{
        struct rxe_atmack *atmack = arg;
        atmack->orig = cpu_to_be64(orig);
}

static inline u64 atmack_orig(struct rxe_pkt_info *pkt)
{
        return __atmack_orig(pkt->hdr + pkt->offset +
                             rxe_opcode[pkt->opcode].offset[RXE_ATMACK]);
}

static inline void atmack_set_orig(struct rxe_pkt_info *pkt, u64 orig)
{
        __atmack_set_orig(pkt->hdr + pkt->offset +
                          rxe_opcode[pkt->opcode].offset[RXE_ATMACK], orig);
}

/******************************************************************************
 * Immediate Extended Transport Header
 ******************************************************************************/
struct rxe_immdt {
        __be32                  imm;
};

static inline __be32 __immdt_imm(void *arg)
{
        struct rxe_immdt *immdt = arg;
        return immdt->imm;
}

static inline void __immdt_set_imm(void *arg, __be32 imm)
{
        struct rxe_immdt *immdt = arg;
        immdt->imm = imm;
}

static inline __be32 immdt_imm(struct rxe_pkt_info *pkt)
{
        return __immdt_imm(pkt->hdr + pkt->offset +
                           rxe_opcode[pkt->opcode].offset[RXE_IMMDT]);
}

static inline void immdt_set_imm(struct rxe_pkt_info *pkt, __be32 imm)
{
        __immdt_set_imm(pkt->hdr + pkt->offset +
                        rxe_opcode[pkt->opcode].offset[RXE_IMMDT], imm);
}

/******************************************************************************
 * Invalidate Extended Transport Header
 ******************************************************************************/
struct rxe_ieth {
        __be32                  rkey;
};

static inline u32 __ieth_rkey(void *arg)
{
        struct rxe_ieth *ieth = arg;
        return be32_to_cpu(ieth->rkey);
}

static inline void __ieth_set_rkey(void *arg, u32 rkey)
{
        struct rxe_ieth *ieth = arg;
        ieth->rkey = cpu_to_be32(rkey);
}

static inline u32 ieth_rkey(struct rxe_pkt_info *pkt)
{
        return __ieth_rkey(pkt->hdr + pkt->offset +
                           rxe_opcode[pkt->opcode].offset[RXE_IETH]);
}

static inline void ieth_set_rkey(struct rxe_pkt_info *pkt, u32 rkey)
{
        __ieth_set_rkey(pkt->hdr + pkt->offset +
                        rxe_opcode[pkt->opcode].offset[RXE_IETH], rkey);
}

enum rxe_hdr_length {
        RXE_BTH_BYTES           = sizeof(struct rxe_bth),
        RXE_DETH_BYTES          = sizeof(struct rxe_deth),
        RXE_IMMDT_BYTES         = sizeof(struct rxe_immdt),
        RXE_RETH_BYTES          = sizeof(struct rxe_reth),
        RXE_AETH_BYTES          = sizeof(struct rxe_aeth),
        RXE_ATMACK_BYTES        = sizeof(struct rxe_atmack),
        RXE_ATMETH_BYTES        = sizeof(struct rxe_atmeth),
        RXE_IETH_BYTES          = sizeof(struct rxe_ieth),
        RXE_RDETH_BYTES         = sizeof(struct rxe_rdeth),
};

static inline size_t header_size(struct rxe_pkt_info *pkt)
{
        return pkt->offset + rxe_opcode[pkt->opcode].length;
}

static inline void *payload_addr(struct rxe_pkt_info *pkt)
{
        return pkt->hdr + pkt->offset +
                rxe_opcode[pkt->opcode].offset[RXE_PAYLOAD];
}

static inline size_t payload_size(struct rxe_pkt_info *pkt)
{
        return pkt->paylen - rxe_opcode[pkt->opcode].offset[RXE_PAYLOAD] -
                bth_pad(pkt) - RXE_ICRC_SIZE;
}
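
/* The size helpers above partition pkt->paylen; restating payload_size(),
 * with offset[RXE_PAYLOAD] being the length of the IBA headers starting at
 * the bth:
 *
 *      pkt->paylen == rxe_opcode[pkt->opcode].offset[RXE_PAYLOAD] +
 *                      payload_size(pkt) + bth_pad(pkt) + RXE_ICRC_SIZE
 */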

#endif /* RXE_HDR_H */