/* ice_lan_tx_rx.h — Intel ICE LAN Tx/Rx descriptor and queue-context definitions */
  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. /* Copyright (c) 2018, Intel Corporation. */
  3. #ifndef _ICE_LAN_TX_RX_H_
  4. #define _ICE_LAN_TX_RX_H_
  5. union ice_32byte_rx_desc {
  6. struct {
  7. __le64 pkt_addr; /* Packet buffer address */
  8. __le64 hdr_addr; /* Header buffer address */
  9. /* bit 0 of hdr_addr is DD bit */
  10. __le64 rsvd1;
  11. __le64 rsvd2;
  12. } read;
  13. struct {
  14. struct {
  15. struct {
  16. __le16 mirroring_status;
  17. __le16 l2tag1;
  18. } lo_dword;
  19. union {
  20. __le32 rss; /* RSS Hash */
  21. __le32 fd_id; /* Flow Director filter id */
  22. } hi_dword;
  23. } qword0;
  24. struct {
  25. /* status/error/PTYPE/length */
  26. __le64 status_error_len;
  27. } qword1;
  28. struct {
  29. __le16 ext_status; /* extended status */
  30. __le16 rsvd;
  31. __le16 l2tag2_1;
  32. __le16 l2tag2_2;
  33. } qword2;
  34. struct {
  35. __le32 reserved;
  36. __le32 fd_id;
  37. } qword3;
  38. } wb; /* writeback */
  39. };
  40. struct ice_rx_ptype_decoded {
  41. u32 ptype:10;
  42. u32 known:1;
  43. u32 outer_ip:1;
  44. u32 outer_ip_ver:2;
  45. u32 outer_frag:1;
  46. u32 tunnel_type:3;
  47. u32 tunnel_end_prot:2;
  48. u32 tunnel_end_frag:1;
  49. u32 inner_prot:4;
  50. u32 payload_layer:3;
  51. };
  52. enum ice_rx_ptype_outer_ip {
  53. ICE_RX_PTYPE_OUTER_L2 = 0,
  54. ICE_RX_PTYPE_OUTER_IP = 1,
  55. };
  56. enum ice_rx_ptype_outer_ip_ver {
  57. ICE_RX_PTYPE_OUTER_NONE = 0,
  58. ICE_RX_PTYPE_OUTER_IPV4 = 1,
  59. ICE_RX_PTYPE_OUTER_IPV6 = 2,
  60. };
  61. enum ice_rx_ptype_outer_fragmented {
  62. ICE_RX_PTYPE_NOT_FRAG = 0,
  63. ICE_RX_PTYPE_FRAG = 1,
  64. };
  65. enum ice_rx_ptype_tunnel_type {
  66. ICE_RX_PTYPE_TUNNEL_NONE = 0,
  67. ICE_RX_PTYPE_TUNNEL_IP_IP = 1,
  68. ICE_RX_PTYPE_TUNNEL_IP_GRENAT = 2,
  69. ICE_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3,
  70. ICE_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4,
  71. };
  72. enum ice_rx_ptype_tunnel_end_prot {
  73. ICE_RX_PTYPE_TUNNEL_END_NONE = 0,
  74. ICE_RX_PTYPE_TUNNEL_END_IPV4 = 1,
  75. ICE_RX_PTYPE_TUNNEL_END_IPV6 = 2,
  76. };
  77. enum ice_rx_ptype_inner_prot {
  78. ICE_RX_PTYPE_INNER_PROT_NONE = 0,
  79. ICE_RX_PTYPE_INNER_PROT_UDP = 1,
  80. ICE_RX_PTYPE_INNER_PROT_TCP = 2,
  81. ICE_RX_PTYPE_INNER_PROT_SCTP = 3,
  82. ICE_RX_PTYPE_INNER_PROT_ICMP = 4,
  83. ICE_RX_PTYPE_INNER_PROT_TIMESYNC = 5,
  84. };
  85. enum ice_rx_ptype_payload_layer {
  86. ICE_RX_PTYPE_PAYLOAD_LAYER_NONE = 0,
  87. ICE_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1,
  88. ICE_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2,
  89. ICE_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3,
  90. };
  91. /* RX Flex Descriptor
  92. * This descriptor is used instead of the legacy version descriptor when
  93. * ice_rlan_ctx.adv_desc is set
  94. */
  95. union ice_32b_rx_flex_desc {
  96. struct {
  97. __le64 pkt_addr; /* Packet buffer address */
  98. __le64 hdr_addr; /* Header buffer address */
  99. /* bit 0 of hdr_addr is DD bit */
  100. __le64 rsvd1;
  101. __le64 rsvd2;
  102. } read;
  103. struct {
  104. /* Qword 0 */
  105. u8 rxdid; /* descriptor builder profile id */
  106. u8 mir_id_umb_cast; /* mirror=[5:0], umb=[7:6] */
  107. __le16 ptype_flex_flags0; /* ptype=[9:0], ff0=[15:10] */
  108. __le16 pkt_len; /* [15:14] are reserved */
  109. __le16 hdr_len_sph_flex_flags1; /* header=[10:0] */
  110. /* sph=[11:11] */
  111. /* ff1/ext=[15:12] */
  112. /* Qword 1 */
  113. __le16 status_error0;
  114. __le16 l2tag1;
  115. __le16 flex_meta0;
  116. __le16 flex_meta1;
  117. /* Qword 2 */
  118. __le16 status_error1;
  119. u8 flex_flags2;
  120. u8 time_stamp_low;
  121. __le16 l2tag2_1st;
  122. __le16 l2tag2_2nd;
  123. /* Qword 3 */
  124. __le16 flex_meta2;
  125. __le16 flex_meta3;
  126. union {
  127. struct {
  128. __le16 flex_meta4;
  129. __le16 flex_meta5;
  130. } flex;
  131. __le32 ts_high;
  132. } flex_ts;
  133. } wb; /* writeback */
  134. };
  135. /* Rx Flex Descriptor NIC Profile
  136. * This descriptor corresponds to RxDID 2 which contains
  137. * metadata fields for RSS, flow id and timestamp info
  138. */
  139. struct ice_32b_rx_flex_desc_nic {
  140. /* Qword 0 */
  141. u8 rxdid;
  142. u8 mir_id_umb_cast;
  143. __le16 ptype_flexi_flags0;
  144. __le16 pkt_len;
  145. __le16 hdr_len_sph_flex_flags1;
  146. /* Qword 1 */
  147. __le16 status_error0;
  148. __le16 l2tag1;
  149. __le32 rss_hash;
  150. /* Qword 2 */
  151. __le16 status_error1;
  152. u8 flexi_flags2;
  153. u8 ts_low;
  154. __le16 l2tag2_1st;
  155. __le16 l2tag2_2nd;
  156. /* Qword 3 */
  157. __le32 flow_id;
  158. union {
  159. struct {
  160. __le16 vlan_id;
  161. __le16 flow_id_ipv6;
  162. } flex;
  163. __le32 ts_high;
  164. } flex_ts;
  165. };
  166. /* Receive Flex Descriptor profile IDs: There are a total
  167. * of 64 profiles where profile IDs 0/1 are for legacy; and
  168. * profiles 2-63 are flex profiles that can be programmed
  169. * with a specific metadata (profile 7 reserved for HW)
  170. */
  171. enum ice_rxdid {
  172. ICE_RXDID_START = 0,
  173. ICE_RXDID_LEGACY_0 = ICE_RXDID_START,
  174. ICE_RXDID_LEGACY_1,
  175. ICE_RXDID_FLX_START,
  176. ICE_RXDID_FLEX_NIC = ICE_RXDID_FLX_START,
  177. ICE_RXDID_FLX_LAST = 63,
  178. ICE_RXDID_LAST = ICE_RXDID_FLX_LAST
  179. };
  180. /* Receive Flex Descriptor Rx opcode values */
  181. #define ICE_RX_OPC_MDID 0x01
  182. /* Receive Descriptor MDID values */
  183. #define ICE_RX_MDID_FLOW_ID_LOWER 5
  184. #define ICE_RX_MDID_FLOW_ID_HIGH 6
  185. #define ICE_RX_MDID_HASH_LOW 56
  186. #define ICE_RX_MDID_HASH_HIGH 57
  187. /* Rx Flag64 packet flag bits */
  188. enum ice_rx_flg64_bits {
  189. ICE_RXFLG_PKT_DSI = 0,
  190. ICE_RXFLG_EVLAN_x8100 = 15,
  191. ICE_RXFLG_EVLAN_x9100,
  192. ICE_RXFLG_VLAN_x8100,
  193. ICE_RXFLG_TNL_MAC = 22,
  194. ICE_RXFLG_TNL_VLAN,
  195. ICE_RXFLG_PKT_FRG,
  196. ICE_RXFLG_FIN = 32,
  197. ICE_RXFLG_SYN,
  198. ICE_RXFLG_RST,
  199. ICE_RXFLG_TNL0 = 38,
  200. ICE_RXFLG_TNL1,
  201. ICE_RXFLG_TNL2,
  202. ICE_RXFLG_UDP_GRE,
  203. ICE_RXFLG_RSVD = 63
  204. };
  205. /* for ice_32byte_rx_flex_desc.ptype_flexi_flags0 member */
  206. #define ICE_RX_FLEX_DESC_PTYPE_M (0x3FF) /* 10-bits */
  207. /* for ice_32byte_rx_flex_desc.pkt_length member */
  208. #define ICE_RX_FLX_DESC_PKT_LEN_M (0x3FFF) /* 14-bits */
  209. enum ice_rx_flex_desc_status_error_0_bits {
  210. /* Note: These are predefined bit offsets */
  211. ICE_RX_FLEX_DESC_STATUS0_DD_S = 0,
  212. ICE_RX_FLEX_DESC_STATUS0_EOF_S,
  213. ICE_RX_FLEX_DESC_STATUS0_HBO_S,
  214. ICE_RX_FLEX_DESC_STATUS0_L3L4P_S,
  215. ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S,
  216. ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S,
  217. ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S,
  218. ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S,
  219. ICE_RX_FLEX_DESC_STATUS0_LPBK_S,
  220. ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S,
  221. ICE_RX_FLEX_DESC_STATUS0_RXE_S,
  222. ICE_RX_FLEX_DESC_STATUS0_CRCP_S,
  223. ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S,
  224. ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S,
  225. ICE_RX_FLEX_DESC_STATUS0_XTRMD0_VALID_S,
  226. ICE_RX_FLEX_DESC_STATUS0_XTRMD1_VALID_S,
  227. ICE_RX_FLEX_DESC_STATUS0_LAST /* this entry must be last!!! */
  228. };
  229. #define ICE_RXQ_CTX_SIZE_DWORDS 8
  230. #define ICE_RXQ_CTX_SZ (ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32))
  231. /* RLAN Rx queue context data
  232. *
  233. * The sizes of the variables may be larger than needed due to crossing byte
  234. * boundaries. If we do not have the width of the variable set to the correct
  235. * size then we could end up shifting bits off the top of the variable when the
  236. * variable is at the top of a byte and crosses over into the next byte.
  237. */
  238. struct ice_rlan_ctx {
  239. u16 head;
  240. u16 cpuid; /* bigger than needed, see above for reason */
  241. #define ICE_RLAN_BASE_S 7
  242. u64 base;
  243. u16 qlen;
  244. #define ICE_RLAN_CTX_DBUF_S 7
  245. u16 dbuf; /* bigger than needed, see above for reason */
  246. #define ICE_RLAN_CTX_HBUF_S 6
  247. u16 hbuf; /* bigger than needed, see above for reason */
  248. u8 dtype;
  249. u8 dsize;
  250. u8 crcstrip;
  251. u8 l2tsel;
  252. u8 hsplit_0;
  253. u8 hsplit_1;
  254. u8 showiv;
  255. u32 rxmax; /* bigger than needed, see above for reason */
  256. u8 tphrdesc_ena;
  257. u8 tphwdesc_ena;
  258. u8 tphdata_ena;
  259. u8 tphhead_ena;
  260. u16 lrxqthresh; /* bigger than needed, see above for reason */
  261. };
  262. struct ice_ctx_ele {
  263. u16 offset;
  264. u16 size_of;
  265. u16 width;
  266. u16 lsb;
  267. };
  268. #define ICE_CTX_STORE(_struct, _ele, _width, _lsb) { \
  269. .offset = offsetof(struct _struct, _ele), \
  270. .size_of = FIELD_SIZEOF(struct _struct, _ele), \
  271. .width = _width, \
  272. .lsb = _lsb, \
  273. }
  274. /* for hsplit_0 field of Rx RLAN context */
  275. enum ice_rlan_ctx_rx_hsplit_0 {
  276. ICE_RLAN_RX_HSPLIT_0_NO_SPLIT = 0,
  277. ICE_RLAN_RX_HSPLIT_0_SPLIT_L2 = 1,
  278. ICE_RLAN_RX_HSPLIT_0_SPLIT_IP = 2,
  279. ICE_RLAN_RX_HSPLIT_0_SPLIT_TCP_UDP = 4,
  280. ICE_RLAN_RX_HSPLIT_0_SPLIT_SCTP = 8,
  281. };
  282. /* for hsplit_1 field of Rx RLAN context */
  283. enum ice_rlan_ctx_rx_hsplit_1 {
  284. ICE_RLAN_RX_HSPLIT_1_NO_SPLIT = 0,
  285. ICE_RLAN_RX_HSPLIT_1_SPLIT_L2 = 1,
  286. ICE_RLAN_RX_HSPLIT_1_SPLIT_ALWAYS = 2,
  287. };
  288. /* TX Descriptor */
  289. struct ice_tx_desc {
  290. __le64 buf_addr; /* Address of descriptor's data buf */
  291. __le64 cmd_type_offset_bsz;
  292. };
  293. enum ice_tx_desc_dtype_value {
  294. ICE_TX_DESC_DTYPE_DATA = 0x0,
  295. ICE_TX_DESC_DTYPE_CTX = 0x1,
  296. /* DESC_DONE - HW has completed write-back of descriptor */
  297. ICE_TX_DESC_DTYPE_DESC_DONE = 0xF,
  298. };
  299. #define ICE_TXD_QW1_CMD_S 4
  300. #define ICE_TXD_QW1_CMD_M (0xFFFUL << ICE_TXD_QW1_CMD_S)
  301. enum ice_tx_desc_cmd_bits {
  302. ICE_TX_DESC_CMD_EOP = 0x0001,
  303. ICE_TX_DESC_CMD_RS = 0x0002,
  304. ICE_TX_DESC_CMD_IL2TAG1 = 0x0008,
  305. ICE_TX_DESC_CMD_IIPT_IPV6 = 0x0020, /* 2 BITS */
  306. ICE_TX_DESC_CMD_IIPT_IPV4 = 0x0040, /* 2 BITS */
  307. ICE_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060, /* 2 BITS */
  308. ICE_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100, /* 2 BITS */
  309. ICE_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300, /* 2 BITS */
  310. };
  311. #define ICE_TXD_QW1_OFFSET_S 16
  312. #define ICE_TXD_QW1_OFFSET_M (0x3FFFFULL << ICE_TXD_QW1_OFFSET_S)
  313. enum ice_tx_desc_len_fields {
  314. /* Note: These are predefined bit offsets */
  315. ICE_TX_DESC_LEN_MACLEN_S = 0, /* 7 BITS */
  316. ICE_TX_DESC_LEN_IPLEN_S = 7, /* 7 BITS */
  317. ICE_TX_DESC_LEN_L4_LEN_S = 14 /* 4 BITS */
  318. };
  319. #define ICE_TXD_QW1_MACLEN_M (0x7FUL << ICE_TX_DESC_LEN_MACLEN_S)
  320. #define ICE_TXD_QW1_IPLEN_M (0x7FUL << ICE_TX_DESC_LEN_IPLEN_S)
  321. #define ICE_TXD_QW1_L4LEN_M (0xFUL << ICE_TX_DESC_LEN_L4_LEN_S)
  322. /* Tx descriptor field limits in bytes */
  323. #define ICE_TXD_MACLEN_MAX ((ICE_TXD_QW1_MACLEN_M >> \
  324. ICE_TX_DESC_LEN_MACLEN_S) * ICE_BYTES_PER_WORD)
  325. #define ICE_TXD_IPLEN_MAX ((ICE_TXD_QW1_IPLEN_M >> \
  326. ICE_TX_DESC_LEN_IPLEN_S) * ICE_BYTES_PER_DWORD)
  327. #define ICE_TXD_L4LEN_MAX ((ICE_TXD_QW1_L4LEN_M >> \
  328. ICE_TX_DESC_LEN_L4_LEN_S) * ICE_BYTES_PER_DWORD)
  329. #define ICE_TXD_QW1_TX_BUF_SZ_S 34
  330. #define ICE_TXD_QW1_L2TAG1_S 48
  331. /* Context descriptors */
  332. struct ice_tx_ctx_desc {
  333. __le32 tunneling_params;
  334. __le16 l2tag2;
  335. __le16 rsvd;
  336. __le64 qw1;
  337. };
  338. #define ICE_TXD_CTX_QW1_CMD_S 4
  339. #define ICE_TXD_CTX_QW1_CMD_M (0x7FUL << ICE_TXD_CTX_QW1_CMD_S)
  340. #define ICE_TXD_CTX_QW1_TSO_LEN_S 30
  341. #define ICE_TXD_CTX_QW1_TSO_LEN_M \
  342. (0x3FFFFULL << ICE_TXD_CTX_QW1_TSO_LEN_S)
  343. #define ICE_TXD_CTX_QW1_MSS_S 50
  344. enum ice_tx_ctx_desc_cmd_bits {
  345. ICE_TX_CTX_DESC_TSO = 0x01,
  346. ICE_TX_CTX_DESC_TSYN = 0x02,
  347. ICE_TX_CTX_DESC_IL2TAG2 = 0x04,
  348. ICE_TX_CTX_DESC_IL2TAG2_IL2H = 0x08,
  349. ICE_TX_CTX_DESC_SWTCH_NOTAG = 0x00,
  350. ICE_TX_CTX_DESC_SWTCH_UPLINK = 0x10,
  351. ICE_TX_CTX_DESC_SWTCH_LOCAL = 0x20,
  352. ICE_TX_CTX_DESC_SWTCH_VSI = 0x30,
  353. ICE_TX_CTX_DESC_RESERVED = 0x40
  354. };
  355. #define ICE_LAN_TXQ_MAX_QGRPS 127
  356. #define ICE_LAN_TXQ_MAX_QDIS 1023
  357. /* Tx queue context data
  358. *
  359. * The sizes of the variables may be larger than needed due to crossing byte
  360. * boundaries. If we do not have the width of the variable set to the correct
  361. * size then we could end up shifting bits off the top of the variable when the
  362. * variable is at the top of a byte and crosses over into the next byte.
  363. */
  364. struct ice_tlan_ctx {
  365. #define ICE_TLAN_CTX_BASE_S 7
  366. u64 base; /* base is defined in 128-byte units */
  367. u8 port_num;
  368. u16 cgd_num; /* bigger than needed, see above for reason */
  369. u8 pf_num;
  370. u16 vmvf_num;
  371. u8 vmvf_type;
  372. #define ICE_TLAN_CTX_VMVF_TYPE_VMQ 1
  373. #define ICE_TLAN_CTX_VMVF_TYPE_PF 2
  374. u16 src_vsi;
  375. u8 tsyn_ena;
  376. u8 alt_vlan;
  377. u16 cpuid; /* bigger than needed, see above for reason */
  378. u8 wb_mode;
  379. u8 tphrd_desc;
  380. u8 tphrd;
  381. u8 tphwr_desc;
  382. u16 cmpq_id;
  383. u16 qnum_in_func;
  384. u8 itr_notification_mode;
  385. u8 adjust_prof_id;
  386. u32 qlen; /* bigger than needed, see above for reason */
  387. u8 quanta_prof_idx;
  388. u8 tso_ena;
  389. u16 tso_qnum;
  390. u8 legacy_int;
  391. u8 drop_ena;
  392. u8 cache_prof_idx;
  393. u8 pkt_shaper_prof_idx;
  394. u8 int_q_state; /* width not needed - internal do not write */
  395. };
  396. /* macro to make the table lines short */
  397. #define ICE_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
  398. { PTYPE, \
  399. 1, \
  400. ICE_RX_PTYPE_OUTER_##OUTER_IP, \
  401. ICE_RX_PTYPE_OUTER_##OUTER_IP_VER, \
  402. ICE_RX_PTYPE_##OUTER_FRAG, \
  403. ICE_RX_PTYPE_TUNNEL_##T, \
  404. ICE_RX_PTYPE_TUNNEL_END_##TE, \
  405. ICE_RX_PTYPE_##TEF, \
  406. ICE_RX_PTYPE_INNER_PROT_##I, \
  407. ICE_RX_PTYPE_PAYLOAD_LAYER_##PL }
  408. #define ICE_PTT_UNUSED_ENTRY(PTYPE) { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
  409. /* shorter macros makes the table fit but are terse */
  410. #define ICE_RX_PTYPE_NOF ICE_RX_PTYPE_NOT_FRAG
  411. /* Lookup table mapping the HW PTYPE to the bit field for decoding */
  412. static const struct ice_rx_ptype_decoded ice_ptype_lkup[] = {
  413. /* L2 Packet types */
  414. ICE_PTT_UNUSED_ENTRY(0),
  415. ICE_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
  416. ICE_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
  417. };
  418. static inline struct ice_rx_ptype_decoded ice_decode_rx_desc_ptype(u16 ptype)
  419. {
  420. return ice_ptype_lkup[ptype];
  421. }
  422. #endif /* _ICE_LAN_TX_RX_H_ */