/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __MLX5_EN_H__
#define __MLX5_EN_H__

#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/timecounter.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/port.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/transobj.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include "wq.h"
#include "mlx5_core.h"
#include "en_stats.h"

#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)

#define MLX5E_MAX_NUM_TC 8

#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE 0x6
#define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE 0xa
#define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE 0xd

#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE 0x1
#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE 0xa
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE 0xd

#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x1
#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW 0x3
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW 0x6

#define MLX5_RX_HEADROOM NET_SKB_PAD

#define MLX5_MPWRQ_LOG_STRIDE_SIZE 6 /* >= 6, HW restriction */
#define MLX5_MPWRQ_LOG_STRIDE_SIZE_CQE_COMPRESS 8 /* >= 6, HW restriction */
#define MLX5_MPWRQ_LOG_WQE_SZ 18
#define MLX5_MPWRQ_WQE_PAGE_ORDER (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? \
                                   MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0)
#define MLX5_MPWRQ_PAGES_PER_WQE BIT(MLX5_MPWRQ_WQE_PAGE_ORDER)
#define MLX5_MPWRQ_STRIDES_PER_PAGE (MLX5_MPWRQ_NUM_STRIDES >> \
                                     MLX5_MPWRQ_WQE_PAGE_ORDER)
#define MLX5_MTT_OCTW(npages) (ALIGN(npages, 8) / 2)
#define MLX5E_REQUIRED_MTTS(rqs, wqes) \
        (rqs * wqes * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8))
#define MLX5E_VALID_NUM_MTTS(num_mtts) (MLX5_MTT_OCTW(num_mtts) <= U16_MAX)
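
/* MTT bookkeeping, by way of example (assuming 4 KiB pages, PAGE_SHIFT = 12):
 * MLX5_MPWRQ_LOG_WQE_SZ = 18 gives a page order of 6, i.e. 64 pages per
 * MPWQE. Each page needs one MTT entry, the per-WQE entry count is kept
 * 8-aligned, and MLX5_MTT_OCTW() converts entries to the 16-byte octwords
 * the UMR translation table is measured in: MLX5_MTT_OCTW(64) = 32.
 */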

#define MLX5_UMR_ALIGN (2048)
#define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD (256)

#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024)
#define MLX5E_DEFAULT_LRO_TIMEOUT 32
#define MLX5E_LRO_TIMEOUT_ARR_SIZE 4

#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC 0x10
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE 0x3
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS 0x20
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC 0x10
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS 0x20
#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES 0x80
#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW 0x2

#define MLX5E_LOG_INDIR_RQT_SIZE 0x7
#define MLX5E_INDIR_RQT_SIZE BIT(MLX5E_LOG_INDIR_RQT_SIZE)
#define MLX5E_MAX_NUM_CHANNELS (MLX5E_INDIR_RQT_SIZE >> 1)
#define MLX5E_MAX_NUM_SQS (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC)
#define MLX5E_TX_CQ_POLL_BUDGET 128
#define MLX5E_UPDATE_STATS_INTERVAL 200 /* msecs */
#define MLX5E_SQ_BF_BUDGET 16

#define MLX5E_ICOSQ_MAX_WQEBBS \
        (DIV_ROUND_UP(sizeof(struct mlx5e_umr_wqe), MLX5_SEND_WQE_BB))

#define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
#define MLX5E_XDP_IHS_DS_COUNT \
        DIV_ROUND_UP(MLX5E_XDP_MIN_INLINE - 2, MLX5_SEND_WQE_DS)
#define MLX5E_XDP_TX_DS_COUNT \
        (MLX5E_XDP_IHS_DS_COUNT + \
         (sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS) + 1 /* SG DS */)
#define MLX5E_XDP_TX_WQEBBS \
        DIV_ROUND_UP(MLX5E_XDP_TX_DS_COUNT, MLX5_SEND_WQEBB_NUM_DS)
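
/* Worked example for the XDP TX sizing above, assuming the usual segment
 * sizes (MLX5_SEND_WQE_DS = 16, MLX5_SEND_WQE_BB = 64, so
 * MLX5_SEND_WQEBB_NUM_DS = 4) and a 32-byte struct mlx5e_tx_wqe:
 * MLX5E_XDP_MIN_INLINE = 18, so IHS_DS_COUNT = DIV_ROUND_UP(16, 16) = 1
 * (the first 2 inline bytes live in the eth segment itself),
 * TX_DS_COUNT = 1 + 2 + 1 = 4 data segments, and MLX5E_XDP_TX_WQEBBS = 1:
 * each XDP TX WQE fits in a single 64-byte basic block.
 */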

#define MLX5E_NUM_MAIN_GROUPS 9

static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size)
{
        switch (wq_type) {
        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
                return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW,
                             wq_size / 2);
        default:
                return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES,
                             wq_size / 2);
        }
}

static inline int mlx5_min_log_rq_size(int wq_type)
{
        switch (wq_type) {
        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
                return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW;
        default:
                return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;
        }
}

static inline int mlx5_max_log_rq_size(int wq_type)
{
        switch (wq_type) {
        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
                return MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW;
        default:
                return MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE;
        }
}

enum {
        MLX5E_INLINE_MODE_L2,
        MLX5E_INLINE_MODE_VPORT_CONTEXT,
        MLX5_INLINE_MODE_NOT_REQUIRED,
};

struct mlx5e_tx_wqe {
        struct mlx5_wqe_ctrl_seg ctrl;
        struct mlx5_wqe_eth_seg eth;
};

struct mlx5e_rx_wqe {
        struct mlx5_wqe_srq_next_seg next;
        struct mlx5_wqe_data_seg data;
};

struct mlx5e_umr_wqe {
        struct mlx5_wqe_ctrl_seg ctrl;
        struct mlx5_wqe_umr_ctrl_seg uctrl;
        struct mlx5_mkey_seg mkc;
        struct mlx5_wqe_data_seg data;
};

static const char mlx5e_priv_flags[][ETH_GSTRING_LEN] = {
        "rx_cqe_moder",
};

enum mlx5e_priv_flag {
        MLX5E_PFLAG_RX_CQE_BASED_MODER = (1 << 0),
};

#define MLX5E_SET_PRIV_FLAG(priv, pflag, enable) \
        do { \
                if (enable) \
                        priv->pflags |= pflag; \
                else \
                        priv->pflags &= ~pflag; \
        } while (0)
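
/* Usage sketch (hypothetical caller, not part of this header): an ethtool
 * set_priv_flags handler would toggle a bit in place with
 *
 *      MLX5E_SET_PRIV_FLAG(priv, MLX5E_PFLAG_RX_CQE_BASED_MODER, enable);
 */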

#ifdef CONFIG_MLX5_CORE_EN_DCB
#define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
#endif

struct mlx5e_cq_moder {
        u16 usec;
        u16 pkts;
};

struct mlx5e_params {
        u8 log_sq_size;
        u8 rq_wq_type;
        u8 mpwqe_log_stride_sz;
        u8 mpwqe_log_num_strides;
        u8 log_rq_size;
        u16 num_channels;
        u8 num_tc;
        u8 rx_cq_period_mode;
        bool rx_cqe_compress_admin;
        bool rx_cqe_compress;
        struct mlx5e_cq_moder rx_cq_moderation;
        struct mlx5e_cq_moder tx_cq_moderation;
        u16 min_rx_wqes;
        bool lro_en;
        u32 lro_wqe_sz;
        u16 tx_max_inline;
        u8 tx_min_inline_mode;
        u8 rss_hfunc;
        u8 toeplitz_hash_key[40];
        u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE];
        bool vlan_strip_disable;
#ifdef CONFIG_MLX5_CORE_EN_DCB
        struct ieee_ets ets;
#endif
        bool rx_am_enabled;
        u32 lro_timeout;
};

struct mlx5e_tstamp {
        rwlock_t lock;
        struct cyclecounter cycles;
        struct timecounter clock;
        struct hwtstamp_config hwtstamp_config;
        u32 nominal_c_mult;
        unsigned long overflow_period;
        struct delayed_work overflow_work;
        struct mlx5_core_dev *mdev;
        struct ptp_clock *ptp;
        struct ptp_clock_info ptp_info;
};

enum {
        MLX5E_RQ_STATE_ENABLED,
        MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS,
        MLX5E_RQ_STATE_AM,
};

struct mlx5e_cq {
        /* data path - accessed per cqe */
        struct mlx5_cqwq wq;

        /* data path - accessed per napi poll */
        u16 event_ctr;
        struct napi_struct *napi;
        struct mlx5_core_cq mcq;
        struct mlx5e_channel *channel;
        struct mlx5e_priv *priv;

        /* cqe decompression */
        struct mlx5_cqe64 title;
        struct mlx5_mini_cqe8 mini_arr[MLX5_MINI_CQE_ARRAY_SIZE];
        u8 mini_arr_idx;
        u16 decmprs_left;
        u16 decmprs_wqe_counter;

        /* control */
        struct mlx5_wq_ctrl wq_ctrl;
} ____cacheline_aligned_in_smp;
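
/* CQE compression bookkeeping: with the feature enabled, the HW delivers one
 * full "title" CQE followed by 8-byte mini CQEs carrying only per-packet
 * deltas; the RX path expands them against the cached title via mini_arr[]
 * and mini_arr_idx, with decmprs_left counting how many compressed entries
 * of the current session are still pending.
 */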

struct mlx5e_rq;
typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq *rq,
                                       struct mlx5_cqe64 *cqe);
typedef int (*mlx5e_fp_alloc_wqe)(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe,
                                  u16 ix);
typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq *rq, u16 ix);

struct mlx5e_dma_info {
        struct page *page;
        dma_addr_t addr;
};

struct mlx5e_rx_am_stats {
        int ppms; /* packets per msec */
        int bpms; /* bytes per msec */
        int epms; /* events per msec */
};

struct mlx5e_rx_am_sample {
        ktime_t time;
        u32 pkt_ctr;
        u32 byte_ctr;
        u16 event_ctr;
};

struct mlx5e_rx_am { /* Adaptive Moderation */
        u8 state;
        struct mlx5e_rx_am_stats prev_stats;
        struct mlx5e_rx_am_sample start_sample;
        struct work_struct work;
        u8 profile_ix;
        u8 mode;
        u8 tune_state;
        u8 steps_right;
        u8 steps_left;
        u8 tired;
};

/* a single cache unit can serve one napi call (for a non-striding rq)
 * or one MPWQE (for a striding rq)
 */
#define MLX5E_CACHE_UNIT (MLX5_MPWRQ_PAGES_PER_WQE > NAPI_POLL_WEIGHT ? \
                          MLX5_MPWRQ_PAGES_PER_WQE : NAPI_POLL_WEIGHT)
#define MLX5E_CACHE_SIZE (2 * roundup_pow_of_two(MLX5E_CACHE_UNIT))
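
/* Sizing example (assuming 4 KiB pages, so MLX5_MPWRQ_PAGES_PER_WQE = 64,
 * and the default NAPI_POLL_WEIGHT of 64): MLX5E_CACHE_UNIT = 64 and
 * MLX5E_CACHE_SIZE = 128 entries, i.e. the page cache can absorb two full
 * refill bursts before recycling falls back to the page allocator.
 */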

struct mlx5e_page_cache {
        u32 head;
        u32 tail;
        struct mlx5e_dma_info page_cache[MLX5E_CACHE_SIZE];
};

struct mlx5e_rq {
        /* data path */
        struct mlx5_wq_ll wq;

        union {
                struct mlx5e_dma_info *dma_info;
                struct {
                        struct mlx5e_mpw_info *info;
                        void *mtt_no_align;
                        u32 mtt_offset;
                } mpwqe;
        };
        struct {
                u8 page_order;
                u32 wqe_sz; /* wqe data buffer size */
                u8 map_dir; /* dma map direction */
        } buff;
        __be32 mkey_be;

        struct device *pdev;
        struct net_device *netdev;
        struct mlx5e_tstamp *tstamp;
        struct mlx5e_rq_stats stats;
        struct mlx5e_cq cq;
        struct mlx5e_page_cache page_cache;

        mlx5e_fp_handle_rx_cqe handle_rx_cqe;
        mlx5e_fp_alloc_wqe alloc_wqe;
        mlx5e_fp_dealloc_wqe dealloc_wqe;

        unsigned long state;
        int ix;

        struct mlx5e_rx_am am; /* Adaptive Moderation */
        struct bpf_prog *xdp_prog;

        /* control */
        struct mlx5_wq_ctrl wq_ctrl;
        u8 wq_type;
        u32 mpwqe_stride_sz;
        u32 mpwqe_num_strides;
        u32 rqn;
        struct mlx5e_channel *channel;
        struct mlx5e_priv *priv;
} ____cacheline_aligned_in_smp;

struct mlx5e_umr_dma_info {
        __be64 *mtt;
        dma_addr_t mtt_addr;
        struct mlx5e_dma_info dma_info[MLX5_MPWRQ_PAGES_PER_WQE];
        struct mlx5e_umr_wqe wqe;
};

struct mlx5e_mpw_info {
        struct mlx5e_umr_dma_info umr;
        u16 consumed_strides;
        u16 skbs_frags[MLX5_MPWRQ_PAGES_PER_WQE];
};

struct mlx5e_tx_wqe_info {
        u32 num_bytes;
        u8 num_wqebbs;
        u8 num_dma;
};

enum mlx5e_dma_map_type {
        MLX5E_DMA_MAP_SINGLE,
        MLX5E_DMA_MAP_PAGE
};

struct mlx5e_sq_dma {
        dma_addr_t addr;
        u32 size;
        enum mlx5e_dma_map_type type;
};

enum {
        MLX5E_SQ_STATE_ENABLED,
        MLX5E_SQ_STATE_BF_ENABLE,
};

struct mlx5e_sq_wqe_info {
        u8 opcode;
        u8 num_wqebbs;
};

enum mlx5e_sq_type {
        MLX5E_SQ_TXQ,
        MLX5E_SQ_ICO,
        MLX5E_SQ_XDP
};

struct mlx5e_sq {
        /* data path */

        /* dirtied @completion */
        u16 cc;
        u32 dma_fifo_cc;

        /* dirtied @xmit */
        u16 pc ____cacheline_aligned_in_smp;
        u32 dma_fifo_pc;
        u16 bf_offset;
        u16 prev_cc;
        u8 bf_budget;
        struct mlx5e_sq_stats stats;

        struct mlx5e_cq cq;

        /* pointers to per tx element info: write@xmit, read@completion */
        union {
                struct {
                        struct sk_buff **skb;
                        struct mlx5e_sq_dma *dma_fifo;
                        struct mlx5e_tx_wqe_info *wqe_info;
                } txq;
                struct mlx5e_sq_wqe_info *ico_wqe;
                struct {
                        struct mlx5e_sq_wqe_info *wqe_info;
                        struct mlx5e_dma_info *di;
                        bool doorbell;
                } xdp;
        } db;

        /* read only */
        struct mlx5_wq_cyc wq;
        u32 dma_fifo_mask;
        void __iomem *uar_map;
        struct netdev_queue *txq;
        u32 sqn;
        u16 bf_buf_size;
        u16 max_inline;
        u8 min_inline_mode;
        u16 edge;
        struct device *pdev;
        struct mlx5e_tstamp *tstamp;
        __be32 mkey_be;
        unsigned long state;

        /* control path */
        struct mlx5_wq_ctrl wq_ctrl;
        struct mlx5_uar uar;
        struct mlx5e_channel *channel;
        int tc;
        u32 rate_limit;
        u8 type;
} ____cacheline_aligned_in_smp;

static inline bool mlx5e_sq_has_room_for(struct mlx5e_sq *sq, u16 n)
{
        return (((sq->wq.sz_m1 & (sq->cc - sq->pc)) >= n) ||
                (sq->cc == sq->pc));
}
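
/* Ring-occupancy math, for reference: pc (producer) and cc (consumer) are
 * free-running u16 counters and wq.sz_m1 is the power-of-two ring size
 * minus one. (cc - pc) & sz_m1 therefore yields the number of free WQEBBs
 * modulo the ring size; it is 0 both when the ring is completely full and
 * when it is completely empty, which is why the cc == pc (empty) case is
 * checked separately.
 */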

enum channel_flags {
        MLX5E_CHANNEL_NAPI_SCHED = 1,
};

struct mlx5e_channel {
        /* data path */
        struct mlx5e_rq rq;
        struct mlx5e_sq xdp_sq;
        struct mlx5e_sq sq[MLX5E_MAX_NUM_TC];
        struct mlx5e_sq icosq; /* internal control operations */
        bool xdp;
        struct napi_struct napi;
        struct device *pdev;
        struct net_device *netdev;
        __be32 mkey_be;
        u8 num_tc;
        unsigned long flags;

        /* control */
        struct mlx5e_priv *priv;
        int ix;
        int cpu;
};

enum mlx5e_traffic_types {
        MLX5E_TT_IPV4_TCP,
        MLX5E_TT_IPV6_TCP,
        MLX5E_TT_IPV4_UDP,
        MLX5E_TT_IPV6_UDP,
        MLX5E_TT_IPV4_IPSEC_AH,
        MLX5E_TT_IPV6_IPSEC_AH,
        MLX5E_TT_IPV4_IPSEC_ESP,
        MLX5E_TT_IPV6_IPSEC_ESP,
        MLX5E_TT_IPV4,
        MLX5E_TT_IPV6,
        MLX5E_TT_ANY,
        MLX5E_NUM_TT,
        MLX5E_NUM_INDIR_TIRS = MLX5E_TT_ANY,
};

enum {
        MLX5E_STATE_ASYNC_EVENTS_ENABLED,
        MLX5E_STATE_OPENED,
        MLX5E_STATE_DESTROYING,
};

struct mlx5e_vxlan_db {
        spinlock_t lock; /* protect vxlan table */
        struct radix_tree_root tree;
};

struct mlx5e_l2_rule {
        u8 addr[ETH_ALEN + 2];
        struct mlx5_flow_rule *rule;
};

struct mlx5e_flow_table {
        int num_groups;
        struct mlx5_flow_table *t;
        struct mlx5_flow_group **g;
};

#define MLX5E_L2_ADDR_HASH_SIZE BIT(BITS_PER_BYTE)

struct mlx5e_tc_table {
        struct mlx5_flow_table *t;
        struct rhashtable_params ht_params;
        struct rhashtable ht;
};

struct mlx5e_vlan_table {
        struct mlx5e_flow_table ft;
        unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
        struct mlx5_flow_rule *active_vlans_rule[VLAN_N_VID];
        struct mlx5_flow_rule *untagged_rule;
        struct mlx5_flow_rule *any_vlan_rule;
        bool filter_disabled;
};

struct mlx5e_l2_table {
        struct mlx5e_flow_table ft;
        struct hlist_head netdev_uc[MLX5E_L2_ADDR_HASH_SIZE];
        struct hlist_head netdev_mc[MLX5E_L2_ADDR_HASH_SIZE];
        struct mlx5e_l2_rule broadcast;
        struct mlx5e_l2_rule allmulti;
        struct mlx5e_l2_rule promisc;
        bool broadcast_enabled;
        bool allmulti_enabled;
        bool promisc_enabled;
};

/* L3/L4 traffic type classifier */
struct mlx5e_ttc_table {
        struct mlx5e_flow_table ft;
        struct mlx5_flow_rule *rules[MLX5E_NUM_TT];
};

#define ARFS_HASH_SHIFT BITS_PER_BYTE
#define ARFS_HASH_SIZE BIT(BITS_PER_BYTE)

struct arfs_table {
        struct mlx5e_flow_table ft;
        struct mlx5_flow_rule *default_rule;
        struct hlist_head rules_hash[ARFS_HASH_SIZE];
};

enum arfs_type {
        ARFS_IPV4_TCP,
        ARFS_IPV6_TCP,
        ARFS_IPV4_UDP,
        ARFS_IPV6_UDP,
        ARFS_NUM_TYPES,
};

struct mlx5e_arfs_tables {
        struct arfs_table arfs_tables[ARFS_NUM_TYPES];
        /* Protect aRFS rules list */
        spinlock_t arfs_lock;
        struct list_head rules;
        int last_filter_id;
        struct workqueue_struct *wq;
};

/* NIC prio FTS */
enum {
        MLX5E_VLAN_FT_LEVEL = 0,
        MLX5E_L2_FT_LEVEL,
        MLX5E_TTC_FT_LEVEL,
        MLX5E_ARFS_FT_LEVEL
};

struct mlx5e_ethtool_table {
        struct mlx5_flow_table *ft;
        int num_rules;
};

#define ETHTOOL_NUM_L3_L4_FTS 7
#define ETHTOOL_NUM_L2_FTS 4

struct mlx5e_ethtool_steering {
        struct mlx5e_ethtool_table l3_l4_ft[ETHTOOL_NUM_L3_L4_FTS];
        struct mlx5e_ethtool_table l2_ft[ETHTOOL_NUM_L2_FTS];
        struct list_head rules;
        int tot_num_rules;
};

struct mlx5e_flow_steering {
        struct mlx5_flow_namespace *ns;
        struct mlx5e_ethtool_steering ethtool;
        struct mlx5e_tc_table tc;
        struct mlx5e_vlan_table vlan;
        struct mlx5e_l2_table l2;
        struct mlx5e_ttc_table ttc;
        struct mlx5e_arfs_tables arfs;
};

struct mlx5e_rqt {
        u32 rqtn;
        bool enabled;
};

struct mlx5e_tir {
        u32 tirn;
        struct mlx5e_rqt rqt;
        struct list_head list;
};

enum {
        MLX5E_TC_PRIO = 0,
        MLX5E_NIC_PRIO
};

struct mlx5e_profile {
        void (*init)(struct mlx5_core_dev *mdev,
                     struct net_device *netdev,
                     const struct mlx5e_profile *profile, void *ppriv);
        void (*cleanup)(struct mlx5e_priv *priv);
        int (*init_rx)(struct mlx5e_priv *priv);
        void (*cleanup_rx)(struct mlx5e_priv *priv);
        int (*init_tx)(struct mlx5e_priv *priv);
        void (*cleanup_tx)(struct mlx5e_priv *priv);
        void (*enable)(struct mlx5e_priv *priv);
        void (*disable)(struct mlx5e_priv *priv);
        void (*update_stats)(struct mlx5e_priv *priv);
        int (*max_nch)(struct mlx5_core_dev *mdev);
        int max_tc;
};

struct mlx5e_priv {
        /* priv data path fields - start */
        struct mlx5e_sq **txq_to_sq_map;
        int channeltc_to_txq_map[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
        struct bpf_prog *xdp_prog;
        /* priv data path fields - end */

        unsigned long state;
        struct mutex state_lock; /* Protects Interface state */
        struct mlx5_core_mkey umr_mkey;
        struct mlx5e_rq drop_rq;

        struct mlx5e_channel **channel;
        u32 tisn[MLX5E_MAX_NUM_TC];
        struct mlx5e_rqt indir_rqt;
        struct mlx5e_tir indir_tir[MLX5E_NUM_INDIR_TIRS];
        struct mlx5e_tir direct_tir[MLX5E_MAX_NUM_CHANNELS];
        u32 tx_rates[MLX5E_MAX_NUM_SQS];

        struct mlx5e_flow_steering fs;
        struct mlx5e_vxlan_db vxlan;

        struct mlx5e_params params;
        struct workqueue_struct *wq;
        struct work_struct update_carrier_work;
        struct work_struct set_rx_mode_work;
        struct work_struct tx_timeout_work;
        struct delayed_work update_stats_work;

        u32 pflags;
        struct mlx5_core_dev *mdev;
        struct net_device *netdev;
        struct mlx5e_stats stats;
        struct mlx5e_tstamp tstamp;
        u16 q_counter;
        const struct mlx5e_profile *profile;
        void *ppriv;
};

void mlx5e_build_ptys2ethtool_map(void);

void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw);
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
                       void *accel_priv, select_queue_fallback_t fallback);
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);

void mlx5e_completion_event(struct mlx5_core_cq *mcq);
void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
int mlx5e_napi_poll(struct napi_struct *napi, int budget);
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
void mlx5e_free_sq_descs(struct mlx5e_sq *sq);

void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info,
                        bool recycle);
void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq);
void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi);
struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq);

void mlx5e_rx_am(struct mlx5e_rq *rq);
void mlx5e_rx_am_work(struct work_struct *work);
struct mlx5e_cq_moder mlx5e_am_get_def_profile(u8 rx_cq_period_mode);

void mlx5e_update_stats(struct mlx5e_priv *priv);

int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
void mlx5e_init_l2_addr(struct mlx5e_priv *priv);
void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft);
int mlx5e_ethtool_get_flow(struct mlx5e_priv *priv, struct ethtool_rxnfc *info,
                           int location);
int mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv,
                                struct ethtool_rxnfc *info, u32 *rule_locs);
int mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
                               struct ethtool_rx_flow_spec *fs);
int mlx5e_ethtool_flow_remove(struct mlx5e_priv *priv,
                              int location);
void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv);
void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv);
void mlx5e_set_rx_mode_work(struct work_struct *work);

void mlx5e_fill_hwstamp(struct mlx5e_tstamp *clock, u64 timestamp,
                        struct skb_shared_hwtstamps *hwts);
void mlx5e_timestamp_init(struct mlx5e_priv *priv);
void mlx5e_timestamp_cleanup(struct mlx5e_priv *priv);
int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr);
int mlx5e_hwstamp_get(struct net_device *dev, struct ifreq *ifr);
void mlx5e_modify_rx_cqe_compression(struct mlx5e_priv *priv, bool val);

int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
                          u16 vid);
int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
                           u16 vid);
void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv);
void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd);

int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix);
void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_priv *priv, void *tirc,
                                    enum mlx5e_traffic_types tt);

int mlx5e_open_locked(struct net_device *netdev);
int mlx5e_close_locked(struct net_device *netdev);
void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev,
                                   u32 *indirection_rqt, int len,
                                   int num_channels);
int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);

void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params,
                                 u8 cq_period_mode);

static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
                                      struct mlx5_wqe_ctrl_seg *ctrl, int bf_sz)
{
        u16 ofst = MLX5_BF_OFFSET + sq->bf_offset;

        /* ensure wqe is visible to device before updating doorbell record */
        dma_wmb();

        *sq->wq.db = cpu_to_be32(sq->pc);

        /* ensure doorbell record is visible to device before ringing the
         * doorbell
         */
        wmb();
        if (bf_sz)
                __iowrite64_copy(sq->uar_map + ofst, ctrl, bf_sz);
        else
                mlx5_write64((__be32 *)ctrl, sq->uar_map + ofst, NULL);
        /* flush the write-combining mapped buffer */
        wmb();

        sq->bf_offset ^= sq->bf_buf_size;
}
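
/* Doorbell note: when bf_sz is non-zero, bf_sz bytes starting at the WQE's
 * control segment are written straight into the BlueFlame buffer in the
 * UAR, letting the HW pick up the descriptor without a DMA read of the
 * doorbelled WQE; otherwise only the 8-byte ctrl segment is written as a
 * plain doorbell. The closing bf_offset XOR ping-pongs between the UAR's
 * two BlueFlame buffers so back-to-back rings don't overwrite each other
 * before the HW has consumed them.
 */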

static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
{
        struct mlx5_core_cq *mcq;

        mcq = &cq->mcq;
        mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, NULL, cq->wq.cc);
}

static inline u32 mlx5e_get_wqe_mtt_offset(struct mlx5e_rq *rq, u16 wqe_ix)
{
        return rq->mpwqe.mtt_offset +
                wqe_ix * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8);
}
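
/* Layout assumed by the helper above: each MPWQE RQ owns a contiguous run
 * of MTT entries starting at rq->mpwqe.mtt_offset, with WQE i mapping its
 * pages at ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8) entries per WQE, matching
 * the MLX5E_REQUIRED_MTTS() sizing near the top of this file.
 */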

static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
{
        return min_t(int, mdev->priv.eq_table.num_comp_vectors,
                     MLX5E_MAX_NUM_CHANNELS);
}

extern const struct ethtool_ops mlx5e_ethtool_ops;
#ifdef CONFIG_MLX5_CORE_EN_DCB
extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops;
int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets);
#endif

#ifndef CONFIG_RFS_ACCEL
static inline int mlx5e_arfs_create_tables(struct mlx5e_priv *priv)
{
        return 0;
}

static inline void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv) {}

static inline int mlx5e_arfs_enable(struct mlx5e_priv *priv)
{
        return -ENOTSUPP;
}

static inline int mlx5e_arfs_disable(struct mlx5e_priv *priv)
{
        return -ENOTSUPP;
}
#else
int mlx5e_arfs_create_tables(struct mlx5e_priv *priv);
void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv);
int mlx5e_arfs_enable(struct mlx5e_priv *priv);
int mlx5e_arfs_disable(struct mlx5e_priv *priv);
int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
                        u16 rxq_index, u32 flow_id);
#endif

u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev);
int mlx5e_create_tir(struct mlx5_core_dev *mdev,
                     struct mlx5e_tir *tir, u32 *in, int inlen);
void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
                       struct mlx5e_tir *tir);
int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev);
void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev);
int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5_core_dev *mdev);

struct mlx5_eswitch_rep;
int mlx5e_vport_rep_load(struct mlx5_eswitch *esw,
                         struct mlx5_eswitch_rep *rep);
void mlx5e_vport_rep_unload(struct mlx5_eswitch *esw,
                            struct mlx5_eswitch_rep *rep);
int mlx5e_nic_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep);
void mlx5e_nic_rep_unload(struct mlx5_eswitch *esw,
                          struct mlx5_eswitch_rep *rep);
int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv);
void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv);
int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr);
void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);

int mlx5e_create_direct_rqts(struct mlx5e_priv *priv);
void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt);
int mlx5e_create_direct_tirs(struct mlx5e_priv *priv);
void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv);
int mlx5e_create_tises(struct mlx5e_priv *priv);
void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv);
int mlx5e_close(struct net_device *netdev);
int mlx5e_open(struct net_device *netdev);
void mlx5e_update_stats_work(struct work_struct *work);

struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
                                       const struct mlx5e_profile *profile,
                                       void *ppriv);
void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv);
int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev);
void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev);
struct rtnl_link_stats64 *
mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout);

#endif /* __MLX5_EN_H__ */