/* en_dcb_nl.c */
/*
 * Copyright (c) 2011 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/dcbnl.h>
#include <linux/math64.h>

#include "mlx4_en.h"
#include "fw_qos.h"
  37. enum {
  38. MLX4_CEE_STATE_DOWN = 0,
  39. MLX4_CEE_STATE_UP = 1,
  40. };
/* Definitions for QCN
 */

/* Mailbox layout for the CONGESTION_CONTROL GET/SET_PARAMS firmware
 * commands (802.1Qau reaction-point parameters for one priority).
 * All fields are big-endian, as required by the command interface.
 */
struct mlx4_congestion_control_mb_prio_802_1_qau_params {
	__be32 modify_enable_high;	/* mask of fields to update (SET only) */
	__be32 modify_enable_low;	/* mask of fields to update (SET only) */
	__be32 reserved1;
	__be32 extended_enable;		/* bit 31: RP enable, bit 30: CN tag */
	__be32 rppp_max_rps;
	__be32 rpg_time_reset;
	__be32 rpg_byte_reset;
	__be32 rpg_threshold;
	__be32 rpg_max_rate;
	__be32 rpg_ai_rate;
	__be32 rpg_hai_rate;
	__be32 rpg_gd;
	__be32 rpg_min_dec_fac;
	__be32 rpg_min_rate;
	__be32 max_time_rise;
	__be32 max_byte_rise;
	__be32 max_qdelta;
	__be32 min_qoffset;
	__be32 gd_coefficient;
	__be32 reserved2[5];
	__be32 cp_sample_base;
	__be32 reserved3[39];		/* pads the mailbox to its full size */
};
/* Mailbox layout for the CONGESTION_CONTROL GET_STATISTICS firmware
 * command (802.1Qau per-priority counters).  Big-endian on the wire.
 */
struct mlx4_congestion_control_mb_prio_802_1_qau_statistics {
	__be64 rppp_rp_centiseconds;
	__be32 reserved1;
	__be32 ignored_cnm;
	__be32 rppp_created_rps;
	__be32 estimated_total_rate;
	__be32 max_active_rate_limiter_index;
	__be32 dropped_cnms_busy_fw;
	__be32 reserved2;
	__be32 cnms_handled_successfully;
	__be32 min_total_limiters_rate;
	__be32 max_total_limiters_rate;
	__be32 reserved3[4];
};
  81. static u8 mlx4_en_dcbnl_getcap(struct net_device *dev, int capid, u8 *cap)
  82. {
  83. struct mlx4_en_priv *priv = netdev_priv(dev);
  84. switch (capid) {
  85. case DCB_CAP_ATTR_PFC:
  86. *cap = true;
  87. break;
  88. case DCB_CAP_ATTR_DCBX:
  89. *cap = priv->dcbx_cap;
  90. break;
  91. case DCB_CAP_ATTR_PFC_TCS:
  92. *cap = 1 << mlx4_max_tc(priv->mdev->dev);
  93. break;
  94. default:
  95. *cap = false;
  96. break;
  97. }
  98. return 0;
  99. }
  100. static u8 mlx4_en_dcbnl_getpfcstate(struct net_device *netdev)
  101. {
  102. struct mlx4_en_priv *priv = netdev_priv(netdev);
  103. return priv->cee_config.pfc_state;
  104. }
  105. static void mlx4_en_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
  106. {
  107. struct mlx4_en_priv *priv = netdev_priv(netdev);
  108. priv->cee_config.pfc_state = state;
  109. }
  110. static void mlx4_en_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
  111. u8 *setting)
  112. {
  113. struct mlx4_en_priv *priv = netdev_priv(netdev);
  114. *setting = priv->cee_config.dcb_pfc[priority];
  115. }
  116. static void mlx4_en_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
  117. u8 setting)
  118. {
  119. struct mlx4_en_priv *priv = netdev_priv(netdev);
  120. priv->cee_config.dcb_pfc[priority] = setting;
  121. priv->cee_config.pfc_state = true;
  122. }
  123. static int mlx4_en_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
  124. {
  125. struct mlx4_en_priv *priv = netdev_priv(netdev);
  126. if (!(priv->flags & MLX4_EN_FLAG_DCB_ENABLED))
  127. return -EINVAL;
  128. if (tcid == DCB_NUMTCS_ATTR_PFC)
  129. *num = mlx4_max_tc(priv->mdev->dev);
  130. else
  131. *num = 0;
  132. return 0;
  133. }
  134. static u8 mlx4_en_dcbnl_set_all(struct net_device *netdev)
  135. {
  136. struct mlx4_en_priv *priv = netdev_priv(netdev);
  137. struct mlx4_en_port_profile *prof = priv->prof;
  138. struct mlx4_en_dev *mdev = priv->mdev;
  139. u8 tx_pause, tx_ppp, rx_pause, rx_ppp;
  140. if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
  141. return 1;
  142. if (priv->cee_config.pfc_state) {
  143. int tc;
  144. rx_ppp = prof->rx_ppp;
  145. tx_ppp = prof->tx_ppp;
  146. for (tc = 0; tc < CEE_DCBX_MAX_PRIO; tc++) {
  147. u8 tc_mask = 1 << tc;
  148. switch (priv->cee_config.dcb_pfc[tc]) {
  149. case pfc_disabled:
  150. tx_ppp &= ~tc_mask;
  151. rx_ppp &= ~tc_mask;
  152. break;
  153. case pfc_enabled_full:
  154. tx_ppp |= tc_mask;
  155. rx_ppp |= tc_mask;
  156. break;
  157. case pfc_enabled_tx:
  158. tx_ppp |= tc_mask;
  159. rx_ppp &= ~tc_mask;
  160. break;
  161. case pfc_enabled_rx:
  162. tx_ppp &= ~tc_mask;
  163. rx_ppp |= tc_mask;
  164. break;
  165. default:
  166. break;
  167. }
  168. }
  169. rx_pause = !!(rx_ppp || tx_ppp) ? 0 : prof->rx_pause;
  170. tx_pause = !!(rx_ppp || tx_ppp) ? 0 : prof->tx_pause;
  171. } else {
  172. rx_ppp = 0;
  173. tx_ppp = 0;
  174. rx_pause = prof->rx_pause;
  175. tx_pause = prof->tx_pause;
  176. }
  177. if (mlx4_SET_PORT_general(mdev->dev, priv->port,
  178. priv->rx_skb_size + ETH_FCS_LEN,
  179. tx_pause, tx_ppp, rx_pause, rx_ppp)) {
  180. en_err(priv, "Failed setting pause params\n");
  181. return 1;
  182. }
  183. prof->tx_ppp = tx_ppp;
  184. prof->rx_ppp = rx_ppp;
  185. prof->tx_pause = tx_pause;
  186. prof->rx_pause = rx_pause;
  187. return 0;
  188. }
  189. static u8 mlx4_en_dcbnl_get_state(struct net_device *dev)
  190. {
  191. struct mlx4_en_priv *priv = netdev_priv(dev);
  192. if (priv->flags & MLX4_EN_FLAG_DCB_ENABLED)
  193. return MLX4_CEE_STATE_UP;
  194. return MLX4_CEE_STATE_DOWN;
  195. }
  196. static u8 mlx4_en_dcbnl_set_state(struct net_device *dev, u8 state)
  197. {
  198. struct mlx4_en_priv *priv = netdev_priv(dev);
  199. int num_tcs = 0;
  200. if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
  201. return 1;
  202. if (!!(state) == !!(priv->flags & MLX4_EN_FLAG_DCB_ENABLED))
  203. return 0;
  204. if (state) {
  205. priv->flags |= MLX4_EN_FLAG_DCB_ENABLED;
  206. num_tcs = IEEE_8021QAZ_MAX_TCS;
  207. } else {
  208. priv->flags &= ~MLX4_EN_FLAG_DCB_ENABLED;
  209. }
  210. if (mlx4_en_alloc_tx_queue_per_tc(dev, num_tcs))
  211. return 1;
  212. return 0;
  213. }
  214. /* On success returns a non-zero 802.1p user priority bitmap
  215. * otherwise returns 0 as the invalid user priority bitmap to
  216. * indicate an error.
  217. */
  218. static int mlx4_en_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
  219. {
  220. struct mlx4_en_priv *priv = netdev_priv(netdev);
  221. struct dcb_app app = {
  222. .selector = idtype,
  223. .protocol = id,
  224. };
  225. if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
  226. return 0;
  227. return dcb_getapp(netdev, &app);
  228. }
  229. static int mlx4_en_dcbnl_setapp(struct net_device *netdev, u8 idtype,
  230. u16 id, u8 up)
  231. {
  232. struct mlx4_en_priv *priv = netdev_priv(netdev);
  233. struct dcb_app app;
  234. if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
  235. return -EINVAL;
  236. memset(&app, 0, sizeof(struct dcb_app));
  237. app.selector = idtype;
  238. app.protocol = id;
  239. app.priority = up;
  240. return dcb_setapp(netdev, &app);
  241. }
  242. static int mlx4_en_dcbnl_ieee_getets(struct net_device *dev,
  243. struct ieee_ets *ets)
  244. {
  245. struct mlx4_en_priv *priv = netdev_priv(dev);
  246. struct ieee_ets *my_ets = &priv->ets;
  247. if (!my_ets)
  248. return -EINVAL;
  249. ets->ets_cap = IEEE_8021QAZ_MAX_TCS;
  250. ets->cbs = my_ets->cbs;
  251. memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw));
  252. memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa));
  253. memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc));
  254. return 0;
  255. }
  256. static int mlx4_en_ets_validate(struct mlx4_en_priv *priv, struct ieee_ets *ets)
  257. {
  258. int i;
  259. int total_ets_bw = 0;
  260. int has_ets_tc = 0;
  261. for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
  262. if (ets->prio_tc[i] >= MLX4_EN_NUM_UP_HIGH) {
  263. en_err(priv, "Bad priority in UP <=> TC mapping. TC: %d, UP: %d\n",
  264. i, ets->prio_tc[i]);
  265. return -EINVAL;
  266. }
  267. switch (ets->tc_tsa[i]) {
  268. case IEEE_8021QAZ_TSA_VENDOR:
  269. case IEEE_8021QAZ_TSA_STRICT:
  270. break;
  271. case IEEE_8021QAZ_TSA_ETS:
  272. has_ets_tc = 1;
  273. total_ets_bw += ets->tc_tx_bw[i];
  274. break;
  275. default:
  276. en_err(priv, "TC[%d]: Not supported TSA: %d\n",
  277. i, ets->tc_tsa[i]);
  278. return -EOPNOTSUPP;
  279. }
  280. }
  281. if (has_ets_tc && total_ets_bw != MLX4_EN_BW_MAX) {
  282. en_err(priv, "Bad ETS BW sum: %d. Should be exactly 100%%\n",
  283. total_ets_bw);
  284. return -EINVAL;
  285. }
  286. return 0;
  287. }
  288. static int mlx4_en_config_port_scheduler(struct mlx4_en_priv *priv,
  289. struct ieee_ets *ets, u16 *ratelimit)
  290. {
  291. struct mlx4_en_dev *mdev = priv->mdev;
  292. int num_strict = 0;
  293. int i;
  294. __u8 tc_tx_bw[IEEE_8021QAZ_MAX_TCS] = { 0 };
  295. __u8 pg[IEEE_8021QAZ_MAX_TCS] = { 0 };
  296. ets = ets ?: &priv->ets;
  297. ratelimit = ratelimit ?: priv->maxrate;
  298. /* higher TC means higher priority => lower pg */
  299. for (i = IEEE_8021QAZ_MAX_TCS - 1; i >= 0; i--) {
  300. switch (ets->tc_tsa[i]) {
  301. case IEEE_8021QAZ_TSA_VENDOR:
  302. pg[i] = MLX4_EN_TC_VENDOR;
  303. tc_tx_bw[i] = MLX4_EN_BW_MAX;
  304. break;
  305. case IEEE_8021QAZ_TSA_STRICT:
  306. pg[i] = num_strict++;
  307. tc_tx_bw[i] = MLX4_EN_BW_MAX;
  308. break;
  309. case IEEE_8021QAZ_TSA_ETS:
  310. pg[i] = MLX4_EN_TC_ETS;
  311. tc_tx_bw[i] = ets->tc_tx_bw[i] ?: MLX4_EN_BW_MIN;
  312. break;
  313. }
  314. }
  315. return mlx4_SET_PORT_SCHEDULER(mdev->dev, priv->port, tc_tx_bw, pg,
  316. ratelimit);
  317. }
  318. static int
  319. mlx4_en_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets)
  320. {
  321. struct mlx4_en_priv *priv = netdev_priv(dev);
  322. struct mlx4_en_dev *mdev = priv->mdev;
  323. int err;
  324. err = mlx4_en_ets_validate(priv, ets);
  325. if (err)
  326. return err;
  327. err = mlx4_SET_PORT_PRIO2TC(mdev->dev, priv->port, ets->prio_tc);
  328. if (err)
  329. return err;
  330. err = mlx4_en_config_port_scheduler(priv, ets, NULL);
  331. if (err)
  332. return err;
  333. memcpy(&priv->ets, ets, sizeof(priv->ets));
  334. return 0;
  335. }
  336. static int mlx4_en_dcbnl_ieee_getpfc(struct net_device *dev,
  337. struct ieee_pfc *pfc)
  338. {
  339. struct mlx4_en_priv *priv = netdev_priv(dev);
  340. pfc->pfc_cap = IEEE_8021QAZ_MAX_TCS;
  341. pfc->pfc_en = priv->prof->tx_ppp;
  342. return 0;
  343. }
  344. static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev,
  345. struct ieee_pfc *pfc)
  346. {
  347. struct mlx4_en_priv *priv = netdev_priv(dev);
  348. struct mlx4_en_port_profile *prof = priv->prof;
  349. struct mlx4_en_dev *mdev = priv->mdev;
  350. u32 tx_pause, tx_ppp, rx_pause, rx_ppp;
  351. int err;
  352. en_dbg(DRV, priv, "cap: 0x%x en: 0x%x mbc: 0x%x delay: %d\n",
  353. pfc->pfc_cap,
  354. pfc->pfc_en,
  355. pfc->mbc,
  356. pfc->delay);
  357. rx_pause = prof->rx_pause && !pfc->pfc_en;
  358. tx_pause = prof->tx_pause && !pfc->pfc_en;
  359. rx_ppp = pfc->pfc_en;
  360. tx_ppp = pfc->pfc_en;
  361. err = mlx4_SET_PORT_general(mdev->dev, priv->port,
  362. priv->rx_skb_size + ETH_FCS_LEN,
  363. tx_pause, tx_ppp, rx_pause, rx_ppp);
  364. if (err) {
  365. en_err(priv, "Failed setting pause params\n");
  366. return err;
  367. }
  368. mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap,
  369. rx_ppp, rx_pause, tx_ppp, tx_pause);
  370. prof->tx_ppp = tx_ppp;
  371. prof->rx_ppp = rx_ppp;
  372. prof->rx_pause = rx_pause;
  373. prof->tx_pause = tx_pause;
  374. return err;
  375. }
  376. static u8 mlx4_en_dcbnl_getdcbx(struct net_device *dev)
  377. {
  378. struct mlx4_en_priv *priv = netdev_priv(dev);
  379. return priv->dcbx_cap;
  380. }
  381. static u8 mlx4_en_dcbnl_setdcbx(struct net_device *dev, u8 mode)
  382. {
  383. struct mlx4_en_priv *priv = netdev_priv(dev);
  384. struct ieee_ets ets = {0};
  385. struct ieee_pfc pfc = {0};
  386. if (mode == priv->dcbx_cap)
  387. return 0;
  388. if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
  389. ((mode & DCB_CAP_DCBX_VER_IEEE) &&
  390. (mode & DCB_CAP_DCBX_VER_CEE)) ||
  391. !(mode & DCB_CAP_DCBX_HOST))
  392. goto err;
  393. priv->dcbx_cap = mode;
  394. ets.ets_cap = IEEE_8021QAZ_MAX_TCS;
  395. pfc.pfc_cap = IEEE_8021QAZ_MAX_TCS;
  396. if (mode & DCB_CAP_DCBX_VER_IEEE) {
  397. if (mlx4_en_dcbnl_ieee_setets(dev, &ets))
  398. goto err;
  399. if (mlx4_en_dcbnl_ieee_setpfc(dev, &pfc))
  400. goto err;
  401. } else if (mode & DCB_CAP_DCBX_VER_CEE) {
  402. if (mlx4_en_dcbnl_set_all(dev))
  403. goto err;
  404. } else {
  405. if (mlx4_en_dcbnl_ieee_setets(dev, &ets))
  406. goto err;
  407. if (mlx4_en_dcbnl_ieee_setpfc(dev, &pfc))
  408. goto err;
  409. if (mlx4_en_alloc_tx_queue_per_tc(dev, 0))
  410. goto err;
  411. }
  412. return 0;
  413. err:
  414. return 1;
  415. }
  416. #define MLX4_RATELIMIT_UNITS_IN_KB 100000 /* rate-limit HW unit in Kbps */
  417. static int mlx4_en_dcbnl_ieee_getmaxrate(struct net_device *dev,
  418. struct ieee_maxrate *maxrate)
  419. {
  420. struct mlx4_en_priv *priv = netdev_priv(dev);
  421. int i;
  422. for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
  423. maxrate->tc_maxrate[i] =
  424. priv->maxrate[i] * MLX4_RATELIMIT_UNITS_IN_KB;
  425. return 0;
  426. }
  427. static int mlx4_en_dcbnl_ieee_setmaxrate(struct net_device *dev,
  428. struct ieee_maxrate *maxrate)
  429. {
  430. struct mlx4_en_priv *priv = netdev_priv(dev);
  431. u16 tmp[IEEE_8021QAZ_MAX_TCS];
  432. int i, err;
  433. for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
  434. /* Convert from Kbps into HW units, rounding result up.
  435. * Setting to 0, means unlimited BW.
  436. */
  437. tmp[i] = div_u64(maxrate->tc_maxrate[i] +
  438. MLX4_RATELIMIT_UNITS_IN_KB - 1,
  439. MLX4_RATELIMIT_UNITS_IN_KB);
  440. }
  441. err = mlx4_en_config_port_scheduler(priv, NULL, tmp);
  442. if (err)
  443. return err;
  444. memcpy(priv->maxrate, tmp, sizeof(priv->maxrate));
  445. return 0;
  446. }
/* Bit positions inside the extended_enable word of the QCN params mailbox. */
#define RPG_ENABLE_BIT	31
#define CN_TAG_BIT	30

/* dcbnl ieee_getqcn: read the 802.1Qau reaction-point parameters for
 * every TC from firmware, one CONGESTION_CONTROL GET_PARAMS command per
 * priority.  Returns 0 on success or a negative errno.
 */
static int mlx4_en_dcbnl_ieee_getqcn(struct net_device *dev,
				     struct ieee_qcn *qcn)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_congestion_control_mb_prio_802_1_qau_params *hw_qcn;
	struct mlx4_cmd_mailbox *mailbox_out = NULL;
	u64 mailbox_in_dma = 0;	/* the GET command takes no input mailbox */
	u32 inmod = 0;
	int i, err;

	if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QCN))
		return -EOPNOTSUPP;

	mailbox_out = mlx4_alloc_cmd_mailbox(priv->mdev->dev);
	if (IS_ERR(mailbox_out))
		return -ENOMEM;
	hw_qcn =
	(struct mlx4_congestion_control_mb_prio_802_1_qau_params *)
	mailbox_out->buf;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		/* input modifier: port | priority bitmask | algorithm id */
		inmod = priv->port | ((1 << i) << 8) |
			(MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT << 16);
		err = mlx4_cmd_box(priv->mdev->dev, mailbox_in_dma,
				   mailbox_out->dma,
				   inmod, MLX4_CONGESTION_CONTROL_GET_PARAMS,
				   MLX4_CMD_CONGESTION_CTRL_OPCODE,
				   MLX4_CMD_TIME_CLASS_C,
				   MLX4_CMD_NATIVE);
		if (err) {
			mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out);
			return err;
		}
		qcn->rpg_enable[i] =
			be32_to_cpu(hw_qcn->extended_enable) >> RPG_ENABLE_BIT;
		qcn->rppp_max_rps[i] =
			be32_to_cpu(hw_qcn->rppp_max_rps);
		qcn->rpg_time_reset[i] =
			be32_to_cpu(hw_qcn->rpg_time_reset);
		qcn->rpg_byte_reset[i] =
			be32_to_cpu(hw_qcn->rpg_byte_reset);
		qcn->rpg_threshold[i] =
			be32_to_cpu(hw_qcn->rpg_threshold);
		qcn->rpg_max_rate[i] =
			be32_to_cpu(hw_qcn->rpg_max_rate);
		qcn->rpg_ai_rate[i] =
			be32_to_cpu(hw_qcn->rpg_ai_rate);
		qcn->rpg_hai_rate[i] =
			be32_to_cpu(hw_qcn->rpg_hai_rate);
		qcn->rpg_gd[i] =
			be32_to_cpu(hw_qcn->rpg_gd);
		qcn->rpg_min_dec_fac[i] =
			be32_to_cpu(hw_qcn->rpg_min_dec_fac);
		qcn->rpg_min_rate[i] =
			be32_to_cpu(hw_qcn->rpg_min_rate);
		/* CNDD state is tracked by the driver, not read from HW. */
		qcn->cndd_state_machine[i] =
			priv->cndd_state[i];
	}
	mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out);
	return 0;
}
/* dcbnl ieee_setqcn: program the 802.1Qau reaction-point parameters
 * from @qcn into firmware, one CONGESTION_CONTROL SET_PARAMS command
 * per TC.  Returns 0 on success or a negative errno.
 */
static int mlx4_en_dcbnl_ieee_setqcn(struct net_device *dev,
				     struct ieee_qcn *qcn)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_congestion_control_mb_prio_802_1_qau_params *hw_qcn;
	struct mlx4_cmd_mailbox *mailbox_in = NULL;
	u64 mailbox_in_dma = 0;
	u32 inmod = 0;
	int i, err;
/* Masks selecting which mailbox fields the firmware should apply. */
#define MODIFY_ENABLE_HIGH_MASK 0xc0000000
#define MODIFY_ENABLE_LOW_MASK 0xffc00000

	if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QCN))
		return -EOPNOTSUPP;

	mailbox_in = mlx4_alloc_cmd_mailbox(priv->mdev->dev);
	if (IS_ERR(mailbox_in))
		return -ENOMEM;

	mailbox_in_dma = mailbox_in->dma;
	hw_qcn =
	(struct mlx4_congestion_control_mb_prio_802_1_qau_params *)mailbox_in->buf;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		/* input modifier: port | priority bitmask | algorithm id */
		inmod = priv->port | ((1 << i) << 8) |
			(MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT << 16);

		/* Before updating QCN parameter,
		 * need to set it's modify enable bit to 1
		 */
		hw_qcn->modify_enable_high = cpu_to_be32(
						MODIFY_ENABLE_HIGH_MASK);
		hw_qcn->modify_enable_low = cpu_to_be32(MODIFY_ENABLE_LOW_MASK);

		hw_qcn->extended_enable = cpu_to_be32(qcn->rpg_enable[i] << RPG_ENABLE_BIT);
		hw_qcn->rppp_max_rps = cpu_to_be32(qcn->rppp_max_rps[i]);
		hw_qcn->rpg_time_reset = cpu_to_be32(qcn->rpg_time_reset[i]);
		hw_qcn->rpg_byte_reset = cpu_to_be32(qcn->rpg_byte_reset[i]);
		hw_qcn->rpg_threshold = cpu_to_be32(qcn->rpg_threshold[i]);
		hw_qcn->rpg_max_rate = cpu_to_be32(qcn->rpg_max_rate[i]);
		hw_qcn->rpg_ai_rate = cpu_to_be32(qcn->rpg_ai_rate[i]);
		hw_qcn->rpg_hai_rate = cpu_to_be32(qcn->rpg_hai_rate[i]);
		hw_qcn->rpg_gd = cpu_to_be32(qcn->rpg_gd[i]);
		hw_qcn->rpg_min_dec_fac = cpu_to_be32(qcn->rpg_min_dec_fac[i]);
		hw_qcn->rpg_min_rate = cpu_to_be32(qcn->rpg_min_rate[i]);
		/* CNDD state is driver-maintained; remember it and set the
		 * CN-tag bit when the interior node is ready.
		 */
		priv->cndd_state[i] = qcn->cndd_state_machine[i];
		if (qcn->cndd_state_machine[i] == DCB_CNDD_INTERIOR_READY)
			hw_qcn->extended_enable |= cpu_to_be32(1 << CN_TAG_BIT);

		err = mlx4_cmd(priv->mdev->dev, mailbox_in_dma, inmod,
			       MLX4_CONGESTION_CONTROL_SET_PARAMS,
			       MLX4_CMD_CONGESTION_CTRL_OPCODE,
			       MLX4_CMD_TIME_CLASS_C,
			       MLX4_CMD_NATIVE);
		if (err) {
			mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_in);
			return err;
		}
	}
	mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_in);
	return 0;
}
/* dcbnl ieee_getqcnstats: read the 802.1Qau statistics for every TC
 * from firmware, one CONGESTION_CONTROL GET_STATISTICS command per
 * priority.  Only the fields dcbnl exposes are copied out.
 */
static int mlx4_en_dcbnl_ieee_getqcnstats(struct net_device *dev,
					  struct ieee_qcn_stats *qcn_stats)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_congestion_control_mb_prio_802_1_qau_statistics *hw_qcn_stats;
	struct mlx4_cmd_mailbox *mailbox_out = NULL;
	u64 mailbox_in_dma = 0;	/* the GET command takes no input mailbox */
	u32 inmod = 0;
	int i, err;

	if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QCN))
		return -EOPNOTSUPP;

	mailbox_out = mlx4_alloc_cmd_mailbox(priv->mdev->dev);
	if (IS_ERR(mailbox_out))
		return -ENOMEM;

	hw_qcn_stats =
	(struct mlx4_congestion_control_mb_prio_802_1_qau_statistics *)
	mailbox_out->buf;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		/* input modifier: port | priority bitmask | algorithm id */
		inmod = priv->port | ((1 << i) << 8) |
			(MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT << 16);
		err = mlx4_cmd_box(priv->mdev->dev, mailbox_in_dma,
				   mailbox_out->dma, inmod,
				   MLX4_CONGESTION_CONTROL_GET_STATISTICS,
				   MLX4_CMD_CONGESTION_CTRL_OPCODE,
				   MLX4_CMD_TIME_CLASS_C,
				   MLX4_CMD_NATIVE);
		if (err) {
			mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out);
			return err;
		}
		qcn_stats->rppp_rp_centiseconds[i] =
			be64_to_cpu(hw_qcn_stats->rppp_rp_centiseconds);
		qcn_stats->rppp_created_rps[i] =
			be32_to_cpu(hw_qcn_stats->rppp_created_rps);
	}
	mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out);
	return 0;
}
/* Full DCB netlink callback table: IEEE 802.1Qaz ops (ETS, PFC,
 * maxrate, QCN) plus the legacy CEE ops.
 */
const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops = {
	.ieee_getets = mlx4_en_dcbnl_ieee_getets,
	.ieee_setets = mlx4_en_dcbnl_ieee_setets,
	.ieee_getmaxrate = mlx4_en_dcbnl_ieee_getmaxrate,
	.ieee_setmaxrate = mlx4_en_dcbnl_ieee_setmaxrate,
	.ieee_getqcn = mlx4_en_dcbnl_ieee_getqcn,
	.ieee_setqcn = mlx4_en_dcbnl_ieee_setqcn,
	.ieee_getqcnstats = mlx4_en_dcbnl_ieee_getqcnstats,
	.ieee_getpfc = mlx4_en_dcbnl_ieee_getpfc,
	.ieee_setpfc = mlx4_en_dcbnl_ieee_setpfc,
	.getstate = mlx4_en_dcbnl_get_state,
	.setstate = mlx4_en_dcbnl_set_state,
	.getpfccfg = mlx4_en_dcbnl_get_pfc_cfg,
	.setpfccfg = mlx4_en_dcbnl_set_pfc_cfg,
	.setall = mlx4_en_dcbnl_set_all,
	.getcap = mlx4_en_dcbnl_getcap,
	.getnumtcs = mlx4_en_dcbnl_getnumtcs,
	.getpfcstate = mlx4_en_dcbnl_getpfcstate,
	.setpfcstate = mlx4_en_dcbnl_setpfcstate,
	.getapp = mlx4_en_dcbnl_getapp,
	.setapp = mlx4_en_dcbnl_setapp,
	.getdcbx = mlx4_en_dcbnl_getdcbx,
	.setdcbx = mlx4_en_dcbnl_setdcbx,
};
/* Reduced DCB netlink callback table: PFC-related ops only (no ETS,
 * maxrate, QCN, getstate or getcap entries).
 */
const struct dcbnl_rtnl_ops mlx4_en_dcbnl_pfc_ops = {
	.ieee_getpfc = mlx4_en_dcbnl_ieee_getpfc,
	.ieee_setpfc = mlx4_en_dcbnl_ieee_setpfc,
	.setstate = mlx4_en_dcbnl_set_state,
	.getpfccfg = mlx4_en_dcbnl_get_pfc_cfg,
	.setpfccfg = mlx4_en_dcbnl_set_pfc_cfg,
	.setall = mlx4_en_dcbnl_set_all,
	.getnumtcs = mlx4_en_dcbnl_getnumtcs,
	.getpfcstate = mlx4_en_dcbnl_getpfcstate,
	.setpfcstate = mlx4_en_dcbnl_setpfcstate,
	.getapp = mlx4_en_dcbnl_getapp,
	.setapp = mlx4_en_dcbnl_setapp,
	.getdcbx = mlx4_en_dcbnl_getdcbx,
	.setdcbx = mlx4_en_dcbnl_setdcbx,
};