hclge_dcb.c 9.5 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460
  1. // SPDX-License-Identifier: GPL-2.0+
  2. // Copyright (c) 2016-2017 Hisilicon Limited.
  3. #include "hclge_main.h"
  4. #include "hclge_tm.h"
  5. #include "hnae3.h"
  6. #define BW_PERCENT 100
  7. static int hclge_ieee_ets_to_tm_info(struct hclge_dev *hdev,
  8. struct ieee_ets *ets)
  9. {
  10. u8 i;
  11. for (i = 0; i < HNAE3_MAX_TC; i++) {
  12. switch (ets->tc_tsa[i]) {
  13. case IEEE_8021QAZ_TSA_STRICT:
  14. hdev->tm_info.tc_info[i].tc_sch_mode =
  15. HCLGE_SCH_MODE_SP;
  16. hdev->tm_info.pg_info[0].tc_dwrr[i] = 0;
  17. break;
  18. case IEEE_8021QAZ_TSA_ETS:
  19. hdev->tm_info.tc_info[i].tc_sch_mode =
  20. HCLGE_SCH_MODE_DWRR;
  21. hdev->tm_info.pg_info[0].tc_dwrr[i] =
  22. ets->tc_tx_bw[i];
  23. break;
  24. default:
  25. /* Hardware only supports SP (strict priority)
  26. * or ETS (enhanced transmission selection)
  27. * algorithms, if we receive some other value
  28. * from dcbnl, then throw an error.
  29. */
  30. return -EINVAL;
  31. }
  32. }
  33. hclge_tm_prio_tc_info_update(hdev, ets->prio_tc);
  34. return 0;
  35. }
  36. static void hclge_tm_info_to_ieee_ets(struct hclge_dev *hdev,
  37. struct ieee_ets *ets)
  38. {
  39. u32 i;
  40. memset(ets, 0, sizeof(*ets));
  41. ets->willing = 1;
  42. ets->ets_cap = hdev->tc_max;
  43. for (i = 0; i < HNAE3_MAX_TC; i++) {
  44. ets->prio_tc[i] = hdev->tm_info.prio_tc[i];
  45. ets->tc_tx_bw[i] = hdev->tm_info.pg_info[0].tc_dwrr[i];
  46. if (hdev->tm_info.tc_info[i].tc_sch_mode ==
  47. HCLGE_SCH_MODE_SP)
  48. ets->tc_tsa[i] = IEEE_8021QAZ_TSA_STRICT;
  49. else
  50. ets->tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
  51. }
  52. }
  53. /* IEEE std */
  54. static int hclge_ieee_getets(struct hnae3_handle *h, struct ieee_ets *ets)
  55. {
  56. struct hclge_vport *vport = hclge_get_vport(h);
  57. struct hclge_dev *hdev = vport->back;
  58. hclge_tm_info_to_ieee_ets(hdev, ets);
  59. return 0;
  60. }
  61. static int hclge_dcb_common_validate(struct hclge_dev *hdev, u8 num_tc,
  62. u8 *prio_tc)
  63. {
  64. int i;
  65. if (num_tc > hdev->tc_max) {
  66. dev_err(&hdev->pdev->dev,
  67. "tc num checking failed, %u > tc_max(%u)\n",
  68. num_tc, hdev->tc_max);
  69. return -EINVAL;
  70. }
  71. for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
  72. if (prio_tc[i] >= num_tc) {
  73. dev_err(&hdev->pdev->dev,
  74. "prio_tc[%u] checking failed, %u >= num_tc(%u)\n",
  75. i, prio_tc[i], num_tc);
  76. return -EINVAL;
  77. }
  78. }
  79. if (num_tc > hdev->vport[0].alloc_tqps) {
  80. dev_err(&hdev->pdev->dev,
  81. "allocated tqp checking failed, %u > tqp(%u)\n",
  82. num_tc, hdev->vport[0].alloc_tqps);
  83. return -EINVAL;
  84. }
  85. return 0;
  86. }
/* Validate an ETS request from dcbnl before it is applied.
 * On success, *tc is set to the TC count implied by the prio->tc map and
 * *changed reports whether the request differs from the current config.
 * Returns 0 on success, -EINVAL on an invalid request.
 */
static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets,
			      u8 *tc, bool *changed)
{
	bool has_ets_tc = false;
	u32 total_ets_bw = 0;
	u8 max_tc = 0;
	int ret;
	u8 i;

	/* Find the highest TC referenced by the prio->tc map and flag any
	 * difference from the currently programmed mapping.
	 */
	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
		if (ets->prio_tc[i] != hdev->tm_info.prio_tc[i])
			*changed = true;

		if (ets->prio_tc[i] > max_tc)
			max_tc = ets->prio_tc[i];
	}

	ret = hclge_dcb_common_validate(hdev, max_tc + 1, ets->prio_tc);
	if (ret)
		return ret;

	/* Only SP and ETS scheduling are supported; sum the DWRR
	 * bandwidth over all ETS TCs for the 100% check below.
	 */
	for (i = 0; i < hdev->tc_max; i++) {
		switch (ets->tc_tsa[i]) {
		case IEEE_8021QAZ_TSA_STRICT:
			if (hdev->tm_info.tc_info[i].tc_sch_mode !=
				HCLGE_SCH_MODE_SP)
				*changed = true;
			break;
		case IEEE_8021QAZ_TSA_ETS:
			if (hdev->tm_info.tc_info[i].tc_sch_mode !=
				HCLGE_SCH_MODE_DWRR)
				*changed = true;

			total_ets_bw += ets->tc_tx_bw[i];
			has_ets_tc = true;
			break;
		default:
			return -EINVAL;
		}
	}

	/* If any TC uses ETS, the DWRR weights must sum to exactly 100%. */
	if (has_ets_tc && total_ets_bw != BW_PERCENT)
		return -EINVAL;

	*tc = max_tc + 1;
	if (*tc != hdev->tm_info.num_tc)
		*changed = true;

	return 0;
}
  129. static int hclge_map_update(struct hclge_dev *hdev)
  130. {
  131. int ret;
  132. ret = hclge_tm_schd_setup_hw(hdev);
  133. if (ret)
  134. return ret;
  135. ret = hclge_pause_setup_hw(hdev, false);
  136. if (ret)
  137. return ret;
  138. ret = hclge_buffer_alloc(hdev);
  139. if (ret)
  140. return ret;
  141. hclge_rss_indir_init_cfg(hdev);
  142. return hclge_rss_init_hw(hdev);
  143. }
  144. static int hclge_client_setup_tc(struct hclge_dev *hdev)
  145. {
  146. struct hclge_vport *vport = hdev->vport;
  147. struct hnae3_client *client;
  148. struct hnae3_handle *handle;
  149. int ret;
  150. u32 i;
  151. for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
  152. handle = &vport[i].nic;
  153. client = handle->client;
  154. if (!client || !client->ops || !client->ops->setup_tc)
  155. continue;
  156. ret = client->ops->setup_tc(handle, hdev->tm_info.num_tc);
  157. if (ret)
  158. return ret;
  159. }
  160. return 0;
  161. }
  162. static int hclge_notify_down_uinit(struct hclge_dev *hdev)
  163. {
  164. int ret;
  165. ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
  166. if (ret)
  167. return ret;
  168. return hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
  169. }
  170. static int hclge_notify_init_up(struct hclge_dev *hdev)
  171. {
  172. int ret;
  173. ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
  174. if (ret)
  175. return ret;
  176. return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
  177. }
/* dcbnl ieee_setets callback: validate and apply an ETS configuration.
 * The client is only taken down/up when the TC mapping actually changes.
 */
static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct net_device *netdev = h->kinfo.netdev;
	struct hclge_dev *hdev = vport->back;
	bool map_changed = false;
	u8 num_tc = 0;
	int ret;

	/* ETS via dcbnl requires IEEE DCBX mode and is mutually exclusive
	 * with mqprio-based TC configuration.
	 */
	if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
	    hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
		return -EINVAL;

	ret = hclge_ets_validate(hdev, ets, &num_tc, &map_changed);
	if (ret)
		return ret;

	if (map_changed) {
		netif_dbg(h, drv, netdev, "set ets\n");

		/* Quiesce the client before reprogramming the TC map. */
		ret = hclge_notify_down_uinit(hdev);
		if (ret)
			return ret;
	}

	hclge_tm_schd_info_update(hdev, num_tc);
	ret = hclge_ieee_ets_to_tm_info(hdev, ets);
	if (ret)
		goto err_out;

	if (map_changed) {
		ret = hclge_map_update(hdev);
		if (ret)
			goto err_out;

		ret = hclge_client_setup_tc(hdev);
		if (ret)
			goto err_out;

		ret = hclge_notify_init_up(hdev);
		if (ret)
			return ret;
	}

	return hclge_tm_dwrr_cfg(hdev);

err_out:
	/* Only restart the client if it was taken down above. */
	if (!map_changed)
		return ret;

	hclge_notify_init_up(hdev);

	return ret;
}
  220. static int hclge_ieee_getpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
  221. {
  222. u64 requests[HNAE3_MAX_TC], indications[HNAE3_MAX_TC];
  223. struct hclge_vport *vport = hclge_get_vport(h);
  224. struct hclge_dev *hdev = vport->back;
  225. int ret;
  226. u8 i;
  227. memset(pfc, 0, sizeof(*pfc));
  228. pfc->pfc_cap = hdev->pfc_max;
  229. pfc->pfc_en = hdev->tm_info.pfc_en;
  230. ret = hclge_pfc_tx_stats_get(hdev, requests);
  231. if (ret)
  232. return ret;
  233. ret = hclge_pfc_rx_stats_get(hdev, indications);
  234. if (ret)
  235. return ret;
  236. for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
  237. pfc->requests[i] = requests[i];
  238. pfc->indications[i] = indications[i];
  239. }
  240. return 0;
  241. }
/* dcbnl ieee_setpfc callback: apply a new per-priority PFC enable mask.
 * No-op when the mask is unchanged.
 */
static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct net_device *netdev = h->kinfo.netdev;
	struct hclge_dev *hdev = vport->back;
	u8 i, j, pfc_map, *prio_tc;
	int ret;

	if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
	    hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
		return -EINVAL;

	if (pfc->pfc_en == hdev->tm_info.pfc_en)
		return 0;

	prio_tc = hdev->tm_info.prio_tc;
	pfc_map = 0;

	/* Translate the per-priority enable mask into a per-TC bitmap:
	 * a TC is PFC-enabled if any priority mapped to it is enabled.
	 */
	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		for (j = 0; j < HNAE3_MAX_USER_PRIO; j++) {
			if ((prio_tc[j] == i) && (pfc->pfc_en & BIT(j))) {
				pfc_map |= BIT(i);
				break;
			}
		}
	}

	hdev->tm_info.hw_pfc_map = pfc_map;
	hdev->tm_info.pfc_en = pfc->pfc_en;

	netif_dbg(h, drv, netdev,
		  "set pfc: pfc_en=%x, pfc_map=%x, num_tc=%u\n",
		  pfc->pfc_en, pfc_map, hdev->tm_info.num_tc);

	hclge_tm_pfc_info_update(hdev);

	ret = hclge_pause_setup_hw(hdev, false);
	if (ret)
		return ret;

	/* Buffer reallocation requires the client down; bring it back up
	 * even when the reallocation fails.
	 */
	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		return ret;

	ret = hclge_buffer_alloc(hdev);
	if (ret) {
		hclge_notify_client(hdev, HNAE3_UP_CLIENT);
		return ret;
	}

	return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
}
  283. /* DCBX configuration */
  284. static u8 hclge_getdcbx(struct hnae3_handle *h)
  285. {
  286. struct hclge_vport *vport = hclge_get_vport(h);
  287. struct hclge_dev *hdev = vport->back;
  288. if (hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
  289. return 0;
  290. return hdev->dcbx_cap;
  291. }
  292. static u8 hclge_setdcbx(struct hnae3_handle *h, u8 mode)
  293. {
  294. struct hclge_vport *vport = hclge_get_vport(h);
  295. struct net_device *netdev = h->kinfo.netdev;
  296. struct hclge_dev *hdev = vport->back;
  297. netif_dbg(h, drv, netdev, "set dcbx: mode=%u\n", mode);
  298. /* No support for LLD_MANAGED modes or CEE */
  299. if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
  300. (mode & DCB_CAP_DCBX_VER_CEE) ||
  301. !(mode & DCB_CAP_DCBX_HOST))
  302. return 1;
  303. hdev->dcbx_cap = mode;
  304. return 0;
  305. }
  306. /* Set up TC for hardware offloaded mqprio in channel mode */
  307. static int hclge_setup_tc(struct hnae3_handle *h, u8 tc, u8 *prio_tc)
  308. {
  309. struct hclge_vport *vport = hclge_get_vport(h);
  310. struct hclge_dev *hdev = vport->back;
  311. int ret;
  312. if (hdev->flag & HCLGE_FLAG_DCB_ENABLE)
  313. return -EINVAL;
  314. ret = hclge_dcb_common_validate(hdev, tc, prio_tc);
  315. if (ret)
  316. return -EINVAL;
  317. ret = hclge_notify_down_uinit(hdev);
  318. if (ret)
  319. return ret;
  320. hclge_tm_schd_info_update(hdev, tc);
  321. hclge_tm_prio_tc_info_update(hdev, prio_tc);
  322. ret = hclge_tm_init_hw(hdev, false);
  323. if (ret)
  324. goto err_out;
  325. ret = hclge_client_setup_tc(hdev);
  326. if (ret)
  327. goto err_out;
  328. hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
  329. if (tc > 1)
  330. hdev->flag |= HCLGE_FLAG_MQPRIO_ENABLE;
  331. else
  332. hdev->flag &= ~HCLGE_FLAG_MQPRIO_ENABLE;
  333. return hclge_notify_init_up(hdev);
  334. err_out:
  335. hclge_notify_init_up(hdev);
  336. return ret;
  337. }
/* dcbnl callbacks exposed to the hns3 client via kinfo->dcb_ops. */
static const struct hnae3_dcb_ops hns3_dcb_ops = {
	.ieee_getets = hclge_ieee_getets,
	.ieee_setets = hclge_ieee_setets,
	.ieee_getpfc = hclge_ieee_getpfc,
	.ieee_setpfc = hclge_ieee_setpfc,
	.getdcbx = hclge_getdcbx,
	.setdcbx = hclge_setdcbx,
	.setup_tc = hclge_setup_tc,
};
  347. void hclge_dcb_ops_set(struct hclge_dev *hdev)
  348. {
  349. struct hclge_vport *vport = hdev->vport;
  350. struct hnae3_knic_private_info *kinfo;
  351. /* Hdev does not support DCB or vport is
  352. * not a pf, then dcb_ops is not set.
  353. */
  354. if (!hnae3_dev_dcb_supported(hdev) ||
  355. vport->vport_id != 0)
  356. return;
  357. kinfo = &vport->nic.kinfo;
  358. kinfo->dcb_ops = &hns3_dcb_ops;
  359. hdev->dcbx_cap = DCB_CAP_DCBX_VER_IEEE | DCB_CAP_DCBX_HOST;
  360. }