  1. /*
  2. * drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
  3. * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
  4. * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
  5. *
  6. * Redistribution and use in source and binary forms, with or without
  7. * modification, are permitted provided that the following conditions are met:
  8. *
  9. * 1. Redistributions of source code must retain the above copyright
  10. * notice, this list of conditions and the following disclaimer.
  11. * 2. Redistributions in binary form must reproduce the above copyright
  12. * notice, this list of conditions and the following disclaimer in the
  13. * documentation and/or other materials provided with the distribution.
  14. * 3. Neither the names of the copyright holders nor the names of its
  15. * contributors may be used to endorse or promote products derived from
  16. * this software without specific prior written permission.
  17. *
  18. * Alternatively, this software may be distributed under the terms of the
  19. * GNU General Public License ("GPL") version 2 as published by the Free
  20. * Software Foundation.
  21. *
  22. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  23. * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  24. * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  25. * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  26. * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  27. * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  28. * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  29. * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  30. * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  31. * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  32. * POSSIBILITY OF SUCH DAMAGE.
  33. */
  34. #include <linux/kernel.h>
  35. #include <linux/types.h>
  36. #include <linux/dcbnl.h>
  37. #include <linux/if_ether.h>
  38. #include <linux/list.h>
  39. #include "spectrum.h"
  40. #include "core.h"
  41. #include "port.h"
  42. #include "reg.h"
  43. static struct mlxsw_sp_sb_pr *mlxsw_sp_sb_pr_get(struct mlxsw_sp *mlxsw_sp,
  44. u8 pool,
  45. enum mlxsw_reg_sbxx_dir dir)
  46. {
  47. return &mlxsw_sp->sb.prs[dir][pool];
  48. }
  49. static struct mlxsw_sp_sb_cm *mlxsw_sp_sb_cm_get(struct mlxsw_sp *mlxsw_sp,
  50. u8 local_port, u8 pg_buff,
  51. enum mlxsw_reg_sbxx_dir dir)
  52. {
  53. return &mlxsw_sp->sb.ports[local_port].cms[dir][pg_buff];
  54. }
  55. static struct mlxsw_sp_sb_pm *mlxsw_sp_sb_pm_get(struct mlxsw_sp *mlxsw_sp,
  56. u8 local_port, u8 pool,
  57. enum mlxsw_reg_sbxx_dir dir)
  58. {
  59. return &mlxsw_sp->sb.ports[local_port].pms[dir][pool];
  60. }
  61. static int mlxsw_sp_sb_pr_write(struct mlxsw_sp *mlxsw_sp, u8 pool,
  62. enum mlxsw_reg_sbxx_dir dir,
  63. enum mlxsw_reg_sbpr_mode mode, u32 size)
  64. {
  65. char sbpr_pl[MLXSW_REG_SBPR_LEN];
  66. struct mlxsw_sp_sb_pr *pr;
  67. int err;
  68. mlxsw_reg_sbpr_pack(sbpr_pl, pool, dir, mode, size);
  69. err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl);
  70. if (err)
  71. return err;
  72. pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);
  73. pr->mode = mode;
  74. pr->size = size;
  75. return 0;
  76. }
  77. static int mlxsw_sp_sb_cm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
  78. u8 pg_buff, enum mlxsw_reg_sbxx_dir dir,
  79. u32 min_buff, u32 max_buff, u8 pool)
  80. {
  81. char sbcm_pl[MLXSW_REG_SBCM_LEN];
  82. int err;
  83. mlxsw_reg_sbcm_pack(sbcm_pl, local_port, pg_buff, dir,
  84. min_buff, max_buff, pool);
  85. err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbcm), sbcm_pl);
  86. if (err)
  87. return err;
  88. if (pg_buff < MLXSW_SP_SB_TC_COUNT) {
  89. struct mlxsw_sp_sb_cm *cm;
  90. cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, pg_buff, dir);
  91. cm->min_buff = min_buff;
  92. cm->max_buff = max_buff;
  93. cm->pool = pool;
  94. }
  95. return 0;
  96. }
  97. static int mlxsw_sp_sb_pm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
  98. u8 pool, enum mlxsw_reg_sbxx_dir dir,
  99. u32 min_buff, u32 max_buff)
  100. {
  101. char sbpm_pl[MLXSW_REG_SBPM_LEN];
  102. struct mlxsw_sp_sb_pm *pm;
  103. int err;
  104. mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, false,
  105. min_buff, max_buff);
  106. err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl);
  107. if (err)
  108. return err;
  109. pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool, dir);
  110. pm->min_buff = min_buff;
  111. pm->max_buff = max_buff;
  112. return 0;
  113. }
  114. static int mlxsw_sp_sb_pm_occ_clear(struct mlxsw_sp *mlxsw_sp, u8 local_port,
  115. u8 pool, enum mlxsw_reg_sbxx_dir dir,
  116. struct list_head *bulk_list)
  117. {
  118. char sbpm_pl[MLXSW_REG_SBPM_LEN];
  119. mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, true, 0, 0);
  120. return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
  121. bulk_list, NULL, 0);
  122. }
  123. static void mlxsw_sp_sb_pm_occ_query_cb(struct mlxsw_core *mlxsw_core,
  124. char *sbpm_pl, size_t sbpm_pl_len,
  125. unsigned long cb_priv)
  126. {
  127. struct mlxsw_sp_sb_pm *pm = (struct mlxsw_sp_sb_pm *) cb_priv;
  128. mlxsw_reg_sbpm_unpack(sbpm_pl, &pm->occ.cur, &pm->occ.max);
  129. }
  130. static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u8 local_port,
  131. u8 pool, enum mlxsw_reg_sbxx_dir dir,
  132. struct list_head *bulk_list)
  133. {
  134. char sbpm_pl[MLXSW_REG_SBPM_LEN];
  135. struct mlxsw_sp_sb_pm *pm;
  136. pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool, dir);
  137. mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, false, 0, 0);
  138. return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
  139. bulk_list,
  140. mlxsw_sp_sb_pm_occ_query_cb,
  141. (unsigned long) pm);
  142. }
  143. static const u16 mlxsw_sp_pbs[] = {
  144. [0] = 2 * MLXSW_SP_BYTES_TO_CELLS(ETH_FRAME_LEN),
  145. [9] = 2 * MLXSW_SP_BYTES_TO_CELLS(MLXSW_PORT_MAX_MTU),
  146. };
  147. #define MLXSW_SP_PBS_LEN ARRAY_SIZE(mlxsw_sp_pbs)
  148. #define MLXSW_SP_PB_UNUSED 8
  149. static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port)
  150. {
  151. char pbmc_pl[MLXSW_REG_PBMC_LEN];
  152. int i;
  153. mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port,
  154. 0xffff, 0xffff / 2);
  155. for (i = 0; i < MLXSW_SP_PBS_LEN; i++) {
  156. if (i == MLXSW_SP_PB_UNUSED)
  157. continue;
  158. mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, i, mlxsw_sp_pbs[i]);
  159. }
  160. mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl,
  161. MLXSW_REG_PBMC_PORT_SHARED_BUF_IDX, 0);
  162. return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core,
  163. MLXSW_REG(pbmc), pbmc_pl);
  164. }
  165. static int mlxsw_sp_port_pb_prio_init(struct mlxsw_sp_port *mlxsw_sp_port)
  166. {
  167. char pptb_pl[MLXSW_REG_PPTB_LEN];
  168. int i;
  169. mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
  170. for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
  171. mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, i, 0);
  172. return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb),
  173. pptb_pl);
  174. }
  175. static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port)
  176. {
  177. int err;
  178. err = mlxsw_sp_port_pb_init(mlxsw_sp_port);
  179. if (err)
  180. return err;
  181. return mlxsw_sp_port_pb_prio_init(mlxsw_sp_port);
  182. }
  183. #define MLXSW_SP_SB_PR_INGRESS_SIZE \
  184. (15000000 - (2 * 20000 * MLXSW_PORT_MAX_PORTS))
  185. #define MLXSW_SP_SB_PR_INGRESS_MNG_SIZE (200 * 1000)
  186. #define MLXSW_SP_SB_PR_EGRESS_SIZE \
  187. (14000000 - (8 * 1500 * MLXSW_PORT_MAX_PORTS))
  188. #define MLXSW_SP_SB_PR(_mode, _size) \
  189. { \
  190. .mode = _mode, \
  191. .size = _size, \
  192. }
  193. static const struct mlxsw_sp_sb_pr mlxsw_sp_sb_prs_ingress[] = {
  194. MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
  195. MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_PR_INGRESS_SIZE)),
  196. MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
  197. MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
  198. MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
  199. MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_PR_INGRESS_MNG_SIZE)),
  200. };
  201. #define MLXSW_SP_SB_PRS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_prs_ingress)
  202. static const struct mlxsw_sp_sb_pr mlxsw_sp_sb_prs_egress[] = {
  203. MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
  204. MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_PR_EGRESS_SIZE)),
  205. MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
  206. MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
  207. MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
  208. };
  209. #define MLXSW_SP_SB_PRS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_prs_egress)
  210. static int __mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp,
  211. enum mlxsw_reg_sbxx_dir dir,
  212. const struct mlxsw_sp_sb_pr *prs,
  213. size_t prs_len)
  214. {
  215. int i;
  216. int err;
  217. for (i = 0; i < prs_len; i++) {
  218. const struct mlxsw_sp_sb_pr *pr;
  219. pr = &prs[i];
  220. err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, dir,
  221. pr->mode, pr->size);
  222. if (err)
  223. return err;
  224. }
  225. return 0;
  226. }
  227. static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp)
  228. {
  229. int err;
  230. err = __mlxsw_sp_sb_prs_init(mlxsw_sp, MLXSW_REG_SBXX_DIR_INGRESS,
  231. mlxsw_sp_sb_prs_ingress,
  232. MLXSW_SP_SB_PRS_INGRESS_LEN);
  233. if (err)
  234. return err;
  235. return __mlxsw_sp_sb_prs_init(mlxsw_sp, MLXSW_REG_SBXX_DIR_EGRESS,
  236. mlxsw_sp_sb_prs_egress,
  237. MLXSW_SP_SB_PRS_EGRESS_LEN);
  238. }
  239. #define MLXSW_SP_SB_CM(_min_buff, _max_buff, _pool) \
  240. { \
  241. .min_buff = _min_buff, \
  242. .max_buff = _max_buff, \
  243. .pool = _pool, \
  244. }
  245. static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_ingress[] = {
  246. MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(10000), 8, 0),
  247. MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
  248. MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
  249. MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
  250. MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
  251. MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
  252. MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
  253. MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
  254. MLXSW_SP_SB_CM(0, 0, 0), /* dummy, this PG does not exist */
  255. MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(20000), 1, 3),
  256. };
  257. #define MLXSW_SP_SB_CMS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms_ingress)
  258. static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_egress[] = {
  259. MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
  260. MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
  261. MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
  262. MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
  263. MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
  264. MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
  265. MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
  266. MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
  267. MLXSW_SP_SB_CM(0, 0, 0),
  268. MLXSW_SP_SB_CM(0, 0, 0),
  269. MLXSW_SP_SB_CM(0, 0, 0),
  270. MLXSW_SP_SB_CM(0, 0, 0),
  271. MLXSW_SP_SB_CM(0, 0, 0),
  272. MLXSW_SP_SB_CM(0, 0, 0),
  273. MLXSW_SP_SB_CM(0, 0, 0),
  274. MLXSW_SP_SB_CM(0, 0, 0),
  275. MLXSW_SP_SB_CM(1, 0xff, 0),
  276. };
  277. #define MLXSW_SP_SB_CMS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms_egress)
  278. #define MLXSW_SP_CPU_PORT_SB_CM MLXSW_SP_SB_CM(0, 0, 0)
  279. static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
  280. MLXSW_SP_CPU_PORT_SB_CM,
  281. MLXSW_SP_CPU_PORT_SB_CM,
  282. MLXSW_SP_CPU_PORT_SB_CM,
  283. MLXSW_SP_CPU_PORT_SB_CM,
  284. MLXSW_SP_CPU_PORT_SB_CM,
  285. MLXSW_SP_CPU_PORT_SB_CM,
  286. MLXSW_SP_CPU_PORT_SB_CM,
  287. MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(10000), 0, 0),
  288. MLXSW_SP_CPU_PORT_SB_CM,
  289. MLXSW_SP_CPU_PORT_SB_CM,
  290. MLXSW_SP_CPU_PORT_SB_CM,
  291. MLXSW_SP_CPU_PORT_SB_CM,
  292. MLXSW_SP_CPU_PORT_SB_CM,
  293. MLXSW_SP_CPU_PORT_SB_CM,
  294. MLXSW_SP_CPU_PORT_SB_CM,
  295. MLXSW_SP_CPU_PORT_SB_CM,
  296. MLXSW_SP_CPU_PORT_SB_CM,
  297. MLXSW_SP_CPU_PORT_SB_CM,
  298. MLXSW_SP_CPU_PORT_SB_CM,
  299. MLXSW_SP_CPU_PORT_SB_CM,
  300. MLXSW_SP_CPU_PORT_SB_CM,
  301. MLXSW_SP_CPU_PORT_SB_CM,
  302. MLXSW_SP_CPU_PORT_SB_CM,
  303. MLXSW_SP_CPU_PORT_SB_CM,
  304. MLXSW_SP_CPU_PORT_SB_CM,
  305. MLXSW_SP_CPU_PORT_SB_CM,
  306. MLXSW_SP_CPU_PORT_SB_CM,
  307. MLXSW_SP_CPU_PORT_SB_CM,
  308. MLXSW_SP_CPU_PORT_SB_CM,
  309. MLXSW_SP_CPU_PORT_SB_CM,
  310. MLXSW_SP_CPU_PORT_SB_CM,
  311. MLXSW_SP_CPU_PORT_SB_CM,
  312. };
  313. #define MLXSW_SP_CPU_PORT_SB_MCS_LEN \
  314. ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms)
  315. static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
  316. enum mlxsw_reg_sbxx_dir dir,
  317. const struct mlxsw_sp_sb_cm *cms,
  318. size_t cms_len)
  319. {
  320. int i;
  321. int err;
  322. for (i = 0; i < cms_len; i++) {
  323. const struct mlxsw_sp_sb_cm *cm;
  324. if (i == 8 && dir == MLXSW_REG_SBXX_DIR_INGRESS)
  325. continue; /* PG number 8 does not exist, skip it */
  326. cm = &cms[i];
  327. err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i, dir,
  328. cm->min_buff, cm->max_buff,
  329. cm->pool);
  330. if (err)
  331. return err;
  332. }
  333. return 0;
  334. }
  335. static int mlxsw_sp_port_sb_cms_init(struct mlxsw_sp_port *mlxsw_sp_port)
  336. {
  337. int err;
  338. err = __mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp,
  339. mlxsw_sp_port->local_port,
  340. MLXSW_REG_SBXX_DIR_INGRESS,
  341. mlxsw_sp_sb_cms_ingress,
  342. MLXSW_SP_SB_CMS_INGRESS_LEN);
  343. if (err)
  344. return err;
  345. return __mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp,
  346. mlxsw_sp_port->local_port,
  347. MLXSW_REG_SBXX_DIR_EGRESS,
  348. mlxsw_sp_sb_cms_egress,
  349. MLXSW_SP_SB_CMS_EGRESS_LEN);
  350. }
  351. static int mlxsw_sp_cpu_port_sb_cms_init(struct mlxsw_sp *mlxsw_sp)
  352. {
  353. return __mlxsw_sp_sb_cms_init(mlxsw_sp, 0, MLXSW_REG_SBXX_DIR_EGRESS,
  354. mlxsw_sp_cpu_port_sb_cms,
  355. MLXSW_SP_CPU_PORT_SB_MCS_LEN);
  356. }
  357. #define MLXSW_SP_SB_PM(_min_buff, _max_buff) \
  358. { \
  359. .min_buff = _min_buff, \
  360. .max_buff = _max_buff, \
  361. }
  362. static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms_ingress[] = {
  363. MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
  364. MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
  365. MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
  366. MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
  367. };
  368. #define MLXSW_SP_SB_PMS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms_ingress)
  369. static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms_egress[] = {
  370. MLXSW_SP_SB_PM(0, 7),
  371. MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
  372. MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
  373. MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
  374. };
  375. #define MLXSW_SP_SB_PMS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms_egress)
  376. static int __mlxsw_sp_port_sb_pms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
  377. enum mlxsw_reg_sbxx_dir dir,
  378. const struct mlxsw_sp_sb_pm *pms,
  379. size_t pms_len)
  380. {
  381. int i;
  382. int err;
  383. for (i = 0; i < pms_len; i++) {
  384. const struct mlxsw_sp_sb_pm *pm;
  385. pm = &pms[i];
  386. err = mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, i, dir,
  387. pm->min_buff, pm->max_buff);
  388. if (err)
  389. return err;
  390. }
  391. return 0;
  392. }
  393. static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port)
  394. {
  395. int err;
  396. err = __mlxsw_sp_port_sb_pms_init(mlxsw_sp_port->mlxsw_sp,
  397. mlxsw_sp_port->local_port,
  398. MLXSW_REG_SBXX_DIR_INGRESS,
  399. mlxsw_sp_sb_pms_ingress,
  400. MLXSW_SP_SB_PMS_INGRESS_LEN);
  401. if (err)
  402. return err;
  403. return __mlxsw_sp_port_sb_pms_init(mlxsw_sp_port->mlxsw_sp,
  404. mlxsw_sp_port->local_port,
  405. MLXSW_REG_SBXX_DIR_EGRESS,
  406. mlxsw_sp_sb_pms_egress,
  407. MLXSW_SP_SB_PMS_EGRESS_LEN);
  408. }
  409. struct mlxsw_sp_sb_mm {
  410. u32 min_buff;
  411. u32 max_buff;
  412. u8 pool;
  413. };
  414. #define MLXSW_SP_SB_MM(_min_buff, _max_buff, _pool) \
  415. { \
  416. .min_buff = _min_buff, \
  417. .max_buff = _max_buff, \
  418. .pool = _pool, \
  419. }
  420. static const struct mlxsw_sp_sb_mm mlxsw_sp_sb_mms[] = {
  421. MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
  422. MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
  423. MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
  424. MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
  425. MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
  426. MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
  427. MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
  428. MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
  429. MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
  430. MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
  431. MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
  432. MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
  433. MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
  434. MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
  435. MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
  436. };
  437. #define MLXSW_SP_SB_MMS_LEN ARRAY_SIZE(mlxsw_sp_sb_mms)
  438. static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp)
  439. {
  440. char sbmm_pl[MLXSW_REG_SBMM_LEN];
  441. int i;
  442. int err;
  443. for (i = 0; i < MLXSW_SP_SB_MMS_LEN; i++) {
  444. const struct mlxsw_sp_sb_mm *mc;
  445. mc = &mlxsw_sp_sb_mms[i];
  446. mlxsw_reg_sbmm_pack(sbmm_pl, i, mc->min_buff,
  447. mc->max_buff, mc->pool);
  448. err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbmm), sbmm_pl);
  449. if (err)
  450. return err;
  451. }
  452. return 0;
  453. }
  454. #define MLXSW_SP_SB_SIZE (16 * 1024 * 1024)
  455. int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
  456. {
  457. int err;
  458. err = mlxsw_sp_sb_prs_init(mlxsw_sp);
  459. if (err)
  460. return err;
  461. err = mlxsw_sp_cpu_port_sb_cms_init(mlxsw_sp);
  462. if (err)
  463. return err;
  464. err = mlxsw_sp_sb_mms_init(mlxsw_sp);
  465. if (err)
  466. return err;
  467. return devlink_sb_register(priv_to_devlink(mlxsw_sp->core), 0,
  468. MLXSW_SP_SB_SIZE,
  469. MLXSW_SP_SB_POOL_COUNT,
  470. MLXSW_SP_SB_POOL_COUNT,
  471. MLXSW_SP_SB_TC_COUNT,
  472. MLXSW_SP_SB_TC_COUNT);
  473. }
  474. void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp)
  475. {
  476. devlink_sb_unregister(priv_to_devlink(mlxsw_sp->core), 0);
  477. }
  478. int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port)
  479. {
  480. int err;
  481. err = mlxsw_sp_port_headroom_init(mlxsw_sp_port);
  482. if (err)
  483. return err;
  484. err = mlxsw_sp_port_sb_cms_init(mlxsw_sp_port);
  485. if (err)
  486. return err;
  487. err = mlxsw_sp_port_sb_pms_init(mlxsw_sp_port);
  488. return err;
  489. }
  490. static u8 pool_get(u16 pool_index)
  491. {
  492. return pool_index % MLXSW_SP_SB_POOL_COUNT;
  493. }
  494. static u16 pool_index_get(u8 pool, enum mlxsw_reg_sbxx_dir dir)
  495. {
  496. u16 pool_index;
  497. pool_index = pool;
  498. if (dir == MLXSW_REG_SBXX_DIR_EGRESS)
  499. pool_index += MLXSW_SP_SB_POOL_COUNT;
  500. return pool_index;
  501. }
  502. static enum mlxsw_reg_sbxx_dir dir_get(u16 pool_index)
  503. {
  504. return pool_index < MLXSW_SP_SB_POOL_COUNT ?
  505. MLXSW_REG_SBXX_DIR_INGRESS : MLXSW_REG_SBXX_DIR_EGRESS;
  506. }
  507. int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
  508. unsigned int sb_index, u16 pool_index,
  509. struct devlink_sb_pool_info *pool_info)
  510. {
  511. struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
  512. u8 pool = pool_get(pool_index);
  513. enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
  514. struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);
  515. pool_info->pool_type = (enum devlink_sb_pool_type) dir;
  516. pool_info->size = MLXSW_SP_CELLS_TO_BYTES(pr->size);
  517. pool_info->threshold_type = (enum devlink_sb_threshold_type) pr->mode;
  518. return 0;
  519. }
  520. int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
  521. unsigned int sb_index, u16 pool_index, u32 size,
  522. enum devlink_sb_threshold_type threshold_type)
  523. {
  524. struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
  525. u8 pool = pool_get(pool_index);
  526. enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
  527. u32 pool_size = MLXSW_SP_BYTES_TO_CELLS(size);
  528. enum mlxsw_reg_sbpr_mode mode;
  529. mode = (enum mlxsw_reg_sbpr_mode) threshold_type;
  530. return mlxsw_sp_sb_pr_write(mlxsw_sp, pool, dir, mode, pool_size);
  531. }
  532. #define MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET (-2) /* 3->1, 16->14 */
  533. static u32 mlxsw_sp_sb_threshold_out(struct mlxsw_sp *mlxsw_sp, u8 pool,
  534. enum mlxsw_reg_sbxx_dir dir, u32 max_buff)
  535. {
  536. struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);
  537. if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC)
  538. return max_buff - MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
  539. return MLXSW_SP_CELLS_TO_BYTES(max_buff);
  540. }
  541. static int mlxsw_sp_sb_threshold_in(struct mlxsw_sp *mlxsw_sp, u8 pool,
  542. enum mlxsw_reg_sbxx_dir dir, u32 threshold,
  543. u32 *p_max_buff)
  544. {
  545. struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);
  546. if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC) {
  547. int val;
  548. val = threshold + MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
  549. if (val < MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN ||
  550. val > MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX)
  551. return -EINVAL;
  552. *p_max_buff = val;
  553. } else {
  554. *p_max_buff = MLXSW_SP_BYTES_TO_CELLS(threshold);
  555. }
  556. return 0;
  557. }
  558. int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
  559. unsigned int sb_index, u16 pool_index,
  560. u32 *p_threshold)
  561. {
  562. struct mlxsw_sp_port *mlxsw_sp_port =
  563. mlxsw_core_port_driver_priv(mlxsw_core_port);
  564. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
  565. u8 local_port = mlxsw_sp_port->local_port;
  566. u8 pool = pool_get(pool_index);
  567. enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
  568. struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
  569. pool, dir);
  570. *p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, pool, dir,
  571. pm->max_buff);
  572. return 0;
  573. }
  574. int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port,
  575. unsigned int sb_index, u16 pool_index,
  576. u32 threshold)
  577. {
  578. struct mlxsw_sp_port *mlxsw_sp_port =
  579. mlxsw_core_port_driver_priv(mlxsw_core_port);
  580. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
  581. u8 local_port = mlxsw_sp_port->local_port;
  582. u8 pool = pool_get(pool_index);
  583. enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
  584. u32 max_buff;
  585. int err;
  586. err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool, dir,
  587. threshold, &max_buff);
  588. if (err)
  589. return err;
  590. return mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, pool, dir,
  591. 0, max_buff);
  592. }
  593. int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
  594. unsigned int sb_index, u16 tc_index,
  595. enum devlink_sb_pool_type pool_type,
  596. u16 *p_pool_index, u32 *p_threshold)
  597. {
  598. struct mlxsw_sp_port *mlxsw_sp_port =
  599. mlxsw_core_port_driver_priv(mlxsw_core_port);
  600. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
  601. u8 local_port = mlxsw_sp_port->local_port;
  602. u8 pg_buff = tc_index;
  603. enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
  604. struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
  605. pg_buff, dir);
  606. *p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, cm->pool, dir,
  607. cm->max_buff);
  608. *p_pool_index = pool_index_get(cm->pool, dir);
  609. return 0;
  610. }
  611. int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
  612. unsigned int sb_index, u16 tc_index,
  613. enum devlink_sb_pool_type pool_type,
  614. u16 pool_index, u32 threshold)
  615. {
  616. struct mlxsw_sp_port *mlxsw_sp_port =
  617. mlxsw_core_port_driver_priv(mlxsw_core_port);
  618. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
  619. u8 local_port = mlxsw_sp_port->local_port;
  620. u8 pg_buff = tc_index;
  621. enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
  622. u8 pool = pool_get(pool_index);
  623. u32 max_buff;
  624. int err;
  625. if (dir != dir_get(pool_index))
  626. return -EINVAL;
  627. err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool, dir,
  628. threshold, &max_buff);
  629. if (err)
  630. return err;
  631. return mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, pg_buff, dir,
  632. 0, max_buff, pool);
  633. }
  634. #define MASKED_COUNT_MAX \
  635. (MLXSW_REG_SBSR_REC_MAX_COUNT / (MLXSW_SP_SB_TC_COUNT * 2))
  636. struct mlxsw_sp_sb_sr_occ_query_cb_ctx {
  637. u8 masked_count;
  638. u8 local_port_1;
  639. };
  640. static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core,
  641. char *sbsr_pl, size_t sbsr_pl_len,
  642. unsigned long cb_priv)
  643. {
  644. struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
  645. struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
  646. u8 masked_count;
  647. u8 local_port;
  648. int rec_index = 0;
  649. struct mlxsw_sp_sb_cm *cm;
  650. int i;
  651. memcpy(&cb_ctx, &cb_priv, sizeof(cb_ctx));
  652. masked_count = 0;
  653. for (local_port = cb_ctx.local_port_1;
  654. local_port < MLXSW_PORT_MAX_PORTS; local_port++) {
  655. if (!mlxsw_sp->ports[local_port])
  656. continue;
  657. for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
  658. cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
  659. MLXSW_REG_SBXX_DIR_INGRESS);
  660. mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
  661. &cm->occ.cur, &cm->occ.max);
  662. }
  663. if (++masked_count == cb_ctx.masked_count)
  664. break;
  665. }
  666. masked_count = 0;
  667. for (local_port = cb_ctx.local_port_1;
  668. local_port < MLXSW_PORT_MAX_PORTS; local_port++) {
  669. if (!mlxsw_sp->ports[local_port])
  670. continue;
  671. for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
  672. cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
  673. MLXSW_REG_SBXX_DIR_EGRESS);
  674. mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
  675. &cm->occ.cur, &cm->occ.max);
  676. }
  677. if (++masked_count == cb_ctx.masked_count)
  678. break;
  679. }
  680. }
  681. int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
  682. unsigned int sb_index)
  683. {
  684. struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
  685. struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
  686. unsigned long cb_priv;
  687. LIST_HEAD(bulk_list);
  688. char *sbsr_pl;
  689. u8 masked_count;
  690. u8 local_port_1;
  691. u8 local_port = 0;
  692. int i;
  693. int err;
  694. int err2;
  695. sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
  696. if (!sbsr_pl)
  697. return -ENOMEM;
  698. next_batch:
  699. local_port++;
  700. local_port_1 = local_port;
  701. masked_count = 0;
  702. mlxsw_reg_sbsr_pack(sbsr_pl, false);
  703. for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
  704. mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
  705. mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
  706. }
  707. for (; local_port < MLXSW_PORT_MAX_PORTS; local_port++) {
  708. if (!mlxsw_sp->ports[local_port])
  709. continue;
  710. mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1);
  711. mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
  712. for (i = 0; i < MLXSW_SP_SB_POOL_COUNT; i++) {
  713. err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i,
  714. MLXSW_REG_SBXX_DIR_INGRESS,
  715. &bulk_list);
  716. if (err)
  717. goto out;
  718. err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i,
  719. MLXSW_REG_SBXX_DIR_EGRESS,
  720. &bulk_list);
  721. if (err)
  722. goto out;
  723. }
  724. if (++masked_count == MASKED_COUNT_MAX)
  725. goto do_query;
  726. }
  727. do_query:
  728. cb_ctx.masked_count = masked_count;
  729. cb_ctx.local_port_1 = local_port_1;
  730. memcpy(&cb_priv, &cb_ctx, sizeof(cb_ctx));
  731. err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
  732. &bulk_list, mlxsw_sp_sb_sr_occ_query_cb,
  733. cb_priv);
  734. if (err)
  735. goto out;
  736. if (local_port < MLXSW_PORT_MAX_PORTS)
  737. goto next_batch;
  738. out:
  739. err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
  740. if (!err)
  741. err = err2;
  742. kfree(sbsr_pl);
  743. return err;
  744. }
  745. int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
  746. unsigned int sb_index)
  747. {
  748. struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
  749. LIST_HEAD(bulk_list);
  750. char *sbsr_pl;
  751. unsigned int masked_count;
  752. u8 local_port = 0;
  753. int i;
  754. int err;
  755. int err2;
  756. sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
  757. if (!sbsr_pl)
  758. return -ENOMEM;
  759. next_batch:
  760. local_port++;
  761. masked_count = 0;
  762. mlxsw_reg_sbsr_pack(sbsr_pl, true);
  763. for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
  764. mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
  765. mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
  766. }
  767. for (; local_port < MLXSW_PORT_MAX_PORTS; local_port++) {
  768. if (!mlxsw_sp->ports[local_port])
  769. continue;
  770. mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1);
  771. mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
  772. for (i = 0; i < MLXSW_SP_SB_POOL_COUNT; i++) {
  773. err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i,
  774. MLXSW_REG_SBXX_DIR_INGRESS,
  775. &bulk_list);
  776. if (err)
  777. goto out;
  778. err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i,
  779. MLXSW_REG_SBXX_DIR_EGRESS,
  780. &bulk_list);
  781. if (err)
  782. goto out;
  783. }
  784. if (++masked_count == MASKED_COUNT_MAX)
  785. goto do_query;
  786. }
  787. do_query:
  788. err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
  789. &bulk_list, NULL, 0);
  790. if (err)
  791. goto out;
  792. if (local_port < MLXSW_PORT_MAX_PORTS)
  793. goto next_batch;
  794. out:
  795. err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
  796. if (!err)
  797. err = err2;
  798. kfree(sbsr_pl);
  799. return err;
  800. }
  801. int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
  802. unsigned int sb_index, u16 pool_index,
  803. u32 *p_cur, u32 *p_max)
  804. {
  805. struct mlxsw_sp_port *mlxsw_sp_port =
  806. mlxsw_core_port_driver_priv(mlxsw_core_port);
  807. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
  808. u8 local_port = mlxsw_sp_port->local_port;
  809. u8 pool = pool_get(pool_index);
  810. enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
  811. struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
  812. pool, dir);
  813. *p_cur = MLXSW_SP_CELLS_TO_BYTES(pm->occ.cur);
  814. *p_max = MLXSW_SP_CELLS_TO_BYTES(pm->occ.max);
  815. return 0;
  816. }
  817. int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
  818. unsigned int sb_index, u16 tc_index,
  819. enum devlink_sb_pool_type pool_type,
  820. u32 *p_cur, u32 *p_max)
  821. {
  822. struct mlxsw_sp_port *mlxsw_sp_port =
  823. mlxsw_core_port_driver_priv(mlxsw_core_port);
  824. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
  825. u8 local_port = mlxsw_sp_port->local_port;
  826. u8 pg_buff = tc_index;
  827. enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
  828. struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
  829. pg_buff, dir);
  830. *p_cur = MLXSW_SP_CELLS_TO_BYTES(cm->occ.cur);
  831. *p_max = MLXSW_SP_CELLS_TO_BYTES(cm->occ.max);
  832. return 0;
  833. }