  1. // SPDX-License-Identifier: GPL-2.0
  2. /* Copyright (c) 2018, Intel Corporation. */
  3. #include "ice_sched.h"
  4. /**
  5. * ice_sched_add_root_node - Insert the Tx scheduler root node in SW DB
  6. * @pi: port information structure
  7. * @info: Scheduler element information from firmware
  8. *
  9. * This function inserts the root node of the scheduling tree topology
  10. * to the SW DB.
  11. */
  12. static enum ice_status
  13. ice_sched_add_root_node(struct ice_port_info *pi,
  14. struct ice_aqc_txsched_elem_data *info)
  15. {
  16. struct ice_sched_node *root;
  17. struct ice_hw *hw;
  18. u16 max_children;
  19. if (!pi)
  20. return ICE_ERR_PARAM;
  21. hw = pi->hw;
  22. root = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*root), GFP_KERNEL);
  23. if (!root)
  24. return ICE_ERR_NO_MEMORY;
  25. max_children = le16_to_cpu(hw->layer_info[0].max_children);
  26. root->children = devm_kcalloc(ice_hw_to_dev(hw), max_children,
  27. sizeof(*root), GFP_KERNEL);
  28. if (!root->children) {
  29. devm_kfree(ice_hw_to_dev(hw), root);
  30. return ICE_ERR_NO_MEMORY;
  31. }
  32. memcpy(&root->info, info, sizeof(*info));
  33. pi->root = root;
  34. return 0;
  35. }
  36. /**
  37. * ice_sched_find_node_by_teid - Find the Tx scheduler node in SW DB
  38. * @start_node: pointer to the starting ice_sched_node struct in a sub-tree
  39. * @teid: node teid to search
  40. *
  41. * This function searches for a node matching the teid in the scheduling tree
  42. * from the SW DB. The search is recursive and is restricted by the number of
  43. * layers it has searched through; stopping at the max supported layer.
  44. *
  45. * This function needs to be called when holding the port_info->sched_lock
  46. */
  47. struct ice_sched_node *
  48. ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid)
  49. {
  50. u16 i;
  51. /* The TEID is same as that of the start_node */
  52. if (ICE_TXSCHED_GET_NODE_TEID(start_node) == teid)
  53. return start_node;
  54. /* The node has no children or is at the max layer */
  55. if (!start_node->num_children ||
  56. start_node->tx_sched_layer >= ICE_AQC_TOPO_MAX_LEVEL_NUM ||
  57. start_node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF)
  58. return NULL;
  59. /* Check if teid matches to any of the children nodes */
  60. for (i = 0; i < start_node->num_children; i++)
  61. if (ICE_TXSCHED_GET_NODE_TEID(start_node->children[i]) == teid)
  62. return start_node->children[i];
  63. /* Search within each child's sub-tree */
  64. for (i = 0; i < start_node->num_children; i++) {
  65. struct ice_sched_node *tmp;
  66. tmp = ice_sched_find_node_by_teid(start_node->children[i],
  67. teid);
  68. if (tmp)
  69. return tmp;
  70. }
  71. return NULL;
  72. }
  73. /**
  74. * ice_sched_add_node - Insert the Tx scheduler node in SW DB
  75. * @pi: port information structure
  76. * @layer: Scheduler layer of the node
  77. * @info: Scheduler element information from firmware
  78. *
  79. * This function inserts a scheduler node to the SW DB.
  80. */
  81. enum ice_status
  82. ice_sched_add_node(struct ice_port_info *pi, u8 layer,
  83. struct ice_aqc_txsched_elem_data *info)
  84. {
  85. struct ice_sched_node *parent;
  86. struct ice_sched_node *node;
  87. struct ice_hw *hw;
  88. u16 max_children;
  89. if (!pi)
  90. return ICE_ERR_PARAM;
  91. hw = pi->hw;
  92. /* A valid parent node should be there */
  93. parent = ice_sched_find_node_by_teid(pi->root,
  94. le32_to_cpu(info->parent_teid));
  95. if (!parent) {
  96. ice_debug(hw, ICE_DBG_SCHED,
  97. "Parent Node not found for parent_teid=0x%x\n",
  98. le32_to_cpu(info->parent_teid));
  99. return ICE_ERR_PARAM;
  100. }
  101. node = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*node), GFP_KERNEL);
  102. if (!node)
  103. return ICE_ERR_NO_MEMORY;
  104. max_children = le16_to_cpu(hw->layer_info[layer].max_children);
  105. if (max_children) {
  106. node->children = devm_kcalloc(ice_hw_to_dev(hw), max_children,
  107. sizeof(*node), GFP_KERNEL);
  108. if (!node->children) {
  109. devm_kfree(ice_hw_to_dev(hw), node);
  110. return ICE_ERR_NO_MEMORY;
  111. }
  112. }
  113. node->in_use = true;
  114. node->parent = parent;
  115. node->tx_sched_layer = layer;
  116. parent->children[parent->num_children++] = node;
  117. memcpy(&node->info, info, sizeof(*info));
  118. return 0;
  119. }
  120. /**
  121. * ice_aq_delete_sched_elems - delete scheduler elements
  122. * @hw: pointer to the hw struct
  123. * @grps_req: number of groups to delete
  124. * @buf: pointer to buffer
  125. * @buf_size: buffer size in bytes
  126. * @grps_del: returns total number of elements deleted
  127. * @cd: pointer to command details structure or NULL
  128. *
  129. * Delete scheduling elements (0x040F)
  130. */
  131. static enum ice_status
  132. ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req,
  133. struct ice_aqc_delete_elem *buf, u16 buf_size,
  134. u16 *grps_del, struct ice_sq_cd *cd)
  135. {
  136. struct ice_aqc_add_move_delete_elem *cmd;
  137. struct ice_aq_desc desc;
  138. enum ice_status status;
  139. cmd = &desc.params.add_move_delete_elem;
  140. ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_delete_sched_elems);
  141. desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
  142. cmd->num_grps_req = cpu_to_le16(grps_req);
  143. status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
  144. if (!status && grps_del)
  145. *grps_del = le16_to_cpu(cmd->num_grps_updated);
  146. return status;
  147. }
  148. /**
  149. * ice_sched_remove_elems - remove nodes from hw
  150. * @hw: pointer to the hw struct
  151. * @parent: pointer to the parent node
  152. * @num_nodes: number of nodes
  153. * @node_teids: array of node teids to be deleted
  154. *
  155. * This function remove nodes from hw
  156. */
  157. static enum ice_status
  158. ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
  159. u16 num_nodes, u32 *node_teids)
  160. {
  161. struct ice_aqc_delete_elem *buf;
  162. u16 i, num_groups_removed = 0;
  163. enum ice_status status;
  164. u16 buf_size;
  165. buf_size = sizeof(*buf) + sizeof(u32) * (num_nodes - 1);
  166. buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
  167. if (!buf)
  168. return ICE_ERR_NO_MEMORY;
  169. buf->hdr.parent_teid = parent->info.node_teid;
  170. buf->hdr.num_elems = cpu_to_le16(num_nodes);
  171. for (i = 0; i < num_nodes; i++)
  172. buf->teid[i] = cpu_to_le32(node_teids[i]);
  173. status = ice_aq_delete_sched_elems(hw, 1, buf, buf_size,
  174. &num_groups_removed, NULL);
  175. if (status || num_groups_removed != 1)
  176. ice_debug(hw, ICE_DBG_SCHED, "remove elements failed\n");
  177. devm_kfree(ice_hw_to_dev(hw), buf);
  178. return status;
  179. }
/**
 * ice_sched_get_first_node - get the first node of the given layer
 * @hw: pointer to the hw struct
 * @parent: pointer the base node of the subtree
 * @layer: layer number
 *
 * This function retrieves the first node of the given layer from the subtree
 */
static struct ice_sched_node *
ice_sched_get_first_node(struct ice_hw *hw, struct ice_sched_node *parent,
			 u8 layer)
{
	u8 i;

	/* layers above the SW entry point are not tracked in the SW DB */
	if (layer < hw->sw_entry_point_layer)
		return NULL;
	for (i = 0; i < parent->num_children; i++) {
		struct ice_sched_node *node = parent->children[i];

		if (node) {
			if (node->tx_sched_layer == layer)
				return node;
			/* this recursion is intentional, and wouldn't
			 * go more than 9 calls
			 */
			/* NOTE(review): this returns from the first non-NULL
			 * child's subtree without trying later siblings —
			 * presumably fine because sibling chains link all
			 * nodes of a layer within a branch; confirm.
			 */
			return ice_sched_get_first_node(hw, node, layer);
		}
	}
	return NULL;
}
  208. /**
  209. * ice_sched_get_tc_node - get pointer to TC node
  210. * @pi: port information structure
  211. * @tc: TC number
  212. *
  213. * This function returns the TC node pointer
  214. */
  215. struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc)
  216. {
  217. u8 i;
  218. if (!pi)
  219. return NULL;
  220. for (i = 0; i < pi->root->num_children; i++)
  221. if (pi->root->children[i]->tc_num == tc)
  222. return pi->root->children[i];
  223. return NULL;
  224. }
/**
 * ice_free_sched_node - Free a Tx scheduler node from SW DB
 * @pi: port information structure
 * @node: pointer to the ice_sched_node struct
 *
 * This function frees up a node from SW DB as well as from HW
 *
 * This function needs to be called with the port_info->sched_lock held
 */
void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node)
{
	struct ice_sched_node *parent;
	struct ice_hw *hw = pi->hw;
	u8 i, j;

	/* Free the children before freeing up the parent node
	 * The parent array is updated below and that shifts the nodes
	 * in the array. So always pick the first child if num children > 0
	 */
	while (node->num_children)
		ice_free_sched_node(pi, node->children[0]);

	/* Leaf, TC and root nodes can't be deleted by SW */
	if (node->tx_sched_layer >= hw->sw_entry_point_layer &&
	    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC &&
	    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT &&
	    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF) {
		u32 teid = le32_to_cpu(node->info.node_teid);
		enum ice_status status;

		/* best effort: SW DB is cleaned up below even if the HW
		 * removal fails
		 */
		status = ice_sched_remove_elems(hw, node->parent, 1, &teid);
		if (status)
			ice_debug(hw, ICE_DBG_SCHED,
				  "remove element failed %d\n", status);
	}
	parent = node->parent;
	/* root has no parent */
	if (parent) {
		struct ice_sched_node *p, *tc_node;

		/* update the parent: drop this node from the child array by
		 * shifting the remaining entries left one slot
		 */
		for (i = 0; i < parent->num_children; i++)
			if (parent->children[i] == node) {
				for (j = i + 1; j < parent->num_children; j++)
					parent->children[j - 1] =
						parent->children[j];
				parent->num_children--;
				break;
			}
		/* search for previous sibling that points to this node and
		 * remove the reference
		 */
		tc_node = ice_sched_get_tc_node(pi, node->tc_num);
		if (!tc_node) {
			ice_debug(hw, ICE_DBG_SCHED,
				  "Invalid TC number %d\n", node->tc_num);
			goto err_exit;
		}
		/* walk the sibling chain of this node's layer from its
		 * first node and splice this node out of it
		 */
		p = ice_sched_get_first_node(hw, tc_node, node->tx_sched_layer);
		while (p) {
			if (p->sibling == node) {
				p->sibling = node->sibling;
				break;
			}
			p = p->sibling;
		}
	}
err_exit:
	/* leaf nodes have no children */
	if (node->children)
		devm_kfree(ice_hw_to_dev(hw), node->children);
	devm_kfree(ice_hw_to_dev(hw), node);
}
  294. /**
  295. * ice_aq_get_dflt_topo - gets default scheduler topology
  296. * @hw: pointer to the hw struct
  297. * @lport: logical port number
  298. * @buf: pointer to buffer
  299. * @buf_size: buffer size in bytes
  300. * @num_branches: returns total number of queue to port branches
  301. * @cd: pointer to command details structure or NULL
  302. *
  303. * Get default scheduler topology (0x400)
  304. */
  305. static enum ice_status
  306. ice_aq_get_dflt_topo(struct ice_hw *hw, u8 lport,
  307. struct ice_aqc_get_topo_elem *buf, u16 buf_size,
  308. u8 *num_branches, struct ice_sq_cd *cd)
  309. {
  310. struct ice_aqc_get_topo *cmd;
  311. struct ice_aq_desc desc;
  312. enum ice_status status;
  313. cmd = &desc.params.get_topo;
  314. ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_dflt_topo);
  315. cmd->port_num = lport;
  316. status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
  317. if (!status && num_branches)
  318. *num_branches = cmd->num_branches;
  319. return status;
  320. }
  321. /**
  322. * ice_aq_add_sched_elems - adds scheduling element
  323. * @hw: pointer to the hw struct
  324. * @grps_req: the number of groups that are requested to be added
  325. * @buf: pointer to buffer
  326. * @buf_size: buffer size in bytes
  327. * @grps_added: returns total number of groups added
  328. * @cd: pointer to command details structure or NULL
  329. *
  330. * Add scheduling elements (0x0401)
  331. */
  332. static enum ice_status
  333. ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req,
  334. struct ice_aqc_add_elem *buf, u16 buf_size,
  335. u16 *grps_added, struct ice_sq_cd *cd)
  336. {
  337. struct ice_aqc_add_move_delete_elem *cmd;
  338. struct ice_aq_desc desc;
  339. enum ice_status status;
  340. cmd = &desc.params.add_move_delete_elem;
  341. ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_sched_elems);
  342. desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
  343. cmd->num_grps_req = cpu_to_le16(grps_req);
  344. status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
  345. if (!status && grps_added)
  346. *grps_added = le16_to_cpu(cmd->num_grps_updated);
  347. return status;
  348. }
  349. /**
  350. * ice_suspend_resume_elems - suspend/resume scheduler elements
  351. * @hw: pointer to the hw struct
  352. * @elems_req: number of elements to suspend
  353. * @buf: pointer to buffer
  354. * @buf_size: buffer size in bytes
  355. * @elems_ret: returns total number of elements suspended
  356. * @cd: pointer to command details structure or NULL
  357. * @cmd_code: command code for suspend or resume
  358. *
  359. * suspend/resume scheduler elements
  360. */
  361. static enum ice_status
  362. ice_suspend_resume_elems(struct ice_hw *hw, u16 elems_req,
  363. struct ice_aqc_suspend_resume_elem *buf, u16 buf_size,
  364. u16 *elems_ret, struct ice_sq_cd *cd,
  365. enum ice_adminq_opc cmd_code)
  366. {
  367. struct ice_aqc_get_cfg_elem *cmd;
  368. struct ice_aq_desc desc;
  369. enum ice_status status;
  370. cmd = &desc.params.get_update_elem;
  371. ice_fill_dflt_direct_cmd_desc(&desc, cmd_code);
  372. cmd->num_elem_req = cpu_to_le16(elems_req);
  373. desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
  374. status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
  375. if (!status && elems_ret)
  376. *elems_ret = le16_to_cpu(cmd->num_elem_resp);
  377. return status;
  378. }
  379. /**
  380. * ice_aq_suspend_sched_elems - suspend scheduler elements
  381. * @hw: pointer to the hw struct
  382. * @elems_req: number of elements to suspend
  383. * @buf: pointer to buffer
  384. * @buf_size: buffer size in bytes
  385. * @elems_ret: returns total number of elements suspended
  386. * @cd: pointer to command details structure or NULL
  387. *
  388. * Suspend scheduling elements (0x0409)
  389. */
  390. static enum ice_status
  391. ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req,
  392. struct ice_aqc_suspend_resume_elem *buf,
  393. u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
  394. {
  395. return ice_suspend_resume_elems(hw, elems_req, buf, buf_size, elems_ret,
  396. cd, ice_aqc_opc_suspend_sched_elems);
  397. }
  398. /**
  399. * ice_aq_resume_sched_elems - resume scheduler elements
  400. * @hw: pointer to the hw struct
  401. * @elems_req: number of elements to resume
  402. * @buf: pointer to buffer
  403. * @buf_size: buffer size in bytes
  404. * @elems_ret: returns total number of elements resumed
  405. * @cd: pointer to command details structure or NULL
  406. *
  407. * resume scheduling elements (0x040A)
  408. */
  409. static enum ice_status
  410. ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req,
  411. struct ice_aqc_suspend_resume_elem *buf,
  412. u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
  413. {
  414. return ice_suspend_resume_elems(hw, elems_req, buf, buf_size, elems_ret,
  415. cd, ice_aqc_opc_resume_sched_elems);
  416. }
  417. /**
  418. * ice_aq_query_sched_res - query scheduler resource
  419. * @hw: pointer to the hw struct
  420. * @buf_size: buffer size in bytes
  421. * @buf: pointer to buffer
  422. * @cd: pointer to command details structure or NULL
  423. *
  424. * Query scheduler resource allocation (0x0412)
  425. */
  426. static enum ice_status
  427. ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size,
  428. struct ice_aqc_query_txsched_res_resp *buf,
  429. struct ice_sq_cd *cd)
  430. {
  431. struct ice_aq_desc desc;
  432. ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_sched_res);
  433. return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
  434. }
/**
 * ice_sched_suspend_resume_elems - suspend or resume hw nodes
 * @hw: pointer to the hw struct
 * @num_nodes: number of nodes
 * @node_teids: array of node teids to be suspended or resumed
 * @suspend: true means suspend / false means resume
 *
 * This function suspends or resumes hw nodes
 */
static enum ice_status
ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
			       bool suspend)
{
	struct ice_aqc_suspend_resume_elem *buf;
	u16 i, buf_size, num_elem_ret = 0;
	enum ice_status status;

	buf_size = sizeof(*buf) * num_nodes;
	buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	/* NOTE(review): buf->teid[i] indexes past the first element's
	 * declared teid member, relying on the num_nodes elements being
	 * laid out as one contiguous __le32 array — confirm against the
	 * struct ice_aqc_suspend_resume_elem layout.
	 */
	for (i = 0; i < num_nodes; i++)
		buf->teid[i] = cpu_to_le32(node_teids[i]);
	if (suspend)
		status = ice_aq_suspend_sched_elems(hw, num_nodes, buf,
						    buf_size, &num_elem_ret,
						    NULL);
	else
		status = ice_aq_resume_sched_elems(hw, num_nodes, buf,
						   buf_size, &num_elem_ret,
						   NULL);
	/* FW reports how many elements it actually processed */
	if (status || num_elem_ret != num_nodes)
		ice_debug(hw, ICE_DBG_SCHED, "suspend/resume failed\n");
	devm_kfree(ice_hw_to_dev(hw), buf);
	return status;
}
/**
 * ice_sched_clear_tx_topo - clears the scheduler tree nodes
 * @pi: port information structure
 *
 * This function removes all the nodes from HW as well as from SW DB.
 */
static void ice_sched_clear_tx_topo(struct ice_port_info *pi)
{
	struct ice_sched_agg_info *agg_info;
	struct ice_sched_vsi_info *vsi_elem;
	struct ice_sched_agg_info *atmp;
	struct ice_sched_vsi_info *tmp;
	struct ice_hw *hw;

	if (!pi)
		return;
	hw = pi->hw;
	/* free the per-aggregator VSI entries.
	 * NOTE(review): the agg_info entries themselves are not deleted or
	 * freed here — presumably released elsewhere; verify there is no
	 * leak on port teardown.
	 */
	list_for_each_entry_safe(agg_info, atmp, &pi->agg_list, list_entry) {
		struct ice_sched_agg_vsi_info *agg_vsi_info;
		struct ice_sched_agg_vsi_info *vtmp;

		list_for_each_entry_safe(agg_vsi_info, vtmp,
					 &agg_info->agg_vsi_list, list_entry) {
			list_del(&agg_vsi_info->list_entry);
			devm_kfree(ice_hw_to_dev(hw), agg_vsi_info);
		}
	}
	/* remove the vsi list */
	list_for_each_entry_safe(vsi_elem, tmp, &pi->vsi_info_list,
				 list_entry) {
		list_del(&vsi_elem->list_entry);
		devm_kfree(ice_hw_to_dev(hw), vsi_elem);
	}
	/* finally free the whole node tree (recursively frees children
	 * and removes removable elements from HW)
	 */
	if (pi->root) {
		ice_free_sched_node(pi, pi->root);
		pi->root = NULL;
	}
}
/**
 * ice_sched_clear_port - clear the scheduler elements from SW DB for a port
 * @pi: port information structure
 *
 * Cleanup scheduling elements from SW DB
 */
static void ice_sched_clear_port(struct ice_port_info *pi)
{
	/* nothing to do if the port never reached the READY state */
	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return;
	/* flip the state back to INIT before tearing the tree down so other
	 * paths that check port_state back off
	 */
	pi->port_state = ICE_SCHED_PORT_STATE_INIT;
	mutex_lock(&pi->sched_lock);
	ice_sched_clear_tx_topo(pi);
	mutex_unlock(&pi->sched_lock);
	mutex_destroy(&pi->sched_lock);
}
  522. /**
  523. * ice_sched_cleanup_all - cleanup scheduler elements from SW DB for all ports
  524. * @hw: pointer to the hw struct
  525. *
  526. * Cleanup scheduling elements from SW DB for all the ports
  527. */
  528. void ice_sched_cleanup_all(struct ice_hw *hw)
  529. {
  530. if (!hw || !hw->port_info)
  531. return;
  532. if (hw->layer_info)
  533. devm_kfree(ice_hw_to_dev(hw), hw->layer_info);
  534. ice_sched_clear_port(hw->port_info);
  535. hw->num_tx_sched_layers = 0;
  536. hw->num_tx_sched_phys_layers = 0;
  537. hw->flattened_layers = 0;
  538. hw->max_cgds = 0;
  539. }
  540. /**
  541. * ice_sched_create_vsi_info_entry - create an empty new VSI entry
  542. * @pi: port information structure
  543. * @vsi_id: VSI Id
  544. *
  545. * This function creates a new VSI entry and adds it to list
  546. */
  547. static struct ice_sched_vsi_info *
  548. ice_sched_create_vsi_info_entry(struct ice_port_info *pi, u16 vsi_id)
  549. {
  550. struct ice_sched_vsi_info *vsi_elem;
  551. if (!pi)
  552. return NULL;
  553. vsi_elem = devm_kzalloc(ice_hw_to_dev(pi->hw), sizeof(*vsi_elem),
  554. GFP_KERNEL);
  555. if (!vsi_elem)
  556. return NULL;
  557. list_add(&vsi_elem->list_entry, &pi->vsi_info_list);
  558. vsi_elem->vsi_id = vsi_id;
  559. return vsi_elem;
  560. }
/**
 * ice_sched_add_elems - add nodes to hw and SW DB
 * @pi: port information structure
 * @tc_node: pointer to the branch node
 * @parent: pointer to the parent node
 * @layer: layer number to add nodes
 * @num_nodes: number of nodes
 * @num_nodes_added: pointer to num nodes added
 * @first_node_teid: if new nodes are added then return the teid of first node
 *
 * This function add nodes to hw as well as to SW DB for a given layer
 */
static enum ice_status
ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
		    struct ice_sched_node *parent, u8 layer, u16 num_nodes,
		    u16 *num_nodes_added, u32 *first_node_teid)
{
	struct ice_sched_node *prev, *new_node;
	struct ice_aqc_add_elem *buf;
	u16 i, num_groups_added = 0;
	enum ice_status status = 0;
	struct ice_hw *hw = pi->hw;
	u16 buf_size;
	u32 teid;

	/* header plus num_nodes generic elements (struct declares one) */
	buf_size = sizeof(*buf) + sizeof(*buf->generic) * (num_nodes - 1);
	buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
	if (!buf)
		return ICE_ERR_NO_MEMORY;
	buf->hdr.parent_teid = parent->info.node_teid;
	buf->hdr.num_elems = cpu_to_le16(num_nodes);
	/* describe every requested node as a generic element with default
	 * CIR/EIR rate-limit profiles
	 */
	for (i = 0; i < num_nodes; i++) {
		buf->generic[i].parent_teid = parent->info.node_teid;
		buf->generic[i].data.elem_type = ICE_AQC_ELEM_TYPE_SE_GENERIC;
		buf->generic[i].data.valid_sections =
			ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
			ICE_AQC_ELEM_VALID_EIR;
		buf->generic[i].data.generic = 0;
		buf->generic[i].data.cir_bw.bw_profile_idx =
			ICE_SCHED_DFLT_RL_PROF_ID;
		buf->generic[i].data.eir_bw.bw_profile_idx =
			ICE_SCHED_DFLT_RL_PROF_ID;
	}
	status = ice_aq_add_sched_elems(hw, 1, buf, buf_size,
					&num_groups_added, NULL);
	if (status || num_groups_added != 1) {
		ice_debug(hw, ICE_DBG_SCHED, "add elements failed\n");
		devm_kfree(ice_hw_to_dev(hw), buf);
		return ICE_ERR_CFG;
	}
	/* HW accepted everything; now mirror the nodes into the SW DB.
	 * buf->generic[i] was filled in by FW with the assigned TEIDs.
	 */
	*num_nodes_added = num_nodes;
	/* add nodes to the SW DB */
	for (i = 0; i < num_nodes; i++) {
		status = ice_sched_add_node(pi, layer, &buf->generic[i]);
		if (status) {
			ice_debug(hw, ICE_DBG_SCHED,
				  "add nodes in SW DB failed status =%d\n",
				  status);
			break;
		}
		teid = le32_to_cpu(buf->generic[i].node_teid);
		new_node = ice_sched_find_node_by_teid(parent, teid);
		if (!new_node) {
			ice_debug(hw, ICE_DBG_SCHED,
				  "Node is missing for teid =%d\n", teid);
			break;
		}
		new_node->sibling = NULL;
		new_node->tc_num = tc_node->tc_num;
		/* add it to previous node sibling pointer */
		/* Note: siblings are not linked across branches */
		prev = ice_sched_get_first_node(hw, tc_node, layer);
		if (prev && prev != new_node) {
			/* append to the tail of this layer's sibling chain */
			while (prev->sibling)
				prev = prev->sibling;
			prev->sibling = new_node;
		}
		if (i == 0)
			*first_node_teid = teid;
	}
	devm_kfree(ice_hw_to_dev(hw), buf);
	return status;
}
/**
 * ice_sched_add_nodes_to_layer - Add nodes to a given layer
 * @pi: port information structure
 * @tc_node: pointer to TC node
 * @parent: pointer to parent node
 * @layer: layer number to add nodes
 * @num_nodes: number of nodes to be added
 * @first_node_teid: pointer to the first node teid
 * @num_nodes_added: pointer to number of nodes added
 *
 * This function add nodes to a given layer. If @parent cannot hold all
 * requested children it fills the remaining slots, then recurses onto
 * @parent's sibling for the rest.
 */
static enum ice_status
ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
			     struct ice_sched_node *tc_node,
			     struct ice_sched_node *parent, u8 layer,
			     u16 num_nodes, u32 *first_node_teid,
			     u16 *num_nodes_added)
{
	u32 *first_teid_ptr = first_node_teid;
	u16 new_num_nodes, max_child_nodes;
	enum ice_status status = 0;
	struct ice_hw *hw = pi->hw;
	u16 num_added = 0;
	u32 temp;

	*num_nodes_added = 0;

	if (!num_nodes)
		return status;

	if (!parent || layer < hw->sw_entry_point_layer)
		return ICE_ERR_PARAM;

	/* max children per node per layer */
	max_child_nodes =
		le16_to_cpu(hw->layer_info[parent->tx_sched_layer].max_children);

	/* current number of children + required nodes exceed max children ? */
	if ((parent->num_children + num_nodes) > max_child_nodes) {
		/* Fail if the parent is a TC node */
		if (parent == tc_node)
			return ICE_ERR_CFG;

		/* utilize all the spaces if the parent is not full */
		if (parent->num_children < max_child_nodes) {
			new_num_nodes = max_child_nodes - parent->num_children;
			/* this recursion is intentional, and wouldn't
			 * go more than 2 calls
			 */
			status = ice_sched_add_nodes_to_layer(pi, tc_node,
							      parent, layer,
							      new_num_nodes,
							      first_node_teid,
							      &num_added);
			if (status)
				return status;

			*num_nodes_added += num_added;
		}
		/* Don't modify the first node teid memory if the first node was
		 * added already in the above call. Instead send some temp
		 * memory for all other recursive calls.
		 */
		if (num_added)
			first_teid_ptr = &temp;

		new_num_nodes = num_nodes - num_added;

		/* This parent is full, try the next sibling.
		 * A NULL sibling is caught by the !parent check at the top
		 * of the recursive call.
		 */
		parent = parent->sibling;
		/* this recursion is intentional, for 1024 queues
		 * per VSI, it goes max of 16 iterations.
		 * 1024 / 8 = 128 layer 8 nodes
		 * 128 /8 = 16 (add 8 nodes per iteration)
		 */
		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent,
						      layer, new_num_nodes,
						      first_teid_ptr,
						      &num_added);
		*num_nodes_added += num_added;
		return status;
	}

	/* parent has room for all requested nodes: add them directly */
	status = ice_sched_add_elems(pi, tc_node, parent, layer, num_nodes,
				     num_nodes_added, first_node_teid);
	return status;
}
  721. /**
  722. * ice_sched_get_qgrp_layer - get the current queue group layer number
  723. * @hw: pointer to the hw struct
  724. *
  725. * This function returns the current queue group layer number
  726. */
  727. static u8 ice_sched_get_qgrp_layer(struct ice_hw *hw)
  728. {
  729. /* It's always total layers - 1, the array is 0 relative so -2 */
  730. return hw->num_tx_sched_layers - ICE_QGRP_LAYER_OFFSET;
  731. }
  732. /**
  733. * ice_sched_get_vsi_layer - get the current VSI layer number
  734. * @hw: pointer to the hw struct
  735. *
  736. * This function returns the current VSI layer number
  737. */
  738. static u8 ice_sched_get_vsi_layer(struct ice_hw *hw)
  739. {
  740. /* Num Layers VSI layer
  741. * 9 6
  742. * 7 4
  743. * 5 or less sw_entry_point_layer
  744. */
  745. /* calculate the vsi layer based on number of layers. */
  746. if (hw->num_tx_sched_layers > ICE_VSI_LAYER_OFFSET + 1) {
  747. u8 layer = hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET;
  748. if (layer > hw->sw_entry_point_layer)
  749. return layer;
  750. }
  751. return hw->sw_entry_point_layer;
  752. }
  753. /**
  754. * ice_sched_get_num_nodes_per_layer - Get the total number of nodes per layer
  755. * @pi: pointer to the port info struct
  756. * @layer: layer number
  757. *
  758. * This function calculates the number of nodes present in the scheduler tree
  759. * including all the branches for a given layer
  760. */
  761. static u16
  762. ice_sched_get_num_nodes_per_layer(struct ice_port_info *pi, u8 layer)
  763. {
  764. struct ice_hw *hw;
  765. u16 num_nodes = 0;
  766. u8 i;
  767. if (!pi)
  768. return num_nodes;
  769. hw = pi->hw;
  770. /* Calculate the number of nodes for all TCs */
  771. for (i = 0; i < pi->root->num_children; i++) {
  772. struct ice_sched_node *tc_node, *node;
  773. tc_node = pi->root->children[i];
  774. /* Get the first node */
  775. node = ice_sched_get_first_node(hw, tc_node, layer);
  776. if (!node)
  777. continue;
  778. /* count the siblings */
  779. while (node) {
  780. num_nodes++;
  781. node = node->sibling;
  782. }
  783. }
  784. return num_nodes;
  785. }
/**
 * ice_sched_validate_for_max_nodes - check max number of nodes reached or not
 * @pi: port information structure
 * @new_num_nodes_per_layer: pointer to the new number of nodes array
 *
 * This function checks whether the scheduler tree layers have enough space to
 * add new nodes. Returns 0 when every layer between the SW entry point and
 * the queue group layer has room, ICE_ERR_CFG otherwise.
 */
static enum ice_status
ice_sched_validate_for_max_nodes(struct ice_port_info *pi,
				 u16 *new_num_nodes_per_layer)
{
	struct ice_hw *hw = pi->hw;
	u8 i, qg_layer;
	u16 num_nodes;

	qg_layer = ice_sched_get_qgrp_layer(hw);

	/* walk through all the layers from SW entry point to qgroup layer */
	for (i = hw->sw_entry_point_layer; i <= qg_layer; i++) {
		num_nodes = ice_sched_get_num_nodes_per_layer(pi, i);
		/* existing + requested must stay within the per-PF limit */
		if (num_nodes + new_num_nodes_per_layer[i] >
		    le16_to_cpu(hw->layer_info[i].max_pf_nodes)) {
			ice_debug(hw, ICE_DBG_SCHED,
				  "max nodes reached for layer = %d\n", i);
			return ICE_ERR_CFG;
		}
	}
	return 0;
}
  814. /**
  815. * ice_rm_dflt_leaf_node - remove the default leaf node in the tree
  816. * @pi: port information structure
  817. *
  818. * This function removes the leaf node that was created by the FW
  819. * during initialization
  820. */
  821. static void
  822. ice_rm_dflt_leaf_node(struct ice_port_info *pi)
  823. {
  824. struct ice_sched_node *node;
  825. node = pi->root;
  826. while (node) {
  827. if (!node->num_children)
  828. break;
  829. node = node->children[0];
  830. }
  831. if (node && node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF) {
  832. u32 teid = le32_to_cpu(node->info.node_teid);
  833. enum ice_status status;
  834. /* remove the default leaf node */
  835. status = ice_sched_remove_elems(pi->hw, node->parent, 1, &teid);
  836. if (!status)
  837. ice_free_sched_node(pi, node);
  838. }
  839. }
  840. /**
  841. * ice_sched_rm_dflt_nodes - free the default nodes in the tree
  842. * @pi: port information structure
  843. *
  844. * This function frees all the nodes except root and TC that were created by
  845. * the FW during initialization
  846. */
  847. static void
  848. ice_sched_rm_dflt_nodes(struct ice_port_info *pi)
  849. {
  850. struct ice_sched_node *node;
  851. ice_rm_dflt_leaf_node(pi);
  852. /* remove the default nodes except TC and root nodes */
  853. node = pi->root;
  854. while (node) {
  855. if (node->tx_sched_layer >= pi->hw->sw_entry_point_layer &&
  856. node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC &&
  857. node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT) {
  858. ice_free_sched_node(pi, node);
  859. break;
  860. }
  861. if (!node->num_children)
  862. break;
  863. node = node->children[0];
  864. }
  865. }
/**
 * ice_sched_init_port - Initialize scheduler by querying information from FW
 * @pi: port info structure for the tree to cleanup
 *
 * This function is the initial call to find the total number of Tx scheduler
 * resources, default topology created by firmware and storing the information
 * in SW DB.
 */
enum ice_status ice_sched_init_port(struct ice_port_info *pi)
{
	struct ice_aqc_get_topo_elem *buf;
	enum ice_status status;
	struct ice_hw *hw;
	u8 num_branches;
	u16 num_elems;
	u8 i, j;

	if (!pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;

	/* Query the Default Topology from FW */
	buf = devm_kcalloc(ice_hw_to_dev(hw), ICE_TXSCHED_MAX_BRANCHES,
			   sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	/* Query default scheduling tree topology */
	status = ice_aq_get_dflt_topo(hw, pi->lport, buf,
				      sizeof(*buf) * ICE_TXSCHED_MAX_BRANCHES,
				      &num_branches, NULL);
	if (status)
		goto err_init_port;

	/* num_branches should be between 1-8 */
	if (num_branches < 1 || num_branches > ICE_TXSCHED_MAX_BRANCHES) {
		ice_debug(hw, ICE_DBG_SCHED, "num_branches unexpected %d\n",
			  num_branches);
		status = ICE_ERR_PARAM;
		goto err_init_port;
	}

	/* get the number of elements on the default/first branch */
	num_elems = le16_to_cpu(buf[0].hdr.num_elems);

	/* num_elems should always be between 1-9 */
	if (num_elems < 1 || num_elems > ICE_AQC_TOPO_MAX_LEVEL_NUM) {
		ice_debug(hw, ICE_DBG_SCHED, "num_elems unexpected %d\n",
			  num_elems);
		status = ICE_ERR_PARAM;
		goto err_init_port;
	}

	/* If the last node is a leaf node then the index of the Q group
	 * layer is two less than the number of elements.
	 */
	if (num_elems > 2 && buf[0].generic[num_elems - 1].data.elem_type ==
	    ICE_AQC_ELEM_TYPE_LEAF)
		pi->last_node_teid =
			le32_to_cpu(buf[0].generic[num_elems - 2].node_teid);
	else
		pi->last_node_teid =
			le32_to_cpu(buf[0].generic[num_elems - 1].node_teid);

	/* Insert the Tx Sched root node */
	status = ice_sched_add_root_node(pi, &buf[0].generic[0]);
	if (status)
		goto err_init_port;

	/* Parse the default tree and cache the information */
	for (i = 0; i < num_branches; i++) {
		num_elems = le16_to_cpu(buf[i].hdr.num_elems);

		/* Skip root element as already inserted */
		for (j = 1; j < num_elems; j++) {
			/* update the sw entry point.
			 * NOTE(review): this scan always reads branch 0 even
			 * while inserting from branch i - presumably the
			 * entry-point layer is identical across branches;
			 * confirm against the topology AQ definition.
			 */
			if (buf[0].generic[j].data.elem_type ==
			    ICE_AQC_ELEM_TYPE_ENTRY_POINT)
				hw->sw_entry_point_layer = j;

			status = ice_sched_add_node(pi, j, &buf[i].generic[j]);
			if (status)
				goto err_init_port;
		}
	}

	/* Remove the default nodes. */
	if (pi->root)
		ice_sched_rm_dflt_nodes(pi);

	/* initialize the port for handling the scheduler tree */
	pi->port_state = ICE_SCHED_PORT_STATE_READY;
	mutex_init(&pi->sched_lock);
	INIT_LIST_HEAD(&pi->agg_list);
	INIT_LIST_HEAD(&pi->vsi_info_list);

	/* deliberate fall-through: on success status is 0, so the error
	 * cleanup below is a no-op and only the buffer is freed
	 */
err_init_port:
	if (status && pi->root) {
		ice_free_sched_node(pi, pi->root);
		pi->root = NULL;
	}

	devm_kfree(ice_hw_to_dev(hw), buf);
	return status;
}
/**
 * ice_sched_query_res_alloc - query the FW for num of logical sched layers
 * @hw: pointer to the HW struct
 *
 * query FW for allocated scheduler resources and store in HW struct.
 * A no-op (returns 0) when the layer info has already been cached.
 */
enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw)
{
	struct ice_aqc_query_txsched_res_resp *buf;
	enum ice_status status = 0;

	/* already queried and cached - nothing to do */
	if (hw->layer_info)
		return status;

	buf = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	status = ice_aq_query_sched_res(hw, sizeof(*buf), buf, NULL);
	if (status)
		goto sched_query_out;

	/* cache the FW-reported tree geometry in the HW struct */
	hw->num_tx_sched_layers = le16_to_cpu(buf->sched_props.logical_levels);
	hw->num_tx_sched_phys_layers =
		le16_to_cpu(buf->sched_props.phys_levels);
	hw->flattened_layers = buf->sched_props.flattening_bitmap;
	hw->max_cgds = buf->sched_props.max_pf_cgds;

	/* keep a private copy of the per-layer properties */
	hw->layer_info = devm_kmemdup(ice_hw_to_dev(hw), buf->layer_props,
				      (hw->num_tx_sched_layers *
				       sizeof(*hw->layer_info)),
				      GFP_KERNEL);
	if (!hw->layer_info) {
		status = ICE_ERR_NO_MEMORY;
		goto sched_query_out;
	}

sched_query_out:
	devm_kfree(ice_hw_to_dev(hw), buf);
	return status;
}
  991. /**
  992. * ice_sched_get_vsi_info_entry - Get the vsi entry list for given vsi_id
  993. * @pi: port information structure
  994. * @vsi_id: vsi id
  995. *
  996. * This function retrieves the vsi list for the given vsi id
  997. */
  998. static struct ice_sched_vsi_info *
  999. ice_sched_get_vsi_info_entry(struct ice_port_info *pi, u16 vsi_id)
  1000. {
  1001. struct ice_sched_vsi_info *list_elem;
  1002. if (!pi)
  1003. return NULL;
  1004. list_for_each_entry(list_elem, &pi->vsi_info_list, list_entry)
  1005. if (list_elem->vsi_id == vsi_id)
  1006. return list_elem;
  1007. return NULL;
  1008. }
  1009. /**
  1010. * ice_sched_find_node_in_subtree - Find node in part of base node subtree
  1011. * @hw: pointer to the hw struct
  1012. * @base: pointer to the base node
  1013. * @node: pointer to the node to search
  1014. *
  1015. * This function checks whether a given node is part of the base node
  1016. * subtree or not
  1017. */
  1018. static bool
  1019. ice_sched_find_node_in_subtree(struct ice_hw *hw, struct ice_sched_node *base,
  1020. struct ice_sched_node *node)
  1021. {
  1022. u8 i;
  1023. for (i = 0; i < base->num_children; i++) {
  1024. struct ice_sched_node *child = base->children[i];
  1025. if (node == child)
  1026. return true;
  1027. if (child->tx_sched_layer > node->tx_sched_layer)
  1028. return false;
  1029. /* this recursion is intentional, and wouldn't
  1030. * go more than 8 calls
  1031. */
  1032. if (ice_sched_find_node_in_subtree(hw, child, node))
  1033. return true;
  1034. }
  1035. return false;
  1036. }
  1037. /**
  1038. * ice_sched_get_free_qparent - Get a free lan or rdma q group node
  1039. * @pi: port information structure
  1040. * @vsi_id: vsi id
  1041. * @tc: branch number
  1042. * @owner: lan or rdma
  1043. *
  1044. * This function retrieves a free lan or rdma q group node
  1045. */
  1046. struct ice_sched_node *
  1047. ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_id, u8 tc,
  1048. u8 owner)
  1049. {
  1050. struct ice_sched_node *vsi_node, *qgrp_node = NULL;
  1051. struct ice_sched_vsi_info *list_elem;
  1052. u16 max_children;
  1053. u8 qgrp_layer;
  1054. qgrp_layer = ice_sched_get_qgrp_layer(pi->hw);
  1055. max_children = le16_to_cpu(pi->hw->layer_info[qgrp_layer].max_children);
  1056. list_elem = ice_sched_get_vsi_info_entry(pi, vsi_id);
  1057. if (!list_elem)
  1058. goto lan_q_exit;
  1059. vsi_node = list_elem->vsi_node[tc];
  1060. /* validate invalid VSI id */
  1061. if (!vsi_node)
  1062. goto lan_q_exit;
  1063. /* get the first q group node from VSI sub-tree */
  1064. qgrp_node = ice_sched_get_first_node(pi->hw, vsi_node, qgrp_layer);
  1065. while (qgrp_node) {
  1066. /* make sure the qgroup node is part of the VSI subtree */
  1067. if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node))
  1068. if (qgrp_node->num_children < max_children &&
  1069. qgrp_node->owner == owner)
  1070. break;
  1071. qgrp_node = qgrp_node->sibling;
  1072. }
  1073. lan_q_exit:
  1074. return qgrp_node;
  1075. }
  1076. /**
  1077. * ice_sched_get_vsi_node - Get a VSI node based on VSI id
  1078. * @hw: pointer to the hw struct
  1079. * @tc_node: pointer to the TC node
  1080. * @vsi_id: VSI id
  1081. *
  1082. * This function retrieves a VSI node for a given VSI id from a given
  1083. * TC branch
  1084. */
  1085. static struct ice_sched_node *
  1086. ice_sched_get_vsi_node(struct ice_hw *hw, struct ice_sched_node *tc_node,
  1087. u16 vsi_id)
  1088. {
  1089. struct ice_sched_node *node;
  1090. u8 vsi_layer;
  1091. vsi_layer = ice_sched_get_vsi_layer(hw);
  1092. node = ice_sched_get_first_node(hw, tc_node, vsi_layer);
  1093. /* Check whether it already exists */
  1094. while (node) {
  1095. if (node->vsi_id == vsi_id)
  1096. return node;
  1097. node = node->sibling;
  1098. }
  1099. return node;
  1100. }
  1101. /**
  1102. * ice_sched_calc_vsi_child_nodes - calculate number of VSI child nodes
  1103. * @hw: pointer to the hw struct
  1104. * @num_qs: number of queues
  1105. * @num_nodes: num nodes array
  1106. *
  1107. * This function calculates the number of VSI child nodes based on the
  1108. * number of queues.
  1109. */
  1110. static void
  1111. ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes)
  1112. {
  1113. u16 num = num_qs;
  1114. u8 i, qgl, vsil;
  1115. qgl = ice_sched_get_qgrp_layer(hw);
  1116. vsil = ice_sched_get_vsi_layer(hw);
  1117. /* calculate num nodes from q group to VSI layer */
  1118. for (i = qgl; i > vsil; i--) {
  1119. u16 max_children = le16_to_cpu(hw->layer_info[i].max_children);
  1120. /* round to the next integer if there is a remainder */
  1121. num = DIV_ROUND_UP(num, max_children);
  1122. /* need at least one node */
  1123. num_nodes[i] = num ? num : 1;
  1124. }
  1125. }
/**
 * ice_sched_add_vsi_child_nodes - add VSI child nodes to tree
 * @pi: port information structure
 * @vsi_id: VSI id
 * @tc_node: pointer to the TC node
 * @num_nodes: pointer to the num nodes that needs to be added per layer
 * @owner: node owner (lan or rdma)
 *
 * This function adds the VSI child nodes to tree. It gets called for
 * lan and rdma separately.
 */
static enum ice_status
ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id,
			      struct ice_sched_node *tc_node, u16 *num_nodes,
			      u8 owner)
{
	struct ice_sched_node *parent, *node;
	struct ice_hw *hw = pi->hw;
	enum ice_status status;
	u32 first_node_teid;
	u16 num_added = 0;
	u8 i, qgl, vsil;

	/* make sure the tree still has room before touching it */
	status = ice_sched_validate_for_max_nodes(pi, num_nodes);
	if (status)
		return status;

	qgl = ice_sched_get_qgrp_layer(hw);
	vsil = ice_sched_get_vsi_layer(hw);
	parent = ice_sched_get_vsi_node(hw, tc_node, vsi_id);
	/* build each layer below the VSI node, top-down */
	for (i = vsil + 1; i <= qgl; i++) {
		if (!parent)
			return ICE_ERR_CFG;
		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
						      num_nodes[i],
						      &first_node_teid,
						      &num_added);
		/* partial adds are treated as configuration failure */
		if (status || num_nodes[i] != num_added)
			return ICE_ERR_CFG;

		/* The newly added node can be a new parent for the next
		 * layer nodes
		 */
		if (num_added) {
			parent = ice_sched_find_node_by_teid(tc_node,
							     first_node_teid);
			/* tag the new node and its siblings with the owner */
			node = parent;
			while (node) {
				node->owner = owner;
				node = node->sibling;
			}
		} else {
			/* NOTE(review): assumes an existing first child when
			 * nothing was added at this layer - confirm that
			 * num_nodes[i] == 0 implies children[0] is valid
			 */
			parent = parent->children[0];
		}
	}

	return 0;
}
  1180. /**
  1181. * ice_sched_rm_vsi_child_nodes - remove VSI child nodes from the tree
  1182. * @pi: port information structure
  1183. * @vsi_node: pointer to the VSI node
  1184. * @num_nodes: pointer to the num nodes that needs to be removed per layer
  1185. * @owner: node owner (lan or rdma)
  1186. *
  1187. * This function removes the VSI child nodes from the tree. It gets called for
  1188. * lan and rdma separately.
  1189. */
  1190. static void
  1191. ice_sched_rm_vsi_child_nodes(struct ice_port_info *pi,
  1192. struct ice_sched_node *vsi_node, u16 *num_nodes,
  1193. u8 owner)
  1194. {
  1195. struct ice_sched_node *node, *next;
  1196. u8 i, qgl, vsil;
  1197. u16 num;
  1198. qgl = ice_sched_get_qgrp_layer(pi->hw);
  1199. vsil = ice_sched_get_vsi_layer(pi->hw);
  1200. for (i = qgl; i > vsil; i--) {
  1201. num = num_nodes[i];
  1202. node = ice_sched_get_first_node(pi->hw, vsi_node, i);
  1203. while (node && num) {
  1204. next = node->sibling;
  1205. if (node->owner == owner && !node->num_children) {
  1206. ice_free_sched_node(pi, node);
  1207. num--;
  1208. }
  1209. node = next;
  1210. }
  1211. }
  1212. }
/**
 * ice_sched_calc_vsi_support_nodes - calculate number of VSI support nodes
 * @hw: pointer to the hw struct
 * @tc_node: pointer to TC node
 * @num_nodes: pointer to num nodes array
 *
 * This function calculates the number of supported nodes needed to add this
 * VSI into tx tree including the VSI, parent and intermediate nodes in below
 * layers
 */
static void
ice_sched_calc_vsi_support_nodes(struct ice_hw *hw,
				 struct ice_sched_node *tc_node, u16 *num_nodes)
{
	struct ice_sched_node *node;
	u16 max_child;
	u8 i, vsil;

	vsil = ice_sched_get_vsi_layer(hw);
	/* NOTE(review): i is u8 and the loop runs while
	 * i >= hw->sw_entry_point_layer; if sw_entry_point_layer could ever
	 * be 0, i-- wraps at 0 and the loop never terminates - confirm the
	 * entry point layer is always > 0 for this device.
	 */
	for (i = vsil; i >= hw->sw_entry_point_layer; i--)
		/* Add intermediate nodes if TC has no children and
		 * need at least one node for VSI
		 */
		if (!tc_node->num_children || i == vsil) {
			num_nodes[i]++;
		} else {
			/* If intermediate nodes are reached max children
			 * then add a new one.
			 */
			node = ice_sched_get_first_node(hw, tc_node, i);
			max_child = le16_to_cpu(hw->layer_info[i].max_children);

			/* scan all the siblings */
			while (node) {
				if (node->num_children < max_child)
					break;
				node = node->sibling;
			}

			/* all the nodes are full, allocate a new one */
			if (!node)
				num_nodes[i]++;
		}
}
/**
 * ice_sched_add_vsi_support_nodes - add VSI supported nodes into tx tree
 * @pi: port information structure
 * @vsi_id: VSI Id
 * @tc_node: pointer to TC node
 * @num_nodes: pointer to num nodes array
 *
 * This function adds the VSI supported nodes into tx tree including the
 * VSI, its parent and intermediate nodes in below layers
 */
static enum ice_status
ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_id,
				struct ice_sched_node *tc_node, u16 *num_nodes)
{
	struct ice_sched_node *parent = tc_node;
	enum ice_status status;
	u32 first_node_teid;
	u16 num_added = 0;
	u8 i, vsil;

	if (!pi)
		return ICE_ERR_PARAM;

	/* verify the tree has room before adding anything */
	status = ice_sched_validate_for_max_nodes(pi, num_nodes);
	if (status)
		return status;

	vsil = ice_sched_get_vsi_layer(pi->hw);
	/* build each layer from the SW entry point down to the VSI layer */
	for (i = pi->hw->sw_entry_point_layer; i <= vsil; i++) {
		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent,
						      i, num_nodes[i],
						      &first_node_teid,
						      &num_added);
		/* partial adds are treated as configuration failure */
		if (status || num_nodes[i] != num_added)
			return ICE_ERR_CFG;

		/* The newly added node can be a new parent for the next
		 * layer nodes
		 */
		if (num_added)
			parent = ice_sched_find_node_by_teid(tc_node,
							     first_node_teid);
		else
			parent = parent->children[0];

		if (!parent)
			return ICE_ERR_CFG;

		/* the bottom layer is the VSI node itself - tag it */
		if (i == vsil)
			parent->vsi_id = vsi_id;
	}
	return 0;
}
  1301. /**
  1302. * ice_sched_add_vsi_to_topo - add a new VSI into tree
  1303. * @pi: port information structure
  1304. * @vsi_id: VSI Id
  1305. * @tc: TC number
  1306. *
  1307. * This function adds a new VSI into scheduler tree
  1308. */
  1309. static enum ice_status
  1310. ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_id, u8 tc)
  1311. {
  1312. u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
  1313. struct ice_sched_node *tc_node;
  1314. struct ice_hw *hw = pi->hw;
  1315. tc_node = ice_sched_get_tc_node(pi, tc);
  1316. if (!tc_node)
  1317. return ICE_ERR_PARAM;
  1318. /* calculate number of supported nodes needed for this VSI */
  1319. ice_sched_calc_vsi_support_nodes(hw, tc_node, num_nodes);
  1320. /* add vsi supported nodes to tc subtree */
  1321. return ice_sched_add_vsi_support_nodes(pi, vsi_id, tc_node, num_nodes);
  1322. }
/**
 * ice_sched_update_vsi_child_nodes - update VSI child nodes
 * @pi: port information structure
 * @vsi_id: VSI Id
 * @tc: TC number
 * @new_numqs: new number of max queues
 * @owner: owner of this subtree
 *
 * This function updates the VSI child nodes based on the number of queues.
 * Only the LAN owner is supported here; any other owner returns
 * ICE_ERR_PARAM.
 */
static enum ice_status
ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id, u8 tc,
				 u16 new_numqs, u8 owner)
{
	u16 prev_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
	u16 new_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
	struct ice_sched_node *vsi_node;
	struct ice_sched_node *tc_node;
	struct ice_sched_vsi_info *vsi;
	enum ice_status status = 0;
	struct ice_hw *hw = pi->hw;
	u16 prev_numqs;
	u8 i;

	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		return ICE_ERR_CFG;

	vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_id);
	if (!vsi_node)
		return ICE_ERR_CFG;

	vsi = ice_sched_get_vsi_info_entry(pi, vsi_id);
	if (!vsi)
		return ICE_ERR_CFG;

	if (owner == ICE_SCHED_NODE_OWNER_LAN)
		prev_numqs = vsi->max_lanq[tc];
	else
		return ICE_ERR_PARAM;

	/* num queues are not changed */
	if (prev_numqs == new_numqs)
		return status;

	/* calculate number of nodes based on prev/new number of qs */
	if (prev_numqs)
		ice_sched_calc_vsi_child_nodes(hw, prev_numqs, prev_num_nodes);

	if (new_numqs)
		ice_sched_calc_vsi_child_nodes(hw, new_numqs, new_num_nodes);

	if (prev_numqs > new_numqs) {
		/* shrinking: remove the per-layer surplus */
		for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++)
			new_num_nodes[i] = prev_num_nodes[i] - new_num_nodes[i];

		ice_sched_rm_vsi_child_nodes(pi, vsi_node, new_num_nodes,
					     owner);
	} else {
		/* growing: add only the per-layer delta */
		for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++)
			new_num_nodes[i] -= prev_num_nodes[i];

		status = ice_sched_add_vsi_child_nodes(pi, vsi_id, tc_node,
						       new_num_nodes, owner);
		if (status)
			return status;
	}

	/* remember the new queue count only after the tree was updated */
	vsi->max_lanq[tc] = new_numqs;

	return status;
}
/**
 * ice_sched_cfg_vsi - configure the new/exisiting VSI
 * @pi: port information structure
 * @vsi_id: VSI Id
 * @tc: TC number
 * @maxqs: max number of queues
 * @owner: lan or rdma
 * @enable: TC enabled or disabled
 *
 * This function adds/updates VSI nodes based on the number of queues. If TC is
 * enabled and VSI is in suspended state then resume the VSI back. If TC is
 * disabled then suspend the VSI if it is not already.
 */
enum ice_status
ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_id, u8 tc, u16 maxqs,
		  u8 owner, bool enable)
{
	struct ice_sched_node *vsi_node, *tc_node;
	struct ice_sched_vsi_info *vsi;
	enum ice_status status = 0;
	struct ice_hw *hw = pi->hw;

	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		return ICE_ERR_PARAM;

	/* look up the VSI info entry, creating it on first use */
	vsi = ice_sched_get_vsi_info_entry(pi, vsi_id);
	if (!vsi)
		vsi = ice_sched_create_vsi_info_entry(pi, vsi_id);
	if (!vsi)
		return ICE_ERR_NO_MEMORY;

	vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_id);

	/* suspend the VSI if tc is not enabled */
	if (!enable) {
		if (vsi_node && vsi_node->in_use) {
			u32 teid = le32_to_cpu(vsi_node->info.node_teid);

			status = ice_sched_suspend_resume_elems(hw, 1, &teid,
								true);
			if (!status)
				vsi_node->in_use = false;
		}
		return status;
	}

	/* TC is enabled, if it is a new VSI then add it to the tree */
	if (!vsi_node) {
		status = ice_sched_add_vsi_to_topo(pi, vsi_id, tc);
		if (status)
			return status;

		/* re-fetch the node that the add above created */
		vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_id);
		if (!vsi_node)
			return ICE_ERR_CFG;

		vsi->vsi_node[tc] = vsi_node;
		vsi_node->in_use = true;
	}

	/* update the VSI child nodes */
	status = ice_sched_update_vsi_child_nodes(pi, vsi_id, tc, maxqs, owner);
	if (status)
		return status;

	/* TC is enabled, resume the VSI if it is in the suspend state */
	if (!vsi_node->in_use) {
		u32 teid = le32_to_cpu(vsi_node->info.node_teid);

		status = ice_sched_suspend_resume_elems(hw, 1, &teid, false);
		if (!status)
			vsi_node->in_use = true;
	}

	return status;
}