ice_switch.c 56 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893
  1. // SPDX-License-Identifier: GPL-2.0
  2. /* Copyright (c) 2018, Intel Corporation. */
  3. #include "ice_switch.h"
  4. #define ICE_ETH_DA_OFFSET 0
  5. #define ICE_ETH_ETHTYPE_OFFSET 12
  6. #define ICE_ETH_VLAN_TCI_OFFSET 14
  7. #define ICE_MAX_VLAN_ID 0xFFF
  8. /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
  9. * struct to configure any switch filter rules.
  10. * {DA (6 bytes), SA(6 bytes),
  11. * Ether type (2 bytes for header without VLAN tag) OR
  12. * VLAN tag (4 bytes for header with VLAN tag) }
  13. *
  14. * Word on Hardcoded values
  15. * byte 0 = 0x2: to identify it as locally administered DA MAC
  16. * byte 6 = 0x2: to identify it as locally administered SA MAC
  17. * byte 12 = 0x81 & byte 13 = 0x00:
  18. * In case of VLAN filter first two bytes defines ether type (0x8100)
  19. * and remaining two bytes are placeholder for programming a given VLAN id
  20. * In case of Ether type filter it is treated as header without VLAN tag
  21. * and byte 12 and 13 is used to program a given Ether type instead
  22. */
#define DUMMY_ETH_HDR_LEN 16
/* Template L2 header laid out as described above:
 * DA(6) | SA(6) | ethertype-or-VLAN-tag placeholder(4).
 * Byte 12/13 = 0x8100 doubles as the VLAN TPID or gets overwritten with a
 * caller-supplied Ether type; the final two bytes hold the VLAN ID when used.
 */
static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
							0x2, 0, 0, 0, 0, 0,
							0x81, 0, 0, 0};

/* Byte size of a Rx/Tx lookup rule element that carries the dummy Ethernet
 * header. The "- 1" presumably accounts for a one-byte flexible tail already
 * counted inside ice_sw_rule_lkup_rx_tx — TODO confirm against the struct
 * definition in ice_switch.h.
 */
#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
	(sizeof(struct ice_aqc_sw_rules_elem) - \
	 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
	 sizeof(struct ice_sw_rule_lkup_rx_tx) + DUMMY_ETH_HDR_LEN - 1)

/* Byte size of a Rx/Tx lookup rule element with no packet header attached */
#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
	(sizeof(struct ice_aqc_sw_rules_elem) - \
	 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
	 sizeof(struct ice_sw_rule_lkup_rx_tx) - 1)

/* Byte size of a large-action rule element holding (n) 32-bit actions */
#define ICE_SW_RULE_LG_ACT_SIZE(n) \
	(sizeof(struct ice_aqc_sw_rules_elem) - \
	 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
	 sizeof(struct ice_sw_rule_lg_act) - \
	 sizeof(((struct ice_sw_rule_lg_act *)0)->act) + \
	 ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act)))

/* Byte size of a VSI-list rule element holding (n) VSI entries */
#define ICE_SW_RULE_VSI_LIST_SIZE(n) \
	(sizeof(struct ice_aqc_sw_rules_elem) - \
	 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
	 sizeof(struct ice_sw_rule_vsi_list) - \
	 sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi) + \
	 ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi)))
  47. /**
  48. * ice_aq_alloc_free_res - command to allocate/free resources
  49. * @hw: pointer to the hw struct
  50. * @num_entries: number of resource entries in buffer
  51. * @buf: Indirect buffer to hold data parameters and response
  52. * @buf_size: size of buffer for indirect commands
  53. * @opc: pass in the command opcode
  54. * @cd: pointer to command details structure or NULL
  55. *
  56. * Helper function to allocate/free resources using the admin queue commands
  57. */
  58. static enum ice_status
  59. ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
  60. struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
  61. enum ice_adminq_opc opc, struct ice_sq_cd *cd)
  62. {
  63. struct ice_aqc_alloc_free_res_cmd *cmd;
  64. struct ice_aq_desc desc;
  65. cmd = &desc.params.sw_res_ctrl;
  66. if (!buf)
  67. return ICE_ERR_PARAM;
  68. if (buf_size < (num_entries * sizeof(buf->elem[0])))
  69. return ICE_ERR_PARAM;
  70. ice_fill_dflt_direct_cmd_desc(&desc, opc);
  71. desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
  72. cmd->num_entries = cpu_to_le16(num_entries);
  73. return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
  74. }
  75. /**
  76. * ice_aq_get_sw_cfg - get switch configuration
  77. * @hw: pointer to the hardware structure
  78. * @buf: pointer to the result buffer
  79. * @buf_size: length of the buffer available for response
  80. * @req_desc: pointer to requested descriptor
  81. * @num_elems: pointer to number of elements
  82. * @cd: pointer to command details structure or NULL
  83. *
  84. * Get switch configuration (0x0200) to be placed in 'buff'.
  85. * This admin command returns information such as initial VSI/port number
  86. * and switch ID it belongs to.
  87. *
  88. * NOTE: *req_desc is both an input/output parameter.
  89. * The caller of this function first calls this function with *request_desc set
  90. * to 0. If the response from f/w has *req_desc set to 0, all the switch
  91. * configuration information has been returned; if non-zero (meaning not all
  92. * the information was returned), the caller should call this function again
  93. * with *req_desc set to the previous value returned by f/w to get the
  94. * next block of switch configuration information.
  95. *
  96. * *num_elems is output only parameter. This reflects the number of elements
  97. * in response buffer. The caller of this function to use *num_elems while
  98. * parsing the response buffer.
  99. */
  100. static enum ice_status
  101. ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
  102. u16 buf_size, u16 *req_desc, u16 *num_elems,
  103. struct ice_sq_cd *cd)
  104. {
  105. struct ice_aqc_get_sw_cfg *cmd;
  106. enum ice_status status;
  107. struct ice_aq_desc desc;
  108. ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
  109. cmd = &desc.params.get_sw_conf;
  110. cmd->element = cpu_to_le16(*req_desc);
  111. status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
  112. if (!status) {
  113. *req_desc = le16_to_cpu(cmd->element);
  114. *num_elems = le16_to_cpu(cmd->num_elems);
  115. }
  116. return status;
  117. }
  118. /**
  119. * ice_aq_add_vsi
  120. * @hw: pointer to the hw struct
  121. * @vsi_ctx: pointer to a VSI context struct
  122. * @cd: pointer to command details structure or NULL
  123. *
  124. * Add a VSI context to the hardware (0x0210)
  125. */
  126. enum ice_status
  127. ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
  128. struct ice_sq_cd *cd)
  129. {
  130. struct ice_aqc_add_update_free_vsi_resp *res;
  131. struct ice_aqc_add_get_update_free_vsi *cmd;
  132. enum ice_status status;
  133. struct ice_aq_desc desc;
  134. cmd = &desc.params.vsi_cmd;
  135. res = (struct ice_aqc_add_update_free_vsi_resp *)&desc.params.raw;
  136. ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
  137. if (!vsi_ctx->alloc_from_pool)
  138. cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num |
  139. ICE_AQ_VSI_IS_VALID);
  140. cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
  141. desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
  142. status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
  143. sizeof(vsi_ctx->info), cd);
  144. if (!status) {
  145. vsi_ctx->vsi_num = le16_to_cpu(res->vsi_num) & ICE_AQ_VSI_NUM_M;
  146. vsi_ctx->vsis_allocd = le16_to_cpu(res->vsi_used);
  147. vsi_ctx->vsis_unallocated = le16_to_cpu(res->vsi_free);
  148. }
  149. return status;
  150. }
  151. /**
  152. * ice_aq_update_vsi
  153. * @hw: pointer to the hw struct
  154. * @vsi_ctx: pointer to a VSI context struct
  155. * @cd: pointer to command details structure or NULL
  156. *
  157. * Update VSI context in the hardware (0x0211)
  158. */
  159. enum ice_status
  160. ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
  161. struct ice_sq_cd *cd)
  162. {
  163. struct ice_aqc_add_update_free_vsi_resp *resp;
  164. struct ice_aqc_add_get_update_free_vsi *cmd;
  165. struct ice_aq_desc desc;
  166. enum ice_status status;
  167. cmd = &desc.params.vsi_cmd;
  168. resp = (struct ice_aqc_add_update_free_vsi_resp *)&desc.params.raw;
  169. ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
  170. cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
  171. desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
  172. status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
  173. sizeof(vsi_ctx->info), cd);
  174. if (!status) {
  175. vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
  176. vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
  177. }
  178. return status;
  179. }
/**
 * ice_aq_free_vsi
 * @hw: pointer to the hw struct
 * @vsi_ctx: pointer to a VSI context struct
 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
 * @cd: pointer to command details structure or NULL
 *
 * Free VSI context info from hardware (0x0213)
 */
enum ice_status
ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
		bool keep_vsi_alloc, struct ice_sq_cd *cd)
{
	struct ice_aqc_add_update_free_vsi_resp *resp;
	struct ice_aqc_add_get_update_free_vsi *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.vsi_cmd;
	/* response shares the descriptor's raw parameter area */
	resp = (struct ice_aqc_add_update_free_vsi_resp *)&desc.params.raw;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
	cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
	/* Optionally keep the allocation owned by this PF after the free */
	if (keep_vsi_alloc)
		cmd->cmd_flags = cpu_to_le16(ICE_AQ_VSI_KEEP_ALLOC);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (!status) {
		/* FW returns updated used/free VSI counts */
		vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
		vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
	}
	return status;
}
  210. /**
  211. * ice_aq_alloc_free_vsi_list
  212. * @hw: pointer to the hw struct
  213. * @vsi_list_id: VSI list id returned or used for lookup
  214. * @lkup_type: switch rule filter lookup type
  215. * @opc: switch rules population command type - pass in the command opcode
  216. *
  217. * allocates or free a VSI list resource
  218. */
  219. static enum ice_status
  220. ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
  221. enum ice_sw_lkup_type lkup_type,
  222. enum ice_adminq_opc opc)
  223. {
  224. struct ice_aqc_alloc_free_res_elem *sw_buf;
  225. struct ice_aqc_res_elem *vsi_ele;
  226. enum ice_status status;
  227. u16 buf_len;
  228. buf_len = sizeof(*sw_buf);
  229. sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL);
  230. if (!sw_buf)
  231. return ICE_ERR_NO_MEMORY;
  232. sw_buf->num_elems = cpu_to_le16(1);
  233. if (lkup_type == ICE_SW_LKUP_MAC ||
  234. lkup_type == ICE_SW_LKUP_MAC_VLAN ||
  235. lkup_type == ICE_SW_LKUP_ETHERTYPE ||
  236. lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
  237. lkup_type == ICE_SW_LKUP_PROMISC ||
  238. lkup_type == ICE_SW_LKUP_PROMISC_VLAN) {
  239. sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
  240. } else if (lkup_type == ICE_SW_LKUP_VLAN) {
  241. sw_buf->res_type =
  242. cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
  243. } else {
  244. status = ICE_ERR_PARAM;
  245. goto ice_aq_alloc_free_vsi_list_exit;
  246. }
  247. if (opc == ice_aqc_opc_free_res)
  248. sw_buf->elem[0].e.sw_resp = cpu_to_le16(*vsi_list_id);
  249. status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
  250. if (status)
  251. goto ice_aq_alloc_free_vsi_list_exit;
  252. if (opc == ice_aqc_opc_alloc_res) {
  253. vsi_ele = &sw_buf->elem[0];
  254. *vsi_list_id = le16_to_cpu(vsi_ele->e.sw_resp);
  255. }
  256. ice_aq_alloc_free_vsi_list_exit:
  257. devm_kfree(ice_hw_to_dev(hw), sw_buf);
  258. return status;
  259. }
  260. /**
  261. * ice_aq_sw_rules - add/update/remove switch rules
  262. * @hw: pointer to the hw struct
  263. * @rule_list: pointer to switch rule population list
  264. * @rule_list_sz: total size of the rule list in bytes
  265. * @num_rules: number of switch rules in the rule_list
  266. * @opc: switch rules population command type - pass in the command opcode
  267. * @cd: pointer to command details structure or NULL
  268. *
  269. * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
  270. */
  271. static enum ice_status
  272. ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
  273. u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
  274. {
  275. struct ice_aq_desc desc;
  276. if (opc != ice_aqc_opc_add_sw_rules &&
  277. opc != ice_aqc_opc_update_sw_rules &&
  278. opc != ice_aqc_opc_remove_sw_rules)
  279. return ICE_ERR_PARAM;
  280. ice_fill_dflt_direct_cmd_desc(&desc, opc);
  281. desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
  282. desc.params.sw_rules.num_rules_fltr_entry_index =
  283. cpu_to_le16(num_rules);
  284. return ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
  285. }
  286. /* ice_init_port_info - Initialize port_info with switch configuration data
  287. * @pi: pointer to port_info
  288. * @vsi_port_num: VSI number or port number
  289. * @type: Type of switch element (port or VSI)
  290. * @swid: switch ID of the switch the element is attached to
  291. * @pf_vf_num: PF or VF number
  292. * @is_vf: true if the element is a VF, false otherwise
  293. */
  294. static void
  295. ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
  296. u16 swid, u16 pf_vf_num, bool is_vf)
  297. {
  298. switch (type) {
  299. case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
  300. pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
  301. pi->sw_id = swid;
  302. pi->pf_vf_num = pf_vf_num;
  303. pi->is_vf = is_vf;
  304. pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
  305. pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
  306. break;
  307. default:
  308. ice_debug(pi->hw, ICE_DBG_SW,
  309. "incorrect VSI/port type received\n");
  310. break;
  311. }
  312. }
  313. /* ice_get_initial_sw_cfg - Get initial port and default VSI data
  314. * @hw: pointer to the hardware structure
  315. */
  316. enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
  317. {
  318. struct ice_aqc_get_sw_cfg_resp *rbuf;
  319. enum ice_status status;
  320. u16 req_desc = 0;
  321. u16 num_elems;
  322. u16 i;
  323. rbuf = devm_kzalloc(ice_hw_to_dev(hw), ICE_SW_CFG_MAX_BUF_LEN,
  324. GFP_KERNEL);
  325. if (!rbuf)
  326. return ICE_ERR_NO_MEMORY;
  327. /* Multiple calls to ice_aq_get_sw_cfg may be required
  328. * to get all the switch configuration information. The need
  329. * for additional calls is indicated by ice_aq_get_sw_cfg
  330. * writing a non-zero value in req_desc
  331. */
  332. do {
  333. status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
  334. &req_desc, &num_elems, NULL);
  335. if (status)
  336. break;
  337. for (i = 0; i < num_elems; i++) {
  338. struct ice_aqc_get_sw_cfg_resp_elem *ele;
  339. u16 pf_vf_num, swid, vsi_port_num;
  340. bool is_vf = false;
  341. u8 type;
  342. ele = rbuf[i].elements;
  343. vsi_port_num = le16_to_cpu(ele->vsi_port_num) &
  344. ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
  345. pf_vf_num = le16_to_cpu(ele->pf_vf_num) &
  346. ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
  347. swid = le16_to_cpu(ele->swid);
  348. if (le16_to_cpu(ele->pf_vf_num) &
  349. ICE_AQC_GET_SW_CONF_RESP_IS_VF)
  350. is_vf = true;
  351. type = le16_to_cpu(ele->vsi_port_num) >>
  352. ICE_AQC_GET_SW_CONF_RESP_TYPE_S;
  353. if (type == ICE_AQC_GET_SW_CONF_RESP_VSI) {
  354. /* FW VSI is not needed. Just continue. */
  355. continue;
  356. }
  357. ice_init_port_info(hw->port_info, vsi_port_num,
  358. type, swid, pf_vf_num, is_vf);
  359. }
  360. } while (req_desc && !status);
  361. devm_kfree(ice_hw_to_dev(hw), (void *)rbuf);
  362. return status;
  363. }
  364. /**
  365. * ice_fill_sw_info - Helper function to populate lb_en and lan_en
  366. * @hw: pointer to the hardware structure
  367. * @f_info: filter info structure to fill/update
  368. *
  369. * This helper function populates the lb_en and lan_en elements of the provided
  370. * ice_fltr_info struct using the switch's type and characteristics of the
  371. * switch rule being configured.
  372. */
  373. static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *f_info)
  374. {
  375. f_info->lb_en = false;
  376. f_info->lan_en = false;
  377. if ((f_info->flag & ICE_FLTR_TX) &&
  378. (f_info->fltr_act == ICE_FWD_TO_VSI ||
  379. f_info->fltr_act == ICE_FWD_TO_VSI_LIST ||
  380. f_info->fltr_act == ICE_FWD_TO_Q ||
  381. f_info->fltr_act == ICE_FWD_TO_QGRP)) {
  382. f_info->lb_en = true;
  383. if (!(hw->evb_veb && f_info->lkup_type == ICE_SW_LKUP_MAC &&
  384. is_unicast_ether_addr(f_info->l_data.mac.mac_addr)))
  385. f_info->lan_en = true;
  386. }
  387. }
/**
 * ice_fill_sw_rule - Helper function to fill switch rule structure
 * @hw: pointer to the hardware structure
 * @f_info: entry containing packet forwarding information
 * @s_rule: switch rule structure to be filled in based on mac_entry
 * @opc: switch rules population command type - pass in the command opcode
 */
static void
ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
		 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
{
	/* vlan_id starts out-of-range; a lookup type below must set a valid
	 * value for the VLAN TCI to be written into the dummy header
	 */
	u16 vlan_id = ICE_MAX_VLAN_ID + 1;
	u8 eth_hdr[DUMMY_ETH_HDR_LEN];
	void *daddr = NULL;
	u32 act = 0;
	__be16 *off;
	u8 q_rgn;

	/* A remove rule only needs the index of the rule to delete */
	if (opc == ice_aqc_opc_remove_sw_rules) {
		s_rule->pdata.lkup_tx_rx.act = 0;
		s_rule->pdata.lkup_tx_rx.index =
			cpu_to_le16(f_info->fltr_rule_id);
		s_rule->pdata.lkup_tx_rx.hdr_len = 0;
		return;
	}

	/* initialize the ether header with a dummy header */
	memcpy(eth_hdr, dummy_eth_header, sizeof(dummy_eth_header));

	ice_fill_sw_info(hw, f_info);

	/* Build the 32-bit action word from the forwarding action */
	switch (f_info->fltr_act) {
	case ICE_FWD_TO_VSI:
		act |= (f_info->fwd_id.vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
			ICE_SINGLE_ACT_VSI_ID_M;
		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
				ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_VSI_LIST:
		act |= ICE_SINGLE_ACT_VSI_LIST;
		act |= (f_info->fwd_id.vsi_list_id <<
			ICE_SINGLE_ACT_VSI_LIST_ID_S) &
			ICE_SINGLE_ACT_VSI_LIST_ID_M;
		if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
			act |= ICE_SINGLE_ACT_VSI_FORWARDING |
				ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_Q:
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
			ICE_SINGLE_ACT_Q_INDEX_M;
		break;
	case ICE_DROP_PACKET:
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
			ICE_SINGLE_ACT_VALID_BIT;
		break;
	case ICE_FWD_TO_QGRP:
		/* queue region size is a power of two; encode its log2 */
		q_rgn = f_info->qgrp_size > 0 ?
			(u8)ilog2(f_info->qgrp_size) : 0;
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
			ICE_SINGLE_ACT_Q_INDEX_M;
		act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
			ICE_SINGLE_ACT_Q_REGION_M;
		break;
	default:
		/* unknown action: leave s_rule untouched */
		return;
	}

	if (f_info->lb_en)
		act |= ICE_SINGLE_ACT_LB_ENABLE;
	if (f_info->lan_en)
		act |= ICE_SINGLE_ACT_LAN_ENABLE;

	/* Patch the dummy header according to the lookup type */
	switch (f_info->lkup_type) {
	case ICE_SW_LKUP_MAC:
		daddr = f_info->l_data.mac.mac_addr;
		break;
	case ICE_SW_LKUP_VLAN:
		vlan_id = f_info->l_data.vlan.vlan_id;
		if (f_info->fltr_act == ICE_FWD_TO_VSI ||
		    f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
			act |= ICE_SINGLE_ACT_PRUNE;
			act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
		}
		break;
	case ICE_SW_LKUP_ETHERTYPE_MAC:
		daddr = f_info->l_data.ethertype_mac.mac_addr;
		/* fall-through */
	case ICE_SW_LKUP_ETHERTYPE:
		off = (__be16 *)&eth_hdr[ICE_ETH_ETHTYPE_OFFSET];
		*off = cpu_to_be16(f_info->l_data.ethertype_mac.ethertype);
		break;
	case ICE_SW_LKUP_MAC_VLAN:
		daddr = f_info->l_data.mac_vlan.mac_addr;
		vlan_id = f_info->l_data.mac_vlan.vlan_id;
		break;
	case ICE_SW_LKUP_PROMISC_VLAN:
		vlan_id = f_info->l_data.mac_vlan.vlan_id;
		/* fall-through */
	case ICE_SW_LKUP_PROMISC:
		daddr = f_info->l_data.mac_vlan.mac_addr;
		break;
	default:
		break;
	}

	s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
		cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX) :
		cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);

	/* Recipe set depending on lookup type */
	s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(f_info->lkup_type);
	s_rule->pdata.lkup_tx_rx.src = cpu_to_le16(f_info->src);
	s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act);

	if (daddr)
		ether_addr_copy(&eth_hdr[ICE_ETH_DA_OFFSET], daddr);

	/* Only write the VLAN TCI if a lookup type above set a valid id */
	if (!(vlan_id > ICE_MAX_VLAN_ID)) {
		off = (__be16 *)&eth_hdr[ICE_ETH_VLAN_TCI_OFFSET];
		*off = cpu_to_be16(vlan_id);
	}

	/* Create the switch rule with the final dummy Ethernet header */
	if (opc != ice_aqc_opc_update_sw_rules)
		s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(sizeof(eth_hdr));

	memcpy(s_rule->pdata.lkup_tx_rx.hdr, eth_hdr, sizeof(eth_hdr));
}
/**
 * ice_add_marker_act
 * @hw: pointer to the hardware structure
 * @m_ent: the management entry for which sw marker needs to be added
 * @sw_marker: sw marker to tag the Rx descriptor with
 * @l_id: large action resource id
 *
 * Create a large action to hold software marker and update the switch rule
 * entry pointed by m_ent with newly created large action
 */
static enum ice_status
ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
		   u16 sw_marker, u16 l_id)
{
	struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
	/* For software marker we need 3 large actions
	 * 1. FWD action: FWD TO VSI or VSI LIST
	 * 2. GENERIC VALUE action to hold the profile id
	 * 3. GENERIC VALUE action to hold the software marker id
	 */
	const u16 num_lg_acts = 3;
	enum ice_status status;
	u16 lg_act_size;
	u16 rules_size;
	u16 vsi_info;
	u32 act;

	/* Markers are only supported on MAC lookup rules */
	if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
		return ICE_ERR_PARAM;

	/* Create two back-to-back switch rules and submit them to the HW using
	 * one memory buffer:
	 *    1. Large Action
	 *    2. Look up tx rx
	 */
	lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
	rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
	lg_act = devm_kzalloc(ice_hw_to_dev(hw), rules_size, GFP_KERNEL);
	if (!lg_act)
		return ICE_ERR_NO_MEMORY;

	/* second rule lives immediately after the first in the buffer */
	rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);

	/* Fill in the first switch rule i.e. large action */
	lg_act->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LG_ACT);
	lg_act->pdata.lg_act.index = cpu_to_le16(l_id);
	lg_act->pdata.lg_act.size = cpu_to_le16(num_lg_acts);

	/* First action VSI forwarding or VSI list forwarding depending on how
	 * many VSIs
	 */
	vsi_info = (m_ent->vsi_count > 1) ?
		m_ent->fltr_info.fwd_id.vsi_list_id :
		m_ent->fltr_info.fwd_id.vsi_id;

	act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
	act |= (vsi_info << ICE_LG_ACT_VSI_LIST_ID_S) &
		ICE_LG_ACT_VSI_LIST_ID_M;
	if (m_ent->vsi_count > 1)
		act |= ICE_LG_ACT_VSI_LIST;
	lg_act->pdata.lg_act.act[0] = cpu_to_le32(act);

	/* Second action descriptor type */
	act = ICE_LG_ACT_GENERIC;
	act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
	lg_act->pdata.lg_act.act[1] = cpu_to_le32(act);

	act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
	       ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;

	/* Third action Marker value */
	act |= ICE_LG_ACT_GENERIC;
	act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
		ICE_LG_ACT_GENERIC_VALUE_M;
	lg_act->pdata.lg_act.act[2] = cpu_to_le32(act);

	/* call the fill switch rule to fill the lookup tx rx structure */
	ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
			 ice_aqc_opc_update_sw_rules);

	/* Update the action to point to the large action id */
	rx_tx->pdata.lkup_tx_rx.act =
		cpu_to_le32(ICE_SINGLE_ACT_PTR |
			    ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
			     ICE_SINGLE_ACT_PTR_VAL_M));

	/* Use the filter rule id of the previously created rule with single
	 * act. Once the update happens, hardware will treat this as large
	 * action
	 */
	rx_tx->pdata.lkup_tx_rx.index =
		cpu_to_le16(m_ent->fltr_info.fltr_rule_id);

	status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
				 ice_aqc_opc_update_sw_rules, NULL);
	if (!status) {
		/* remember where the marker landed for later removal */
		m_ent->lg_act_idx = l_id;
		m_ent->sw_marker_id = sw_marker;
	}

	devm_kfree(ice_hw_to_dev(hw), lg_act);
	return status;
}
  596. /**
  597. * ice_create_vsi_list_map
  598. * @hw: pointer to the hardware structure
  599. * @vsi_array: array of VSIs to form a VSI list
  600. * @num_vsi: num VSI in the array
  601. * @vsi_list_id: VSI list id generated as part of allocate resource
  602. *
  603. * Helper function to create a new entry of VSI list id to VSI mapping
  604. * using the given VSI list id
  605. */
  606. static struct ice_vsi_list_map_info *
  607. ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi,
  608. u16 vsi_list_id)
  609. {
  610. struct ice_switch_info *sw = hw->switch_info;
  611. struct ice_vsi_list_map_info *v_map;
  612. int i;
  613. v_map = devm_kcalloc(ice_hw_to_dev(hw), 1, sizeof(*v_map), GFP_KERNEL);
  614. if (!v_map)
  615. return NULL;
  616. v_map->vsi_list_id = vsi_list_id;
  617. for (i = 0; i < num_vsi; i++)
  618. set_bit(vsi_array[i], v_map->vsi_map);
  619. list_add(&v_map->list_entry, &sw->vsi_list_map_head);
  620. return v_map;
  621. }
  622. /**
  623. * ice_update_vsi_list_rule
  624. * @hw: pointer to the hardware structure
  625. * @vsi_array: array of VSIs to form a VSI list
  626. * @num_vsi: num VSI in the array
  627. * @vsi_list_id: VSI list id generated as part of allocate resource
  628. * @remove: Boolean value to indicate if this is a remove action
  629. * @opc: switch rules population command type - pass in the command opcode
  630. * @lkup_type: lookup type of the filter
  631. *
  632. * Call AQ command to add a new switch rule or update existing switch rule
  633. * using the given VSI list id
  634. */
  635. static enum ice_status
  636. ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi,
  637. u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
  638. enum ice_sw_lkup_type lkup_type)
  639. {
  640. struct ice_aqc_sw_rules_elem *s_rule;
  641. enum ice_status status;
  642. u16 s_rule_size;
  643. u16 type;
  644. int i;
  645. if (!num_vsi)
  646. return ICE_ERR_PARAM;
  647. if (lkup_type == ICE_SW_LKUP_MAC ||
  648. lkup_type == ICE_SW_LKUP_MAC_VLAN ||
  649. lkup_type == ICE_SW_LKUP_ETHERTYPE ||
  650. lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
  651. lkup_type == ICE_SW_LKUP_PROMISC ||
  652. lkup_type == ICE_SW_LKUP_PROMISC_VLAN)
  653. type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
  654. ICE_AQC_SW_RULES_T_VSI_LIST_SET;
  655. else if (lkup_type == ICE_SW_LKUP_VLAN)
  656. type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
  657. ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
  658. else
  659. return ICE_ERR_PARAM;
  660. s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
  661. s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
  662. if (!s_rule)
  663. return ICE_ERR_NO_MEMORY;
  664. for (i = 0; i < num_vsi; i++)
  665. s_rule->pdata.vsi_list.vsi[i] = cpu_to_le16(vsi_array[i]);
  666. s_rule->type = cpu_to_le16(type);
  667. s_rule->pdata.vsi_list.number_vsi = cpu_to_le16(num_vsi);
  668. s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);
  669. status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
  670. devm_kfree(ice_hw_to_dev(hw), s_rule);
  671. return status;
  672. }
  673. /**
  674. * ice_create_vsi_list_rule - Creates and populates a VSI list rule
  675. * @hw: pointer to the hw struct
  676. * @vsi_array: array of VSIs to form a VSI list
  677. * @num_vsi: number of VSIs in the array
  678. * @vsi_list_id: stores the ID of the VSI list to be created
  679. * @lkup_type: switch rule filter's lookup type
  680. */
  681. static enum ice_status
  682. ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi,
  683. u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
  684. {
  685. enum ice_status status;
  686. int i;
  687. for (i = 0; i < num_vsi; i++)
  688. if (vsi_array[i] >= ICE_MAX_VSI)
  689. return ICE_ERR_OUT_OF_RANGE;
  690. status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
  691. ice_aqc_opc_alloc_res);
  692. if (status)
  693. return status;
  694. /* Update the newly created VSI list to include the specified VSIs */
  695. return ice_update_vsi_list_rule(hw, vsi_array, num_vsi, *vsi_list_id,
  696. false, ice_aqc_opc_add_sw_rules,
  697. lkup_type);
  698. }
  699. /**
  700. * ice_create_pkt_fwd_rule
  701. * @hw: pointer to the hardware structure
  702. * @f_entry: entry containing packet forwarding information
  703. *
  704. * Create switch rule with given filter information and add an entry
  705. * to the corresponding filter management list to track this switch rule
  706. * and VSI mapping
  707. */
  708. static enum ice_status
  709. ice_create_pkt_fwd_rule(struct ice_hw *hw,
  710. struct ice_fltr_list_entry *f_entry)
  711. {
  712. struct ice_switch_info *sw = hw->switch_info;
  713. struct ice_fltr_mgmt_list_entry *fm_entry;
  714. struct ice_aqc_sw_rules_elem *s_rule;
  715. enum ice_sw_lkup_type l_type;
  716. enum ice_status status;
  717. s_rule = devm_kzalloc(ice_hw_to_dev(hw),
  718. ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL);
  719. if (!s_rule)
  720. return ICE_ERR_NO_MEMORY;
  721. fm_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*fm_entry),
  722. GFP_KERNEL);
  723. if (!fm_entry) {
  724. status = ICE_ERR_NO_MEMORY;
  725. goto ice_create_pkt_fwd_rule_exit;
  726. }
  727. fm_entry->fltr_info = f_entry->fltr_info;
  728. /* Initialize all the fields for the management entry */
  729. fm_entry->vsi_count = 1;
  730. fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
  731. fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
  732. fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
  733. ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
  734. ice_aqc_opc_add_sw_rules);
  735. status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
  736. ice_aqc_opc_add_sw_rules, NULL);
  737. if (status) {
  738. devm_kfree(ice_hw_to_dev(hw), fm_entry);
  739. goto ice_create_pkt_fwd_rule_exit;
  740. }
  741. f_entry->fltr_info.fltr_rule_id =
  742. le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
  743. fm_entry->fltr_info.fltr_rule_id =
  744. le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
  745. /* The book keeping entries will get removed when base driver
  746. * calls remove filter AQ command
  747. */
  748. l_type = fm_entry->fltr_info.lkup_type;
  749. if (l_type == ICE_SW_LKUP_MAC) {
  750. mutex_lock(&sw->mac_list_lock);
  751. list_add(&fm_entry->list_entry, &sw->mac_list_head);
  752. mutex_unlock(&sw->mac_list_lock);
  753. } else if (l_type == ICE_SW_LKUP_VLAN) {
  754. mutex_lock(&sw->vlan_list_lock);
  755. list_add(&fm_entry->list_entry, &sw->vlan_list_head);
  756. mutex_unlock(&sw->vlan_list_lock);
  757. } else if (l_type == ICE_SW_LKUP_ETHERTYPE ||
  758. l_type == ICE_SW_LKUP_ETHERTYPE_MAC) {
  759. mutex_lock(&sw->eth_m_list_lock);
  760. list_add(&fm_entry->list_entry, &sw->eth_m_list_head);
  761. mutex_unlock(&sw->eth_m_list_lock);
  762. } else if (l_type == ICE_SW_LKUP_PROMISC ||
  763. l_type == ICE_SW_LKUP_PROMISC_VLAN) {
  764. mutex_lock(&sw->promisc_list_lock);
  765. list_add(&fm_entry->list_entry, &sw->promisc_list_head);
  766. mutex_unlock(&sw->promisc_list_lock);
  767. } else if (fm_entry->fltr_info.lkup_type == ICE_SW_LKUP_MAC_VLAN) {
  768. mutex_lock(&sw->mac_vlan_list_lock);
  769. list_add(&fm_entry->list_entry, &sw->mac_vlan_list_head);
  770. mutex_unlock(&sw->mac_vlan_list_lock);
  771. } else {
  772. status = ICE_ERR_NOT_IMPL;
  773. }
  774. ice_create_pkt_fwd_rule_exit:
  775. devm_kfree(ice_hw_to_dev(hw), s_rule);
  776. return status;
  777. }
  778. /**
  779. * ice_update_pkt_fwd_rule
  780. * @hw: pointer to the hardware structure
  781. * @rule_id: rule of previously created switch rule to update
  782. * @vsi_list_id: VSI list id to be updated with
  783. * @f_info: ice_fltr_info to pull other information for switch rule
  784. *
  785. * Call AQ command to update a previously created switch rule with a
  786. * VSI list id
  787. */
  788. static enum ice_status
  789. ice_update_pkt_fwd_rule(struct ice_hw *hw, u16 rule_id, u16 vsi_list_id,
  790. struct ice_fltr_info f_info)
  791. {
  792. struct ice_aqc_sw_rules_elem *s_rule;
  793. struct ice_fltr_info tmp_fltr;
  794. enum ice_status status;
  795. s_rule = devm_kzalloc(ice_hw_to_dev(hw),
  796. ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL);
  797. if (!s_rule)
  798. return ICE_ERR_NO_MEMORY;
  799. tmp_fltr = f_info;
  800. tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
  801. tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
  802. ice_fill_sw_rule(hw, &tmp_fltr, s_rule,
  803. ice_aqc_opc_update_sw_rules);
  804. s_rule->pdata.lkup_tx_rx.index = cpu_to_le16(rule_id);
  805. /* Update switch rule with new rule set to forward VSI list */
  806. status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
  807. ice_aqc_opc_update_sw_rules, NULL);
  808. devm_kfree(ice_hw_to_dev(hw), s_rule);
  809. return status;
  810. }
/**
 * ice_handle_vsi_list_mgmt
 * @hw: pointer to the hardware structure
 * @m_entry: pointer to current filter management list entry
 * @cur_fltr: filter information from the book keeping entry
 * @new_fltr: filter information with the new VSI to be added
 *
 * Call AQ command to add or update previously created VSI list with new VSI.
 *
 * Helper function to do book keeping associated with adding filter information
 * The algorithm to do the booking keeping is described below :
 * When a VSI needs to subscribe to a given filter( MAC/VLAN/Ethtype etc.)
 *	if only one VSI has been added till now
 *		Allocate a new VSI list and add two VSIs
 *		to this list using switch rule command
 *		Update the previously created switch rule with the
 *		newly created VSI list id
 *	if a VSI list was previously created
 *		Add the new VSI to the previously created VSI list set
 *		using the update switch rule command
 */
static enum ice_status
ice_handle_vsi_list_mgmt(struct ice_hw *hw,
			 struct ice_fltr_mgmt_list_entry *m_entry,
			 struct ice_fltr_info *cur_fltr,
			 struct ice_fltr_info *new_fltr)
{
	enum ice_status status = 0;
	u16 vsi_list_id = 0;

	/* Rules that forward to a queue or queue group are not shareable
	 * across VSIs, so they cannot be converted to a VSI list.
	 */
	if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
	     cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
		return ICE_ERR_NOT_IMPL;

	if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
	     new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
	    (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
	     cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
		return ICE_ERR_NOT_IMPL;

	if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
		/* Only one entry existed in the mapping and it was not already
		 * a part of a VSI list. So, create a VSI list with the old and
		 * new VSIs.
		 */
		u16 vsi_id_arr[2];
		u16 fltr_rule;

		/* A rule already exists with the new VSI being added */
		if (cur_fltr->fwd_id.vsi_id == new_fltr->fwd_id.vsi_id)
			return ICE_ERR_ALREADY_EXISTS;

		vsi_id_arr[0] = cur_fltr->fwd_id.vsi_id;
		vsi_id_arr[1] = new_fltr->fwd_id.vsi_id;
		status = ice_create_vsi_list_rule(hw, &vsi_id_arr[0], 2,
						  &vsi_list_id,
						  new_fltr->lkup_type);
		if (status)
			return status;

		fltr_rule = cur_fltr->fltr_rule_id;
		/* Update the previous switch rule of "MAC forward to VSI" to
		 * "MAC fwd to VSI list"
		 */
		status = ice_update_pkt_fwd_rule(hw, fltr_rule, vsi_list_id,
						 *new_fltr);
		if (status)
			return status;
		/* NOTE(review): if the update above fails, the VSI list just
		 * created is not freed here -- confirm whether that cleanup
		 * is expected elsewhere.
		 */

		/* Switch the book keeping entry over to the VSI list */
		cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
		cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
		m_entry->vsi_list_info =
			ice_create_vsi_list_map(hw, &vsi_id_arr[0], 2,
						vsi_list_id);

		/* If this entry was large action then the large action needs
		 * to be updated to point to FWD to VSI list
		 */
		if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
			status =
			    ice_add_marker_act(hw, m_entry,
					       m_entry->sw_marker_id,
					       m_entry->lg_act_idx);
	} else {
		u16 vsi_id = new_fltr->fwd_id.vsi_id;
		enum ice_adminq_opc opcode;

		/* vsi_count >= 2 implies a VSI list map must already exist */
		if (!m_entry->vsi_list_info)
			return ICE_ERR_CFG;

		/* A rule already exists with the new VSI being added */
		if (test_bit(vsi_id, m_entry->vsi_list_info->vsi_map))
			return 0;

		/* Update the previously created VSI list set with
		 * the new VSI id passed in
		 */
		vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
		opcode = ice_aqc_opc_update_sw_rules;

		status = ice_update_vsi_list_rule(hw, &vsi_id, 1, vsi_list_id,
						  false, opcode,
						  new_fltr->lkup_type);
		/* update VSI list mapping info with new VSI id */
		if (!status)
			set_bit(vsi_id, m_entry->vsi_list_info->vsi_map);
	}
	if (!status)
		m_entry->vsi_count++;
	return status;
}
  910. /**
  911. * ice_find_mac_entry
  912. * @hw: pointer to the hardware structure
  913. * @mac_addr: MAC address to search for
  914. *
  915. * Helper function to search for a MAC entry using a given MAC address
  916. * Returns pointer to the entry if found.
  917. */
  918. static struct ice_fltr_mgmt_list_entry *
  919. ice_find_mac_entry(struct ice_hw *hw, u8 *mac_addr)
  920. {
  921. struct ice_fltr_mgmt_list_entry *m_list_itr, *mac_ret = NULL;
  922. struct ice_switch_info *sw = hw->switch_info;
  923. mutex_lock(&sw->mac_list_lock);
  924. list_for_each_entry(m_list_itr, &sw->mac_list_head, list_entry) {
  925. u8 *buf = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
  926. if (ether_addr_equal(buf, mac_addr)) {
  927. mac_ret = m_list_itr;
  928. break;
  929. }
  930. }
  931. mutex_unlock(&sw->mac_list_lock);
  932. return mac_ret;
  933. }
  934. /**
  935. * ice_add_shared_mac - Add one MAC shared filter rule
  936. * @hw: pointer to the hardware structure
  937. * @f_entry: structure containing MAC forwarding information
  938. *
  939. * Adds or updates the book keeping list for the MAC addresses
  940. */
  941. static enum ice_status
  942. ice_add_shared_mac(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
  943. {
  944. struct ice_fltr_info *new_fltr, *cur_fltr;
  945. struct ice_fltr_mgmt_list_entry *m_entry;
  946. new_fltr = &f_entry->fltr_info;
  947. m_entry = ice_find_mac_entry(hw, &new_fltr->l_data.mac.mac_addr[0]);
  948. if (!m_entry)
  949. return ice_create_pkt_fwd_rule(hw, f_entry);
  950. cur_fltr = &m_entry->fltr_info;
  951. return ice_handle_vsi_list_mgmt(hw, m_entry, cur_fltr, new_fltr);
  952. }
/**
 * ice_add_mac - Add a MAC address based filter rule
 * @hw: pointer to the hardware structure
 * @m_list: list of MAC addresses and forwarding information
 *
 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
 * multiple unicast addresses, the function assumes that all the
 * addresses are unique in a given add_mac call. It doesn't
 * check for duplicates in this case, removing duplicates from a given
 * list should be taken care of in the caller of this function.
 */
enum ice_status
ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
{
	struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
	struct ice_fltr_list_entry *m_list_itr;
	u16 elem_sent, total_elem_left;
	enum ice_status status = 0;
	u16 num_unicast = 0;
	u16 s_rule_size;

	if (!m_list || !hw)
		return ICE_ERR_PARAM;

	/* First pass: validate every entry, count the non-shared unicast
	 * addresses (they get one bulk AQ request below) and add the
	 * multicast/shared-unicast entries one at a time right away.
	 */
	list_for_each_entry(m_list_itr, m_list, list_entry) {
		u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];

		if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
			return ICE_ERR_PARAM;
		if (is_zero_ether_addr(add))
			return ICE_ERR_PARAM;
		if (is_unicast_ether_addr(add) && !hw->ucast_shared) {
			/* Don't overwrite the unicast address */
			if (ice_find_mac_entry(hw, add))
				return ICE_ERR_ALREADY_EXISTS;
			num_unicast++;
		} else if (is_multicast_ether_addr(add) ||
			   (is_unicast_ether_addr(add) && hw->ucast_shared)) {
			status = ice_add_shared_mac(hw, m_list_itr);
			if (status) {
				m_list_itr->status = ICE_FLTR_STATUS_FW_FAIL;
				/* NOTE(review): returning here leaves any
				 * entries added earlier in this loop in
				 * place (no rollback) -- confirm callers
				 * expect partial application on error.
				 */
				return status;
			}
			m_list_itr->status = ICE_FLTR_STATUS_FW_SUCCESS;
		}
	}

	/* Exit if no suitable entries were found for adding bulk switch rule */
	if (!num_unicast)
		return 0;

	/* Allocate switch rule buffer for the bulk update for unicast */
	s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
	s_rule = devm_kcalloc(ice_hw_to_dev(hw), num_unicast, s_rule_size,
			      GFP_KERNEL);
	if (!s_rule)
		return ICE_ERR_NO_MEMORY;

	/* Second pass: serialize one add-rule element per unicast address
	 * into the bulk buffer, back to back.
	 */
	r_iter = s_rule;
	list_for_each_entry(m_list_itr, m_list, list_entry) {
		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
		u8 *addr = &f_info->l_data.mac.mac_addr[0];

		if (is_unicast_ether_addr(addr)) {
			ice_fill_sw_rule(hw, &m_list_itr->fltr_info,
					 r_iter, ice_aqc_opc_add_sw_rules);
			r_iter = (struct ice_aqc_sw_rules_elem *)
				((u8 *)r_iter + s_rule_size);
		}
	}

	/* Call AQ bulk switch rule update for all unicast addresses */
	r_iter = s_rule;
	/* Call AQ switch rule in AQ_MAX chunk */
	for (total_elem_left = num_unicast; total_elem_left > 0;
	     total_elem_left -= elem_sent) {
		struct ice_aqc_sw_rules_elem *entry = r_iter;

		/* Cap each request at what fits in one AQ buffer */
		elem_sent = min(total_elem_left,
				(u16)(ICE_AQ_MAX_BUF_LEN / s_rule_size));
		status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
					 elem_sent, ice_aqc_opc_add_sw_rules,
					 NULL);
		if (status)
			goto ice_add_mac_exit;
		r_iter = (struct ice_aqc_sw_rules_elem *)
			((u8 *)r_iter + (elem_sent * s_rule_size));
	}

	/* Fill up rule id based on the value returned from FW */
	r_iter = s_rule;
	list_for_each_entry(m_list_itr, m_list, list_entry) {
		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
		u8 *addr = &f_info->l_data.mac.mac_addr[0];
		struct ice_switch_info *sw = hw->switch_info;
		struct ice_fltr_mgmt_list_entry *fm_entry;

		if (is_unicast_ether_addr(addr)) {
			f_info->fltr_rule_id =
				le16_to_cpu(r_iter->pdata.lkup_tx_rx.index);
			f_info->fltr_act = ICE_FWD_TO_VSI;
			/* Create an entry to track this MAC address */
			fm_entry = devm_kzalloc(ice_hw_to_dev(hw),
						sizeof(*fm_entry), GFP_KERNEL);
			if (!fm_entry) {
				status = ICE_ERR_NO_MEMORY;
				goto ice_add_mac_exit;
			}
			fm_entry->fltr_info = *f_info;
			fm_entry->vsi_count = 1;
			/* The book keeping entries will get removed when
			 * base driver calls remove filter AQ command
			 */
			mutex_lock(&sw->mac_list_lock);
			list_add(&fm_entry->list_entry, &sw->mac_list_head);
			mutex_unlock(&sw->mac_list_lock);

			r_iter = (struct ice_aqc_sw_rules_elem *)
				((u8 *)r_iter + s_rule_size);
		}
	}

ice_add_mac_exit:
	devm_kfree(ice_hw_to_dev(hw), s_rule);
	return status;
}
  1066. /**
  1067. * ice_find_vlan_entry
  1068. * @hw: pointer to the hardware structure
  1069. * @vlan_id: VLAN id to search for
  1070. *
  1071. * Helper function to search for a VLAN entry using a given VLAN id
  1072. * Returns pointer to the entry if found.
  1073. */
  1074. static struct ice_fltr_mgmt_list_entry *
  1075. ice_find_vlan_entry(struct ice_hw *hw, u16 vlan_id)
  1076. {
  1077. struct ice_fltr_mgmt_list_entry *vlan_list_itr, *vlan_ret = NULL;
  1078. struct ice_switch_info *sw = hw->switch_info;
  1079. mutex_lock(&sw->vlan_list_lock);
  1080. list_for_each_entry(vlan_list_itr, &sw->vlan_list_head, list_entry)
  1081. if (vlan_list_itr->fltr_info.l_data.vlan.vlan_id == vlan_id) {
  1082. vlan_ret = vlan_list_itr;
  1083. break;
  1084. }
  1085. mutex_unlock(&sw->vlan_list_lock);
  1086. return vlan_ret;
  1087. }
/**
 * ice_add_vlan_internal - Add one VLAN based filter rule
 * @hw: pointer to the hardware structure
 * @f_entry: filter entry containing one VLAN information
 *
 * Creates a new VLAN forwarding rule, or folds the new VSI into the VSI
 * list of a VLAN filter that is already tracked.
 */
static enum ice_status
ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
{
	struct ice_fltr_info *new_fltr, *cur_fltr;
	struct ice_fltr_mgmt_list_entry *v_list_itr;
	u16 vlan_id;

	new_fltr = &f_entry->fltr_info;
	/* VLAN id should only be 12 bits */
	if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
		return ICE_ERR_PARAM;

	vlan_id = new_fltr->l_data.vlan.vlan_id;
	v_list_itr = ice_find_vlan_entry(hw, vlan_id);
	if (!v_list_itr) {
		/* No rule tracked for this VLAN id yet */
		u16 vsi_id = ICE_VSI_INVAL_ID;
		enum ice_status status;
		u16 vsi_list_id = 0;

		if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
			enum ice_sw_lkup_type lkup_type = new_fltr->lkup_type;

			/* All VLAN pruning rules use a VSI list.
			 * Convert the action to forwarding to a VSI list.
			 */
			vsi_id = new_fltr->fwd_id.vsi_id;
			status = ice_create_vsi_list_rule(hw, &vsi_id, 1,
							  &vsi_list_id,
							  lkup_type);
			if (status)
				return status;
			new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
			new_fltr->fwd_id.vsi_list_id = vsi_list_id;
		}

		status = ice_create_pkt_fwd_rule(hw, f_entry);
		/* NOTE(review): if rule creation fails after the VSI list
		 * above was allocated, the list is not freed here --
		 * confirm this is cleaned up elsewhere.
		 */
		if (!status && vsi_id != ICE_VSI_INVAL_ID) {
			/* ice_create_pkt_fwd_rule added the tracking entry;
			 * look it up again to attach the VSI list map.
			 */
			v_list_itr = ice_find_vlan_entry(hw, vlan_id);
			if (!v_list_itr)
				return ICE_ERR_DOES_NOT_EXIST;
			v_list_itr->vsi_list_info =
				ice_create_vsi_list_map(hw, &vsi_id, 1,
							vsi_list_id);
		}
		return status;
	}

	/* Existing rule: update its VSI list with the new VSI */
	cur_fltr = &v_list_itr->fltr_info;
	return ice_handle_vsi_list_mgmt(hw, v_list_itr, cur_fltr, new_fltr);
}
  1137. /**
  1138. * ice_add_vlan - Add VLAN based filter rule
  1139. * @hw: pointer to the hardware structure
  1140. * @v_list: list of VLAN entries and forwarding information
  1141. */
  1142. enum ice_status
  1143. ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
  1144. {
  1145. struct ice_fltr_list_entry *v_list_itr;
  1146. if (!v_list || !hw)
  1147. return ICE_ERR_PARAM;
  1148. list_for_each_entry(v_list_itr, v_list, list_entry) {
  1149. enum ice_status status;
  1150. if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
  1151. return ICE_ERR_PARAM;
  1152. status = ice_add_vlan_internal(hw, v_list_itr);
  1153. if (status) {
  1154. v_list_itr->status = ICE_FLTR_STATUS_FW_FAIL;
  1155. return status;
  1156. }
  1157. v_list_itr->status = ICE_FLTR_STATUS_FW_SUCCESS;
  1158. }
  1159. return 0;
  1160. }
  1161. /**
  1162. * ice_remove_vsi_list_rule
  1163. * @hw: pointer to the hardware structure
  1164. * @vsi_list_id: VSI list id generated as part of allocate resource
  1165. * @lkup_type: switch rule filter lookup type
  1166. */
  1167. static enum ice_status
  1168. ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
  1169. enum ice_sw_lkup_type lkup_type)
  1170. {
  1171. struct ice_aqc_sw_rules_elem *s_rule;
  1172. enum ice_status status;
  1173. u16 s_rule_size;
  1174. s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
  1175. s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
  1176. if (!s_rule)
  1177. return ICE_ERR_NO_MEMORY;
  1178. s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
  1179. s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);
  1180. /* FW expects number of VSIs in vsi_list resource to be 0 for clear
  1181. * command. Since memory is zero'ed out during initialization, it's not
  1182. * necessary to explicitly initialize the variable to 0.
  1183. */
  1184. status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1,
  1185. ice_aqc_opc_remove_sw_rules, NULL);
  1186. if (!status)
  1187. /* Free the vsi_list resource that we allocated */
  1188. status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
  1189. ice_aqc_opc_free_res);
  1190. devm_kfree(ice_hw_to_dev(hw), s_rule);
  1191. return status;
  1192. }
/**
 * ice_handle_rem_vsi_list_mgmt
 * @hw: pointer to the hardware structure
 * @vsi_id: ID of the VSI to remove
 * @fm_list_itr: filter management entry for which the VSI list management
 * needs to be done
 *
 * Removes @vsi_id from the entry's VSI list and, depending on what is left:
 *  - with more than one subscribed VSI, only the list membership changes
 *  - non-VLAN rules left with a single VSI are converted back to a plain
 *    "forward to VSI" rule and their VSI list is freed
 *  - when the last VSI goes away, the lookup rule itself is removed along
 *    with its book keeping entry
 */
static enum ice_status
ice_handle_rem_vsi_list_mgmt(struct ice_hw *hw, u16 vsi_id,
			     struct ice_fltr_mgmt_list_entry *fm_list_itr)
{
	struct ice_switch_info *sw = hw->switch_info;
	enum ice_status status = 0;
	enum ice_sw_lkup_type lkup_type;
	bool is_last_elem = true;
	bool conv_list = false;
	bool del_list = false;
	u16 vsi_list_id;

	lkup_type = fm_list_itr->fltr_info.lkup_type;
	vsi_list_id = fm_list_itr->fltr_info.fwd_id.vsi_list_id;

	if (fm_list_itr->vsi_count > 1) {
		/* More than one VSI subscribed: only drop this VSI from the
		 * list. NOTE(review): this assumes vsi_list_info is non-NULL
		 * whenever vsi_count > 1 -- confirm that invariant holds.
		 */
		status = ice_update_vsi_list_rule(hw, &vsi_id, 1, vsi_list_id,
						  true,
						  ice_aqc_opc_update_sw_rules,
						  lkup_type);
		if (status)
			return status;
		fm_list_itr->vsi_count--;
		is_last_elem = false;
		clear_bit(vsi_id, fm_list_itr->vsi_list_info->vsi_map);
	}

	/* For non-VLAN rules that forward packets to a VSI list, convert them
	 * to forwarding packets to a VSI if there is only one VSI left in the
	 * list. Unused lists are then removed.
	 * VLAN rules need to use VSI lists even with only one VSI.
	 */
	if (fm_list_itr->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST) {
		if (lkup_type == ICE_SW_LKUP_VLAN) {
			del_list = is_last_elem;
		} else if (fm_list_itr->vsi_count == 1) {
			conv_list = true;
			del_list = true;
		}
	}

	if (del_list) {
		/* Remove the VSI list since it is no longer used */
		struct ice_vsi_list_map_info *vsi_list_info =
			fm_list_itr->vsi_list_info;

		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
		if (status)
			return status;

		if (conv_list) {
			u16 rem_vsi_id;

			/* Find the one VSI remaining in the list's bitmap */
			rem_vsi_id = find_first_bit(vsi_list_info->vsi_map,
						    ICE_MAX_VSI);

			/* Error out when the expected last element is not in
			 * the VSI list map
			 */
			if (rem_vsi_id == ICE_MAX_VSI)
				return ICE_ERR_OUT_OF_RANGE;

			/* Change the list entry action from VSI_LIST to VSI */
			fm_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
			fm_list_itr->fltr_info.fwd_id.vsi_id = rem_vsi_id;
		}

		list_del(&vsi_list_info->list_entry);
		devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
		fm_list_itr->vsi_list_info = NULL;
	}

	if (conv_list) {
		/* Convert the rule's forward action to forwarding packets to
		 * a VSI
		 */
		struct ice_aqc_sw_rules_elem *s_rule;

		s_rule = devm_kzalloc(ice_hw_to_dev(hw),
				      ICE_SW_RULE_RX_TX_ETH_HDR_SIZE,
				      GFP_KERNEL);
		if (!s_rule)
			return ICE_ERR_NO_MEMORY;

		ice_fill_sw_rule(hw, &fm_list_itr->fltr_info, s_rule,
				 ice_aqc_opc_update_sw_rules);

		s_rule->pdata.lkup_tx_rx.index =
			cpu_to_le16(fm_list_itr->fltr_info.fltr_rule_id);

		status = ice_aq_sw_rules(hw, s_rule,
					 ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
					 ice_aqc_opc_update_sw_rules, NULL);
		devm_kfree(ice_hw_to_dev(hw), s_rule);
		if (status)
			return status;
	}

	if (is_last_elem) {
		/* Remove the lookup rule */
		struct ice_aqc_sw_rules_elem *s_rule;

		s_rule = devm_kzalloc(ice_hw_to_dev(hw),
				      ICE_SW_RULE_RX_TX_NO_HDR_SIZE,
				      GFP_KERNEL);
		if (!s_rule)
			return ICE_ERR_NO_MEMORY;

		ice_fill_sw_rule(hw, &fm_list_itr->fltr_info, s_rule,
				 ice_aqc_opc_remove_sw_rules);

		status = ice_aq_sw_rules(hw, s_rule,
					 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
					 ice_aqc_opc_remove_sw_rules, NULL);
		if (status)
			return status;
		/* NOTE(review): the early return just above exits without
		 * freeing s_rule; it stays allocated (devm-managed) until
		 * device teardown -- consider freeing before returning.
		 */

		/* Remove a book keeping entry from the MAC address list.
		 * NOTE(review): mac_list_lock is taken regardless of
		 * lkup_type; for VLAN/ethertype/promisc entries the matching
		 * list lock would be expected -- confirm this is intended.
		 */
		mutex_lock(&sw->mac_list_lock);
		list_del(&fm_list_itr->list_entry);
		mutex_unlock(&sw->mac_list_lock);
		devm_kfree(ice_hw_to_dev(hw), fm_list_itr);
		devm_kfree(ice_hw_to_dev(hw), s_rule);
	}
	return status;
}
  1306. /**
  1307. * ice_remove_mac_entry
  1308. * @hw: pointer to the hardware structure
  1309. * @f_entry: structure containing MAC forwarding information
  1310. */
  1311. static enum ice_status
  1312. ice_remove_mac_entry(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
  1313. {
  1314. struct ice_fltr_mgmt_list_entry *m_entry;
  1315. u16 vsi_id;
  1316. u8 *add;
  1317. add = &f_entry->fltr_info.l_data.mac.mac_addr[0];
  1318. m_entry = ice_find_mac_entry(hw, add);
  1319. if (!m_entry)
  1320. return ICE_ERR_PARAM;
  1321. vsi_id = f_entry->fltr_info.fwd_id.vsi_id;
  1322. return ice_handle_rem_vsi_list_mgmt(hw, vsi_id, m_entry);
  1323. }
  1324. /**
  1325. * ice_remove_mac - remove a MAC address based filter rule
  1326. * @hw: pointer to the hardware structure
  1327. * @m_list: list of MAC addresses and forwarding information
  1328. *
  1329. * This function removes either a MAC filter rule or a specific VSI from a
  1330. * VSI list for a multicast MAC address.
  1331. *
  1332. * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
  1333. * ice_add_mac. Caller should be aware that this call will only work if all
  1334. * the entries passed into m_list were added previously. It will not attempt to
  1335. * do a partial remove of entries that were found.
  1336. */
  1337. enum ice_status
  1338. ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
  1339. {
  1340. struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
  1341. u8 s_rule_size = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
  1342. struct ice_switch_info *sw = hw->switch_info;
  1343. struct ice_fltr_mgmt_list_entry *m_entry;
  1344. struct ice_fltr_list_entry *m_list_itr;
  1345. u16 elem_sent, total_elem_left;
  1346. enum ice_status status = 0;
  1347. u16 num_unicast = 0;
  1348. if (!m_list)
  1349. return ICE_ERR_PARAM;
  1350. list_for_each_entry(m_list_itr, m_list, list_entry) {
  1351. u8 *addr = m_list_itr->fltr_info.l_data.mac.mac_addr;
  1352. if (is_unicast_ether_addr(addr) && !hw->ucast_shared)
  1353. num_unicast++;
  1354. else if (is_multicast_ether_addr(addr) ||
  1355. (is_unicast_ether_addr(addr) && hw->ucast_shared))
  1356. ice_remove_mac_entry(hw, m_list_itr);
  1357. }
  1358. /* Exit if no unicast addresses found. Multicast switch rules
  1359. * were added individually
  1360. */
  1361. if (!num_unicast)
  1362. return 0;
  1363. /* Allocate switch rule buffer for the bulk update for unicast */
  1364. s_rule = devm_kcalloc(ice_hw_to_dev(hw), num_unicast, s_rule_size,
  1365. GFP_KERNEL);
  1366. if (!s_rule)
  1367. return ICE_ERR_NO_MEMORY;
  1368. r_iter = s_rule;
  1369. list_for_each_entry(m_list_itr, m_list, list_entry) {
  1370. u8 *addr = m_list_itr->fltr_info.l_data.mac.mac_addr;
  1371. if (is_unicast_ether_addr(addr)) {
  1372. m_entry = ice_find_mac_entry(hw, addr);
  1373. if (!m_entry) {
  1374. status = ICE_ERR_DOES_NOT_EXIST;
  1375. goto ice_remove_mac_exit;
  1376. }
  1377. ice_fill_sw_rule(hw, &m_entry->fltr_info,
  1378. r_iter, ice_aqc_opc_remove_sw_rules);
  1379. r_iter = (struct ice_aqc_sw_rules_elem *)
  1380. ((u8 *)r_iter + s_rule_size);
  1381. }
  1382. }
  1383. /* Call AQ bulk switch rule update for all unicast addresses */
  1384. r_iter = s_rule;
  1385. /* Call AQ switch rule in AQ_MAX chunk */
  1386. for (total_elem_left = num_unicast; total_elem_left > 0;
  1387. total_elem_left -= elem_sent) {
  1388. struct ice_aqc_sw_rules_elem *entry = r_iter;
  1389. elem_sent = min(total_elem_left,
  1390. (u16)(ICE_AQ_MAX_BUF_LEN / s_rule_size));
  1391. status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
  1392. elem_sent, ice_aqc_opc_remove_sw_rules,
  1393. NULL);
  1394. if (status)
  1395. break;
  1396. r_iter = (struct ice_aqc_sw_rules_elem *)
  1397. ((u8 *)r_iter + s_rule_size);
  1398. }
  1399. list_for_each_entry(m_list_itr, m_list, list_entry) {
  1400. u8 *addr = m_list_itr->fltr_info.l_data.mac.mac_addr;
  1401. if (is_unicast_ether_addr(addr)) {
  1402. m_entry = ice_find_mac_entry(hw, addr);
  1403. if (!m_entry)
  1404. return ICE_ERR_OUT_OF_RANGE;
  1405. mutex_lock(&sw->mac_list_lock);
  1406. list_del(&m_entry->list_entry);
  1407. mutex_unlock(&sw->mac_list_lock);
  1408. devm_kfree(ice_hw_to_dev(hw), m_entry);
  1409. }
  1410. }
  1411. ice_remove_mac_exit:
  1412. devm_kfree(ice_hw_to_dev(hw), s_rule);
  1413. return status;
  1414. }
  1415. /**
  1416. * ice_cfg_dflt_vsi - add filter rule to set/unset given VSI as default
  1417. * VSI for the switch (represented by swid)
  1418. * @hw: pointer to the hardware structure
  1419. * @vsi_id: number of VSI to set as default
  1420. * @set: true to add the above mentioned switch rule, false to remove it
  1421. * @direction: ICE_FLTR_RX or ICE_FLTR_TX
  1422. */
  1423. enum ice_status
  1424. ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_id, bool set, u8 direction)
  1425. {
  1426. struct ice_aqc_sw_rules_elem *s_rule;
  1427. struct ice_fltr_info f_info;
  1428. enum ice_adminq_opc opcode;
  1429. enum ice_status status;
  1430. u16 s_rule_size;
  1431. s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
  1432. ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
  1433. s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
  1434. if (!s_rule)
  1435. return ICE_ERR_NO_MEMORY;
  1436. memset(&f_info, 0, sizeof(f_info));
  1437. f_info.lkup_type = ICE_SW_LKUP_DFLT;
  1438. f_info.flag = direction;
  1439. f_info.fltr_act = ICE_FWD_TO_VSI;
  1440. f_info.fwd_id.vsi_id = vsi_id;
  1441. if (f_info.flag & ICE_FLTR_RX) {
  1442. f_info.src = hw->port_info->lport;
  1443. if (!set)
  1444. f_info.fltr_rule_id =
  1445. hw->port_info->dflt_rx_vsi_rule_id;
  1446. } else if (f_info.flag & ICE_FLTR_TX) {
  1447. f_info.src = vsi_id;
  1448. if (!set)
  1449. f_info.fltr_rule_id =
  1450. hw->port_info->dflt_tx_vsi_rule_id;
  1451. }
  1452. if (set)
  1453. opcode = ice_aqc_opc_add_sw_rules;
  1454. else
  1455. opcode = ice_aqc_opc_remove_sw_rules;
  1456. ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
  1457. status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
  1458. if (status || !(f_info.flag & ICE_FLTR_TX_RX))
  1459. goto out;
  1460. if (set) {
  1461. u16 index = le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
  1462. if (f_info.flag & ICE_FLTR_TX) {
  1463. hw->port_info->dflt_tx_vsi_num = vsi_id;
  1464. hw->port_info->dflt_tx_vsi_rule_id = index;
  1465. } else if (f_info.flag & ICE_FLTR_RX) {
  1466. hw->port_info->dflt_rx_vsi_num = vsi_id;
  1467. hw->port_info->dflt_rx_vsi_rule_id = index;
  1468. }
  1469. } else {
  1470. if (f_info.flag & ICE_FLTR_TX) {
  1471. hw->port_info->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
  1472. hw->port_info->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
  1473. } else if (f_info.flag & ICE_FLTR_RX) {
  1474. hw->port_info->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
  1475. hw->port_info->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
  1476. }
  1477. }
  1478. out:
  1479. devm_kfree(ice_hw_to_dev(hw), s_rule);
  1480. return status;
  1481. }
  1482. /**
  1483. * ice_remove_vlan_internal - Remove one VLAN based filter rule
  1484. * @hw: pointer to the hardware structure
  1485. * @f_entry: filter entry containing one VLAN information
  1486. */
  1487. static enum ice_status
  1488. ice_remove_vlan_internal(struct ice_hw *hw,
  1489. struct ice_fltr_list_entry *f_entry)
  1490. {
  1491. struct ice_fltr_info *new_fltr;
  1492. struct ice_fltr_mgmt_list_entry *v_list_elem;
  1493. u16 vsi_id;
  1494. new_fltr = &f_entry->fltr_info;
  1495. v_list_elem = ice_find_vlan_entry(hw, new_fltr->l_data.vlan.vlan_id);
  1496. if (!v_list_elem)
  1497. return ICE_ERR_PARAM;
  1498. vsi_id = f_entry->fltr_info.fwd_id.vsi_id;
  1499. return ice_handle_rem_vsi_list_mgmt(hw, vsi_id, v_list_elem);
  1500. }
  1501. /**
  1502. * ice_remove_vlan - Remove VLAN based filter rule
  1503. * @hw: pointer to the hardware structure
  1504. * @v_list: list of VLAN entries and forwarding information
  1505. */
  1506. enum ice_status
  1507. ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list)
  1508. {
  1509. struct ice_fltr_list_entry *v_list_itr;
  1510. enum ice_status status = 0;
  1511. if (!v_list || !hw)
  1512. return ICE_ERR_PARAM;
  1513. list_for_each_entry(v_list_itr, v_list, list_entry) {
  1514. status = ice_remove_vlan_internal(hw, v_list_itr);
  1515. if (status) {
  1516. v_list_itr->status = ICE_FLTR_STATUS_FW_FAIL;
  1517. return status;
  1518. }
  1519. v_list_itr->status = ICE_FLTR_STATUS_FW_SUCCESS;
  1520. }
  1521. return status;
  1522. }
  1523. /**
  1524. * ice_add_to_vsi_fltr_list - Add VSI filters to the list
  1525. * @hw: pointer to the hardware structure
  1526. * @vsi_id: ID of VSI to remove filters from
  1527. * @lkup_list_head: pointer to the list that has certain lookup type filters
  1528. * @vsi_list_head: pointer to the list pertaining to VSI with vsi_id
  1529. */
  1530. static enum ice_status
  1531. ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_id,
  1532. struct list_head *lkup_list_head,
  1533. struct list_head *vsi_list_head)
  1534. {
  1535. struct ice_fltr_mgmt_list_entry *fm_entry;
  1536. /* check to make sure VSI id is valid and within boundary */
  1537. if (vsi_id >=
  1538. (sizeof(fm_entry->vsi_list_info->vsi_map) * BITS_PER_BYTE - 1))
  1539. return ICE_ERR_PARAM;
  1540. list_for_each_entry(fm_entry, lkup_list_head, list_entry) {
  1541. struct ice_fltr_info *fi;
  1542. fi = &fm_entry->fltr_info;
  1543. if ((fi->fltr_act == ICE_FWD_TO_VSI &&
  1544. fi->fwd_id.vsi_id == vsi_id) ||
  1545. (fi->fltr_act == ICE_FWD_TO_VSI_LIST &&
  1546. (test_bit(vsi_id, fm_entry->vsi_list_info->vsi_map)))) {
  1547. struct ice_fltr_list_entry *tmp;
  1548. /* this memory is freed up in the caller function
  1549. * ice_remove_vsi_lkup_fltr() once filters for
  1550. * this VSI are removed
  1551. */
  1552. tmp = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*tmp),
  1553. GFP_KERNEL);
  1554. if (!tmp)
  1555. return ICE_ERR_NO_MEMORY;
  1556. memcpy(&tmp->fltr_info, fi, sizeof(*fi));
  1557. /* Expected below fields to be set to ICE_FWD_TO_VSI and
  1558. * the particular VSI id since we are only removing this
  1559. * one VSI
  1560. */
  1561. if (fi->fltr_act == ICE_FWD_TO_VSI_LIST) {
  1562. tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
  1563. tmp->fltr_info.fwd_id.vsi_id = vsi_id;
  1564. }
  1565. list_add(&tmp->list_entry, vsi_list_head);
  1566. }
  1567. }
  1568. return 0;
  1569. }
  1570. /**
  1571. * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
  1572. * @hw: pointer to the hardware structure
  1573. * @vsi_id: ID of VSI to remove filters from
  1574. * @lkup: switch rule filter lookup type
  1575. */
  1576. static void
  1577. ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_id,
  1578. enum ice_sw_lkup_type lkup)
  1579. {
  1580. struct ice_switch_info *sw = hw->switch_info;
  1581. struct ice_fltr_list_entry *fm_entry;
  1582. struct list_head remove_list_head;
  1583. struct ice_fltr_list_entry *tmp;
  1584. enum ice_status status;
  1585. INIT_LIST_HEAD(&remove_list_head);
  1586. switch (lkup) {
  1587. case ICE_SW_LKUP_MAC:
  1588. mutex_lock(&sw->mac_list_lock);
  1589. status = ice_add_to_vsi_fltr_list(hw, vsi_id,
  1590. &sw->mac_list_head,
  1591. &remove_list_head);
  1592. mutex_unlock(&sw->mac_list_lock);
  1593. if (!status) {
  1594. ice_remove_mac(hw, &remove_list_head);
  1595. goto free_fltr_list;
  1596. }
  1597. break;
  1598. case ICE_SW_LKUP_VLAN:
  1599. mutex_lock(&sw->vlan_list_lock);
  1600. status = ice_add_to_vsi_fltr_list(hw, vsi_id,
  1601. &sw->vlan_list_head,
  1602. &remove_list_head);
  1603. mutex_unlock(&sw->vlan_list_lock);
  1604. if (!status) {
  1605. ice_remove_vlan(hw, &remove_list_head);
  1606. goto free_fltr_list;
  1607. }
  1608. break;
  1609. case ICE_SW_LKUP_MAC_VLAN:
  1610. case ICE_SW_LKUP_ETHERTYPE:
  1611. case ICE_SW_LKUP_ETHERTYPE_MAC:
  1612. case ICE_SW_LKUP_PROMISC:
  1613. case ICE_SW_LKUP_PROMISC_VLAN:
  1614. case ICE_SW_LKUP_DFLT:
  1615. ice_debug(hw, ICE_DBG_SW,
  1616. "Remove filters for this lookup type hasn't been implemented yet\n");
  1617. break;
  1618. }
  1619. return;
  1620. free_fltr_list:
  1621. list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
  1622. list_del(&fm_entry->list_entry);
  1623. devm_kfree(ice_hw_to_dev(hw), fm_entry);
  1624. }
  1625. }
  1626. /**
  1627. * ice_remove_vsi_fltr - Remove all filters for a VSI
  1628. * @hw: pointer to the hardware structure
  1629. * @vsi_id: ID of VSI to remove filters from
  1630. */
  1631. void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_id)
  1632. {
  1633. ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_MAC);
  1634. ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_MAC_VLAN);
  1635. ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_PROMISC);
  1636. ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_VLAN);
  1637. ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_DFLT);
  1638. ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_ETHERTYPE);
  1639. ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_ETHERTYPE_MAC);
  1640. ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_PROMISC_VLAN);
  1641. }