bnxt_sriov.c

  1. /* Broadcom NetXtreme-C/E network driver.
  2. *
  3. * Copyright (c) 2014-2016 Broadcom Corporation
  4. * Copyright (c) 2016-2018 Broadcom Limited
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License as published by
  8. * the Free Software Foundation.
  9. */
  10. #include <linux/module.h>
  11. #include <linux/pci.h>
  12. #include <linux/netdevice.h>
  13. #include <linux/if_vlan.h>
  14. #include <linux/interrupt.h>
  15. #include <linux/etherdevice.h>
  16. #include "bnxt_hsi.h"
  17. #include "bnxt.h"
  18. #include "bnxt_ulp.h"
  19. #include "bnxt_sriov.h"
  20. #include "bnxt_vfr.h"
  21. #include "bnxt_ethtool.h"
  22. #ifdef CONFIG_BNXT_SRIOV
  23. static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
  24. struct bnxt_vf_info *vf, u16 event_id)
  25. {
  26. struct hwrm_fwd_async_event_cmpl_output *resp = bp->hwrm_cmd_resp_addr;
  27. struct hwrm_fwd_async_event_cmpl_input req = {0};
  28. struct hwrm_async_event_cmpl *async_cmpl;
  29. int rc = 0;
  30. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_ASYNC_EVENT_CMPL, -1, -1);
  31. if (vf)
  32. req.encap_async_event_target_id = cpu_to_le16(vf->fw_fid);
  33. else
  34. /* broadcast this async event to all VFs */
  35. req.encap_async_event_target_id = cpu_to_le16(0xffff);
  36. async_cmpl = (struct hwrm_async_event_cmpl *)req.encap_async_event_cmpl;
  37. async_cmpl->type = cpu_to_le16(ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT);
  38. async_cmpl->event_id = cpu_to_le16(event_id);
  39. mutex_lock(&bp->hwrm_cmd_lock);
  40. rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  41. if (rc) {
  42. netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n",
  43. rc);
  44. goto fwd_async_event_cmpl_exit;
  45. }
  46. if (resp->error_code) {
  47. netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl error %d\n",
  48. resp->error_code);
  49. rc = -1;
  50. }
  51. fwd_async_event_cmpl_exit:
  52. mutex_unlock(&bp->hwrm_cmd_lock);
  53. return rc;
  54. }
  55. static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
  56. {
  57. if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
  58. netdev_err(bp->dev, "vf ndo called while PF is down\n");
  59. return -EINVAL;
  60. }
  61. if (!bp->pf.active_vfs) {
  62. netdev_err(bp->dev, "vf ndo called while sriov is disabled\n");
  63. return -EINVAL;
  64. }
  65. if (vf_id >= bp->pf.active_vfs) {
  66. netdev_err(bp->dev, "Invalid VF id %d\n", vf_id);
  67. return -EINVAL;
  68. }
  69. return 0;
  70. }
  71. int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
  72. {
  73. struct hwrm_func_cfg_input req = {0};
  74. struct bnxt *bp = netdev_priv(dev);
  75. struct bnxt_vf_info *vf;
  76. bool old_setting = false;
  77. u32 func_flags;
  78. int rc;
  79. if (bp->hwrm_spec_code < 0x10701)
  80. return -ENOTSUPP;
  81. rc = bnxt_vf_ndo_prep(bp, vf_id);
  82. if (rc)
  83. return rc;
  84. vf = &bp->pf.vf[vf_id];
  85. if (vf->flags & BNXT_VF_SPOOFCHK)
  86. old_setting = true;
  87. if (old_setting == setting)
  88. return 0;
  89. func_flags = vf->func_flags;
  90. if (setting)
  91. func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE;
  92. else
  93. func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE;
  94. /* TODO: if the driver supports VLAN filtering on guest VLANs,
  95. * the spoof check should also include VLAN anti-spoofing
  96. */
  97. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
  98. req.fid = cpu_to_le16(vf->fw_fid);
  99. req.flags = cpu_to_le32(func_flags);
  100. rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  101. if (!rc) {
  102. vf->func_flags = func_flags;
  103. if (setting)
  104. vf->flags |= BNXT_VF_SPOOFCHK;
  105. else
  106. vf->flags &= ~BNXT_VF_SPOOFCHK;
  107. }
  108. return rc;
  109. }
  110. int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trusted)
  111. {
  112. struct bnxt *bp = netdev_priv(dev);
  113. struct bnxt_vf_info *vf;
  114. if (bnxt_vf_ndo_prep(bp, vf_id))
  115. return -EINVAL;
  116. vf = &bp->pf.vf[vf_id];
  117. if (trusted)
  118. vf->flags |= BNXT_VF_TRUST;
  119. else
  120. vf->flags &= ~BNXT_VF_TRUST;
  121. return 0;
  122. }
  123. int bnxt_get_vf_config(struct net_device *dev, int vf_id,
  124. struct ifla_vf_info *ivi)
  125. {
  126. struct bnxt *bp = netdev_priv(dev);
  127. struct bnxt_vf_info *vf;
  128. int rc;
  129. rc = bnxt_vf_ndo_prep(bp, vf_id);
  130. if (rc)
  131. return rc;
  132. ivi->vf = vf_id;
  133. vf = &bp->pf.vf[vf_id];
  134. if (is_valid_ether_addr(vf->mac_addr))
  135. memcpy(&ivi->mac, vf->mac_addr, ETH_ALEN);
  136. else
  137. memcpy(&ivi->mac, vf->vf_mac_addr, ETH_ALEN);
  138. ivi->max_tx_rate = vf->max_tx_rate;
  139. ivi->min_tx_rate = vf->min_tx_rate;
  140. ivi->vlan = vf->vlan;
  141. if (vf->flags & BNXT_VF_QOS)
  142. ivi->qos = vf->vlan >> VLAN_PRIO_SHIFT;
  143. else
  144. ivi->qos = 0;
  145. ivi->spoofchk = !!(vf->flags & BNXT_VF_SPOOFCHK);
  146. ivi->trusted = !!(vf->flags & BNXT_VF_TRUST);
  147. if (!(vf->flags & BNXT_VF_LINK_FORCED))
  148. ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
  149. else if (vf->flags & BNXT_VF_LINK_UP)
  150. ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
  151. else
  152. ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
  153. return 0;
  154. }
  155. int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
  156. {
  157. struct hwrm_func_cfg_input req = {0};
  158. struct bnxt *bp = netdev_priv(dev);
  159. struct bnxt_vf_info *vf;
  160. int rc;
  161. rc = bnxt_vf_ndo_prep(bp, vf_id);
  162. if (rc)
  163. return rc;
  164. /* reject broadcast or multicast MAC addresses; a zero MAC address
  165. * means the VF is allowed to use its own MAC address
  166. */
  167. if (is_multicast_ether_addr(mac)) {
  168. netdev_err(dev, "Invalid VF ethernet address\n");
  169. return -EINVAL;
  170. }
  171. vf = &bp->pf.vf[vf_id];
  172. memcpy(vf->mac_addr, mac, ETH_ALEN);
  173. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
  174. req.fid = cpu_to_le16(vf->fw_fid);
  175. req.flags = cpu_to_le32(vf->func_flags);
  176. req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
  177. memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
  178. return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  179. }
  180. int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,
  181. __be16 vlan_proto)
  182. {
  183. struct hwrm_func_cfg_input req = {0};
  184. struct bnxt *bp = netdev_priv(dev);
  185. struct bnxt_vf_info *vf;
  186. u16 vlan_tag;
  187. int rc;
  188. if (bp->hwrm_spec_code < 0x10201)
  189. return -ENOTSUPP;
  190. if (vlan_proto != htons(ETH_P_8021Q))
  191. return -EPROTONOSUPPORT;
  192. rc = bnxt_vf_ndo_prep(bp, vf_id);
  193. if (rc)
  194. return rc;
  195. /* TODO: need to implement proper handling of user priority;
  196. * for now, fail the command if a non-zero priority is given
  197. */
  198. if (vlan_id > 4095 || qos)
  199. return -EINVAL;
  200. vf = &bp->pf.vf[vf_id];
  201. vlan_tag = vlan_id;
  202. if (vlan_tag == vf->vlan)
  203. return 0;
  204. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
  205. req.fid = cpu_to_le16(vf->fw_fid);
  206. req.flags = cpu_to_le32(vf->func_flags);
  207. req.dflt_vlan = cpu_to_le16(vlan_tag);
  208. req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
  209. rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  210. if (!rc)
  211. vf->vlan = vlan_tag;
  212. return rc;
  213. }
  214. int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
  215. int max_tx_rate)
  216. {
  217. struct hwrm_func_cfg_input req = {0};
  218. struct bnxt *bp = netdev_priv(dev);
  219. struct bnxt_vf_info *vf;
  220. u32 pf_link_speed;
  221. int rc;
  222. rc = bnxt_vf_ndo_prep(bp, vf_id);
  223. if (rc)
  224. return rc;
  225. vf = &bp->pf.vf[vf_id];
  226. pf_link_speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
  227. if (max_tx_rate > pf_link_speed) {
  228. netdev_info(bp->dev, "max tx rate %d exceeds PF link speed for VF %d\n",
  229. max_tx_rate, vf_id);
  230. return -EINVAL;
  231. }
  232. if (min_tx_rate > pf_link_speed || min_tx_rate > max_tx_rate) {
  233. netdev_info(bp->dev, "min tx rate %d is invalid for VF %d\n",
  234. min_tx_rate, vf_id);
  235. return -EINVAL;
  236. }
  237. if (min_tx_rate == vf->min_tx_rate && max_tx_rate == vf->max_tx_rate)
  238. return 0;
  239. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
  240. req.fid = cpu_to_le16(vf->fw_fid);
  241. req.flags = cpu_to_le32(vf->func_flags);
  242. req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
  243. req.max_bw = cpu_to_le32(max_tx_rate);
  244. req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
  245. req.min_bw = cpu_to_le32(min_tx_rate);
  246. rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  247. if (!rc) {
  248. vf->min_tx_rate = min_tx_rate;
  249. vf->max_tx_rate = max_tx_rate;
  250. }
  251. return rc;
  252. }
  253. int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link)
  254. {
  255. struct bnxt *bp = netdev_priv(dev);
  256. struct bnxt_vf_info *vf;
  257. int rc;
  258. rc = bnxt_vf_ndo_prep(bp, vf_id);
  259. if (rc)
  260. return rc;
  261. vf = &bp->pf.vf[vf_id];
  262. vf->flags &= ~(BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED);
  263. switch (link) {
  264. case IFLA_VF_LINK_STATE_AUTO:
  265. vf->flags |= BNXT_VF_LINK_UP;
  266. break;
  267. case IFLA_VF_LINK_STATE_DISABLE:
  268. vf->flags |= BNXT_VF_LINK_FORCED;
  269. break;
  270. case IFLA_VF_LINK_STATE_ENABLE:
  271. vf->flags |= BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED;
  272. break;
  273. default:
  274. netdev_err(bp->dev, "Invalid link option\n");
  275. rc = -EINVAL;
  276. break;
  277. }
  278. if (vf->flags & (BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED))
  279. rc = bnxt_hwrm_fwd_async_event_cmpl(bp, vf,
  280. ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE);
  281. return rc;
  282. }
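
The VF ndo callbacks above (bnxt_get_vf_config, bnxt_set_vf_mac, bnxt_set_vf_vlan, bnxt_set_vf_bw, bnxt_set_vf_link_state, bnxt_set_vf_spoofchk and bnxt_set_vf_trust) are only reachable once they are plugged into the netdev's ops table. A minimal sketch of that wiring follows; the real table lives in bnxt.c, and the table name used here is a placeholder, not the driver's actual identifier.

/* Sketch only: the authoritative net_device_ops table is defined in bnxt.c.
 * Each .ndo_set_vf_* hook is reached from userspace via rtnetlink, e.g.
 * "ip link set <pf> vf <n> mac ..." ends up in bnxt_set_vf_mac().
 */
static const struct net_device_ops bnxt_netdev_ops_sketch = {
	.ndo_get_vf_config	= bnxt_get_vf_config,
	.ndo_set_vf_mac		= bnxt_set_vf_mac,
	.ndo_set_vf_vlan	= bnxt_set_vf_vlan,
	.ndo_set_vf_rate	= bnxt_set_vf_bw,
	.ndo_set_vf_link_state	= bnxt_set_vf_link_state,
	.ndo_set_vf_spoofchk	= bnxt_set_vf_spoofchk,
	.ndo_set_vf_trust	= bnxt_set_vf_trust,
	/* ...all other ndo hooks omitted from this sketch... */
};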
  283. static int bnxt_set_vf_attr(struct bnxt *bp, int num_vfs)
  284. {
  285. int i;
  286. struct bnxt_vf_info *vf;
  287. for (i = 0; i < num_vfs; i++) {
  288. vf = &bp->pf.vf[i];
  289. memset(vf, 0, sizeof(*vf));
  290. }
  291. return 0;
  292. }
  293. static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp, int num_vfs)
  294. {
  295. int i, rc = 0;
  296. struct bnxt_pf_info *pf = &bp->pf;
  297. struct hwrm_func_vf_resc_free_input req = {0};
  298. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESC_FREE, -1, -1);
  299. mutex_lock(&bp->hwrm_cmd_lock);
  300. for (i = pf->first_vf_id; i < pf->first_vf_id + num_vfs; i++) {
  301. req.vf_id = cpu_to_le16(i);
  302. rc = _hwrm_send_message(bp, &req, sizeof(req),
  303. HWRM_CMD_TIMEOUT);
  304. if (rc)
  305. break;
  306. }
  307. mutex_unlock(&bp->hwrm_cmd_lock);
  308. return rc;
  309. }
  310. static void bnxt_free_vf_resources(struct bnxt *bp)
  311. {
  312. struct pci_dev *pdev = bp->pdev;
  313. int i;
  314. kfree(bp->pf.vf_event_bmap);
  315. bp->pf.vf_event_bmap = NULL;
  316. for (i = 0; i < 4; i++) {
  317. if (bp->pf.hwrm_cmd_req_addr[i]) {
  318. dma_free_coherent(&pdev->dev, BNXT_PAGE_SIZE,
  319. bp->pf.hwrm_cmd_req_addr[i],
  320. bp->pf.hwrm_cmd_req_dma_addr[i]);
  321. bp->pf.hwrm_cmd_req_addr[i] = NULL;
  322. }
  323. }
  324. kfree(bp->pf.vf);
  325. bp->pf.vf = NULL;
  326. }
  327. static int bnxt_alloc_vf_resources(struct bnxt *bp, int num_vfs)
  328. {
  329. struct pci_dev *pdev = bp->pdev;
  330. u32 nr_pages, size, i, j, k = 0;
  331. bp->pf.vf = kcalloc(num_vfs, sizeof(struct bnxt_vf_info), GFP_KERNEL);
  332. if (!bp->pf.vf)
  333. return -ENOMEM;
  334. bnxt_set_vf_attr(bp, num_vfs);
  335. size = num_vfs * BNXT_HWRM_REQ_MAX_SIZE;
  336. nr_pages = size / BNXT_PAGE_SIZE;
  337. if (size & (BNXT_PAGE_SIZE - 1))
  338. nr_pages++;
  339. for (i = 0; i < nr_pages; i++) {
  340. bp->pf.hwrm_cmd_req_addr[i] =
  341. dma_alloc_coherent(&pdev->dev, BNXT_PAGE_SIZE,
  342. &bp->pf.hwrm_cmd_req_dma_addr[i],
  343. GFP_KERNEL);
  344. if (!bp->pf.hwrm_cmd_req_addr[i])
  345. return -ENOMEM;
  346. for (j = 0; j < BNXT_HWRM_REQS_PER_PAGE && k < num_vfs; j++) {
  347. struct bnxt_vf_info *vf = &bp->pf.vf[k];
  348. vf->hwrm_cmd_req_addr = bp->pf.hwrm_cmd_req_addr[i] +
  349. j * BNXT_HWRM_REQ_MAX_SIZE;
  350. vf->hwrm_cmd_req_dma_addr =
  351. bp->pf.hwrm_cmd_req_dma_addr[i] + j *
  352. BNXT_HWRM_REQ_MAX_SIZE;
  353. k++;
  354. }
  355. }
  356. /* Max 128 VFs */
  357. bp->pf.vf_event_bmap = kzalloc(16, GFP_KERNEL);
  358. if (!bp->pf.vf_event_bmap)
  359. return -ENOMEM;
  360. bp->pf.hwrm_cmd_req_pages = nr_pages;
  361. return 0;
  362. }
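
The nested loops above carve each BNXT_PAGE_SIZE page into BNXT_HWRM_REQS_PER_PAGE fixed-size request slots, one per VF. The small sketch below restates that index math; the helper name is hypothetical and the driver itself does not define such a function.

/* Hypothetical helper, for illustration only: recompute where VF 'idx'
 * ended up after the nested loops in bnxt_alloc_vf_resources() above.
 */
static void *bnxt_vf_req_buf_sketch(struct bnxt *bp, int idx)
{
	u32 page = idx / BNXT_HWRM_REQS_PER_PAGE;
	u32 slot = idx % BNXT_HWRM_REQS_PER_PAGE;

	return bp->pf.hwrm_cmd_req_addr[page] + slot * BNXT_HWRM_REQ_MAX_SIZE;
}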
  363. static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
  364. {
  365. struct hwrm_func_buf_rgtr_input req = {0};
  366. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BUF_RGTR, -1, -1);
  367. req.req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages);
  368. req.req_buf_page_size = cpu_to_le16(BNXT_PAGE_SHIFT);
  369. req.req_buf_len = cpu_to_le16(BNXT_HWRM_REQ_MAX_SIZE);
  370. req.req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]);
  371. req.req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]);
  372. req.req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]);
  373. req.req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]);
  374. return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  375. }
  376. /* Only called by PF to reserve resources for VFs, returns actual number of
  377. * VFs configured, or < 0 on error.
  378. */
  379. static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
  380. {
  381. struct hwrm_func_vf_resource_cfg_input req = {0};
  382. struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
  383. u16 vf_tx_rings, vf_rx_rings, vf_cp_rings;
  384. u16 vf_stat_ctx, vf_vnics, vf_ring_grps;
  385. struct bnxt_pf_info *pf = &bp->pf;
  386. int i, rc = 0, min = 1;
  387. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1);
  388. vf_cp_rings = bnxt_get_max_func_cp_rings_for_en(bp) - bp->cp_nr_rings;
  389. vf_stat_ctx = hw_resc->max_stat_ctxs - bp->num_stat_ctxs;
  390. if (bp->flags & BNXT_FLAG_AGG_RINGS)
  391. vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings * 2;
  392. else
  393. vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings;
  394. vf_ring_grps = hw_resc->max_hw_ring_grps - bp->rx_nr_rings;
  395. vf_tx_rings = hw_resc->max_tx_rings - bp->tx_nr_rings;
  396. vf_vnics = hw_resc->max_vnics - bp->nr_vnics;
  397. vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
  398. req.min_rsscos_ctx = cpu_to_le16(BNXT_VF_MIN_RSS_CTX);
  399. req.max_rsscos_ctx = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
  400. if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
  401. min = 0;
  402. req.min_rsscos_ctx = cpu_to_le16(min);
  403. }
  404. if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL ||
  405. pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
  406. req.min_cmpl_rings = cpu_to_le16(min);
  407. req.min_tx_rings = cpu_to_le16(min);
  408. req.min_rx_rings = cpu_to_le16(min);
  409. req.min_l2_ctxs = cpu_to_le16(min);
  410. req.min_vnics = cpu_to_le16(min);
  411. req.min_stat_ctx = cpu_to_le16(min);
  412. req.min_hw_ring_grps = cpu_to_le16(min);
  413. } else {
  414. vf_cp_rings /= num_vfs;
  415. vf_tx_rings /= num_vfs;
  416. vf_rx_rings /= num_vfs;
  417. vf_vnics /= num_vfs;
  418. vf_stat_ctx /= num_vfs;
  419. vf_ring_grps /= num_vfs;
  420. req.min_cmpl_rings = cpu_to_le16(vf_cp_rings);
  421. req.min_tx_rings = cpu_to_le16(vf_tx_rings);
  422. req.min_rx_rings = cpu_to_le16(vf_rx_rings);
  423. req.min_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
  424. req.min_vnics = cpu_to_le16(vf_vnics);
  425. req.min_stat_ctx = cpu_to_le16(vf_stat_ctx);
  426. req.min_hw_ring_grps = cpu_to_le16(vf_ring_grps);
  427. }
  428. req.max_cmpl_rings = cpu_to_le16(vf_cp_rings);
  429. req.max_tx_rings = cpu_to_le16(vf_tx_rings);
  430. req.max_rx_rings = cpu_to_le16(vf_rx_rings);
  431. req.max_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
  432. req.max_vnics = cpu_to_le16(vf_vnics);
  433. req.max_stat_ctx = cpu_to_le16(vf_stat_ctx);
  434. req.max_hw_ring_grps = cpu_to_le16(vf_ring_grps);
  435. mutex_lock(&bp->hwrm_cmd_lock);
  436. for (i = 0; i < num_vfs; i++) {
  437. req.vf_id = cpu_to_le16(pf->first_vf_id + i);
  438. rc = _hwrm_send_message(bp, &req, sizeof(req),
  439. HWRM_CMD_TIMEOUT);
  440. if (rc) {
  441. rc = -ENOMEM;
  442. break;
  443. }
  444. pf->active_vfs = i + 1;
  445. pf->vf[i].fw_fid = pf->first_vf_id + i;
  446. }
  447. mutex_unlock(&bp->hwrm_cmd_lock);
  448. if (pf->active_vfs) {
  449. u16 n = pf->active_vfs;
  450. hw_resc->max_tx_rings -= le16_to_cpu(req.min_tx_rings) * n;
  451. hw_resc->max_rx_rings -= le16_to_cpu(req.min_rx_rings) * n;
  452. hw_resc->max_hw_ring_grps -= le16_to_cpu(req.min_hw_ring_grps) *
  453. n;
  454. hw_resc->max_cp_rings -= le16_to_cpu(req.min_cmpl_rings) * n;
  455. hw_resc->max_rsscos_ctxs -= pf->active_vfs;
  456. hw_resc->max_stat_ctxs -= le16_to_cpu(req.min_stat_ctx) * n;
  457. hw_resc->max_vnics -= le16_to_cpu(req.min_vnics) * n;
  458. rc = pf->active_vfs;
  459. }
  460. return rc;
  461. }
  462. /* Only called by PF to reserve resources for VFs, returns actual number of
  463. * VFs configured, or < 0 on error.
  464. */
  465. static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
  466. {
  467. u32 rc = 0, mtu, i;
  468. u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics;
  469. struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
  470. u16 vf_ring_grps, max_stat_ctxs;
  471. struct hwrm_func_cfg_input req = {0};
  472. struct bnxt_pf_info *pf = &bp->pf;
  473. int total_vf_tx_rings = 0;
  474. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
  475. max_stat_ctxs = hw_resc->max_stat_ctxs;
  476. /* Remaining rings are distributed equally among the VFs for now */
  477. vf_cp_rings = (bnxt_get_max_func_cp_rings_for_en(bp) -
  478. bp->cp_nr_rings) / num_vfs;
  479. vf_stat_ctx = (max_stat_ctxs - bp->num_stat_ctxs) / num_vfs;
  480. if (bp->flags & BNXT_FLAG_AGG_RINGS)
  481. vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings * 2) /
  482. num_vfs;
  483. else
  484. vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings) /
  485. num_vfs;
  486. vf_ring_grps = (hw_resc->max_hw_ring_grps - bp->rx_nr_rings) / num_vfs;
  487. vf_tx_rings = (hw_resc->max_tx_rings - bp->tx_nr_rings) / num_vfs;
  488. vf_vnics = (hw_resc->max_vnics - bp->nr_vnics) / num_vfs;
  489. vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
  490. req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU |
  491. FUNC_CFG_REQ_ENABLES_MRU |
  492. FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS |
  493. FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
  494. FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
  495. FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS |
  496. FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
  497. FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS |
  498. FUNC_CFG_REQ_ENABLES_NUM_VNICS |
  499. FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS);
  500. mtu = bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
  501. req.mru = cpu_to_le16(mtu);
  502. req.mtu = cpu_to_le16(mtu);
  503. req.num_rsscos_ctxs = cpu_to_le16(1);
  504. req.num_cmpl_rings = cpu_to_le16(vf_cp_rings);
  505. req.num_tx_rings = cpu_to_le16(vf_tx_rings);
  506. req.num_rx_rings = cpu_to_le16(vf_rx_rings);
  507. req.num_hw_ring_grps = cpu_to_le16(vf_ring_grps);
  508. req.num_l2_ctxs = cpu_to_le16(4);
  509. req.num_vnics = cpu_to_le16(vf_vnics);
  510. /* FIXME spec currently uses 1 bit for stats ctx */
  511. req.num_stat_ctxs = cpu_to_le16(vf_stat_ctx);
  512. mutex_lock(&bp->hwrm_cmd_lock);
  513. for (i = 0; i < num_vfs; i++) {
  514. int vf_tx_rsvd = vf_tx_rings;
  515. req.fid = cpu_to_le16(pf->first_vf_id + i);
  516. rc = _hwrm_send_message(bp, &req, sizeof(req),
  517. HWRM_CMD_TIMEOUT);
  518. if (rc)
  519. break;
  520. pf->active_vfs = i + 1;
  521. pf->vf[i].fw_fid = le16_to_cpu(req.fid);
  522. rc = __bnxt_hwrm_get_tx_rings(bp, pf->vf[i].fw_fid,
  523. &vf_tx_rsvd);
  524. if (rc)
  525. break;
  526. total_vf_tx_rings += vf_tx_rsvd;
  527. }
  528. mutex_unlock(&bp->hwrm_cmd_lock);
  529. if (rc)
  530. rc = -ENOMEM;
  531. if (pf->active_vfs) {
  532. hw_resc->max_tx_rings -= total_vf_tx_rings;
  533. hw_resc->max_rx_rings -= vf_rx_rings * num_vfs;
  534. hw_resc->max_hw_ring_grps -= vf_ring_grps * num_vfs;
  535. hw_resc->max_cp_rings -= vf_cp_rings * num_vfs;
  536. hw_resc->max_rsscos_ctxs -= num_vfs;
  537. hw_resc->max_stat_ctxs -= vf_stat_ctx * num_vfs;
  538. hw_resc->max_vnics -= vf_vnics * num_vfs;
  539. rc = pf->active_vfs;
  540. }
  541. return rc;
  542. }
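
Both reservation paths above hand out the PF's leftover rings using plain integer division, so any remainder is simply not reserved for VFs. Below is a worked example with made-up numbers, wrapped in a hypothetical helper to make the arithmetic explicit.

/* Illustration only, numbers are invented: with max_tx_rings = 126 and the
 * PF keeping tx_nr_rings = 8, a request for 10 VFs offers each VF
 * (126 - 8) / 10 = 11 TX rings; the 8 left-over rings stay unreserved.
 */
static inline u16 bnxt_per_vf_share_sketch(u16 max_resc, u16 pf_used, int num_vfs)
{
	return (max_resc - pf_used) / num_vfs;
}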
  543. static int bnxt_func_cfg(struct bnxt *bp, int num_vfs)
  544. {
  545. if (BNXT_NEW_RM(bp))
  546. return bnxt_hwrm_func_vf_resc_cfg(bp, num_vfs);
  547. else
  548. return bnxt_hwrm_func_cfg(bp, num_vfs);
  549. }
  550. static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
  551. {
  552. int rc = 0, vfs_supported;
  553. int min_rx_rings, min_tx_rings, min_rss_ctxs;
  554. struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
  555. int tx_ok = 0, rx_ok = 0, rss_ok = 0;
  556. int avail_cp, avail_stat;
  557. /* Check if we can enable the requested number of VFs. At a minimum
  558. * we require 1 RX and 1 TX ring for each VF. In this minimum config,
  559. * features like TPA will not be available.
  560. */
  561. vfs_supported = *num_vfs;
  562. avail_cp = bnxt_get_max_func_cp_rings_for_en(bp) - bp->cp_nr_rings;
  563. avail_stat = hw_resc->max_stat_ctxs - bp->num_stat_ctxs;
  564. avail_cp = min_t(int, avail_cp, avail_stat);
  565. while (vfs_supported) {
  566. min_rx_rings = vfs_supported;
  567. min_tx_rings = vfs_supported;
  568. min_rss_ctxs = vfs_supported;
  569. if (bp->flags & BNXT_FLAG_AGG_RINGS) {
  570. if (hw_resc->max_rx_rings - bp->rx_nr_rings * 2 >=
  571. min_rx_rings)
  572. rx_ok = 1;
  573. } else {
  574. if (hw_resc->max_rx_rings - bp->rx_nr_rings >=
  575. min_rx_rings)
  576. rx_ok = 1;
  577. }
  578. if (hw_resc->max_vnics - bp->nr_vnics < min_rx_rings ||
  579. avail_cp < min_rx_rings)
  580. rx_ok = 0;
  581. if (hw_resc->max_tx_rings - bp->tx_nr_rings >= min_tx_rings &&
  582. avail_cp >= min_tx_rings)
  583. tx_ok = 1;
  584. if (hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs >=
  585. min_rss_ctxs)
  586. rss_ok = 1;
  587. if (tx_ok && rx_ok && rss_ok)
  588. break;
  589. vfs_supported--;
  590. }
  591. if (!vfs_supported) {
  592. netdev_err(bp->dev, "Cannot enable VFs as all resources are used by the PF\n");
  593. return -EINVAL;
  594. }
  595. if (vfs_supported != *num_vfs) {
  596. netdev_info(bp->dev, "Requested VFs %d, can enable %d\n",
  597. *num_vfs, vfs_supported);
  598. *num_vfs = vfs_supported;
  599. }
  600. rc = bnxt_alloc_vf_resources(bp, *num_vfs);
  601. if (rc)
  602. goto err_out1;
  603. /* Reserve resources for VFs */
  604. rc = bnxt_func_cfg(bp, *num_vfs);
  605. if (rc != *num_vfs) {
  606. if (rc <= 0) {
  607. netdev_warn(bp->dev, "Unable to reserve resources for SRIOV.\n");
  608. *num_vfs = 0;
  609. goto err_out2;
  610. }
  611. netdev_warn(bp->dev, "Only able to reserve resources for %d VFs.\n", rc);
  612. *num_vfs = rc;
  613. }
  614. /* Register buffers for VFs */
  615. rc = bnxt_hwrm_func_buf_rgtr(bp);
  616. if (rc)
  617. goto err_out2;
  618. bnxt_ulp_sriov_cfg(bp, *num_vfs);
  619. rc = pci_enable_sriov(bp->pdev, *num_vfs);
  620. if (rc)
  621. goto err_out2;
  622. return 0;
  623. err_out2:
  624. /* Free the resources reserved for the VFs */
  625. bnxt_hwrm_func_vf_resource_free(bp, *num_vfs);
  626. err_out1:
  627. bnxt_free_vf_resources(bp);
  628. return rc;
  629. }
  630. void bnxt_sriov_disable(struct bnxt *bp)
  631. {
  632. u16 num_vfs = pci_num_vf(bp->pdev);
  633. if (!num_vfs)
  634. return;
  635. /* synchronize VF and VF-rep create and destroy */
  636. mutex_lock(&bp->sriov_lock);
  637. bnxt_vf_reps_destroy(bp);
  638. if (pci_vfs_assigned(bp->pdev)) {
  639. bnxt_hwrm_fwd_async_event_cmpl(
  640. bp, NULL, ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD);
  641. netdev_warn(bp->dev, "Unable to free %d VFs because some are assigned to VMs.\n",
  642. num_vfs);
  643. } else {
  644. pci_disable_sriov(bp->pdev);
  645. /* Free the HW resources reserved for the VFs */
  646. bnxt_hwrm_func_vf_resource_free(bp, num_vfs);
  647. }
  648. mutex_unlock(&bp->sriov_lock);
  649. bnxt_free_vf_resources(bp);
  650. bp->pf.active_vfs = 0;
  651. /* Reclaim all resources for the PF. */
  652. rtnl_lock();
  653. bnxt_restore_pf_fw_resources(bp);
  654. rtnl_unlock();
  655. bnxt_ulp_sriov_cfg(bp, 0);
  656. }
  657. int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
  658. {
  659. struct net_device *dev = pci_get_drvdata(pdev);
  660. struct bnxt *bp = netdev_priv(dev);
  661. if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
  662. netdev_warn(dev, "SRIOV is not allowed when the irq mode is not MSIX\n");
  663. return 0;
  664. }
  665. rtnl_lock();
  666. if (!netif_running(dev)) {
  667. netdev_warn(dev, "Rejecting SRIOV config request since the interface is down\n");
  668. rtnl_unlock();
  669. return 0;
  670. }
  671. bp->sriov_cfg = true;
  672. rtnl_unlock();
  673. if (pci_vfs_assigned(bp->pdev)) {
  674. netdev_warn(dev, "Unable to configure SRIOV since some VFs are assigned to VMs.\n");
  675. num_vfs = 0;
  676. goto sriov_cfg_exit;
  677. }
  678. /* Check if the number of VFs already enabled matches the request */
  679. if (num_vfs && num_vfs == bp->pf.active_vfs)
  680. goto sriov_cfg_exit;
  681. /* if there are pre-existing VFs, clean them up */
  682. bnxt_sriov_disable(bp);
  683. if (!num_vfs)
  684. goto sriov_cfg_exit;
  685. bnxt_sriov_enable(bp, &num_vfs);
  686. sriov_cfg_exit:
  687. bp->sriov_cfg = false;
  688. wake_up(&bp->sriov_cfg_wait);
  689. return num_vfs;
  690. }
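
bnxt_sriov_configure() is the driver's entry point for the PCI core's SR-IOV sysfs interface: writing N to the PF's sriov_numvfs attribute invokes the driver's .sriov_configure hook with num_vfs = N, and writing 0 disables SR-IOV. A sketch of that wiring is below; the real pci_driver table lives in bnxt.c, and the probe/remove names shown are assumptions.

/* Sketch only: the authoritative pci_driver table lives in bnxt.c.
 * "echo 4 > /sys/bus/pci/devices/<pf-bdf>/sriov_numvfs" reaches
 * bnxt_sriov_configure(pdev, 4); "echo 0 > ..." tears the VFs down.
 */
static struct pci_driver bnxt_pci_driver_sketch = {
	.name			= "bnxt_en",
	.probe			= bnxt_init_one,	/* assumed name */
	.remove			= bnxt_remove_one,	/* assumed name */
#ifdef CONFIG_BNXT_SRIOV
	.sriov_configure	= bnxt_sriov_configure,
#endif
};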
  691. static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
  692. void *encap_resp, __le64 encap_resp_addr,
  693. __le16 encap_resp_cpr, u32 msg_size)
  694. {
  695. int rc = 0;
  696. struct hwrm_fwd_resp_input req = {0};
  697. struct hwrm_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
  698. if (BNXT_FWD_RESP_SIZE_ERR(msg_size))
  699. return -EINVAL;
  700. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_RESP, -1, -1);
  701. /* Set the new target id */
  702. req.target_id = cpu_to_le16(vf->fw_fid);
  703. req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
  704. req.encap_resp_len = cpu_to_le16(msg_size);
  705. req.encap_resp_addr = encap_resp_addr;
  706. req.encap_resp_cmpl_ring = encap_resp_cpr;
  707. memcpy(req.encap_resp, encap_resp, msg_size);
  708. mutex_lock(&bp->hwrm_cmd_lock);
  709. rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  710. if (rc) {
  711. netdev_err(bp->dev, "hwrm_fwd_resp failed. rc:%d\n", rc);
  712. goto fwd_resp_exit;
  713. }
  714. if (resp->error_code) {
  715. netdev_err(bp->dev, "hwrm_fwd_resp error %d\n",
  716. resp->error_code);
  717. rc = -1;
  718. }
  719. fwd_resp_exit:
  720. mutex_unlock(&bp->hwrm_cmd_lock);
  721. return rc;
  722. }
  723. static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
  724. u32 msg_size)
  725. {
  726. int rc = 0;
  727. struct hwrm_reject_fwd_resp_input req = {0};
  728. struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
  729. if (BNXT_REJ_FWD_RESP_SIZE_ERR(msg_size))
  730. return -EINVAL;
  731. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_REJECT_FWD_RESP, -1, -1);
  732. /* Set the new target id */
  733. req.target_id = cpu_to_le16(vf->fw_fid);
  734. req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
  735. memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);
  736. mutex_lock(&bp->hwrm_cmd_lock);
  737. rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  738. if (rc) {
  739. netdev_err(bp->dev, "hwrm_fwd_err_resp failed. rc:%d\n", rc);
  740. goto fwd_err_resp_exit;
  741. }
  742. if (resp->error_code) {
  743. netdev_err(bp->dev, "hwrm_fwd_err_resp error %d\n",
  744. resp->error_code);
  745. rc = -1;
  746. }
  747. fwd_err_resp_exit:
  748. mutex_unlock(&bp->hwrm_cmd_lock);
  749. return rc;
  750. }
  751. static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
  752. u32 msg_size)
  753. {
  754. int rc = 0;
  755. struct hwrm_exec_fwd_resp_input req = {0};
  756. struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
  757. if (BNXT_EXEC_FWD_RESP_SIZE_ERR(msg_size))
  758. return -EINVAL;
  759. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_EXEC_FWD_RESP, -1, -1);
  760. /* Set the new target id */
  761. req.target_id = cpu_to_le16(vf->fw_fid);
  762. req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
  763. memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);
  764. mutex_lock(&bp->hwrm_cmd_lock);
  765. rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  766. if (rc) {
  767. netdev_err(bp->dev, "hwrm_exec_fwd_resp failed. rc:%d\n", rc);
  768. goto exec_fwd_resp_exit;
  769. }
  770. if (resp->error_code) {
  771. netdev_err(bp->dev, "hwrm_exec_fwd_resp error %d\n",
  772. resp->error_code);
  773. rc = -1;
  774. }
  775. exec_fwd_resp_exit:
  776. mutex_unlock(&bp->hwrm_cmd_lock);
  777. return rc;
  778. }
  779. static int bnxt_vf_configure_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
  780. {
  781. u32 msg_size = sizeof(struct hwrm_func_vf_cfg_input);
  782. struct hwrm_func_vf_cfg_input *req =
  783. (struct hwrm_func_vf_cfg_input *)vf->hwrm_cmd_req_addr;
  784. /* Allow the VF to set a valid MAC address if trust is on or
  785. * if the PF-assigned MAC address is zero
  786. */
  787. if (req->enables & cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR)) {
  788. if (is_valid_ether_addr(req->dflt_mac_addr) &&
  789. ((vf->flags & BNXT_VF_TRUST) ||
  790. !is_valid_ether_addr(vf->mac_addr) ||
  791. ether_addr_equal(req->dflt_mac_addr, vf->mac_addr))) {
  792. ether_addr_copy(vf->vf_mac_addr, req->dflt_mac_addr);
  793. return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
  794. }
  795. return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
  796. }
  797. return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
  798. }
  799. static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
  800. {
  801. u32 msg_size = sizeof(struct hwrm_cfa_l2_filter_alloc_input);
  802. struct hwrm_cfa_l2_filter_alloc_input *req =
  803. (struct hwrm_cfa_l2_filter_alloc_input *)vf->hwrm_cmd_req_addr;
  804. bool mac_ok = false;
  805. if (!is_valid_ether_addr((const u8 *)req->l2_addr))
  806. return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
  807. /* Allow the VF to set a valid MAC address if trust is on.
  808. * Otherwise the address must match the MAC address in the PF's
  809. * context, if one is assigned, or else the VF's own MAC address
  810. * when the firmware spec is >= 1.2.2.
  811. */
  812. if (vf->flags & BNXT_VF_TRUST) {
  813. mac_ok = true;
  814. } else if (is_valid_ether_addr(vf->mac_addr)) {
  815. if (ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr))
  816. mac_ok = true;
  817. } else if (is_valid_ether_addr(vf->vf_mac_addr)) {
  818. if (ether_addr_equal((const u8 *)req->l2_addr, vf->vf_mac_addr))
  819. mac_ok = true;
  820. } else {
  821. /* There are two cases:
  822. * 1. If firmware spec < 0x10202, the VF MAC address is not forwarded
  823. * to the PF and so it doesn't have to match.
  824. * 2. Allow the VF to modify its own MAC when the PF has not assigned a
  825. * valid MAC address and firmware spec >= 0x10202.
  826. */
  827. mac_ok = true;
  828. }
  829. if (mac_ok)
  830. return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
  831. return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
  832. }
  833. static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
  834. {
  835. int rc = 0;
  836. if (!(vf->flags & BNXT_VF_LINK_FORCED)) {
  837. /* real link */
  838. rc = bnxt_hwrm_exec_fwd_resp(
  839. bp, vf, sizeof(struct hwrm_port_phy_qcfg_input));
  840. } else {
  841. struct hwrm_port_phy_qcfg_output phy_qcfg_resp;
  842. struct hwrm_port_phy_qcfg_input *phy_qcfg_req;
  843. phy_qcfg_req =
  844. (struct hwrm_port_phy_qcfg_input *)vf->hwrm_cmd_req_addr;
  845. mutex_lock(&bp->hwrm_cmd_lock);
  846. memcpy(&phy_qcfg_resp, &bp->link_info.phy_qcfg_resp,
  847. sizeof(phy_qcfg_resp));
  848. mutex_unlock(&bp->hwrm_cmd_lock);
  849. phy_qcfg_resp.resp_len = cpu_to_le16(sizeof(phy_qcfg_resp));
  850. phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id;
  851. phy_qcfg_resp.valid = 1;
  852. if (vf->flags & BNXT_VF_LINK_UP) {
  853. /* if physical link is down, force link up on VF */
  854. if (phy_qcfg_resp.link !=
  855. PORT_PHY_QCFG_RESP_LINK_LINK) {
  856. phy_qcfg_resp.link =
  857. PORT_PHY_QCFG_RESP_LINK_LINK;
  858. phy_qcfg_resp.link_speed = cpu_to_le16(
  859. PORT_PHY_QCFG_RESP_LINK_SPEED_10GB);
  860. phy_qcfg_resp.duplex_cfg =
  861. PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL;
  862. phy_qcfg_resp.duplex_state =
  863. PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL;
  864. phy_qcfg_resp.pause =
  865. (PORT_PHY_QCFG_RESP_PAUSE_TX |
  866. PORT_PHY_QCFG_RESP_PAUSE_RX);
  867. }
  868. } else {
  869. /* force link down */
  870. phy_qcfg_resp.link = PORT_PHY_QCFG_RESP_LINK_NO_LINK;
  871. phy_qcfg_resp.link_speed = 0;
  872. phy_qcfg_resp.duplex_state =
  873. PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF;
  874. phy_qcfg_resp.pause = 0;
  875. }
  876. rc = bnxt_hwrm_fwd_resp(bp, vf, &phy_qcfg_resp,
  877. phy_qcfg_req->resp_addr,
  878. phy_qcfg_req->cmpl_ring,
  879. sizeof(phy_qcfg_resp));
  880. }
  881. return rc;
  882. }
  883. static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf)
  884. {
  885. int rc = 0;
  886. struct input *encap_req = vf->hwrm_cmd_req_addr;
  887. u32 req_type = le16_to_cpu(encap_req->req_type);
  888. switch (req_type) {
  889. case HWRM_FUNC_VF_CFG:
  890. rc = bnxt_vf_configure_mac(bp, vf);
  891. break;
  892. case HWRM_CFA_L2_FILTER_ALLOC:
  893. rc = bnxt_vf_validate_set_mac(bp, vf);
  894. break;
  895. case HWRM_FUNC_CFG:
  896. /* TODO Validate if VF is allowed to change mac address,
  897. * mtu, num of rings etc
  898. */
  899. rc = bnxt_hwrm_exec_fwd_resp(
  900. bp, vf, sizeof(struct hwrm_func_cfg_input));
  901. break;
  902. case HWRM_PORT_PHY_QCFG:
  903. rc = bnxt_vf_set_link(bp, vf);
  904. break;
  905. default:
  906. break;
  907. }
  908. return rc;
  909. }
  910. void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
  911. {
  912. u32 i = 0, active_vfs = bp->pf.active_vfs, vf_id;
  913. /* Scan through the VFs and process commands */
  914. while (1) {
  915. vf_id = find_next_bit(bp->pf.vf_event_bmap, active_vfs, i);
  916. if (vf_id >= active_vfs)
  917. break;
  918. clear_bit(vf_id, bp->pf.vf_event_bmap);
  919. bnxt_vf_req_validate_snd(bp, &bp->pf.vf[vf_id]);
  920. i = vf_id + 1;
  921. }
  922. }
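
bnxt_hwrm_exec_fwd_req() is the consumer half of the forwarded-command path. The producer half lives in the PF completion handling in bnxt.c: when firmware forwards a VF's HWRM request, the PF marks that VF in vf_event_bmap and kicks its service task, which then calls the function above. The sketch below assumes the service-task field and event-bit names; treat them as illustrative rather than exact.

/* Sketch of the assumed producer side (actual code is in bnxt.c): mark the
 * VF whose request was forwarded, then let the service task run
 * bnxt_hwrm_exec_fwd_req().
 */
static void bnxt_queue_vf_fwd_req_sketch(struct bnxt *bp, u16 src_fid)
{
	u16 vf_id = src_fid - bp->pf.first_vf_id;

	set_bit(vf_id, bp->pf.vf_event_bmap);
	set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);	/* assumed bit name */
	schedule_work(&bp->sp_task);	/* assumed work item */
}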
  923. void bnxt_update_vf_mac(struct bnxt *bp)
  924. {
  925. struct hwrm_func_qcaps_input req = {0};
  926. struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
  927. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
  928. req.fid = cpu_to_le16(0xffff);
  929. mutex_lock(&bp->hwrm_cmd_lock);
  930. if (_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
  931. goto update_vf_mac_exit;
  932. /* Store MAC address from the firmware. There are 2 cases:
  933. * 1. MAC address is valid. It is assigned from the PF and we
  934. * need to override the current VF MAC address with it.
  935. * 2. MAC address is zero. The VF will use a random MAC address by
  936. * default but the stored zero MAC will allow the VF user to change
  937. * the random MAC address using ndo_set_mac_address() if desired.
  938. */
  939. if (!ether_addr_equal(resp->mac_address, bp->vf.mac_addr))
  940. memcpy(bp->vf.mac_addr, resp->mac_address, ETH_ALEN);
  941. /* overwrite netdev dev_addr with admin VF MAC */
  942. if (is_valid_ether_addr(bp->vf.mac_addr))
  943. memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
  944. update_vf_mac_exit:
  945. mutex_unlock(&bp->hwrm_cmd_lock);
  946. }
  947. int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
  948. {
  949. struct hwrm_func_vf_cfg_input req = {0};
  950. int rc = 0;
  951. if (!BNXT_VF(bp))
  952. return 0;
  953. if (bp->hwrm_spec_code < 0x10202) {
  954. if (is_valid_ether_addr(bp->vf.mac_addr))
  955. rc = -EADDRNOTAVAIL;
  956. goto mac_done;
  957. }
  958. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
  959. req.enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
  960. memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
  961. rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  962. mac_done:
  963. if (rc && strict) {
  964. rc = -EADDRNOTAVAIL;
  965. netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n",
  966. mac);
  967. return rc;
  968. }
  969. return 0;
  970. }
  971. #else
  972. void bnxt_sriov_disable(struct bnxt *bp)
  973. {
  974. }
  975. void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
  976. {
  977. netdev_err(bp->dev, "Invalid VF message received when SRIOV is not enabled\n");
  978. }
  979. void bnxt_update_vf_mac(struct bnxt *bp)
  980. {
  981. }
  982. int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
  983. {
  984. return 0;
  985. }
  986. #endif