bnxt_sriov.c

/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"

#ifdef CONFIG_BNXT_SRIOV
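/* Forward an async event completion to a VF, or to all VFs when @vf is
 * NULL (target id 0xffff), by encapsulating it in an
 * HWRM_FWD_ASYNC_EVENT_CMPL request to the firmware.
 */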
static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
					  struct bnxt_vf_info *vf, u16 event_id)
{
	struct hwrm_fwd_async_event_cmpl_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_fwd_async_event_cmpl_input req = {0};
	struct hwrm_async_event_cmpl *async_cmpl;
	int rc = 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_ASYNC_EVENT_CMPL, -1, -1);
	if (vf)
		req.encap_async_event_target_id = cpu_to_le16(vf->fw_fid);
	else
		/* broadcast this async event to all VFs */
		req.encap_async_event_target_id = cpu_to_le16(0xffff);
	async_cmpl = (struct hwrm_async_event_cmpl *)req.encap_async_event_cmpl;
	async_cmpl->type =
		cpu_to_le16(HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT);
	async_cmpl->event_id = cpu_to_le16(event_id);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);

	if (rc) {
		netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n",
			   rc);
		goto fwd_async_event_cmpl_exit;
	}

	if (resp->error_code) {
		netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl error %d\n",
			   resp->error_code);
		rc = -1;
	}

fwd_async_event_cmpl_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
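
/* Common validation for the VF ndo handlers below: the PF must be up,
 * SR-IOV must be active, and vf_id must be within range.
 */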
static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
{
	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
		netdev_err(bp->dev, "vf ndo called even though PF is down\n");
		return -EINVAL;
	}
	if (!bp->pf.active_vfs) {
		netdev_err(bp->dev, "vf ndo called even though sriov is disabled\n");
		return -EINVAL;
	}
	if (vf_id >= bp->pf.active_vfs) {
		netdev_err(bp->dev, "Invalid VF id %d\n", vf_id);
		return -EINVAL;
	}
	return 0;
}
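
/* .ndo_set_vf_spoofchk handler: program the firmware's source MAC
 * address check for this VF and track the setting in vf->flags.
 */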
int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	bool old_setting = false;
	u32 func_flags;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	vf = &bp->pf.vf[vf_id];
	if (vf->flags & BNXT_VF_SPOOFCHK)
		old_setting = true;
	if (old_setting == setting)
		return 0;

	func_flags = vf->func_flags;
	if (setting)
		func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK;
	else
		func_flags &= ~FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK;
	/* TODO: if the driver supports VLAN filter on guest VLAN,
	 * the spoof check should also include vlan anti-spoofing
	 */
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.flags = cpu_to_le32(func_flags);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		vf->func_flags = func_flags;
		if (setting)
			vf->flags |= BNXT_VF_SPOOFCHK;
		else
			vf->flags &= ~BNXT_VF_SPOOFCHK;
	}
	return rc;
}
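
/* .ndo_get_vf_config handler: report the driver's cached per-VF
 * settings (MAC, tx rates, VLAN, QOS, spoofchk, link state).
 */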
int bnxt_get_vf_config(struct net_device *dev, int vf_id,
		       struct ifla_vf_info *ivi)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	ivi->vf = vf_id;
	vf = &bp->pf.vf[vf_id];

	memcpy(&ivi->mac, vf->mac_addr, ETH_ALEN);
	ivi->max_tx_rate = vf->max_tx_rate;
	ivi->min_tx_rate = vf->min_tx_rate;
	ivi->vlan = vf->vlan;
	ivi->qos = vf->flags & BNXT_VF_QOS;
	ivi->spoofchk = vf->flags & BNXT_VF_SPOOFCHK;
	if (!(vf->flags & BNXT_VF_LINK_FORCED))
		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
	else if (vf->flags & BNXT_VF_LINK_UP)
		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
	else
		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;

	return 0;
}
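
/* .ndo_set_vf_mac handler: remember the administratively assigned MAC
 * and program it as the VF's default MAC address via HWRM_FUNC_CFG.
 */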
int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;
	/* reject broadcast or multicast MAC addresses; a zero MAC address
	 * means the VF is allowed to use its own MAC address
	 */
	if (is_multicast_ether_addr(mac)) {
		netdev_err(dev, "Invalid VF ethernet address\n");
		return -EINVAL;
	}
	vf = &bp->pf.vf[vf_id];

	memcpy(vf->mac_addr, mac, ETH_ALEN);
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.flags = cpu_to_le32(vf->func_flags);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
	memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
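
/* .ndo_set_vf_vlan handler: program a default VLAN for the VF.
 * Requires HWRM spec 1.2.1 or newer and only supports 802.1Q tags.
 */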
int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,
		     __be16 vlan_proto)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	u16 vlan_tag;
	int rc;

	if (bp->hwrm_spec_code < 0x10201)
		return -ENOTSUPP;

	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	/* TODO: proper handling of user priority still needs to be
	 * implemented; for now, fail the command if a non-zero priority
	 * is given
	 */
	if (vlan_id > 4095 || qos)
		return -EINVAL;

	vf = &bp->pf.vf[vf_id];
	vlan_tag = vlan_id;
	if (vlan_tag == vf->vlan)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.flags = cpu_to_le32(vf->func_flags);
	req.dflt_vlan = cpu_to_le16(vlan_tag);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		vf->vlan = vlan_tag;
	return rc;
}
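
/* .ndo_set_vf_rate handler: validate the requested min/max tx rates
 * against the PF link speed and program them via HWRM_FUNC_CFG.
 */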
int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
		   int max_tx_rate)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	u32 pf_link_speed;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	vf = &bp->pf.vf[vf_id];
	pf_link_speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
	if (max_tx_rate > pf_link_speed) {
		netdev_info(bp->dev, "max tx rate %d exceeds PF link speed for VF %d\n",
			    max_tx_rate, vf_id);
		return -EINVAL;
	}

	if (min_tx_rate > pf_link_speed || min_tx_rate > max_tx_rate) {
		netdev_info(bp->dev, "min tx rate %d is invalid for VF %d\n",
			    min_tx_rate, vf_id);
		return -EINVAL;
	}
	if (min_tx_rate == vf->min_tx_rate && max_tx_rate == vf->max_tx_rate)
		return 0;
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.flags = cpu_to_le32(vf->func_flags);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
	req.max_bw = cpu_to_le32(max_tx_rate);
	req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
	req.min_bw = cpu_to_le32(min_tx_rate);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		vf->min_tx_rate = min_tx_rate;
		vf->max_tx_rate = max_tx_rate;
	}
	return rc;
}
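
/* .ndo_set_vf_link_state handler: record the requested link state in
 * vf->flags and notify the VF with a forwarded link status change
 * async event.
 */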
int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	vf = &bp->pf.vf[vf_id];

	vf->flags &= ~(BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED);
	switch (link) {
	case IFLA_VF_LINK_STATE_AUTO:
		vf->flags |= BNXT_VF_LINK_UP;
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		vf->flags |= BNXT_VF_LINK_FORCED;
		break;
	case IFLA_VF_LINK_STATE_ENABLE:
		vf->flags |= BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED;
		break;
	default:
		netdev_err(bp->dev, "Invalid link option\n");
		rc = -EINVAL;
		break;
	}
	if (vf->flags & (BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED))
		rc = bnxt_hwrm_fwd_async_event_cmpl(bp, vf,
			HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE);
	return rc;
}
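
/* Reset the per-VF state of the first num_vfs VFs to defaults: the QOS
 * and link-up flags set, everything else zeroed.
 */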
static int bnxt_set_vf_attr(struct bnxt *bp, int num_vfs)
{
	int i;
	struct bnxt_vf_info *vf;

	for (i = 0; i < num_vfs; i++) {
		vf = &bp->pf.vf[i];
		memset(vf, 0, sizeof(*vf));
		vf->flags = BNXT_VF_QOS | BNXT_VF_LINK_UP;
	}
	return 0;
}
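
/* Ask the firmware to free the resources of each VF in the range
 * [first_vf_id, first_vf_id + num_vfs).
 */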
static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp, int num_vfs)
{
	int i, rc = 0;
	struct bnxt_pf_info *pf = &bp->pf;
	struct hwrm_func_vf_resc_free_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESC_FREE, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = pf->first_vf_id; i < pf->first_vf_id + num_vfs; i++) {
		req.vf_id = cpu_to_le16(i);
		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
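
/* Free the VF event bitmap, the DMA pages used for forwarded VF HWRM
 * commands, and the per-VF info array.
 */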
static void bnxt_free_vf_resources(struct bnxt *bp)
{
	struct pci_dev *pdev = bp->pdev;
	int i;

	kfree(bp->pf.vf_event_bmap);
	bp->pf.vf_event_bmap = NULL;

	for (i = 0; i < 4; i++) {
		if (bp->pf.hwrm_cmd_req_addr[i]) {
			dma_free_coherent(&pdev->dev, BNXT_PAGE_SIZE,
					  bp->pf.hwrm_cmd_req_addr[i],
					  bp->pf.hwrm_cmd_req_dma_addr[i]);
			bp->pf.hwrm_cmd_req_addr[i] = NULL;
		}
	}

	kfree(bp->pf.vf);
	bp->pf.vf = NULL;
}
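
/* Allocate the per-VF info array plus enough DMA-coherent pages to hold
 * one BNXT_HWRM_REQ_MAX_SIZE command buffer per VF, then carve the
 * pages up among the VFs.
 */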
static int bnxt_alloc_vf_resources(struct bnxt *bp, int num_vfs)
{
	struct pci_dev *pdev = bp->pdev;
	u32 nr_pages, size, i, j, k = 0;

	bp->pf.vf = kcalloc(num_vfs, sizeof(struct bnxt_vf_info), GFP_KERNEL);
	if (!bp->pf.vf)
		return -ENOMEM;

	bnxt_set_vf_attr(bp, num_vfs);

	size = num_vfs * BNXT_HWRM_REQ_MAX_SIZE;
	nr_pages = size / BNXT_PAGE_SIZE;
	if (size & (BNXT_PAGE_SIZE - 1))
		nr_pages++;

	for (i = 0; i < nr_pages; i++) {
		bp->pf.hwrm_cmd_req_addr[i] =
			dma_alloc_coherent(&pdev->dev, BNXT_PAGE_SIZE,
					   &bp->pf.hwrm_cmd_req_dma_addr[i],
					   GFP_KERNEL);

		if (!bp->pf.hwrm_cmd_req_addr[i])
			return -ENOMEM;

		for (j = 0; j < BNXT_HWRM_REQS_PER_PAGE && k < num_vfs; j++) {
			struct bnxt_vf_info *vf = &bp->pf.vf[k];

			vf->hwrm_cmd_req_addr = bp->pf.hwrm_cmd_req_addr[i] +
						j * BNXT_HWRM_REQ_MAX_SIZE;
			vf->hwrm_cmd_req_dma_addr =
				bp->pf.hwrm_cmd_req_dma_addr[i] + j *
				BNXT_HWRM_REQ_MAX_SIZE;
			k++;
		}
	}

	/* Max 128 VFs */
	bp->pf.vf_event_bmap = kzalloc(16, GFP_KERNEL);
	if (!bp->pf.vf_event_bmap)
		return -ENOMEM;

	bp->pf.hwrm_cmd_req_pages = nr_pages;
	return 0;
}
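
/* Register the VF command buffer pages (up to 4) with the firmware so
 * that it can DMA forwarded VF requests into them.
 */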
static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
{
	struct hwrm_func_buf_rgtr_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BUF_RGTR, -1, -1);

	req.req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages);
	req.req_buf_page_size = cpu_to_le16(BNXT_PAGE_SHIFT);
	req.req_buf_len = cpu_to_le16(BNXT_HWRM_REQ_MAX_SIZE);
	req.req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]);
	req.req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]);
	req.req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]);
	req.req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]);

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

/* Only called by the PF to reserve resources for VFs */
static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
{
	u32 rc = 0, mtu, i;
	u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics;
	u16 vf_ring_grps;
	struct hwrm_func_cfg_input req = {0};
	struct bnxt_pf_info *pf = &bp->pf;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);

	/* Remaining rings are distributed equally among VFs for now */
	/* TODO: the following workaround is needed to restrict the total
	 * number of vf_cp_rings so that it does not exceed the number of
	 * HW ring groups. This WA should be removed once new HWRM provides
	 * the HW ring groups capability in hwrm_func_qcap.
	 */
	vf_cp_rings = min_t(u16, pf->max_cp_rings, pf->max_stat_ctxs);
	vf_cp_rings = (vf_cp_rings - bp->cp_nr_rings) / num_vfs;
	/* TODO: restore this logic below once the WA above is removed */
	/* vf_cp_rings = (pf->max_cp_rings - bp->cp_nr_rings) / num_vfs; */
	vf_stat_ctx = (pf->max_stat_ctxs - bp->num_stat_ctxs) / num_vfs;
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		vf_rx_rings = (pf->max_rx_rings - bp->rx_nr_rings * 2) /
			      num_vfs;
	else
		vf_rx_rings = (pf->max_rx_rings - bp->rx_nr_rings) / num_vfs;
	vf_ring_grps = (bp->pf.max_hw_ring_grps - bp->rx_nr_rings) / num_vfs;
	vf_tx_rings = (pf->max_tx_rings - bp->tx_nr_rings) / num_vfs;

	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU |
				  FUNC_CFG_REQ_ENABLES_MRU |
				  FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS |
				  FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
				  FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
				  FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS |
				  FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
				  FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS |
				  FUNC_CFG_REQ_ENABLES_NUM_VNICS |
				  FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS);

	mtu = bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	req.mru = cpu_to_le16(mtu);
	req.mtu = cpu_to_le16(mtu);

	req.num_rsscos_ctxs = cpu_to_le16(1);
	req.num_cmpl_rings = cpu_to_le16(vf_cp_rings);
	req.num_tx_rings = cpu_to_le16(vf_tx_rings);
	req.num_rx_rings = cpu_to_le16(vf_rx_rings);
	req.num_hw_ring_grps = cpu_to_le16(vf_ring_grps);
	req.num_l2_ctxs = cpu_to_le16(4);
	vf_vnics = 1;

	req.num_vnics = cpu_to_le16(vf_vnics);
	/* FIXME spec currently uses 1 bit for stats ctx */
	req.num_stat_ctxs = cpu_to_le16(vf_stat_ctx);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < num_vfs; i++) {
		req.fid = cpu_to_le16(pf->first_vf_id + i);
		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;
		pf->active_vfs = i + 1;
		pf->vf[i].fw_fid = le16_to_cpu(req.fid);
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	if (!rc) {
		pf->max_tx_rings -= vf_tx_rings * num_vfs;
		pf->max_rx_rings -= vf_rx_rings * num_vfs;
		pf->max_hw_ring_grps -= vf_ring_grps * num_vfs;
		pf->max_cp_rings -= vf_cp_rings * num_vfs;
		pf->max_rsscos_ctxs -= num_vfs;
		pf->max_stat_ctxs -= vf_stat_ctx * num_vfs;
		pf->max_vnics -= vf_vnics * num_vfs;
	}
	return rc;
}
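
/* Enable SR-IOV: shrink the requested VF count until the rings and RSS
 * contexts left over by the PF can satisfy it, then allocate and
 * reserve VF resources, register the command buffers, and enable the
 * VFs in PCI.
 */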
static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
{
	int rc = 0, vfs_supported;
	int min_rx_rings, min_tx_rings, min_rss_ctxs;
	int tx_ok = 0, rx_ok = 0, rss_ok = 0;

	/* Check if we can enable the requested number of VFs. At a minimum
	 * we require 1 RX and 1 TX ring for each VF. In this minimum config,
	 * features like TPA will not be available.
	 */
	vfs_supported = *num_vfs;

	while (vfs_supported) {
		min_rx_rings = vfs_supported;
		min_tx_rings = vfs_supported;
		min_rss_ctxs = vfs_supported;

		if (bp->flags & BNXT_FLAG_AGG_RINGS) {
			if (bp->pf.max_rx_rings - bp->rx_nr_rings * 2 >=
			    min_rx_rings)
				rx_ok = 1;
		} else {
			if (bp->pf.max_rx_rings - bp->rx_nr_rings >=
			    min_rx_rings)
				rx_ok = 1;
		}

		if (bp->pf.max_tx_rings - bp->tx_nr_rings >= min_tx_rings)
			tx_ok = 1;

		if (bp->pf.max_rsscos_ctxs - bp->rsscos_nr_ctxs >= min_rss_ctxs)
			rss_ok = 1;

		if (tx_ok && rx_ok && rss_ok)
			break;

		vfs_supported--;
	}

	if (!vfs_supported) {
		netdev_err(bp->dev, "Cannot enable VFs as all resources are used by PF\n");
		return -EINVAL;
	}

	if (vfs_supported != *num_vfs) {
		netdev_info(bp->dev, "Requested VFs %d, can enable %d\n",
			    *num_vfs, vfs_supported);
		*num_vfs = vfs_supported;
	}

	rc = bnxt_alloc_vf_resources(bp, *num_vfs);
	if (rc)
		goto err_out1;

	/* Reserve resources for VFs */
	rc = bnxt_hwrm_func_cfg(bp, *num_vfs);
	if (rc)
		goto err_out2;

	/* Register buffers for VFs */
	rc = bnxt_hwrm_func_buf_rgtr(bp);
	if (rc)
		goto err_out2;

	rc = pci_enable_sriov(bp->pdev, *num_vfs);
	if (rc)
		goto err_out2;

	return 0;

err_out2:
	/* Free the resources reserved for the VFs */
	bnxt_hwrm_func_vf_resource_free(bp, *num_vfs);

err_out1:
	bnxt_free_vf_resources(bp);

	return rc;
}
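
/* Tear down SR-IOV.  If VFs are still assigned to VMs, only notify them
 * of the PF driver unload; otherwise disable SR-IOV and free the
 * firmware-reserved VF resources.  PF resource limits are re-read via
 * bnxt_hwrm_func_qcaps().
 */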
void bnxt_sriov_disable(struct bnxt *bp)
{
	u16 num_vfs = pci_num_vf(bp->pdev);

	if (!num_vfs)
		return;

	if (pci_vfs_assigned(bp->pdev)) {
		bnxt_hwrm_fwd_async_event_cmpl(
			bp, NULL,
			HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD);
		netdev_warn(bp->dev, "Unable to free %d VFs because some are assigned to VMs.\n",
			    num_vfs);
	} else {
		pci_disable_sriov(bp->pdev);
		/* Free the HW resources reserved for the VFs */
		bnxt_hwrm_func_vf_resource_free(bp, num_vfs);
	}

	bnxt_free_vf_resources(bp);

	bp->pf.active_vfs = 0;
	/* Reclaim all resources for the PF. */
	bnxt_hwrm_func_qcaps(bp);
}
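
/* .sriov_configure PCI driver callback (triggered by writing to the
 * device's sriov_numvfs attribute in sysfs); returns the number of VFs
 * actually enabled, or 0 if the request was rejected.
 */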
int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(dev);

	if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
		netdev_warn(dev, "SRIOV is not allowed when the irq mode is not MSIX\n");
		return 0;
	}

	rtnl_lock();
	if (!netif_running(dev)) {
		netdev_warn(dev, "Rejecting SRIOV config request since the interface is down!\n");
		rtnl_unlock();
		return 0;
	}
	bp->sriov_cfg = true;
	rtnl_unlock();

	if (pci_vfs_assigned(bp->pdev)) {
		netdev_warn(dev, "Unable to configure SRIOV since some VFs are assigned to VMs.\n");
		num_vfs = 0;
		goto sriov_cfg_exit;
	}

	/* Check if the enabled VF count is the same as requested */
	if (num_vfs && num_vfs == bp->pf.active_vfs)
		goto sriov_cfg_exit;

	/* if there are previously existing VFs, clean them up */
	bnxt_sriov_disable(bp);
	if (!num_vfs)
		goto sriov_cfg_exit;

	bnxt_sriov_enable(bp, &num_vfs);

sriov_cfg_exit:
	bp->sriov_cfg = false;
	wake_up(&bp->sriov_cfg_wait);

	return num_vfs;
}
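
/* Forward a response that the PF has prepared on behalf of a VF back
 * to that VF via HWRM_FWD_RESP.
 */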
static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
			      void *encap_resp, __le64 encap_resp_addr,
			      __le16 encap_resp_cpr, u32 msg_size)
{
	int rc = 0;
	struct hwrm_fwd_resp_input req = {0};
	struct hwrm_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_RESP, -1, -1);

	/* Set the new target id */
	req.target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_len = cpu_to_le16(msg_size);
	req.encap_resp_addr = encap_resp_addr;
	req.encap_resp_cmpl_ring = encap_resp_cpr;
	memcpy(req.encap_resp, encap_resp, msg_size);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);

	if (rc) {
		netdev_err(bp->dev, "hwrm_fwd_resp failed. rc:%d\n", rc);
		goto fwd_resp_exit;
	}

	if (resp->error_code) {
		netdev_err(bp->dev, "hwrm_fwd_resp error %d\n",
			   resp->error_code);
		rc = -1;
	}

fwd_resp_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
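
/* Reject a VF request: bounce the encapsulated command back through
 * HWRM_REJECT_FWD_RESP so the firmware fails it towards the VF.
 */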
static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
				  u32 msg_size)
{
	int rc = 0;
	struct hwrm_reject_fwd_resp_input req = {0};
	struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_REJECT_FWD_RESP, -1, -1);
	/* Set the new target id */
	req.target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
	memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);

	if (rc) {
		netdev_err(bp->dev, "hwrm_fwd_err_resp failed. rc:%d\n", rc);
		goto fwd_err_resp_exit;
	}

	if (resp->error_code) {
		netdev_err(bp->dev, "hwrm_fwd_err_resp error %d\n",
			   resp->error_code);
		rc = -1;
	}

fwd_err_resp_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
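
/* Approve a VF request: re-send the encapsulated command through
 * HWRM_EXEC_FWD_RESP so the firmware executes it on the VF's behalf.
 */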
static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
				   u32 msg_size)
{
	int rc = 0;
	struct hwrm_exec_fwd_resp_input req = {0};
	struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_EXEC_FWD_RESP, -1, -1);
	/* Set the new target id */
	req.target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
	memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);

	if (rc) {
		netdev_err(bp->dev, "hwrm_exec_fwd_resp failed. rc:%d\n", rc);
		goto exec_fwd_resp_exit;
	}

	if (resp->error_code) {
		netdev_err(bp->dev, "hwrm_exec_fwd_resp error %d\n",
			   resp->error_code);
		rc = -1;
	}

exec_fwd_resp_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
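
/* Allow a VF L2 filter request only if the VF has no administratively
 * assigned MAC or is requesting its assigned MAC; otherwise reject it.
 */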
static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	u32 msg_size = sizeof(struct hwrm_cfa_l2_filter_alloc_input);
	struct hwrm_cfa_l2_filter_alloc_input *req =
		(struct hwrm_cfa_l2_filter_alloc_input *)vf->hwrm_cmd_req_addr;

	if (!is_valid_ether_addr(vf->mac_addr) ||
	    ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr))
		return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
	else
		return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
}
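
/* Handle a VF PORT_PHY_QCFG query.  When the VF link state is forced,
 * synthesize the response from the PF's cached PHY state (forcing link
 * up at 10Gb full duplex, or link down); otherwise forward the query
 * to the firmware.
 */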
static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	int rc = 0;

	if (!(vf->flags & BNXT_VF_LINK_FORCED)) {
		/* real link */
		rc = bnxt_hwrm_exec_fwd_resp(
			bp, vf, sizeof(struct hwrm_port_phy_qcfg_input));
	} else {
		struct hwrm_port_phy_qcfg_output phy_qcfg_resp;
		struct hwrm_port_phy_qcfg_input *phy_qcfg_req;

		phy_qcfg_req =
		(struct hwrm_port_phy_qcfg_input *)vf->hwrm_cmd_req_addr;
		mutex_lock(&bp->hwrm_cmd_lock);
		memcpy(&phy_qcfg_resp, &bp->link_info.phy_qcfg_resp,
		       sizeof(phy_qcfg_resp));
		mutex_unlock(&bp->hwrm_cmd_lock);
		phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id;

		if (vf->flags & BNXT_VF_LINK_UP) {
			/* if physical link is down, force link up on VF */
			if (phy_qcfg_resp.link !=
			    PORT_PHY_QCFG_RESP_LINK_LINK) {
				phy_qcfg_resp.link =
					PORT_PHY_QCFG_RESP_LINK_LINK;
				phy_qcfg_resp.link_speed = cpu_to_le16(
					PORT_PHY_QCFG_RESP_LINK_SPEED_10GB);
				phy_qcfg_resp.duplex =
					PORT_PHY_QCFG_RESP_DUPLEX_FULL;
				phy_qcfg_resp.pause =
					(PORT_PHY_QCFG_RESP_PAUSE_TX |
					 PORT_PHY_QCFG_RESP_PAUSE_RX);
			}
		} else {
			/* force link down */
			phy_qcfg_resp.link = PORT_PHY_QCFG_RESP_LINK_NO_LINK;
			phy_qcfg_resp.link_speed = 0;
			phy_qcfg_resp.duplex = PORT_PHY_QCFG_RESP_DUPLEX_HALF;
			phy_qcfg_resp.pause = 0;
		}
		rc = bnxt_hwrm_fwd_resp(bp, vf, &phy_qcfg_resp,
					phy_qcfg_req->resp_addr,
					phy_qcfg_req->cmpl_ring,
					sizeof(phy_qcfg_resp));
	}
	return rc;
}
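
/* Dispatch a forwarded VF command: validate the request types we care
 * about and execute or reject accordingly.
 */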
static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	int rc = 0;
	struct input *encap_req = vf->hwrm_cmd_req_addr;
	u32 req_type = le16_to_cpu(encap_req->req_type);

	switch (req_type) {
	case HWRM_CFA_L2_FILTER_ALLOC:
		rc = bnxt_vf_validate_set_mac(bp, vf);
		break;
	case HWRM_FUNC_CFG:
		/* TODO: Validate whether the VF is allowed to change the
		 * MAC address, MTU, number of rings, etc.
		 */
		rc = bnxt_hwrm_exec_fwd_resp(
			bp, vf, sizeof(struct hwrm_func_cfg_input));
		break;
	case HWRM_PORT_PHY_QCFG:
		rc = bnxt_vf_set_link(bp, vf);
		break;
	default:
		break;
	}
	return rc;
}
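
/* Walk the VF event bitmap and process the pending forwarded command
 * of each VF that has signaled one.
 */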
void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
{
	u32 i = 0, active_vfs = bp->pf.active_vfs, vf_id;

	/* Scan through VFs and process commands */
	while (1) {
		vf_id = find_next_bit(bp->pf.vf_event_bmap, active_vfs, i);
		if (vf_id >= active_vfs)
			break;

		clear_bit(vf_id, bp->pf.vf_event_bmap);
		bnxt_vf_req_validate_snd(bp, &bp->pf.vf[vf_id]);
		i = vf_id + 1;
	}
}
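
/* VF-side helper: query the firmware for the PF-assigned MAC address
 * and, if one is set, adopt it as the netdev's address.
 */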
void bnxt_update_vf_mac(struct bnxt *bp)
{
	struct hwrm_func_qcaps_input req = {0};
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
	req.fid = cpu_to_le16(0xffff);

	mutex_lock(&bp->hwrm_cmd_lock);
	if (_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
		goto update_vf_mac_exit;

	/* Store MAC address from the firmware.  There are 2 cases:
	 * 1. MAC address is valid.  It is assigned from the PF and we
	 *    need to override the current VF MAC address with it.
	 * 2. MAC address is zero.  The VF will use a random MAC address by
	 *    default but the stored zero MAC will allow the VF user to
	 *    change the random MAC address using ndo_set_mac_address() if
	 *    desired.
	 */
	if (!ether_addr_equal(resp->mac_address, bp->vf.mac_addr))
		memcpy(bp->vf.mac_addr, resp->mac_address, ETH_ALEN);

	/* overwrite netdev dev_addr with admin VF MAC */
	if (is_valid_ether_addr(bp->vf.mac_addr))
		memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
update_vf_mac_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
}
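
/* VF-side helper: ask the PF (via HWRM_FUNC_VF_CFG on spec 1.2.2+) to
 * approve a MAC address change; on older firmware, reject the change
 * if a MAC has already been administratively assigned.
 */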
int bnxt_approve_mac(struct bnxt *bp, u8 *mac)
{
	struct hwrm_func_vf_cfg_input req = {0};
	int rc = 0;

	if (!BNXT_VF(bp))
		return 0;

	if (bp->hwrm_spec_code < 0x10202) {
		if (is_valid_ether_addr(bp->vf.mac_addr))
			rc = -EADDRNOTAVAIL;
		goto mac_done;
	}
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
	req.enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
	memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
mac_done:
	if (rc) {
		rc = -EADDRNOTAVAIL;
		netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n",
			    mac);
	}
	return rc;
}

#else
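
/* Stub implementations used when CONFIG_BNXT_SRIOV is not set. */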
void bnxt_sriov_disable(struct bnxt *bp)
{
}

void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
{
	netdev_err(bp->dev, "Invalid VF message received when SRIOV is not enabled\n");
}

void bnxt_update_vf_mac(struct bnxt *bp)
{
}

int bnxt_approve_mac(struct bnxt *bp, u8 *mac)
{
	return 0;
}
#endif