/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2016-2017 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/jhash.h>
#include <asm/unaligned.h>	/* put_unaligned_le32() */
#include <net/pkt_cls.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_vfr.h"
#include "bnxt_devlink.h"
#include "bnxt_tc.h"

#ifdef CONFIG_BNXT_SRIOV

#define CFA_HANDLE_INVALID	0xffff
#define VF_IDX_INVALID		0xffff

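/* Ask the FW to allocate a pair of CFA handles for a VF: tx_cfa_action
 * is used to mux packets transmitted by the VF-rep onto the VF, and
 * rx_cfa_code identifies traffic received from that VF so the RX path
 * can steer it to the VF-rep (see bnxt_get_vf_rep() below).
 */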
static int hwrm_cfa_vfr_alloc(struct bnxt *bp, u16 vf_idx,
			      u16 *tx_cfa_action, u16 *rx_cfa_code)
{
	struct hwrm_cfa_vfr_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_cfa_vfr_alloc_input req = { 0 };
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_VFR_ALLOC, -1, -1);
	req.vf_id = cpu_to_le16(vf_idx);
	sprintf(req.vfr_name, "vfr%d", vf_idx);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		*tx_cfa_action = le16_to_cpu(resp->tx_cfa_action);
		*rx_cfa_code = le16_to_cpu(resp->rx_cfa_code);
		netdev_dbg(bp->dev, "tx_cfa_action=0x%x, rx_cfa_code=0x%x",
			   *tx_cfa_action, *rx_cfa_code);
	} else {
		netdev_info(bp->dev, "%s error rc=%d", __func__, rc);
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int hwrm_cfa_vfr_free(struct bnxt *bp, u16 vf_idx)
{
	struct hwrm_cfa_vfr_free_input req = { 0 };
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_VFR_FREE, -1, -1);
	sprintf(req.vfr_name, "vfr%d", vf_idx);

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		netdev_info(bp->dev, "%s error rc=%d", __func__, rc);
	return rc;
}

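/* Query the VF's firmware-configured max MTU (HWRM_FUNC_QCFG) so the
 * VF-rep can advertise the same upper bound; fall back to BNXT_MAX_MTU
 * when the FW reports zero.
 */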
static int bnxt_hwrm_vfr_qcfg(struct bnxt *bp, struct bnxt_vf_rep *vf_rep,
			      u16 *max_mtu)
{
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_qcfg_input req = {0};
	u16 mtu;
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
	req.fid = cpu_to_le16(bp->pf.vf[vf_rep->vf_idx].fw_fid);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		mtu = le16_to_cpu(resp->max_mtu_configured);
		if (!mtu)
			*max_mtu = BNXT_MAX_MTU;
		else
			*max_mtu = mtu;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_vf_rep_open(struct net_device *dev)
{
	struct bnxt_vf_rep *vf_rep = netdev_priv(dev);
	struct bnxt *bp = vf_rep->bp;

	/* Enable link and TX only if the parent PF is open. */
	if (netif_running(bp->dev)) {
		netif_carrier_on(dev);
		netif_tx_start_all_queues(dev);
	}
	return 0;
}

static int bnxt_vf_rep_close(struct net_device *dev)
{
	netif_carrier_off(dev);
	netif_tx_disable(dev);
	return 0;
}

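/* A VF-rep has no rings of its own: TX is redirected onto the parent
 * PF's queues via a metadata dst whose port_id carries the rep's
 * tx_cfa_action handle.
 */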
static netdev_tx_t bnxt_vf_rep_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct bnxt_vf_rep *vf_rep = netdev_priv(dev);
	int rc, len = skb->len;

	skb_dst_drop(skb);
	dst_hold((struct dst_entry *)vf_rep->dst);
	skb_dst_set(skb, (struct dst_entry *)vf_rep->dst);
	skb->dev = vf_rep->dst->u.port_info.lower_dev;

	rc = dev_queue_xmit(skb);
	if (!rc) {
		vf_rep->tx_stats.packets++;
		vf_rep->tx_stats.bytes += len;
	}
	return rc;
}

static void
bnxt_vf_rep_get_stats64(struct net_device *dev,
			struct rtnl_link_stats64 *stats)
{
	struct bnxt_vf_rep *vf_rep = netdev_priv(dev);

	stats->rx_packets = vf_rep->rx_stats.packets;
	stats->rx_bytes = vf_rep->rx_stats.bytes;
	stats->tx_packets = vf_rep->tx_stats.packets;
	stats->tx_bytes = vf_rep->tx_stats.bytes;
}

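/* Flower offload requests arriving on a VF-rep are programmed in the
 * parent PF's context, keyed by the VF's fw_fid.
 */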
static int bnxt_vf_rep_setup_tc_block_cb(enum tc_setup_type type,
					 void *type_data,
					 void *cb_priv)
{
	struct bnxt_vf_rep *vf_rep = cb_priv;
	struct bnxt *bp = vf_rep->bp;
	int vf_fid = bp->pf.vf[vf_rep->vf_idx].fw_fid;

	if (!bnxt_tc_flower_enabled(vf_rep->bp) ||
	    !tc_cls_can_offload_and_chain0(bp->dev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return bnxt_tc_setup_flower(bp, vf_fid, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static int bnxt_vf_rep_setup_tc_block(struct net_device *dev,
				      struct tc_block_offload *f)
{
	struct bnxt_vf_rep *vf_rep = netdev_priv(dev);

	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_BLOCK_BIND:
		return tcf_block_cb_register(f->block,
					     bnxt_vf_rep_setup_tc_block_cb,
					     vf_rep, vf_rep, f->extack);
	case TC_BLOCK_UNBIND:
		tcf_block_cb_unregister(f->block,
					bnxt_vf_rep_setup_tc_block_cb, vf_rep);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int bnxt_vf_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
				void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return bnxt_vf_rep_setup_tc_block(dev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

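/* Look up the VF-rep netdev for an RX cfa_code reported by the HW;
 * called from the PF's RX path. Returns NULL if no rep is registered
 * for that code.
 */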
struct net_device *bnxt_get_vf_rep(struct bnxt *bp, u16 cfa_code)
{
	u16 vf_idx;

	if (cfa_code && bp->cfa_code_map && BNXT_PF(bp)) {
		vf_idx = bp->cfa_code_map[cfa_code];
		if (vf_idx != VF_IDX_INVALID)
			return bp->vf_reps[vf_idx]->dev;
	}
	return NULL;
}

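/* RX handler for VF-rep traffic: skb->dev has already been switched to
 * the VF-rep netdev, so just account the packet and pass it up.
 */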
void bnxt_vf_rep_rx(struct bnxt *bp, struct sk_buff *skb)
{
	struct bnxt_vf_rep *vf_rep = netdev_priv(skb->dev);
	struct bnxt_vf_rep_stats *rx_stats;

	rx_stats = &vf_rep->rx_stats;
	rx_stats->bytes += skb->len;
	rx_stats->packets++;

	netif_receive_skb(skb);
}

static int bnxt_vf_rep_get_phys_port_name(struct net_device *dev, char *buf,
					  size_t len)
{
	struct bnxt_vf_rep *vf_rep = netdev_priv(dev);
	struct pci_dev *pf_pdev = vf_rep->bp->pdev;
	int rc;

	rc = snprintf(buf, len, "pf%dvf%d", PCI_FUNC(pf_pdev->devfn),
		      vf_rep->vf_idx);
	if (rc >= len)
		return -EOPNOTSUPP;
	return 0;
}

static void bnxt_vf_rep_get_drvinfo(struct net_device *dev,
				    struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
}

static int bnxt_vf_rep_port_attr_get(struct net_device *dev,
				     struct switchdev_attr *attr)
{
	struct bnxt_vf_rep *vf_rep = netdev_priv(dev);

	/* As only PORT_PARENT_ID is currently supported, use the common
	 * code shared between the PF and the VF-rep for now.
	 */
	return bnxt_port_attr_get(vf_rep->bp, attr);
}

static const struct switchdev_ops bnxt_vf_rep_switchdev_ops = {
	.switchdev_port_attr_get	= bnxt_vf_rep_port_attr_get
};

static const struct ethtool_ops bnxt_vf_rep_ethtool_ops = {
	.get_drvinfo		= bnxt_vf_rep_get_drvinfo
};

static const struct net_device_ops bnxt_vf_rep_netdev_ops = {
	.ndo_open		= bnxt_vf_rep_open,
	.ndo_stop		= bnxt_vf_rep_close,
	.ndo_start_xmit		= bnxt_vf_rep_xmit,
	.ndo_get_stats64	= bnxt_vf_rep_get_stats64,
	.ndo_setup_tc		= bnxt_vf_rep_setup_tc,
	.ndo_get_phys_port_name	= bnxt_vf_rep_get_phys_port_name
};

bool bnxt_dev_is_vf_rep(struct net_device *dev)
{
	return dev->netdev_ops == &bnxt_vf_rep_netdev_ops;
}

/* Called when the parent PF interface is closed:
 * As the mode transition from SWITCHDEV to LEGACY
 * happens under the rtnl_lock(), this routine is safe
 * under the rtnl_lock().
 */
void bnxt_vf_reps_close(struct bnxt *bp)
{
	struct bnxt_vf_rep *vf_rep;
	u16 num_vfs, i;

	if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return;

	num_vfs = pci_num_vf(bp->pdev);
	for (i = 0; i < num_vfs; i++) {
		vf_rep = bp->vf_reps[i];
		if (netif_running(vf_rep->dev))
			bnxt_vf_rep_close(vf_rep->dev);
	}
}

/* Called when the parent PF interface is opened (re-opened):
 * As the mode transition from SWITCHDEV to LEGACY
 * happens under the rtnl_lock(), this routine is safe
 * under the rtnl_lock().
 */
void bnxt_vf_reps_open(struct bnxt *bp)
{
	int i;

	if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return;

	for (i = 0; i < pci_num_vf(bp->pdev); i++)
		bnxt_vf_rep_open(bp->vf_reps[i]->dev);
}

static void __bnxt_vf_reps_destroy(struct bnxt *bp)
{
	u16 num_vfs = pci_num_vf(bp->pdev);
	struct bnxt_vf_rep *vf_rep;
	int i;

	for (i = 0; i < num_vfs; i++) {
		vf_rep = bp->vf_reps[i];
		if (vf_rep) {
			dst_release((struct dst_entry *)vf_rep->dst);

			if (vf_rep->tx_cfa_action != CFA_HANDLE_INVALID)
				hwrm_cfa_vfr_free(bp, vf_rep->vf_idx);

			if (vf_rep->dev) {
				/* if register_netdev failed, then netdev_ops
				 * would have been set to NULL
				 */
				if (vf_rep->dev->netdev_ops)
					unregister_netdev(vf_rep->dev);
				free_netdev(vf_rep->dev);
			}
		}
	}

	kfree(bp->vf_reps);
	bp->vf_reps = NULL;
}

void bnxt_vf_reps_destroy(struct bnxt *bp)
{
	bool closed = false;

	if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return;

	if (!bp->vf_reps)
		return;

	/* Ensure that parent PF's and VF-reps' RX/TX has been quiesced
	 * before proceeding with VF-rep cleanup.
	 */
	rtnl_lock();
	if (netif_running(bp->dev)) {
		bnxt_close_nic(bp, false, false);
		closed = true;
	}
	/* un-publish cfa_code_map so that RX path can't see it anymore */
	kfree(bp->cfa_code_map);
	bp->cfa_code_map = NULL;
	bp->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;
	if (closed)
		bnxt_open_nic(bp, false, false);
	rtnl_unlock();

	/* Need to call vf_reps_destroy() outside of rtnl_lock()
	 * as unregister_netdev() takes the rtnl_lock()
	 */
	__bnxt_vf_reps_destroy(bp);
}

/* Use the OUI of the PF's perm addr and report the same mac addr
 * for the same VF-rep each time
 */
static void bnxt_vf_rep_eth_addr_gen(u8 *src_mac, u16 vf_idx, u8 *mac)
{
	u32 addr;

	ether_addr_copy(mac, src_mac);

	addr = jhash(src_mac, ETH_ALEN, 0) + vf_idx;
	mac[3] = (u8)(addr & 0xFF);
	mac[4] = (u8)((addr >> 8) & 0xFF);
	mac[5] = (u8)((addr >> 16) & 0xFF);
}

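/* One-time init of a VF-rep netdev: inherit the parent PF's feature
 * flags (the rep shares the PF's rings), derive a stable MAC address
 * and bound the MTU by the VF's own max MTU.
 */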
static void bnxt_vf_rep_netdev_init(struct bnxt *bp, struct bnxt_vf_rep *vf_rep,
				    struct net_device *dev)
{
	struct net_device *pf_dev = bp->dev;
	u16 max_mtu;

	dev->netdev_ops = &bnxt_vf_rep_netdev_ops;
	dev->ethtool_ops = &bnxt_vf_rep_ethtool_ops;
	SWITCHDEV_SET_OPS(dev, &bnxt_vf_rep_switchdev_ops);
	/* Just inherit all the features of the parent PF as the VF-R
	 * uses the RX/TX rings of the parent PF
	 */
	dev->hw_features = pf_dev->hw_features;
	dev->gso_partial_features = pf_dev->gso_partial_features;
	dev->vlan_features = pf_dev->vlan_features;
	dev->hw_enc_features = pf_dev->hw_enc_features;
	dev->features |= pf_dev->features;
	bnxt_vf_rep_eth_addr_gen(bp->pf.mac_addr, vf_rep->vf_idx,
				 dev->perm_addr);
	ether_addr_copy(dev->dev_addr, dev->perm_addr);
	/* Set VF-Rep's max-mtu to the corresponding VF's max-mtu */
	if (!bnxt_hwrm_vfr_qcfg(bp, vf_rep, &max_mtu))
		dev->max_mtu = max_mtu;
	dev->min_mtu = ETH_ZLEN;
}

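/* Read the 64-bit PCIe Device Serial Number from extended config
 * space; it is used below as the eswitch switch_id.
 */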
static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
{
	struct pci_dev *pdev = bp->pdev;
	int pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
	u32 dw;

	if (!pos) {
		netdev_info(bp->dev, "Unable to read adapter's DSN");
		return -EOPNOTSUPP;
	}

	/* DSN (two dw) is at an offset of 4 from the cap pos */
	pos += 4;
	pci_read_config_dword(pdev, pos, &dw);
	put_unaligned_le32(dw, &dsn[0]);
	pci_read_config_dword(pdev, pos + 4, &dw);
	put_unaligned_le32(dw, &dsn[4]);
	return 0;
}

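/* Create and register one VF-rep netdev per VF. The cfa_code -> vf_idx
 * map is published only after every rep has been set up, so the RX
 * path never sees a partially initialized table.
 */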
static int bnxt_vf_reps_create(struct bnxt *bp)
{
	u16 *cfa_code_map = NULL, num_vfs = pci_num_vf(bp->pdev);
	struct bnxt_vf_rep *vf_rep;
	struct net_device *dev;
	int rc, i;

	bp->vf_reps = kcalloc(num_vfs, sizeof(vf_rep), GFP_KERNEL);
	if (!bp->vf_reps)
		return -ENOMEM;

	/* storage for cfa_code to vf-idx mapping */
	cfa_code_map = kmalloc_array(MAX_CFA_CODE, sizeof(*bp->cfa_code_map),
				     GFP_KERNEL);
	if (!cfa_code_map) {
		rc = -ENOMEM;
		goto err;
	}
	for (i = 0; i < MAX_CFA_CODE; i++)
		cfa_code_map[i] = VF_IDX_INVALID;

	for (i = 0; i < num_vfs; i++) {
		dev = alloc_etherdev(sizeof(*vf_rep));
		if (!dev) {
			rc = -ENOMEM;
			goto err;
		}

		vf_rep = netdev_priv(dev);
		bp->vf_reps[i] = vf_rep;
		vf_rep->dev = dev;
		vf_rep->bp = bp;
		vf_rep->vf_idx = i;
		vf_rep->tx_cfa_action = CFA_HANDLE_INVALID;

		/* get cfa handles from FW */
		rc = hwrm_cfa_vfr_alloc(bp, vf_rep->vf_idx,
					&vf_rep->tx_cfa_action,
					&vf_rep->rx_cfa_code);
		if (rc) {
			rc = -ENOLINK;
			goto err;
		}
		cfa_code_map[vf_rep->rx_cfa_code] = vf_rep->vf_idx;

		vf_rep->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
						 GFP_KERNEL);
		if (!vf_rep->dst) {
			rc = -ENOMEM;
			goto err;
		}
		/* only cfa_action is needed to mux a packet while TXing */
		vf_rep->dst->u.port_info.port_id = vf_rep->tx_cfa_action;
		vf_rep->dst->u.port_info.lower_dev = bp->dev;

		bnxt_vf_rep_netdev_init(bp, vf_rep, dev);
		rc = register_netdev(dev);
		if (rc) {
			/* no need for unregister_netdev in cleanup */
			dev->netdev_ops = NULL;
			goto err;
		}
	}

	/* Read the adapter's DSN to use as the eswitch switch_id */
	rc = bnxt_pcie_dsn_get(bp, bp->switch_id);
	if (rc)
		goto err;

	/* publish cfa_code_map only after all VF-reps have been initialized */
	bp->cfa_code_map = cfa_code_map;
	bp->eswitch_mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
	netif_keep_dst(bp->dev);
	return 0;

err:
	netdev_info(bp->dev, "%s error=%d", __func__, rc);
	kfree(cfa_code_map);
	__bnxt_vf_reps_destroy(bp);
	return rc;
}

/* Devlink related routines */
int bnxt_dl_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct bnxt *bp = bnxt_get_bp_from_dl(devlink);

	*mode = bp->eswitch_mode;
	return 0;
}

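/* Switch the eswitch mode under sriov_lock. SWITCHDEV requires FW
 * spec 0x10803 or newer and at least one enabled VF; switching back
 * to LEGACY tears down all VF-reps.
 */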
int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode)
{
	struct bnxt *bp = bnxt_get_bp_from_dl(devlink);
	int rc = 0;

	mutex_lock(&bp->sriov_lock);
	if (bp->eswitch_mode == mode) {
		netdev_info(bp->dev, "already in %s eswitch mode",
			    mode == DEVLINK_ESWITCH_MODE_LEGACY ?
			    "legacy" : "switchdev");
		rc = -EINVAL;
		goto done;
	}

	switch (mode) {
	case DEVLINK_ESWITCH_MODE_LEGACY:
		bnxt_vf_reps_destroy(bp);
		break;

	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
		if (bp->hwrm_spec_code < 0x10803) {
			netdev_warn(bp->dev, "FW does not support SRIOV E-Switch SWITCHDEV mode\n");
			rc = -ENOTSUPP;
			goto done;
		}

		if (pci_num_vf(bp->pdev) == 0) {
			netdev_info(bp->dev, "Enable VFs before setting switchdev mode");
			rc = -EPERM;
			goto done;
		}
		rc = bnxt_vf_reps_create(bp);
		break;

	default:
		rc = -EINVAL;
		goto done;
	}
done:
	mutex_unlock(&bp->sriov_lock);
	return rc;
}

#endif	/* CONFIG_BNXT_SRIOV */