/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2017 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/pci.h>
#include <linux/if_vlan.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include <net/switchdev.h>
#include "lio_vf_rep.h"

static int lio_vf_rep_open(struct net_device *ndev);
static int lio_vf_rep_stop(struct net_device *ndev);
static netdev_tx_t lio_vf_rep_pkt_xmit(struct sk_buff *skb,
				       struct net_device *ndev);
static void lio_vf_rep_tx_timeout(struct net_device *netdev);
static int lio_vf_rep_phys_port_name(struct net_device *dev,
				     char *buf, size_t len);
static void lio_vf_rep_get_stats64(struct net_device *dev,
				   struct rtnl_link_stats64 *stats64);
static int lio_vf_rep_change_mtu(struct net_device *ndev, int new_mtu);

static const struct net_device_ops lio_vf_rep_ndev_ops = {
	.ndo_open = lio_vf_rep_open,
	.ndo_stop = lio_vf_rep_stop,
	.ndo_start_xmit = lio_vf_rep_pkt_xmit,
	.ndo_tx_timeout = lio_vf_rep_tx_timeout,
	.ndo_get_phys_port_name = lio_vf_rep_phys_port_name,
	.ndo_get_stats64 = lio_vf_rep_get_stats64,
	.ndo_change_mtu = lio_vf_rep_change_mtu,
};

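/* Completion callback for VF rep soft commands: clear the response
 * status unless the request timed out, then wake up the waiter.
 */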
static void
lio_vf_rep_send_sc_complete(struct octeon_device *oct,
			    u32 status, void *ptr)
{
	struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
	struct lio_vf_rep_sc_ctx *ctx =
		(struct lio_vf_rep_sc_ctx *)sc->ctxptr;
	struct lio_vf_rep_resp *resp =
		(struct lio_vf_rep_resp *)sc->virtrptr;

	if (status != OCTEON_REQUEST_TIMEOUT && READ_ONCE(resp->status))
		WRITE_ONCE(resp->status, 0);

	complete(&ctx->complete);
}

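/* Send a request to the firmware as a soft command and wait (up to
 * twice LIO_VF_REP_REQ_TMO_MS) for the completion callback to clear
 * the response status.  A status still set after the wait is treated
 * as -EBUSY; any response payload is copied back into @resp.
 */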
static int
lio_vf_rep_send_soft_command(struct octeon_device *oct,
			     void *req, int req_size,
			     void *resp, int resp_size)
{
	int tot_resp_size = sizeof(struct lio_vf_rep_resp) + resp_size;
	int ctx_size = sizeof(struct lio_vf_rep_sc_ctx);
	struct octeon_soft_command *sc = NULL;
	struct lio_vf_rep_resp *rep_resp;
	struct lio_vf_rep_sc_ctx *ctx;
	void *sc_req;
	int err;

	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct, req_size,
					  tot_resp_size, ctx_size);
	if (!sc)
		return -ENOMEM;

	ctx = (struct lio_vf_rep_sc_ctx *)sc->ctxptr;
	memset(ctx, 0, ctx_size);
	init_completion(&ctx->complete);

	sc_req = (struct lio_vf_rep_req *)sc->virtdptr;
	memcpy(sc_req, req, req_size);

	rep_resp = (struct lio_vf_rep_resp *)sc->virtrptr;
	memset(rep_resp, 0, tot_resp_size);
	WRITE_ONCE(rep_resp->status, 1);

	sc->iq_no = 0;
	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
				    OPCODE_NIC_VF_REP_CMD, 0, 0, 0);
	sc->callback = lio_vf_rep_send_sc_complete;
	sc->callback_arg = sc;
	sc->wait_time = LIO_VF_REP_REQ_TMO_MS;

	err = octeon_send_soft_command(oct, sc);
	if (err == IQ_SEND_FAILED)
		goto free_buff;

	wait_for_completion_timeout(&ctx->complete,
				    msecs_to_jiffies
				    (2 * LIO_VF_REP_REQ_TMO_MS));
	err = READ_ONCE(rep_resp->status) ? -EBUSY : 0;
	if (err)
		dev_err(&oct->pci_dev->dev, "VF rep send config failed\n");

	if (resp)
		memcpy(resp, (rep_resp + 1), resp_size);

free_buff:
	octeon_free_soft_command(oct, sc);

	return err;
}

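/* ndo_open handler: ask the firmware to bring the representor state
 * up, then mark the interface RUNNING and start the tx queue.
 */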
static int
lio_vf_rep_open(struct net_device *ndev)
{
	struct lio_vf_rep_desc *vf_rep = netdev_priv(ndev);
	struct lio_vf_rep_req rep_cfg;
	struct octeon_device *oct;
	int ret;

	oct = vf_rep->oct;

	memset(&rep_cfg, 0, sizeof(rep_cfg));
	rep_cfg.req_type = LIO_VF_REP_REQ_STATE;
	rep_cfg.ifidx = vf_rep->ifidx;
	rep_cfg.rep_state.state = LIO_VF_REP_STATE_UP;

	ret = lio_vf_rep_send_soft_command(oct, &rep_cfg,
					   sizeof(rep_cfg), NULL, 0);
	if (ret) {
		dev_err(&oct->pci_dev->dev,
			"VF_REP open failed with err %d\n", ret);
		return -EIO;
	}

	atomic_set(&vf_rep->ifstate, (atomic_read(&vf_rep->ifstate) |
				      LIO_IFSTATE_RUNNING));

	netif_carrier_on(ndev);
	netif_start_queue(ndev);

	return 0;
}

static int
lio_vf_rep_stop(struct net_device *ndev)
{
	struct lio_vf_rep_desc *vf_rep = netdev_priv(ndev);
	struct lio_vf_rep_req rep_cfg;
	struct octeon_device *oct;
	int ret;

	oct = vf_rep->oct;

	memset(&rep_cfg, 0, sizeof(rep_cfg));
	rep_cfg.req_type = LIO_VF_REP_REQ_STATE;
	rep_cfg.ifidx = vf_rep->ifidx;
	rep_cfg.rep_state.state = LIO_VF_REP_STATE_DOWN;

	ret = lio_vf_rep_send_soft_command(oct, &rep_cfg,
					   sizeof(rep_cfg), NULL, 0);
	if (ret) {
		dev_err(&oct->pci_dev->dev,
			"VF_REP dev stop failed with err %d\n", ret);
		return -EIO;
	}

	atomic_set(&vf_rep->ifstate, (atomic_read(&vf_rep->ifstate) &
				      ~LIO_IFSTATE_RUNNING));

	netif_tx_disable(ndev);
	netif_carrier_off(ndev);

	return 0;
}

static void
lio_vf_rep_tx_timeout(struct net_device *ndev)
{
	netif_trans_update(ndev);

	netif_wake_queue(ndev);
}

static void
lio_vf_rep_get_stats64(struct net_device *dev,
		       struct rtnl_link_stats64 *stats64)
{
	struct lio_vf_rep_desc *vf_rep = netdev_priv(dev);

	/* Swap tx and rx stats as VF rep is a switch port */
	stats64->tx_packets = vf_rep->stats.rx_packets;
	stats64->tx_bytes   = vf_rep->stats.rx_bytes;
	stats64->tx_dropped = vf_rep->stats.rx_dropped;

	stats64->rx_packets = vf_rep->stats.tx_packets;
	stats64->rx_bytes   = vf_rep->stats.tx_bytes;
	stats64->rx_dropped = vf_rep->stats.tx_dropped;
}

static int
lio_vf_rep_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct lio_vf_rep_desc *vf_rep = netdev_priv(ndev);
	struct lio_vf_rep_req rep_cfg;
	struct octeon_device *oct;
	int ret;

	oct = vf_rep->oct;

	memset(&rep_cfg, 0, sizeof(rep_cfg));
	rep_cfg.req_type = LIO_VF_REP_REQ_MTU;
	rep_cfg.ifidx = vf_rep->ifidx;
	rep_cfg.rep_mtu.mtu = cpu_to_be32(new_mtu);

	ret = lio_vf_rep_send_soft_command(oct, &rep_cfg,
					   sizeof(rep_cfg), NULL, 0);
	if (ret) {
		dev_err(&oct->pci_dev->dev,
			"Change MTU failed with err %d\n", ret);
		return -EIO;
	}

	ndev->mtu = new_mtu;

	return 0;
}

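/* ndo_get_phys_port_name handler: report the port as "pf<N>vf<M>".
 * ifidx allots 64 indices per PF (index 0 being the PF itself), so
 * the VF number is recovered by stripping the PF base and the +1
 * offset.
 */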
static int
lio_vf_rep_phys_port_name(struct net_device *dev,
			  char *buf, size_t len)
{
	struct lio_vf_rep_desc *vf_rep = netdev_priv(dev);
	struct octeon_device *oct = vf_rep->oct;
	int ret;

	ret = snprintf(buf, len, "pf%dvf%d", oct->pf_num,
		       vf_rep->ifidx - oct->pf_num * 64 - 1);
	if (ret >= len)
		return -EOPNOTSUPP;

	return 0;
}

static struct net_device *
lio_vf_rep_get_ndev(struct octeon_device *oct, int ifidx)
{
	int vf_id, max_vfs = CN23XX_MAX_VFS_PER_PF + 1;
	int vfid_mask = max_vfs - 1;

	if (ifidx <= oct->pf_num * max_vfs ||
	    ifidx >= oct->pf_num * max_vfs + max_vfs)
		return NULL;

	/* ifidx 1-63 for PF0 VFs
	 * ifidx 65-127 for PF1 VFs
	 */
	vf_id = (ifidx & vfid_mask) - 1;
	return oct->vf_rep_list.ndev[vf_id];
}

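/* Copy a received buffer into the skb.  Packets larger than
 * MIN_SKB_SIZE get only the first MIN_SKB_SIZE bytes copied into the
 * linear area, with the remainder attached as a page fragment; small
 * packets are copied whole and the page is released.
 */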
static void
lio_vf_rep_copy_packet(struct octeon_device *oct,
		       struct sk_buff *skb,
		       int len)
{
	if (likely(len > MIN_SKB_SIZE)) {
		struct octeon_skb_page_info *pg_info;
		unsigned char *va;

		pg_info = ((struct octeon_skb_page_info *)(skb->cb));
		if (pg_info->page) {
			va = page_address(pg_info->page) +
				pg_info->page_offset;
			memcpy(skb->data, va, MIN_SKB_SIZE);
			skb_put(skb, MIN_SKB_SIZE);
		}

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				pg_info->page,
				pg_info->page_offset + MIN_SKB_SIZE,
				len - MIN_SKB_SIZE,
				LIO_RXBUFFER_SZ);
	} else {
		struct octeon_skb_page_info *pg_info =
			((struct octeon_skb_page_info *)(skb->cb));

		skb_copy_to_linear_data(skb, page_address(pg_info->page) +
					pg_info->page_offset, len);
		skb_put(skb, len);
		put_page(pg_info->page);
	}
}

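/* Dispatch function for OPCODE_NIC_VF_REP_PKT: hand a packet received
 * from a VF to its representor netdev via netif_rx().  The packet is
 * dropped if the representor is not running or the data spans more
 * than one buffer.
 */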
static int
lio_vf_rep_pkt_recv(struct octeon_recv_info *recv_info, void *buf)
{
	struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
	struct lio_vf_rep_desc *vf_rep;
	struct net_device *vf_ndev;
	struct octeon_device *oct;
	union octeon_rh *rh;
	struct sk_buff *skb;
	int i, ifidx;

	oct = lio_get_device(recv_pkt->octeon_id);
	if (!oct)
		goto free_buffers;

	skb = recv_pkt->buffer_ptr[0];

	rh = &recv_pkt->rh;
	ifidx = rh->r.ossp;

	vf_ndev = lio_vf_rep_get_ndev(oct, ifidx);
	if (!vf_ndev)
		goto free_buffers;

	vf_rep = netdev_priv(vf_ndev);
	if (!(atomic_read(&vf_rep->ifstate) & LIO_IFSTATE_RUNNING) ||
	    recv_pkt->buffer_count > 1)
		goto free_buffers;

	skb->dev = vf_ndev;

	/* Multiple buffers are not used for vf_rep packets.
	 * So just buffer_size[0] is valid.
	 */
	lio_vf_rep_copy_packet(oct, skb, recv_pkt->buffer_size[0]);

	skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT);
	skb->protocol = eth_type_trans(skb, skb->dev);
	skb->ip_summed = CHECKSUM_NONE;

	netif_rx(skb);

	octeon_free_recv_info(recv_info);

	return 0;

free_buffers:
	for (i = 0; i < recv_pkt->buffer_count; i++)
		recv_buffer_free(recv_pkt->buffer_ptr[i]);

	octeon_free_recv_info(recv_info);

	return 0;
}

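/* Transmit-completion callback: unmap the packet buffer, free the skb
 * and the soft command, and wake the tx queue if the instruction
 * queue has drained enough to accept more packets.
 */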
static void
lio_vf_rep_packet_sent_callback(struct octeon_device *oct,
				u32 status, void *buf)
{
	struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
	struct sk_buff *skb = sc->ctxptr;
	struct net_device *ndev = skb->dev;
	u32 iq_no;

	dma_unmap_single(&oct->pci_dev->dev, sc->dmadptr,
			 sc->datasize, DMA_TO_DEVICE);
	dev_kfree_skb_any(skb);
	iq_no = sc->iq_no;
	octeon_free_soft_command(oct, sc);

	if (octnet_iq_is_full(oct, iq_no))
		return;

	if (netif_queue_stopped(ndev))
		netif_wake_queue(ndev);
}

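/* ndo_start_xmit handler: wrap the skb in a soft command and submit
 * it on the parent PF's tx queue so the hardware forwards it to the
 * VF.  Only linear skbs are accepted; fragmented ones are dropped.
 */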
static netdev_tx_t
lio_vf_rep_pkt_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct lio_vf_rep_desc *vf_rep = netdev_priv(ndev);
	struct net_device *parent_ndev = vf_rep->parent_ndev;
	struct octeon_device *oct = vf_rep->oct;
	struct octeon_instr_pki_ih3 *pki_ih3;
	struct octeon_soft_command *sc;
	struct lio *parent_lio;
	int status;

	parent_lio = GET_LIO(parent_ndev);

	if (!(atomic_read(&vf_rep->ifstate) & LIO_IFSTATE_RUNNING) ||
	    skb->len <= 0)
		goto xmit_failed;

	if (octnet_iq_is_full(vf_rep->oct, parent_lio->txq)) {
		dev_err(&oct->pci_dev->dev, "VF rep: Device IQ full\n");
		netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	/* Multiple buffers are not used for vf_rep packets. */
	if (skb_shinfo(skb)->nr_frags != 0) {
		dev_err(&oct->pci_dev->dev, "VF rep: nr_frags != 0. Dropping packet\n");
		goto xmit_failed;
	}

	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct, 0, 0, 0);
	if (!sc) {
		dev_err(&oct->pci_dev->dev, "VF rep: Soft command alloc failed\n");
		goto xmit_failed;
	}

	sc->dmadptr = dma_map_single(&oct->pci_dev->dev,
				     skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(&oct->pci_dev->dev, sc->dmadptr)) {
		dev_err(&oct->pci_dev->dev, "VF rep: DMA mapping failed\n");
		octeon_free_soft_command(oct, sc);
		goto xmit_failed;
	}

	sc->virtdptr = skb->data;
	sc->datasize = skb->len;
	sc->ctxptr = skb;
	sc->iq_no = parent_lio->txq;

	octeon_prepare_soft_command(oct, sc, OPCODE_NIC, OPCODE_NIC_VF_REP_PKT,
				    vf_rep->ifidx, 0, 0);
	pki_ih3 = (struct octeon_instr_pki_ih3 *)&sc->cmd.cmd3.pki_ih3;
	pki_ih3->tagtype = ORDERED_TAG;

	sc->callback = lio_vf_rep_packet_sent_callback;
	sc->callback_arg = sc;

	status = octeon_send_soft_command(oct, sc);
	if (status == IQ_SEND_FAILED) {
		dma_unmap_single(&oct->pci_dev->dev, sc->dmadptr,
				 sc->datasize, DMA_TO_DEVICE);
		octeon_free_soft_command(oct, sc);
		goto xmit_failed;
	}

	if (status == IQ_SEND_STOP)
		netif_stop_queue(ndev);

	netif_trans_update(ndev);

	return NETDEV_TX_OK;

xmit_failed:
	dev_kfree_skb_any(skb);

	return NETDEV_TX_OK;
}

static int
lio_vf_rep_attr_get(struct net_device *dev, struct switchdev_attr *attr)
{
	struct lio_vf_rep_desc *vf_rep = netdev_priv(dev);
	struct net_device *parent_ndev = vf_rep->parent_ndev;
	struct lio *lio = GET_LIO(parent_ndev);

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
		attr->u.ppid.id_len = ETH_ALEN;
		ether_addr_copy(attr->u.ppid.id,
				(void *)&lio->linfo.hw_addr + 2);
		break;

	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static const struct switchdev_ops lio_vf_rep_switchdev_ops = {
	.switchdev_port_attr_get = lio_vf_rep_attr_get,
};

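/* Delayed work handler: poll the firmware for per-representor
 * statistics, byte-swap and cache them for ndo_get_stats64, then
 * re-arm the work item.
 */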
static void
lio_vf_rep_fetch_stats(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct lio_vf_rep_desc *vf_rep = wk->ctxptr;
	struct lio_vf_rep_stats stats;
	struct lio_vf_rep_req rep_cfg;
	struct octeon_device *oct;
	int ret;

	oct = vf_rep->oct;

	memset(&rep_cfg, 0, sizeof(rep_cfg));
	rep_cfg.req_type = LIO_VF_REP_REQ_STATS;
	rep_cfg.ifidx = vf_rep->ifidx;

	ret = lio_vf_rep_send_soft_command(oct, &rep_cfg, sizeof(rep_cfg),
					   &stats, sizeof(stats));
	if (!ret) {
		octeon_swap_8B_data((u64 *)&stats, (sizeof(stats) >> 3));
		memcpy(&vf_rep->stats, &stats, sizeof(stats));
	}

	schedule_delayed_work(&vf_rep->stats_wk.work,
			      msecs_to_jiffies(LIO_VF_REP_STATS_POLL_TIME_MS));
}

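/* Create and register one representor netdev per allocated VF (only
 * in switchdev eswitch mode) and hook up the dispatch function for
 * packets arriving from the VFs.  On failure, undo everything done
 * so far.
 */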
int
lio_vf_rep_create(struct octeon_device *oct)
{
	struct lio_vf_rep_desc *vf_rep;
	struct net_device *ndev;
	int i, num_vfs;

	if (oct->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return 0;

	if (!oct->sriov_info.sriov_enabled)
		return 0;

	num_vfs = oct->sriov_info.num_vfs_alloced;

	oct->vf_rep_list.num_vfs = 0;
	for (i = 0; i < num_vfs; i++) {
		ndev = alloc_etherdev(sizeof(struct lio_vf_rep_desc));

		if (!ndev) {
			dev_err(&oct->pci_dev->dev,
				"VF rep device %d creation failed\n", i);
			goto cleanup;
		}

		ndev->min_mtu = LIO_MIN_MTU_SIZE;
		ndev->max_mtu = LIO_MAX_MTU_SIZE;
		ndev->netdev_ops = &lio_vf_rep_ndev_ops;
		SWITCHDEV_SET_OPS(ndev, &lio_vf_rep_switchdev_ops);

		vf_rep = netdev_priv(ndev);
		memset(vf_rep, 0, sizeof(*vf_rep));

		vf_rep->ndev = ndev;
		vf_rep->oct = oct;
		vf_rep->parent_ndev = oct->props[0].netdev;
		vf_rep->ifidx = (oct->pf_num * 64) + i + 1;

		eth_hw_addr_random(ndev);

		if (register_netdev(ndev)) {
			dev_err(&oct->pci_dev->dev, "VF rep netdev registration failed\n");

			free_netdev(ndev);
			goto cleanup;
		}

		netif_carrier_off(ndev);

		INIT_DELAYED_WORK(&vf_rep->stats_wk.work,
				  lio_vf_rep_fetch_stats);
		vf_rep->stats_wk.ctxptr = (void *)vf_rep;
		schedule_delayed_work(&vf_rep->stats_wk.work,
				      msecs_to_jiffies
				      (LIO_VF_REP_STATS_POLL_TIME_MS));
		oct->vf_rep_list.num_vfs++;
		oct->vf_rep_list.ndev[i] = ndev;
	}

	if (octeon_register_dispatch_fn(oct, OPCODE_NIC,
					OPCODE_NIC_VF_REP_PKT,
					lio_vf_rep_pkt_recv, oct)) {
		dev_err(&oct->pci_dev->dev, "VF rep Dispatch func registration failed\n");

		goto cleanup;
	}

	return 0;

cleanup:
	for (i = 0; i < oct->vf_rep_list.num_vfs; i++) {
		ndev = oct->vf_rep_list.ndev[i];
		oct->vf_rep_list.ndev[i] = NULL;
		if (ndev) {
			vf_rep = netdev_priv(ndev);
			cancel_delayed_work_sync
				(&vf_rep->stats_wk.work);
			unregister_netdev(ndev);
			free_netdev(ndev);
		}
	}

	oct->vf_rep_list.num_vfs = 0;

	return -1;
}

void
lio_vf_rep_destroy(struct octeon_device *oct)
{
	struct lio_vf_rep_desc *vf_rep;
	struct net_device *ndev;
	int i;

	if (oct->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return;

	if (!oct->sriov_info.sriov_enabled)
		return;

	for (i = 0; i < oct->vf_rep_list.num_vfs; i++) {
		ndev = oct->vf_rep_list.ndev[i];
		oct->vf_rep_list.ndev[i] = NULL;
		if (ndev) {
			vf_rep = netdev_priv(ndev);
			cancel_delayed_work_sync
				(&vf_rep->stats_wk.work);
			netif_tx_disable(ndev);
			netif_carrier_off(ndev);
			unregister_netdev(ndev);
			free_netdev(ndev);
		}
	}

	oct->vf_rep_list.num_vfs = 0;
}

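/* Netdev notifier: on registration or rename of a representor netdev,
 * push the (bounded-length) interface name down to the firmware.
 */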
static int
lio_vf_rep_netdev_event(struct notifier_block *nb,
			unsigned long event, void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct lio_vf_rep_desc *vf_rep;
	struct lio_vf_rep_req rep_cfg;
	struct octeon_device *oct;
	int ret;

	switch (event) {
	case NETDEV_REGISTER:
	case NETDEV_CHANGENAME:
		break;

	default:
		return NOTIFY_DONE;
	}

	if (ndev->netdev_ops != &lio_vf_rep_ndev_ops)
		return NOTIFY_DONE;

	vf_rep = netdev_priv(ndev);
	oct = vf_rep->oct;

	if (strlen(ndev->name) > LIO_IF_NAME_SIZE) {
		dev_err(&oct->pci_dev->dev,
			"Device name change sync failed as the size is > %d\n",
			LIO_IF_NAME_SIZE);
		return NOTIFY_DONE;
	}

	memset(&rep_cfg, 0, sizeof(rep_cfg));
	rep_cfg.req_type = LIO_VF_REP_REQ_DEVNAME;
	rep_cfg.ifidx = vf_rep->ifidx;
	strncpy(rep_cfg.rep_name.name, ndev->name, LIO_IF_NAME_SIZE);

	ret = lio_vf_rep_send_soft_command(oct, &rep_cfg,
					   sizeof(rep_cfg), NULL, 0);
	if (ret)
		dev_err(&oct->pci_dev->dev,
			"vf_rep netdev name change failed with err %d\n", ret);

	return NOTIFY_DONE;
}

static struct notifier_block lio_vf_rep_netdev_notifier = {
	.notifier_call = lio_vf_rep_netdev_event,
};

int
lio_vf_rep_modinit(void)
{
	if (register_netdevice_notifier(&lio_vf_rep_netdev_notifier)) {
		pr_err("netdev notifier registration failed\n");
		return -EFAULT;
	}

	return 0;
}

void
lio_vf_rep_modexit(void)
{
	if (unregister_netdevice_notifier(&lio_vf_rep_netdev_notifier))
		pr_err("netdev notifier unregister failed\n");
}