lio_core.c

/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT. See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/pci.h>
#include <linux/if_vlan.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"

/* OOM task polling interval */
#define LIO_OOM_POLL_INTERVAL_MS 250

#define OCTNIC_MAX_SG MAX_SKB_FRAGS

/**
 * \brief Callback for getting interface configuration
 * @param status status of request
 * @param buf pointer to resp structure
 */
void lio_if_cfg_callback(struct octeon_device *oct,
			 u32 status __attribute__((unused)), void *buf)
{
	struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
	struct liquidio_if_cfg_context *ctx;
	struct liquidio_if_cfg_resp *resp;

	resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
	ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;

	oct = lio_get_device(ctx->octeon_id);
	if (resp->status)
		dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: %llx\n",
			CVM_CAST64(resp->status));
	WRITE_ONCE(ctx->cond, 1);

	snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s",
		 resp->cfg_info.liquidio_firmware_version);

	/* This barrier is required to be sure that the response has been
	 * written fully before waking up the handler
	 */
	wmb();

	wake_up_interruptible(&ctx->wc);
}

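/* The command that lands here is submitted from the interface-init path in
 * the PF/VF main drivers (an assumption based on naming; the submit path is
 * not in this file): the submitter fills a liquidio_if_cfg_context, sends the
 * soft command, and then blocks, e.g. via sleep_cond(&ctx->wc, &ctx->cond) as
 * used elsewhere in this file, until the WRITE_ONCE()/wake_up_interruptible()
 * pair above releases it.
 */
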
/**
 * \brief Delete gather lists
 * @param lio per-network private data
 */
void lio_delete_glists(struct lio *lio)
{
	struct octnic_gather *g;
	int i;

	kfree(lio->glist_lock);
	lio->glist_lock = NULL;

	if (!lio->glist)
		return;

	for (i = 0; i < lio->oct_dev->num_iqs; i++) {
		do {
			g = (struct octnic_gather *)
			    lio_list_delete_head(&lio->glist[i]);
			kfree(g);
		} while (g);

		if (lio->glists_virt_base && lio->glists_virt_base[i] &&
		    lio->glists_dma_base && lio->glists_dma_base[i]) {
			lio_dma_free(lio->oct_dev,
				     lio->glist_entry_size * lio->tx_qsize,
				     lio->glists_virt_base[i],
				     lio->glists_dma_base[i]);
		}
	}

	kfree(lio->glists_virt_base);
	lio->glists_virt_base = NULL;

	kfree(lio->glists_dma_base);
	lio->glists_dma_base = NULL;

	kfree(lio->glist);
	lio->glist = NULL;
}

/**
 * \brief Setup gather lists
 * @param lio per-network private data
 */
int lio_setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
{
	struct octnic_gather *g;
	int i, j;

	lio->glist_lock =
	    kcalloc(num_iqs, sizeof(*lio->glist_lock), GFP_KERNEL);
	if (!lio->glist_lock)
		return -ENOMEM;

	lio->glist =
	    kcalloc(num_iqs, sizeof(*lio->glist), GFP_KERNEL);
	if (!lio->glist) {
		kfree(lio->glist_lock);
		lio->glist_lock = NULL;
		return -ENOMEM;
	}

	lio->glist_entry_size =
		ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);

	/* allocate memory to store virtual and dma base address of
	 * per glist consistent memory
	 */
	lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base),
					GFP_KERNEL);
	lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base),
				       GFP_KERNEL);

	if (!lio->glists_virt_base || !lio->glists_dma_base) {
		lio_delete_glists(lio);
		return -ENOMEM;
	}

	for (i = 0; i < num_iqs; i++) {
		int numa_node = dev_to_node(&oct->pci_dev->dev);

		spin_lock_init(&lio->glist_lock[i]);

		INIT_LIST_HEAD(&lio->glist[i]);

		lio->glists_virt_base[i] =
			lio_dma_alloc(oct,
				      lio->glist_entry_size * lio->tx_qsize,
				      &lio->glists_dma_base[i]);

		if (!lio->glists_virt_base[i]) {
			lio_delete_glists(lio);
			return -ENOMEM;
		}

		for (j = 0; j < lio->tx_qsize; j++) {
			g = kzalloc_node(sizeof(*g), GFP_KERNEL,
					 numa_node);
			if (!g)
				g = kzalloc(sizeof(*g), GFP_KERNEL);
			if (!g)
				break;

			g->sg = lio->glists_virt_base[i] +
				(j * lio->glist_entry_size);

			g->sg_dma_ptr = lio->glists_dma_base[i] +
					(j * lio->glist_entry_size);

			list_add_tail(&g->list, &lio->glist[i]);
		}

		if (j != lio->tx_qsize) {
			lio_delete_glists(lio);
			return -ENOMEM;
		}
	}

	return 0;
}

int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = cmd;
	nctrl.ncmd.s.param1 = param1;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 100;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "Feature change failed in core (ret: 0x%x)\n",
			ret);
	}
	return ret;
}

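/* Illustrative usage (a sketch, not a call made from this file): a feature
 * toggle such as LRO can be pushed to firmware with the command/flag values
 * defined in liquidio_common.h, e.g.
 *
 *	liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
 *			     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
 *
 * The request completes asynchronously; liquidio_link_ctrl_cmd_completion()
 * below is wired in as the callback and logs the firmware's response.
 */
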
void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl,
					unsigned int bytes_compl)
{
	struct netdev_queue *netdev_queue = txq;

	netdev_tx_completed_queue(netdev_queue, pkts_compl, bytes_compl);
}

void octeon_update_tx_completion_counters(void *buf, int reqtype,
					  unsigned int *pkts_compl,
					  unsigned int *bytes_compl)
{
	struct octnet_buf_free_info *finfo;
	struct sk_buff *skb = NULL;
	struct octeon_soft_command *sc;

	switch (reqtype) {
	case REQTYPE_NORESP_NET:
	case REQTYPE_NORESP_NET_SG:
		finfo = buf;
		skb = finfo->skb;
		break;

	case REQTYPE_RESP_NET_SG:
	case REQTYPE_RESP_NET:
		sc = buf;
		skb = sc->callback_arg;
		break;

	default:
		return;
	}

	(*pkts_compl)++;
	*bytes_compl += skb->len;
}

int octeon_report_sent_bytes_to_bql(void *buf, int reqtype)
{
	struct octnet_buf_free_info *finfo;
	struct sk_buff *skb;
	struct octeon_soft_command *sc;
	struct netdev_queue *txq;

	switch (reqtype) {
	case REQTYPE_NORESP_NET:
	case REQTYPE_NORESP_NET_SG:
		finfo = buf;
		skb = finfo->skb;
		break;

	case REQTYPE_RESP_NET_SG:
	case REQTYPE_RESP_NET:
		sc = buf;
		skb = sc->callback_arg;
		break;

	default:
		return 0;
	}

	txq = netdev_get_tx_queue(skb->dev, skb_get_queue_mapping(skb));
	netdev_tx_sent_queue(txq, skb->len);

	return netif_xmit_stopped(txq);
}

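/* Byte Queue Limits bookkeeping: every skb accounted at transmit time via
 * netdev_tx_sent_queue() above must later be matched by a call to
 * octeon_report_tx_completion_to_bql() (netdev_tx_completed_queue()) from the
 * TX-buffer free path, otherwise BQL will eventually stop the queue.
 * The return value only tells the caller whether the queue is now stopped.
 */
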
void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
{
	struct octnic_ctrl_pkt *nctrl = (struct octnic_ctrl_pkt *)nctrl_ptr;
	struct net_device *netdev = (struct net_device *)nctrl->netpndev;
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	u8 *mac;

	if (nctrl->completion && nctrl->response_code) {
		/* Signal whoever is interested that the response code from the
		 * firmware has arrived.
		 */
		WRITE_ONCE(*nctrl->response_code, nctrl->status);
		complete(nctrl->completion);
	}

	if (nctrl->status)
		return;

	switch (nctrl->ncmd.s.cmd) {
	case OCTNET_CMD_CHANGE_DEVFLAGS:
	case OCTNET_CMD_SET_MULTI_LIST:
	case OCTNET_CMD_SET_UC_LIST:
		break;

	case OCTNET_CMD_CHANGE_MACADDR:
		mac = ((u8 *)&nctrl->udd[0]) + 2;
		if (nctrl->ncmd.s.param1) {
			/* vfidx is 0 based, but vf_num (param1) is 1 based */
			int vfidx = nctrl->ncmd.s.param1 - 1;
			bool mac_is_admin_assigned = nctrl->ncmd.s.param2;

			if (mac_is_admin_assigned)
				netif_info(lio, probe, lio->netdev,
					   "MAC Address %pM is configured for VF %d\n",
					   mac, vfidx);
		} else {
			netif_info(lio, probe, lio->netdev,
				   " MACAddr changed to %pM\n",
				   mac);
		}
		break;

	case OCTNET_CMD_GPIO_ACCESS:
		netif_info(lio, probe, lio->netdev, "LED Flashing visual identification\n");
		break;

	case OCTNET_CMD_ID_ACTIVE:
		netif_info(lio, probe, lio->netdev, "LED Flashing visual identification\n");
		break;

	case OCTNET_CMD_LRO_ENABLE:
		dev_info(&oct->pci_dev->dev, "%s LRO Enabled\n", netdev->name);
		break;

	case OCTNET_CMD_LRO_DISABLE:
		dev_info(&oct->pci_dev->dev, "%s LRO Disabled\n",
			 netdev->name);
		break;

	case OCTNET_CMD_VERBOSE_ENABLE:
		dev_info(&oct->pci_dev->dev, "%s Firmware debug enabled\n",
			 netdev->name);
		break;

	case OCTNET_CMD_VERBOSE_DISABLE:
		dev_info(&oct->pci_dev->dev, "%s Firmware debug disabled\n",
			 netdev->name);
		break;

	case OCTNET_CMD_VLAN_FILTER_CTL:
		if (nctrl->ncmd.s.param1)
			dev_info(&oct->pci_dev->dev,
				 "%s VLAN filter enabled\n", netdev->name);
		else
			dev_info(&oct->pci_dev->dev,
				 "%s VLAN filter disabled\n", netdev->name);
		break;

	case OCTNET_CMD_ADD_VLAN_FILTER:
		dev_info(&oct->pci_dev->dev, "%s VLAN filter %d added\n",
			 netdev->name, nctrl->ncmd.s.param1);
		break;

	case OCTNET_CMD_DEL_VLAN_FILTER:
		dev_info(&oct->pci_dev->dev, "%s VLAN filter %d removed\n",
			 netdev->name, nctrl->ncmd.s.param1);
		break;

	case OCTNET_CMD_SET_SETTINGS:
		dev_info(&oct->pci_dev->dev, "%s settings changed\n",
			 netdev->name);
		break;

	/* Case to handle "OCTNET_CMD_TNL_RX_CSUM_CTL"
	 * Command passed by NIC driver
	 */
	case OCTNET_CMD_TNL_RX_CSUM_CTL:
		if (nctrl->ncmd.s.param1 == OCTNET_CMD_RXCSUM_ENABLE) {
			netif_info(lio, probe, lio->netdev,
				   "RX Checksum Offload Enabled\n");
		} else if (nctrl->ncmd.s.param1 ==
			   OCTNET_CMD_RXCSUM_DISABLE) {
			netif_info(lio, probe, lio->netdev,
				   "RX Checksum Offload Disabled\n");
		}
		break;

	/* Case to handle "OCTNET_CMD_TNL_TX_CSUM_CTL"
	 * Command passed by NIC driver
	 */
	case OCTNET_CMD_TNL_TX_CSUM_CTL:
		if (nctrl->ncmd.s.param1 == OCTNET_CMD_TXCSUM_ENABLE) {
			netif_info(lio, probe, lio->netdev,
				   "TX Checksum Offload Enabled\n");
		} else if (nctrl->ncmd.s.param1 ==
			   OCTNET_CMD_TXCSUM_DISABLE) {
			netif_info(lio, probe, lio->netdev,
				   "TX Checksum Offload Disabled\n");
		}
		break;

	/* Case to handle "OCTNET_CMD_VXLAN_PORT_CONFIG"
	 * Command passed by NIC driver
	 */
	case OCTNET_CMD_VXLAN_PORT_CONFIG:
		if (nctrl->ncmd.s.more == OCTNET_CMD_VXLAN_PORT_ADD) {
			netif_info(lio, probe, lio->netdev,
				   "VxLAN Destination UDP PORT:%d ADDED\n",
				   nctrl->ncmd.s.param1);
		} else if (nctrl->ncmd.s.more ==
			   OCTNET_CMD_VXLAN_PORT_DEL) {
			netif_info(lio, probe, lio->netdev,
				   "VxLAN Destination UDP PORT:%d DELETED\n",
				   nctrl->ncmd.s.param1);
		}
		break;

	case OCTNET_CMD_SET_FLOW_CTL:
		netif_info(lio, probe, lio->netdev, "Set RX/TX flow control parameters\n");
		break;

	case OCTNET_CMD_QUEUE_COUNT_CTL:
		netif_info(lio, probe, lio->netdev, "Queue count updated to %d\n",
			   nctrl->ncmd.s.param1);
		break;

	default:
		dev_err(&oct->pci_dev->dev, "%s Unknown cmd %d\n", __func__,
			nctrl->ncmd.s.cmd);
	}
}

void octeon_pf_changed_vf_macaddr(struct octeon_device *oct, u8 *mac)
{
	bool macaddr_changed = false;
	struct net_device *netdev;
	struct lio *lio;

	rtnl_lock();

	netdev = oct->props[0].netdev;
	lio = GET_LIO(netdev);

	lio->linfo.macaddr_is_admin_asgnd = true;

	if (!ether_addr_equal(netdev->dev_addr, mac)) {
		macaddr_changed = true;
		ether_addr_copy(netdev->dev_addr, mac);
		ether_addr_copy(((u8 *)&lio->linfo.hw_addr) + 2, mac);
		call_netdevice_notifiers(NETDEV_CHANGEADDR, netdev);
	}

	rtnl_unlock();

	if (macaddr_changed)
		dev_info(&oct->pci_dev->dev,
			 "PF changed VF's MAC address to %pM\n", mac);

	/* no need to notify the firmware of the macaddr change because
	 * the PF did that already
	 */
}

static void octnet_poll_check_rxq_oom_status(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct lio *lio = (struct lio *)wk->ctxptr;
	struct octeon_device *oct = lio->oct_dev;
	struct octeon_droq *droq;
	int q, q_no = 0;

	if (ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
		for (q = 0; q < lio->linfo.num_rxpciq; q++) {
			q_no = lio->linfo.rxpciq[q].s.q_no;
			droq = oct->droq[q_no];
			if (!droq)
				continue;
			octeon_droq_check_oom(droq);
		}
	}
	queue_delayed_work(lio->rxq_status_wq.wq,
			   &lio->rxq_status_wq.wk.work,
			   msecs_to_jiffies(LIO_OOM_POLL_INTERVAL_MS));
}

int setup_rx_oom_poll_fn(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	lio->rxq_status_wq.wq = alloc_workqueue("rxq-oom-status",
						WQ_MEM_RECLAIM, 0);
	if (!lio->rxq_status_wq.wq) {
		dev_err(&oct->pci_dev->dev, "unable to create cavium rxq oom status wq\n");
		return -ENOMEM;
	}
	INIT_DELAYED_WORK(&lio->rxq_status_wq.wk.work,
			  octnet_poll_check_rxq_oom_status);
	lio->rxq_status_wq.wk.ctxptr = lio;
	queue_delayed_work(lio->rxq_status_wq.wq,
			   &lio->rxq_status_wq.wk.work,
			   msecs_to_jiffies(LIO_OOM_POLL_INTERVAL_MS));
	return 0;
}

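/* The OOM poller is self-rearming: octnet_poll_check_rxq_oom_status() above
 * requeues itself every LIO_OOM_POLL_INTERVAL_MS for as long as the workqueue
 * exists. A sketch of the expected pairing (the calling paths live in the
 * PF/VF main drivers and are assumptions here, not code from this file):
 *
 *	if (setup_rx_oom_poll_fn(netdev))	// e.g. in the open/init path
 *		goto err_out;
 *	...
 *	cleanup_rx_oom_poll_fn(netdev);		// e.g. in the stop/remove path
 */
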
void cleanup_rx_oom_poll_fn(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (lio->rxq_status_wq.wq) {
		cancel_delayed_work_sync(&lio->rxq_status_wq.wk.work);
		flush_workqueue(lio->rxq_status_wq.wq);
		destroy_workqueue(lio->rxq_status_wq.wq);
	}
}

/* Runs in interrupt context. */
static void lio_update_txq_status(struct octeon_device *oct, int iq_num)
{
	struct octeon_instr_queue *iq = oct->instr_queue[iq_num];
	struct net_device *netdev;
	struct lio *lio;

	netdev = oct->props[iq->ifidx].netdev;

	/* This is needed because the first IQ does not have
	 * a netdev associated with it.
	 */
	if (!netdev)
		return;

	lio = GET_LIO(netdev);
	if (__netif_subqueue_stopped(netdev, iq->q_index) &&
	    lio->linfo.link.s.link_up &&
	    (!octnet_iq_is_full(oct, iq_num))) {
		netif_wake_subqueue(netdev, iq->q_index);
		INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num,
					  tx_restart, 1);
	}
}

/**
 * \brief Setup output queue
 * @param oct octeon device
 * @param q_no which queue
 * @param num_descs how many descriptors
 * @param desc_size size of each descriptor
 * @param app_ctx application context
 */
static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs,
			     int desc_size, void *app_ctx)
{
	int ret_val;

	dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no);
	/* droq creation and local register settings. */
	ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx);
	if (ret_val < 0)
		return ret_val;

	if (ret_val == 1) {
		dev_dbg(&oct->pci_dev->dev, "Using default droq %d\n", q_no);
		return 0;
	}

	/* Enable the droq queues */
	octeon_set_droq_pkt_op(oct, q_no, 1);

	/* Send Credit for Octeon Output queues. Credits are always
	 * sent after the output queue is enabled.
	 */
	writel(oct->droq[q_no]->max_count, oct->droq[q_no]->pkts_credit_reg);

	return ret_val;
}

/** Routine to push packets arriving on the Octeon interface up to the
 * network layer.
 * @param octeon_id - octeon device id.
 * @param skbuff    - skbuff struct to be passed to network layer.
 * @param len       - size of total data received.
 * @param rh        - Control header associated with the packet
 * @param param     - additional control data with the packet
 * @param arg       - farg registered in droq_ops
 */
static void
liquidio_push_packet(u32 octeon_id __attribute__((unused)),
		     void *skbuff,
		     u32 len,
		     union octeon_rh *rh,
		     void *param,
		     void *arg)
{
	struct net_device *netdev = (struct net_device *)arg;
	struct octeon_droq *droq =
	    container_of(param, struct octeon_droq, napi);
	struct sk_buff *skb = (struct sk_buff *)skbuff;
	struct skb_shared_hwtstamps *shhwtstamps;
	struct napi_struct *napi = param;
	u16 vtag = 0;
	u32 r_dh_off;
	u64 ns;

	if (netdev) {
		struct lio *lio = GET_LIO(netdev);
		struct octeon_device *oct = lio->oct_dev;

		/* Do not proceed if the interface is not in RUNNING state. */
		if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
			recv_buffer_free(skb);
			droq->stats.rx_dropped++;
			return;
		}

		skb->dev = netdev;

		skb_record_rx_queue(skb, droq->q_no);
		if (likely(len > MIN_SKB_SIZE)) {
			struct octeon_skb_page_info *pg_info;
			unsigned char *va;

			pg_info = ((struct octeon_skb_page_info *)(skb->cb));
			if (pg_info->page) {
				/* For Paged allocation use the frags */
				va = page_address(pg_info->page) +
					pg_info->page_offset;
				memcpy(skb->data, va, MIN_SKB_SIZE);
				skb_put(skb, MIN_SKB_SIZE);
				skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
						pg_info->page,
						pg_info->page_offset +
						MIN_SKB_SIZE,
						len - MIN_SKB_SIZE,
						LIO_RXBUFFER_SZ);
			}
		} else {
			struct octeon_skb_page_info *pg_info =
				((struct octeon_skb_page_info *)(skb->cb));

			skb_copy_to_linear_data(skb, page_address(pg_info->page)
						+ pg_info->page_offset, len);
			skb_put(skb, len);
			put_page(pg_info->page);
		}

		r_dh_off = (rh->r_dh.len - 1) * BYTES_PER_DHLEN_UNIT;

		if (oct->ptp_enable) {
			if (rh->r_dh.has_hwtstamp) {
				/* timestamp is included from the hardware at
				 * the beginning of the packet.
				 */
				if (ifstate_check
					(lio,
					 LIO_IFSTATE_RX_TIMESTAMP_ENABLED)) {
					/* Nanoseconds are in the first 64-bits
					 * of the packet.
					 */
					memcpy(&ns, (skb->data + r_dh_off),
					       sizeof(ns));
					r_dh_off -= BYTES_PER_DHLEN_UNIT;
					shhwtstamps = skb_hwtstamps(skb);
					shhwtstamps->hwtstamp =
						ns_to_ktime(ns +
							    lio->ptp_adjust);
				}
			}
		}

		if (rh->r_dh.has_hash) {
			__be32 *hash_be = (__be32 *)(skb->data + r_dh_off);
			u32 hash = be32_to_cpu(*hash_be);

			skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
			r_dh_off -= BYTES_PER_DHLEN_UNIT;
		}

		skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT);
		skb->protocol = eth_type_trans(skb, skb->dev);

		if ((netdev->features & NETIF_F_RXCSUM) &&
		    (((rh->r_dh.encap_on) &&
		      (rh->r_dh.csum_verified & CNNIC_TUN_CSUM_VERIFIED)) ||
		     (!(rh->r_dh.encap_on) &&
		      (rh->r_dh.csum_verified & CNNIC_CSUM_VERIFIED))))
			/* checksum has already been verified */
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		/* Setting Encapsulation field on basis of status received
		 * from the firmware
		 */
		if (rh->r_dh.encap_on) {
			skb->encapsulation = 1;
			skb->csum_level = 1;
			droq->stats.rx_vxlan++;
		}

		/* inbound VLAN tag */
		if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
		    rh->r_dh.vlan) {
			u16 priority = rh->r_dh.priority;
			u16 vid = rh->r_dh.vlan;

			vtag = (priority << VLAN_PRIO_SHIFT) | vid;
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
		}

		napi_gro_receive(napi, skb);

		droq->stats.rx_bytes_received += len -
			rh->r_dh.len * BYTES_PER_DHLEN_UNIT;
		droq->stats.rx_pkts_received++;
	} else {
		recv_buffer_free(skb);
	}
}

/**
 * \brief wrapper for calling napi_schedule
 * @param param parameters to pass to napi_schedule
 *
 * Used when scheduling on different CPUs
 */
static void napi_schedule_wrapper(void *param)
{
	struct napi_struct *napi = param;

	napi_schedule(napi);
}

/**
 * \brief callback when receive interrupt occurs and we are in NAPI mode
 * @param arg pointer to octeon output queue
 */
static void liquidio_napi_drv_callback(void *arg)
{
	struct octeon_device *oct;
	struct octeon_droq *droq = arg;
	int this_cpu = smp_processor_id();

	oct = droq->oct_dev;

	if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct) ||
	    droq->cpu_id == this_cpu) {
		napi_schedule_irqoff(&droq->napi);
	} else {
		call_single_data_t *csd = &droq->csd;

		csd->func = napi_schedule_wrapper;
		csd->info = &droq->napi;
		csd->flags = 0;

		smp_call_function_single_async(droq->cpu_id, csd);
	}
}

/**
 * \brief Entry point for NAPI polling
 * @param napi NAPI structure
 * @param budget maximum number of items to process
 */
static int liquidio_napi_poll(struct napi_struct *napi, int budget)
{
	struct octeon_instr_queue *iq;
	struct octeon_device *oct;
	struct octeon_droq *droq;
	int tx_done = 0, iq_no;
	int work_done;

	droq = container_of(napi, struct octeon_droq, napi);
	oct = droq->oct_dev;
	iq_no = droq->q_no;

	/* Handle Droq descriptors */
	work_done = octeon_droq_process_poll_pkts(oct, droq, budget);

	/* Flush the instruction queue */
	iq = oct->instr_queue[iq_no];
	if (iq) {
		/* TODO: move this check to inside octeon_flush_iq,
		 * once check_db_timeout is removed
		 */
		if (atomic_read(&iq->instr_pending))
			/* Process iq buffers within the budget limits */
			tx_done = octeon_flush_iq(oct, iq, budget);
		else
			tx_done = 1;
		/* Update iq read-index rather than waiting for next interrupt.
		 * Return back if tx_done is false.
		 */
		/* sub-queue status update */
		lio_update_txq_status(oct, iq_no);
	} else {
		dev_err(&oct->pci_dev->dev, "%s: iq (%d) num invalid\n",
			__func__, iq_no);
	}

#define MAX_REG_CNT  2000000U
	/* force enable interrupt if reg cnts are high to avoid wraparound */
	if ((work_done < budget && tx_done) ||
	    (iq && iq->pkt_in_done >= MAX_REG_CNT) ||
	    (droq->pkt_count >= MAX_REG_CNT)) {
		tx_done = 1;
		napi_complete_done(napi, work_done);

		octeon_enable_irq(droq->oct_dev, droq->q_no);
		return 0;
	}

	return (!tx_done) ? (budget) : (work_done);
}

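/* NAPI contract recap for the poll routine above: returning budget keeps the
 * queue on the poll list with its interrupt masked, while completing with
 * napi_complete_done() and returning less than budget re-enables the queue
 * interrupt via octeon_enable_irq(). Returning budget when tx_done is false
 * lets the IQ flush continue in the next poll cycle.
 */
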
/**
 * \brief Setup input and output queues
 * @param octeon_dev octeon device
 * @param ifidx Interface index
 *
 * Note: Queues are with respect to the octeon device. Thus
 * an input queue is for egress packets, and output queues
 * are for ingress packets.
 */
int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx,
			     u32 num_iqs, u32 num_oqs)
{
	struct octeon_droq_ops droq_ops;
	struct net_device *netdev;
	struct octeon_droq *droq;
	struct napi_struct *napi;
	int cpu_id_modulus;
	int num_tx_descs;
	struct lio *lio;
	int retval = 0;
	int q, q_no;
	int cpu_id;

	netdev = octeon_dev->props[ifidx].netdev;

	lio = GET_LIO(netdev);

	memset(&droq_ops, 0, sizeof(struct octeon_droq_ops));

	droq_ops.fptr = liquidio_push_packet;
	droq_ops.farg = netdev;

	droq_ops.poll_mode = 1;
	droq_ops.napi_fn = liquidio_napi_drv_callback;
	cpu_id = 0;
	cpu_id_modulus = num_present_cpus();

	/* set up DROQs. */
	for (q = 0; q < num_oqs; q++) {
		q_no = lio->linfo.rxpciq[q].s.q_no;
		dev_dbg(&octeon_dev->pci_dev->dev,
			"%s index:%d linfo.rxpciq.s.q_no:%d\n",
			__func__, q, q_no);
		retval = octeon_setup_droq(
		    octeon_dev, q_no,
		    CFG_GET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(octeon_dev),
						lio->ifidx),
		    CFG_GET_NUM_RX_BUF_SIZE_NIC_IF(octeon_get_conf(octeon_dev),
						   lio->ifidx),
		    NULL);
		if (retval) {
			dev_err(&octeon_dev->pci_dev->dev,
				"%s : Runtime DROQ(RxQ) creation failed.\n",
				__func__);
			return 1;
		}

		droq = octeon_dev->droq[q_no];
		napi = &droq->napi;
		dev_dbg(&octeon_dev->pci_dev->dev, "netif_napi_add netdev:%llx oct:%llx\n",
			(u64)netdev, (u64)octeon_dev);
		netif_napi_add(netdev, napi, liquidio_napi_poll, 64);

		/* designate a CPU for this droq */
		droq->cpu_id = cpu_id;
		cpu_id++;
		if (cpu_id >= cpu_id_modulus)
			cpu_id = 0;

		octeon_register_droq_ops(octeon_dev, q_no, &droq_ops);
	}

	if (OCTEON_CN23XX_PF(octeon_dev) || OCTEON_CN23XX_VF(octeon_dev)) {
		/* 23XX PF/VF can send/recv control messages (via the first
		 * PF/VF-owned droq) from the firmware even if the ethX
		 * interface is down, so that's why poll_mode must be off
		 * for the first droq.
		 */
		octeon_dev->droq[0]->ops.poll_mode = 0;
	}

	/* set up IQs. */
	for (q = 0; q < num_iqs; q++) {
		num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF(
		    octeon_get_conf(octeon_dev), lio->ifidx);
		retval = octeon_setup_iq(octeon_dev, ifidx, q,
					 lio->linfo.txpciq[q], num_tx_descs,
					 netdev_get_tx_queue(netdev, q));
		if (retval) {
			dev_err(&octeon_dev->pci_dev->dev,
				" %s : Runtime IQ(TxQ) creation failed.\n",
				__func__);
			return 1;
		}

		/* XPS */
		if (!OCTEON_CN23XX_VF(octeon_dev) && octeon_dev->msix_on &&
		    octeon_dev->ioq_vector) {
			struct octeon_ioq_vector *ioq_vector;

			ioq_vector = &octeon_dev->ioq_vector[q];
			netif_set_xps_queue(netdev,
					    &ioq_vector->affinity_mask,
					    ioq_vector->iq_index);
		}
	}

	return 0;
}

static
int liquidio_schedule_msix_droq_pkt_handler(struct octeon_droq *droq, u64 ret)
{
	struct octeon_device *oct = droq->oct_dev;
	struct octeon_device_priv *oct_priv =
	    (struct octeon_device_priv *)oct->priv;

	if (droq->ops.poll_mode) {
		droq->ops.napi_fn(droq);
	} else {
		if (ret & MSIX_PO_INT) {
			if (OCTEON_CN23XX_VF(oct))
				dev_err(&oct->pci_dev->dev,
					"should not come here should not get rx when poll mode = 0 for vf\n");
			tasklet_schedule(&oct_priv->droq_tasklet);
			return 1;
		}
		/* this will be flushed periodically by check iq db */
		if (ret & MSIX_PI_INT)
			return 0;
	}
	return 0;
}

irqreturn_t
liquidio_msix_intr_handler(int irq __attribute__((unused)), void *dev)
{
	struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev;
	struct octeon_device *oct = ioq_vector->oct_dev;
	struct octeon_droq *droq = oct->droq[ioq_vector->droq_index];
	u64 ret;

	ret = oct->fn_list.msix_interrupt_handler(ioq_vector);

	if (ret & MSIX_PO_INT || ret & MSIX_PI_INT)
		liquidio_schedule_msix_droq_pkt_handler(droq, ret);

	return IRQ_HANDLED;
}

/**
 * \brief Droq packet processor scheduler
 * @param oct octeon device
 */
static void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct)
{
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;
	struct octeon_droq *droq;
	u64 oq_no;

	if (oct->int_status & OCT_DEV_INTR_PKT_DATA) {
		for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct);
		     oq_no++) {
			if (!(oct->droq_intr & BIT_ULL(oq_no)))
				continue;

			droq = oct->droq[oq_no];

			if (droq->ops.poll_mode) {
				droq->ops.napi_fn(droq);
				oct_priv->napi_mask |= BIT_ULL(oq_no);
			} else {
				tasklet_schedule(&oct_priv->droq_tasklet);
			}
		}
	}
}

/**
 * \brief Interrupt handler for octeon
 * @param irq unused
 * @param dev octeon device
 */
static
irqreturn_t liquidio_legacy_intr_handler(int irq __attribute__((unused)),
					 void *dev)
{
	struct octeon_device *oct = (struct octeon_device *)dev;
	irqreturn_t ret;

	/* Disable our interrupts for the duration of ISR */
	oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

	ret = oct->fn_list.process_interrupt_regs(oct);

	if (ret == IRQ_HANDLED)
		liquidio_schedule_droq_pkt_handlers(oct);

	/* Re-enable our interrupts */
	if (!(atomic_read(&oct->status) == OCT_DEV_IN_RESET))
		oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);

	return ret;
}

/**
 * \brief Setup interrupt for octeon device
 * @param oct octeon device
 *
 * Enable interrupt in Octeon device as given in the PCI interrupt mask.
 */
int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs)
{
	struct msix_entry *msix_entries;
	char *queue_irq_names = NULL;
	int i, num_interrupts = 0;
	int num_alloc_ioq_vectors;
	char *aux_irq_name = NULL;
	int num_ioq_vectors;
	int irqret, err;

	if (oct->msix_on) {
		oct->num_msix_irqs = num_ioqs;
		if (OCTEON_CN23XX_PF(oct)) {
			num_interrupts = MAX_IOQ_INTERRUPTS_PER_PF + 1;

			/* one non-ioq interrupt for handling
			 * sli_mac_pf_int_sum
			 */
			oct->num_msix_irqs += 1;
		} else if (OCTEON_CN23XX_VF(oct)) {
			num_interrupts = MAX_IOQ_INTERRUPTS_PER_VF;
		}

		/* allocate storage for the names assigned to each irq */
		oct->irq_name_storage =
			kcalloc(num_interrupts, INTRNAMSIZ, GFP_KERNEL);
		if (!oct->irq_name_storage) {
			dev_err(&oct->pci_dev->dev, "Irq name storage alloc failed...\n");
			return -ENOMEM;
		}

		queue_irq_names = oct->irq_name_storage;

		if (OCTEON_CN23XX_PF(oct))
			aux_irq_name = &queue_irq_names
				[IRQ_NAME_OFF(MAX_IOQ_INTERRUPTS_PER_PF)];

		oct->msix_entries = kcalloc(oct->num_msix_irqs,
					    sizeof(struct msix_entry),
					    GFP_KERNEL);
		if (!oct->msix_entries) {
			dev_err(&oct->pci_dev->dev, "Memory Alloc failed...\n");
			kfree(oct->irq_name_storage);
			oct->irq_name_storage = NULL;
			return -ENOMEM;
		}

		msix_entries = (struct msix_entry *)oct->msix_entries;

		/* Assumption is that PF MSI-X vectors start at pf_srn and run
		 * through trs, not from 0; if that changes, change this code.
		 */
		if (OCTEON_CN23XX_PF(oct)) {
			for (i = 0; i < oct->num_msix_irqs - 1; i++)
				msix_entries[i].entry =
					oct->sriov_info.pf_srn + i;

			msix_entries[oct->num_msix_irqs - 1].entry =
				oct->sriov_info.trs;
		} else if (OCTEON_CN23XX_VF(oct)) {
			for (i = 0; i < oct->num_msix_irqs; i++)
				msix_entries[i].entry = i;
		}

		num_alloc_ioq_vectors = pci_enable_msix_range(
						oct->pci_dev, msix_entries,
						oct->num_msix_irqs,
						oct->num_msix_irqs);
		if (num_alloc_ioq_vectors < 0) {
			dev_err(&oct->pci_dev->dev, "unable to Allocate MSI-X interrupts\n");
			kfree(oct->msix_entries);
			oct->msix_entries = NULL;
			kfree(oct->irq_name_storage);
			oct->irq_name_storage = NULL;
			return num_alloc_ioq_vectors;
		}

		dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n");

		num_ioq_vectors = oct->num_msix_irqs;
		/* For PF, there is one non-ioq interrupt handler */
		if (OCTEON_CN23XX_PF(oct)) {
			num_ioq_vectors -= 1;

			snprintf(aux_irq_name, INTRNAMSIZ,
				 "LiquidIO%u-pf%u-aux", oct->octeon_id,
				 oct->pf_num);
			irqret = request_irq(
					msix_entries[num_ioq_vectors].vector,
					liquidio_legacy_intr_handler, 0,
					aux_irq_name, oct);
			if (irqret) {
				dev_err(&oct->pci_dev->dev,
					"Request_irq failed for MSIX interrupt Error: %d\n",
					irqret);
				pci_disable_msix(oct->pci_dev);
				kfree(oct->msix_entries);
				kfree(oct->irq_name_storage);
				oct->irq_name_storage = NULL;
				oct->msix_entries = NULL;
				return irqret;
			}
		}
		for (i = 0 ; i < num_ioq_vectors ; i++) {
			if (OCTEON_CN23XX_PF(oct))
				snprintf(&queue_irq_names[IRQ_NAME_OFF(i)],
					 INTRNAMSIZ, "LiquidIO%u-pf%u-rxtx-%u",
					 oct->octeon_id, oct->pf_num, i);

			if (OCTEON_CN23XX_VF(oct))
				snprintf(&queue_irq_names[IRQ_NAME_OFF(i)],
					 INTRNAMSIZ, "LiquidIO%u-vf%u-rxtx-%u",
					 oct->octeon_id, oct->vf_num, i);

			irqret = request_irq(msix_entries[i].vector,
					     liquidio_msix_intr_handler, 0,
					     &queue_irq_names[IRQ_NAME_OFF(i)],
					     &oct->ioq_vector[i]);

			if (irqret) {
				dev_err(&oct->pci_dev->dev,
					"Request_irq failed for MSIX interrupt Error: %d\n",
					irqret);
				/* Free the non-ioq irq vector here. */
				free_irq(msix_entries[num_ioq_vectors].vector,
					 oct);

				while (i) {
					i--;
					/* clearing affinity mask. */
					irq_set_affinity_hint(
						msix_entries[i].vector,
						NULL);
					free_irq(msix_entries[i].vector,
						 &oct->ioq_vector[i]);
				}
				pci_disable_msix(oct->pci_dev);
				kfree(oct->msix_entries);
				kfree(oct->irq_name_storage);
				oct->irq_name_storage = NULL;
				oct->msix_entries = NULL;
				return irqret;
			}
			oct->ioq_vector[i].vector = msix_entries[i].vector;
			/* assign the cpu mask for this msix interrupt vector */
			irq_set_affinity_hint(msix_entries[i].vector,
					      &oct->ioq_vector[i].affinity_mask);
		}
		dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: MSI-X enabled\n",
			oct->octeon_id);
	} else {
		err = pci_enable_msi(oct->pci_dev);
		if (err)
			dev_warn(&oct->pci_dev->dev, "Reverting to legacy interrupts. Error: %d\n",
				 err);
		else
			oct->flags |= LIO_FLAG_MSI_ENABLED;

		/* allocate storage for the names assigned to the irq */
		oct->irq_name_storage = kcalloc(1, INTRNAMSIZ, GFP_KERNEL);
		if (!oct->irq_name_storage)
			return -ENOMEM;

		queue_irq_names = oct->irq_name_storage;

		if (OCTEON_CN23XX_PF(oct))
			snprintf(&queue_irq_names[IRQ_NAME_OFF(0)], INTRNAMSIZ,
				 "LiquidIO%u-pf%u-rxtx-%u",
				 oct->octeon_id, oct->pf_num, 0);

		if (OCTEON_CN23XX_VF(oct))
			snprintf(&queue_irq_names[IRQ_NAME_OFF(0)], INTRNAMSIZ,
				 "LiquidIO%u-vf%u-rxtx-%u",
				 oct->octeon_id, oct->vf_num, 0);

		irqret = request_irq(oct->pci_dev->irq,
				     liquidio_legacy_intr_handler,
				     IRQF_SHARED,
				     &queue_irq_names[IRQ_NAME_OFF(0)], oct);
		if (irqret) {
			if (oct->flags & LIO_FLAG_MSI_ENABLED)
				pci_disable_msi(oct->pci_dev);
			dev_err(&oct->pci_dev->dev, "Request IRQ failed with code: %d\n",
				irqret);
			kfree(oct->irq_name_storage);
			oct->irq_name_storage = NULL;
			return irqret;
		}
	}
	return 0;
}

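/* Teardown (handled by the PF/VF main drivers, not in this file) must mirror
 * the setup above: clear each per-queue affinity hint with
 * irq_set_affinity_hint(vector, NULL) and free_irq() it, free the PF aux IRQ,
 * then call pci_disable_msix()/pci_disable_msi() and release
 * oct->msix_entries and oct->irq_name_storage.
 */
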
static void liquidio_change_mtu_completion(struct octeon_device *oct,
					   u32 status, void *buf)
{
	struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
	struct liquidio_if_cfg_context *ctx;

	ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;

	if (status) {
		dev_err(&oct->pci_dev->dev, "MTU change failed. Status: %llx\n",
			CVM_CAST64(status));
		WRITE_ONCE(ctx->cond, LIO_CHANGE_MTU_FAIL);
	} else {
		WRITE_ONCE(ctx->cond, LIO_CHANGE_MTU_SUCCESS);
	}

	/* This barrier is required to be sure that the response has been
	 * written fully before waking up the handler
	 */
	wmb();

	wake_up_interruptible(&ctx->wc);
}

/**
 * \brief Net device change_mtu
 * @param netdev network device
 */
int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct liquidio_if_cfg_context *ctx;
	struct octeon_soft_command *sc;
	union octnet_cmd *ncmd;
	int ctx_size;
	int ret = 0;

	ctx_size = sizeof(struct liquidio_if_cfg_context);
	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, 16, ctx_size);
	if (!sc)
		return -ENOMEM;

	ncmd = (union octnet_cmd *)sc->virtdptr;
	ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;

	WRITE_ONCE(ctx->cond, 0);
	ctx->octeon_id = lio_get_device_id(oct);
	init_waitqueue_head(&ctx->wc);

	ncmd->u64 = 0;
	ncmd->s.cmd = OCTNET_CMD_CHANGE_MTU;
	ncmd->s.param1 = new_mtu;

	octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
				    OPCODE_NIC_CMD, 0, 0, 0);

	sc->callback = liquidio_change_mtu_completion;
	sc->callback_arg = sc;
	sc->wait_time = 100;

	ret = octeon_send_soft_command(oct, sc);
	if (ret == IQ_SEND_FAILED) {
		netif_info(lio, rx_err, lio->netdev, "Failed to change MTU\n");
		/* the command was never queued, so free it here */
		octeon_free_soft_command(oct, sc);
		return -EINVAL;
	}
	/* Sleep on a wait queue till the cond flag indicates that the
	 * response arrived or timed-out.
	 */
	if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR ||
	    ctx->cond == LIO_CHANGE_MTU_FAIL) {
		octeon_free_soft_command(oct, sc);
		return -EINVAL;
	}

	netdev->mtu = new_mtu;
	lio->mtu = new_mtu;

	octeon_free_soft_command(oct, sc);
	return 0;
}

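/* liquidio_change_mtu() is written to be plugged into the per-interface
 * struct net_device_ops as the .ndo_change_mtu hook, e.g. (illustrative
 * sketch; the actual ops tables live in the PF/VF main drivers):
 *
 *	static const struct net_device_ops lionetdevops = {
 *		...
 *		.ndo_change_mtu = liquidio_change_mtu,
 *		...
 *	};
 *
 * The netdev core enforces netdev->min_mtu/max_mtu bounds before this hook
 * is called.
 */
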
int lio_wait_for_clean_oq(struct octeon_device *oct)
{
	int retry = 100, pending_pkts = 0;
	int idx;

	do {
		pending_pkts = 0;

		for (idx = 0; idx < MAX_OCTEON_OUTPUT_QUEUES(oct); idx++) {
			if (!(oct->io_qmask.oq & BIT_ULL(idx)))
				continue;
			pending_pkts +=
				atomic_read(&oct->droq[idx]->pkts_pending);
		}

		if (pending_pkts > 0)
			schedule_timeout_uninterruptible(1);

	} while (retry-- && pending_pkts);

	return pending_pkts;
}

static void
octnet_nic_stats_callback(struct octeon_device *oct_dev,
			  u32 status, void *ptr)
{
	struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
	struct oct_nic_stats_resp *resp =
	    (struct oct_nic_stats_resp *)sc->virtrptr;
	struct oct_nic_stats_ctrl *ctrl =
	    (struct oct_nic_stats_ctrl *)sc->ctxptr;
	struct nic_rx_stats *rsp_rstats = &resp->stats.fromwire;
	struct nic_tx_stats *rsp_tstats = &resp->stats.fromhost;
	struct nic_rx_stats *rstats = &oct_dev->link_stats.fromwire;
	struct nic_tx_stats *tstats = &oct_dev->link_stats.fromhost;

	if (status != OCTEON_REQUEST_TIMEOUT && !resp->status) {
		octeon_swap_8B_data((u64 *)&resp->stats,
				    (sizeof(struct oct_link_stats)) >> 3);

		/* RX link-level stats */
		rstats->total_rcvd = rsp_rstats->total_rcvd;
		rstats->bytes_rcvd = rsp_rstats->bytes_rcvd;
		rstats->total_bcst = rsp_rstats->total_bcst;
		rstats->total_mcst = rsp_rstats->total_mcst;
		rstats->runts = rsp_rstats->runts;
		rstats->ctl_rcvd = rsp_rstats->ctl_rcvd;
		/* Accounts for over/under-run of buffers */
		rstats->fifo_err = rsp_rstats->fifo_err;
		rstats->dmac_drop = rsp_rstats->dmac_drop;
		rstats->fcs_err = rsp_rstats->fcs_err;
		rstats->jabber_err = rsp_rstats->jabber_err;
		rstats->l2_err = rsp_rstats->l2_err;
		rstats->frame_err = rsp_rstats->frame_err;
		rstats->red_drops = rsp_rstats->red_drops;

		/* RX firmware stats */
		rstats->fw_total_rcvd = rsp_rstats->fw_total_rcvd;
		rstats->fw_total_fwd = rsp_rstats->fw_total_fwd;
		rstats->fw_total_mcast = rsp_rstats->fw_total_mcast;
		rstats->fw_total_bcast = rsp_rstats->fw_total_bcast;
		rstats->fw_err_pko = rsp_rstats->fw_err_pko;
		rstats->fw_err_link = rsp_rstats->fw_err_link;
		rstats->fw_err_drop = rsp_rstats->fw_err_drop;
		rstats->fw_rx_vxlan = rsp_rstats->fw_rx_vxlan;
		rstats->fw_rx_vxlan_err = rsp_rstats->fw_rx_vxlan_err;

		/* Number of packets that are LROed */
		rstats->fw_lro_pkts = rsp_rstats->fw_lro_pkts;
		/* Number of octets that are LROed */
		rstats->fw_lro_octs = rsp_rstats->fw_lro_octs;
		/* Number of LRO packets formed */
		rstats->fw_total_lro = rsp_rstats->fw_total_lro;
		/* Number of times LRO of a packet was aborted */
		rstats->fw_lro_aborts = rsp_rstats->fw_lro_aborts;
		rstats->fw_lro_aborts_port = rsp_rstats->fw_lro_aborts_port;
		rstats->fw_lro_aborts_seq = rsp_rstats->fw_lro_aborts_seq;
		rstats->fw_lro_aborts_tsval = rsp_rstats->fw_lro_aborts_tsval;
		rstats->fw_lro_aborts_timer = rsp_rstats->fw_lro_aborts_timer;
		/* intrmod: packet forward rate */
		rstats->fwd_rate = rsp_rstats->fwd_rate;

		/* TX link-level stats */
		tstats->total_pkts_sent = rsp_tstats->total_pkts_sent;
		tstats->total_bytes_sent = rsp_tstats->total_bytes_sent;
		tstats->mcast_pkts_sent = rsp_tstats->mcast_pkts_sent;
		tstats->bcast_pkts_sent = rsp_tstats->bcast_pkts_sent;
		tstats->ctl_sent = rsp_tstats->ctl_sent;
		/* Packets sent after one collision */
		tstats->one_collision_sent = rsp_tstats->one_collision_sent;
		/* Packets sent after multiple collisions */
		tstats->multi_collision_sent = rsp_tstats->multi_collision_sent;
		/* Packets not sent due to max collisions */
		tstats->max_collision_fail = rsp_tstats->max_collision_fail;
		/* Packets not sent due to max deferrals */
		tstats->max_deferral_fail = rsp_tstats->max_deferral_fail;
		/* Accounts for over/under-run of buffers */
		tstats->fifo_err = rsp_tstats->fifo_err;
		tstats->runts = rsp_tstats->runts;
		/* Total number of collisions detected */
		tstats->total_collisions = rsp_tstats->total_collisions;

		/* firmware stats */
		tstats->fw_total_sent = rsp_tstats->fw_total_sent;
		tstats->fw_total_fwd = rsp_tstats->fw_total_fwd;
		tstats->fw_total_mcast_sent = rsp_tstats->fw_total_mcast_sent;
		tstats->fw_total_bcast_sent = rsp_tstats->fw_total_bcast_sent;
		tstats->fw_err_pko = rsp_tstats->fw_err_pko;
		tstats->fw_err_pki = rsp_tstats->fw_err_pki;
		tstats->fw_err_link = rsp_tstats->fw_err_link;
		tstats->fw_err_drop = rsp_tstats->fw_err_drop;
		tstats->fw_tso = rsp_tstats->fw_tso;
		tstats->fw_tso_fwd = rsp_tstats->fw_tso_fwd;
		tstats->fw_err_tso = rsp_tstats->fw_err_tso;
		tstats->fw_tx_vxlan = rsp_tstats->fw_tx_vxlan;

		resp->status = 1;
	} else {
		resp->status = -1;
	}
	complete(&ctrl->complete);
}

int octnet_get_link_stats(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;
	struct octeon_soft_command *sc;
	struct oct_nic_stats_ctrl *ctrl;
	struct oct_nic_stats_resp *resp;
	int retval;

	/* Alloc soft command */
	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct_dev,
					  0,
					  sizeof(struct oct_nic_stats_resp),
					  sizeof(struct octnic_ctrl_pkt));

	if (!sc)
		return -ENOMEM;

	resp = (struct oct_nic_stats_resp *)sc->virtrptr;
	memset(resp, 0, sizeof(struct oct_nic_stats_resp));

	ctrl = (struct oct_nic_stats_ctrl *)sc->ctxptr;
	memset(ctrl, 0, sizeof(struct oct_nic_stats_ctrl));
	ctrl->netdev = netdev;
	init_completion(&ctrl->complete);

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
				    OPCODE_NIC_PORT_STATS, 0, 0, 0);

	sc->callback = octnet_nic_stats_callback;
	sc->callback_arg = sc;
	sc->wait_time = 500;	/* in milliseconds */

	retval = octeon_send_soft_command(oct_dev, sc);
	if (retval == IQ_SEND_FAILED) {
		octeon_free_soft_command(oct_dev, sc);
		return -EINVAL;
	}

	wait_for_completion_timeout(&ctrl->complete, msecs_to_jiffies(1000));

	if (resp->status != 1) {
		octeon_free_soft_command(oct_dev, sc);
		return -EINVAL;
	}

	octeon_free_soft_command(oct_dev, sc);

	return 0;
}

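/* On success, the refreshed firmware/MAC counters are left in
 * oct_dev->link_stats (filled by octnet_nic_stats_callback() above); the
 * caller only learns pass/fail from the return value. A typical caller in
 * the driver's stats path might do something along these lines (illustrative
 * sketch only; lstats is a hypothetical local):
 *
 *	if (!octnet_get_link_stats(netdev))
 *		memcpy(&lstats, &oct_dev->link_stats, sizeof(lstats));
 */
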
static void liquidio_nic_seapi_ctl_callback(struct octeon_device *oct,
					    u32 status,
					    void *buf)
{
	struct liquidio_nic_seapi_ctl_context *ctx;
	struct octeon_soft_command *sc = buf;

	ctx = sc->ctxptr;

	oct = lio_get_device(ctx->octeon_id);
	if (status) {
		dev_err(&oct->pci_dev->dev, "%s: instruction failed. Status: %llx\n",
			__func__,
			CVM_CAST64(status));
	}
	ctx->status = status;
	complete(&ctx->complete);
}

int liquidio_set_speed(struct lio *lio, int speed)
{
	struct liquidio_nic_seapi_ctl_context *ctx;
	struct octeon_device *oct = lio->oct_dev;
	struct oct_nic_seapi_resp *resp;
	struct octeon_soft_command *sc;
	union octnet_cmd *ncmd;
	u32 ctx_size;
	int retval;
	u32 var;

	if (oct->speed_setting == speed)
		return 0;

	if (!OCTEON_CN23XX_PF(oct)) {
		dev_err(&oct->pci_dev->dev, "%s: SET SPEED only for PF\n",
			__func__);
		return -EOPNOTSUPP;
	}

	ctx_size = sizeof(struct liquidio_nic_seapi_ctl_context);
	sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
				       sizeof(struct oct_nic_seapi_resp),
				       ctx_size);
	if (!sc)
		return -ENOMEM;

	ncmd = sc->virtdptr;
	ctx = sc->ctxptr;
	resp = sc->virtrptr;
	memset(resp, 0, sizeof(struct oct_nic_seapi_resp));

	ctx->octeon_id = lio_get_device_id(oct);
	ctx->status = 0;
	init_completion(&ctx->complete);

	ncmd->u64 = 0;
	ncmd->s.cmd = SEAPI_CMD_SPEED_SET;
	ncmd->s.param1 = speed;

	octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
				    OPCODE_NIC_UBOOT_CTL, 0, 0, 0);

	sc->callback = liquidio_nic_seapi_ctl_callback;
	sc->callback_arg = sc;
	sc->wait_time = 5000;

	retval = octeon_send_soft_command(oct, sc);
	if (retval == IQ_SEND_FAILED) {
		dev_info(&oct->pci_dev->dev, "Failed to send soft command\n");
		retval = -EBUSY;
	} else {
		/* Wait for response or timeout */
		if (wait_for_completion_timeout(&ctx->complete,
						msecs_to_jiffies(10000)) == 0) {
			dev_err(&oct->pci_dev->dev, "%s: sc timeout\n",
				__func__);
			octeon_free_soft_command(oct, sc);
			return -EINTR;
		}

		retval = resp->status;

		if (retval) {
			dev_err(&oct->pci_dev->dev, "%s failed, retval=%d\n",
				__func__, retval);
			octeon_free_soft_command(oct, sc);
			return -EIO;
		}

		var = be32_to_cpu((__force __be32)resp->speed);
		if (var != speed) {
			dev_err(&oct->pci_dev->dev,
				"%s: setting failed speed= %x, expect %x\n",
				__func__, var, speed);
		}

		oct->speed_setting = var;
	}

	octeon_free_soft_command(oct, sc);

	return retval;
}

int liquidio_get_speed(struct lio *lio)
{
	struct liquidio_nic_seapi_ctl_context *ctx;
	struct octeon_device *oct = lio->oct_dev;
	struct oct_nic_seapi_resp *resp;
	struct octeon_soft_command *sc;
	union octnet_cmd *ncmd;
	u32 ctx_size;
	int retval;

	ctx_size = sizeof(struct liquidio_nic_seapi_ctl_context);
	sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
				       sizeof(struct oct_nic_seapi_resp),
				       ctx_size);
	if (!sc)
		return -ENOMEM;

	ncmd = sc->virtdptr;
	ctx = sc->ctxptr;
	resp = sc->virtrptr;
	memset(resp, 0, sizeof(struct oct_nic_seapi_resp));

	ctx->octeon_id = lio_get_device_id(oct);
	ctx->status = 0;
	init_completion(&ctx->complete);

	ncmd->u64 = 0;
	ncmd->s.cmd = SEAPI_CMD_SPEED_GET;

	octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
				    OPCODE_NIC_UBOOT_CTL, 0, 0, 0);

	sc->callback = liquidio_nic_seapi_ctl_callback;
	sc->callback_arg = sc;
	sc->wait_time = 5000;

	retval = octeon_send_soft_command(oct, sc);
	if (retval == IQ_SEND_FAILED) {
		dev_info(&oct->pci_dev->dev, "Failed to send soft command\n");
		oct->no_speed_setting = 1;
		oct->speed_setting = 25;

		retval = -EBUSY;
	} else {
		if (wait_for_completion_timeout(&ctx->complete,
						msecs_to_jiffies(10000)) == 0) {
			dev_err(&oct->pci_dev->dev, "%s: sc timeout\n",
				__func__);

			oct->speed_setting = 25;
			oct->no_speed_setting = 1;

			octeon_free_soft_command(oct, sc);

			return -EINTR;
		}
		retval = resp->status;
		if (retval) {
			dev_err(&oct->pci_dev->dev,
				"%s failed retval=%d\n", __func__, retval);
			oct->no_speed_setting = 1;
			oct->speed_setting = 25;
			/* return here; falling through would free sc twice */
			octeon_free_soft_command(oct, sc);
			return -EIO;
		}

		{
			u32 var;

			var = be32_to_cpu((__force __be32)resp->speed);
			oct->speed_setting = var;
			if (var == 0xffff) {
				oct->no_speed_setting = 1;
				/* unable to access boot variables
				 * get the default value based on the NIC type
				 */
				oct->speed_setting = 25;
			}
		}
	}

	octeon_free_soft_command(oct, sc);

	return retval;
}