- /**********************************************************************
- * Author: Cavium, Inc.
- *
- * Contact: support@cavium.com
- * Please include "LiquidIO" in the subject.
- *
- * Copyright (c) 2003-2016 Cavium, Inc.
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more details.
- ***********************************************************************/
- #include <linux/pci.h>
- #include <linux/if_vlan.h>
- #include "liquidio_common.h"
- #include "octeon_droq.h"
- #include "octeon_iq.h"
- #include "response_manager.h"
- #include "octeon_device.h"
- #include "octeon_nic.h"
- #include "octeon_main.h"
- #include "octeon_network.h"
- /* OOM task polling interval */
- #define LIO_OOM_POLL_INTERVAL_MS 250
- #define OCTNIC_MAX_SG MAX_SKB_FRAGS
- /**
- * \brief Callback for getting interface configuration
- * @param oct octeon device
- * @param status status of request
- * @param buf pointer to resp structure
- */
- void lio_if_cfg_callback(struct octeon_device *oct,
- u32 status __attribute__((unused)), void *buf)
- {
- struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
- struct liquidio_if_cfg_context *ctx;
- struct liquidio_if_cfg_resp *resp;
- resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
- ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
- oct = lio_get_device(ctx->octeon_id);
- if (resp->status)
- dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: %llx\n",
- CVM_CAST64(resp->status));
- WRITE_ONCE(ctx->cond, 1);
- snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s",
- resp->cfg_info.liquidio_firmware_version);
- /* This barrier is required to be sure that the response has been
- * written fully before waking up the handler
- */
- wmb();
- wake_up_interruptible(&ctx->wc);
- }
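- /* Illustrative sketch (not part of the upstream file): the submitter side
- * that pairs with lio_if_cfg_callback() above. The caller initializes
- * ctx->cond to 0 and ctx->wc before sending the soft command; the callback
- * sets ctx->cond and wakes the queue once the response has landed. The
- * helper name is hypothetical.
- */
- static int __maybe_unused lio_wait_for_if_cfg_resp(struct octeon_soft_command *sc)
- {
- struct liquidio_if_cfg_context *ctx =
- (struct liquidio_if_cfg_context *)sc->ctxptr;
- /* returns -EINTR if a signal arrived before ctx->cond was set */
- return sleep_cond(&ctx->wc, &ctx->cond);
- }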
- /**
- * \brief Delete gather lists
- * @param lio per-network private data
- */
- void lio_delete_glists(struct lio *lio)
- {
- struct octnic_gather *g;
- int i;
- kfree(lio->glist_lock);
- lio->glist_lock = NULL;
- if (!lio->glist)
- return;
- for (i = 0; i < lio->oct_dev->num_iqs; i++) {
- do {
- g = (struct octnic_gather *)
- lio_list_delete_head(&lio->glist[i]);
- kfree(g);
- } while (g);
- if (lio->glists_virt_base && lio->glists_virt_base[i] &&
- lio->glists_dma_base && lio->glists_dma_base[i]) {
- lio_dma_free(lio->oct_dev,
- lio->glist_entry_size * lio->tx_qsize,
- lio->glists_virt_base[i],
- lio->glists_dma_base[i]);
- }
- }
- kfree(lio->glists_virt_base);
- lio->glists_virt_base = NULL;
- kfree(lio->glists_dma_base);
- lio->glists_dma_base = NULL;
- kfree(lio->glist);
- lio->glist = NULL;
- }
- /**
- * \brief Setup gather lists
- * @param oct octeon device
- * @param lio per-network private data
- * @param num_iqs number of input queues
- */
- int lio_setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
- {
- struct octnic_gather *g;
- int i, j;
- lio->glist_lock =
- kcalloc(num_iqs, sizeof(*lio->glist_lock), GFP_KERNEL);
- if (!lio->glist_lock)
- return -ENOMEM;
- lio->glist =
- kcalloc(num_iqs, sizeof(*lio->glist), GFP_KERNEL);
- if (!lio->glist) {
- kfree(lio->glist_lock);
- lio->glist_lock = NULL;
- return -ENOMEM;
- }
- lio->glist_entry_size =
- ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);
- /* allocate memory to store the virtual and DMA base addresses of
- * the per-glist consistent memory
- */
- lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base),
- GFP_KERNEL);
- lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base),
- GFP_KERNEL);
- if (!lio->glists_virt_base || !lio->glists_dma_base) {
- lio_delete_glists(lio);
- return -ENOMEM;
- }
- for (i = 0; i < num_iqs; i++) {
- int numa_node = dev_to_node(&oct->pci_dev->dev);
- spin_lock_init(&lio->glist_lock[i]);
- INIT_LIST_HEAD(&lio->glist[i]);
- lio->glists_virt_base[i] =
- lio_dma_alloc(oct,
- lio->glist_entry_size * lio->tx_qsize,
- &lio->glists_dma_base[i]);
- if (!lio->glists_virt_base[i]) {
- lio_delete_glists(lio);
- return -ENOMEM;
- }
- for (j = 0; j < lio->tx_qsize; j++) {
- g = kzalloc_node(sizeof(*g), GFP_KERNEL,
- numa_node);
- if (!g)
- g = kzalloc(sizeof(*g), GFP_KERNEL);
- if (!g)
- break;
- g->sg = lio->glists_virt_base[i] +
- (j * lio->glist_entry_size);
- g->sg_dma_ptr = lio->glists_dma_base[i] +
- (j * lio->glist_entry_size);
- list_add_tail(&g->list, &lio->glist[i]);
- }
- if (j != lio->tx_qsize) {
- lio_delete_glists(lio);
- return -ENOMEM;
- }
- }
- return 0;
- }
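- /* Illustrative sketch (a minimal example, not part of this file): how the
- * transmit path in lio_main.c typically borrows one of the gather entries
- * built above, under the matching per-queue lock. The helper name is
- * hypothetical; the entry must be returned to lio->glist[q] the same way
- * once the DMA completes.
- */
- static struct octnic_gather *__maybe_unused
- lio_get_gather_entry_example(struct lio *lio, int q)
- {
- struct octnic_gather *g;
- spin_lock(&lio->glist_lock[q]);
- g = (struct octnic_gather *)lio_list_delete_head(&lio->glist[q]);
- spin_unlock(&lio->glist_lock[q]);
- return g; /* NULL when the per-queue free list is exhausted */
- }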
- int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1)
- {
- struct lio *lio = GET_LIO(netdev);
- struct octeon_device *oct = lio->oct_dev;
- struct octnic_ctrl_pkt nctrl;
- int ret = 0;
- memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
- nctrl.ncmd.u64 = 0;
- nctrl.ncmd.s.cmd = cmd;
- nctrl.ncmd.s.param1 = param1;
- nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
- nctrl.wait_time = 100;
- nctrl.netpndev = (u64)netdev;
- nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
- ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
- if (ret < 0) {
- dev_err(&oct->pci_dev->dev, "Feature change failed in core (ret: 0x%x)\n",
- ret);
- }
- return ret;
- }
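- /* Example usage (a sketch mirroring calls made from lio_main.c): enable
- * LRO for IPv4 and IPv6. The flag names come from liquidio_common.h; the
- * wrapper itself is hypothetical.
- */
- static int __maybe_unused lio_enable_lro_example(struct net_device *netdev)
- {
- return liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
- OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
- }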
- void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl,
- unsigned int bytes_compl)
- {
- struct netdev_queue *netdev_queue = txq;
- netdev_tx_completed_queue(netdev_queue, pkts_compl, bytes_compl);
- }
- void octeon_update_tx_completion_counters(void *buf, int reqtype,
- unsigned int *pkts_compl,
- unsigned int *bytes_compl)
- {
- struct octnet_buf_free_info *finfo;
- struct sk_buff *skb = NULL;
- struct octeon_soft_command *sc;
- switch (reqtype) {
- case REQTYPE_NORESP_NET:
- case REQTYPE_NORESP_NET_SG:
- finfo = buf;
- skb = finfo->skb;
- break;
- case REQTYPE_RESP_NET_SG:
- case REQTYPE_RESP_NET:
- sc = buf;
- skb = sc->callback_arg;
- break;
- default:
- return;
- }
- (*pkts_compl)++;
- *bytes_compl += skb->len;
- }
- int octeon_report_sent_bytes_to_bql(void *buf, int reqtype)
- {
- struct octnet_buf_free_info *finfo;
- struct sk_buff *skb;
- struct octeon_soft_command *sc;
- struct netdev_queue *txq;
- switch (reqtype) {
- case REQTYPE_NORESP_NET:
- case REQTYPE_NORESP_NET_SG:
- finfo = buf;
- skb = finfo->skb;
- break;
- case REQTYPE_RESP_NET_SG:
- case REQTYPE_RESP_NET:
- sc = buf;
- skb = sc->callback_arg;
- break;
- default:
- return 0;
- }
- txq = netdev_get_tx_queue(skb->dev, skb_get_queue_mapping(skb));
- netdev_tx_sent_queue(txq, skb->len);
- return netif_xmit_stopped(txq);
- }
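- /* Illustrative sketch (hypothetical helper): the two BQL hooks above are
- * meant to be used as a pair. The xmit path reports queued bytes via
- * octeon_report_sent_bytes_to_bql(); a completion loop then accumulates
- * counts with octeon_update_tx_completion_counters() and flushes them in
- * one call, as below.
- */
- static void __maybe_unused lio_bql_complete_example(void *txq, void **bufs,
- int *reqtypes, int n)
- {
- unsigned int pkts_compl = 0, bytes_compl = 0;
- int i;
- for (i = 0; i < n; i++)
- octeon_update_tx_completion_counters(bufs[i], reqtypes[i],
- &pkts_compl, &bytes_compl);
- octeon_report_tx_completion_to_bql(txq, pkts_compl, bytes_compl);
- }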
- void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
- {
- struct octnic_ctrl_pkt *nctrl = (struct octnic_ctrl_pkt *)nctrl_ptr;
- struct net_device *netdev = (struct net_device *)nctrl->netpndev;
- struct lio *lio = GET_LIO(netdev);
- struct octeon_device *oct = lio->oct_dev;
- u8 *mac;
- if (nctrl->completion && nctrl->response_code) {
- /* Signal whoever is interested that the response code from the
- * firmware has arrived.
- */
- WRITE_ONCE(*nctrl->response_code, nctrl->status);
- complete(nctrl->completion);
- }
- if (nctrl->status)
- return;
- switch (nctrl->ncmd.s.cmd) {
- case OCTNET_CMD_CHANGE_DEVFLAGS:
- case OCTNET_CMD_SET_MULTI_LIST:
- case OCTNET_CMD_SET_UC_LIST:
- break;
- case OCTNET_CMD_CHANGE_MACADDR:
- mac = ((u8 *)&nctrl->udd[0]) + 2;
- if (nctrl->ncmd.s.param1) {
- /* vfidx is 0 based, but vf_num (param1) is 1 based */
- int vfidx = nctrl->ncmd.s.param1 - 1;
- bool mac_is_admin_assigned = nctrl->ncmd.s.param2;
- if (mac_is_admin_assigned)
- netif_info(lio, probe, lio->netdev,
- "MAC Address %pM is configured for VF %d\n",
- mac, vfidx);
- } else {
- netif_info(lio, probe, lio->netdev,
- " MACAddr changed to %pM\n",
- mac);
- }
- break;
- case OCTNET_CMD_GPIO_ACCESS:
- netif_info(lio, probe, lio->netdev, "LED Flashing visual identification\n");
- break;
- case OCTNET_CMD_ID_ACTIVE:
- netif_info(lio, probe, lio->netdev, "LED Flashing visual identification\n");
- break;
- case OCTNET_CMD_LRO_ENABLE:
- dev_info(&oct->pci_dev->dev, "%s LRO Enabled\n", netdev->name);
- break;
- case OCTNET_CMD_LRO_DISABLE:
- dev_info(&oct->pci_dev->dev, "%s LRO Disabled\n",
- netdev->name);
- break;
- case OCTNET_CMD_VERBOSE_ENABLE:
- dev_info(&oct->pci_dev->dev, "%s Firmware debug enabled\n",
- netdev->name);
- break;
- case OCTNET_CMD_VERBOSE_DISABLE:
- dev_info(&oct->pci_dev->dev, "%s Firmware debug disabled\n",
- netdev->name);
- break;
- case OCTNET_CMD_VLAN_FILTER_CTL:
- if (nctrl->ncmd.s.param1)
- dev_info(&oct->pci_dev->dev,
- "%s VLAN filter enabled\n", netdev->name);
- else
- dev_info(&oct->pci_dev->dev,
- "%s VLAN filter disabled\n", netdev->name);
- break;
- case OCTNET_CMD_ADD_VLAN_FILTER:
- dev_info(&oct->pci_dev->dev, "%s VLAN filter %d added\n",
- netdev->name, nctrl->ncmd.s.param1);
- break;
- case OCTNET_CMD_DEL_VLAN_FILTER:
- dev_info(&oct->pci_dev->dev, "%s VLAN filter %d removed\n",
- netdev->name, nctrl->ncmd.s.param1);
- break;
- case OCTNET_CMD_SET_SETTINGS:
- dev_info(&oct->pci_dev->dev, "%s settings changed\n",
- netdev->name);
- break;
- /* Case to handle "OCTNET_CMD_TNL_RX_CSUM_CTL"
- * Command passed by NIC driver
- */
- case OCTNET_CMD_TNL_RX_CSUM_CTL:
- if (nctrl->ncmd.s.param1 == OCTNET_CMD_RXCSUM_ENABLE) {
- netif_info(lio, probe, lio->netdev,
- "RX Checksum Offload Enabled\n");
- } else if (nctrl->ncmd.s.param1 ==
- OCTNET_CMD_RXCSUM_DISABLE) {
- netif_info(lio, probe, lio->netdev,
- "RX Checksum Offload Disabled\n");
- }
- break;
- /* Case to handle "OCTNET_CMD_TNL_TX_CSUM_CTL"
- * Command passed by NIC driver
- */
- case OCTNET_CMD_TNL_TX_CSUM_CTL:
- if (nctrl->ncmd.s.param1 == OCTNET_CMD_TXCSUM_ENABLE) {
- netif_info(lio, probe, lio->netdev,
- "TX Checksum Offload Enabled\n");
- } else if (nctrl->ncmd.s.param1 ==
- OCTNET_CMD_TXCSUM_DISABLE) {
- netif_info(lio, probe, lio->netdev,
- "TX Checksum Offload Disabled\n");
- }
- break;
- /* Case to handle "OCTNET_CMD_VXLAN_PORT_CONFIG"
- * Command passed by NIC driver
- */
- case OCTNET_CMD_VXLAN_PORT_CONFIG:
- if (nctrl->ncmd.s.more == OCTNET_CMD_VXLAN_PORT_ADD) {
- netif_info(lio, probe, lio->netdev,
- "VxLAN Destination UDP PORT:%d ADDED\n",
- nctrl->ncmd.s.param1);
- } else if (nctrl->ncmd.s.more ==
- OCTNET_CMD_VXLAN_PORT_DEL) {
- netif_info(lio, probe, lio->netdev,
- "VxLAN Destination UDP PORT:%d DELETED\n",
- nctrl->ncmd.s.param1);
- }
- break;
- case OCTNET_CMD_SET_FLOW_CTL:
- netif_info(lio, probe, lio->netdev, "Set RX/TX flow control parameters\n");
- break;
- case OCTNET_CMD_QUEUE_COUNT_CTL:
- netif_info(lio, probe, lio->netdev, "Queue count updated to %d\n",
- nctrl->ncmd.s.param1);
- break;
- default:
- dev_err(&oct->pci_dev->dev, "%s Unknown cmd %d\n", __func__,
- nctrl->ncmd.s.cmd);
- }
- }
- void octeon_pf_changed_vf_macaddr(struct octeon_device *oct, u8 *mac)
- {
- bool macaddr_changed = false;
- struct net_device *netdev;
- struct lio *lio;
- rtnl_lock();
- netdev = oct->props[0].netdev;
- lio = GET_LIO(netdev);
- lio->linfo.macaddr_is_admin_asgnd = true;
- if (!ether_addr_equal(netdev->dev_addr, mac)) {
- macaddr_changed = true;
- ether_addr_copy(netdev->dev_addr, mac);
- ether_addr_copy(((u8 *)&lio->linfo.hw_addr) + 2, mac);
- call_netdevice_notifiers(NETDEV_CHANGEADDR, netdev);
- }
- rtnl_unlock();
- if (macaddr_changed)
- dev_info(&oct->pci_dev->dev,
- "PF changed VF's MAC address to %pM\n", mac);
- /* no need to notify the firmware of the macaddr change because
- * the PF did that already
- */
- }
- static void octnet_poll_check_rxq_oom_status(struct work_struct *work)
- {
- struct cavium_wk *wk = (struct cavium_wk *)work;
- struct lio *lio = (struct lio *)wk->ctxptr;
- struct octeon_device *oct = lio->oct_dev;
- struct octeon_droq *droq;
- int q, q_no = 0;
- if (ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
- for (q = 0; q < lio->linfo.num_rxpciq; q++) {
- q_no = lio->linfo.rxpciq[q].s.q_no;
- droq = oct->droq[q_no];
- if (!droq)
- continue;
- octeon_droq_check_oom(droq);
- }
- }
- queue_delayed_work(lio->rxq_status_wq.wq,
- &lio->rxq_status_wq.wk.work,
- msecs_to_jiffies(LIO_OOM_POLL_INTERVAL_MS));
- }
- int setup_rx_oom_poll_fn(struct net_device *netdev)
- {
- struct lio *lio = GET_LIO(netdev);
- struct octeon_device *oct = lio->oct_dev;
- lio->rxq_status_wq.wq = alloc_workqueue("rxq-oom-status",
- WQ_MEM_RECLAIM, 0);
- if (!lio->rxq_status_wq.wq) {
- dev_err(&oct->pci_dev->dev, "unable to create cavium rxq oom status wq\n");
- return -ENOMEM;
- }
- INIT_DELAYED_WORK(&lio->rxq_status_wq.wk.work,
- octnet_poll_check_rxq_oom_status);
- lio->rxq_status_wq.wk.ctxptr = lio;
- queue_delayed_work(lio->rxq_status_wq.wq,
- &lio->rxq_status_wq.wk.work,
- msecs_to_jiffies(LIO_OOM_POLL_INTERVAL_MS));
- return 0;
- }
- void cleanup_rx_oom_poll_fn(struct net_device *netdev)
- {
- struct lio *lio = GET_LIO(netdev);
- if (lio->rxq_status_wq.wq) {
- cancel_delayed_work_sync(&lio->rxq_status_wq.wk.work);
- flush_workqueue(lio->rxq_status_wq.wq);
- destroy_workqueue(lio->rxq_status_wq.wq);
- }
- }
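- /* Example pairing (a sketch; the real call sites are in lio_main.c and
- * lio_vf_main.c): the OOM poller is started once per interface during
- * setup and torn down before the netdev goes away.
- *
- *	if (setup_rx_oom_poll_fn(netdev))
- *		return -ENOMEM;
- *	...
- *	cleanup_rx_oom_poll_fn(netdev);
- */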
- /* Runs in interrupt context. */
- static void lio_update_txq_status(struct octeon_device *oct, int iq_num)
- {
- struct octeon_instr_queue *iq = oct->instr_queue[iq_num];
- struct net_device *netdev;
- struct lio *lio;
- netdev = oct->props[iq->ifidx].netdev;
- /* This is needed because the first IQ does not have
- * a netdev associated with it.
- */
- if (!netdev)
- return;
- lio = GET_LIO(netdev);
- if (__netif_subqueue_stopped(netdev, iq->q_index) &&
- lio->linfo.link.s.link_up &&
- (!octnet_iq_is_full(oct, iq_num))) {
- netif_wake_subqueue(netdev, iq->q_index);
- INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num,
- tx_restart, 1);
- }
- }
- /**
- * \brief Setup output queue
- * @param oct octeon device
- * @param q_no which queue
- * @param num_descs how many descriptors
- * @param desc_size size of each descriptor
- * @param app_ctx application context
- */
- static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs,
- int desc_size, void *app_ctx)
- {
- int ret_val;
- dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no);
- /* droq creation and local register settings. */
- ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx);
- if (ret_val < 0)
- return ret_val;
- if (ret_val == 1) {
- dev_dbg(&oct->pci_dev->dev, "Using default droq %d\n", q_no);
- return 0;
- }
- /* Enable the droq queues */
- octeon_set_droq_pkt_op(oct, q_no, 1);
- /* Send Credit for Octeon Output queues. Credits are always
- * sent after the output queue is enabled.
- */
- writel(oct->droq[q_no]->max_count, oct->droq[q_no]->pkts_credit_reg);
- return ret_val;
- }
- /** Routine to push packets arriving on Octeon interface up to the network layer.
- * @param oct_id - octeon device id.
- * @param skbuff - skbuff struct to be passed to network layer.
- * @param len - size of total data received.
- * @param rh - Control header associated with the packet
- * @param param - additional control data with the packet
- * @param arg - farg registered in droq_ops
- */
- static void
- liquidio_push_packet(u32 octeon_id __attribute__((unused)),
- void *skbuff,
- u32 len,
- union octeon_rh *rh,
- void *param,
- void *arg)
- {
- struct net_device *netdev = (struct net_device *)arg;
- struct octeon_droq *droq =
- container_of(param, struct octeon_droq, napi);
- struct sk_buff *skb = (struct sk_buff *)skbuff;
- struct skb_shared_hwtstamps *shhwtstamps;
- struct napi_struct *napi = param;
- u16 vtag = 0;
- u32 r_dh_off;
- u64 ns;
- if (netdev) {
- struct lio *lio = GET_LIO(netdev);
- struct octeon_device *oct = lio->oct_dev;
- /* Do not proceed if the interface is not in RUNNING state. */
- if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
- recv_buffer_free(skb);
- droq->stats.rx_dropped++;
- return;
- }
- skb->dev = netdev;
- skb_record_rx_queue(skb, droq->q_no);
- if (likely(len > MIN_SKB_SIZE)) {
- struct octeon_skb_page_info *pg_info;
- unsigned char *va;
- pg_info = ((struct octeon_skb_page_info *)(skb->cb));
- if (pg_info->page) {
- /* For Paged allocation use the frags */
- va = page_address(pg_info->page) +
- pg_info->page_offset;
- memcpy(skb->data, va, MIN_SKB_SIZE);
- skb_put(skb, MIN_SKB_SIZE);
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- pg_info->page,
- pg_info->page_offset +
- MIN_SKB_SIZE,
- len - MIN_SKB_SIZE,
- LIO_RXBUFFER_SZ);
- }
- } else {
- struct octeon_skb_page_info *pg_info =
- ((struct octeon_skb_page_info *)(skb->cb));
- skb_copy_to_linear_data(skb, page_address(pg_info->page)
- + pg_info->page_offset, len);
- skb_put(skb, len);
- put_page(pg_info->page);
- }
- r_dh_off = (rh->r_dh.len - 1) * BYTES_PER_DHLEN_UNIT;
- if (oct->ptp_enable) {
- if (rh->r_dh.has_hwtstamp) {
- /* timestamp is included by the hardware at
- * the beginning of the packet.
- */
- if (ifstate_check
- (lio,
- LIO_IFSTATE_RX_TIMESTAMP_ENABLED)) {
- /* Nanoseconds are in the first 64-bits
- * of the packet.
- */
- memcpy(&ns, (skb->data + r_dh_off),
- sizeof(ns));
- r_dh_off -= BYTES_PER_DHLEN_UNIT;
- shhwtstamps = skb_hwtstamps(skb);
- shhwtstamps->hwtstamp =
- ns_to_ktime(ns +
- lio->ptp_adjust);
- }
- }
- }
- if (rh->r_dh.has_hash) {
- __be32 *hash_be = (__be32 *)(skb->data + r_dh_off);
- u32 hash = be32_to_cpu(*hash_be);
- skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
- r_dh_off -= BYTES_PER_DHLEN_UNIT;
- }
- skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT);
- skb->protocol = eth_type_trans(skb, skb->dev);
- if ((netdev->features & NETIF_F_RXCSUM) &&
- (((rh->r_dh.encap_on) &&
- (rh->r_dh.csum_verified & CNNIC_TUN_CSUM_VERIFIED)) ||
- (!(rh->r_dh.encap_on) &&
- (rh->r_dh.csum_verified & CNNIC_CSUM_VERIFIED))))
- /* checksum has already been verified */
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- else
- skb->ip_summed = CHECKSUM_NONE;
- /* Set the encapsulation field on the basis of the status received
- * from the firmware
- */
- if (rh->r_dh.encap_on) {
- skb->encapsulation = 1;
- skb->csum_level = 1;
- droq->stats.rx_vxlan++;
- }
- /* inbound VLAN tag */
- if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
- rh->r_dh.vlan) {
- u16 priority = rh->r_dh.priority;
- u16 vid = rh->r_dh.vlan;
- vtag = (priority << VLAN_PRIO_SHIFT) | vid;
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
- }
- napi_gro_receive(napi, skb);
- droq->stats.rx_bytes_received += len -
- rh->r_dh.len * BYTES_PER_DHLEN_UNIT;
- droq->stats.rx_pkts_received++;
- } else {
- recv_buffer_free(skb);
- }
- }
- /**
- * \brief wrapper for calling napi_schedule
- * @param param parameters to pass to napi_schedule
- *
- * Used when scheduling on different CPUs
- */
- static void napi_schedule_wrapper(void *param)
- {
- struct napi_struct *napi = param;
- napi_schedule(napi);
- }
- /**
- * \brief callback when receive interrupt occurs and we are in NAPI mode
- * @param arg pointer to octeon output queue
- */
- static void liquidio_napi_drv_callback(void *arg)
- {
- struct octeon_device *oct;
- struct octeon_droq *droq = arg;
- int this_cpu = smp_processor_id();
- oct = droq->oct_dev;
- if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct) ||
- droq->cpu_id == this_cpu) {
- napi_schedule_irqoff(&droq->napi);
- } else {
- call_single_data_t *csd = &droq->csd;
- csd->func = napi_schedule_wrapper;
- csd->info = &droq->napi;
- csd->flags = 0;
- smp_call_function_single_async(droq->cpu_id, csd);
- }
- }
- /**
- * \brief Entry point for NAPI polling
- * @param napi NAPI structure
- * @param budget maximum number of items to process
- */
- static int liquidio_napi_poll(struct napi_struct *napi, int budget)
- {
- struct octeon_instr_queue *iq;
- struct octeon_device *oct;
- struct octeon_droq *droq;
- int tx_done = 0, iq_no;
- int work_done;
- droq = container_of(napi, struct octeon_droq, napi);
- oct = droq->oct_dev;
- iq_no = droq->q_no;
- /* Handle Droq descriptors */
- work_done = octeon_droq_process_poll_pkts(oct, droq, budget);
- /* Flush the instruction queue */
- iq = oct->instr_queue[iq_no];
- if (iq) {
- /* TODO: move this check to inside octeon_flush_iq,
- * once check_db_timeout is removed
- */
- if (atomic_read(&iq->instr_pending))
- /* Process iq buffers within the budget limits */
- tx_done = octeon_flush_iq(oct, iq, budget);
- else
- tx_done = 1;
- /* Update the iq read index rather than waiting for the next
- * interrupt; the poll routine returns the full budget while
- * tx_done is false.
- */
- /* sub-queue status update */
- lio_update_txq_status(oct, iq_no);
- } else {
- dev_err(&oct->pci_dev->dev, "%s: iq (%d) num invalid\n",
- __func__, iq_no);
- }
- #define MAX_REG_CNT 2000000U
- /* force enable interrupt if reg cnts are high to avoid wraparound */
- if ((work_done < budget && tx_done) ||
- (iq && iq->pkt_in_done >= MAX_REG_CNT) ||
- (droq->pkt_count >= MAX_REG_CNT)) {
- tx_done = 1;
- napi_complete_done(napi, work_done);
- octeon_enable_irq(droq->oct_dev, droq->q_no);
- return 0;
- }
- return (!tx_done) ? (budget) : (work_done);
- }
- /**
- * \brief Setup input and output queues
- * @param octeon_dev octeon device
- * @param ifidx Interface index
- *
- * Note: Queues are with respect to the octeon device. Thus
- * an input queue is for egress packets, and output queues
- * are for ingress packets.
- */
- int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx,
- u32 num_iqs, u32 num_oqs)
- {
- struct octeon_droq_ops droq_ops;
- struct net_device *netdev;
- struct octeon_droq *droq;
- struct napi_struct *napi;
- int cpu_id_modulus;
- int num_tx_descs;
- struct lio *lio;
- int retval = 0;
- int q, q_no;
- int cpu_id;
- netdev = octeon_dev->props[ifidx].netdev;
- lio = GET_LIO(netdev);
- memset(&droq_ops, 0, sizeof(struct octeon_droq_ops));
- droq_ops.fptr = liquidio_push_packet;
- droq_ops.farg = netdev;
- droq_ops.poll_mode = 1;
- droq_ops.napi_fn = liquidio_napi_drv_callback;
- cpu_id = 0;
- cpu_id_modulus = num_present_cpus();
- /* set up DROQs. */
- for (q = 0; q < num_oqs; q++) {
- q_no = lio->linfo.rxpciq[q].s.q_no;
- dev_dbg(&octeon_dev->pci_dev->dev,
- "%s index:%d linfo.rxpciq.s.q_no:%d\n",
- __func__, q, q_no);
- retval = octeon_setup_droq(
- octeon_dev, q_no,
- CFG_GET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(octeon_dev),
- lio->ifidx),
- CFG_GET_NUM_RX_BUF_SIZE_NIC_IF(octeon_get_conf(octeon_dev),
- lio->ifidx),
- NULL);
- if (retval) {
- dev_err(&octeon_dev->pci_dev->dev,
- "%s : Runtime DROQ(RxQ) creation failed.\n",
- __func__);
- return 1;
- }
- droq = octeon_dev->droq[q_no];
- napi = &droq->napi;
- dev_dbg(&octeon_dev->pci_dev->dev, "netif_napi_add netdev:%llx oct:%llx\n",
- (u64)netdev, (u64)octeon_dev);
- netif_napi_add(netdev, napi, liquidio_napi_poll, 64);
- /* designate a CPU for this droq */
- droq->cpu_id = cpu_id;
- cpu_id++;
- if (cpu_id >= cpu_id_modulus)
- cpu_id = 0;
- octeon_register_droq_ops(octeon_dev, q_no, &droq_ops);
- }
- if (OCTEON_CN23XX_PF(octeon_dev) || OCTEON_CN23XX_VF(octeon_dev)) {
- /* 23XX PF/VF can send/recv control messages (via the first
- * PF/VF-owned droq) from the firmware even if the ethX
- * interface is down, so that's why poll_mode must be off
- * for the first droq.
- */
- octeon_dev->droq[0]->ops.poll_mode = 0;
- }
- /* set up IQs. */
- for (q = 0; q < num_iqs; q++) {
- num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF(
- octeon_get_conf(octeon_dev), lio->ifidx);
- retval = octeon_setup_iq(octeon_dev, ifidx, q,
- lio->linfo.txpciq[q], num_tx_descs,
- netdev_get_tx_queue(netdev, q));
- if (retval) {
- dev_err(&octeon_dev->pci_dev->dev,
- " %s : Runtime IQ(TxQ) creation failed.\n",
- __func__);
- return 1;
- }
- /* XPS */
- if (!OCTEON_CN23XX_VF(octeon_dev) && octeon_dev->msix_on &&
- octeon_dev->ioq_vector) {
- struct octeon_ioq_vector *ioq_vector;
- ioq_vector = &octeon_dev->ioq_vector[q];
- netif_set_xps_queue(netdev,
- &ioq_vector->affinity_mask,
- ioq_vector->iq_index);
- }
- }
- return 0;
- }
- static
- int liquidio_schedule_msix_droq_pkt_handler(struct octeon_droq *droq, u64 ret)
- {
- struct octeon_device *oct = droq->oct_dev;
- struct octeon_device_priv *oct_priv =
- (struct octeon_device_priv *)oct->priv;
- if (droq->ops.poll_mode) {
- droq->ops.napi_fn(droq);
- } else {
- if (ret & MSIX_PO_INT) {
- if (OCTEON_CN23XX_VF(oct))
- dev_err(&oct->pci_dev->dev,
- "should not come here should not get rx when poll mode = 0 for vf\n");
- tasklet_schedule(&oct_priv->droq_tasklet);
- return 1;
- }
- /* this will be flushed periodically by the IQ doorbell-check routine */
- if (ret & MSIX_PI_INT)
- return 0;
- }
- return 0;
- }
- irqreturn_t
- liquidio_msix_intr_handler(int irq __attribute__((unused)), void *dev)
- {
- struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev;
- struct octeon_device *oct = ioq_vector->oct_dev;
- struct octeon_droq *droq = oct->droq[ioq_vector->droq_index];
- u64 ret;
- ret = oct->fn_list.msix_interrupt_handler(ioq_vector);
- if (ret & MSIX_PO_INT || ret & MSIX_PI_INT)
- liquidio_schedule_msix_droq_pkt_handler(droq, ret);
- return IRQ_HANDLED;
- }
- /**
- * \brief Droq packet processor scheduler
- * @param oct octeon device
- */
- static void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct)
- {
- struct octeon_device_priv *oct_priv =
- (struct octeon_device_priv *)oct->priv;
- struct octeon_droq *droq;
- u64 oq_no;
- if (oct->int_status & OCT_DEV_INTR_PKT_DATA) {
- for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct);
- oq_no++) {
- if (!(oct->droq_intr & BIT_ULL(oq_no)))
- continue;
- droq = oct->droq[oq_no];
- if (droq->ops.poll_mode) {
- droq->ops.napi_fn(droq);
- oct_priv->napi_mask |= BIT_ULL(oq_no);
- } else {
- tasklet_schedule(&oct_priv->droq_tasklet);
- }
- }
- }
- }
- /**
- * \brief Interrupt handler for octeon
- * @param irq unused
- * @param dev octeon device
- */
- static
- irqreturn_t liquidio_legacy_intr_handler(int irq __attribute__((unused)),
- void *dev)
- {
- struct octeon_device *oct = (struct octeon_device *)dev;
- irqreturn_t ret;
- /* Disable our interrupts for the duration of ISR */
- oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
- ret = oct->fn_list.process_interrupt_regs(oct);
- if (ret == IRQ_HANDLED)
- liquidio_schedule_droq_pkt_handlers(oct);
- /* Re-enable our interrupts */
- if (!(atomic_read(&oct->status) == OCT_DEV_IN_RESET))
- oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
- return ret;
- }
- /**
- * \brief Setup interrupt for octeon device
- * @param oct octeon device
- * @param num_ioqs number of queue interrupt vectors to set up
- *
- * Enable interrupt in Octeon device as given in the PCI interrupt mask.
- */
- int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs)
- {
- struct msix_entry *msix_entries;
- char *queue_irq_names = NULL;
- int i, num_interrupts = 0;
- int num_alloc_ioq_vectors;
- char *aux_irq_name = NULL;
- int num_ioq_vectors;
- int irqret, err;
- if (oct->msix_on) {
- oct->num_msix_irqs = num_ioqs;
- if (OCTEON_CN23XX_PF(oct)) {
- num_interrupts = MAX_IOQ_INTERRUPTS_PER_PF + 1;
- /* one non-IOQ interrupt for handling
- * sli_mac_pf_int_sum
- */
- oct->num_msix_irqs += 1;
- } else if (OCTEON_CN23XX_VF(oct)) {
- num_interrupts = MAX_IOQ_INTERRUPTS_PER_VF;
- }
- /* allocate storage for the names assigned to each irq */
- oct->irq_name_storage =
- kcalloc(num_interrupts, INTRNAMSIZ, GFP_KERNEL);
- if (!oct->irq_name_storage) {
- dev_err(&oct->pci_dev->dev, "Irq name storage alloc failed...\n");
- return -ENOMEM;
- }
- queue_irq_names = oct->irq_name_storage;
- if (OCTEON_CN23XX_PF(oct))
- aux_irq_name = &queue_irq_names
- [IRQ_NAME_OFF(MAX_IOQ_INTERRUPTS_PER_PF)];
- oct->msix_entries = kcalloc(oct->num_msix_irqs,
- sizeof(struct msix_entry),
- GFP_KERNEL);
- if (!oct->msix_entries) {
- dev_err(&oct->pci_dev->dev, "Memory Alloc failed...\n");
- kfree(oct->irq_name_storage);
- oct->irq_name_storage = NULL;
- return -ENOMEM;
- }
- msix_entries = (struct msix_entry *)oct->msix_entries;
- /* Assumption is that PF MSI-X vectors start at pf_srn and run
- * through trs, rather than starting from 0; if not, change this code.
- */
- if (OCTEON_CN23XX_PF(oct)) {
- for (i = 0; i < oct->num_msix_irqs - 1; i++)
- msix_entries[i].entry =
- oct->sriov_info.pf_srn + i;
- msix_entries[oct->num_msix_irqs - 1].entry =
- oct->sriov_info.trs;
- } else if (OCTEON_CN23XX_VF(oct)) {
- for (i = 0; i < oct->num_msix_irqs; i++)
- msix_entries[i].entry = i;
- }
- num_alloc_ioq_vectors = pci_enable_msix_range(
- oct->pci_dev, msix_entries,
- oct->num_msix_irqs,
- oct->num_msix_irqs);
- if (num_alloc_ioq_vectors < 0) {
- dev_err(&oct->pci_dev->dev, "unable to Allocate MSI-X interrupts\n");
- kfree(oct->msix_entries);
- oct->msix_entries = NULL;
- kfree(oct->irq_name_storage);
- oct->irq_name_storage = NULL;
- return num_alloc_ioq_vectors;
- }
- dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n");
- num_ioq_vectors = oct->num_msix_irqs;
- /* For PF, there is one non-IOQ interrupt handler */
- if (OCTEON_CN23XX_PF(oct)) {
- num_ioq_vectors -= 1;
- snprintf(aux_irq_name, INTRNAMSIZ,
- "LiquidIO%u-pf%u-aux", oct->octeon_id,
- oct->pf_num);
- irqret = request_irq(
- msix_entries[num_ioq_vectors].vector,
- liquidio_legacy_intr_handler, 0,
- aux_irq_name, oct);
- if (irqret) {
- dev_err(&oct->pci_dev->dev,
- "Request_irq failed for MSIX interrupt Error: %d\n",
- irqret);
- pci_disable_msix(oct->pci_dev);
- kfree(oct->msix_entries);
- kfree(oct->irq_name_storage);
- oct->irq_name_storage = NULL;
- oct->msix_entries = NULL;
- return irqret;
- }
- }
- for (i = 0 ; i < num_ioq_vectors ; i++) {
- if (OCTEON_CN23XX_PF(oct))
- snprintf(&queue_irq_names[IRQ_NAME_OFF(i)],
- INTRNAMSIZ, "LiquidIO%u-pf%u-rxtx-%u",
- oct->octeon_id, oct->pf_num, i);
- if (OCTEON_CN23XX_VF(oct))
- snprintf(&queue_irq_names[IRQ_NAME_OFF(i)],
- INTRNAMSIZ, "LiquidIO%u-vf%u-rxtx-%u",
- oct->octeon_id, oct->vf_num, i);
- irqret = request_irq(msix_entries[i].vector,
- liquidio_msix_intr_handler, 0,
- &queue_irq_names[IRQ_NAME_OFF(i)],
- &oct->ioq_vector[i]);
- if (irqret) {
- dev_err(&oct->pci_dev->dev,
- "Request_irq failed for MSIX interrupt Error: %d\n",
- irqret);
- /* Free the non-IOQ irq vector here. */
- free_irq(msix_entries[num_ioq_vectors].vector,
- oct);
- while (i) {
- i--;
- /* clear the affinity mask */
- irq_set_affinity_hint(
- msix_entries[i].vector,
- NULL);
- free_irq(msix_entries[i].vector,
- &oct->ioq_vector[i]);
- }
- pci_disable_msix(oct->pci_dev);
- kfree(oct->msix_entries);
- kfree(oct->irq_name_storage);
- oct->irq_name_storage = NULL;
- oct->msix_entries = NULL;
- return irqret;
- }
- oct->ioq_vector[i].vector = msix_entries[i].vector;
- /* assign the cpu mask for this msix interrupt vector */
- irq_set_affinity_hint(msix_entries[i].vector,
- &oct->ioq_vector[i].affinity_mask
- );
- }
- dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: MSI-X enabled\n",
- oct->octeon_id);
- } else {
- err = pci_enable_msi(oct->pci_dev);
- if (err)
- dev_warn(&oct->pci_dev->dev, "Reverting to legacy interrupts. Error: %d\n",
- err);
- else
- oct->flags |= LIO_FLAG_MSI_ENABLED;
- /* allocate storage for the names assigned to the irq */
- oct->irq_name_storage = kcalloc(1, INTRNAMSIZ, GFP_KERNEL);
- if (!oct->irq_name_storage)
- return -ENOMEM;
- queue_irq_names = oct->irq_name_storage;
- if (OCTEON_CN23XX_PF(oct))
- snprintf(&queue_irq_names[IRQ_NAME_OFF(0)], INTRNAMSIZ,
- "LiquidIO%u-pf%u-rxtx-%u",
- oct->octeon_id, oct->pf_num, 0);
- if (OCTEON_CN23XX_VF(oct))
- snprintf(&queue_irq_names[IRQ_NAME_OFF(0)], INTRNAMSIZ,
- "LiquidIO%u-vf%u-rxtx-%u",
- oct->octeon_id, oct->vf_num, 0);
- irqret = request_irq(oct->pci_dev->irq,
- liquidio_legacy_intr_handler,
- IRQF_SHARED,
- &queue_irq_names[IRQ_NAME_OFF(0)], oct);
- if (irqret) {
- if (oct->flags & LIO_FLAG_MSI_ENABLED)
- pci_disable_msi(oct->pci_dev);
- dev_err(&oct->pci_dev->dev, "Request IRQ failed with code: %d\n",
- irqret);
- kfree(oct->irq_name_storage);
- oct->irq_name_storage = NULL;
- return irqret;
- }
- }
- return 0;
- }
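- /* Illustrative sketch (not part of this file; the real teardown lives in
- * the per-chip remove paths): undoing the MSI-X setup done by
- * octeon_setup_interrupt(), mirroring its error path above. The helper
- * name is hypothetical.
- */
- static void __maybe_unused lio_teardown_msix_example(struct octeon_device *oct,
- int num_ioq_vectors)
- {
- struct msix_entry *msix_entries =
- (struct msix_entry *)oct->msix_entries;
- int i;
- for (i = 0; i < num_ioq_vectors; i++) {
- /* drop the affinity hint before freeing the vector */
- irq_set_affinity_hint(msix_entries[i].vector, NULL);
- free_irq(msix_entries[i].vector, &oct->ioq_vector[i]);
- }
- pci_disable_msix(oct->pci_dev);
- kfree(oct->msix_entries);
- oct->msix_entries = NULL;
- }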
- static void liquidio_change_mtu_completion(struct octeon_device *oct,
- u32 status, void *buf)
- {
- struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
- struct liquidio_if_cfg_context *ctx;
- ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
- if (status) {
- dev_err(&oct->pci_dev->dev, "MTU change failed. Status: %llx\n",
- CVM_CAST64(status));
- WRITE_ONCE(ctx->cond, LIO_CHANGE_MTU_FAIL);
- } else {
- WRITE_ONCE(ctx->cond, LIO_CHANGE_MTU_SUCCESS);
- }
- /* This barrier is required to be sure that the response has been
- * written fully before waking up the handler
- */
- wmb();
- wake_up_interruptible(&ctx->wc);
- }
- /**
- * \brief Net device change_mtu
- * @param netdev network device
- * @param new_mtu new MTU value to apply
- */
- int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
- {
- struct lio *lio = GET_LIO(netdev);
- struct octeon_device *oct = lio->oct_dev;
- struct liquidio_if_cfg_context *ctx;
- struct octeon_soft_command *sc;
- union octnet_cmd *ncmd;
- int ctx_size;
- int ret = 0;
- ctx_size = sizeof(struct liquidio_if_cfg_context);
- sc = (struct octeon_soft_command *)
- octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, 16, ctx_size);
- if (!sc)
- return -ENOMEM;
- ncmd = (union octnet_cmd *)sc->virtdptr;
- ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
- WRITE_ONCE(ctx->cond, 0);
- ctx->octeon_id = lio_get_device_id(oct);
- init_waitqueue_head(&ctx->wc);
- ncmd->u64 = 0;
- ncmd->s.cmd = OCTNET_CMD_CHANGE_MTU;
- ncmd->s.param1 = new_mtu;
- octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));
- sc->iq_no = lio->linfo.txpciq[0].s.q_no;
- octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
- OPCODE_NIC_CMD, 0, 0, 0);
- sc->callback = liquidio_change_mtu_completion;
- sc->callback_arg = sc;
- sc->wait_time = 100;
- ret = octeon_send_soft_command(oct, sc);
- if (ret == IQ_SEND_FAILED) {
- netif_info(lio, rx_err, lio->netdev, "Failed to change MTU\n");
- octeon_free_soft_command(oct, sc);
- return -EINVAL;
- }
- /* Sleep on a wait queue until the cond flag indicates that the
- * response arrived or the request timed out.
- */
- if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR ||
- ctx->cond == LIO_CHANGE_MTU_FAIL) {
- octeon_free_soft_command(oct, sc);
- return -EINVAL;
- }
- netdev->mtu = new_mtu;
- lio->mtu = new_mtu;
- octeon_free_soft_command(oct, sc);
- return 0;
- }
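- /* Illustrative hook-up (a sketch; the real net_device_ops tables live in
- * lio_main.c and lio_vf_main.c): liquidio_change_mtu() is wired in as the
- * ndo_change_mtu callback, so the networking core invokes it under RTNL.
- */
- static const struct net_device_ops lio_netdev_ops_example __maybe_unused = {
- .ndo_change_mtu = liquidio_change_mtu,
- };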
- int lio_wait_for_clean_oq(struct octeon_device *oct)
- {
- int retry = 100, pending_pkts = 0;
- int idx;
- do {
- pending_pkts = 0;
- for (idx = 0; idx < MAX_OCTEON_OUTPUT_QUEUES(oct); idx++) {
- if (!(oct->io_qmask.oq & BIT_ULL(idx)))
- continue;
- pending_pkts +=
- atomic_read(&oct->droq[idx]->pkts_pending);
- }
- if (pending_pkts > 0)
- schedule_timeout_uninterruptible(1);
- } while (retry-- && pending_pkts);
- return pending_pkts;
- }
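- /* Example usage (a sketch of the stop path in lio_main.c): drain pending
- * RX work before tearing the interface down; a non-zero return means some
- * packets were still pending after all retries.
- *
- *	if (lio_wait_for_clean_oq(oct))
- *		netif_info(lio, rx_err, lio->netdev,
- *			   "Proceeding with stop interface after partial RX desc processing\n");
- */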
- static void
- octnet_nic_stats_callback(struct octeon_device *oct_dev,
- u32 status, void *ptr)
- {
- struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
- struct oct_nic_stats_resp *resp =
- (struct oct_nic_stats_resp *)sc->virtrptr;
- struct oct_nic_stats_ctrl *ctrl =
- (struct oct_nic_stats_ctrl *)sc->ctxptr;
- struct nic_rx_stats *rsp_rstats = &resp->stats.fromwire;
- struct nic_tx_stats *rsp_tstats = &resp->stats.fromhost;
- struct nic_rx_stats *rstats = &oct_dev->link_stats.fromwire;
- struct nic_tx_stats *tstats = &oct_dev->link_stats.fromhost;
- if (status != OCTEON_REQUEST_TIMEOUT && !resp->status) {
- octeon_swap_8B_data((u64 *)&resp->stats,
- (sizeof(struct oct_link_stats)) >> 3);
- /* RX link-level stats */
- rstats->total_rcvd = rsp_rstats->total_rcvd;
- rstats->bytes_rcvd = rsp_rstats->bytes_rcvd;
- rstats->total_bcst = rsp_rstats->total_bcst;
- rstats->total_mcst = rsp_rstats->total_mcst;
- rstats->runts = rsp_rstats->runts;
- rstats->ctl_rcvd = rsp_rstats->ctl_rcvd;
- /* Accounts for over/under-run of buffers */
- rstats->fifo_err = rsp_rstats->fifo_err;
- rstats->dmac_drop = rsp_rstats->dmac_drop;
- rstats->fcs_err = rsp_rstats->fcs_err;
- rstats->jabber_err = rsp_rstats->jabber_err;
- rstats->l2_err = rsp_rstats->l2_err;
- rstats->frame_err = rsp_rstats->frame_err;
- rstats->red_drops = rsp_rstats->red_drops;
- /* RX firmware stats */
- rstats->fw_total_rcvd = rsp_rstats->fw_total_rcvd;
- rstats->fw_total_fwd = rsp_rstats->fw_total_fwd;
- rstats->fw_total_mcast = rsp_rstats->fw_total_mcast;
- rstats->fw_total_bcast = rsp_rstats->fw_total_bcast;
- rstats->fw_err_pko = rsp_rstats->fw_err_pko;
- rstats->fw_err_link = rsp_rstats->fw_err_link;
- rstats->fw_err_drop = rsp_rstats->fw_err_drop;
- rstats->fw_rx_vxlan = rsp_rstats->fw_rx_vxlan;
- rstats->fw_rx_vxlan_err = rsp_rstats->fw_rx_vxlan_err;
- /* Number of packets that are LROed */
- rstats->fw_lro_pkts = rsp_rstats->fw_lro_pkts;
- /* Number of octets that are LROed */
- rstats->fw_lro_octs = rsp_rstats->fw_lro_octs;
- /* Number of LRO packets formed */
- rstats->fw_total_lro = rsp_rstats->fw_total_lro;
- /* Number of times LRO of a packet was aborted */
- rstats->fw_lro_aborts = rsp_rstats->fw_lro_aborts;
- rstats->fw_lro_aborts_port = rsp_rstats->fw_lro_aborts_port;
- rstats->fw_lro_aborts_seq = rsp_rstats->fw_lro_aborts_seq;
- rstats->fw_lro_aborts_tsval = rsp_rstats->fw_lro_aborts_tsval;
- rstats->fw_lro_aborts_timer = rsp_rstats->fw_lro_aborts_timer;
- /* intrmod: packet forward rate */
- rstats->fwd_rate = rsp_rstats->fwd_rate;
- /* TX link-level stats */
- tstats->total_pkts_sent = rsp_tstats->total_pkts_sent;
- tstats->total_bytes_sent = rsp_tstats->total_bytes_sent;
- tstats->mcast_pkts_sent = rsp_tstats->mcast_pkts_sent;
- tstats->bcast_pkts_sent = rsp_tstats->bcast_pkts_sent;
- tstats->ctl_sent = rsp_tstats->ctl_sent;
- /* Packets sent after one collision */
- tstats->one_collision_sent = rsp_tstats->one_collision_sent;
- /* Packets sent after multiple collisions */
- tstats->multi_collision_sent = rsp_tstats->multi_collision_sent;
- /* Packets not sent due to max collisions */
- tstats->max_collision_fail = rsp_tstats->max_collision_fail;
- /* Packets not sent due to max deferrals */
- tstats->max_deferral_fail = rsp_tstats->max_deferral_fail;
- /* Accounts for over/under-run of buffers */
- tstats->fifo_err = rsp_tstats->fifo_err;
- tstats->runts = rsp_tstats->runts;
- /* Total number of collisions detected */
- tstats->total_collisions = rsp_tstats->total_collisions;
- /* firmware stats */
- tstats->fw_total_sent = rsp_tstats->fw_total_sent;
- tstats->fw_total_fwd = rsp_tstats->fw_total_fwd;
- tstats->fw_total_mcast_sent = rsp_tstats->fw_total_mcast_sent;
- tstats->fw_total_bcast_sent = rsp_tstats->fw_total_bcast_sent;
- tstats->fw_err_pko = rsp_tstats->fw_err_pko;
- tstats->fw_err_pki = rsp_tstats->fw_err_pki;
- tstats->fw_err_link = rsp_tstats->fw_err_link;
- tstats->fw_err_drop = rsp_tstats->fw_err_drop;
- tstats->fw_tso = rsp_tstats->fw_tso;
- tstats->fw_tso_fwd = rsp_tstats->fw_tso_fwd;
- tstats->fw_err_tso = rsp_tstats->fw_err_tso;
- tstats->fw_tx_vxlan = rsp_tstats->fw_tx_vxlan;
- resp->status = 1;
- } else {
- resp->status = -1;
- }
- complete(&ctrl->complete);
- }
- int octnet_get_link_stats(struct net_device *netdev)
- {
- struct lio *lio = GET_LIO(netdev);
- struct octeon_device *oct_dev = lio->oct_dev;
- struct octeon_soft_command *sc;
- struct oct_nic_stats_ctrl *ctrl;
- struct oct_nic_stats_resp *resp;
- int retval;
- /* Alloc soft command */
- sc = (struct octeon_soft_command *)
- octeon_alloc_soft_command(oct_dev,
- 0,
- sizeof(struct oct_nic_stats_resp),
- sizeof(struct octnic_ctrl_pkt));
- if (!sc)
- return -ENOMEM;
- resp = (struct oct_nic_stats_resp *)sc->virtrptr;
- memset(resp, 0, sizeof(struct oct_nic_stats_resp));
- ctrl = (struct oct_nic_stats_ctrl *)sc->ctxptr;
- memset(ctrl, 0, sizeof(struct oct_nic_stats_ctrl));
- ctrl->netdev = netdev;
- init_completion(&ctrl->complete);
- sc->iq_no = lio->linfo.txpciq[0].s.q_no;
- octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
- OPCODE_NIC_PORT_STATS, 0, 0, 0);
- sc->callback = octnet_nic_stats_callback;
- sc->callback_arg = sc;
- sc->wait_time = 500; /* in milliseconds */
- retval = octeon_send_soft_command(oct_dev, sc);
- if (retval == IQ_SEND_FAILED) {
- octeon_free_soft_command(oct_dev, sc);
- return -EINVAL;
- }
- wait_for_completion_timeout(&ctrl->complete, msecs_to_jiffies(1000));
- if (resp->status != 1) {
- octeon_free_soft_command(oct_dev, sc);
- return -EINVAL;
- }
- octeon_free_soft_command(oct_dev, sc);
- return 0;
- }
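- /* Illustrative sketch (hypothetical helper): callers such as the ethtool
- * stats path refresh the cached counters with octnet_get_link_stats() and
- * then read oct_dev->link_stats, e.g.:
- */
- static u64 __maybe_unused lio_rx_bytes_example(struct net_device *netdev)
- {
- struct lio *lio = GET_LIO(netdev);
- struct octeon_device *oct_dev = lio->oct_dev;
- if (octnet_get_link_stats(netdev))
- return 0; /* refresh failed; treat stats as unavailable */
- return oct_dev->link_stats.fromwire.bytes_rcvd;
- }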
- static void liquidio_nic_seapi_ctl_callback(struct octeon_device *oct,
- u32 status,
- void *buf)
- {
- struct liquidio_nic_seapi_ctl_context *ctx;
- struct octeon_soft_command *sc = buf;
- ctx = sc->ctxptr;
- oct = lio_get_device(ctx->octeon_id);
- if (status) {
- dev_err(&oct->pci_dev->dev, "%s: instruction failed. Status: %llx\n",
- __func__,
- CVM_CAST64(status));
- }
- ctx->status = status;
- complete(&ctx->complete);
- }
- int liquidio_set_speed(struct lio *lio, int speed)
- {
- struct liquidio_nic_seapi_ctl_context *ctx;
- struct octeon_device *oct = lio->oct_dev;
- struct oct_nic_seapi_resp *resp;
- struct octeon_soft_command *sc;
- union octnet_cmd *ncmd;
- u32 ctx_size;
- int retval;
- u32 var;
- if (oct->speed_setting == speed)
- return 0;
- if (!OCTEON_CN23XX_PF(oct)) {
- dev_err(&oct->pci_dev->dev, "%s: SET SPEED only for PF\n",
- __func__);
- return -EOPNOTSUPP;
- }
- ctx_size = sizeof(struct liquidio_nic_seapi_ctl_context);
- sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
- sizeof(struct oct_nic_seapi_resp),
- ctx_size);
- if (!sc)
- return -ENOMEM;
- ncmd = sc->virtdptr;
- ctx = sc->ctxptr;
- resp = sc->virtrptr;
- memset(resp, 0, sizeof(struct oct_nic_seapi_resp));
- ctx->octeon_id = lio_get_device_id(oct);
- ctx->status = 0;
- init_completion(&ctx->complete);
- ncmd->u64 = 0;
- ncmd->s.cmd = SEAPI_CMD_SPEED_SET;
- ncmd->s.param1 = speed;
- octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));
- sc->iq_no = lio->linfo.txpciq[0].s.q_no;
- octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
- OPCODE_NIC_UBOOT_CTL, 0, 0, 0);
- sc->callback = liquidio_nic_seapi_ctl_callback;
- sc->callback_arg = sc;
- sc->wait_time = 5000;
- retval = octeon_send_soft_command(oct, sc);
- if (retval == IQ_SEND_FAILED) {
- dev_info(&oct->pci_dev->dev, "Failed to send soft command\n");
- retval = -EBUSY;
- } else {
- /* Wait for response or timeout */
- if (wait_for_completion_timeout(&ctx->complete,
- msecs_to_jiffies(10000)) == 0) {
- dev_err(&oct->pci_dev->dev, "%s: sc timeout\n",
- __func__);
- octeon_free_soft_command(oct, sc);
- return -EINTR;
- }
- retval = resp->status;
- if (retval) {
- dev_err(&oct->pci_dev->dev, "%s failed, retval=%d\n",
- __func__, retval);
- octeon_free_soft_command(oct, sc);
- return -EIO;
- }
- var = be32_to_cpu((__force __be32)resp->speed);
- if (var != speed) {
- dev_err(&oct->pci_dev->dev,
- "%s: speed setting failed: got %x, expected %x\n",
- __func__, var, speed);
- }
- oct->speed_setting = var;
- }
- octeon_free_soft_command(oct, sc);
- return retval;
- }
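- /* Example usage (a sketch of the ethtool set_link_ksettings path in
- * lio_ethtool.c): a CN23XX PF with a 25G module can be switched between
- * 10G and 25G; "ecmd" here stands in for the ethtool request.
- *
- *	if (ecmd->base.speed == SPEED_10000)
- *		ret = liquidio_set_speed(lio, 10);
- *	else
- *		ret = liquidio_set_speed(lio, 25);
- */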
- int liquidio_get_speed(struct lio *lio)
- {
- struct liquidio_nic_seapi_ctl_context *ctx;
- struct octeon_device *oct = lio->oct_dev;
- struct oct_nic_seapi_resp *resp;
- struct octeon_soft_command *sc;
- union octnet_cmd *ncmd;
- u32 ctx_size;
- int retval;
- ctx_size = sizeof(struct liquidio_nic_seapi_ctl_context);
- sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
- sizeof(struct oct_nic_seapi_resp),
- ctx_size);
- if (!sc)
- return -ENOMEM;
- ncmd = sc->virtdptr;
- ctx = sc->ctxptr;
- resp = sc->virtrptr;
- memset(resp, 0, sizeof(struct oct_nic_seapi_resp));
- ctx->octeon_id = lio_get_device_id(oct);
- ctx->status = 0;
- init_completion(&ctx->complete);
- ncmd->u64 = 0;
- ncmd->s.cmd = SEAPI_CMD_SPEED_GET;
- octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));
- sc->iq_no = lio->linfo.txpciq[0].s.q_no;
- octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
- OPCODE_NIC_UBOOT_CTL, 0, 0, 0);
- sc->callback = liquidio_nic_seapi_ctl_callback;
- sc->callback_arg = sc;
- sc->wait_time = 5000;
- retval = octeon_send_soft_command(oct, sc);
- if (retval == IQ_SEND_FAILED) {
- dev_info(&oct->pci_dev->dev, "Failed to send soft command\n");
- oct->no_speed_setting = 1;
- oct->speed_setting = 25;
- retval = -EBUSY;
- } else {
- if (wait_for_completion_timeout(&ctx->complete,
- msecs_to_jiffies(10000)) == 0) {
- dev_err(&oct->pci_dev->dev, "%s: sc timeout\n",
- __func__);
- oct->speed_setting = 25;
- oct->no_speed_setting = 1;
- octeon_free_soft_command(oct, sc);
- return -EINTR;
- }
- retval = resp->status;
- if (retval) {
- dev_err(&oct->pci_dev->dev,
- "%s failed retval=%d\n", __func__, retval);
- oct->no_speed_setting = 1;
- oct->speed_setting = 25;
- octeon_free_soft_command(oct, sc);
- retval = -EIO;
- } else {
- u32 var;
- var = be32_to_cpu((__force __be32)resp->speed);
- oct->speed_setting = var;
- if (var == 0xffff) {
- oct->no_speed_setting = 1;
- /* unable to access boot variables;
- * fall back to the default value for this NIC type
- */
- oct->speed_setting = 25;
- }
- }
- }
- octeon_free_soft_command(oct, sc);
- return retval;
- }