12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103 |
- /*
- * Linux network driver for QLogic BR-series Converged Network Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- */
- /*
- * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
- * Copyright (c) 2014-2015 QLogic Corporation
- * All rights reserved
- * www.qlogic.com
- */
- #include "bna.h"
- static inline int
- ethport_can_be_up(struct bna_ethport *ethport)
- {
- int ready = 0;
- if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
- ready = ((ethport->flags & BNA_ETHPORT_F_ADMIN_UP) &&
- (ethport->flags & BNA_ETHPORT_F_RX_STARTED) &&
- (ethport->flags & BNA_ETHPORT_F_PORT_ENABLED));
- else
- ready = ((ethport->flags & BNA_ETHPORT_F_ADMIN_UP) &&
- (ethport->flags & BNA_ETHPORT_F_RX_STARTED) &&
- !(ethport->flags & BNA_ETHPORT_F_PORT_ENABLED));
- return ready;
- }
- #define ethport_is_up ethport_can_be_up
/* Events consumed by the ethport state machine. */
enum bna_ethport_event {
	ETHPORT_E_START			= 1,	/* parent enet started */
	ETHPORT_E_STOP			= 2,	/* parent enet stopping */
	ETHPORT_E_FAIL			= 3,	/* IOC failure */
	ETHPORT_E_UP			= 4,	/* preconditions for up met */
	ETHPORT_E_DOWN			= 5,	/* bring the port down */
	ETHPORT_E_FWRESP_UP_OK		= 6,	/* FW acked up request */
	ETHPORT_E_FWRESP_DOWN		= 7,	/* FW acked down request */
	ETHPORT_E_FWRESP_UP_FAIL	= 8,	/* FW rejected up request */
};
/* Events consumed by the enet state machine. */
enum bna_enet_event {
	ENET_E_START			= 1,	/* ioceth ready */
	ENET_E_STOP			= 2,	/* disable requested */
	ENET_E_FAIL			= 3,	/* IOC failure */
	ENET_E_PAUSE_CFG		= 4,	/* pause config changed */
	ENET_E_MTU_CFG			= 5,	/* MTU config changed */
	ENET_E_FWRESP_PAUSE		= 6,	/* FW acked pause set */
	ENET_E_CHLD_STOPPED		= 7,	/* all children stopped */
};
/* Events consumed by the ioceth state machine. */
enum bna_ioceth_event {
	IOCETH_E_ENABLE			= 1,	/* enable requested */
	IOCETH_E_DISABLE		= 2,	/* disable requested */
	IOCETH_E_IOC_RESET		= 3,	/* IOC being reset */
	IOCETH_E_IOC_FAILED		= 4,	/* IOC failure notification */
	IOCETH_E_IOC_READY		= 5,	/* IOC is operational */
	IOCETH_E_ENET_ATTR_RESP		= 6,	/* attr-get response arrived */
	IOCETH_E_ENET_STOPPED		= 7,	/* enet finished stopping */
	IOCETH_E_IOC_DISABLED		= 8,	/* IOC finished disabling */
};
/*
 * Copy one hardware stats section from the DMA-able area to the SW
 * stats area, byte-swapping each 64-bit counter from big endian.
 * Relies on count, i, stats_src, stats_dst and bna being in scope at
 * the expansion site (see bna_bfi_stats_get_rsp()).
 *
 * Fix: dropped the stray line-continuation backslash after
 * "} while (0)" — it spliced whatever source line followed the macro
 * into the macro definition.
 */
#define bna_stats_copy(_name, _type)					\
do {									\
	count = sizeof(struct bfi_enet_stats_ ## _type) / sizeof(u64);	\
	stats_src = (u64 *)&bna->stats.hw_stats_kva->_name ## _stats;	\
	stats_dst = (u64 *)&bna->stats.hw_stats._name ## _stats;	\
	for (i = 0; i < count; i++)					\
		stats_dst[i] = be64_to_cpu(stats_src[i]);		\
} while (0)
- /*
- * FW response handlers
- */
- static void
- bna_bfi_ethport_enable_aen(struct bna_ethport *ethport,
- struct bfi_msgq_mhdr *msghdr)
- {
- ethport->flags |= BNA_ETHPORT_F_PORT_ENABLED;
- if (ethport_can_be_up(ethport))
- bfa_fsm_send_event(ethport, ETHPORT_E_UP);
- }
- static void
- bna_bfi_ethport_disable_aen(struct bna_ethport *ethport,
- struct bfi_msgq_mhdr *msghdr)
- {
- int ethport_up = ethport_is_up(ethport);
- ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;
- if (ethport_up)
- bfa_fsm_send_event(ethport, ETHPORT_E_DOWN);
- }
- static void
- bna_bfi_ethport_admin_rsp(struct bna_ethport *ethport,
- struct bfi_msgq_mhdr *msghdr)
- {
- struct bfi_enet_enable_req *admin_req =
- ðport->bfi_enet_cmd.admin_req;
- struct bfi_enet_rsp *rsp =
- container_of(msghdr, struct bfi_enet_rsp, mh);
- switch (admin_req->enable) {
- case BNA_STATUS_T_ENABLED:
- if (rsp->error == BFI_ENET_CMD_OK)
- bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_OK);
- else {
- ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;
- bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_FAIL);
- }
- break;
- case BNA_STATUS_T_DISABLED:
- bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_DOWN);
- ethport->link_status = BNA_LINK_DOWN;
- ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
- break;
- }
- }
- static void
- bna_bfi_ethport_lpbk_rsp(struct bna_ethport *ethport,
- struct bfi_msgq_mhdr *msghdr)
- {
- struct bfi_enet_diag_lb_req *diag_lb_req =
- ðport->bfi_enet_cmd.lpbk_req;
- struct bfi_enet_rsp *rsp =
- container_of(msghdr, struct bfi_enet_rsp, mh);
- switch (diag_lb_req->enable) {
- case BNA_STATUS_T_ENABLED:
- if (rsp->error == BFI_ENET_CMD_OK)
- bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_OK);
- else {
- ethport->flags &= ~BNA_ETHPORT_F_ADMIN_UP;
- bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_FAIL);
- }
- break;
- case BNA_STATUS_T_DISABLED:
- bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_DOWN);
- break;
- }
- }
/* FW response to a pause-set request: just kick the enet FSM. */
static void
bna_bfi_pause_set_rsp(struct bna_enet *enet, struct bfi_msgq_mhdr *msghdr)
{
	bfa_fsm_send_event(enet, ENET_E_FWRESP_PAUSE);
}
/* FW response carrying the enet attributes (resource limits). */
static void
bna_bfi_attr_get_rsp(struct bna_ioceth *ioceth,
			struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_attr_rsp *rsp =
		container_of(msghdr, struct bfi_enet_attr_rsp, mh);

	/**
	 * Store only if not set earlier, since BNAD can override the HW
	 * attributes
	 */
	if (!ioceth->attr.fw_query_complete) {
		/* txq and rxp limits both derive from max_cfg. */
		ioceth->attr.num_txq = ntohl(rsp->max_cfg);
		ioceth->attr.num_rxp = ntohl(rsp->max_cfg);
		ioceth->attr.num_ucmac = ntohl(rsp->max_ucmac);
		ioceth->attr.num_mcmac = BFI_ENET_MAX_MCAM;
		ioceth->attr.max_rit_size = ntohl(rsp->rit_size);
		ioceth->attr.fw_query_complete = true;
	}

	bfa_fsm_send_event(ioceth, IOCETH_E_ENET_ATTR_RESP);
}
/* FW response to a stats-get request: copy the HW stats from the
 * DMA-able area into the SW stats area (byte-swapping from BE64) and
 * deliver them to bnad. The per-function (rxf/txf) sections are packed
 * in HW order and scattered into fixed per-index slots here.
 */
static void
bna_bfi_stats_get_rsp(struct bna *bna, struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_stats_req *stats_req = &bna->stats_mod.stats_get;
	u64 *stats_src;
	u64 *stats_dst;
	u32 tx_enet_mask = ntohl(stats_req->tx_enet_mask);
	u32 rx_enet_mask = ntohl(stats_req->rx_enet_mask);
	int count;
	int i;

	/* Fixed sections; count/i/stats_src/stats_dst are used by the
	 * macro expansions below.
	 */
	bna_stats_copy(mac, mac);
	bna_stats_copy(bpc, bpc);
	bna_stats_copy(rad, rad);
	bna_stats_copy(rlb, rad);	/* rlb shares the rad layout */
	bna_stats_copy(fc_rx, fc_rx);
	bna_stats_copy(fc_tx, fc_tx);

	stats_src = (u64 *)&(bna->stats.hw_stats_kva->rxf_stats[0]);

	/* Copy Rxf stats to SW area, scatter them while copying */
	for (i = 0; i < BFI_ENET_CFG_MAX; i++) {
		stats_dst = (u64 *)&(bna->stats.hw_stats.rxf_stats[i]);
		memset(stats_dst, 0, sizeof(struct bfi_enet_stats_rxf));
		if (rx_enet_mask & BIT(i)) {
			int k;
			count = sizeof(struct bfi_enet_stats_rxf) /
				sizeof(u64);
			for (k = 0; k < count; k++) {
				stats_dst[k] = be64_to_cpu(*stats_src);
				stats_src++;
			}
		}
	}

	/* Copy Txf stats to SW area, scatter them while copying */
	for (i = 0; i < BFI_ENET_CFG_MAX; i++) {
		stats_dst = (u64 *)&(bna->stats.hw_stats.txf_stats[i]);
		memset(stats_dst, 0, sizeof(struct bfi_enet_stats_txf));
		if (tx_enet_mask & BIT(i)) {
			int k;
			count = sizeof(struct bfi_enet_stats_txf) /
				sizeof(u64);
			for (k = 0; k < count; k++) {
				stats_dst[k] = be64_to_cpu(*stats_src);
				stats_src++;
			}
		}
	}

	/* Done with the request slot; hand the results to bnad. */
	bna->stats_mod.stats_get_busy = false;

	bnad_cb_stats_get(bna->bnad, BNA_CB_SUCCESS, &bna->stats);
}
/* LINK_UP async event from FW: record and report the new link state. */
static void
bna_bfi_ethport_linkup_aen(struct bna_ethport *ethport,
			struct bfi_msgq_mhdr *msghdr)
{
	ethport->link_status = BNA_LINK_UP;

	/* Dispatch events */
	ethport->link_cbfn(ethport->bna->bnad, ethport->link_status);
}
/* LINK_DOWN async event from FW: record and report the new link state. */
static void
bna_bfi_ethport_linkdown_aen(struct bna_ethport *ethport,
			struct bfi_msgq_mhdr *msghdr)
{
	ethport->link_status = BNA_LINK_DOWN;

	/* Dispatch events */
	ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
}
/* Handle an error interrupt: clear a pending halt (if any) and forward
 * the error to the IOC layer.
 */
static void
bna_err_handler(struct bna *bna, u32 intr_status)
{
	if (BNA_IS_HALT_INTR(bna, intr_status))
		bna_halt_clear(bna);

	bfa_nw_ioc_error_isr(&bna->ioceth.ioc);
}
- void
- bna_mbox_handler(struct bna *bna, u32 intr_status)
- {
- if (BNA_IS_ERR_INTR(bna, intr_status)) {
- bna_err_handler(bna, intr_status);
- return;
- }
- if (BNA_IS_MBOX_INTR(bna, intr_status))
- bfa_nw_ioc_mbox_isr(&bna->ioceth.ioc);
- }
/* Central dispatch for firmware message-queue responses and async
 * events: route each message to the owning Tx/Rx/ethport/enet/ioceth
 * object based on msg_id (and enet_id for per-object messages).
 */
static void
bna_msgq_rsp_handler(void *arg, struct bfi_msgq_mhdr *msghdr)
{
	struct bna *bna = (struct bna *)arg;
	struct bna_tx *tx;
	struct bna_rx *rx;

	switch (msghdr->msg_id) {
	case BFI_ENET_I2H_RX_CFG_SET_RSP:
		/* bna_rx_from_rid() is a lookup macro; rx may be NULL if
		 * the object went away before the response arrived.
		 */
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rx_enet_start_rsp(rx, msghdr);
		break;

	case BFI_ENET_I2H_RX_CFG_CLR_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rx_enet_stop_rsp(rx, msghdr);
		break;

	/* All generic rxf configuration responses share one handler. */
	case BFI_ENET_I2H_RIT_CFG_RSP:
	case BFI_ENET_I2H_RSS_CFG_RSP:
	case BFI_ENET_I2H_RSS_ENABLE_RSP:
	case BFI_ENET_I2H_RX_PROMISCUOUS_RSP:
	case BFI_ENET_I2H_RX_DEFAULT_RSP:
	case BFI_ENET_I2H_MAC_UCAST_CLR_RSP:
	case BFI_ENET_I2H_MAC_UCAST_ADD_RSP:
	case BFI_ENET_I2H_MAC_UCAST_DEL_RSP:
	case BFI_ENET_I2H_MAC_MCAST_DEL_RSP:
	case BFI_ENET_I2H_MAC_MCAST_FILTER_RSP:
	case BFI_ENET_I2H_RX_VLAN_SET_RSP:
	case BFI_ENET_I2H_RX_VLAN_STRIP_ENABLE_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rxf_cfg_rsp(&rx->rxf, msghdr);
		break;

	case BFI_ENET_I2H_MAC_UCAST_SET_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rxf_ucast_set_rsp(&rx->rxf, msghdr);
		break;

	case BFI_ENET_I2H_MAC_MCAST_ADD_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rxf_mcast_add_rsp(&rx->rxf, msghdr);
		break;

	case BFI_ENET_I2H_TX_CFG_SET_RSP:
		bna_tx_from_rid(bna, msghdr->enet_id, tx);
		if (tx)
			bna_bfi_tx_enet_start_rsp(tx, msghdr);
		break;

	case BFI_ENET_I2H_TX_CFG_CLR_RSP:
		bna_tx_from_rid(bna, msghdr->enet_id, tx);
		if (tx)
			bna_bfi_tx_enet_stop_rsp(tx, msghdr);
		break;

	/* Singleton objects: ethport, enet, ioceth, stats. */
	case BFI_ENET_I2H_PORT_ADMIN_RSP:
		bna_bfi_ethport_admin_rsp(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_DIAG_LOOPBACK_RSP:
		bna_bfi_ethport_lpbk_rsp(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_SET_PAUSE_RSP:
		bna_bfi_pause_set_rsp(&bna->enet, msghdr);
		break;

	case BFI_ENET_I2H_GET_ATTR_RSP:
		bna_bfi_attr_get_rsp(&bna->ioceth, msghdr);
		break;

	case BFI_ENET_I2H_STATS_GET_RSP:
		bna_bfi_stats_get_rsp(bna, msghdr);
		break;

	case BFI_ENET_I2H_STATS_CLR_RSP:
		/* No-op */
		break;

	/* Async events (no matching host request). */
	case BFI_ENET_I2H_LINK_UP_AEN:
		bna_bfi_ethport_linkup_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_LINK_DOWN_AEN:
		bna_bfi_ethport_linkdown_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_PORT_ENABLE_AEN:
		bna_bfi_ethport_enable_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_PORT_DISABLE_AEN:
		bna_bfi_ethport_disable_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_BW_UPDATE_AEN:
		bna_bfi_bw_update_aen(&bna->tx_mod);
		break;

	default:
		break;
	}
}
- /* ETHPORT */
/* Invoke and clear the one-shot ethport stop callback, if armed.
 * Clearing before the call makes re-entry from the callback safe.
 */
#define call_ethport_stop_cbfn(_ethport)				\
do {									\
	if ((_ethport)->stop_cbfn) {					\
		void (*cbfn)(struct bna_enet *);			\
		cbfn = (_ethport)->stop_cbfn;				\
		(_ethport)->stop_cbfn = NULL;				\
		cbfn(&(_ethport)->bna->enet);				\
	}								\
} while (0)
/* Invoke and clear the one-shot admin-up completion callback, if
 * armed, passing the completion status up to bnad.
 */
#define call_ethport_adminup_cbfn(ethport, status)			\
do {									\
	if ((ethport)->adminup_cbfn) {					\
		void (*cbfn)(struct bnad *, enum bna_cb_status);	\
		cbfn = (ethport)->adminup_cbfn;				\
		(ethport)->adminup_cbfn = NULL;				\
		cbfn((ethport)->bna->bnad, status);			\
	}								\
} while (0)
- static void
- bna_bfi_ethport_admin_up(struct bna_ethport *ethport)
- {
- struct bfi_enet_enable_req *admin_up_req =
- ðport->bfi_enet_cmd.admin_req;
- bfi_msgq_mhdr_set(admin_up_req->mh, BFI_MC_ENET,
- BFI_ENET_H2I_PORT_ADMIN_UP_REQ, 0, 0);
- admin_up_req->mh.num_entries = htons(
- bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
- admin_up_req->enable = BNA_STATUS_T_ENABLED;
- bfa_msgq_cmd_set(ðport->msgq_cmd, NULL, NULL,
- sizeof(struct bfi_enet_enable_req), &admin_up_req->mh);
- bfa_msgq_cmd_post(ðport->bna->msgq, ðport->msgq_cmd);
- }
- static void
- bna_bfi_ethport_admin_down(struct bna_ethport *ethport)
- {
- struct bfi_enet_enable_req *admin_down_req =
- ðport->bfi_enet_cmd.admin_req;
- bfi_msgq_mhdr_set(admin_down_req->mh, BFI_MC_ENET,
- BFI_ENET_H2I_PORT_ADMIN_UP_REQ, 0, 0);
- admin_down_req->mh.num_entries = htons(
- bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
- admin_down_req->enable = BNA_STATUS_T_DISABLED;
- bfa_msgq_cmd_set(ðport->msgq_cmd, NULL, NULL,
- sizeof(struct bfi_enet_enable_req), &admin_down_req->mh);
- bfa_msgq_cmd_post(ðport->bna->msgq, ðport->msgq_cmd);
- }
- static void
- bna_bfi_ethport_lpbk_up(struct bna_ethport *ethport)
- {
- struct bfi_enet_diag_lb_req *lpbk_up_req =
- ðport->bfi_enet_cmd.lpbk_req;
- bfi_msgq_mhdr_set(lpbk_up_req->mh, BFI_MC_ENET,
- BFI_ENET_H2I_DIAG_LOOPBACK_REQ, 0, 0);
- lpbk_up_req->mh.num_entries = htons(
- bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_diag_lb_req)));
- lpbk_up_req->mode = (ethport->bna->enet.type ==
- BNA_ENET_T_LOOPBACK_INTERNAL) ?
- BFI_ENET_DIAG_LB_OPMODE_EXT :
- BFI_ENET_DIAG_LB_OPMODE_CBL;
- lpbk_up_req->enable = BNA_STATUS_T_ENABLED;
- bfa_msgq_cmd_set(ðport->msgq_cmd, NULL, NULL,
- sizeof(struct bfi_enet_diag_lb_req), &lpbk_up_req->mh);
- bfa_msgq_cmd_post(ðport->bna->msgq, ðport->msgq_cmd);
- }
- static void
- bna_bfi_ethport_lpbk_down(struct bna_ethport *ethport)
- {
- struct bfi_enet_diag_lb_req *lpbk_down_req =
- ðport->bfi_enet_cmd.lpbk_req;
- bfi_msgq_mhdr_set(lpbk_down_req->mh, BFI_MC_ENET,
- BFI_ENET_H2I_DIAG_LOOPBACK_REQ, 0, 0);
- lpbk_down_req->mh.num_entries = htons(
- bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_diag_lb_req)));
- lpbk_down_req->enable = BNA_STATUS_T_DISABLED;
- bfa_msgq_cmd_set(ðport->msgq_cmd, NULL, NULL,
- sizeof(struct bfi_enet_diag_lb_req), &lpbk_down_req->mh);
- bfa_msgq_cmd_post(ðport->bna->msgq, ðport->msgq_cmd);
- }
- static void
- bna_bfi_ethport_up(struct bna_ethport *ethport)
- {
- if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
- bna_bfi_ethport_admin_up(ethport);
- else
- bna_bfi_ethport_lpbk_up(ethport);
- }
- static void
- bna_bfi_ethport_down(struct bna_ethport *ethport)
- {
- if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
- bna_bfi_ethport_admin_down(ethport);
- else
- bna_bfi_ethport_lpbk_down(ethport);
- }
/* Ethport state machine: declare the entry handler and event handler
 * for each state (generated by bfa_fsm_state_decl()).
 */
bfa_fsm_state_decl(bna_ethport, stopped, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, down, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, up_resp_wait, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, down_resp_wait, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, up, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, last_resp_wait, struct bna_ethport,
			enum bna_ethport_event);
/* stopped entry: complete any pending stop request. */
static void
bna_ethport_sm_stopped_entry(struct bna_ethport *ethport)
{
	call_ethport_stop_cbfn(ethport);
}
/* stopped: idle state; only START moves the port forward. */
static void
bna_ethport_sm_stopped(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_START:
		bfa_fsm_set_state(ethport, bna_ethport_sm_down);
		break;

	case ETHPORT_E_STOP:
		/* Already stopped; just acknowledge the caller. */
		call_ethport_stop_cbfn(ethport);
		break;

	case ETHPORT_E_FAIL:
		/* No-op */
		break;

	case ETHPORT_E_DOWN:
		/* This event is received due to Rx objects failing */
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}
/* down entry: nothing to do; waiting for the UP preconditions. */
static void
bna_ethport_sm_down_entry(struct bna_ethport *ethport)
{
}
/* down: port is administratively down; UP kicks off the FW request. */
static void
bna_ethport_sm_down(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_UP:
		/* State first, then the FW command, so the response finds
		 * us in up_resp_wait.
		 */
		bfa_fsm_set_state(ethport, bna_ethport_sm_up_resp_wait);
		bna_bfi_ethport_up(ethport);
		break;

	default:
		bfa_sm_fault(event);
	}
}
/* up_resp_wait entry: FW up request already posted; just wait. */
static void
bna_ethport_sm_up_resp_wait_entry(struct bna_ethport *ethport)
{
}
/* up_resp_wait: an up request is outstanding with the firmware. */
static void
bna_ethport_sm_up_resp_wait(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
		break;

	case ETHPORT_E_FAIL:
		call_ethport_adminup_cbfn(ethport, BNA_CB_FAIL);
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_DOWN:
		call_ethport_adminup_cbfn(ethport, BNA_CB_INTERRUPT);
		bfa_fsm_set_state(ethport, bna_ethport_sm_down_resp_wait);
		break;

	case ETHPORT_E_FWRESP_UP_OK:
		call_ethport_adminup_cbfn(ethport, BNA_CB_SUCCESS);
		bfa_fsm_set_state(ethport, bna_ethport_sm_up);
		break;

	case ETHPORT_E_FWRESP_UP_FAIL:
		call_ethport_adminup_cbfn(ethport, BNA_CB_FAIL);
		bfa_fsm_set_state(ethport, bna_ethport_sm_down);
		break;

	case ETHPORT_E_FWRESP_DOWN:
		/* down_resp_wait -> up_resp_wait transition on ETHPORT_E_UP */
		bna_bfi_ethport_up(ethport);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_ethport_sm_down_resp_wait_entry(struct bna_ethport *ethport)
{
	/**
	 * NOTE: Do not call bna_bfi_ethport_down() here. That will over step
	 * mbox due to up_resp_wait -> down_resp_wait transition on event
	 * ETHPORT_E_DOWN
	 */
}
/* down_resp_wait: a down request is (or will be) outstanding. */
static void
bna_ethport_sm_down_resp_wait(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
		break;

	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_UP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_up_resp_wait);
		break;

	case ETHPORT_E_FWRESP_UP_OK:
		/* up_resp_wait->down_resp_wait transition on ETHPORT_E_DOWN */
		bna_bfi_ethport_down(ethport);
		break;

	case ETHPORT_E_FWRESP_UP_FAIL:
	case ETHPORT_E_FWRESP_DOWN:
		bfa_fsm_set_state(ethport, bna_ethport_sm_down);
		break;

	default:
		bfa_sm_fault(event);
	}
}
/* up entry: port is operational; nothing to do on entry. */
static void
bna_ethport_sm_up_entry(struct bna_ethport *ethport)
{
}
/* up: port is operational; STOP/DOWN post a FW down request. */
static void
bna_ethport_sm_up(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
		bna_bfi_ethport_down(ethport);
		break;

	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_DOWN:
		bfa_fsm_set_state(ethport, bna_ethport_sm_down_resp_wait);
		bna_bfi_ethport_down(ethport);
		break;

	default:
		bfa_sm_fault(event);
	}
}
/* last_resp_wait entry: waiting for the final FW response before stop. */
static void
bna_ethport_sm_last_resp_wait_entry(struct bna_ethport *ethport)
{
}
/* last_resp_wait: stopping; drain the outstanding FW exchange first. */
static void
bna_ethport_sm_last_resp_wait(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_DOWN:
		/**
		 * This event is received due to Rx objects stopping in
		 * parallel to ethport
		 */
		/* No-op */
		break;

	case ETHPORT_E_FWRESP_UP_OK:
		/* up_resp_wait->last_resp_wait transition on ETHPORT_T_STOP */
		bna_bfi_ethport_down(ethport);
		break;

	case ETHPORT_E_FWRESP_UP_FAIL:
	case ETHPORT_E_FWRESP_DOWN:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}
- static void
- bna_ethport_init(struct bna_ethport *ethport, struct bna *bna)
- {
- ethport->flags |= (BNA_ETHPORT_F_ADMIN_UP | BNA_ETHPORT_F_PORT_ENABLED);
- ethport->bna = bna;
- ethport->link_status = BNA_LINK_DOWN;
- ethport->link_cbfn = bnad_cb_ethport_link_status;
- ethport->rx_started_count = 0;
- ethport->stop_cbfn = NULL;
- ethport->adminup_cbfn = NULL;
- bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
- }
- static void
- bna_ethport_uninit(struct bna_ethport *ethport)
- {
- ethport->flags &= ~BNA_ETHPORT_F_ADMIN_UP;
- ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;
- ethport->bna = NULL;
- }
/* Kick the ethport FSM out of stopped (called when the enet starts). */
static void
bna_ethport_start(struct bna_ethport *ethport)
{
	bfa_fsm_send_event(ethport, ETHPORT_E_START);
}
/* Ethport-stopped callback: release one reference on the enet's
 * child-stop wait counter.
 */
static void
bna_enet_cb_ethport_stopped(struct bna_enet *enet)
{
	bfa_wc_down(&enet->chld_stop_wc);
}
/* Begin stopping the ethport; completion is reported via stop_cbfn. */
static void
bna_ethport_stop(struct bna_ethport *ethport)
{
	ethport->stop_cbfn = bna_enet_cb_ethport_stopped;
	bfa_fsm_send_event(ethport, ETHPORT_E_STOP);
}
- static void
- bna_ethport_fail(struct bna_ethport *ethport)
- {
- /* Reset the physical port status to enabled */
- ethport->flags |= BNA_ETHPORT_F_PORT_ENABLED;
- if (ethport->link_status != BNA_LINK_DOWN) {
- ethport->link_status = BNA_LINK_DOWN;
- ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
- }
- bfa_fsm_send_event(ethport, ETHPORT_E_FAIL);
- }
- /* Should be called only when ethport is disabled */
- void
- bna_ethport_cb_rx_started(struct bna_ethport *ethport)
- {
- ethport->rx_started_count++;
- if (ethport->rx_started_count == 1) {
- ethport->flags |= BNA_ETHPORT_F_RX_STARTED;
- if (ethport_can_be_up(ethport))
- bfa_fsm_send_event(ethport, ETHPORT_E_UP);
- }
- }
- void
- bna_ethport_cb_rx_stopped(struct bna_ethport *ethport)
- {
- int ethport_up = ethport_is_up(ethport);
- ethport->rx_started_count--;
- if (ethport->rx_started_count == 0) {
- ethport->flags &= ~BNA_ETHPORT_F_RX_STARTED;
- if (ethport_up)
- bfa_fsm_send_event(ethport, ETHPORT_E_DOWN);
- }
- }
- /* ENET */
/* Start all children of the enet (ethport, Tx mod, Rx mod), mapping
 * the enet type to the corresponding Tx/Rx types.
 */
#define bna_enet_chld_start(enet)					\
do {									\
	enum bna_tx_type tx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;			\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bna_ethport_start(&(enet)->bna->ethport);			\
	bna_tx_mod_start(&(enet)->bna->tx_mod, tx_type);		\
	bna_rx_mod_start(&(enet)->bna->rx_mod, rx_type);		\
} while (0)
/* Stop all children of the enet, tracking completion with a wait
 * counter: one wc reference per child, released by each child's stop
 * callback; bna_enet_cb_chld_stopped() fires when all are done.
 */
#define bna_enet_chld_stop(enet)					\
do {									\
	enum bna_tx_type tx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;			\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bfa_wc_init(&(enet)->chld_stop_wc, bna_enet_cb_chld_stopped, (enet));\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_ethport_stop(&(enet)->bna->ethport);			\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_tx_mod_stop(&(enet)->bna->tx_mod, tx_type);			\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_rx_mod_stop(&(enet)->bna->rx_mod, rx_type);			\
	bfa_wc_wait(&(enet)->chld_stop_wc);				\
} while (0)
/* Propagate an IOC failure to all children of the enet. */
#define bna_enet_chld_fail(enet)					\
do {									\
	bna_ethport_fail(&(enet)->bna->ethport);			\
	bna_tx_mod_fail(&(enet)->bna->tx_mod);				\
	bna_rx_mod_fail(&(enet)->bna->rx_mod);				\
} while (0)
/* Restart only the Rx module (used after an MTU reconfiguration). */
#define bna_enet_rx_start(enet)						\
do {									\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bna_rx_mod_start(&(enet)->bna->rx_mod, rx_type);		\
} while (0)
/* Stop only the Rx module, waiting for completion via the same
 * child-stop wait counter used by bna_enet_chld_stop().
 */
#define bna_enet_rx_stop(enet)						\
do {									\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bfa_wc_init(&(enet)->chld_stop_wc, bna_enet_cb_chld_stopped, (enet));\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_rx_mod_stop(&(enet)->bna->rx_mod, rx_type);			\
	bfa_wc_wait(&(enet)->chld_stop_wc);				\
} while (0)
/* Invoke and clear the one-shot enet stop callback (with its arg), if
 * armed. Clearing before the call makes re-entry safe.
 */
#define call_enet_stop_cbfn(enet)					\
do {									\
	if ((enet)->stop_cbfn) {					\
		void (*cbfn)(void *);					\
		void *cbarg;						\
		cbfn = (enet)->stop_cbfn;				\
		cbarg = (enet)->stop_cbarg;				\
		(enet)->stop_cbfn = NULL;				\
		(enet)->stop_cbarg = NULL;				\
		cbfn(cbarg);						\
	}								\
} while (0)
/* Invoke and clear the one-shot MTU-change completion callback. */
#define call_enet_mtu_cbfn(enet)					\
do {									\
	if ((enet)->mtu_cbfn) {						\
		void (*cbfn)(struct bnad *);				\
		cbfn = (enet)->mtu_cbfn;				\
		(enet)->mtu_cbfn = NULL;				\
		cbfn((enet)->bna->bnad);				\
	}								\
} while (0)
/* Forward declarations used by the enet state machine below. */
static void bna_enet_cb_chld_stopped(void *arg);
static void bna_bfi_pause_set(struct bna_enet *enet);

/* Enet state machine: declare handlers for each state. */
bfa_fsm_state_decl(bna_enet, stopped, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, pause_init_wait, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, last_resp_wait, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, started, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, cfg_wait, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, cfg_stop_wait, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, chld_stop_wait, struct bna_enet,
			enum bna_enet_event);
/* stopped entry: flush any pending MTU and stop completions. */
static void
bna_enet_sm_stopped_entry(struct bna_enet *enet)
{
	call_enet_mtu_cbfn(enet);
	call_enet_stop_cbfn(enet);
}
/* stopped: idle; START begins with the pause configuration exchange. */
static void
bna_enet_sm_stopped(struct bna_enet *enet, enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_START:
		bfa_fsm_set_state(enet, bna_enet_sm_pause_init_wait);
		break;

	case ENET_E_STOP:
		/* Already stopped; just acknowledge the caller. */
		call_enet_stop_cbfn(enet);
		break;

	case ENET_E_FAIL:
		/* No-op */
		break;

	case ENET_E_PAUSE_CFG:
		/* Config is cached; applied when the enet starts. */
		break;

	case ENET_E_MTU_CFG:
		call_enet_mtu_cbfn(enet);
		break;

	case ENET_E_CHLD_STOPPED:
		/**
		 * This event is received due to Ethport, Tx and Rx objects
		 * failing
		 */
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}
/* pause_init_wait entry: post the initial pause configuration to FW. */
static void
bna_enet_sm_pause_init_wait_entry(struct bna_enet *enet)
{
	bna_bfi_pause_set(enet);
}
/* pause_init_wait: initial pause-set is outstanding with the firmware;
 * a further PAUSE_CFG while waiting is remembered via PAUSE_CHANGED
 * and replayed when the response arrives.
 */
static void
bna_enet_sm_pause_init_wait(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_STOP:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_last_resp_wait);
		break;

	case ENET_E_FAIL:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		break;

	case ENET_E_PAUSE_CFG:
		enet->flags |= BNA_ENET_F_PAUSE_CHANGED;
		break;

	case ENET_E_MTU_CFG:
		/* No-op */
		break;

	case ENET_E_FWRESP_PAUSE:
		if (enet->flags & BNA_ENET_F_PAUSE_CHANGED) {
			/* Config changed while waiting; send it again. */
			enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
			bna_bfi_pause_set(enet);
		} else {
			bfa_fsm_set_state(enet, bna_enet_sm_started);
			bna_enet_chld_start(enet);
		}
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_enet_sm_last_resp_wait_entry(struct bna_enet *enet)
{
	/* A stop is in progress; any pending pause change is abandoned. */
	enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
}
/*
 * Stopping, but one firmware response is still outstanding; wait for it
 * (or an IOC failure) before moving to stopped.
 */
static void
bna_enet_sm_last_resp_wait(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_FAIL:
	case ENET_E_FWRESP_PAUSE:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_enet_sm_started_entry(struct bna_enet *enet)
{
	/**
	 * NOTE: Do not call bna_enet_chld_start() here, since it will be
	 * inadvertently called during cfg_wait->started transition as well
	 */
	call_enet_mtu_cbfn(enet);
}
/* Event handler for the running (started) state. */
static void
bna_enet_sm_started(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_STOP:
		bfa_fsm_set_state(enet, bna_enet_sm_chld_stop_wait);
		break;

	case ENET_E_FAIL:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;

	case ENET_E_PAUSE_CFG:
		/* Push new pause config; wait for the fw response. */
		bfa_fsm_set_state(enet, bna_enet_sm_cfg_wait);
		bna_bfi_pause_set(enet);
		break;

	case ENET_E_MTU_CFG:
		/* MTU change needs Rx stopped first; restart after it. */
		bfa_fsm_set_state(enet, bna_enet_sm_cfg_wait);
		bna_enet_rx_stop(enet);
		break;

	default:
		bfa_sm_fault(event);
	}
}
/* No entry action; cfg_wait is entered with the request already issued. */
static void
bna_enet_sm_cfg_wait_entry(struct bna_enet *enet)
{
}
- static void
- bna_enet_sm_cfg_wait(struct bna_enet *enet,
- enum bna_enet_event event)
- {
- switch (event) {
- case ENET_E_STOP:
- enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
- enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
- bfa_fsm_set_state(enet, bna_enet_sm_cfg_stop_wait);
- break;
- case ENET_E_FAIL:
- enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
- enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
- bfa_fsm_set_state(enet, bna_enet_sm_stopped);
- bna_enet_chld_fail(enet);
- break;
- case ENET_E_PAUSE_CFG:
- enet->flags |= BNA_ENET_F_PAUSE_CHANGED;
- break;
- case ENET_E_MTU_CFG:
- enet->flags |= BNA_ENET_F_MTU_CHANGED;
- break;
- case ENET_E_CHLD_STOPPED:
- bna_enet_rx_start(enet);
- /* Fall through */
- case ENET_E_FWRESP_PAUSE:
- if (enet->flags & BNA_ENET_F_PAUSE_CHANGED) {
- enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
- bna_bfi_pause_set(enet);
- } else if (enet->flags & BNA_ENET_F_MTU_CHANGED) {
- enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
- bna_enet_rx_stop(enet);
- } else {
- bfa_fsm_set_state(enet, bna_enet_sm_started);
- }
- break;
- default:
- bfa_sm_fault(event);
- }
- }
- static void
- bna_enet_sm_cfg_stop_wait_entry(struct bna_enet *enet)
- {
- enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
- enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
- }
/*
 * Stop was requested while a config operation was outstanding; wait for
 * that operation to finish before stopping the children.
 */
static void
bna_enet_sm_cfg_stop_wait(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_FAIL:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;

	case ENET_E_FWRESP_PAUSE:
	case ENET_E_CHLD_STOPPED:
		bfa_fsm_set_state(enet, bna_enet_sm_chld_stop_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_enet_sm_chld_stop_wait_entry(struct bna_enet *enet)
{
	/* Kick off stop of child objects (Ethport, Tx, Rx). */
	bna_enet_chld_stop(enet);
}
/* Waiting for all child objects to report stopped. */
static void
bna_enet_sm_chld_stop_wait(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_FAIL:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;

	case ENET_E_CHLD_STOPPED:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}
/*
 * Build and post a BFI_ENET_H2I_SET_PAUSE_REQ message carrying the
 * current pause configuration to the firmware message queue.
 */
static void
bna_bfi_pause_set(struct bna_enet *enet)
{
	struct bfi_enet_set_pause_req *pause_req = &enet->pause_req;

	bfi_msgq_mhdr_set(pause_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_SET_PAUSE_REQ, 0, 0);
	pause_req->mh.num_entries = htons(
	bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_set_pause_req)));
	pause_req->tx_pause = enet->pause_config.tx_pause;
	pause_req->rx_pause = enet->pause_config.rx_pause;

	/* No completion callback; the FSM handles ENET_E_FWRESP_PAUSE. */
	bfa_msgq_cmd_set(&enet->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_set_pause_req), &pause_req->mh);
	bfa_msgq_cmd_post(&enet->bna->msgq, &enet->msgq_cmd);
}
- static void
- bna_enet_cb_chld_stopped(void *arg)
- {
- struct bna_enet *enet = (struct bna_enet *)arg;
- bfa_fsm_send_event(enet, ENET_E_CHLD_STOPPED);
- }
/* Initialize the enet object and place its FSM in the stopped state. */
static void
bna_enet_init(struct bna_enet *enet, struct bna *bna)
{
	enet->bna = bna;
	enet->flags = 0;
	enet->mtu = 0;
	enet->type = BNA_ENET_T_REGULAR;

	/* No callbacks pending at init. */
	enet->stop_cbfn = NULL;
	enet->stop_cbarg = NULL;

	enet->mtu_cbfn = NULL;

	bfa_fsm_set_state(enet, bna_enet_sm_stopped);
}
- static void
- bna_enet_uninit(struct bna_enet *enet)
- {
- enet->flags = 0;
- enet->bna = NULL;
- }
/*
 * Called when the IOC becomes ready; starts the enet only if it has
 * also been enabled by the driver.
 */
static void
bna_enet_start(struct bna_enet *enet)
{
	enet->flags |= BNA_ENET_F_IOCETH_READY;
	if (enet->flags & BNA_ENET_F_ENABLED)
		bfa_fsm_send_event(enet, ENET_E_START);
}
- static void
- bna_ioceth_cb_enet_stopped(void *arg)
- {
- struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
- bfa_fsm_send_event(ioceth, IOCETH_E_ENET_STOPPED);
- }
/* Stop the enet; completion is reported to the ioceth via callback. */
static void
bna_enet_stop(struct bna_enet *enet)
{
	enet->stop_cbfn = bna_ioceth_cb_enet_stopped;
	enet->stop_cbarg = &enet->bna->ioceth;

	enet->flags &= ~BNA_ENET_F_IOCETH_READY;
	bfa_fsm_send_event(enet, ENET_E_STOP);
}
/* Propagate an IOC failure into the enet FSM. */
static void
bna_enet_fail(struct bna_enet *enet)
{
	enet->flags &= ~BNA_ENET_F_IOCETH_READY;
	bfa_fsm_send_event(enet, ENET_E_FAIL);
}
/* Tx module stop completion; decrements the child-stop wait counter. */
void
bna_enet_cb_tx_stopped(struct bna_enet *enet)
{
	bfa_wc_down(&enet->chld_stop_wc);
}
/* Rx module stop completion; decrements the child-stop wait counter. */
void
bna_enet_cb_rx_stopped(struct bna_enet *enet)
{
	bfa_wc_down(&enet->chld_stop_wc);
}
/* Return the currently configured MTU. */
int
bna_enet_mtu_get(struct bna_enet *enet)
{
	return enet->mtu;
}
/*
 * Enable the enet.  Only valid from the stopped state; the start event
 * is sent immediately if the IOC is already ready, otherwise it fires
 * later from bna_enet_start().
 */
void
bna_enet_enable(struct bna_enet *enet)
{
	if (enet->fsm != (bfa_sm_t)bna_enet_sm_stopped)
		return;

	enet->flags |= BNA_ENET_F_ENABLED;

	if (enet->flags & BNA_ENET_F_IOCETH_READY)
		bfa_fsm_send_event(enet, ENET_E_START);
}
/*
 * Disable the enet.  For BNA_SOFT_CLEANUP the hardware is left alone and
 * the completion callback fires synchronously; otherwise a full stop is
 * driven through the FSM and @cbfn runs when it completes.
 */
void
bna_enet_disable(struct bna_enet *enet, enum bna_cleanup_type type,
		 void (*cbfn)(void *))
{
	if (type == BNA_SOFT_CLEANUP) {
		(*cbfn)(enet->bna->bnad);
		return;
	}

	enet->stop_cbfn = cbfn;
	enet->stop_cbarg = enet->bna->bnad;

	enet->flags &= ~BNA_ENET_F_ENABLED;

	bfa_fsm_send_event(enet, ENET_E_STOP);
}
/* Cache a new pause configuration and notify the FSM to apply it. */
void
bna_enet_pause_config(struct bna_enet *enet,
		      struct bna_pause_config *pause_config)
{
	enet->pause_config = *pause_config;

	bfa_fsm_send_event(enet, ENET_E_PAUSE_CFG);
}
/*
 * Set a new MTU; @cbfn is invoked once the reconfiguration completes
 * (possibly synchronously if the enet is stopped).
 */
void
bna_enet_mtu_set(struct bna_enet *enet, int mtu,
		 void (*cbfn)(struct bnad *))
{
	enet->mtu = mtu;

	enet->mtu_cbfn = cbfn;

	bfa_fsm_send_event(enet, ENET_E_MTU_CFG);
}
/* Read the permanent (factory) MAC address from the IOC into @mac. */
void
bna_enet_perm_mac_get(struct bna_enet *enet, u8 *mac)
{
	bfa_nw_ioc_get_mac(&enet->bna->ioceth.ioc, mac);
}
/* IOCETH */
/*
 * Enable mailbox interrupts.  The initial bna_intr_status_get() read
 * clears any stale interrupt status before the sources are unmasked.
 */
#define enable_mbox_intr(_ioceth)					\
do {									\
	u32 intr_status;						\
	bna_intr_status_get((_ioceth)->bna, intr_status);		\
	bnad_cb_mbox_intr_enable((_ioceth)->bna->bnad);			\
	bna_mbox_intr_enable((_ioceth)->bna);				\
} while (0)

/* Disable mailbox interrupts in hardware, then in the driver. */
#define disable_mbox_intr(_ioceth)					\
do {									\
	bna_mbox_intr_disable((_ioceth)->bna);				\
	bnad_cb_mbox_intr_disable((_ioceth)->bna->bnad);		\
} while (0)

/*
 * Invoke and clear the pending ioceth stop callback, if any.  The
 * fields are cleared before the call so the callback may re-arm them.
 */
#define call_ioceth_stop_cbfn(_ioceth)					\
do {									\
	if ((_ioceth)->stop_cbfn) {					\
		void (*cbfn)(struct bnad *);				\
		struct bnad *cbarg;					\
		cbfn = (_ioceth)->stop_cbfn;				\
		cbarg = (_ioceth)->stop_cbarg;				\
		(_ioceth)->stop_cbfn = NULL;				\
		(_ioceth)->stop_cbarg = NULL;				\
		cbfn(cbarg);						\
	}								\
} while (0)

/* Stats module lifecycle helpers; the module holds no resources. */
#define bna_stats_mod_uninit(_stats_mod)				\
do {									\
} while (0)

#define bna_stats_mod_start(_stats_mod)					\
do {									\
	(_stats_mod)->ioc_ready = true;					\
} while (0)

#define bna_stats_mod_stop(_stats_mod)					\
do {									\
	(_stats_mod)->ioc_ready = false;				\
} while (0)

/* On IOC failure also clear any in-flight get/clear busy markers. */
#define bna_stats_mod_fail(_stats_mod)					\
do {									\
	(_stats_mod)->ioc_ready = false;				\
	(_stats_mod)->stats_get_busy = false;				\
	(_stats_mod)->stats_clr_busy = false;				\
} while (0)
- static void bna_bfi_attr_get(struct bna_ioceth *ioceth);
- bfa_fsm_state_decl(bna_ioceth, stopped, struct bna_ioceth,
- enum bna_ioceth_event);
- bfa_fsm_state_decl(bna_ioceth, ioc_ready_wait, struct bna_ioceth,
- enum bna_ioceth_event);
- bfa_fsm_state_decl(bna_ioceth, enet_attr_wait, struct bna_ioceth,
- enum bna_ioceth_event);
- bfa_fsm_state_decl(bna_ioceth, ready, struct bna_ioceth,
- enum bna_ioceth_event);
- bfa_fsm_state_decl(bna_ioceth, last_resp_wait, struct bna_ioceth,
- enum bna_ioceth_event);
- bfa_fsm_state_decl(bna_ioceth, enet_stop_wait, struct bna_ioceth,
- enum bna_ioceth_event);
- bfa_fsm_state_decl(bna_ioceth, ioc_disable_wait, struct bna_ioceth,
- enum bna_ioceth_event);
- bfa_fsm_state_decl(bna_ioceth, failed, struct bna_ioceth,
- enum bna_ioceth_event);
static void
bna_ioceth_sm_stopped_entry(struct bna_ioceth *ioceth)
{
	/* Release any caller waiting for the ioceth stop to complete. */
	call_ioceth_stop_cbfn(ioceth);
}
/* Event handler for the stopped state of the ioceth state machine. */
static void
bna_ioceth_sm_stopped(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_ENABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_ready_wait);
		bfa_nw_ioc_enable(&ioceth->ioc);
		break;

	case IOCETH_E_DISABLE:
		/* Re-enter stopped to flush the stop callback. */
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
		break;

	case IOCETH_E_IOC_RESET:
		enable_mbox_intr(ioceth);
		break;

	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_ioceth_sm_ioc_ready_wait_entry(struct bna_ioceth *ioceth)
{
	/**
	 * Do not call bfa_nw_ioc_enable() here. It must be called in the
	 * previous state due to failed -> ioc_ready_wait transition.
	 */
}
/* Waiting for the IOC to report ready after enable. */
static void
bna_ioceth_sm_ioc_ready_wait(struct bna_ioceth *ioceth,
				enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	case IOCETH_E_IOC_RESET:
		enable_mbox_intr(ioceth);
		break;

	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;

	case IOCETH_E_IOC_READY:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_enet_attr_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_ioceth_sm_enet_attr_wait_entry(struct bna_ioceth *ioceth)
{
	/* Query enet attributes from firmware on entry. */
	bna_bfi_attr_get(ioceth);
}
/* Waiting for the firmware's enet attribute response. */
static void
bna_ioceth_sm_enet_attr_wait(struct bna_ioceth *ioceth,
				enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		/* Must let the outstanding request drain first. */
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_last_resp_wait);
		break;

	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;

	case IOCETH_E_ENET_ATTR_RESP:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ready);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_ioceth_sm_ready_entry(struct bna_ioceth *ioceth)
{
	/* IOC is fully up: start enet and stats, then notify the driver. */
	bna_enet_start(&ioceth->bna->enet);
	bna_stats_mod_start(&ioceth->bna->stats_mod);
	bnad_cb_ioceth_ready(ioceth->bna->bnad);
}
/* Event handler for the fully operational (ready) state. */
static void
bna_ioceth_sm_ready(struct bna_ioceth *ioceth, enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_enet_stop_wait);
		break;

	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		bna_enet_fail(&ioceth->bna->enet);
		bna_stats_mod_fail(&ioceth->bna->stats_mod);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;

	default:
		bfa_sm_fault(event);
	}
}
/* No entry action; just waiting for the last outstanding fw response. */
static void
bna_ioceth_sm_last_resp_wait_entry(struct bna_ioceth *ioceth)
{
}
/*
 * Disable requested while the attribute query was outstanding; wait for
 * either the response or an IOC failure, then disable the IOC.
 */
static void
bna_ioceth_sm_last_resp_wait(struct bna_ioceth *ioceth,
				enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_IOC_FAILED:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		disable_mbox_intr(ioceth);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	case IOCETH_E_ENET_ATTR_RESP:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_ioceth_sm_enet_stop_wait_entry(struct bna_ioceth *ioceth)
{
	/* Stop stats before the enet so no new stats requests are posted. */
	bna_stats_mod_stop(&ioceth->bna->stats_mod);
	bna_enet_stop(&ioceth->bna->enet);
}
/* Waiting for the enet to stop before disabling the IOC. */
static void
bna_ioceth_sm_enet_stop_wait(struct bna_ioceth *ioceth,
				enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_IOC_FAILED:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		disable_mbox_intr(ioceth);
		bna_enet_fail(&ioceth->bna->enet);
		bna_stats_mod_fail(&ioceth->bna->stats_mod);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	case IOCETH_E_ENET_STOPPED:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}
/* No entry action; bfa_nw_ioc_disable() was issued by the caller state. */
static void
bna_ioceth_sm_ioc_disable_wait_entry(struct bna_ioceth *ioceth)
{
}
/* Waiting for the IOC disable to complete. */
static void
bna_ioceth_sm_ioc_disable_wait(struct bna_ioceth *ioceth,
				enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_IOC_DISABLED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
		break;

	case IOCETH_E_ENET_STOPPED:
		/* This event is received due to enet failing */
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_ioceth_sm_failed_entry(struct bna_ioceth *ioceth)
{
	/* Report the IOC failure up to the driver layer. */
	bnad_cb_ioceth_failed(ioceth->bna->bnad);
}
/* Event handler for the failed state; recovers on IOC reset. */
static void
bna_ioceth_sm_failed(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	case IOCETH_E_IOC_RESET:
		enable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_ready_wait);
		break;

	case IOCETH_E_IOC_FAILED:
		/* Already failed; ignore repeated failure notifications. */
		break;

	default:
		bfa_sm_fault(event);
	}
}
/*
 * Build and post a BFI_ENET_H2I_GET_ATTR_REQ message to query enet
 * attributes (queue counts, CAM sizes, ...) from the firmware.
 */
static void
bna_bfi_attr_get(struct bna_ioceth *ioceth)
{
	struct bfi_enet_attr_req *attr_req = &ioceth->attr_req;

	bfi_msgq_mhdr_set(attr_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_GET_ATTR_REQ, 0, 0);
	attr_req->mh.num_entries = htons(
	bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_attr_req)));
	/* No completion callback; the FSM handles IOCETH_E_ENET_ATTR_RESP. */
	bfa_msgq_cmd_set(&ioceth->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_attr_req), &attr_req->mh);
	bfa_msgq_cmd_post(&ioceth->bna->msgq, &ioceth->msgq_cmd);
}
- /* IOC callback functions */
- static void
- bna_cb_ioceth_enable(void *arg, enum bfa_status error)
- {
- struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
- if (error)
- bfa_fsm_send_event(ioceth, IOCETH_E_IOC_FAILED);
- else
- bfa_fsm_send_event(ioceth, IOCETH_E_IOC_READY);
- }
- static void
- bna_cb_ioceth_disable(void *arg)
- {
- struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
- bfa_fsm_send_event(ioceth, IOCETH_E_IOC_DISABLED);
- }
/* IOC heartbeat failure callback; treated as an IOC failure. */
static void
bna_cb_ioceth_hbfail(void *arg)
{
	struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;

	bfa_fsm_send_event(ioceth, IOCETH_E_IOC_FAILED);
}
/* IOC reset notification callback. */
static void
bna_cb_ioceth_reset(void *arg)
{
	struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;

	bfa_fsm_send_event(ioceth, IOCETH_E_IOC_RESET);
}
/* Callback table registered with the IOC at attach time. */
static struct bfa_ioc_cbfn bna_ioceth_cbfn = {
	.enable_cbfn = bna_cb_ioceth_enable,
	.disable_cbfn = bna_cb_ioceth_disable,
	.hbfail_cbfn = bna_cb_ioceth_hbfail,
	.reset_cbfn = bna_cb_ioceth_reset
};
/*
 * Seed the attribute block with conservative defaults; real values are
 * filled in once the firmware attribute query completes.
 */
static void bna_attr_init(struct bna_ioceth *ioceth)
{
	ioceth->attr.num_txq = BFI_ENET_DEF_TXQ;
	ioceth->attr.num_rxp = BFI_ENET_DEF_RXP;
	ioceth->attr.num_ucmac = BFI_ENET_DEF_UCAM;
	ioceth->attr.num_mcmac = BFI_ENET_MAX_MCAM;
	ioceth->attr.max_rit_size = BFI_ENET_DEF_RITSZ;
	ioceth->attr.fw_query_complete = false;
}
/*
 * Initialize the ioceth: attach the IOC, claim DMA/kernel memory for it
 * and the common modules (CEE, flash, msgq), and enter the stopped state.
 * The kva/dma claim sequence is strictly ordered: each module consumes a
 * contiguous slice of the BNA_RES_MEM_T_COM region.
 */
static void
bna_ioceth_init(struct bna_ioceth *ioceth, struct bna *bna,
		struct bna_res_info *res_info)
{
	u64 dma;
	u8 *kva;

	ioceth->bna = bna;

	/**
	 * Attach IOC and claim:
	 *	1. DMA memory for IOC attributes
	 *	2. Kernel memory for FW trace
	 */
	bfa_nw_ioc_attach(&ioceth->ioc, ioceth, &bna_ioceth_cbfn);
	bfa_nw_ioc_pci_init(&ioceth->ioc, &bna->pcidev, BFI_PCIFN_CLASS_ETH);

	BNA_GET_DMA_ADDR(
		&res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].dma, dma);
	kva = res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].kva;
	bfa_nw_ioc_mem_claim(&ioceth->ioc, kva, dma);

	kva = res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mdl[0].kva;
	bfa_nw_ioc_debug_memclaim(&ioceth->ioc, kva);

	/**
	 * Attach common modules (Diag, SFP, CEE, Port) and claim respective
	 * DMA memory.
	 */
	BNA_GET_DMA_ADDR(
		&res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].dma, dma);
	kva = res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].kva;
	bfa_nw_cee_attach(&bna->cee, &ioceth->ioc, bna);
	bfa_nw_cee_mem_claim(&bna->cee, kva, dma);
	kva += bfa_nw_cee_meminfo();
	dma += bfa_nw_cee_meminfo();

	bfa_nw_flash_attach(&bna->flash, &ioceth->ioc, bna);
	bfa_nw_flash_memclaim(&bna->flash, kva, dma);
	kva += bfa_nw_flash_meminfo();
	dma += bfa_nw_flash_meminfo();

	bfa_msgq_attach(&bna->msgq, &ioceth->ioc);
	bfa_msgq_memclaim(&bna->msgq, kva, dma);
	bfa_msgq_regisr(&bna->msgq, BFI_MC_ENET, bna_msgq_rsp_handler, bna);
	kva += bfa_msgq_meminfo();
	dma += bfa_msgq_meminfo();

	ioceth->stop_cbfn = NULL;
	ioceth->stop_cbarg = NULL;

	bna_attr_init(ioceth);

	bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
}
/* Detach the IOC and drop the back-pointer. */
static void
bna_ioceth_uninit(struct bna_ioceth *ioceth)
{
	bfa_nw_ioc_detach(&ioceth->ioc);

	ioceth->bna = NULL;
}
/*
 * Enable the ioceth.  If already ready, just re-notify the driver; the
 * enable event is only sent from the stopped state.
 */
void
bna_ioceth_enable(struct bna_ioceth *ioceth)
{
	if (ioceth->fsm == (bfa_fsm_t)bna_ioceth_sm_ready) {
		bnad_cb_ioceth_ready(ioceth->bna->bnad);
		return;
	}

	if (ioceth->fsm == (bfa_fsm_t)bna_ioceth_sm_stopped)
		bfa_fsm_send_event(ioceth, IOCETH_E_ENABLE);
}
/*
 * Disable the ioceth.  BNA_SOFT_CLEANUP completes synchronously without
 * touching hardware; otherwise the FSM drives a full disable and the
 * driver is notified via the stop callback on completion.
 */
void
bna_ioceth_disable(struct bna_ioceth *ioceth, enum bna_cleanup_type type)
{
	if (type == BNA_SOFT_CLEANUP) {
		bnad_cb_ioceth_disabled(ioceth->bna->bnad);
		return;
	}

	ioceth->stop_cbfn = bnad_cb_ioceth_disabled;
	ioceth->stop_cbarg = ioceth->bna->bnad;

	bfa_fsm_send_event(ioceth, IOCETH_E_DISABLE);
}
- static void
- bna_ucam_mod_init(struct bna_ucam_mod *ucam_mod, struct bna *bna,
- struct bna_res_info *res_info)
- {
- int i;
- ucam_mod->ucmac = (struct bna_mac *)
- res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mdl[0].kva;
- INIT_LIST_HEAD(&ucam_mod->free_q);
- for (i = 0; i < bna->ioceth.attr.num_ucmac; i++)
- list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->free_q);
- /* A separate queue to allow synchronous setting of a list of MACs */
- INIT_LIST_HEAD(&ucam_mod->del_q);
- for (i = i; i < (bna->ioceth.attr.num_ucmac * 2); i++)
- list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->del_q);
- ucam_mod->bna = bna;
- }
/* Tear down the unicast CAM module; memory is owned by res_info. */
static void
bna_ucam_mod_uninit(struct bna_ucam_mod *ucam_mod)
{
	ucam_mod->bna = NULL;
}
- static void
- bna_mcam_mod_init(struct bna_mcam_mod *mcam_mod, struct bna *bna,
- struct bna_res_info *res_info)
- {
- int i;
- mcam_mod->mcmac = (struct bna_mac *)
- res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mdl[0].kva;
- INIT_LIST_HEAD(&mcam_mod->free_q);
- for (i = 0; i < bna->ioceth.attr.num_mcmac; i++)
- list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->free_q);
- mcam_mod->mchandle = (struct bna_mcam_handle *)
- res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.mdl[0].kva;
- INIT_LIST_HEAD(&mcam_mod->free_handle_q);
- for (i = 0; i < bna->ioceth.attr.num_mcmac; i++)
- list_add_tail(&mcam_mod->mchandle[i].qe,
- &mcam_mod->free_handle_q);
- /* A separate queue to allow synchronous setting of a list of MACs */
- INIT_LIST_HEAD(&mcam_mod->del_q);
- for (i = i; i < (bna->ioceth.attr.num_mcmac * 2); i++)
- list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->del_q);
- mcam_mod->bna = bna;
- }
/* Tear down the multicast CAM module; memory is owned by res_info. */
static void
bna_mcam_mod_uninit(struct bna_mcam_mod *mcam_mod)
{
	mcam_mod->bna = NULL;
}
/*
 * Build and post a BFI_ENET_H2I_STATS_GET_REQ asking firmware to DMA all
 * statistics into the pre-allocated host buffer.  Marks the stats module
 * busy until the response arrives.
 */
static void
bna_bfi_stats_get(struct bna *bna)
{
	struct bfi_enet_stats_req *stats_req = &bna->stats_mod.stats_get;

	bna->stats_mod.stats_get_busy = true;

	bfi_msgq_mhdr_set(stats_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_STATS_GET_REQ, 0, 0);
	stats_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_stats_req)));
	stats_req->stats_mask = htons(BFI_ENET_STATS_ALL);
	/* Restrict per-queue stats to the currently active Tx/Rx objects. */
	stats_req->tx_enet_mask = htonl(bna->tx_mod.rid_mask);
	stats_req->rx_enet_mask = htonl(bna->rx_mod.rid_mask);
	stats_req->host_buffer.a32.addr_hi = bna->stats.hw_stats_dma.msb;
	stats_req->host_buffer.a32.addr_lo = bna->stats.hw_stats_dma.lsb;

	bfa_msgq_cmd_set(&bna->stats_mod.stats_get_cmd, NULL, NULL,
		sizeof(struct bfi_enet_stats_req), &stats_req->mh);
	bfa_msgq_cmd_post(&bna->msgq, &bna->stats_mod.stats_get_cmd);
}
/*
 * Fill in the fixed (attribute-independent) resource requirements of the
 * BNA: DMA and kernel memory for the IOC, common modules, FW trace and
 * the stats buffer.
 */
void
bna_res_req(struct bna_res_info *res_info)
{
	/* DMA memory for COMMON_MODULE */
	res_info[BNA_RES_MEM_T_COM].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.len = ALIGN(
				(bfa_nw_cee_meminfo() +
				 bfa_nw_flash_meminfo() +
				 bfa_msgq_meminfo()), PAGE_SIZE);

	/* DMA memory for retrieving IOC attributes */
	res_info[BNA_RES_MEM_T_ATTR].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.len =
				ALIGN(bfa_nw_ioc_meminfo(), PAGE_SIZE);

	/* Virtual memory for retrieving fw_trc */
	res_info[BNA_RES_MEM_T_FWTRC].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mem_type = BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.len = BNA_DBG_FWTRC_LEN;

	/* DMA memory for retrieving stats */
	res_info[BNA_RES_MEM_T_STATS].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.len =
				ALIGN(sizeof(struct bfi_enet_stats),
					PAGE_SIZE);
}
/*
 * Fill in the attribute-dependent resource requirements of the module
 * layer (Tx/Rx object arrays and CAM tables), sized from the attributes
 * reported by firmware (or the defaults from bna_attr_init()).
 */
void
bna_mod_res_req(struct bna *bna, struct bna_res_info *res_info)
{
	struct bna_attr *attr = &bna->ioceth.attr;

	/* Virtual memory for Tx objects - stored by Tx module */
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.len =
		attr->num_txq * sizeof(struct bna_tx);

	/* Virtual memory for TxQ - stored by Tx module */
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.len =
		attr->num_txq * sizeof(struct bna_txq);

	/* Virtual memory for Rx objects - stored by Rx module */
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.len =
		attr->num_rxp * sizeof(struct bna_rx);

	/* Virtual memory for RxPath - stored by Rx module */
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.len =
		attr->num_rxp * sizeof(struct bna_rxp);

	/* Virtual memory for RxQ - stored by Rx module */
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.len =
		(attr->num_rxp * 2) * sizeof(struct bna_rxq);

	/* Virtual memory for Unicast MAC address - stored by ucam module */
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.len =
		(attr->num_ucmac * 2) * sizeof(struct bna_mac);

	/* Virtual memory for Multicast MAC address - stored by mcam module */
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.len =
		(attr->num_mcmac * 2) * sizeof(struct bna_mac);

	/* Virtual memory for Multicast handle - stored by mcam module */
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.len =
		attr->num_mcmac * sizeof(struct bna_mcam_handle);
}
/*
 * First-stage BNA initialization: record driver/PCI context, wire up the
 * stats DMA buffer, and initialize ioceth, enet and ethport.  Module
 * initialization (bna_mod_init) happens later, after the firmware
 * attribute query.
 */
void
bna_init(struct bna *bna, struct bnad *bnad,
		struct bfa_pcidev *pcidev, struct bna_res_info *res_info)
{
	bna->bnad = bnad;
	bna->pcidev = *pcidev;

	bna->stats.hw_stats_kva = (struct bfi_enet_stats *)
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].kva;
	bna->stats.hw_stats_dma.msb =
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.msb;
	bna->stats.hw_stats_dma.lsb =
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.lsb;

	bna_reg_addr_init(bna, &bna->pcidev);

	/* Also initializes diag, cee, sfp, phy_port, msgq */
	bna_ioceth_init(&bna->ioceth, bna, res_info);

	bna_enet_init(&bna->enet, bna);
	bna_ethport_init(&bna->ethport, bna);
}
/*
 * Second-stage initialization of the Tx/Rx and CAM modules, sized from
 * the firmware-reported attributes.  Sets BNA_MOD_F_INIT_DONE so that
 * bna_uninit() knows whether module teardown is required.
 */
void
bna_mod_init(struct bna *bna, struct bna_res_info *res_info)
{
	bna_tx_mod_init(&bna->tx_mod, bna, res_info);

	bna_rx_mod_init(&bna->rx_mod, bna, res_info);

	bna_ucam_mod_init(&bna->ucam_mod, bna, res_info);

	bna_mcam_mod_init(&bna->mcam_mod, bna, res_info);

	bna->default_mode_rid = BFI_INVALID_RID;
	bna->promisc_rid = BFI_INVALID_RID;

	bna->mod_flags |= BNA_MOD_F_INIT_DONE;
}
/* Tear everything down in reverse order of initialization. */
void
bna_uninit(struct bna *bna)
{
	/* Modules are only torn down if bna_mod_init() ran. */
	if (bna->mod_flags & BNA_MOD_F_INIT_DONE) {
		bna_mcam_mod_uninit(&bna->mcam_mod);
		bna_ucam_mod_uninit(&bna->ucam_mod);
		bna_rx_mod_uninit(&bna->rx_mod);
		bna_tx_mod_uninit(&bna->tx_mod);
		bna->mod_flags &= ~BNA_MOD_F_INIT_DONE;
	}

	bna_stats_mod_uninit(&bna->stats_mod);
	bna_ethport_uninit(&bna->ethport);
	bna_enet_uninit(&bna->enet);

	bna_ioceth_uninit(&bna->ioceth);

	bna->bnad = NULL;
}
- int
- bna_num_txq_set(struct bna *bna, int num_txq)
- {
- if (bna->ioceth.attr.fw_query_complete &&
- (num_txq <= bna->ioceth.attr.num_txq)) {
- bna->ioceth.attr.num_txq = num_txq;
- return BNA_CB_SUCCESS;
- }
- return BNA_CB_FAIL;
- }
- int
- bna_num_rxp_set(struct bna *bna, int num_rxp)
- {
- if (bna->ioceth.attr.fw_query_complete &&
- (num_rxp <= bna->ioceth.attr.num_rxp)) {
- bna->ioceth.attr.num_rxp = num_rxp;
- return BNA_CB_SUCCESS;
- }
- return BNA_CB_FAIL;
- }
- struct bna_mac *
- bna_cam_mod_mac_get(struct list_head *head)
- {
- struct bna_mac *mac;
- mac = list_first_entry_or_null(head, struct bna_mac, qe);
- if (mac)
- list_del(&mac->qe);
- return mac;
- }
/*
 * Pop a free multicast handle, or return NULL if none are available.
 */
struct bna_mcam_handle *
bna_mcam_mod_handle_get(struct bna_mcam_mod *mcam_mod)
{
	struct bna_mcam_handle *handle;

	handle = list_first_entry_or_null(&mcam_mod->free_handle_q,
					  struct bna_mcam_handle, qe);
	if (handle)
		list_del(&handle->qe);

	return handle;
}
/* Return a multicast handle to the free pool. */
void
bna_mcam_mod_handle_put(struct bna_mcam_mod *mcam_mod,
			struct bna_mcam_handle *handle)
{
	list_add_tail(&handle->qe, &mcam_mod->free_handle_q);
}
/*
 * Request a hardware statistics snapshot.  Fails immediately (via the
 * driver callback) if the IOC is not ready or a request is already in
 * flight; otherwise posts the firmware request asynchronously.
 */
void
bna_hw_stats_get(struct bna *bna)
{
	if (!bna->stats_mod.ioc_ready) {
		bnad_cb_stats_get(bna->bnad, BNA_CB_FAIL, &bna->stats);
		return;
	}
	if (bna->stats_mod.stats_get_busy) {
		bnad_cb_stats_get(bna->bnad, BNA_CB_BUSY, &bna->stats);
		return;
	}

	bna_bfi_stats_get(bna);
}
|