bna_enet.c

/*
 * Linux network driver for QLogic BR-series Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014-2015 QLogic Corporation
 * All rights reserved
 * www.qlogic.com
 */
#include "bna.h"

static inline int
ethport_can_be_up(struct bna_ethport *ethport)
{
	int ready = 0;

	if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
		ready = ((ethport->flags & BNA_ETHPORT_F_ADMIN_UP) &&
			 (ethport->flags & BNA_ETHPORT_F_RX_STARTED) &&
			 (ethport->flags & BNA_ETHPORT_F_PORT_ENABLED));
	else
		ready = ((ethport->flags & BNA_ETHPORT_F_ADMIN_UP) &&
			 (ethport->flags & BNA_ETHPORT_F_RX_STARTED) &&
			 !(ethport->flags & BNA_ETHPORT_F_PORT_ENABLED));
	return ready;
}

#define ethport_is_up ethport_can_be_up
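/*
 * Readiness rule: a regular port may come up only while the physical
 * port is enabled, whereas a loopback port may come up only while the
 * physical port is *disabled*, presumably so diagnostic loopback never
 * runs with the port on the wire. ethport_is_up aliases the same
 * predicate because the flags that gate "can be up" are exactly the
 * flags held for as long as the port stays up.
 */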
enum bna_ethport_event {
	ETHPORT_E_START = 1,
	ETHPORT_E_STOP = 2,
	ETHPORT_E_FAIL = 3,
	ETHPORT_E_UP = 4,
	ETHPORT_E_DOWN = 5,
	ETHPORT_E_FWRESP_UP_OK = 6,
	ETHPORT_E_FWRESP_DOWN = 7,
	ETHPORT_E_FWRESP_UP_FAIL = 8,
};

enum bna_enet_event {
	ENET_E_START = 1,
	ENET_E_STOP = 2,
	ENET_E_FAIL = 3,
	ENET_E_PAUSE_CFG = 4,
	ENET_E_MTU_CFG = 5,
	ENET_E_FWRESP_PAUSE = 6,
	ENET_E_CHLD_STOPPED = 7,
};

enum bna_ioceth_event {
	IOCETH_E_ENABLE = 1,
	IOCETH_E_DISABLE = 2,
	IOCETH_E_IOC_RESET = 3,
	IOCETH_E_IOC_FAILED = 4,
	IOCETH_E_IOC_READY = 5,
	IOCETH_E_ENET_ATTR_RESP = 6,
	IOCETH_E_ENET_STOPPED = 7,
	IOCETH_E_IOC_DISABLED = 8,
};

#define bna_stats_copy(_name, _type) \
do { \
	count = sizeof(struct bfi_enet_stats_ ## _type) / sizeof(u64); \
	stats_src = (u64 *)&bna->stats.hw_stats_kva->_name ## _stats; \
	stats_dst = (u64 *)&bna->stats.hw_stats._name ## _stats; \
	for (i = 0; i < count; i++) \
		stats_dst[i] = be64_to_cpu(stats_src[i]); \
} while (0)
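/*
 * The copy macro deliberately leans on variables in the caller's scope
 * (count, i, stats_src, stats_dst), so it is only usable from
 * bna_bfi_stats_get_rsp() below. Hardware statistics arrive as arrays
 * of big-endian u64 counters; be64_to_cpu() converts each word to host
 * byte order during the copy.
 */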
/*
 * FW response handlers
 */

static void
bna_bfi_ethport_enable_aen(struct bna_ethport *ethport,
			   struct bfi_msgq_mhdr *msghdr)
{
	ethport->flags |= BNA_ETHPORT_F_PORT_ENABLED;

	if (ethport_can_be_up(ethport))
		bfa_fsm_send_event(ethport, ETHPORT_E_UP);
}

static void
bna_bfi_ethport_disable_aen(struct bna_ethport *ethport,
			    struct bfi_msgq_mhdr *msghdr)
{
	int ethport_up = ethport_is_up(ethport);

	ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;

	if (ethport_up)
		bfa_fsm_send_event(ethport, ETHPORT_E_DOWN);
}

static void
bna_bfi_ethport_admin_rsp(struct bna_ethport *ethport,
			  struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_enable_req *admin_req =
		&ethport->bfi_enet_cmd.admin_req;
	struct bfi_enet_rsp *rsp =
		container_of(msghdr, struct bfi_enet_rsp, mh);

	switch (admin_req->enable) {
	case BNA_STATUS_T_ENABLED:
		if (rsp->error == BFI_ENET_CMD_OK)
			bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_OK);
		else {
			ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;
			bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_FAIL);
		}
		break;
	case BNA_STATUS_T_DISABLED:
		bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_DOWN);
		ethport->link_status = BNA_LINK_DOWN;
		ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
		break;
	}
}

static void
bna_bfi_ethport_lpbk_rsp(struct bna_ethport *ethport,
			 struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_diag_lb_req *diag_lb_req =
		&ethport->bfi_enet_cmd.lpbk_req;
	struct bfi_enet_rsp *rsp =
		container_of(msghdr, struct bfi_enet_rsp, mh);

	switch (diag_lb_req->enable) {
	case BNA_STATUS_T_ENABLED:
		if (rsp->error == BFI_ENET_CMD_OK)
			bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_OK);
		else {
			ethport->flags &= ~BNA_ETHPORT_F_ADMIN_UP;
			bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_FAIL);
		}
		break;
	case BNA_STATUS_T_DISABLED:
		bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_DOWN);
		break;
	}
}

static void
bna_bfi_pause_set_rsp(struct bna_enet *enet, struct bfi_msgq_mhdr *msghdr)
{
	bfa_fsm_send_event(enet, ENET_E_FWRESP_PAUSE);
}

static void
bna_bfi_attr_get_rsp(struct bna_ioceth *ioceth,
		     struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_attr_rsp *rsp =
		container_of(msghdr, struct bfi_enet_attr_rsp, mh);

	/**
	 * Store only if not set earlier, since BNAD can override the HW
	 * attributes
	 */
	if (!ioceth->attr.fw_query_complete) {
		ioceth->attr.num_txq = ntohl(rsp->max_cfg);
		ioceth->attr.num_rxp = ntohl(rsp->max_cfg);
		ioceth->attr.num_ucmac = ntohl(rsp->max_ucmac);
		ioceth->attr.num_mcmac = BFI_ENET_MAX_MCAM;
		ioceth->attr.max_rit_size = ntohl(rsp->rit_size);
		ioceth->attr.fw_query_complete = true;
	}

	bfa_fsm_send_event(ioceth, IOCETH_E_ENET_ATTR_RESP);
}

static void
bna_bfi_stats_get_rsp(struct bna *bna, struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_stats_req *stats_req = &bna->stats_mod.stats_get;
	u64 *stats_src;
	u64 *stats_dst;
	u32 tx_enet_mask = ntohl(stats_req->tx_enet_mask);
	u32 rx_enet_mask = ntohl(stats_req->rx_enet_mask);
	int count;
	int i;

	bna_stats_copy(mac, mac);
	bna_stats_copy(bpc, bpc);
	bna_stats_copy(rad, rad);
	bna_stats_copy(rlb, rad);
	bna_stats_copy(fc_rx, fc_rx);
	bna_stats_copy(fc_tx, fc_tx);

	stats_src = (u64 *)&(bna->stats.hw_stats_kva->rxf_stats[0]);

	/* Copy Rxf stats to SW area, scatter them while copying */
	for (i = 0; i < BFI_ENET_CFG_MAX; i++) {
		stats_dst = (u64 *)&(bna->stats.hw_stats.rxf_stats[i]);
		memset(stats_dst, 0, sizeof(struct bfi_enet_stats_rxf));
		if (rx_enet_mask & BIT(i)) {
			int k;

			count = sizeof(struct bfi_enet_stats_rxf) /
				sizeof(u64);
			for (k = 0; k < count; k++) {
				stats_dst[k] = be64_to_cpu(*stats_src);
				stats_src++;
			}
		}
	}

	/* Copy Txf stats to SW area, scatter them while copying */
	for (i = 0; i < BFI_ENET_CFG_MAX; i++) {
		stats_dst = (u64 *)&(bna->stats.hw_stats.txf_stats[i]);
		memset(stats_dst, 0, sizeof(struct bfi_enet_stats_txf));
		if (tx_enet_mask & BIT(i)) {
			int k;

			count = sizeof(struct bfi_enet_stats_txf) /
				sizeof(u64);
			for (k = 0; k < count; k++) {
				stats_dst[k] = be64_to_cpu(*stats_src);
				stats_src++;
			}
		}
	}

	bna->stats_mod.stats_get_busy = false;

	bnad_cb_stats_get(bna->bnad, BNA_CB_SUCCESS, &bna->stats);
}
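/*
 * The firmware packs rxf/txf statistics contiguously, emitting a block
 * only for each function whose bit is set in the request mask. The
 * loops above therefore advance stats_src only for enabled entries
 * while walking stats_dst over all BFI_ENET_CFG_MAX slots, zero-filling
 * the slots the firmware skipped -- a gather-to-scatter copy.
 */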
static void
bna_bfi_ethport_linkup_aen(struct bna_ethport *ethport,
			   struct bfi_msgq_mhdr *msghdr)
{
	ethport->link_status = BNA_LINK_UP;

	/* Dispatch events */
	ethport->link_cbfn(ethport->bna->bnad, ethport->link_status);
}

static void
bna_bfi_ethport_linkdown_aen(struct bna_ethport *ethport,
			     struct bfi_msgq_mhdr *msghdr)
{
	ethport->link_status = BNA_LINK_DOWN;

	/* Dispatch events */
	ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
}

static void
bna_err_handler(struct bna *bna, u32 intr_status)
{
	if (BNA_IS_HALT_INTR(bna, intr_status))
		bna_halt_clear(bna);

	bfa_nw_ioc_error_isr(&bna->ioceth.ioc);
}

void
bna_mbox_handler(struct bna *bna, u32 intr_status)
{
	if (BNA_IS_ERR_INTR(bna, intr_status)) {
		bna_err_handler(bna, intr_status);
		return;
	}
	if (BNA_IS_MBOX_INTR(bna, intr_status))
		bfa_nw_ioc_mbox_isr(&bna->ioceth.ioc);
}
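/*
 * Error interrupts take precedence: when the status word carries an
 * error, bna_mbox_handler() routes it to the IOC error ISR and returns
 * without touching the mailbox, so no further mailbox traffic is
 * processed on a failing IOC.
 */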
static void
bna_msgq_rsp_handler(void *arg, struct bfi_msgq_mhdr *msghdr)
{
	struct bna *bna = (struct bna *)arg;
	struct bna_tx *tx;
	struct bna_rx *rx;

	switch (msghdr->msg_id) {
	case BFI_ENET_I2H_RX_CFG_SET_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rx_enet_start_rsp(rx, msghdr);
		break;
	case BFI_ENET_I2H_RX_CFG_CLR_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rx_enet_stop_rsp(rx, msghdr);
		break;
	case BFI_ENET_I2H_RIT_CFG_RSP:
	case BFI_ENET_I2H_RSS_CFG_RSP:
	case BFI_ENET_I2H_RSS_ENABLE_RSP:
	case BFI_ENET_I2H_RX_PROMISCUOUS_RSP:
	case BFI_ENET_I2H_RX_DEFAULT_RSP:
	case BFI_ENET_I2H_MAC_UCAST_CLR_RSP:
	case BFI_ENET_I2H_MAC_UCAST_ADD_RSP:
	case BFI_ENET_I2H_MAC_UCAST_DEL_RSP:
	case BFI_ENET_I2H_MAC_MCAST_DEL_RSP:
	case BFI_ENET_I2H_MAC_MCAST_FILTER_RSP:
	case BFI_ENET_I2H_RX_VLAN_SET_RSP:
	case BFI_ENET_I2H_RX_VLAN_STRIP_ENABLE_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rxf_cfg_rsp(&rx->rxf, msghdr);
		break;
	case BFI_ENET_I2H_MAC_UCAST_SET_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rxf_ucast_set_rsp(&rx->rxf, msghdr);
		break;
	case BFI_ENET_I2H_MAC_MCAST_ADD_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rxf_mcast_add_rsp(&rx->rxf, msghdr);
		break;
	case BFI_ENET_I2H_TX_CFG_SET_RSP:
		bna_tx_from_rid(bna, msghdr->enet_id, tx);
		if (tx)
			bna_bfi_tx_enet_start_rsp(tx, msghdr);
		break;
	case BFI_ENET_I2H_TX_CFG_CLR_RSP:
		bna_tx_from_rid(bna, msghdr->enet_id, tx);
		if (tx)
			bna_bfi_tx_enet_stop_rsp(tx, msghdr);
		break;
	case BFI_ENET_I2H_PORT_ADMIN_RSP:
		bna_bfi_ethport_admin_rsp(&bna->ethport, msghdr);
		break;
	case BFI_ENET_I2H_DIAG_LOOPBACK_RSP:
		bna_bfi_ethport_lpbk_rsp(&bna->ethport, msghdr);
		break;
	case BFI_ENET_I2H_SET_PAUSE_RSP:
		bna_bfi_pause_set_rsp(&bna->enet, msghdr);
		break;
	case BFI_ENET_I2H_GET_ATTR_RSP:
		bna_bfi_attr_get_rsp(&bna->ioceth, msghdr);
		break;
	case BFI_ENET_I2H_STATS_GET_RSP:
		bna_bfi_stats_get_rsp(bna, msghdr);
		break;
	case BFI_ENET_I2H_STATS_CLR_RSP:
		/* No-op */
		break;
	case BFI_ENET_I2H_LINK_UP_AEN:
		bna_bfi_ethport_linkup_aen(&bna->ethport, msghdr);
		break;
	case BFI_ENET_I2H_LINK_DOWN_AEN:
		bna_bfi_ethport_linkdown_aen(&bna->ethport, msghdr);
		break;
	case BFI_ENET_I2H_PORT_ENABLE_AEN:
		bna_bfi_ethport_enable_aen(&bna->ethport, msghdr);
		break;
	case BFI_ENET_I2H_PORT_DISABLE_AEN:
		bna_bfi_ethport_disable_aen(&bna->ethport, msghdr);
		break;
	case BFI_ENET_I2H_BW_UPDATE_AEN:
		bna_bfi_bw_update_aen(&bna->tx_mod);
		break;
	default:
		break;
	}
}

/* ETHPORT */

#define call_ethport_stop_cbfn(_ethport) \
do { \
	if ((_ethport)->stop_cbfn) { \
		void (*cbfn)(struct bna_enet *); \
		cbfn = (_ethport)->stop_cbfn; \
		(_ethport)->stop_cbfn = NULL; \
		cbfn(&(_ethport)->bna->enet); \
	} \
} while (0)

#define call_ethport_adminup_cbfn(ethport, status) \
do { \
	if ((ethport)->adminup_cbfn) { \
		void (*cbfn)(struct bnad *, enum bna_cb_status); \
		cbfn = (ethport)->adminup_cbfn; \
		(ethport)->adminup_cbfn = NULL; \
		cbfn((ethport)->bna->bnad, status); \
	} \
} while (0)
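/*
 * One-shot callback convention: each call_*_cbfn macro copies the
 * stored function pointer into a local, clears the stored pointer, and
 * only then invokes the callback. This guarantees the completion fires
 * at most once even if the invoked callback re-enters the state
 * machine and retriggers the same code path.
 */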
static void
bna_bfi_ethport_admin_up(struct bna_ethport *ethport)
{
	struct bfi_enet_enable_req *admin_up_req =
		&ethport->bfi_enet_cmd.admin_req;

	bfi_msgq_mhdr_set(admin_up_req->mh, BFI_MC_ENET,
			  BFI_ENET_H2I_PORT_ADMIN_UP_REQ, 0, 0);
	admin_up_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	admin_up_req->enable = BNA_STATUS_T_ENABLED;

	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
			 sizeof(struct bfi_enet_enable_req), &admin_up_req->mh);
	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
}

static void
bna_bfi_ethport_admin_down(struct bna_ethport *ethport)
{
	struct bfi_enet_enable_req *admin_down_req =
		&ethport->bfi_enet_cmd.admin_req;

	bfi_msgq_mhdr_set(admin_down_req->mh, BFI_MC_ENET,
			  BFI_ENET_H2I_PORT_ADMIN_UP_REQ, 0, 0);
	admin_down_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	admin_down_req->enable = BNA_STATUS_T_DISABLED;

	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
			 sizeof(struct bfi_enet_enable_req), &admin_down_req->mh);
	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
}

static void
bna_bfi_ethport_lpbk_up(struct bna_ethport *ethport)
{
	struct bfi_enet_diag_lb_req *lpbk_up_req =
		&ethport->bfi_enet_cmd.lpbk_req;

	bfi_msgq_mhdr_set(lpbk_up_req->mh, BFI_MC_ENET,
			  BFI_ENET_H2I_DIAG_LOOPBACK_REQ, 0, 0);
	lpbk_up_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_diag_lb_req)));
	lpbk_up_req->mode = (ethport->bna->enet.type ==
			     BNA_ENET_T_LOOPBACK_INTERNAL) ?
			    BFI_ENET_DIAG_LB_OPMODE_EXT :
			    BFI_ENET_DIAG_LB_OPMODE_CBL;
	lpbk_up_req->enable = BNA_STATUS_T_ENABLED;

	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
			 sizeof(struct bfi_enet_diag_lb_req), &lpbk_up_req->mh);
	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
}

static void
bna_bfi_ethport_lpbk_down(struct bna_ethport *ethport)
{
	struct bfi_enet_diag_lb_req *lpbk_down_req =
		&ethport->bfi_enet_cmd.lpbk_req;

	bfi_msgq_mhdr_set(lpbk_down_req->mh, BFI_MC_ENET,
			  BFI_ENET_H2I_DIAG_LOOPBACK_REQ, 0, 0);
	lpbk_down_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_diag_lb_req)));
	lpbk_down_req->enable = BNA_STATUS_T_DISABLED;

	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
			 sizeof(struct bfi_enet_diag_lb_req), &lpbk_down_req->mh);
	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
}

static void
bna_bfi_ethport_up(struct bna_ethport *ethport)
{
	if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
		bna_bfi_ethport_admin_up(ethport);
	else
		bna_bfi_ethport_lpbk_up(ethport);
}

static void
bna_bfi_ethport_down(struct bna_ethport *ethport)
{
	if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
		bna_bfi_ethport_admin_down(ethport);
	else
		bna_bfi_ethport_lpbk_down(ethport);
}

bfa_fsm_state_decl(bna_ethport, stopped, struct bna_ethport,
		   enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, down, struct bna_ethport,
		   enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, up_resp_wait, struct bna_ethport,
		   enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, down_resp_wait, struct bna_ethport,
		   enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, up, struct bna_ethport,
		   enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, last_resp_wait, struct bna_ethport,
		   enum bna_ethport_event);
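/*
 * Ethport state machine, driven by bfa_fsm_send_event():
 *
 *   stopped --E_START--> down --E_UP--> up_resp_wait --FWRESP_UP_OK--> up
 *   up_resp_wait and down_resp_wait flip into each other on E_DOWN/E_UP
 *   when configuration flaps while a firmware response is outstanding;
 *   E_STOP from any response-wait state funnels into last_resp_wait.
 *
 * Note that admin up and down share one message ID
 * (BFI_ENET_H2I_PORT_ADMIN_UP_REQ); the direction rides in the
 * request's enable field, which is why bna_bfi_ethport_admin_rsp()
 * switches on admin_req->enable to interpret the response.
 */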
static void
bna_ethport_sm_stopped_entry(struct bna_ethport *ethport)
{
	call_ethport_stop_cbfn(ethport);
}

static void
bna_ethport_sm_stopped(struct bna_ethport *ethport,
		       enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_START:
		bfa_fsm_set_state(ethport, bna_ethport_sm_down);
		break;
	case ETHPORT_E_STOP:
		call_ethport_stop_cbfn(ethport);
		break;
	case ETHPORT_E_FAIL:
		/* No-op */
		break;
	case ETHPORT_E_DOWN:
		/* This event is received due to Rx objects failing */
		/* No-op */
		break;
	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ethport_sm_down_entry(struct bna_ethport *ethport)
{
}

static void
bna_ethport_sm_down(struct bna_ethport *ethport,
		    enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;
	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;
	case ETHPORT_E_UP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_up_resp_wait);
		bna_bfi_ethport_up(ethport);
		break;
	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ethport_sm_up_resp_wait_entry(struct bna_ethport *ethport)
{
}

static void
bna_ethport_sm_up_resp_wait(struct bna_ethport *ethport,
			    enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
		break;
	case ETHPORT_E_FAIL:
		call_ethport_adminup_cbfn(ethport, BNA_CB_FAIL);
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;
	case ETHPORT_E_DOWN:
		call_ethport_adminup_cbfn(ethport, BNA_CB_INTERRUPT);
		bfa_fsm_set_state(ethport, bna_ethport_sm_down_resp_wait);
		break;
	case ETHPORT_E_FWRESP_UP_OK:
		call_ethport_adminup_cbfn(ethport, BNA_CB_SUCCESS);
		bfa_fsm_set_state(ethport, bna_ethport_sm_up);
		break;
	case ETHPORT_E_FWRESP_UP_FAIL:
		call_ethport_adminup_cbfn(ethport, BNA_CB_FAIL);
		bfa_fsm_set_state(ethport, bna_ethport_sm_down);
		break;
	case ETHPORT_E_FWRESP_DOWN:
		/* down_resp_wait -> up_resp_wait transition on ETHPORT_E_UP */
		bna_bfi_ethport_up(ethport);
		break;
	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ethport_sm_down_resp_wait_entry(struct bna_ethport *ethport)
{
	/**
	 * NOTE: Do not call bna_bfi_ethport_down() here. That will over step
	 * mbox due to up_resp_wait -> down_resp_wait transition on event
	 * ETHPORT_E_DOWN
	 */
}

static void
bna_ethport_sm_down_resp_wait(struct bna_ethport *ethport,
			      enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
		break;
	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;
	case ETHPORT_E_UP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_up_resp_wait);
		break;
	case ETHPORT_E_FWRESP_UP_OK:
		/* up_resp_wait->down_resp_wait transition on ETHPORT_E_DOWN */
		bna_bfi_ethport_down(ethport);
		break;
	case ETHPORT_E_FWRESP_UP_FAIL:
	case ETHPORT_E_FWRESP_DOWN:
		bfa_fsm_set_state(ethport, bna_ethport_sm_down);
		break;
	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ethport_sm_up_entry(struct bna_ethport *ethport)
{
}

static void
bna_ethport_sm_up(struct bna_ethport *ethport,
		  enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
		bna_bfi_ethport_down(ethport);
		break;
	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;
	case ETHPORT_E_DOWN:
		bfa_fsm_set_state(ethport, bna_ethport_sm_down_resp_wait);
		bna_bfi_ethport_down(ethport);
		break;
	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ethport_sm_last_resp_wait_entry(struct bna_ethport *ethport)
{
}

static void
bna_ethport_sm_last_resp_wait(struct bna_ethport *ethport,
			      enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;
	case ETHPORT_E_DOWN:
		/**
		 * This event is received due to Rx objects stopping in
		 * parallel to ethport
		 */
		/* No-op */
		break;
	case ETHPORT_E_FWRESP_UP_OK:
		/* up_resp_wait->last_resp_wait transition on ETHPORT_E_STOP */
		bna_bfi_ethport_down(ethport);
		break;
	case ETHPORT_E_FWRESP_UP_FAIL:
	case ETHPORT_E_FWRESP_DOWN:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;
	default:
		bfa_sm_fault(event);
	}
}
static void
bna_ethport_init(struct bna_ethport *ethport, struct bna *bna)
{
	ethport->flags |= (BNA_ETHPORT_F_ADMIN_UP | BNA_ETHPORT_F_PORT_ENABLED);
	ethport->bna = bna;

	ethport->link_status = BNA_LINK_DOWN;
	ethport->link_cbfn = bnad_cb_ethport_link_status;

	ethport->rx_started_count = 0;

	ethport->stop_cbfn = NULL;
	ethport->adminup_cbfn = NULL;

	bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
}

static void
bna_ethport_uninit(struct bna_ethport *ethport)
{
	ethport->flags &= ~BNA_ETHPORT_F_ADMIN_UP;
	ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;
	ethport->bna = NULL;
}

static void
bna_ethport_start(struct bna_ethport *ethport)
{
	bfa_fsm_send_event(ethport, ETHPORT_E_START);
}

static void
bna_enet_cb_ethport_stopped(struct bna_enet *enet)
{
	bfa_wc_down(&enet->chld_stop_wc);
}

static void
bna_ethport_stop(struct bna_ethport *ethport)
{
	ethport->stop_cbfn = bna_enet_cb_ethport_stopped;
	bfa_fsm_send_event(ethport, ETHPORT_E_STOP);
}

static void
bna_ethport_fail(struct bna_ethport *ethport)
{
	/* Reset the physical port status to enabled */
	ethport->flags |= BNA_ETHPORT_F_PORT_ENABLED;

	if (ethport->link_status != BNA_LINK_DOWN) {
		ethport->link_status = BNA_LINK_DOWN;
		ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
	}
	bfa_fsm_send_event(ethport, ETHPORT_E_FAIL);
}

/* Should be called only when ethport is disabled */
void
bna_ethport_cb_rx_started(struct bna_ethport *ethport)
{
	ethport->rx_started_count++;

	if (ethport->rx_started_count == 1) {
		ethport->flags |= BNA_ETHPORT_F_RX_STARTED;

		if (ethport_can_be_up(ethport))
			bfa_fsm_send_event(ethport, ETHPORT_E_UP);
	}
}

void
bna_ethport_cb_rx_stopped(struct bna_ethport *ethport)
{
	int ethport_up = ethport_is_up(ethport);

	ethport->rx_started_count--;

	if (ethport->rx_started_count == 0) {
		ethport->flags &= ~BNA_ETHPORT_F_RX_STARTED;

		if (ethport_up)
			bfa_fsm_send_event(ethport, ETHPORT_E_DOWN);
	}
}

/* ENET */

#define bna_enet_chld_start(enet) \
do { \
	enum bna_tx_type tx_type = \
		((enet)->type == BNA_ENET_T_REGULAR) ? \
		BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK; \
	enum bna_rx_type rx_type = \
		((enet)->type == BNA_ENET_T_REGULAR) ? \
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK; \
	bna_ethport_start(&(enet)->bna->ethport); \
	bna_tx_mod_start(&(enet)->bna->tx_mod, tx_type); \
	bna_rx_mod_start(&(enet)->bna->rx_mod, rx_type); \
} while (0)

#define bna_enet_chld_stop(enet) \
do { \
	enum bna_tx_type tx_type = \
		((enet)->type == BNA_ENET_T_REGULAR) ? \
		BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK; \
	enum bna_rx_type rx_type = \
		((enet)->type == BNA_ENET_T_REGULAR) ? \
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK; \
	bfa_wc_init(&(enet)->chld_stop_wc, bna_enet_cb_chld_stopped, (enet));\
	bfa_wc_up(&(enet)->chld_stop_wc); \
	bna_ethport_stop(&(enet)->bna->ethport); \
	bfa_wc_up(&(enet)->chld_stop_wc); \
	bna_tx_mod_stop(&(enet)->bna->tx_mod, tx_type); \
	bfa_wc_up(&(enet)->chld_stop_wc); \
	bna_rx_mod_stop(&(enet)->bna->rx_mod, rx_type); \
	bfa_wc_wait(&(enet)->chld_stop_wc); \
} while (0)
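/*
 * Child-stop synchronization uses the bfa_wc waiting counter: each
 * bfa_wc_up() takes a reference before the matching stop request is
 * issued, and each child's stop-complete callback drops one via
 * bfa_wc_down(). Assuming the usual bfa_wc convention (bfa_wc_init()
 * takes an initial reference that bfa_wc_wait() drops),
 * bna_enet_cb_chld_stopped() fires exactly once, after the ethport,
 * Tx objects and Rx objects have all stopped -- in any completion
 * order, including synchronous completion.
 */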
#define bna_enet_chld_fail(enet) \
do { \
	bna_ethport_fail(&(enet)->bna->ethport); \
	bna_tx_mod_fail(&(enet)->bna->tx_mod); \
	bna_rx_mod_fail(&(enet)->bna->rx_mod); \
} while (0)

#define bna_enet_rx_start(enet) \
do { \
	enum bna_rx_type rx_type = \
		((enet)->type == BNA_ENET_T_REGULAR) ? \
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK; \
	bna_rx_mod_start(&(enet)->bna->rx_mod, rx_type); \
} while (0)

#define bna_enet_rx_stop(enet) \
do { \
	enum bna_rx_type rx_type = \
		((enet)->type == BNA_ENET_T_REGULAR) ? \
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK; \
	bfa_wc_init(&(enet)->chld_stop_wc, bna_enet_cb_chld_stopped, (enet));\
	bfa_wc_up(&(enet)->chld_stop_wc); \
	bna_rx_mod_stop(&(enet)->bna->rx_mod, rx_type); \
	bfa_wc_wait(&(enet)->chld_stop_wc); \
} while (0)

#define call_enet_stop_cbfn(enet) \
do { \
	if ((enet)->stop_cbfn) { \
		void (*cbfn)(void *); \
		void *cbarg; \
		cbfn = (enet)->stop_cbfn; \
		cbarg = (enet)->stop_cbarg; \
		(enet)->stop_cbfn = NULL; \
		(enet)->stop_cbarg = NULL; \
		cbfn(cbarg); \
	} \
} while (0)

#define call_enet_mtu_cbfn(enet) \
do { \
	if ((enet)->mtu_cbfn) { \
		void (*cbfn)(struct bnad *); \
		cbfn = (enet)->mtu_cbfn; \
		(enet)->mtu_cbfn = NULL; \
		cbfn((enet)->bna->bnad); \
	} \
} while (0)

static void bna_enet_cb_chld_stopped(void *arg);
static void bna_bfi_pause_set(struct bna_enet *enet);

bfa_fsm_state_decl(bna_enet, stopped, struct bna_enet,
		   enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, pause_init_wait, struct bna_enet,
		   enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, last_resp_wait, struct bna_enet,
		   enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, started, struct bna_enet,
		   enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, cfg_wait, struct bna_enet,
		   enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, cfg_stop_wait, struct bna_enet,
		   enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, chld_stop_wait, struct bna_enet,
		   enum bna_enet_event);
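/*
 * Enet state machine: stopped --E_START--> pause_init_wait (program
 * the initial pause configuration) --FWRESP_PAUSE--> started. From
 * started, a pause change goes through cfg_wait via
 * bna_bfi_pause_set(), while an MTU change goes through cfg_wait by
 * stopping and restarting the Rx path with bna_enet_rx_stop()/
 * bna_enet_rx_start(), presumably because the new MTU only takes
 * effect when the Rx queues are reconfigured. The BNA_ENET_F_*_CHANGED
 * flags coalesce configuration requests that arrive while a previous
 * one is still in flight.
 */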
static void
bna_enet_sm_stopped_entry(struct bna_enet *enet)
{
	call_enet_mtu_cbfn(enet);
	call_enet_stop_cbfn(enet);
}

static void
bna_enet_sm_stopped(struct bna_enet *enet, enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_START:
		bfa_fsm_set_state(enet, bna_enet_sm_pause_init_wait);
		break;
	case ENET_E_STOP:
		call_enet_stop_cbfn(enet);
		break;
	case ENET_E_FAIL:
		/* No-op */
		break;
	case ENET_E_PAUSE_CFG:
		break;
	case ENET_E_MTU_CFG:
		call_enet_mtu_cbfn(enet);
		break;
	case ENET_E_CHLD_STOPPED:
		/**
		 * This event is received due to Ethport, Tx and Rx objects
		 * failing
		 */
		/* No-op */
		break;
	default:
		bfa_sm_fault(event);
	}
}

static void
bna_enet_sm_pause_init_wait_entry(struct bna_enet *enet)
{
	bna_bfi_pause_set(enet);
}

static void
bna_enet_sm_pause_init_wait(struct bna_enet *enet,
			    enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_STOP:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_last_resp_wait);
		break;
	case ENET_E_FAIL:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		break;
	case ENET_E_PAUSE_CFG:
		enet->flags |= BNA_ENET_F_PAUSE_CHANGED;
		break;
	case ENET_E_MTU_CFG:
		/* No-op */
		break;
	case ENET_E_FWRESP_PAUSE:
		if (enet->flags & BNA_ENET_F_PAUSE_CHANGED) {
			enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
			bna_bfi_pause_set(enet);
		} else {
			bfa_fsm_set_state(enet, bna_enet_sm_started);
			bna_enet_chld_start(enet);
		}
		break;
	default:
		bfa_sm_fault(event);
	}
}

static void
bna_enet_sm_last_resp_wait_entry(struct bna_enet *enet)
{
	enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
}

static void
bna_enet_sm_last_resp_wait(struct bna_enet *enet,
			   enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_FAIL:
	case ENET_E_FWRESP_PAUSE:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		break;
	default:
		bfa_sm_fault(event);
	}
}

static void
bna_enet_sm_started_entry(struct bna_enet *enet)
{
	/**
	 * NOTE: Do not call bna_enet_chld_start() here, since it will be
	 * inadvertently called during cfg_wait->started transition as well
	 */
	call_enet_mtu_cbfn(enet);
}

static void
bna_enet_sm_started(struct bna_enet *enet,
		    enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_STOP:
		bfa_fsm_set_state(enet, bna_enet_sm_chld_stop_wait);
		break;
	case ENET_E_FAIL:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;
	case ENET_E_PAUSE_CFG:
		bfa_fsm_set_state(enet, bna_enet_sm_cfg_wait);
		bna_bfi_pause_set(enet);
		break;
	case ENET_E_MTU_CFG:
		bfa_fsm_set_state(enet, bna_enet_sm_cfg_wait);
		bna_enet_rx_stop(enet);
		break;
	default:
		bfa_sm_fault(event);
	}
}

static void
bna_enet_sm_cfg_wait_entry(struct bna_enet *enet)
{
}

static void
bna_enet_sm_cfg_wait(struct bna_enet *enet,
		     enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_STOP:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_cfg_stop_wait);
		break;
	case ENET_E_FAIL:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;
	case ENET_E_PAUSE_CFG:
		enet->flags |= BNA_ENET_F_PAUSE_CHANGED;
		break;
	case ENET_E_MTU_CFG:
		enet->flags |= BNA_ENET_F_MTU_CHANGED;
		break;
	case ENET_E_CHLD_STOPPED:
		bna_enet_rx_start(enet);
		/* Fall through */
	case ENET_E_FWRESP_PAUSE:
		if (enet->flags & BNA_ENET_F_PAUSE_CHANGED) {
			enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
			bna_bfi_pause_set(enet);
		} else if (enet->flags & BNA_ENET_F_MTU_CHANGED) {
			enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
			bna_enet_rx_stop(enet);
		} else {
			bfa_fsm_set_state(enet, bna_enet_sm_started);
		}
		break;
	default:
		bfa_sm_fault(event);
	}
}

static void
bna_enet_sm_cfg_stop_wait_entry(struct bna_enet *enet)
{
	enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
	enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
}

static void
bna_enet_sm_cfg_stop_wait(struct bna_enet *enet,
			  enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_FAIL:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;
	case ENET_E_FWRESP_PAUSE:
	case ENET_E_CHLD_STOPPED:
		bfa_fsm_set_state(enet, bna_enet_sm_chld_stop_wait);
		break;
	default:
		bfa_sm_fault(event);
	}
}

static void
bna_enet_sm_chld_stop_wait_entry(struct bna_enet *enet)
{
	bna_enet_chld_stop(enet);
}

static void
bna_enet_sm_chld_stop_wait(struct bna_enet *enet,
			   enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_FAIL:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;
	case ENET_E_CHLD_STOPPED:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		break;
	default:
		bfa_sm_fault(event);
	}
}

static void
bna_bfi_pause_set(struct bna_enet *enet)
{
	struct bfi_enet_set_pause_req *pause_req = &enet->pause_req;

	bfi_msgq_mhdr_set(pause_req->mh, BFI_MC_ENET,
			  BFI_ENET_H2I_SET_PAUSE_REQ, 0, 0);
	pause_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_set_pause_req)));
	pause_req->tx_pause = enet->pause_config.tx_pause;
	pause_req->rx_pause = enet->pause_config.rx_pause;

	bfa_msgq_cmd_set(&enet->msgq_cmd, NULL, NULL,
			 sizeof(struct bfi_enet_set_pause_req), &pause_req->mh);
	bfa_msgq_cmd_post(&enet->bna->msgq, &enet->msgq_cmd);
}

static void
bna_enet_cb_chld_stopped(void *arg)
{
	struct bna_enet *enet = (struct bna_enet *)arg;

	bfa_fsm_send_event(enet, ENET_E_CHLD_STOPPED);
}

static void
bna_enet_init(struct bna_enet *enet, struct bna *bna)
{
	enet->bna = bna;
	enet->flags = 0;
	enet->mtu = 0;
	enet->type = BNA_ENET_T_REGULAR;

	enet->stop_cbfn = NULL;
	enet->stop_cbarg = NULL;

	enet->mtu_cbfn = NULL;

	bfa_fsm_set_state(enet, bna_enet_sm_stopped);
}

static void
bna_enet_uninit(struct bna_enet *enet)
{
	enet->flags = 0;
	enet->bna = NULL;
}

static void
bna_enet_start(struct bna_enet *enet)
{
	enet->flags |= BNA_ENET_F_IOCETH_READY;
	if (enet->flags & BNA_ENET_F_ENABLED)
		bfa_fsm_send_event(enet, ENET_E_START);
}

static void
bna_ioceth_cb_enet_stopped(void *arg)
{
	struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;

	bfa_fsm_send_event(ioceth, IOCETH_E_ENET_STOPPED);
}

static void
bna_enet_stop(struct bna_enet *enet)
{
	enet->stop_cbfn = bna_ioceth_cb_enet_stopped;
	enet->stop_cbarg = &enet->bna->ioceth;

	enet->flags &= ~BNA_ENET_F_IOCETH_READY;
	bfa_fsm_send_event(enet, ENET_E_STOP);
}

static void
bna_enet_fail(struct bna_enet *enet)
{
	enet->flags &= ~BNA_ENET_F_IOCETH_READY;
	bfa_fsm_send_event(enet, ENET_E_FAIL);
}

void
bna_enet_cb_tx_stopped(struct bna_enet *enet)
{
	bfa_wc_down(&enet->chld_stop_wc);
}

void
bna_enet_cb_rx_stopped(struct bna_enet *enet)
{
	bfa_wc_down(&enet->chld_stop_wc);
}

int
bna_enet_mtu_get(struct bna_enet *enet)
{
	return enet->mtu;
}

void
bna_enet_enable(struct bna_enet *enet)
{
	if (enet->fsm != (bfa_sm_t)bna_enet_sm_stopped)
		return;

	enet->flags |= BNA_ENET_F_ENABLED;

	if (enet->flags & BNA_ENET_F_IOCETH_READY)
		bfa_fsm_send_event(enet, ENET_E_START);
}

void
bna_enet_disable(struct bna_enet *enet, enum bna_cleanup_type type,
		 void (*cbfn)(void *))
{
	if (type == BNA_SOFT_CLEANUP) {
		(*cbfn)(enet->bna->bnad);
		return;
	}

	enet->stop_cbfn = cbfn;
	enet->stop_cbarg = enet->bna->bnad;

	enet->flags &= ~BNA_ENET_F_ENABLED;

	bfa_fsm_send_event(enet, ENET_E_STOP);
}

void
bna_enet_pause_config(struct bna_enet *enet,
		      struct bna_pause_config *pause_config)
{
	enet->pause_config = *pause_config;

	bfa_fsm_send_event(enet, ENET_E_PAUSE_CFG);
}

void
bna_enet_mtu_set(struct bna_enet *enet, int mtu,
		 void (*cbfn)(struct bnad *))
{
	enet->mtu = mtu;
	enet->mtu_cbfn = cbfn;

	bfa_fsm_send_event(enet, ENET_E_MTU_CFG);
}

void
bna_enet_perm_mac_get(struct bna_enet *enet, u8 *mac)
{
	bfa_nw_ioc_get_mac(&enet->bna->ioceth.ioc, mac);
}

/* IOCETH */

#define enable_mbox_intr(_ioceth) \
do { \
	u32 intr_status; \
	bna_intr_status_get((_ioceth)->bna, intr_status); \
	bnad_cb_mbox_intr_enable((_ioceth)->bna->bnad); \
	bna_mbox_intr_enable((_ioceth)->bna); \
} while (0)

#define disable_mbox_intr(_ioceth) \
do { \
	bna_mbox_intr_disable((_ioceth)->bna); \
	bnad_cb_mbox_intr_disable((_ioceth)->bna->bnad); \
} while (0)

#define call_ioceth_stop_cbfn(_ioceth) \
do { \
	if ((_ioceth)->stop_cbfn) { \
		void (*cbfn)(struct bnad *); \
		struct bnad *cbarg; \
		cbfn = (_ioceth)->stop_cbfn; \
		cbarg = (_ioceth)->stop_cbarg; \
		(_ioceth)->stop_cbfn = NULL; \
		(_ioceth)->stop_cbarg = NULL; \
		cbfn(cbarg); \
	} \
} while (0)

#define bna_stats_mod_uninit(_stats_mod) \
do { \
} while (0)

#define bna_stats_mod_start(_stats_mod) \
do { \
	(_stats_mod)->ioc_ready = true; \
} while (0)

#define bna_stats_mod_stop(_stats_mod) \
do { \
	(_stats_mod)->ioc_ready = false; \
} while (0)

#define bna_stats_mod_fail(_stats_mod) \
do { \
	(_stats_mod)->ioc_ready = false; \
	(_stats_mod)->stats_get_busy = false; \
	(_stats_mod)->stats_clr_busy = false; \
} while (0)

static void bna_bfi_attr_get(struct bna_ioceth *ioceth);

bfa_fsm_state_decl(bna_ioceth, stopped, struct bna_ioceth,
		   enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, ioc_ready_wait, struct bna_ioceth,
		   enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, enet_attr_wait, struct bna_ioceth,
		   enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, ready, struct bna_ioceth,
		   enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, last_resp_wait, struct bna_ioceth,
		   enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, enet_stop_wait, struct bna_ioceth,
		   enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, ioc_disable_wait, struct bna_ioceth,
		   enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, failed, struct bna_ioceth,
		   enum bna_ioceth_event);
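/*
 * IOCETH bring-up sequence: stopped --E_ENABLE--> ioc_ready_wait (IOC
 * firmware handshake) --E_IOC_READY--> enet_attr_wait (query device
 * attributes over the message queue) --E_ENET_ATTR_RESP--> ready, at
 * which point the enet, the stats module and the driver (bnad) are
 * notified. Teardown runs in reverse: enet_stop_wait waits for the
 * enet to stop, then ioc_disable_wait waits for the IOC. An IOC
 * failure (e.g. heartbeat loss) from any active state lands in
 * "failed", from which an IOC reset re-enters ioc_ready_wait.
 */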
static void
bna_ioceth_sm_stopped_entry(struct bna_ioceth *ioceth)
{
	call_ioceth_stop_cbfn(ioceth);
}

static void
bna_ioceth_sm_stopped(struct bna_ioceth *ioceth,
		      enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_ENABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_ready_wait);
		bfa_nw_ioc_enable(&ioceth->ioc);
		break;
	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
		break;
	case IOCETH_E_IOC_RESET:
		enable_mbox_intr(ioceth);
		break;
	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;
	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ioceth_sm_ioc_ready_wait_entry(struct bna_ioceth *ioceth)
{
	/**
	 * Do not call bfa_nw_ioc_enable() here. It must be called in the
	 * previous state due to failed -> ioc_ready_wait transition.
	 */
}

static void
bna_ioceth_sm_ioc_ready_wait(struct bna_ioceth *ioceth,
			     enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;
	case IOCETH_E_IOC_RESET:
		enable_mbox_intr(ioceth);
		break;
	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;
	case IOCETH_E_IOC_READY:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_enet_attr_wait);
		break;
	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ioceth_sm_enet_attr_wait_entry(struct bna_ioceth *ioceth)
{
	bna_bfi_attr_get(ioceth);
}

static void
bna_ioceth_sm_enet_attr_wait(struct bna_ioceth *ioceth,
			     enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_last_resp_wait);
		break;
	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;
	case IOCETH_E_ENET_ATTR_RESP:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ready);
		break;
	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ioceth_sm_ready_entry(struct bna_ioceth *ioceth)
{
	bna_enet_start(&ioceth->bna->enet);
	bna_stats_mod_start(&ioceth->bna->stats_mod);
	bnad_cb_ioceth_ready(ioceth->bna->bnad);
}

static void
bna_ioceth_sm_ready(struct bna_ioceth *ioceth, enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_enet_stop_wait);
		break;
	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		bna_enet_fail(&ioceth->bna->enet);
		bna_stats_mod_fail(&ioceth->bna->stats_mod);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;
	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ioceth_sm_last_resp_wait_entry(struct bna_ioceth *ioceth)
{
}

static void
bna_ioceth_sm_last_resp_wait(struct bna_ioceth *ioceth,
			     enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_IOC_FAILED:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		disable_mbox_intr(ioceth);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;
	case IOCETH_E_ENET_ATTR_RESP:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;
	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ioceth_sm_enet_stop_wait_entry(struct bna_ioceth *ioceth)
{
	bna_stats_mod_stop(&ioceth->bna->stats_mod);
	bna_enet_stop(&ioceth->bna->enet);
}

static void
bna_ioceth_sm_enet_stop_wait(struct bna_ioceth *ioceth,
			     enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_IOC_FAILED:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		disable_mbox_intr(ioceth);
		bna_enet_fail(&ioceth->bna->enet);
		bna_stats_mod_fail(&ioceth->bna->stats_mod);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;
	case IOCETH_E_ENET_STOPPED:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;
	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ioceth_sm_ioc_disable_wait_entry(struct bna_ioceth *ioceth)
{
}

static void
bna_ioceth_sm_ioc_disable_wait(struct bna_ioceth *ioceth,
			       enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_IOC_DISABLED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
		break;
	case IOCETH_E_ENET_STOPPED:
		/* This event is received due to enet failing */
		/* No-op */
		break;
	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ioceth_sm_failed_entry(struct bna_ioceth *ioceth)
{
	bnad_cb_ioceth_failed(ioceth->bna->bnad);
}

static void
bna_ioceth_sm_failed(struct bna_ioceth *ioceth,
		     enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;
	case IOCETH_E_IOC_RESET:
		enable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_ready_wait);
		break;
	case IOCETH_E_IOC_FAILED:
		break;
	default:
		bfa_sm_fault(event);
	}
}

static void
bna_bfi_attr_get(struct bna_ioceth *ioceth)
{
	struct bfi_enet_attr_req *attr_req = &ioceth->attr_req;

	bfi_msgq_mhdr_set(attr_req->mh, BFI_MC_ENET,
			  BFI_ENET_H2I_GET_ATTR_REQ, 0, 0);
	attr_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_attr_req)));
	bfa_msgq_cmd_set(&ioceth->msgq_cmd, NULL, NULL,
			 sizeof(struct bfi_enet_attr_req), &attr_req->mh);
	bfa_msgq_cmd_post(&ioceth->bna->msgq, &ioceth->msgq_cmd);
}

/* IOC callback functions */

static void
bna_cb_ioceth_enable(void *arg, enum bfa_status error)
{
	struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;

	if (error)
		bfa_fsm_send_event(ioceth, IOCETH_E_IOC_FAILED);
	else
		bfa_fsm_send_event(ioceth, IOCETH_E_IOC_READY);
}

static void
bna_cb_ioceth_disable(void *arg)
{
	struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;

	bfa_fsm_send_event(ioceth, IOCETH_E_IOC_DISABLED);
}

static void
bna_cb_ioceth_hbfail(void *arg)
{
	struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;

	bfa_fsm_send_event(ioceth, IOCETH_E_IOC_FAILED);
}

static void
bna_cb_ioceth_reset(void *arg)
{
	struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;

	bfa_fsm_send_event(ioceth, IOCETH_E_IOC_RESET);
}

static struct bfa_ioc_cbfn bna_ioceth_cbfn = {
	.enable_cbfn = bna_cb_ioceth_enable,
	.disable_cbfn = bna_cb_ioceth_disable,
	.hbfail_cbfn = bna_cb_ioceth_hbfail,
	.reset_cbfn = bna_cb_ioceth_reset
};

static void bna_attr_init(struct bna_ioceth *ioceth)
{
	ioceth->attr.num_txq = BFI_ENET_DEF_TXQ;
	ioceth->attr.num_rxp = BFI_ENET_DEF_RXP;
	ioceth->attr.num_ucmac = BFI_ENET_DEF_UCAM;
	ioceth->attr.num_mcmac = BFI_ENET_MAX_MCAM;
	ioceth->attr.max_rit_size = BFI_ENET_DEF_RITSZ;
	ioceth->attr.fw_query_complete = false;
}
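/*
 * These are conservative defaults; they stay in effect only until the
 * first GET_ATTR response arrives, since bna_bfi_attr_get_rsp() above
 * overwrites them with firmware-reported limits and then latches
 * fw_query_complete so later responses cannot clobber values the BNAD
 * layer may have overridden.
 */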
static void
bna_ioceth_init(struct bna_ioceth *ioceth, struct bna *bna,
		struct bna_res_info *res_info)
{
	u64 dma;
	u8 *kva;

	ioceth->bna = bna;

	/**
	 * Attach IOC and claim:
	 *	1. DMA memory for IOC attributes
	 *	2. Kernel memory for FW trace
	 */
	bfa_nw_ioc_attach(&ioceth->ioc, ioceth, &bna_ioceth_cbfn);
	bfa_nw_ioc_pci_init(&ioceth->ioc, &bna->pcidev, BFI_PCIFN_CLASS_ETH);

	BNA_GET_DMA_ADDR(
		&res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].dma, dma);
	kva = res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].kva;
	bfa_nw_ioc_mem_claim(&ioceth->ioc, kva, dma);

	kva = res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mdl[0].kva;
	bfa_nw_ioc_debug_memclaim(&ioceth->ioc, kva);

	/**
	 * Attach common modules (CEE, flash, MSGQ) and claim respective
	 * DMA memory.
	 */
	BNA_GET_DMA_ADDR(
		&res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].dma, dma);
	kva = res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].kva;

	bfa_nw_cee_attach(&bna->cee, &ioceth->ioc, bna);
	bfa_nw_cee_mem_claim(&bna->cee, kva, dma);
	kva += bfa_nw_cee_meminfo();
	dma += bfa_nw_cee_meminfo();

	bfa_nw_flash_attach(&bna->flash, &ioceth->ioc, bna);
	bfa_nw_flash_memclaim(&bna->flash, kva, dma);
	kva += bfa_nw_flash_meminfo();
	dma += bfa_nw_flash_meminfo();

	bfa_msgq_attach(&bna->msgq, &ioceth->ioc);
	bfa_msgq_memclaim(&bna->msgq, kva, dma);
	bfa_msgq_regisr(&bna->msgq, BFI_MC_ENET, bna_msgq_rsp_handler, bna);
	kva += bfa_msgq_meminfo();
	dma += bfa_msgq_meminfo();

	ioceth->stop_cbfn = NULL;
	ioceth->stop_cbarg = NULL;

	bna_attr_init(ioceth);

	bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
}

static void
bna_ioceth_uninit(struct bna_ioceth *ioceth)
{
	bfa_nw_ioc_detach(&ioceth->ioc);

	ioceth->bna = NULL;
}

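/*
 * Enable is effectively idempotent: if the ioceth is already up,
 * readiness is reported straight back to the driver; only a stopped
 * ioceth is kicked with an ENABLE event, and all intermediate states
 * ignore the call.
 */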
void
bna_ioceth_enable(struct bna_ioceth *ioceth)
{
	if (ioceth->fsm == (bfa_fsm_t)bna_ioceth_sm_ready) {
		bnad_cb_ioceth_ready(ioceth->bna->bnad);
		return;
	}

	if (ioceth->fsm == (bfa_fsm_t)bna_ioceth_sm_stopped)
		bfa_fsm_send_event(ioceth, IOCETH_E_ENABLE);
}

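/*
 * A soft cleanup bypasses the state machine and just acknowledges the
 * disable to the driver; a hard cleanup registers the completion
 * callback and drives the ioceth FSM through a real shutdown.
 */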
void
bna_ioceth_disable(struct bna_ioceth *ioceth, enum bna_cleanup_type type)
{
	if (type == BNA_SOFT_CLEANUP) {
		bnad_cb_ioceth_disabled(ioceth->bna->bnad);
		return;
	}

	ioceth->stop_cbfn = bnad_cb_ioceth_disabled;
	ioceth->stop_cbarg = ioceth->bna->bnad;

	bfa_fsm_send_event(ioceth, IOCETH_E_DISABLE);
}

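/*
 * The UCAM array is allocated with 2 * num_ucmac entries (see
 * bna_mod_res_req()): the first half seeds free_q, the second half
 * seeds del_q, which buffers MACs pending synchronous deletion.
 */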
static void
bna_ucam_mod_init(struct bna_ucam_mod *ucam_mod, struct bna *bna,
		  struct bna_res_info *res_info)
{
	int i;

	ucam_mod->ucmac = (struct bna_mac *)
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mdl[0].kva;

	INIT_LIST_HEAD(&ucam_mod->free_q);
	for (i = 0; i < bna->ioceth.attr.num_ucmac; i++)
		list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->free_q);

	/* A separate queue to allow synchronous setting of a list of MACs */
	INIT_LIST_HEAD(&ucam_mod->del_q);
	/* i carries over from the loop above, covering the second half */
	for (; i < (bna->ioceth.attr.num_ucmac * 2); i++)
		list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->del_q);

	ucam_mod->bna = bna;
}

static void
bna_ucam_mod_uninit(struct bna_ucam_mod *ucam_mod)
{
	ucam_mod->bna = NULL;
}

static void
bna_mcam_mod_init(struct bna_mcam_mod *mcam_mod, struct bna *bna,
		  struct bna_res_info *res_info)
{
	int i;

	mcam_mod->mcmac = (struct bna_mac *)
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mdl[0].kva;

	INIT_LIST_HEAD(&mcam_mod->free_q);
	for (i = 0; i < bna->ioceth.attr.num_mcmac; i++)
		list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->free_q);

	mcam_mod->mchandle = (struct bna_mcam_handle *)
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.mdl[0].kva;

	INIT_LIST_HEAD(&mcam_mod->free_handle_q);
	for (i = 0; i < bna->ioceth.attr.num_mcmac; i++)
		list_add_tail(&mcam_mod->mchandle[i].qe,
			      &mcam_mod->free_handle_q);

	/* A separate queue to allow synchronous setting of a list of MACs */
	INIT_LIST_HEAD(&mcam_mod->del_q);
	/* i carries over from the loop above, covering the second half */
	for (; i < (bna->ioceth.attr.num_mcmac * 2); i++)
		list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->del_q);

	mcam_mod->bna = bna;
}

static void
bna_mcam_mod_uninit(struct bna_mcam_mod *mcam_mod)
{
	mcam_mod->bna = NULL;
}

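/*
 * Fire a one-shot stats DMA request at the firmware.  stats_get_busy
 * gates concurrent requests; the response handler is expected to clear
 * it once the stats land in the host buffer.
 */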
static void
bna_bfi_stats_get(struct bna *bna)
{
	struct bfi_enet_stats_req *stats_req = &bna->stats_mod.stats_get;

	bna->stats_mod.stats_get_busy = true;

	bfi_msgq_mhdr_set(stats_req->mh, BFI_MC_ENET,
			  BFI_ENET_H2I_STATS_GET_REQ, 0, 0);
	stats_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_stats_req)));
	stats_req->stats_mask = htons(BFI_ENET_STATS_ALL);
	stats_req->tx_enet_mask = htonl(bna->tx_mod.rid_mask);
	stats_req->rx_enet_mask = htonl(bna->rx_mod.rid_mask);
	stats_req->host_buffer.a32.addr_hi = bna->stats.hw_stats_dma.msb;
	stats_req->host_buffer.a32.addr_lo = bna->stats.hw_stats_dma.lsb;

	bfa_msgq_cmd_set(&bna->stats_mod.stats_get_cmd, NULL, NULL,
			 sizeof(struct bfi_enet_stats_req), &stats_req->mh);
	bfa_msgq_cmd_post(&bna->msgq, &bna->stats_mod.stats_get_cmd);
}

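/*
 * Fill in the fixed resource requirements (COMMON-module DMA, IOC
 * attribute DMA, FW-trace KVA, stats DMA).  None of these depend on
 * firmware attributes, so this can run before the IOC is up.
 */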
void
bna_res_req(struct bna_res_info *res_info)
{
	/* DMA memory for COMMON_MODULE */
	res_info[BNA_RES_MEM_T_COM].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.len = ALIGN(
		(bfa_nw_cee_meminfo() +
		 bfa_nw_flash_meminfo() +
		 bfa_msgq_meminfo()), PAGE_SIZE);

	/* DMA memory for retrieving IOC attributes */
	res_info[BNA_RES_MEM_T_ATTR].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.len =
		ALIGN(bfa_nw_ioc_meminfo(), PAGE_SIZE);

	/* Virtual memory for retrieving fw_trc */
	res_info[BNA_RES_MEM_T_FWTRC].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mem_type = BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.len = BNA_DBG_FWTRC_LEN;

	/* DMA memory for retrieving stats */
	res_info[BNA_RES_MEM_T_STATS].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.len =
		ALIGN(sizeof(struct bfi_enet_stats), PAGE_SIZE);
}

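/*
 * Per-module resource requirements.  These are sized from
 * bna->ioceth.attr, i.e. the limits obtained from (or defaulted ahead
 * of) the firmware attribute query.
 */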
void
bna_mod_res_req(struct bna *bna, struct bna_res_info *res_info)
{
	struct bna_attr *attr = &bna->ioceth.attr;

	/* Virtual memory for Tx objects - stored by Tx module */
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.len =
		attr->num_txq * sizeof(struct bna_tx);

	/* Virtual memory for TxQ - stored by Tx module */
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.len =
		attr->num_txq * sizeof(struct bna_txq);

	/* Virtual memory for Rx objects - stored by Rx module */
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.len =
		attr->num_rxp * sizeof(struct bna_rx);

	/* Virtual memory for RxPath - stored by Rx module */
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.len =
		attr->num_rxp * sizeof(struct bna_rxp);

	/* Virtual memory for RxQ - stored by Rx module */
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.len =
		(attr->num_rxp * 2) * sizeof(struct bna_rxq);

	/* Virtual memory for Unicast MAC address - stored by ucam module */
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.len =
		(attr->num_ucmac * 2) * sizeof(struct bna_mac);

	/* Virtual memory for Multicast MAC address - stored by mcam module */
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.len =
		(attr->num_mcmac * 2) * sizeof(struct bna_mac);

	/* Virtual memory for Multicast handle - stored by mcam module */
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.len =
		attr->num_mcmac * sizeof(struct bna_mcam_handle);
}

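/*
 * Top-level init: latch the DMA-able stats buffer supplied via
 * res_info, program register addresses, then bring up ioceth, enet
 * and ethport.
 */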
void
bna_init(struct bna *bna, struct bnad *bnad,
	 struct bfa_pcidev *pcidev, struct bna_res_info *res_info)
{
	bna->bnad = bnad;
	bna->pcidev = *pcidev;

	bna->stats.hw_stats_kva = (struct bfi_enet_stats *)
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].kva;
	bna->stats.hw_stats_dma.msb =
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.msb;
	bna->stats.hw_stats_dma.lsb =
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.lsb;

	bna_reg_addr_init(bna, &bna->pcidev);

	/* Also initializes cee, flash and msgq (see bna_ioceth_init()) */
	bna_ioceth_init(&bna->ioceth, bna, res_info);

	bna_enet_init(&bna->enet, bna);
	bna_ethport_init(&bna->ethport, bna);
}

void
bna_mod_init(struct bna *bna, struct bna_res_info *res_info)
{
	bna_tx_mod_init(&bna->tx_mod, bna, res_info);

	bna_rx_mod_init(&bna->rx_mod, bna, res_info);

	bna_ucam_mod_init(&bna->ucam_mod, bna, res_info);

	bna_mcam_mod_init(&bna->mcam_mod, bna, res_info);

	bna->default_mode_rid = BFI_INVALID_RID;
	bna->promisc_rid = BFI_INVALID_RID;

	bna->mod_flags |= BNA_MOD_F_INIT_DONE;
}

void
bna_uninit(struct bna *bna)
{
	if (bna->mod_flags & BNA_MOD_F_INIT_DONE) {
		bna_mcam_mod_uninit(&bna->mcam_mod);
		bna_ucam_mod_uninit(&bna->ucam_mod);
		bna_rx_mod_uninit(&bna->rx_mod);
		bna_tx_mod_uninit(&bna->tx_mod);
		bna->mod_flags &= ~BNA_MOD_F_INIT_DONE;
	}

	bna_stats_mod_uninit(&bna->stats_mod);
	bna_ethport_uninit(&bna->ethport);
	bna_enet_uninit(&bna->enet);

	bna_ioceth_uninit(&bna->ioceth);

	bna->bnad = NULL;
}

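/*
 * bna_num_txq_set()/bna_num_rxp_set() let the driver trim the
 * advertised queue counts, but only downwards and only after the
 * firmware attribute query has completed.
 */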
int
bna_num_txq_set(struct bna *bna, int num_txq)
{
	if (bna->ioceth.attr.fw_query_complete &&
	    (num_txq <= bna->ioceth.attr.num_txq)) {
		bna->ioceth.attr.num_txq = num_txq;
		return BNA_CB_SUCCESS;
	}

	return BNA_CB_FAIL;
}

int
bna_num_rxp_set(struct bna *bna, int num_rxp)
{
	if (bna->ioceth.attr.fw_query_complete &&
	    (num_rxp <= bna->ioceth.attr.num_rxp)) {
		bna->ioceth.attr.num_rxp = num_rxp;
		return BNA_CB_SUCCESS;
	}

	return BNA_CB_FAIL;
}

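/* Pop the first MAC entry off a CAM free/del list; NULL when exhausted */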
struct bna_mac *
bna_cam_mod_mac_get(struct list_head *head)
{
	struct bna_mac *mac;

	mac = list_first_entry_or_null(head, struct bna_mac, qe);
	if (mac)
		list_del(&mac->qe);

	return mac;
}

struct bna_mcam_handle *
bna_mcam_mod_handle_get(struct bna_mcam_mod *mcam_mod)
{
	struct bna_mcam_handle *handle;

	handle = list_first_entry_or_null(&mcam_mod->free_handle_q,
					  struct bna_mcam_handle, qe);
	if (handle)
		list_del(&handle->qe);

	return handle;
}

void
bna_mcam_mod_handle_put(struct bna_mcam_mod *mcam_mod,
			struct bna_mcam_handle *handle)
{
	list_add_tail(&handle->qe, &mcam_mod->free_handle_q);
}

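/*
 * Kick off a hardware stats fetch.  Fails straight back to the driver
 * if the IOC is down, reports busy while a previous request is still
 * in flight, and otherwise posts a fresh stats request to firmware.
 */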
void
bna_hw_stats_get(struct bna *bna)
{
	if (!bna->stats_mod.ioc_ready) {
		bnad_cb_stats_get(bna->bnad, BNA_CB_FAIL, &bna->stats);
		return;
	}
	if (bna->stats_mod.stats_get_busy) {
		bnad_cb_stats_get(bna->bnad, BNA_CB_BUSY, &bna->stats);
		return;
	}

	bna_bfi_stats_get(bna);
}