csio_lnode.c

/*
 * This file is part of the Chelsio FCoE driver for Linux.
 *
 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/utsname.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport_fc.h>
#include <asm/unaligned.h>
#include <scsi/fc/fc_els.h>
#include <scsi/fc/fc_fs.h>
#include <scsi/fc/fc_gs.h>
#include <scsi/fc/fc_ms.h>

#include "csio_hw.h"
#include "csio_mb.h"
#include "csio_lnode.h"
#include "csio_rnode.h"

int csio_fcoe_rnodes = 1024;
int csio_fdmi_enable = 1;
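/*
 * FC N_Port IDs are 24-bit. PORT_ID_PTR() takes a __be32 holding
 * htonl(port_id) and yields a pointer to bytes 1..3 of that word, i.e. the
 * three bytes of the port id in wire order, ready for a 3-byte memcpy into
 * a WR's l_id/r_id fields (see csio_ln_prep_ecwr() below).
 */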
#define PORT_ID_PTR(_x)	((uint8_t *)(&_x) + 1)

/* Lnode SM declarations */
static void csio_lns_uninit(struct csio_lnode *, enum csio_ln_ev);
static void csio_lns_online(struct csio_lnode *, enum csio_ln_ev);
static void csio_lns_ready(struct csio_lnode *, enum csio_ln_ev);
static void csio_lns_offline(struct csio_lnode *, enum csio_ln_ev);

static int csio_ln_mgmt_submit_req(struct csio_ioreq *,
		void (*io_cbfn) (struct csio_hw *, struct csio_ioreq *),
		enum fcoe_cmn_type, struct csio_dma_buf *, uint32_t);

/* LN event mapping */
static enum csio_ln_ev fwevt_to_lnevt[] = {
	CSIO_LNE_NONE,		/* None */
	CSIO_LNE_NONE,		/* PLOGI_ACC_RCVD  */
	CSIO_LNE_NONE,		/* PLOGI_RJT_RCVD  */
	CSIO_LNE_NONE,		/* PLOGI_RCVD	   */
	CSIO_LNE_NONE,		/* PLOGO_RCVD	   */
	CSIO_LNE_NONE,		/* PRLI_ACC_RCVD   */
	CSIO_LNE_NONE,		/* PRLI_RJT_RCVD   */
	CSIO_LNE_NONE,		/* PRLI_RCVD	   */
	CSIO_LNE_NONE,		/* PRLO_RCVD	   */
	CSIO_LNE_NONE,		/* NPORT_ID_CHGD   */
	CSIO_LNE_LOGO,		/* FLOGO_RCVD	   */
	CSIO_LNE_LOGO,		/* CLR_VIRT_LNK_RCVD */
	CSIO_LNE_FAB_INIT_DONE,	/* FLOGI_ACC_RCVD   */
	CSIO_LNE_NONE,		/* FLOGI_RJT_RCVD   */
	CSIO_LNE_FAB_INIT_DONE,	/* FDISC_ACC_RCVD   */
	CSIO_LNE_NONE,		/* FDISC_RJT_RCVD   */
	CSIO_LNE_NONE,		/* FLOGI_TMO_MAX_RETRY */
	CSIO_LNE_NONE,		/* IMPL_LOGO_ADISC_ACC */
	CSIO_LNE_NONE,		/* IMPL_LOGO_ADISC_RJT */
	CSIO_LNE_NONE,		/* IMPL_LOGO_ADISC_CNFLT */
	CSIO_LNE_NONE,		/* PRLI_TMO		*/
	CSIO_LNE_NONE,		/* ADISC_TMO		*/
	CSIO_LNE_NONE,		/* RSCN_DEV_LOST */
	CSIO_LNE_NONE,		/* SCR_ACC_RCVD	*/
	CSIO_LNE_NONE,		/* ADISC_RJT_RCVD */
	CSIO_LNE_NONE,		/* LOGO_SNT */
	CSIO_LNE_NONE,		/* PROTO_ERR_IMPL_LOGO */
};

#define CSIO_FWE_TO_LNE(_evt)	((_evt > PROTO_ERR_IMPL_LOGO) ?		\
						CSIO_LNE_NONE :		\
						fwevt_to_lnevt[_evt])
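/*
 * Only FLOGO_RCVD, CLR_VIRT_LNK_RCVD, FLOGI_ACC_RCVD and FDISC_ACC_RCVD
 * drive the lnode state machine directly; every other firmware event maps
 * to CSIO_LNE_NONE, and event codes past the end of the table are clamped
 * to CSIO_LNE_NONE rather than indexing out of bounds.
 */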
#define csio_ct_rsp(cp)		(((struct fc_ct_hdr *)cp)->ct_cmd)
#define csio_ct_reason(cp)	(((struct fc_ct_hdr *)cp)->ct_reason)
#define csio_ct_expl(cp)	(((struct fc_ct_hdr *)cp)->ct_explan)
#define csio_ct_get_pld(cp)	((void *)(((uint8_t *)cp) + FC_CT_HDR_LEN))

/*
 * csio_ln_lookup_by_portid - Lookup lnode using given portid.
 * @hw: HW module
 * @portid: port-id.
 *
 * If found, returns lnode matching given portid, otherwise returns NULL.
 */
static struct csio_lnode *
csio_ln_lookup_by_portid(struct csio_hw *hw, uint8_t portid)
{
	struct csio_lnode *ln = hw->rln;
	struct list_head *tmp;

	/* Match siblings lnode with portid */
	list_for_each(tmp, &hw->sln_head) {
		ln = (struct csio_lnode *) tmp;
		if (ln->portid == portid)
			return ln;
	}

	return NULL;
}
/*
 * csio_ln_lookup_by_vnpi - Lookup lnode using given vnp id.
 * @hw - HW module
 * @vnp_id - vnp index.
 * Returns - If found, returns lnode matching given vnp id,
 * otherwise returns NULL.
 */
static struct csio_lnode *
csio_ln_lookup_by_vnpi(struct csio_hw *hw, uint32_t vnp_id)
{
	struct list_head *tmp1, *tmp2;
	struct csio_lnode *sln = NULL, *cln = NULL;

	if (list_empty(&hw->sln_head)) {
		CSIO_INC_STATS(hw, n_lnlkup_miss);
		return NULL;
	}
	/* Traverse sibling lnodes */
	list_for_each(tmp1, &hw->sln_head) {
		sln = (struct csio_lnode *) tmp1;

		/* Match sibling lnode */
		if (sln->vnp_flowid == vnp_id)
			return sln;

		if (list_empty(&sln->cln_head))
			continue;

		/* Traverse children lnodes */
		list_for_each(tmp2, &sln->cln_head) {
			cln = (struct csio_lnode *) tmp2;

			if (cln->vnp_flowid == vnp_id)
				return cln;
		}
	}
	CSIO_INC_STATS(hw, n_lnlkup_miss);
	return NULL;
}
/**
 * csio_lnode_lookup_by_wwpn - Lookup lnode using given wwpn.
 * @hw:		HW module.
 * @wwpn:	WWPN.
 *
 * If found, returns lnode matching given wwpn, returns NULL otherwise.
 */
struct csio_lnode *
csio_lnode_lookup_by_wwpn(struct csio_hw *hw, uint8_t *wwpn)
{
	struct list_head *tmp1, *tmp2;
	struct csio_lnode *sln = NULL, *cln = NULL;

	if (list_empty(&hw->sln_head)) {
		CSIO_INC_STATS(hw, n_lnlkup_miss);
		return NULL;
	}
	/* Traverse sibling lnodes */
	list_for_each(tmp1, &hw->sln_head) {
		sln = (struct csio_lnode *) tmp1;

		/* Match sibling lnode */
		if (!memcmp(csio_ln_wwpn(sln), wwpn, 8))
			return sln;

		if (list_empty(&sln->cln_head))
			continue;

		/* Traverse children lnodes */
		list_for_each(tmp2, &sln->cln_head) {
			cln = (struct csio_lnode *) tmp2;

			if (!memcmp(csio_ln_wwpn(cln), wwpn, 8))
				return cln;
		}
	}
	return NULL;
}
/* FDMI */
static void
csio_fill_ct_iu(void *buf, uint8_t type, uint8_t sub_type, uint16_t op)
{
	struct fc_ct_hdr *cmd = (struct fc_ct_hdr *)buf;
	cmd->ct_rev = FC_CT_REV;
	cmd->ct_fs_type = type;
	cmd->ct_fs_subtype = sub_type;
	cmd->ct_cmd = htons(op);
}

static int
csio_hostname(uint8_t *buf, size_t buf_len)
{
	if (snprintf(buf, buf_len, "%s", init_utsname()->nodename) > 0)
		return 0;
	return -1;
}

static int
csio_osname(uint8_t *buf, size_t buf_len)
{
	if (snprintf(buf, buf_len, "%s %s %s",
		     init_utsname()->sysname,
		     init_utsname()->release,
		     init_utsname()->version) > 0)
		return 0;

	return -1;
}

static inline void
csio_append_attrib(uint8_t **ptr, uint16_t type, uint8_t *val, uint16_t len)
{
	uint16_t val_len = len;
	struct fc_fdmi_attr_entry *ae = (struct fc_fdmi_attr_entry *)*ptr;

	ae->type = htons(type);
	len += 4;		/* includes attribute type and length */
	len = (len + 3) & ~3;	/* should be multiple of 4 bytes */
	ae->len = htons(len);
	/* Copy only the caller's bytes and zero the pad; copying the
	 * padded 'len' here would read past the end of 'val'.
	 */
	memcpy(ae->value, val, val_len);
	if (len > val_len)
		memset(ae->value + val_len, 0, len - val_len);
	*ptr += len;
}
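/*
 * Each FDMI attribute is a TLV: a 2-byte type, a 2-byte length counting the
 * 4-byte header plus the value padded to a 4-byte multiple, then the value
 * itself. For example, appending type 0x000a with the 2-byte value 0x1234
 * emits the eight bytes 00 0a 00 08 12 34 00 00.
 */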
/*
 * csio_ln_fdmi_done - FDMI registration completion
 * @hw: HW context
 * @fdmi_req: fdmi request
 */
static void
csio_ln_fdmi_done(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
{
	void *cmd;
	struct csio_lnode *ln = fdmi_req->lnode;

	if (fdmi_req->wr_status != FW_SUCCESS) {
		csio_ln_dbg(ln, "WR error:%x in processing fdmi rpa cmd\n",
			    fdmi_req->wr_status);
		CSIO_INC_STATS(ln, n_fdmi_err);
	}

	cmd = fdmi_req->dma_buf.vaddr;
	if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) {
		csio_ln_dbg(ln, "fdmi rpa cmd rejected reason %x expl %x\n",
			    csio_ct_reason(cmd), csio_ct_expl(cmd));
	}
}
/*
 * csio_ln_fdmi_rhba_cbfn - RHBA completion
 * @hw: HW context
 * @fdmi_req: fdmi request
 */
static void
csio_ln_fdmi_rhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
{
	void *cmd;
	uint8_t *pld;
	uint32_t len = 0;
	__be32 val;
	__be16 mfs;
	uint32_t numattrs = 0;
	struct csio_lnode *ln = fdmi_req->lnode;
	struct fs_fdmi_attrs *attrib_blk;
	struct fc_fdmi_port_name *port_name;
	uint8_t buf[64];
	uint8_t *fc4_type;

	if (fdmi_req->wr_status != FW_SUCCESS) {
		csio_ln_dbg(ln, "WR error:%x in processing fdmi rhba cmd\n",
			    fdmi_req->wr_status);
		CSIO_INC_STATS(ln, n_fdmi_err);
	}

	cmd = fdmi_req->dma_buf.vaddr;
	if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) {
		csio_ln_dbg(ln, "fdmi rhba cmd rejected reason %x expl %x\n",
			    csio_ct_reason(cmd), csio_ct_expl(cmd));
	}

	if (!csio_is_rnode_ready(fdmi_req->rnode)) {
		CSIO_INC_STATS(ln, n_fdmi_err);
		return;
	}

	/* Prepare CT hdr for RPA cmd */
	memset(cmd, 0, FC_CT_HDR_LEN);
	csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, FC_FDMI_RPA);

	/* Prepare RPA payload */
	pld = (uint8_t *)csio_ct_get_pld(cmd);
	port_name = (struct fc_fdmi_port_name *)pld;
	memcpy(&port_name->portname, csio_ln_wwpn(ln), 8);
	pld += sizeof(*port_name);

	/* Start appending Port attributes */
	attrib_blk = (struct fs_fdmi_attrs *)pld;
	attrib_blk->numattrs = 0;
	len += sizeof(attrib_blk->numattrs);
	pld += sizeof(attrib_blk->numattrs);

	fc4_type = &buf[0];
	memset(fc4_type, 0, FC_FDMI_PORT_ATTR_FC4TYPES_LEN);
	fc4_type[2] = 1;
	fc4_type[7] = 1;
	csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_FC4TYPES,
			   fc4_type, FC_FDMI_PORT_ATTR_FC4TYPES_LEN);
	numattrs++;

	val = htonl(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
	csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_SUPPORTEDSPEED,
			   (uint8_t *)&val,
			   FC_FDMI_PORT_ATTR_SUPPORTEDSPEED_LEN);
	numattrs++;

	if (hw->pport[ln->portid].link_speed == FW_PORT_CAP_SPEED_1G)
		val = htonl(FC_PORTSPEED_1GBIT);
	else if (hw->pport[ln->portid].link_speed == FW_PORT_CAP_SPEED_10G)
		val = htonl(FC_PORTSPEED_10GBIT);
	else
		val = htonl(CSIO_HBA_PORTSPEED_UNKNOWN);
	csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_CURRENTPORTSPEED,
			   (uint8_t *)&val,
			   FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN);
	numattrs++;

	mfs = ln->ln_sparm.csp.sp_bb_data;
	csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_MAXFRAMESIZE,
			   (uint8_t *)&mfs, FC_FDMI_PORT_ATTR_MAXFRAMESIZE_LEN);
	numattrs++;

	strcpy(buf, "csiostor");
	csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_OSDEVICENAME, buf,
			   (uint16_t)strlen(buf));
	numattrs++;

	if (!csio_hostname(buf, sizeof(buf))) {
		csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_HOSTNAME,
				   buf, (uint16_t)strlen(buf));
		numattrs++;
	}
	attrib_blk->numattrs = htonl(numattrs);
	len = (uint32_t)(pld - (uint8_t *)cmd);

	/* Submit FDMI RPA request */
	spin_lock_irq(&hw->lock);
	if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_done,
				    FCOE_CT, &fdmi_req->dma_buf, len)) {
		CSIO_INC_STATS(ln, n_fdmi_err);
		csio_ln_dbg(ln, "Failed to issue fdmi rpa req\n");
	}
	spin_unlock_irq(&hw->lock);
}
/*
 * csio_ln_fdmi_dprt_cbfn - DPRT completion
 * @hw: HW context
 * @fdmi_req: fdmi request
 */
static void
csio_ln_fdmi_dprt_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
{
	void *cmd;
	uint8_t *pld;
	uint32_t len = 0;
	uint32_t numattrs = 0;
	__be32 maxpayload = htonl(65536);
	struct fc_fdmi_hba_identifier *hbaid;
	struct csio_lnode *ln = fdmi_req->lnode;
	struct fc_fdmi_rpl *reg_pl;
	struct fs_fdmi_attrs *attrib_blk;
	uint8_t buf[64];

	if (fdmi_req->wr_status != FW_SUCCESS) {
		csio_ln_dbg(ln, "WR error:%x in processing fdmi dprt cmd\n",
			    fdmi_req->wr_status);
		CSIO_INC_STATS(ln, n_fdmi_err);
	}

	if (!csio_is_rnode_ready(fdmi_req->rnode)) {
		CSIO_INC_STATS(ln, n_fdmi_err);
		return;
	}
	cmd = fdmi_req->dma_buf.vaddr;
	if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) {
		csio_ln_dbg(ln, "fdmi dprt cmd rejected reason %x expl %x\n",
			    csio_ct_reason(cmd), csio_ct_expl(cmd));
	}

	/* Prepare CT hdr for RHBA cmd */
	memset(cmd, 0, FC_CT_HDR_LEN);
	csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, FC_FDMI_RHBA);
	len = FC_CT_HDR_LEN;

	/* Prepare RHBA payload */
	pld = (uint8_t *)csio_ct_get_pld(cmd);
	hbaid = (struct fc_fdmi_hba_identifier *)pld;
	memcpy(&hbaid->id, csio_ln_wwpn(ln), 8); /* HBA identifier */
	pld += sizeof(*hbaid);

	/* Register one port per hba */
	reg_pl = (struct fc_fdmi_rpl *)pld;
	reg_pl->numport = htonl(1);
	memcpy(&reg_pl->port[0].portname, csio_ln_wwpn(ln), 8);
	pld += sizeof(*reg_pl);

	/* Start appending HBA attributes */
	attrib_blk = (struct fs_fdmi_attrs *)pld;
	attrib_blk->numattrs = 0;
	len += sizeof(attrib_blk->numattrs);
	pld += sizeof(attrib_blk->numattrs);

	csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_NODENAME, csio_ln_wwnn(ln),
			   FC_FDMI_HBA_ATTR_NODENAME_LEN);
	numattrs++;

	memset(buf, 0, sizeof(buf));

	strcpy(buf, "Chelsio Communications");
	csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MANUFACTURER, buf,
			   (uint16_t)strlen(buf));
	numattrs++;
	csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_SERIALNUMBER,
			   hw->vpd.sn, (uint16_t)sizeof(hw->vpd.sn));
	numattrs++;
	csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MODEL, hw->vpd.id,
			   (uint16_t)sizeof(hw->vpd.id));
	numattrs++;
	csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MODELDESCRIPTION,
			   hw->model_desc, (uint16_t)strlen(hw->model_desc));
	numattrs++;
	csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_HARDWAREVERSION,
			   hw->hw_ver, (uint16_t)sizeof(hw->hw_ver));
	numattrs++;
	csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_FIRMWAREVERSION,
			   hw->fwrev_str, (uint16_t)strlen(hw->fwrev_str));
	numattrs++;

	if (!csio_osname(buf, sizeof(buf))) {
		csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_OSNAMEVERSION,
				   buf, (uint16_t)strlen(buf));
		numattrs++;
	}

	csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MAXCTPAYLOAD,
			   (uint8_t *)&maxpayload,
			   FC_FDMI_HBA_ATTR_MAXCTPAYLOAD_LEN);
	len = (uint32_t)(pld - (uint8_t *)cmd);
	numattrs++;
	attrib_blk->numattrs = htonl(numattrs);

	/* Submit FDMI RHBA request */
	spin_lock_irq(&hw->lock);
	if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_rhba_cbfn,
				    FCOE_CT, &fdmi_req->dma_buf, len)) {
		CSIO_INC_STATS(ln, n_fdmi_err);
		csio_ln_dbg(ln, "Failed to issue fdmi rhba req\n");
	}
	spin_unlock_irq(&hw->lock);
}
/*
 * csio_ln_fdmi_dhba_cbfn - DHBA completion
 * @hw: HW context
 * @fdmi_req: fdmi request
 */
static void
csio_ln_fdmi_dhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
{
	struct csio_lnode *ln = fdmi_req->lnode;
	void *cmd;
	struct fc_fdmi_port_name *port_name;
	uint32_t len;

	if (fdmi_req->wr_status != FW_SUCCESS) {
		csio_ln_dbg(ln, "WR error:%x in processing fdmi dhba cmd\n",
			    fdmi_req->wr_status);
		CSIO_INC_STATS(ln, n_fdmi_err);
	}

	if (!csio_is_rnode_ready(fdmi_req->rnode)) {
		CSIO_INC_STATS(ln, n_fdmi_err);
		return;
	}
	cmd = fdmi_req->dma_buf.vaddr;
	if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) {
		csio_ln_dbg(ln, "fdmi dhba cmd rejected reason %x expl %x\n",
			    csio_ct_reason(cmd), csio_ct_expl(cmd));
	}

	/* Send FDMI cmd to de-register any Port attributes if registered
	 * before
	 */

	/* Prepare FDMI DPRT cmd */
	memset(cmd, 0, FC_CT_HDR_LEN);
	csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, FC_FDMI_DPRT);
	len = FC_CT_HDR_LEN;
	port_name = (struct fc_fdmi_port_name *)csio_ct_get_pld(cmd);
	memcpy(&port_name->portname, csio_ln_wwpn(ln), 8);
	len += sizeof(*port_name);

	/* Submit FDMI request */
	spin_lock_irq(&hw->lock);
	if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_dprt_cbfn,
				    FCOE_CT, &fdmi_req->dma_buf, len)) {
		CSIO_INC_STATS(ln, n_fdmi_err);
		csio_ln_dbg(ln, "Failed to issue fdmi dprt req\n");
	}
	spin_unlock_irq(&hw->lock);
}
/**
 * csio_ln_fdmi_start - Start an FDMI request.
 * @ln:		lnode
 * @context:	session context
 *
 * Issued with lock held.
 */
int
csio_ln_fdmi_start(struct csio_lnode *ln, void *context)
{
	struct csio_ioreq *fdmi_req;
	struct csio_rnode *fdmi_rn = (struct csio_rnode *)context;
	void *cmd;
	struct fc_fdmi_hba_identifier *hbaid;
	uint32_t len;

	if (!(ln->flags & CSIO_LNF_FDMI_ENABLE))
		return -EPROTONOSUPPORT;

	if (!csio_is_rnode_ready(fdmi_rn))
		CSIO_INC_STATS(ln, n_fdmi_err);

	/* Send FDMI cmd to de-register any HBA attributes if registered
	 * before
	 */
	fdmi_req = ln->mgmt_req;
	fdmi_req->lnode = ln;
	fdmi_req->rnode = fdmi_rn;

	/* Prepare FDMI DHBA cmd */
	cmd = fdmi_req->dma_buf.vaddr;
	memset(cmd, 0, FC_CT_HDR_LEN);
	csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, FC_FDMI_DHBA);
	len = FC_CT_HDR_LEN;

	hbaid = (struct fc_fdmi_hba_identifier *)csio_ct_get_pld(cmd);
	memcpy(&hbaid->id, csio_ln_wwpn(ln), 8);
	len += sizeof(*hbaid);

	/* Submit FDMI request */
	if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_dhba_cbfn,
				    FCOE_CT, &fdmi_req->dma_buf, len)) {
		CSIO_INC_STATS(ln, n_fdmi_err);
		csio_ln_dbg(ln, "Failed to issue fdmi dhba req\n");
	}
	return 0;
}
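/*
 * The FDMI registration above runs as a chain of CT requests, each issued
 * from the previous one's completion callback: DHBA (de-register HBA) ->
 * DPRT (de-register port) -> RHBA (register HBA attributes) -> RPA
 * (register port attributes), with csio_ln_fdmi_done() closing the chain.
 */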
/*
 * csio_ln_vnp_read_cbfn - vnp read completion handler.
 * @hw: HW module.
 * @mbp: Mailbox command/response.
 *
 * Reads vnp response and updates ln parameters.
 */
static void
csio_ln_vnp_read_cbfn(struct csio_hw *hw, struct csio_mb *mbp)
{
	struct csio_lnode *ln = ((struct csio_lnode *)mbp->priv);
	struct fw_fcoe_vnp_cmd *rsp = (struct fw_fcoe_vnp_cmd *)(mbp->mb);
	struct fc_els_csp *csp;
	struct fc_els_cssp *clsp;
	enum fw_retval retval;
	__be32 nport_id;

	retval = FW_CMD_RETVAL_G(ntohl(rsp->alloc_to_len16));
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FCOE VNP read cmd returned error:0x%x\n", retval);
		mempool_free(mbp, hw->mb_mempool);
		return;
	}

	spin_lock_irq(&hw->lock);

	memcpy(ln->mac, rsp->vnport_mac, sizeof(ln->mac));
	memcpy(&nport_id, &rsp->vnport_mac[3], sizeof(uint8_t)*3);
	ln->nport_id = ntohl(nport_id);
	ln->nport_id = ln->nport_id >> 8;

	/* Update WWNs */
	/*
	 * This may look like a duplication of what csio_fcoe_enable_link()
	 * does, but is absolutely necessary if the vnpi changes between
	 * a FCOE LINK UP and FCOE LINK DOWN.
	 */
	memcpy(csio_ln_wwnn(ln), rsp->vnport_wwnn, 8);
	memcpy(csio_ln_wwpn(ln), rsp->vnport_wwpn, 8);

	/* Copy common sparam */
	csp = (struct fc_els_csp *)rsp->cmn_srv_parms;
	ln->ln_sparm.csp.sp_hi_ver = csp->sp_hi_ver;
	ln->ln_sparm.csp.sp_lo_ver = csp->sp_lo_ver;
	ln->ln_sparm.csp.sp_bb_cred = csp->sp_bb_cred;
	ln->ln_sparm.csp.sp_features = csp->sp_features;
	ln->ln_sparm.csp.sp_bb_data = csp->sp_bb_data;
	ln->ln_sparm.csp.sp_r_a_tov = csp->sp_r_a_tov;
	ln->ln_sparm.csp.sp_e_d_tov = csp->sp_e_d_tov;

	/* Copy word 0 & word 1 of class sparam */
	clsp = (struct fc_els_cssp *)rsp->clsp_word_0_1;
	ln->ln_sparm.clsp[2].cp_class = clsp->cp_class;
	ln->ln_sparm.clsp[2].cp_init = clsp->cp_init;
	ln->ln_sparm.clsp[2].cp_recip = clsp->cp_recip;
	ln->ln_sparm.clsp[2].cp_rdfs = clsp->cp_rdfs;

	spin_unlock_irq(&hw->lock);

	mempool_free(mbp, hw->mb_mempool);

	/* Send an event to update local attribs */
	csio_lnode_async_event(ln, CSIO_LN_FC_ATTRIB_UPDATE);
}
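/*
 * Note on the nport_id extraction above: the VN-Port MAC is a fabric
 * provided MAC address (FPMA) whose low three bytes carry the 24-bit FC
 * port id. Copying vnport_mac[3..5] into the top of a __be32, byte-swapping
 * with ntohl() and shifting right by 8 leaves the port id in the low
 * 24 bits of ln->nport_id.
 */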
/*
 * csio_ln_vnp_read - Read vnp params.
 * @ln: lnode
 * @cbfn: Completion handler.
 *
 * Issued with lock held.
 */
static int
csio_ln_vnp_read(struct csio_lnode *ln,
		 void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct csio_hw *hw = ln->hwp;
	struct csio_mb *mbp;

	/* Allocate Mbox request */
	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/* Prepare VNP Command */
	csio_fcoe_vnp_read_init_mb(ln, mbp,
				   CSIO_MB_DEFAULT_TMO,
				   ln->fcf_flowid,
				   ln->vnp_flowid,
				   cbfn);

	/* Issue MBOX cmd */
	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Failed to issue mbox FCoE VNP command\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	return 0;
}
/*
 * csio_fcoe_enable_link - Enable fcoe link.
 * @ln: lnode
 * @enable: enable/disable
 * Issued with lock held.
 * Issues mbox cmd to bring up FCOE link on port associated with given ln.
 */
static int
csio_fcoe_enable_link(struct csio_lnode *ln, bool enable)
{
	struct csio_hw *hw = ln->hwp;
	struct csio_mb *mbp;
	enum fw_retval retval;
	uint8_t portid;
	uint8_t sub_op;
	struct fw_fcoe_link_cmd *lcmd;
	int i;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	portid = ln->portid;
	sub_op = enable ? FCOE_LINK_UP : FCOE_LINK_DOWN;

	csio_dbg(hw, "bringing FCOE LINK %s on Port:%d\n",
		 sub_op ? "UP" : "DOWN", portid);

	csio_write_fcoe_link_cond_init_mb(ln, mbp, CSIO_MB_DEFAULT_TMO,
					  portid, sub_op, 0, 0, 0, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "failed to issue FCOE LINK cmd on port[%d]\n",
			 portid);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	retval = csio_mb_fw_retval(mbp);
	if (retval != FW_SUCCESS) {
		csio_err(hw,
			 "FCOE LINK %s cmd on port[%d] failed with "
			 "ret:x%x\n", sub_op ? "UP" : "DOWN", portid, retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	if (!enable)
		goto out;

	lcmd = (struct fw_fcoe_link_cmd *)mbp->mb;

	memcpy(csio_ln_wwnn(ln), lcmd->vnport_wwnn, 8);
	memcpy(csio_ln_wwpn(ln), lcmd->vnport_wwpn, 8);

	for (i = 0; i < CSIO_MAX_PPORTS; i++)
		if (hw->pport[i].portid == portid)
			memcpy(hw->pport[i].mac, lcmd->phy_mac, 6);

out:
	mempool_free(mbp, hw->mb_mempool);
	return 0;
}
/*
 * csio_ln_read_fcf_cbfn - Read fcf parameters
 * @hw: HW module
 * @mbp: Mailbox command/response.
 *
 * Reads fcf response and updates ln fcf information.
 */
static void
csio_ln_read_fcf_cbfn(struct csio_hw *hw, struct csio_mb *mbp)
{
	struct csio_lnode *ln = (struct csio_lnode *)mbp->priv;
	struct csio_fcf_info *fcf_info;
	struct fw_fcoe_fcf_cmd *rsp =
			(struct fw_fcoe_fcf_cmd *)(mbp->mb);
	enum fw_retval retval;

	retval = FW_CMD_RETVAL_G(ntohl(rsp->retval_len16));
	if (retval != FW_SUCCESS) {
		csio_ln_err(ln, "FCOE FCF cmd failed with ret x%x\n",
			    retval);
		mempool_free(mbp, hw->mb_mempool);
		return;
	}

	spin_lock_irq(&hw->lock);
	fcf_info = ln->fcfinfo;
	fcf_info->priority = FW_FCOE_FCF_CMD_PRIORITY_GET(
					ntohs(rsp->priority_pkd));
	fcf_info->vf_id = ntohs(rsp->vf_id);
	fcf_info->vlan_id = rsp->vlan_id;
	fcf_info->max_fcoe_size = ntohs(rsp->max_fcoe_size);
	fcf_info->fka_adv = be32_to_cpu(rsp->fka_adv);
	fcf_info->fcfi = FW_FCOE_FCF_CMD_FCFI_GET(ntohl(rsp->op_to_fcfi));
	fcf_info->fpma = FW_FCOE_FCF_CMD_FPMA_GET(rsp->fpma_to_portid);
	fcf_info->spma = FW_FCOE_FCF_CMD_SPMA_GET(rsp->fpma_to_portid);
	fcf_info->login = FW_FCOE_FCF_CMD_LOGIN_GET(rsp->fpma_to_portid);
	fcf_info->portid = FW_FCOE_FCF_CMD_PORTID_GET(rsp->fpma_to_portid);
	memcpy(fcf_info->fc_map, rsp->fc_map, sizeof(fcf_info->fc_map));
	memcpy(fcf_info->mac, rsp->mac, sizeof(fcf_info->mac));
	memcpy(fcf_info->name_id, rsp->name_id, sizeof(fcf_info->name_id));
	memcpy(fcf_info->fabric, rsp->fabric, sizeof(fcf_info->fabric));
	memcpy(fcf_info->spma_mac, rsp->spma_mac, sizeof(fcf_info->spma_mac));
	spin_unlock_irq(&hw->lock);

	mempool_free(mbp, hw->mb_mempool);
}
/*
 * csio_ln_read_fcf_entry - Read fcf entry.
 * @ln: lnode
 * @cbfn: Completion handler.
 *
 * Issued with lock held.
 */
static int
csio_ln_read_fcf_entry(struct csio_lnode *ln,
		       void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct csio_hw *hw = ln->hwp;
	struct csio_mb *mbp;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/* Get FCoE FCF information */
	csio_fcoe_read_fcf_init_mb(ln, mbp, CSIO_MB_DEFAULT_TMO,
				   ln->portid, ln->fcf_flowid, cbfn);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "failed to issue FCOE FCF cmd\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	return 0;
}
/*
 * csio_handle_link_up - Logical Linkup event.
 * @hw - HW module.
 * @portid - Physical port number
 * @fcfi - FCF index.
 * @vnpi - VNP index.
 * Returns - none.
 *
 * This event is received from FW when a virtual link is established between
 * the Physical port [ENode] and the FCF. If it is a new vnpi, a local node
 * object is created on this FCF and set to [ONLINE] state. The lnode then
 * waits for a FW_RDEV_CMD event indicating that the fabric login has
 * completed, at which point it moves to [READY] state.
 *
 * This is called with hw lock held.
 */
static void
csio_handle_link_up(struct csio_hw *hw, uint8_t portid, uint32_t fcfi,
		    uint32_t vnpi)
{
	struct csio_lnode *ln = NULL;

	/* Lookup lnode based on vnpi */
	ln = csio_ln_lookup_by_vnpi(hw, vnpi);
	if (!ln) {
		/* Pick lnode based on portid */
		ln = csio_ln_lookup_by_portid(hw, portid);
		if (!ln) {
			csio_err(hw, "failed to lookup fcoe lnode on port:%d\n",
				 portid);
			CSIO_DB_ASSERT(0);
			return;
		}

		/* Check if lnode has valid vnp flowid */
		if (ln->vnp_flowid != CSIO_INVALID_IDX) {
			/* New VN-Port: allocate a fresh lnode for it */
			spin_unlock_irq(&hw->lock);
			ln = csio_lnode_alloc(hw);
			spin_lock_irq(&hw->lock);
			if (!ln) {
				csio_err(hw,
					 "failed to allocate fcoe lnode "
					 "for port:%d vnpi:x%x\n",
					 portid, vnpi);
				CSIO_DB_ASSERT(0);
				return;
			}
			ln->portid = portid;
		}
		ln->vnp_flowid = vnpi;
		ln->dev_num &= ~0xFFFF;
		ln->dev_num |= vnpi;
	}

	/* Initialize fcfi */
	ln->fcf_flowid = fcfi;

	csio_info(hw, "Port:%d - FCOE LINK UP\n", portid);

	CSIO_INC_STATS(ln, n_link_up);

	/* Send LINKUP event to SM */
	csio_post_event(&ln->sm, CSIO_LNE_LINKUP);
}
/*
 * csio_post_event_rns
 * @ln - FCOE lnode
 * @evt - Given rnode event
 * Returns - none
 *
 * Posts given rnode event to all FCOE rnodes connected with given Lnode.
 * This routine is invoked when lnode receives LINK_DOWN/DOWN_LINK/CLOSE
 * event.
 *
 * This is called with hw lock held.
 */
static void
csio_post_event_rns(struct csio_lnode *ln, enum csio_rn_ev evt)
{
	struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
	struct list_head *tmp, *next;
	struct csio_rnode *rn;

	list_for_each_safe(tmp, next, &rnhead->sm.sm_list) {
		rn = (struct csio_rnode *) tmp;
		csio_post_event(&rn->sm, evt);
	}
}

/*
 * csio_cleanup_rns
 * @ln - FCOE lnode
 * Returns - none
 *
 * Frees all FCOE rnodes connected with given Lnode.
 *
 * This is called with hw lock held.
 */
static void
csio_cleanup_rns(struct csio_lnode *ln)
{
	struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
	struct list_head *tmp, *next_rn;
	struct csio_rnode *rn;

	list_for_each_safe(tmp, next_rn, &rnhead->sm.sm_list) {
		rn = (struct csio_rnode *) tmp;
		csio_put_rnode(ln, rn);
	}
}
/*
 * csio_post_event_lns
 * @ln - FCOE lnode
 * @evt - Given lnode event
 * Returns - none
 *
 * Posts given lnode event to all FCOE lnodes connected with given Lnode.
 * This routine is invoked when lnode receives LINK_DOWN/DOWN_LINK/CLOSE
 * event.
 *
 * This is called with hw lock held.
 */
static void
csio_post_event_lns(struct csio_lnode *ln, enum csio_ln_ev evt)
{
	struct list_head *tmp;
	struct csio_lnode *cln, *sln;

	/* If NPIV lnode, send evt only to that and return */
	if (csio_is_npiv_ln(ln)) {
		csio_post_event(&ln->sm, evt);
		return;
	}

	sln = ln;
	/* Traverse children lnodes list and send evt */
	list_for_each(tmp, &sln->cln_head) {
		cln = (struct csio_lnode *) tmp;
		csio_post_event(&cln->sm, evt);
	}

	/* Send evt to parent lnode */
	csio_post_event(&ln->sm, evt);
}

/*
 * csio_ln_down - Local nport is down
 * @ln - FCOE Lnode
 * Returns - none
 *
 * Sends LINK_DOWN events to Lnode and its associated NPIV lnodes.
 *
 * This is called with hw lock held.
 */
static void
csio_ln_down(struct csio_lnode *ln)
{
	csio_post_event_lns(ln, CSIO_LNE_LINK_DOWN);
}
/*
 * csio_handle_link_down - Logical Linkdown event.
 * @hw - HW module.
 * @portid - Physical port number
 * @fcfi - FCF index.
 * @vnpi - VNP index.
 * Returns - none
 *
 * This event is received from FW when the virtual link goes down between
 * the Physical port [ENode] and the FCF. The lnode and its associated NPIV
 * lnodes hosted on this vnpi [VN-Port] will be de-instantiated.
 *
 * This is called with hw lock held.
 */
static void
csio_handle_link_down(struct csio_hw *hw, uint8_t portid, uint32_t fcfi,
		      uint32_t vnpi)
{
	struct csio_fcf_info *fp;
	struct csio_lnode *ln;

	/* Lookup lnode based on vnpi */
	ln = csio_ln_lookup_by_vnpi(hw, vnpi);
	if (ln) {
		fp = ln->fcfinfo;
		CSIO_INC_STATS(ln, n_link_down);

		/* Warn if linkdown is received when lnode is not ready */
		if (!csio_is_lnode_ready(ln)) {
			csio_ln_warn(ln,
				     "warn: FCOE link is already offline. "
				     "Ignoring FCoE linkdown event on portid %d\n",
				     portid);
			CSIO_INC_STATS(ln, n_evt_drop);
			return;
		}

		/* Verify portid */
		if (fp->portid != portid) {
			csio_ln_warn(ln,
				     "warn: FCOE linkdown recv with "
				     "invalid port %d\n", portid);
			CSIO_INC_STATS(ln, n_evt_drop);
			return;
		}

		/* Verify fcfi */
		if (ln->fcf_flowid != fcfi) {
			csio_ln_warn(ln,
				     "warn: FCOE linkdown recv with "
				     "invalid fcfi x%x\n", fcfi);
			CSIO_INC_STATS(ln, n_evt_drop);
			return;
		}

		csio_info(hw, "Port:%d - FCOE LINK DOWN\n", portid);

		/* Send LINK_DOWN event to lnode s/m */
		csio_ln_down(ln);

		return;
	} else {
		csio_warn(hw,
			  "warn: FCOE linkdown recv with invalid vnpi x%x\n",
			  vnpi);
		CSIO_INC_STATS(hw, n_evt_drop);
	}
}
/*
 * csio_is_lnode_ready - Checks FCOE lnode is in ready state.
 * @ln: Lnode module
 *
 * Returns True if FCOE lnode is in ready state.
 */
int
csio_is_lnode_ready(struct csio_lnode *ln)
{
	return (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_ready));
}
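/*
 * The state machine stores the current state as a pointer to that state's
 * handler function, so state checks here and in csio_lnode_state_to_str()
 * are pointer comparisons against the csio_lns_* handlers rather than
 * comparisons of an enum value.
 */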
/*****************************************************************************/
/* START: Lnode SM                                                           */
/*****************************************************************************/
/*
 * csio_lns_uninit - The request in uninit state.
 * @ln - FCOE lnode.
 * @evt - Event to be processed.
 *
 * Process the given lnode event which is currently in "uninit" state.
 * Invoked with HW lock held.
 * Return - none.
 */
static void
csio_lns_uninit(struct csio_lnode *ln, enum csio_ln_ev evt)
{
	struct csio_hw *hw = csio_lnode_to_hw(ln);
	struct csio_lnode *rln = hw->rln;
	int rv;

	CSIO_INC_STATS(ln, n_evt_sm[evt]);
	switch (evt) {
	case CSIO_LNE_LINKUP:
		csio_set_state(&ln->sm, csio_lns_online);
		/* Read FCF only for physical lnode */
		if (csio_is_phys_ln(ln)) {
			rv = csio_ln_read_fcf_entry(ln,
					csio_ln_read_fcf_cbfn);
			if (rv != 0) {
				/* TODO: Send HW RESET event */
				CSIO_INC_STATS(ln, n_err);
				break;
			}

			/* Add FCF record */
			list_add_tail(&ln->fcfinfo->list, &rln->fcf_lsthead);
		}

		rv = csio_ln_vnp_read(ln, csio_ln_vnp_read_cbfn);
		if (rv != 0) {
			/* TODO: Send HW RESET event */
			CSIO_INC_STATS(ln, n_err);
		}
		break;

	case CSIO_LNE_DOWN_LINK:
		break;

	default:
		csio_ln_dbg(ln,
			    "unexp ln event %d recv from did:x%x in "
			    "ln state[uninit].\n", evt, ln->nport_id);
		CSIO_INC_STATS(ln, n_evt_unexp);
		break;
	} /* switch event */
}
/*
 * csio_lns_online - The request in online state.
 * @ln - FCOE lnode.
 * @evt - Event to be processed.
 *
 * Process the given lnode event which is currently in "online" state.
 * Invoked with HW lock held.
 * Return - none.
 */
static void
csio_lns_online(struct csio_lnode *ln, enum csio_ln_ev evt)
{
	struct csio_hw *hw = csio_lnode_to_hw(ln);

	CSIO_INC_STATS(ln, n_evt_sm[evt]);
	switch (evt) {
	case CSIO_LNE_LINKUP:
		csio_ln_warn(ln,
			     "warn: FCOE link is up already. "
			     "Ignoring linkup on port:%d\n", ln->portid);
		CSIO_INC_STATS(ln, n_evt_drop);
		break;

	case CSIO_LNE_FAB_INIT_DONE:
		csio_set_state(&ln->sm, csio_lns_ready);

		spin_unlock_irq(&hw->lock);
		csio_lnode_async_event(ln, CSIO_LN_FC_LINKUP);
		spin_lock_irq(&hw->lock);

		break;

	case CSIO_LNE_LINK_DOWN:
		/* Fall through */
	case CSIO_LNE_DOWN_LINK:
		csio_set_state(&ln->sm, csio_lns_uninit);
		if (csio_is_phys_ln(ln)) {
			/* Remove FCF entry */
			list_del_init(&ln->fcfinfo->list);
		}
		break;

	default:
		csio_ln_dbg(ln,
			    "unexp ln event %d recv from did:x%x in "
			    "ln state[online].\n", evt, ln->nport_id);
		CSIO_INC_STATS(ln, n_evt_unexp);
		break;
	} /* switch event */
}
/*
 * csio_lns_ready - The request in ready state.
 * @ln - FCOE lnode.
 * @evt - Event to be processed.
 *
 * Process the given lnode event which is currently in "ready" state.
 * Invoked with HW lock held.
 * Return - none.
 */
static void
csio_lns_ready(struct csio_lnode *ln, enum csio_ln_ev evt)
{
	struct csio_hw *hw = csio_lnode_to_hw(ln);

	CSIO_INC_STATS(ln, n_evt_sm[evt]);
	switch (evt) {
	case CSIO_LNE_FAB_INIT_DONE:
		csio_ln_dbg(ln,
			    "ignoring event %d recv from did x%x "
			    "in ln state[ready].\n", evt, ln->nport_id);
		CSIO_INC_STATS(ln, n_evt_drop);
		break;

	case CSIO_LNE_LINK_DOWN:
		csio_set_state(&ln->sm, csio_lns_offline);
		csio_post_event_rns(ln, CSIO_RNFE_DOWN);

		spin_unlock_irq(&hw->lock);
		csio_lnode_async_event(ln, CSIO_LN_FC_LINKDOWN);
		spin_lock_irq(&hw->lock);

		if (csio_is_phys_ln(ln)) {
			/* Remove FCF entry */
			list_del_init(&ln->fcfinfo->list);
		}
		break;

	case CSIO_LNE_DOWN_LINK:
		csio_set_state(&ln->sm, csio_lns_offline);
		csio_post_event_rns(ln, CSIO_RNFE_DOWN);

		/* Host needs to issue aborts in case FW has not returned
		 * the WRs with status "ABORTED".
		 */
		spin_unlock_irq(&hw->lock);
		csio_lnode_async_event(ln, CSIO_LN_FC_LINKDOWN);
		spin_lock_irq(&hw->lock);

		if (csio_is_phys_ln(ln)) {
			/* Remove FCF entry */
			list_del_init(&ln->fcfinfo->list);
		}
		break;

	case CSIO_LNE_CLOSE:
		csio_set_state(&ln->sm, csio_lns_uninit);
		csio_post_event_rns(ln, CSIO_RNFE_CLOSE);
		break;

	case CSIO_LNE_LOGO:
		csio_set_state(&ln->sm, csio_lns_offline);
		csio_post_event_rns(ln, CSIO_RNFE_DOWN);
		break;

	default:
		csio_ln_dbg(ln,
			    "unexp ln event %d recv from did:x%x in "
			    "ln state[ready].\n", evt, ln->nport_id);
		CSIO_INC_STATS(ln, n_evt_unexp);
		CSIO_DB_ASSERT(0);
		break;
	} /* switch event */
}
/*
 * csio_lns_offline - The request in offline state.
 * @ln - FCOE lnode.
 * @evt - Event to be processed.
 *
 * Process the given lnode event which is currently in "offline" state.
 * Invoked with HW lock held.
 * Return - none.
 */
static void
csio_lns_offline(struct csio_lnode *ln, enum csio_ln_ev evt)
{
	struct csio_hw *hw = csio_lnode_to_hw(ln);
	struct csio_lnode *rln = hw->rln;
	int rv;

	CSIO_INC_STATS(ln, n_evt_sm[evt]);
	switch (evt) {
	case CSIO_LNE_LINKUP:
		csio_set_state(&ln->sm, csio_lns_online);
		/* Read FCF only for physical lnode */
		if (csio_is_phys_ln(ln)) {
			rv = csio_ln_read_fcf_entry(ln,
					csio_ln_read_fcf_cbfn);
			if (rv != 0) {
				/* TODO: Send HW RESET event */
				CSIO_INC_STATS(ln, n_err);
				break;
			}

			/* Add FCF record */
			list_add_tail(&ln->fcfinfo->list, &rln->fcf_lsthead);
		}

		rv = csio_ln_vnp_read(ln, csio_ln_vnp_read_cbfn);
		if (rv != 0) {
			/* TODO: Send HW RESET event */
			CSIO_INC_STATS(ln, n_err);
		}
		break;

	case CSIO_LNE_LINK_DOWN:
	case CSIO_LNE_DOWN_LINK:
	case CSIO_LNE_LOGO:
		csio_ln_dbg(ln,
			    "ignoring event %d recv from did x%x "
			    "in ln state[offline].\n", evt, ln->nport_id);
		CSIO_INC_STATS(ln, n_evt_drop);
		break;

	case CSIO_LNE_CLOSE:
		csio_set_state(&ln->sm, csio_lns_uninit);
		csio_post_event_rns(ln, CSIO_RNFE_CLOSE);
		break;

	default:
		csio_ln_dbg(ln,
			    "unexp ln event %d recv from did:x%x in "
			    "ln state[offline]\n", evt, ln->nport_id);
		CSIO_INC_STATS(ln, n_evt_unexp);
		CSIO_DB_ASSERT(0);
		break;
	} /* switch event */
}
/*****************************************************************************/
/* END: Lnode SM                                                             */
/*****************************************************************************/

static void
csio_free_fcfinfo(struct kref *kref)
{
	struct csio_fcf_info *fcfinfo = container_of(kref,
						struct csio_fcf_info, kref);
	kfree(fcfinfo);
}
/* Helper routines for attributes */
/*
 * csio_lnode_state_to_str - Get current state of FCOE lnode.
 * @ln - lnode
 * @str - state of lnode.
 *
 */
void
csio_lnode_state_to_str(struct csio_lnode *ln, int8_t *str)
{
	if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_uninit)) {
		strcpy(str, "UNINIT");
		return;
	}
	if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_ready)) {
		strcpy(str, "READY");
		return;
	}
	if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_offline)) {
		strcpy(str, "OFFLINE");
		return;
	}
	strcpy(str, "UNKNOWN");
} /* csio_lnode_state_to_str */
int
csio_get_phy_port_stats(struct csio_hw *hw, uint8_t portid,
			struct fw_fcoe_port_stats *port_stats)
{
	struct csio_mb *mbp;
	struct fw_fcoe_port_cmd_params portparams;
	enum fw_retval retval;
	int idx;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		csio_err(hw, "FCoE FCF PARAMS command out of memory!\n");
		return -EINVAL;
	}
	portparams.portid = portid;

	for (idx = 1; idx <= 3; idx++) {
		portparams.idx = (idx-1)*6 + 1;
		portparams.nstats = 6;
		if (idx == 3)
			portparams.nstats = 4;
		csio_fcoe_read_portparams_init_mb(hw, mbp, CSIO_MB_DEFAULT_TMO,
						  &portparams, NULL);
		if (csio_mb_issue(hw, mbp)) {
			csio_err(hw, "Issue of FCoE port params failed!\n");
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}
		csio_mb_process_portparams_rsp(hw, mbp, &retval,
					       &portparams, port_stats);
	}

	mempool_free(mbp, hw->mb_mempool);
	return 0;
}
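/*
 * csio_get_phy_port_stats() gathers 16 per-port counters in three mailbox
 * round trips: portparams.idx selects the first counter of each batch
 * (1, 7, 13) and nstats its size (6, 6, then 4 for the final batch).
 */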
/*
 * csio_ln_mgmt_wr_handler - Mgmt Work Request handler.
 * @hw - HW module.
 * @wr - WR.
 * @len - WR len.
 * This handler is invoked when an outstanding mgmt WR is completed.
 * It is invoked in the context of the FW event worker thread for every
 * mgmt event received.
 * Return - none.
 */
static void
csio_ln_mgmt_wr_handler(struct csio_hw *hw, void *wr, uint32_t len)
{
	struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw);
	struct csio_ioreq *io_req = NULL;
	struct fw_fcoe_els_ct_wr *wr_cmd;

	wr_cmd = (struct fw_fcoe_els_ct_wr *) wr;

	if (len < sizeof(struct fw_fcoe_els_ct_wr)) {
		csio_err(mgmtm->hw,
			 "Invalid ELS CT WR length recvd, len:%x\n", len);
		mgmtm->stats.n_err++;
		return;
	}

	io_req = (struct csio_ioreq *) ((uintptr_t) wr_cmd->cookie);
	io_req->wr_status = csio_wr_status(wr_cmd);

	/* lookup ioreq exists in our active Q */
	spin_lock_irq(&hw->lock);
	if (csio_mgmt_req_lookup(mgmtm, io_req) != 0) {
		csio_err(mgmtm->hw,
			 "Error- Invalid IO handle recv in WR. handle: %p\n",
			 io_req);
		mgmtm->stats.n_err++;
		spin_unlock_irq(&hw->lock);
		return;
	}

	mgmtm = csio_hw_to_mgmtm(hw);

	/* Dequeue from active queue */
	list_del_init(&io_req->sm.sm_list);
	mgmtm->stats.n_active--;
	spin_unlock_irq(&hw->lock);

	/* io_req will be freed by completion handler */
	if (io_req->io_cbfn)
		io_req->io_cbfn(hw, io_req);
}
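/*
 * The 64-bit cookie in the ELS/CT WR round-trips the driver's request
 * identity through the firmware: csio_ln_prep_ecwr() places
 * io_req->fw_handle in wr->cookie on submission, and the handler above
 * casts the returned cookie back to a csio_ioreq pointer and validates it
 * against the active mgmt queue before completing the request.
 */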
  1270. /**
  1271. * csio_fcoe_fwevt_handler - Event handler for Firmware FCoE events.
  1272. * @hw: HW module
  1273. * @cpl_op: CPL opcode
  1274. * @cmd: FW cmd/WR.
  1275. *
  1276. * Process received FCoE cmd/WR event from FW.
  1277. */
  1278. void
  1279. csio_fcoe_fwevt_handler(struct csio_hw *hw, __u8 cpl_op, __be64 *cmd)
  1280. {
  1281. struct csio_lnode *ln;
  1282. struct csio_rnode *rn;
  1283. uint8_t portid, opcode = *(uint8_t *)cmd;
  1284. struct fw_fcoe_link_cmd *lcmd;
  1285. struct fw_wr_hdr *wr;
  1286. struct fw_rdev_wr *rdev_wr;
  1287. enum fw_fcoe_link_status lstatus;
  1288. uint32_t fcfi, rdev_flowid, vnpi;
  1289. enum csio_ln_ev evt;
  1290. if (cpl_op == CPL_FW6_MSG && opcode == FW_FCOE_LINK_CMD) {
  1291. lcmd = (struct fw_fcoe_link_cmd *)cmd;
  1292. lstatus = lcmd->lstatus;
  1293. portid = FW_FCOE_LINK_CMD_PORTID_GET(
  1294. ntohl(lcmd->op_to_portid));
  1295. fcfi = FW_FCOE_LINK_CMD_FCFI_GET(ntohl(lcmd->sub_opcode_fcfi));
  1296. vnpi = FW_FCOE_LINK_CMD_VNPI_GET(ntohl(lcmd->vnpi_pkd));
  1297. if (lstatus == FCOE_LINKUP) {
  1298. /* HW lock here */
  1299. spin_lock_irq(&hw->lock);
  1300. csio_handle_link_up(hw, portid, fcfi, vnpi);
  1301. spin_unlock_irq(&hw->lock);
  1302. /* HW un lock here */
  1303. } else if (lstatus == FCOE_LINKDOWN) {
  1304. /* HW lock here */
  1305. spin_lock_irq(&hw->lock);
  1306. csio_handle_link_down(hw, portid, fcfi, vnpi);
  1307. spin_unlock_irq(&hw->lock);
  1308. /* HW un lock here */
  1309. } else {
  1310. csio_warn(hw, "Unexpected FCOE LINK status:0x%x\n",
  1311. lcmd->lstatus);
  1312. CSIO_INC_STATS(hw, n_cpl_unexp);
  1313. }
  1314. } else if (cpl_op == CPL_FW6_PLD) {
  1315. wr = (struct fw_wr_hdr *) (cmd + 4);
  1316. if (FW_WR_OP_G(be32_to_cpu(wr->hi))
  1317. == FW_RDEV_WR) {
  1318. rdev_wr = (struct fw_rdev_wr *) (cmd + 4);
  1319. rdev_flowid = FW_RDEV_WR_FLOWID_GET(
  1320. ntohl(rdev_wr->alloc_to_len16));
  1321. vnpi = FW_RDEV_WR_ASSOC_FLOWID_GET(
  1322. ntohl(rdev_wr->flags_to_assoc_flowid));
  1323. csio_dbg(hw,
  1324. "FW_RDEV_WR: flowid:x%x ev_cause:x%x "
  1325. "vnpi:0x%x\n", rdev_flowid,
  1326. rdev_wr->event_cause, vnpi);
  1327. if (rdev_wr->protocol != PROT_FCOE) {
  1328. csio_err(hw,
  1329. "FW_RDEV_WR: invalid proto:x%x "
  1330. "received with flowid:x%x\n",
  1331. rdev_wr->protocol,
  1332. rdev_flowid);
  1333. CSIO_INC_STATS(hw, n_evt_drop);
  1334. return;
  1335. }
  1336. /* HW lock here */
  1337. spin_lock_irq(&hw->lock);
  1338. ln = csio_ln_lookup_by_vnpi(hw, vnpi);
  1339. if (!ln) {
  1340. csio_err(hw,
  1341. "FW_DEV_WR: invalid vnpi:x%x received "
  1342. "with flowid:x%x\n", vnpi, rdev_flowid);
  1343. CSIO_INC_STATS(hw, n_evt_drop);
  1344. goto out_pld;
  1345. }
  1346. rn = csio_confirm_rnode(ln, rdev_flowid,
  1347. &rdev_wr->u.fcoe_rdev);
  1348. if (!rn) {
  1349. csio_ln_dbg(ln,
  1350. "Failed to confirm rnode "
  1351. "for flowid:x%x\n", rdev_flowid);
  1352. CSIO_INC_STATS(hw, n_evt_drop);
  1353. goto out_pld;
  1354. }
  1355. /* save previous event for debugging */
  1356. ln->prev_evt = ln->cur_evt;
  1357. ln->cur_evt = rdev_wr->event_cause;
  1358. CSIO_INC_STATS(ln, n_evt_fw[rdev_wr->event_cause]);
  1359. /* Translate all the fabric events to lnode SM events */
  1360. evt = CSIO_FWE_TO_LNE(rdev_wr->event_cause);
  1361. if (evt) {
  1362. csio_ln_dbg(ln,
  1363. "Posting event to lnode event:%d "
  1364. "cause:%d flowid:x%x\n", evt,
  1365. rdev_wr->event_cause, rdev_flowid);
  1366. csio_post_event(&ln->sm, evt);
  1367. }
  1368. /* Handover event to rn SM here. */
  1369. csio_rnode_fwevt_handler(rn, rdev_wr->event_cause);
  1370. out_pld:
  1371. spin_unlock_irq(&hw->lock);
  1372. return;
  1373. } else {
  1374. csio_warn(hw, "unexpected WR op(0x%x) recv\n",
  1375. FW_WR_OP_G(be32_to_cpu((wr->hi))));
  1376. CSIO_INC_STATS(hw, n_cpl_unexp);
  1377. }
	} else if (cpl_op == CPL_FW6_MSG) {
		wr = (struct fw_wr_hdr *) (cmd);
		if (FW_WR_OP_G(be32_to_cpu(wr->hi)) == FW_FCOE_ELS_CT_WR) {
			csio_ln_mgmt_wr_handler(hw, wr,
					sizeof(struct fw_fcoe_els_ct_wr));
		} else {
			csio_warn(hw, "unexpected WR op(0x%x) recv\n",
				  FW_WR_OP_G(be32_to_cpu((wr->hi))));
			CSIO_INC_STATS(hw, n_cpl_unexp);
		}
	} else {
		csio_warn(hw, "unexpected CPL op(0x%x) recv\n", opcode);
		CSIO_INC_STATS(hw, n_cpl_unexp);
	}
}
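
/*
 * Illustrative note (not part of the original driver): the handler above
 * fans firmware messages out in two stages, roughly:
 *
 *	CPL_FW6_MSG + FW_FCOE_LINK_CMD -> csio_handle_link_up()/_down()
 *	CPL_FW6_PLD + FW_RDEV_WR       -> lnode SM event + rnode handler
 *
 * Assuming CSIO_FWE_TO_LNE() is a table lookup from firmware event cause
 * to lnode SM event, a cause with no lnode-level meaning translates to 0,
 * so the sketch below only posts mapped events and always hands the raw
 * cause to the rnode state machine:
 *
 *	evt = CSIO_FWE_TO_LNE(cause);
 *	if (evt)
 *		csio_post_event(&ln->sm, evt);
 *	csio_rnode_fwevt_handler(rn, cause);
 */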
/**
 * csio_lnode_start - Kickstart lnode discovery.
 * @ln: lnode
 *
 * This routine kickstarts the discovery by issuing an FCOE_LINK (up) command.
 */
int
csio_lnode_start(struct csio_lnode *ln)
{
	int rv = 0;

	if (csio_is_phys_ln(ln) && !(ln->flags & CSIO_LNF_LINK_ENABLE)) {
		rv = csio_fcoe_enable_link(ln, 1);
		ln->flags |= CSIO_LNF_LINK_ENABLE;
	}

	return rv;
}

/**
 * csio_lnode_stop - Stop the lnode.
 * @ln: lnode
 *
 * This routine is invoked by HW module to stop lnode and its associated NPIV
 * lnodes.
 */
void
csio_lnode_stop(struct csio_lnode *ln)
{
	csio_post_event_lns(ln, CSIO_LNE_DOWN_LINK);
	if (csio_is_phys_ln(ln) && (ln->flags & CSIO_LNF_LINK_ENABLE)) {
		csio_fcoe_enable_link(ln, 0);
		ln->flags &= ~CSIO_LNF_LINK_ENABLE;
	}
	csio_ln_dbg(ln, "stopping ln :%p\n", ln);
}

/**
 * csio_lnode_close - Close an lnode.
 * @ln: lnode
 *
 * This routine is invoked by HW module to close an lnode and its
 * associated NPIV lnodes. Lnode and its associated NPIV lnodes are
 * set to uninitialized state.
 */
void
csio_lnode_close(struct csio_lnode *ln)
{
	csio_post_event_lns(ln, CSIO_LNE_CLOSE);
	if (csio_is_phys_ln(ln))
		ln->vnp_flowid = CSIO_INVALID_IDX;

	csio_ln_dbg(ln, "closed ln :%p\n", ln);
}
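
/*
 * Illustrative note (not part of the original driver): start/stop/close
 * form the lnode lifecycle as driven by the HW module. A plausible
 * sequence over a port's lifetime:
 *
 *	csio_lnode_start(ln);	// physical lnode: enable link, start disc.
 *	...
 *	csio_lnode_stop(ln);	// post CSIO_LNE_DOWN_LINK, disable link
 *	csio_lnode_close(ln);	// post CSIO_LNE_CLOSE, invalidate vnp_flowid
 *
 * Only the physical lnode toggles CSIO_LNF_LINK_ENABLE; its NPIV lnodes
 * are reached through the event fan-out in csio_post_event_lns().
 */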
/*
 * csio_ln_prep_ecwr - Prepare ELS/CT WR.
 * @io_req - IO request.
 * @wr_len - WR len
 * @immd_len - WR immediate data
 * @sub_op - Sub opcode
 * @sid - source portid.
 * @did - destination portid
 * @flow_id - flowid
 * @fw_wr - ELS/CT WR to be prepared.
 * Returns: 0 - on success
 */
static int
csio_ln_prep_ecwr(struct csio_ioreq *io_req, uint32_t wr_len,
		  uint32_t immd_len, uint8_t sub_op, uint32_t sid,
		  uint32_t did, uint32_t flow_id, uint8_t *fw_wr)
{
	struct fw_fcoe_els_ct_wr *wr;
	__be32 port_id;

	wr = (struct fw_fcoe_els_ct_wr *)fw_wr;
	wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_FCOE_ELS_CT_WR) |
				     FW_FCOE_ELS_CT_WR_IMMDLEN(immd_len));

	wr_len = DIV_ROUND_UP(wr_len, 16);
	wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(flow_id) |
				       FW_WR_LEN16_V(wr_len));
	wr->els_ct_type = sub_op;
	wr->ctl_pri = 0;
	wr->cp_en_class = 0;
	wr->cookie = io_req->fw_handle;
	wr->iqid = cpu_to_be16(csio_q_physiqid(
					io_req->lnode->hwp, io_req->iq_idx));
	wr->fl_to_sp = FW_FCOE_ELS_CT_WR_SP(1);
	wr->tmo_val = (uint8_t) io_req->tmo;
	port_id = htonl(sid);
	memcpy(wr->l_id, PORT_ID_PTR(port_id), 3);
	port_id = htonl(did);
	memcpy(wr->r_id, PORT_ID_PTR(port_id), 3);

	/* Prepare RSP SGL */
	wr->rsp_dmalen = cpu_to_be32(io_req->dma_buf.len);
	wr->rsp_dmaaddr = cpu_to_be64(io_req->dma_buf.paddr);
	return 0;
}
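
/*
 * Illustrative note (not part of the original driver): FC port IDs are
 * 24 bits wide, so only three bytes of each ID are copied into the WR
 * above. Assuming PORT_ID_PTR() skips the most-significant byte of the
 * big-endian value, a worked example:
 *
 *	sid = 0x010203;			// 24-bit source ID
 *	port_id = htonl(sid);		// bytes in memory: 00 01 02 03
 *	memcpy(wr->l_id, PORT_ID_PTR(port_id), 3);
 *					// l_id[] = { 0x01, 0x02, 0x03 }
 *
 * i.e. the byte order the firmware expects for the local/remote IDs.
 */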
/*
 * csio_ln_mgmt_submit_wr - Post elsct work request.
 * @mgmtm - mgmtm
 * @io_req - io request.
 * @sub_op - ELS or CT request type
 * @pld - Dma Payload buffer
 * @pld_len - Payload len
 * Prepares ELSCT Work request and sends it to FW.
 * Returns: 0 - on success
 */
static int
csio_ln_mgmt_submit_wr(struct csio_mgmtm *mgmtm, struct csio_ioreq *io_req,
		uint8_t sub_op, struct csio_dma_buf *pld,
		uint32_t pld_len)
{
	struct csio_wr_pair wrp;
	struct csio_lnode *ln = io_req->lnode;
	struct csio_rnode *rn = io_req->rnode;
	struct csio_hw *hw = mgmtm->hw;
	uint8_t fw_wr[64];
	struct ulptx_sgl dsgl;
	uint32_t wr_size = 0;
	uint8_t im_len = 0;
	uint32_t wr_off = 0;
	int ret = 0;

	/* Calculate WR Size for this ELS REQ */
	wr_size = sizeof(struct fw_fcoe_els_ct_wr);

	/* Send as immediate data if pld < 256 */
	if (pld_len < 256) {
		wr_size += ALIGN(pld_len, 8);
		im_len = (uint8_t)pld_len;
	} else
		wr_size += sizeof(struct ulptx_sgl);

	/* Roundup WR size in units of 16 bytes */
	wr_size = ALIGN(wr_size, 16);

	/* Get WR to send ELS REQ */
	ret = csio_wr_get(hw, mgmtm->eq_idx, wr_size, &wrp);
	if (ret != 0) {
		csio_err(hw, "Failed to get WR for ec_req %p ret:%d\n",
			io_req, ret);
		return ret;
	}

	/* Prepare Generic WR used by all ELS/CT cmd */
	csio_ln_prep_ecwr(io_req, wr_size, im_len, sub_op,
				ln->nport_id, rn->nport_id,
				csio_rn_flowid(rn),
				&fw_wr[0]);

	/* Copy ELS/CT WR CMD */
	csio_wr_copy_to_wrp(&fw_wr[0], &wrp, wr_off,
			sizeof(struct fw_fcoe_els_ct_wr));
	wr_off += sizeof(struct fw_fcoe_els_ct_wr);

	/* Copy payload to Immediate section of WR */
	if (im_len)
		csio_wr_copy_to_wrp(pld->vaddr, &wrp, wr_off, im_len);
	else {
		/* Program DSGL to dma payload */
		dsgl.cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
					ULPTX_MORE_F | ULPTX_NSGE_V(1));
		dsgl.len0 = cpu_to_be32(pld_len);
		dsgl.addr0 = cpu_to_be64(pld->paddr);
		csio_wr_copy_to_wrp(&dsgl, &wrp, ALIGN(wr_off, 8),
				    sizeof(struct ulptx_sgl));
	}

	/* Issue work request to xmit ELS/CT req to FW */
	csio_wr_issue(mgmtm->hw, mgmtm->eq_idx, false);
	return ret;
}
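
/*
 * Illustrative note (not part of the original driver): worked example of
 * the WR sizing above, assuming sizeof(struct fw_fcoe_els_ct_wr) is
 * 64 bytes. For a 116-byte ELS payload (< 256, so it rides as immediate
 * data):
 *
 *	wr_size = 64 + ALIGN(116, 8)	// 64 + 120 = 184
 *	wr_size = ALIGN(184, 16);	// 192 bytes on the EQ
 *
 * For a 2048-byte CT payload the immediate path is skipped and a single
 * ulptx_sgl entry (nsge = 1) carries the DMA address/length instead, so
 * the WR stays small regardless of payload size.
 */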
/*
 * csio_ln_mgmt_submit_req - Submit FCOE Mgmt request.
 * @io_req - IO Request
 * @io_cbfn - Completion handler.
 * @req_type - ELS or CT request type
 * @pld - Dma Payload buffer
 * @pld_len - Payload len
 *
 * This API is used to submit a management ELS/CT request.
 * It must be called with the hw lock held.
 * Returns: 0 - on success
 *	    -ENOMEM - on error.
 */
static int
csio_ln_mgmt_submit_req(struct csio_ioreq *io_req,
		void (*io_cbfn) (struct csio_hw *, struct csio_ioreq *),
		enum fcoe_cmn_type req_type, struct csio_dma_buf *pld,
		uint32_t pld_len)
{
	struct csio_hw *hw = csio_lnode_to_hw(io_req->lnode);
	struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw);
	int rv;

	io_req->io_cbfn = io_cbfn;	/* Upper layer callback handler */
	io_req->fw_handle = (uintptr_t) (io_req);
	io_req->eq_idx = mgmtm->eq_idx;
	io_req->iq_idx = mgmtm->iq_idx;

	rv = csio_ln_mgmt_submit_wr(mgmtm, io_req, req_type, pld, pld_len);
	if (rv == 0) {
		list_add_tail(&io_req->sm.sm_list, &mgmtm->active_q);
		mgmtm->stats.n_active++;
	}
	return rv;
}

/*
 * csio_ln_fdmi_init - FDMI Init entry point.
 * @ln: lnode
 */
static int
csio_ln_fdmi_init(struct csio_lnode *ln)
{
	struct csio_hw *hw = csio_lnode_to_hw(ln);
	struct csio_dma_buf *dma_buf;

	/* Allocate MGMT request required for FDMI */
	ln->mgmt_req = kzalloc(sizeof(struct csio_ioreq), GFP_KERNEL);
	if (!ln->mgmt_req) {
		csio_ln_err(ln, "Failed to alloc ioreq for FDMI\n");
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/* Allocate Dma buffers for FDMI response Payload */
	dma_buf = &ln->mgmt_req->dma_buf;
	dma_buf->len = 2048;
	dma_buf->vaddr = pci_alloc_consistent(hw->pdev, dma_buf->len,
						&dma_buf->paddr);
	if (!dma_buf->vaddr) {
		csio_err(hw, "Failed to alloc DMA buffer for FDMI!\n");
		kfree(ln->mgmt_req);
		ln->mgmt_req = NULL;
		return -ENOMEM;
	}

	ln->flags |= CSIO_LNF_FDMI_ENABLE;
	return 0;
}

/*
 * csio_ln_fdmi_exit - FDMI exit entry point.
 * @ln: lnode
 */
static int
csio_ln_fdmi_exit(struct csio_lnode *ln)
{
	struct csio_dma_buf *dma_buf;
	struct csio_hw *hw = csio_lnode_to_hw(ln);

	if (!ln->mgmt_req)
		return 0;

	dma_buf = &ln->mgmt_req->dma_buf;
	if (dma_buf->vaddr)
		pci_free_consistent(hw->pdev, dma_buf->len, dma_buf->vaddr,
				    dma_buf->paddr);

	kfree(ln->mgmt_req);
	return 0;
}

int
csio_scan_done(struct csio_lnode *ln, unsigned long ticks,
		unsigned long time, unsigned long max_scan_ticks,
		unsigned long delta_scan_ticks)
{
	int rv = 0;

	if (time >= max_scan_ticks)
		return 1;

	if (!ln->tgt_scan_tick)
		ln->tgt_scan_tick = ticks;

	if ((ticks - ln->tgt_scan_tick) >= delta_scan_ticks) {
		if (!ln->last_scan_ntgts)
			ln->last_scan_ntgts = ln->n_scsi_tgts;
		else {
			if (ln->last_scan_ntgts == ln->n_scsi_tgts)
				return 1;

			ln->last_scan_ntgts = ln->n_scsi_tgts;
		}
		ln->tgt_scan_tick = ticks;
	}

	return rv;
}
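
/*
 * Illustrative note (not part of the original driver): csio_scan_done()
 * is a settle-time heuristic. Scanning is declared complete (returns 1)
 * either when the hard deadline expires (time >= max_scan_ticks) or when
 * the discovered target count stays unchanged across one delta window.
 * Example timeline with window D = delta_scan_ticks:
 *
 *	t0:      first call, scan tick recorded
 *	t0 + D:  n_scsi_tgts = 4 -> first snapshot, keep scanning
 *	t0 + 2D: n_scsi_tgts = 6 -> changed, re-snapshot, keep scanning
 *	t0 + 3D: n_scsi_tgts = 6 -> stable for a full window, return 1
 */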
/*
 * csio_notify_lnodes:
 * @hw: HW module
 * @note: Notification
 *
 * Called from the HW SM to fan out notifications to the
 * Lnode SM. Since the HW SM is entered with lock held,
 * there is no need to hold locks here.
 *
 */
void
csio_notify_lnodes(struct csio_hw *hw, enum csio_ln_notify note)
{
	struct list_head *tmp;
	struct csio_lnode *ln;

	csio_dbg(hw, "Notifying all nodes of event %d\n", note);

	/* Traverse sibling lnodes list and send evt */
	list_for_each(tmp, &hw->sln_head) {
		ln = (struct csio_lnode *) tmp;

		switch (note) {
		case CSIO_LN_NOTIFY_HWREADY:
			csio_lnode_start(ln);
			break;

		case CSIO_LN_NOTIFY_HWRESET:
		case CSIO_LN_NOTIFY_HWREMOVE:
			csio_lnode_close(ln);
			break;

		case CSIO_LN_NOTIFY_HWSTOP:
			csio_lnode_stop(ln);
			break;

		default:
			break;
		}
	}
}

/*
 * csio_disable_lnodes:
 * @hw: HW module
 * @portid: port id
 * @disable: disable/enable flag.
 * If disable=1, disables all lnodes hosted on the given physical port,
 * otherwise enables all the lnodes on the given physical port.
 * This routine needs to be called with the hw lock held.
 */
void
csio_disable_lnodes(struct csio_hw *hw, uint8_t portid, bool disable)
{
	struct list_head *tmp;
	struct csio_lnode *ln;

	csio_dbg(hw, "Notifying event to all nodes of port:%d\n", portid);

	/* Traverse sibling lnodes list and send evt */
	list_for_each(tmp, &hw->sln_head) {
		ln = (struct csio_lnode *) tmp;
		if (ln->portid != portid)
			continue;

		if (disable)
			csio_lnode_stop(ln);
		else
			csio_lnode_start(ln);
	}
}

/*
 * csio_ln_init - Initialize an lnode.
 * @ln: lnode
 *
 */
static int
csio_ln_init(struct csio_lnode *ln)
{
	int rv = -EINVAL;
	struct csio_lnode *rln, *pln;
	struct csio_hw *hw = csio_lnode_to_hw(ln);

	csio_init_state(&ln->sm, csio_lns_uninit);
	ln->vnp_flowid = CSIO_INVALID_IDX;
	ln->fcf_flowid = CSIO_INVALID_IDX;

	if (csio_is_root_ln(ln)) {

		/* This is the lnode used during initialization */
		ln->fcfinfo = kzalloc(sizeof(struct csio_fcf_info), GFP_KERNEL);
		if (!ln->fcfinfo) {
			csio_ln_err(ln, "Failed to alloc FCF record\n");
			CSIO_INC_STATS(hw, n_err_nomem);
			goto err;
		}

		INIT_LIST_HEAD(&ln->fcf_lsthead);
		kref_init(&ln->fcfinfo->kref);

		if (csio_fdmi_enable && csio_ln_fdmi_init(ln))
			goto err;

	} else { /* Either a non-root physical or a virtual lnode */

		/*
		 * The rest is common for non-root physical and NPIV lnodes.
		 * Just get references to all other modules
		 */
		rln = csio_root_lnode(ln);

		if (csio_is_npiv_ln(ln)) {
			/* NPIV */
			pln = csio_parent_lnode(ln);
			kref_get(&pln->fcfinfo->kref);
			ln->fcfinfo = pln->fcfinfo;
		} else {
			/* Another non-root physical lnode (FCF) */
			ln->fcfinfo = kzalloc(sizeof(struct csio_fcf_info),
								GFP_KERNEL);
			if (!ln->fcfinfo) {
				csio_ln_err(ln, "Failed to alloc FCF info\n");
				CSIO_INC_STATS(hw, n_err_nomem);
				goto err;
			}

			kref_init(&ln->fcfinfo->kref);

			if (csio_fdmi_enable && csio_ln_fdmi_init(ln))
				goto err;
		}

	} /* if (!csio_is_root_ln(ln)) */

	return 0;
err:
	return rv;
}
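
/*
 * Illustrative note (not part of the original driver): fcfinfo ownership.
 * Root and non-root physical lnodes allocate their own csio_fcf_info and
 * hold the initial kref; an NPIV lnode instead shares its parent's record
 * via kref_get(). The matching kref_put() for both cases is in
 * csio_ln_exit() below, which releases the record through
 * csio_free_fcfinfo() once the last lnode using it goes away.
 */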
static void
csio_ln_exit(struct csio_lnode *ln)
{
	struct csio_lnode *pln;

	csio_cleanup_rns(ln);
	if (csio_is_npiv_ln(ln)) {
		pln = csio_parent_lnode(ln);
		kref_put(&pln->fcfinfo->kref, csio_free_fcfinfo);
	} else {
		kref_put(&ln->fcfinfo->kref, csio_free_fcfinfo);
		if (csio_fdmi_enable)
			csio_ln_fdmi_exit(ln);
	}
	ln->fcfinfo = NULL;
}

/**
 * csio_lnode_init - Initialize the members of an lnode.
 * @ln: lnode
 * @hw: HW module
 * @pln: Parent lnode, NULL for a physical port's lnode
 *
 */
int
csio_lnode_init(struct csio_lnode *ln, struct csio_hw *hw,
		struct csio_lnode *pln)
{
	int rv = -EINVAL;

	/* Link this lnode to hw */
	csio_lnode_to_hw(ln) = hw;

	/* Link child to parent if child lnode */
	if (pln)
		ln->pln = pln;
	else
		ln->pln = NULL;

	/* Initialize scsi_tgt and timers to zero */
	ln->n_scsi_tgts = 0;
	ln->last_scan_ntgts = 0;
	ln->tgt_scan_tick = 0;

	/* Initialize rnode list */
	INIT_LIST_HEAD(&ln->rnhead);
	INIT_LIST_HEAD(&ln->cln_head);

	/* Initialize log level for debug */
	ln->params.log_level = hw->params.log_level;

	if (csio_ln_init(ln))
		goto err;

	/* Add lnode to list of sibling or children lnodes */
	spin_lock_irq(&hw->lock);
	list_add_tail(&ln->sm.sm_list, pln ? &pln->cln_head : &hw->sln_head);
	if (pln)
		pln->num_vports++;
	spin_unlock_irq(&hw->lock);

	hw->num_lns++;
	return 0;
err:
	csio_lnode_to_hw(ln) = NULL;
	return rv;
}
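
/*
 * Illustrative note (not part of the original driver): csio_lnode_init()
 * and csio_lnode_exit() are symmetric. Init links the lnode into either
 * hw->sln_head (physical/sibling list) or pln->cln_head (NPIV/children
 * list) under hw->lock; exit unlinks it under the same lock, fixes up
 * num_vports on the parent, and re-derives hw->rln from whatever sibling
 * remains at the head of the list.
 */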
/**
 * csio_lnode_exit - De-instantiate an lnode.
 * @ln: lnode
 *
 */
void
csio_lnode_exit(struct csio_lnode *ln)
{
	struct csio_hw *hw = csio_lnode_to_hw(ln);

	csio_ln_exit(ln);

	/* Remove this lnode from hw->sln_head */
	spin_lock_irq(&hw->lock);

	list_del_init(&ln->sm.sm_list);

	/* If it is children lnode, decrement the
	 * counter in its parent lnode
	 */
	if (ln->pln)
		ln->pln->num_vports--;

	/* Update root lnode pointer */
	if (list_empty(&hw->sln_head))
		hw->rln = NULL;
	else
		hw->rln = (struct csio_lnode *)csio_list_next(&hw->sln_head);

	spin_unlock_irq(&hw->lock);

	csio_lnode_to_hw(ln) = NULL;
	hw->num_lns--;
}