csio_rnode.c

/*
 * This file is part of the Chelsio FCoE driver for Linux.
 *
 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/string.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_els.h>
#include <scsi/fc/fc_fs.h>

#include "csio_hw.h"
#include "csio_lnode.h"
#include "csio_rnode.h"

static int csio_rnode_init(struct csio_rnode *, struct csio_lnode *);
static void csio_rnode_exit(struct csio_rnode *);

/* State machine forward declarations */
static void csio_rns_uninit(struct csio_rnode *, enum csio_rn_ev);
static void csio_rns_ready(struct csio_rnode *, enum csio_rn_ev);
static void csio_rns_offline(struct csio_rnode *, enum csio_rn_ev);
static void csio_rns_disappeared(struct csio_rnode *, enum csio_rn_ev);

/* RNF event mapping */
static enum csio_rn_ev fwevt_to_rnevt[] = {
	CSIO_RNFE_NONE,		/* None */
	CSIO_RNFE_LOGGED_IN,	/* PLOGI_ACC_RCVD  */
	CSIO_RNFE_NONE,		/* PLOGI_RJT_RCVD  */
	CSIO_RNFE_PLOGI_RECV,	/* PLOGI_RCVD	   */
	CSIO_RNFE_LOGO_RECV,	/* PLOGO_RCVD	   */
	CSIO_RNFE_PRLI_DONE,	/* PRLI_ACC_RCVD   */
	CSIO_RNFE_NONE,		/* PRLI_RJT_RCVD   */
	CSIO_RNFE_PRLI_RECV,	/* PRLI_RCVD	   */
	CSIO_RNFE_PRLO_RECV,	/* PRLO_RCVD	   */
	CSIO_RNFE_NONE,		/* NPORT_ID_CHGD   */
	CSIO_RNFE_LOGO_RECV,	/* FLOGO_RCVD	   */
	CSIO_RNFE_NONE,		/* CLR_VIRT_LNK_RCVD */
	CSIO_RNFE_LOGGED_IN,	/* FLOGI_ACC_RCVD   */
	CSIO_RNFE_NONE,		/* FLOGI_RJT_RCVD   */
	CSIO_RNFE_LOGGED_IN,	/* FDISC_ACC_RCVD   */
	CSIO_RNFE_NONE,		/* FDISC_RJT_RCVD   */
	CSIO_RNFE_NONE,		/* FLOGI_TMO_MAX_RETRY */
	CSIO_RNFE_NONE,		/* IMPL_LOGO_ADISC_ACC */
	CSIO_RNFE_NONE,		/* IMPL_LOGO_ADISC_RJT */
	CSIO_RNFE_NONE,		/* IMPL_LOGO_ADISC_CNFLT */
	CSIO_RNFE_NONE,		/* PRLI_TMO		*/
	CSIO_RNFE_NONE,		/* ADISC_TMO		*/
	CSIO_RNFE_NAME_MISSING,	/* RSCN_DEV_LOST */
	CSIO_RNFE_NONE,		/* SCR_ACC_RCVD	*/
	CSIO_RNFE_NONE,		/* ADISC_RJT_RCVD */
	CSIO_RNFE_NONE,		/* LOGO_SNT */
	CSIO_RNFE_LOGO_RECV,	/* PROTO_ERR_IMPL_LOGO */
};

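/*
 * Map a firmware rdev event to the corresponding rnode SM event.
 * Firmware events beyond PROTO_ERR_IMPL_LOGO map to CSIO_RNFE_NONE.
 */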
#define CSIO_FWE_TO_RNFE(_evt)	((_evt > PROTO_ERR_IMPL_LOGO) ?	\
					CSIO_RNFE_NONE :	\
					fwevt_to_rnevt[_evt])

int
csio_is_rnode_ready(struct csio_rnode *rn)
{
	return csio_match_state(rn, csio_rns_ready);
}

static int
csio_is_rnode_uninit(struct csio_rnode *rn)
{
	return csio_match_state(rn, csio_rns_uninit);
}

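/*
 * csio_is_rnode_wka - Is this a well-known address (WKA) rport?
 * @rport_type: rport type reported in the firmware rdev entry
 *
 * Returns 1 for fabric, name server and FDMI ports, 0 otherwise.
 */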
static int
csio_is_rnode_wka(uint8_t rport_type)
{
	if ((rport_type == FLOGI_VFPORT) ||
	    (rport_type == FDISC_VFPORT) ||
	    (rport_type == NS_VNPORT) ||
	    (rport_type == FDMI_VNPORT))
		return 1;

	return 0;
}

/*
 * csio_rn_lookup - Finds the rnode with the given flowid
 * @ln: lnode
 * @flowid: flowid
 *
 * Does the rnode lookup on the given lnode and flowid. If no matching
 * entry is found, NULL is returned.
 */
static struct csio_rnode *
csio_rn_lookup(struct csio_lnode *ln, uint32_t flowid)
{
	struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
	struct list_head *tmp;
	struct csio_rnode *rn;

	list_for_each(tmp, &rnhead->sm.sm_list) {
		rn = (struct csio_rnode *) tmp;
		if (rn->flowid == flowid)
			return rn;
	}

	return NULL;
}

/*
 * csio_rn_lookup_wwpn - Finds the rnode with the given wwpn
 * @ln: lnode
 * @wwpn: wwpn
 *
 * Does the rnode lookup on the given lnode and wwpn. If no matching
 * entry is found, NULL is returned.
 */
static struct csio_rnode *
csio_rn_lookup_wwpn(struct csio_lnode *ln, uint8_t *wwpn)
{
	struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
	struct list_head *tmp;
	struct csio_rnode *rn;

	list_for_each(tmp, &rnhead->sm.sm_list) {
		rn = (struct csio_rnode *) tmp;
		if (!memcmp(csio_rn_wwpn(rn), wwpn, 8))
			return rn;
	}

	return NULL;
}

/**
 * csio_rnode_lookup_portid - Finds the rnode with the given portid
 * @ln: lnode
 * @portid: port id
 *
 * Lookup the rnode list for a given portid. If no matching entry is
 * found, NULL is returned.
 */
struct csio_rnode *
csio_rnode_lookup_portid(struct csio_lnode *ln, uint32_t portid)
{
	struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
	struct list_head *tmp;
	struct csio_rnode *rn;

	list_for_each(tmp, &rnhead->sm.sm_list) {
		rn = (struct csio_rnode *) tmp;
		if (rn->nport_id == portid)
			return rn;
	}

	return NULL;
}

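/*
 * csio_rn_dup_flowid - Check whether a flowid is already active elsewhere.
 * @ln: lnode on which the rdev event was received
 * @rdev_flowid: remote device flowid to check
 * @vnp_flowid: returns the VNP flowid of the lnode that already owns it
 *
 * Walks the other lnodes on this HW and returns 1 if a ready rnode with
 * the same flowid exists on any of them, 0 otherwise.
 */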
static int
csio_rn_dup_flowid(struct csio_lnode *ln, uint32_t rdev_flowid,
		   uint32_t *vnp_flowid)
{
	struct csio_rnode *rnhead;
	struct list_head *tmp, *tmp1;
	struct csio_rnode *rn;
	struct csio_lnode *ln_tmp;
	struct csio_hw *hw = csio_lnode_to_hw(ln);

	list_for_each(tmp1, &hw->sln_head) {
		ln_tmp = (struct csio_lnode *) tmp1;
		if (ln_tmp == ln)
			continue;

		rnhead = (struct csio_rnode *)&ln_tmp->rnhead;
		list_for_each(tmp, &rnhead->sm.sm_list) {
			rn = (struct csio_rnode *) tmp;
			if (csio_is_rnode_ready(rn)) {
				if (rn->flowid == rdev_flowid) {
					*vnp_flowid = csio_ln_flowid(ln_tmp);
					return 1;
				}
			}
		}
	}

	return 0;
}

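/*
 * csio_alloc_rnode - Allocate an rnode from the HW rnode mempool.
 * @ln: lnode that will own the new rnode
 *
 * The rnode is zeroed, initialized and linked onto the lnode's rnode
 * list. Returns NULL on allocation or init failure.
 */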
static struct csio_rnode *
csio_alloc_rnode(struct csio_lnode *ln)
{
	struct csio_hw *hw = csio_lnode_to_hw(ln);
	struct csio_rnode *rn = mempool_alloc(hw->rnode_mempool, GFP_ATOMIC);

	if (!rn)
		goto err;

	memset(rn, 0, sizeof(struct csio_rnode));
	if (csio_rnode_init(rn, ln))
		goto err_free;

	CSIO_INC_STATS(ln, n_rnode_alloc);

	return rn;

err_free:
	mempool_free(rn, hw->rnode_mempool);
err:
	CSIO_INC_STATS(ln, n_rnode_nomem);
	return NULL;
}

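/*
 * csio_free_rnode - Return an rnode to the HW rnode mempool.
 * @rn: rnode to free
 */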
static void
csio_free_rnode(struct csio_rnode *rn)
{
	struct csio_hw *hw = csio_lnode_to_hw(csio_rnode_to_lnode(rn));

	csio_rnode_exit(rn);
	CSIO_INC_STATS(rn->lnp, n_rnode_free);
	mempool_free(rn, hw->rnode_mempool);
}

/*
 * csio_get_rnode - Gets rnode with the given flowid
 * @ln: lnode
 * @flowid: flow id
 *
 * Does the rnode lookup on the given lnode and flowid. If no matching
 * rnode is found, a new rnode with the given flowid is allocated and
 * returned.
 */
static struct csio_rnode *
csio_get_rnode(struct csio_lnode *ln, uint32_t flowid)
{
	struct csio_rnode *rn;

	rn = csio_rn_lookup(ln, flowid);
	if (!rn) {
		rn = csio_alloc_rnode(ln);
		if (!rn)
			return NULL;

		rn->flowid = flowid;
	}

	return rn;
}

/*
 * csio_put_rnode - Frees the given rnode
 * @ln: lnode
 * @rn: rnode to free
 *
 * Returns the rnode to the rnode mempool. The rnode must already be in
 * the uninit state.
 */
void
csio_put_rnode(struct csio_lnode *ln, struct csio_rnode *rn)
{
	CSIO_DB_ASSERT(csio_is_rnode_uninit(rn) != 0);
	csio_free_rnode(rn);
}

/*
 * csio_confirm_rnode - confirms rnode based on wwpn.
 * @ln: lnode
 * @rdev_flowid: remote device flowid
 * @rdevp: remote device params
 *
 * This routine searches the rnode list for another rnode with the same
 * wwpn as the new rnode. If a match is found, the matched rnode is
 * returned; otherwise a new rnode is allocated and returned.
 */
struct csio_rnode *
csio_confirm_rnode(struct csio_lnode *ln, uint32_t rdev_flowid,
		   struct fcoe_rdev_entry *rdevp)
{
	uint8_t rport_type;
	struct csio_rnode *rn, *match_rn;
	uint32_t vnp_flowid = 0;
	__be32 *port_id;

	port_id = (__be32 *)&rdevp->r_id[0];
	rport_type =
		FW_RDEV_WR_RPORT_TYPE_GET(rdevp->rd_xfer_rdy_to_rport_type);

	/* Drop rdev event for cntrl port */
	if (rport_type == FAB_CTLR_VNPORT) {
		csio_ln_dbg(ln,
			    "Unhandled rport_type:%d recv in rdev evt "
			    "ssni:x%x\n", rport_type, rdev_flowid);
		return NULL;
	}

	/* Lookup on flowid */
	rn = csio_rn_lookup(ln, rdev_flowid);
	if (!rn) {

		/* Drop events with duplicate flowid */
		if (csio_rn_dup_flowid(ln, rdev_flowid, &vnp_flowid)) {
			csio_ln_warn(ln,
				     "ssni:%x already active on vnpi:%x",
				     rdev_flowid, vnp_flowid);
			return NULL;
		}

		/* Lookup on wwpn for NPORTs */
		rn = csio_rn_lookup_wwpn(ln, rdevp->wwpn);
		if (!rn)
			goto alloc_rnode;

	} else {
		/* Lookup well-known ports with nport id */
		if (csio_is_rnode_wka(rport_type)) {
			match_rn = csio_rnode_lookup_portid(ln,
				((ntohl(*port_id) >> 8) & CSIO_DID_MASK));
			if (match_rn == NULL) {
				csio_rn_flowid(rn) = CSIO_INVALID_IDX;
				goto alloc_rnode;
			}

			/*
			 * Now compare the wwpn to confirm that the same
			 * port relogged in. If so, update the matched rn.
			 * Else, go ahead and alloc a new rnode.
			 */
			if (!memcmp(csio_rn_wwpn(match_rn), rdevp->wwpn, 8)) {
				if (rn == match_rn)
					goto found_rnode;
				csio_ln_dbg(ln,
					    "nport_id:x%x and wwpn:%llx"
					    " match for ssni:x%x\n",
					    rn->nport_id,
					    wwn_to_u64(rdevp->wwpn),
					    rdev_flowid);
				if (csio_is_rnode_ready(rn)) {
					csio_ln_warn(ln,
						     "rnode is already "
						     "active ssni:x%x\n",
						     rdev_flowid);
					CSIO_ASSERT(0);
				}
				csio_rn_flowid(rn) = CSIO_INVALID_IDX;
				rn = match_rn;

				/* Update rn */
				goto found_rnode;
			}

			csio_rn_flowid(rn) = CSIO_INVALID_IDX;
			goto alloc_rnode;
		}

		/* wwpn match */
		if (!memcmp(csio_rn_wwpn(rn), rdevp->wwpn, 8))
			goto found_rnode;

		/* Search for an rnode that has the same wwpn */
		match_rn = csio_rn_lookup_wwpn(ln, rdevp->wwpn);
		if (match_rn != NULL) {
			csio_ln_dbg(ln,
				    "ssni:x%x changed for rport name(wwpn):%llx "
				    "did:x%x\n", rdev_flowid,
				    wwn_to_u64(rdevp->wwpn),
				    match_rn->nport_id);
			csio_rn_flowid(rn) = CSIO_INVALID_IDX;
			rn = match_rn;
		} else {
			csio_ln_dbg(ln,
				    "rnode wwpn mismatch found ssni:x%x "
				    "name(wwpn):%llx\n",
				    rdev_flowid,
				    wwn_to_u64(csio_rn_wwpn(rn)));
			if (csio_is_rnode_ready(rn)) {
				csio_ln_warn(ln,
					     "rnode is already active "
					     "wwpn:%llx ssni:x%x\n",
					     wwn_to_u64(csio_rn_wwpn(rn)),
					     rdev_flowid);
				CSIO_ASSERT(0);
			}
			csio_rn_flowid(rn) = CSIO_INVALID_IDX;
			goto alloc_rnode;
		}
	}

found_rnode:
	csio_ln_dbg(ln, "found rnode:%p ssni:x%x name(wwpn):%llx\n",
		    rn, rdev_flowid, wwn_to_u64(rdevp->wwpn));

	/* Update flowid */
	csio_rn_flowid(rn) = rdev_flowid;

	/* update rdev entry */
	rn->rdev_entry = rdevp;
	CSIO_INC_STATS(ln, n_rnode_match);
	return rn;

alloc_rnode:
	rn = csio_get_rnode(ln, rdev_flowid);
	if (!rn)
		return NULL;

	csio_ln_dbg(ln, "alloc rnode:%p ssni:x%x name(wwpn):%llx\n",
		    rn, rdev_flowid, wwn_to_u64(rdevp->wwpn));

	/* update rdev entry */
	rn->rdev_entry = rdevp;
	return rn;
}

/*
 * csio_rn_verify_rparams - Verify remote device parameters.
 * @ln: lnode
 * @rn: rnode
 * @rdevp: remote device params
 *
 * Returns 0 if the rparams are valid, -EINVAL otherwise.
 */
static int
csio_rn_verify_rparams(struct csio_lnode *ln, struct csio_rnode *rn,
		       struct fcoe_rdev_entry *rdevp)
{
	uint8_t null[8];
	uint8_t rport_type;
	uint8_t fc_class;
	__be32 *did;

	did = (__be32 *) &rdevp->r_id[0];
	rport_type =
		FW_RDEV_WR_RPORT_TYPE_GET(rdevp->rd_xfer_rdy_to_rport_type);
	switch (rport_type) {
	case FLOGI_VFPORT:
		rn->role = CSIO_RNFR_FABRIC;
		if (((ntohl(*did) >> 8) & CSIO_DID_MASK) != FC_FID_FLOGI) {
			csio_ln_err(ln, "ssni:x%x invalid fabric portid\n",
				    csio_rn_flowid(rn));
			return -EINVAL;
		}
		/* NPIV support */
		if (FW_RDEV_WR_NPIV_GET(rdevp->vft_to_qos))
			ln->flags |= CSIO_LNF_NPIVSUPP;

		break;

	case NS_VNPORT:
		rn->role = CSIO_RNFR_NS;
		if (((ntohl(*did) >> 8) & CSIO_DID_MASK) != FC_FID_DIR_SERV) {
			csio_ln_err(ln, "ssni:x%x invalid fabric portid\n",
				    csio_rn_flowid(rn));
			return -EINVAL;
		}
		break;

	case REG_FC4_VNPORT:
	case REG_VNPORT:
		rn->role = CSIO_RNFR_NPORT;
		if (rdevp->event_cause == PRLI_ACC_RCVD ||
		    rdevp->event_cause == PRLI_RCVD) {
			if (FW_RDEV_WR_TASK_RETRY_ID_GET(
						rdevp->enh_disc_to_tgt))
				rn->fcp_flags |= FCP_SPPF_OVLY_ALLOW;

			if (FW_RDEV_WR_RETRY_GET(rdevp->enh_disc_to_tgt))
				rn->fcp_flags |= FCP_SPPF_RETRY;

			if (FW_RDEV_WR_CONF_CMPL_GET(rdevp->enh_disc_to_tgt))
				rn->fcp_flags |= FCP_SPPF_CONF_COMPL;

			if (FW_RDEV_WR_TGT_GET(rdevp->enh_disc_to_tgt))
				rn->role |= CSIO_RNFR_TARGET;

			if (FW_RDEV_WR_INI_GET(rdevp->enh_disc_to_tgt))
				rn->role |= CSIO_RNFR_INITIATOR;
		}

		break;

	case FDMI_VNPORT:
	case FAB_CTLR_VNPORT:
		rn->role = 0;
		break;

	default:
		csio_ln_err(ln, "ssni:x%x invalid rport type recv x%x\n",
			    csio_rn_flowid(rn), rport_type);
		return -EINVAL;
	}

	/* validate wwpn/wwnn for Name server/remote port */
	if (rport_type == REG_VNPORT || rport_type == NS_VNPORT) {
		memset(null, 0, 8);
		if (!memcmp(rdevp->wwnn, null, 8)) {
			csio_ln_err(ln,
				    "ssni:x%x invalid wwnn received from"
				    " rport did:x%x\n",
				    csio_rn_flowid(rn),
				    (ntohl(*did) & CSIO_DID_MASK));
			return -EINVAL;
		}

		if (!memcmp(rdevp->wwpn, null, 8)) {
			csio_ln_err(ln,
				    "ssni:x%x invalid wwpn received from"
				    " rport did:x%x\n",
				    csio_rn_flowid(rn),
				    (ntohl(*did) & CSIO_DID_MASK));
			return -EINVAL;
		}
	}

	/* Copy wwnn, wwpn and nport id */
	rn->nport_id = (ntohl(*did) >> 8) & CSIO_DID_MASK;
	memcpy(csio_rn_wwnn(rn), rdevp->wwnn, 8);
	memcpy(csio_rn_wwpn(rn), rdevp->wwpn, 8);
	rn->rn_sparm.csp.sp_bb_data = rdevp->rcv_fr_sz;
	fc_class = FW_RDEV_WR_CLASS_GET(rdevp->vft_to_qos);
	rn->rn_sparm.clsp[fc_class - 1].cp_class = htons(FC_CPC_VALID);

	return 0;
}

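/*
 * __csio_reg_rnode - Register the rnode with the FC transport.
 * @rn: rnode
 *
 * Temporarily drops the HW lock across csio_reg_rnode(), then bumps the
 * lnode's SCSI target count for target rnodes and starts FDMI if this is
 * the management server port.
 */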
static void
__csio_reg_rnode(struct csio_rnode *rn)
{
	struct csio_lnode *ln = csio_rnode_to_lnode(rn);
	struct csio_hw *hw = csio_lnode_to_hw(ln);

	spin_unlock_irq(&hw->lock);
	csio_reg_rnode(rn);
	spin_lock_irq(&hw->lock);

	if (rn->role & CSIO_RNFR_TARGET)
		ln->n_scsi_tgts++;

	if (rn->nport_id == FC_FID_MGMT_SERV)
		csio_ln_fdmi_start(ln, (void *) rn);
}

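/*
 * __csio_unreg_rnode - Unregister the rnode from the FC transport.
 * @rn: rnode
 *
 * Any I/Os parked on the rnode's host_cmpl_q are spliced onto a local
 * list and cleaned up after the rnode is unregistered. The HW lock is
 * dropped across csio_unreg_rnode().
 */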
static void
__csio_unreg_rnode(struct csio_rnode *rn)
{
	struct csio_lnode *ln = csio_rnode_to_lnode(rn);
	struct csio_hw *hw = csio_lnode_to_hw(ln);
	LIST_HEAD(tmp_q);
	int cmpl = 0;

	if (!list_empty(&rn->host_cmpl_q)) {
		csio_dbg(hw, "Returning completion queue I/Os\n");
		list_splice_tail_init(&rn->host_cmpl_q, &tmp_q);
		cmpl = 1;
	}

	if (rn->role & CSIO_RNFR_TARGET) {
		ln->n_scsi_tgts--;
		ln->last_scan_ntgts--;
	}

	spin_unlock_irq(&hw->lock);
	csio_unreg_rnode(rn);
	spin_lock_irq(&hw->lock);

	/* Cleanup I/Os that were waiting for rnode to unregister */
	if (cmpl)
		csio_scsi_cleanup_io_q(csio_hw_to_scsim(hw), &tmp_q);
}

/*****************************************************************************/
/* START: Rnode SM */
/*****************************************************************************/

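/*
 * Rnode state machine overview (derived from the handlers below):
 *
 *   uninit      --LOGGED_IN/PLOGI_RECV-->  ready (after rparams verify)
 *   ready       --DOWN/LOGO_RECV------->   offline
 *   ready       --NAME_MISSING--------->   disappeared
 *   offline     --NAME_MISSING--------->   disappeared
 *   offline/disappeared --LOGGED_IN/PLOGI_RECV--> ready
 *   ready/offline/disappeared --CLOSE--> uninit
 */
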
/*
 * csio_rns_uninit - Rnode SM handler for the UNINIT state.
 * @rn: rnode
 * @evt: SM event
 */
static void
csio_rns_uninit(struct csio_rnode *rn, enum csio_rn_ev evt)
{
	struct csio_lnode *ln = csio_rnode_to_lnode(rn);
	int ret = 0;

	CSIO_INC_STATS(rn, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_RNFE_LOGGED_IN:
	case CSIO_RNFE_PLOGI_RECV:
		ret = csio_rn_verify_rparams(ln, rn, rn->rdev_entry);
		if (!ret) {
			csio_set_state(&rn->sm, csio_rns_ready);
			__csio_reg_rnode(rn);
		} else {
			CSIO_INC_STATS(rn, n_err_inval);
		}
		break;
	case CSIO_RNFE_LOGO_RECV:
		csio_ln_dbg(ln,
			    "ssni:x%x Ignoring event %d recv "
			    "in rn state[uninit]\n", csio_rn_flowid(rn), evt);
		CSIO_INC_STATS(rn, n_evt_drop);
		break;
	default:
		csio_ln_dbg(ln,
			    "ssni:x%x unexp event %d recv "
			    "in rn state[uninit]\n", csio_rn_flowid(rn), evt);
		CSIO_INC_STATS(rn, n_evt_unexp);
		break;
	}
}

/*
 * csio_rns_ready - Rnode SM handler for the READY state.
 * @rn: rnode
 * @evt: SM event
 */
static void
csio_rns_ready(struct csio_rnode *rn, enum csio_rn_ev evt)
{
	struct csio_lnode *ln = csio_rnode_to_lnode(rn);
	int ret = 0;

	CSIO_INC_STATS(rn, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_RNFE_LOGGED_IN:
	case CSIO_RNFE_PLOGI_RECV:
		csio_ln_dbg(ln,
			    "ssni:x%x Ignoring event %d recv from did:x%x "
			    "in rn state[ready]\n", csio_rn_flowid(rn), evt,
			    rn->nport_id);
		CSIO_INC_STATS(rn, n_evt_drop);
		break;

	case CSIO_RNFE_PRLI_DONE:
	case CSIO_RNFE_PRLI_RECV:
		ret = csio_rn_verify_rparams(ln, rn, rn->rdev_entry);
		if (!ret)
			__csio_reg_rnode(rn);
		else
			CSIO_INC_STATS(rn, n_err_inval);

		break;
	case CSIO_RNFE_DOWN:
		csio_set_state(&rn->sm, csio_rns_offline);
		__csio_unreg_rnode(rn);

		/* FW is expected to internally abort outstanding SCSI WRs
		 * and return all SCSI WRs to the host with status "ABORTED".
		 */
		break;

	case CSIO_RNFE_LOGO_RECV:
		csio_set_state(&rn->sm, csio_rns_offline);
		__csio_unreg_rnode(rn);

		/* FW is expected to internally abort outstanding SCSI WRs
		 * and return all SCSI WRs to the host with status "ABORTED".
		 */
		break;

	case CSIO_RNFE_CLOSE:
		/*
		 * Each rnode receives a CLOSE event when the driver is
		 * removed or the device is reset.
		 * Note: All outstanding IOs on the remote port need to be
		 * returned to the upper layer with an appropriate error
		 * before sending the CLOSE event.
		 */
		csio_set_state(&rn->sm, csio_rns_uninit);
		__csio_unreg_rnode(rn);
		break;

	case CSIO_RNFE_NAME_MISSING:
		csio_set_state(&rn->sm, csio_rns_disappeared);
		__csio_unreg_rnode(rn);

		/*
		 * FW is expected to internally abort outstanding SCSI WRs
		 * and return all SCSI WRs to the host with status "ABORTED".
		 */
		break;

	default:
		csio_ln_dbg(ln,
			    "ssni:x%x unexp event %d recv from did:x%x "
			    "in rn state[ready]\n", csio_rn_flowid(rn), evt,
			    rn->nport_id);
		CSIO_INC_STATS(rn, n_evt_unexp);
		break;
	}
}

/*
 * csio_rns_offline - Rnode SM handler for the OFFLINE state.
 * @rn: rnode
 * @evt: SM event
 */
static void
csio_rns_offline(struct csio_rnode *rn, enum csio_rn_ev evt)
{
	struct csio_lnode *ln = csio_rnode_to_lnode(rn);
	int ret = 0;

	CSIO_INC_STATS(rn, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_RNFE_LOGGED_IN:
	case CSIO_RNFE_PLOGI_RECV:
		ret = csio_rn_verify_rparams(ln, rn, rn->rdev_entry);
		if (!ret) {
			csio_set_state(&rn->sm, csio_rns_ready);
			__csio_reg_rnode(rn);
		} else {
			CSIO_INC_STATS(rn, n_err_inval);
			csio_post_event(&rn->sm, CSIO_RNFE_CLOSE);
		}
		break;

	case CSIO_RNFE_DOWN:
		csio_ln_dbg(ln,
			    "ssni:x%x Ignoring event %d recv from did:x%x "
			    "in rn state[offline]\n", csio_rn_flowid(rn), evt,
			    rn->nport_id);
		CSIO_INC_STATS(rn, n_evt_drop);
		break;

	case CSIO_RNFE_CLOSE:
		/* Each rnode receives a CLOSE event when the driver is
		 * removed or the device is reset.
		 * Note: All outstanding IOs on the remote port need to be
		 * returned to the upper layer with an appropriate error
		 * before sending the CLOSE event.
		 */
		csio_set_state(&rn->sm, csio_rns_uninit);
		break;

	case CSIO_RNFE_NAME_MISSING:
		csio_set_state(&rn->sm, csio_rns_disappeared);
		break;

	default:
		csio_ln_dbg(ln,
			    "ssni:x%x unexp event %d recv from did:x%x "
			    "in rn state[offline]\n", csio_rn_flowid(rn), evt,
			    rn->nport_id);
		CSIO_INC_STATS(rn, n_evt_unexp);
		break;
	}
}

/*
 * csio_rns_disappeared - Rnode SM handler for the DISAPPEARED state.
 * @rn: rnode
 * @evt: SM event
 */
static void
csio_rns_disappeared(struct csio_rnode *rn, enum csio_rn_ev evt)
{
	struct csio_lnode *ln = csio_rnode_to_lnode(rn);
	int ret = 0;

	CSIO_INC_STATS(rn, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_RNFE_LOGGED_IN:
	case CSIO_RNFE_PLOGI_RECV:
		ret = csio_rn_verify_rparams(ln, rn, rn->rdev_entry);
		if (!ret) {
			csio_set_state(&rn->sm, csio_rns_ready);
			__csio_reg_rnode(rn);
		} else {
			CSIO_INC_STATS(rn, n_err_inval);
			csio_post_event(&rn->sm, CSIO_RNFE_CLOSE);
		}
		break;

	case CSIO_RNFE_CLOSE:
		/* Each rnode receives a CLOSE event when the driver is
		 * removed or the device is reset.
		 * Note: All outstanding IOs on the remote port need to be
		 * returned to the upper layer with an appropriate error
		 * before sending the CLOSE event.
		 */
		csio_set_state(&rn->sm, csio_rns_uninit);
		break;

	case CSIO_RNFE_DOWN:
	case CSIO_RNFE_NAME_MISSING:
		csio_ln_dbg(ln,
			    "ssni:x%x Ignoring event %d recv from did:x%x "
			    "in rn state[disappeared]\n", csio_rn_flowid(rn),
			    evt, rn->nport_id);
		break;

	default:
		csio_ln_dbg(ln,
			    "ssni:x%x unexp event %d recv from did:x%x "
			    "in rn state[disappeared]\n", csio_rn_flowid(rn),
			    evt, rn->nport_id);
		CSIO_INC_STATS(rn, n_evt_unexp);
		break;
	}
}

/*****************************************************************************/
/* END: Rnode SM */
/*****************************************************************************/

/*
 * csio_rnode_devloss_handler - Device loss event handler
 * @rn: rnode
 *
 * Post event to close rnode SM and free rnode.
 */
void
csio_rnode_devloss_handler(struct csio_rnode *rn)
{
	struct csio_lnode *ln = csio_rnode_to_lnode(rn);

	/* ignore if same rnode came back as online */
	if (csio_is_rnode_ready(rn))
		return;

	csio_post_event(&rn->sm, CSIO_RNFE_CLOSE);

	/* Free rn if in uninit state */
	if (csio_is_rnode_uninit(rn))
		csio_put_rnode(ln, rn);
}

/**
 * csio_rnode_fwevt_handler - Event handler for firmware rnode events.
 * @rn: rnode
 * @fwevt: firmware event to handle
 *
 * Translates the firmware event into an rnode SM event and posts it.
 */
void
csio_rnode_fwevt_handler(struct csio_rnode *rn, uint8_t fwevt)
{
	struct csio_lnode *ln = csio_rnode_to_lnode(rn);
	enum csio_rn_ev evt;

	evt = CSIO_FWE_TO_RNFE(fwevt);
	if (!evt) {
		csio_ln_err(ln, "ssni:x%x Unhandled FW Rdev event: %d\n",
			    csio_rn_flowid(rn), fwevt);
		CSIO_INC_STATS(rn, n_evt_unexp);
		return;
	}
	CSIO_INC_STATS(rn, n_evt_fw[fwevt]);

	/* Track previous & current events for debugging */
	rn->prev_evt = rn->cur_evt;
	rn->cur_evt = fwevt;

	/* Post event to rnode SM */
	csio_post_event(&rn->sm, evt);

	/* Free rn if in uninit state */
	if (csio_is_rnode_uninit(rn))
		csio_put_rnode(ln, rn);
}

/*
 * csio_rnode_init - Initialize rnode.
 * @rn: RNode
 * @ln: Associated lnode
 *
 * Caller is responsible for holding the lock. The lock is required
 * to be held for inserting the rnode in ln->rnhead list.
 */
static int
csio_rnode_init(struct csio_rnode *rn, struct csio_lnode *ln)
{
	csio_rnode_to_lnode(rn) = ln;
	csio_init_state(&rn->sm, csio_rns_uninit);
	INIT_LIST_HEAD(&rn->host_cmpl_q);
	csio_rn_flowid(rn) = CSIO_INVALID_IDX;

	/* Add rnode to list of lnodes->rnhead */
	list_add_tail(&rn->sm.sm_list, &ln->rnhead);

	return 0;
}

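/*
 * csio_rnode_exit - Tear down an rnode before it is freed.
 * @rn: rnode
 *
 * Unlinks the rnode from its lnode's rnode list. All I/Os on the
 * rnode's host_cmpl_q are expected to have been cleaned up already.
 */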
static void
csio_rnode_exit(struct csio_rnode *rn)
{
	list_del_init(&rn->sm.sm_list);
	CSIO_DB_ASSERT(list_empty(&rn->host_cmpl_q));
}