bnx2fc_els.c 25 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927
  1. /*
  2. * bnx2fc_els.c: QLogic Linux FCoE offload driver.
  3. * This file contains helper routines that handle ELS requests
  4. * and responses.
  5. *
  6. * Copyright (c) 2008-2013 Broadcom Corporation
  7. * Copyright (c) 2014-2016 QLogic Corporation
  8. * Copyright (c) 2016-2017 Cavium Inc.
  9. *
  10. * This program is free software; you can redistribute it and/or modify
  11. * it under the terms of the GNU General Public License as published by
  12. * the Free Software Foundation.
  13. *
  14. * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
  15. */
  16. #include "bnx2fc.h"
/* Forward declarations: libfc response handlers installed by
 * bnx2fc_elsct_send() and the common ELS transmit helper, all defined
 * later in this file.
 */
static void bnx2fc_logo_resp(struct fc_seq *seq, struct fc_frame *fp,
			     void *arg);
static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
			      void *arg);
static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
			       void *data, u32 data_len,
			       void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg),
			       struct bnx2fc_els_cb_arg *cb_arg, u32 timer_msec);
/**
 * bnx2fc_rrq_compl - completion handler for an RRQ (Reinstate Recovery
 *		      Qualifier) ELS request
 * @cb_arg: carries the RRQ command itself (io_req) and the aborted IO
 *	    the RRQ was sent on behalf of (aborted_io_req)
 *
 * Drops the reference on the original (aborted) IO that was taken in
 * bnx2fc_send_rrq().  If the RRQ itself timed out, the completion is
 * dropped and the request is cleaned up with the firmware instead.
 * Frees @cb_arg in all cases.
 */
static void bnx2fc_rrq_compl(struct bnx2fc_els_cb_arg *cb_arg)
{
	struct bnx2fc_cmd *orig_io_req;
	struct bnx2fc_cmd *rrq_req;
	int rc = 0;

	BUG_ON(!cb_arg);
	rrq_req = cb_arg->io_req;
	orig_io_req = cb_arg->aborted_io_req;
	BUG_ON(!orig_io_req);
	BNX2FC_ELS_DBG("rrq_compl: orig xid = 0x%x, rrq_xid = 0x%x\n",
		   orig_io_req->xid, rrq_req->xid);

	/* Release the hold on the aborted IO taken in bnx2fc_send_rrq() */
	kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);

	if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &rrq_req->req_flags)) {
		/*
		 * els req is timed out. cleanup the IO with FW and
		 * drop the completion. Remove from active_cmd_queue.
		 */
		BNX2FC_ELS_DBG("rrq xid - 0x%x timed out, clean it up\n",
			   rrq_req->xid);

		if (rrq_req->on_active_queue) {
			list_del_init(&rrq_req->link);
			rrq_req->on_active_queue = 0;
			rc = bnx2fc_initiate_cleanup(rrq_req);
			BUG_ON(rc);
		}
	}
	kfree(cb_arg);
}
/**
 * bnx2fc_send_rrq - issue an RRQ ELS for an exchange that was aborted
 * @aborted_io_req: IO whose exchange qualifier (OX_ID/RX_ID) must be
 *		    reinstated with the target
 *
 * The caller's reference on @aborted_io_req is released by
 * bnx2fc_rrq_compl() on success, or here on failure.
 *
 * Returns: 0 on success, -EINVAL if the session is not ready, -ENOMEM if
 * the callback argument cannot be allocated, or FAILED after 10 seconds
 * of -ENOMEM retries from bnx2fc_initiate_els().
 * NOTE(review): FAILED (a SCSI eh code) is mixed with -errno returns
 * here; callers appear to only test for non-zero — confirm before
 * normalizing.
 */
int bnx2fc_send_rrq(struct bnx2fc_cmd *aborted_io_req)
{
	struct fc_els_rrq rrq;
	struct bnx2fc_rport *tgt = aborted_io_req->tgt;
	struct fc_lport *lport = NULL;
	struct bnx2fc_els_cb_arg *cb_arg = NULL;
	u32 sid = 0;
	u32 r_a_tov = 0;
	unsigned long start = jiffies;
	int rc;

	if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags))
		return -EINVAL;

	lport = tgt->rdata->local_port;
	sid = tgt->sid;
	r_a_tov = lport->r_a_tov;

	BNX2FC_ELS_DBG("Sending RRQ orig_xid = 0x%x\n",
		   aborted_io_req->xid);
	memset(&rrq, 0, sizeof(rrq));

	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		printk(KERN_ERR PFX "Unable to allocate cb_arg for RRQ\n");
		rc = -ENOMEM;
		goto rrq_err;
	}

	cb_arg->aborted_io_req = aborted_io_req;

	rrq.rrq_cmd = ELS_RRQ;
	hton24(rrq.rrq_s_id, sid);
	rrq.rrq_ox_id = htons(aborted_io_req->xid);
	rrq.rrq_rx_id = htons(aborted_io_req->task->rxwr_txrd.var_ctx.rx_id);

retry_rrq:
	rc = bnx2fc_initiate_els(tgt, ELS_RRQ, &rrq, sizeof(rrq),
				 bnx2fc_rrq_compl, cb_arg,
				 r_a_tov);
	if (rc == -ENOMEM) {
		/* Transient allocation failure: retry for up to 10 seconds */
		if (time_after(jiffies, start + (10 * HZ))) {
			BNX2FC_ELS_DBG("rrq Failed\n");
			rc = FAILED;
			goto rrq_err;
		}
		msleep(20);
		goto retry_rrq;
	}
rrq_err:
	if (rc) {
		/* On failure, drop the caller's hold on the original IO */
		BNX2FC_ELS_DBG("RRQ failed - release orig io req 0x%x\n",
			aborted_io_req->xid);
		kfree(cb_arg);
		spin_lock_bh(&tgt->tgt_lock);
		kref_put(&aborted_io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
	}
	return rc;
}
/**
 * bnx2fc_l2_els_compl - completion handler for libfc-originated ELS
 *			 requests (ADISC/LOGO/RLS) sent via the offload path
 * @cb_arg: carries the offloaded ELS command (io_req) and the original
 *	    libfc exchange id (l2_oxid) the response must be routed to
 *
 * Flattens the response FC header + payload into one buffer and hands it
 * to bnx2fc_process_l2_frame_compl() so libfc sees it as a normal L2
 * frame on its own exchange.  On ELS timeout the completion is dropped
 * and the request is cleaned up with the firmware instead.  Frees
 * @cb_arg in all cases.
 */
static void bnx2fc_l2_els_compl(struct bnx2fc_els_cb_arg *cb_arg)
{
	struct bnx2fc_cmd *els_req;
	struct bnx2fc_rport *tgt;
	struct bnx2fc_mp_req *mp_req;
	struct fc_frame_header *fc_hdr;
	unsigned char *buf;
	void *resp_buf;
	u32 resp_len, hdr_len;
	u16 l2_oxid;
	int frame_len;
	int rc = 0;

	l2_oxid = cb_arg->l2_oxid;
	BNX2FC_ELS_DBG("ELS COMPL - l2_oxid = 0x%x\n", l2_oxid);

	els_req = cb_arg->io_req;
	if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &els_req->req_flags)) {
		/*
		 * els req is timed out. cleanup the IO with FW and
		 * drop the completion. libfc will handle the els timeout
		 */
		if (els_req->on_active_queue) {
			list_del_init(&els_req->link);
			els_req->on_active_queue = 0;
			rc = bnx2fc_initiate_cleanup(els_req);
			BUG_ON(rc);
		}
		goto free_arg;
	}

	tgt = els_req->tgt;
	mp_req = &(els_req->mp_req);
	fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	resp_buf = mp_req->resp_buf;

	buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
	if (!buf) {
		printk(KERN_ERR PFX "Unable to alloc mp buf\n");
		goto free_arg;
	}
	hdr_len = sizeof(*fc_hdr);
	/* Header + payload must fit in the single page allocated above */
	if (hdr_len + resp_len > PAGE_SIZE) {
		printk(KERN_ERR PFX "l2_els_compl: resp len is "
				    "beyond page size\n");
		goto free_buf;
	}
	memcpy(buf, fc_hdr, hdr_len);
	memcpy(buf + hdr_len, resp_buf, resp_len);
	frame_len = hdr_len + resp_len;

	/* Route the flat frame back to libfc under the original OX_ID */
	bnx2fc_process_l2_frame_compl(tgt, buf, frame_len, l2_oxid);

free_buf:
	kfree(buf);
free_arg:
	kfree(cb_arg);
}
  159. int bnx2fc_send_adisc(struct bnx2fc_rport *tgt, struct fc_frame *fp)
  160. {
  161. struct fc_els_adisc *adisc;
  162. struct fc_frame_header *fh;
  163. struct bnx2fc_els_cb_arg *cb_arg;
  164. struct fc_lport *lport = tgt->rdata->local_port;
  165. u32 r_a_tov = lport->r_a_tov;
  166. int rc;
  167. fh = fc_frame_header_get(fp);
  168. cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
  169. if (!cb_arg) {
  170. printk(KERN_ERR PFX "Unable to allocate cb_arg for ADISC\n");
  171. return -ENOMEM;
  172. }
  173. cb_arg->l2_oxid = ntohs(fh->fh_ox_id);
  174. BNX2FC_ELS_DBG("send ADISC: l2_oxid = 0x%x\n", cb_arg->l2_oxid);
  175. adisc = fc_frame_payload_get(fp, sizeof(*adisc));
  176. /* adisc is initialized by libfc */
  177. rc = bnx2fc_initiate_els(tgt, ELS_ADISC, adisc, sizeof(*adisc),
  178. bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov);
  179. if (rc)
  180. kfree(cb_arg);
  181. return rc;
  182. }
  183. int bnx2fc_send_logo(struct bnx2fc_rport *tgt, struct fc_frame *fp)
  184. {
  185. struct fc_els_logo *logo;
  186. struct fc_frame_header *fh;
  187. struct bnx2fc_els_cb_arg *cb_arg;
  188. struct fc_lport *lport = tgt->rdata->local_port;
  189. u32 r_a_tov = lport->r_a_tov;
  190. int rc;
  191. fh = fc_frame_header_get(fp);
  192. cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
  193. if (!cb_arg) {
  194. printk(KERN_ERR PFX "Unable to allocate cb_arg for LOGO\n");
  195. return -ENOMEM;
  196. }
  197. cb_arg->l2_oxid = ntohs(fh->fh_ox_id);
  198. BNX2FC_ELS_DBG("Send LOGO: l2_oxid = 0x%x\n", cb_arg->l2_oxid);
  199. logo = fc_frame_payload_get(fp, sizeof(*logo));
  200. /* logo is initialized by libfc */
  201. rc = bnx2fc_initiate_els(tgt, ELS_LOGO, logo, sizeof(*logo),
  202. bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov);
  203. if (rc)
  204. kfree(cb_arg);
  205. return rc;
  206. }
  207. int bnx2fc_send_rls(struct bnx2fc_rport *tgt, struct fc_frame *fp)
  208. {
  209. struct fc_els_rls *rls;
  210. struct fc_frame_header *fh;
  211. struct bnx2fc_els_cb_arg *cb_arg;
  212. struct fc_lport *lport = tgt->rdata->local_port;
  213. u32 r_a_tov = lport->r_a_tov;
  214. int rc;
  215. fh = fc_frame_header_get(fp);
  216. cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
  217. if (!cb_arg) {
  218. printk(KERN_ERR PFX "Unable to allocate cb_arg for LOGO\n");
  219. return -ENOMEM;
  220. }
  221. cb_arg->l2_oxid = ntohs(fh->fh_ox_id);
  222. rls = fc_frame_payload_get(fp, sizeof(*rls));
  223. /* rls is initialized by libfc */
  224. rc = bnx2fc_initiate_els(tgt, ELS_RLS, rls, sizeof(*rls),
  225. bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov);
  226. if (rc)
  227. kfree(cb_arg);
  228. return rc;
  229. }
/**
 * bnx2fc_srr_compl - completion handler for an SRR (Sequence Retransmission
 *		      Request)
 * @cb_arg: carries the SRR command (io_req) and the original IO it was
 *	    issued for (aborted_io_req)
 *
 * Called with tgt->tgt_lock held (it is dropped and reacquired around the
 * bnx2fc_send_srr() retry).  Three cases: SRR timeout (abort the SRR, then
 * retry or abort the original IO), LS_ACC (success), LS_RJT (abort the
 * original IO).  Always drops the reference on the original IO taken when
 * the SRR was sent.
 */
static void bnx2fc_srr_compl(struct bnx2fc_els_cb_arg *cb_arg)
{
	struct bnx2fc_mp_req *mp_req;
	struct fc_frame_header *fc_hdr, *fh;
	struct bnx2fc_cmd *srr_req;
	struct bnx2fc_cmd *orig_io_req;
	struct fc_frame *fp;
	unsigned char *buf;
	void *resp_buf;
	u32 resp_len, hdr_len;
	u8 opcode;
	int rc = 0;

	orig_io_req = cb_arg->aborted_io_req;
	srr_req = cb_arg->io_req;
	if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &srr_req->req_flags)) {
		/* SRR timedout */
		BNX2FC_IO_DBG(srr_req, "srr timed out, abort "
		       "orig_io - 0x%x\n",
		       orig_io_req->xid);
		rc = bnx2fc_initiate_abts(srr_req);
		if (rc != SUCCESS) {
			BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
				"failed. issue cleanup\n");
			bnx2fc_initiate_cleanup(srr_req);
		}
		/* Nothing to recover if the original IO already completed
		 * or is being aborted.
		 */
		if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags) ||
		    test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
			BNX2FC_IO_DBG(srr_req, "srr_compl:xid 0x%x flags = %lx",
				      orig_io_req->xid, orig_io_req->req_flags);
			goto srr_compl_done;
		}
		orig_io_req->srr_retry++;
		if (orig_io_req->srr_retry <= SRR_RETRY_COUNT) {
			struct bnx2fc_rport *tgt = orig_io_req->tgt;
			/* bnx2fc_send_srr() must run without tgt_lock held */
			spin_unlock_bh(&tgt->tgt_lock);
			rc = bnx2fc_send_srr(orig_io_req,
					     orig_io_req->srr_offset,
					     orig_io_req->srr_rctl);
			spin_lock_bh(&tgt->tgt_lock);
			if (!rc)
				goto srr_compl_done;
		}
		/* Retries exhausted (or resend failed): abort the orig IO */
		rc = bnx2fc_initiate_abts(orig_io_req);
		if (rc != SUCCESS) {
			BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
				"failed xid = 0x%x. issue cleanup\n",
				orig_io_req->xid);
			bnx2fc_initiate_cleanup(orig_io_req);
		}
		goto srr_compl_done;
	}
	if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags) ||
	    test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
		BNX2FC_IO_DBG(srr_req, "srr_compl:xid - 0x%x flags = %lx",
			      orig_io_req->xid, orig_io_req->req_flags);
		goto srr_compl_done;
	}

	/* Rebuild an fc_frame from the MP response so the libfc helpers
	 * can parse the ELS opcode.
	 */
	mp_req = &(srr_req->mp_req);
	fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	resp_buf = mp_req->resp_buf;

	hdr_len = sizeof(*fc_hdr);
	buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
	if (!buf) {
		printk(KERN_ERR PFX "srr buf: mem alloc failure\n");
		goto srr_compl_done;
	}
	memcpy(buf, fc_hdr, hdr_len);
	memcpy(buf + hdr_len, resp_buf, resp_len);

	fp = fc_frame_alloc(NULL, resp_len);
	if (!fp) {
		printk(KERN_ERR PFX "fc_frame_alloc failure\n");
		goto free_buf;
	}

	fh = (struct fc_frame_header *) fc_frame_header_get(fp);
	/* Copy FC Frame header and payload into the frame */
	memcpy(fh, buf, hdr_len + resp_len);

	opcode = fc_frame_payload_op(fp);
	switch (opcode) {
	case ELS_LS_ACC:
		BNX2FC_IO_DBG(srr_req, "SRR success\n");
		break;
	case ELS_LS_RJT:
		/* Target refused retransmission: abort the original IO */
		BNX2FC_IO_DBG(srr_req, "SRR rejected\n");
		rc = bnx2fc_initiate_abts(orig_io_req);
		if (rc != SUCCESS) {
			BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
				"failed xid = 0x%x. issue cleanup\n",
				orig_io_req->xid);
			bnx2fc_initiate_cleanup(orig_io_req);
		}
		break;
	default:
		BNX2FC_IO_DBG(srr_req, "srr compl - invalid opcode = %d\n",
			opcode);
		break;
	}
	fc_frame_free(fp);
free_buf:
	kfree(buf);
srr_compl_done:
	kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
}
/**
 * bnx2fc_rec_compl - completion handler for a REC (Read Exchange Concise)
 * @cb_arg: carries the REC command (io_req) and the original IO it was
 *	    issued for (aborted_io_req)
 *
 * Called with tgt->tgt_lock held (dropped/reacquired around the
 * send_rec/send_srr calls).  On REC timeout the REC is aborted and
 * retried up to REC_RETRY_COUNT times before the original IO is aborted.
 * On LS_RJT with reason "logical error"/"unable" and explanation "invalid
 * OX_ID-RX_ID", the command was lost: a fresh IO is posted with the same
 * scsi_cmnd and the old one is cleaned up.  On LS_ACC the exchange status
 * block decides between sending an SRR, a sequence cleanup, or an ABTS.
 * Always drops the reference on the original IO and frees @cb_arg.
 */
static void bnx2fc_rec_compl(struct bnx2fc_els_cb_arg *cb_arg)
{
	struct bnx2fc_cmd *orig_io_req, *new_io_req;
	struct bnx2fc_cmd *rec_req;
	struct bnx2fc_mp_req *mp_req;
	struct fc_frame_header *fc_hdr, *fh;
	struct fc_els_ls_rjt *rjt;
	struct fc_els_rec_acc *acc;
	struct bnx2fc_rport *tgt;
	struct fcoe_err_report_entry *err_entry;
	struct scsi_cmnd *sc_cmd;
	enum fc_rctl r_ctl;
	unsigned char *buf;
	void *resp_buf;
	struct fc_frame *fp;
	u8 opcode;
	u32 offset;
	u32 e_stat;
	u32 resp_len, hdr_len;
	int rc = 0;
	bool send_seq_clnp = false;
	bool abort_io = false;

	BNX2FC_MISC_DBG("Entered rec_compl callback\n");
	rec_req = cb_arg->io_req;
	orig_io_req = cb_arg->aborted_io_req;
	BNX2FC_IO_DBG(rec_req, "rec_compl: orig xid = 0x%x", orig_io_req->xid);
	tgt = orig_io_req->tgt;

	/* Handle REC timeout case */
	if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &rec_req->req_flags)) {
		BNX2FC_IO_DBG(rec_req, "timed out, abort "
		       "orig_io - 0x%x\n",
			orig_io_req->xid);
		/* els req is timed out. send abts for els */
		rc = bnx2fc_initiate_abts(rec_req);
		if (rc != SUCCESS) {
			BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
				"failed. issue cleanup\n");
			bnx2fc_initiate_cleanup(rec_req);
		}
		orig_io_req->rec_retry++;
		/* REC timedout. send ABTS to the orig IO req */
		if (orig_io_req->rec_retry <= REC_RETRY_COUNT) {
			/* bnx2fc_send_rec() must run without tgt_lock held */
			spin_unlock_bh(&tgt->tgt_lock);
			rc = bnx2fc_send_rec(orig_io_req);
			spin_lock_bh(&tgt->tgt_lock);
			if (!rc)
				goto rec_compl_done;
		}
		rc = bnx2fc_initiate_abts(orig_io_req);
		if (rc != SUCCESS) {
			BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
				"failed xid = 0x%x. issue cleanup\n",
				orig_io_req->xid);
			bnx2fc_initiate_cleanup(orig_io_req);
		}
		goto rec_compl_done;
	}

	/* Nothing to recover if the original IO is done or being aborted */
	if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags)) {
		BNX2FC_IO_DBG(rec_req, "completed"
		       "orig_io - 0x%x\n",
			orig_io_req->xid);
		goto rec_compl_done;
	}
	if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
		BNX2FC_IO_DBG(rec_req, "abts in prog "
		       "orig_io - 0x%x\n",
			orig_io_req->xid);
		goto rec_compl_done;
	}

	/* Rebuild an fc_frame from the MP response for libfc parsing */
	mp_req = &(rec_req->mp_req);
	fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	acc = resp_buf = mp_req->resp_buf;

	hdr_len = sizeof(*fc_hdr);

	buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
	if (!buf) {
		printk(KERN_ERR PFX "rec buf: mem alloc failure\n");
		goto rec_compl_done;
	}
	memcpy(buf, fc_hdr, hdr_len);
	memcpy(buf + hdr_len, resp_buf, resp_len);

	fp = fc_frame_alloc(NULL, resp_len);
	if (!fp) {
		printk(KERN_ERR PFX "fc_frame_alloc failure\n");
		goto free_buf;
	}

	fh = (struct fc_frame_header *) fc_frame_header_get(fp);
	/* Copy FC Frame header and payload into the frame */
	memcpy(fh, buf, hdr_len + resp_len);

	opcode = fc_frame_payload_op(fp);
	if (opcode == ELS_LS_RJT) {
		BNX2FC_IO_DBG(rec_req, "opcode is RJT\n");
		rjt = fc_frame_payload_get(fp, sizeof(*rjt));
		if ((rjt->er_reason == ELS_RJT_LOGIC ||
		    rjt->er_reason == ELS_RJT_UNAB) &&
		    rjt->er_explan == ELS_EXPL_OXID_RXID) {
			/* Target has no record of the exchange - the
			 * command never arrived.  Re-post it fresh.
			 */
			BNX2FC_IO_DBG(rec_req, "handle CMD LOST case\n");
			new_io_req = bnx2fc_cmd_alloc(tgt);
			if (!new_io_req)
				goto abort_io;
			new_io_req->sc_cmd = orig_io_req->sc_cmd;
			/* cleanup orig_io_req that is with the FW */
			set_bit(BNX2FC_FLAG_CMD_LOST,
				&orig_io_req->req_flags);
			bnx2fc_initiate_cleanup(orig_io_req);
			/* Post a new IO req with the same sc_cmd */
			BNX2FC_IO_DBG(rec_req, "Post IO request again\n");
			rc = bnx2fc_post_io_req(tgt, new_io_req);
			if (!rc)
				goto free_frame;
			BNX2FC_IO_DBG(rec_req, "REC: io post err\n");
		}
abort_io:
		rc = bnx2fc_initiate_abts(orig_io_req);
		if (rc != SUCCESS) {
			BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
				"failed. issue cleanup\n");
			bnx2fc_initiate_cleanup(orig_io_req);
		}
	} else if (opcode == ELS_LS_ACC) {
		/* REVISIT: Check if the exchange is already aborted */
		offset = ntohl(acc->reca_fc4value);
		e_stat = ntohl(acc->reca_e_stat);
		if (e_stat & ESB_ST_SEQ_INIT)  {
			/* Target holds sequence initiative: let it recover */
			BNX2FC_IO_DBG(rec_req, "target has the seq init\n");
			goto free_frame;
		}
		BNX2FC_IO_DBG(rec_req, "e_stat = 0x%x, offset = 0x%x\n",
			e_stat, offset);
		/* Seq initiative is with us */
		err_entry = (struct fcoe_err_report_entry *)
			     &orig_io_req->err_entry;
		sc_cmd = orig_io_req->sc_cmd;
		if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
			/* SCSI WRITE command */
			if (offset == orig_io_req->data_xfer_len) {
				/* All data was delivered: only the
				 * FCP_RSP was lost.
				 */
				BNX2FC_IO_DBG(rec_req, "WRITE - resp lost\n");
				/* FCP_RSP lost */
				r_ctl = FC_RCTL_DD_CMD_STATUS;
				offset = 0;
			} else  {
				/* start transmitting from offset */
				BNX2FC_IO_DBG(rec_req, "XFER_RDY/DATA lost\n");
				send_seq_clnp = true;
				r_ctl = FC_RCTL_DD_DATA_DESC;
				if (bnx2fc_initiate_seq_cleanup(orig_io_req,
								offset, r_ctl))
					abort_io = true;
				/* XFER_RDY */
			}
		} else {
			/* SCSI READ command */
			if (err_entry->data.rx_buf_off ==
					orig_io_req->data_xfer_len) {
				/* FCP_RSP lost */
				BNX2FC_IO_DBG(rec_req, "READ - resp lost\n");
				r_ctl = FC_RCTL_DD_CMD_STATUS;
				offset = 0;
			} else  {
				/* request retransmission from this offset */
				send_seq_clnp = true;
				offset = err_entry->data.rx_buf_off;
				BNX2FC_IO_DBG(rec_req, "RD DATA lost\n");
				/* FCP_DATA lost */
				r_ctl = FC_RCTL_DD_SOL_DATA;
				if (bnx2fc_initiate_seq_cleanup(orig_io_req,
								offset, r_ctl))
					abort_io = true;
			}
		}
		if (abort_io) {
			/* Sequence cleanup could not be started: abort */
			rc = bnx2fc_initiate_abts(orig_io_req);
			if (rc != SUCCESS) {
				BNX2FC_IO_DBG(rec_req, "rec_compl:initiate_abts"
					      " failed. issue cleanup\n");
				bnx2fc_initiate_cleanup(orig_io_req);
			}
		} else if (!send_seq_clnp) {
			/* Only the FCP_RSP was lost: request it via SRR */
			BNX2FC_IO_DBG(rec_req, "Send SRR - FCP_RSP\n");
			spin_unlock_bh(&tgt->tgt_lock);
			rc = bnx2fc_send_srr(orig_io_req, offset, r_ctl);
			spin_lock_bh(&tgt->tgt_lock);

			if (rc) {
				BNX2FC_IO_DBG(rec_req, "Unable to send SRR"
					" IO will abort\n");
			}
		}
	}
free_frame:
	fc_frame_free(fp);
free_buf:
	kfree(buf);
rec_compl_done:
	kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
	kfree(cb_arg);
}
/**
 * bnx2fc_send_rec - issue a REC ELS to query the state of @orig_io_req's
 *		     exchange
 * @orig_io_req: IO whose exchange state is being queried
 *
 * Takes an extra reference on @orig_io_req for the completion handler;
 * bnx2fc_rec_compl() drops it.  If the send fails, a reference is
 * dropped here instead.
 *
 * NOTE(review): on kzalloc failure the error path runs kref_put()
 * without the matching kref_get() having been taken — this appears to
 * rely on the caller holding a reference that a failed send consumes;
 * confirm against the callers before changing.
 *
 * Returns: 0 on success, -ENOMEM on allocation failure, or the error
 * from bnx2fc_initiate_els().
 */
int bnx2fc_send_rec(struct bnx2fc_cmd *orig_io_req)
{
	struct fc_els_rec rec;
	struct bnx2fc_rport *tgt = orig_io_req->tgt;
	struct fc_lport *lport = tgt->rdata->local_port;
	struct bnx2fc_els_cb_arg *cb_arg = NULL;
	u32 sid = tgt->sid;
	u32 r_a_tov = lport->r_a_tov;
	int rc;

	BNX2FC_IO_DBG(orig_io_req, "Sending REC\n");
	memset(&rec, 0, sizeof(rec));

	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
	if (!cb_arg) {
		printk(KERN_ERR PFX "Unable to allocate cb_arg for REC\n");
		rc = -ENOMEM;
		goto rec_err;
	}
	/* Hold the original IO for the completion handler */
	kref_get(&orig_io_req->refcount);

	cb_arg->aborted_io_req = orig_io_req;

	rec.rec_cmd = ELS_REC;
	hton24(rec.rec_s_id, sid);
	rec.rec_ox_id = htons(orig_io_req->xid);
	rec.rec_rx_id = htons(orig_io_req->task->rxwr_txrd.var_ctx.rx_id);

	rc = bnx2fc_initiate_els(tgt, ELS_REC, &rec, sizeof(rec),
				 bnx2fc_rec_compl, cb_arg,
				 r_a_tov);
rec_err:
	if (rc) {
		BNX2FC_IO_DBG(orig_io_req, "REC failed - release\n");
		spin_lock_bh(&tgt->tgt_lock);
		kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		kfree(cb_arg);
	}
	return rc;
}
/**
 * bnx2fc_send_srr - issue an SRR (Sequence Retransmission Request) for
 *		     @orig_io_req
 * @orig_io_req: IO whose lost sequence must be retransmitted
 * @offset:	 relative offset to resume the transfer from
 * @r_ctl:	 R_CTL of the information category being requested
 *
 * Takes an extra reference on @orig_io_req for bnx2fc_srr_compl(); on
 * failure a reference is dropped here instead.  Callers in this file
 * drop tgt->tgt_lock around this call (it takes the lock itself in the
 * error path).
 *
 * NOTE(review): as in bnx2fc_send_rec(), the kzalloc-failure path does a
 * kref_put() without the matching kref_get() — confirm the caller-side
 * reference contract before changing.
 *
 * Returns: 0 on success (and sets BNX2FC_FLAG_SRR_SENT), -ENOMEM on
 * allocation failure, or the error from bnx2fc_initiate_els().
 */
int bnx2fc_send_srr(struct bnx2fc_cmd *orig_io_req, u32 offset, u8 r_ctl)
{
	struct fcp_srr srr;
	struct bnx2fc_rport *tgt = orig_io_req->tgt;
	struct fc_lport *lport = tgt->rdata->local_port;
	struct bnx2fc_els_cb_arg *cb_arg = NULL;
	u32 r_a_tov = lport->r_a_tov;
	int rc;

	BNX2FC_IO_DBG(orig_io_req, "Sending SRR\n");
	memset(&srr, 0, sizeof(srr));

	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
	if (!cb_arg) {
		printk(KERN_ERR PFX "Unable to allocate cb_arg for SRR\n");
		rc = -ENOMEM;
		goto srr_err;
	}
	/* Hold the original IO for the completion handler */
	kref_get(&orig_io_req->refcount);

	cb_arg->aborted_io_req = orig_io_req;

	srr.srr_op = ELS_SRR;
	srr.srr_ox_id = htons(orig_io_req->xid);
	srr.srr_rx_id = htons(orig_io_req->task->rxwr_txrd.var_ctx.rx_id);
	srr.srr_rel_off = htonl(offset);
	srr.srr_r_ctl = r_ctl;

	/* Remember the parameters so a timed-out SRR can be resent */
	orig_io_req->srr_offset = offset;
	orig_io_req->srr_rctl = r_ctl;

	rc = bnx2fc_initiate_els(tgt, ELS_SRR, &srr, sizeof(srr),
				 bnx2fc_srr_compl, cb_arg,
				 r_a_tov);
srr_err:
	if (rc) {
		BNX2FC_IO_DBG(orig_io_req, "SRR failed - release\n");
		spin_lock_bh(&tgt->tgt_lock);
		kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		kfree(cb_arg);
	} else
		set_bit(BNX2FC_FLAG_SRR_SENT, &orig_io_req->req_flags);

	return rc;
}
/**
 * bnx2fc_initiate_els - allocate, build and post an ELS request on the
 *			 offloaded session's send queue
 * @tgt:	offloaded rport to send the ELS on
 * @op:		ELS opcode (ELS_RRQ, ELS_ADISC, ELS_SRR, ...)
 * @data:	ELS payload, copied into the MP request buffer
 * @data_len:	payload length in bytes
 * @cb_func:	completion callback invoked from the CQE handler
 * @cb_arg:	argument handed to @cb_func; its io_req is filled in here
 * @timer_msec:	ELS timeout in milliseconds; 0 means no timer
 *
 * Returns: 0 on success; -EINVAL if the rport/link/session is not ready
 * or @op is out of range; -ENOMEM on allocation failure.
 */
static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
			       void *data, u32 data_len,
			       void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg),
			       struct bnx2fc_els_cb_arg *cb_arg, u32 timer_msec)
{
	struct fcoe_port *port = tgt->port;
	struct bnx2fc_interface *interface = port->priv;
	struct fc_rport *rport = tgt->rport;
	struct fc_lport *lport = port->lport;
	struct bnx2fc_cmd *els_req;
	struct bnx2fc_mp_req *mp_req;
	struct fc_frame_header *fc_hdr;
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	int rc = 0;
	int task_idx, index;
	u32 did, sid;
	u16 xid;

	/* Validate rport, link and offload session state up front */
	rc = fc_remote_port_chkready(rport);
	if (rc) {
		printk(KERN_ERR PFX "els 0x%x: rport not ready\n", op);
		rc = -EINVAL;
		goto els_err;
	}
	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		printk(KERN_ERR PFX "els 0x%x: link is not ready\n", op);
		rc = -EINVAL;
		goto els_err;
	}
	if (!(test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags))) {
		printk(KERN_ERR PFX "els 0x%x: tgt not ready\n", op);
		rc = -EINVAL;
		goto els_err;
	}
	els_req = bnx2fc_elstm_alloc(tgt, BNX2FC_ELS);
	if (!els_req) {
		rc = -ENOMEM;
		goto els_err;
	}

	els_req->sc_cmd = NULL;
	els_req->port = port;
	els_req->tgt = tgt;
	els_req->cb_func = cb_func;
	cb_arg->io_req = els_req;
	els_req->cb_arg = cb_arg;
	els_req->data_xfer_len = data_len;

	mp_req = (struct bnx2fc_mp_req *)&(els_req->mp_req);
	rc = bnx2fc_init_mp_req(els_req);
	if (rc == FAILED) {
		printk(KERN_ERR PFX "ELS MP request init failed\n");
		spin_lock_bh(&tgt->tgt_lock);
		kref_put(&els_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		rc = -ENOMEM;
		goto els_err;
	} else {
		/* rc SUCCESS */
		rc = 0;
	}

	/* Set the data_xfer_len to the size of ELS payload */
	mp_req->req_len = data_len;
	els_req->data_xfer_len = mp_req->req_len;

	/* Fill ELS Payload */
	if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) {
		memcpy(mp_req->req_buf, data, data_len);
	} else {
		printk(KERN_ERR PFX "Invalid ELS op 0x%x\n", op);
		els_req->cb_func = NULL;
		els_req->cb_arg = NULL;
		spin_lock_bh(&tgt->tgt_lock);
		kref_put(&els_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		rc = -EINVAL;
	}

	if (rc)
		goto els_err;

	/* Fill FC header */
	fc_hdr = &(mp_req->req_fc_hdr);

	did = tgt->rport->port_id;
	sid = tgt->sid;

	/* SRR is an FC-4 link service carried as FCP; all other ELS use
	 * the standard ELS R_CTL/type.
	 */
	if (op == ELS_SRR)
		__fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS4_REQ, did, sid,
				 FC_TYPE_FCP, FC_FC_FIRST_SEQ |
				 FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
	else
		__fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid,
				 FC_TYPE_ELS, FC_FC_FIRST_SEQ |
				 FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);

	/* Obtain exchange id */
	xid = els_req->xid;
	task_idx = xid/BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *)
			interface->hba->task_ctx[task_idx];
	task = &(task_page[index]);
	bnx2fc_init_mp_task(els_req, task);

	spin_lock_bh(&tgt->tgt_lock);

	/* Session may have gone down while building the request */
	if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
		printk(KERN_ERR PFX "initiate_els.. session not ready\n");
		els_req->cb_func = NULL;
		els_req->cb_arg = NULL;
		kref_put(&els_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		return -EINVAL;
	}

	if (timer_msec)
		bnx2fc_cmd_timer_set(els_req, timer_msec);
	bnx2fc_add_2_sq(tgt, xid);

	els_req->on_active_queue = 1;
	list_add_tail(&els_req->link, &tgt->els_queue);

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);
	spin_unlock_bh(&tgt->tgt_lock);

els_err:
	return rc;
}
/**
 * bnx2fc_process_els_compl - handle the firmware CQE for a completed ELS
 * @els_req: the ELS command that completed
 * @task:    firmware task context entry carrying the response
 * @num_rq:  number of RQ entries consumed (unused here)
 *
 * Races against the ELS timer via BNX2FC_FLAG_ELS_DONE: whichever
 * context sets the bit first owns the completion.  Copies the response
 * FC header and payload length out of the task context, then invokes the
 * request's callback.
 * NOTE(review): list_del_init() here without taking tgt_lock suggests
 * the caller already holds it — confirm at the call site.
 */
void bnx2fc_process_els_compl(struct bnx2fc_cmd *els_req,
			      struct fcoe_task_ctx_entry *task, u8 num_rq)
{
	struct bnx2fc_mp_req *mp_req;
	struct fc_frame_header *fc_hdr;
	u64 *hdr;
	u64 *temp_hdr;

	BNX2FC_ELS_DBG("Entered process_els_compl xid = 0x%x"
			"cmd_type = %d\n", els_req->xid, els_req->cmd_type);

	/* Timer context got here first: just drop our hold */
	if (test_and_set_bit(BNX2FC_FLAG_ELS_DONE,
			     &els_req->req_flags)) {
		BNX2FC_ELS_DBG("Timer context finished processing this "
			   "els - 0x%x\n", els_req->xid);
		/* This IO doesn't receive cleanup completion */
		kref_put(&els_req->refcount, bnx2fc_cmd_release);
		return;
	}

	/* Cancel the timeout_work, as we received the response */
	if (cancel_delayed_work(&els_req->timeout_work))
		kref_put(&els_req->refcount,
			 bnx2fc_cmd_release); /* drop timer hold */

	if (els_req->on_active_queue) {
		list_del_init(&els_req->link);
		els_req->on_active_queue = 0;
	}

	mp_req = &(els_req->mp_req);
	fc_hdr = &(mp_req->resp_fc_hdr);

	/* Copy the 24-byte FC header out of the task context, converting
	 * endianness 64 bits at a time.
	 */
	hdr = (u64 *)fc_hdr;
	temp_hdr = (u64 *)
		&task->rxwr_only.union_ctx.comp_info.mp_rsp.fc_hdr;
	hdr[0] = cpu_to_be64(temp_hdr[0]);
	hdr[1] = cpu_to_be64(temp_hdr[1]);
	hdr[2] = cpu_to_be64(temp_hdr[2]);

	mp_req->resp_len =
		task->rxwr_only.union_ctx.comp_info.mp_rsp.mp_payload_len;

	/* Parse ELS response */
	if ((els_req->cb_func) && (els_req->cb_arg)) {
		els_req->cb_func(els_req->cb_arg);
		els_req->cb_arg = NULL;
	}

	kref_put(&els_req->refcount, bnx2fc_cmd_release);
}
  763. static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
  764. void *arg)
  765. {
  766. struct fcoe_ctlr *fip = arg;
  767. struct fc_exch *exch = fc_seq_exch(seq);
  768. struct fc_lport *lport = exch->lp;
  769. u8 *mac;
  770. u8 op;
  771. if (IS_ERR(fp))
  772. goto done;
  773. mac = fr_cb(fp)->granted_mac;
  774. if (is_zero_ether_addr(mac)) {
  775. op = fc_frame_payload_op(fp);
  776. if (lport->vport) {
  777. if (op == ELS_LS_RJT) {
  778. printk(KERN_ERR PFX "bnx2fc_flogi_resp is LS_RJT\n");
  779. fc_vport_terminate(lport->vport);
  780. fc_frame_free(fp);
  781. return;
  782. }
  783. }
  784. fcoe_ctlr_recv_flogi(fip, lport, fp);
  785. }
  786. if (!is_zero_ether_addr(mac))
  787. fip->update_mac(lport, mac);
  788. done:
  789. fc_lport_flogi_resp(seq, fp, lport);
  790. }
  791. static void bnx2fc_logo_resp(struct fc_seq *seq, struct fc_frame *fp,
  792. void *arg)
  793. {
  794. struct fcoe_ctlr *fip = arg;
  795. struct fc_exch *exch = fc_seq_exch(seq);
  796. struct fc_lport *lport = exch->lp;
  797. static u8 zero_mac[ETH_ALEN] = { 0 };
  798. if (!IS_ERR(fp))
  799. fip->update_mac(lport, zero_mac);
  800. fc_lport_logo_resp(seq, fp, lport);
  801. }
  802. struct fc_seq *bnx2fc_elsct_send(struct fc_lport *lport, u32 did,
  803. struct fc_frame *fp, unsigned int op,
  804. void (*resp)(struct fc_seq *,
  805. struct fc_frame *,
  806. void *),
  807. void *arg, u32 timeout)
  808. {
  809. struct fcoe_port *port = lport_priv(lport);
  810. struct bnx2fc_interface *interface = port->priv;
  811. struct fcoe_ctlr *fip = bnx2fc_to_ctlr(interface);
  812. struct fc_frame_header *fh = fc_frame_header_get(fp);
  813. switch (op) {
  814. case ELS_FLOGI:
  815. case ELS_FDISC:
  816. return fc_elsct_send(lport, did, fp, op, bnx2fc_flogi_resp,
  817. fip, timeout);
  818. case ELS_LOGO:
  819. /* only hook onto fabric logouts, not port logouts */
  820. if (ntoh24(fh->fh_d_id) != FC_FID_FLOGI)
  821. break;
  822. return fc_elsct_send(lport, did, fp, op, bnx2fc_logo_resp,
  823. fip, timeout);
  824. }
  825. return fc_elsct_send(lport, did, fp, op, resp, arg, timeout);
  826. }