fcloop.c

/*
 * Copyright (c) 2016 Avago Technologies.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful.
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
 * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
 * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
 * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
 * See the GNU General Public License for more details, a copy of which
 * can be found in the file COPYING included with this package
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/parser.h>
#include <uapi/scsi/fc/fc_fs.h>

#include "../host/nvme.h"
#include "../target/nvmet.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>
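
/*
 * fcloop: a software loopback for the NVMe Fibre Channel transport.
 * The module registers with both the NVMe-FC host (nvme-fc) and target
 * (nvmet-fc) transports and shuttles LS and FCP traffic between them in
 * memory, so the FC host/target stacks can be exercised without HBA
 * hardware. Ports are created and deleted through the sysfs attributes
 * on the "ctl" device defined at the bottom of this file.
 */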

enum {
        NVMF_OPT_ERR = 0,
        NVMF_OPT_WWNN = 1 << 0,
        NVMF_OPT_WWPN = 1 << 1,
        NVMF_OPT_ROLES = 1 << 2,
        NVMF_OPT_FCADDR = 1 << 3,
        NVMF_OPT_LPWWNN = 1 << 4,
        NVMF_OPT_LPWWPN = 1 << 5,
};

struct fcloop_ctrl_options {
        int mask;
        u64 wwnn;
        u64 wwpn;
        u32 roles;
        u32 fcaddr;
        u64 lpwwnn;
        u64 lpwwpn;
};

static const match_table_t opt_tokens = {
        { NVMF_OPT_WWNN, "wwnn=%s" },
        { NVMF_OPT_WWPN, "wwpn=%s" },
        { NVMF_OPT_ROLES, "roles=%d" },
        { NVMF_OPT_FCADDR, "fcaddr=%x" },
        { NVMF_OPT_LPWWNN, "lpwwnn=%s" },
        { NVMF_OPT_LPWWPN, "lpwwpn=%s" },
        { NVMF_OPT_ERR, NULL }
};
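
/*
 * Parse a comma/newline-separated "key=value" option string (as written
 * to the sysfs attributes) into a fcloop_ctrl_options structure,
 * recording which keys were seen in opts->mask.
 */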
static int
fcloop_parse_options(struct fcloop_ctrl_options *opts,
                const char *buf)
{
        substring_t args[MAX_OPT_ARGS];
        char *options, *o, *p;
        int token, ret = 0;
        u64 token64;

        options = o = kstrdup(buf, GFP_KERNEL);
        if (!options)
                return -ENOMEM;

        while ((p = strsep(&o, ",\n")) != NULL) {
                if (!*p)
                        continue;

                token = match_token(p, opt_tokens, args);
                opts->mask |= token;
                switch (token) {
                case NVMF_OPT_WWNN:
                        if (match_u64(args, &token64)) {
                                ret = -EINVAL;
                                goto out_free_options;
                        }
                        opts->wwnn = token64;
                        break;
                case NVMF_OPT_WWPN:
                        if (match_u64(args, &token64)) {
                                ret = -EINVAL;
                                goto out_free_options;
                        }
                        opts->wwpn = token64;
                        break;
                case NVMF_OPT_ROLES:
                        if (match_int(args, &token)) {
                                ret = -EINVAL;
                                goto out_free_options;
                        }
                        opts->roles = token;
                        break;
                case NVMF_OPT_FCADDR:
                        if (match_hex(args, &token)) {
                                ret = -EINVAL;
                                goto out_free_options;
                        }
                        opts->fcaddr = token;
                        break;
                case NVMF_OPT_LPWWNN:
                        if (match_u64(args, &token64)) {
                                ret = -EINVAL;
                                goto out_free_options;
                        }
                        opts->lpwwnn = token64;
                        break;
                case NVMF_OPT_LPWWPN:
                        if (match_u64(args, &token64)) {
                                ret = -EINVAL;
                                goto out_free_options;
                        }
                        opts->lpwwpn = token64;
                        break;
                default:
                        pr_warn("unknown parameter or missing value '%s'\n", p);
                        ret = -EINVAL;
                        goto out_free_options;
                }
        }

out_free_options:
        kfree(options);
        return ret;
}
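
/*
 * Parse only the wwnn/wwpn pair out of an option string; both must be
 * present. Used by the del_*_port attributes to identify a port.
 */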
static int
fcloop_parse_nm_options(struct device *dev, u64 *nname, u64 *pname,
                const char *buf)
{
        substring_t args[MAX_OPT_ARGS];
        char *options, *o, *p;
        int token, ret = 0;
        u64 token64;

        *nname = -1;
        *pname = -1;

        options = o = kstrdup(buf, GFP_KERNEL);
        if (!options)
                return -ENOMEM;

        while ((p = strsep(&o, ",\n")) != NULL) {
                if (!*p)
                        continue;

                token = match_token(p, opt_tokens, args);
                switch (token) {
                case NVMF_OPT_WWNN:
                        if (match_u64(args, &token64)) {
                                ret = -EINVAL;
                                goto out_free_options;
                        }
                        *nname = token64;
                        break;
                case NVMF_OPT_WWPN:
                        if (match_u64(args, &token64)) {
                                ret = -EINVAL;
                                goto out_free_options;
                        }
                        *pname = token64;
                        break;
                default:
                        pr_warn("unknown parameter or missing value '%s'\n", p);
                        ret = -EINVAL;
                        goto out_free_options;
                }
        }

out_free_options:
        kfree(options);

        if (!ret) {
                if (*nname == -1)
                        return -EINVAL;
                if (*pname == -1)
                        return -EINVAL;
        }

        return ret;
}

#define LPORT_OPTS      (NVMF_OPT_WWNN | NVMF_OPT_WWPN)

#define RPORT_OPTS      (NVMF_OPT_WWNN | NVMF_OPT_WWPN |  \
                         NVMF_OPT_LPWWNN | NVMF_OPT_LPWWPN)

#define TGTPORT_OPTS    (NVMF_OPT_WWNN | NVMF_OPT_WWPN)

static DEFINE_SPINLOCK(fcloop_lock);
static LIST_HEAD(fcloop_lports);
static LIST_HEAD(fcloop_nports);
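
/*
 * Object model: a fcloop_lport wraps an nvme_fc_local_port (host side).
 * A fcloop_nport represents one WWNN/WWPN pair and owns up to one
 * fcloop_rport (the host's view of the remote port) and one
 * fcloop_tport (the nvmet target port bound to the same names); rport
 * and tport cross-reference each other's transport ports so that LS
 * and FCP traffic can be looped between the two stacks.
 */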
struct fcloop_lport {
        struct nvme_fc_local_port *localport;
        struct list_head lport_list;
        struct completion unreg_done;
};

struct fcloop_lport_priv {
        struct fcloop_lport *lport;
};

struct fcloop_rport {
        struct nvme_fc_remote_port *remoteport;
        struct nvmet_fc_target_port *targetport;
        struct fcloop_nport *nport;
        struct fcloop_lport *lport;
};

struct fcloop_tport {
        struct nvmet_fc_target_port *targetport;
        struct nvme_fc_remote_port *remoteport;
        struct fcloop_nport *nport;
        struct fcloop_lport *lport;
};

struct fcloop_nport {
        struct fcloop_rport *rport;
        struct fcloop_tport *tport;
        struct fcloop_lport *lport;
        struct list_head nport_list;
        struct kref ref;
        u64 node_name;
        u64 port_name;
        u32 port_role;
        u32 port_id;
};

struct fcloop_lsreq {
        struct fcloop_tport *tport;
        struct nvmefc_ls_req *lsreq;
        struct work_struct work;
        struct nvmefc_tgt_ls_req tgt_ls_req;
        int status;
};

struct fcloop_fcpreq {
        struct fcloop_tport *tport;
        struct nvmefc_fcp_req *fcpreq;
        spinlock_t reqlock;
        u16 status;
        bool active;
        bool aborted;
        struct work_struct work;
        struct nvmefc_tgt_fcp_req tgt_fcp_req;
};

struct fcloop_ini_fcpreq {
        struct nvmefc_fcp_req *fcpreq;
        struct fcloop_fcpreq *tfcp_req;
        struct work_struct iniwork;
};

static inline struct fcloop_lsreq *
tgt_ls_req_to_lsreq(struct nvmefc_tgt_ls_req *tgt_lsreq)
{
        return container_of(tgt_lsreq, struct fcloop_lsreq, tgt_ls_req);
}

static inline struct fcloop_fcpreq *
tgt_fcp_req_to_fcpreq(struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
        return container_of(tgt_fcpreq, struct fcloop_fcpreq, tgt_fcp_req);
}

static int
fcloop_create_queue(struct nvme_fc_local_port *localport,
                        unsigned int qidx, u16 qsize,
                        void **handle)
{
        *handle = localport;
        return 0;
}

static void
fcloop_delete_queue(struct nvme_fc_local_port *localport,
                        unsigned int idx, void *handle)
{
}

/*
 * Transmit of LS RSP done (e.g. buffers all set). call back up
 * initiator "done" flows.
 */
static void
fcloop_tgt_lsrqst_done_work(struct work_struct *work)
{
        struct fcloop_lsreq *tls_req =
                container_of(work, struct fcloop_lsreq, work);
        struct fcloop_tport *tport = tls_req->tport;
        struct nvmefc_ls_req *lsreq = tls_req->lsreq;

        if (!tport || tport->remoteport)
                lsreq->done(lsreq, tls_req->status);
}

static int
fcloop_ls_req(struct nvme_fc_local_port *localport,
                        struct nvme_fc_remote_port *remoteport,
                        struct nvmefc_ls_req *lsreq)
{
        struct fcloop_lsreq *tls_req = lsreq->private;
        struct fcloop_rport *rport = remoteport->private;
        int ret = 0;

        tls_req->lsreq = lsreq;
        INIT_WORK(&tls_req->work, fcloop_tgt_lsrqst_done_work);

        if (!rport->targetport) {
                tls_req->status = -ECONNREFUSED;
                tls_req->tport = NULL;
                schedule_work(&tls_req->work);
                return ret;
        }

        tls_req->status = 0;
        tls_req->tport = rport->targetport->private;
        ret = nvmet_fc_rcv_ls_req(rport->targetport, &tls_req->tgt_ls_req,
                                 lsreq->rqstaddr, lsreq->rqstlen);

        return ret;
}

static int
fcloop_xmt_ls_rsp(struct nvmet_fc_target_port *tport,
                        struct nvmefc_tgt_ls_req *tgt_lsreq)
{
        struct fcloop_lsreq *tls_req = tgt_ls_req_to_lsreq(tgt_lsreq);
        struct nvmefc_ls_req *lsreq = tls_req->lsreq;

        memcpy(lsreq->rspaddr, tgt_lsreq->rspbuf,
                ((lsreq->rsplen < tgt_lsreq->rsplen) ?
                                lsreq->rsplen : tgt_lsreq->rsplen));
        tgt_lsreq->done(tgt_lsreq);

        schedule_work(&tls_req->work);

        return 0;
}

/*
 * FCP IO operation done by initiator abort.
 * call back up initiator "done" flows.
 */
static void
fcloop_tgt_fcprqst_ini_done_work(struct work_struct *work)
{
        struct fcloop_ini_fcpreq *inireq =
                container_of(work, struct fcloop_ini_fcpreq, iniwork);

        inireq->fcpreq->done(inireq->fcpreq);
}

/*
 * FCP IO operation done by target completion.
 * call back up initiator "done" flows.
 */
static void
fcloop_tgt_fcprqst_done_work(struct work_struct *work)
{
        struct fcloop_fcpreq *tfcp_req =
                container_of(work, struct fcloop_fcpreq, work);
        struct fcloop_tport *tport = tfcp_req->tport;
        struct nvmefc_fcp_req *fcpreq;

        spin_lock(&tfcp_req->reqlock);
        fcpreq = tfcp_req->fcpreq;
        tfcp_req->fcpreq = NULL;
        spin_unlock(&tfcp_req->reqlock);

        if (tport->remoteport && fcpreq) {
                fcpreq->status = tfcp_req->status;
                fcpreq->done(fcpreq);
        }

        kfree(tfcp_req);
}

static int
fcloop_fcp_req(struct nvme_fc_local_port *localport,
                        struct nvme_fc_remote_port *remoteport,
                        void *hw_queue_handle,
                        struct nvmefc_fcp_req *fcpreq)
{
        struct fcloop_rport *rport = remoteport->private;
        struct fcloop_ini_fcpreq *inireq = fcpreq->private;
        struct fcloop_fcpreq *tfcp_req;
        int ret = 0;

        if (!rport->targetport)
                return -ECONNREFUSED;

        tfcp_req = kzalloc(sizeof(*tfcp_req), GFP_KERNEL);
        if (!tfcp_req)
                return -ENOMEM;

        inireq->fcpreq = fcpreq;
        inireq->tfcp_req = tfcp_req;
        INIT_WORK(&inireq->iniwork, fcloop_tgt_fcprqst_ini_done_work);
        tfcp_req->fcpreq = fcpreq;
        tfcp_req->tport = rport->targetport->private;
        spin_lock_init(&tfcp_req->reqlock);
        INIT_WORK(&tfcp_req->work, fcloop_tgt_fcprqst_done_work);

        ret = nvmet_fc_rcv_fcp_req(rport->targetport, &tfcp_req->tgt_fcp_req,
                                 fcpreq->cmdaddr, fcpreq->cmdlen);

        return ret;
}
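
/*
 * Copy "length" bytes between the target-side scatterlist (data_sg)
 * and the initiator-side scatterlist (io_sg), starting "offset" bytes
 * into the initiator buffer. Direction depends on op: WRITEDATA copies
 * initiator to target, otherwise target to initiator.
 */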
static void
fcloop_fcp_copy_data(u8 op, struct scatterlist *data_sg,
                        struct scatterlist *io_sg, u32 offset, u32 length)
{
        void *data_p, *io_p;
        u32 data_len, io_len, tlen;

        io_p = sg_virt(io_sg);
        io_len = io_sg->length;

        for ( ; offset; ) {
                tlen = min_t(u32, offset, io_len);
                offset -= tlen;
                io_len -= tlen;
                if (!io_len) {
                        io_sg = sg_next(io_sg);
                        io_p = sg_virt(io_sg);
                        io_len = io_sg->length;
                } else
                        io_p += tlen;
        }

        data_p = sg_virt(data_sg);
        data_len = data_sg->length;

        for ( ; length; ) {
                tlen = min_t(u32, io_len, data_len);
                tlen = min_t(u32, tlen, length);

                if (op == NVMET_FCOP_WRITEDATA)
                        memcpy(data_p, io_p, tlen);
                else
                        memcpy(io_p, data_p, tlen);

                length -= tlen;

                io_len -= tlen;
                if ((!io_len) && (length)) {
                        io_sg = sg_next(io_sg);
                        io_p = sg_virt(io_sg);
                        io_len = io_sg->length;
                } else
                        io_p += tlen;

                data_len -= tlen;
                if ((!data_len) && (length)) {
                        data_sg = sg_next(data_sg);
                        data_p = sg_virt(data_sg);
                        data_len = data_sg->length;
                } else
                        data_p += tlen;
        }
}
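
/*
 * Target transport callback for the next FCP operation (data transfer
 * and/or response) on a looped-back command. If the initiator-side
 * request has already gone away (tfcp_req->fcpreq == NULL), the op
 * completes as if successful but no data is moved.
 */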
static int
fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
                        struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
        struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
        struct nvmefc_fcp_req *fcpreq;
        u32 rsplen = 0, xfrlen = 0;
        int fcp_err = 0, active, aborted;
        u8 op = tgt_fcpreq->op;

        spin_lock(&tfcp_req->reqlock);
        fcpreq = tfcp_req->fcpreq;
        active = tfcp_req->active;
        aborted = tfcp_req->aborted;
        tfcp_req->active = true;
        spin_unlock(&tfcp_req->reqlock);

        if (unlikely(active))
                /* illegal - call while i/o active */
                return -EALREADY;

        if (unlikely(aborted)) {
                /* target transport has aborted i/o prior */
                spin_lock(&tfcp_req->reqlock);
                tfcp_req->active = false;
                spin_unlock(&tfcp_req->reqlock);
                tgt_fcpreq->transferred_length = 0;
                tgt_fcpreq->fcp_error = -ECANCELED;
                tgt_fcpreq->done(tgt_fcpreq);
                return 0;
        }

        /*
         * if fcpreq is NULL, the I/O has been aborted (from
         * initiator side). For the target side, act as if all is well
         * but don't actually move data.
         */

        switch (op) {
        case NVMET_FCOP_WRITEDATA:
                xfrlen = tgt_fcpreq->transfer_length;
                if (fcpreq) {
                        fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
                                        fcpreq->first_sgl, tgt_fcpreq->offset,
                                        xfrlen);
                        fcpreq->transferred_length += xfrlen;
                }
                break;

        case NVMET_FCOP_READDATA:
        case NVMET_FCOP_READDATA_RSP:
                xfrlen = tgt_fcpreq->transfer_length;
                if (fcpreq) {
                        fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
                                        fcpreq->first_sgl, tgt_fcpreq->offset,
                                        xfrlen);
                        fcpreq->transferred_length += xfrlen;
                }
                if (op == NVMET_FCOP_READDATA)
                        break;

                /* Fall-Thru to RSP handling */
                /* FALLTHRU */

        case NVMET_FCOP_RSP:
                if (fcpreq) {
                        rsplen = ((fcpreq->rsplen < tgt_fcpreq->rsplen) ?
                                        fcpreq->rsplen : tgt_fcpreq->rsplen);
                        memcpy(fcpreq->rspaddr, tgt_fcpreq->rspaddr, rsplen);
                        if (rsplen < tgt_fcpreq->rsplen)
                                fcp_err = -E2BIG;
                        fcpreq->rcv_rsplen = rsplen;
                        fcpreq->status = 0;
                }
                tfcp_req->status = 0;
                break;

        default:
                fcp_err = -EINVAL;
                break;
        }

        spin_lock(&tfcp_req->reqlock);
        tfcp_req->active = false;
        spin_unlock(&tfcp_req->reqlock);

        tgt_fcpreq->transferred_length = xfrlen;
        tgt_fcpreq->fcp_error = fcp_err;
        tgt_fcpreq->done(tgt_fcpreq);

        return 0;
}

static void
fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
                        struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
        struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);

        /*
         * mark aborted only in case there were 2 threads in transport
         * (one doing io, other doing abort) and only kills ops posted
         * after the abort request
         */
        spin_lock(&tfcp_req->reqlock);
        tfcp_req->aborted = true;
        spin_unlock(&tfcp_req->reqlock);

        tfcp_req->status = NVME_SC_INTERNAL;

        /*
         * nothing more to do. If io wasn't active, the transport should
         * immediately call the req_release. If it was active, the op
         * will complete, and the lldd should call req_release.
         */
}

static void
fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
                        struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
        struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);

        schedule_work(&tfcp_req->work);
}

static void
fcloop_ls_abort(struct nvme_fc_local_port *localport,
                        struct nvme_fc_remote_port *remoteport,
                        struct nvmefc_ls_req *lsreq)
{
}

static void
fcloop_fcp_abort(struct nvme_fc_local_port *localport,
                        struct nvme_fc_remote_port *remoteport,
                        void *hw_queue_handle,
                        struct nvmefc_fcp_req *fcpreq)
{
        struct fcloop_rport *rport = remoteport->private;
        struct fcloop_ini_fcpreq *inireq = fcpreq->private;
        struct fcloop_fcpreq *tfcp_req = inireq->tfcp_req;

        if (!tfcp_req)
                /* abort has already been called */
                goto finish;

        /* break initiator/target relationship for io */
        spin_lock(&tfcp_req->reqlock);
        inireq->tfcp_req = NULL;
        tfcp_req->fcpreq = NULL;
        spin_unlock(&tfcp_req->reqlock);

        if (rport->targetport)
                nvmet_fc_rcv_fcp_abort(rport->targetport,
                                        &tfcp_req->tgt_fcp_req);

finish:
        /* post the aborted io completion */
        fcpreq->status = -ECANCELED;
        schedule_work(&inireq->iniwork);
}
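
/*
 * nport reference handling: the nport is created (or revived) by
 * fcloop_alloc_nport() and a reference is dropped as each of its
 * remote/target ports is deleted; the final put unlinks the nport from
 * fcloop_nports and frees it.
 */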
static void
fcloop_nport_free(struct kref *ref)
{
        struct fcloop_nport *nport =
                container_of(ref, struct fcloop_nport, ref);
        unsigned long flags;

        spin_lock_irqsave(&fcloop_lock, flags);
        list_del(&nport->nport_list);
        spin_unlock_irqrestore(&fcloop_lock, flags);

        kfree(nport);
}

static void
fcloop_nport_put(struct fcloop_nport *nport)
{
        kref_put(&nport->ref, fcloop_nport_free);
}

static int
fcloop_nport_get(struct fcloop_nport *nport)
{
        return kref_get_unless_zero(&nport->ref);
}

static void
fcloop_localport_delete(struct nvme_fc_local_port *localport)
{
        struct fcloop_lport_priv *lport_priv = localport->private;
        struct fcloop_lport *lport = lport_priv->lport;

        /* release any threads waiting for the unreg to complete */
        complete(&lport->unreg_done);
}

static void
fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
        struct fcloop_rport *rport = remoteport->private;

        fcloop_nport_put(rport->nport);
}

static void
fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
{
        struct fcloop_tport *tport = targetport->private;

        fcloop_nport_put(tport->nport);
}
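
/*
 * LLDD capability limits and the templates handed to the two
 * transports: fctemplate registers fcloop as the LLDD for the nvme-fc
 * host side, tgttemplate for the nvmet-fc target side. The *_priv_sz
 * fields make the transports allocate the fcloop_* wrapper structures
 * alongside their own objects.
 */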
#define FCLOOP_HW_QUEUES        4
#define FCLOOP_SGL_SEGS         256
#define FCLOOP_DMABOUND_4G      0xFFFFFFFF

static struct nvme_fc_port_template fctemplate = {
        .localport_delete = fcloop_localport_delete,
        .remoteport_delete = fcloop_remoteport_delete,
        .create_queue = fcloop_create_queue,
        .delete_queue = fcloop_delete_queue,
        .ls_req = fcloop_ls_req,
        .fcp_io = fcloop_fcp_req,
        .ls_abort = fcloop_ls_abort,
        .fcp_abort = fcloop_fcp_abort,
        .max_hw_queues = FCLOOP_HW_QUEUES,
        .max_sgl_segments = FCLOOP_SGL_SEGS,
        .max_dif_sgl_segments = FCLOOP_SGL_SEGS,
        .dma_boundary = FCLOOP_DMABOUND_4G,
        /* sizes of additional private data for data structures */
        .local_priv_sz = sizeof(struct fcloop_lport_priv),
        .remote_priv_sz = sizeof(struct fcloop_rport),
        .lsrqst_priv_sz = sizeof(struct fcloop_lsreq),
        .fcprqst_priv_sz = sizeof(struct fcloop_ini_fcpreq),
};

static struct nvmet_fc_target_template tgttemplate = {
        .targetport_delete = fcloop_targetport_delete,
        .xmt_ls_rsp = fcloop_xmt_ls_rsp,
        .fcp_op = fcloop_fcp_op,
        .fcp_abort = fcloop_tgt_fcp_abort,
        .fcp_req_release = fcloop_fcp_req_release,
        .max_hw_queues = FCLOOP_HW_QUEUES,
        .max_sgl_segments = FCLOOP_SGL_SEGS,
        .max_dif_sgl_segments = FCLOOP_SGL_SEGS,
        .dma_boundary = FCLOOP_DMABOUND_4G,
        /* optional features */
        .target_features = NVMET_FCTGTFEAT_CMD_IN_ISR |
                           NVMET_FCTGTFEAT_OPDONE_IN_ISR,
        /* sizes of additional private data for data structures */
        .target_priv_sz = sizeof(struct fcloop_tport),
};

static ssize_t
fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
                const char *buf, size_t count)
{
        struct nvme_fc_port_info pinfo;
        struct fcloop_ctrl_options *opts;
        struct nvme_fc_local_port *localport;
        struct fcloop_lport *lport;
        struct fcloop_lport_priv *lport_priv;
        unsigned long flags;
        int ret = -ENOMEM;

        lport = kzalloc(sizeof(*lport), GFP_KERNEL);
        if (!lport)
                return -ENOMEM;

        opts = kzalloc(sizeof(*opts), GFP_KERNEL);
        if (!opts)
                goto out_free_lport;

        ret = fcloop_parse_options(opts, buf);
        if (ret)
                goto out_free_opts;

        /* everything there ? */
        if ((opts->mask & LPORT_OPTS) != LPORT_OPTS) {
                ret = -EINVAL;
                goto out_free_opts;
        }

        memset(&pinfo, 0, sizeof(pinfo));
        pinfo.node_name = opts->wwnn;
        pinfo.port_name = opts->wwpn;
        pinfo.port_role = opts->roles;
        pinfo.port_id = opts->fcaddr;

        ret = nvme_fc_register_localport(&pinfo, &fctemplate, NULL, &localport);
        if (!ret) {
                /* success */
                lport_priv = localport->private;
                lport_priv->lport = lport;

                lport->localport = localport;
                INIT_LIST_HEAD(&lport->lport_list);

                spin_lock_irqsave(&fcloop_lock, flags);
                list_add_tail(&lport->lport_list, &fcloop_lports);
                spin_unlock_irqrestore(&fcloop_lock, flags);
        }

out_free_opts:
        kfree(opts);
out_free_lport:
        /* free only if we're going to fail */
        if (ret)
                kfree(lport);

        return ret ? ret : count;
}

static void
__unlink_local_port(struct fcloop_lport *lport)
{
        list_del(&lport->lport_list);
}

static int
__wait_localport_unreg(struct fcloop_lport *lport)
{
        int ret;

        init_completion(&lport->unreg_done);

        ret = nvme_fc_unregister_localport(lport->localport);

        wait_for_completion(&lport->unreg_done);

        kfree(lport);

        return ret;
}

static ssize_t
fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
                const char *buf, size_t count)
{
        struct fcloop_lport *tlport, *lport = NULL;
        u64 nodename, portname;
        unsigned long flags;
        int ret;

        ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
        if (ret)
                return ret;

        spin_lock_irqsave(&fcloop_lock, flags);

        list_for_each_entry(tlport, &fcloop_lports, lport_list) {
                if (tlport->localport->node_name == nodename &&
                    tlport->localport->port_name == portname) {
                        lport = tlport;
                        __unlink_local_port(lport);
                        break;
                }
        }
        spin_unlock_irqrestore(&fcloop_lock, flags);

        if (!lport)
                return -ENOENT;

        ret = __wait_localport_unreg(lport);

        return ret ? ret : count;
}
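
/*
 * Find or create the fcloop_nport for the wwnn/wwpn given in buf. The
 * same helper backs both add_remote_port (remoteport == true, which
 * additionally requires lpwwnn/lpwwpn to name the local port to attach
 * to) and add_target_port. Returns NULL on any failure.
 */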
static struct fcloop_nport *
fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
{
        struct fcloop_nport *newnport, *nport = NULL;
        struct fcloop_lport *tmplport, *lport = NULL;
        struct fcloop_ctrl_options *opts;
        unsigned long flags;
        u32 opts_mask = (remoteport) ? RPORT_OPTS : TGTPORT_OPTS;
        int ret;

        opts = kzalloc(sizeof(*opts), GFP_KERNEL);
        if (!opts)
                return NULL;

        ret = fcloop_parse_options(opts, buf);
        if (ret)
                goto out_free_opts;

        /* everything there ? */
        if ((opts->mask & opts_mask) != opts_mask) {
                ret = -EINVAL;
                goto out_free_opts;
        }

        newnport = kzalloc(sizeof(*newnport), GFP_KERNEL);
        if (!newnport)
                goto out_free_opts;

        INIT_LIST_HEAD(&newnport->nport_list);
        newnport->node_name = opts->wwnn;
        newnport->port_name = opts->wwpn;
        if (opts->mask & NVMF_OPT_ROLES)
                newnport->port_role = opts->roles;
        if (opts->mask & NVMF_OPT_FCADDR)
                newnport->port_id = opts->fcaddr;
        kref_init(&newnport->ref);

        spin_lock_irqsave(&fcloop_lock, flags);

        list_for_each_entry(tmplport, &fcloop_lports, lport_list) {
                if (tmplport->localport->node_name == opts->wwnn &&
                    tmplport->localport->port_name == opts->wwpn)
                        goto out_invalid_opts;

                if (tmplport->localport->node_name == opts->lpwwnn &&
                    tmplport->localport->port_name == opts->lpwwpn)
                        lport = tmplport;
        }

        if (remoteport) {
                if (!lport)
                        goto out_invalid_opts;
                newnport->lport = lport;
        }

        list_for_each_entry(nport, &fcloop_nports, nport_list) {
                if (nport->node_name == opts->wwnn &&
                    nport->port_name == opts->wwpn) {
                        if ((remoteport && nport->rport) ||
                            (!remoteport && nport->tport)) {
                                nport = NULL;
                                goto out_invalid_opts;
                        }

                        fcloop_nport_get(nport);

                        spin_unlock_irqrestore(&fcloop_lock, flags);

                        if (remoteport)
                                nport->lport = lport;
                        if (opts->mask & NVMF_OPT_ROLES)
                                nport->port_role = opts->roles;
                        if (opts->mask & NVMF_OPT_FCADDR)
                                nport->port_id = opts->fcaddr;
                        goto out_free_newnport;
                }
        }

        list_add_tail(&newnport->nport_list, &fcloop_nports);

        spin_unlock_irqrestore(&fcloop_lock, flags);

        kfree(opts);
        return newnport;

out_invalid_opts:
        spin_unlock_irqrestore(&fcloop_lock, flags);
out_free_newnport:
        kfree(newnport);
out_free_opts:
        kfree(opts);
        return nport;
}

static ssize_t
fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
                const char *buf, size_t count)
{
        struct nvme_fc_remote_port *remoteport;
        struct fcloop_nport *nport;
        struct fcloop_rport *rport;
        struct nvme_fc_port_info pinfo;
        int ret;

        nport = fcloop_alloc_nport(buf, count, true);
        if (!nport)
                return -EIO;

        memset(&pinfo, 0, sizeof(pinfo));
        pinfo.node_name = nport->node_name;
        pinfo.port_name = nport->port_name;
        pinfo.port_role = nport->port_role;
        pinfo.port_id = nport->port_id;

        ret = nvme_fc_register_remoteport(nport->lport->localport,
                                                &pinfo, &remoteport);
        if (ret || !remoteport) {
                fcloop_nport_put(nport);
                return ret;
        }

        /* success */
        rport = remoteport->private;
        rport->remoteport = remoteport;
        rport->targetport = (nport->tport) ? nport->tport->targetport : NULL;
        if (nport->tport) {
                nport->tport->remoteport = remoteport;
                nport->tport->lport = nport->lport;
        }
        rport->nport = nport;
        rport->lport = nport->lport;
        nport->rport = rport;

        return count;
}

static struct fcloop_rport *
__unlink_remote_port(struct fcloop_nport *nport)
{
        struct fcloop_rport *rport = nport->rport;

        if (rport && nport->tport)
                nport->tport->remoteport = NULL;
        nport->rport = NULL;

        return rport;
}

static int
__remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport)
{
        if (!rport)
                return -EALREADY;

        return nvme_fc_unregister_remoteport(rport->remoteport);
}

static ssize_t
fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr,
                const char *buf, size_t count)
{
        struct fcloop_nport *nport = NULL, *tmpport;
        /* must not be static: concurrent writers would share the pointer */
        struct fcloop_rport *rport;
        u64 nodename, portname;
        unsigned long flags;
        int ret;

        ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
        if (ret)
                return ret;

        spin_lock_irqsave(&fcloop_lock, flags);

        list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
                if (tmpport->node_name == nodename &&
                    tmpport->port_name == portname && tmpport->rport) {
                        nport = tmpport;
                        rport = __unlink_remote_port(nport);
                        break;
                }
        }
        spin_unlock_irqrestore(&fcloop_lock, flags);

        if (!nport)
                return -ENOENT;

        ret = __remoteport_unreg(nport, rport);

        return ret ? ret : count;
}

static ssize_t
fcloop_create_target_port(struct device *dev, struct device_attribute *attr,
                const char *buf, size_t count)
{
        struct nvmet_fc_target_port *targetport;
        struct fcloop_nport *nport;
        struct fcloop_tport *tport;
        struct nvmet_fc_port_info tinfo;
        int ret;

        nport = fcloop_alloc_nport(buf, count, false);
        if (!nport)
                return -EIO;

        tinfo.node_name = nport->node_name;
        tinfo.port_name = nport->port_name;
        tinfo.port_id = nport->port_id;

        ret = nvmet_fc_register_targetport(&tinfo, &tgttemplate, NULL,
                                                &targetport);
        if (ret) {
                fcloop_nport_put(nport);
                return ret;
        }

        /* success */
        tport = targetport->private;
        tport->targetport = targetport;
        tport->remoteport = (nport->rport) ? nport->rport->remoteport : NULL;
        if (nport->rport)
                nport->rport->targetport = targetport;
        tport->nport = nport;
        tport->lport = nport->lport;
        nport->tport = tport;

        return count;
}

static struct fcloop_tport *
__unlink_target_port(struct fcloop_nport *nport)
{
        struct fcloop_tport *tport = nport->tport;

        if (tport && nport->rport)
                nport->rport->targetport = NULL;
        nport->tport = NULL;

        return tport;
}

static int
__targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport)
{
        if (!tport)
                return -EALREADY;

        return nvmet_fc_unregister_targetport(tport->targetport);
}

static ssize_t
fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
                const char *buf, size_t count)
{
        struct fcloop_nport *nport = NULL, *tmpport;
        struct fcloop_tport *tport;
        u64 nodename, portname;
        unsigned long flags;
        int ret;

        ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
        if (ret)
                return ret;

        spin_lock_irqsave(&fcloop_lock, flags);

        list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
                if (tmpport->node_name == nodename &&
                    tmpport->port_name == portname && tmpport->tport) {
                        nport = tmpport;
                        tport = __unlink_target_port(nport);
                        break;
                }
        }
        spin_unlock_irqrestore(&fcloop_lock, flags);

        if (!nport)
                return -ENOENT;

        ret = __targetport_unreg(nport, tport);

        return ret ? ret : count;
}
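
/*
 * sysfs control interface under /sys/class/fcloop/ctl. A minimal,
 * purely illustrative sequence (the WWN values are arbitrary):
 *
 *   echo "wwnn=0x1000000000000001,wwpn=0x2000000000000001" > add_local_port
 *   echo "wwnn=0x1000000000000002,wwpn=0x2000000000000002" > add_target_port
 *   echo "wwnn=0x1000000000000002,wwpn=0x2000000000000002,\
 *         lpwwnn=0x1000000000000001,lpwwpn=0x2000000000000001" > add_remote_port
 *
 * Each del_* attribute takes the wwnn/wwpn pair of the port to delete.
 */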
static DEVICE_ATTR(add_local_port, 0200, NULL, fcloop_create_local_port);
static DEVICE_ATTR(del_local_port, 0200, NULL, fcloop_delete_local_port);
static DEVICE_ATTR(add_remote_port, 0200, NULL, fcloop_create_remote_port);
static DEVICE_ATTR(del_remote_port, 0200, NULL, fcloop_delete_remote_port);
static DEVICE_ATTR(add_target_port, 0200, NULL, fcloop_create_target_port);
static DEVICE_ATTR(del_target_port, 0200, NULL, fcloop_delete_target_port);

static struct attribute *fcloop_dev_attrs[] = {
        &dev_attr_add_local_port.attr,
        &dev_attr_del_local_port.attr,
        &dev_attr_add_remote_port.attr,
        &dev_attr_del_remote_port.attr,
        &dev_attr_add_target_port.attr,
        &dev_attr_del_target_port.attr,
        NULL
};

static struct attribute_group fcloop_dev_attrs_group = {
        .attrs = fcloop_dev_attrs,
};

static const struct attribute_group *fcloop_dev_attr_groups[] = {
        &fcloop_dev_attrs_group,
        NULL,
};

static struct class *fcloop_class;
static struct device *fcloop_device;

static int __init fcloop_init(void)
{
        int ret;

        fcloop_class = class_create(THIS_MODULE, "fcloop");
        if (IS_ERR(fcloop_class)) {
                pr_err("couldn't register class fcloop\n");
                ret = PTR_ERR(fcloop_class);
                return ret;
        }

        fcloop_device = device_create_with_groups(
                                fcloop_class, NULL, MKDEV(0, 0), NULL,
                                fcloop_dev_attr_groups, "ctl");
        if (IS_ERR(fcloop_device)) {
                pr_err("couldn't create ctl device!\n");
                ret = PTR_ERR(fcloop_device);
                goto out_destroy_class;
        }

        get_device(fcloop_device);

        return 0;

out_destroy_class:
        class_destroy(fcloop_class);
        return ret;
}

static void __exit fcloop_exit(void)
{
        struct fcloop_lport *lport;
        struct fcloop_nport *nport;
        struct fcloop_tport *tport;
        struct fcloop_rport *rport;
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&fcloop_lock, flags);

        for (;;) {
                nport = list_first_entry_or_null(&fcloop_nports,
                                                typeof(*nport), nport_list);
                if (!nport)
                        break;

                tport = __unlink_target_port(nport);
                rport = __unlink_remote_port(nport);

                spin_unlock_irqrestore(&fcloop_lock, flags);

                ret = __targetport_unreg(nport, tport);
                if (ret)
                        pr_warn("%s: Failed deleting target port\n", __func__);

                ret = __remoteport_unreg(nport, rport);
                if (ret)
                        pr_warn("%s: Failed deleting remote port\n", __func__);

                spin_lock_irqsave(&fcloop_lock, flags);
        }

        for (;;) {
                lport = list_first_entry_or_null(&fcloop_lports,
                                                typeof(*lport), lport_list);
                if (!lport)
                        break;

                __unlink_local_port(lport);

                spin_unlock_irqrestore(&fcloop_lock, flags);

                ret = __wait_localport_unreg(lport);
                if (ret)
                        pr_warn("%s: Failed deleting local port\n", __func__);

                spin_lock_irqsave(&fcloop_lock, flags);
        }

        spin_unlock_irqrestore(&fcloop_lock, flags);

        put_device(fcloop_device);

        device_destroy(fcloop_class, MKDEV(0, 0));
        class_destroy(fcloop_class);
}

module_init(fcloop_init);
module_exit(fcloop_exit);

MODULE_LICENSE("GPL v2");