/*******************************************************************************
 * Filename:  target_core_tpg.c
 *
 * This file contains generic Target Portal Group related functions.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_proto.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

extern struct se_device *g_lun0_dev;

static DEFINE_SPINLOCK(tpg_lock);
static LIST_HEAD(tpg_list);
/*	__core_tpg_get_initiator_node_acl():
 *
 *	mutex_lock(&tpg->acl_node_mutex); must be held when calling
 */
struct se_node_acl *__core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	const char *initiatorname)
{
	struct se_node_acl *acl;

	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname))
			return acl;
	}

	return NULL;
}

/*	core_tpg_get_initiator_node_acl():
 *
 *
 */
struct se_node_acl *core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;
	/*
	 * Obtain se_node_acl->acl_kref using fabric driver provided
	 * initiatorname[] during node acl endpoint lookup driven by
	 * new se_session login.
	 *
	 * The reference is held until se_session shutdown -> release
	 * occurs via fabric driver invoked transport_deregister_session()
	 * or transport_free_session() code.
	 */
	mutex_lock(&tpg->acl_node_mutex);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl) {
		if (!kref_get_unless_zero(&acl->acl_kref))
			acl = NULL;
	}
	mutex_unlock(&tpg->acl_node_mutex);

	return acl;
}
EXPORT_SYMBOL(core_tpg_get_initiator_node_acl);
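
/*
 * Illustrative sketch (not part of this file): per the kref comment above,
 * a fabric driver's login path would pair the lookup with a matching put at
 * session teardown, e.g.:
 *
 *	acl = core_tpg_get_initiator_node_acl(se_tpg, iqn);
 *	if (!acl)
 *		return -ENOENT;
 *	...
 *	transport_deregister_session(sess);	// eventually drops acl_kref
 *
 * Here "se_tpg", "iqn" and "sess" are hypothetical fabric-driver locals.
 */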
void core_allocate_nexus_loss_ua(
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	if (!nacl)
		return;

	rcu_read_lock();
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
		core_scsi3_ua_allocate(deve, 0x29,
			ASCQ_29H_NEXUS_LOSS_OCCURRED);
	rcu_read_unlock();
}
EXPORT_SYMBOL(core_allocate_nexus_loss_ua);

/*	core_tpg_add_node_to_devs():
 *
 *
 */
void core_tpg_add_node_to_devs(
	struct se_node_acl *acl,
	struct se_portal_group *tpg,
	struct se_lun *lun_orig)
{
	bool lun_access_ro = true;
	struct se_lun *lun;
	struct se_device *dev;

	mutex_lock(&tpg->tpg_lun_mutex);
	hlist_for_each_entry_rcu(lun, &tpg->tpg_lun_hlist, link) {
		if (lun_orig && lun != lun_orig)
			continue;

		dev = rcu_dereference_check(lun->lun_se_dev,
					    lockdep_is_held(&tpg->tpg_lun_mutex));
		/*
		 * By default in LIO-Target $FABRIC_MOD,
		 * demo_mode_write_protect is ON, or READ_ONLY;
		 */
		if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
			lun_access_ro = false;
		} else {
			/*
			 * Allow only optical drives to issue R/W in default RO
			 * demo mode.
			 */
			if (dev->transport->get_device_type(dev) == TYPE_DISK)
				lun_access_ro = true;
			else
				lun_access_ro = false;
		}

		pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%llu] - Adding %s"
			" access for LUN in Demo Mode\n",
			tpg->se_tpg_tfo->get_fabric_name(),
			tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
			lun_access_ro ? "READ-ONLY" : "READ-WRITE");

		core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun,
						 lun_access_ro, acl, tpg);
		/*
		 * Check to see if there are any existing persistent reservation
		 * APTPL pre-registrations that need to be enabled for this
		 * dynamic LUN ACL now.
		 */
		core_scsi3_check_aptpl_registration(dev, tpg, lun, acl,
						    lun->unpacked_lun);
	}
	mutex_unlock(&tpg->tpg_lun_mutex);
}
static void
target_set_nacl_queue_depth(struct se_portal_group *tpg,
			    struct se_node_acl *acl, u32 queue_depth)
{
	acl->queue_depth = queue_depth;

	if (!acl->queue_depth) {
		pr_warn("Queue depth for %s Initiator Node: %s is 0, "
			"defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(),
			acl->initiatorname);
		acl->queue_depth = 1;
	}
}

static struct se_node_acl *target_alloc_node_acl(struct se_portal_group *tpg,
		const unsigned char *initiatorname)
{
	struct se_node_acl *acl;
	u32 queue_depth;

	acl = kzalloc(max(sizeof(*acl), tpg->se_tpg_tfo->node_acl_size),
			GFP_KERNEL);
	if (!acl)
		return NULL;

	INIT_LIST_HEAD(&acl->acl_list);
	INIT_LIST_HEAD(&acl->acl_sess_list);
	INIT_HLIST_HEAD(&acl->lun_entry_hlist);
	kref_init(&acl->acl_kref);
	init_completion(&acl->acl_free_comp);
	spin_lock_init(&acl->nacl_sess_lock);
	mutex_init(&acl->lun_entry_mutex);
	atomic_set(&acl->acl_pr_ref_count, 0);

	if (tpg->se_tpg_tfo->tpg_get_default_depth)
		queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
	else
		queue_depth = 1;
	target_set_nacl_queue_depth(tpg, acl, queue_depth);

	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
	acl->se_tpg = tpg;
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);

	tpg->se_tpg_tfo->set_default_node_attributes(acl);

	return acl;
}
static void target_add_node_acl(struct se_node_acl *acl)
{
	struct se_portal_group *tpg = acl->se_tpg;

	mutex_lock(&tpg->acl_node_mutex);
	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
	mutex_unlock(&tpg->acl_node_mutex);

	pr_debug("%s_TPG[%hu] - Added %s ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n",
		tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		acl->dynamic_node_acl ? "DYNAMIC" : "",
		acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(),
		acl->initiatorname);
}

bool target_tpg_has_node_acl(struct se_portal_group *tpg,
			     const char *initiatorname)
{
	struct se_node_acl *acl;
	bool found = false;

	mutex_lock(&tpg->acl_node_mutex);
	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname)) {
			found = true;
			break;
		}
	}
	mutex_unlock(&tpg->acl_node_mutex);

	return found;
}
EXPORT_SYMBOL(target_tpg_has_node_acl);
struct se_node_acl *core_tpg_check_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl)
		return acl;

	if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
		return NULL;

	acl = target_alloc_node_acl(tpg, initiatorname);
	if (!acl)
		return NULL;
	/*
	 * When allocating a dynamically generated node_acl, go ahead
	 * and take the extra kref now before returning to the fabric
	 * driver caller.
	 *
	 * Note this reference will be released at session shutdown
	 * time within transport_free_session() code.
	 */
	kref_get(&acl->acl_kref);
	acl->dynamic_node_acl = 1;

	/*
	 * Here we only create demo-mode MappedLUNs from the active
	 * TPG LUNs if the fabric is not explicitly asking for
	 * tpg_check_demo_mode_login_only() == 1.
	 */
	if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
	    (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1))
		core_tpg_add_node_to_devs(acl, tpg, NULL);

	target_add_node_acl(acl);
	return acl;
}
EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);
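
/*
 * Illustrative sketch (not part of this file): fabric drivers usually call
 * this from their session-establishment path, so demo-mode logins get a
 * dynamically generated ACL when no explicit one exists, e.g.:
 *
 *	se_nacl = core_tpg_check_initiator_node_acl(se_tpg,
 *						    (unsigned char *)iqn);
 *	if (!se_nacl)
 *		return -EPERM;	// no explicit ACL and demo mode disabled
 *
 * "se_tpg" and "iqn" are hypothetical fabric-driver locals.
 */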
void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
{
	while (atomic_read(&nacl->acl_pr_ref_count) != 0)
		cpu_relax();
}

struct se_node_acl *core_tpg_add_initiator_node_acl(
	struct se_portal_group *tpg,
	const char *initiatorname)
{
	struct se_node_acl *acl;

	mutex_lock(&tpg->acl_node_mutex);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl) {
		if (acl->dynamic_node_acl) {
			acl->dynamic_node_acl = 0;
			pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
				" for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
				tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
			mutex_unlock(&tpg->acl_node_mutex);
			return acl;
		}

		pr_err("ACL entry for %s Initiator"
			" Node %s already exists for TPG %u, ignoring"
			" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
		mutex_unlock(&tpg->acl_node_mutex);
		return ERR_PTR(-EEXIST);
	}
	mutex_unlock(&tpg->acl_node_mutex);

	acl = target_alloc_node_acl(tpg, initiatorname);
	if (!acl)
		return ERR_PTR(-ENOMEM);

	target_add_node_acl(acl);
	return acl;
}
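
/*
 * Illustrative sketch (not part of this file): this is the explicit-ACL
 * counterpart to core_tpg_check_initiator_node_acl(), driven from configfs
 * when userspace creates an ACL group for an initiator, e.g.:
 *
 *	acl = core_tpg_add_initiator_node_acl(se_tpg, name);
 *	if (IS_ERR(acl))
 *		return PTR_ERR(acl);	// -EEXIST or -ENOMEM
 *
 * "se_tpg" and "name" are hypothetical caller-side locals; note an existing
 * dynamic ACL is promoted to an explicit one rather than rejected.
 */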
static void target_shutdown_sessions(struct se_node_acl *acl)
{
	struct se_session *sess;
	unsigned long flags;

restart:
	spin_lock_irqsave(&acl->nacl_sess_lock, flags);
	list_for_each_entry(sess, &acl->acl_sess_list, sess_acl_list) {
		if (sess->sess_tearing_down)
			continue;

		list_del_init(&sess->sess_acl_list);
		spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);

		if (acl->se_tpg->se_tpg_tfo->close_session)
			acl->se_tpg->se_tpg_tfo->close_session(sess);
		goto restart;
	}
	spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);
}

void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
{
	struct se_portal_group *tpg = acl->se_tpg;

	mutex_lock(&tpg->acl_node_mutex);
	if (acl->dynamic_node_acl)
		acl->dynamic_node_acl = 0;
	list_del_init(&acl->acl_list);
	mutex_unlock(&tpg->acl_node_mutex);

	target_shutdown_sessions(acl);

	target_put_nacl(acl);
	/*
	 * Wait for last target_put_nacl() to complete in target_complete_nacl()
	 * for active fabric session transport_deregister_session() callbacks.
	 */
	wait_for_completion(&acl->acl_free_comp);
	core_tpg_wait_for_nacl_pr_ref(acl);
	core_free_device_list_for_node(acl, tpg);

	pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname);

	kfree(acl);
}
/*	core_tpg_set_initiator_node_queue_depth():
 *
 *
 */
int core_tpg_set_initiator_node_queue_depth(
	struct se_node_acl *acl,
	u32 queue_depth)
{
	struct se_portal_group *tpg = acl->se_tpg;

	/*
	 * User has requested to change the queue depth for an Initiator Node.
	 * Change the value in the Node's struct se_node_acl, and call
	 * target_set_nacl_queue_depth() to set the new queue depth.
	 */
	target_set_nacl_queue_depth(tpg, acl, queue_depth);

	/*
	 * Shutdown all pending sessions to force session reinstatement.
	 */
	target_shutdown_sessions(acl);

	pr_debug("Successfully changed queue depth to: %d for Initiator"
		" Node: %s on %s Target Portal Group: %u\n", acl->queue_depth,
		acl->initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg));

	return 0;
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
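
/*
 * Illustrative sketch (not part of this file): typically reached via a
 * fabric configfs attribute (for iscsi-target, the per-ACL cmdsn_depth
 * file); note existing sessions are closed to force reinstatement at the
 * new depth, e.g.:
 *
 *	ret = core_tpg_set_initiator_node_queue_depth(acl, 64);
 *	if (ret < 0)
 *		return ret;
 *
 * The attribute path mentioned is an example, not defined in this file.
 */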
/*	core_tpg_set_initiator_node_tag():
 *
 *	Initiator nodeacl tags are not used internally, but may be used by
 *	userspace to emulate aliases or groups.
 *	Returns length of newly-set tag or -EINVAL.
 */
int core_tpg_set_initiator_node_tag(
	struct se_portal_group *tpg,
	struct se_node_acl *acl,
	const char *new_tag)
{
	if (strlen(new_tag) >= MAX_ACL_TAG_SIZE)
		return -EINVAL;

	if (!strncmp("NULL", new_tag, 4)) {
		acl->acl_tag[0] = '\0';
		return 0;
	}

	return snprintf(acl->acl_tag, MAX_ACL_TAG_SIZE, "%s", new_tag);
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_tag);
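
/*
 * Illustrative sketch (not part of this file): writing the literal string
 * "NULL" clears a previously set tag instead of storing it, e.g.:
 *
 *	core_tpg_set_initiator_node_tag(tpg, acl, "groupA");	// tag = "groupA"
 *	core_tpg_set_initiator_node_tag(tpg, acl, "NULL");	// tag cleared
 *
 * "tpg" and "acl" are hypothetical caller-side locals.
 */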
static void core_tpg_lun_ref_release(struct percpu_ref *ref)
{
	struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);

	complete(&lun->lun_shutdown_comp);
}

int core_tpg_register(
	struct se_wwn *se_wwn,
	struct se_portal_group *se_tpg,
	int proto_id)
{
	int ret;

	if (!se_tpg)
		return -EINVAL;
	/*
	 * For the typical case where core_tpg_register() is called by a
	 * fabric driver from target_core_fabric_ops->fabric_make_tpg()
	 * configfs context, use the original tf_ops pointer already saved
	 * by target-core in target_fabric_make_wwn().
	 *
	 * Otherwise, for special cases like iscsi-target discovery TPGs
	 * the caller is responsible for setting ->se_tpg_tfo ahead of
	 * calling core_tpg_register().
	 */
	if (se_wwn)
		se_tpg->se_tpg_tfo = se_wwn->wwn_tf->tf_ops;

	if (!se_tpg->se_tpg_tfo) {
		pr_err("Unable to locate se_tpg->se_tpg_tfo pointer\n");
		return -EINVAL;
	}

	INIT_HLIST_HEAD(&se_tpg->tpg_lun_hlist);
	se_tpg->proto_id = proto_id;
	se_tpg->se_tpg_wwn = se_wwn;
	atomic_set(&se_tpg->tpg_pr_ref_count, 0);
	INIT_LIST_HEAD(&se_tpg->acl_node_list);
	INIT_LIST_HEAD(&se_tpg->se_tpg_node);
	INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
	spin_lock_init(&se_tpg->session_lock);
	mutex_init(&se_tpg->tpg_lun_mutex);
	mutex_init(&se_tpg->acl_node_mutex);

	if (se_tpg->proto_id >= 0) {
		se_tpg->tpg_virt_lun0 = core_tpg_alloc_lun(se_tpg, 0);
		if (IS_ERR(se_tpg->tpg_virt_lun0))
			return PTR_ERR(se_tpg->tpg_virt_lun0);

		ret = core_tpg_add_lun(se_tpg, se_tpg->tpg_virt_lun0,
				true, g_lun0_dev);
		if (ret < 0) {
			kfree(se_tpg->tpg_virt_lun0);
			return ret;
		}
	}

	spin_lock_bh(&tpg_lock);
	list_add_tail(&se_tpg->se_tpg_node, &tpg_list);
	spin_unlock_bh(&tpg_lock);

	pr_debug("TARGET_CORE[%s]: Allocated portal_group for endpoint: %s, "
		"Proto: %d, Portal Tag: %u\n", se_tpg->se_tpg_tfo->get_fabric_name(),
		se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) ?
		se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) : NULL,
		se_tpg->proto_id, se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));

	return 0;
}
EXPORT_SYMBOL(core_tpg_register);
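
/*
 * Illustrative sketch (not part of this file): per the comment above, the
 * common pattern is to register from a fabric driver's ->fabric_make_tpg()
 * configfs callback, e.g.:
 *
 *	ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_ISCSI);
 *	if (ret < 0)
 *		return ERR_PTR(ret);
 *
 * "wwn" and "tpg" are hypothetical fabric-driver locals; discovery-style
 * TPGs that pass a NULL se_wwn must set se_tpg->se_tpg_tfo themselves first.
 */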
int core_tpg_deregister(struct se_portal_group *se_tpg)
{
	const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
	struct se_node_acl *nacl, *nacl_tmp;
	LIST_HEAD(node_list);

	pr_debug("TARGET_CORE[%s]: Deallocating portal_group for endpoint: %s, "
		"Proto: %d, Portal Tag: %u\n", tfo->get_fabric_name(),
		tfo->tpg_get_wwn(se_tpg) ? tfo->tpg_get_wwn(se_tpg) : NULL,
		se_tpg->proto_id, tfo->tpg_get_tag(se_tpg));

	spin_lock_bh(&tpg_lock);
	list_del(&se_tpg->se_tpg_node);
	spin_unlock_bh(&tpg_lock);

	while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
		cpu_relax();

	mutex_lock(&se_tpg->acl_node_mutex);
	list_splice_init(&se_tpg->acl_node_list, &node_list);
	mutex_unlock(&se_tpg->acl_node_mutex);
	/*
	 * Release any remaining demo-mode generated se_node_acl that have
	 * not been released because of TFO->tpg_check_demo_mode_cache() == 1
	 * in transport_deregister_session().
	 */
	list_for_each_entry_safe(nacl, nacl_tmp, &node_list, acl_list) {
		list_del_init(&nacl->acl_list);
		core_tpg_wait_for_nacl_pr_ref(nacl);
		core_free_device_list_for_node(nacl, se_tpg);
		kfree(nacl);
	}

	if (se_tpg->proto_id >= 0) {
		core_tpg_remove_lun(se_tpg, se_tpg->tpg_virt_lun0);
		kfree_rcu(se_tpg->tpg_virt_lun0, rcu_head);
	}

	return 0;
}
EXPORT_SYMBOL(core_tpg_deregister);
struct se_lun *core_tpg_alloc_lun(
	struct se_portal_group *tpg,
	u64 unpacked_lun)
{
	struct se_lun *lun;

	lun = kzalloc(sizeof(*lun), GFP_KERNEL);
	if (!lun) {
		pr_err("Unable to allocate se_lun memory\n");
		return ERR_PTR(-ENOMEM);
	}
	lun->unpacked_lun = unpacked_lun;
	lun->lun_link_magic = SE_LUN_LINK_MAGIC;
	atomic_set(&lun->lun_acl_count, 0);
	init_completion(&lun->lun_ref_comp);
	init_completion(&lun->lun_shutdown_comp);
	INIT_LIST_HEAD(&lun->lun_deve_list);
	INIT_LIST_HEAD(&lun->lun_dev_link);
	atomic_set(&lun->lun_tg_pt_secondary_offline, 0);
	spin_lock_init(&lun->lun_deve_lock);
	mutex_init(&lun->lun_tg_pt_md_mutex);
	INIT_LIST_HEAD(&lun->lun_tg_pt_gp_link);
	spin_lock_init(&lun->lun_tg_pt_gp_lock);
	lun->lun_tpg = tpg;

	return lun;
}

int core_tpg_add_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	bool lun_access_ro,
	struct se_device *dev)
{
	int ret;

	ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release, 0,
			      GFP_KERNEL);
	if (ret < 0)
		goto out;

	ret = core_alloc_rtpi(lun, dev);
	if (ret)
		goto out_kill_ref;

	if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) &&
	    !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
		target_attach_tg_pt_gp(lun, dev->t10_alua.default_tg_pt_gp);

	mutex_lock(&tpg->tpg_lun_mutex);

	spin_lock(&dev->se_port_lock);
	lun->lun_index = dev->dev_index;
	rcu_assign_pointer(lun->lun_se_dev, dev);
	dev->export_count++;
	list_add_tail(&lun->lun_dev_link, &dev->dev_sep_list);
	spin_unlock(&dev->se_port_lock);

	if (dev->dev_flags & DF_READ_ONLY)
		lun->lun_access_ro = true;
	else
		lun->lun_access_ro = lun_access_ro;
	if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
		hlist_add_head_rcu(&lun->link, &tpg->tpg_lun_hlist);
	mutex_unlock(&tpg->tpg_lun_mutex);

	return 0;

out_kill_ref:
	percpu_ref_exit(&lun->lun_ref);
out:
	return ret;
}
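
/*
 * Illustrative sketch (not part of this file): core_tpg_alloc_lun() and
 * core_tpg_add_lun() are used as a pair, as in the virtual LUN 0 setup in
 * core_tpg_register() above, e.g.:
 *
 *	lun = core_tpg_alloc_lun(tpg, unpacked_lun);
 *	if (IS_ERR(lun))
 *		return PTR_ERR(lun);
 *	ret = core_tpg_add_lun(tpg, lun, false, dev);	// false => read-write
 *	if (ret < 0)
 *		kfree(lun);
 *
 * "tpg", "unpacked_lun" and "dev" are hypothetical caller-side locals.
 */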
void core_tpg_remove_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	lun->lun_shutdown = true;

	core_clear_lun_from_tpg(lun, tpg);
	/*
	 * Wait for any active I/O references to percpu se_lun->lun_ref to
	 * be released. Also, se_lun->lun_ref is now used by PR and ALUA
	 * logic when referencing a remote target port during ALL_TGT_PT=1
	 * and generating UNIT_ATTENTIONs for ALUA access state transition.
	 */
	transport_clear_lun_ref(lun);

	mutex_lock(&tpg->tpg_lun_mutex);
	if (lun->lun_se_dev) {
		target_detach_tg_pt_gp(lun);

		spin_lock(&dev->se_port_lock);
		list_del(&lun->lun_dev_link);
		dev->export_count--;
		rcu_assign_pointer(lun->lun_se_dev, NULL);
		spin_unlock(&dev->se_port_lock);
	}
	if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
		hlist_del_rcu(&lun->link);

	lun->lun_shutdown = false;
	mutex_unlock(&tpg->tpg_lun_mutex);

	percpu_ref_exit(&lun->lun_ref);
}