/*******************************************************************************
 * Filename:  target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the TCM Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/export.h>
#include <linux/t10-pi.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

static DEFINE_MUTEX(device_mutex);
static LIST_HEAD(device_list);
static DEFINE_IDR(devices_idr);

static struct se_hba *lun0_hba;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;

sense_reason_t
transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
{
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_dev_entry *deve;
	sense_reason_t ret = TCM_NO_SENSE;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, unpacked_lun);
	if (deve) {
		atomic_long_inc(&deve->total_cmds);

		if (se_cmd->data_direction == DMA_TO_DEVICE)
			atomic_long_add(se_cmd->data_length,
					&deve->write_bytes);
		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
			atomic_long_add(se_cmd->data_length,
					&deve->read_bytes);

		se_lun = rcu_dereference(deve->se_lun);

		if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
			se_lun = NULL;
			goto out_unlock;
		}

		se_cmd->se_lun = se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
		se_cmd->lun_ref_active = true;

		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
		    deve->lun_access_ro) {
			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
				" Access for 0x%08llx\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			rcu_read_unlock();
			ret = TCM_WRITE_PROTECTED;
			goto ref_dev;
		}
	}
out_unlock:
	rcu_read_unlock();
	if (!se_lun) {
		/*
		 * Use the se_portal_group->tpg_virt_lun0 to allow for
		 * REPORT_LUNS, et al to be returned when no active
		 * MappedLUN=0 exists for this Initiator Port.
		 */
		if (unpacked_lun != 0) {
			pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
				" Access for 0x%08llx\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			return TCM_NON_EXISTENT_LUN;
		}

		se_lun = se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->se_lun = se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->orig_fe_lun = 0;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;

		percpu_ref_get(&se_lun->lun_ref);
		se_cmd->lun_ref_active = true;

		/*
		 * Force WRITE PROTECT for virtual LUN 0
		 */
		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
		    (se_cmd->data_direction != DMA_NONE)) {
			ret = TCM_WRITE_PROTECTED;
			goto ref_dev;
		}
	}
	/*
	 * RCU reference protected by percpu se_lun->lun_ref taken above that
	 * must drop to zero (including initial reference) before this se_lun
	 * pointer can be kfree_rcu() by the final se_lun->lun_group put via
	 * target_core_fabric_configfs.c:target_fabric_port_release
	 */
ref_dev:
	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
	atomic_long_inc(&se_cmd->se_dev->num_cmds);

	if (se_cmd->data_direction == DMA_TO_DEVICE)
		atomic_long_add(se_cmd->data_length,
				&se_cmd->se_dev->write_bytes);
	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
		atomic_long_add(se_cmd->data_length,
				&se_cmd->se_dev->read_bytes);

	return ret;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);

int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
	unsigned long flags;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, unpacked_lun);
	if (deve) {
		se_lun = rcu_dereference(deve->se_lun);

		if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
			se_lun = NULL;
			goto out_unlock;
		}

		se_cmd->se_lun = se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
		se_cmd->lun_ref_active = true;
	}
out_unlock:
	rcu_read_unlock();
	if (!se_lun) {
		pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
			" Access for 0x%08llx\n",
			se_cmd->se_tfo->get_fabric_name(),
			unpacked_lun);
		return -ENODEV;
	}
	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
	se_tmr->tmr_dev = rcu_dereference_raw(se_lun->lun_se_dev);

	spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
	spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);

bool target_lun_is_rdonly(struct se_cmd *cmd)
{
	struct se_session *se_sess = cmd->se_sess;
	struct se_dev_entry *deve;
	bool ret;

	rcu_read_lock();
	deve = target_nacl_find_deve(se_sess->se_node_acl, cmd->orig_fe_lun);
	ret = deve && deve->lun_access_ro;
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(target_lun_is_rdonly);

/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_kref
 * when a matching rtpi is found.
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
	struct se_node_acl *nacl,
	u16 rtpi)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_portal_group *tpg = nacl->se_tpg;

	rcu_read_lock();
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
		lun = rcu_dereference(deve->se_lun);
		if (!lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		if (lun->lun_rtpi != rtpi)
			continue;

		kref_get(&deve->pr_kref);
		rcu_read_unlock();

		return deve;
	}
	rcu_read_unlock();

	return NULL;
}

void core_free_device_list_for_node(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
		struct se_lun *lun = rcu_dereference_check(deve->se_lun,
					lockdep_is_held(&nacl->lun_entry_mutex));
		core_disable_device_list_for_node(lun, deve, nacl, tpg);
	}
	mutex_unlock(&nacl->lun_entry_mutex);
}

void core_update_device_list_access(
	u64 mapped_lun,
	bool lun_access_ro,
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	deve = target_nacl_find_deve(nacl, mapped_lun);
	if (deve)
		deve->lun_access_ro = lun_access_ro;
	mutex_unlock(&nacl->lun_entry_mutex);
}

/*
 * Called with rcu_read_lock or nacl->lun_entry_mutex held.
 */
struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *nacl, u64 mapped_lun)
{
	struct se_dev_entry *deve;

	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
		if (deve->mapped_lun == mapped_lun)
			return deve;

	return NULL;
}
EXPORT_SYMBOL(target_nacl_find_deve);

void target_pr_kref_release(struct kref *kref)
{
	struct se_dev_entry *deve = container_of(kref, struct se_dev_entry,
						 pr_kref);
	complete(&deve->pr_comp);
}

static void
target_luns_data_has_changed(struct se_node_acl *nacl, struct se_dev_entry *new,
			     bool skip_new)
{
	struct se_dev_entry *tmp;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp, &nacl->lun_entry_hlist, link) {
		if (skip_new && tmp == new)
			continue;
		core_scsi3_ua_allocate(tmp, 0x3F,
				       ASCQ_3FH_REPORTED_LUNS_DATA_HAS_CHANGED);
	}
	rcu_read_unlock();
}

int core_enable_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u64 mapped_lun,
	bool lun_access_ro,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *orig, *new;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new) {
		pr_err("Unable to allocate se_dev_entry memory\n");
		return -ENOMEM;
	}

	spin_lock_init(&new->ua_lock);
	INIT_LIST_HEAD(&new->ua_list);
	INIT_LIST_HEAD(&new->lun_link);

	new->mapped_lun = mapped_lun;
	kref_init(&new->pr_kref);
	init_completion(&new->pr_comp);

	new->lun_access_ro = lun_access_ro;
	new->creation_time = get_jiffies_64();
	new->attach_count++;

	mutex_lock(&nacl->lun_entry_mutex);
	orig = target_nacl_find_deve(nacl, mapped_lun);
	if (orig && orig->se_lun) {
		struct se_lun *orig_lun = rcu_dereference_check(orig->se_lun,
					lockdep_is_held(&nacl->lun_entry_mutex));

		if (orig_lun != lun) {
			pr_err("Existing orig->se_lun doesn't match new lun"
				" for dynamic -> explicit NodeACL conversion:"
				" %s\n", nacl->initiatorname);
			mutex_unlock(&nacl->lun_entry_mutex);
			kfree(new);
			return -EINVAL;
		}

		if (orig->se_lun_acl != NULL) {
			pr_warn_ratelimited("Detected existing explicit"
				" se_lun_acl->se_lun_group reference for %s"
				" mapped_lun: %llu, failing\n",
				nacl->initiatorname, mapped_lun);
			mutex_unlock(&nacl->lun_entry_mutex);
			kfree(new);
			return -EINVAL;
		}

		rcu_assign_pointer(new->se_lun, lun);
		rcu_assign_pointer(new->se_lun_acl, lun_acl);
		hlist_del_rcu(&orig->link);
		hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
		mutex_unlock(&nacl->lun_entry_mutex);

		spin_lock(&lun->lun_deve_lock);
		list_del(&orig->lun_link);
		list_add_tail(&new->lun_link, &lun->lun_deve_list);
		spin_unlock(&lun->lun_deve_lock);

		kref_put(&orig->pr_kref, target_pr_kref_release);
		wait_for_completion(&orig->pr_comp);

		target_luns_data_has_changed(nacl, new, true);
		kfree_rcu(orig, rcu_head);
		return 0;
	}

	rcu_assign_pointer(new->se_lun, lun);
	rcu_assign_pointer(new->se_lun_acl, lun_acl);
	hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
	mutex_unlock(&nacl->lun_entry_mutex);

	spin_lock(&lun->lun_deve_lock);
	list_add_tail(&new->lun_link, &lun->lun_deve_list);
	spin_unlock(&lun->lun_deve_lock);

	target_luns_data_has_changed(nacl, new, true);
	return 0;
}

/*
 * Called with se_node_acl->lun_entry_mutex held.
 */
void core_disable_device_list_for_node(
	struct se_lun *lun,
	struct se_dev_entry *orig,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	/*
	 * If the MappedLUN entry is being disabled, the entry in
	 * lun->lun_deve_list must be removed now before clearing the
	 * struct se_dev_entry pointers below as logic in
	 * core_alua_do_transition_tg_pt() depends on these being present.
	 *
	 * deve->se_lun_acl will be NULL for demo-mode created LUNs
	 * that have not been explicitly converted to MappedLUNs ->
	 * struct se_lun_acl, but we remove deve->lun_link from
	 * lun->lun_deve_list. This also means that active UAs and
	 * NodeACL context specific PR metadata for demo-mode
	 * MappedLUN *deve will be released below..
	 */
	spin_lock(&lun->lun_deve_lock);
	list_del(&orig->lun_link);
	spin_unlock(&lun->lun_deve_lock);

	/*
	 * Disable struct se_dev_entry LUN ACL mapping
	 */
	core_scsi3_ua_release_all(orig);

	hlist_del_rcu(&orig->link);
	clear_bit(DEF_PR_REG_ACTIVE, &orig->deve_flags);
	orig->lun_access_ro = false;
	orig->creation_time = 0;
	orig->attach_count--;

	/*
	 * Before firing off RCU callback, wait for any in process SPEC_I_PT=1
	 * or REGISTER_AND_MOVE PR operation to complete.
	 */
	kref_put(&orig->pr_kref, target_pr_kref_release);
	wait_for_completion(&orig->pr_comp);

	rcu_assign_pointer(orig->se_lun, NULL);
	rcu_assign_pointer(orig->se_lun_acl, NULL);

	kfree_rcu(orig, rcu_head);

	core_scsi3_free_pr_reg_from_nacl(dev, nacl);
	target_luns_data_has_changed(nacl, NULL, false);
}

/*	core_clear_lun_from_tpg():
 *
 *
 */
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;

	mutex_lock(&tpg->acl_node_mutex);
	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
		mutex_lock(&nacl->lun_entry_mutex);
		hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
			struct se_lun *tmp_lun = rcu_dereference_check(deve->se_lun,
					lockdep_is_held(&nacl->lun_entry_mutex));

			if (lun != tmp_lun)
				continue;

			core_disable_device_list_for_node(lun, deve, nacl, tpg);
		}
		mutex_unlock(&nacl->lun_entry_mutex);
	}
	mutex_unlock(&tpg->acl_node_mutex);
}

int core_alloc_rtpi(struct se_lun *lun, struct se_device *dev)
{
	struct se_lun *tmp;

	spin_lock(&dev->se_port_lock);
	if (dev->export_count == 0x0000ffff) {
		pr_warn("Reached dev->dev_port_count =="
			" 0x0000ffff\n");
		spin_unlock(&dev->se_port_lock);
		return -ENOSPC;
	}
again:
	/*
	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
	 * Here is the table from spc4r17 section 7.7.3.8.
	 *
	 * Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
	 *
	 * Code			Description
	 * 0h			Reserved
	 * 1h			Relative port 1, historically known as port A
	 * 2h			Relative port 2, historically known as port B
	 * 3h to FFFFh		Relative port 3 through 65 535
	 */
	lun->lun_rtpi = dev->dev_rpti_counter++;
	if (!lun->lun_rtpi)
		goto again;

	list_for_each_entry(tmp, &dev->dev_sep_list, lun_dev_link) {
		/*
		 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
		 * for 16-bit wrap..
		 */
		if (lun->lun_rtpi == tmp->lun_rtpi)
			goto again;
	}
	spin_unlock(&dev->se_port_lock);

	return 0;
}

static void se_release_vpd_for_dev(struct se_device *dev)
{
	struct t10_vpd *vpd, *vpd_tmp;

	spin_lock(&dev->t10_wwn.t10_vpd_lock);
	list_for_each_entry_safe(vpd, vpd_tmp,
			&dev->t10_wwn.t10_vpd_list, vpd_list) {
		list_del(&vpd->vpd_list);
		kfree(vpd);
	}
	spin_unlock(&dev->t10_wwn.t10_vpd_lock);
}

static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
	u32 aligned_max_sectors;
	u32 alignment;

	/*
	 * Limit max_sectors to a PAGE_SIZE aligned value for modern
	 * transport_allocate_data_tasks() operation.
	 */
	alignment = max(1ul, PAGE_SIZE / block_size);
	aligned_max_sectors = rounddown(max_sectors, alignment);

	if (max_sectors != aligned_max_sectors)
		pr_info("Rounding down aligned max_sectors from %u to %u\n",
			max_sectors, aligned_max_sectors);

	return aligned_max_sectors;
}
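
/*
 * Worked example for se_dev_align_max_sectors() above (illustrative
 * numbers, not taken from a real device): with PAGE_SIZE == 4096 and
 * block_size == 512, alignment is 8 blocks, so max_sectors == 2047 is
 * rounded down to 2040. For block_size >= PAGE_SIZE the max(1ul, ...)
 * clamp keeps alignment at a single block and max_sectors is unchanged.
 */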

int core_dev_add_lun(
	struct se_portal_group *tpg,
	struct se_device *dev,
	struct se_lun *lun)
{
	int rc;

	rc = core_tpg_add_lun(tpg, lun, false, dev);
	if (rc < 0)
		return rc;

	pr_debug("%s_TPG[%u]_LUN[%llu] - Activated %s Logical Unit from"
		" CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id);
	/*
	 * Update LUN maps for dynamically added initiators when
	 * generate_node_acl is enabled.
	 */
	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
		struct se_node_acl *acl;

		mutex_lock(&tpg->acl_node_mutex);
		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
			if (acl->dynamic_node_acl &&
			    (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
			     !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
				core_tpg_add_node_to_devs(acl, tpg, lun);
			}
		}
		mutex_unlock(&tpg->acl_node_mutex);
	}

	return 0;
}

/*	core_dev_del_lun():
 *
 *
 */
void core_dev_del_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	pr_debug("%s_TPG[%u]_LUN[%llu] - Deactivating %s Logical Unit from"
		" device object\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name());

	core_tpg_remove_lun(tpg, lun);
}

struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *nacl,
	u64 mapped_lun,
	int *ret)
{
	struct se_lun_acl *lacl;

	if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
		pr_err("%s InitiatorName exceeds maximum size.\n",
			tpg->se_tpg_tfo->get_fabric_name());
		*ret = -EOVERFLOW;
		return NULL;
	}
	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
	if (!lacl) {
		pr_err("Unable to allocate memory for struct se_lun_acl.\n");
		*ret = -ENOMEM;
		return NULL;
	}

	lacl->mapped_lun = mapped_lun;
	lacl->se_lun_nacl = nacl;

	return lacl;
}

int core_dev_add_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl,
	struct se_lun *lun,
	bool lun_access_ro)
{
	struct se_node_acl *nacl = lacl->se_lun_nacl;
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	if (!nacl)
		return -EINVAL;

	if (lun->lun_access_ro)
		lun_access_ro = true;

	lacl->se_lun = lun;

	if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
			lun_access_ro, nacl, tpg) < 0)
		return -EINVAL;

	pr_debug("%s_TPG[%hu]_LUN[%llu->%llu] - Added %s ACL for "
		" InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, lacl->mapped_lun,
		lun_access_ro ? "RO" : "RW",
		nacl->initiatorname);
	/*
	 * Check to see if there are any existing persistent reservation APTPL
	 * pre-registrations that need to be enabled for this LUN ACL..
	 */
	core_scsi3_check_aptpl_registration(dev, tpg, lun, nacl,
					    lacl->mapped_lun);
	return 0;
}

int core_dev_del_initiator_node_lun_acl(
	struct se_lun *lun,
	struct se_lun_acl *lacl)
{
	struct se_portal_group *tpg = lun->lun_tpg;
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	mutex_lock(&nacl->lun_entry_mutex);
	deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
	if (deve)
		core_disable_device_list_for_node(lun, deve, nacl, tpg);
	mutex_unlock(&nacl->lun_entry_mutex);

	pr_debug("%s_TPG[%hu]_LUN[%llu] - Removed ACL for"
		" InitiatorNode: %s Mapped LUN: %llu\n",
		tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		nacl->initiatorname, lacl->mapped_lun);

	return 0;
}

void core_dev_free_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl)
{
	pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
		" Mapped LUN: %llu\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		tpg->se_tpg_tfo->get_fabric_name(),
		lacl->se_lun_nacl->initiatorname, lacl->mapped_lun);

	kfree(lacl);
}

static void scsi_dump_inquiry(struct se_device *dev)
{
	struct t10_wwn *wwn = &dev->t10_wwn;
	char buf[17];
	int i, device_type;
	/*
	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
	 */
	for (i = 0; i < 8; i++)
		if (wwn->vendor[i] >= 0x20)
			buf[i] = wwn->vendor[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Vendor: %s\n", buf);

	for (i = 0; i < 16; i++)
		if (wwn->model[i] >= 0x20)
			buf[i] = wwn->model[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Model: %s\n", buf);

	for (i = 0; i < 4; i++)
		if (wwn->revision[i] >= 0x20)
			buf[i] = wwn->revision[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Revision: %s\n", buf);

	device_type = dev->transport->get_device_type(dev);
	pr_debug("  Type:   %s ", scsi_device_type(device_type));
}

struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
{
	struct se_device *dev;
	struct se_lun *xcopy_lun;

	dev = hba->backend->ops->alloc_device(hba, name);
	if (!dev)
		return NULL;

	dev->se_hba = hba;
	dev->transport = hba->backend->ops;
	dev->prot_length = sizeof(struct t10_pi_tuple);
	dev->hba_index = hba->hba_index;

	INIT_LIST_HEAD(&dev->dev_sep_list);
	INIT_LIST_HEAD(&dev->dev_tmr_list);
	INIT_LIST_HEAD(&dev->delayed_cmd_list);
	INIT_LIST_HEAD(&dev->state_list);
	INIT_LIST_HEAD(&dev->qf_cmd_list);
	spin_lock_init(&dev->execute_task_lock);
	spin_lock_init(&dev->delayed_cmd_lock);
	spin_lock_init(&dev->dev_reservation_lock);
	spin_lock_init(&dev->se_port_lock);
	spin_lock_init(&dev->se_tmr_lock);
	spin_lock_init(&dev->qf_cmd_lock);
	sema_init(&dev->caw_sem, 1);
	INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
	spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
	INIT_LIST_HEAD(&dev->t10_pr.registration_list);
	INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list);
	spin_lock_init(&dev->t10_pr.registration_lock);
	spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
	INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
	spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
	INIT_LIST_HEAD(&dev->t10_alua.lba_map_list);
	spin_lock_init(&dev->t10_alua.lba_map_lock);

	dev->t10_wwn.t10_dev = dev;
	dev->t10_alua.t10_dev = dev;

	dev->dev_attrib.da_dev = dev;
	dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS;
	dev->dev_attrib.emulate_dpo = 1;
	dev->dev_attrib.emulate_fua_write = 1;
	dev->dev_attrib.emulate_fua_read = 1;
	dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
	dev->dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
	dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
	dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
	dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
	dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
	dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
	dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;
	dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
	dev->dev_attrib.force_pr_aptpl = DA_FORCE_PR_APTPL;
	dev->dev_attrib.is_nonrot = DA_IS_NONROT;
	dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
	dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
	dev->dev_attrib.max_unmap_block_desc_count =
		DA_MAX_UNMAP_BLOCK_DESC_COUNT;
	dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
	dev->dev_attrib.unmap_granularity_alignment =
				DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
	dev->dev_attrib.unmap_zeroes_data =
				DA_UNMAP_ZEROES_DATA_DEFAULT;
	dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;

	xcopy_lun = &dev->xcopy_lun;
	rcu_assign_pointer(xcopy_lun->lun_se_dev, dev);
	init_completion(&xcopy_lun->lun_ref_comp);
	init_completion(&xcopy_lun->lun_shutdown_comp);
	INIT_LIST_HEAD(&xcopy_lun->lun_deve_list);
	INIT_LIST_HEAD(&xcopy_lun->lun_dev_link);
	mutex_init(&xcopy_lun->lun_tg_pt_md_mutex);
	xcopy_lun->lun_tpg = &xcopy_pt_tpg;

	return dev;
}

/*
 * Check if the underlying struct block_device request_queue supports
 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
 * in ATA and we need to set TPE=1
 */
bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
				       struct request_queue *q)
{
	int block_size = queue_logical_block_size(q);

	if (!blk_queue_discard(q))
		return false;

	attrib->max_unmap_lba_count =
		q->limits.max_discard_sectors >> (ilog2(block_size) - 9);
	/*
	 * Currently hardcoded to 1 in Linux/SCSI code..
	 */
	attrib->max_unmap_block_desc_count = 1;
	attrib->unmap_granularity = q->limits.discard_granularity / block_size;
	attrib->unmap_granularity_alignment = q->limits.discard_alignment /
								block_size;
	attrib->unmap_zeroes_data = (q->limits.max_write_zeroes_sectors);
	return true;
}
EXPORT_SYMBOL(target_configure_unmap_from_queue);
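
/*
 * Unit-conversion sketch for target_configure_unmap_from_queue() above
 * (illustrative numbers): the block layer reports max_discard_sectors
 * in 512-byte sectors while max_unmap_lba_count is in logical blocks,
 * hence the shift by ilog2(block_size) - 9. With block_size == 4096
 * the shift is 3, so e.g. 65536 discard sectors become 8192 LBAs.
 */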

/*
 * Convert from blocksize advertised to the initiator to the 512 byte
 * units unconditionally used by the Linux block layer.
 */
sector_t target_to_linux_sector(struct se_device *dev, sector_t lb)
{
	switch (dev->dev_attrib.block_size) {
	case 4096:
		return lb << 3;
	case 2048:
		return lb << 2;
	case 1024:
		return lb << 1;
	default:
		return lb;
	}
}
EXPORT_SYMBOL(target_to_linux_sector);
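
/*
 * Example (illustrative): a device advertising 4096-byte blocks maps
 * LBA 16 to Linux sector 16 << 3 == 128; both describe byte offset
 * 65536, since the block layer always counts in 512-byte units.
 */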

struct devices_idr_iter {
	struct config_item *prev_item;
	int (*fn)(struct se_device *dev, void *data);
	void *data;
};

static int target_devices_idr_iter(int id, void *p, void *data)
	__must_hold(&device_mutex)
{
	struct devices_idr_iter *iter = data;
	struct se_device *dev = p;
	int ret;

	config_item_put(iter->prev_item);
	iter->prev_item = NULL;

	/*
	 * We add the device early to the idr, so it can be used
	 * by backend modules during configuration. We do not want
	 * to allow other callers to access partially setup devices,
	 * so we skip them here.
	 */
	if (!target_dev_configured(dev))
		return 0;

	iter->prev_item = config_item_get_unless_zero(&dev->dev_group.cg_item);
	if (!iter->prev_item)
		return 0;
	mutex_unlock(&device_mutex);

	ret = iter->fn(dev, iter->data);

	mutex_lock(&device_mutex);
	return ret;
}

/**
 * target_for_each_device - iterate over configured devices
 * @fn: iterator function
 * @data: pointer to data that will be passed to fn
 *
 * fn must return 0 to continue looping over devices. non-zero will break
 * from the loop and return that value to the caller.
 */
int target_for_each_device(int (*fn)(struct se_device *dev, void *data),
			   void *data)
{
	struct devices_idr_iter iter = { .fn = fn, .data = data };
	int ret;

	mutex_lock(&device_mutex);
	ret = idr_for_each(&devices_idr, target_devices_idr_iter, &iter);
	mutex_unlock(&device_mutex);
	config_item_put(iter.prev_item);
	return ret;
}
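
/*
 * Usage sketch for target_for_each_device(); the callback below is
 * hypothetical and not part of this file:
 *
 *	static int target_count_one(struct se_device *dev, void *data)
 *	{
 *		(*(unsigned int *)data)++;
 *		return 0;
 *	}
 *
 *	unsigned int count = 0;
 *	target_for_each_device(target_count_one, &count);
 *
 * Note that target_devices_idr_iter() drops device_mutex around the
 * callback and instead pins the device via its config_item reference,
 * so fn must not assume device_mutex is held.
 */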

int target_configure_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;
	int ret, id;

	if (target_dev_configured(dev)) {
		pr_err("se_dev->se_dev_ptr already set for storage"
			" object\n");
		return -EEXIST;
	}

	/*
	 * Add early so modules like tcmu can use during its
	 * configuration.
	 */
	mutex_lock(&device_mutex);
	/*
	 * Use cyclic to try and avoid collisions with devices
	 * that were recently removed.
	 */
	id = idr_alloc_cyclic(&devices_idr, dev, 0, INT_MAX, GFP_KERNEL);
	mutex_unlock(&device_mutex);
	if (id < 0) {
		ret = -ENOMEM;
		goto out;
	}
	dev->dev_index = id;

	ret = dev->transport->configure_device(dev);
	if (ret)
		goto out_free_index;
	/*
	 * XXX: there is not much point to have two different values here..
	 */
	dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
	dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;

	/*
	 * Align max_hw_sectors down to PAGE_SIZE I/O transfers
	 */
	dev->dev_attrib.hw_max_sectors =
		se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
					 dev->dev_attrib.hw_block_size);
	dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;

	dev->creation_time = get_jiffies_64();

	ret = core_setup_alua(dev);
	if (ret)
		goto out_destroy_device;

	/*
	 * Startup the struct se_device processing thread
	 */
	dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1,
				      dev->transport->name);
	if (!dev->tmr_wq) {
		pr_err("Unable to create tmr workqueue for %s\n",
			dev->transport->name);
		ret = -ENOMEM;
		goto out_free_alua;
	}

	/*
	 * Setup work_queue for QUEUE_FULL
	 */
	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);

	/*
	 * Preload the initial INQUIRY const values if we are doing
	 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
	 * passthrough because this is being provided by the backend LLD.
	 */
	if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)) {
		strncpy(&dev->t10_wwn.vendor[0], "LIO-ORG", 8);
		strncpy(&dev->t10_wwn.model[0],
			dev->transport->inquiry_prod, 16);
		strncpy(&dev->t10_wwn.revision[0],
			dev->transport->inquiry_rev, 4);
	}

	scsi_dump_inquiry(dev);

	spin_lock(&hba->device_lock);
	hba->dev_count++;
	spin_unlock(&hba->device_lock);

	dev->dev_flags |= DF_CONFIGURED;

	return 0;

out_free_alua:
	core_alua_free_lu_gp_mem(dev);
out_destroy_device:
	dev->transport->destroy_device(dev);
out_free_index:
	mutex_lock(&device_mutex);
	idr_remove(&devices_idr, dev->dev_index);
	mutex_unlock(&device_mutex);
out:
	se_release_vpd_for_dev(dev);
	return ret;
}

void target_free_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	WARN_ON(!list_empty(&dev->dev_sep_list));

	if (target_dev_configured(dev)) {
		destroy_workqueue(dev->tmr_wq);

		dev->transport->destroy_device(dev);

		mutex_lock(&device_mutex);
		idr_remove(&devices_idr, dev->dev_index);
		mutex_unlock(&device_mutex);

		spin_lock(&hba->device_lock);
		hba->dev_count--;
		spin_unlock(&hba->device_lock);
	}

	core_alua_free_lu_gp_mem(dev);
	core_alua_set_lba_map(dev, NULL, 0, 0);
	core_scsi3_free_all_registrations(dev);
	se_release_vpd_for_dev(dev);

	if (dev->transport->free_prot)
		dev->transport->free_prot(dev);

	dev->transport->free_device(dev);
}

int core_dev_setup_virtual_lun0(void)
{
	struct se_hba *hba;
	struct se_device *dev;
	char buf[] = "rd_pages=8,rd_nullio=1";
	int ret;

	hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
	if (IS_ERR(hba))
		return PTR_ERR(hba);

	dev = target_alloc_device(hba, "virt_lun0");
	if (!dev) {
		ret = -ENOMEM;
		goto out_free_hba;
	}

	hba->backend->ops->set_configfs_dev_params(dev, buf, sizeof(buf));

	ret = target_configure_device(dev);
	if (ret)
		goto out_free_se_dev;

	lun0_hba = hba;
	g_lun0_dev = dev;
	return 0;

out_free_se_dev:
	target_free_device(dev);
out_free_hba:
	core_delete_hba(hba);
	return ret;
}

void core_dev_release_virtual_lun0(void)
{
	struct se_hba *hba = lun0_hba;

	if (!hba)
		return;

	if (g_lun0_dev)
		target_free_device(g_lun0_dev);

	core_delete_hba(hba);
}

/*
 * Common CDB parsing for kernel and user passthrough.
 */
sense_reason_t
passthrough_parse_cdb(struct se_cmd *cmd,
	sense_reason_t (*exec_cmd)(struct se_cmd *cmd))
{
	unsigned char *cdb = cmd->t_task_cdb;
	struct se_device *dev = cmd->se_dev;
	unsigned int size;

	/*
	 * For REPORT LUNS we always need to emulate the response, for everything
	 * else, pass it up.
	 */
	if (cdb[0] == REPORT_LUNS) {
		cmd->execute_cmd = spc_emulate_report_luns;
		return TCM_NO_SENSE;
	}

	/*
	 * For PERSISTENT RESERVE IN/OUT, RELEASE, and RESERVE we need to
	 * emulate the response, since tcmu does not have the information
	 * required to process these commands.
	 */
	if (!(dev->transport->transport_flags &
	      TRANSPORT_FLAG_PASSTHROUGH_PGR)) {
		if (cdb[0] == PERSISTENT_RESERVE_IN) {
			cmd->execute_cmd = target_scsi3_emulate_pr_in;
			size = get_unaligned_be16(&cdb[7]);
			return target_cmd_size_check(cmd, size);
		}
		if (cdb[0] == PERSISTENT_RESERVE_OUT) {
			cmd->execute_cmd = target_scsi3_emulate_pr_out;
			size = get_unaligned_be32(&cdb[5]);
			return target_cmd_size_check(cmd, size);
		}

		if (cdb[0] == RELEASE || cdb[0] == RELEASE_10) {
			cmd->execute_cmd = target_scsi2_reservation_release;
			if (cdb[0] == RELEASE_10)
				size = get_unaligned_be16(&cdb[7]);
			else
				size = cmd->data_length;
			return target_cmd_size_check(cmd, size);
		}
		if (cdb[0] == RESERVE || cdb[0] == RESERVE_10) {
			cmd->execute_cmd = target_scsi2_reservation_reserve;
			if (cdb[0] == RESERVE_10)
				size = get_unaligned_be16(&cdb[7]);
			else
				size = cmd->data_length;
			return target_cmd_size_check(cmd, size);
		}
	}

	/* Set DATA_CDB flag for ops that should have it */
	switch (cdb[0]) {
	case READ_6:
	case READ_10:
	case READ_12:
	case READ_16:
	case WRITE_6:
	case WRITE_10:
	case WRITE_12:
	case WRITE_16:
	case WRITE_VERIFY:
	case WRITE_VERIFY_12:
	case WRITE_VERIFY_16:
	case COMPARE_AND_WRITE:
	case XDWRITEREAD_10:
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		break;
	case VARIABLE_LENGTH_CMD:
		switch (get_unaligned_be16(&cdb[8])) {
		case READ_32:
		case WRITE_32:
		case WRITE_VERIFY_32:
		case XDWRITEREAD_32:
			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
			break;
		}
	}

	cmd->execute_cmd = exec_cmd;

	return TCM_NO_SENSE;
}
EXPORT_SYMBOL(passthrough_parse_cdb);
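
/*
 * Typical caller sketch (the foo_* names are placeholders, not part of
 * this file): a passthrough backend points its parse_cdb method at this
 * helper and supplies its own submission routine:
 *
 *	static sense_reason_t foo_parse_cdb(struct se_cmd *cmd)
 *	{
 *		return passthrough_parse_cdb(cmd, foo_execute_cmd);
 *	}
 */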