xen-scsifront.c 28 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165
  1. /*
  2. * Xen SCSI frontend driver
  3. *
  4. * Copyright (c) 2008, FUJITSU Limited
  5. *
  6. * This program is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU General Public License version 2
  8. * as published by the Free Software Foundation; or, when distributed
  9. * separately from the Linux kernel or incorporated into other
  10. * software packages, subject to the following license:
  11. *
  12. * Permission is hereby granted, free of charge, to any person obtaining a copy
  13. * of this source file (the "Software"), to deal in the Software without
  14. * restriction, including without limitation the rights to use, copy, modify,
  15. * merge, publish, distribute, sublicense, and/or sell copies of the Software,
  16. * and to permit persons to whom the Software is furnished to do so, subject to
  17. * the following conditions:
  18. *
  19. * The above copyright notice and this permission notice shall be included in
  20. * all copies or substantial portions of the Software.
  21. *
  22. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  23. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  24. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  25. * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  26. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  27. * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  28. * IN THE SOFTWARE.
  29. */
  30. #include <linux/module.h>
  31. #include <linux/kernel.h>
  32. #include <linux/device.h>
  33. #include <linux/wait.h>
  34. #include <linux/interrupt.h>
  35. #include <linux/mutex.h>
  36. #include <linux/spinlock.h>
  37. #include <linux/sched.h>
  38. #include <linux/blkdev.h>
  39. #include <linux/pfn.h>
  40. #include <linux/slab.h>
  41. #include <linux/bitops.h>
  42. #include <scsi/scsi_cmnd.h>
  43. #include <scsi/scsi_device.h>
  44. #include <scsi/scsi.h>
  45. #include <scsi/scsi_host.h>
  46. #include <xen/xen.h>
  47. #include <xen/xenbus.h>
  48. #include <xen/grant_table.h>
  49. #include <xen/events.h>
  50. #include <xen/page.h>
  51. #include <xen/interface/grant_table.h>
  52. #include <xen/interface/io/vscsiif.h>
  53. #include <xen/interface/io/protocols.h>
  54. #include <asm/xen/hypervisor.h>
  55. #define GRANT_INVALID_REF 0
  56. #define VSCSIFRONT_OP_ADD_LUN 1
  57. #define VSCSIFRONT_OP_DEL_LUN 2
  58. #define VSCSIFRONT_OP_READD_LUN 3
  59. /* Tuning point. */
  60. #define VSCSIIF_DEFAULT_CMD_PER_LUN 10
  61. #define VSCSIIF_MAX_TARGET 64
  62. #define VSCSIIF_MAX_LUN 255
  63. #define VSCSIIF_RING_SIZE __CONST_RING_SIZE(vscsiif, PAGE_SIZE)
  64. #define VSCSIIF_MAX_REQS VSCSIIF_RING_SIZE
  65. #define vscsiif_grants_sg(_sg) (PFN_UP((_sg) * \
  66. sizeof(struct scsiif_request_segment)))
/*
 * Per-request bookkeeping pairing a SCSI midlayer command with its vscsiif
 * ring request/response.  Lives either in the scsi_cmnd private area
 * (normal commands, see scsifront_sht.cmd_size) or is kzalloc()ed for
 * reset/abort requests (scsifront_action_handler()).
 */
struct vscsifrnt_shadow {
	/* command between backend and frontend */
	unsigned char act;		/* VSCSIIF_ACT_* request type */
	uint8_t nr_segments;		/* segment count (may carry VSCSIIF_SG_GRANT) */
	uint16_t rqid;			/* shadow slot index, echoed by backend */
	uint16_t ref_rqid;		/* rqid of the command a reset/abort refers to */

	unsigned int nr_grants;		/* number of grants in gref[] */
	struct scsiif_request_segment *sg;	/* scatter/gather elements */
	struct scsiif_request_segment seg[VSCSIIF_SG_TABLESIZE];

	/* Do reset or abort function. */
	wait_queue_head_t wq_reset;	/* reset work queue */
	int wait_reset;			/* reset work queue condition */
	int32_t rslt_reset;		/* reset response status: */
					/* SUCCESS or FAILED or: */
#define RSLT_RESET_WAITING	0
#define RSLT_RESET_ERR		-1

	/* Requested struct scsi_cmnd is stored from kernel. */
	struct scsi_cmnd *sc;
	int gref[vscsiif_grants_sg(SG_ALL) + SG_ALL];
};
/*
 * Per-device frontend state: xenbus handle, SCSI host, shared ring with
 * event channel, and the shadow-slot bookkeeping used to match ring
 * responses back to in-flight requests.
 */
struct vscsifrnt_info {
	struct xenbus_device *dev;

	struct Scsi_Host *host;
	int host_active;		/* scsi_add_host() done and not yet removed */

	unsigned int evtchn;		/* event channel shared with the backend */
	unsigned int irq;		/* Linux irq bound to evtchn */

	grant_ref_t ring_ref;		/* grant covering the shared ring page */
	struct vscsiif_front_ring ring;
	struct vscsiif_response	ring_rsp;

	spinlock_t shadow_lock;		/* protects shadow_free_bitmap and the
					 * sync-command result handover */
	DECLARE_BITMAP(shadow_free_bitmap, VSCSIIF_MAX_REQS);
	struct vscsifrnt_shadow *shadow[VSCSIIF_MAX_REQS];

	/* Following items are protected by the host lock. */
	wait_queue_head_t wq_sync;	/* sleepers waiting for ring space */
	wait_queue_head_t wq_pause;	/* suspend path waiting for callers to drain */
	unsigned int wait_ring_available:1;
	unsigned int waiting_pause:1;
	unsigned int pause:1;		/* set while suspending: reject new requests */
	unsigned callers;		/* threads currently inside request submission */

	char dev_state_path[64];	/* xenbus state path of the LUN being handled */
	struct task_struct *curr;	/* task allowed to write device states */
};
  109. static DEFINE_MUTEX(scsifront_mutex);
/*
 * Clear the "waiting for ring space" flag and wake sleepers on wq_sync.
 * The flag is cleared before the wake-up so a woken waiter's
 * wait_event() condition observes the new value.
 */
static void scsifront_wake_up(struct vscsifrnt_info *info)
{
	info->wait_ring_available = 0;
	wake_up(&info->wq_sync);
}
  115. static int scsifront_get_rqid(struct vscsifrnt_info *info)
  116. {
  117. unsigned long flags;
  118. int free;
  119. spin_lock_irqsave(&info->shadow_lock, flags);
  120. free = find_first_bit(info->shadow_free_bitmap, VSCSIIF_MAX_REQS);
  121. __clear_bit(free, info->shadow_free_bitmap);
  122. spin_unlock_irqrestore(&info->shadow_lock, flags);
  123. return free;
  124. }
/*
 * Release request id @id back to the free bitmap.  Caller must hold
 * shadow_lock.  Returns nonzero when a waiter should be woken: the
 * bitmap was completely empty beforehand (slots were exhausted) or
 * someone is explicitly waiting for ring space.  Note that
 * bitmap_empty() is sampled *before* the bit is set so the
 * full-to-non-full transition is detected.
 */
static int _scsifront_put_rqid(struct vscsifrnt_info *info, uint32_t id)
{
	int empty = bitmap_empty(info->shadow_free_bitmap, VSCSIIF_MAX_REQS);

	__set_bit(id, info->shadow_free_bitmap);
	info->shadow[id] = NULL;

	return empty || info->wait_ring_available;
}
  132. static void scsifront_put_rqid(struct vscsifrnt_info *info, uint32_t id)
  133. {
  134. unsigned long flags;
  135. int kick;
  136. spin_lock_irqsave(&info->shadow_lock, flags);
  137. kick = _scsifront_put_rqid(info, id);
  138. spin_unlock_irqrestore(&info->shadow_lock, flags);
  139. if (kick)
  140. scsifront_wake_up(info);
  141. }
/*
 * Build a vscsiif ring request from @shadow and hand it to the backend.
 * Called with the host lock held, which serializes ring producer access.
 * Returns -EBUSY when the ring is full or no shadow slot is free; the
 * caller is expected to retry later.
 */
static int scsifront_do_request(struct vscsifrnt_info *info,
				struct vscsifrnt_shadow *shadow)
{
	struct vscsiif_front_ring *ring = &(info->ring);
	struct vscsiif_request *ring_req;
	struct scsi_cmnd *sc = shadow->sc;
	uint32_t id;
	int i, notify;

	if (RING_FULL(&info->ring))
		return -EBUSY;

	id = scsifront_get_rqid(info);	/* use id in response */
	if (id >= VSCSIIF_MAX_REQS)
		return -EBUSY;

	info->shadow[id] = shadow;
	shadow->rqid = id;

	ring_req = RING_GET_REQUEST(&(info->ring), ring->req_prod_pvt);
	ring->req_prod_pvt++;

	ring_req->rqid        = id;
	ring_req->act         = shadow->act;
	ring_req->ref_rqid    = shadow->ref_rqid;
	ring_req->nr_segments = shadow->nr_segments;

	ring_req->id      = sc->device->id;
	ring_req->lun     = sc->device->lun;
	ring_req->channel = sc->device->channel;
	ring_req->cmd_len = sc->cmd_len;

	BUG_ON(sc->cmd_len > VSCSIIF_MAX_COMMAND_SIZE);

	memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len);

	ring_req->sc_data_direction   = (uint8_t)sc->sc_data_direction;
	ring_req->timeout_per_command = sc->request->timeout / HZ;

	/*
	 * Copy the flag-stripped number of entries from shadow->seg: either
	 * the data segments themselves, or (when VSCSIIF_SG_GRANT is set)
	 * the grants covering the indirect segment page(s).
	 */
	for (i = 0; i < (shadow->nr_segments & ~VSCSIIF_SG_GRANT); i++)
		ring_req->seg[i] = shadow->seg[i];

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify);
	if (notify)
		notify_remote_via_irq(info->irq);

	return 0;
}
/*
 * Revoke every grant handed to the backend for @shadow and free the
 * indirect segment array, if one was allocated.  No-op for commands
 * without a data phase (DMA_NONE, which also never allocated grants).
 *
 * NOTE(review): if the backend still has a grant mapped we BUG() rather
 * than return the page while it is foreign-accessible.  Upstream later
 * reworked this query-then-BUG pattern (XSA-396 era changes) — confirm
 * against the targeted kernel version.
 */
static void scsifront_gnttab_done(struct vscsifrnt_info *info,
				  struct vscsifrnt_shadow *shadow)
{
	int i;

	if (shadow->sc->sc_data_direction == DMA_NONE)
		return;

	for (i = 0; i < shadow->nr_grants; i++) {
		if (unlikely(gnttab_query_foreign_access(shadow->gref[i]))) {
			shost_printk(KERN_ALERT, info->host, KBUILD_MODNAME
				     "grant still in use by backend\n");
			BUG();
		}
		gnttab_end_foreign_access(shadow->gref[i], 0, 0UL);
	}

	kfree(shadow->sg);
}
/*
 * Complete a normal SCSI command: tear down its grants, release the
 * shadow slot, copy result/residual/sense data from the ring response
 * into the scsi_cmnd, and signal the midlayer via scsi_done.
 * Runs from the response drain path (host lock held by the caller).
 */
static void scsifront_cdb_cmd_done(struct vscsifrnt_info *info,
				   struct vscsiif_response *ring_rsp)
{
	struct vscsifrnt_shadow *shadow;
	struct scsi_cmnd *sc;
	uint32_t id;
	uint8_t sense_len;

	id = ring_rsp->rqid;
	shadow = info->shadow[id];
	sc = shadow->sc;

	BUG_ON(sc == NULL);

	scsifront_gnttab_done(info, shadow);
	scsifront_put_rqid(info, id);

	sc->result = ring_rsp->rslt;
	scsi_set_resid(sc, ring_rsp->residual_len);
	/* Clamp to the frontend's sense buffer size. */
	sense_len = min_t(uint8_t, VSCSIIF_SENSE_BUFFERSIZE,
			  ring_rsp->sense_len);

	if (sense_len)
		memcpy(sc->sense_buffer, ring_rsp->sense_buffer, sense_len);

	sc->scsi_done(sc);
}
/*
 * Complete a synchronous (reset/abort) request.  Normally stores the
 * backend's result and wakes the sleeper in scsifront_action_handler().
 * If that sleeper was interrupted it marked the shadow RSLT_RESET_ERR
 * and abandoned it; in that case ownership is ours, so free the shadow
 * and release its slot here instead of waking anyone.
 */
static void scsifront_sync_cmd_done(struct vscsifrnt_info *info,
				    struct vscsiif_response *ring_rsp)
{
	uint16_t id = ring_rsp->rqid;
	unsigned long flags;
	struct vscsifrnt_shadow *shadow = info->shadow[id];
	int kick;

	spin_lock_irqsave(&info->shadow_lock, flags);
	shadow->wait_reset = 1;
	switch (shadow->rslt_reset) {
	case RSLT_RESET_WAITING:
		/* Sleeper is still waiting: hand over the result. */
		shadow->rslt_reset = ring_rsp->rslt;
		break;
	case RSLT_RESET_ERR:
		/* Sleeper gave up (interrupted); we own the shadow now. */
		kick = _scsifront_put_rqid(info, id);
		spin_unlock_irqrestore(&info->shadow_lock, flags);
		kfree(shadow);
		if (kick)
			scsifront_wake_up(info);
		return;
	default:
		shost_printk(KERN_ERR, info->host, KBUILD_MODNAME
			     "bad reset state %d, possibly leaking %u\n",
			     shadow->rslt_reset, id);
		break;
	}
	spin_unlock_irqrestore(&info->shadow_lock, flags);

	wake_up(&shadow->wq_reset);
}
  244. static void scsifront_do_response(struct vscsifrnt_info *info,
  245. struct vscsiif_response *ring_rsp)
  246. {
  247. if (WARN(ring_rsp->rqid >= VSCSIIF_MAX_REQS ||
  248. test_bit(ring_rsp->rqid, info->shadow_free_bitmap),
  249. "illegal rqid %u returned by backend!\n", ring_rsp->rqid))
  250. return;
  251. if (info->shadow[ring_rsp->rqid]->act == VSCSIIF_ACT_SCSI_CDB)
  252. scsifront_cdb_cmd_done(info, ring_rsp);
  253. else
  254. scsifront_sync_cmd_done(info, ring_rsp);
  255. }
/*
 * Consume all responses currently visible on the shared ring.  Caller
 * holds the host lock.  Returns nonzero when more responses arrived
 * while draining and another pass is needed.
 */
static int scsifront_ring_drain(struct vscsifrnt_info *info)
{
	struct vscsiif_response *ring_rsp;
	RING_IDX i, rp;
	int more_to_do = 0;

	rp = info->ring.sring->rsp_prod;
	rmb();	/* ordering required respective to dom0 */
	for (i = info->ring.rsp_cons; i != rp; i++) {
		ring_rsp = RING_GET_RESPONSE(&info->ring, i);
		scsifront_do_response(info, ring_rsp);
	}

	info->ring.rsp_cons = i;

	/*
	 * With requests still outstanding, re-arm and re-check atomically
	 * via the macro; otherwise just advance the event pointer.
	 */
	if (i != info->ring.req_prod_pvt)
		RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do);
	else
		info->ring.sring->rsp_event = i + 1;

	return more_to_do;
}
/*
 * Interrupt bottom half helper: drain the ring under the host lock,
 * then wake anyone waiting for ring space — every completed request
 * freed a slot.  Returns scsifront_ring_drain()'s more-to-do result.
 */
static int scsifront_cmd_done(struct vscsifrnt_info *info)
{
	int more_to_do;
	unsigned long flags;

	spin_lock_irqsave(info->host->host_lock, flags);

	more_to_do = scsifront_ring_drain(info);

	info->wait_ring_available = 0;

	spin_unlock_irqrestore(info->host->host_lock, flags);

	wake_up(&info->wq_sync);

	return more_to_do;
}
  285. static irqreturn_t scsifront_irq_fn(int irq, void *dev_id)
  286. {
  287. struct vscsifrnt_info *info = dev_id;
  288. while (scsifront_cmd_done(info))
  289. /* Yield point for this unbounded loop. */
  290. cond_resched();
  291. return IRQ_HANDLED;
  292. }
  293. static void scsifront_finish_all(struct vscsifrnt_info *info)
  294. {
  295. unsigned i;
  296. struct vscsiif_response resp;
  297. scsifront_ring_drain(info);
  298. for (i = 0; i < VSCSIIF_MAX_REQS; i++) {
  299. if (test_bit(i, info->shadow_free_bitmap))
  300. continue;
  301. resp.rqid = i;
  302. resp.sense_len = 0;
  303. resp.rslt = DID_RESET << 16;
  304. resp.residual_len = 0;
  305. scsifront_do_response(info, &resp);
  306. }
  307. }
/*
 * Grant the backend access to the command's data buffers and fill in
 * the shadow's segment descriptors.
 *
 * Two layouts are produced:
 *  - up to VSCSIIF_SG_TABLESIZE data grants: segments go inline into
 *    shadow->seg and are copied into the ring request;
 *  - larger transfers (bounded by host->sg_tablesize): a separate
 *    segment array (shadow->sg) is allocated and itself granted to the
 *    backend; nr_segments then carries VSCSIIF_SG_GRANT plus the number
 *    of pages holding that array.
 *
 * Returns 0 on success, -E2BIG when the request exceeds the negotiated
 * limit, -ENOMEM on allocation/grant exhaustion.  Called with the host
 * lock held, hence GFP_ATOMIC.
 */
static int map_data_for_request(struct vscsifrnt_info *info,
				struct scsi_cmnd *sc,
				struct vscsifrnt_shadow *shadow)
{
	grant_ref_t gref_head;
	struct page *page;
	int err, ref, ref_cnt = 0;
	/* Writes only need a read-only grant from the backend's view. */
	int grant_ro = (sc->sc_data_direction == DMA_TO_DEVICE);
	unsigned int i, off, len, bytes;
	unsigned int data_len = scsi_bufflen(sc);
	unsigned int data_grants = 0, seg_grants = 0;
	struct scatterlist *sg;
	struct scsiif_request_segment *seg;

	if (sc->sc_data_direction == DMA_NONE || !data_len)
		return 0;

	/* Count the pages touched by the scatterlist. */
	scsi_for_each_sg(sc, sg, scsi_sg_count(sc), i)
		data_grants += PFN_UP(sg->offset + sg->length);

	if (data_grants > VSCSIIF_SG_TABLESIZE) {
		if (data_grants > info->host->sg_tablesize) {
			shost_printk(KERN_ERR, info->host, KBUILD_MODNAME
			     "Unable to map request_buffer for command!\n");
			return -E2BIG;
		}
		/* Indirect layout: segment array lives in its own page(s). */
		seg_grants = vscsiif_grants_sg(data_grants);
		shadow->sg = kcalloc(data_grants,
			sizeof(struct scsiif_request_segment), GFP_ATOMIC);
		if (!shadow->sg)
			return -ENOMEM;
	}
	seg = shadow->sg ? : shadow->seg;

	err = gnttab_alloc_grant_references(seg_grants + data_grants,
					    &gref_head);
	if (err) {
		kfree(shadow->sg);
		shost_printk(KERN_ERR, info->host, KBUILD_MODNAME
			     "gnttab_alloc_grant_references() error\n");
		return -ENOMEM;
	}

	if (seg_grants) {
		/*
		 * Grant the page(s) containing the indirect segment array;
		 * these grants go into shadow->seg for the ring request.
		 */
		page = virt_to_page(seg);
		off = offset_in_page(seg);
		len = sizeof(struct scsiif_request_segment) * data_grants;
		while (len > 0) {
			bytes = min_t(unsigned int, len, PAGE_SIZE - off);

			ref = gnttab_claim_grant_reference(&gref_head);
			BUG_ON(ref == -ENOSPC);

			gnttab_grant_foreign_access_ref(ref,
				info->dev->otherend_id,
				xen_page_to_gfn(page), 1);
			shadow->gref[ref_cnt] = ref;
			shadow->seg[ref_cnt].gref   = ref;
			shadow->seg[ref_cnt].offset = (uint16_t)off;
			shadow->seg[ref_cnt].length = (uint16_t)bytes;

			page++;
			len -= bytes;
			off = 0;
			ref_cnt++;
		}
		BUG_ON(seg_grants < ref_cnt);
		seg_grants = ref_cnt;
	}

	/* Grant the actual data pages, writing descriptors through @seg. */
	scsi_for_each_sg(sc, sg, scsi_sg_count(sc), i) {
		page = sg_page(sg);
		off = sg->offset;
		len = sg->length;

		while (len > 0 && data_len > 0) {
			/*
			 * sg sends a scatterlist that is larger than
			 * the data_len it wants transferred for certain
			 * IO sizes.
			 */
			bytes = min_t(unsigned int, len, PAGE_SIZE - off);
			bytes = min(bytes, data_len);

			ref = gnttab_claim_grant_reference(&gref_head);
			BUG_ON(ref == -ENOSPC);

			gnttab_grant_foreign_access_ref(ref,
				info->dev->otherend_id,
				xen_page_to_gfn(page),
				grant_ro);

			shadow->gref[ref_cnt] = ref;
			seg->gref   = ref;
			seg->offset = (uint16_t)off;
			seg->length = (uint16_t)bytes;

			page++;
			seg++;
			len -= bytes;
			data_len -= bytes;
			off = 0;
			ref_cnt++;
		}
	}

	if (seg_grants)
		shadow->nr_segments = VSCSIIF_SG_GRANT | seg_grants;
	else
		shadow->nr_segments = (uint8_t)ref_cnt;
	shadow->nr_grants = ref_cnt;

	return 0;
}
  406. static int scsifront_enter(struct vscsifrnt_info *info)
  407. {
  408. if (info->pause)
  409. return 1;
  410. info->callers++;
  411. return 0;
  412. }
  413. static void scsifront_return(struct vscsifrnt_info *info)
  414. {
  415. info->callers--;
  416. if (info->callers)
  417. return;
  418. if (!info->waiting_pause)
  419. return;
  420. info->waiting_pause = 0;
  421. wake_up(&info->wq_pause);
  422. }
/*
 * scsi_host_template.queuecommand: map the command's data, push it to
 * the backend, and return; completion happens asynchronously in
 * scsifront_cdb_cmd_done().  Returns SCSI_MLQUEUE_HOST_BUSY to make the
 * midlayer retry when the device is pausing, memory/grants are
 * exhausted, or the ring is full; unrecoverable mapping errors complete
 * the command with DID_ERROR.
 */
static int scsifront_queuecommand(struct Scsi_Host *shost,
				  struct scsi_cmnd *sc)
{
	struct vscsifrnt_info *info = shost_priv(shost);
	struct vscsifrnt_shadow *shadow = scsi_cmd_priv(sc);
	unsigned long flags;
	int err;

	sc->result = 0;

	shadow->sc  = sc;
	shadow->act = VSCSIIF_ACT_SCSI_CDB;

	spin_lock_irqsave(shost->host_lock, flags);
	if (scsifront_enter(info)) {
		/* Device is pausing for suspend: ask the midlayer to retry. */
		spin_unlock_irqrestore(shost->host_lock, flags);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	err = map_data_for_request(info, sc, shadow);
	if (err < 0) {
		pr_debug("%s: err %d\n", __func__, err);
		scsifront_return(info);
		spin_unlock_irqrestore(shost->host_lock, flags);
		if (err == -ENOMEM)
			return SCSI_MLQUEUE_HOST_BUSY;
		/* -E2BIG etc.: fail the command outright. */
		sc->result = DID_ERROR << 16;
		sc->scsi_done(sc);
		return 0;
	}

	if (scsifront_do_request(info, shadow)) {
		/* Ring full: undo the grants before asking for a retry. */
		scsifront_gnttab_done(info, shadow);
		goto busy;
	}

	scsifront_return(info);
	spin_unlock_irqrestore(shost->host_lock, flags);

	return 0;

busy:
	scsifront_return(info);
	spin_unlock_irqrestore(shost->host_lock, flags);
	pr_debug("%s: busy\n", __func__);
	return SCSI_MLQUEUE_HOST_BUSY;
}
/*
 * Any exception handling (reset or abort) must be forwarded to the backend.
 * We have to wait until an answer is returned. This answer contains the
 * result to be returned to the requestor.
 *
 * A separately allocated shadow is used (not the command's private one,
 * which still belongs to the command being aborted/reset).  If our wait
 * for the backend's answer is interrupted, the shadow is marked
 * RSLT_RESET_ERR and intentionally left allocated — ownership passes to
 * scsifront_sync_cmd_done(), which frees it when the answer arrives.
 */
static int scsifront_action_handler(struct scsi_cmnd *sc, uint8_t act)
{
	struct Scsi_Host *host = sc->device->host;
	struct vscsifrnt_info *info = shost_priv(host);
	struct vscsifrnt_shadow *shadow, *s = scsi_cmd_priv(sc);
	int err = 0;

	shadow = kzalloc(sizeof(*shadow), GFP_NOIO);
	if (!shadow)
		return FAILED;

	shadow->act = act;
	shadow->rslt_reset = RSLT_RESET_WAITING;
	shadow->sc = sc;
	shadow->ref_rqid = s->rqid;	/* the command this action targets */
	init_waitqueue_head(&shadow->wq_reset);

	spin_lock_irq(host->host_lock);

	/* Retry submission until the ring has room, sleeping in between. */
	for (;;) {
		if (scsifront_enter(info))
			goto fail;

		if (!scsifront_do_request(info, shadow))
			break;

		scsifront_return(info);
		/* Previous wait was interrupted: give up. */
		if (err)
			goto fail;
		info->wait_ring_available = 1;
		spin_unlock_irq(host->host_lock);
		err = wait_event_interruptible(info->wq_sync,
					       !info->wait_ring_available);
		spin_lock_irq(host->host_lock);
	}

	spin_unlock_irq(host->host_lock);
	err = wait_event_interruptible(shadow->wq_reset, shadow->wait_reset);
	spin_lock_irq(host->host_lock);

	if (!err) {
		err = shadow->rslt_reset;
		scsifront_put_rqid(info, shadow->rqid);
		kfree(shadow);
	} else {
		/* Interrupted: hand the shadow over to the completion path. */
		spin_lock(&info->shadow_lock);
		shadow->rslt_reset = RSLT_RESET_ERR;
		spin_unlock(&info->shadow_lock);
		err = FAILED;
	}

	scsifront_return(info);
	spin_unlock_irq(host->host_lock);
	return err;

fail:
	spin_unlock_irq(host->host_lock);
	kfree(shadow);
	return FAILED;
}
/* scsi_host_template.eh_abort_handler: forward the abort to the backend. */
static int scsifront_eh_abort_handler(struct scsi_cmnd *sc)
{
	pr_debug("%s\n", __func__);
	return scsifront_action_handler(sc, VSCSIIF_ACT_SCSI_ABORT);
}
/* scsi_host_template.eh_device_reset_handler: forward the reset. */
static int scsifront_dev_reset_handler(struct scsi_cmnd *sc)
{
	pr_debug("%s\n", __func__);
	return scsifront_action_handler(sc, VSCSIIF_ACT_SCSI_RESET);
}
  527. static int scsifront_sdev_configure(struct scsi_device *sdev)
  528. {
  529. struct vscsifrnt_info *info = shost_priv(sdev->host);
  530. int err;
  531. if (info && current == info->curr) {
  532. err = xenbus_printf(XBT_NIL, info->dev->nodename,
  533. info->dev_state_path, "%d", XenbusStateConnected);
  534. if (err) {
  535. xenbus_dev_error(info->dev, err,
  536. "%s: writing dev_state_path", __func__);
  537. return err;
  538. }
  539. }
  540. return 0;
  541. }
  542. static void scsifront_sdev_destroy(struct scsi_device *sdev)
  543. {
  544. struct vscsifrnt_info *info = shost_priv(sdev->host);
  545. int err;
  546. if (info && current == info->curr) {
  547. err = xenbus_printf(XBT_NIL, info->dev->nodename,
  548. info->dev_state_path, "%d", XenbusStateClosed);
  549. if (err)
  550. xenbus_dev_error(info->dev, err,
  551. "%s: writing dev_state_path", __func__);
  552. }
  553. }
/*
 * SCSI host template.  cmd_size reserves a struct vscsifrnt_shadow in
 * each command's private area, so normal requests need no extra
 * allocation; can_queue matches the number of shadow slots.
 */
static struct scsi_host_template scsifront_sht = {
	.module			= THIS_MODULE,
	.name			= "Xen SCSI frontend driver",
	.queuecommand		= scsifront_queuecommand,
	.eh_abort_handler	= scsifront_eh_abort_handler,
	.eh_device_reset_handler = scsifront_dev_reset_handler,
	.slave_configure	= scsifront_sdev_configure,
	.slave_destroy		= scsifront_sdev_destroy,
	.cmd_per_lun		= VSCSIIF_DEFAULT_CMD_PER_LUN,
	.can_queue		= VSCSIIF_MAX_REQS,
	.this_id		= -1,
	.cmd_size		= sizeof(struct vscsifrnt_shadow),
	.sg_tablesize		= VSCSIIF_SG_TABLESIZE,
	.use_clustering		= DISABLE_CLUSTERING,
	.proc_name		= "scsifront",
};
/*
 * Allocate the shared ring page, grant it to the backend, and bind the
 * event channel with a threaded irq handler.  On success
 * info->ring_ref/evtchn/irq are valid; on failure everything allocated
 * here is torn down again.
 */
static int scsifront_alloc_ring(struct vscsifrnt_info *info)
{
	struct xenbus_device *dev = info->dev;
	struct vscsiif_sring *sring;
	grant_ref_t gref;
	int err = -ENOMEM;

	/***** Frontend to Backend ring start *****/
	sring = (struct vscsiif_sring *)__get_free_page(GFP_KERNEL);
	if (!sring) {
		xenbus_dev_fatal(dev, err,
			"fail to allocate shared ring (Front to Back)");
		return err;
	}
	SHARED_RING_INIT(sring);
	FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);

	err = xenbus_grant_ring(dev, sring, 1, &gref);
	if (err < 0) {
		free_page((unsigned long)sring);
		xenbus_dev_fatal(dev, err,
			"fail to grant shared ring (Front to Back)");
		return err;
	}
	info->ring_ref = gref;

	err = xenbus_alloc_evtchn(dev, &info->evtchn);
	if (err) {
		xenbus_dev_fatal(dev, err, "xenbus_alloc_evtchn");
		goto free_gnttab;
	}

	err = bind_evtchn_to_irq(info->evtchn);
	if (err <= 0) {
		xenbus_dev_fatal(dev, err, "bind_evtchn_to_irq");
		goto free_gnttab;
	}

	info->irq = err;	/* bind_evtchn_to_irq() returned the irq number */

	err = request_threaded_irq(info->irq, NULL, scsifront_irq_fn,
				   IRQF_ONESHOT, "scsifront", info);
	if (err) {
		xenbus_dev_fatal(dev, err, "request_threaded_irq");
		goto free_irq;
	}

	return 0;

/* free resource */
free_irq:
	unbind_from_irqhandler(info->irq, info);
free_gnttab:
	/* Revoking the grant also frees the ring page passed here. */
	gnttab_end_foreign_access(info->ring_ref, 0,
				  (unsigned long)info->ring.sring);

	return err;
}
/*
 * Undo scsifront_alloc_ring(): release the irq/event channel and revoke
 * the ring grant (which also frees the ring page passed here).
 */
static void scsifront_free_ring(struct vscsifrnt_info *info)
{
	unbind_from_irqhandler(info->irq, info);
	gnttab_end_foreign_access(info->ring_ref, 0,
				  (unsigned long)info->ring.sring);
}
/*
 * Set up the shared ring and event channel, then publish "ring-ref" and
 * "event-channel" in our xenbus node so the backend can connect.  The
 * xenbus transaction is retried on -EAGAIN.
 */
static int scsifront_init_ring(struct vscsifrnt_info *info)
{
	struct xenbus_device *dev = info->dev;
	struct xenbus_transaction xbt;
	int err;

	pr_debug("%s\n", __func__);

	err = scsifront_alloc_ring(info);
	if (err)
		return err;
	pr_debug("%s: %u %u\n", __func__, info->ring_ref, info->evtchn);

again:
	err = xenbus_transaction_start(&xbt);
	if (err)
		xenbus_dev_fatal(dev, err, "starting transaction");

	err = xenbus_printf(xbt, dev->nodename, "ring-ref", "%u",
			    info->ring_ref);
	if (err) {
		xenbus_dev_fatal(dev, err, "%s", "writing ring-ref");
		goto fail;
	}

	err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
			    info->evtchn);
	if (err) {
		xenbus_dev_fatal(dev, err, "%s", "writing event-channel");
		goto fail;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err) {
		if (err == -EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, err, "completing transaction");
		goto free_sring;
	}

	return 0;

fail:
	xenbus_transaction_end(xbt, 1);	/* abort the transaction */
	/* fall through to release the ring */
free_sring:
	scsifront_free_ring(info);

	return err;
}
  665. static int scsifront_probe(struct xenbus_device *dev,
  666. const struct xenbus_device_id *id)
  667. {
  668. struct vscsifrnt_info *info;
  669. struct Scsi_Host *host;
  670. int err = -ENOMEM;
  671. char name[TASK_COMM_LEN];
  672. host = scsi_host_alloc(&scsifront_sht, sizeof(*info));
  673. if (!host) {
  674. xenbus_dev_fatal(dev, err, "fail to allocate scsi host");
  675. return err;
  676. }
  677. info = (struct vscsifrnt_info *)host->hostdata;
  678. dev_set_drvdata(&dev->dev, info);
  679. info->dev = dev;
  680. bitmap_fill(info->shadow_free_bitmap, VSCSIIF_MAX_REQS);
  681. err = scsifront_init_ring(info);
  682. if (err) {
  683. scsi_host_put(host);
  684. return err;
  685. }
  686. init_waitqueue_head(&info->wq_sync);
  687. init_waitqueue_head(&info->wq_pause);
  688. spin_lock_init(&info->shadow_lock);
  689. snprintf(name, TASK_COMM_LEN, "vscsiif.%d", host->host_no);
  690. host->max_id = VSCSIIF_MAX_TARGET;
  691. host->max_channel = 0;
  692. host->max_lun = VSCSIIF_MAX_LUN;
  693. host->max_sectors = (host->sg_tablesize - 1) * PAGE_SIZE / 512;
  694. host->max_cmd_len = VSCSIIF_MAX_COMMAND_SIZE;
  695. err = scsi_add_host(host, &dev->dev);
  696. if (err) {
  697. dev_err(&dev->dev, "fail to add scsi host %d\n", err);
  698. goto free_sring;
  699. }
  700. info->host = host;
  701. info->host_active = 1;
  702. xenbus_switch_state(dev, XenbusStateInitialised);
  703. return 0;
  704. free_sring:
  705. scsifront_free_ring(info);
  706. scsi_host_put(host);
  707. return err;
  708. }
/*
 * xenbus resume: the pre-suspend backend connection is gone, so fail
 * every still-pending command with DID_RESET, then rebuild the ring and
 * event channel and re-enter the handshake.
 */
static int scsifront_resume(struct xenbus_device *dev)
{
	struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev);
	struct Scsi_Host *host = info->host;
	int err;

	spin_lock_irq(host->host_lock);

	/* Finish all still pending commands. */
	scsifront_finish_all(info);

	spin_unlock_irq(host->host_lock);

	/* Reconnect to dom0. */
	scsifront_free_ring(info);
	err = scsifront_init_ring(info);
	if (err) {
		dev_err(&dev->dev, "fail to resume %d\n", err);
		scsi_host_put(host);
		return err;
	}

	xenbus_switch_state(dev, XenbusStateInitialised);

	return 0;
}
/*
 * xenbus suspend: set info->pause so scsifront_enter() rejects new
 * submissions, then wait until every thread currently inside the
 * submission path has left (callers == 0).  Waiters on ring space are
 * woken so they can observe the pause and bail out.
 */
static int scsifront_suspend(struct xenbus_device *dev)
{
	struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev);
	struct Scsi_Host *host = info->host;
	int err = 0;

	/* No new commands for the backend. */
	spin_lock_irq(host->host_lock);
	info->pause = 1;
	while (info->callers && !err) {
		info->waiting_pause = 1;
		info->wait_ring_available = 0;
		spin_unlock_irq(host->host_lock);
		wake_up(&info->wq_sync);
		err = wait_event_interruptible(info->wq_pause,
					       !info->waiting_pause);
		spin_lock_irq(host->host_lock);
	}
	spin_unlock_irq(host->host_lock);
	return err;
}
/*
 * xenbus remove: unregister from the SCSI midlayer (unless
 * scsifront_disconnect() already did), release the ring, and drop the
 * final host reference.  scsifront_mutex serializes host removal with
 * the disconnect path.
 */
static int scsifront_remove(struct xenbus_device *dev)
{
	struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev);

	pr_debug("%s: %s removed\n", __func__, dev->nodename);

	mutex_lock(&scsifront_mutex);
	if (info->host_active) {
		/* Scsi_host not yet removed */
		scsi_remove_host(info->host);
		info->host_active = 0;
	}
	mutex_unlock(&scsifront_mutex);

	scsifront_free_ring(info);
	scsi_host_put(info->host);

	return 0;
}
/*
 * Backend-initiated close: remove the SCSI host (if still registered)
 * and acknowledge by moving our xenbus state to Closed.
 */
static void scsifront_disconnect(struct vscsifrnt_info *info)
{
	struct xenbus_device *dev = info->dev;
	struct Scsi_Host *host = info->host;

	pr_debug("%s: %s disconnect\n", __func__, dev->nodename);

	/*
	 * When this function is executed, all devices of
	 * Frontend have been deleted.
	 * Therefore, it need not block I/O before remove_host.
	 */

	mutex_lock(&scsifront_mutex);
	if (info->host_active) {
		scsi_remove_host(host);
		info->host_active = 0;
	}
	mutex_unlock(&scsifront_mutex);

	xenbus_frontend_closed(dev);
}
/*
 * Walk the backend's "vscsi-devs" directory and apply @op
 * (VSCSIFRONT_OP_ADD_LUN / DEL_LUN / READD_LUN) to each entry according
 * to its backend-reported state.  info->curr marks this task as the one
 * allowed to write device states from slave_configure/slave_destroy.
 */
static void scsifront_do_lun_hotplug(struct vscsifrnt_info *info, int op)
{
	struct xenbus_device *dev = info->dev;
	int i, err = 0;
	char str[64];
	char **dir;
	unsigned int dir_n = 0;
	unsigned int device_state;
	unsigned int hst, chn, tgt, lun;
	struct scsi_device *sdev;

	dir = xenbus_directory(XBT_NIL, dev->otherend, "vscsi-devs", &dir_n);
	if (IS_ERR(dir))
		return;

	/* mark current task as the one allowed to modify device states */
	BUG_ON(info->curr);
	info->curr = current;

	for (i = 0; i < dir_n; i++) {
		/* read status */
		snprintf(str, sizeof(str), "vscsi-devs/%s/state", dir[i]);
		err = xenbus_scanf(XBT_NIL, dev->otherend, str, "%u",
				   &device_state);
		if (XENBUS_EXIST_ERR(err))
			continue;

		/* virtual SCSI device */
		snprintf(str, sizeof(str), "vscsi-devs/%s/v-dev", dir[i]);
		err = xenbus_scanf(XBT_NIL, dev->otherend, str,
				   "%u:%u:%u:%u", &hst, &chn, &tgt, &lun);
		if (XENBUS_EXIST_ERR(err))
			continue;

		/*
		 * Front device state path, used in slave_configure called
		 * on successful scsi_add_device, and in slave_destroy called
		 * on remove of a device.
		 */
		snprintf(info->dev_state_path, sizeof(info->dev_state_path),
			 "vscsi-devs/%s/state", dir[i]);

		switch (op) {
		case VSCSIFRONT_OP_ADD_LUN:
			if (device_state != XenbusStateInitialised)
				break;

			if (scsi_add_device(info->host, chn, tgt, lun)) {
				/* Tell the backend this LUN is unusable. */
				dev_err(&dev->dev, "scsi_add_device\n");
				err = xenbus_printf(XBT_NIL, dev->nodename,
					      info->dev_state_path,
					      "%d", XenbusStateClosed);
				if (err)
					xenbus_dev_error(dev, err,
						"%s: writing dev_state_path",
						__func__);
			}
			break;
		case VSCSIFRONT_OP_DEL_LUN:
			if (device_state != XenbusStateClosing)
				break;

			sdev = scsi_device_lookup(info->host, chn, tgt, lun);
			if (sdev) {
				scsi_remove_device(sdev);
				scsi_device_put(sdev);
			}
			break;
		case VSCSIFRONT_OP_READD_LUN:
			/* Re-confirm an already connected LUN (after resume). */
			if (device_state == XenbusStateConnected) {
				err = xenbus_printf(XBT_NIL, dev->nodename,
					      info->dev_state_path,
					      "%d", XenbusStateConnected);
				if (err)
					xenbus_dev_error(dev, err,
						"%s: writing dev_state_path",
						__func__);
			}
			break;
		default:
			break;
		}
	}

	info->curr = NULL;

	kfree(dir);
}
  858. static void scsifront_read_backend_params(struct xenbus_device *dev,
  859. struct vscsifrnt_info *info)
  860. {
  861. unsigned int sg_grant, nr_segs;
  862. struct Scsi_Host *host = info->host;
  863. sg_grant = xenbus_read_unsigned(dev->otherend, "feature-sg-grant", 0);
  864. nr_segs = min_t(unsigned int, sg_grant, SG_ALL);
  865. nr_segs = max_t(unsigned int, nr_segs, VSCSIIF_SG_TABLESIZE);
  866. nr_segs = min_t(unsigned int, nr_segs,
  867. VSCSIIF_SG_TABLESIZE * PAGE_SIZE /
  868. sizeof(struct scsiif_request_segment));
  869. if (!info->pause && sg_grant)
  870. dev_info(&dev->dev, "using up to %d SG entries\n", nr_segs);
  871. else if (info->pause && nr_segs < host->sg_tablesize)
  872. dev_warn(&dev->dev,
  873. "SG entries decreased from %d to %u - device may not work properly anymore\n",
  874. host->sg_tablesize, nr_segs);
  875. host->sg_tablesize = nr_segs;
  876. host->max_sectors = (nr_segs - 1) * PAGE_SIZE / 512;
  877. }
/*
 * xenbus otherend_changed callback: react to backend state transitions.
 *
 * Connected   -> read SG parameters; on resume (info->pause) re-announce
 *                LUNs, otherwise hot-add LUNs and switch ourselves to
 *                Connected.
 * Closing/
 * Closed      -> disconnect (remove the Scsi_Host, close the frontend).
 * Reconfiguring/
 * Reconfigured-> backend-driven LUN removal / addition.
 */
static void scsifront_backend_changed(struct xenbus_device *dev,
				      enum xenbus_state backend_state)
{
	struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev);

	pr_debug("%s: %p %u %u\n", __func__, dev, dev->state, backend_state);

	switch (backend_state) {
	case XenbusStateUnknown:
	case XenbusStateInitialising:
	case XenbusStateInitWait:
	case XenbusStateInitialised:
		break;

	case XenbusStateConnected:
		scsifront_read_backend_params(dev, info);

		if (info->pause) {
			/* Resuming from suspend: re-confirm existing LUNs. */
			scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_READD_LUN);
			xenbus_switch_state(dev, XenbusStateConnected);
			info->pause = 0;
			return;
		}

		if (xenbus_read_driver_state(dev->nodename) ==
		    XenbusStateInitialised)
			scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_ADD_LUN);

		if (dev->state != XenbusStateConnected)
			xenbus_switch_state(dev, XenbusStateConnected);
		break;

	case XenbusStateClosed:
		if (dev->state == XenbusStateClosed)
			break;
		/* Missed the backend's Closing state -- fallthrough */
	case XenbusStateClosing:
		scsifront_disconnect(info);
		break;

	case XenbusStateReconfiguring:
		scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_DEL_LUN);
		xenbus_switch_state(dev, XenbusStateReconfiguring);
		break;

	case XenbusStateReconfigured:
		scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_ADD_LUN);
		xenbus_switch_state(dev, XenbusStateConnected);
		break;
	}
}
/* xenbus device IDs this driver binds to; terminated by an empty entry. */
static const struct xenbus_device_id scsifront_ids[] = {
	{ "vscsi" },
	{ "" }
};
/* xenbus frontend driver operations for the "vscsi" device class. */
static struct xenbus_driver scsifront_driver = {
	.ids			= scsifront_ids,
	.probe			= scsifront_probe,
	.remove			= scsifront_remove,
	.resume			= scsifront_resume,
	.suspend		= scsifront_suspend,
	.otherend_changed	= scsifront_backend_changed,
};
  932. static int __init scsifront_init(void)
  933. {
  934. if (!xen_domain())
  935. return -ENODEV;
  936. return xenbus_register_frontend(&scsifront_driver);
  937. }
  938. module_init(scsifront_init);
/* Module exit point: unregister the xenbus frontend driver. */
static void __exit scsifront_exit(void)
{
	xenbus_unregister_driver(&scsifront_driver);
}
module_exit(scsifront_exit);
/* Module metadata. */
MODULE_DESCRIPTION("Xen SCSI frontend driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("xen:vscsi");
MODULE_AUTHOR("Juergen Gross <jgross@suse.com>");