xenbus.c

/*  Xenbus code for blkif backend
    Copyright (C) 2005 Rusty Russell <rusty@rustcorp.com.au>
    Copyright (C) 2005 XenSource Ltd

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.
*/

#define pr_fmt(fmt) "xen-blkback: " fmt

#include <stdarg.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <xen/events.h>
#include <xen/grant_table.h>
#include "common.h"

/* On the XenBus the max length of 'ring-ref%u'. */
#define RINGREF_NAME_LEN (20)

struct backend_info {
	struct xenbus_device *dev;
	struct xen_blkif *blkif;
	struct xenbus_watch backend_watch;
	unsigned major;
	unsigned minor;
	char *mode;
};

static struct kmem_cache *xen_blkif_cachep;

static void connect(struct backend_info *);
static int connect_ring(struct backend_info *);
static void backend_changed(struct xenbus_watch *, const char **,
			    unsigned int);
static void xen_blkif_free(struct xen_blkif *blkif);
static void xen_vbd_free(struct xen_vbd *vbd);

struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be)
{
	return be->dev;
}
/*
 * The last request could free the device from softirq context and
 * xen_blkif_free() can sleep.
 */
static void xen_blkif_deferred_free(struct work_struct *work)
{
	struct xen_blkif *blkif;

	blkif = container_of(work, struct xen_blkif, free_work);
	xen_blkif_free(blkif);
}
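/* Build the "<domid>.<device>" name used to label the xenblkd threads. */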
static int blkback_name(struct xen_blkif *blkif, char *buf)
{
	char *devpath, *devname;
	struct xenbus_device *dev = blkif->be->dev;

	devpath = xenbus_read(XBT_NIL, dev->nodename, "dev", NULL);
	if (IS_ERR(devpath))
		return PTR_ERR(devpath);

	devname = strstr(devpath, "/dev/");
	if (devname != NULL)
		devname += strlen("/dev/");
	else
		devname = devpath;

	snprintf(buf, TASK_COMM_LEN, "%d.%s", blkif->domid, devname);
	kfree(devpath);

	return 0;
}
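/*
 * Bring the backend to Connected once the rings, event channel and backing
 * device are all set up, then start one xenblkd thread per ring.
 */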
static void xen_update_blkif_status(struct xen_blkif *blkif)
{
	int err;
	char name[TASK_COMM_LEN];
	struct xen_blkif_ring *ring;
	int i;

	/* Not ready to connect? */
	if (!blkif->rings || !blkif->rings[0].irq || !blkif->vbd.bdev)
		return;

	/* Already connected? */
	if (blkif->be->dev->state == XenbusStateConnected)
		return;

	/* Attempt to connect: exit if we fail to. */
	connect(blkif->be);
	if (blkif->be->dev->state != XenbusStateConnected)
		return;

	err = blkback_name(blkif, name);
	if (err) {
		xenbus_dev_error(blkif->be->dev, err, "get blkback dev name");
		return;
	}

	err = filemap_write_and_wait(blkif->vbd.bdev->bd_inode->i_mapping);
	if (err) {
		xenbus_dev_error(blkif->be->dev, err, "block flush");
		return;
	}
	invalidate_inode_pages2(blkif->vbd.bdev->bd_inode->i_mapping);

	for (i = 0; i < blkif->nr_rings; i++) {
		ring = &blkif->rings[i];
		ring->xenblkd = kthread_run(xen_blkif_schedule, ring, "%s-%d", name, i);
		if (IS_ERR(ring->xenblkd)) {
			err = PTR_ERR(ring->xenblkd);
			ring->xenblkd = NULL;
			xenbus_dev_fatal(blkif->be->dev, err,
					 "start %s-%d xenblkd", name, i);
			goto out;
		}
	}
	return;

out:
	while (--i >= 0) {
		ring = &blkif->rings[i];
		kthread_stop(ring->xenblkd);
	}
	return;
}
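/* Allocate and initialise the per-queue ring structures for this blkif. */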
static int xen_blkif_alloc_rings(struct xen_blkif *blkif)
{
	unsigned int r;

	blkif->rings = kzalloc(blkif->nr_rings * sizeof(struct xen_blkif_ring), GFP_KERNEL);
	if (!blkif->rings)
		return -ENOMEM;

	for (r = 0; r < blkif->nr_rings; r++) {
		struct xen_blkif_ring *ring = &blkif->rings[r];

		spin_lock_init(&ring->blk_ring_lock);
		init_waitqueue_head(&ring->wq);
		INIT_LIST_HEAD(&ring->pending_free);
		INIT_LIST_HEAD(&ring->persistent_purge_list);
		INIT_WORK(&ring->persistent_purge_work, xen_blkbk_unmap_purged_grants);
		spin_lock_init(&ring->free_pages_lock);
		INIT_LIST_HEAD(&ring->free_pages);
		spin_lock_init(&ring->pending_free_lock);
		init_waitqueue_head(&ring->pending_free_wq);
		init_waitqueue_head(&ring->shutdown_wq);
		ring->blkif = blkif;
		ring->st_print = jiffies;
		ring->active = true;
	}

	return 0;
}
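/* Allocate a new blkif instance for the frontend in domain 'domid'. */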
static struct xen_blkif *xen_blkif_alloc(domid_t domid)
{
	struct xen_blkif *blkif;

	BUILD_BUG_ON(MAX_INDIRECT_PAGES > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);

	blkif = kmem_cache_zalloc(xen_blkif_cachep, GFP_KERNEL);
	if (!blkif)
		return ERR_PTR(-ENOMEM);

	blkif->domid = domid;
	atomic_set(&blkif->refcnt, 1);
	init_completion(&blkif->drain_complete);
	INIT_WORK(&blkif->free_work, xen_blkif_deferred_free);

	return blkif;
}
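/*
 * Map the frontend's shared ring page(s) into the given ring and bind its
 * event channel to xen_blkif_be_int().
 */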
static int xen_blkif_map(struct xen_blkif_ring *ring, grant_ref_t *gref,
			 unsigned int nr_grefs, unsigned int evtchn)
{
	int err;
	struct xen_blkif *blkif = ring->blkif;

	/* Already connected through? */
	if (ring->irq)
		return 0;

	err = xenbus_map_ring_valloc(blkif->be->dev, gref, nr_grefs,
				     &ring->blk_ring);
	if (err < 0)
		return err;

	switch (blkif->blk_protocol) {
	case BLKIF_PROTOCOL_NATIVE:
	{
		struct blkif_sring *sring;
		sring = (struct blkif_sring *)ring->blk_ring;
		BACK_RING_INIT(&ring->blk_rings.native, sring,
			       XEN_PAGE_SIZE * nr_grefs);
		break;
	}
	case BLKIF_PROTOCOL_X86_32:
	{
		struct blkif_x86_32_sring *sring_x86_32;
		sring_x86_32 = (struct blkif_x86_32_sring *)ring->blk_ring;
		BACK_RING_INIT(&ring->blk_rings.x86_32, sring_x86_32,
			       XEN_PAGE_SIZE * nr_grefs);
		break;
	}
	case BLKIF_PROTOCOL_X86_64:
	{
		struct blkif_x86_64_sring *sring_x86_64;
		sring_x86_64 = (struct blkif_x86_64_sring *)ring->blk_ring;
		BACK_RING_INIT(&ring->blk_rings.x86_64, sring_x86_64,
			       XEN_PAGE_SIZE * nr_grefs);
		break;
	}
	default:
		BUG();
	}

	err = bind_interdomain_evtchn_to_irqhandler(blkif->domid, evtchn,
						    xen_blkif_be_int, 0,
						    "blkif-backend", ring);
	if (err < 0) {
		xenbus_unmap_ring_vfree(blkif->be->dev, ring->blk_ring);
		ring->blk_rings.common.sring = NULL;
		return err;
	}
	ring->irq = err;

	return 0;
}
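/*
 * Tear down all rings: stop the xenblkd threads, unbind the event channels,
 * unmap the shared rings and free per-ring resources.  Returns -EBUSY if
 * requests are still in flight.
 */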
static int xen_blkif_disconnect(struct xen_blkif *blkif)
{
	struct pending_req *req, *n;
	unsigned int j, r;

	for (r = 0; r < blkif->nr_rings; r++) {
		struct xen_blkif_ring *ring = &blkif->rings[r];
		unsigned int i = 0;

		if (!ring->active)
			continue;

		if (ring->xenblkd) {
			kthread_stop(ring->xenblkd);
			wake_up(&ring->shutdown_wq);
		}

		/* The above kthread_stop() guarantees that at this point we
		 * don't have any discard_io or other_io requests. So, checking
		 * for inflight IO is enough.
		 */
		if (atomic_read(&ring->inflight) > 0)
			return -EBUSY;

		if (ring->irq) {
			unbind_from_irqhandler(ring->irq, ring);
			ring->irq = 0;
		}

		if (ring->blk_rings.common.sring) {
			xenbus_unmap_ring_vfree(blkif->be->dev, ring->blk_ring);
			ring->blk_rings.common.sring = NULL;
		}

		/* Remove all persistent grants and the cache of ballooned pages. */
		xen_blkbk_free_caches(ring);

		/* Check that there is no request in use */
		list_for_each_entry_safe(req, n, &ring->pending_free, free_list) {
			list_del(&req->free_list);

			for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++)
				kfree(req->segments[j]);

			for (j = 0; j < MAX_INDIRECT_PAGES; j++)
				kfree(req->indirect_pages[j]);

			kfree(req);
			i++;
		}

		BUG_ON(atomic_read(&ring->persistent_gnt_in_use) != 0);
		BUG_ON(!list_empty(&ring->persistent_purge_list));
		BUG_ON(!RB_EMPTY_ROOT(&ring->persistent_gnts));
		BUG_ON(!list_empty(&ring->free_pages));
		BUG_ON(ring->free_pages_num != 0);
		BUG_ON(ring->persistent_gnt_c != 0);
		WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
		ring->active = false;
	}
	blkif->nr_ring_pages = 0;
	/*
	 * blkif->rings was allocated in connect_ring, so we should free it in
	 * here.
	 */
	kfree(blkif->rings);
	blkif->rings = NULL;
	blkif->nr_rings = 0;

	return 0;
}
static void xen_blkif_free(struct xen_blkif *blkif)
{
	WARN_ON(xen_blkif_disconnect(blkif));
	xen_vbd_free(&blkif->vbd);
	kfree(blkif->be->mode);
	kfree(blkif->be);

	/* Make sure everything is drained before shutting down */
	kmem_cache_free(xen_blkif_cachep, blkif);
}

int __init xen_blkif_interface_init(void)
{
	xen_blkif_cachep = kmem_cache_create("blkif_cache",
					     sizeof(struct xen_blkif),
					     0, 0, NULL);
	if (!xen_blkif_cachep)
		return -ENOMEM;

	return 0;
}
/*
 *  sysfs interface for VBD I/O requests
 */
#define VBD_SHOW_ALLRING(name, format)					\
	static ssize_t show_##name(struct device *_dev,			\
				   struct device_attribute *attr,	\
				   char *buf)				\
	{								\
		struct xenbus_device *dev = to_xenbus_device(_dev);	\
		struct backend_info *be = dev_get_drvdata(&dev->dev);	\
		struct xen_blkif *blkif = be->blkif;			\
		unsigned int i;						\
		unsigned long long result = 0;				\
									\
		if (!blkif->rings)					\
			goto out;					\
									\
		for (i = 0; i < blkif->nr_rings; i++) {			\
			struct xen_blkif_ring *ring = &blkif->rings[i];	\
									\
			result += ring->st_##name;			\
		}							\
									\
out:									\
		return sprintf(buf, format, result);			\
	}								\
	static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

VBD_SHOW_ALLRING(oo_req, "%llu\n");
VBD_SHOW_ALLRING(rd_req, "%llu\n");
VBD_SHOW_ALLRING(wr_req, "%llu\n");
VBD_SHOW_ALLRING(f_req, "%llu\n");
VBD_SHOW_ALLRING(ds_req, "%llu\n");
VBD_SHOW_ALLRING(rd_sect, "%llu\n");
VBD_SHOW_ALLRING(wr_sect, "%llu\n");

static struct attribute *xen_vbdstat_attrs[] = {
	&dev_attr_oo_req.attr,
	&dev_attr_rd_req.attr,
	&dev_attr_wr_req.attr,
	&dev_attr_f_req.attr,
	&dev_attr_ds_req.attr,
	&dev_attr_rd_sect.attr,
	&dev_attr_wr_sect.attr,
	NULL
};

static const struct attribute_group xen_vbdstat_group = {
	.name = "statistics",
	.attrs = xen_vbdstat_attrs,
};
#define VBD_SHOW(name, format, args...)					\
	static ssize_t show_##name(struct device *_dev,			\
				   struct device_attribute *attr,	\
				   char *buf)				\
	{								\
		struct xenbus_device *dev = to_xenbus_device(_dev);	\
		struct backend_info *be = dev_get_drvdata(&dev->dev);	\
									\
		return sprintf(buf, format, ##args);			\
	}								\
	static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

VBD_SHOW(physical_device, "%x:%x\n", be->major, be->minor);
VBD_SHOW(mode, "%s\n", be->mode);

static int xenvbd_sysfs_addif(struct xenbus_device *dev)
{
	int error;

	error = device_create_file(&dev->dev, &dev_attr_physical_device);
	if (error)
		goto fail1;

	error = device_create_file(&dev->dev, &dev_attr_mode);
	if (error)
		goto fail2;

	error = sysfs_create_group(&dev->dev.kobj, &xen_vbdstat_group);
	if (error)
		goto fail3;

	return 0;

fail3:	sysfs_remove_group(&dev->dev.kobj, &xen_vbdstat_group);
fail2:	device_remove_file(&dev->dev, &dev_attr_mode);
fail1:	device_remove_file(&dev->dev, &dev_attr_physical_device);
	return error;
}

static void xenvbd_sysfs_delif(struct xenbus_device *dev)
{
	sysfs_remove_group(&dev->dev.kobj, &xen_vbdstat_group);
	device_remove_file(&dev->dev, &dev_attr_mode);
	device_remove_file(&dev->dev, &dev_attr_physical_device);
}
static void xen_vbd_free(struct xen_vbd *vbd)
{
	if (vbd->bdev)
		blkdev_put(vbd->bdev, vbd->readonly ? FMODE_READ : FMODE_WRITE);
	vbd->bdev = NULL;
}
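/*
 * Open the physical device behind major:minor and record its size, type
 * (cdrom/removable) and flush/discard capabilities in the vbd.
 */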
static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
			  unsigned major, unsigned minor, int readonly,
			  int cdrom)
{
	struct xen_vbd *vbd;
	struct block_device *bdev;
	struct request_queue *q;

	vbd = &blkif->vbd;
	vbd->handle = handle;
	vbd->readonly = readonly;
	vbd->type = 0;

	vbd->pdevice = MKDEV(major, minor);

	bdev = blkdev_get_by_dev(vbd->pdevice, vbd->readonly ?
				 FMODE_READ : FMODE_WRITE, NULL);

	if (IS_ERR(bdev)) {
		pr_warn("xen_vbd_create: device %08x could not be opened\n",
			vbd->pdevice);
		return -ENOENT;
	}

	vbd->bdev = bdev;
	if (vbd->bdev->bd_disk == NULL) {
		pr_warn("xen_vbd_create: device %08x doesn't exist\n",
			vbd->pdevice);
		xen_vbd_free(vbd);
		return -ENOENT;
	}
	vbd->size = vbd_sz(vbd);

	if (vbd->bdev->bd_disk->flags & GENHD_FL_CD || cdrom)
		vbd->type |= VDISK_CDROM;
	if (vbd->bdev->bd_disk->flags & GENHD_FL_REMOVABLE)
		vbd->type |= VDISK_REMOVABLE;

	q = bdev_get_queue(bdev);
	if (q && test_bit(QUEUE_FLAG_WC, &q->queue_flags))
		vbd->flush_support = true;

	if (q && blk_queue_secure_erase(q))
		vbd->discard_secure = true;

	pr_debug("Successful creation of handle=%04x (dom=%u)\n",
		 handle, blkif->domid);
	return 0;
}
static int xen_blkbk_remove(struct xenbus_device *dev)
{
	struct backend_info *be = dev_get_drvdata(&dev->dev);

	pr_debug("%s %p %d\n", __func__, dev, dev->otherend_id);

	if (be->major || be->minor)
		xenvbd_sysfs_delif(dev);

	if (be->backend_watch.node) {
		unregister_xenbus_watch(&be->backend_watch);
		kfree(be->backend_watch.node);
		be->backend_watch.node = NULL;
	}

	dev_set_drvdata(&dev->dev, NULL);

	if (be->blkif)
		xen_blkif_disconnect(be->blkif);

	/* Put the reference we set in xen_blkif_alloc(). */
	xen_blkif_put(be->blkif);

	return 0;
}
int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
			      struct backend_info *be, int state)
{
	struct xenbus_device *dev = be->dev;
	int err;

	err = xenbus_printf(xbt, dev->nodename, "feature-flush-cache",
			    "%d", state);
	if (err)
		dev_warn(&dev->dev, "writing feature-flush-cache (%d)", err);

	return err;
}
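/*
 * Advertise discard support to the frontend, honouring the optional
 * toolstack "discard-enable" node and publishing granularity/alignment.
 */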
static void xen_blkbk_discard(struct xenbus_transaction xbt, struct backend_info *be)
{
	struct xenbus_device *dev = be->dev;
	struct xen_blkif *blkif = be->blkif;
	int err;
	int state = 0, discard_enable;
	struct block_device *bdev = be->blkif->vbd.bdev;
	struct request_queue *q = bdev_get_queue(bdev);

	err = xenbus_scanf(XBT_NIL, dev->nodename, "discard-enable", "%d",
			   &discard_enable);
	if (err == 1 && !discard_enable)
		return;

	if (blk_queue_discard(q)) {
		err = xenbus_printf(xbt, dev->nodename,
				    "discard-granularity", "%u",
				    q->limits.discard_granularity);
		if (err) {
			dev_warn(&dev->dev, "writing discard-granularity (%d)", err);
			return;
		}
		err = xenbus_printf(xbt, dev->nodename,
				    "discard-alignment", "%u",
				    q->limits.discard_alignment);
		if (err) {
			dev_warn(&dev->dev, "writing discard-alignment (%d)", err);
			return;
		}
		state = 1;
		/* Optional. */
		err = xenbus_printf(xbt, dev->nodename,
				    "discard-secure", "%d",
				    blkif->vbd.discard_secure);
		if (err) {
			dev_warn(&dev->dev, "writing discard-secure (%d)", err);
			return;
		}
	}
	err = xenbus_printf(xbt, dev->nodename, "feature-discard",
			    "%d", state);
	if (err)
		dev_warn(&dev->dev, "writing feature-discard (%d)", err);
}
int xen_blkbk_barrier(struct xenbus_transaction xbt,
		      struct backend_info *be, int state)
{
	struct xenbus_device *dev = be->dev;
	int err;

	err = xenbus_printf(xbt, dev->nodename, "feature-barrier",
			    "%d", state);
	if (err)
		dev_warn(&dev->dev, "writing feature-barrier (%d)", err);

	return err;
}
/*
 * Entry point to this code when a new device is created.  Allocate the basic
 * structures, and watch the store waiting for the hotplug scripts to tell us
 * the device's physical major and minor numbers.  Switch to InitWait.
 */
static int xen_blkbk_probe(struct xenbus_device *dev,
			   const struct xenbus_device_id *id)
{
	int err;
	struct backend_info *be = kzalloc(sizeof(struct backend_info),
					  GFP_KERNEL);

	/* match the pr_debug in xen_blkbk_remove */
	pr_debug("%s %p %d\n", __func__, dev, dev->otherend_id);

	if (!be) {
		xenbus_dev_fatal(dev, -ENOMEM,
				 "allocating backend structure");
		return -ENOMEM;
	}
	be->dev = dev;
	dev_set_drvdata(&dev->dev, be);

	be->blkif = xen_blkif_alloc(dev->otherend_id);
	if (IS_ERR(be->blkif)) {
		err = PTR_ERR(be->blkif);
		be->blkif = NULL;
		xenbus_dev_fatal(dev, err, "creating block interface");
		goto fail;
	}

	err = xenbus_printf(XBT_NIL, dev->nodename,
			    "feature-max-indirect-segments", "%u",
			    MAX_INDIRECT_SEGMENTS);
	if (err)
		dev_warn(&dev->dev,
			 "writing %s/feature-max-indirect-segments (%d)",
			 dev->nodename, err);

	/* Multi-queue: advertise how many queues are supported by us.*/
	err = xenbus_printf(XBT_NIL, dev->nodename,
			    "multi-queue-max-queues", "%u", xenblk_max_queues);
	if (err)
		pr_warn("Error writing multi-queue-max-queues\n");

	/* setup back pointer */
	be->blkif->be = be;

	err = xenbus_watch_pathfmt(dev, &be->backend_watch, backend_changed,
				   "%s/%s", dev->nodename, "physical-device");
	if (err)
		goto fail;

	err = xenbus_printf(XBT_NIL, dev->nodename, "max-ring-page-order", "%u",
			    xen_blkif_max_ring_order);
	if (err)
		pr_warn("%s write out 'max-ring-page-order' failed\n", __func__);

	err = xenbus_switch_state(dev, XenbusStateInitWait);
	if (err)
		goto fail;

	return 0;

fail:
	pr_warn("%s failed\n", __func__);
	xen_blkbk_remove(dev);
	return err;
}
/*
 * Callback received when the hotplug scripts have placed the physical-device
 * node.  Read it and the mode node, and create a vbd.  If the frontend is
 * ready, connect.
 */
static void backend_changed(struct xenbus_watch *watch,
			    const char **vec, unsigned int len)
{
	int err;
	unsigned major;
	unsigned minor;
	struct backend_info *be
		= container_of(watch, struct backend_info, backend_watch);
	struct xenbus_device *dev = be->dev;
	int cdrom = 0;
	unsigned long handle;
	char *device_type;

	pr_debug("%s %p %d\n", __func__, dev, dev->otherend_id);

	err = xenbus_scanf(XBT_NIL, dev->nodename, "physical-device", "%x:%x",
			   &major, &minor);
	if (XENBUS_EXIST_ERR(err)) {
		/*
		 * Since this watch will fire once immediately after it is
		 * registered, we expect this.  Ignore it, and wait for the
		 * hotplug scripts.
		 */
		return;
	}
	if (err != 2) {
		xenbus_dev_fatal(dev, err, "reading physical-device");
		return;
	}

	if (be->major | be->minor) {
		if (be->major != major || be->minor != minor)
			pr_warn("changing physical device (from %x:%x to %x:%x) not supported.\n",
				be->major, be->minor, major, minor);
		return;
	}

	be->mode = xenbus_read(XBT_NIL, dev->nodename, "mode", NULL);
	if (IS_ERR(be->mode)) {
		err = PTR_ERR(be->mode);
		be->mode = NULL;
		xenbus_dev_fatal(dev, err, "reading mode");
		return;
	}

	device_type = xenbus_read(XBT_NIL, dev->otherend, "device-type", NULL);
	if (!IS_ERR(device_type)) {
		cdrom = strcmp(device_type, "cdrom") == 0;
		kfree(device_type);
	}

	/* Front end dir is a number, which is used as the handle. */
	err = kstrtoul(strrchr(dev->otherend, '/') + 1, 0, &handle);
	if (err) {
		kfree(be->mode);
		be->mode = NULL;
		return;
	}

	be->major = major;
	be->minor = minor;

	err = xen_vbd_create(be->blkif, handle, major, minor,
			     !strchr(be->mode, 'w'), cdrom);

	if (err)
		xenbus_dev_fatal(dev, err, "creating vbd structure");
	else {
		err = xenvbd_sysfs_addif(dev);
		if (err) {
			xen_vbd_free(&be->blkif->vbd);
			xenbus_dev_fatal(dev, err, "creating sysfs entries");
		}
	}

	if (err) {
		kfree(be->mode);
		be->mode = NULL;
		be->major = 0;
		be->minor = 0;
	} else {
		/* We're potentially connected now */
		xen_update_blkif_status(be->blkif);
	}
}
/*
 * Callback received when the frontend's state changes.
 */
static void frontend_changed(struct xenbus_device *dev,
			     enum xenbus_state frontend_state)
{
	struct backend_info *be = dev_get_drvdata(&dev->dev);
	int err;

	pr_debug("%s %p %s\n", __func__, dev, xenbus_strstate(frontend_state));

	switch (frontend_state) {
	case XenbusStateInitialising:
		if (dev->state == XenbusStateClosed) {
			pr_info("%s: prepare for reconnect\n", dev->nodename);
			xenbus_switch_state(dev, XenbusStateInitWait);
		}
		break;

	case XenbusStateInitialised:
	case XenbusStateConnected:
		/*
		 * Ensure we connect even when two watches fire in
		 * close succession and we miss the intermediate value
		 * of frontend_state.
		 */
		if (dev->state == XenbusStateConnected)
			break;

		/*
		 * Enforce precondition before potential leak point.
		 * xen_blkif_disconnect() is idempotent.
		 */
		err = xen_blkif_disconnect(be->blkif);
		if (err) {
			xenbus_dev_fatal(dev, err, "pending I/O");
			break;
		}

		err = connect_ring(be);
		if (err) {
			/*
			 * Clean up so that memory resources can be used by
			 * other devices. connect_ring already reported the error.
			 */
			xen_blkif_disconnect(be->blkif);
			break;
		}
		xen_update_blkif_status(be->blkif);
		break;

	case XenbusStateClosing:
		xenbus_switch_state(dev, XenbusStateClosing);
		break;

	case XenbusStateClosed:
		xen_blkif_disconnect(be->blkif);
		xenbus_switch_state(dev, XenbusStateClosed);
		if (xenbus_dev_is_online(dev))
			break;
		/* fall through if not online */
	case XenbusStateUnknown:
		/* implies xen_blkif_disconnect() via xen_blkbk_remove() */
		device_unregister(&dev->dev);
		break;

	default:
		xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
				 frontend_state);
		break;
	}
}
/* ** Connection ** */

/*
 * Write the physical details regarding the block device to the store, and
 * switch to Connected state.
 */
static void connect(struct backend_info *be)
{
	struct xenbus_transaction xbt;
	int err;
	struct xenbus_device *dev = be->dev;

	pr_debug("%s %s\n", __func__, dev->otherend);

	/* Supply the information about the device the frontend needs */
again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		return;
	}

	/* If we can't advertise it is OK. */
	xen_blkbk_flush_diskcache(xbt, be, be->blkif->vbd.flush_support);

	xen_blkbk_discard(xbt, be);

	xen_blkbk_barrier(xbt, be, be->blkif->vbd.flush_support);

	err = xenbus_printf(xbt, dev->nodename, "feature-persistent", "%u", 1);
	if (err) {
		xenbus_dev_fatal(dev, err, "writing %s/feature-persistent",
				 dev->nodename);
		goto abort;
	}

	err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
			    (unsigned long long)vbd_sz(&be->blkif->vbd));
	if (err) {
		xenbus_dev_fatal(dev, err, "writing %s/sectors",
				 dev->nodename);
		goto abort;
	}

	/* FIXME: use a typename instead */
	err = xenbus_printf(xbt, dev->nodename, "info", "%u",
			    be->blkif->vbd.type |
			    (be->blkif->vbd.readonly ? VDISK_READONLY : 0));
	if (err) {
		xenbus_dev_fatal(dev, err, "writing %s/info",
				 dev->nodename);
		goto abort;
	}
	err = xenbus_printf(xbt, dev->nodename, "sector-size", "%lu",
			    (unsigned long)
			    bdev_logical_block_size(be->blkif->vbd.bdev));
	if (err) {
		xenbus_dev_fatal(dev, err, "writing %s/sector-size",
				 dev->nodename);
		goto abort;
	}
	err = xenbus_printf(xbt, dev->nodename, "physical-sector-size", "%u",
			    bdev_physical_block_size(be->blkif->vbd.bdev));
	if (err)
		xenbus_dev_error(dev, err, "writing %s/physical-sector-size",
				 dev->nodename);

	err = xenbus_transaction_end(xbt, 0);
	if (err == -EAGAIN)
		goto again;
	if (err)
		xenbus_dev_fatal(dev, err, "ending transaction");

	err = xenbus_switch_state(dev, XenbusStateConnected);
	if (err)
		xenbus_dev_fatal(dev, err, "%s: switching to Connected state",
				 dev->nodename);

	return;
abort:
	xenbus_transaction_end(xbt, 1);
}
/*
 * Each ring may span multiple pages, depending on "ring-page-order".
 */
static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
{
	unsigned int ring_ref[XENBUS_MAX_RING_GRANTS];
	struct pending_req *req, *n;
	int err, i, j;
	struct xen_blkif *blkif = ring->blkif;
	struct xenbus_device *dev = blkif->be->dev;
	unsigned int ring_page_order, nr_grefs, evtchn;

	err = xenbus_scanf(XBT_NIL, dir, "event-channel", "%u",
			   &evtchn);
	if (err != 1) {
		err = -EINVAL;
		xenbus_dev_fatal(dev, err, "reading %s/event-channel", dir);
		return err;
	}

	err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-page-order", "%u",
			   &ring_page_order);
	if (err != 1) {
		err = xenbus_scanf(XBT_NIL, dir, "ring-ref", "%u", &ring_ref[0]);
		if (err != 1) {
			err = -EINVAL;
			xenbus_dev_fatal(dev, err, "reading %s/ring-ref", dir);
			return err;
		}
		nr_grefs = 1;
	} else {
		unsigned int i;

		if (ring_page_order > xen_blkif_max_ring_order) {
			err = -EINVAL;
			xenbus_dev_fatal(dev, err, "%s/request %d ring page order exceed max:%d",
					 dir, ring_page_order,
					 xen_blkif_max_ring_order);
			return err;
		}

		nr_grefs = 1 << ring_page_order;
		for (i = 0; i < nr_grefs; i++) {
			char ring_ref_name[RINGREF_NAME_LEN];

			snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i);
			err = xenbus_scanf(XBT_NIL, dir, ring_ref_name,
					   "%u", &ring_ref[i]);
			if (err != 1) {
				err = -EINVAL;
				xenbus_dev_fatal(dev, err, "reading %s/%s",
						 dir, ring_ref_name);
				return err;
			}
		}
	}
	blkif->nr_ring_pages = nr_grefs;

	for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) {
		req = kzalloc(sizeof(*req), GFP_KERNEL);
		if (!req)
			goto fail;
		list_add_tail(&req->free_list, &ring->pending_free);
		for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) {
			req->segments[j] = kzalloc(sizeof(*req->segments[0]), GFP_KERNEL);
			if (!req->segments[j])
				goto fail;
		}
		for (j = 0; j < MAX_INDIRECT_PAGES; j++) {
			req->indirect_pages[j] = kzalloc(sizeof(*req->indirect_pages[0]),
							 GFP_KERNEL);
			if (!req->indirect_pages[j])
				goto fail;
		}
	}

	/* Map the shared frame, irq etc. */
	err = xen_blkif_map(ring, ring_ref, nr_grefs, evtchn);
	if (err) {
		xenbus_dev_fatal(dev, err, "mapping ring-ref port %u", evtchn);
		return err;
	}

	return 0;

fail:
	list_for_each_entry_safe(req, n, &ring->pending_free, free_list) {
		list_del(&req->free_list);
		for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) {
			if (!req->segments[j])
				break;
			kfree(req->segments[j]);
		}
		for (j = 0; j < MAX_INDIRECT_PAGES; j++) {
			if (!req->indirect_pages[j])
				break;
			kfree(req->indirect_pages[j]);
		}
		kfree(req);
	}
	return -ENOMEM;
}
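/*
 * Read the frontend's protocol, persistent-grant and multi-queue settings,
 * then read and map the ring references for every queue.
 */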
static int connect_ring(struct backend_info *be)
{
	struct xenbus_device *dev = be->dev;
	unsigned int pers_grants;
	char protocol[64] = "";
	int err, i;
	char *xspath;
	size_t xspathsize;
	const size_t xenstore_path_ext_size = 11; /* sufficient for "/queue-NNN" */
	unsigned int requested_num_queues = 0;

	pr_debug("%s %s\n", __func__, dev->otherend);

	be->blkif->blk_protocol = BLKIF_PROTOCOL_DEFAULT;
	err = xenbus_scanf(XBT_NIL, dev->otherend, "protocol",
			   "%63s", protocol);
	if (err <= 0)
		strcpy(protocol, "unspecified, assuming default");
	else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_NATIVE))
		be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
	else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_32))
		be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_32;
	else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_64))
		be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_64;
	else {
		xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol);
		return -ENOSYS;
	}
	err = xenbus_scanf(XBT_NIL, dev->otherend,
			   "feature-persistent", "%u", &pers_grants);
	if (err <= 0)
		pers_grants = 0;

	be->blkif->vbd.feature_gnt_persistent = pers_grants;
	be->blkif->vbd.overflow_max_grants = 0;

	/*
	 * Read the number of hardware queues from frontend.
	 */
	err = xenbus_scanf(XBT_NIL, dev->otherend, "multi-queue-num-queues",
			   "%u", &requested_num_queues);
	if (err < 0) {
		requested_num_queues = 1;
	} else {
		if (requested_num_queues > xenblk_max_queues
		    || requested_num_queues == 0) {
			/* Buggy or malicious guest. */
			xenbus_dev_fatal(dev, err,
					 "guest requested %u queues, exceeding the maximum of %u.",
					 requested_num_queues, xenblk_max_queues);
			return -ENOSYS;
		}
	}
	be->blkif->nr_rings = requested_num_queues;
	if (xen_blkif_alloc_rings(be->blkif))
		return -ENOMEM;

	pr_info("%s: using %d queues, protocol %d (%s) %s\n", dev->nodename,
		be->blkif->nr_rings, be->blkif->blk_protocol, protocol,
		pers_grants ? "persistent grants" : "");

	if (be->blkif->nr_rings == 1)
		return read_per_ring_refs(&be->blkif->rings[0], dev->otherend);
	else {
		xspathsize = strlen(dev->otherend) + xenstore_path_ext_size;
		xspath = kmalloc(xspathsize, GFP_KERNEL);
		if (!xspath) {
			xenbus_dev_fatal(dev, -ENOMEM, "reading ring references");
			return -ENOMEM;
		}

		for (i = 0; i < be->blkif->nr_rings; i++) {
			memset(xspath, 0, xspathsize);
			snprintf(xspath, xspathsize, "%s/queue-%u", dev->otherend, i);
			err = read_per_ring_refs(&be->blkif->rings[i], xspath);
			if (err) {
				kfree(xspath);
				return err;
			}
		}
		kfree(xspath);
	}
	return 0;
}
static const struct xenbus_device_id xen_blkbk_ids[] = {
	{ "vbd" },
	{ "" }
};

static struct xenbus_driver xen_blkbk_driver = {
	.ids = xen_blkbk_ids,
	.probe = xen_blkbk_probe,
	.remove = xen_blkbk_remove,
	.otherend_changed = frontend_changed
};

int xen_blkif_xenbus_init(void)
{
	return xenbus_register_backend(&xen_blkbk_driver);
}