dev.c

/*
 * Copyright (c) 2014 Christoph Hellwig.
 */
#include <linux/sunrpc/svc.h>
#include <linux/blkdev.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_xdr.h>

#include "blocklayout.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS_LD
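
/*
 * Release a device tree: recurse into any child devices, then drop the
 * reference on a leaf's underlying block device.
 */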
static void
bl_free_device(struct pnfs_block_dev *dev)
{
	if (dev->nr_children) {
		int i;

		for (i = 0; i < dev->nr_children; i++)
			bl_free_device(&dev->children[i]);
		kfree(dev->children);
	} else {
		if (dev->bdev)
			blkdev_put(dev->bdev, FMODE_READ);
	}
}
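
/*
 * Tear down the pnfs_block_dev embedded in a deviceid node and free the
 * node after an RCU grace period.
 */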
void
bl_free_deviceid_node(struct nfs4_deviceid_node *d)
{
	struct pnfs_block_dev *dev =
		container_of(d, struct pnfs_block_dev, node);

	bl_free_device(dev);
	kfree_rcu(dev, node.rcu);
}
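
/*
 * Decode one volume description from the device information XDR stream
 * into a pnfs_block_volume (simple, slice, concat or stripe).
 */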
static int
nfs4_block_decode_volume(struct xdr_stream *xdr, struct pnfs_block_volume *b)
{
	__be32 *p;
	int i;

	p = xdr_inline_decode(xdr, 4);
	if (!p)
		return -EIO;
	b->type = be32_to_cpup(p++);

	switch (b->type) {
	case PNFS_BLOCK_VOLUME_SIMPLE:
		p = xdr_inline_decode(xdr, 4);
		if (!p)
			return -EIO;
		b->simple.nr_sigs = be32_to_cpup(p++);
		if (!b->simple.nr_sigs) {
			dprintk("no signature\n");
			return -EIO;
		}

		b->simple.len = 4 + 4;
		for (i = 0; i < b->simple.nr_sigs; i++) {
			p = xdr_inline_decode(xdr, 8 + 4);
			if (!p)
				return -EIO;
			p = xdr_decode_hyper(p, &b->simple.sigs[i].offset);
			b->simple.sigs[i].sig_len = be32_to_cpup(p++);

			p = xdr_inline_decode(xdr, b->simple.sigs[i].sig_len);
			if (!p)
				return -EIO;
			memcpy(&b->simple.sigs[i].sig, p,
				b->simple.sigs[i].sig_len);

			b->simple.len += 8 + 4 + b->simple.sigs[i].sig_len;
		}
		break;
	case PNFS_BLOCK_VOLUME_SLICE:
		p = xdr_inline_decode(xdr, 8 + 8 + 4);
		if (!p)
			return -EIO;
		p = xdr_decode_hyper(p, &b->slice.start);
		p = xdr_decode_hyper(p, &b->slice.len);
		b->slice.volume = be32_to_cpup(p++);
		break;
	case PNFS_BLOCK_VOLUME_CONCAT:
		p = xdr_inline_decode(xdr, 4);
		if (!p)
			return -EIO;
		b->concat.volumes_count = be32_to_cpup(p++);

		p = xdr_inline_decode(xdr, b->concat.volumes_count * 4);
		if (!p)
			return -EIO;
		for (i = 0; i < b->concat.volumes_count; i++)
			b->concat.volumes[i] = be32_to_cpup(p++);
		break;
	case PNFS_BLOCK_VOLUME_STRIPE:
		p = xdr_inline_decode(xdr, 8 + 4);
		if (!p)
			return -EIO;
		p = xdr_decode_hyper(p, &b->stripe.chunk_size);
		b->stripe.volumes_count = be32_to_cpup(p++);

		p = xdr_inline_decode(xdr, b->stripe.volumes_count * 4);
		if (!p)
			return -EIO;
		for (i = 0; i < b->stripe.volumes_count; i++)
			b->stripe.volumes[i] = be32_to_cpup(p++);
		break;
	default:
		dprintk("unknown volume type!\n");
		return -EIO;
	}

	return 0;
}
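
/*
 * A simple volume maps 1:1 onto its block device.
 */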
static bool bl_map_simple(struct pnfs_block_dev *dev, u64 offset,
		struct pnfs_block_dev_map *map)
{
	map->start = dev->start;
	map->len = dev->len;
	map->disk_offset = dev->disk_offset;
	map->bdev = dev->bdev;
	return true;
}
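
/*
 * Concatenation: find the child device whose range covers the offset and
 * let it perform the mapping.
 */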
static bool bl_map_concat(struct pnfs_block_dev *dev, u64 offset,
		struct pnfs_block_dev_map *map)
{
	int i;

	for (i = 0; i < dev->nr_children; i++) {
		struct pnfs_block_dev *child = &dev->children[i];

		if (child->start > offset ||
		    child->start + child->len <= offset)
			continue;

		child->map(child, offset - child->start, map);
		return true;
	}

	dprintk("%s: ran off loop!\n", __func__);
	return false;
}
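
/*
 * Striping: pick the child holding the chunk for this offset and translate
 * the offset into that child's on-disk offset.
 */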
static bool bl_map_stripe(struct pnfs_block_dev *dev, u64 offset,
		struct pnfs_block_dev_map *map)
{
	struct pnfs_block_dev *child;
	u64 chunk;
	u32 chunk_idx;
	u64 disk_offset;

	chunk = div_u64(offset, dev->chunk_size);
	div_u64_rem(chunk, dev->nr_children, &chunk_idx);

	if (chunk_idx > dev->nr_children) {
		dprintk("%s: invalid chunk idx %d (%lld/%lld)\n",
			__func__, chunk_idx, offset, dev->chunk_size);
		/* error, should not happen */
		return false;
	}

	/* truncate offset to the beginning of the stripe */
	offset = chunk * dev->chunk_size;

	/* disk offset of the stripe */
	disk_offset = div_u64(offset, dev->nr_children);

	child = &dev->children[chunk_idx];
	child->map(child, disk_offset, map);

	map->start += offset;
	map->disk_offset += disk_offset;
	map->len = dev->chunk_size;
	return true;
}

static int
bl_parse_deviceid(struct nfs_server *server, struct pnfs_block_dev *d,
		struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask);
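
/*
 * Simple volume: resolve the signature to a dev_t via the userspace
 * device-mapping upcall and open the block device read-only.
 */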
static int
bl_parse_simple(struct nfs_server *server, struct pnfs_block_dev *d,
		struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
{
	struct pnfs_block_volume *v = &volumes[idx];
	dev_t dev;

	dev = bl_resolve_deviceid(server, v, gfp_mask);
	if (!dev)
		return -EIO;

	d->bdev = blkdev_get_by_dev(dev, FMODE_READ, NULL);
	if (IS_ERR(d->bdev)) {
		printk(KERN_WARNING "pNFS: failed to open device %d:%d (%ld)\n",
			MAJOR(dev), MINOR(dev), PTR_ERR(d->bdev));
		return PTR_ERR(d->bdev);
	}

	d->len = i_size_read(d->bdev->bd_inode);
	d->map = bl_map_simple;

	printk(KERN_INFO "pNFS: using block device %s\n",
		d->bdev->bd_disk->disk_name);
	return 0;
}
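
/*
 * Slice: parse the underlying volume, then apply the slice's start offset
 * and length.
 */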
static int
bl_parse_slice(struct nfs_server *server, struct pnfs_block_dev *d,
		struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
{
	struct pnfs_block_volume *v = &volumes[idx];
	int ret;

	ret = bl_parse_deviceid(server, d, volumes, v->slice.volume, gfp_mask);
	if (ret)
		return ret;

	d->disk_offset = v->slice.start;
	d->len = v->slice.len;
	return 0;
}
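
/*
 * Concatenation: parse each child volume and lay them out back to back.
 */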
static int
bl_parse_concat(struct nfs_server *server, struct pnfs_block_dev *d,
		struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
{
	struct pnfs_block_volume *v = &volumes[idx];
	u64 len = 0;
	int ret, i;

	d->children = kcalloc(v->concat.volumes_count,
			sizeof(struct pnfs_block_dev), GFP_KERNEL);
	if (!d->children)
		return -ENOMEM;

	for (i = 0; i < v->concat.volumes_count; i++) {
		ret = bl_parse_deviceid(server, &d->children[i],
				volumes, v->concat.volumes[i], gfp_mask);
		if (ret)
			return ret;

		d->nr_children++;
		d->children[i].start += len;
		len += d->children[i].len;
	}

	d->len = len;
	d->map = bl_map_concat;
	return 0;
}
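
/*
 * Stripe set: parse each child volume and record the chunk size used to
 * spread data across them.
 */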
static int
bl_parse_stripe(struct nfs_server *server, struct pnfs_block_dev *d,
		struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
{
	struct pnfs_block_volume *v = &volumes[idx];
	u64 len = 0;
	int ret, i;

	d->children = kcalloc(v->stripe.volumes_count,
			sizeof(struct pnfs_block_dev), GFP_KERNEL);
	if (!d->children)
		return -ENOMEM;

	for (i = 0; i < v->stripe.volumes_count; i++) {
		ret = bl_parse_deviceid(server, &d->children[i],
				volumes, v->stripe.volumes[i], gfp_mask);
		if (ret)
			return ret;

		d->nr_children++;
		len += d->children[i].len;
	}

	d->len = len;
	d->chunk_size = v->stripe.chunk_size;
	d->map = bl_map_stripe;
	return 0;
}
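
/*
 * Dispatch on the volume type; called recursively for compound volumes.
 */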
static int
bl_parse_deviceid(struct nfs_server *server, struct pnfs_block_dev *d,
		struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
{
	switch (volumes[idx].type) {
	case PNFS_BLOCK_VOLUME_SIMPLE:
		return bl_parse_simple(server, d, volumes, idx, gfp_mask);
	case PNFS_BLOCK_VOLUME_SLICE:
		return bl_parse_slice(server, d, volumes, idx, gfp_mask);
	case PNFS_BLOCK_VOLUME_CONCAT:
		return bl_parse_concat(server, d, volumes, idx, gfp_mask);
	case PNFS_BLOCK_VOLUME_STRIPE:
		return bl_parse_stripe(server, d, volumes, idx, gfp_mask);
	default:
		dprintk("unsupported volume type: %d\n", volumes[idx].type);
		return -EIO;
	}
}
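
/*
 * Decode the volume array from a GETDEVICEINFO reply and build the device
 * tree, rooted at the last volume in the array.
 */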
struct nfs4_deviceid_node *
bl_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev,
		gfp_t gfp_mask)
{
	struct nfs4_deviceid_node *node = NULL;
	struct pnfs_block_volume *volumes;
	struct pnfs_block_dev *top;
	struct xdr_stream xdr;
	struct xdr_buf buf;
	struct page *scratch;
	int nr_volumes, ret, i;
	__be32 *p;

	scratch = alloc_page(gfp_mask);
	if (!scratch)
		goto out;

	xdr_init_decode_pages(&xdr, &buf, pdev->pages, pdev->pglen);
	xdr_set_scratch_buffer(&xdr, page_address(scratch), PAGE_SIZE);

	p = xdr_inline_decode(&xdr, sizeof(__be32));
	if (!p)
		goto out_free_scratch;
	nr_volumes = be32_to_cpup(p++);

	volumes = kcalloc(nr_volumes, sizeof(struct pnfs_block_volume),
			  gfp_mask);
	if (!volumes)
		goto out_free_scratch;

	for (i = 0; i < nr_volumes; i++) {
		ret = nfs4_block_decode_volume(&xdr, &volumes[i]);
		if (ret < 0)
			goto out_free_volumes;
	}

	top = kzalloc(sizeof(*top), gfp_mask);
	if (!top)
		goto out_free_volumes;

	ret = bl_parse_deviceid(server, top, volumes, nr_volumes - 1, gfp_mask);
	if (ret) {
		bl_free_device(top);
		kfree(top);
		goto out_free_volumes;
	}

	node = &top->node;
	nfs4_init_deviceid_node(node, server, &pdev->dev_id);

out_free_volumes:
	kfree(volumes);
out_free_scratch:
	__free_page(scratch);
out:
	return node;
}