/* drivers/nvdimm/claim.c */
  1. /*
  2. * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
  3. *
  4. * This program is free software; you can redistribute it and/or modify
  5. * it under the terms of version 2 of the GNU General Public License as
  6. * published by the Free Software Foundation.
  7. *
  8. * This program is distributed in the hope that it will be useful, but
  9. * WITHOUT ANY WARRANTY; without even the implied warranty of
  10. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  11. * General Public License for more details.
  12. */
  13. #include <linux/device.h>
  14. #include <linux/sizes.h>
  15. #include <linux/pmem.h>
  16. #include "nd-core.h"
  17. #include "pfn.h"
  18. #include "btt.h"
  19. #include "nd.h"
/*
 * __nd_detach_ndns - sever an established claim on a namespace
 * @dev: the claiming device (btt/pfn/dax) giving up the namespace
 * @_ndns: the claimer's namespace slot, cleared on return
 *
 * Caller must hold the namespace device lock (asserted below) and @dev
 * must currently own the claim.  Drops the namespace reference taken
 * when the claim was established.
 */
void __nd_detach_ndns(struct device *dev, struct nd_namespace_common **_ndns)
{
	struct nd_namespace_common *ndns = *_ndns;

	/* sanity: locked namespace, and the claim really belongs to @dev */
	dev_WARN_ONCE(dev, !mutex_is_locked(&ndns->dev.mutex)
			|| ndns->claim != dev,
			"%s: invalid claim\n", __func__);
	ndns->claim = NULL;
	*_ndns = NULL;
	put_device(&ndns->dev);
}
  30. void nd_detach_ndns(struct device *dev,
  31. struct nd_namespace_common **_ndns)
  32. {
  33. struct nd_namespace_common *ndns = *_ndns;
  34. if (!ndns)
  35. return;
  36. get_device(&ndns->dev);
  37. device_lock(&ndns->dev);
  38. __nd_detach_ndns(dev, _ndns);
  39. device_unlock(&ndns->dev);
  40. put_device(&ndns->dev);
  41. }
/*
 * __nd_attach_ndns - establish a claim on a namespace
 * @dev: the claiming device (btt/pfn/dax)
 * @attach: namespace to claim
 * @_ndns: the claimer's namespace slot, set to @attach on success
 *
 * Caller must hold @attach's device lock (asserted below) and the slot
 * must be empty.  Takes a reference on the namespace that is dropped by
 * __nd_detach_ndns().  Returns false if the namespace is already claimed.
 */
bool __nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
		struct nd_namespace_common **_ndns)
{
	if (attach->claim)
		return false;

	/* sanity: locked namespace, and the claimer's slot is still empty */
	dev_WARN_ONCE(dev, !mutex_is_locked(&attach->dev.mutex)
			|| *_ndns,
			"%s: invalid claim\n", __func__);
	attach->claim = dev;
	*_ndns = attach;
	get_device(&attach->dev);
	return true;
}
  55. bool nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
  56. struct nd_namespace_common **_ndns)
  57. {
  58. bool claimed;
  59. device_lock(&attach->dev);
  60. claimed = __nd_attach_ndns(dev, attach, _ndns);
  61. device_unlock(&attach->dev);
  62. return claimed;
  63. }
  64. static int namespace_match(struct device *dev, void *data)
  65. {
  66. char *name = data;
  67. return strcmp(name, dev_name(dev)) == 0;
  68. }
  69. static bool is_idle(struct device *dev, struct nd_namespace_common *ndns)
  70. {
  71. struct nd_region *nd_region = to_nd_region(dev->parent);
  72. struct device *seed = NULL;
  73. if (is_nd_btt(dev))
  74. seed = nd_region->btt_seed;
  75. else if (is_nd_pfn(dev))
  76. seed = nd_region->pfn_seed;
  77. else if (is_nd_dax(dev))
  78. seed = nd_region->dax_seed;
  79. if (seed == dev || ndns || dev->driver)
  80. return false;
  81. return true;
  82. }
  83. struct nd_pfn *to_nd_pfn_safe(struct device *dev)
  84. {
  85. /*
  86. * pfn device attributes are re-used by dax device instances, so we
  87. * need to be careful to correct device-to-nd_pfn conversion.
  88. */
  89. if (is_nd_pfn(dev))
  90. return to_nd_pfn(dev);
  91. if (is_nd_dax(dev)) {
  92. struct nd_dax *nd_dax = to_nd_dax(dev);
  93. return &nd_dax->nd_pfn;
  94. }
  95. WARN_ON(1);
  96. return NULL;
  97. }
/*
 * nd_detach_and_reset - release the namespace and destroy or reset @dev
 * @dev: the claiming device (btt/pfn/dax)
 * @_ndns: the claimer's namespace slot
 *
 * After nd_detach_ndns() the slot is NULL, so the *_ndns argument to
 * is_idle() below is always NULL at this point; idleness then hinges on
 * the seed and driver checks.  An idle device is unregistered outright,
 * otherwise its identifying configuration is reset in place.
 */
static void nd_detach_and_reset(struct device *dev,
		struct nd_namespace_common **_ndns)
{
	/* detach the namespace and destroy / reset the device */
	nd_detach_ndns(dev, _ndns);
	if (is_idle(dev, *_ndns)) {
		nd_device_unregister(dev, ND_ASYNC);
	} else if (is_nd_btt(dev)) {
		/* busy btt: clear its config so it can be re-provisioned */
		struct nd_btt *nd_btt = to_nd_btt(dev);

		nd_btt->lbasize = 0;
		kfree(nd_btt->uuid);
		nd_btt->uuid = NULL;
	} else if (is_nd_pfn(dev) || is_nd_dax(dev)) {
		/* busy pfn/dax: clear its config so it can be re-provisioned */
		struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);

		kfree(nd_pfn->uuid);
		nd_pfn->uuid = NULL;
		nd_pfn->mode = PFN_MODE_NONE;
	}
}
  117. ssize_t nd_namespace_store(struct device *dev,
  118. struct nd_namespace_common **_ndns, const char *buf,
  119. size_t len)
  120. {
  121. struct nd_namespace_common *ndns;
  122. struct device *found;
  123. char *name;
  124. if (dev->driver) {
  125. dev_dbg(dev, "%s: -EBUSY\n", __func__);
  126. return -EBUSY;
  127. }
  128. name = kstrndup(buf, len, GFP_KERNEL);
  129. if (!name)
  130. return -ENOMEM;
  131. strim(name);
  132. if (strncmp(name, "namespace", 9) == 0 || strcmp(name, "") == 0)
  133. /* pass */;
  134. else {
  135. len = -EINVAL;
  136. goto out;
  137. }
  138. ndns = *_ndns;
  139. if (strcmp(name, "") == 0) {
  140. nd_detach_and_reset(dev, _ndns);
  141. goto out;
  142. } else if (ndns) {
  143. dev_dbg(dev, "namespace already set to: %s\n",
  144. dev_name(&ndns->dev));
  145. len = -EBUSY;
  146. goto out;
  147. }
  148. found = device_find_child(dev->parent, name, namespace_match);
  149. if (!found) {
  150. dev_dbg(dev, "'%s' not found under %s\n", name,
  151. dev_name(dev->parent));
  152. len = -ENODEV;
  153. goto out;
  154. }
  155. ndns = to_ndns(found);
  156. if (__nvdimm_namespace_capacity(ndns) < SZ_16M) {
  157. dev_dbg(dev, "%s too small to host\n", name);
  158. len = -ENXIO;
  159. goto out_attach;
  160. }
  161. WARN_ON_ONCE(!is_nvdimm_bus_locked(dev));
  162. if (!nd_attach_ndns(dev, ndns, _ndns)) {
  163. dev_dbg(dev, "%s already claimed\n",
  164. dev_name(&ndns->dev));
  165. len = -EBUSY;
  166. }
  167. out_attach:
  168. put_device(&ndns->dev); /* from device_find_child */
  169. out:
  170. kfree(name);
  171. return len;
  172. }
/*
 * nd_sb_checksum: compute checksum for a generic info block
 *
 * Returns a fletcher64 checksum of everything in the given info block
 * except the last field (since that's where the checksum lives).
 */
u64 nd_sb_checksum(struct nd_gen_sb *nd_gen_sb)
{
	u64 sum;
	__le64 sum_save;

	/* the generic layout must exactly overlay the btt and pfn blocks */
	BUILD_BUG_ON(sizeof(struct btt_sb) != SZ_4K);
	BUILD_BUG_ON(sizeof(struct nd_pfn_sb) != SZ_4K);
	BUILD_BUG_ON(sizeof(struct nd_gen_sb) != SZ_4K);

	/* zero the checksum field while summing, then restore it */
	sum_save = nd_gen_sb->checksum;
	nd_gen_sb->checksum = 0;
	sum = nd_fletcher64(nd_gen_sb, sizeof(*nd_gen_sb), 1);
	nd_gen_sb->checksum = sum_save;
	return sum;
}
EXPORT_SYMBOL(nd_sb_checksum);
  193. static int nsio_rw_bytes(struct nd_namespace_common *ndns,
  194. resource_size_t offset, void *buf, size_t size, int rw)
  195. {
  196. struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
  197. if (unlikely(offset + size > nsio->size)) {
  198. dev_WARN_ONCE(&ndns->dev, 1, "request out of range\n");
  199. return -EFAULT;
  200. }
  201. if (rw == READ) {
  202. unsigned int sz_align = ALIGN(size + (offset & (512 - 1)), 512);
  203. if (unlikely(is_bad_pmem(&nsio->bb, offset / 512, sz_align)))
  204. return -EIO;
  205. return memcpy_from_pmem(buf, nsio->addr + offset, size);
  206. } else {
  207. memcpy_to_pmem(nsio->addr + offset, buf, size);
  208. nvdimm_flush(to_nd_region(ndns->dev.parent));
  209. }
  210. return 0;
  211. }
/*
 * devm_nsio_enable - map an io-namespace and wire up its access method
 * @dev: device to attach the devm resources to
 * @nsio: io-namespace to enable
 *
 * Reserves the namespace's memory region, installs nsio_rw_bytes() as
 * the rw_bytes method, seeds the badblocks list from the region, and
 * maps the range with pmem semantics.  All resources are devm-managed;
 * devm_nsio_disable() tears them down in reverse order.
 */
int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio)
{
	struct resource *res = &nsio->res;
	struct nd_namespace_common *ndns = &nsio->common;

	nsio->size = resource_size(res);
	if (!devm_request_mem_region(dev, res->start, resource_size(res),
				dev_name(dev))) {
		dev_warn(dev, "could not reserve region %pR\n", res);
		return -EBUSY;
	}

	ndns->rw_bytes = nsio_rw_bytes;
	if (devm_init_badblocks(dev, &nsio->bb))
		return -ENOMEM;
	nvdimm_badblocks_populate(to_nd_region(ndns->dev.parent), &nsio->bb,
			&nsio->res);

	nsio->addr = devm_memremap(dev, res->start, resource_size(res),
			ARCH_MEMREMAP_PMEM);
	/* devm_memremap() returns an ERR_PTR on failure */
	return PTR_ERR_OR_ZERO(nsio->addr);
}
EXPORT_SYMBOL_GPL(devm_nsio_enable);
/*
 * devm_nsio_disable - undo devm_nsio_enable()
 *
 * Releases the mapping, badblocks state, and region reservation in the
 * reverse of the order devm_nsio_enable() acquired them.
 */
void devm_nsio_disable(struct device *dev, struct nd_namespace_io *nsio)
{
	struct resource *res = &nsio->res;

	devm_memunmap(dev, nsio->addr);
	devm_exit_badblocks(dev, &nsio->bb);
	devm_release_mem_region(dev, res->start, resource_size(res));
}
EXPORT_SYMBOL_GPL(devm_nsio_disable);