cleancache.c

/*
 * Cleancache frontend
 *
 * This code provides the generic "frontend" layer to call a matching
 * "backend" driver implementation of cleancache.  See
 * Documentation/vm/cleancache.rst for more information.
 *
 * Copyright (C) 2009-2010 Oracle Corp. All rights reserved.
 * Author: Dan Magenheimer
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/exportfs.h>
#include <linux/mm.h>
#include <linux/debugfs.h>
#include <linux/cleancache.h>

/*
 * cleancache_ops is set by cleancache_register_ops to contain the pointers
 * to the cleancache "backend" implementation functions.
 */
static const struct cleancache_ops *cleancache_ops __read_mostly;
/*
 * Counters available via /sys/kernel/debug/cleancache (if debugfs is
 * properly configured).  These are for information only so are not
 * protected against increment races.
 */
static u64 cleancache_succ_gets;
static u64 cleancache_failed_gets;
static u64 cleancache_puts;
static u64 cleancache_invalidates;
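
/*
 * Callback passed to iterate_supers() in cleancache_register_ops() below.
 * For a super block whose cleancache_poolid still records that no backend
 * was available when it was mounted, perform the deferred (local or shared)
 * pool initialization now that a backend has registered.
 */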
static void cleancache_register_ops_sb(struct super_block *sb, void *unused)
{
	switch (sb->cleancache_poolid) {
	case CLEANCACHE_NO_BACKEND:
		__cleancache_init_fs(sb);
		break;
	case CLEANCACHE_NO_BACKEND_SHARED:
		__cleancache_init_shared_fs(sb);
		break;
	}
}

/*
 * Register operations for cleancache. Returns 0 on success.
 */
int cleancache_register_ops(const struct cleancache_ops *ops)
{
	if (cmpxchg(&cleancache_ops, NULL, ops))
		return -EBUSY;

	/*
	 * A cleancache backend can be built as a module and hence loaded after
	 * a cleancache enabled filesystem has called cleancache_init_fs. To
	 * handle such a scenario, here we call ->init_fs or ->init_shared_fs
	 * for each active super block. To differentiate between local and
	 * shared filesystems, we temporarily initialize sb->cleancache_poolid
	 * to CLEANCACHE_NO_BACKEND or CLEANCACHE_NO_BACKEND_SHARED
	 * respectively in case there is no backend registered at the time
	 * cleancache_init_fs or cleancache_init_shared_fs is called.
	 *
	 * Since filesystems can be mounted concurrently with cleancache
	 * backend registration, we have to be careful to guarantee that all
	 * cleancache enabled filesystems that have been mounted by the time
	 * cleancache_register_ops is called have got, and all mounted later
	 * will get, a cleancache_poolid. This is assured by the following
	 * statements tied together:
	 *
	 * a) iterate_supers skips only those super blocks that have started
	 *    ->kill_sb
	 *
	 * b) if iterate_supers encounters a super block that has not finished
	 *    ->mount yet, it waits until it is finished
	 *
	 * c) cleancache_init_fs is called from ->mount and
	 *    cleancache_invalidate_fs is called from ->kill_sb
	 *
	 * d) we call iterate_supers after cleancache_ops has been set
	 *
	 * From a) it follows that if iterate_supers skips a super block, then
	 * either the super block is already dead, in which case we do not need
	 * to bother initializing cleancache for it, or it was mounted after we
	 * initiated iterate_supers. In the latter case, it must have seen
	 * cleancache_ops set according to d) and initialized cleancache from
	 * ->mount by itself according to c). This proves that we call
	 * ->init_fs at least once for each active super block.
	 *
	 * From b) and c) it follows that if iterate_supers encounters a super
	 * block that has already started ->init_fs, it will wait until ->mount
	 * and hence ->init_fs has finished, then check cleancache_poolid, see
	 * that it has already been set and therefore do nothing. This proves
	 * that we call ->init_fs no more than once for each super block.
	 *
	 * Combined together, the last two paragraphs prove the function's
	 * correctness.
	 *
	 * Note that various cleancache callbacks may proceed before this
	 * function is called or even concurrently with it, but since
	 * CLEANCACHE_NO_BACKEND is negative, they will all result in a noop
	 * until the corresponding ->init_fs has been actually called and
	 * cleancache_ops has been set.
	 */
	iterate_supers(cleancache_register_ops_sb, NULL);
	return 0;
}
EXPORT_SYMBOL(cleancache_register_ops);
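
/*
 * Illustrative sketch (not taken from a real backend): a backend, typically
 * built as a module, fills in a struct cleancache_ops with its callbacks
 * and hands it to cleancache_register_ops() from its init routine:
 *
 *	static const struct cleancache_ops example_cleancache_ops = {
 *		.init_fs		= example_init_fs,
 *		.init_shared_fs		= example_init_shared_fs,
 *		.get_page		= example_get_page,
 *		.put_page		= example_put_page,
 *		.invalidate_page	= example_invalidate_page,
 *		.invalidate_inode	= example_invalidate_inode,
 *		.invalidate_fs		= example_invalidate_fs,
 *	};
 *
 *	err = cleancache_register_ops(&example_cleancache_ops);
 *
 * The example_* callbacks are hypothetical.  Only one backend can be
 * registered at a time; a second registration fails with -EBUSY.
 */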

/* Called by a cleancache-enabled filesystem at time of mount */
void __cleancache_init_fs(struct super_block *sb)
{
	int pool_id = CLEANCACHE_NO_BACKEND;

	if (cleancache_ops) {
		pool_id = cleancache_ops->init_fs(PAGE_SIZE);
		if (pool_id < 0)
			pool_id = CLEANCACHE_NO_POOL;
	}
	sb->cleancache_poolid = pool_id;
}
EXPORT_SYMBOL(__cleancache_init_fs);

/* Called by a cleancache-enabled clustered filesystem at time of mount */
void __cleancache_init_shared_fs(struct super_block *sb)
{
	int pool_id = CLEANCACHE_NO_BACKEND_SHARED;

	if (cleancache_ops) {
		pool_id = cleancache_ops->init_shared_fs(&sb->s_uuid, PAGE_SIZE);
		if (pool_id < 0)
			pool_id = CLEANCACHE_NO_POOL;
	}
	sb->cleancache_poolid = pool_id;
}
EXPORT_SYMBOL(__cleancache_init_shared_fs);
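
/*
 * Note: filesystems reach the two __cleancache_init_* functions above
 * through the cleancache_init_fs()/cleancache_init_shared_fs() wrappers in
 * include/linux/cleancache.h (no-ops when CONFIG_CLEANCACHE is not set).
 * The resulting pool id is stored in sb->cleancache_poolid and keys all
 * later cleancache operations against this super block.
 */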

/*
 * If the filesystem uses exportable filehandles, use the filehandle as
 * the key, else use the inode number.
 */
static int cleancache_get_key(struct inode *inode,
			      struct cleancache_filekey *key)
{
	int (*fhfn)(struct inode *, __u32 *fh, int *, struct inode *);
	int len = 0, maxlen = CLEANCACHE_KEY_MAX;
	struct super_block *sb = inode->i_sb;

	key->u.ino = inode->i_ino;
	if (sb->s_export_op != NULL) {
		fhfn = sb->s_export_op->encode_fh;
		if (fhfn) {
			len = (*fhfn)(inode, &key->u.fh[0], &maxlen, NULL);
			if (len <= FILEID_ROOT || len == FILEID_INVALID)
				return -1;
			if (maxlen > CLEANCACHE_KEY_MAX)
				return -1;
		}
	}
	return 0;
}

/*
 * "Get" data from cleancache associated with the poolid/inode/index
 * that were specified when the data was put to cleancache and, if
 * successful, use it to fill the specified page with data and return 0.
 * If the get fails, the page frame is left unchanged and -1 is returned.
 * Page must be locked by caller.
 *
 * The function has two checks before any action is taken - whether
 * a backend is registered and whether the sb->cleancache_poolid
 * is correct.
 */
int __cleancache_get_page(struct page *page)
{
	int ret = -1;
	int pool_id;
	struct cleancache_filekey key = { .u.key = { 0 } };

	if (!cleancache_ops) {
		cleancache_failed_gets++;
		goto out;
	}

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	pool_id = page->mapping->host->i_sb->cleancache_poolid;
	if (pool_id < 0)
		goto out;

	if (cleancache_get_key(page->mapping->host, &key) < 0)
		goto out;

	ret = cleancache_ops->get_page(pool_id, key, page->index, page);
	if (ret == 0)
		cleancache_succ_gets++;
	else
		cleancache_failed_gets++;
out:
	return ret;
}
EXPORT_SYMBOL(__cleancache_get_page);
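
/*
 * Typical use (from outside this file): a filesystem read path consults
 * cleancache through the cleancache_get_page() wrapper before issuing disk
 * I/O; on success the locked page has already been filled and the read can
 * be skipped, otherwise the read proceeds as usual.
 */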

/*
 * "Put" data from a page to cleancache and associate it with the
 * (previously-obtained per-filesystem) poolid and the page's
 * inode and page index.  Page must be locked.  Note that a put_page
 * always "succeeds", though a subsequent get_page may succeed or fail.
 *
 * The function has two checks before any action is taken - whether
 * a backend is registered and whether the sb->cleancache_poolid
 * is correct.
 */
void __cleancache_put_page(struct page *page)
{
	int pool_id;
	struct cleancache_filekey key = { .u.key = { 0 } };

	if (!cleancache_ops) {
		cleancache_puts++;
		return;
	}

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	pool_id = page->mapping->host->i_sb->cleancache_poolid;
	if (pool_id >= 0 &&
	    cleancache_get_key(page->mapping->host, &key) >= 0) {
		cleancache_ops->put_page(pool_id, key, page->index, page);
		cleancache_puts++;
	}
}
EXPORT_SYMBOL(__cleancache_put_page);
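
/*
 * Typical use (from outside this file): the page cache offers a clean,
 * uptodate page to cleancache through the cleancache_put_page() wrapper as
 * the page is being removed from the page cache, so that its contents may
 * later be restored by a successful get.
 */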

/*
 * Invalidate any data from cleancache associated with the poolid and the
 * page's inode and page index so that a subsequent "get" will fail.
 *
 * The function has two checks before any action is taken - whether
 * a backend is registered and whether the sb->cleancache_poolid
 * is correct.
 */
void __cleancache_invalidate_page(struct address_space *mapping,
				  struct page *page)
{
	/* careful... page->mapping is NULL sometimes when this is called */
	int pool_id = mapping->host->i_sb->cleancache_poolid;
	struct cleancache_filekey key = { .u.key = { 0 } };

	if (!cleancache_ops)
		return;

	if (pool_id >= 0) {
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		if (cleancache_get_key(mapping->host, &key) >= 0) {
			cleancache_ops->invalidate_page(pool_id,
							key, page->index);
			cleancache_invalidates++;
		}
	}
}
EXPORT_SYMBOL(__cleancache_invalidate_page);
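
/*
 * Typical use (from outside this file): the page cache calls the
 * cleancache_invalidate_page() wrapper when a page is removed without being
 * offered to cleancache (e.g. because it is not uptodate), so a later get
 * cannot return stale data.
 */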

/*
 * Invalidate all data from cleancache associated with the poolid and the
 * mapping's inode so that all subsequent gets to this poolid/inode
 * will fail.
 *
 * The function has two checks before any action is taken - whether
 * a backend is registered and whether the sb->cleancache_poolid
 * is correct.
 */
void __cleancache_invalidate_inode(struct address_space *mapping)
{
	int pool_id = mapping->host->i_sb->cleancache_poolid;
	struct cleancache_filekey key = { .u.key = { 0 } };

	if (!cleancache_ops)
		return;

	if (pool_id >= 0 && cleancache_get_key(mapping->host, &key) >= 0)
		cleancache_ops->invalidate_inode(pool_id, key);
}
EXPORT_SYMBOL(__cleancache_invalidate_inode);
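
/*
 * Typical use (from outside this file): truncation and inode eviction paths
 * call the cleancache_invalidate_inode() wrapper so that any pages the
 * inode may still have in cleancache are dropped in one operation.
 */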

/*
 * Called by any cleancache-enabled filesystem at time of unmount;
 * note that pool_id is surrendered and may be returned by a subsequent
 * cleancache_init_fs or cleancache_init_shared_fs.
 */
void __cleancache_invalidate_fs(struct super_block *sb)
{
	int pool_id;

	pool_id = sb->cleancache_poolid;
	sb->cleancache_poolid = CLEANCACHE_NO_POOL;

	if (cleancache_ops && pool_id >= 0)
		cleancache_ops->invalidate_fs(pool_id);
}
EXPORT_SYMBOL(__cleancache_invalidate_fs);

static int __init init_cleancache(void)
{
#ifdef CONFIG_DEBUG_FS
	struct dentry *root = debugfs_create_dir("cleancache", NULL);

	if (root == NULL)
		return -ENXIO;
	debugfs_create_u64("succ_gets", 0444, root, &cleancache_succ_gets);
	debugfs_create_u64("failed_gets", 0444, root, &cleancache_failed_gets);
	debugfs_create_u64("puts", 0444, root, &cleancache_puts);
	debugfs_create_u64("invalidates", 0444, root, &cleancache_invalidates);
#endif
	return 0;
}
module_init(init_cleancache)