cookie.c
/* netfs cookie management
 *
 * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * See Documentation/filesystems/caching/netfs-api.txt for more information on
 * the netfs API.
 */

#define FSCACHE_DEBUG_LEVEL COOKIE
#include <linux/module.h>
#include <linux/slab.h>
#include "internal.h"

struct kmem_cache *fscache_cookie_jar;

static atomic_t fscache_object_debug_id = ATOMIC_INIT(0);

static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie);
static int fscache_alloc_object(struct fscache_cache *cache,
                                struct fscache_cookie *cookie);
static int fscache_attach_object(struct fscache_cookie *cookie,
                                 struct fscache_object *object);

/*
 * initialise a cookie jar slab element prior to any use
 */
void fscache_cookie_init_once(void *_cookie)
{
        struct fscache_cookie *cookie = _cookie;

        memset(cookie, 0, sizeof(*cookie));
        spin_lock_init(&cookie->lock);
        spin_lock_init(&cookie->stores_lock);
        INIT_HLIST_HEAD(&cookie->backing_objects);
}

/*
 * request a cookie to represent an object (index, datafile, xattr, etc.)
 * - parent specifies the parent object
 *   - the top level index cookie for each netfs is stored in the fscache_netfs
 *     struct upon registration
 * - def points to the definition
 * - the netfs_data will be passed to the functions pointed to in *def
 * - all attached caches will be searched to see if they contain this object
 * - index objects aren't stored on disk until there's a dependent file that
 *   needs storing
 * - other objects are stored in a selected cache immediately, and all the
 *   indices forming the path to it are instantiated if necessary
 * - we never let on to the netfs about errors
 *   - we may set a negative cookie pointer, but that's okay
 */
struct fscache_cookie *__fscache_acquire_cookie(
        struct fscache_cookie *parent,
        const struct fscache_cookie_def *def,
        void *netfs_data,
        bool enable)
{
        struct fscache_cookie *cookie;

        BUG_ON(!def);

        _enter("{%s},{%s},%p,%u",
               parent ? (char *) parent->def->name : "<no-parent>",
               def->name, netfs_data, enable);

        fscache_stat(&fscache_n_acquires);

        /* if there's no parent cookie, then we don't create one here either */
        if (!parent) {
                fscache_stat(&fscache_n_acquires_null);
                _leave(" [no parent]");
                return NULL;
        }

        /* validate the definition */
        BUG_ON(!def->get_key);
        BUG_ON(!def->name[0]);

        BUG_ON(def->type == FSCACHE_COOKIE_TYPE_INDEX &&
               parent->def->type != FSCACHE_COOKIE_TYPE_INDEX);

        /* allocate and initialise a cookie */
        cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
        if (!cookie) {
                fscache_stat(&fscache_n_acquires_oom);
                _leave(" [ENOMEM]");
                return NULL;
        }

        atomic_set(&cookie->usage, 1);
        atomic_set(&cookie->n_children, 0);

        /* We keep the active count elevated until relinquishment to prevent an
         * attempt to wake up every time the object operations queue quiesces.
         */
        atomic_set(&cookie->n_active, 1);

        atomic_inc(&parent->usage);
        atomic_inc(&parent->n_children);

        cookie->def             = def;
        cookie->parent          = parent;
        cookie->netfs_data      = netfs_data;
        cookie->flags           = (1 << FSCACHE_COOKIE_NO_DATA_YET);

        /* radix tree insertion won't use the preallocation pool unless it's
         * told it may not wait */
        INIT_RADIX_TREE(&cookie->stores, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);

        switch (cookie->def->type) {
        case FSCACHE_COOKIE_TYPE_INDEX:
                fscache_stat(&fscache_n_cookie_index);
                break;
        case FSCACHE_COOKIE_TYPE_DATAFILE:
                fscache_stat(&fscache_n_cookie_data);
                break;
        default:
                fscache_stat(&fscache_n_cookie_special);
                break;
        }

        if (enable) {
                /* if the object is an index then we need do nothing more here
                 * - we create indices on disk when we need them as an index
                 * may exist in multiple caches */
                if (cookie->def->type != FSCACHE_COOKIE_TYPE_INDEX) {
                        if (fscache_acquire_non_index_cookie(cookie) == 0) {
                                set_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags);
                        } else {
                                atomic_dec(&parent->n_children);
                                __fscache_cookie_put(cookie);
                                fscache_stat(&fscache_n_acquires_nobufs);
                                _leave(" = NULL");
                                return NULL;
                        }
                } else {
                        set_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags);
                }
        }

        fscache_stat(&fscache_n_acquires_ok);
        _leave(" = %p", cookie);
        return cookie;
}
EXPORT_SYMBOL(__fscache_acquire_cookie);
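
/*
 * Illustrative sketch (not part of this file): a netfs would normally reach
 * __fscache_acquire_cookie() through the fscache_acquire_cookie() wrapper
 * described in Documentation/filesystems/caching/netfs-api.txt.  The "myfs"
 * cookie definition, helpers and fields below are hypothetical.
 *
 *        static const struct fscache_cookie_def myfs_file_cookie_def = {
 *                .name      = "myfs.file",
 *                .type      = FSCACHE_COOKIE_TYPE_DATAFILE,
 *                .get_key   = myfs_get_key,
 *                .get_attr  = myfs_get_attr,
 *                .get_aux   = myfs_get_aux,
 *                .check_aux = myfs_check_aux,
 *        };
 *
 *        mi->fscache = fscache_acquire_cookie(myfs_super->fscache,
 *                                             &myfs_file_cookie_def,
 *                                             mi, true);
 *
 * A NULL return is deliberately indistinguishable from success: the netfs
 * simply carries on uncached.
 */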

/*
 * Enable a cookie to permit it to accept new operations.
 */
void __fscache_enable_cookie(struct fscache_cookie *cookie,
                             bool (*can_enable)(void *data),
                             void *data)
{
        _enter("%p", cookie);

        wait_on_bit_lock(&cookie->flags, FSCACHE_COOKIE_ENABLEMENT_LOCK,
                         TASK_UNINTERRUPTIBLE);

        if (test_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags))
                goto out_unlock;

        if (can_enable && !can_enable(data)) {
                /* The netfs decided it didn't want to enable after all */
        } else if (cookie->def->type != FSCACHE_COOKIE_TYPE_INDEX) {
                /* Wait for outstanding disablement to complete */
                __fscache_wait_on_invalidate(cookie);

                if (fscache_acquire_non_index_cookie(cookie) == 0)
                        set_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags);
        } else {
                set_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags);
        }

out_unlock:
        clear_bit_unlock(FSCACHE_COOKIE_ENABLEMENT_LOCK, &cookie->flags);
        wake_up_bit(&cookie->flags, FSCACHE_COOKIE_ENABLEMENT_LOCK);
}
EXPORT_SYMBOL(__fscache_enable_cookie);
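
/*
 * Illustrative sketch (hypothetical netfs code): re-enabling caching on a
 * file that was previously disabled, e.g. when its last writer closes.  The
 * fscache_enable_cookie() wrapper and the myfs helpers shown are assumptions.
 *
 *        static bool myfs_can_enable(void *data)
 *        {
 *                struct myfs_inode *mi = data;
 *
 *                return atomic_read(&mi->writers) == 0;
 *        }
 *
 *        fscache_enable_cookie(mi->fscache, myfs_can_enable, mi);
 *
 * can_enable() runs with the enablement lock held, so the decision and the
 * ENABLED flag update are atomic with respect to other enable/disable callers.
 */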

/*
 * acquire a non-index cookie
 * - this must make sure the index chain is instantiated and instantiate the
 *   object representation too
 */
static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
{
        struct fscache_object *object;
        struct fscache_cache *cache;
        uint64_t i_size;
        int ret;

        _enter("");

        set_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);

        /* now we need to see whether the backing objects for this cookie yet
         * exist, if not there'll be nothing to search */
        down_read(&fscache_addremove_sem);

        if (list_empty(&fscache_cache_list)) {
                up_read(&fscache_addremove_sem);
                _leave(" = 0 [no caches]");
                return 0;
        }

        /* select a cache in which to store the object */
        cache = fscache_select_cache_for_object(cookie->parent);
        if (!cache) {
                up_read(&fscache_addremove_sem);
                fscache_stat(&fscache_n_acquires_no_cache);
                _leave(" = -ENOMEDIUM [no cache]");
                return -ENOMEDIUM;
        }

        _debug("cache %s", cache->tag->name);

        set_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);

        /* ask the cache to allocate objects for this cookie and its parent
         * chain */
        ret = fscache_alloc_object(cache, cookie);
        if (ret < 0) {
                up_read(&fscache_addremove_sem);
                _leave(" = %d", ret);
                return ret;
        }

        /* pass on how big the object we're caching is supposed to be */
        cookie->def->get_attr(cookie->netfs_data, &i_size);

        spin_lock(&cookie->lock);
        if (hlist_empty(&cookie->backing_objects)) {
                spin_unlock(&cookie->lock);
                goto unavailable;
        }

        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        fscache_set_store_limit(object, i_size);

        /* initiate the process of looking up all the objects in the chain
         * (done by fscache_initialise_object()) */
        fscache_raise_event(object, FSCACHE_OBJECT_EV_NEW_CHILD);

        spin_unlock(&cookie->lock);

        /* we may be required to wait for lookup to complete at this point */
        if (!fscache_defer_lookup) {
                _debug("non-deferred lookup %p", &cookie->flags);
                wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
                            TASK_UNINTERRUPTIBLE);
                _debug("complete");
                if (test_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags))
                        goto unavailable;
        }

        up_read(&fscache_addremove_sem);
        _leave(" = 0 [deferred]");
        return 0;

unavailable:
        up_read(&fscache_addremove_sem);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;
}

/*
 * recursively allocate cache object records for a cookie/cache combination
 * - caller must be holding the addremove sem
 */
static int fscache_alloc_object(struct fscache_cache *cache,
                                struct fscache_cookie *cookie)
{
        struct fscache_object *object;
        int ret;

        _enter("%p,%p{%s}", cache, cookie, cookie->def->name);

        spin_lock(&cookie->lock);
        hlist_for_each_entry(object, &cookie->backing_objects,
                             cookie_link) {
                if (object->cache == cache)
                        goto object_already_extant;
        }
        spin_unlock(&cookie->lock);

        /* ask the cache to allocate an object (we may end up with duplicate
         * objects at this stage, but we sort that out later) */
        fscache_stat(&fscache_n_cop_alloc_object);
        object = cache->ops->alloc_object(cache, cookie);
        fscache_stat_d(&fscache_n_cop_alloc_object);
        if (IS_ERR(object)) {
                fscache_stat(&fscache_n_object_no_alloc);
                ret = PTR_ERR(object);
                goto error;
        }

        fscache_stat(&fscache_n_object_alloc);

        object->debug_id = atomic_inc_return(&fscache_object_debug_id);

        _debug("ALLOC OBJ%x: %s {%lx}",
               object->debug_id, cookie->def->name, object->events);

        ret = fscache_alloc_object(cache, cookie->parent);
        if (ret < 0)
                goto error_put;

        /* only attach if we managed to allocate all we needed, otherwise
         * discard the object we just allocated and instead use the one
         * attached to the cookie */
        if (fscache_attach_object(cookie, object) < 0) {
                fscache_stat(&fscache_n_cop_put_object);
                cache->ops->put_object(object);
                fscache_stat_d(&fscache_n_cop_put_object);
        }

        _leave(" = 0");
        return 0;

object_already_extant:
        ret = -ENOBUFS;
        if (fscache_object_is_dying(object) ||
            fscache_cache_is_broken(object)) {
                spin_unlock(&cookie->lock);
                goto error;
        }
        spin_unlock(&cookie->lock);
        _leave(" = 0 [found]");
        return 0;

error_put:
        fscache_stat(&fscache_n_cop_put_object);
        cache->ops->put_object(object);
        fscache_stat_d(&fscache_n_cop_put_object);
error:
        _leave(" = %d", ret);
        return ret;
}

/*
 * attach a cache object to a cookie
 */
static int fscache_attach_object(struct fscache_cookie *cookie,
                                 struct fscache_object *object)
{
        struct fscache_object *p;
        struct fscache_cache *cache = object->cache;
        int ret;

        _enter("{%s},{OBJ%x}", cookie->def->name, object->debug_id);

        spin_lock(&cookie->lock);

        /* there may be multiple initial creations of this object, but we only
         * want one */
        ret = -EEXIST;
        hlist_for_each_entry(p, &cookie->backing_objects, cookie_link) {
                if (p->cache == object->cache) {
                        if (fscache_object_is_dying(p))
                                ret = -ENOBUFS;
                        goto cant_attach_object;
                }
        }

        /* pin the parent object */
        spin_lock_nested(&cookie->parent->lock, 1);
        hlist_for_each_entry(p, &cookie->parent->backing_objects,
                             cookie_link) {
                if (p->cache == object->cache) {
                        if (fscache_object_is_dying(p)) {
                                ret = -ENOBUFS;
                                spin_unlock(&cookie->parent->lock);
                                goto cant_attach_object;
                        }
                        object->parent = p;
                        spin_lock(&p->lock);
                        p->n_children++;
                        spin_unlock(&p->lock);
                        break;
                }
        }
        spin_unlock(&cookie->parent->lock);

        /* attach to the cache's object list */
        if (list_empty(&object->cache_link)) {
                spin_lock(&cache->object_list_lock);
                list_add(&object->cache_link, &cache->object_list);
                spin_unlock(&cache->object_list_lock);
        }

        /* attach to the cookie */
        object->cookie = cookie;
        atomic_inc(&cookie->usage);
        hlist_add_head(&object->cookie_link, &cookie->backing_objects);

        fscache_objlist_add(object);
        ret = 0;

cant_attach_object:
        spin_unlock(&cookie->lock);
        _leave(" = %d", ret);
        return ret;
}

/*
 * Invalidate an object.  Callable with spinlocks held.
 */
void __fscache_invalidate(struct fscache_cookie *cookie)
{
        struct fscache_object *object;

        _enter("{%s}", cookie->def->name);

        fscache_stat(&fscache_n_invalidates);

        /* Only permit invalidation of data files.  Invalidating an index will
         * require the caller to release all its attachments to the tree rooted
         * there, and if it's doing that, it may as well just retire the
         * cookie.
         */
        ASSERTCMP(cookie->def->type, ==, FSCACHE_COOKIE_TYPE_DATAFILE);

        /* We will be updating the cookie too. */
        BUG_ON(!cookie->def->get_aux);

        /* If there's an object, we tell the object state machine to handle the
         * invalidation on our behalf, otherwise there's nothing to do.
         */
        if (!hlist_empty(&cookie->backing_objects)) {
                spin_lock(&cookie->lock);

                if (fscache_cookie_enabled(cookie) &&
                    !hlist_empty(&cookie->backing_objects) &&
                    !test_and_set_bit(FSCACHE_COOKIE_INVALIDATING,
                                      &cookie->flags)) {
                        object = hlist_entry(cookie->backing_objects.first,
                                             struct fscache_object,
                                             cookie_link);
                        if (fscache_object_is_live(object))
                                fscache_raise_event(
                                        object, FSCACHE_OBJECT_EV_INVALIDATE);
                }

                spin_unlock(&cookie->lock);
        }

        _leave("");
}
EXPORT_SYMBOL(__fscache_invalidate);

/*
 * Wait for object invalidation to complete.
 */
void __fscache_wait_on_invalidate(struct fscache_cookie *cookie)
{
        _enter("%p", cookie);

        wait_on_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING,
                    TASK_UNINTERRUPTIBLE);

        _leave("");
}
EXPORT_SYMBOL(__fscache_wait_on_invalidate);
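
/*
 * Illustrative sketch (hypothetical netfs code): on noticing that the server's
 * copy of a file has changed under us, a netfs would typically invalidate the
 * cached data and, before issuing new reads or writes through the cache, wait
 * for the invalidation to finish via the wrappers assumed below.
 *
 *        if (myfs_data_changed_on_server(mi)) {
 *                fscache_invalidate(mi->fscache);
 *                fscache_wait_on_invalidate(mi->fscache);
 *        }
 */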

/*
 * update the index entries backing a cookie
 */
void __fscache_update_cookie(struct fscache_cookie *cookie)
{
        struct fscache_object *object;

        fscache_stat(&fscache_n_updates);

        if (!cookie) {
                fscache_stat(&fscache_n_updates_null);
                _leave(" [no cookie]");
                return;
        }

        _enter("{%s}", cookie->def->name);

        BUG_ON(!cookie->def->get_aux);

        spin_lock(&cookie->lock);

        if (fscache_cookie_enabled(cookie)) {
                /* update the index entry on disk in each cache backing this
                 * cookie.
                 */
                hlist_for_each_entry(object,
                                     &cookie->backing_objects, cookie_link) {
                        fscache_raise_event(object, FSCACHE_OBJECT_EV_UPDATE);
                }
        }

        spin_unlock(&cookie->lock);
        _leave("");
}
EXPORT_SYMBOL(__fscache_update_cookie);
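
/*
 * Illustrative sketch (hypothetical netfs code): after changing whatever the
 * netfs stores in its auxiliary data (e.g. the mtime/size consulted by its
 * check_aux method), it asks for the on-disk index entries to be refreshed.
 * The wrapper name follows netfs-api.txt; the myfs fields are assumptions.
 *
 *        mi->aux.mtime = inode->i_mtime;
 *        mi->aux.size  = i_size_read(inode);
 *        fscache_update_cookie(mi->fscache);
 */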

/*
 * Disable a cookie to stop it from accepting new requests from the netfs.
 */
void __fscache_disable_cookie(struct fscache_cookie *cookie, bool invalidate)
{
        struct fscache_object *object;
        bool awaken = false;

        _enter("%p,%u", cookie, invalidate);

        ASSERTCMP(atomic_read(&cookie->n_active), >, 0);

        if (atomic_read(&cookie->n_children) != 0) {
                pr_err("Cookie '%s' still has children\n",
                       cookie->def->name);
                BUG();
        }

        wait_on_bit_lock(&cookie->flags, FSCACHE_COOKIE_ENABLEMENT_LOCK,
                         TASK_UNINTERRUPTIBLE);
        if (!test_and_clear_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags))
                goto out_unlock_enable;

        /* If the cookie is being invalidated, wait for that to complete first
         * so that we can reuse the flag.
         */
        __fscache_wait_on_invalidate(cookie);

        /* Dispose of the backing objects */
        set_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags);

        spin_lock(&cookie->lock);
        if (!hlist_empty(&cookie->backing_objects)) {
                hlist_for_each_entry(object, &cookie->backing_objects, cookie_link) {
                        if (invalidate)
                                set_bit(FSCACHE_OBJECT_RETIRED, &object->flags);
                        clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
                        fscache_raise_event(object, FSCACHE_OBJECT_EV_KILL);
                }
        } else {
                if (test_and_clear_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags))
                        awaken = true;
        }
        spin_unlock(&cookie->lock);
        if (awaken)
                wake_up_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING);

        /* Wait for cessation of activity requiring access to the netfs (when
         * n_active reaches 0).  This makes sure outstanding reads and writes
         * have completed.
         */
        if (!atomic_dec_and_test(&cookie->n_active))
                wait_on_atomic_t(&cookie->n_active, fscache_wait_atomic_t,
                                 TASK_UNINTERRUPTIBLE);

        /* Make sure any pending writes are cancelled. */
        if (cookie->def->type != FSCACHE_COOKIE_TYPE_INDEX)
                fscache_invalidate_writes(cookie);

        /* Reset the cookie state if it wasn't relinquished */
        if (!test_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags)) {
                atomic_inc(&cookie->n_active);
                set_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
        }

out_unlock_enable:
        clear_bit_unlock(FSCACHE_COOKIE_ENABLEMENT_LOCK, &cookie->flags);
        wake_up_bit(&cookie->flags, FSCACHE_COOKIE_ENABLEMENT_LOCK);
        _leave("");
}
EXPORT_SYMBOL(__fscache_disable_cookie);
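
/*
 * Illustrative sketch (hypothetical netfs code): temporarily stopping caching
 * for a file, e.g. while it is open for writing, and discarding what is
 * cached.  The fscache_disable_cookie() wrapper is assumed; the second
 * argument asks for the backing objects to be invalidated as they are killed.
 *
 *        fscache_disable_cookie(mi->fscache, true);
 *
 * The call blocks until outstanding reads and writes against the cookie have
 * drained (n_active reaching zero), so it must not be made from a context
 * that itself holds the cookie active.
 */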

/*
 * release a cookie back to the cache
 * - the object will be marked as recyclable on disk if retire is true
 * - all dependents of this cookie must have already been unregistered
 *   (indices/files/pages)
 */
void __fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire)
{
        fscache_stat(&fscache_n_relinquishes);
        if (retire)
                fscache_stat(&fscache_n_relinquishes_retire);

        if (!cookie) {
                fscache_stat(&fscache_n_relinquishes_null);
                _leave(" [no cookie]");
                return;
        }

        _enter("%p{%s,%p,%d},%d",
               cookie, cookie->def->name, cookie->netfs_data,
               atomic_read(&cookie->n_active), retire);

        /* No further netfs-accessing operations on this cookie permitted */
        set_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags);

        __fscache_disable_cookie(cookie, retire);

        /* Clear pointers back to the netfs */
        cookie->netfs_data      = NULL;
        cookie->def             = NULL;
        BUG_ON(cookie->stores.rnode);

        if (cookie->parent) {
                ASSERTCMP(atomic_read(&cookie->parent->usage), >, 0);
                ASSERTCMP(atomic_read(&cookie->parent->n_children), >, 0);
                atomic_dec(&cookie->parent->n_children);
        }

        /* Dispose of the netfs's link to the cookie */
        ASSERTCMP(atomic_read(&cookie->usage), >, 0);
        fscache_cookie_put(cookie);

        _leave("");
}
EXPORT_SYMBOL(__fscache_relinquish_cookie);
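
/*
 * Illustrative sketch (hypothetical netfs code): dropping the cookie when the
 * netfs inode is evicted.  Passing retire == true marks the on-disk object as
 * recyclable; passing false keeps the data for a future acquisition.  The
 * wrapper name follows netfs-api.txt; the myfs helpers are assumptions.
 *
 *        static void myfs_evict_inode(struct inode *inode)
 *        {
 *                struct myfs_inode *mi = MYFS_I(inode);
 *
 *                fscache_relinquish_cookie(mi->fscache, false);
 *                mi->fscache = NULL;
 *        }
 */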

/*
 * destroy a cookie
 */
void __fscache_cookie_put(struct fscache_cookie *cookie)
{
        struct fscache_cookie *parent;

        _enter("%p", cookie);

        for (;;) {
                _debug("FREE COOKIE %p", cookie);
                parent = cookie->parent;
                BUG_ON(!hlist_empty(&cookie->backing_objects));
                kmem_cache_free(fscache_cookie_jar, cookie);

                if (!parent)
                        break;

                cookie = parent;
                BUG_ON(atomic_read(&cookie->usage) <= 0);
                if (!atomic_dec_and_test(&cookie->usage))
                        break;
        }

        _leave("");
}

/*
 * check the consistency between the netfs inode and the backing cache
 *
 * NOTE: it only serves non-index (data file) cookies
 */
int __fscache_check_consistency(struct fscache_cookie *cookie)
{
        struct fscache_operation *op;
        struct fscache_object *object;
        bool wake_cookie = false;
        int ret;

        _enter("%p,", cookie);

        ASSERTCMP(cookie->def->type, ==, FSCACHE_COOKIE_TYPE_DATAFILE);

        if (fscache_wait_for_deferred_lookup(cookie) < 0)
                return -ERESTARTSYS;

        if (hlist_empty(&cookie->backing_objects))
                return 0;

        op = kzalloc(sizeof(*op), GFP_NOIO | __GFP_NOMEMALLOC | __GFP_NORETRY);
        if (!op)
                return -ENOMEM;

        fscache_operation_init(op, NULL, NULL, NULL);
        op->flags = FSCACHE_OP_MYTHREAD |
                (1 << FSCACHE_OP_WAITING) |
                (1 << FSCACHE_OP_UNUSE_COOKIE);

        spin_lock(&cookie->lock);

        if (!fscache_cookie_enabled(cookie) ||
            hlist_empty(&cookie->backing_objects))
                goto inconsistent;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);
        if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
                goto inconsistent;

        op->debug_id = atomic_inc_return(&fscache_op_debug_id);

        __fscache_use_cookie(cookie);
        if (fscache_submit_op(object, op) < 0)
                goto submit_failed;

        /* the work queue now carries its own ref on the object */
        spin_unlock(&cookie->lock);

        ret = fscache_wait_for_operation_activation(object, op, NULL, NULL);
        if (ret == 0) {
                /* ask the cache to honour the operation */
                ret = object->cache->ops->check_consistency(op);
                fscache_op_complete(op, false);
        } else if (ret == -ENOBUFS) {
                ret = 0;
        }

        fscache_put_operation(op);
        _leave(" = %d", ret);
        return ret;

submit_failed:
        wake_cookie = __fscache_unuse_cookie(cookie);
inconsistent:
        spin_unlock(&cookie->lock);
        if (wake_cookie)
                __fscache_wake_unused_cookie(cookie);
        kfree(op);
        _leave(" = -ESTALE");
        return -ESTALE;
}
EXPORT_SYMBOL(__fscache_check_consistency);
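
/*
 * Illustrative sketch (hypothetical netfs code): revalidating a data-file
 * cookie against the cache, e.g. after reacquiring a lease from the server.
 * The fscache_check_consistency() wrapper is assumed; a non-zero return means
 * the cached copy could not be confirmed consistent and may be invalidated.
 *
 *        if (fscache_check_consistency(mi->fscache) != 0)
 *                fscache_invalidate(mi->fscache);
 */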