page.c

/* Cache page management and data I/O routines
 *
 * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define FSCACHE_DEBUG_LEVEL PAGE
#include <linux/module.h>
#include <linux/fscache-cache.h>
#include <linux/buffer_head.h>
#include <linux/pagevec.h>
#include <linux/slab.h>
#include "internal.h"

/*
 * check to see if a page is being written to the cache
 */
bool __fscache_check_page_write(struct fscache_cookie *cookie, struct page *page)
{
	void *val;

	rcu_read_lock();
	val = radix_tree_lookup(&cookie->stores, page->index);
	rcu_read_unlock();
	trace_fscache_check_page(cookie, page, val, 0);

	return val != NULL;
}
EXPORT_SYMBOL(__fscache_check_page_write);

/*
 * wait for a page to finish being written to the cache
 */
void __fscache_wait_on_page_write(struct fscache_cookie *cookie, struct page *page)
{
	wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0);

	trace_fscache_page(cookie, page, fscache_page_write_wait);

	wait_event(*wq, !__fscache_check_page_write(cookie, page));
}
EXPORT_SYMBOL(__fscache_wait_on_page_write);
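
/*
 * Illustrative sketch, not part of the original file: a netfs reaches these
 * routines through the fscache_check_page_write()/fscache_wait_on_page_write()
 * wrappers, typically before reusing or discarding a page that may still be
 * the source of an in-flight store.  The example_* names are hypothetical.
 */
#if 0
static void example_wait_for_store(struct fscache_cookie *cookie,
				   struct page *page)
{
	/* a store to the cache may still be reading from this page;
	 * block until the cache has finished with it */
	if (fscache_check_page_write(cookie, page))
		fscache_wait_on_page_write(cookie, page);
}
#endif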

/*
 * wait for a page to finish being written to the cache.  Put a timeout here
 * since we might be called recursively via parent fs.
 */
static
bool release_page_wait_timeout(struct fscache_cookie *cookie, struct page *page)
{
	wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0);

	return wait_event_timeout(*wq, !__fscache_check_page_write(cookie, page),
				  HZ);
}

/*
 * decide whether a page can be released, possibly by cancelling a store to it
 * - we're allowed to sleep if __GFP_DIRECT_RECLAIM is flagged
 */
bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
				  struct page *page,
				  gfp_t gfp)
{
	struct page *xpage;
	void *val;

	_enter("%p,%p,%x", cookie, page, gfp);

	trace_fscache_page(cookie, page, fscache_page_maybe_release);

try_again:
	rcu_read_lock();
	val = radix_tree_lookup(&cookie->stores, page->index);
	if (!val) {
		rcu_read_unlock();
		fscache_stat(&fscache_n_store_vmscan_not_storing);
		__fscache_uncache_page(cookie, page);
		return true;
	}

	/* see if the page is actually undergoing storage - if so we can't get
	 * rid of it till the cache has finished with it */
	if (radix_tree_tag_get(&cookie->stores, page->index,
			       FSCACHE_COOKIE_STORING_TAG)) {
		rcu_read_unlock();
		goto page_busy;
	}

	/* the page is pending storage, so we attempt to cancel the store and
	 * discard the store request so that the page can be reclaimed */
	spin_lock(&cookie->stores_lock);
	rcu_read_unlock();

	if (radix_tree_tag_get(&cookie->stores, page->index,
			       FSCACHE_COOKIE_STORING_TAG)) {
		/* the page started to undergo storage whilst we were looking,
		 * so now we can only wait or return */
		spin_unlock(&cookie->stores_lock);
		goto page_busy;
	}

	xpage = radix_tree_delete(&cookie->stores, page->index);
	trace_fscache_page(cookie, page, fscache_page_radix_delete);
	spin_unlock(&cookie->stores_lock);

	if (xpage) {
		fscache_stat(&fscache_n_store_vmscan_cancelled);
		fscache_stat(&fscache_n_store_radix_deletes);
		ASSERTCMP(xpage, ==, page);
	} else {
		fscache_stat(&fscache_n_store_vmscan_gone);
	}

	wake_up_bit(&cookie->flags, 0);
	trace_fscache_wake_cookie(cookie);
	if (xpage)
		put_page(xpage);
	__fscache_uncache_page(cookie, page);
	return true;

page_busy:
	/* We will wait here if we're allowed to, but that could deadlock the
	 * allocator as the work threads writing to the cache may all end up
	 * sleeping on memory allocation, so we may need to impose a timeout
	 * too. */
	if (!(gfp & __GFP_DIRECT_RECLAIM) || !(gfp & __GFP_FS)) {
		fscache_stat(&fscache_n_store_vmscan_busy);
		return false;
	}

	fscache_stat(&fscache_n_store_vmscan_wait);
	if (!release_page_wait_timeout(cookie, page))
		_debug("fscache writeout timeout page: %p{%lx}",
		       page, page->index);

	gfp &= ~__GFP_DIRECT_RECLAIM;
	goto try_again;
}
EXPORT_SYMBOL(__fscache_maybe_release_page);
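
/*
 * Illustrative sketch, not part of the original file: a netfs
 * ->releasepage() handler might defer to the cache like this, refusing to
 * release a page while a store from it is still outstanding.  The
 * example_* names are hypothetical.
 */
#if 0
static int example_releasepage(struct page *page, gfp_t gfp)
{
	struct fscache_cookie *cookie = example_cookie_of(page->mapping->host);

	/* returns false if the page is busy being stored and we may not
	 * (or did not manage to) wait for it */
	if (!fscache_maybe_release_page(cookie, page, gfp))
		return 0;	/* cannot release yet */
	return 1;
}
#endif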

/*
 * note that a page has finished being written to the cache
 */
static void fscache_end_page_write(struct fscache_object *object,
				   struct page *page)
{
	struct fscache_cookie *cookie;
	struct page *xpage = NULL, *val;

	spin_lock(&object->lock);
	cookie = object->cookie;
	if (cookie) {
		/* delete the page from the tree if it is now no longer
		 * pending */
		spin_lock(&cookie->stores_lock);
		radix_tree_tag_clear(&cookie->stores, page->index,
				     FSCACHE_COOKIE_STORING_TAG);
		trace_fscache_page(cookie, page, fscache_page_radix_clear_store);
		if (!radix_tree_tag_get(&cookie->stores, page->index,
					FSCACHE_COOKIE_PENDING_TAG)) {
			fscache_stat(&fscache_n_store_radix_deletes);
			xpage = radix_tree_delete(&cookie->stores, page->index);
			trace_fscache_page(cookie, page, fscache_page_radix_delete);
			trace_fscache_page(cookie, page, fscache_page_write_end);

			val = radix_tree_lookup(&cookie->stores, page->index);
			trace_fscache_check_page(cookie, page, val, 1);
		} else {
			trace_fscache_page(cookie, page, fscache_page_write_end_pend);
		}
		spin_unlock(&cookie->stores_lock);
		wake_up_bit(&cookie->flags, 0);
		trace_fscache_wake_cookie(cookie);
	} else {
		trace_fscache_page(cookie, page, fscache_page_write_end_noc);
	}
	spin_unlock(&object->lock);
	if (xpage)
		put_page(xpage);
}

/*
 * actually apply the changed attributes to a cache object
 */
static void fscache_attr_changed_op(struct fscache_operation *op)
{
	struct fscache_object *object = op->object;
	int ret;

	_enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);

	fscache_stat(&fscache_n_attr_changed_calls);

	if (fscache_object_is_active(object)) {
		fscache_stat(&fscache_n_cop_attr_changed);
		ret = object->cache->ops->attr_changed(object);
		fscache_stat_d(&fscache_n_cop_attr_changed);
		if (ret < 0)
			fscache_abort_object(object);
		fscache_op_complete(op, ret < 0);
	} else {
		fscache_op_complete(op, true);
	}

	_leave("");
}

/*
 * notification that the attributes on an object have changed
 */
int __fscache_attr_changed(struct fscache_cookie *cookie)
{
	struct fscache_operation *op;
	struct fscache_object *object;
	bool wake_cookie = false;

	_enter("%p", cookie);

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);

	fscache_stat(&fscache_n_attr_changed);

	op = kzalloc(sizeof(*op), GFP_KERNEL);
	if (!op) {
		fscache_stat(&fscache_n_attr_changed_nomem);
		_leave(" = -ENOMEM");
		return -ENOMEM;
	}

	fscache_operation_init(cookie, op, fscache_attr_changed_op, NULL, NULL);
	trace_fscache_page_op(cookie, NULL, op, fscache_page_op_attr_changed);
	op->flags = FSCACHE_OP_ASYNC |
		(1 << FSCACHE_OP_EXCLUSIVE) |
		(1 << FSCACHE_OP_UNUSE_COOKIE);

	spin_lock(&cookie->lock);

	if (!fscache_cookie_enabled(cookie) ||
	    hlist_empty(&cookie->backing_objects))
		goto nobufs;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	__fscache_use_cookie(cookie);
	if (fscache_submit_exclusive_op(object, op) < 0)
		goto nobufs_dec;
	spin_unlock(&cookie->lock);
	fscache_stat(&fscache_n_attr_changed_ok);
	fscache_put_operation(op);
	_leave(" = 0");
	return 0;

nobufs_dec:
	wake_cookie = __fscache_unuse_cookie(cookie);
nobufs:
	spin_unlock(&cookie->lock);
	fscache_put_operation(op);
	if (wake_cookie)
		__fscache_wake_unused_cookie(cookie);
	fscache_stat(&fscache_n_attr_changed_nobufs);
	_leave(" = %d", -ENOBUFS);
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_attr_changed);
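
/*
 * Illustrative sketch, not part of the original file: a netfs would call
 * the fscache_attr_changed() wrapper after changing an attribute the cache
 * cares about (typically the file size), e.g. from its ->setattr() path.
 * The example_* names are hypothetical.
 */
#if 0
static void example_size_changed(struct inode *inode)
{
	struct fscache_cookie *cookie = example_cookie_of(inode);

	/* -ENOBUFS just means there's no cache object to update */
	if (fscache_attr_changed(cookie) == -ENOMEM)
		pr_warn_once("cache attribute update failed\n");
}
#endif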

/*
 * Handle cancellation of a pending retrieval op
 */
static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
{
	struct fscache_retrieval *op =
		container_of(_op, struct fscache_retrieval, op);

	atomic_set(&op->n_pages, 0);
}

/*
 * release a retrieval op reference
 */
static void fscache_release_retrieval_op(struct fscache_operation *_op)
{
	struct fscache_retrieval *op =
		container_of(_op, struct fscache_retrieval, op);

	_enter("{OP%x}", op->op.debug_id);

	ASSERTIFCMP(op->op.state != FSCACHE_OP_ST_INITIALISED,
		    atomic_read(&op->n_pages), ==, 0);

	fscache_hist(fscache_retrieval_histogram, op->start_time);
	if (op->context)
		fscache_put_context(op->cookie, op->context);

	_leave("");
}

/*
 * allocate a retrieval op
 */
static struct fscache_retrieval *fscache_alloc_retrieval(
	struct fscache_cookie *cookie,
	struct address_space *mapping,
	fscache_rw_complete_t end_io_func,
	void *context)
{
	struct fscache_retrieval *op;

	/* allocate a retrieval operation and attempt to submit it */
	op = kzalloc(sizeof(*op), GFP_NOIO);
	if (!op) {
		fscache_stat(&fscache_n_retrievals_nomem);
		return NULL;
	}

	fscache_operation_init(cookie, &op->op, NULL,
			       fscache_do_cancel_retrieval,
			       fscache_release_retrieval_op);
	op->op.flags	= FSCACHE_OP_MYTHREAD |
		(1UL << FSCACHE_OP_WAITING) |
		(1UL << FSCACHE_OP_UNUSE_COOKIE);
	op->cookie	= cookie;
	op->mapping	= mapping;
	op->end_io_func	= end_io_func;
	op->context	= context;
	op->start_time	= jiffies;
	INIT_LIST_HEAD(&op->to_do);

	/* Pin the netfs read context in case we need to do the actual netfs
	 * read because we've encountered a cache read failure.
	 */
	if (context)
		fscache_get_context(op->cookie, context);
	return op;
}

/*
 * wait for a deferred lookup to complete
 */
int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
{
	unsigned long jif;

	_enter("");

	if (!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags)) {
		_leave(" = 0 [imm]");
		return 0;
	}

	fscache_stat(&fscache_n_retrievals_wait);

	jif = jiffies;
	if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
			TASK_INTERRUPTIBLE) != 0) {
		fscache_stat(&fscache_n_retrievals_intr);
		_leave(" = -ERESTARTSYS");
		return -ERESTARTSYS;
	}

	ASSERT(!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags));

	smp_rmb();
	fscache_hist(fscache_retrieval_delay_histogram, jif);
	_leave(" = 0 [dly]");
	return 0;
}

/*
 * wait for an object to become active (or dead)
 */
int fscache_wait_for_operation_activation(struct fscache_object *object,
					  struct fscache_operation *op,
					  atomic_t *stat_op_waits,
					  atomic_t *stat_object_dead)
{
	int ret;

	if (!test_bit(FSCACHE_OP_WAITING, &op->flags))
		goto check_if_dead;

	_debug(">>> WT");
	if (stat_op_waits)
		fscache_stat(stat_op_waits);
	if (wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
			TASK_INTERRUPTIBLE) != 0) {
		trace_fscache_op(object->cookie, op, fscache_op_signal);
		ret = fscache_cancel_op(op, false);
		if (ret == 0)
			return -ERESTARTSYS;

		/* it's been removed from the pending queue by another party,
		 * so we should get to run shortly */
		wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
			    TASK_UNINTERRUPTIBLE);
	}
	_debug("<<< GO");

check_if_dead:
	if (op->state == FSCACHE_OP_ST_CANCELLED) {
		if (stat_object_dead)
			fscache_stat(stat_object_dead);
		_leave(" = -ENOBUFS [cancelled]");
		return -ENOBUFS;
	}
	if (unlikely(fscache_object_is_dying(object) ||
		     fscache_cache_is_broken(object))) {
		enum fscache_operation_state state = op->state;
		trace_fscache_op(object->cookie, op, fscache_op_signal);
		fscache_cancel_op(op, true);
		if (stat_object_dead)
			fscache_stat(stat_object_dead);
		_leave(" = -ENOBUFS [obj dead %d]", state);
		return -ENOBUFS;
	}
	return 0;
}

/*
 * read a page from the cache or allocate a block in which to store it
 * - we return:
 *   -ENOMEM	- out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS	- no backing object available in which to cache the block
 *   -ENODATA	- no data available in the backing object for this block
 *   0		- dispatched a read - it'll call end_io_func() when finished
 */
int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
				 struct page *page,
				 fscache_rw_complete_t end_io_func,
				 void *context,
				 gfp_t gfp)
{
	struct fscache_retrieval *op;
	struct fscache_object *object;
	bool wake_cookie = false;
	int ret;

	_enter("%p,%p,,,", cookie, page);

	fscache_stat(&fscache_n_retrievals);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;

	if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
		_leave(" = -ENOBUFS [invalidating]");
		return -ENOBUFS;
	}

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(page, !=, NULL);

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	op = fscache_alloc_retrieval(cookie, page->mapping,
				     end_io_func, context);
	if (!op) {
		_leave(" = -ENOMEM");
		return -ENOMEM;
	}
	atomic_set(&op->n_pages, 1);
	trace_fscache_page_op(cookie, page, &op->op, fscache_page_op_retr_one);

	spin_lock(&cookie->lock);

	if (!fscache_cookie_enabled(cookie) ||
	    hlist_empty(&cookie->backing_objects))
		goto nobufs_unlock;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	ASSERT(test_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags));

	__fscache_use_cookie(cookie);
	atomic_inc(&object->n_reads);
	__set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

	if (fscache_submit_op(object, &op->op) < 0)
		goto nobufs_unlock_dec;
	spin_unlock(&cookie->lock);

	fscache_stat(&fscache_n_retrieval_ops);

	/* we wait for the operation to become active, and then process it
	 * *here*, in this thread, and not in the thread pool */
	ret = fscache_wait_for_operation_activation(
		object, &op->op,
		__fscache_stat(&fscache_n_retrieval_op_waits),
		__fscache_stat(&fscache_n_retrievals_object_dead));
	if (ret < 0)
		goto error;

	/* ask the cache to honour the operation */
	if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
		fscache_stat(&fscache_n_cop_allocate_page);
		ret = object->cache->ops->allocate_page(op, page, gfp);
		fscache_stat_d(&fscache_n_cop_allocate_page);
		if (ret == 0)
			ret = -ENODATA;
	} else {
		fscache_stat(&fscache_n_cop_read_or_alloc_page);
		ret = object->cache->ops->read_or_alloc_page(op, page, gfp);
		fscache_stat_d(&fscache_n_cop_read_or_alloc_page);
	}

error:
	if (ret == -ENOMEM)
		fscache_stat(&fscache_n_retrievals_nomem);
	else if (ret == -ERESTARTSYS)
		fscache_stat(&fscache_n_retrievals_intr);
	else if (ret == -ENODATA)
		fscache_stat(&fscache_n_retrievals_nodata);
	else if (ret < 0)
		fscache_stat(&fscache_n_retrievals_nobufs);
	else
		fscache_stat(&fscache_n_retrievals_ok);

	fscache_put_retrieval(op);
	_leave(" = %d", ret);
	return ret;

nobufs_unlock_dec:
	atomic_dec(&object->n_reads);
	wake_cookie = __fscache_unuse_cookie(cookie);
nobufs_unlock:
	spin_unlock(&cookie->lock);
	if (wake_cookie)
		__fscache_wake_unused_cookie(cookie);
	fscache_put_retrieval(op);
nobufs:
	fscache_stat(&fscache_n_retrievals_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_page);
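
/*
 * Illustrative sketch, not part of the original file: the shape of a netfs
 * ->readpage() built on the fscache_read_or_alloc_page() wrapper.  On 0 the
 * cache owns the page and will invoke the completion function; on
 * -ENODATA/-ENOBUFS the netfs falls back to reading from the server.  The
 * example_* names are hypothetical.
 */
#if 0
static void example_read_done(struct page *page, void *context, int error)
{
	if (!error)
		SetPageUptodate(page);
	unlock_page(page);
}

static int example_readpage(struct file *file, struct page *page)
{
	struct fscache_cookie *cookie = example_cookie_of(page->mapping->host);
	int ret;

	ret = fscache_read_or_alloc_page(cookie, page, example_read_done,
					 NULL, GFP_KERNEL);
	switch (ret) {
	case 0:			/* the cache is reading the page for us */
		return 0;
	case -ENOBUFS:		/* no object or block to read from */
	case -ENODATA:		/* block allocated, but no data in it yet */
	default:
		/* fall back to fetching from the server */
		return example_read_from_server(file, page);
	}
}
#endif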

/*
 * read a list of pages from the cache or allocate blocks in which to store
 * them
 * - we return:
 *   -ENOMEM	- out of memory, some pages may be being read
 *   -ERESTARTSYS - interrupted, some pages may be being read
 *   -ENOBUFS	- no backing object or space available in which to cache any
 *		  pages not being read
 *   -ENODATA	- no data available in the backing object for some or all of
 *		  the pages
 *   0		- dispatched a read on all pages
 *
 * end_io_func() will be called for each page read from the cache as it
 * finishes being read
 *
 * any pages for which a read is dispatched will be removed from pages and
 * nr_pages
 */
int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
				  struct address_space *mapping,
				  struct list_head *pages,
				  unsigned *nr_pages,
				  fscache_rw_complete_t end_io_func,
				  void *context,
				  gfp_t gfp)
{
	struct fscache_retrieval *op;
	struct fscache_object *object;
	bool wake_cookie = false;
	int ret;

	_enter("%p,,%d,,,", cookie, *nr_pages);

	fscache_stat(&fscache_n_retrievals);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;

	if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
		_leave(" = -ENOBUFS [invalidating]");
		return -ENOBUFS;
	}

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(*nr_pages, >, 0);
	ASSERT(!list_empty(pages));

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	op = fscache_alloc_retrieval(cookie, mapping, end_io_func, context);
	if (!op)
		return -ENOMEM;
	atomic_set(&op->n_pages, *nr_pages);
	trace_fscache_page_op(cookie, NULL, &op->op, fscache_page_op_retr_multi);

	spin_lock(&cookie->lock);

	if (!fscache_cookie_enabled(cookie) ||
	    hlist_empty(&cookie->backing_objects))
		goto nobufs_unlock;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	__fscache_use_cookie(cookie);
	atomic_inc(&object->n_reads);
	__set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

	if (fscache_submit_op(object, &op->op) < 0)
		goto nobufs_unlock_dec;
	spin_unlock(&cookie->lock);

	fscache_stat(&fscache_n_retrieval_ops);

	/* we wait for the operation to become active, and then process it
	 * *here*, in this thread, and not in the thread pool */
	ret = fscache_wait_for_operation_activation(
		object, &op->op,
		__fscache_stat(&fscache_n_retrieval_op_waits),
		__fscache_stat(&fscache_n_retrievals_object_dead));
	if (ret < 0)
		goto error;

	/* ask the cache to honour the operation */
	if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
		fscache_stat(&fscache_n_cop_allocate_pages);
		ret = object->cache->ops->allocate_pages(
			op, pages, nr_pages, gfp);
		fscache_stat_d(&fscache_n_cop_allocate_pages);
	} else {
		fscache_stat(&fscache_n_cop_read_or_alloc_pages);
		ret = object->cache->ops->read_or_alloc_pages(
			op, pages, nr_pages, gfp);
		fscache_stat_d(&fscache_n_cop_read_or_alloc_pages);
	}

error:
	if (ret == -ENOMEM)
		fscache_stat(&fscache_n_retrievals_nomem);
	else if (ret == -ERESTARTSYS)
		fscache_stat(&fscache_n_retrievals_intr);
	else if (ret == -ENODATA)
		fscache_stat(&fscache_n_retrievals_nodata);
	else if (ret < 0)
		fscache_stat(&fscache_n_retrievals_nobufs);
	else
		fscache_stat(&fscache_n_retrievals_ok);

	fscache_put_retrieval(op);
	_leave(" = %d", ret);
	return ret;

nobufs_unlock_dec:
	atomic_dec(&object->n_reads);
	wake_cookie = __fscache_unuse_cookie(cookie);
nobufs_unlock:
	spin_unlock(&cookie->lock);
	fscache_put_retrieval(op);
	if (wake_cookie)
		__fscache_wake_unused_cookie(cookie);
nobufs:
	fscache_stat(&fscache_n_retrievals_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_pages);
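
/*
 * Illustrative sketch, not part of the original file: ->readpages() usage
 * via the fscache_read_or_alloc_pages() wrapper.  Pages the cache takes on
 * are removed from the list and *nr_pages is decremented; whatever remains
 * must be read from the server (or cancelled with
 * fscache_readpages_cancel()).  The example_* names are hypothetical.
 */
#if 0
static int example_readpages(struct file *file, struct address_space *mapping,
			     struct list_head *pages, unsigned nr_pages)
{
	struct fscache_cookie *cookie = example_cookie_of(mapping->host);
	int ret;

	ret = fscache_read_or_alloc_pages(cookie, mapping, pages, &nr_pages,
					  example_read_done, NULL, GFP_KERNEL);
	if (ret == 0 && nr_pages == 0)
		return 0;	/* the cache is reading everything */

	/* read the leftovers from the server */
	return example_read_list_from_server(file, mapping, pages, nr_pages);
}
#endif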

/*
 * allocate a block in the cache in which to store a page
 * - we return:
 *   -ENOMEM	- out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS	- no backing object available in which to cache the block
 *   0		- block allocated
 */
int __fscache_alloc_page(struct fscache_cookie *cookie,
			 struct page *page,
			 gfp_t gfp)
{
	struct fscache_retrieval *op;
	struct fscache_object *object;
	bool wake_cookie = false;
	int ret;

	_enter("%p,%p,,,", cookie, page);

	fscache_stat(&fscache_n_allocs);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(page, !=, NULL);

	if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
		_leave(" = -ENOBUFS [invalidating]");
		return -ENOBUFS;
	}

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	op = fscache_alloc_retrieval(cookie, page->mapping, NULL, NULL);
	if (!op)
		return -ENOMEM;
	atomic_set(&op->n_pages, 1);
	trace_fscache_page_op(cookie, page, &op->op, fscache_page_op_alloc_one);

	spin_lock(&cookie->lock);

	if (!fscache_cookie_enabled(cookie) ||
	    hlist_empty(&cookie->backing_objects))
		goto nobufs_unlock;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	__fscache_use_cookie(cookie);
	if (fscache_submit_op(object, &op->op) < 0)
		goto nobufs_unlock_dec;
	spin_unlock(&cookie->lock);

	fscache_stat(&fscache_n_alloc_ops);

	ret = fscache_wait_for_operation_activation(
		object, &op->op,
		__fscache_stat(&fscache_n_alloc_op_waits),
		__fscache_stat(&fscache_n_allocs_object_dead));
	if (ret < 0)
		goto error;

	/* ask the cache to honour the operation */
	fscache_stat(&fscache_n_cop_allocate_page);
	ret = object->cache->ops->allocate_page(op, page, gfp);
	fscache_stat_d(&fscache_n_cop_allocate_page);

error:
	if (ret == -ERESTARTSYS)
		fscache_stat(&fscache_n_allocs_intr);
	else if (ret < 0)
		fscache_stat(&fscache_n_allocs_nobufs);
	else
		fscache_stat(&fscache_n_allocs_ok);

	fscache_put_retrieval(op);
	_leave(" = %d", ret);
	return ret;

nobufs_unlock_dec:
	wake_cookie = __fscache_unuse_cookie(cookie);
nobufs_unlock:
	spin_unlock(&cookie->lock);
	fscache_put_retrieval(op);
	if (wake_cookie)
		__fscache_wake_unused_cookie(cookie);
nobufs:
	fscache_stat(&fscache_n_allocs_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_alloc_page);
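
/*
 * Illustrative sketch, not part of the original file: the fscache_alloc_page()
 * wrapper reserves a block without reading it, so a netfs that is about to
 * overwrite a whole page can skip the cache read and still have somewhere to
 * store the result later.  The example_* names are hypothetical.
 */
#if 0
static int example_prepare_overwrite(struct fscache_cookie *cookie,
				     struct page *page)
{
	/* 0 means a block now backs this page and a later
	 * fscache_write_page() can fill it; on error the netfs simply
	 * doesn't cache this page */
	return fscache_alloc_page(cookie, page, GFP_KERNEL);
}
#endif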

/*
 * Unmark pages allocated in the readahead code path (via:
 * fscache_read_or_alloc_pages) after delegating to the base filesystem
 */
void __fscache_readpages_cancel(struct fscache_cookie *cookie,
				struct list_head *pages)
{
	struct page *page;

	list_for_each_entry(page, pages, lru) {
		if (PageFsCache(page))
			__fscache_uncache_page(cookie, page);
	}
}
EXPORT_SYMBOL(__fscache_readpages_cancel);

/*
 * release a write op reference
 */
static void fscache_release_write_op(struct fscache_operation *_op)
{
	_enter("{OP%x}", _op->debug_id);
}

/*
 * perform the background storage of a page into the cache
 */
static void fscache_write_op(struct fscache_operation *_op)
{
	struct fscache_storage *op =
		container_of(_op, struct fscache_storage, op);
	struct fscache_object *object = op->op.object;
	struct fscache_cookie *cookie;
	struct page *page;
	unsigned n;
	void *results[1];
	int ret;

	_enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage));

again:
	spin_lock(&object->lock);
	cookie = object->cookie;

	if (!fscache_object_is_active(object)) {
		/* If we get here, then the on-disk cache object likely no
		 * longer exists, so we should just cancel this write
		 * operation.
		 */
		spin_unlock(&object->lock);
		fscache_op_complete(&op->op, true);
		_leave(" [inactive]");
		return;
	}

	if (!cookie) {
		/* If we get here, then the cookie belonging to the object was
		 * detached, probably by the cookie being withdrawn due to
		 * memory pressure, which means that the pages we were going
		 * to write to the cache from no longer exist - therefore, we
		 * can just cancel this write operation.
		 */
		spin_unlock(&object->lock);
		fscache_op_complete(&op->op, true);
		_leave(" [cancel] op{f=%lx s=%u} obj{s=%s f=%lx}",
		       _op->flags, _op->state, object->state->short_name,
		       object->flags);
		return;
	}

	spin_lock(&cookie->stores_lock);

	fscache_stat(&fscache_n_store_calls);

	/* find a page to store */
	results[0] = NULL;
	page = NULL;
	n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0, 1,
				       FSCACHE_COOKIE_PENDING_TAG);
	trace_fscache_gang_lookup(cookie, &op->op, results, n, op->store_limit);
	if (n != 1)
		goto superseded;
	page = results[0];
	_debug("gang %d [%lx]", n, page->index);

	radix_tree_tag_set(&cookie->stores, page->index,
			   FSCACHE_COOKIE_STORING_TAG);
	radix_tree_tag_clear(&cookie->stores, page->index,
			     FSCACHE_COOKIE_PENDING_TAG);
	trace_fscache_page(cookie, page, fscache_page_radix_pend2store);

	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);

	if (page->index >= op->store_limit)
		goto discard_page;

	fscache_stat(&fscache_n_store_pages);
	fscache_stat(&fscache_n_cop_write_page);
	ret = object->cache->ops->write_page(op, page);
	fscache_stat_d(&fscache_n_cop_write_page);
	trace_fscache_wrote_page(cookie, page, &op->op, ret);
	fscache_end_page_write(object, page);
	if (ret < 0) {
		fscache_abort_object(object);
		fscache_op_complete(&op->op, true);
	} else {
		fscache_enqueue_operation(&op->op);
	}

	_leave("");
	return;

discard_page:
	fscache_stat(&fscache_n_store_pages_over_limit);
	trace_fscache_wrote_page(cookie, page, &op->op, -ENOBUFS);
	fscache_end_page_write(object, page);
	goto again;

superseded:
	/* this writer is going away and there aren't any more things to
	 * write */
	_debug("cease");
	spin_unlock(&cookie->stores_lock);
	clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
	spin_unlock(&object->lock);
	fscache_op_complete(&op->op, false);
	_leave("");
}

/*
 * Clear the pages pending writing for invalidation
 */
void fscache_invalidate_writes(struct fscache_cookie *cookie)
{
	struct page *page;
	void *results[16];
	int n, i;

	_enter("");

	for (;;) {
		spin_lock(&cookie->stores_lock);
		n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0,
					       ARRAY_SIZE(results),
					       FSCACHE_COOKIE_PENDING_TAG);
		if (n == 0) {
			spin_unlock(&cookie->stores_lock);
			break;
		}

		for (i = n - 1; i >= 0; i--) {
			page = results[i];
			radix_tree_delete(&cookie->stores, page->index);
			trace_fscache_page(cookie, page, fscache_page_radix_delete);
			trace_fscache_page(cookie, page, fscache_page_inval);
		}

		spin_unlock(&cookie->stores_lock);

		for (i = n - 1; i >= 0; i--)
			put_page(results[i]);
	}

	wake_up_bit(&cookie->flags, 0);
	trace_fscache_wake_cookie(cookie);

	_leave("");
}

/*
 * request a page be stored in the cache
 * - returns:
 *   -ENOMEM	- out of memory, nothing done
 *   -ENOBUFS	- no backing object available in which to cache the page
 *   0		- dispatched a write - the page will be stored in the
 *		  background
 *
 * if the cookie still has a backing object at this point, that object can be
 * in one of a few states with respect to storage processing:
 *
 *  (1) negative lookup, object not yet created (FSCACHE_COOKIE_CREATING is
 *      set)
 *
 *	(a) no writes yet
 *
 *	(b) writes deferred till post-creation (mark page for writing and
 *	    return immediately)
 *
 *  (2) negative lookup, object created, initial fill being made from netfs
 *
 *	(a) fill point not yet reached this page (mark page for writing and
 *	    return)
 *
 *	(b) fill point passed this page (queue op to store this page)
 *
 *  (3) object extant (queue op to store this page)
 *
 * any other state is invalid
 */
int __fscache_write_page(struct fscache_cookie *cookie,
			 struct page *page,
			 loff_t object_size,
			 gfp_t gfp)
{
	struct fscache_storage *op;
	struct fscache_object *object;
	bool wake_cookie = false;
	int ret;

	_enter("%p,%x,", cookie, (u32) page->flags);

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERT(PageFsCache(page));

	fscache_stat(&fscache_n_stores);

	if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
		_leave(" = -ENOBUFS [invalidating]");
		return -ENOBUFS;
	}

	op = kzalloc(sizeof(*op), GFP_NOIO | __GFP_NOMEMALLOC | __GFP_NORETRY);
	if (!op)
		goto nomem;

	fscache_operation_init(cookie, &op->op, fscache_write_op, NULL,
			       fscache_release_write_op);
	op->op.flags = FSCACHE_OP_ASYNC |
		(1 << FSCACHE_OP_WAITING) |
		(1 << FSCACHE_OP_UNUSE_COOKIE);

	ret = radix_tree_maybe_preload(gfp & ~__GFP_HIGHMEM);
	if (ret < 0)
		goto nomem_free;

	trace_fscache_page_op(cookie, page, &op->op, fscache_page_op_write_one);

	ret = -ENOBUFS;
	spin_lock(&cookie->lock);

	if (!fscache_cookie_enabled(cookie) ||
	    hlist_empty(&cookie->backing_objects))
		goto nobufs;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);
	if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
		goto nobufs;

	trace_fscache_page(cookie, page, fscache_page_write);

	/* add the page to the pending-storage radix tree on the backing
	 * object */
	spin_lock(&object->lock);

	if (object->store_limit_l != object_size)
		fscache_set_store_limit(object, object_size);

	spin_lock(&cookie->stores_lock);

	_debug("store limit %llx", (unsigned long long) object->store_limit);

	ret = radix_tree_insert(&cookie->stores, page->index, page);
	if (ret < 0) {
		if (ret == -EEXIST)
			goto already_queued;
		_debug("insert failed %d", ret);
		goto nobufs_unlock_obj;
	}

	trace_fscache_page(cookie, page, fscache_page_radix_insert);
	radix_tree_tag_set(&cookie->stores, page->index,
			   FSCACHE_COOKIE_PENDING_TAG);
	trace_fscache_page(cookie, page, fscache_page_radix_set_pend);
	get_page(page);

	/* we only want one writer at a time, but we do need to queue new
	 * writers after exclusive ops */
	if (test_and_set_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags))
		goto already_pending;

	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);

	op->op.debug_id	= atomic_inc_return(&fscache_op_debug_id);
	op->store_limit = object->store_limit;

	__fscache_use_cookie(cookie);
	if (fscache_submit_op(object, &op->op) < 0)
		goto submit_failed;

	spin_unlock(&cookie->lock);
	radix_tree_preload_end();
	fscache_stat(&fscache_n_store_ops);
	fscache_stat(&fscache_n_stores_ok);

	/* the work queue now carries its own ref on the object */
	fscache_put_operation(&op->op);
	_leave(" = 0");
	return 0;

already_queued:
	fscache_stat(&fscache_n_stores_again);
already_pending:
	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);
	spin_unlock(&cookie->lock);
	radix_tree_preload_end();
	fscache_put_operation(&op->op);
	fscache_stat(&fscache_n_stores_ok);
	_leave(" = 0");
	return 0;

submit_failed:
	spin_lock(&cookie->stores_lock);
	radix_tree_delete(&cookie->stores, page->index);
	trace_fscache_page(cookie, page, fscache_page_radix_delete);
	spin_unlock(&cookie->stores_lock);
	wake_cookie = __fscache_unuse_cookie(cookie);
	put_page(page);
	ret = -ENOBUFS;
	goto nobufs;

nobufs_unlock_obj:
	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);
nobufs:
	spin_unlock(&cookie->lock);
	radix_tree_preload_end();
	fscache_put_operation(&op->op);
	if (wake_cookie)
		__fscache_wake_unused_cookie(cookie);
	fscache_stat(&fscache_n_stores_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;

nomem_free:
	fscache_put_operation(&op->op);
nomem:
	fscache_stat(&fscache_n_stores_oom);
	_leave(" = -ENOMEM");
	return -ENOMEM;
}
EXPORT_SYMBOL(__fscache_write_page);
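
/*
 * Illustrative sketch, not part of the original file: after a netfs has read
 * a page from the server it can push a copy into the cache via the
 * fscache_write_page() wrapper; if the store can't even be queued, the mark
 * left by fscache_mark_page_cached() must be removed again.  This mirrors
 * the pattern used by netfs callers; the example_* names are hypothetical.
 */
#if 0
static void example_write_to_cache(struct inode *inode, struct page *page)
{
	struct fscache_cookie *cookie = example_cookie_of(inode);

	if (!PageFsCache(page))
		return;		/* the cache didn't mark this page */

	if (fscache_write_page(cookie, page, i_size_read(inode),
			       GFP_KERNEL) != 0)
		fscache_uncache_page(cookie, page);
}
#endif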

/*
 * remove a page from the cache
 */
void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
{
	struct fscache_object *object;

	_enter(",%p", page);

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(page, !=, NULL);

	fscache_stat(&fscache_n_uncaches);

	/* cache withdrawal may beat us to it */
	if (!PageFsCache(page))
		goto done;

	trace_fscache_page(cookie, page, fscache_page_uncache);

	/* get the object */
	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects)) {
		ClearPageFsCache(page);
		goto done_unlock;
	}

	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	/* there might now be stuff on disk we could read */
	clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);

	/* only invoke the cache backend if we managed to mark the page
	 * uncached here; this deals with synchronisation vs withdrawal */
	if (TestClearPageFsCache(page) &&
	    object->cache->ops->uncache_page) {
		/* the cache backend releases the cookie lock */
		fscache_stat(&fscache_n_cop_uncache_page);
		object->cache->ops->uncache_page(object, page);
		fscache_stat_d(&fscache_n_cop_uncache_page);
		goto done;
	}

done_unlock:
	spin_unlock(&cookie->lock);
done:
	_leave("");
}
EXPORT_SYMBOL(__fscache_uncache_page);

/**
 * fscache_mark_page_cached - Mark a page as being cached
 * @op: The retrieval op pages are being marked for
 * @page: The page to be marked
 *
 * Mark a netfs page as being cached.  After this is called, the netfs
 * must call fscache_uncache_page() to remove the mark.
 */
void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
{
	struct fscache_cookie *cookie = op->op.object->cookie;

#ifdef CONFIG_FSCACHE_STATS
	atomic_inc(&fscache_n_marks);
#endif

	trace_fscache_page(cookie, page, fscache_page_cached);

	_debug("- mark %p{%lx}", page, page->index);
	if (TestSetPageFsCache(page)) {
		static bool once_only;
		if (!once_only) {
			once_only = true;
			pr_warn("Cookie type %s marked page %lx multiple times\n",
				cookie->def->name, page->index);
		}
	}

	if (cookie->def->mark_page_cached)
		cookie->def->mark_page_cached(cookie->netfs_data,
					      op->mapping, page);
}
EXPORT_SYMBOL(fscache_mark_page_cached);

/**
 * fscache_mark_pages_cached - Mark pages as being cached
 * @op: The retrieval op pages are being marked for
 * @pagevec: The pages to be marked
 *
 * Mark a bunch of netfs pages as being cached.  After this is called,
 * the netfs must call fscache_uncache_page() to remove the mark.
 */
void fscache_mark_pages_cached(struct fscache_retrieval *op,
			       struct pagevec *pagevec)
{
	unsigned long loop;

	for (loop = 0; loop < pagevec->nr; loop++)
		fscache_mark_page_cached(op, pagevec->pages[loop]);

	pagevec_reinit(pagevec);
}
EXPORT_SYMBOL(fscache_mark_pages_cached);

/*
 * Uncache all the pages in an inode that are marked PG_fscache, assuming them
 * to be associated with the given cookie.
 */
void __fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
				       struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct pagevec pvec;
	pgoff_t next;
	int i;

	_enter("%p,%p", cookie, inode);

	if (!mapping || mapping->nrpages == 0) {
		_leave(" [no pages]");
		return;
	}

	pagevec_init(&pvec);
	next = 0;
	do {
		if (!pagevec_lookup(&pvec, mapping, &next))
			break;
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			if (PageFsCache(page)) {
				__fscache_wait_on_page_write(cookie, page);
				__fscache_uncache_page(cookie, page);
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	} while (next);

	_leave("");
}
EXPORT_SYMBOL(__fscache_uncache_all_inode_pages);
  1056. EXPORT_SYMBOL(__fscache_uncache_all_inode_pages);