/* Storage object read/write
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/swap.h>
#include "internal.h"

/*
 * detect wake up events generated by the unlocking of pages in which we're
 * interested
 * - we use this to detect read completion of backing pages
 * - the caller holds the waitqueue lock
 */
static int cachefiles_read_waiter(wait_queue_entry_t *wait, unsigned mode,
				  int sync, void *_key)
{
	struct cachefiles_one_read *monitor =
		container_of(wait, struct cachefiles_one_read, monitor);
	struct cachefiles_object *object;
	struct fscache_retrieval *op = monitor->op;
	struct wait_bit_key *key = _key;
	struct page *page = wait->private;

	ASSERT(key);

	_enter("{%lu},%u,%d,{%p,%u}",
	       monitor->netfs_page->index, mode, sync,
	       key->flags, key->bit_nr);
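
	/* page waitqueues are hashed and shared, so this waiter can be called
	 * for wakeups other than the unlock we care about; filter on the
	 * wait_bit_key the waker passes: it must refer to this page's flags
	 * word and to PG_locked */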
	if (key->flags != &page->flags ||
	    key->bit_nr != PG_locked)
		return 0;

	_debug("--- monitor %p %lx ---", page, page->flags);

	if (!PageUptodate(page) && !PageError(page)) {
		/* unlocked, not uptodate and not erroneous? */
		_debug("page probably truncated");
	}

	/* remove from the waitqueue */
	list_del(&wait->entry);

	/* move onto the action list and queue for FS-Cache thread pool */
	ASSERT(op);

	/* We need to temporarily bump the usage count as we don't own a ref
	 * here otherwise cachefiles_read_copier() may free the op between the
	 * monitor being enqueued on the op->to_do list and the op getting
	 * enqueued on the work queue.
	 */
	fscache_get_retrieval(op);

	object = container_of(op->op.object, struct cachefiles_object, fscache);
	spin_lock(&object->work_lock);
	list_add_tail(&monitor->op_link, &op->to_do);
	spin_unlock(&object->work_lock);

	fscache_enqueue_retrieval(op);
	fscache_put_retrieval(op);
	return 0;
}

/*
 * handle a probably truncated page
 * - check to see if the page is still relevant and reissue the read if
 *   possible
 * - return -EIO on error, -ENODATA if the page is gone, -EINPROGRESS if we
 *   must wait again and 0 if successful
 */
static int cachefiles_read_reissue(struct cachefiles_object *object,
				   struct cachefiles_one_read *monitor)
{
	struct address_space *bmapping = d_backing_inode(object->backer)->i_mapping;
	struct page *backpage = monitor->back_page, *backpage2;
	int ret;

	_enter("{ino=%lx},{%lx,%lx}",
	       d_backing_inode(object->backer)->i_ino,
	       backpage->index, backpage->flags);

	/* skip if the page was truncated away completely */
	if (backpage->mapping != bmapping) {
		_leave(" = -ENODATA [mapping]");
		return -ENODATA;
	}

	backpage2 = find_get_page(bmapping, backpage->index);
	if (!backpage2) {
		_leave(" = -ENODATA [gone]");
		return -ENODATA;
	}

	if (backpage != backpage2) {
		put_page(backpage2);
		_leave(" = -ENODATA [different]");
		return -ENODATA;
	}

	/* the page is still there and we already have a ref on it, so we don't
	 * need a second */
	put_page(backpage2);
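
	/* re-arm the monitor on the page before re-checking its state, so
	 * that an unlock that happens between the checks below and the
	 * readpage call cannot slip past unobserved */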
	INIT_LIST_HEAD(&monitor->op_link);
	add_page_wait_queue(backpage, &monitor->monitor);

	if (trylock_page(backpage)) {
		ret = -EIO;
		if (PageError(backpage))
			goto unlock_discard;
		ret = 0;
		if (PageUptodate(backpage))
			goto unlock_discard;

		_debug("reissue read");
		ret = bmapping->a_ops->readpage(NULL, backpage);
		if (ret < 0)
			goto unlock_discard;
	}

	/* but the page may have been read before the monitor was installed, so
	 * the monitor may miss the event - so we have to ensure that we do get
	 * one in such a case */
	if (trylock_page(backpage)) {
		_debug("jumpstart %p {%lx}", backpage, backpage->flags);
		unlock_page(backpage);
	}

	/* it'll reappear on the todo list */
	_leave(" = -EINPROGRESS");
	return -EINPROGRESS;

unlock_discard:
	unlock_page(backpage);
	spin_lock_irq(&object->work_lock);
	list_del(&monitor->op_link);
	spin_unlock_irq(&object->work_lock);
	_leave(" = %d", ret);
	return ret;
}

/*
 * copy data from backing pages to netfs pages to complete a read operation
 * - driven by FS-Cache's thread pool
 */
static void cachefiles_read_copier(struct fscache_operation *_op)
{
	struct cachefiles_one_read *monitor;
	struct cachefiles_object *object;
	struct fscache_retrieval *op;
	int error, max;

	op = container_of(_op, struct fscache_retrieval, op);
	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);

	_enter("{ino=%lu}", d_backing_inode(object->backer)->i_ino);
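
	/* process at most a small batch of monitors per invocation so that
	 * one operation can't hog the FS-Cache thread pool; if work remains,
	 * the op is requeued at the bottom of the loop */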
	max = 8;
	spin_lock_irq(&object->work_lock);

	while (!list_empty(&op->to_do)) {
		monitor = list_entry(op->to_do.next,
				     struct cachefiles_one_read, op_link);
		list_del(&monitor->op_link);

		spin_unlock_irq(&object->work_lock);

		_debug("- copy {%lu}", monitor->back_page->index);
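
		/* the backing page's state may have changed under us
		 * (invalidation, truncation, a completed or failed read), so
		 * re-evaluate it after any reissue attempt */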
	recheck:
		if (test_bit(FSCACHE_COOKIE_INVALIDATING,
			     &object->fscache.cookie->flags)) {
			error = -ESTALE;
		} else if (PageUptodate(monitor->back_page)) {
			copy_highpage(monitor->netfs_page, monitor->back_page);
			fscache_mark_page_cached(monitor->op,
						 monitor->netfs_page);
			error = 0;
		} else if (!PageError(monitor->back_page)) {
			/* the page has probably been truncated */
			error = cachefiles_read_reissue(object, monitor);
			if (error == -EINPROGRESS)
				goto next;
			goto recheck;
		} else {
			cachefiles_io_error_obj(
				object,
				"Readpage failed on backing file %lx",
				(unsigned long) monitor->back_page->flags);
			error = -EIO;
		}

		put_page(monitor->back_page);

		fscache_end_io(op, monitor->netfs_page, error);
		put_page(monitor->netfs_page);
		fscache_retrieval_complete(op, 1);
		fscache_put_retrieval(op);
		kfree(monitor);

	next:
		/* let the thread pool have some air occasionally */
		max--;
		if (max < 0 || need_resched()) {
			if (!list_empty(&op->to_do))
				fscache_enqueue_retrieval(op);
			_leave(" [maxed out]");
			return;
		}

		spin_lock_irq(&object->work_lock);
	}

	spin_unlock_irq(&object->work_lock);
	_leave("");
}

/*
 * read the corresponding page to the given set from the backing file
 * - an uncertain page is simply discarded, to be tried again another time
 */
static int cachefiles_read_backing_file_one(struct cachefiles_object *object,
					    struct fscache_retrieval *op,
					    struct page *netpage)
{
	struct cachefiles_one_read *monitor;
	struct address_space *bmapping;
	struct page *newpage, *backpage;
	int ret;

	_enter("");

	_debug("read back %p{%lu,%d}",
	       netpage, netpage->index, page_count(netpage));

	monitor = kzalloc(sizeof(*monitor), cachefiles_gfp);
	if (!monitor)
		goto nomem;

	monitor->netfs_page = netpage;
	monitor->op = fscache_get_retrieval(op);

	init_waitqueue_func_entry(&monitor->monitor, cachefiles_read_waiter);

	/* attempt to get hold of the backing page */
	bmapping = d_backing_inode(object->backer)->i_mapping;
	newpage = NULL;
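
	/* look up the backing page; on a miss, allocate one and try to
	 * install it, looping because a racing installer may beat us to the
	 * slot (add_to_page_cache_lru() then returns -EEXIST) */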
	for (;;) {
		backpage = find_get_page(bmapping, netpage->index);
		if (backpage)
			goto backing_page_already_present;

		if (!newpage) {
			newpage = __page_cache_alloc(cachefiles_gfp);
			if (!newpage)
				goto nomem_monitor;
		}

		ret = add_to_page_cache_lru(newpage, bmapping,
					    netpage->index, cachefiles_gfp);
		if (ret == 0)
			goto installed_new_backing_page;
		if (ret != -EEXIST)
			goto nomem_page;
	}

	/* we've installed a new backing page, so now we need to start
	 * it reading */
installed_new_backing_page:
	_debug("- new %p", newpage);

	backpage = newpage;
	newpage = NULL;

read_backing_page:
	ret = bmapping->a_ops->readpage(NULL, backpage);
	if (ret < 0)
		goto read_error;

	/* set the monitor to transfer the data across */
monitor_backing_page:
	_debug("- monitor add");

	/* install the monitor */
	get_page(monitor->netfs_page);
	get_page(backpage);
	monitor->back_page = backpage;
	monitor->monitor.private = backpage;
	add_page_wait_queue(backpage, &monitor->monitor);
	monitor = NULL;
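
	/* ownership of the monitor has now passed to the page's waitqueue and
	 * the copier; clearing the local pointer stops the cleanup at out:
	 * from freeing it */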

	/* but the page may have been read before the monitor was installed, so
	 * the monitor may miss the event - so we have to ensure that we do get
	 * one in such a case */
	if (trylock_page(backpage)) {
		_debug("jumpstart %p {%lx}", backpage, backpage->flags);
		unlock_page(backpage);
	}
	goto success;

	/* if the backing page is already present, it can be in one of
	 * three states: read in progress, read failed or read okay */
backing_page_already_present:
	_debug("- present");

	if (newpage) {
		put_page(newpage);
		newpage = NULL;
	}

	if (PageError(backpage))
		goto io_error;

	if (PageUptodate(backpage))
		goto backing_page_already_uptodate;

	if (!trylock_page(backpage))
		goto monitor_backing_page;
	_debug("read %p {%lx}", backpage, backpage->flags);
	goto read_backing_page;

	/* the backing page is already up to date, attach the netfs
	 * page to the pagecache and LRU and copy the data across */
backing_page_already_uptodate:
	_debug("- uptodate");

	fscache_mark_page_cached(op, netpage);

	copy_highpage(netpage, backpage);
	fscache_end_io(op, netpage, 0);
	fscache_retrieval_complete(op, 1);

success:
	_debug("success");
	ret = 0;

out:
	if (backpage)
		put_page(backpage);
	if (monitor) {
		fscache_put_retrieval(monitor->op);
		kfree(monitor);
	}
	_leave(" = %d", ret);
	return ret;

read_error:
	_debug("read error %d", ret);
	if (ret == -ENOMEM) {
		fscache_retrieval_complete(op, 1);
		goto out;
	}
io_error:
	cachefiles_io_error_obj(object, "Page read error on backing file");
	fscache_retrieval_complete(op, 1);
	ret = -ENOBUFS;
	goto out;

nomem_page:
	put_page(newpage);
nomem_monitor:
	fscache_put_retrieval(monitor->op);
	kfree(monitor);
nomem:
	fscache_retrieval_complete(op, 1);
	_leave(" = -ENOMEM");
	return -ENOMEM;
}

/*
 * read a page from the cache or allocate a block in which to store it
 * - cache withdrawal is prevented by the caller
 * - returns -EINTR if interrupted
 * - returns -ENOMEM if we ran out of memory
 * - returns -ENOBUFS if no buffers can be made available
 * - returns -ENOBUFS if the page is beyond EOF
 * - if the page is backed by a block in the cache:
 *   - a read will be started which will call the callback on completion
 *   - 0 will be returned
 * - else if the page is unbacked:
 *   - the metadata will be retained
 *   - -ENODATA will be returned
 */
int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
				  struct page *page,
				  gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct inode *inode;
	sector_t block0, block;
	unsigned shift;
	int ret;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("{%p},{%lx},,,", object, page->index);

	if (!object->backer)
		goto enobufs;

	inode = d_backing_inode(object->backer);
	ASSERT(S_ISREG(inode->i_mode));
	ASSERT(inode->i_mapping->a_ops->bmap);
	ASSERT(inode->i_mapping->a_ops->readpages);

	/* calculate the shift required to use bmap */
	shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;
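	/* for example, with 4KB pages (PAGE_SHIFT 12) on a filesystem using
	 * 512-byte blocks (s_blocksize_bits 9), shift = 3 and page index N
	 * maps to block N << 3 */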

	op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
	op->op.flags |= FSCACHE_OP_ASYNC;
	op->op.processor = cachefiles_read_copier;

	/* we assume the absence or presence of the first block is a good
	 * enough indication for the page as a whole
	 * - TODO: don't use bmap() for this as it is _not_ actually good
	 *   enough: it doesn't indicate errors, but it's all we've got for
	 *   the moment
	 */
	block0 = page->index;
	block0 <<= shift;

	block = inode->i_mapping->a_ops->bmap(inode->i_mapping, block0);
	_debug("%llx -> %llx",
	       (unsigned long long) block0,
	       (unsigned long long) block);

	if (block) {
		/* submit the apparently valid page to the backing fs to be
		 * read from disk */
		ret = cachefiles_read_backing_file_one(object, op, page);
	} else if (cachefiles_has_space(cache, 0, 1) == 0) {
		/* there's space in the cache we can use */
		fscache_mark_page_cached(op, page);
		fscache_retrieval_complete(op, 1);
		ret = -ENODATA;
	} else {
		goto enobufs;
	}

	_leave(" = %d", ret);
	return ret;

enobufs:
	fscache_retrieval_complete(op, 1);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}

/*
 * read the corresponding pages to the given set from the backing file
 * - any uncertain pages are simply discarded, to be tried again another time
 */
static int cachefiles_read_backing_file(struct cachefiles_object *object,
					struct fscache_retrieval *op,
					struct list_head *list)
{
	struct cachefiles_one_read *monitor = NULL;
	struct address_space *bmapping = d_backing_inode(object->backer)->i_mapping;
	struct page *newpage = NULL, *netpage, *_n, *backpage = NULL;
	int ret = 0;

	_enter("");

	list_for_each_entry_safe(netpage, _n, list, lru) {
		list_del(&netpage->lru);

		_debug("read back %p{%lu,%d}",
		       netpage, netpage->index, page_count(netpage));
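
		/* a monitor is allocated lazily and reused across iterations
		 * until one is consumed by being installed on a page; the
		 * next iteration then allocates a fresh one */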
		if (!monitor) {
			monitor = kzalloc(sizeof(*monitor), cachefiles_gfp);
			if (!monitor)
				goto nomem;

			monitor->op = fscache_get_retrieval(op);
			init_waitqueue_func_entry(&monitor->monitor,
						  cachefiles_read_waiter);
		}

		for (;;) {
			backpage = find_get_page(bmapping, netpage->index);
			if (backpage)
				goto backing_page_already_present;

			if (!newpage) {
				newpage = __page_cache_alloc(cachefiles_gfp);
				if (!newpage)
					goto nomem;
			}

			ret = add_to_page_cache_lru(newpage, bmapping,
						    netpage->index,
						    cachefiles_gfp);
			if (ret == 0)
				goto installed_new_backing_page;
			if (ret != -EEXIST)
				goto nomem;
		}

		/* we've installed a new backing page, so now we need
		 * to start it reading */
	installed_new_backing_page:
		_debug("- new %p", newpage);

		backpage = newpage;
		newpage = NULL;

	reread_backing_page:
		ret = bmapping->a_ops->readpage(NULL, backpage);
		if (ret < 0)
			goto read_error;

		/* add the netfs page to the pagecache and LRU, and set the
		 * monitor to transfer the data across */
	monitor_backing_page:
		_debug("- monitor add");

		ret = add_to_page_cache_lru(netpage, op->mapping,
					    netpage->index, cachefiles_gfp);
		if (ret < 0) {
			if (ret == -EEXIST) {
				put_page(backpage);
				backpage = NULL;
				put_page(netpage);
				netpage = NULL;
				fscache_retrieval_complete(op, 1);
				continue;
			}
			goto nomem;
		}

		/* install a monitor */
		get_page(netpage);
		monitor->netfs_page = netpage;

		get_page(backpage);
		monitor->back_page = backpage;
		monitor->monitor.private = backpage;
		add_page_wait_queue(backpage, &monitor->monitor);
		monitor = NULL;

		/* but the page may have been read before the monitor was
		 * installed, so the monitor may miss the event - so we have to
		 * ensure that we do get one in such a case */
		if (trylock_page(backpage)) {
			_debug("2unlock %p {%lx}", backpage, backpage->flags);
			unlock_page(backpage);
		}

		put_page(backpage);
		backpage = NULL;

		put_page(netpage);
		netpage = NULL;
		continue;

		/* if the backing page is already present, it can be in one of
		 * three states: read in progress, read failed or read okay */
	backing_page_already_present:
		_debug("- present %p", backpage);

		if (PageError(backpage))
			goto io_error;

		if (PageUptodate(backpage))
			goto backing_page_already_uptodate;

		_debug("- not ready %p{%lx}", backpage, backpage->flags);

		if (!trylock_page(backpage))
			goto monitor_backing_page;

		if (PageError(backpage)) {
			_debug("error %lx", backpage->flags);
			unlock_page(backpage);
			goto io_error;
		}

		if (PageUptodate(backpage))
			goto backing_page_already_uptodate_unlock;

		/* we've locked a page that's neither up to date nor erroneous,
		 * so we need to attempt to read it again */
		goto reread_backing_page;

		/* the backing page is already up to date, attach the netfs
		 * page to the pagecache and LRU and copy the data across */
	backing_page_already_uptodate_unlock:
		_debug("uptodate %lx", backpage->flags);
		unlock_page(backpage);

	backing_page_already_uptodate:
		_debug("- uptodate");

		ret = add_to_page_cache_lru(netpage, op->mapping,
					    netpage->index, cachefiles_gfp);
		if (ret < 0) {
			if (ret == -EEXIST) {
				put_page(backpage);
				backpage = NULL;
				put_page(netpage);
				netpage = NULL;
				fscache_retrieval_complete(op, 1);
				continue;
			}
			goto nomem;
		}

		copy_highpage(netpage, backpage);

		put_page(backpage);
		backpage = NULL;

		fscache_mark_page_cached(op, netpage);

		/* the netpage is unlocked and marked up to date here */
		fscache_end_io(op, netpage, 0);
		put_page(netpage);
		netpage = NULL;
		fscache_retrieval_complete(op, 1);
		continue;
	}

	netpage = NULL;

	_debug("out");

out:
	/* tidy up */
	if (newpage)
		put_page(newpage);
	if (netpage)
		put_page(netpage);
	if (backpage)
		put_page(backpage);
	if (monitor) {
		fscache_put_retrieval(op);
		kfree(monitor);
	}

	list_for_each_entry_safe(netpage, _n, list, lru) {
		list_del(&netpage->lru);
		put_page(netpage);
		fscache_retrieval_complete(op, 1);
	}

	_leave(" = %d", ret);
	return ret;

nomem:
	_debug("nomem");
	ret = -ENOMEM;
	goto record_page_complete;

read_error:
	_debug("read error %d", ret);
	if (ret == -ENOMEM)
		goto record_page_complete;
io_error:
	cachefiles_io_error_obj(object, "Page read error on backing file");
	ret = -ENOBUFS;
record_page_complete:
	fscache_retrieval_complete(op, 1);
	goto out;
}

/*
 * read a list of pages from the cache or allocate blocks in which to store
 * them
 */
int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
				   struct list_head *pages,
				   unsigned *nr_pages,
				   gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct list_head backpages;
	struct pagevec pagevec;
	struct inode *inode;
	struct page *page, *_n;
	unsigned shift, nrbackpages;
	int ret, ret2, space;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("{OBJ%x,%d},,%d,,",
	       object->fscache.debug_id, atomic_read(&op->op.usage),
	       *nr_pages);

	if (!object->backer)
		goto all_enobufs;

	space = 1;
	if (cachefiles_has_space(cache, 0, *nr_pages) < 0)
		space = 0;

	inode = d_backing_inode(object->backer);
	ASSERT(S_ISREG(inode->i_mode));
	ASSERT(inode->i_mapping->a_ops->bmap);
	ASSERT(inode->i_mapping->a_ops->readpages);

	/* calculate the shift required to use bmap */
	shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;

	pagevec_init(&pagevec);

	op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
	op->op.flags |= FSCACHE_OP_ASYNC;
	op->op.processor = cachefiles_read_copier;

	INIT_LIST_HEAD(&backpages);
	nrbackpages = 0;
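
	/* the default result if no page ends up being submitted for reading:
	 * -ENODATA if there is at least space to retain metadata, -ENOBUFS
	 * otherwise */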
	ret = space ? -ENODATA : -ENOBUFS;
	list_for_each_entry_safe(page, _n, pages, lru) {
		sector_t block0, block;

		/* we assume the absence or presence of the first block is a
		 * good enough indication for the page as a whole
		 * - TODO: don't use bmap() for this as it is _not_ actually
		 *   good enough: it doesn't indicate errors, but it's all
		 *   we've got for the moment
		 */
		block0 = page->index;
		block0 <<= shift;

		block = inode->i_mapping->a_ops->bmap(inode->i_mapping,
						      block0);
		_debug("%llx -> %llx",
		       (unsigned long long) block0,
		       (unsigned long long) block);

		if (block) {
			/* we have data - add it to the list to give to the
			 * backing fs */
			list_move(&page->lru, &backpages);
			(*nr_pages)--;
			nrbackpages++;
		} else if (space && pagevec_add(&pagevec, page) == 0) {
			fscache_mark_pages_cached(op, &pagevec);
			fscache_retrieval_complete(op, 1);
			ret = -ENODATA;
		} else {
			fscache_retrieval_complete(op, 1);
		}
	}

	if (pagevec_count(&pagevec) > 0)
		fscache_mark_pages_cached(op, &pagevec);

	if (list_empty(pages))
		ret = 0;

	/* submit the apparently valid pages to the backing fs to be read from
	 * disk */
	if (nrbackpages > 0) {
		ret2 = cachefiles_read_backing_file(object, op, &backpages);
		if (ret2 == -ENOMEM || ret2 == -EINTR)
			ret = ret2;
	}

	_leave(" = %d [nr=%u%s]",
	       ret, *nr_pages, list_empty(pages) ? " empty" : "");
	return ret;

all_enobufs:
	fscache_retrieval_complete(op, *nr_pages);
	return -ENOBUFS;
}

/*
 * allocate a block in the cache in which to store a page
 * - cache withdrawal is prevented by the caller
 * - returns -EINTR if interrupted
 * - returns -ENOMEM if we ran out of memory
 * - returns -ENOBUFS if no buffers can be made available
 * - returns -ENOBUFS if the page is beyond EOF
 * - otherwise:
 *   - the metadata will be retained
 *   - 0 will be returned
 */
int cachefiles_allocate_page(struct fscache_retrieval *op,
			     struct page *page,
			     gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	int ret;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("%p,{%lx},", object, page->index);

	ret = cachefiles_has_space(cache, 0, 1);
	if (ret == 0)
		fscache_mark_page_cached(op, page);
	else
		ret = -ENOBUFS;

	fscache_retrieval_complete(op, 1);
	_leave(" = %d", ret);
	return ret;
}

/*
 * allocate blocks in the cache in which to store a set of pages
 * - cache withdrawal is prevented by the caller
 * - returns -EINTR if interrupted
 * - returns -ENOMEM if we ran out of memory
 * - returns -ENOBUFS if some buffers couldn't be made available
 * - returns -ENOBUFS if some pages are beyond EOF
 * - otherwise:
 *   - -ENODATA will be returned
 *   - metadata will be retained for any page marked
 */
int cachefiles_allocate_pages(struct fscache_retrieval *op,
			      struct list_head *pages,
			      unsigned *nr_pages,
			      gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct pagevec pagevec;
	struct page *page;
	int ret;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("%p,,,%d,", object, *nr_pages);

	ret = cachefiles_has_space(cache, 0, *nr_pages);
	if (ret == 0) {
		pagevec_init(&pagevec);
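
		/* mark the pages cached in pagevec-sized batches:
		 * pagevec_add() returns the number of slots left, so a
		 * return of 0 means the vector is full and must be flushed */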
		list_for_each_entry(page, pages, lru) {
			if (pagevec_add(&pagevec, page) == 0)
				fscache_mark_pages_cached(op, &pagevec);
		}

		if (pagevec_count(&pagevec) > 0)
			fscache_mark_pages_cached(op, &pagevec);
		ret = -ENODATA;
	} else {
		ret = -ENOBUFS;
	}

	fscache_retrieval_complete(op, *nr_pages);
	_leave(" = %d", ret);
	return ret;
}

/*
 * request a page be stored in the cache
 * - cache withdrawal is prevented by the caller
 * - this request may be ignored if there's no cache block available, in which
 *   case -ENOBUFS will be returned
 * - if the op is in progress, 0 will be returned
 */
int cachefiles_write_page(struct fscache_storage *op, struct page *page)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct file *file;
	struct path path;
	loff_t pos, eof;
	size_t len;
	void *data;
	int ret = -ENOBUFS;

	ASSERT(op != NULL);
	ASSERT(page != NULL);

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);

	_enter("%p,%p{%lx},,,", object, page, page->index);

	if (!object->backer) {
		_leave(" = -ENOBUFS");
		return -ENOBUFS;
	}

	ASSERT(d_is_reg(object->backer));

	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	pos = (loff_t)page->index << PAGE_SHIFT;

	/* We mustn't write more data than we have, so we have to beware of a
	 * partial page at EOF.
	 */
	eof = object->fscache.store_limit_l;
	if (pos >= eof)
		goto error;

	/* write the page to the backing filesystem and let it store it in its
	 * own time */
	path.mnt = cache->mnt;
	path.dentry = object->backer;
	file = dentry_open(&path, O_RDWR | O_LARGEFILE, cache->cache_cred);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto error_2;
	}
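
	/* e.g. with 4KB pages and a store limit (eof) of 0x1200, the page at
	 * pos 0x1000 is cut short to a 0x200-byte write so that data beyond
	 * the netfs file size never reaches the backing file */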
	len = PAGE_SIZE;
	if (eof & ~PAGE_MASK) {
		if (eof - pos < PAGE_SIZE) {
			_debug("cut short %llx to %llx",
			       pos, eof);
			len = eof - pos;
			ASSERTCMP(pos + len, ==, eof);
		}
	}
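
	/* __kernel_write() takes a kernel-space buffer, so the page is
	 * temporarily kmap()'d to give it an address to write from */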
	data = kmap(page);
	ret = __kernel_write(file, data, len, &pos);
	kunmap(page);
	fput(file);
	if (ret != len)
		goto error_eio;

	_leave(" = 0");
	return 0;

error_eio:
	ret = -EIO;
error_2:
	if (ret == -EIO)
		cachefiles_io_error_obj(object,
					"Write page to backing file failed");
error:
	_leave(" = -ENOBUFS [%d]", ret);
	return -ENOBUFS;
}

/*
 * detach a backing block from a page
 * - cache withdrawal is prevented by the caller
 */
void cachefiles_uncache_page(struct fscache_object *_object, struct page *page)
	__releases(&object->fscache.cookie->lock)
{
	struct cachefiles_object *object;

	object = container_of(_object, struct cachefiles_object, fscache);

	_enter("%p,{%lu}", object, page->index);

	spin_unlock(&object->fscache.cookie->lock);
}