/* handling of writes to regular files and writing back to the server
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "internal.h"

/*
 * mark a page as having been made dirty and thus needing writeback
 */
int afs_set_page_dirty(struct page *page)
{
	_enter("");
	return __set_page_dirty_nobuffers(page);
}

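/*
 * Note: AFS pages carry no buffer heads, so the generic
 * __set_page_dirty_nobuffers() is sufficient here; this file instead uses
 * page->private to record which byte range of a page is dirty (see
 * afs_write_begin() below).
 */
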
/*
 * partly or wholly fill a page that's under preparation for writing
 */
static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
			 loff_t pos, unsigned int len, struct page *page)
{
	struct afs_read *req;
	int ret;

	_enter(",,%llu", (unsigned long long)pos);

	req = kzalloc(sizeof(struct afs_read) + sizeof(struct page *),
		      GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	refcount_set(&req->usage, 1);
	req->pos = pos;
	req->len = len;
	req->nr_pages = 1;
	req->pages = req->array;
	req->pages[0] = page;
	get_page(page);

	ret = afs_fetch_data(vnode, key, req);
	afs_put_read(req);
	if (ret < 0) {
		if (ret == -ENOENT) {
			_debug("got NOENT from server"
			       " - marking file deleted and stale");
			set_bit(AFS_VNODE_DELETED, &vnode->flags);
			ret = -ESTALE;
		}
	}

	_leave(" = %d", ret);
	return ret;
}

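/*
 * A page's dirty extent is packed into page->private as two offsets: the low
 * bits (masked by AFS_PRIV_MAX) hold the "from" offset and the bits from
 * AFS_PRIV_SHIFT upwards hold the "to" offset, so only bytes [from, to) of
 * the page need writing back.  The BUILD_BUG_ON() in afs_write_begin() below
 * guards against page->private being too small to hold both offsets.
 */
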
/*
 * prepare to perform part of a write to a page
 */
int afs_write_begin(struct file *file, struct address_space *mapping,
		    loff_t pos, unsigned len, unsigned flags,
		    struct page **pagep, void **fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct page *page;
	struct key *key = afs_file_key(file);
	unsigned long priv;
	unsigned f, from = pos & (PAGE_SIZE - 1);
	unsigned t, to = from + len;
	pgoff_t index = pos >> PAGE_SHIFT;
	int ret;

	_enter("{%x:%u},{%lx},%u,%u",
	       vnode->fid.vid, vnode->fid.vnode, index, from, to);

	/* We want to store information about how much of a page is altered in
	 * page->private.
	 */
	BUILD_BUG_ON(PAGE_SIZE > 32768 && sizeof(page->private) < 8);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;

	if (!PageUptodate(page) && len != PAGE_SIZE) {
		ret = afs_fill_page(vnode, key, pos & PAGE_MASK, PAGE_SIZE, page);
		if (ret < 0) {
			unlock_page(page);
			put_page(page);
			_leave(" = %d [prep]", ret);
			return ret;
		}
		SetPageUptodate(page);
	}

	/* page won't leak in error case: it eventually gets cleaned off LRU */
	*pagep = page;

try_again:
	/* See if this page is already partially written in a way that we can
	 * merge the new write with.
	 */
	t = f = 0;
	if (PagePrivate(page)) {
		priv = page_private(page);
		f = priv & AFS_PRIV_MAX;
		t = priv >> AFS_PRIV_SHIFT;
		ASSERTCMP(f, <=, t);
	}

	if (f != t) {
		if (PageWriteback(page)) {
			trace_afs_page_dirty(vnode, tracepoint_string("alrdy"),
					     page->index, priv);
			goto flush_conflicting_write;
		}
		/* If the file is being filled locally, allow inter-write
		 * spaces to be merged into writes.  If it's not, only write
		 * back what the user gives us.
		 */
		if (!test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags) &&
		    (to < f || from > t))
			goto flush_conflicting_write;
		if (from < f)
			f = from;
		if (to > t)
			t = to;
	} else {
		f = from;
		t = to;
	}

	priv = (unsigned long)t << AFS_PRIV_SHIFT;
	priv |= f;
	trace_afs_page_dirty(vnode, tracepoint_string("begin"),
			     page->index, priv);
	SetPagePrivate(page);
	set_page_private(page, priv);
	_leave(" = 0");
	return 0;

	/* The previous write and this write aren't adjacent or overlapping, so
	 * flush the page out.
	 */
flush_conflicting_write:
	_debug("flush conflict");
	ret = write_one_page(page);
	if (ret < 0) {
		_leave(" = %d", ret);
		return ret;
	}

	ret = lock_page_killable(page);
	if (ret < 0) {
		_leave(" = %d", ret);
		return ret;
	}
	goto try_again;
}

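/*
 * Worked example (assuming PAGE_SIZE == 4096): a 100-byte write at file
 * position 4196 lands in page index 1 with from = 100 and to = 200, giving
 * priv = (200UL << AFS_PRIV_SHIFT) | 100.  A later write of bytes 200-300 of
 * the same page merges to from = 100, to = 300; a write of bytes 1000-1100,
 * by contrast, neither overlaps nor abuts [100, 300) and so (unless
 * AFS_VNODE_NEW_CONTENT is set) forces the first range to be flushed out
 * via flush_conflicting_write before being recorded.
 */
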
/*
 * finalise part of a write to a page
 */
int afs_write_end(struct file *file, struct address_space *mapping,
		  loff_t pos, unsigned len, unsigned copied,
		  struct page *page, void *fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct key *key = afs_file_key(file);
	loff_t i_size, maybe_i_size;
	int ret;

	_enter("{%x:%u},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, page->index);

	maybe_i_size = pos + copied;

	i_size = i_size_read(&vnode->vfs_inode);
	if (maybe_i_size > i_size) {
		spin_lock(&vnode->wb_lock);
		i_size = i_size_read(&vnode->vfs_inode);
		if (maybe_i_size > i_size)
			i_size_write(&vnode->vfs_inode, maybe_i_size);
		spin_unlock(&vnode->wb_lock);
	}

	if (!PageUptodate(page)) {
		if (copied < len) {
			/* Try and load any missing data from the server.  The
			 * unmarshalling routine will take care of clearing any
			 * bits that are beyond the EOF.
			 */
			ret = afs_fill_page(vnode, key, pos + copied,
					    len - copied, page);
			if (ret < 0)
				goto out;
		}
		SetPageUptodate(page);
	}

	set_page_dirty(page);
	if (PageDirty(page))
		_debug("dirtied");
	ret = copied;

out:
	unlock_page(page);
	put_page(page);
	return ret;
}

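/*
 * The i_size update above is double-checked: the unlocked read avoids taking
 * vnode->wb_lock in the common case where the file isn't being extended, and
 * the re-read under the lock stops two racing extenders from moving i_size
 * backwards.
 */
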
/*
 * kill all the pages in the given range
 */
static void afs_kill_pages(struct address_space *mapping,
			   pgoff_t first, pgoff_t last)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct pagevec pv;
	unsigned count, loop;

	_enter("{%x:%u},%lx-%lx",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("kill %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			struct page *page = pv.pages[loop];
			ClearPageUptodate(page);
			SetPageError(page);
			end_page_writeback(page);
			if (page->index >= first)
				first = page->index + 1;
			lock_page(page);
			generic_error_remove_page(mapping, page);
			unlock_page(page);
		}

		__pagevec_release(&pv);
	} while (first <= last);

	_leave("");
}

/*
 * Redirty all the pages in a given range.
 */
static void afs_redirty_pages(struct writeback_control *wbc,
			      struct address_space *mapping,
			      pgoff_t first, pgoff_t last)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct pagevec pv;
	unsigned count, loop;

	_enter("{%x:%u},%lx-%lx",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("redirty %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			struct page *page = pv.pages[loop];

			redirty_page_for_writepage(wbc, page);
			end_page_writeback(page);
			if (page->index >= first)
				first = page->index + 1;
		}

		__pagevec_release(&pv);
	} while (first <= last);

	_leave("");
}

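/*
 * afs_kill_pages() and afs_redirty_pages() are the two failure paths for a
 * store: pages are killed (invalidated and marked in error) when the error
 * looks permanent, e.g. -ENOENT or -EIO, and redirtied for a later retry when
 * it may be transient, e.g. -ENOSPC or a key problem - see the switch at the
 * end of afs_write_back_from_locked_page() below.
 */
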
/*
 * write to a file
 */
static int afs_store_data(struct address_space *mapping,
			  pgoff_t first, pgoff_t last,
			  unsigned offset, unsigned to)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct afs_fs_cursor fc;
	struct afs_wb_key *wbk = NULL;
	struct list_head *p;
	int ret = -ENOKEY, ret2;

	_enter("%s{%x:%u.%u},%lx,%lx,%x,%x",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       first, last, offset, to);

	spin_lock(&vnode->wb_lock);
	p = vnode->wb_keys.next;

	/* Iterate through the list looking for a valid key to use. */
try_next_key:
	while (p != &vnode->wb_keys) {
		wbk = list_entry(p, struct afs_wb_key, vnode_link);
		_debug("wbk %u", key_serial(wbk->key));
		ret2 = key_validate(wbk->key);
		if (ret2 == 0)
			goto found_key;
		if (ret == -ENOKEY)
			ret = ret2;
		p = p->next;
	}

	/* No reference was taken on any key we merely examined, so there is
	 * nothing to put here.
	 */
	spin_unlock(&vnode->wb_lock);
	_leave(" = %d [no keys]", ret);
	return ret;

found_key:
	refcount_inc(&wbk->usage);
	spin_unlock(&vnode->wb_lock);

	_debug("USE WB KEY %u", key_serial(wbk->key));

	ret = -ERESTARTSYS;
	if (afs_begin_vnode_operation(&fc, vnode, wbk->key)) {
		while (afs_select_fileserver(&fc)) {
			fc.cb_break = afs_calc_vnode_cb_break(vnode);
			afs_fs_store_data(&fc, mapping, first, last, offset, to);
		}

		afs_check_for_remote_deletion(&fc, fc.vnode);
		afs_vnode_commit_status(&fc, vnode, fc.cb_break);
		ret = afs_end_vnode_operation(&fc);
	}

	switch (ret) {
	case 0:
		afs_stat_v(vnode, n_stores);
		atomic_long_add((last * PAGE_SIZE + to) -
				(first * PAGE_SIZE + offset),
				&afs_v2net(vnode)->n_store_bytes);
		break;
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		_debug("next");
		spin_lock(&vnode->wb_lock);
		p = wbk->vnode_link.next;
		afs_put_wb_key(wbk);
		goto try_next_key;
	}

	afs_put_wb_key(wbk);
	_leave(" = %d", ret);
	return ret;
}

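/*
 * Note the key-rotation strategy above: writeback may have been instigated by
 * any of the users that have written to the file, so the vnode keeps a list
 * of candidate keys (vnode->wb_keys) and afs_store_data() retries the store
 * with the next key whenever the server rejects the current one with a
 * permission or key error.
 */
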
/*
 * Synchronously write back the locked page and any subsequent non-locked dirty
 * pages.
 */
static int afs_write_back_from_locked_page(struct address_space *mapping,
					   struct writeback_control *wbc,
					   struct page *primary_page,
					   pgoff_t final_page)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct page *pages[8], *page;
	unsigned long count, priv;
	unsigned n, offset, to, f, t;
	pgoff_t start, first, last;
	int loop, ret;

	_enter(",%lx", primary_page->index);

	count = 1;
	if (test_set_page_writeback(primary_page))
		BUG();

	/* Find all consecutive lockable dirty pages that have contiguous
	 * written regions, stopping when we find a page that is not
	 * immediately lockable, is not dirty or is missing, or we reach the
	 * end of the range.
	 */
	start = primary_page->index;
	priv = page_private(primary_page);
	offset = priv & AFS_PRIV_MAX;
	to = priv >> AFS_PRIV_SHIFT;
	trace_afs_page_dirty(vnode, tracepoint_string("store"),
			     primary_page->index, priv);

	WARN_ON(offset == to);
	if (offset == to)
		trace_afs_page_dirty(vnode, tracepoint_string("WARN"),
				     primary_page->index, priv);

	if (start >= final_page ||
	    (to < PAGE_SIZE && !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)))
		goto no_more;

	start++;
	do {
		_debug("more %lx [%lx]", start, count);
		n = final_page - start + 1;
		if (n > ARRAY_SIZE(pages))
			n = ARRAY_SIZE(pages);
		n = find_get_pages_contig(mapping, start, n, pages);
		_debug("fgpc %u", n);
		if (n == 0)
			goto no_more;
		if (pages[0]->index != start) {
			do {
				put_page(pages[--n]);
			} while (n > 0);
			goto no_more;
		}

		for (loop = 0; loop < n; loop++) {
			page = pages[loop];
			if (to != PAGE_SIZE &&
			    !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags))
				break;
			if (page->index > final_page)
				break;
			if (!trylock_page(page))
				break;
			if (!PageDirty(page) || PageWriteback(page)) {
				unlock_page(page);
				break;
			}

			priv = page_private(page);
			f = priv & AFS_PRIV_MAX;
			t = priv >> AFS_PRIV_SHIFT;
			if (f != 0 &&
			    !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)) {
				unlock_page(page);
				break;
			}
			to = t;

			trace_afs_page_dirty(vnode, tracepoint_string("store+"),
					     page->index, priv);

			if (!clear_page_dirty_for_io(page))
				BUG();
			if (test_set_page_writeback(page))
				BUG();
			unlock_page(page);
			put_page(page);
		}
		count += loop;
		if (loop < n) {
			for (; loop < n; loop++)
				put_page(pages[loop]);
			goto no_more;
		}

		start += loop;
	} while (start <= final_page && count < 65536);

no_more:
	/* We now have a contiguous set of dirty pages, each with writeback
	 * set; the first page is still locked at this point, but all the rest
	 * have been unlocked.
	 */
	unlock_page(primary_page);

	first = primary_page->index;
	last = first + count - 1;

	_debug("write back %lx[%u..] to %lx[..%u]", first, offset, last, to);

	ret = afs_store_data(mapping, first, last, offset, to);
	switch (ret) {
	case 0:
		ret = count;
		break;

	default:
		pr_notice("kAFS: Unexpected error from FS.StoreData %d\n", ret);
		/* Fall through */
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		afs_redirty_pages(wbc, mapping, first, last);
		mapping_set_error(mapping, ret);
		break;

	case -EDQUOT:
	case -ENOSPC:
		afs_redirty_pages(wbc, mapping, first, last);
		mapping_set_error(mapping, -ENOSPC);
		break;

	case -EROFS:
	case -EIO:
	case -EREMOTEIO:
	case -EFBIG:
	case -ENOENT:
	case -ENOMEDIUM:
	case -ENXIO:
		afs_kill_pages(mapping, first, last);
		mapping_set_error(mapping, ret);
		break;
	}

	_leave(" = %d", ret);
	return ret;
}

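/*
 * The expansion loop above batches up to 65536 pages into a single
 * FS.StoreData RPC (256MB with 4KB pages), walking forward in chunks of at
 * most ARRAY_SIZE(pages) == 8 pages at a time; the primary page's "from"
 * offset and the final page's "to" offset bound the byte range actually sent.
 */
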
/*
 * write a page back to the server
 * - the caller locked the page for us
 */
int afs_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	_enter("{%lx},", page->index);

	ret = afs_write_back_from_locked_page(page->mapping, wbc, page,
					      wbc->range_end >> PAGE_SHIFT);
	if (ret < 0) {
		_leave(" = %d", ret);
		return 0;
	}

	wbc->nr_to_write -= ret;

	_leave(" = 0");
	return 0;
}

/*
 * write a region of pages back to the server
 */
static int afs_writepages_region(struct address_space *mapping,
				 struct writeback_control *wbc,
				 pgoff_t index, pgoff_t end, pgoff_t *_next)
{
	struct page *page;
	int ret, n;

	_enter(",,%lx,%lx,", index, end);

	do {
		n = find_get_pages_range_tag(mapping, &index, end,
					     PAGECACHE_TAG_DIRTY, 1, &page);
		if (!n)
			break;

		_debug("wback %lx", page->index);

		/*
		 * at this point we hold neither the i_pages lock nor the
		 * page lock: the page may be truncated or invalidated
		 * (changing page->mapping to NULL), or even swizzled
		 * back from swapper_space to tmpfs file mapping
		 */
		ret = lock_page_killable(page);
		if (ret < 0) {
			put_page(page);
			_leave(" = %d", ret);
			return ret;
		}

		if (page->mapping != mapping || !PageDirty(page)) {
			unlock_page(page);
			put_page(page);
			continue;
		}

		if (PageWriteback(page)) {
			unlock_page(page);
			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);
			put_page(page);
			continue;
		}

		if (!clear_page_dirty_for_io(page))
			BUG();
		ret = afs_write_back_from_locked_page(mapping, wbc, page, end);
		put_page(page);
		if (ret < 0) {
			_leave(" = %d", ret);
			return ret;
		}

		wbc->nr_to_write -= ret;

		cond_resched();
	} while (index < end && wbc->nr_to_write > 0);

	*_next = index;
	_leave(" = 0 [%lx]", *_next);
	return 0;
}

/*
 * write some of the pending data back to the server
 */
int afs_writepages(struct address_space *mapping,
		   struct writeback_control *wbc)
{
	pgoff_t start, end, next;
	int ret;

	_enter("");

	if (wbc->range_cyclic) {
		start = mapping->writeback_index;
		end = -1;
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
		if (start > 0 && wbc->nr_to_write > 0 && ret == 0)
			ret = afs_writepages_region(mapping, wbc, 0, start,
						    &next);
		mapping->writeback_index = next;
	} else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
		end = (pgoff_t)(LLONG_MAX >> PAGE_SHIFT);
		ret = afs_writepages_region(mapping, wbc, 0, end, &next);
		if (wbc->nr_to_write > 0)
			mapping->writeback_index = next;
	} else {
		start = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
	}

	_leave(" = %d", ret);
	return ret;
}

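/*
 * In the range_cyclic case above, writeback resumes from wherever the last
 * pass stopped (mapping->writeback_index), runs to the end of the file and
 * then wraps around to sweep the region before the starting point, so that
 * repeated background writeback makes progress over the whole file.
 */
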
/*
 * completion of write to server
 */
void afs_pages_written_back(struct afs_vnode *vnode, struct afs_call *call)
{
	struct pagevec pv;
	unsigned long priv;
	unsigned count, loop;
	pgoff_t first = call->first, last = call->last;

	_enter("{%x:%u},{%lx-%lx}",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("done %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(vnode->vfs_inode.i_mapping,
					      first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			priv = page_private(pv.pages[loop]);
			trace_afs_page_dirty(vnode, tracepoint_string("clear"),
					     pv.pages[loop]->index, priv);
			set_page_private(pv.pages[loop], 0);
			end_page_writeback(pv.pages[loop]);
		}
		first += count;
		__pagevec_release(&pv);
	} while (first <= last);

	afs_prune_wb_keys(vnode);
	_leave("");
}

/*
 * write to an AFS file
 */
ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
	ssize_t result;
	size_t count = iov_iter_count(from);

	_enter("{%x.%u},{%zu},",
	       vnode->fid.vid, vnode->fid.vnode, count);

	if (IS_SWAPFILE(&vnode->vfs_inode)) {
		printk(KERN_INFO
		       "AFS: Attempt to write to active swap file!\n");
		return -EBUSY;
	}

	if (!count)
		return 0;

	result = generic_file_write_iter(iocb, from);

	_leave(" = %zd", result);
	return result;
}

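/*
 * generic_file_write_iter() does the actual buffered copy, calling back into
 * afs_write_begin() and afs_write_end() above through the address_space
 * operations for each page it touches.
 */
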
/*
 * flush any dirty pages for this process, and check for write errors.
 * - the return status from this call provides a reliable indication of
 *   whether any write errors occurred for this process.
 */
int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);

	_enter("{%x:%u},{n=%pD},%d",
	       vnode->fid.vid, vnode->fid.vnode, file,
	       datasync);

	return file_write_and_wait_range(file, start, end);
}

/*
 * notification that a previously read-only page is about to become writable
 * - if it returns an error, the caller will deliver a bus error signal
 */
vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
{
	struct file *file = vmf->vma->vm_file;
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);
	unsigned long priv;

	_enter("{{%x:%u}},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, vmf->page->index);

	sb_start_pagefault(inode->i_sb);

	/* Wait for the page to be written to the cache before we allow it to
	 * be modified.  We then assume the entire page will need writing back.
	 */
#ifdef CONFIG_AFS_FSCACHE
	fscache_wait_on_page_write(vnode->cache, vmf->page);
#endif

	if (PageWriteback(vmf->page) &&
	    wait_on_page_bit_killable(vmf->page, PG_writeback) < 0) {
		sb_end_pagefault(inode->i_sb);
		return VM_FAULT_RETRY;
	}

	if (lock_page_killable(vmf->page) < 0) {
		sb_end_pagefault(inode->i_sb);
		return VM_FAULT_RETRY;
	}

	/* We mustn't change page->private until writeback is complete as that
	 * details the portion of the page we need to write back and we might
	 * need to redirty the page if there's a problem.
	 */
	wait_on_page_writeback(vmf->page);

	priv = (unsigned long)PAGE_SIZE << AFS_PRIV_SHIFT; /* To */
	priv |= 0; /* From */
	trace_afs_page_dirty(vnode, tracepoint_string("mkwrite"),
			     vmf->page->index, priv);
	SetPagePrivate(vmf->page);
	set_page_private(vmf->page, priv);

	sb_end_pagefault(inode->i_sb);
	return VM_FAULT_LOCKED;
}

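/*
 * Note that afs_page_mkwrite() marks the whole page dirty (from = 0, to =
 * PAGE_SIZE): once userspace can scribble on the mapping there is no way to
 * tell which bytes it will touch, so the entire page must be written back.
 */
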
/*
 * Prune the keys cached for writeback.  vnode->wb_lock is taken internally,
 * so the caller must not hold it.
 */
void afs_prune_wb_keys(struct afs_vnode *vnode)
{
	LIST_HEAD(graveyard);
	struct afs_wb_key *wbk, *tmp;

	/* Discard unused keys */
	spin_lock(&vnode->wb_lock);

	if (!mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_WRITEBACK) &&
	    !mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_DIRTY)) {
		list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) {
			if (refcount_read(&wbk->usage) == 1)
				list_move(&wbk->vnode_link, &graveyard);
		}
	}

	spin_unlock(&vnode->wb_lock);

	while (!list_empty(&graveyard)) {
		wbk = list_entry(graveyard.next, struct afs_wb_key, vnode_link);
		list_del(&wbk->vnode_link);
		afs_put_wb_key(wbk);
	}
}

/*
 * Clean up a page during invalidation.
 */
int afs_launder_page(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	unsigned long priv;
	unsigned int f, t;
	int ret = 0;

	_enter("{%lx}", page->index);

	priv = page_private(page);
	if (clear_page_dirty_for_io(page)) {
		f = 0;
		t = PAGE_SIZE;
		if (PagePrivate(page)) {
			f = priv & AFS_PRIV_MAX;
			t = priv >> AFS_PRIV_SHIFT;
		}

		trace_afs_page_dirty(vnode, tracepoint_string("launder"),
				     page->index, priv);
		ret = afs_store_data(mapping, page->index, page->index, f, t);
	}

	trace_afs_page_dirty(vnode, tracepoint_string("laundered"),
			     page->index, priv);
	set_page_private(page, 0);
	ClearPagePrivate(page);

#ifdef CONFIG_AFS_FSCACHE
	if (PageFsCache(page)) {
		fscache_wait_on_page_write(vnode->cache, page);
		fscache_uncache_page(vnode->cache, page);
	}
#endif
	return ret;
}