file.c

/*
 * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
 */

#include <linux/time.h>
#include "reiserfs.h"
#include "acl.h"
#include "xattr.h"
#include <linux/uaccess.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/quotaops.h>
/*
 * We pack the tails of files on file close, not at the time they are written.
 * This implies an unnecessary copy of the tail and an unnecessary indirect item
 * insertion/balancing, for files that are written in one write.
 * It avoids unnecessary tail packings (balances) for files that are written in
 * multiple writes and are small enough to have tails.
 *
 * file_release is called by the VFS layer when the file is closed.  If
 * this is the last open file descriptor, and the file is
 * small enough to have a tail, and the tail is currently in an
 * unformatted node, the tail is converted back into a direct item.
 *
 * We use reiserfs_truncate_file to pack the tail, since it already has
 * all the conditions coded.
 */
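
/*
 * Illustrative example: a small file created with a single write typically
 * has its tail sitting in an unformatted node at write time; on the final
 * close, reiserfs_file_release() below converts that tail back into a
 * direct item via reiserfs_truncate_file().
 */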

static int reiserfs_file_release(struct inode *inode, struct file *filp)
{
        struct reiserfs_transaction_handle th;
        int err;
        int jbegin_failure = 0;

        BUG_ON(!S_ISREG(inode->i_mode));
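
        /*
         * The openers count pairs with reiserfs_file_open(): if other
         * descriptors are still open, just drop our reference and return.
         * Only the last closer takes the tailpack mutex and considers
         * packing the tail.
         */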
        if (atomic_add_unless(&REISERFS_I(inode)->openers, -1, 1))
                return 0;

        mutex_lock(&REISERFS_I(inode)->tailpack);

        if (!atomic_dec_and_test(&REISERFS_I(inode)->openers)) {
                mutex_unlock(&REISERFS_I(inode)->tailpack);
                return 0;
        }

        /* fast out for when nothing needs to be done */
        if ((!(REISERFS_I(inode)->i_flags & i_pack_on_close_mask) ||
             !tail_has_to_be_packed(inode)) &&
            REISERFS_I(inode)->i_prealloc_count <= 0) {
                mutex_unlock(&REISERFS_I(inode)->tailpack);
                return 0;
        }

        reiserfs_write_lock(inode->i_sb);
        /*
         * freeing preallocation only involves relogging blocks that
         * are already in the current transaction.  preallocation gets
         * freed at the end of each transaction, so it is impossible for
         * us to log any additional blocks (including quota blocks)
         */
        err = journal_begin(&th, inode->i_sb, 1);
        if (err) {
                /*
                 * uh oh, we can't allow the inode to go away while there
                 * are still preallocation blocks pending.  Try to join the
                 * aborted transaction
                 */
                jbegin_failure = err;
                err = journal_join_abort(&th, inode->i_sb);

                if (err) {
                        /*
                         * hmpf, our choices here aren't good.  We can pin
                         * the inode, which will disallow unmount from ever
                         * happening, we can do nothing, which will corrupt
                         * random memory on unmount, or we can forcibly
                         * remove the file from the preallocation list, which
                         * will leak blocks on disk.  Let's pin the inode
                         * and let the admin know what is going on.
                         */
                        igrab(inode);
                        reiserfs_warning(inode->i_sb, "clm-9001",
                                         "pinning inode %lu because the "
                                         "preallocation can't be freed",
                                         inode->i_ino);
                        goto out;
                }
        }
        reiserfs_update_inode_transaction(inode);
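
        /*
         * Return any blocks that were preallocated for this file but never
         * used back to the free list before the inode goes away.
         */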
#ifdef REISERFS_PREALLOCATE
        reiserfs_discard_prealloc(&th, inode);
#endif
        err = journal_end(&th);

        /* copy back the error code from journal_begin */
        if (!err)
                err = jbegin_failure;

        if (!err &&
            (REISERFS_I(inode)->i_flags & i_pack_on_close_mask) &&
            tail_has_to_be_packed(inode)) {

                /*
                 * if regular file is released by last holder and it has been
                 * appended (we append by unformatted node only) or its direct
                 * item(s) had to be converted, then it may have to be
                 * indirect2direct converted
                 */
                err = reiserfs_truncate_file(inode, 0);
        }
out:
        reiserfs_write_unlock(inode->i_sb);
        mutex_unlock(&REISERFS_I(inode)->tailpack);
        return err;
}

static int reiserfs_file_open(struct inode *inode, struct file *file)
{
        int err = dquot_file_open(inode, file);

        /* somebody might be tailpacking on final close; wait for it */
        if (!atomic_inc_not_zero(&REISERFS_I(inode)->openers)) {
                mutex_lock(&REISERFS_I(inode)->tailpack);
                atomic_inc(&REISERFS_I(inode)->openers);
                mutex_unlock(&REISERFS_I(inode)->tailpack);
        }

        return err;
}

void reiserfs_vfs_truncate_file(struct inode *inode)
{
        mutex_lock(&REISERFS_I(inode)->tailpack);
        reiserfs_truncate_file(inode, 1);
        mutex_unlock(&REISERFS_I(inode)->tailpack);
}

/* Sync a reiserfs file. */

/*
 * FIXME: sync_mapping_buffers() never has anything to sync.  Can
 * be removed...
 */
static int reiserfs_sync_file(struct file *filp, loff_t start, loff_t end,
                              int datasync)
{
        struct inode *inode = filp->f_mapping->host;
        int err;
        int barrier_done;

        err = file_write_and_wait_range(filp, start, end);
        if (err)
                return err;

        inode_lock(inode);
        BUG_ON(!S_ISREG(inode->i_mode));
        err = sync_mapping_buffers(inode->i_mapping);
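
        /*
         * Commit the transaction covering this inode.  barrier_done < 0 is
         * an error; a value of 1 presumably means the commit already acted
         * as a barrier, so the explicit device flush below is only issued
         * otherwise, and only when barriers are enabled for this mount.
         */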
        reiserfs_write_lock(inode->i_sb);
        barrier_done = reiserfs_commit_for_inode(inode);
        reiserfs_write_unlock(inode->i_sb);
        if (barrier_done != 1 && reiserfs_barrier_flush(inode->i_sb))
                blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
        inode_unlock(inode);
        if (barrier_done < 0)
                return barrier_done;
        return (err < 0) ? -EIO : 0;
}

/* taken from fs/buffer.c:__block_commit_write */
int reiserfs_commit_page(struct inode *inode, struct page *page,
                         unsigned from, unsigned to)
{
        unsigned block_start, block_end;
        int partial = 0;
        unsigned blocksize;
        struct buffer_head *bh, *head;
        unsigned long i_size_index = inode->i_size >> PAGE_SHIFT;
        int new;
        int logit = reiserfs_file_data_log(inode);
        struct super_block *s = inode->i_sb;
        int bh_per_page = PAGE_SIZE / s->s_blocksize;
        struct reiserfs_transaction_handle th;
        int ret = 0;

        th.t_trans_id = 0;
        blocksize = i_blocksize(inode);
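
        /*
         * With data journalling (logit) every data buffer on this page is
         * logged, so the transaction is sized for one block per buffer head
         * plus one, presumably to cover the inode update.
         */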
        if (logit) {
                reiserfs_write_lock(s);
                ret = journal_begin(&th, s, bh_per_page + 1);
                if (ret)
                        goto drop_write_lock;
                reiserfs_update_inode_transaction(inode);
        }
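
        /*
         * Walk every buffer head on the page.  Buffers outside [from, to)
         * only influence the partial check; buffers inside the range are
         * marked uptodate and either logged (data=journal) or dirtied,
         * with data=ordered handling for new buffers and buffers past EOF.
         */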
        for (bh = head = page_buffers(page), block_start = 0;
             bh != head || !block_start;
             block_start = block_end, bh = bh->b_this_page) {

                new = buffer_new(bh);
                clear_buffer_new(bh);
                block_end = block_start + blocksize;
                if (block_end <= from || block_start >= to) {
                        if (!buffer_uptodate(bh))
                                partial = 1;
                } else {
                        set_buffer_uptodate(bh);
                        if (logit) {
                                reiserfs_prepare_for_journal(s, bh, 1);
                                journal_mark_dirty(&th, bh);
                        } else if (!buffer_dirty(bh)) {
                                mark_buffer_dirty(bh);
                                /*
                                 * do data=ordered on any page past the end
                                 * of file and any buffer marked BH_New.
                                 */
                                if (reiserfs_data_ordered(inode->i_sb) &&
                                    (new || page->index >= i_size_index)) {
                                        reiserfs_add_ordered_list(inode, bh);
                                }
                        }
                }
        }
        if (logit) {
                ret = journal_end(&th);
drop_write_lock:
                reiserfs_write_unlock(s);
        }
        /*
         * If this is a partial write which happened to make all buffers
         * uptodate then we can optimize away a bogus readpage() for
         * the next read().  Here we 'discover' whether the page went
         * uptodate as a result of this (potentially partial) write.
         */
        if (!partial)
                SetPageUptodate(page);
        return ret;
}

const struct file_operations reiserfs_file_operations = {
        .unlocked_ioctl = reiserfs_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl = reiserfs_compat_ioctl,
#endif
        .mmap = generic_file_mmap,
        .open = reiserfs_file_open,
        .release = reiserfs_file_release,
        .fsync = reiserfs_sync_file,
        .read_iter = generic_file_read_iter,
        .write_iter = generic_file_write_iter,
        .splice_read = generic_file_splice_read,
        .splice_write = iter_file_splice_write,
        .llseek = generic_file_llseek,
};

const struct inode_operations reiserfs_file_inode_operations = {
        .setattr = reiserfs_setattr,
        .listxattr = reiserfs_listxattr,
        .permission = reiserfs_permission,
        .get_acl = reiserfs_get_acl,
        .set_acl = reiserfs_set_acl,
};