btrfs_inode.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#ifndef BTRFS_INODE_H
#define BTRFS_INODE_H

#include <linux/hash.h>
#include "extent_map.h"
#include "extent_io.h"
#include "ordered-data.h"
#include "delayed-inode.h"

/*
 * ordered_data_close is set by truncate when a file that used
 * to have good data has been truncated to zero. When it is set
 * the btrfs file release call will add this inode to the
 * ordered operations list so that we make sure to flush out any
 * new data the application may have written before commit.
 */
enum {
        BTRFS_INODE_ORDERED_DATA_CLOSE,
        BTRFS_INODE_DUMMY,
        BTRFS_INODE_IN_DEFRAG,
        BTRFS_INODE_HAS_ASYNC_EXTENT,
        BTRFS_INODE_NEEDS_FULL_SYNC,
        BTRFS_INODE_COPY_EVERYTHING,
        BTRFS_INODE_IN_DELALLOC_LIST,
        BTRFS_INODE_READDIO_NEED_LOCK,
        BTRFS_INODE_HAS_PROPS,
        BTRFS_INODE_SNAPSHOT_FLUSH,
};
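
/*
 * These values are bit numbers for btrfs_inode::runtime_flags. A minimal
 * usage sketch (hypothetical call sites; the helpers later in this file use
 * set_bit()/clear_bit() on runtime_flags in the same way):
 *
 *      set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
 *      if (test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
 *                   &BTRFS_I(inode)->runtime_flags))
 *              ...
 */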

/* in memory btrfs inode */
struct btrfs_inode {
        /* which subvolume this inode belongs to */
        struct btrfs_root *root;
        /* key used to find this inode on disk. This is used by the code
         * to read in roots of subvolumes
         */
        struct btrfs_key location;
        /*
         * Lock for counters and all fields used to determine if the inode is in
         * the log or not (last_trans, last_sub_trans, last_log_commit,
         * logged_trans).
         */
        spinlock_t lock;
        /* the extent_tree has caches of all the extent mappings to disk */
        struct extent_map_tree extent_tree;
        /* the io_tree does range state (DIRTY, LOCKED etc) */
        struct extent_io_tree io_tree;
        /* special utility tree used to record which mirrors have already been
         * tried when checksums fail for a given block
         */
        struct extent_io_tree io_failure_tree;
        /* held while logging the inode in tree-log.c */
        struct mutex log_mutex;
        /* held while doing delalloc reservations */
        struct mutex delalloc_mutex;
        /* used to order data wrt metadata */
        struct btrfs_ordered_inode_tree ordered_tree;
        /* list of all the delalloc inodes in the FS. There are times we need
         * to write all the delalloc pages to disk, and this list is used
         * to walk them all.
         */
        struct list_head delalloc_inodes;
        /* node for the red-black tree that links inodes in subvolume root */
        struct rb_node rb_node;
        unsigned long runtime_flags;
        /* Keep track of who's O_SYNC/fsyncing currently */
        atomic_t sync_writers;
        /* full 64 bit generation number, struct vfs_inode doesn't have a big
         * enough field for this.
         */
        u64 generation;
        /*
         * transid of the trans_handle that last modified this inode
         */
        u64 last_trans;
        /*
         * transid that last logged this inode
         */
        u64 logged_trans;
        /*
         * log transid when this inode was last modified
         */
        int last_sub_trans;
        /* a local copy of root's last_log_commit */
        int last_log_commit;
        /* total number of bytes pending delalloc, used by stat to calc the
         * real block usage of the file
         */
        u64 delalloc_bytes;
        /*
         * Total number of bytes pending delalloc that fall within a file
         * range that is either a hole or beyond EOF (and no prealloc extent
         * exists in the range). This is always <= delalloc_bytes.
         */
        u64 new_delalloc_bytes;
        /*
         * total number of bytes pending defrag, used by stat to check whether
         * it needs COW.
         */
        u64 defrag_bytes;
        /*
         * the size of the file stored in the metadata on disk. data=ordered
         * means the in-memory i_size might be larger than the size on disk
         * because not all the blocks are written yet.
         */
        u64 disk_i_size;
        /*
         * if this is a directory then index_cnt is the counter for the index
         * number for new files that are created
         */
        u64 index_cnt;
        /* Cache the directory index number to speed the dir/file remove */
        u64 dir_index;
        /* the fsync log has some corner cases that mean we have to check
         * directories to see if any unlinks have been done before
         * the directory was logged. See tree-log.c for all the
         * details
         */
        u64 last_unlink_trans;
        /*
         * Number of bytes outstanding that are going to need csums. This is
         * used in ENOSPC accounting.
         */
        u64 csum_bytes;
        /* flags field from the on disk inode */
        u32 flags;
        /*
         * Counters to keep track of the number of extent items we may use due
         * to delalloc and such. outstanding_extents is the number of extent
         * items we think we'll end up using, and reserved_extents is the number
         * of extent items we've reserved metadata for.
         */
        unsigned outstanding_extents;
        struct btrfs_block_rsv block_rsv;
        /*
         * Cached values of inode properties
         */
        unsigned prop_compress;         /* per-file compression algorithm */
        /*
         * Force compression on the file using the defrag ioctl, could be
         * different from prop_compress and takes precedence if set
         */
        unsigned defrag_compress;
        struct btrfs_delayed_node *delayed_node;
        /* File creation time. */
        struct timespec64 i_otime;
        /* Hook into fs_info->delayed_iputs */
        struct list_head delayed_iput;
        /*
         * To avoid races between lockless (i_mutex not held) direct IO writes
         * and concurrent fsync requests. Direct IO writes must acquire read
         * access on this semaphore for creating an extent map and its
         * corresponding ordered extent. The fast fsync path must acquire write
         * access on this semaphore before it collects ordered extents and
         * extent maps.
         */
        struct rw_semaphore dio_sem;
        struct inode vfs_inode;
};
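
/*
 * A minimal usage sketch for dio_sem, inferred from the field comment above
 * (not code from this header): lockless direct IO writes take the semaphore
 * shared while creating the extent map and ordered extent, and the fast
 * fsync path takes it exclusive while collecting them, e.g.:
 *
 *      down_read(&BTRFS_I(inode)->dio_sem);
 *      ... create extent map and ordered extent for the write ...
 *      up_read(&BTRFS_I(inode)->dio_sem);
 *
 *      down_write(&BTRFS_I(inode)->dio_sem);
 *      ... collect ordered extents and extent maps for logging ...
 *      up_write(&BTRFS_I(inode)->dio_sem);
 */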

static inline struct btrfs_inode *BTRFS_I(const struct inode *inode)
{
        return container_of(inode, struct btrfs_inode, vfs_inode);
}

static inline unsigned long btrfs_inode_hash(u64 objectid,
                                             const struct btrfs_root *root)
{
        u64 h = objectid ^ (root->root_key.objectid * GOLDEN_RATIO_PRIME);

#if BITS_PER_LONG == 32
        h = (h >> 32) ^ (h & 0xffffffff);
#endif

        return (unsigned long)h;
}

static inline void btrfs_insert_inode_hash(struct inode *inode)
{
        unsigned long h = btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root);

        __insert_inode_hash(inode, h);
}

static inline u64 btrfs_ino(const struct btrfs_inode *inode)
{
        u64 ino = inode->location.objectid;

        /*
         * !ino: btree_inode
         * type == BTRFS_ROOT_ITEM_KEY: subvol dir
         */
        if (!ino || inode->location.type == BTRFS_ROOT_ITEM_KEY)
                ino = inode->vfs_inode.i_ino;
        return ino;
}

static inline void btrfs_i_size_write(struct btrfs_inode *inode, u64 size)
{
        i_size_write(&inode->vfs_inode, size);
        inode->disk_i_size = size;
}

static inline bool btrfs_is_free_space_inode(struct btrfs_inode *inode)
{
        struct btrfs_root *root = inode->root;

        if (root == root->fs_info->tree_root &&
            btrfs_ino(inode) != BTRFS_BTREE_INODE_OBJECTID)
                return true;
        if (inode->location.objectid == BTRFS_FREE_INO_OBJECTID)
                return true;
        return false;
}

static inline bool is_data_inode(struct inode *inode)
{
        return btrfs_ino(BTRFS_I(inode)) != BTRFS_BTREE_INODE_OBJECTID;
}

static inline void btrfs_mod_outstanding_extents(struct btrfs_inode *inode,
                                                 int mod)
{
        lockdep_assert_held(&inode->lock);
        inode->outstanding_extents += mod;
        if (btrfs_is_free_space_inode(inode))
                return;
        trace_btrfs_inode_mod_outstanding_extents(inode->root, btrfs_ino(inode),
                                                  mod);
}
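
/*
 * Illustrative caller sketch (not part of this header): the lockdep
 * assertion above means inode->lock must be held around the update, e.g.
 * when a new delalloc range adds one outstanding extent item:
 *
 *      spin_lock(&inode->lock);
 *      btrfs_mod_outstanding_extents(inode, 1);
 *      spin_unlock(&inode->lock);
 */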

/*
 * Called every time after doing a buffered, direct IO or memory mapped write.
 *
 * This is to ensure that if we write to a file that was previously fsynced in
 * the current transaction, then try to fsync it again in the same transaction,
 * we will know that there were changes in the file and that it needs to be
 * logged.
 */
static inline void btrfs_set_inode_last_sub_trans(struct btrfs_inode *inode)
{
        spin_lock(&inode->lock);
        inode->last_sub_trans = inode->root->log_transid;
        spin_unlock(&inode->lock);
}
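
/*
 * Example call-site sketch (hypothetical placement, based on the comment
 * above): a write path would invoke this once the file data has been
 * modified, so a later fsync in the same transaction sees the change:
 *
 *      ... buffered/direct/mmap write has dirtied the file ...
 *      btrfs_set_inode_last_sub_trans(BTRFS_I(inode));
 */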

static inline int btrfs_inode_in_log(struct btrfs_inode *inode, u64 generation)
{
        int ret = 0;

        spin_lock(&inode->lock);
        if (inode->logged_trans == generation &&
            inode->last_sub_trans <= inode->last_log_commit &&
            inode->last_sub_trans <= inode->root->last_log_commit) {
                /*
                 * After a ranged fsync we might have left some extent maps
                 * (that fall outside the fsync's range). So return false
                 * here if the list isn't empty, to make sure btrfs_log_inode()
                 * will be called and process those extent maps.
                 */
                smp_mb();
                if (list_empty(&inode->extent_tree.modified_extents))
                        ret = 1;
        }
        spin_unlock(&inode->lock);
        return ret;
}

#define BTRFS_DIO_ORIG_BIO_SUBMITTED    0x1

struct btrfs_dio_private {
        struct inode *inode;
        unsigned long flags;
        u64 logical_offset;
        u64 disk_bytenr;
        u64 bytes;
        void *private;
        /* number of bios pending for this dio */
        atomic_t pending_bios;
        /* IO errors */
        int errors;
        /* orig_bio is our btrfs_io_bio */
        struct bio *orig_bio;
        /* dio_bio came from fs/direct-io.c */
        struct bio *dio_bio;
        /*
         * The original bio may be split to several sub-bios, this is
         * done during endio of sub-bios
         */
        blk_status_t (*subio_endio)(struct inode *, struct btrfs_io_bio *,
                                    blk_status_t);
};

/*
 * Disable DIO read nolock optimization, so new dio readers will be forced
 * to grab i_mutex. It is used to avoid the endless truncate due to
 * nonlocked dio read.
 */
static inline void btrfs_inode_block_unlocked_dio(struct btrfs_inode *inode)
{
        set_bit(BTRFS_INODE_READDIO_NEED_LOCK, &inode->runtime_flags);
        smp_mb();
}

static inline void btrfs_inode_resume_unlocked_dio(struct btrfs_inode *inode)
{
        smp_mb__before_atomic();
        clear_bit(BTRFS_INODE_READDIO_NEED_LOCK, &inode->runtime_flags);
}
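
/*
 * Typical pairing sketch (assumed usage, not code from this header): the two
 * helpers above bracket an operation, such as a truncate, that must not race
 * with lockless DIO reads:
 *
 *      btrfs_inode_block_unlocked_dio(BTRFS_I(inode));
 *      inode_dio_wait(inode);
 *      ... perform the truncate ...
 *      btrfs_inode_resume_unlocked_dio(BTRFS_I(inode));
 */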

/* Array of bytes with variable length, hexadecimal format 0x1234 */
#define CSUM_FMT                        "0x%*phN"
#define CSUM_FMT_VALUE(size, bytes)     size, bytes

static inline void btrfs_print_data_csum_error(struct btrfs_inode *inode,
                u64 logical_start, u8 *csum, u8 *csum_expected, int mirror_num)
{
        struct btrfs_root *root = inode->root;
        struct btrfs_super_block *sb = root->fs_info->super_copy;
        const u16 csum_size = btrfs_super_csum_size(sb);

        /* Output minus objectid, which is more meaningful */
        if (root->root_key.objectid >= BTRFS_LAST_FREE_OBJECTID)
                btrfs_warn_rl(root->fs_info,
"csum failed root %lld ino %lld off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
                        root->root_key.objectid, btrfs_ino(inode),
                        logical_start,
                        CSUM_FMT_VALUE(csum_size, csum),
                        CSUM_FMT_VALUE(csum_size, csum_expected),
                        mirror_num);
        else
                btrfs_warn_rl(root->fs_info,
"csum failed root %llu ino %llu off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
                        root->root_key.objectid, btrfs_ino(inode),
                        logical_start,
                        CSUM_FMT_VALUE(csum_size, csum),
                        CSUM_FMT_VALUE(csum_size, csum_expected),
                        mirror_num);
}

#endif