transaction.h

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#ifndef BTRFS_TRANSACTION_H
#define BTRFS_TRANSACTION_H

#include <linux/refcount.h>
#include "btrfs_inode.h"
#include "delayed-ref.h"
#include "ctree.h"

enum btrfs_trans_state {
	TRANS_STATE_RUNNING = 0,
	TRANS_STATE_BLOCKED = 1,
	TRANS_STATE_COMMIT_START = 2,
	TRANS_STATE_COMMIT_DOING = 3,
	TRANS_STATE_UNBLOCKED = 4,
	TRANS_STATE_COMPLETED = 5,
	TRANS_STATE_MAX = 6,
};
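/*
 * Illustrative sketch (not part of the original header): the states above are
 * ordered, so callers can compare them numerically.  A hypothetical helper
 * that treats everything from BLOCKED up to (but not including) UNBLOCKED as
 * "blocked" might look like this:
 *
 *	static inline bool trans_is_blocked(const struct btrfs_transaction *trans)
 *	{
 *		return trans->state >= TRANS_STATE_BLOCKED &&
 *		       trans->state < TRANS_STATE_UNBLOCKED;
 *	}
 */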
#define BTRFS_TRANS_HAVE_FREE_BGS 0
#define BTRFS_TRANS_DIRTY_BG_RUN 1
#define BTRFS_TRANS_CACHE_ENOSPC 2

struct btrfs_transaction {
	u64 transid;
	/*
	 * Total number of external writers (USERSPACE/START/ATTACH) in this
	 * transaction; it must be zero before the transaction can be
	 * committed.
	 */
	atomic_t num_extwriters;
	/*
	 * Total number of writers in this transaction; it must be zero
	 * before the transaction can end.
	 */
	atomic_t num_writers;
	refcount_t use_count;
	atomic_t pending_ordered;

	unsigned long flags;

	/* Protected by fs_info->trans_lock when we want to change it. */
	enum btrfs_trans_state state;
	int aborted;
	struct list_head list;
	struct extent_io_tree dirty_pages;
	time64_t start_time;
	wait_queue_head_t writer_wait;
	wait_queue_head_t commit_wait;
	wait_queue_head_t pending_wait;
	struct list_head pending_snapshots;
	struct list_head pending_chunks;
	struct list_head switch_commits;
	struct list_head dirty_bgs;

	/*
	 * There is no explicit lock which protects io_bgs, rather its
	 * consistency is implied by the fact that all the sites which modify
	 * it do so under some form of transaction critical section, namely:
	 *
	 * - btrfs_start_dirty_block_groups - this function can only ever be
	 *   run by one of the transaction committers.  Refer to
	 *   BTRFS_TRANS_DIRTY_BG_RUN usage in btrfs_commit_transaction.
	 *
	 * - btrfs_write_dirty_block_groups - this is called by
	 *   commit_cowonly_roots from the transaction critical section
	 *   (TRANS_STATE_COMMIT_DOING).
	 *
	 * - btrfs_cleanup_dirty_bgs - called on transaction abort.
	 */
	struct list_head io_bgs;
	struct list_head dropped_roots;

	/*
	 * We need to make sure block group deletion doesn't race with
	 * free space cache writeout.  This mutex keeps them from stomping
	 * on each other.
	 */
	struct mutex cache_write_mutex;
	spinlock_t dirty_bgs_lock;
	unsigned int num_dirty_bgs;
	/* Protected by spin lock fs_info->unused_bgs_lock. */
	struct list_head deleted_bgs;
	spinlock_t dropped_roots_lock;
	struct btrfs_delayed_ref_root delayed_refs;
	struct btrfs_fs_info *fs_info;
};
#define __TRANS_FREEZABLE	(1U << 0)

#define __TRANS_START		(1U << 9)
#define __TRANS_ATTACH		(1U << 10)
#define __TRANS_JOIN		(1U << 11)
#define __TRANS_JOIN_NOLOCK	(1U << 12)
#define __TRANS_DUMMY		(1U << 13)
#define __TRANS_JOIN_NOSTART	(1U << 14)

#define TRANS_START		(__TRANS_START | __TRANS_FREEZABLE)
#define TRANS_ATTACH		(__TRANS_ATTACH)
#define TRANS_JOIN		(__TRANS_JOIN | __TRANS_FREEZABLE)
#define TRANS_JOIN_NOLOCK	(__TRANS_JOIN_NOLOCK)
#define TRANS_JOIN_NOSTART	(__TRANS_JOIN_NOSTART)

#define TRANS_EXTWRITERS	(__TRANS_START | __TRANS_ATTACH)

#define BTRFS_SEND_TRANS_STUB	((void *)1)
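/*
 * Illustrative note (not part of the original header): each handle type is a
 * bitmask built from the __TRANS_* bits above, so code can test a handle
 * against a composite mask.  For example, a hypothetical check for whether a
 * handle counts as an external writer could be written as:
 *
 *	if (trans->type & TRANS_EXTWRITERS)
 *		account_external_writer();	// placeholder helper
 */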
struct btrfs_trans_handle {
	u64 transid;
	u64 bytes_reserved;
	u64 chunk_bytes_reserved;
	unsigned long delayed_ref_updates;
	struct btrfs_transaction *transaction;
	struct btrfs_block_rsv *block_rsv;
	struct btrfs_block_rsv *orig_rsv;
	refcount_t use_count;
	unsigned int type;
	short aborted;
	bool adding_csums;
	bool allocating_chunk;
	bool can_flush_pending_bgs;
	bool reloc_reserved;
	bool sync;
	bool dirty;
	struct btrfs_root *root;
	struct btrfs_fs_info *fs_info;
	struct list_head new_bgs;
};
struct btrfs_pending_snapshot {
	struct dentry *dentry;
	struct inode *dir;
	struct btrfs_root *root;
	struct btrfs_root_item *root_item;
	struct btrfs_root *snap;
	struct btrfs_qgroup_inherit *inherit;
	struct btrfs_path *path;
	/* block reservation for the operation */
	struct btrfs_block_rsv block_rsv;
	/* extra metadata reservation for relocation */
	int error;
	bool readonly;
	struct list_head list;
};
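/*
 * Illustrative sketch (not part of the original header): a pending snapshot
 * is typically filled in by the snapshot ioctl path and queued on the running
 * transaction's pending_snapshots list, to be materialized at commit time.
 * Assuming the caller already holds a transaction handle, the queueing step
 * looks roughly like:
 *
 *	spin_lock(&fs_info->trans_lock);
 *	list_add(&pending_snapshot->list,
 *		 &trans->transaction->pending_snapshots);
 *	spin_unlock(&fs_info->trans_lock);
 */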
static inline void btrfs_set_inode_last_trans(struct btrfs_trans_handle *trans,
					      struct inode *inode)
{
	spin_lock(&BTRFS_I(inode)->lock);
	BTRFS_I(inode)->last_trans = trans->transaction->transid;
	BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
	BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->root->last_log_commit;
	spin_unlock(&BTRFS_I(inode)->lock);
}
/*
 * Make the qgroup code skip the given qgroupid, i.e. the old/new_roots for
 * the qgroup won't contain that qgroupid.
 */
static inline void btrfs_set_skip_qgroup(struct btrfs_trans_handle *trans,
					 u64 qgroupid)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	WARN_ON(delayed_refs->qgroup_to_skip);
	delayed_refs->qgroup_to_skip = qgroupid;
}

static inline void btrfs_clear_skip_qgroup(struct btrfs_trans_handle *trans)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	WARN_ON(!delayed_refs->qgroup_to_skip);
	delayed_refs->qgroup_to_skip = 0;
}
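/*
 * Illustrative sketch (not part of the original header): the two helpers
 * above are meant to be used as a bracketing pair around an operation whose
 * delayed refs should not be accounted to a particular qgroup, e.g.:
 *
 *	btrfs_set_skip_qgroup(trans, objectid);
 *	... queue delayed refs that must not touch this qgroup ...
 *	btrfs_clear_skip_qgroup(trans);
 *
 * Here "objectid" is just a placeholder for whatever qgroup id the caller
 * wants excluded.
 */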
int btrfs_end_transaction(struct btrfs_trans_handle *trans);
struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
						   unsigned int num_items);
struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
					struct btrfs_root *root,
					unsigned int num_items,
					int min_factor);
struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_attach_transaction_barrier(
					struct btrfs_root *root);
int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid);
void btrfs_add_dead_root(struct btrfs_root *root);
int btrfs_defrag_root(struct btrfs_root *root);
int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root);
int btrfs_commit_transaction(struct btrfs_trans_handle *trans);
int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
				   int wait_for_unblock);
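/*
 * Illustrative sketch (not part of the original header): a typical caller
 * starts a handle, performs its tree modifications, and then either ends the
 * handle (letting the transaction commit later) or commits explicitly.
 * Assuming "root" is the subvolume being modified:
 *
 *	struct btrfs_trans_handle *trans;
 *
 *	trans = btrfs_start_transaction(root, 1);
 *	if (IS_ERR(trans))
 *		return PTR_ERR(trans);
 *	... modify the trees under this handle ...
 *	return btrfs_end_transaction(trans);
 *
 * Replacing the final call with btrfs_commit_transaction(trans) forces the
 * whole transaction to commit before returning.
 */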
/*
 * Try to commit the transaction asynchronously, so this is safe to call
 * even while holding a spinlock.
 *
 * It's done by informing transaction_kthread to commit the transaction
 * without waiting for the commit interval.
 */
static inline void btrfs_commit_transaction_locksafe(
	struct btrfs_fs_info *fs_info)
{
	set_bit(BTRFS_FS_NEED_ASYNC_COMMIT, &fs_info->flags);
	wake_up_process(fs_info->transaction_kthread);
}
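/*
 * Illustrative sketch (not part of the original header): because the helper
 * above only sets a bit and wakes transaction_kthread, it may be called from
 * atomic context, e.g.:
 *
 *	spin_lock(&some_lock);
 *	... decide a commit is needed ...
 *	btrfs_commit_transaction_locksafe(fs_info);
 *	spin_unlock(&some_lock);
 *
 * "some_lock" is just a placeholder; the actual commit happens later in
 * transaction_kthread's context.
 */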
int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans);
int btrfs_should_end_transaction(struct btrfs_trans_handle *trans);
void btrfs_throttle(struct btrfs_fs_info *fs_info);
int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root);
int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info,
			       struct extent_io_tree *dirty_pages, int mark);
int btrfs_wait_extents(struct btrfs_fs_info *fs_info,
		       struct extent_io_tree *dirty_pages);
int btrfs_wait_tree_log_extents(struct btrfs_root *root, int mark);
int btrfs_transaction_blocked(struct btrfs_fs_info *info);
int btrfs_transaction_in_commit(struct btrfs_fs_info *info);
void btrfs_put_transaction(struct btrfs_transaction *transaction);
void btrfs_apply_pending_changes(struct btrfs_fs_info *fs_info);
void btrfs_add_dropped_root(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root);

#endif