/* fs/xfs/scrub/bitmap.c */
  1. // SPDX-License-Identifier: GPL-2.0+
  2. /*
  3. * Copyright (C) 2018 Oracle. All Rights Reserved.
  4. * Author: Darrick J. Wong <darrick.wong@oracle.com>
  5. */
  6. #include "xfs.h"
  7. #include "xfs_fs.h"
  8. #include "xfs_shared.h"
  9. #include "xfs_format.h"
  10. #include "xfs_trans_resv.h"
  11. #include "xfs_mount.h"
  12. #include "xfs_btree.h"
  13. #include "scrub/xfs_scrub.h"
  14. #include "scrub/scrub.h"
  15. #include "scrub/common.h"
  16. #include "scrub/trace.h"
  17. #include "scrub/repair.h"
  18. #include "scrub/bitmap.h"
  19. /*
  20. * Set a range of this bitmap. Caller must ensure the range is not set.
  21. *
  22. * This is the logical equivalent of bitmap |= mask(start, len).
  23. */
  24. int
  25. xfs_bitmap_set(
  26. struct xfs_bitmap *bitmap,
  27. uint64_t start,
  28. uint64_t len)
  29. {
  30. struct xfs_bitmap_range *bmr;
  31. bmr = kmem_alloc(sizeof(struct xfs_bitmap_range), KM_MAYFAIL);
  32. if (!bmr)
  33. return -ENOMEM;
  34. INIT_LIST_HEAD(&bmr->list);
  35. bmr->start = start;
  36. bmr->len = len;
  37. list_add_tail(&bmr->list, &bitmap->list);
  38. return 0;
  39. }
  40. /* Free everything related to this bitmap. */
  41. void
  42. xfs_bitmap_destroy(
  43. struct xfs_bitmap *bitmap)
  44. {
  45. struct xfs_bitmap_range *bmr;
  46. struct xfs_bitmap_range *n;
  47. for_each_xfs_bitmap_extent(bmr, n, bitmap) {
  48. list_del(&bmr->list);
  49. kmem_free(bmr);
  50. }
  51. }
  52. /* Set up a per-AG block bitmap. */
  53. void
  54. xfs_bitmap_init(
  55. struct xfs_bitmap *bitmap)
  56. {
  57. INIT_LIST_HEAD(&bitmap->list);
  58. }
  59. /* Compare two btree extents. */
  60. static int
  61. xfs_bitmap_range_cmp(
  62. void *priv,
  63. struct list_head *a,
  64. struct list_head *b)
  65. {
  66. struct xfs_bitmap_range *ap;
  67. struct xfs_bitmap_range *bp;
  68. ap = container_of(a, struct xfs_bitmap_range, list);
  69. bp = container_of(b, struct xfs_bitmap_range, list);
  70. if (ap->start > bp->start)
  71. return 1;
  72. if (ap->start < bp->start)
  73. return -1;
  74. return 0;
  75. }
  76. /*
  77. * Remove all the blocks mentioned in @sub from the extents in @bitmap.
  78. *
  79. * The intent is that callers will iterate the rmapbt for all of its records
  80. * for a given owner to generate @bitmap; and iterate all the blocks of the
  81. * metadata structures that are not being rebuilt and have the same rmapbt
  82. * owner to generate @sub. This routine subtracts all the extents
  83. * mentioned in sub from all the extents linked in @bitmap, which leaves
  84. * @bitmap as the list of blocks that are not accounted for, which we assume
  85. * are the dead blocks of the old metadata structure. The blocks mentioned in
  86. * @bitmap can be reaped.
  87. *
  88. * This is the logical equivalent of bitmap &= ~sub.
  89. */
  90. #define LEFT_ALIGNED (1 << 0)
  91. #define RIGHT_ALIGNED (1 << 1)
  92. int
  93. xfs_bitmap_disunion(
  94. struct xfs_bitmap *bitmap,
  95. struct xfs_bitmap *sub)
  96. {
  97. struct list_head *lp;
  98. struct xfs_bitmap_range *br;
  99. struct xfs_bitmap_range *new_br;
  100. struct xfs_bitmap_range *sub_br;
  101. uint64_t sub_start;
  102. uint64_t sub_len;
  103. int state;
  104. int error = 0;
  105. if (list_empty(&bitmap->list) || list_empty(&sub->list))
  106. return 0;
  107. ASSERT(!list_empty(&sub->list));
  108. list_sort(NULL, &bitmap->list, xfs_bitmap_range_cmp);
  109. list_sort(NULL, &sub->list, xfs_bitmap_range_cmp);
  110. /*
  111. * Now that we've sorted both lists, we iterate bitmap once, rolling
  112. * forward through sub and/or bitmap as necessary until we find an
  113. * overlap or reach the end of either list. We do not reset lp to the
  114. * head of bitmap nor do we reset sub_br to the head of sub. The
  115. * list traversal is similar to merge sort, but we're deleting
  116. * instead. In this manner we avoid O(n^2) operations.
  117. */
  118. sub_br = list_first_entry(&sub->list, struct xfs_bitmap_range,
  119. list);
  120. lp = bitmap->list.next;
  121. while (lp != &bitmap->list) {
  122. br = list_entry(lp, struct xfs_bitmap_range, list);
  123. /*
  124. * Advance sub_br and/or br until we find a pair that
  125. * intersect or we run out of extents.
  126. */
  127. while (sub_br->start + sub_br->len <= br->start) {
  128. if (list_is_last(&sub_br->list, &sub->list))
  129. goto out;
  130. sub_br = list_next_entry(sub_br, list);
  131. }
  132. if (sub_br->start >= br->start + br->len) {
  133. lp = lp->next;
  134. continue;
  135. }
  136. /* trim sub_br to fit the extent we have */
  137. sub_start = sub_br->start;
  138. sub_len = sub_br->len;
  139. if (sub_br->start < br->start) {
  140. sub_len -= br->start - sub_br->start;
  141. sub_start = br->start;
  142. }
  143. if (sub_len > br->len)
  144. sub_len = br->len;
  145. state = 0;
  146. if (sub_start == br->start)
  147. state |= LEFT_ALIGNED;
  148. if (sub_start + sub_len == br->start + br->len)
  149. state |= RIGHT_ALIGNED;
  150. switch (state) {
  151. case LEFT_ALIGNED:
  152. /* Coincides with only the left. */
  153. br->start += sub_len;
  154. br->len -= sub_len;
  155. break;
  156. case RIGHT_ALIGNED:
  157. /* Coincides with only the right. */
  158. br->len -= sub_len;
  159. lp = lp->next;
  160. break;
  161. case LEFT_ALIGNED | RIGHT_ALIGNED:
  162. /* Total overlap, just delete ex. */
  163. lp = lp->next;
  164. list_del(&br->list);
  165. kmem_free(br);
  166. break;
  167. case 0:
  168. /*
  169. * Deleting from the middle: add the new right extent
  170. * and then shrink the left extent.
  171. */
  172. new_br = kmem_alloc(sizeof(struct xfs_bitmap_range),
  173. KM_MAYFAIL);
  174. if (!new_br) {
  175. error = -ENOMEM;
  176. goto out;
  177. }
  178. INIT_LIST_HEAD(&new_br->list);
  179. new_br->start = sub_start + sub_len;
  180. new_br->len = br->start + br->len - new_br->start;
  181. list_add(&new_br->list, &br->list);
  182. br->len = sub_start - br->start;
  183. lp = lp->next;
  184. break;
  185. default:
  186. ASSERT(0);
  187. break;
  188. }
  189. }
  190. out:
  191. return error;
  192. }
  193. #undef LEFT_ALIGNED
  194. #undef RIGHT_ALIGNED
  195. /*
  196. * Record all btree blocks seen while iterating all records of a btree.
  197. *
  198. * We know that the btree query_all function starts at the left edge and walks
  199. * towards the right edge of the tree. Therefore, we know that we can walk up
  200. * the btree cursor towards the root; if the pointer for a given level points
  201. * to the first record/key in that block, we haven't seen this block before;
  202. * and therefore we need to remember that we saw this block in the btree.
  203. *
  204. * So if our btree is:
  205. *
  206. * 4
  207. * / | \
  208. * 1 2 3
  209. *
  210. * Pretend for this example that each leaf block has 100 btree records. For
  211. * the first btree record, we'll observe that bc_ptrs[0] == 1, so we record
  212. * that we saw block 1. Then we observe that bc_ptrs[1] == 1, so we record
  213. * block 4. The list is [1, 4].
  214. *
  215. * For the second btree record, we see that bc_ptrs[0] == 2, so we exit the
  216. * loop. The list remains [1, 4].
  217. *
  218. * For the 101st btree record, we've moved onto leaf block 2. Now
  219. * bc_ptrs[0] == 1 again, so we record that we saw block 2. We see that
  220. * bc_ptrs[1] == 2, so we exit the loop. The list is now [1, 4, 2].
  221. *
  222. * For the 102nd record, bc_ptrs[0] == 2, so we continue.
  223. *
  224. * For the 201st record, we've moved on to leaf block 3. bc_ptrs[0] == 1, so
  225. * we add 3 to the list. Now it is [1, 4, 2, 3].
  226. *
  227. * For the 300th record we just exit, with the list being [1, 4, 2, 3].
  228. */
  229. /*
  230. * Record all the buffers pointed to by the btree cursor. Callers already
  231. * engaged in a btree walk should call this function to capture the list of
  232. * blocks going from the leaf towards the root.
  233. */
  234. int
  235. xfs_bitmap_set_btcur_path(
  236. struct xfs_bitmap *bitmap,
  237. struct xfs_btree_cur *cur)
  238. {
  239. struct xfs_buf *bp;
  240. xfs_fsblock_t fsb;
  241. int i;
  242. int error;
  243. for (i = 0; i < cur->bc_nlevels && cur->bc_ptrs[i] == 1; i++) {
  244. xfs_btree_get_block(cur, i, &bp);
  245. if (!bp)
  246. continue;
  247. fsb = XFS_DADDR_TO_FSB(cur->bc_mp, bp->b_bn);
  248. error = xfs_bitmap_set(bitmap, fsb, 1);
  249. if (error)
  250. return error;
  251. }
  252. return 0;
  253. }
  254. /* Collect a btree's block in the bitmap. */
  255. STATIC int
  256. xfs_bitmap_collect_btblock(
  257. struct xfs_btree_cur *cur,
  258. int level,
  259. void *priv)
  260. {
  261. struct xfs_bitmap *bitmap = priv;
  262. struct xfs_buf *bp;
  263. xfs_fsblock_t fsbno;
  264. xfs_btree_get_block(cur, level, &bp);
  265. if (!bp)
  266. return 0;
  267. fsbno = XFS_DADDR_TO_FSB(cur->bc_mp, bp->b_bn);
  268. return xfs_bitmap_set(bitmap, fsbno, 1);
  269. }
  270. /* Walk the btree and mark the bitmap wherever a btree block is found. */
  271. int
  272. xfs_bitmap_set_btblocks(
  273. struct xfs_bitmap *bitmap,
  274. struct xfs_btree_cur *cur)
  275. {
  276. return xfs_btree_visit_blocks(cur, xfs_bitmap_collect_btblock, bitmap);
  277. }