// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/hfs/btree.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handle opening/closing btree
 */

#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/log2.h>

#include "btree.h"

/* Get a reference to a B*Tree and do some initial checks */
struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp keycmp)
{
	struct hfs_btree *tree;
	struct hfs_btree_header_rec *head;
	struct address_space *mapping;
	struct page *page;
	unsigned int size;

	tree = kzalloc(sizeof(*tree), GFP_KERNEL);
	if (!tree)
		return NULL;

	mutex_init(&tree->tree_lock);
	spin_lock_init(&tree->hash_lock);
	/* Set the correct compare function */
	tree->sb = sb;
	tree->cnid = id;
	tree->keycmp = keycmp;

	tree->inode = iget_locked(sb, id);
	if (!tree->inode)
		goto free_tree;
	BUG_ON(!(tree->inode->i_state & I_NEW));
	{
	struct hfs_mdb *mdb = HFS_SB(sb)->mdb;
	HFS_I(tree->inode)->flags = 0;
	mutex_init(&HFS_I(tree->inode)->extents_lock);
	switch (id) {
	case HFS_EXT_CNID:
		hfs_inode_read_fork(tree->inode, mdb->drXTExtRec, mdb->drXTFlSize,
				    mdb->drXTFlSize, be32_to_cpu(mdb->drXTClpSiz));
		if (HFS_I(tree->inode)->alloc_blocks >
		    HFS_I(tree->inode)->first_blocks) {
			pr_err("invalid btree extent records\n");
			unlock_new_inode(tree->inode);
			goto free_inode;
		}
		tree->inode->i_mapping->a_ops = &hfs_btree_aops;
		break;
	case HFS_CAT_CNID:
		hfs_inode_read_fork(tree->inode, mdb->drCTExtRec, mdb->drCTFlSize,
				    mdb->drCTFlSize, be32_to_cpu(mdb->drCTClpSiz));
		if (!HFS_I(tree->inode)->first_blocks) {
			pr_err("invalid btree extent records (0 size)\n");
			unlock_new_inode(tree->inode);
			goto free_inode;
		}
		tree->inode->i_mapping->a_ops = &hfs_btree_aops;
		break;
	default:
		BUG();
	}
	}
	unlock_new_inode(tree->inode);

	mapping = tree->inode->i_mapping;
	page = read_mapping_page(mapping, 0, NULL);
	if (IS_ERR(page))
		goto free_inode;

	/* Load the header */
	head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc));
	tree->root = be32_to_cpu(head->root);
	tree->leaf_count = be32_to_cpu(head->leaf_count);
	tree->leaf_head = be32_to_cpu(head->leaf_head);
	tree->leaf_tail = be32_to_cpu(head->leaf_tail);
	tree->node_count = be32_to_cpu(head->node_count);
	tree->free_nodes = be32_to_cpu(head->free_nodes);
	tree->attributes = be32_to_cpu(head->attributes);
	tree->node_size = be16_to_cpu(head->node_size);
	tree->max_key_len = be16_to_cpu(head->max_key_len);
	tree->depth = be16_to_cpu(head->depth);

	size = tree->node_size;
	if (!is_power_of_2(size))
		goto fail_page;
	if (!tree->node_count)
		goto fail_page;
	switch (id) {
	case HFS_EXT_CNID:
		if (tree->max_key_len != HFS_MAX_EXT_KEYLEN) {
			pr_err("invalid extent max_key_len %d\n",
			       tree->max_key_len);
			goto fail_page;
		}
		break;
	case HFS_CAT_CNID:
		if (tree->max_key_len != HFS_MAX_CAT_KEYLEN) {
			pr_err("invalid catalog max_key_len %d\n",
			       tree->max_key_len);
			goto fail_page;
		}
		break;
	default:
		BUG();
	}

	tree->node_size_shift = ffs(size) - 1;
	tree->pages_per_bnode = (tree->node_size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	kunmap(page);
	put_page(page);
	return tree;

fail_page:
	/* the header page is still mapped here; unmap before dropping it */
	kunmap(page);
	put_page(page);
free_inode:
	tree->inode->i_mapping->a_ops = &hfs_aops;
	iput(tree->inode);
free_tree:
	kfree(tree);
	return NULL;
}
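
/*
 * For context, a rough sketch of how the mount path uses this (cf.
 * hfs_mdb_get() in fs/hfs/mdb.c; abridged, not verbatim): the two
 * special CNIDs each get a tree with a matching key-compare callback.
 *
 *	HFS_SB(sb)->ext_tree = hfs_btree_open(sb, HFS_EXT_CNID, hfs_ext_keycmp);
 *	if (!HFS_SB(sb)->ext_tree)
 *		goto out;
 *	HFS_SB(sb)->cat_tree = hfs_btree_open(sb, HFS_CAT_CNID, hfs_cat_keycmp);
 *	if (!HFS_SB(sb)->cat_tree)
 *		goto out;
 */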

/* Release resources used by a btree */
void hfs_btree_close(struct hfs_btree *tree)
{
	struct hfs_bnode *node;
	int i;

	if (!tree)
		return;

	for (i = 0; i < NODE_HASH_SIZE; i++) {
		while ((node = tree->node_hash[i])) {
			tree->node_hash[i] = node->next_hash;
			if (atomic_read(&node->refcnt))
				pr_err("node %d:%d still has %d user(s)!\n",
				       node->tree->cnid, node->this,
				       atomic_read(&node->refcnt));
			hfs_bnode_free(node);
			tree->node_hash_cnt--;
		}
	}
	iput(tree->inode);
	kfree(tree);
}

/* Write the in-core copies of the header fields back to the header node */
void hfs_btree_write(struct hfs_btree *tree)
{
	struct hfs_btree_header_rec *head;
	struct hfs_bnode *node;
	struct page *page;

	node = hfs_bnode_find(tree, 0);
	if (IS_ERR(node))
		/* panic? */
		return;
	/* Load the header */
	page = node->page[0];
	head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc));

	head->root = cpu_to_be32(tree->root);
	head->leaf_count = cpu_to_be32(tree->leaf_count);
	head->leaf_head = cpu_to_be32(tree->leaf_head);
	head->leaf_tail = cpu_to_be32(tree->leaf_tail);
	head->node_count = cpu_to_be32(tree->node_count);
	head->free_nodes = cpu_to_be32(tree->free_nodes);
	head->attributes = cpu_to_be32(tree->attributes);
	head->depth = cpu_to_be16(tree->depth);

	kunmap(page);
	set_page_dirty(page);
	hfs_bnode_put(node);
}
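
/*
 * For context, a rough sketch of when this runs (cf. hfs_write_inode()
 * in fs/hfs/inode.c; abridged, not verbatim): writeback of the special
 * btree inodes is redirected here instead of normal inode writeback.
 *
 *	switch (inode->i_ino) {
 *	case HFS_EXT_CNID:
 *		hfs_btree_write(HFS_SB(sb)->ext_tree);
 *		return 0;
 *	case HFS_CAT_CNID:
 *		hfs_btree_write(HFS_SB(sb)->cat_tree);
 *		return 0;
 *	}
 */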

static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx)
{
	struct hfs_btree *tree = prev->tree;
	struct hfs_bnode *node;
	struct hfs_bnode_desc desc;
	__be32 cnid;

	node = hfs_bnode_create(tree, idx);
	if (IS_ERR(node))
		return node;

	/* out of free nodes: the reservation logic should prevent this */
	if (!tree->free_nodes)
		panic("FIXME!!!");
	tree->free_nodes--;
	prev->next = idx;
	cnid = cpu_to_be32(idx);
	hfs_bnode_write(prev, &cnid, offsetof(struct hfs_bnode_desc, next), 4);

	node->type = HFS_NODE_MAP;
	node->num_recs = 1;
	hfs_bnode_clear(node, 0, tree->node_size);
	desc.next = 0;
	desc.prev = 0;
	desc.type = HFS_NODE_MAP;
	desc.height = 0;
	desc.num_recs = cpu_to_be16(1);
	desc.reserved = 0;
	hfs_bnode_write(node, &desc, 0, sizeof(desc));
	/* bit 0 of the map covers this map node itself, so mark it used */
	hfs_bnode_write_u16(node, 14, 0x8000);
	/*
	 * record offset table at the end of the node: record 0 starts
	 * right after the 14-byte node descriptor; free space begins at
	 * node_size - 6
	 */
	hfs_bnode_write_u16(node, tree->node_size - 2, 14);
	hfs_bnode_write_u16(node, tree->node_size - 4, tree->node_size - 6);

	return node;
}
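
/*
 * Resulting layout of a fresh map node, for illustration (assuming the
 * common HFS node size of 512 bytes; byte offsets):
 *
 *	  0..13		node descriptor (struct hfs_bnode_desc, 14 bytes)
 *	 14..505	map record, one bit per node; bit 0 pre-set because
 *			it describes this map node itself
 *	506..507	free space (starts at node_size - 6)
 *	508..511	record offset table, filled from the end:
 *			u16 at node_size - 2 = 14 (start of record 0),
 *			u16 at node_size - 4 = node_size - 6 (free space)
 *
 * A 512-byte map node thus covers (512 - 20) * 8 = 3936 nodes before a
 * further map node must be chained through desc.next.
 */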

/* Make sure @tree has enough space for the @rsvd_nodes */
int hfs_bmap_reserve(struct hfs_btree *tree, int rsvd_nodes)
{
	struct inode *inode = tree->inode;
	u32 count;
	int res;

	while (tree->free_nodes < rsvd_nodes) {
		res = hfs_extend_file(inode);
		if (res)
			return res;
		HFS_I(inode)->phys_size = inode->i_size =
				(loff_t)HFS_I(inode)->alloc_blocks *
				HFS_SB(tree->sb)->alloc_blksz;
		HFS_I(inode)->fs_blocks = inode->i_size >>
					  tree->sb->s_blocksize_bits;
		inode_set_bytes(inode, inode->i_size);
		count = inode->i_size >> tree->node_size_shift;
		tree->free_nodes += count - tree->node_count;
		tree->node_count = count;
	}
	return 0;
}
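
/*
 * Illustrative arithmetic (assuming a 4096-byte allocation block and the
 * usual 512-byte node size, so node_size_shift = 9): a hfs_extend_file()
 * call that grows the tree file by one allocation block adds
 * 4096 >> 9 = 8 nodes, so free_nodes rises by at least 8 per iteration
 * until the reservation is satisfied.
 */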

/*
 * Allocate a node: scan the bmap chain for the first clear bit, set it,
 * and return the newly created bnode for that index.
 */
struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
{
	struct hfs_bnode *node, *next_node;
	struct page **pagep;
	u32 nidx, idx;
	unsigned off;
	u16 off16;
	u16 len;
	u8 *data, byte, m;
	int i, res;

	res = hfs_bmap_reserve(tree, 1);
	if (res)
		return ERR_PTR(res);

	nidx = 0;
	node = hfs_bnode_find(tree, nidx);
	if (IS_ERR(node))
		return node;
	/* record 2 of the header node is its map record */
	len = hfs_brec_lenoff(node, 2, &off16);
	off = off16;

	off += node->page_offset;
	pagep = node->page + (off >> PAGE_SHIFT);
	data = kmap(*pagep);
	off &= ~PAGE_MASK;
	idx = 0;

	for (;;) {
		while (len) {
			byte = data[off];
			if (byte != 0xff) {
				for (m = 0x80, i = 0; i < 8; m >>= 1, i++) {
					if (!(byte & m)) {
						idx += i;
						data[off] |= m;
						set_page_dirty(*pagep);
						kunmap(*pagep);
						tree->free_nodes--;
						mark_inode_dirty(tree->inode);
						hfs_bnode_put(node);
						return hfs_bnode_create(tree, idx);
					}
				}
			}
			if (++off >= PAGE_SIZE) {
				kunmap(*pagep);
				data = kmap(*++pagep);
				off = 0;
			}
			idx += 8;
			len--;
		}
		kunmap(*pagep);
		nidx = node->next;
		if (!nidx) {
			printk(KERN_DEBUG "create new bmap node...\n");
			next_node = hfs_bmap_new_bmap(node, idx);
		} else
			next_node = hfs_bnode_find(tree, nidx);
		hfs_bnode_put(node);
		if (IS_ERR(next_node))
			return next_node;
		node = next_node;

		/* in a dedicated map node the map record is record 0 */
		len = hfs_brec_lenoff(node, 0, &off16);
		off = off16;
		off += node->page_offset;
		pagep = node->page + (off >> PAGE_SHIFT);
		data = kmap(*pagep);
		off &= ~PAGE_MASK;
	}
}
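
/*
 * Worked example of the bitmap scan above: if the current map byte is
 * 0xf8 (binary 1111 1000), the loop walks m = 0x80, 0x40, ... and finds
 * the first clear bit at i = 5, so the node numbered idx + 5 is handed
 * out and the byte is rewritten as 0xf8 | 0x04 = 0xfc. A byte of 0xff
 * is skipped wholesale and idx simply advances by 8.
 */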

/* Clear the allocation bit for @node in the tree's bmap chain */
void hfs_bmap_free(struct hfs_bnode *node)
{
	struct hfs_btree *tree;
	struct page *page;
	u16 off, len;
	u32 nidx;
	u8 *data, byte, m;

	hfs_dbg(BNODE_MOD, "btree_free_node: %u\n", node->this);
	tree = node->tree;
	nidx = node->this;
	node = hfs_bnode_find(tree, 0);
	if (IS_ERR(node))
		return;
	len = hfs_brec_lenoff(node, 2, &off);
	/* walk the map chain until we reach the node covering @nidx */
	while (nidx >= len * 8) {
		u32 i;

		nidx -= len * 8;
		i = node->next;
		if (!i) {
			/* panic */;
			pr_crit("unable to free bnode %u. bmap not found!\n",
				node->this);
			hfs_bnode_put(node);
			return;
		}
		hfs_bnode_put(node);
		node = hfs_bnode_find(tree, i);
		if (IS_ERR(node))
			return;
		if (node->type != HFS_NODE_MAP) {
			/* panic */;
			pr_crit("invalid bmap found! (%u,%d)\n",
				node->this, node->type);
			hfs_bnode_put(node);
			return;
		}
		len = hfs_brec_lenoff(node, 0, &off);
	}
	off += node->page_offset + nidx / 8;
	page = node->page[off >> PAGE_SHIFT];
	data = kmap(page);
	off &= ~PAGE_MASK;
	m = 1 << (~nidx & 7);
	byte = data[off];
	if (!(byte & m)) {
		pr_crit("trying to free free bnode %u(%d)\n",
			node->this, node->type);
		kunmap(page);
		hfs_bnode_put(node);
		return;
	}
	data[off] = byte & ~m;
	set_page_dirty(page);
	kunmap(page);
	hfs_bnode_put(node);
	tree->free_nodes++;
	mark_inode_dirty(tree->inode);
}
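
/*
 * Note on the mask arithmetic above: map bits are numbered MSB-first,
 * so m = 1 << (~nidx & 7) is equivalent to 0x80 >> (nidx & 7). For
 * example, freeing node 13 lands in map byte 13 / 8 = 1, and with
 * ~13 & 7 = 2 the mask becomes 0x04, the sixth-highest bit of that byte.
 */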