/* fs/minix/itree_common.c */
  1. // SPDX-License-Identifier: GPL-2.0
  2. /* Generic part */
/*
 * One step of a walk down the indirect-block tree: 'p' points at the
 * on-disk block-number slot (inside the inode's i_data array or inside
 * an indirect block's buffer), 'key' caches the value read from that
 * slot, and 'bh' is the buffer the slot lives in (NULL when the slot
 * is in the inode itself).
 */
typedef struct {
	block_t	*p;
	block_t	key;
	struct buffer_head *bh;
} Indirect;

/* Protects the pointer slots while chains are verified and spliced. */
static DEFINE_RWLOCK(pointers_lock);
  9. static inline void add_chain(Indirect *p, struct buffer_head *bh, block_t *v)
  10. {
  11. p->key = *(p->p = v);
  12. p->bh = bh;
  13. }
  14. static inline int verify_chain(Indirect *from, Indirect *to)
  15. {
  16. while (from <= to && from->key == *from->p)
  17. from++;
  18. return (from > to);
  19. }
  20. static inline block_t *block_end(struct buffer_head *bh)
  21. {
  22. return (block_t *)((char*)bh->b_data + bh->b_size);
  23. }
/*
 * get_branch - read the chain of indirect blocks leading to a data block
 * @inode: inode being mapped
 * @depth: number of levels in the path (from block_to_path)
 * @offsets: per-level slot indices along the path
 * @chain: caller-provided array filled in with one Indirect per level
 * @err: out-parameter; 0, -EIO (read failure) or -EAGAIN (chain changed)
 *
 * Returns NULL if the whole chain is present (chain[depth-1].key is the
 * data block), otherwise a pointer to the last valid step; the caller
 * must brelse() the buffers of chain[1..partial].
 */
static inline Indirect *get_branch(struct inode *inode,
					int depth,
					int *offsets,
					Indirect chain[DEPTH],
					int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;

	*err = 0;
	/* i_data is not going away, no lock needed */
	add_chain (chain, NULL, i_data(inode) + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		bh = sb_bread(sb, block_to_cpu(p->key));
		if (!bh)
			goto failure;
		/*
		 * The read slept; re-check under the lock that no step of
		 * the chain was freed/changed (e.g. by truncate) meanwhile.
		 */
		read_lock(&pointers_lock);
		if (!verify_chain(chain, p))
			goto changed;
		add_chain(++p, bh, (block_t *)bh->b_data + *++offsets);
		read_unlock(&pointers_lock);
		if (!p->key)
			goto no_block;
	}
	return NULL;

changed:
	read_unlock(&pointers_lock);
	brelse(bh);
	/* caller is expected to forget what it got and retry */
	*err = -EAGAIN;
	goto no_block;
failure:
	*err = -EIO;
no_block:
	return p;
}
  61. static int alloc_branch(struct inode *inode,
  62. int num,
  63. int *offsets,
  64. Indirect *branch)
  65. {
  66. int n = 0;
  67. int i;
  68. int parent = minix_new_block(inode);
  69. branch[0].key = cpu_to_block(parent);
  70. if (parent) for (n = 1; n < num; n++) {
  71. struct buffer_head *bh;
  72. /* Allocate the next block */
  73. int nr = minix_new_block(inode);
  74. if (!nr)
  75. break;
  76. branch[n].key = cpu_to_block(nr);
  77. bh = sb_getblk(inode->i_sb, parent);
  78. lock_buffer(bh);
  79. memset(bh->b_data, 0, bh->b_size);
  80. branch[n].bh = bh;
  81. branch[n].p = (block_t*) bh->b_data + offsets[n];
  82. *branch[n].p = branch[n].key;
  83. set_buffer_uptodate(bh);
  84. unlock_buffer(bh);
  85. mark_buffer_dirty_inode(bh, inode);
  86. parent = nr;
  87. }
  88. if (n == num)
  89. return 0;
  90. /* Allocation failed, free what we already allocated */
  91. for (i = 1; i < n; i++)
  92. bforget(branch[i].bh);
  93. for (i = 0; i < n; i++)
  94. minix_free_block(inode, block_to_cpu(branch[i].key));
  95. return -ENOSPC;
  96. }
/*
 * splice_branch - atomically attach a freshly allocated branch to the tree
 * @inode: inode being grown
 * @chain: the chain read by get_branch()
 * @where: first missing step, built by alloc_branch()
 * @num: number of new levels in the branch
 *
 * Returns 0 on success.  If the tree changed while we were allocating
 * (truncate raced with us, or someone filled the slot), frees the whole
 * new branch and returns -EAGAIN so the caller can retry.
 */
static inline int splice_branch(struct inode *inode,
				     Indirect chain[DEPTH],
				     Indirect *where,
				     int num)
{
	int i;

	write_lock(&pointers_lock);

	/* Verify that place we are splicing to is still there and vacant */
	if (!verify_chain(chain, where-1) || *where->p)
		goto changed;

	/* Single store makes the whole branch visible. */
	*where->p = where->key;

	write_unlock(&pointers_lock);

	/* We are done with atomic stuff, now do the rest of housekeeping */
	inode->i_ctime = current_time(inode);

	/* had we spliced it onto indirect block? */
	if (where->bh)
		mark_buffer_dirty_inode(where->bh, inode);

	mark_inode_dirty(inode);
	return 0;

changed:
	write_unlock(&pointers_lock);
	/* Throw away the branch we built: it was never made visible. */
	for (i = 1; i < num; i++)
		bforget(where[i].bh);
	for (i = 0; i < num; i++)
		minix_free_block(inode, block_to_cpu(where[i].key));
	return -EAGAIN;
}
/*
 * get_block - map a file-relative block to a device block (aops helper)
 * @inode: file's inode
 * @block: logical block number within the file
 * @bh: buffer head to map; set new if we allocated
 * @create: nonzero to allocate missing blocks
 *
 * Returns 0 on success or a negative errno.  On -EAGAIN from the
 * chain-walk (truncate raced with us) the whole lookup is retried.
 * Note the gotos deliberately jump between the if-bodies below; the
 * cleanup label releases every indirect buffer still held.
 */
static int get_block(struct inode * inode, sector_t block,
			struct buffer_head *bh, int create)
{
	int err = -EIO;
	int offsets[DEPTH];
	Indirect chain[DEPTH];
	Indirect *partial;
	int left;
	int depth = block_to_path(inode, block, offsets);

	if (depth == 0)
		goto out;

reread:
	partial = get_branch(inode, depth, offsets, chain, &err);

	/* Simplest case - block found, no allocation needed */
	if (!partial) {
got_it:
		map_bh(bh, inode->i_sb, block_to_cpu(chain[depth-1].key));
		/* Clean up and exit */
		partial = chain+depth-1; /* the whole chain */
		goto cleanup;
	}

	/* Next simple case - plain lookup or failed read of indirect block */
	if (!create || err == -EIO) {
cleanup:
		while (partial > chain) {
			brelse(partial->bh);
			partial--;
		}
out:
		return err;
	}

	/*
	 * Indirect block might be removed by truncate while we were
	 * reading it. Handling of that case (forget what we've got and
	 * reread) is taken out of the main path.
	 */
	if (err == -EAGAIN)
		goto changed;

	/* Allocate the levels missing below the last valid step. */
	left = (chain + depth) - partial;
	err = alloc_branch(inode, left, offsets+(partial-chain), partial);
	if (err)
		goto cleanup;

	if (splice_branch(inode, chain, partial, left) < 0)
		goto changed;

	set_buffer_new(bh);
	goto got_it;

changed:
	while (partial > chain) {
		brelse(partial->bh);
		partial--;
	}
	goto reread;
}
  177. static inline int all_zeroes(block_t *p, block_t *q)
  178. {
  179. while (p < q)
  180. if (*p++)
  181. return 0;
  182. return 1;
  183. }
/*
 * find_shared - locate the topmost node shared between the kept and
 * truncated parts of the tree
 * @inode: inode being truncated
 * @depth: full path depth for the first block past the new size
 * @offsets: per-level slot indices for that block
 * @chain: filled in with the walked chain
 * @top: out-parameter; set to the key of a fully-truncated subtree
 *       that was detached here (0 if none)
 *
 * Detaches the doomed subtree under the write lock so that concurrent
 * get_block() walks either see the old tree or none of it.  Returns the
 * deepest step whose buffer the caller must clean and release.
 */
static Indirect *find_shared(struct inode *inode,
				int depth,
				int offsets[DEPTH],
				Indirect chain[DEPTH],
				block_t *top)
{
	Indirect *partial, *p;
	int k, err;

	*top = 0;
	/* Skip trailing zero offsets: those whole subtrees die anyway. */
	for (k = depth; k > 1 && !offsets[k-1]; k--)
		;
	partial = get_branch(inode, k, offsets, chain, &err);

	write_lock(&pointers_lock);
	if (!partial)
		partial = chain + k-1;
	/* Chain ended early and the slot got filled meanwhile: bail out. */
	if (!partial->key && *partial->p) {
		write_unlock(&pointers_lock);
		goto no_top;
	}
	/* Walk back up past levels whose tail slots are all empty. */
	for (p=partial;p>chain && all_zeroes((block_t*)p->bh->b_data,p->p);p--)
		;
	if (p == chain + k - 1 && p > chain) {
		/* Boundary slot survives; step back so caller clears after it. */
		p->p--;
	} else {
		/* Detach the whole subtree rooted at *p->p; report it via top. */
		*top = *p->p;
		*p->p = 0;
	}
	write_unlock(&pointers_lock);

	/* Release buffers below the shared node; caller owns the rest. */
	while(partial > p)
	{
		brelse(partial->bh);
		partial--;
	}
no_top:
	return partial;
}
  220. static inline void free_data(struct inode *inode, block_t *p, block_t *q)
  221. {
  222. unsigned long nr;
  223. for ( ; p < q ; p++) {
  224. nr = block_to_cpu(*p);
  225. if (nr) {
  226. *p = 0;
  227. minix_free_block(inode, nr);
  228. }
  229. }
  230. }
  231. static void free_branches(struct inode *inode, block_t *p, block_t *q, int depth)
  232. {
  233. struct buffer_head * bh;
  234. unsigned long nr;
  235. if (depth--) {
  236. for ( ; p < q ; p++) {
  237. nr = block_to_cpu(*p);
  238. if (!nr)
  239. continue;
  240. *p = 0;
  241. bh = sb_bread(inode->i_sb, nr);
  242. if (!bh)
  243. continue;
  244. free_branches(inode, (block_t*)bh->b_data,
  245. block_end(bh), depth);
  246. bforget(bh);
  247. minix_free_block(inode, nr);
  248. mark_inode_dirty(inode);
  249. }
  250. } else
  251. free_data(inode, p, q);
  252. }
/*
 * truncate - release every block past the inode's (already updated) i_size
 * @inode: inode being truncated
 *
 * Zeroes the tail of the last kept page, detaches the partially-kept
 * branch via find_shared(), then frees the partial branch bottom-up and
 * finally the wholly-dead indirect subtrees.  Ordering matters: slots
 * are cleared (under find_shared's lock or before freeing) before their
 * blocks are released, so racing lookups never follow a freed block.
 */
static inline void truncate (struct inode * inode)
{
	struct super_block *sb = inode->i_sb;
	block_t *idata = i_data(inode);
	int offsets[DEPTH];
	Indirect chain[DEPTH];
	Indirect *partial;
	block_t nr = 0;
	int n;
	int first_whole;
	long iblock;

	/* First block past the new size (round i_size up to a block). */
	iblock = (inode->i_size + sb->s_blocksize -1) >> sb->s_blocksize_bits;
	block_truncate_page(inode->i_mapping, inode->i_size, get_block);

	n = block_to_path(inode, iblock, offsets);
	if (!n)
		return;

	if (n == 1) {
		/* Truncation point is in the direct blocks. */
		free_data(inode, idata+offsets[0], idata + DIRECT);
		first_whole = 0;
		goto do_indirects;
	}

	/* Index (relative to DIRECT) of the first fully-dead subtree root. */
	first_whole = offsets[0] + 1 - DIRECT;
	partial = find_shared(inode, n, offsets, chain, &nr);
	if (nr) {
		/* find_shared detached a subtree rooted at nr; free it. */
		if (partial == chain)
			mark_inode_dirty(inode);
		else
			mark_buffer_dirty_inode(partial->bh, inode);
		free_branches(inode, &nr, &nr+1, (chain+n-1) - partial);
	}
	/* Clear the ends of indirect blocks on the shared branch */
	while (partial > chain) {
		free_branches(inode, partial->p + 1, block_end(partial->bh),
				(chain+n-1) - partial);
		mark_buffer_dirty_inode(partial->bh, inode);
		brelse (partial->bh);
		partial--;
	}
do_indirects:
	/* Kill the remaining (whole) subtrees */
	while (first_whole < DEPTH-1) {
		nr = idata[DIRECT+first_whole];
		if (nr) {
			idata[DIRECT+first_whole] = 0;
			mark_inode_dirty(inode);
			free_branches(inode, &nr, &nr+1, first_whole+1);
		}
		first_whole++;
	}
	inode->i_mtime = inode->i_ctime = current_time(inode);
	mark_inode_dirty(inode);
}
  305. static inline unsigned nblocks(loff_t size, struct super_block *sb)
  306. {
  307. int k = sb->s_blocksize_bits - 10;
  308. unsigned blocks, res, direct = DIRECT, i = DEPTH;
  309. blocks = (size + sb->s_blocksize - 1) >> (BLOCK_SIZE_BITS + k);
  310. res = blocks;
  311. while (--i && blocks > direct) {
  312. blocks -= direct;
  313. blocks += sb->s_blocksize/sizeof(block_t) - 1;
  314. blocks /= sb->s_blocksize/sizeof(block_t);
  315. res += blocks;
  316. direct = 1;
  317. }
  318. return res;
  319. }