// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/hpfs/anode.c
 *
 *  Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999
 *
 *  handling HPFS anode tree that contains file allocation info
 */

#include "hpfs_fn.h"
  10. /* Find a sector in allocation tree */
  11. secno hpfs_bplus_lookup(struct super_block *s, struct inode *inode,
  12. struct bplus_header *btree, unsigned sec,
  13. struct buffer_head *bh)
  14. {
  15. anode_secno a = -1;
  16. struct anode *anode;
  17. int i;
  18. int c1, c2 = 0;
  19. go_down:
  20. if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, a, &c1, &c2, "hpfs_bplus_lookup")) return -1;
  21. if (bp_internal(btree)) {
  22. for (i = 0; i < btree->n_used_nodes; i++)
  23. if (le32_to_cpu(btree->u.internal[i].file_secno) > sec) {
  24. a = le32_to_cpu(btree->u.internal[i].down);
  25. brelse(bh);
  26. if (!(anode = hpfs_map_anode(s, a, &bh))) return -1;
  27. btree = &anode->btree;
  28. goto go_down;
  29. }
  30. hpfs_error(s, "sector %08x not found in internal anode %08x", sec, a);
  31. brelse(bh);
  32. return -1;
  33. }
  34. for (i = 0; i < btree->n_used_nodes; i++)
  35. if (le32_to_cpu(btree->u.external[i].file_secno) <= sec &&
  36. le32_to_cpu(btree->u.external[i].file_secno) + le32_to_cpu(btree->u.external[i].length) > sec) {
  37. a = le32_to_cpu(btree->u.external[i].disk_secno) + sec - le32_to_cpu(btree->u.external[i].file_secno);
  38. if (hpfs_sb(s)->sb_chk) if (hpfs_chk_sectors(s, a, 1, "data")) {
  39. brelse(bh);
  40. return -1;
  41. }
  42. if (inode) {
  43. struct hpfs_inode_info *hpfs_inode = hpfs_i(inode);
  44. hpfs_inode->i_file_sec = le32_to_cpu(btree->u.external[i].file_secno);
  45. hpfs_inode->i_disk_sec = le32_to_cpu(btree->u.external[i].disk_secno);
  46. hpfs_inode->i_n_secs = le32_to_cpu(btree->u.external[i].length);
  47. }
  48. brelse(bh);
  49. return a;
  50. }
  51. hpfs_error(s, "sector %08x not found in external anode %08x", sec, a);
  52. brelse(bh);
  53. return -1;
  54. }
  55. /* Add a sector to tree */
  56. secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsigned fsecno)
  57. {
  58. struct bplus_header *btree;
  59. struct anode *anode = NULL, *ranode = NULL;
  60. struct fnode *fnode;
  61. anode_secno a, na = -1, ra, up = -1;
  62. secno se;
  63. struct buffer_head *bh, *bh1, *bh2;
  64. int n;
  65. unsigned fs;
  66. int c1, c2 = 0;
  67. if (fnod) {
  68. if (!(fnode = hpfs_map_fnode(s, node, &bh))) return -1;
  69. btree = &fnode->btree;
  70. } else {
  71. if (!(anode = hpfs_map_anode(s, node, &bh))) return -1;
  72. btree = &anode->btree;
  73. }
  74. a = node;
  75. go_down:
  76. if ((n = btree->n_used_nodes - 1) < -!!fnod) {
  77. hpfs_error(s, "anode %08x has no entries", a);
  78. brelse(bh);
  79. return -1;
  80. }
  81. if (bp_internal(btree)) {
  82. a = le32_to_cpu(btree->u.internal[n].down);
  83. btree->u.internal[n].file_secno = cpu_to_le32(-1);
  84. mark_buffer_dirty(bh);
  85. brelse(bh);
  86. if (hpfs_sb(s)->sb_chk)
  87. if (hpfs_stop_cycles(s, a, &c1, &c2, "hpfs_add_sector_to_btree #1")) return -1;
  88. if (!(anode = hpfs_map_anode(s, a, &bh))) return -1;
  89. btree = &anode->btree;
  90. goto go_down;
  91. }
  92. if (n >= 0) {
  93. if (le32_to_cpu(btree->u.external[n].file_secno) + le32_to_cpu(btree->u.external[n].length) != fsecno) {
  94. hpfs_error(s, "allocated size %08x, trying to add sector %08x, %cnode %08x",
  95. le32_to_cpu(btree->u.external[n].file_secno) + le32_to_cpu(btree->u.external[n].length), fsecno,
  96. fnod?'f':'a', node);
  97. brelse(bh);
  98. return -1;
  99. }
  100. if (hpfs_alloc_if_possible(s, se = le32_to_cpu(btree->u.external[n].disk_secno) + le32_to_cpu(btree->u.external[n].length))) {
  101. le32_add_cpu(&btree->u.external[n].length, 1);
  102. mark_buffer_dirty(bh);
  103. brelse(bh);
  104. return se;
  105. }
  106. } else {
  107. if (fsecno) {
  108. hpfs_error(s, "empty file %08x, trying to add sector %08x", node, fsecno);
  109. brelse(bh);
  110. return -1;
  111. }
  112. se = !fnod ? node : (node + 16384) & ~16383;
  113. }
  114. if (!(se = hpfs_alloc_sector(s, se, 1, fsecno*ALLOC_M>ALLOC_FWD_MAX ? ALLOC_FWD_MAX : fsecno*ALLOC_M<ALLOC_FWD_MIN ? ALLOC_FWD_MIN : fsecno*ALLOC_M))) {
  115. brelse(bh);
  116. return -1;
  117. }
  118. fs = n < 0 ? 0 : le32_to_cpu(btree->u.external[n].file_secno) + le32_to_cpu(btree->u.external[n].length);
  119. if (!btree->n_free_nodes) {
  120. up = a != node ? le32_to_cpu(anode->up) : -1;
  121. if (!(anode = hpfs_alloc_anode(s, a, &na, &bh1))) {
  122. brelse(bh);
  123. hpfs_free_sectors(s, se, 1);
  124. return -1;
  125. }
  126. if (a == node && fnod) {
  127. anode->up = cpu_to_le32(node);
  128. anode->btree.flags |= BP_fnode_parent;
  129. anode->btree.n_used_nodes = btree->n_used_nodes;
  130. anode->btree.first_free = btree->first_free;
  131. anode->btree.n_free_nodes = 40 - anode->btree.n_used_nodes;
  132. memcpy(&anode->u, &btree->u, btree->n_used_nodes * 12);
  133. btree->flags |= BP_internal;
  134. btree->n_free_nodes = 11;
  135. btree->n_used_nodes = 1;
  136. btree->first_free = cpu_to_le16((char *)&(btree->u.internal[1]) - (char *)btree);
  137. btree->u.internal[0].file_secno = cpu_to_le32(-1);
  138. btree->u.internal[0].down = cpu_to_le32(na);
  139. mark_buffer_dirty(bh);
  140. } else if (!(ranode = hpfs_alloc_anode(s, /*a*/0, &ra, &bh2))) {
  141. brelse(bh);
  142. brelse(bh1);
  143. hpfs_free_sectors(s, se, 1);
  144. hpfs_free_sectors(s, na, 1);
  145. return -1;
  146. }
  147. brelse(bh);
  148. bh = bh1;
  149. btree = &anode->btree;
  150. }
  151. btree->n_free_nodes--; n = btree->n_used_nodes++;
  152. le16_add_cpu(&btree->first_free, 12);
  153. btree->u.external[n].disk_secno = cpu_to_le32(se);
  154. btree->u.external[n].file_secno = cpu_to_le32(fs);
  155. btree->u.external[n].length = cpu_to_le32(1);
  156. mark_buffer_dirty(bh);
  157. brelse(bh);
  158. if ((a == node && fnod) || na == -1) return se;
  159. c2 = 0;
  160. while (up != (anode_secno)-1) {
  161. struct anode *new_anode;
  162. if (hpfs_sb(s)->sb_chk)
  163. if (hpfs_stop_cycles(s, up, &c1, &c2, "hpfs_add_sector_to_btree #2")) return -1;
  164. if (up != node || !fnod) {
  165. if (!(anode = hpfs_map_anode(s, up, &bh))) return -1;
  166. btree = &anode->btree;
  167. } else {
  168. if (!(fnode = hpfs_map_fnode(s, up, &bh))) return -1;
  169. btree = &fnode->btree;
  170. }
  171. if (btree->n_free_nodes) {
  172. btree->n_free_nodes--; n = btree->n_used_nodes++;
  173. le16_add_cpu(&btree->first_free, 8);
  174. btree->u.internal[n].file_secno = cpu_to_le32(-1);
  175. btree->u.internal[n].down = cpu_to_le32(na);
  176. btree->u.internal[n-1].file_secno = cpu_to_le32(fs);
  177. mark_buffer_dirty(bh);
  178. brelse(bh);
  179. brelse(bh2);
  180. hpfs_free_sectors(s, ra, 1);
  181. if ((anode = hpfs_map_anode(s, na, &bh))) {
  182. anode->up = cpu_to_le32(up);
  183. if (up == node && fnod)
  184. anode->btree.flags |= BP_fnode_parent;
  185. else
  186. anode->btree.flags &= ~BP_fnode_parent;
  187. mark_buffer_dirty(bh);
  188. brelse(bh);
  189. }
  190. return se;
  191. }
  192. up = up != node ? le32_to_cpu(anode->up) : -1;
  193. btree->u.internal[btree->n_used_nodes - 1].file_secno = cpu_to_le32(/*fs*/-1);
  194. mark_buffer_dirty(bh);
  195. brelse(bh);
  196. a = na;
  197. if ((new_anode = hpfs_alloc_anode(s, a, &na, &bh))) {
  198. anode = new_anode;
  199. /*anode->up = cpu_to_le32(up != -1 ? up : ra);*/
  200. anode->btree.flags |= BP_internal;
  201. anode->btree.n_used_nodes = 1;
  202. anode->btree.n_free_nodes = 59;
  203. anode->btree.first_free = cpu_to_le16(16);
  204. anode->btree.u.internal[0].down = cpu_to_le32(a);
  205. anode->btree.u.internal[0].file_secno = cpu_to_le32(-1);
  206. mark_buffer_dirty(bh);
  207. brelse(bh);
  208. if ((anode = hpfs_map_anode(s, a, &bh))) {
  209. anode->up = cpu_to_le32(na);
  210. mark_buffer_dirty(bh);
  211. brelse(bh);
  212. }
  213. } else na = a;
  214. }
  215. if ((anode = hpfs_map_anode(s, na, &bh))) {
  216. anode->up = cpu_to_le32(node);
  217. if (fnod)
  218. anode->btree.flags |= BP_fnode_parent;
  219. mark_buffer_dirty(bh);
  220. brelse(bh);
  221. }
  222. if (!fnod) {
  223. if (!(anode = hpfs_map_anode(s, node, &bh))) {
  224. brelse(bh2);
  225. return -1;
  226. }
  227. btree = &anode->btree;
  228. } else {
  229. if (!(fnode = hpfs_map_fnode(s, node, &bh))) {
  230. brelse(bh2);
  231. return -1;
  232. }
  233. btree = &fnode->btree;
  234. }
  235. ranode->up = cpu_to_le32(node);
  236. memcpy(&ranode->btree, btree, le16_to_cpu(btree->first_free));
  237. if (fnod)
  238. ranode->btree.flags |= BP_fnode_parent;
  239. ranode->btree.n_free_nodes = (bp_internal(&ranode->btree) ? 60 : 40) - ranode->btree.n_used_nodes;
  240. if (bp_internal(&ranode->btree)) for (n = 0; n < ranode->btree.n_used_nodes; n++) {
  241. struct anode *unode;
  242. if ((unode = hpfs_map_anode(s, le32_to_cpu(ranode->u.internal[n].down), &bh1))) {
  243. unode->up = cpu_to_le32(ra);
  244. unode->btree.flags &= ~BP_fnode_parent;
  245. mark_buffer_dirty(bh1);
  246. brelse(bh1);
  247. }
  248. }
  249. btree->flags |= BP_internal;
  250. btree->n_free_nodes = fnod ? 10 : 58;
  251. btree->n_used_nodes = 2;
  252. btree->first_free = cpu_to_le16((char *)&btree->u.internal[2] - (char *)btree);
  253. btree->u.internal[0].file_secno = cpu_to_le32(fs);
  254. btree->u.internal[0].down = cpu_to_le32(ra);
  255. btree->u.internal[1].file_secno = cpu_to_le32(-1);
  256. btree->u.internal[1].down = cpu_to_le32(na);
  257. mark_buffer_dirty(bh);
  258. brelse(bh);
  259. mark_buffer_dirty(bh2);
  260. brelse(bh2);
  261. return se;
  262. }
  263. /*
  264. * Remove allocation tree. Recursion would look much nicer but
  265. * I want to avoid it because it can cause stack overflow.
  266. */
  267. void hpfs_remove_btree(struct super_block *s, struct bplus_header *btree)
  268. {
  269. struct bplus_header *btree1 = btree;
  270. struct anode *anode = NULL;
  271. anode_secno ano = 0, oano;
  272. struct buffer_head *bh;
  273. int level = 0;
  274. int pos = 0;
  275. int i;
  276. int c1, c2 = 0;
  277. int d1, d2;
  278. go_down:
  279. d2 = 0;
  280. while (bp_internal(btree1)) {
  281. ano = le32_to_cpu(btree1->u.internal[pos].down);
  282. if (level) brelse(bh);
  283. if (hpfs_sb(s)->sb_chk)
  284. if (hpfs_stop_cycles(s, ano, &d1, &d2, "hpfs_remove_btree #1"))
  285. return;
  286. if (!(anode = hpfs_map_anode(s, ano, &bh))) return;
  287. btree1 = &anode->btree;
  288. level++;
  289. pos = 0;
  290. }
  291. for (i = 0; i < btree1->n_used_nodes; i++)
  292. hpfs_free_sectors(s, le32_to_cpu(btree1->u.external[i].disk_secno), le32_to_cpu(btree1->u.external[i].length));
  293. go_up:
  294. if (!level) return;
  295. brelse(bh);
  296. if (hpfs_sb(s)->sb_chk)
  297. if (hpfs_stop_cycles(s, ano, &c1, &c2, "hpfs_remove_btree #2")) return;
  298. hpfs_free_sectors(s, ano, 1);
  299. oano = ano;
  300. ano = le32_to_cpu(anode->up);
  301. if (--level) {
  302. if (!(anode = hpfs_map_anode(s, ano, &bh))) return;
  303. btree1 = &anode->btree;
  304. } else btree1 = btree;
  305. for (i = 0; i < btree1->n_used_nodes; i++) {
  306. if (le32_to_cpu(btree1->u.internal[i].down) == oano) {
  307. if ((pos = i + 1) < btree1->n_used_nodes)
  308. goto go_down;
  309. else
  310. goto go_up;
  311. }
  312. }
  313. hpfs_error(s,
  314. "reference to anode %08x not found in anode %08x "
  315. "(probably bad up pointer)",
  316. oano, level ? ano : -1);
  317. if (level)
  318. brelse(bh);
  319. }
  320. /* Just a wrapper around hpfs_bplus_lookup .. used for reading eas */
  321. static secno anode_lookup(struct super_block *s, anode_secno a, unsigned sec)
  322. {
  323. struct anode *anode;
  324. struct buffer_head *bh;
  325. if (!(anode = hpfs_map_anode(s, a, &bh))) return -1;
  326. return hpfs_bplus_lookup(s, NULL, &anode->btree, sec, bh);
  327. }
  328. int hpfs_ea_read(struct super_block *s, secno a, int ano, unsigned pos,
  329. unsigned len, char *buf)
  330. {
  331. struct buffer_head *bh;
  332. char *data;
  333. secno sec;
  334. unsigned l;
  335. while (len) {
  336. if (ano) {
  337. if ((sec = anode_lookup(s, a, pos >> 9)) == -1)
  338. return -1;
  339. } else sec = a + (pos >> 9);
  340. if (hpfs_sb(s)->sb_chk) if (hpfs_chk_sectors(s, sec, 1, "ea #1")) return -1;
  341. if (!(data = hpfs_map_sector(s, sec, &bh, (len - 1) >> 9)))
  342. return -1;
  343. l = 0x200 - (pos & 0x1ff); if (l > len) l = len;
  344. memcpy(buf, data + (pos & 0x1ff), l);
  345. brelse(bh);
  346. buf += l; pos += l; len -= l;
  347. }
  348. return 0;
  349. }
  350. int hpfs_ea_write(struct super_block *s, secno a, int ano, unsigned pos,
  351. unsigned len, const char *buf)
  352. {
  353. struct buffer_head *bh;
  354. char *data;
  355. secno sec;
  356. unsigned l;
  357. while (len) {
  358. if (ano) {
  359. if ((sec = anode_lookup(s, a, pos >> 9)) == -1)
  360. return -1;
  361. } else sec = a + (pos >> 9);
  362. if (hpfs_sb(s)->sb_chk) if (hpfs_chk_sectors(s, sec, 1, "ea #2")) return -1;
  363. if (!(data = hpfs_map_sector(s, sec, &bh, (len - 1) >> 9)))
  364. return -1;
  365. l = 0x200 - (pos & 0x1ff); if (l > len) l = len;
  366. memcpy(data + (pos & 0x1ff), buf, l);
  367. mark_buffer_dirty(bh);
  368. brelse(bh);
  369. buf += l; pos += l; len -= l;
  370. }
  371. return 0;
  372. }
  373. void hpfs_ea_remove(struct super_block *s, secno a, int ano, unsigned len)
  374. {
  375. struct anode *anode;
  376. struct buffer_head *bh;
  377. if (ano) {
  378. if (!(anode = hpfs_map_anode(s, a, &bh))) return;
  379. hpfs_remove_btree(s, &anode->btree);
  380. brelse(bh);
  381. hpfs_free_sectors(s, a, 1);
  382. } else hpfs_free_sectors(s, a, (len + 511) >> 9);
  383. }
  384. /* Truncate allocation tree. Doesn't join anodes - I hope it doesn't matter */
  385. void hpfs_truncate_btree(struct super_block *s, secno f, int fno, unsigned secs)
  386. {
  387. struct fnode *fnode;
  388. struct anode *anode;
  389. struct buffer_head *bh;
  390. struct bplus_header *btree;
  391. anode_secno node = f;
  392. int i, j, nodes;
  393. int c1, c2 = 0;
  394. if (fno) {
  395. if (!(fnode = hpfs_map_fnode(s, f, &bh))) return;
  396. btree = &fnode->btree;
  397. } else {
  398. if (!(anode = hpfs_map_anode(s, f, &bh))) return;
  399. btree = &anode->btree;
  400. }
  401. if (!secs) {
  402. hpfs_remove_btree(s, btree);
  403. if (fno) {
  404. btree->n_free_nodes = 8;
  405. btree->n_used_nodes = 0;
  406. btree->first_free = cpu_to_le16(8);
  407. btree->flags &= ~BP_internal;
  408. mark_buffer_dirty(bh);
  409. } else hpfs_free_sectors(s, f, 1);
  410. brelse(bh);
  411. return;
  412. }
  413. while (bp_internal(btree)) {
  414. nodes = btree->n_used_nodes + btree->n_free_nodes;
  415. for (i = 0; i < btree->n_used_nodes; i++)
  416. if (le32_to_cpu(btree->u.internal[i].file_secno) >= secs) goto f;
  417. brelse(bh);
  418. hpfs_error(s, "internal btree %08x doesn't end with -1", node);
  419. return;
  420. f:
  421. for (j = i + 1; j < btree->n_used_nodes; j++)
  422. hpfs_ea_remove(s, le32_to_cpu(btree->u.internal[j].down), 1, 0);
  423. btree->n_used_nodes = i + 1;
  424. btree->n_free_nodes = nodes - btree->n_used_nodes;
  425. btree->first_free = cpu_to_le16(8 + 8 * btree->n_used_nodes);
  426. mark_buffer_dirty(bh);
  427. if (btree->u.internal[i].file_secno == cpu_to_le32(secs)) {
  428. brelse(bh);
  429. return;
  430. }
  431. node = le32_to_cpu(btree->u.internal[i].down);
  432. brelse(bh);
  433. if (hpfs_sb(s)->sb_chk)
  434. if (hpfs_stop_cycles(s, node, &c1, &c2, "hpfs_truncate_btree"))
  435. return;
  436. if (!(anode = hpfs_map_anode(s, node, &bh))) return;
  437. btree = &anode->btree;
  438. }
  439. nodes = btree->n_used_nodes + btree->n_free_nodes;
  440. for (i = 0; i < btree->n_used_nodes; i++)
  441. if (le32_to_cpu(btree->u.external[i].file_secno) + le32_to_cpu(btree->u.external[i].length) >= secs) goto ff;
  442. brelse(bh);
  443. return;
  444. ff:
  445. if (secs <= le32_to_cpu(btree->u.external[i].file_secno)) {
  446. hpfs_error(s, "there is an allocation error in file %08x, sector %08x", f, secs);
  447. if (i) i--;
  448. }
  449. else if (le32_to_cpu(btree->u.external[i].file_secno) + le32_to_cpu(btree->u.external[i].length) > secs) {
  450. hpfs_free_sectors(s, le32_to_cpu(btree->u.external[i].disk_secno) + secs -
  451. le32_to_cpu(btree->u.external[i].file_secno), le32_to_cpu(btree->u.external[i].length)
  452. - secs + le32_to_cpu(btree->u.external[i].file_secno)); /* I hope gcc optimizes this :-) */
  453. btree->u.external[i].length = cpu_to_le32(secs - le32_to_cpu(btree->u.external[i].file_secno));
  454. }
  455. for (j = i + 1; j < btree->n_used_nodes; j++)
  456. hpfs_free_sectors(s, le32_to_cpu(btree->u.external[j].disk_secno), le32_to_cpu(btree->u.external[j].length));
  457. btree->n_used_nodes = i + 1;
  458. btree->n_free_nodes = nodes - btree->n_used_nodes;
  459. btree->first_free = cpu_to_le16(8 + 12 * btree->n_used_nodes);
  460. mark_buffer_dirty(bh);
  461. brelse(bh);
  462. }
  463. /* Remove file or directory and it's eas - note that directory must
  464. be empty when this is called. */
  465. void hpfs_remove_fnode(struct super_block *s, fnode_secno fno)
  466. {
  467. struct buffer_head *bh;
  468. struct fnode *fnode;
  469. struct extended_attribute *ea;
  470. struct extended_attribute *ea_end;
  471. if (!(fnode = hpfs_map_fnode(s, fno, &bh))) return;
  472. if (!fnode_is_dir(fnode)) hpfs_remove_btree(s, &fnode->btree);
  473. else hpfs_remove_dtree(s, le32_to_cpu(fnode->u.external[0].disk_secno));
  474. ea_end = fnode_end_ea(fnode);
  475. for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea))
  476. if (ea_indirect(ea))
  477. hpfs_ea_remove(s, ea_sec(ea), ea_in_anode(ea), ea_len(ea));
  478. hpfs_ea_ext_remove(s, le32_to_cpu(fnode->ea_secno), fnode_in_anode(fnode), le32_to_cpu(fnode->ea_size_l));
  479. brelse(bh);
  480. hpfs_free_sectors(s, fno, 1);
  481. }