quota_tree.c 18 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734
  1. /*
  2. * vfsv0 quota IO operations on file
  3. */
  4. #include <linux/errno.h>
  5. #include <linux/fs.h>
  6. #include <linux/mount.h>
  7. #include <linux/dqblk_v2.h>
  8. #include <linux/kernel.h>
  9. #include <linux/init.h>
  10. #include <linux/module.h>
  11. #include <linux/slab.h>
  12. #include <linux/quotaops.h>
  13. #include <asm/byteorder.h>
  14. #include "quota_tree.h"
  15. MODULE_AUTHOR("Jan Kara");
  16. MODULE_DESCRIPTION("Quota trie support");
  17. MODULE_LICENSE("GPL");
  18. #define __QUOTA_QT_PARANOIA
  19. static int __get_index(struct qtree_mem_dqinfo *info, qid_t id, int depth)
  20. {
  21. unsigned int epb = info->dqi_usable_bs >> 2;
  22. depth = info->dqi_qtree_depth - depth - 1;
  23. while (depth--)
  24. id /= epb;
  25. return id % epb;
  26. }
  27. static int get_index(struct qtree_mem_dqinfo *info, struct kqid qid, int depth)
  28. {
  29. qid_t id = from_kqid(&init_user_ns, qid);
  30. return __get_index(info, id, depth);
  31. }
  32. /* Number of entries in one blocks */
  33. static int qtree_dqstr_in_blk(struct qtree_mem_dqinfo *info)
  34. {
  35. return (info->dqi_usable_bs - sizeof(struct qt_disk_dqdbheader))
  36. / info->dqi_entry_size;
  37. }
  38. static char *getdqbuf(size_t size)
  39. {
  40. char *buf = kmalloc(size, GFP_NOFS);
  41. if (!buf)
  42. printk(KERN_WARNING
  43. "VFS: Not enough memory for quota buffers.\n");
  44. return buf;
  45. }
  46. static ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
  47. {
  48. struct super_block *sb = info->dqi_sb;
  49. memset(buf, 0, info->dqi_usable_bs);
  50. return sb->s_op->quota_read(sb, info->dqi_type, buf,
  51. info->dqi_usable_bs, blk << info->dqi_blocksize_bits);
  52. }
  53. static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
  54. {
  55. struct super_block *sb = info->dqi_sb;
  56. ssize_t ret;
  57. ret = sb->s_op->quota_write(sb, info->dqi_type, buf,
  58. info->dqi_usable_bs, blk << info->dqi_blocksize_bits);
  59. if (ret != info->dqi_usable_bs) {
  60. quota_error(sb, "dquota write failed");
  61. if (ret >= 0)
  62. ret = -EIO;
  63. }
  64. return ret;
  65. }
/*
 * Remove empty block from list and return it
 *
 * Pops a block off the on-disk free-block list when one is available;
 * otherwise grows the file by one block, pre-writing zeros to assure
 * the allocation succeeds.  Returns the block number on success or a
 * negative errno.
 */
static int get_free_dqblk(struct qtree_mem_dqinfo *info)
{
	char *buf = getdqbuf(info->dqi_usable_bs);
	struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
	int ret, blk;

	if (!buf)
		return -ENOMEM;
	if (info->dqi_free_blk) {
		blk = info->dqi_free_blk;
		ret = read_blk(info, blk, buf);
		if (ret < 0)
			goto out_buf;
		/* Next free block becomes the new list head */
		info->dqi_free_blk = le32_to_cpu(dh->dqdh_next_free);
	}
	else {
		memset(buf, 0, info->dqi_usable_bs);
		/* Assure block allocation... */
		ret = write_blk(info, info->dqi_blocks, buf);
		if (ret < 0)
			goto out_buf;
		blk = info->dqi_blocks++;
	}
	/* dqi_free_blk / dqi_blocks changed - info block must be rewritten */
	mark_info_dirty(info->dqi_sb, info->dqi_type);
	ret = blk;
out_buf:
	kfree(buf);
	return ret;
}
  95. /* Insert empty block to the list */
  96. static int put_free_dqblk(struct qtree_mem_dqinfo *info, char *buf, uint blk)
  97. {
  98. struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
  99. int err;
  100. dh->dqdh_next_free = cpu_to_le32(info->dqi_free_blk);
  101. dh->dqdh_prev_free = cpu_to_le32(0);
  102. dh->dqdh_entries = cpu_to_le16(0);
  103. err = write_blk(info, blk, buf);
  104. if (err < 0)
  105. return err;
  106. info->dqi_free_blk = blk;
  107. mark_info_dirty(info->dqi_sb, info->dqi_type);
  108. return 0;
  109. }
/*
 * Remove given block from the list of blocks with free entries
 *
 * The list is doubly linked on disk through the block headers; @buf holds
 * the already-read contents of @blk.  Returns 0 on success (even if the
 * final write of @blk itself fails - see comment below) or negative errno.
 */
static int remove_free_dqentry(struct qtree_mem_dqinfo *info, char *buf,
		uint blk)
{
	char *tmpbuf = getdqbuf(info->dqi_usable_bs);
	struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
	uint nextblk = le32_to_cpu(dh->dqdh_next_free);
	uint prevblk = le32_to_cpu(dh->dqdh_prev_free);
	int err;

	if (!tmpbuf)
		return -ENOMEM;
	/* Unlink from successor: its back-link skips over @blk */
	if (nextblk) {
		err = read_blk(info, nextblk, tmpbuf);
		if (err < 0)
			goto out_buf;
		((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_prev_free =
			dh->dqdh_prev_free;
		err = write_blk(info, nextblk, tmpbuf);
		if (err < 0)
			goto out_buf;
	}
	/* Unlink from predecessor, or move the list head if @blk was first */
	if (prevblk) {
		err = read_blk(info, prevblk, tmpbuf);
		if (err < 0)
			goto out_buf;
		((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_next_free =
			dh->dqdh_next_free;
		err = write_blk(info, prevblk, tmpbuf);
		if (err < 0)
			goto out_buf;
	} else {
		info->dqi_free_entry = nextblk;
		mark_info_dirty(info->dqi_sb, info->dqi_type);
	}
	kfree(tmpbuf);
	dh->dqdh_next_free = dh->dqdh_prev_free = cpu_to_le32(0);
	/* No matter whether write succeeds block is out of list */
	if (write_blk(info, blk, buf) < 0)
		quota_error(info->dqi_sb, "Can't write block (%u) "
			"with free entries", blk);
	return 0;
out_buf:
	kfree(tmpbuf);
	return err;
}
/*
 * Insert given block to the beginning of list with free entries
 *
 * @buf holds the contents of @blk, whose header links are rewritten to
 * make it the new head of the on-disk free-entry list.  Returns 0 on
 * success or negative errno.
 */
static int insert_free_dqentry(struct qtree_mem_dqinfo *info, char *buf,
		uint blk)
{
	char *tmpbuf = getdqbuf(info->dqi_usable_bs);
	struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
	int err;

	if (!tmpbuf)
		return -ENOMEM;
	/* New head points forward at the old head and has no predecessor */
	dh->dqdh_next_free = cpu_to_le32(info->dqi_free_entry);
	dh->dqdh_prev_free = cpu_to_le32(0);
	err = write_blk(info, blk, buf);
	if (err < 0)
		goto out_buf;
	/* Fix the old head's back-link to point at @blk */
	if (info->dqi_free_entry) {
		err = read_blk(info, info->dqi_free_entry, tmpbuf);
		if (err < 0)
			goto out_buf;
		((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_prev_free =
			cpu_to_le32(blk);
		err = write_blk(info, info->dqi_free_entry, tmpbuf);
		if (err < 0)
			goto out_buf;
	}
	kfree(tmpbuf);
	info->dqi_free_entry = blk;
	mark_info_dirty(info->dqi_sb, info->dqi_type);
	return 0;
out_buf:
	kfree(tmpbuf);
	return err;
}
  187. /* Is the entry in the block free? */
  188. int qtree_entry_unused(struct qtree_mem_dqinfo *info, char *disk)
  189. {
  190. int i;
  191. for (i = 0; i < info->dqi_entry_size; i++)
  192. if (disk[i])
  193. return 0;
  194. return 1;
  195. }
  196. EXPORT_SYMBOL(qtree_entry_unused);
  197. /* Find space for dquot */
  198. static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
  199. struct dquot *dquot, int *err)
  200. {
  201. uint blk, i;
  202. struct qt_disk_dqdbheader *dh;
  203. char *buf = getdqbuf(info->dqi_usable_bs);
  204. char *ddquot;
  205. *err = 0;
  206. if (!buf) {
  207. *err = -ENOMEM;
  208. return 0;
  209. }
  210. dh = (struct qt_disk_dqdbheader *)buf;
  211. if (info->dqi_free_entry) {
  212. blk = info->dqi_free_entry;
  213. *err = read_blk(info, blk, buf);
  214. if (*err < 0)
  215. goto out_buf;
  216. } else {
  217. blk = get_free_dqblk(info);
  218. if ((int)blk < 0) {
  219. *err = blk;
  220. kfree(buf);
  221. return 0;
  222. }
  223. memset(buf, 0, info->dqi_usable_bs);
  224. /* This is enough as the block is already zeroed and the entry
  225. * list is empty... */
  226. info->dqi_free_entry = blk;
  227. mark_info_dirty(dquot->dq_sb, dquot->dq_id.type);
  228. }
  229. /* Block will be full? */
  230. if (le16_to_cpu(dh->dqdh_entries) + 1 >= qtree_dqstr_in_blk(info)) {
  231. *err = remove_free_dqentry(info, buf, blk);
  232. if (*err < 0) {
  233. quota_error(dquot->dq_sb, "Can't remove block (%u) "
  234. "from entry free list", blk);
  235. goto out_buf;
  236. }
  237. }
  238. le16_add_cpu(&dh->dqdh_entries, 1);
  239. /* Find free structure in block */
  240. ddquot = buf + sizeof(struct qt_disk_dqdbheader);
  241. for (i = 0; i < qtree_dqstr_in_blk(info); i++) {
  242. if (qtree_entry_unused(info, ddquot))
  243. break;
  244. ddquot += info->dqi_entry_size;
  245. }
  246. #ifdef __QUOTA_QT_PARANOIA
  247. if (i == qtree_dqstr_in_blk(info)) {
  248. quota_error(dquot->dq_sb, "Data block full but it shouldn't");
  249. *err = -EIO;
  250. goto out_buf;
  251. }
  252. #endif
  253. *err = write_blk(info, blk, buf);
  254. if (*err < 0) {
  255. quota_error(dquot->dq_sb, "Can't write quota data block %u",
  256. blk);
  257. goto out_buf;
  258. }
  259. dquot->dq_off = (blk << info->dqi_blocksize_bits) +
  260. sizeof(struct qt_disk_dqdbheader) +
  261. i * info->dqi_entry_size;
  262. kfree(buf);
  263. return blk;
  264. out_buf:
  265. kfree(buf);
  266. return 0;
  267. }
/*
 * Insert reference to structure into the trie
 *
 * Recursively walks (and, if needed, allocates) tree blocks from *treeblk
 * downwards until the leaf level, where the dquot entry itself is placed.
 * Returns >= 0 on success, negative errno on failure.
 */
static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
		uint *treeblk, int depth)
{
	char *buf = getdqbuf(info->dqi_usable_bs);
	/* newson: this level had no child for the id yet;
	 * newact: *treeblk was freshly allocated by us and must be
	 * released again if the insertion below fails */
	int ret = 0, newson = 0, newact = 0;
	__le32 *ref;
	uint newblk;

	if (!buf)
		return -ENOMEM;
	if (!*treeblk) {
		/* No tree block at this level yet - allocate one */
		ret = get_free_dqblk(info);
		if (ret < 0)
			goto out_buf;
		*treeblk = ret;
		memset(buf, 0, info->dqi_usable_bs);
		newact = 1;
	} else {
		ret = read_blk(info, *treeblk, buf);
		if (ret < 0) {
			quota_error(dquot->dq_sb, "Can't read tree quota "
				    "block %u", *treeblk);
			goto out_buf;
		}
	}
	ref = (__le32 *)buf;
	newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
	if (!newblk)
		newson = 1;
	if (depth == info->dqi_qtree_depth - 1) {
#ifdef __QUOTA_QT_PARANOIA
		/* A leaf-level reference must not already exist on insert */
		if (newblk) {
			quota_error(dquot->dq_sb, "Inserting already present "
				    "quota entry (block %u)",
				    le32_to_cpu(ref[get_index(info,
						dquot->dq_id, depth)]));
			ret = -EIO;
			goto out_buf;
		}
#endif
		/* Last tree level - place the dquot entry in a data block */
		newblk = find_free_dqentry(info, dquot, &ret);
	} else {
		ret = do_insert_tree(info, dquot, &newblk, depth+1);
	}
	if (newson && ret >= 0) {
		/* Publish the new child's block number in this tree block */
		ref[get_index(info, dquot->dq_id, depth)] =
			cpu_to_le32(newblk);
		ret = write_blk(info, *treeblk, buf);
	} else if (newact && ret < 0) {
		/* Insertion below failed - give back the block we allocated */
		put_free_dqblk(info, buf, *treeblk);
	}
out_buf:
	kfree(buf);
	return ret;
}
  323. /* Wrapper for inserting quota structure into tree */
  324. static inline int dq_insert_tree(struct qtree_mem_dqinfo *info,
  325. struct dquot *dquot)
  326. {
  327. int tmp = QT_TREEOFF;
  328. #ifdef __QUOTA_QT_PARANOIA
  329. if (info->dqi_blocks <= QT_TREEOFF) {
  330. quota_error(dquot->dq_sb, "Quota tree root isn't allocated!");
  331. return -EIO;
  332. }
  333. #endif
  334. return do_insert_tree(info, dquot, &tmp, 0);
  335. }
/*
 * We don't have to be afraid of deadlocks as we never have quotas on quota
 * files...
 */
/*
 * Write a dquot to its slot in the quota file, inserting it into the tree
 * first if it has no on-disk position yet.  Returns 0 on success or a
 * negative errno.
 */
int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
{
	int type = dquot->dq_id.type;
	struct super_block *sb = dquot->dq_sb;
	ssize_t ret;
	char *ddquot = getdqbuf(info->dqi_entry_size);

	if (!ddquot)
		return -ENOMEM;
	/* dq_off is guarded by dqio_sem */
	if (!dquot->dq_off) {
		/* Not in the file yet - build the tree path and claim a slot */
		ret = dq_insert_tree(info, dquot);
		if (ret < 0) {
			quota_error(sb, "Error %zd occurred while creating "
				    "quota", ret);
			kfree(ddquot);
			return ret;
		}
	}
	/* Serialize in-memory usage/limits into the on-disk format */
	spin_lock(&dquot->dq_dqb_lock);
	info->dqi_ops->mem2disk_dqblk(ddquot, dquot);
	spin_unlock(&dquot->dq_dqb_lock);
	ret = sb->s_op->quota_write(sb, type, ddquot, info->dqi_entry_size,
				    dquot->dq_off);
	if (ret != info->dqi_entry_size) {
		quota_error(sb, "dquota write failed");
		/* Short write is reported as lack of space */
		if (ret >= 0)
			ret = -ENOSPC;
	} else {
		ret = 0;
	}
	dqstats_inc(DQST_WRITES);
	kfree(ddquot);
	return ret;
}
EXPORT_SYMBOL(qtree_write_dquot);
  375. /* Free dquot entry in data block */
  376. static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot,
  377. uint blk)
  378. {
  379. struct qt_disk_dqdbheader *dh;
  380. char *buf = getdqbuf(info->dqi_usable_bs);
  381. int ret = 0;
  382. if (!buf)
  383. return -ENOMEM;
  384. if (dquot->dq_off >> info->dqi_blocksize_bits != blk) {
  385. quota_error(dquot->dq_sb, "Quota structure has offset to "
  386. "other block (%u) than it should (%u)", blk,
  387. (uint)(dquot->dq_off >> info->dqi_blocksize_bits));
  388. goto out_buf;
  389. }
  390. ret = read_blk(info, blk, buf);
  391. if (ret < 0) {
  392. quota_error(dquot->dq_sb, "Can't read quota data block %u",
  393. blk);
  394. goto out_buf;
  395. }
  396. dh = (struct qt_disk_dqdbheader *)buf;
  397. le16_add_cpu(&dh->dqdh_entries, -1);
  398. if (!le16_to_cpu(dh->dqdh_entries)) { /* Block got free? */
  399. ret = remove_free_dqentry(info, buf, blk);
  400. if (ret >= 0)
  401. ret = put_free_dqblk(info, buf, blk);
  402. if (ret < 0) {
  403. quota_error(dquot->dq_sb, "Can't move quota data block "
  404. "(%u) to free list", blk);
  405. goto out_buf;
  406. }
  407. } else {
  408. memset(buf +
  409. (dquot->dq_off & ((1 << info->dqi_blocksize_bits) - 1)),
  410. 0, info->dqi_entry_size);
  411. if (le16_to_cpu(dh->dqdh_entries) ==
  412. qtree_dqstr_in_blk(info) - 1) {
  413. /* Insert will write block itself */
  414. ret = insert_free_dqentry(info, buf, blk);
  415. if (ret < 0) {
  416. quota_error(dquot->dq_sb, "Can't insert quota "
  417. "data block (%u) to free entry list", blk);
  418. goto out_buf;
  419. }
  420. } else {
  421. ret = write_blk(info, blk, buf);
  422. if (ret < 0) {
  423. quota_error(dquot->dq_sb, "Can't write quota "
  424. "data block %u", blk);
  425. goto out_buf;
  426. }
  427. }
  428. }
  429. dquot->dq_off = 0; /* Quota is now unattached */
  430. out_buf:
  431. kfree(buf);
  432. return ret;
  433. }
/*
 * Remove reference to dquot from tree
 *
 * Recursively descends from *blk, frees the leaf entry, and clears the
 * per-level references on the way back up.  A tree block that becomes
 * completely empty is put on the free block list and *blk is zeroed so
 * the parent clears its reference too (the root block is never freed).
 */
static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
		uint *blk, int depth)
{
	char *buf = getdqbuf(info->dqi_usable_bs);
	int ret = 0;
	uint newblk;
	__le32 *ref = (__le32 *)buf;

	if (!buf)
		return -ENOMEM;
	ret = read_blk(info, *blk, buf);
	if (ret < 0) {
		quota_error(dquot->dq_sb, "Can't read quota data block %u",
			    *blk);
		goto out_buf;
	}
	newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
	if (depth == info->dqi_qtree_depth - 1) {
		ret = free_dqentry(info, dquot, newblk);
		/* Leaf reference must always be cleared below */
		newblk = 0;
	} else {
		ret = remove_tree(info, dquot, &newblk, depth+1);
	}
	if (ret >= 0 && !newblk) {
		int i;

		/* Child is gone - drop its reference from this block */
		ref[get_index(info, dquot->dq_id, depth)] = cpu_to_le32(0);
		/* Block got empty? */
		for (i = 0; i < (info->dqi_usable_bs >> 2) && !ref[i]; i++)
			;
		/* Don't put the root block into the free block list */
		if (i == (info->dqi_usable_bs >> 2)
		    && *blk != QT_TREEOFF) {
			put_free_dqblk(info, buf, *blk);
			/* Signal the parent that this block is gone */
			*blk = 0;
		} else {
			ret = write_blk(info, *blk, buf);
			if (ret < 0)
				quota_error(dquot->dq_sb,
					    "Can't write quota tree block %u",
					    *blk);
		}
	}
out_buf:
	kfree(buf);
	return ret;
}
  480. /* Delete dquot from tree */
  481. int qtree_delete_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
  482. {
  483. uint tmp = QT_TREEOFF;
  484. if (!dquot->dq_off) /* Even not allocated? */
  485. return 0;
  486. return remove_tree(info, dquot, &tmp, 0);
  487. }
  488. EXPORT_SYMBOL(qtree_delete_dquot);
  489. /* Find entry in block */
  490. static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info,
  491. struct dquot *dquot, uint blk)
  492. {
  493. char *buf = getdqbuf(info->dqi_usable_bs);
  494. loff_t ret = 0;
  495. int i;
  496. char *ddquot;
  497. if (!buf)
  498. return -ENOMEM;
  499. ret = read_blk(info, blk, buf);
  500. if (ret < 0) {
  501. quota_error(dquot->dq_sb, "Can't read quota tree "
  502. "block %u", blk);
  503. goto out_buf;
  504. }
  505. ddquot = buf + sizeof(struct qt_disk_dqdbheader);
  506. for (i = 0; i < qtree_dqstr_in_blk(info); i++) {
  507. if (info->dqi_ops->is_id(ddquot, dquot))
  508. break;
  509. ddquot += info->dqi_entry_size;
  510. }
  511. if (i == qtree_dqstr_in_blk(info)) {
  512. quota_error(dquot->dq_sb,
  513. "Quota for id %u referenced but not present",
  514. from_kqid(&init_user_ns, dquot->dq_id));
  515. ret = -EIO;
  516. goto out_buf;
  517. } else {
  518. ret = (blk << info->dqi_blocksize_bits) + sizeof(struct
  519. qt_disk_dqdbheader) + i * info->dqi_entry_size;
  520. }
  521. out_buf:
  522. kfree(buf);
  523. return ret;
  524. }
/*
 * Find entry for given id in the tree
 *
 * Returns the file offset of the dquot entry, 0 when no entry exists for
 * the id, or a negative errno on I/O failure.
 */
static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info,
		struct dquot *dquot, uint blk, int depth)
{
	char *buf = getdqbuf(info->dqi_usable_bs);
	loff_t ret = 0;
	__le32 *ref = (__le32 *)buf;

	if (!buf)
		return -ENOMEM;
	ret = read_blk(info, blk, buf);
	if (ret < 0) {
		quota_error(dquot->dq_sb, "Can't read quota tree block %u",
			    blk);
		goto out_buf;
	}
	ret = 0;
	/* Follow the reference for this id one level down */
	blk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
	if (!blk)	/* No reference? */
		goto out_buf;
	if (depth < info->dqi_qtree_depth - 1)
		ret = find_tree_dqentry(info, dquot, blk, depth+1);
	else
		ret = find_block_dqentry(info, dquot, blk);
out_buf:
	kfree(buf);
	return ret;
}
  552. /* Find entry for given id in the tree - wrapper function */
  553. static inline loff_t find_dqentry(struct qtree_mem_dqinfo *info,
  554. struct dquot *dquot)
  555. {
  556. return find_tree_dqentry(info, dquot, QT_TREEOFF, 0);
  557. }
/*
 * Read a dquot's usage and limits from the quota file into memory.
 * A missing on-disk entry is not an error: the dquot is zeroed and
 * marked fake.  Returns 0 on success or a negative errno.
 */
int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
{
	int type = dquot->dq_id.type;
	struct super_block *sb = dquot->dq_sb;
	loff_t offset;
	char *ddquot;
	int ret = 0;

#ifdef __QUOTA_QT_PARANOIA
	/* Invalidated quota? */
	if (!sb_dqopt(dquot->dq_sb)->files[type]) {
		quota_error(sb, "Quota invalidated while reading!");
		return -EIO;
	}
#endif
	/* Do we know offset of the dquot entry in the quota file? */
	if (!dquot->dq_off) {
		offset = find_dqentry(info, dquot);
		if (offset <= 0) {	/* Entry not present? */
			if (offset < 0)
				quota_error(sb,"Can't read quota structure "
					    "for id %u",
					    from_kqid(&init_user_ns,
						      dquot->dq_id));
			/* Treat a missing entry as an all-zero fake dquot */
			dquot->dq_off = 0;
			set_bit(DQ_FAKE_B, &dquot->dq_flags);
			memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk));
			ret = offset;
			goto out;
		}
		dquot->dq_off = offset;
	}
	ddquot = getdqbuf(info->dqi_entry_size);
	if (!ddquot)
		return -ENOMEM;
	ret = sb->s_op->quota_read(sb, type, ddquot, info->dqi_entry_size,
				   dquot->dq_off);
	if (ret != info->dqi_entry_size) {
		if (ret >= 0)
			ret = -EIO;
		quota_error(sb, "Error while reading quota structure for id %u",
			    from_kqid(&init_user_ns, dquot->dq_id));
		/* Unreadable entry is also treated as a zeroed fake dquot */
		set_bit(DQ_FAKE_B, &dquot->dq_flags);
		memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk));
		kfree(ddquot);
		goto out;
	}
	spin_lock(&dquot->dq_dqb_lock);
	info->dqi_ops->disk2mem_dqblk(dquot, ddquot);
	/* An entry with no limits set at all is considered fake */
	if (!dquot->dq_dqb.dqb_bhardlimit &&
	    !dquot->dq_dqb.dqb_bsoftlimit &&
	    !dquot->dq_dqb.dqb_ihardlimit &&
	    !dquot->dq_dqb.dqb_isoftlimit)
		set_bit(DQ_FAKE_B, &dquot->dq_flags);
	spin_unlock(&dquot->dq_dqb_lock);
	kfree(ddquot);
out:
	dqstats_inc(DQST_READS);
	return ret;
}
EXPORT_SYMBOL(qtree_read_dquot);
  618. /* Check whether dquot should not be deleted. We know we are
  619. * the only one operating on dquot (thanks to dq_lock) */
  620. int qtree_release_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
  621. {
  622. if (test_bit(DQ_FAKE_B, &dquot->dq_flags) &&
  623. !(dquot->dq_dqb.dqb_curinodes | dquot->dq_dqb.dqb_curspace))
  624. return qtree_delete_dquot(info, dquot);
  625. return 0;
  626. }
  627. EXPORT_SYMBOL(qtree_release_dquot);
/*
 * Find the first id >= *id that has an allocated leaf reference under
 * tree block @blk at @depth.  Updates *id and returns 0 on success,
 * -ENOENT when this subtree holds no further id, or a negative errno.
 */
static int find_next_id(struct qtree_mem_dqinfo *info, qid_t *id,
			unsigned int blk, int depth)
{
	char *buf = getdqbuf(info->dqi_usable_bs);
	__le32 *ref = (__le32 *)buf;
	ssize_t ret;
	unsigned int epb = info->dqi_usable_bs >> 2;
	/* Number of ids covered by a single reference at this depth */
	unsigned int level_inc = 1;
	int i;

	if (!buf)
		return -ENOMEM;
	for (i = depth; i < info->dqi_qtree_depth - 1; i++)
		level_inc *= epb;
	ret = read_blk(info, blk, buf);
	if (ret < 0) {
		quota_error(info->dqi_sb,
			    "Can't read quota tree block %u", blk);
		goto out_buf;
	}
	/* Start scanning at the reference slot that covers *id */
	for (i = __get_index(info, *id, depth); i < epb; i++) {
		if (ref[i] == cpu_to_le32(0)) {
			/* Empty subtree - advance past all ids it covers */
			*id += level_inc;
			continue;
		}
		if (depth == info->dqi_qtree_depth - 1) {
			/* Leaf-level reference exists - *id is the answer */
			ret = 0;
			goto out_buf;
		}
		ret = find_next_id(info, id, le32_to_cpu(ref[i]), depth + 1);
		if (ret != -ENOENT)
			break;
	}
	if (i == epb) {
		/* Ran off the end of this block without a match */
		ret = -ENOENT;
		goto out_buf;
	}
out_buf:
	kfree(buf);
	return ret;
}
  668. int qtree_get_next_id(struct qtree_mem_dqinfo *info, struct kqid *qid)
  669. {
  670. qid_t id = from_kqid(&init_user_ns, *qid);
  671. int ret;
  672. ret = find_next_id(info, &id, QT_TREEOFF, 0);
  673. if (ret < 0)
  674. return ret;
  675. *qid = make_kqid(&init_user_ns, qid->type, id);
  676. return 0;
  677. }
  678. EXPORT_SYMBOL(qtree_get_next_id);