quota_global.c

/*
 * Implementation of operations over global quota file
 */
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/quota.h>
#include <linux/quotaops.h>
#include <linux/dqblk_qtree.h>
#include <linux/jiffies.h>
#include <linux/writeback.h>
#include <linux/workqueue.h>
#include <linux/llist.h>

#include <cluster/masklog.h>

#include "ocfs2_fs.h"
#include "ocfs2.h"
#include "alloc.h"
#include "blockcheck.h"
#include "inode.h"
#include "journal.h"
#include "file.h"
#include "sysfile.h"
#include "dlmglue.h"
#include "uptodate.h"
#include "super.h"
#include "buffer_head_io.h"
#include "quota.h"
#include "ocfs2_trace.h"

/*
 * Locking of quotas with OCFS2 is rather complex. Here are rules that
 * should be obeyed by all the functions:
 * - any write of quota structure (either to local or global file) is protected
 *   by dqio_mutex or dquot->dq_lock.
 * - any modification of global quota file holds inode cluster lock, i_mutex,
 *   and ip_alloc_sem of the global quota file (achieved by
 *   ocfs2_lock_global_qf). It also has to hold qinfo_lock.
 * - an allocation of new blocks for local quota file is protected by
 *   its ip_alloc_sem
 *
 * A rough sketch of locking dependencies (lf = local file, gf = global file):
 * Normal filesystem operation:
 *   start_trans -> dqio_mutex -> write to lf
 * Syncing of local and global file:
 *   ocfs2_lock_global_qf -> start_trans -> dqio_mutex -> qinfo_lock ->
 *     write to gf
 *     -> write to lf
 * Acquire dquot for the first time:
 *   dq_lock -> ocfs2_lock_global_qf -> qinfo_lock -> read from gf
 *           -> alloc space for gf
 *           -> start_trans -> qinfo_lock -> write to gf
 *           -> ip_alloc_sem of lf -> alloc space for lf
 *           -> write to lf
 * Release last reference to dquot:
 *   dq_lock -> ocfs2_lock_global_qf -> start_trans -> qinfo_lock -> write to gf
 *           -> write to lf
 * Note that all the above operations also hold the inode cluster lock of lf.
 * Recovery:
 *   inode cluster lock of recovered lf
 *     -> read bitmaps -> ip_alloc_sem of lf
 *     -> ocfs2_lock_global_qf -> start_trans -> dqio_mutex -> qinfo_lock ->
 *        write to gf
 */

static void qsync_work_fn(struct work_struct *work);

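/*
 * Copy on-disk global dquot data into the in-memory dquot, skipping any
 * fields the administrator has set locally since the last sync.
 */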
static void ocfs2_global_disk2memdqb(struct dquot *dquot, void *dp)
{
        struct ocfs2_global_disk_dqblk *d = dp;
        struct mem_dqblk *m = &dquot->dq_dqb;

        /* Update from disk only entries not set by the admin */
        if (!test_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags)) {
                m->dqb_ihardlimit = le64_to_cpu(d->dqb_ihardlimit);
                m->dqb_isoftlimit = le64_to_cpu(d->dqb_isoftlimit);
        }
        if (!test_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags))
                m->dqb_curinodes = le64_to_cpu(d->dqb_curinodes);
        if (!test_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags)) {
                m->dqb_bhardlimit = le64_to_cpu(d->dqb_bhardlimit);
                m->dqb_bsoftlimit = le64_to_cpu(d->dqb_bsoftlimit);
        }
        if (!test_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags))
                m->dqb_curspace = le64_to_cpu(d->dqb_curspace);
        if (!test_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags))
                m->dqb_btime = le64_to_cpu(d->dqb_btime);
        if (!test_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags))
                m->dqb_itime = le64_to_cpu(d->dqb_itime);
        OCFS2_DQUOT(dquot)->dq_use_count = le32_to_cpu(d->dqb_use_count);
}

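/*
 * Fill the on-disk global dquot structure from the in-memory dquot before
 * it is written back to the global quota file.
 */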
static void ocfs2_global_mem2diskdqb(void *dp, struct dquot *dquot)
{
        struct ocfs2_global_disk_dqblk *d = dp;
        struct mem_dqblk *m = &dquot->dq_dqb;

        d->dqb_id = cpu_to_le32(from_kqid(&init_user_ns, dquot->dq_id));
        d->dqb_use_count = cpu_to_le32(OCFS2_DQUOT(dquot)->dq_use_count);
        d->dqb_ihardlimit = cpu_to_le64(m->dqb_ihardlimit);
        d->dqb_isoftlimit = cpu_to_le64(m->dqb_isoftlimit);
        d->dqb_curinodes = cpu_to_le64(m->dqb_curinodes);
        d->dqb_bhardlimit = cpu_to_le64(m->dqb_bhardlimit);
        d->dqb_bsoftlimit = cpu_to_le64(m->dqb_bsoftlimit);
        d->dqb_curspace = cpu_to_le64(m->dqb_curspace);
        d->dqb_btime = cpu_to_le64(m->dqb_btime);
        d->dqb_itime = cpu_to_le64(m->dqb_itime);
        d->dqb_pad1 = d->dqb_pad2 = 0;
}

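/*
 * Check whether the given on-disk entry belongs to the dquot we are looking
 * for (used by the generic quota tree code when scanning leaf blocks).
 */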
static int ocfs2_global_is_id(void *dp, struct dquot *dquot)
{
        struct ocfs2_global_disk_dqblk *d = dp;
        struct ocfs2_mem_dqinfo *oinfo =
                        sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv;

        if (qtree_entry_unused(&oinfo->dqi_gi, dp))
                return 0;

        return qid_eq(make_kqid(&init_user_ns, dquot->dq_id.type,
                                le32_to_cpu(d->dqb_id)),
                      dquot->dq_id);
}

struct qtree_fmt_operations ocfs2_global_ops = {
        .mem2disk_dqblk = ocfs2_global_mem2diskdqb,
        .disk2mem_dqblk = ocfs2_global_disk2memdqb,
        .is_id          = ocfs2_global_is_id,
};

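/*
 * Verify the block-check (ECC) trailer of a quota block that was just read
 * from disk.
 */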
int ocfs2_validate_quota_block(struct super_block *sb, struct buffer_head *bh)
{
        struct ocfs2_disk_dqtrailer *dqt =
                ocfs2_block_dqtrailer(sb->s_blocksize, bh->b_data);

        trace_ocfs2_validate_quota_block((unsigned long long)bh->b_blocknr);

        BUG_ON(!buffer_uptodate(bh));

        /*
         * If the ecc fails, we return the error but otherwise
         * leave the filesystem running.  We know any error is
         * local to this block.
         */
        return ocfs2_validate_meta_ecc(sb, bh->b_data, &dqt->dq_check);
}

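/*
 * Read a single quota block given by its physical block number and validate
 * its checksum trailer.
 */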
int ocfs2_read_quota_phys_block(struct inode *inode, u64 p_block,
                                struct buffer_head **bhp)
{
        int rc;

        *bhp = NULL;
        rc = ocfs2_read_blocks(INODE_CACHE(inode), p_block, 1, bhp, 0,
                               ocfs2_validate_quota_block);
        if (rc)
                mlog_errno(rc);
        return rc;
}

/* Read data from global quotafile - avoid pagecache and such because we cannot
 * afford acquiring the locks... We use quota cluster lock to serialize
 * operations. Caller is responsible for acquiring it. */
ssize_t ocfs2_quota_read(struct super_block *sb, int type, char *data,
                         size_t len, loff_t off)
{
        struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
        struct inode *gqinode = oinfo->dqi_gqinode;
        loff_t i_size = i_size_read(gqinode);
        int offset = off & (sb->s_blocksize - 1);
        sector_t blk = off >> sb->s_blocksize_bits;
        int err = 0;
        struct buffer_head *bh;
        size_t toread, tocopy;
        u64 pblock = 0, pcount = 0;

        if (off > i_size)
                return 0;
        if (off + len > i_size)
                len = i_size - off;
        toread = len;
        while (toread > 0) {
                tocopy = min_t(size_t, (sb->s_blocksize - offset), toread);
                if (!pcount) {
                        err = ocfs2_extent_map_get_blocks(gqinode, blk, &pblock,
                                                          &pcount, NULL);
                        if (err) {
                                mlog_errno(err);
                                return err;
                        }
                } else {
                        pcount--;
                        pblock++;
                }
                bh = NULL;
                err = ocfs2_read_quota_phys_block(gqinode, pblock, &bh);
                if (err) {
                        mlog_errno(err);
                        return err;
                }
                memcpy(data, bh->b_data + offset, tocopy);
                brelse(bh);
                offset = 0;
                toread -= tocopy;
                data += tocopy;
                blk++;
        }
        return len;
}

/* Write to quotafile (we know the transaction is already started and has
 * enough credits) */
ssize_t ocfs2_quota_write(struct super_block *sb, int type,
                          const char *data, size_t len, loff_t off)
{
        struct mem_dqinfo *info = sb_dqinfo(sb, type);
        struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
        struct inode *gqinode = oinfo->dqi_gqinode;
        int offset = off & (sb->s_blocksize - 1);
        sector_t blk = off >> sb->s_blocksize_bits;
        int err = 0, new = 0, ja_type;
        struct buffer_head *bh = NULL;
        handle_t *handle = journal_current_handle();
        u64 pblock, pcount;

        if (!handle) {
                mlog(ML_ERROR, "Quota write (off=%llu, len=%llu) cancelled "
                     "because transaction was not started.\n",
                     (unsigned long long)off, (unsigned long long)len);
                return -EIO;
        }
        if (len > sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE - offset) {
                WARN_ON(1);
                len = sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE - offset;
        }

        if (i_size_read(gqinode) < off + len) {
                loff_t rounded_end =
                                ocfs2_align_bytes_to_blocks(sb, off + len);

                /* Space is already allocated in ocfs2_acquire_dquot() */
                err = ocfs2_simple_size_update(gqinode,
                                               oinfo->dqi_gqi_bh,
                                               rounded_end);
                if (err < 0)
                        goto out;
                new = 1;
        }
        err = ocfs2_extent_map_get_blocks(gqinode, blk, &pblock, &pcount, NULL);
        if (err) {
                mlog_errno(err);
                goto out;
        }
        /* Not rewriting whole block? */
        if ((offset || len < sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE) &&
            !new) {
                err = ocfs2_read_quota_phys_block(gqinode, pblock, &bh);
                ja_type = OCFS2_JOURNAL_ACCESS_WRITE;
        } else {
                bh = sb_getblk(sb, pblock);
                if (!bh)
                        err = -ENOMEM;
                ja_type = OCFS2_JOURNAL_ACCESS_CREATE;
        }
        if (err) {
                mlog_errno(err);
                goto out;
        }
        lock_buffer(bh);
        if (new)
                memset(bh->b_data, 0, sb->s_blocksize);
        memcpy(bh->b_data + offset, data, len);
        flush_dcache_page(bh->b_page);
        set_buffer_uptodate(bh);
        unlock_buffer(bh);
        ocfs2_set_buffer_uptodate(INODE_CACHE(gqinode), bh);
        err = ocfs2_journal_access_dq(handle, INODE_CACHE(gqinode), bh,
                                      ja_type);
        if (err < 0) {
                brelse(bh);
                goto out;
        }
        ocfs2_journal_dirty(handle, bh);
        brelse(bh);
out:
        if (err) {
                mlog_errno(err);
                return err;
        }
        gqinode->i_version++;
        ocfs2_mark_inode_dirty(handle, gqinode, oinfo->dqi_gqi_bh);
        return len;
}

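/*
 * Take the cluster lock protecting the global quota file and, for exclusive
 * access, also i_mutex and ip_alloc_sem of the global quota inode. The
 * buffer head of the locked inode is cached in dqi_gqi_bh so that nested
 * callers see the same buffer.
 */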
int ocfs2_lock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex)
{
        int status;
        struct buffer_head *bh = NULL;

        status = ocfs2_inode_lock(oinfo->dqi_gqinode, &bh, ex);
        if (status < 0)
                return status;
        spin_lock(&dq_data_lock);
        if (!oinfo->dqi_gqi_count++)
                oinfo->dqi_gqi_bh = bh;
        else
                WARN_ON(bh != oinfo->dqi_gqi_bh);
        spin_unlock(&dq_data_lock);
        if (ex) {
                mutex_lock(&oinfo->dqi_gqinode->i_mutex);
                down_write(&OCFS2_I(oinfo->dqi_gqinode)->ip_alloc_sem);
        } else {
                down_read(&OCFS2_I(oinfo->dqi_gqinode)->ip_alloc_sem);
        }
        return 0;
}

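/* Drop the locks taken by ocfs2_lock_global_qf() in the reverse order. */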
void ocfs2_unlock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex)
{
        if (ex) {
                up_write(&OCFS2_I(oinfo->dqi_gqinode)->ip_alloc_sem);
                mutex_unlock(&oinfo->dqi_gqinode->i_mutex);
        } else {
                up_read(&OCFS2_I(oinfo->dqi_gqinode)->ip_alloc_sem);
        }
        ocfs2_inode_unlock(oinfo->dqi_gqinode, ex);
        brelse(oinfo->dqi_gqi_bh);
        spin_lock(&dq_data_lock);
        if (!--oinfo->dqi_gqi_count)
                oinfo->dqi_gqi_bh = NULL;
        spin_unlock(&dq_data_lock);
}

/* Read information header from global quota file */
int ocfs2_global_read_info(struct super_block *sb, int type)
{
        struct inode *gqinode = NULL;
        unsigned int ino[OCFS2_MAXQUOTAS] = { USER_QUOTA_SYSTEM_INODE,
                                              GROUP_QUOTA_SYSTEM_INODE };
        struct ocfs2_global_disk_dqinfo dinfo;
        struct mem_dqinfo *info = sb_dqinfo(sb, type);
        struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
        u64 pcount;
        int status;

        /* Read global header */
        gqinode = ocfs2_get_system_file_inode(OCFS2_SB(sb), ino[type],
                                              OCFS2_INVALID_SLOT);
        if (!gqinode) {
                mlog(ML_ERROR, "failed to get global quota inode (type=%d)\n",
                     type);
                status = -EINVAL;
                goto out_err;
        }
        oinfo->dqi_gi.dqi_sb = sb;
        oinfo->dqi_gi.dqi_type = type;
        ocfs2_qinfo_lock_res_init(&oinfo->dqi_gqlock, oinfo);
        oinfo->dqi_gi.dqi_entry_size = sizeof(struct ocfs2_global_disk_dqblk);
        oinfo->dqi_gi.dqi_ops = &ocfs2_global_ops;
        oinfo->dqi_gqi_bh = NULL;
        oinfo->dqi_gqi_count = 0;
        oinfo->dqi_gqinode = gqinode;
        status = ocfs2_lock_global_qf(oinfo, 0);
        if (status < 0) {
                mlog_errno(status);
                goto out_err;
        }

        status = ocfs2_extent_map_get_blocks(gqinode, 0, &oinfo->dqi_giblk,
                                             &pcount, NULL);
        if (status < 0)
                goto out_unlock;

        status = ocfs2_qinfo_lock(oinfo, 0);
        if (status < 0)
                goto out_unlock;
        status = sb->s_op->quota_read(sb, type, (char *)&dinfo,
                                      sizeof(struct ocfs2_global_disk_dqinfo),
                                      OCFS2_GLOBAL_INFO_OFF);
        ocfs2_qinfo_unlock(oinfo, 0);
        ocfs2_unlock_global_qf(oinfo, 0);
        if (status != sizeof(struct ocfs2_global_disk_dqinfo)) {
                mlog(ML_ERROR, "Cannot read global quota info (%d).\n",
                     status);
                if (status >= 0)
                        status = -EIO;
                mlog_errno(status);
                goto out_err;
        }

        info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace);
        info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace);
        oinfo->dqi_syncms = le32_to_cpu(dinfo.dqi_syncms);
        oinfo->dqi_gi.dqi_blocks = le32_to_cpu(dinfo.dqi_blocks);
        oinfo->dqi_gi.dqi_free_blk = le32_to_cpu(dinfo.dqi_free_blk);
        oinfo->dqi_gi.dqi_free_entry = le32_to_cpu(dinfo.dqi_free_entry);
        oinfo->dqi_gi.dqi_blocksize_bits = sb->s_blocksize_bits;
        oinfo->dqi_gi.dqi_usable_bs = sb->s_blocksize -
                                      OCFS2_QBLK_RESERVED_SPACE;
        oinfo->dqi_gi.dqi_qtree_depth = qtree_depth(&oinfo->dqi_gi);
        INIT_DELAYED_WORK(&oinfo->dqi_sync_work, qsync_work_fn);
        schedule_delayed_work(&oinfo->dqi_sync_work,
                              msecs_to_jiffies(oinfo->dqi_syncms));

out_err:
        return status;
out_unlock:
        ocfs2_unlock_global_qf(oinfo, 0);
        mlog_errno(status);
        goto out_err;
}

/* Write information to global quota file. Expects exclusive lock on quota
 * file inode and quota info */
static int __ocfs2_global_write_info(struct super_block *sb, int type)
{
        struct mem_dqinfo *info = sb_dqinfo(sb, type);
        struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
        struct ocfs2_global_disk_dqinfo dinfo;
        ssize_t size;

        spin_lock(&dq_data_lock);
        info->dqi_flags &= ~DQF_INFO_DIRTY;
        dinfo.dqi_bgrace = cpu_to_le32(info->dqi_bgrace);
        dinfo.dqi_igrace = cpu_to_le32(info->dqi_igrace);
        spin_unlock(&dq_data_lock);
        dinfo.dqi_syncms = cpu_to_le32(oinfo->dqi_syncms);
        dinfo.dqi_blocks = cpu_to_le32(oinfo->dqi_gi.dqi_blocks);
        dinfo.dqi_free_blk = cpu_to_le32(oinfo->dqi_gi.dqi_free_blk);
        dinfo.dqi_free_entry = cpu_to_le32(oinfo->dqi_gi.dqi_free_entry);
        size = sb->s_op->quota_write(sb, type, (char *)&dinfo,
                                     sizeof(struct ocfs2_global_disk_dqinfo),
                                     OCFS2_GLOBAL_INFO_OFF);
        if (size != sizeof(struct ocfs2_global_disk_dqinfo)) {
                mlog(ML_ERROR, "Cannot write global quota info structure\n");
                if (size >= 0)
                        size = -EIO;
                return size;
        }
        return 0;
}

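/*
 * Write the global quota info header while holding the qinfo cluster lock
 * exclusively.
 */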
int ocfs2_global_write_info(struct super_block *sb, int type)
{
        int err;
        struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;

        err = ocfs2_qinfo_lock(info, 1);
        if (err < 0)
                return err;
        err = __ocfs2_global_write_info(sb, type);
        ocfs2_qinfo_unlock(info, 1);
        return err;
}

static int ocfs2_global_qinit_alloc(struct super_block *sb, int type)
{
        struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;

        /*
         * We may need to allocate tree blocks and a leaf block but not the
         * root block
         */
        return oinfo->dqi_gi.dqi_qtree_depth;
}

static int ocfs2_calc_global_qinit_credits(struct super_block *sb, int type)
{
        /* We modify all the allocated blocks, tree root, info block and
         * the inode */
        return (ocfs2_global_qinit_alloc(sb, type) + 2) *
                        OCFS2_QUOTA_BLOCK_UPDATE_CREDITS + 1;
}

/* Sync local information about quota modifications with global quota file.
 * Caller must have started the transaction and obtained exclusive lock for
 * global quota file inode */
int __ocfs2_sync_dquot(struct dquot *dquot, int freeing)
{
        int err, err2;
        struct super_block *sb = dquot->dq_sb;
        int type = dquot->dq_id.type;
        struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;
        struct ocfs2_global_disk_dqblk dqblk;
        s64 spacechange, inodechange;
        time_t olditime, oldbtime;

        err = sb->s_op->quota_read(sb, type, (char *)&dqblk,
                                   sizeof(struct ocfs2_global_disk_dqblk),
                                   dquot->dq_off);
        if (err != sizeof(struct ocfs2_global_disk_dqblk)) {
                if (err >= 0) {
                        mlog(ML_ERROR, "Short read from global quota file "
                             "(%u read)\n", err);
                        err = -EIO;
                }
                goto out;
        }

        /* Update space and inode usage. Get also other information from
         * global quota file so that we don't overwrite any changes there.
         * We are */
        spin_lock(&dq_data_lock);
        spacechange = dquot->dq_dqb.dqb_curspace -
                                        OCFS2_DQUOT(dquot)->dq_origspace;
        inodechange = dquot->dq_dqb.dqb_curinodes -
                                        OCFS2_DQUOT(dquot)->dq_originodes;
        olditime = dquot->dq_dqb.dqb_itime;
        oldbtime = dquot->dq_dqb.dqb_btime;
        ocfs2_global_disk2memdqb(dquot, &dqblk);
        trace_ocfs2_sync_dquot(from_kqid(&init_user_ns, dquot->dq_id),
                               dquot->dq_dqb.dqb_curspace,
                               (long long)spacechange,
                               dquot->dq_dqb.dqb_curinodes,
                               (long long)inodechange);
        if (!test_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags))
                dquot->dq_dqb.dqb_curspace += spacechange;
        if (!test_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags))
                dquot->dq_dqb.dqb_curinodes += inodechange;
        /* Set properly space grace time... */
        if (dquot->dq_dqb.dqb_bsoftlimit &&
            dquot->dq_dqb.dqb_curspace > dquot->dq_dqb.dqb_bsoftlimit) {
                if (!test_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags) &&
                    oldbtime > 0) {
                        if (dquot->dq_dqb.dqb_btime > 0)
                                dquot->dq_dqb.dqb_btime =
                                        min(dquot->dq_dqb.dqb_btime, oldbtime);
                        else
                                dquot->dq_dqb.dqb_btime = oldbtime;
                }
        } else {
                dquot->dq_dqb.dqb_btime = 0;
                clear_bit(DQ_BLKS_B, &dquot->dq_flags);
        }
        /* Set properly inode grace time... */
        if (dquot->dq_dqb.dqb_isoftlimit &&
            dquot->dq_dqb.dqb_curinodes > dquot->dq_dqb.dqb_isoftlimit) {
                if (!test_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags) &&
                    olditime > 0) {
                        if (dquot->dq_dqb.dqb_itime > 0)
                                dquot->dq_dqb.dqb_itime =
                                        min(dquot->dq_dqb.dqb_itime, olditime);
                        else
                                dquot->dq_dqb.dqb_itime = olditime;
                }
        } else {
                dquot->dq_dqb.dqb_itime = 0;
                clear_bit(DQ_INODES_B, &dquot->dq_flags);
        }
        /* All information is properly updated, clear the flags */
        __clear_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
        __clear_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
        __clear_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
        __clear_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
        __clear_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
        __clear_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
        OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
        OCFS2_DQUOT(dquot)->dq_originodes = dquot->dq_dqb.dqb_curinodes;
        spin_unlock(&dq_data_lock);
        err = ocfs2_qinfo_lock(info, freeing);
        if (err < 0) {
                mlog(ML_ERROR, "Failed to lock quota info, losing quota write"
                               " (type=%d, id=%u)\n", dquot->dq_id.type,
                               (unsigned)from_kqid(&init_user_ns, dquot->dq_id));
                goto out;
        }
        if (freeing)
                OCFS2_DQUOT(dquot)->dq_use_count--;
        err = qtree_write_dquot(&info->dqi_gi, dquot);
        if (err < 0)
                goto out_qlock;
        if (freeing && !OCFS2_DQUOT(dquot)->dq_use_count) {
                err = qtree_release_dquot(&info->dqi_gi, dquot);
                if (info_dirty(sb_dqinfo(sb, type))) {
                        err2 = __ocfs2_global_write_info(sb, type);
                        if (!err)
                                err = err2;
                }
        }
out_qlock:
        ocfs2_qinfo_unlock(info, freeing);
out:
        if (err < 0)
                mlog_errno(err);
        return err;
}

/*
 * Functions for periodic syncing of dquots with global file
 */
static int ocfs2_sync_dquot_helper(struct dquot *dquot, unsigned long type)
{
        handle_t *handle;
        struct super_block *sb = dquot->dq_sb;
        struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
        struct ocfs2_super *osb = OCFS2_SB(sb);
        int status = 0;

        trace_ocfs2_sync_dquot_helper(from_kqid(&init_user_ns, dquot->dq_id),
                                      dquot->dq_id.type,
                                      type, sb->s_id);
        if (type != dquot->dq_id.type)
                goto out;
        status = ocfs2_lock_global_qf(oinfo, 1);
        if (status < 0)
                goto out;

        handle = ocfs2_start_trans(osb, OCFS2_QSYNC_CREDITS);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
                goto out_ilock;
        }
        mutex_lock(&sb_dqopt(sb)->dqio_mutex);
        status = ocfs2_sync_dquot(dquot);
        if (status < 0)
                mlog_errno(status);
        /* We have to write local structure as well... */
        status = ocfs2_local_write_dquot(dquot);
        if (status < 0)
                mlog_errno(status);
        mutex_unlock(&sb_dqopt(sb)->dqio_mutex);
        ocfs2_commit_trans(osb, handle);
out_ilock:
        ocfs2_unlock_global_qf(oinfo, 1);
out:
        return status;
}

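/*
 * Delayed work that periodically pushes locally accumulated quota changes to
 * the global quota file and re-arms itself according to dqi_syncms.
 */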
static void qsync_work_fn(struct work_struct *work)
{
        struct ocfs2_mem_dqinfo *oinfo = container_of(work,
                                                      struct ocfs2_mem_dqinfo,
                                                      dqi_sync_work.work);
        struct super_block *sb = oinfo->dqi_gqinode->i_sb;

        dquot_scan_active(sb, ocfs2_sync_dquot_helper, oinfo->dqi_type);
        schedule_delayed_work(&oinfo->dqi_sync_work,
                              msecs_to_jiffies(oinfo->dqi_syncms));
}

/*
 * Wrappers for generic quota functions
 */

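/* Write the dquot to the node-local quota file inside a new transaction. */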
static int ocfs2_write_dquot(struct dquot *dquot)
{
        handle_t *handle;
        struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb);
        int status = 0;

        trace_ocfs2_write_dquot(from_kqid(&init_user_ns, dquot->dq_id),
                                dquot->dq_id.type);

        handle = ocfs2_start_trans(osb, OCFS2_QWRITE_CREDITS);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
                goto out;
        }
        mutex_lock(&sb_dqopt(dquot->dq_sb)->dqio_mutex);
        status = ocfs2_local_write_dquot(dquot);
        mutex_unlock(&sb_dqopt(dquot->dq_sb)->dqio_mutex);
        ocfs2_commit_trans(osb, handle);
out:
        return status;
}

static int ocfs2_calc_qdel_credits(struct super_block *sb, int type)
{
        struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
        /*
         * We modify tree, leaf block, global info, local chunk header,
         * global and local inode; OCFS2_QINFO_WRITE_CREDITS already
         * accounts for inode update
         */
        return (oinfo->dqi_gi.dqi_qtree_depth + 2) *
               OCFS2_QUOTA_BLOCK_UPDATE_CREDITS +
               OCFS2_QINFO_WRITE_CREDITS +
               OCFS2_INODE_UPDATE_CREDITS;
}

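/*
 * Workqueue handler that drops the dquot references queued by
 * ocfs2_release_dquot() when it runs from the downconvert thread.
 */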
void ocfs2_drop_dquot_refs(struct work_struct *work)
{
        struct ocfs2_super *osb = container_of(work, struct ocfs2_super,
                                               dquot_drop_work);
        struct llist_node *list;
        struct ocfs2_dquot *odquot, *next_odquot;

        list = llist_del_all(&osb->dquot_drop_list);
        llist_for_each_entry_safe(odquot, next_odquot, list, list) {
                /* Drop the reference we acquired in ocfs2_dquot_release() */
                dqput(&odquot->dq_dquot);
        }
}

/*
 * Called when the last reference to dquot is dropped. If we are called from
 * downconvert thread, we cannot do all the handling here because grabbing
 * quota lock could deadlock (the node holding the quota lock could need some
 * other cluster lock to proceed but with blocked downconvert thread we cannot
 * release any lock).
 */
static int ocfs2_release_dquot(struct dquot *dquot)
{
        handle_t *handle;
        struct ocfs2_mem_dqinfo *oinfo =
                        sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv;
        struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb);
        int status = 0;

        trace_ocfs2_release_dquot(from_kqid(&init_user_ns, dquot->dq_id),
                                  dquot->dq_id.type);

        mutex_lock(&dquot->dq_lock);
        /* Check whether we are not racing with some other dqget() */
        if (atomic_read(&dquot->dq_count) > 1)
                goto out;
        /* Running from downconvert thread? Postpone quota processing to wq */
        if (current == osb->dc_task) {
                /*
                 * Grab our own reference to dquot and queue it for delayed
                 * dropping.  Quota code rechecks after calling
                 * ->release_dquot() and won't free dquot structure.
                 */
                dqgrab(dquot);
                /* First entry on list -> queue work */
                if (llist_add(&OCFS2_DQUOT(dquot)->list, &osb->dquot_drop_list))
                        queue_work(ocfs2_wq, &osb->dquot_drop_work);
                goto out;
        }
        status = ocfs2_lock_global_qf(oinfo, 1);
        if (status < 0)
                goto out;
        handle = ocfs2_start_trans(osb,
                ocfs2_calc_qdel_credits(dquot->dq_sb, dquot->dq_id.type));
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
                goto out_ilock;
        }

        status = ocfs2_global_release_dquot(dquot);
        if (status < 0) {
                mlog_errno(status);
                goto out_trans;
        }
        status = ocfs2_local_release_dquot(handle, dquot);
        /*
         * If we fail here, we cannot do much as global structure is
         * already released. So just complain...
         */
        if (status < 0)
                mlog_errno(status);

        /*
         * Clear dq_off so that we search for the structure in quota file next
         * time we acquire it. The structure might be deleted and reallocated
         * elsewhere by another node while our dquot structure is on freelist.
         */
        dquot->dq_off = 0;
        clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
out_trans:
        ocfs2_commit_trans(osb, handle);
out_ilock:
        ocfs2_unlock_global_qf(oinfo, 1);
out:
        mutex_unlock(&dquot->dq_lock);
        if (status)
                mlog_errno(status);
        return status;
}

/*
 * Read global dquot structure from disk or create it if it does
 * not exist. Also update use count of the global structure and
 * create structure in node-local quota file.
 */
static int ocfs2_acquire_dquot(struct dquot *dquot)
{
        int status = 0, err;
        int ex = 0;
        struct super_block *sb = dquot->dq_sb;
        struct ocfs2_super *osb = OCFS2_SB(sb);
        int type = dquot->dq_id.type;
        struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;
        struct inode *gqinode = info->dqi_gqinode;
        int need_alloc = ocfs2_global_qinit_alloc(sb, type);
        handle_t *handle;

        trace_ocfs2_acquire_dquot(from_kqid(&init_user_ns, dquot->dq_id),
                                  type);
        mutex_lock(&dquot->dq_lock);
        /*
         * We need an exclusive lock, because we're going to update use count
         * and instantiate possibly new dquot structure
         */
        status = ocfs2_lock_global_qf(info, 1);
        if (status < 0)
                goto out;
        status = ocfs2_qinfo_lock(info, 0);
        if (status < 0)
                goto out_dq;
        /*
         * We always want to read dquot structure from disk because we don't
         * know what happened with it while it was on freelist.
         */
        status = qtree_read_dquot(&info->dqi_gi, dquot);
        ocfs2_qinfo_unlock(info, 0);
        if (status < 0)
                goto out_dq;

        OCFS2_DQUOT(dquot)->dq_use_count++;
        OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
        OCFS2_DQUOT(dquot)->dq_originodes = dquot->dq_dqb.dqb_curinodes;
        if (!dquot->dq_off) {   /* No real quota entry? */
                ex = 1;
                /*
                 * Add blocks to quota file before we start a transaction since
                 * locking allocators ranks above a transaction start
                 */
                WARN_ON(journal_current_handle());
                status = ocfs2_extend_no_holes(gqinode, NULL,
                        i_size_read(gqinode) + (need_alloc << sb->s_blocksize_bits),
                        i_size_read(gqinode));
                if (status < 0)
                        goto out_dq;
        }

        handle = ocfs2_start_trans(osb,
                                   ocfs2_calc_global_qinit_credits(sb, type));
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                goto out_dq;
        }
        status = ocfs2_qinfo_lock(info, ex);
        if (status < 0)
                goto out_trans;
        status = qtree_write_dquot(&info->dqi_gi, dquot);
        if (ex && info_dirty(sb_dqinfo(sb, type))) {
                err = __ocfs2_global_write_info(sb, type);
                if (!status)
                        status = err;
        }
        ocfs2_qinfo_unlock(info, ex);
out_trans:
        ocfs2_commit_trans(osb, handle);
out_dq:
        ocfs2_unlock_global_qf(info, 1);
        if (status < 0)
                goto out;

        status = ocfs2_create_local_dquot(dquot);
        if (status < 0)
                goto out;
        set_bit(DQ_ACTIVE_B, &dquot->dq_flags);
out:
        mutex_unlock(&dquot->dq_lock);
        if (status)
                mlog_errno(status);
        return status;
}

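/*
 * Mark the dquot dirty. If the administrator changed limits or usage (the
 * DQ_LASTSET_B bits are set), sync the dquot to the global file right away
 * so the change propagates to other nodes; otherwise only write the local
 * quota file.
 */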
static int ocfs2_mark_dquot_dirty(struct dquot *dquot)
{
        unsigned long mask = (1 << (DQ_LASTSET_B + QIF_ILIMITS_B)) |
                             (1 << (DQ_LASTSET_B + QIF_BLIMITS_B)) |
                             (1 << (DQ_LASTSET_B + QIF_INODES_B)) |
                             (1 << (DQ_LASTSET_B + QIF_SPACE_B)) |
                             (1 << (DQ_LASTSET_B + QIF_BTIME_B)) |
                             (1 << (DQ_LASTSET_B + QIF_ITIME_B));
        int sync = 0;
        int status;
        struct super_block *sb = dquot->dq_sb;
        int type = dquot->dq_id.type;
        struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
        handle_t *handle;
        struct ocfs2_super *osb = OCFS2_SB(sb);

        trace_ocfs2_mark_dquot_dirty(from_kqid(&init_user_ns, dquot->dq_id),
                                     type);

        /* In case user set some limits, sync dquot immediately to global
         * quota file so that information propagates quicker */
        spin_lock(&dq_data_lock);
        if (dquot->dq_flags & mask)
                sync = 1;
        spin_unlock(&dq_data_lock);
        /* This is a slight hack but we can't afford getting global quota
         * lock if we already have a transaction started. */
        if (!sync || journal_current_handle()) {
                status = ocfs2_write_dquot(dquot);
                goto out;
        }
        status = ocfs2_lock_global_qf(oinfo, 1);
        if (status < 0)
                goto out;
        handle = ocfs2_start_trans(osb, OCFS2_QSYNC_CREDITS);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
                goto out_ilock;
        }
        mutex_lock(&sb_dqopt(sb)->dqio_mutex);
        status = ocfs2_sync_dquot(dquot);
        if (status < 0) {
                mlog_errno(status);
                goto out_dlock;
        }
        /* Now write updated local dquot structure */
        status = ocfs2_local_write_dquot(dquot);
out_dlock:
        mutex_unlock(&sb_dqopt(sb)->dqio_mutex);
        ocfs2_commit_trans(osb, handle);
out_ilock:
        ocfs2_unlock_global_qf(oinfo, 1);
out:
        if (status)
                mlog_errno(status);
        return status;
}

/* This should happen only after set_dqinfo(). */
static int ocfs2_write_info(struct super_block *sb, int type)
{
        handle_t *handle;
        int status = 0;
        struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;

        status = ocfs2_lock_global_qf(oinfo, 1);
        if (status < 0)
                goto out;
        handle = ocfs2_start_trans(OCFS2_SB(sb), OCFS2_QINFO_WRITE_CREDITS);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
                goto out_ilock;
        }
        status = dquot_commit_info(sb, type);
        ocfs2_commit_trans(OCFS2_SB(sb), handle);
out_ilock:
        ocfs2_unlock_global_qf(oinfo, 1);
out:
        if (status)
                mlog_errno(status);
        return status;
}

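/*
 * Allocate and free the ocfs2-specific dquot structure that embeds the
 * generic struct dquot.
 */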
static struct dquot *ocfs2_alloc_dquot(struct super_block *sb, int type)
{
        struct ocfs2_dquot *dquot =
                        kmem_cache_zalloc(ocfs2_dquot_cachep, GFP_NOFS);

        if (!dquot)
                return NULL;
        return &dquot->dq_dquot;
}

static void ocfs2_destroy_dquot(struct dquot *dquot)
{
        kmem_cache_free(ocfs2_dquot_cachep, dquot);
}

const struct dquot_operations ocfs2_quota_operations = {
        /* We never make dquot dirty so .write_dquot is never called */
        .acquire_dquot  = ocfs2_acquire_dquot,
        .release_dquot  = ocfs2_release_dquot,
        .mark_dirty     = ocfs2_mark_dquot_dirty,
        .write_info     = ocfs2_write_info,
        .alloc_dquot    = ocfs2_alloc_dquot,
        .destroy_dquot  = ocfs2_destroy_dquot,
};