sufile.c 32 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219
  1. // SPDX-License-Identifier: GPL-2.0+
  2. /*
  3. * sufile.c - NILFS segment usage file.
  4. *
  5. * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
  6. *
  7. * Written by Koji Sato.
  8. * Revised by Ryusuke Konishi.
  9. */
  10. #include <linux/kernel.h>
  11. #include <linux/fs.h>
  12. #include <linux/string.h>
  13. #include <linux/buffer_head.h>
  14. #include <linux/errno.h>
  15. #include "mdt.h"
  16. #include "sufile.h"
  17. #include <trace/events/nilfs2.h>
  18. /**
  19. * struct nilfs_sufile_info - on-memory private data of sufile
  20. * @mi: on-memory private data of metadata file
  21. * @ncleansegs: number of clean segments
  22. * @allocmin: lower limit of allocatable segment range
  23. * @allocmax: upper limit of allocatable segment range
  24. */
  25. struct nilfs_sufile_info {
  26. struct nilfs_mdt_info mi;
  27. unsigned long ncleansegs;/* number of clean segments */
  28. __u64 allocmin; /* lower limit of allocatable segment range */
  29. __u64 allocmax; /* upper limit of allocatable segment range */
  30. };
  31. static inline struct nilfs_sufile_info *NILFS_SUI(struct inode *sufile)
  32. {
  33. return (struct nilfs_sufile_info *)NILFS_MDT(sufile);
  34. }
/* Number of segment usage entries that fit in one sufile block */
static inline unsigned long
nilfs_sufile_segment_usages_per_block(const struct inode *sufile)
{
	return NILFS_MDT(sufile)->mi_entries_per_block;
}
  40. static unsigned long
  41. nilfs_sufile_get_blkoff(const struct inode *sufile, __u64 segnum)
  42. {
  43. __u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;
  44. do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
  45. return (unsigned long)t;
  46. }
  47. static unsigned long
  48. nilfs_sufile_get_offset(const struct inode *sufile, __u64 segnum)
  49. {
  50. __u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;
  51. return do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
  52. }
  53. static unsigned long
  54. nilfs_sufile_segment_usages_in_block(const struct inode *sufile, __u64 curr,
  55. __u64 max)
  56. {
  57. return min_t(unsigned long,
  58. nilfs_sufile_segment_usages_per_block(sufile) -
  59. nilfs_sufile_get_offset(sufile, curr),
  60. max - curr + 1);
  61. }
  62. static struct nilfs_segment_usage *
  63. nilfs_sufile_block_get_segment_usage(const struct inode *sufile, __u64 segnum,
  64. struct buffer_head *bh, void *kaddr)
  65. {
  66. return kaddr + bh_offset(bh) +
  67. nilfs_sufile_get_offset(sufile, segnum) *
  68. NILFS_MDT(sufile)->mi_entry_size;
  69. }
/*
 * Read the sufile header block; it is always block 0 of the file and is
 * never created here (create flag is 0).
 */
static inline int nilfs_sufile_get_header_block(struct inode *sufile,
						struct buffer_head **bhp)
{
	return nilfs_mdt_get_block(sufile, 0, 0, NULL, bhp);
}
/*
 * Read (or, if @create is nonzero, create) the sufile block containing
 * the usage entry of @segnum and return its buffer head through @bhp.
 */
static inline int
nilfs_sufile_get_segment_usage_block(struct inode *sufile, __u64 segnum,
				     int create, struct buffer_head **bhp)
{
	return nilfs_mdt_get_block(sufile,
				   nilfs_sufile_get_blkoff(sufile, segnum),
				   create, NULL, bhp);
}
/*
 * Punch a hole at the sufile block that holds the usage entry of
 * @segnum (used when a whole block's worth of segments is truncated).
 */
static int nilfs_sufile_delete_segment_usage_block(struct inode *sufile,
						   __u64 segnum)
{
	return nilfs_mdt_delete_block(sufile,
				      nilfs_sufile_get_blkoff(sufile, segnum));
}
  89. static void nilfs_sufile_mod_counter(struct buffer_head *header_bh,
  90. u64 ncleanadd, u64 ndirtyadd)
  91. {
  92. struct nilfs_sufile_header *header;
  93. void *kaddr;
  94. kaddr = kmap_atomic(header_bh->b_page);
  95. header = kaddr + bh_offset(header_bh);
  96. le64_add_cpu(&header->sh_ncleansegs, ncleanadd);
  97. le64_add_cpu(&header->sh_ndirtysegs, ndirtyadd);
  98. kunmap_atomic(kaddr);
  99. mark_buffer_dirty(header_bh);
  100. }
/**
 * nilfs_sufile_get_ncleansegs - return the number of clean segments
 * @sufile: inode of segment usage file
 *
 * Returns the cached in-memory counter; no lock is taken here, so the
 * value is only a snapshot.
 */
unsigned long nilfs_sufile_get_ncleansegs(struct inode *sufile)
{
	return NILFS_SUI(sufile)->ncleansegs;
}
/**
 * nilfs_sufile_updatev - modify multiple segment usages at a time
 * @sufile: inode of segment usage file
 * @segnumv: array of segment numbers
 * @nsegs: size of @segnumv array
 * @create: creation flag
 * @ndone: place to store number of modified segments on @segnumv
 * @dofunc: primitive operation for the update
 *
 * Description: nilfs_sufile_updatev() repeatedly calls @dofunc
 * against the given array of segments.  The @dofunc is called with
 * buffers of a header block and the sufile block in which the target
 * segment usage entry is contained.  If @ndone is given, the number
 * of successfully modified segments from the head is stored in the
 * place @ndone points to.
 *
 * Return Value: On success, zero is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - Given segment usage is in hole block (may be returned if
 * @create is zero)
 *
 * %-EINVAL - Invalid segment usage number
 */
int nilfs_sufile_updatev(struct inode *sufile, __u64 *segnumv, size_t nsegs,
			 int create, size_t *ndone,
			 void (*dofunc)(struct inode *, __u64,
					struct buffer_head *,
					struct buffer_head *))
{
	struct buffer_head *header_bh, *bh;
	unsigned long blkoff, prev_blkoff;
	__u64 *seg;
	size_t nerr = 0, n = 0;
	int ret = 0;

	if (unlikely(nsegs == 0))
		goto out;

	down_write(&NILFS_MDT(sufile)->mi_sem);
	/*
	 * Validate every segment number up front so the update is
	 * all-or-nothing with respect to bad input.
	 */
	for (seg = segnumv; seg < segnumv + nsegs; seg++) {
		if (unlikely(*seg >= nilfs_sufile_get_nsegments(sufile))) {
			nilfs_msg(sufile->i_sb, KERN_WARNING,
				  "%s: invalid segment number: %llu",
				  __func__, (unsigned long long)*seg);
			nerr++;
		}
	}
	if (nerr > 0) {
		ret = -EINVAL;
		goto out_sem;
	}

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	seg = segnumv;
	blkoff = nilfs_sufile_get_blkoff(sufile, *seg);
	ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh);
	if (ret < 0)
		goto out_header;

	for (;;) {
		dofunc(sufile, *seg, header_bh, bh);

		if (++seg >= segnumv + nsegs)
			break;
		prev_blkoff = blkoff;
		blkoff = nilfs_sufile_get_blkoff(sufile, *seg);
		if (blkoff == prev_blkoff)
			continue;	/* same block: reuse the buffer */

		/* get different block */
		brelse(bh);
		ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh);
		if (unlikely(ret < 0))
			goto out_header;
	}
	brelse(bh);

 out_header:
	/* count of entries processed before stopping (or all of them) */
	n = seg - segnumv;
	brelse(header_bh);
 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
 out:
	if (ndone)
		*ndone = n;
	return ret;
}
/**
 * nilfs_sufile_update - modify a single segment usage
 * @sufile: inode of segment usage file
 * @segnum: segment number to modify
 * @create: whether a missing sufile block may be created
 * @dofunc: primitive operation applied with the header and usage buffers
 *
 * Single-segment counterpart of nilfs_sufile_updatev().  Returns 0 on
 * success or a negative error code (%-EINVAL for an out-of-range
 * @segnum, otherwise whatever the block lookups return).
 */
int nilfs_sufile_update(struct inode *sufile, __u64 segnum, int create,
			void (*dofunc)(struct inode *, __u64,
				       struct buffer_head *,
				       struct buffer_head *))
{
	struct buffer_head *header_bh, *bh;
	int ret;

	if (unlikely(segnum >= nilfs_sufile_get_nsegments(sufile))) {
		nilfs_msg(sufile->i_sb, KERN_WARNING,
			  "%s: invalid segment number: %llu",
			  __func__, (unsigned long long)segnum);
		return -EINVAL;
	}
	down_write(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, create, &bh);
	if (!ret) {
		dofunc(sufile, segnum, header_bh, bh);
		brelse(bh);
	}
	brelse(header_bh);

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
  223. /**
  224. * nilfs_sufile_set_alloc_range - limit range of segment to be allocated
  225. * @sufile: inode of segment usage file
  226. * @start: minimum segment number of allocatable region (inclusive)
  227. * @end: maximum segment number of allocatable region (inclusive)
  228. *
  229. * Return Value: On success, 0 is returned. On error, one of the
  230. * following negative error codes is returned.
  231. *
  232. * %-ERANGE - invalid segment region
  233. */
  234. int nilfs_sufile_set_alloc_range(struct inode *sufile, __u64 start, __u64 end)
  235. {
  236. struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
  237. __u64 nsegs;
  238. int ret = -ERANGE;
  239. down_write(&NILFS_MDT(sufile)->mi_sem);
  240. nsegs = nilfs_sufile_get_nsegments(sufile);
  241. if (start <= end && end < nsegs) {
  242. sui->allocmin = start;
  243. sui->allocmax = end;
  244. ret = 0;
  245. }
  246. up_write(&NILFS_MDT(sufile)->mi_sem);
  247. return ret;
  248. }
/**
 * nilfs_sufile_alloc - allocate a segment
 * @sufile: inode of segment usage file
 * @segnump: pointer to segment number
 *
 * Description: nilfs_sufile_alloc() allocates a clean segment.  The
 * search starts just after the last allocated segment and is confined
 * to the [allocmin, allocmax] window first, wrapping around inside it;
 * segments outside the window are only considered as a fallback.
 *
 * Return Value: On success, 0 is returned and the segment number of the
 * allocated segment is stored in the place pointed by @segnump. On error, one
 * of the following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOSPC - No clean segment left.
 */
int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
{
	struct buffer_head *header_bh, *su_bh;
	struct nilfs_sufile_header *header;
	struct nilfs_segment_usage *su;
	struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
	__u64 segnum, maxsegnum, last_alloc;
	void *kaddr;
	unsigned long nsegments, nsus, cnt;
	int ret, j;

	down_write(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;
	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	last_alloc = le64_to_cpu(header->sh_last_alloc);
	kunmap_atomic(kaddr);

	nsegments = nilfs_sufile_get_nsegments(sufile);
	maxsegnum = sui->allocmax;
	/* start scanning right after the previous allocation */
	segnum = last_alloc + 1;
	if (segnum < sui->allocmin || segnum > sui->allocmax)
		segnum = sui->allocmin;

	for (cnt = 0; cnt < nsegments; cnt += nsus) {
		if (segnum > maxsegnum) {
			if (cnt < sui->allocmax - sui->allocmin + 1) {
				/*
				 * wrap around in the limited region.
				 * if allocation started from
				 * sui->allocmin, this never happens.
				 */
				segnum = sui->allocmin;
				maxsegnum = last_alloc;
			} else if (segnum > sui->allocmin &&
				   sui->allocmax + 1 < nsegments) {
				/* fall back to the region above the window */
				segnum = sui->allocmax + 1;
				maxsegnum = nsegments - 1;
			} else if (sui->allocmin > 0) {
				/* fall back to the region below the window */
				segnum = 0;
				maxsegnum = sui->allocmin - 1;
			} else {
				break; /* never happens */
			}
		}
		trace_nilfs2_segment_usage_check(sufile, segnum, cnt);
		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 1,
							   &su_bh);
		if (ret < 0)
			goto out_header;
		kaddr = kmap_atomic(su_bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, segnum, su_bh, kaddr);

		nsus = nilfs_sufile_segment_usages_in_block(
			sufile, segnum, maxsegnum);
		for (j = 0; j < nsus; j++, su = (void *)su + susz, segnum++) {
			if (!nilfs_segment_usage_clean(su))
				continue;
			/* found a clean segment */
			nilfs_segment_usage_set_dirty(su);
			kunmap_atomic(kaddr);

			/* update the on-disk counters and last_alloc hint */
			kaddr = kmap_atomic(header_bh->b_page);
			header = kaddr + bh_offset(header_bh);
			le64_add_cpu(&header->sh_ncleansegs, -1);
			le64_add_cpu(&header->sh_ndirtysegs, 1);
			header->sh_last_alloc = cpu_to_le64(segnum);
			kunmap_atomic(kaddr);

			sui->ncleansegs--;
			mark_buffer_dirty(header_bh);
			mark_buffer_dirty(su_bh);
			nilfs_mdt_mark_dirty(sufile);
			brelse(su_bh);
			*segnump = segnum;

			trace_nilfs2_segment_usage_allocated(sufile, segnum);

			goto out_header;
		}

		kunmap_atomic(kaddr);
		brelse(su_bh);
	}

	/* no segments left */
	ret = -ENOSPC;

 out_header:
	brelse(header_bh);
 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
/*
 * nilfs_sufile_do_cancel_free - dofunc that reclaims a clean segment,
 * flipping it back to dirty and adjusting both the on-disk header
 * counters and the cached ncleansegs.  A non-clean segment is left
 * untouched with a warning.
 */
void nilfs_sufile_do_cancel_free(struct inode *sufile, __u64 segnum,
				 struct buffer_head *header_bh,
				 struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;

	kaddr = kmap_atomic(su_bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (unlikely(!nilfs_segment_usage_clean(su))) {
		nilfs_msg(sufile->i_sb, KERN_WARNING,
			  "%s: segment %llu must be clean", __func__,
			  (unsigned long long)segnum);
		kunmap_atomic(kaddr);
		return;
	}
	nilfs_segment_usage_set_dirty(su);
	kunmap_atomic(kaddr);

	/* one fewer clean, one more dirty segment */
	nilfs_sufile_mod_counter(header_bh, -1, 1);
	NILFS_SUI(sufile)->ncleansegs--;

	mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}
/*
 * nilfs_sufile_do_scrap - dofunc that turns a segment into garbage:
 * its usage entry is reset to "dirty, zero blocks, no modtime" so the
 * cleaner can reclaim it.  Already-scrapped segments are skipped.
 */
void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
			   struct buffer_head *header_bh,
			   struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int clean, dirty;

	kaddr = kmap_atomic(su_bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (su->su_flags == cpu_to_le32(BIT(NILFS_SEGMENT_USAGE_DIRTY)) &&
	    su->su_nblocks == cpu_to_le32(0)) {
		/* already in the scrapped state; nothing to do */
		kunmap_atomic(kaddr);
		return;
	}
	/* remember the previous state to fix up the counters below */
	clean = nilfs_segment_usage_clean(su);
	dirty = nilfs_segment_usage_dirty(su);

	/* make the segment garbage */
	su->su_lastmod = cpu_to_le64(0);
	su->su_nblocks = cpu_to_le32(0);
	su->su_flags = cpu_to_le32(BIT(NILFS_SEGMENT_USAGE_DIRTY));
	kunmap_atomic(kaddr);

	nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0, dirty ? 0 : 1);
	NILFS_SUI(sufile)->ncleansegs -= clean;

	mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}
/*
 * nilfs_sufile_do_free - dofunc that frees a segment, marking its usage
 * entry clean and updating the clean/dirty counters.  A segment that is
 * already clean is left alone with a warning.
 */
void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
			  struct buffer_head *header_bh,
			  struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int sudirty;

	kaddr = kmap_atomic(su_bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (nilfs_segment_usage_clean(su)) {
		nilfs_msg(sufile->i_sb, KERN_WARNING,
			  "%s: segment %llu is already clean",
			  __func__, (unsigned long long)segnum);
		kunmap_atomic(kaddr);
		return;
	}
	/* a freed segment is expected to be dirty and not in error state */
	WARN_ON(nilfs_segment_usage_error(su));
	WARN_ON(!nilfs_segment_usage_dirty(su));

	sudirty = nilfs_segment_usage_dirty(su);
	nilfs_segment_usage_set_clean(su);
	kunmap_atomic(kaddr);
	mark_buffer_dirty(su_bh);

	nilfs_sufile_mod_counter(header_bh, 1, sudirty ? (u64)-1 : 0);
	NILFS_SUI(sufile)->ncleansegs++;

	nilfs_mdt_mark_dirty(sufile);

	trace_nilfs2_segment_usage_freed(sufile, segnum);
}
/**
 * nilfs_sufile_mark_dirty - mark the buffer having a segment usage dirty
 * @sufile: inode of segment usage file
 * @segnum: segment number
 *
 * Returns 0 on success or a negative error code if the sufile block
 * holding @segnum's usage entry could not be read (the block is not
 * created here: create flag is 0).
 */
int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum)
{
	struct buffer_head *bh;
	int ret;

	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
	if (!ret) {
		mark_buffer_dirty(bh);
		nilfs_mdt_mark_dirty(sufile);
		brelse(bh);
	}
	return ret;
}
/**
 * nilfs_sufile_set_segment_usage - set usage of a segment
 * @sufile: inode of segment usage file
 * @segnum: segment number
 * @nblocks: number of live blocks in the segment
 * @modtime: modification time (option)
 *
 * Updates the block count and (optionally, when @modtime is nonzero)
 * the last-modified time of @segnum's usage entry under mi_sem.
 * Returns 0 on success or the negative error from the block lookup.
 */
int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
				   unsigned long nblocks, time64_t modtime)
{
	struct buffer_head *bh;
	struct nilfs_segment_usage *su;
	void *kaddr;
	int ret;

	down_write(&NILFS_MDT(sufile)->mi_sem);
	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
	if (ret < 0)
		goto out_sem;

	kaddr = kmap_atomic(bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
	WARN_ON(nilfs_segment_usage_error(su));
	if (modtime)
		su->su_lastmod = cpu_to_le64(modtime);
	su->su_nblocks = cpu_to_le32(nblocks);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(bh);
	nilfs_mdt_mark_dirty(sufile);
	brelse(bh);

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
/**
 * nilfs_sufile_get_stat - get segment usage statistics
 * @sufile: inode of segment usage file
 * @sustat: pointer to a structure of segment usage statistics
 *
 * Description: nilfs_sufile_get_stat() returns information about segment
 * usage.
 *
 * Return Value: On success, 0 is returned, and segment usage information is
 * stored in the place pointed by @sustat. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
{
	struct buffer_head *header_bh;
	struct nilfs_sufile_header *header;
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	void *kaddr;
	int ret;

	down_read(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	sustat->ss_nsegs = nilfs_sufile_get_nsegments(sufile);
	sustat->ss_ncleansegs = le64_to_cpu(header->sh_ncleansegs);
	sustat->ss_ndirtysegs = le64_to_cpu(header->sh_ndirtysegs);
	sustat->ss_ctime = nilfs->ns_ctime;
	sustat->ss_nongc_ctime = nilfs->ns_nongc_ctime;
	/* ns_prot_seq is guarded by its own spinlock, not mi_sem */
	spin_lock(&nilfs->ns_last_segment_lock);
	sustat->ss_prot_seq = nilfs->ns_prot_seq;
	spin_unlock(&nilfs->ns_last_segment_lock);
	kunmap_atomic(kaddr);
	brelse(header_bh);

 out_sem:
	up_read(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
/*
 * nilfs_sufile_do_set_error - dofunc that flags a segment as being in
 * error state.  If the segment was clean, the clean counters (on-disk
 * and cached) are decremented; segments already in error are skipped.
 */
void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum,
			       struct buffer_head *header_bh,
			       struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int suclean;

	kaddr = kmap_atomic(su_bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (nilfs_segment_usage_error(su)) {
		/* already marked; nothing to update */
		kunmap_atomic(kaddr);
		return;
	}
	suclean = nilfs_segment_usage_clean(su);
	nilfs_segment_usage_set_error(su);
	kunmap_atomic(kaddr);

	if (suclean) {
		nilfs_sufile_mod_counter(header_bh, -1, 0);
		NILFS_SUI(sufile)->ncleansegs--;
	}
	mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}
/**
 * nilfs_sufile_truncate_range - truncate range of segment array
 * @sufile: inode of segment usage file
 * @start: start segment number (inclusive)
 * @end: end segment number (inclusive)
 *
 * The range is processed block by block: each block is first checked to
 * contain only unused (or error) inactive segments, then error entries
 * are reset to clean, and finally fully-covered blocks are punched out.
 * Caller is expected to hold mi_sem (see nilfs_sufile_resize()).
 *
 * Return Value: On success, 0 is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-EINVAL - Invalid number of segments specified
 *
 * %-EBUSY - Dirty or active segments are present in the range
 */
static int nilfs_sufile_truncate_range(struct inode *sufile,
				       __u64 start, __u64 end)
{
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	struct buffer_head *header_bh;
	struct buffer_head *su_bh;
	struct nilfs_segment_usage *su, *su2;
	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
	unsigned long segusages_per_block;
	unsigned long nsegs, ncleaned;
	__u64 segnum;
	void *kaddr;
	ssize_t n, nc;
	int ret;
	int j;

	nsegs = nilfs_sufile_get_nsegments(sufile);

	ret = -EINVAL;
	if (start > end || start >= nsegs)
		goto out;

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out;

	segusages_per_block = nilfs_sufile_segment_usages_per_block(sufile);
	ncleaned = 0;

	for (segnum = start; segnum <= end; segnum += n) {
		/* number of entries in this block, clamped to the range */
		n = min_t(unsigned long,
			  segusages_per_block -
				  nilfs_sufile_get_offset(sufile, segnum),
			  end - segnum + 1);
		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
							   &su_bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				goto out_header;
			/* hole */
			continue;
		}
		kaddr = kmap_atomic(su_bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, segnum, su_bh, kaddr);
		su2 = su;
		/* first pass: refuse if any segment is in use or active */
		for (j = 0; j < n; j++, su = (void *)su + susz) {
			if ((le32_to_cpu(su->su_flags) &
			     ~BIT(NILFS_SEGMENT_USAGE_ERROR)) ||
			    nilfs_segment_is_active(nilfs, segnum + j)) {
				ret = -EBUSY;
				kunmap_atomic(kaddr);
				brelse(su_bh);
				goto out_header;
			}
		}
		/* second pass: convert error entries back to clean */
		nc = 0;
		for (su = su2, j = 0; j < n; j++, su = (void *)su + susz) {
			if (nilfs_segment_usage_error(su)) {
				nilfs_segment_usage_set_clean(su);
				nc++;
			}
		}
		kunmap_atomic(kaddr);
		if (nc > 0) {
			mark_buffer_dirty(su_bh);
			ncleaned += nc;
		}
		brelse(su_bh);

		if (n == segusages_per_block) {
			/* make hole */
			nilfs_sufile_delete_segment_usage_block(sufile, segnum);
		}
	}
	ret = 0;

 out_header:
	if (ncleaned > 0) {
		NILFS_SUI(sufile)->ncleansegs += ncleaned;
		nilfs_sufile_mod_counter(header_bh, ncleaned, 0);
		nilfs_mdt_mark_dirty(sufile);
	}
	brelse(header_bh);
 out:
	return ret;
}
/**
 * nilfs_sufile_resize - resize segment array
 * @sufile: inode of segment usage file
 * @newnsegs: new number of segments
 *
 * When growing, the new segments are accounted as clean; when
 * shrinking, the truncated tail must consist of unused segments
 * (checked by nilfs_sufile_truncate_range()) and enough clean segments
 * must remain for the reserved area.
 *
 * Return Value: On success, 0 is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOSPC - Enough free space is not left for shrinking
 *
 * %-EBUSY - Dirty or active segments exist in the region to be truncated
 */
int nilfs_sufile_resize(struct inode *sufile, __u64 newnsegs)
{
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	struct buffer_head *header_bh;
	struct nilfs_sufile_header *header;
	struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
	void *kaddr;
	unsigned long nsegs, nrsvsegs;
	int ret = 0;

	down_write(&NILFS_MDT(sufile)->mi_sem);

	nsegs = nilfs_sufile_get_nsegments(sufile);
	if (nsegs == newnsegs)
		goto out;	/* nothing to do */

	ret = -ENOSPC;
	nrsvsegs = nilfs_nrsvsegs(nilfs, newnsegs);
	if (newnsegs < nsegs && nsegs - newnsegs + nrsvsegs > sui->ncleansegs)
		goto out;

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out;

	if (newnsegs > nsegs) {
		sui->ncleansegs += newnsegs - nsegs;
	} else /* newnsegs < nsegs */ {
		ret = nilfs_sufile_truncate_range(sufile, newnsegs, nsegs - 1);
		if (ret < 0)
			goto out_header;

		sui->ncleansegs -= nsegs - newnsegs;
	}

	/* write the updated clean count into the on-disk header */
	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	header->sh_ncleansegs = cpu_to_le64(sui->ncleansegs);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(header_bh);
	nilfs_mdt_mark_dirty(sufile);
	nilfs_set_nsegments(nilfs, newnsegs);

 out_header:
	brelse(header_bh);
 out:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
/**
 * nilfs_sufile_get_suinfo - get an array of segment usage information
 * @sufile: inode of segment usage file
 * @segnum: segment number to start looking
 * @buf: array of suinfo
 * @sisz: byte size of suinfo
 * @nsi: size of suinfo array
 *
 * Description: Copies segment usage information for up to @nsi segments
 * starting at @segnum into @buf; hole blocks yield zero-filled entries,
 * and the virtual "active" flag is recomputed from the current segment.
 *
 * Return Value: On success, the number of filled entries is returned.
 * On error, one of the following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
				unsigned int sisz, size_t nsi)
{
	struct buffer_head *su_bh;
	struct nilfs_segment_usage *su;
	struct nilfs_suinfo *si = buf;
	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	void *kaddr;
	unsigned long nsegs, segusages_per_block;
	ssize_t n;
	int ret, i, j;

	down_read(&NILFS_MDT(sufile)->mi_sem);

	segusages_per_block = nilfs_sufile_segment_usages_per_block(sufile);
	/* clamp the request to the end of the segment array */
	nsegs = min_t(unsigned long,
		      nilfs_sufile_get_nsegments(sufile) - segnum,
		      nsi);
	for (i = 0; i < nsegs; i += n, segnum += n) {
		n = min_t(unsigned long,
			  segusages_per_block -
				  nilfs_sufile_get_offset(sufile, segnum),
			  nsegs - i);
		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
							   &su_bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				goto out;
			/* hole */
			memset(si, 0, sisz * n);
			si = (void *)si + sisz * n;
			continue;
		}

		kaddr = kmap_atomic(su_bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, segnum, su_bh, kaddr);
		for (j = 0; j < n;
		     j++, su = (void *)su + susz, si = (void *)si + sisz) {
			si->sui_lastmod = le64_to_cpu(su->su_lastmod);
			si->sui_nblocks = le32_to_cpu(su->su_nblocks);
			/*
			 * The active flag is not stored on disk; drop any
			 * stale copy and set it from the running state.
			 */
			si->sui_flags = le32_to_cpu(su->su_flags) &
				~BIT(NILFS_SEGMENT_USAGE_ACTIVE);
			if (nilfs_segment_is_active(nilfs, segnum + j))
				si->sui_flags |=
					BIT(NILFS_SEGMENT_USAGE_ACTIVE);
		}
		kunmap_atomic(kaddr);
		brelse(su_bh);
	}
	ret = nsegs;

 out:
	up_read(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
  767. /**
  768. * nilfs_sufile_set_suinfo - sets segment usage info
  769. * @sufile: inode of segment usage file
  770. * @buf: array of suinfo_update
  771. * @supsz: byte size of suinfo_update
  772. * @nsup: size of suinfo_update array
  773. *
  774. * Description: Takes an array of nilfs_suinfo_update structs and updates
  775. * segment usage accordingly. Only the fields indicated by the sup_flags
  776. * are updated.
  777. *
  778. * Return Value: On success, 0 is returned. On error, one of the
  779. * following negative error codes is returned.
  780. *
  781. * %-EIO - I/O error.
  782. *
  783. * %-ENOMEM - Insufficient amount of memory available.
  784. *
  785. * %-EINVAL - Invalid values in input (segment number, flags or nblocks)
  786. */
  787. ssize_t nilfs_sufile_set_suinfo(struct inode *sufile, void *buf,
  788. unsigned int supsz, size_t nsup)
  789. {
  790. struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
  791. struct buffer_head *header_bh, *bh;
  792. struct nilfs_suinfo_update *sup, *supend = buf + supsz * nsup;
  793. struct nilfs_segment_usage *su;
  794. void *kaddr;
  795. unsigned long blkoff, prev_blkoff;
  796. int cleansi, cleansu, dirtysi, dirtysu;
  797. long ncleaned = 0, ndirtied = 0;
  798. int ret = 0;
  799. if (unlikely(nsup == 0))
  800. return ret;
  801. for (sup = buf; sup < supend; sup = (void *)sup + supsz) {
  802. if (sup->sup_segnum >= nilfs->ns_nsegments
  803. || (sup->sup_flags &
  804. (~0UL << __NR_NILFS_SUINFO_UPDATE_FIELDS))
  805. || (nilfs_suinfo_update_nblocks(sup) &&
  806. sup->sup_sui.sui_nblocks >
  807. nilfs->ns_blocks_per_segment))
  808. return -EINVAL;
  809. }
  810. down_write(&NILFS_MDT(sufile)->mi_sem);
  811. ret = nilfs_sufile_get_header_block(sufile, &header_bh);
  812. if (ret < 0)
  813. goto out_sem;
  814. sup = buf;
  815. blkoff = nilfs_sufile_get_blkoff(sufile, sup->sup_segnum);
  816. ret = nilfs_mdt_get_block(sufile, blkoff, 1, NULL, &bh);
  817. if (ret < 0)
  818. goto out_header;
  819. for (;;) {
  820. kaddr = kmap_atomic(bh->b_page);
  821. su = nilfs_sufile_block_get_segment_usage(
  822. sufile, sup->sup_segnum, bh, kaddr);
  823. if (nilfs_suinfo_update_lastmod(sup))
  824. su->su_lastmod = cpu_to_le64(sup->sup_sui.sui_lastmod);
  825. if (nilfs_suinfo_update_nblocks(sup))
  826. su->su_nblocks = cpu_to_le32(sup->sup_sui.sui_nblocks);
  827. if (nilfs_suinfo_update_flags(sup)) {
  828. /*
  829. * Active flag is a virtual flag projected by running
  830. * nilfs kernel code - drop it not to write it to
  831. * disk.
  832. */
  833. sup->sup_sui.sui_flags &=
  834. ~BIT(NILFS_SEGMENT_USAGE_ACTIVE);
  835. cleansi = nilfs_suinfo_clean(&sup->sup_sui);
  836. cleansu = nilfs_segment_usage_clean(su);
  837. dirtysi = nilfs_suinfo_dirty(&sup->sup_sui);
  838. dirtysu = nilfs_segment_usage_dirty(su);
  839. if (cleansi && !cleansu)
  840. ++ncleaned;
  841. else if (!cleansi && cleansu)
  842. --ncleaned;
  843. if (dirtysi && !dirtysu)
  844. ++ndirtied;
  845. else if (!dirtysi && dirtysu)
  846. --ndirtied;
  847. su->su_flags = cpu_to_le32(sup->sup_sui.sui_flags);
  848. }
  849. kunmap_atomic(kaddr);
  850. sup = (void *)sup + supsz;
  851. if (sup >= supend)
  852. break;
  853. prev_blkoff = blkoff;
  854. blkoff = nilfs_sufile_get_blkoff(sufile, sup->sup_segnum);
  855. if (blkoff == prev_blkoff)
  856. continue;
  857. /* get different block */
  858. mark_buffer_dirty(bh);
  859. put_bh(bh);
  860. ret = nilfs_mdt_get_block(sufile, blkoff, 1, NULL, &bh);
  861. if (unlikely(ret < 0))
  862. goto out_mark;
  863. }
  864. mark_buffer_dirty(bh);
  865. put_bh(bh);
  866. out_mark:
  867. if (ncleaned || ndirtied) {
  868. nilfs_sufile_mod_counter(header_bh, (u64)ncleaned,
  869. (u64)ndirtied);
  870. NILFS_SUI(sufile)->ncleansegs += ncleaned;
  871. }
  872. nilfs_mdt_mark_dirty(sufile);
  873. out_header:
  874. put_bh(header_bh);
  875. out_sem:
  876. up_write(&NILFS_MDT(sufile)->mi_sem);
  877. return ret;
  878. }
  879. /**
  880. * nilfs_sufile_trim_fs() - trim ioctl handle function
  881. * @sufile: inode of segment usage file
  882. * @range: fstrim_range structure
  883. *
  884. * start: First Byte to trim
  885. * len: number of Bytes to trim from start
  886. * minlen: minimum extent length in Bytes
  887. *
  888. * Decription: nilfs_sufile_trim_fs goes through all segments containing bytes
  889. * from start to start+len. start is rounded up to the next block boundary
  890. * and start+len is rounded down. For each clean segment blkdev_issue_discard
  891. * function is invoked.
  892. *
  893. * Return Value: On success, 0 is returned or negative error code, otherwise.
  894. */
  895. int nilfs_sufile_trim_fs(struct inode *sufile, struct fstrim_range *range)
  896. {
  897. struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
  898. struct buffer_head *su_bh;
  899. struct nilfs_segment_usage *su;
  900. void *kaddr;
  901. size_t n, i, susz = NILFS_MDT(sufile)->mi_entry_size;
  902. sector_t seg_start, seg_end, start_block, end_block;
  903. sector_t start = 0, nblocks = 0;
  904. u64 segnum, segnum_end, minlen, len, max_blocks, ndiscarded = 0;
  905. int ret = 0;
  906. unsigned int sects_per_block;
  907. sects_per_block = (1 << nilfs->ns_blocksize_bits) /
  908. bdev_logical_block_size(nilfs->ns_bdev);
  909. len = range->len >> nilfs->ns_blocksize_bits;
  910. minlen = range->minlen >> nilfs->ns_blocksize_bits;
  911. max_blocks = ((u64)nilfs->ns_nsegments * nilfs->ns_blocks_per_segment);
  912. if (!len || range->start >= max_blocks << nilfs->ns_blocksize_bits)
  913. return -EINVAL;
  914. start_block = (range->start + nilfs->ns_blocksize - 1) >>
  915. nilfs->ns_blocksize_bits;
  916. /*
  917. * range->len can be very large (actually, it is set to
  918. * ULLONG_MAX by default) - truncate upper end of the range
  919. * carefully so as not to overflow.
  920. */
  921. if (max_blocks - start_block < len)
  922. end_block = max_blocks - 1;
  923. else
  924. end_block = start_block + len - 1;
  925. segnum = nilfs_get_segnum_of_block(nilfs, start_block);
  926. segnum_end = nilfs_get_segnum_of_block(nilfs, end_block);
  927. down_read(&NILFS_MDT(sufile)->mi_sem);
  928. while (segnum <= segnum_end) {
  929. n = nilfs_sufile_segment_usages_in_block(sufile, segnum,
  930. segnum_end);
  931. ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
  932. &su_bh);
  933. if (ret < 0) {
  934. if (ret != -ENOENT)
  935. goto out_sem;
  936. /* hole */
  937. segnum += n;
  938. continue;
  939. }
  940. kaddr = kmap_atomic(su_bh->b_page);
  941. su = nilfs_sufile_block_get_segment_usage(sufile, segnum,
  942. su_bh, kaddr);
  943. for (i = 0; i < n; ++i, ++segnum, su = (void *)su + susz) {
  944. if (!nilfs_segment_usage_clean(su))
  945. continue;
  946. nilfs_get_segment_range(nilfs, segnum, &seg_start,
  947. &seg_end);
  948. if (!nblocks) {
  949. /* start new extent */
  950. start = seg_start;
  951. nblocks = seg_end - seg_start + 1;
  952. continue;
  953. }
  954. if (start + nblocks == seg_start) {
  955. /* add to previous extent */
  956. nblocks += seg_end - seg_start + 1;
  957. continue;
  958. }
  959. /* discard previous extent */
  960. if (start < start_block) {
  961. nblocks -= start_block - start;
  962. start = start_block;
  963. }
  964. if (nblocks >= minlen) {
  965. kunmap_atomic(kaddr);
  966. ret = blkdev_issue_discard(nilfs->ns_bdev,
  967. start * sects_per_block,
  968. nblocks * sects_per_block,
  969. GFP_NOFS, 0);
  970. if (ret < 0) {
  971. put_bh(su_bh);
  972. goto out_sem;
  973. }
  974. ndiscarded += nblocks;
  975. kaddr = kmap_atomic(su_bh->b_page);
  976. su = nilfs_sufile_block_get_segment_usage(
  977. sufile, segnum, su_bh, kaddr);
  978. }
  979. /* start new extent */
  980. start = seg_start;
  981. nblocks = seg_end - seg_start + 1;
  982. }
  983. kunmap_atomic(kaddr);
  984. put_bh(su_bh);
  985. }
  986. if (nblocks) {
  987. /* discard last extent */
  988. if (start < start_block) {
  989. nblocks -= start_block - start;
  990. start = start_block;
  991. }
  992. if (start + nblocks > end_block + 1)
  993. nblocks = end_block - start + 1;
  994. if (nblocks >= minlen) {
  995. ret = blkdev_issue_discard(nilfs->ns_bdev,
  996. start * sects_per_block,
  997. nblocks * sects_per_block,
  998. GFP_NOFS, 0);
  999. if (!ret)
  1000. ndiscarded += nblocks;
  1001. }
  1002. }
  1003. out_sem:
  1004. up_read(&NILFS_MDT(sufile)->mi_sem);
  1005. range->len = ndiscarded << nilfs->ns_blocksize_bits;
  1006. return ret;
  1007. }
  1008. /**
  1009. * nilfs_sufile_read - read or get sufile inode
  1010. * @sb: super block instance
  1011. * @susize: size of a segment usage entry
  1012. * @raw_inode: on-disk sufile inode
  1013. * @inodep: buffer to store the inode
  1014. */
  1015. int nilfs_sufile_read(struct super_block *sb, size_t susize,
  1016. struct nilfs_inode *raw_inode, struct inode **inodep)
  1017. {
  1018. struct inode *sufile;
  1019. struct nilfs_sufile_info *sui;
  1020. struct buffer_head *header_bh;
  1021. struct nilfs_sufile_header *header;
  1022. void *kaddr;
  1023. int err;
  1024. if (susize > sb->s_blocksize) {
  1025. nilfs_msg(sb, KERN_ERR,
  1026. "too large segment usage size: %zu bytes", susize);
  1027. return -EINVAL;
  1028. } else if (susize < NILFS_MIN_SEGMENT_USAGE_SIZE) {
  1029. nilfs_msg(sb, KERN_ERR,
  1030. "too small segment usage size: %zu bytes", susize);
  1031. return -EINVAL;
  1032. }
  1033. sufile = nilfs_iget_locked(sb, NULL, NILFS_SUFILE_INO);
  1034. if (unlikely(!sufile))
  1035. return -ENOMEM;
  1036. if (!(sufile->i_state & I_NEW))
  1037. goto out;
  1038. err = nilfs_mdt_init(sufile, NILFS_MDT_GFP, sizeof(*sui));
  1039. if (err)
  1040. goto failed;
  1041. nilfs_mdt_set_entry_size(sufile, susize,
  1042. sizeof(struct nilfs_sufile_header));
  1043. err = nilfs_read_inode_common(sufile, raw_inode);
  1044. if (err)
  1045. goto failed;
  1046. err = nilfs_sufile_get_header_block(sufile, &header_bh);
  1047. if (err)
  1048. goto failed;
  1049. sui = NILFS_SUI(sufile);
  1050. kaddr = kmap_atomic(header_bh->b_page);
  1051. header = kaddr + bh_offset(header_bh);
  1052. sui->ncleansegs = le64_to_cpu(header->sh_ncleansegs);
  1053. kunmap_atomic(kaddr);
  1054. brelse(header_bh);
  1055. sui->allocmax = nilfs_sufile_get_nsegments(sufile) - 1;
  1056. sui->allocmin = 0;
  1057. unlock_new_inode(sufile);
  1058. out:
  1059. *inodep = sufile;
  1060. return 0;
  1061. failed:
  1062. iget_failed(sufile);
  1063. return err;
  1064. }