super.c

/*
 *   Copyright (C) International Business Machines Corp., 2000-2004
 *   Portions Copyright (C) Christoph Hellwig, 2001-2002
 *
 *   This program is free software;  you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program;  if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/parser.h>
#include <linux/completion.h>
#include <linux/vfs.h>
#include <linux/quotaops.h>
#include <linux/mount.h>
#include <linux/moduleparam.h>
#include <linux/kthread.h>
#include <linux/posix_acl.h>
#include <linux/buffer_head.h>
#include <linux/exportfs.h>
#include <linux/crc32.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>
#include <linux/blkdev.h>

#include "jfs_incore.h"
#include "jfs_filsys.h"
#include "jfs_inode.h"
#include "jfs_metapage.h"
#include "jfs_superblock.h"
#include "jfs_dmap.h"
#include "jfs_imap.h"
#include "jfs_acl.h"
#include "jfs_debug.h"
#include "jfs_xattr.h"

MODULE_DESCRIPTION("The Journaled Filesystem (JFS)");
MODULE_AUTHOR("Steve Best/Dave Kleikamp/Barry Arndt, IBM");
MODULE_LICENSE("GPL");

static struct kmem_cache *jfs_inode_cachep;

static const struct super_operations jfs_super_operations;
static const struct export_operations jfs_export_operations;
static struct file_system_type jfs_fs_type;

#define MAX_COMMIT_THREADS 64
static int commit_threads;
module_param(commit_threads, int, 0);
MODULE_PARM_DESC(commit_threads, "Number of commit threads");

static struct task_struct *jfsCommitThread[MAX_COMMIT_THREADS];
struct task_struct *jfsIOthread;
struct task_struct *jfsSyncThread;

#ifdef CONFIG_JFS_DEBUG
int jfsloglevel = JFS_LOGLEVEL_WARN;
module_param(jfsloglevel, int, 0644);
MODULE_PARM_DESC(jfsloglevel, "Specify JFS loglevel (0, 1 or 2)");
#endif

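/*
 * jfs_handle_error() applies the error policy selected with the "errors="
 * mount option once the superblock has been marked dirty: panic, remount
 * the filesystem read-only, or simply continue.
 */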
static void jfs_handle_error(struct super_block *sb)
{
	struct jfs_sb_info *sbi = JFS_SBI(sb);

	if (sb->s_flags & MS_RDONLY)
		return;

	updateSuper(sb, FM_DIRTY);

	if (sbi->flag & JFS_ERR_PANIC)
		panic("JFS (device %s): panic forced after error\n",
			sb->s_id);
	else if (sbi->flag & JFS_ERR_REMOUNT_RO) {
		jfs_err("ERROR: (device %s): remounting filesystem as read-only",
			sb->s_id);
		sb->s_flags |= MS_RDONLY;
	}

	/* nothing is done for continue beyond marking the superblock dirty */
}

void jfs_error(struct super_block *sb, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	pr_err("ERROR: (device %s): %ps: %pV\n",
	       sb->s_id, __builtin_return_address(0), &vaf);

	va_end(args);

	jfs_handle_error(sb);
}

static struct inode *jfs_alloc_inode(struct super_block *sb)
{
	struct jfs_inode_info *jfs_inode;

	jfs_inode = kmem_cache_alloc(jfs_inode_cachep, GFP_NOFS);
	if (!jfs_inode)
		return NULL;
#ifdef CONFIG_QUOTA
	memset(&jfs_inode->i_dquot, 0, sizeof(jfs_inode->i_dquot));
#endif
	return &jfs_inode->vfs_inode;
}

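/*
 * Inodes are freed through an RCU callback so that lock-free path walks
 * holding only rcu_read_lock() never see the jfs_inode_info memory returned
 * to the slab cache while they may still dereference it.
 */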
static void jfs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	struct jfs_inode_info *ji = JFS_IP(inode);
	kmem_cache_free(jfs_inode_cachep, ji);
}

static void jfs_destroy_inode(struct inode *inode)
{
	struct jfs_inode_info *ji = JFS_IP(inode);

	BUG_ON(!list_empty(&ji->anon_inode_list));

	spin_lock_irq(&ji->ag_lock);
	if (ji->active_ag != -1) {
		struct bmap *bmap = JFS_SBI(inode->i_sb)->bmap;
		atomic_dec(&bmap->db_active[ji->active_ag]);
		ji->active_ag = -1;
	}
	spin_unlock_irq(&ji->ag_lock);
	call_rcu(&inode->i_rcu, jfs_i_callback);
}

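/*
 * jfs_statfs() reports block usage straight from the block allocation map.
 * Because JFS allocates inodes dynamically, f_files/f_ffree are an estimate
 * of how many inodes could still be created rather than a fixed total.
 */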
static int jfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct jfs_sb_info *sbi = JFS_SBI(dentry->d_sb);
	s64 maxinodes;
	struct inomap *imap = JFS_IP(sbi->ipimap)->i_imap;

	jfs_info("In jfs_statfs");
	buf->f_type = JFS_SUPER_MAGIC;
	buf->f_bsize = sbi->bsize;
	buf->f_blocks = sbi->bmap->db_mapsize;
	buf->f_bfree = sbi->bmap->db_nfree;
	buf->f_bavail = sbi->bmap->db_nfree;
	/*
	 * If we really return the number of allocated & free inodes, some
	 * applications will fail because they won't see enough free inodes.
	 * We'll try to calculate some guess as to how many inodes we can
	 * really allocate
	 *
	 * buf->f_files = atomic_read(&imap->im_numinos);
	 * buf->f_ffree = atomic_read(&imap->im_numfree);
	 */
	maxinodes = min((s64) atomic_read(&imap->im_numinos) +
			((sbi->bmap->db_nfree >> imap->im_l2nbperiext)
			 << L2INOSPEREXT), (s64) 0xffffffffLL);
	buf->f_files = maxinodes;
	buf->f_ffree = maxinodes - (atomic_read(&imap->im_numinos) -
				    atomic_read(&imap->im_numfree));
	buf->f_fsid.val[0] = (u32)crc32_le(0, sbi->uuid, sizeof(sbi->uuid)/2);
	buf->f_fsid.val[1] = (u32)crc32_le(0, sbi->uuid + sizeof(sbi->uuid)/2,
					   sizeof(sbi->uuid)/2);

	buf->f_namelen = JFS_NAME_MAX;
	return 0;
}

static void jfs_put_super(struct super_block *sb)
{
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	int rc;

	jfs_info("In jfs_put_super");

	dquot_disable(sb, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);

	rc = jfs_umount(sb);
	if (rc)
		jfs_err("jfs_umount failed with return code %d", rc);

	unload_nls(sbi->nls_tab);

	truncate_inode_pages(sbi->direct_inode->i_mapping, 0);
	iput(sbi->direct_inode);

	kfree(sbi);
}

enum {
	Opt_integrity, Opt_nointegrity, Opt_iocharset, Opt_resize,
	Opt_resize_nosize, Opt_errors, Opt_ignore, Opt_err, Opt_quota,
	Opt_usrquota, Opt_grpquota, Opt_uid, Opt_gid, Opt_umask,
	Opt_discard, Opt_nodiscard, Opt_discard_minblk
};

static const match_table_t tokens = {
	{Opt_integrity, "integrity"},
	{Opt_nointegrity, "nointegrity"},
	{Opt_iocharset, "iocharset=%s"},
	{Opt_resize, "resize=%u"},
	{Opt_resize_nosize, "resize"},
	{Opt_errors, "errors=%s"},
	{Opt_ignore, "noquota"},
	{Opt_ignore, "quota"},
	{Opt_usrquota, "usrquota"},
	{Opt_grpquota, "grpquota"},
	{Opt_uid, "uid=%u"},
	{Opt_gid, "gid=%u"},
	{Opt_umask, "umask=%u"},
	{Opt_discard, "discard"},
	{Opt_nodiscard, "nodiscard"},
	{Opt_discard_minblk, "discard=%u"},
	{Opt_err, NULL}
};

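/*
 * parse_options() walks the comma-separated mount option string with
 * match_token() and updates *flag, *newLVSize and the per-sb iocharset.
 * It returns 1 on success and 0 on any parse error, unloading a freshly
 * loaded NLS table on the failure path.
 */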
static int parse_options(char *options, struct super_block *sb, s64 *newLVSize,
			 int *flag)
{
	void *nls_map = (void *)-1;	/* -1: no change;  NULL: none */
	char *p;
	struct jfs_sb_info *sbi = JFS_SBI(sb);

	*newLVSize = 0;

	if (!options)
		return 1;

	while ((p = strsep(&options, ",")) != NULL) {
		substring_t args[MAX_OPT_ARGS];
		int token;
		if (!*p)
			continue;

		token = match_token(p, tokens, args);
		switch (token) {
		case Opt_integrity:
			*flag &= ~JFS_NOINTEGRITY;
			break;
		case Opt_nointegrity:
			*flag |= JFS_NOINTEGRITY;
			break;
		case Opt_ignore:
			/* Silently ignore the quota options */
			/* Don't do anything ;-) */
			break;
		case Opt_iocharset:
			if (nls_map && nls_map != (void *) -1)
				unload_nls(nls_map);
			if (!strcmp(args[0].from, "none"))
				nls_map = NULL;
			else {
				nls_map = load_nls(args[0].from);
				if (!nls_map) {
					pr_err("JFS: charset not found\n");
					goto cleanup;
				}
			}
			break;
		case Opt_resize:
		{
			char *resize = args[0].from;
			int rc = kstrtoll(resize, 0, newLVSize);

			if (rc)
				goto cleanup;
			break;
		}
		case Opt_resize_nosize:
		{
			*newLVSize = sb->s_bdev->bd_inode->i_size >>
				sb->s_blocksize_bits;
			if (*newLVSize == 0)
				pr_err("JFS: Cannot determine volume size\n");
			break;
		}
		case Opt_errors:
		{
			char *errors = args[0].from;
			if (!errors || !*errors)
				goto cleanup;
			if (!strcmp(errors, "continue")) {
				*flag &= ~JFS_ERR_REMOUNT_RO;
				*flag &= ~JFS_ERR_PANIC;
				*flag |= JFS_ERR_CONTINUE;
			} else if (!strcmp(errors, "remount-ro")) {
				*flag &= ~JFS_ERR_CONTINUE;
				*flag &= ~JFS_ERR_PANIC;
				*flag |= JFS_ERR_REMOUNT_RO;
			} else if (!strcmp(errors, "panic")) {
				*flag &= ~JFS_ERR_CONTINUE;
				*flag &= ~JFS_ERR_REMOUNT_RO;
				*flag |= JFS_ERR_PANIC;
			} else {
				pr_err("JFS: %s is an invalid error handler\n",
				       errors);
				goto cleanup;
			}
			break;
		}

#ifdef CONFIG_QUOTA
		case Opt_quota:
		case Opt_usrquota:
			*flag |= JFS_USRQUOTA;
			break;
		case Opt_grpquota:
			*flag |= JFS_GRPQUOTA;
			break;
#else
		case Opt_usrquota:
		case Opt_grpquota:
		case Opt_quota:
			pr_err("JFS: quota operations not supported\n");
			break;
#endif
		case Opt_uid:
		{
			char *uid = args[0].from;
			uid_t val;
			int rc = kstrtouint(uid, 0, &val);

			if (rc)
				goto cleanup;
			sbi->uid = make_kuid(current_user_ns(), val);
			if (!uid_valid(sbi->uid))
				goto cleanup;
			break;
		}

		case Opt_gid:
		{
			char *gid = args[0].from;
			gid_t val;
			int rc = kstrtouint(gid, 0, &val);

			if (rc)
				goto cleanup;
			sbi->gid = make_kgid(current_user_ns(), val);
			if (!gid_valid(sbi->gid))
				goto cleanup;
			break;
		}

		case Opt_umask:
		{
			char *umask = args[0].from;
			int rc = kstrtouint(umask, 8, &sbi->umask);

			if (rc)
				goto cleanup;
			if (sbi->umask & ~0777) {
				pr_err("JFS: Invalid value of umask\n");
				goto cleanup;
			}
			break;
		}

		case Opt_discard:
		{
			struct request_queue *q = bdev_get_queue(sb->s_bdev);
			/* if set to 1, even copying files will cause
			 * trimming :O
			 * -> user has more control over the online trimming
			 */
			sbi->minblks_trim = 64;
			if (blk_queue_discard(q))
				*flag |= JFS_DISCARD;
			else
				pr_err("JFS: discard option not supported on device\n");
			break;
		}

		case Opt_nodiscard:
			*flag &= ~JFS_DISCARD;
			break;

		case Opt_discard_minblk:
		{
			struct request_queue *q = bdev_get_queue(sb->s_bdev);
			char *minblks_trim = args[0].from;
			int rc;
			if (blk_queue_discard(q)) {
				*flag |= JFS_DISCARD;
				rc = kstrtouint(minblks_trim, 0,
						&sbi->minblks_trim);
				if (rc)
					goto cleanup;
			} else
				pr_err("JFS: discard option not supported on device\n");
			break;
		}

		default:
			printk("jfs: Unrecognized mount option \"%s\" or missing value\n",
			       p);
			goto cleanup;
		}
	}

	if (nls_map != (void *) -1) {
		/* Discard old (if remount) */
		unload_nls(sbi->nls_tab);
		sbi->nls_tab = nls_map;
	}
	return 1;

cleanup:
	if (nls_map && nls_map != (void *) -1)
		unload_nls(nls_map);
	return 0;
}

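/*
 * jfs_remount() handles resize requests and transitions between read-only
 * and read-write mounts, including suspending/resuming quotas and
 * re-reading metadata that fsck may have changed while the fs was r/o.
 */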
static int jfs_remount(struct super_block *sb, int *flags, char *data)
{
	s64 newLVSize = 0;
	int rc = 0;
	int flag = JFS_SBI(sb)->flag;
	int ret;

	sync_filesystem(sb);
	if (!parse_options(data, sb, &newLVSize, &flag))
		return -EINVAL;

	if (newLVSize) {
		if (sb->s_flags & MS_RDONLY) {
			pr_err("JFS: resize requires volume to be mounted read-write\n");
			return -EROFS;
		}
		rc = jfs_extendfs(sb, newLVSize, 0);
		if (rc)
			return rc;
	}

	if ((sb->s_flags & MS_RDONLY) && !(*flags & MS_RDONLY)) {
		/*
		 * Invalidate any previously read metadata.  fsck may have
		 * changed the on-disk data since we mounted r/o
		 */
		truncate_inode_pages(JFS_SBI(sb)->direct_inode->i_mapping, 0);

		JFS_SBI(sb)->flag = flag;
		ret = jfs_mount_rw(sb, 1);

		/* mark the fs r/w for quota activity */
		sb->s_flags &= ~MS_RDONLY;

		dquot_resume(sb, -1);
		return ret;
	}
	if ((!(sb->s_flags & MS_RDONLY)) && (*flags & MS_RDONLY)) {
		rc = dquot_suspend(sb, -1);
		if (rc < 0)
			return rc;
		rc = jfs_umount_rw(sb);
		JFS_SBI(sb)->flag = flag;
		return rc;
	}
	if ((JFS_SBI(sb)->flag & JFS_NOINTEGRITY) != (flag & JFS_NOINTEGRITY))
		if (!(sb->s_flags & MS_RDONLY)) {
			rc = jfs_umount_rw(sb);
			if (rc)
				return rc;

			JFS_SBI(sb)->flag = flag;
			ret = jfs_mount_rw(sb, 1);
			return ret;
		}
	JFS_SBI(sb)->flag = flag;

	return 0;
}

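/*
 * jfs_fill_super() parses the mount options, sets up the direct-mapping
 * inode used for metadata I/O, reads the on-disk superblock via jfs_mount(),
 * brings up the log for read-write mounts and instantiates the root dentry.
 */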
static int jfs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct jfs_sb_info *sbi;
	struct inode *inode;
	int rc;
	s64 newLVSize = 0;
	int flag, ret = -EINVAL;

	jfs_info("In jfs_read_super: s_flags=0x%lx", sb->s_flags);

	sbi = kzalloc(sizeof(struct jfs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	sb->s_fs_info = sbi;
	sb->s_max_links = JFS_LINK_MAX;
	sbi->sb = sb;
	sbi->uid = INVALID_UID;
	sbi->gid = INVALID_GID;
	sbi->umask = -1;

	/* initialize the mount flag and determine the default error handler */
	flag = JFS_ERR_REMOUNT_RO;

	if (!parse_options((char *) data, sb, &newLVSize, &flag))
		goto out_kfree;
	sbi->flag = flag;

#ifdef CONFIG_JFS_POSIX_ACL
	sb->s_flags |= MS_POSIXACL;
#endif

	if (newLVSize) {
		pr_err("resize option for remount only\n");
		goto out_kfree;
	}

	/*
	 * Initialize blocksize to 4K.
	 */
	sb_set_blocksize(sb, PSIZE);

	/*
	 * Set method vectors.
	 */
	sb->s_op = &jfs_super_operations;
	sb->s_export_op = &jfs_export_operations;
	sb->s_xattr = jfs_xattr_handlers;
#ifdef CONFIG_QUOTA
	sb->dq_op = &dquot_operations;
	sb->s_qcop = &dquot_quotactl_ops;
	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
#endif

	/*
	 * Initialize direct-mapping inode/address-space
	 */
	inode = new_inode(sb);
	if (inode == NULL) {
		ret = -ENOMEM;
		goto out_unload;
	}
	inode->i_ino = 0;
	inode->i_size = sb->s_bdev->bd_inode->i_size;
	inode->i_mapping->a_ops = &jfs_metapage_aops;
	hlist_add_fake(&inode->i_hash);
	mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);

	sbi->direct_inode = inode;

	rc = jfs_mount(sb);
	if (rc) {
		if (!silent)
			jfs_err("jfs_mount failed w/return code = %d", rc);
		goto out_mount_failed;
	}
	if (sb->s_flags & MS_RDONLY)
		sbi->log = NULL;
	else {
		rc = jfs_mount_rw(sb, 0);
		if (rc) {
			if (!silent) {
				jfs_err("jfs_mount_rw failed, return code = %d",
					rc);
			}
			goto out_no_rw;
		}
	}

	sb->s_magic = JFS_SUPER_MAGIC;

	if (sbi->mntflag & JFS_OS2)
		sb->s_d_op = &jfs_ci_dentry_operations;

	inode = jfs_iget(sb, ROOT_I);
	if (IS_ERR(inode)) {
		ret = PTR_ERR(inode);
		goto out_no_rw;
	}
	sb->s_root = d_make_root(inode);
	if (!sb->s_root)
		goto out_no_root;

	/* logical blocks are represented by 40 bits in pxd_t, etc. */
	sb->s_maxbytes = ((u64) sb->s_blocksize) << 40;
#if BITS_PER_LONG == 32
	/*
	 * Page cache is indexed by long.
	 * I would use MAX_LFS_FILESIZE, but it's only half as big
	 */
	sb->s_maxbytes = min(((u64) PAGE_SIZE << 32) - 1,
			     (u64)sb->s_maxbytes);
#endif
	sb->s_time_gran = 1;
	return 0;

out_no_root:
	jfs_err("jfs_read_super: get root dentry failed");

out_no_rw:
	rc = jfs_umount(sb);
	if (rc)
		jfs_err("jfs_umount failed with return code %d", rc);
out_mount_failed:
	filemap_write_and_wait(sbi->direct_inode->i_mapping);
	truncate_inode_pages(sbi->direct_inode->i_mapping, 0);
	make_bad_inode(sbi->direct_inode);
	iput(sbi->direct_inode);
	sbi->direct_inode = NULL;
out_unload:
	unload_nls(sbi->nls_tab);
out_kfree:
	kfree(sbi);
	return ret;
}

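/*
 * Freeze support: quiesce the transaction manager, shut down the log and
 * mark the superblock clean so the frozen image is consistent; unfreeze
 * reverses the sequence.
 */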
static int jfs_freeze(struct super_block *sb)
{
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	struct jfs_log *log = sbi->log;
	int rc = 0;

	if (!(sb->s_flags & MS_RDONLY)) {
		txQuiesce(sb);
		rc = lmLogShutdown(log);
		if (rc) {
			jfs_error(sb, "lmLogShutdown failed\n");

			/* let operations fail rather than hang */
			txResume(sb);

			return rc;
		}
		rc = updateSuper(sb, FM_CLEAN);
		if (rc) {
			jfs_err("jfs_freeze: updateSuper failed");
			/*
			 * Don't fail here. Everything succeeded except
			 * marking the superblock clean, so there's really
			 * no harm in leaving it frozen for now.
			 */
		}
	}
	return 0;
}

static int jfs_unfreeze(struct super_block *sb)
{
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	struct jfs_log *log = sbi->log;
	int rc = 0;

	if (!(sb->s_flags & MS_RDONLY)) {
		rc = updateSuper(sb, FM_MOUNT);
		if (rc) {
			jfs_error(sb, "updateSuper failed\n");
			goto out;
		}
		rc = lmLogInit(log);
		if (rc)
			jfs_error(sb, "lmLogInit failed\n");
out:
		txResume(sb);
	}
	return rc;
}

static struct dentry *jfs_do_mount(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, jfs_fill_super);
}

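/*
 * jfs_sync_fs() pushes dirty quota structures and the journal toward disk;
 * a NULL log pointer means the filesystem is mounted read-only and there is
 * nothing to flush.
 */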
static int jfs_sync_fs(struct super_block *sb, int wait)
{
	struct jfs_log *log = JFS_SBI(sb)->log;

	/* log == NULL indicates read-only mount */
	if (log) {
		/*
		 * Write quota structures to quota file, sync_blockdev() will
		 * write them to disk later
		 */
		dquot_writeback_dquots(sb, -1);
		jfs_flush_journal(log, wait);
		jfs_syncpt(log, 0);
	}
	return 0;
}

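/*
 * jfs_show_options() emits only the options that differ from the defaults,
 * keeping /proc/mounts output minimal for a plain JFS mount.
 */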
static int jfs_show_options(struct seq_file *seq, struct dentry *root)
{
	struct jfs_sb_info *sbi = JFS_SBI(root->d_sb);

	if (uid_valid(sbi->uid))
		seq_printf(seq, ",uid=%d", from_kuid(&init_user_ns, sbi->uid));
	if (gid_valid(sbi->gid))
		seq_printf(seq, ",gid=%d", from_kgid(&init_user_ns, sbi->gid));
	if (sbi->umask != -1)
		seq_printf(seq, ",umask=%03o", sbi->umask);
	if (sbi->flag & JFS_NOINTEGRITY)
		seq_puts(seq, ",nointegrity");
	if (sbi->flag & JFS_DISCARD)
		seq_printf(seq, ",discard=%u", sbi->minblks_trim);
	if (sbi->nls_tab)
		seq_printf(seq, ",iocharset=%s", sbi->nls_tab->charset);
	if (sbi->flag & JFS_ERR_CONTINUE)
		seq_printf(seq, ",errors=continue");
	if (sbi->flag & JFS_ERR_PANIC)
		seq_printf(seq, ",errors=panic");

#ifdef CONFIG_QUOTA
	if (sbi->flag & JFS_USRQUOTA)
		seq_puts(seq, ",usrquota");

	if (sbi->flag & JFS_GRPQUOTA)
		seq_puts(seq, ",grpquota");
#endif
	return 0;
}

#ifdef CONFIG_QUOTA

/* Read data from quotafile - avoid pagecache and such because we cannot afford
 * acquiring the locks... As quota files are never truncated and quota code
 * itself serializes the operations (and no one else should touch the files)
 * we don't have to be afraid of races */
static ssize_t jfs_quota_read(struct super_block *sb, int type, char *data,
			      size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	sector_t blk = off >> sb->s_blocksize_bits;
	int err = 0;
	int offset = off & (sb->s_blocksize - 1);
	int tocopy;
	size_t toread;
	struct buffer_head tmp_bh;
	struct buffer_head *bh;
	loff_t i_size = i_size_read(inode);

	if (off > i_size)
		return 0;
	if (off+len > i_size)
		len = i_size-off;
	toread = len;
	while (toread > 0) {
		tocopy = sb->s_blocksize - offset < toread ?
				sb->s_blocksize - offset : toread;

		tmp_bh.b_state = 0;
		tmp_bh.b_size = i_blocksize(inode);
		err = jfs_get_block(inode, blk, &tmp_bh, 0);
		if (err)
			return err;
		if (!buffer_mapped(&tmp_bh))	/* A hole? */
			memset(data, 0, tocopy);
		else {
			bh = sb_bread(sb, tmp_bh.b_blocknr);
			if (!bh)
				return -EIO;
			memcpy(data, bh->b_data+offset, tocopy);
			brelse(bh);
		}
		offset = 0;
		toread -= tocopy;
		data += tocopy;
		blk++;
	}
	return len;
}

/* Write to quotafile */
static ssize_t jfs_quota_write(struct super_block *sb, int type,
			       const char *data, size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	sector_t blk = off >> sb->s_blocksize_bits;
	int err = 0;
	int offset = off & (sb->s_blocksize - 1);
	int tocopy;
	size_t towrite = len;
	struct buffer_head tmp_bh;
	struct buffer_head *bh;

	inode_lock(inode);
	while (towrite > 0) {
		tocopy = sb->s_blocksize - offset < towrite ?
				sb->s_blocksize - offset : towrite;

		tmp_bh.b_state = 0;
		tmp_bh.b_size = i_blocksize(inode);
		err = jfs_get_block(inode, blk, &tmp_bh, 1);
		if (err)
			goto out;
		if (offset || tocopy != sb->s_blocksize)
			bh = sb_bread(sb, tmp_bh.b_blocknr);
		else
			bh = sb_getblk(sb, tmp_bh.b_blocknr);
		if (!bh) {
			err = -EIO;
			goto out;
		}
		lock_buffer(bh);
		memcpy(bh->b_data+offset, data, tocopy);
		flush_dcache_page(bh->b_page);
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);
		brelse(bh);
		offset = 0;
		towrite -= tocopy;
		data += tocopy;
		blk++;
	}
out:
	if (len == towrite) {
		inode_unlock(inode);
		return err;
	}
	if (inode->i_size < off+len-towrite)
		i_size_write(inode, off+len-towrite);
	inode->i_version++;
	inode->i_mtime = inode->i_ctime = current_time(inode);
	mark_inode_dirty(inode);
	inode_unlock(inode);
	return len - towrite;
}

static struct dquot **jfs_get_dquots(struct inode *inode)
{
	return JFS_IP(inode)->i_dquot;
}
#endif

static const struct super_operations jfs_super_operations = {
	.alloc_inode	= jfs_alloc_inode,
	.destroy_inode	= jfs_destroy_inode,
	.dirty_inode	= jfs_dirty_inode,
	.write_inode	= jfs_write_inode,
	.evict_inode	= jfs_evict_inode,
	.put_super	= jfs_put_super,
	.sync_fs	= jfs_sync_fs,
	.freeze_fs	= jfs_freeze,
	.unfreeze_fs	= jfs_unfreeze,
	.statfs		= jfs_statfs,
	.remount_fs	= jfs_remount,
	.show_options	= jfs_show_options,
#ifdef CONFIG_QUOTA
	.quota_read	= jfs_quota_read,
	.quota_write	= jfs_quota_write,
	.get_dquots	= jfs_get_dquots,
#endif
};

static const struct export_operations jfs_export_operations = {
	.fh_to_dentry	= jfs_fh_to_dentry,
	.fh_to_parent	= jfs_fh_to_parent,
	.get_parent	= jfs_get_parent,
};

static struct file_system_type jfs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "jfs",
	.mount		= jfs_do_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("jfs");

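/*
 * init_once() is the slab constructor for jfs_inode_info objects: it runs
 * once per slab object, so the locks and list heads it initializes survive
 * repeated alloc/free cycles of the same object.
 */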
static void init_once(void *foo)
{
	struct jfs_inode_info *jfs_ip = (struct jfs_inode_info *) foo;

	memset(jfs_ip, 0, sizeof(struct jfs_inode_info));
	INIT_LIST_HEAD(&jfs_ip->anon_inode_list);
	init_rwsem(&jfs_ip->rdwrlock);
	mutex_init(&jfs_ip->commit_mutex);
	init_rwsem(&jfs_ip->xattr_sem);
	spin_lock_init(&jfs_ip->ag_lock);
	jfs_ip->active_ag = -1;
	inode_init_once(&jfs_ip->vfs_inode);
}

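/*
 * Module init: create the inode slab cache, bring up the metapage and
 * transaction managers, start the jfsIO, jfsCommit and jfsSync kernel
 * threads, then register the filesystem.  Each failure path unwinds the
 * steps that already succeeded, in reverse order.
 */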
static int __init init_jfs_fs(void)
{
	int i;
	int rc;

	jfs_inode_cachep =
	    kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
			    SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_ACCOUNT,
			    init_once);
	if (jfs_inode_cachep == NULL)
		return -ENOMEM;

	/*
	 * Metapage initialization
	 */
	rc = metapage_init();
	if (rc) {
		jfs_err("metapage_init failed w/rc = %d", rc);
		goto free_slab;
	}

	/*
	 * Transaction Manager initialization
	 */
	rc = txInit();
	if (rc) {
		jfs_err("txInit failed w/rc = %d", rc);
		goto free_metapage;
	}

	/*
	 * I/O completion thread (endio)
	 */
	jfsIOthread = kthread_run(jfsIOWait, NULL, "jfsIO");
	if (IS_ERR(jfsIOthread)) {
		rc = PTR_ERR(jfsIOthread);
		jfs_err("init_jfs_fs: fork failed w/rc = %d", rc);
		goto end_txmngr;
	}

	if (commit_threads < 1)
		commit_threads = num_online_cpus();
	if (commit_threads > MAX_COMMIT_THREADS)
		commit_threads = MAX_COMMIT_THREADS;

	for (i = 0; i < commit_threads; i++) {
		jfsCommitThread[i] = kthread_run(jfs_lazycommit, NULL,
						 "jfsCommit");
		if (IS_ERR(jfsCommitThread[i])) {
			rc = PTR_ERR(jfsCommitThread[i]);
			jfs_err("init_jfs_fs: fork failed w/rc = %d", rc);
			commit_threads = i;
			goto kill_committask;
		}
	}

	jfsSyncThread = kthread_run(jfs_sync, NULL, "jfsSync");
	if (IS_ERR(jfsSyncThread)) {
		rc = PTR_ERR(jfsSyncThread);
		jfs_err("init_jfs_fs: fork failed w/rc = %d", rc);
		goto kill_committask;
	}

#ifdef PROC_FS_JFS
	jfs_proc_init();
#endif

	rc = register_filesystem(&jfs_fs_type);
	if (!rc)
		return 0;

#ifdef PROC_FS_JFS
	jfs_proc_clean();
#endif
	kthread_stop(jfsSyncThread);
kill_committask:
	for (i = 0; i < commit_threads; i++)
		kthread_stop(jfsCommitThread[i]);
	kthread_stop(jfsIOthread);
end_txmngr:
	txExit();
free_metapage:
	metapage_exit();
free_slab:
	kmem_cache_destroy(jfs_inode_cachep);
	return rc;
}

static void __exit exit_jfs_fs(void)
{
	int i;

	jfs_info("exit_jfs_fs called");

	txExit();
	metapage_exit();

	kthread_stop(jfsIOthread);
	for (i = 0; i < commit_threads; i++)
		kthread_stop(jfsCommitThread[i]);
	kthread_stop(jfsSyncThread);
#ifdef PROC_FS_JFS
	jfs_proc_clean();
#endif
	unregister_filesystem(&jfs_fs_type);

	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(jfs_inode_cachep);
}

module_init(init_jfs_fs)
module_exit(exit_jfs_fs)