/*
 * Copyright (C) International Business Machines Corp., 2000-2004
 * Portions Copyright (C) Christoph Hellwig, 2001-2002
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/parser.h>
#include <linux/completion.h>
#include <linux/vfs.h>
#include <linux/quotaops.h>
#include <linux/mount.h>
#include <linux/moduleparam.h>
#include <linux/kthread.h>
#include <linux/posix_acl.h>
#include <linux/buffer_head.h>
#include <linux/exportfs.h>
#include <linux/crc32.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>
#include <linux/blkdev.h>

#include "jfs_incore.h"
#include "jfs_filsys.h"
#include "jfs_inode.h"
#include "jfs_metapage.h"
#include "jfs_superblock.h"
#include "jfs_dmap.h"
#include "jfs_imap.h"
#include "jfs_acl.h"
#include "jfs_debug.h"
#include "jfs_xattr.h"
MODULE_DESCRIPTION("The Journaled Filesystem (JFS)");
MODULE_AUTHOR("Steve Best/Dave Kleikamp/Barry Arndt, IBM");
MODULE_LICENSE("GPL");

static struct kmem_cache *jfs_inode_cachep;

static const struct super_operations jfs_super_operations;
static const struct export_operations jfs_export_operations;
static struct file_system_type jfs_fs_type;

#define MAX_COMMIT_THREADS 64
static int commit_threads;
module_param(commit_threads, int, 0);
MODULE_PARM_DESC(commit_threads, "Number of commit threads");

static struct task_struct *jfsCommitThread[MAX_COMMIT_THREADS];
struct task_struct *jfsIOthread;
struct task_struct *jfsSyncThread;

#ifdef CONFIG_JFS_DEBUG
int jfsloglevel = JFS_LOGLEVEL_WARN;
module_param(jfsloglevel, int, 0644);
MODULE_PARM_DESC(jfsloglevel, "Specify JFS loglevel (0, 1 or 2)");
#endif
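
/*
 * jfs_handle_error() implements the behaviour selected by the errors=
 * mount option once an on-disk inconsistency has been reported: the
 * superblock is marked dirty, and the filesystem then panics, is
 * remounted read-only, or simply continues, depending on sbi->flag.
 */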
static void jfs_handle_error(struct super_block *sb)
{
        struct jfs_sb_info *sbi = JFS_SBI(sb);

        if (sb->s_flags & MS_RDONLY)
                return;

        updateSuper(sb, FM_DIRTY);

        if (sbi->flag & JFS_ERR_PANIC)
                panic("JFS (device %s): panic forced after error\n",
                        sb->s_id);
        else if (sbi->flag & JFS_ERR_REMOUNT_RO) {
                jfs_err("ERROR: (device %s): remounting filesystem as read-only\n",
                        sb->s_id);
                sb->s_flags |= MS_RDONLY;
        }

        /* nothing is done for continue beyond marking the superblock dirty */
}
void jfs_error(struct super_block *sb, const char *fmt, ...)
{
        struct va_format vaf;
        va_list args;

        va_start(args, fmt);

        vaf.fmt = fmt;
        vaf.va = &args;

        pr_err("ERROR: (device %s): %ps: %pV\n",
               sb->s_id, __builtin_return_address(0), &vaf);

        va_end(args);

        jfs_handle_error(sb);
}
static struct inode *jfs_alloc_inode(struct super_block *sb)
{
        struct jfs_inode_info *jfs_inode;

        jfs_inode = kmem_cache_alloc(jfs_inode_cachep, GFP_NOFS);
        if (!jfs_inode)
                return NULL;
#ifdef CONFIG_QUOTA
        memset(&jfs_inode->i_dquot, 0, sizeof(jfs_inode->i_dquot));
#endif
        return &jfs_inode->vfs_inode;
}

static void jfs_i_callback(struct rcu_head *head)
{
        struct inode *inode = container_of(head, struct inode, i_rcu);
        struct jfs_inode_info *ji = JFS_IP(inode);

        kmem_cache_free(jfs_inode_cachep, ji);
}

static void jfs_destroy_inode(struct inode *inode)
{
        struct jfs_inode_info *ji = JFS_IP(inode);

        BUG_ON(!list_empty(&ji->anon_inode_list));

        spin_lock_irq(&ji->ag_lock);
        if (ji->active_ag != -1) {
                struct bmap *bmap = JFS_SBI(inode->i_sb)->bmap;
                atomic_dec(&bmap->db_active[ji->active_ag]);
                ji->active_ag = -1;
        }
        spin_unlock_irq(&ji->ag_lock);

        call_rcu(&inode->i_rcu, jfs_i_callback);
}
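
/*
 * jfs_statfs() fills in a kstatfs structure for statfs(2).  The block
 * counts come straight from the block allocation map; the inode counts
 * are an estimate, since JFS allocates inodes dynamically (see the
 * comment in the function body).
 */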
static int jfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
        struct jfs_sb_info *sbi = JFS_SBI(dentry->d_sb);
        s64 maxinodes;
        struct inomap *imap = JFS_IP(sbi->ipimap)->i_imap;

        jfs_info("In jfs_statfs");
        buf->f_type = JFS_SUPER_MAGIC;
        buf->f_bsize = sbi->bsize;
        buf->f_blocks = sbi->bmap->db_mapsize;
        buf->f_bfree = sbi->bmap->db_nfree;
        buf->f_bavail = sbi->bmap->db_nfree;
        /*
         * If we really return the number of allocated & free inodes, some
         * applications will fail because they won't see enough free inodes.
         * We'll try to calculate some guess as to how many inodes we can
         * really allocate
         *
         * buf->f_files = atomic_read(&imap->im_numinos);
         * buf->f_ffree = atomic_read(&imap->im_numfree);
         */
        maxinodes = min((s64) atomic_read(&imap->im_numinos) +
                        ((sbi->bmap->db_nfree >> imap->im_l2nbperiext)
                         << L2INOSPEREXT), (s64) 0xffffffffLL);
        buf->f_files = maxinodes;
        buf->f_ffree = maxinodes - (atomic_read(&imap->im_numinos) -
                                    atomic_read(&imap->im_numfree));
        buf->f_fsid.val[0] = (u32)crc32_le(0, sbi->uuid, sizeof(sbi->uuid)/2);
        buf->f_fsid.val[1] = (u32)crc32_le(0, sbi->uuid + sizeof(sbi->uuid)/2,
                                           sizeof(sbi->uuid)/2);
        buf->f_namelen = JFS_NAME_MAX;

        return 0;
}
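
/*
 * jfs_put_super() is called when the filesystem is unmounted: quota is
 * disabled, the log and metadata are released via jfs_umount(), the NLS
 * table is dropped, and the direct-mapping inode and sb_info are freed.
 */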
static void jfs_put_super(struct super_block *sb)
{
        struct jfs_sb_info *sbi = JFS_SBI(sb);
        int rc;

        jfs_info("In jfs_put_super");

        dquot_disable(sb, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);

        rc = jfs_umount(sb);
        if (rc)
                jfs_err("jfs_umount failed with return code %d", rc);

        unload_nls(sbi->nls_tab);

        truncate_inode_pages(sbi->direct_inode->i_mapping, 0);
        iput(sbi->direct_inode);

        kfree(sbi);
}
enum {
        Opt_integrity, Opt_nointegrity, Opt_iocharset, Opt_resize,
        Opt_resize_nosize, Opt_errors, Opt_ignore, Opt_err, Opt_quota,
        Opt_usrquota, Opt_grpquota, Opt_uid, Opt_gid, Opt_umask,
        Opt_discard, Opt_nodiscard, Opt_discard_minblk
};

static const match_table_t tokens = {
        {Opt_integrity, "integrity"},
        {Opt_nointegrity, "nointegrity"},
        {Opt_iocharset, "iocharset=%s"},
        {Opt_resize, "resize=%u"},
        {Opt_resize_nosize, "resize"},
        {Opt_errors, "errors=%s"},
        {Opt_ignore, "noquota"},
        {Opt_ignore, "quota"},
        {Opt_usrquota, "usrquota"},
        {Opt_grpquota, "grpquota"},
        {Opt_uid, "uid=%u"},
        {Opt_gid, "gid=%u"},
        {Opt_umask, "umask=%u"},
        {Opt_discard, "discard"},
        {Opt_nodiscard, "nodiscard"},
        {Opt_discard_minblk, "discard=%u"},
        {Opt_err, NULL}
};
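
/*
 * parse_options() walks the comma-separated mount option string and
 * updates *flag, *newLVSize and fields in the jfs_sb_info accordingly.
 * It is used both at mount time and on remount.  Returns 1 on success
 * and 0 on failure.
 */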
static int parse_options(char *options, struct super_block *sb, s64 *newLVSize,
                         int *flag)
{
        void *nls_map = (void *)-1;     /* -1: no change;  NULL: none */
        char *p;
        struct jfs_sb_info *sbi = JFS_SBI(sb);

        *newLVSize = 0;

        if (!options)
                return 1;

        while ((p = strsep(&options, ",")) != NULL) {
                substring_t args[MAX_OPT_ARGS];
                int token;
                if (!*p)
                        continue;

                token = match_token(p, tokens, args);
                switch (token) {
                case Opt_integrity:
                        *flag &= ~JFS_NOINTEGRITY;
                        break;
                case Opt_nointegrity:
                        *flag |= JFS_NOINTEGRITY;
                        break;
                case Opt_ignore:
                        /* Silently ignore the quota options */
                        /* Don't do anything ;-) */
                        break;
                case Opt_iocharset:
                        if (nls_map && nls_map != (void *) -1)
                                unload_nls(nls_map);
                        if (!strcmp(args[0].from, "none"))
                                nls_map = NULL;
                        else {
                                nls_map = load_nls(args[0].from);
                                if (!nls_map) {
                                        pr_err("JFS: charset not found\n");
                                        goto cleanup;
                                }
                        }
                        break;
                case Opt_resize:
                {
                        char *resize = args[0].from;
                        int rc = kstrtoll(resize, 0, newLVSize);

                        if (rc)
                                goto cleanup;
                        break;
                }
                case Opt_resize_nosize:
                {
                        *newLVSize = sb->s_bdev->bd_inode->i_size >>
                                sb->s_blocksize_bits;
                        if (*newLVSize == 0)
                                pr_err("JFS: Cannot determine volume size\n");
                        break;
                }
                case Opt_errors:
                {
                        char *errors = args[0].from;
                        if (!errors || !*errors)
                                goto cleanup;
                        if (!strcmp(errors, "continue")) {
                                *flag &= ~JFS_ERR_REMOUNT_RO;
                                *flag &= ~JFS_ERR_PANIC;
                                *flag |= JFS_ERR_CONTINUE;
                        } else if (!strcmp(errors, "remount-ro")) {
                                *flag &= ~JFS_ERR_CONTINUE;
                                *flag &= ~JFS_ERR_PANIC;
                                *flag |= JFS_ERR_REMOUNT_RO;
                        } else if (!strcmp(errors, "panic")) {
                                *flag &= ~JFS_ERR_CONTINUE;
                                *flag &= ~JFS_ERR_REMOUNT_RO;
                                *flag |= JFS_ERR_PANIC;
                        } else {
                                pr_err("JFS: %s is an invalid error handler\n",
                                       errors);
                                goto cleanup;
                        }
                        break;
                }

#ifdef CONFIG_QUOTA
                case Opt_quota:
                case Opt_usrquota:
                        *flag |= JFS_USRQUOTA;
                        break;
                case Opt_grpquota:
                        *flag |= JFS_GRPQUOTA;
                        break;
#else
                case Opt_usrquota:
                case Opt_grpquota:
                case Opt_quota:
                        pr_err("JFS: quota operations not supported\n");
                        break;
#endif
                case Opt_uid:
                {
                        char *uid = args[0].from;
                        uid_t val;
                        int rc = kstrtouint(uid, 0, &val);

                        if (rc)
                                goto cleanup;
                        sbi->uid = make_kuid(current_user_ns(), val);
                        if (!uid_valid(sbi->uid))
                                goto cleanup;
                        break;
                }

                case Opt_gid:
                {
                        char *gid = args[0].from;
                        gid_t val;
                        int rc = kstrtouint(gid, 0, &val);

                        if (rc)
                                goto cleanup;
                        sbi->gid = make_kgid(current_user_ns(), val);
                        if (!gid_valid(sbi->gid))
                                goto cleanup;
                        break;
                }

                case Opt_umask:
                {
                        char *umask = args[0].from;
                        int rc = kstrtouint(umask, 8, &sbi->umask);

                        if (rc)
                                goto cleanup;
                        if (sbi->umask & ~0777) {
                                pr_err("JFS: Invalid value of umask\n");
                                goto cleanup;
                        }
                        break;
                }

                case Opt_discard:
                {
                        struct request_queue *q = bdev_get_queue(sb->s_bdev);
                        /* if set to 1, even copying files will cause
                         * trimming :O
                         * -> user has more control over the online trimming
                         */
                        sbi->minblks_trim = 64;
                        if (blk_queue_discard(q))
                                *flag |= JFS_DISCARD;
                        else
                                pr_err("JFS: discard option not supported on device\n");
                        break;
                }

                case Opt_nodiscard:
                        *flag &= ~JFS_DISCARD;
                        break;

                case Opt_discard_minblk:
                {
                        struct request_queue *q = bdev_get_queue(sb->s_bdev);
                        char *minblks_trim = args[0].from;
                        int rc;
                        if (blk_queue_discard(q)) {
                                *flag |= JFS_DISCARD;
                                rc = kstrtouint(minblks_trim, 0,
                                                &sbi->minblks_trim);
                                if (rc)
                                        goto cleanup;
                        } else
                                pr_err("JFS: discard option not supported on device\n");
                        break;
                }

                default:
                        printk("jfs: Unrecognized mount option \"%s\" or missing value\n",
                               p);
                        goto cleanup;
                }
        }

        if (nls_map != (void *) -1) {
                /* Discard old (if remount) */
                unload_nls(sbi->nls_tab);
                sbi->nls_tab = nls_map;
        }
        return 1;

cleanup:
        if (nls_map && nls_map != (void *) -1)
                unload_nls(nls_map);
        return 0;
}
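
/*
 * jfs_remount() handles mount -o remount: it re-parses the options,
 * performs an online resize if one was requested, and switches between
 * read-only and read-write states, resuming or suspending quotas as
 * appropriate.
 */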
static int jfs_remount(struct super_block *sb, int *flags, char *data)
{
        s64 newLVSize = 0;
        int rc = 0;
        int flag = JFS_SBI(sb)->flag;
        int ret;

        sync_filesystem(sb);
        if (!parse_options(data, sb, &newLVSize, &flag))
                return -EINVAL;

        if (newLVSize) {
                if (sb->s_flags & MS_RDONLY) {
                        pr_err("JFS: resize requires volume to be mounted read-write\n");
                        return -EROFS;
                }
                rc = jfs_extendfs(sb, newLVSize, 0);
                if (rc)
                        return rc;
        }

        if ((sb->s_flags & MS_RDONLY) && !(*flags & MS_RDONLY)) {
                /*
                 * Invalidate any previously read metadata.  fsck may have
                 * changed the on-disk data since we mounted r/o
                 */
                truncate_inode_pages(JFS_SBI(sb)->direct_inode->i_mapping, 0);

                JFS_SBI(sb)->flag = flag;
                ret = jfs_mount_rw(sb, 1);

                /* mark the fs r/w for quota activity */
                sb->s_flags &= ~MS_RDONLY;
                dquot_resume(sb, -1);
                return ret;
        }
        if ((!(sb->s_flags & MS_RDONLY)) && (*flags & MS_RDONLY)) {
                rc = dquot_suspend(sb, -1);
                if (rc < 0)
                        return rc;
                rc = jfs_umount_rw(sb);
                JFS_SBI(sb)->flag = flag;
                return rc;
        }
        if ((JFS_SBI(sb)->flag & JFS_NOINTEGRITY) != (flag & JFS_NOINTEGRITY))
                if (!(sb->s_flags & MS_RDONLY)) {
                        rc = jfs_umount_rw(sb);
                        if (rc)
                                return rc;

                        JFS_SBI(sb)->flag = flag;
                        ret = jfs_mount_rw(sb, 1);
                        return ret;
                }
        JFS_SBI(sb)->flag = flag;

        return 0;
}
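
/*
 * jfs_fill_super() reads the superblock from the block device and sets
 * up the in-core superblock: mount options, operation vectors, the
 * direct-mapping inode used for metadata I/O, the journal (unless the
 * mount is read-only) and the root dentry.
 */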
static int jfs_fill_super(struct super_block *sb, void *data, int silent)
{
        struct jfs_sb_info *sbi;
        struct inode *inode;
        int rc;
        s64 newLVSize = 0;
        int flag, ret = -EINVAL;

        jfs_info("In jfs_read_super: s_flags=0x%lx", sb->s_flags);

        if (!new_valid_dev(sb->s_bdev->bd_dev))
                return -EOVERFLOW;

        sbi = kzalloc(sizeof(struct jfs_sb_info), GFP_KERNEL);
        if (!sbi)
                return -ENOMEM;

        sb->s_fs_info = sbi;
        sb->s_max_links = JFS_LINK_MAX;
        sbi->sb = sb;
        sbi->uid = INVALID_UID;
        sbi->gid = INVALID_GID;
        sbi->umask = -1;

        /* initialize the mount flag and determine the default error handler */
        flag = JFS_ERR_REMOUNT_RO;

        if (!parse_options((char *) data, sb, &newLVSize, &flag))
                goto out_kfree;
        sbi->flag = flag;

#ifdef CONFIG_JFS_POSIX_ACL
        sb->s_flags |= MS_POSIXACL;
#endif

        if (newLVSize) {
                pr_err("resize option for remount only\n");
                goto out_kfree;
        }

        /*
         * Initialize blocksize to 4K.
         */
        sb_set_blocksize(sb, PSIZE);

        /*
         * Set method vectors.
         */
        sb->s_op = &jfs_super_operations;
        sb->s_export_op = &jfs_export_operations;
        sb->s_xattr = jfs_xattr_handlers;
#ifdef CONFIG_QUOTA
        sb->dq_op = &dquot_operations;
        sb->s_qcop = &dquot_quotactl_ops;
        sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
#endif

        /*
         * Initialize direct-mapping inode/address-space
         */
        inode = new_inode(sb);
        if (inode == NULL) {
                ret = -ENOMEM;
                goto out_unload;
        }
        inode->i_ino = 0;
        inode->i_size = sb->s_bdev->bd_inode->i_size;
        inode->i_mapping->a_ops = &jfs_metapage_aops;
        hlist_add_fake(&inode->i_hash);
        mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);

        sbi->direct_inode = inode;

        rc = jfs_mount(sb);
        if (rc) {
                if (!silent)
                        jfs_err("jfs_mount failed w/return code = %d", rc);
                goto out_mount_failed;
        }
        if (sb->s_flags & MS_RDONLY)
                sbi->log = NULL;
        else {
                rc = jfs_mount_rw(sb, 0);
                if (rc) {
                        if (!silent) {
                                jfs_err("jfs_mount_rw failed, return code = %d",
                                        rc);
                        }
                        goto out_no_rw;
                }
        }

        sb->s_magic = JFS_SUPER_MAGIC;

        if (sbi->mntflag & JFS_OS2)
                sb->s_d_op = &jfs_ci_dentry_operations;

        inode = jfs_iget(sb, ROOT_I);
        if (IS_ERR(inode)) {
                ret = PTR_ERR(inode);
                goto out_no_rw;
        }
        sb->s_root = d_make_root(inode);
        if (!sb->s_root)
                goto out_no_root;

        /* logical blocks are represented by 40 bits in pxd_t, etc. */
        sb->s_maxbytes = ((u64) sb->s_blocksize) << 40;
#if BITS_PER_LONG == 32
        /*
         * Page cache is indexed by long.
         * I would use MAX_LFS_FILESIZE, but it's only half as big
         */
        sb->s_maxbytes = min(((u64) PAGE_CACHE_SIZE << 32) - 1,
                             (u64)sb->s_maxbytes);
#endif
        sb->s_time_gran = 1;
        return 0;

out_no_root:
        jfs_err("jfs_read_super: get root dentry failed");

out_no_rw:
        rc = jfs_umount(sb);
        if (rc)
                jfs_err("jfs_umount failed with return code %d", rc);
out_mount_failed:
        filemap_write_and_wait(sbi->direct_inode->i_mapping);
        truncate_inode_pages(sbi->direct_inode->i_mapping, 0);
        make_bad_inode(sbi->direct_inode);
        iput(sbi->direct_inode);
        sbi->direct_inode = NULL;
out_unload:
        unload_nls(sbi->nls_tab);
out_kfree:
        kfree(sbi);
        return ret;
}
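
/*
 * jfs_freeze() and jfs_unfreeze() back the VFS freeze_fs/unfreeze_fs
 * hooks: freezing quiesces the transaction manager, shuts down the log
 * and marks the superblock clean; thawing reverses those steps.
 */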
static int jfs_freeze(struct super_block *sb)
{
        struct jfs_sb_info *sbi = JFS_SBI(sb);
        struct jfs_log *log = sbi->log;
        int rc = 0;

        if (!(sb->s_flags & MS_RDONLY)) {
                txQuiesce(sb);
                rc = lmLogShutdown(log);
                if (rc) {
                        jfs_error(sb, "lmLogShutdown failed\n");

                        /* let operations fail rather than hang */
                        txResume(sb);

                        return rc;
                }
                rc = updateSuper(sb, FM_CLEAN);
                if (rc) {
                        jfs_err("jfs_freeze: updateSuper failed\n");
                        /*
                         * Don't fail here. Everything succeeded except
                         * marking the superblock clean, so there's really
                         * no harm in leaving it frozen for now.
                         */
                }
        }
        return 0;
}

static int jfs_unfreeze(struct super_block *sb)
{
        struct jfs_sb_info *sbi = JFS_SBI(sb);
        struct jfs_log *log = sbi->log;
        int rc = 0;

        if (!(sb->s_flags & MS_RDONLY)) {
                rc = updateSuper(sb, FM_MOUNT);
                if (rc) {
                        jfs_error(sb, "updateSuper failed\n");
                        goto out;
                }
                rc = lmLogInit(log);
                if (rc)
                        jfs_error(sb, "lmLogInit failed\n");
out:
                txResume(sb);
        }
        return rc;
}
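
/*
 * jfs_do_mount() is the .mount entry point for the "jfs" filesystem
 * type; it simply hands off to mount_bdev() with jfs_fill_super().
 * jfs_sync_fs() below writes back dirty quota structures and flushes
 * the journal when the VFS syncs the filesystem.
 */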
static struct dentry *jfs_do_mount(struct file_system_type *fs_type,
        int flags, const char *dev_name, void *data)
{
        return mount_bdev(fs_type, flags, dev_name, data, jfs_fill_super);
}

static int jfs_sync_fs(struct super_block *sb, int wait)
{
        struct jfs_log *log = JFS_SBI(sb)->log;

        /* log == NULL indicates read-only mount */
        if (log) {
                /*
                 * Write quota structures to quota file, sync_blockdev() will
                 * write them to disk later
                 */
                dquot_writeback_dquots(sb, -1);
                jfs_flush_journal(log, wait);
                jfs_syncpt(log, 0);
        }
        return 0;
}
static int jfs_show_options(struct seq_file *seq, struct dentry *root)
{
        struct jfs_sb_info *sbi = JFS_SBI(root->d_sb);

        if (uid_valid(sbi->uid))
                seq_printf(seq, ",uid=%d", from_kuid(&init_user_ns, sbi->uid));
        if (gid_valid(sbi->gid))
                seq_printf(seq, ",gid=%d", from_kgid(&init_user_ns, sbi->gid));
        if (sbi->umask != -1)
                seq_printf(seq, ",umask=%03o", sbi->umask);
        if (sbi->flag & JFS_NOINTEGRITY)
                seq_puts(seq, ",nointegrity");
        if (sbi->flag & JFS_DISCARD)
                seq_printf(seq, ",discard=%u", sbi->minblks_trim);
        if (sbi->nls_tab)
                seq_printf(seq, ",iocharset=%s", sbi->nls_tab->charset);
        if (sbi->flag & JFS_ERR_CONTINUE)
                seq_printf(seq, ",errors=continue");
        if (sbi->flag & JFS_ERR_PANIC)
                seq_printf(seq, ",errors=panic");

#ifdef CONFIG_QUOTA
        if (sbi->flag & JFS_USRQUOTA)
                seq_puts(seq, ",usrquota");

        if (sbi->flag & JFS_GRPQUOTA)
                seq_puts(seq, ",grpquota");
#endif
        return 0;
}
#ifdef CONFIG_QUOTA

/* Read data from quotafile - avoid pagecache and such because we cannot afford
 * acquiring the locks... As quota files are never truncated and quota code
 * itself serializes the operations (and no one else should touch the files)
 * we don't have to be afraid of races */
static ssize_t jfs_quota_read(struct super_block *sb, int type, char *data,
                              size_t len, loff_t off)
{
        struct inode *inode = sb_dqopt(sb)->files[type];
        sector_t blk = off >> sb->s_blocksize_bits;
        int err = 0;
        int offset = off & (sb->s_blocksize - 1);
        int tocopy;
        size_t toread;
        struct buffer_head tmp_bh;
        struct buffer_head *bh;
        loff_t i_size = i_size_read(inode);

        if (off > i_size)
                return 0;
        if (off+len > i_size)
                len = i_size-off;
        toread = len;
        while (toread > 0) {
                tocopy = sb->s_blocksize - offset < toread ?
                                sb->s_blocksize - offset : toread;

                tmp_bh.b_state = 0;
                tmp_bh.b_size = 1 << inode->i_blkbits;
                err = jfs_get_block(inode, blk, &tmp_bh, 0);
                if (err)
                        return err;
                if (!buffer_mapped(&tmp_bh))    /* A hole? */
                        memset(data, 0, tocopy);
                else {
                        bh = sb_bread(sb, tmp_bh.b_blocknr);
                        if (!bh)
                                return -EIO;
                        memcpy(data, bh->b_data+offset, tocopy);
                        brelse(bh);
                }
                offset = 0;
                toread -= tocopy;
                data += tocopy;
                blk++;
        }
        return len;
}
/* Write to quotafile */
static ssize_t jfs_quota_write(struct super_block *sb, int type,
                               const char *data, size_t len, loff_t off)
{
        struct inode *inode = sb_dqopt(sb)->files[type];
        sector_t blk = off >> sb->s_blocksize_bits;
        int err = 0;
        int offset = off & (sb->s_blocksize - 1);
        int tocopy;
        size_t towrite = len;
        struct buffer_head tmp_bh;
        struct buffer_head *bh;

        mutex_lock(&inode->i_mutex);
        while (towrite > 0) {
                tocopy = sb->s_blocksize - offset < towrite ?
                                sb->s_blocksize - offset : towrite;

                tmp_bh.b_state = 0;
                tmp_bh.b_size = 1 << inode->i_blkbits;
                err = jfs_get_block(inode, blk, &tmp_bh, 1);
                if (err)
                        goto out;
                if (offset || tocopy != sb->s_blocksize)
                        bh = sb_bread(sb, tmp_bh.b_blocknr);
                else
                        bh = sb_getblk(sb, tmp_bh.b_blocknr);
                if (!bh) {
                        err = -EIO;
                        goto out;
                }
                lock_buffer(bh);
                memcpy(bh->b_data+offset, data, tocopy);
                flush_dcache_page(bh->b_page);
                set_buffer_uptodate(bh);
                mark_buffer_dirty(bh);
                unlock_buffer(bh);
                brelse(bh);
                offset = 0;
                towrite -= tocopy;
                data += tocopy;
                blk++;
        }
out:
        if (len == towrite) {
                mutex_unlock(&inode->i_mutex);
                return err;
        }
        if (inode->i_size < off+len-towrite)
                i_size_write(inode, off+len-towrite);
        inode->i_version++;
        inode->i_mtime = inode->i_ctime = CURRENT_TIME;
        mark_inode_dirty(inode);
        mutex_unlock(&inode->i_mutex);
        return len - towrite;
}
static struct dquot **jfs_get_dquots(struct inode *inode)
{
        return JFS_IP(inode)->i_dquot;
}
#endif
static const struct super_operations jfs_super_operations = {
        .alloc_inode    = jfs_alloc_inode,
        .destroy_inode  = jfs_destroy_inode,
        .dirty_inode    = jfs_dirty_inode,
        .write_inode    = jfs_write_inode,
        .evict_inode    = jfs_evict_inode,
        .put_super      = jfs_put_super,
        .sync_fs        = jfs_sync_fs,
        .freeze_fs      = jfs_freeze,
        .unfreeze_fs    = jfs_unfreeze,
        .statfs         = jfs_statfs,
        .remount_fs     = jfs_remount,
        .show_options   = jfs_show_options,
#ifdef CONFIG_QUOTA
        .quota_read     = jfs_quota_read,
        .quota_write    = jfs_quota_write,
        .get_dquots     = jfs_get_dquots,
#endif
};

static const struct export_operations jfs_export_operations = {
        .fh_to_dentry   = jfs_fh_to_dentry,
        .fh_to_parent   = jfs_fh_to_parent,
        .get_parent     = jfs_get_parent,
};

static struct file_system_type jfs_fs_type = {
        .owner          = THIS_MODULE,
        .name           = "jfs",
        .mount          = jfs_do_mount,
        .kill_sb        = kill_block_super,
        .fs_flags       = FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("jfs");
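
/*
 * init_once() is the slab constructor for the jfs inode cache: it runs
 * once per object when a new slab page is allocated, setting up the
 * locks and list heads that must stay valid across object reuse.
 */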
static void init_once(void *foo)
{
        struct jfs_inode_info *jfs_ip = (struct jfs_inode_info *) foo;

        memset(jfs_ip, 0, sizeof(struct jfs_inode_info));
        INIT_LIST_HEAD(&jfs_ip->anon_inode_list);
        init_rwsem(&jfs_ip->rdwrlock);
        mutex_init(&jfs_ip->commit_mutex);
        init_rwsem(&jfs_ip->xattr_sem);
        spin_lock_init(&jfs_ip->ag_lock);
        jfs_ip->active_ag = -1;
        inode_init_once(&jfs_ip->vfs_inode);
}
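
/*
 * init_jfs_fs() is the module entry point: it creates the inode slab
 * cache, initializes the metapage and transaction managers, starts the
 * I/O, commit and sync kernel threads, and finally registers the
 * filesystem type.  On any failure the steps already completed are
 * unwound in reverse order.
 */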
static int __init init_jfs_fs(void)
{
        int i;
        int rc;

        jfs_inode_cachep =
            kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
                              SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
                              init_once);
        if (jfs_inode_cachep == NULL)
                return -ENOMEM;

        /*
         * Metapage initialization
         */
        rc = metapage_init();
        if (rc) {
                jfs_err("metapage_init failed w/rc = %d", rc);
                goto free_slab;
        }

        /*
         * Transaction Manager initialization
         */
        rc = txInit();
        if (rc) {
                jfs_err("txInit failed w/rc = %d", rc);
                goto free_metapage;
        }

        /*
         * I/O completion thread (endio)
         */
        jfsIOthread = kthread_run(jfsIOWait, NULL, "jfsIO");
        if (IS_ERR(jfsIOthread)) {
                rc = PTR_ERR(jfsIOthread);
                jfs_err("init_jfs_fs: fork failed w/rc = %d", rc);
                goto end_txmngr;
        }

        if (commit_threads < 1)
                commit_threads = num_online_cpus();
        if (commit_threads > MAX_COMMIT_THREADS)
                commit_threads = MAX_COMMIT_THREADS;

        for (i = 0; i < commit_threads; i++) {
                jfsCommitThread[i] = kthread_run(jfs_lazycommit, NULL,
                                                 "jfsCommit");
                if (IS_ERR(jfsCommitThread[i])) {
                        rc = PTR_ERR(jfsCommitThread[i]);
                        jfs_err("init_jfs_fs: fork failed w/rc = %d", rc);
                        commit_threads = i;
                        goto kill_committask;
                }
        }

        jfsSyncThread = kthread_run(jfs_sync, NULL, "jfsSync");
        if (IS_ERR(jfsSyncThread)) {
                rc = PTR_ERR(jfsSyncThread);
                jfs_err("init_jfs_fs: fork failed w/rc = %d", rc);
                goto kill_committask;
        }

#ifdef PROC_FS_JFS
        jfs_proc_init();
#endif

        rc = register_filesystem(&jfs_fs_type);
        if (!rc)
                return 0;

#ifdef PROC_FS_JFS
        jfs_proc_clean();
#endif
        kthread_stop(jfsSyncThread);
kill_committask:
        for (i = 0; i < commit_threads; i++)
                kthread_stop(jfsCommitThread[i]);
        kthread_stop(jfsIOthread);
end_txmngr:
        txExit();
free_metapage:
        metapage_exit();
free_slab:
        kmem_cache_destroy(jfs_inode_cachep);
        return rc;
}
static void __exit exit_jfs_fs(void)
{
        int i;

        jfs_info("exit_jfs_fs called");

        txExit();
        metapage_exit();

        kthread_stop(jfsIOthread);
        for (i = 0; i < commit_threads; i++)
                kthread_stop(jfsCommitThread[i]);
        kthread_stop(jfsSyncThread);
#ifdef PROC_FS_JFS
        jfs_proc_clean();
#endif
        unregister_filesystem(&jfs_fs_type);

        /*
         * Make sure all delayed rcu free inodes are flushed before we
         * destroy cache.
         */
        rcu_barrier();
        kmem_cache_destroy(jfs_inode_cachep);
}

module_init(init_jfs_fs)
module_exit(exit_jfs_fs)