super.c
/*
 * Copyright (C) International Business Machines Corp., 2000-2004
 * Portions Copyright (C) Christoph Hellwig, 2001-2002
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/parser.h>
#include <linux/completion.h>
#include <linux/vfs.h>
#include <linux/quotaops.h>
#include <linux/mount.h>
#include <linux/moduleparam.h>
#include <linux/kthread.h>
#include <linux/posix_acl.h>
#include <linux/buffer_head.h>
#include <linux/exportfs.h>
#include <linux/crc32.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/blkdev.h>

#include "jfs_incore.h"
#include "jfs_filsys.h"
#include "jfs_inode.h"
#include "jfs_metapage.h"
#include "jfs_superblock.h"
#include "jfs_dmap.h"
#include "jfs_imap.h"
#include "jfs_acl.h"
#include "jfs_debug.h"
#include "jfs_xattr.h"
#include "jfs_dinode.h"

MODULE_DESCRIPTION("The Journaled Filesystem (JFS)");
MODULE_AUTHOR("Steve Best/Dave Kleikamp/Barry Arndt, IBM");
MODULE_LICENSE("GPL");

static struct kmem_cache *jfs_inode_cachep;

static const struct super_operations jfs_super_operations;
static const struct export_operations jfs_export_operations;
static struct file_system_type jfs_fs_type;

#define MAX_COMMIT_THREADS 64
static int commit_threads;
module_param(commit_threads, int, 0);
MODULE_PARM_DESC(commit_threads, "Number of commit threads");

static struct task_struct *jfsCommitThread[MAX_COMMIT_THREADS];
struct task_struct *jfsIOthread;
struct task_struct *jfsSyncThread;

#ifdef CONFIG_JFS_DEBUG
int jfsloglevel = JFS_LOGLEVEL_WARN;
module_param(jfsloglevel, int, 0644);
MODULE_PARM_DESC(jfsloglevel, "Specify JFS loglevel (0, 1 or 2)");
#endif
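
/*
 * Error-handling policy.  jfs_handle_error() implements the behaviour
 * selected by the errors= mount option: JFS_ERR_PANIC panics the machine,
 * JFS_ERR_REMOUNT_RO forces the filesystem read-only, and "continue" does
 * nothing beyond marking the on-disk superblock dirty via updateSuper().
 * A filesystem that is already read-only is left untouched.
 */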
static void jfs_handle_error(struct super_block *sb)
{
	struct jfs_sb_info *sbi = JFS_SBI(sb);

	if (sb_rdonly(sb))
		return;

	updateSuper(sb, FM_DIRTY);

	if (sbi->flag & JFS_ERR_PANIC)
		panic("JFS (device %s): panic forced after error\n",
		      sb->s_id);
	else if (sbi->flag & JFS_ERR_REMOUNT_RO) {
		jfs_err("ERROR: (device %s): remounting filesystem as read-only",
			sb->s_id);
		sb->s_flags |= SB_RDONLY;
	}

	/* nothing is done for continue beyond marking the superblock dirty */
}
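
/*
 * jfs_error() is the common entry point for reporting on-disk corruption or
 * other internal inconsistencies.  It logs the device, the caller's address
 * and the formatted message, then applies the configured error policy via
 * jfs_handle_error().  An illustrative (hypothetical) call site:
 *
 *	jfs_error(sb, "invalid inode map state for ino %lu\n", inum);
 */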
void jfs_error(struct super_block *sb, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	pr_err("ERROR: (device %s): %ps: %pV\n",
	       sb->s_id, __builtin_return_address(0), &vaf);

	va_end(args);

	jfs_handle_error(sb);
}
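
/*
 * Inode lifecycle: jfs_alloc_inode() hands out jfs_inode_info objects from
 * the jfs_inode_cachep slab (constructed by init_once() below), and
 * jfs_destroy_inode() drops any active allocation-group reference before
 * deferring the actual free to an RCU grace period via jfs_i_callback().
 */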
static struct inode *jfs_alloc_inode(struct super_block *sb)
{
	struct jfs_inode_info *jfs_inode;

	jfs_inode = kmem_cache_alloc(jfs_inode_cachep, GFP_NOFS);
	if (!jfs_inode)
		return NULL;
#ifdef CONFIG_QUOTA
	memset(&jfs_inode->i_dquot, 0, sizeof(jfs_inode->i_dquot));
#endif
	return &jfs_inode->vfs_inode;
}

static void jfs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	struct jfs_inode_info *ji = JFS_IP(inode);

	kmem_cache_free(jfs_inode_cachep, ji);
}

static void jfs_destroy_inode(struct inode *inode)
{
	struct jfs_inode_info *ji = JFS_IP(inode);

	BUG_ON(!list_empty(&ji->anon_inode_list));

	spin_lock_irq(&ji->ag_lock);
	if (ji->active_ag != -1) {
		struct bmap *bmap = JFS_SBI(inode->i_sb)->bmap;

		atomic_dec(&bmap->db_active[ji->active_ag]);
		ji->active_ag = -1;
	}
	spin_unlock_irq(&ji->ag_lock);

	call_rcu(&inode->i_rcu, jfs_i_callback);
}
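
/*
 * statfs: block counts come straight from the block allocation map.  The
 * inode totals are an estimate, roughly
 *
 *	maxinodes = min(allocated inodes +
 *			(free blocks / blocks per inode extent) *
 *			inodes per extent, 2^32 - 1)
 *
 * since JFS allocates inodes dynamically and reporting only the currently
 * free inodes would mislead applications (see the comment in the body).
 */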
static int jfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct jfs_sb_info *sbi = JFS_SBI(dentry->d_sb);
	s64 maxinodes;
	struct inomap *imap = JFS_IP(sbi->ipimap)->i_imap;

	jfs_info("In jfs_statfs");
	buf->f_type = JFS_SUPER_MAGIC;
	buf->f_bsize = sbi->bsize;
	buf->f_blocks = sbi->bmap->db_mapsize;
	buf->f_bfree = sbi->bmap->db_nfree;
	buf->f_bavail = sbi->bmap->db_nfree;
	/*
	 * If we really return the number of allocated & free inodes, some
	 * applications will fail because they won't see enough free inodes.
	 * We'll try to calculate some guess as to how many inodes we can
	 * really allocate
	 *
	 * buf->f_files = atomic_read(&imap->im_numinos);
	 * buf->f_ffree = atomic_read(&imap->im_numfree);
	 */
	maxinodes = min((s64) atomic_read(&imap->im_numinos) +
			((sbi->bmap->db_nfree >> imap->im_l2nbperiext)
			 << L2INOSPEREXT), (s64) 0xffffffffLL);
	buf->f_files = maxinodes;
	buf->f_ffree = maxinodes - (atomic_read(&imap->im_numinos) -
				    atomic_read(&imap->im_numfree));
	buf->f_fsid.val[0] = (u32)crc32_le(0, sbi->uuid, sizeof(sbi->uuid)/2);
	buf->f_fsid.val[1] = (u32)crc32_le(0, sbi->uuid + sizeof(sbi->uuid)/2,
					   sizeof(sbi->uuid)/2);

	buf->f_namelen = JFS_NAME_MAX;
	return 0;
}

#ifdef CONFIG_QUOTA
static int jfs_quota_off(struct super_block *sb, int type);
static int jfs_quota_on(struct super_block *sb, int type, int format_id,
			const struct path *path);

static void jfs_quota_off_umount(struct super_block *sb)
{
	int type;

	for (type = 0; type < MAXQUOTAS; type++)
		jfs_quota_off(sb, type);
}

static const struct quotactl_ops jfs_quotactl_ops = {
	.quota_on	= jfs_quota_on,
	.quota_off	= jfs_quota_off,
	.quota_sync	= dquot_quota_sync,
	.get_state	= dquot_get_state,
	.set_info	= dquot_set_dqinfo,
	.get_dqblk	= dquot_get_dqblk,
	.set_dqblk	= dquot_set_dqblk,
	.get_nextdqblk	= dquot_get_next_dqblk,
};
#else
static inline void jfs_quota_off_umount(struct super_block *sb)
{
}
#endif
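
/*
 * Unmount path: turn quotas off, let jfs_umount() release the journal and
 * metadata structures, unload the NLS table, and finally drop the
 * direct-mapping inode that backs metadata I/O before freeing the per-sb
 * info.
 */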
static void jfs_put_super(struct super_block *sb)
{
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	int rc;

	jfs_info("In jfs_put_super");

	jfs_quota_off_umount(sb);

	rc = jfs_umount(sb);
	if (rc)
		jfs_err("jfs_umount failed with return code %d", rc);

	unload_nls(sbi->nls_tab);

	truncate_inode_pages(sbi->direct_inode->i_mapping, 0);
	iput(sbi->direct_inode);

	kfree(sbi);
}

enum {
	Opt_integrity, Opt_nointegrity, Opt_iocharset, Opt_resize,
	Opt_resize_nosize, Opt_errors, Opt_ignore, Opt_err, Opt_quota,
	Opt_usrquota, Opt_grpquota, Opt_uid, Opt_gid, Opt_umask,
	Opt_discard, Opt_nodiscard, Opt_discard_minblk
};

static const match_table_t tokens = {
	{Opt_integrity, "integrity"},
	{Opt_nointegrity, "nointegrity"},
	{Opt_iocharset, "iocharset=%s"},
	{Opt_resize, "resize=%u"},
	{Opt_resize_nosize, "resize"},
	{Opt_errors, "errors=%s"},
	{Opt_ignore, "noquota"},
	{Opt_ignore, "quota"},
	{Opt_usrquota, "usrquota"},
	{Opt_grpquota, "grpquota"},
	{Opt_uid, "uid=%u"},
	{Opt_gid, "gid=%u"},
	{Opt_umask, "umask=%u"},
	{Opt_discard, "discard"},
	{Opt_nodiscard, "nodiscard"},
	{Opt_discard_minblk, "discard=%u"},
	{Opt_err, NULL}
};
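
/*
 * parse_options() is used at both mount and remount time.  It returns 1 on
 * success and 0 on failure, and only commits an iocharset change once the
 * whole option string has parsed cleanly.  A hypothetical invocation from
 * user space exercising several of the tokens above:
 *
 *	mount -t jfs -o errors=remount-ro,iocharset=utf8,umask=022,discard=64 \
 *		/dev/sdb1 /mnt/jfs
 */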
static int parse_options(char *options, struct super_block *sb, s64 *newLVSize,
			 int *flag)
{
	void *nls_map = (void *)-1;	/* -1: no change; NULL: none */
	char *p;
	struct jfs_sb_info *sbi = JFS_SBI(sb);

	*newLVSize = 0;

	if (!options)
		return 1;

	while ((p = strsep(&options, ",")) != NULL) {
		substring_t args[MAX_OPT_ARGS];
		int token;

		if (!*p)
			continue;

		token = match_token(p, tokens, args);
		switch (token) {
		case Opt_integrity:
			*flag &= ~JFS_NOINTEGRITY;
			break;
		case Opt_nointegrity:
			*flag |= JFS_NOINTEGRITY;
			break;
		case Opt_ignore:
			/* Silently ignore the quota options */
			/* Don't do anything ;-) */
			break;
		case Opt_iocharset:
			if (nls_map && nls_map != (void *) -1)
				unload_nls(nls_map);
			if (!strcmp(args[0].from, "none"))
				nls_map = NULL;
			else {
				nls_map = load_nls(args[0].from);
				if (!nls_map) {
					pr_err("JFS: charset not found\n");
					goto cleanup;
				}
			}
			break;
		case Opt_resize:
		{
			char *resize = args[0].from;
			int rc = kstrtoll(resize, 0, newLVSize);

			if (rc)
				goto cleanup;
			break;
		}
		case Opt_resize_nosize:
		{
			*newLVSize = i_size_read(sb->s_bdev->bd_inode) >>
				sb->s_blocksize_bits;
			if (*newLVSize == 0)
				pr_err("JFS: Cannot determine volume size\n");
			break;
		}
		case Opt_errors:
		{
			char *errors = args[0].from;

			if (!errors || !*errors)
				goto cleanup;
			if (!strcmp(errors, "continue")) {
				*flag &= ~JFS_ERR_REMOUNT_RO;
				*flag &= ~JFS_ERR_PANIC;
				*flag |= JFS_ERR_CONTINUE;
			} else if (!strcmp(errors, "remount-ro")) {
				*flag &= ~JFS_ERR_CONTINUE;
				*flag &= ~JFS_ERR_PANIC;
				*flag |= JFS_ERR_REMOUNT_RO;
			} else if (!strcmp(errors, "panic")) {
				*flag &= ~JFS_ERR_CONTINUE;
				*flag &= ~JFS_ERR_REMOUNT_RO;
				*flag |= JFS_ERR_PANIC;
			} else {
				pr_err("JFS: %s is an invalid error handler\n",
				       errors);
				goto cleanup;
			}
			break;
		}

#ifdef CONFIG_QUOTA
		case Opt_quota:
		case Opt_usrquota:
			*flag |= JFS_USRQUOTA;
			break;
		case Opt_grpquota:
			*flag |= JFS_GRPQUOTA;
			break;
#else
		case Opt_usrquota:
		case Opt_grpquota:
		case Opt_quota:
			pr_err("JFS: quota operations not supported\n");
			break;
#endif
		case Opt_uid:
		{
			char *uid = args[0].from;
			uid_t val;
			int rc = kstrtouint(uid, 0, &val);

			if (rc)
				goto cleanup;
			sbi->uid = make_kuid(current_user_ns(), val);
			if (!uid_valid(sbi->uid))
				goto cleanup;
			break;
		}

		case Opt_gid:
		{
			char *gid = args[0].from;
			gid_t val;
			int rc = kstrtouint(gid, 0, &val);

			if (rc)
				goto cleanup;
			sbi->gid = make_kgid(current_user_ns(), val);
			if (!gid_valid(sbi->gid))
				goto cleanup;
			break;
		}

		case Opt_umask:
		{
			char *umask = args[0].from;
			int rc = kstrtouint(umask, 8, &sbi->umask);

			if (rc)
				goto cleanup;
			if (sbi->umask & ~0777) {
				pr_err("JFS: Invalid value of umask\n");
				goto cleanup;
			}
			break;
		}

		case Opt_discard:
		{
			struct request_queue *q = bdev_get_queue(sb->s_bdev);

			/* if set to 1, even copying files will cause
			 * trimming :O
			 * -> user has more control over the online trimming
			 */
			sbi->minblks_trim = 64;
			if (blk_queue_discard(q))
				*flag |= JFS_DISCARD;
			else
				pr_err("JFS: discard option not supported on device\n");
			break;
		}

		case Opt_nodiscard:
			*flag &= ~JFS_DISCARD;
			break;

		case Opt_discard_minblk:
		{
			struct request_queue *q = bdev_get_queue(sb->s_bdev);
			char *minblks_trim = args[0].from;
			int rc;

			if (blk_queue_discard(q)) {
				*flag |= JFS_DISCARD;
				rc = kstrtouint(minblks_trim, 0,
						&sbi->minblks_trim);
				if (rc)
					goto cleanup;
			} else
				pr_err("JFS: discard option not supported on device\n");
			break;
		}

		default:
			printk("jfs: Unrecognized mount option \"%s\" or missing value\n",
			       p);
			goto cleanup;
		}
	}

	if (nls_map != (void *) -1) {
		/* Discard old (if remount) */
		unload_nls(sbi->nls_tab);
		sbi->nls_tab = nls_map;
	}
	return 1;

cleanup:
	if (nls_map && nls_map != (void *) -1)
		unload_nls(nls_map);
	return 0;
}
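
/*
 * Remount handles four cases: an online resize (resize=/resize), a
 * read-only -> read-write transition (re-read metadata, open the log and
 * resume quotas), a read-write -> read-only transition (suspend quotas and
 * quiesce the log), and a change of the integrity/nointegrity flag, which
 * requires cycling the log while staying mounted read-write.
 */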
static int jfs_remount(struct super_block *sb, int *flags, char *data)
{
	s64 newLVSize = 0;
	int rc = 0;
	int flag = JFS_SBI(sb)->flag;
	int ret;

	sync_filesystem(sb);
	if (!parse_options(data, sb, &newLVSize, &flag))
		return -EINVAL;

	if (newLVSize) {
		if (sb_rdonly(sb)) {
			pr_err("JFS: resize requires volume to be mounted read-write\n");
			return -EROFS;
		}
		rc = jfs_extendfs(sb, newLVSize, 0);
		if (rc)
			return rc;
	}

	if (sb_rdonly(sb) && !(*flags & SB_RDONLY)) {
		/*
		 * Invalidate any previously read metadata.  fsck may have
		 * changed the on-disk data since we mounted r/o
		 */
		truncate_inode_pages(JFS_SBI(sb)->direct_inode->i_mapping, 0);

		JFS_SBI(sb)->flag = flag;
		ret = jfs_mount_rw(sb, 1);

		/* mark the fs r/w for quota activity */
		sb->s_flags &= ~SB_RDONLY;
		dquot_resume(sb, -1);
		return ret;
	}
	if (!sb_rdonly(sb) && (*flags & SB_RDONLY)) {
		rc = dquot_suspend(sb, -1);
		if (rc < 0)
			return rc;
		rc = jfs_umount_rw(sb);
		JFS_SBI(sb)->flag = flag;
		return rc;
	}
	if ((JFS_SBI(sb)->flag & JFS_NOINTEGRITY) != (flag & JFS_NOINTEGRITY))
		if (!sb_rdonly(sb)) {
			rc = jfs_umount_rw(sb);
			if (rc)
				return rc;

			JFS_SBI(sb)->flag = flag;
			ret = jfs_mount_rw(sb, 1);
			return ret;
		}
	JFS_SBI(sb)->flag = flag;

	return 0;
}
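
/*
 * Mount sequence: allocate jfs_sb_info, parse options, create the
 * direct-mapping inode used for metadata paging, then jfs_mount() reads the
 * superblock and allocation maps, jfs_mount_rw() (read-write mounts only)
 * opens the log, and finally the root inode is read and attached to a root
 * dentry.  All failure paths unwind through the out_* labels at the bottom.
 */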
static int jfs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct jfs_sb_info *sbi;
	struct inode *inode;
	int rc;
	s64 newLVSize = 0;
	int flag, ret = -EINVAL;

	jfs_info("In jfs_read_super: s_flags=0x%lx", sb->s_flags);

	sbi = kzalloc(sizeof(struct jfs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	sb->s_fs_info = sbi;
	sb->s_max_links = JFS_LINK_MAX;
	sbi->sb = sb;
	sbi->uid = INVALID_UID;
	sbi->gid = INVALID_GID;
	sbi->umask = -1;

	/* initialize the mount flag and determine the default error handler */
	flag = JFS_ERR_REMOUNT_RO;

	if (!parse_options((char *) data, sb, &newLVSize, &flag))
		goto out_kfree;
	sbi->flag = flag;

#ifdef CONFIG_JFS_POSIX_ACL
	sb->s_flags |= SB_POSIXACL;
#endif

	if (newLVSize) {
		pr_err("resize option for remount only\n");
		goto out_kfree;
	}

	/*
	 * Initialize blocksize to 4K.
	 */
	sb_set_blocksize(sb, PSIZE);

	/*
	 * Set method vectors.
	 */
	sb->s_op = &jfs_super_operations;
	sb->s_export_op = &jfs_export_operations;
	sb->s_xattr = jfs_xattr_handlers;
#ifdef CONFIG_QUOTA
	sb->dq_op = &dquot_operations;
	sb->s_qcop = &jfs_quotactl_ops;
	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
#endif

	/*
	 * Initialize direct-mapping inode/address-space
	 */
	inode = new_inode(sb);
	if (inode == NULL) {
		ret = -ENOMEM;
		goto out_unload;
	}
	inode->i_ino = 0;
	inode->i_size = i_size_read(sb->s_bdev->bd_inode);
	inode->i_mapping->a_ops = &jfs_metapage_aops;
	inode_fake_hash(inode);
	mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);

	sbi->direct_inode = inode;

	rc = jfs_mount(sb);
	if (rc) {
		if (!silent)
			jfs_err("jfs_mount failed w/return code = %d", rc);
		goto out_mount_failed;
	}
	if (sb_rdonly(sb))
		sbi->log = NULL;
	else {
		rc = jfs_mount_rw(sb, 0);
		if (rc) {
			if (!silent) {
				jfs_err("jfs_mount_rw failed, return code = %d",
					rc);
			}
			goto out_no_rw;
		}
	}

	sb->s_magic = JFS_SUPER_MAGIC;

	if (sbi->mntflag & JFS_OS2)
		sb->s_d_op = &jfs_ci_dentry_operations;

	inode = jfs_iget(sb, ROOT_I);
	if (IS_ERR(inode)) {
		ret = PTR_ERR(inode);
		goto out_no_rw;
	}
	sb->s_root = d_make_root(inode);
	if (!sb->s_root)
		goto out_no_root;

	/* logical blocks are represented by 40 bits in pxd_t, etc.
	 * and page cache is indexed by long
	 */
	sb->s_maxbytes = min(((loff_t)sb->s_blocksize) << 40, MAX_LFS_FILESIZE);
	sb->s_time_gran = 1;
	return 0;

out_no_root:
	jfs_err("jfs_read_super: get root dentry failed");

out_no_rw:
	rc = jfs_umount(sb);
	if (rc)
		jfs_err("jfs_umount failed with return code %d", rc);
out_mount_failed:
	filemap_write_and_wait(sbi->direct_inode->i_mapping);
	truncate_inode_pages(sbi->direct_inode->i_mapping, 0);
	make_bad_inode(sbi->direct_inode);
	iput(sbi->direct_inode);
	sbi->direct_inode = NULL;
out_unload:
	unload_nls(sbi->nls_tab);
out_kfree:
	kfree(sbi);
	return ret;
}
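
/*
 * freeze/thaw: jfs_freeze() quiesces the transaction manager, shuts down
 * the log and marks the superblock clean so the frozen image is consistent;
 * jfs_unfreeze() marks it mounted again, reinitializes the log and resumes
 * transactions.  Read-only mounts are a no-op in both directions.
 */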
static int jfs_freeze(struct super_block *sb)
{
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	struct jfs_log *log = sbi->log;
	int rc = 0;

	if (!sb_rdonly(sb)) {
		txQuiesce(sb);
		rc = lmLogShutdown(log);
		if (rc) {
			jfs_error(sb, "lmLogShutdown failed\n");

			/* let operations fail rather than hang */
			txResume(sb);

			return rc;
		}
		rc = updateSuper(sb, FM_CLEAN);
		if (rc) {
			jfs_err("jfs_freeze: updateSuper failed");
			/*
			 * Don't fail here. Everything succeeded except
			 * marking the superblock clean, so there's really
			 * no harm in leaving it frozen for now.
			 */
		}
	}
	return 0;
}

static int jfs_unfreeze(struct super_block *sb)
{
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	struct jfs_log *log = sbi->log;
	int rc = 0;

	if (!sb_rdonly(sb)) {
		rc = updateSuper(sb, FM_MOUNT);
		if (rc) {
			jfs_error(sb, "updateSuper failed\n");
			goto out;
		}
		rc = lmLogInit(log);
		if (rc)
			jfs_error(sb, "lmLogInit failed\n");
out:
		txResume(sb);
	}
	return rc;
}

static struct dentry *jfs_do_mount(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, jfs_fill_super);
}
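
/*
 * sync_fs: a NULL log means the filesystem is mounted read-only and there
 * is nothing to do.  Otherwise dirty dquots are written back, the journal
 * is flushed and a sync point is written; the block device itself is
 * written out later (see the sync_blockdev() note below).
 */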
static int jfs_sync_fs(struct super_block *sb, int wait)
{
	struct jfs_log *log = JFS_SBI(sb)->log;

	/* log == NULL indicates read-only mount */
	if (log) {
		/*
		 * Write quota structures to quota file, sync_blockdev() will
		 * write them to disk later
		 */
		dquot_writeback_dquots(sb, -1);
		jfs_flush_journal(log, wait);
		jfs_syncpt(log, 0);
	}

	return 0;
}

static int jfs_show_options(struct seq_file *seq, struct dentry *root)
{
	struct jfs_sb_info *sbi = JFS_SBI(root->d_sb);

	if (uid_valid(sbi->uid))
		seq_printf(seq, ",uid=%d", from_kuid(&init_user_ns, sbi->uid));
	if (gid_valid(sbi->gid))
		seq_printf(seq, ",gid=%d", from_kgid(&init_user_ns, sbi->gid));
	if (sbi->umask != -1)
		seq_printf(seq, ",umask=%03o", sbi->umask);
	if (sbi->flag & JFS_NOINTEGRITY)
		seq_puts(seq, ",nointegrity");
	if (sbi->flag & JFS_DISCARD)
		seq_printf(seq, ",discard=%u", sbi->minblks_trim);
	if (sbi->nls_tab)
		seq_printf(seq, ",iocharset=%s", sbi->nls_tab->charset);
	if (sbi->flag & JFS_ERR_CONTINUE)
		seq_printf(seq, ",errors=continue");
	if (sbi->flag & JFS_ERR_PANIC)
		seq_printf(seq, ",errors=panic");

#ifdef CONFIG_QUOTA
	if (sbi->flag & JFS_USRQUOTA)
		seq_puts(seq, ",usrquota");

	if (sbi->flag & JFS_GRPQUOTA)
		seq_puts(seq, ",grpquota");
#endif

	return 0;
}

#ifdef CONFIG_QUOTA
/* Read data from quotafile - avoid pagecache and such because we cannot afford
 * acquiring the locks... As quota files are never truncated and quota code
 * itself serializes the operations (and no one else should touch the files)
 * we don't have to be afraid of races */
static ssize_t jfs_quota_read(struct super_block *sb, int type, char *data,
			      size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	sector_t blk = off >> sb->s_blocksize_bits;
	int err = 0;
	int offset = off & (sb->s_blocksize - 1);
	int tocopy;
	size_t toread;
	struct buffer_head tmp_bh;
	struct buffer_head *bh;
	loff_t i_size = i_size_read(inode);

	if (off > i_size)
		return 0;
	if (off+len > i_size)
		len = i_size-off;
	toread = len;
	while (toread > 0) {
		tocopy = sb->s_blocksize - offset < toread ?
				sb->s_blocksize - offset : toread;

		tmp_bh.b_state = 0;
		tmp_bh.b_size = i_blocksize(inode);
		err = jfs_get_block(inode, blk, &tmp_bh, 0);
		if (err)
			return err;
		if (!buffer_mapped(&tmp_bh))	/* A hole? */
			memset(data, 0, tocopy);
		else {
			bh = sb_bread(sb, tmp_bh.b_blocknr);
			if (!bh)
				return -EIO;
			memcpy(data, bh->b_data+offset, tocopy);
			brelse(bh);
		}
		offset = 0;
		toread -= tocopy;
		data += tocopy;
		blk++;
	}
	return len;
}

/* Write to quotafile */
static ssize_t jfs_quota_write(struct super_block *sb, int type,
			       const char *data, size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	sector_t blk = off >> sb->s_blocksize_bits;
	int err = 0;
	int offset = off & (sb->s_blocksize - 1);
	int tocopy;
	size_t towrite = len;
	struct buffer_head tmp_bh;
	struct buffer_head *bh;

	inode_lock(inode);
	while (towrite > 0) {
		tocopy = sb->s_blocksize - offset < towrite ?
				sb->s_blocksize - offset : towrite;

		tmp_bh.b_state = 0;
		tmp_bh.b_size = i_blocksize(inode);
		err = jfs_get_block(inode, blk, &tmp_bh, 1);
		if (err)
			goto out;
		if (offset || tocopy != sb->s_blocksize)
			bh = sb_bread(sb, tmp_bh.b_blocknr);
		else
			bh = sb_getblk(sb, tmp_bh.b_blocknr);
		if (!bh) {
			err = -EIO;
			goto out;
		}
		lock_buffer(bh);
		memcpy(bh->b_data+offset, data, tocopy);
		flush_dcache_page(bh->b_page);
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);
		brelse(bh);
		offset = 0;
		towrite -= tocopy;
		data += tocopy;
		blk++;
	}
out:
	if (len == towrite) {
		inode_unlock(inode);
		return err;
	}
	if (inode->i_size < off+len-towrite)
		i_size_write(inode, off+len-towrite);
	inode->i_mtime = inode->i_ctime = current_time(inode);
	mark_inode_dirty(inode);
	inode_unlock(inode);
	return len - towrite;
}

static struct dquot **jfs_get_dquots(struct inode *inode)
{
	return JFS_IP(inode)->i_dquot;
}
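
/*
 * Quota file enable/disable: beyond the generic dquot_quota_on()/off()
 * work, JFS mirrors the NOATIME and IMMUTABLE flags into the on-disk mode2
 * bits of the quota file inode, so the quota file is not modified behind
 * the quota code's back while quotas are active.
 */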
static int jfs_quota_on(struct super_block *sb, int type, int format_id,
			const struct path *path)
{
	int err;
	struct inode *inode;

	err = dquot_quota_on(sb, type, format_id, path);
	if (err)
		return err;

	inode = d_inode(path->dentry);
	inode_lock(inode);
	JFS_IP(inode)->mode2 |= JFS_NOATIME_FL | JFS_IMMUTABLE_FL;
	inode_set_flags(inode, S_NOATIME | S_IMMUTABLE,
			S_NOATIME | S_IMMUTABLE);
	inode_unlock(inode);
	mark_inode_dirty(inode);

	return 0;
}

static int jfs_quota_off(struct super_block *sb, int type)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	int err;

	if (!inode || !igrab(inode))
		goto out;

	err = dquot_quota_off(sb, type);
	if (err)
		goto out_put;

	inode_lock(inode);
	JFS_IP(inode)->mode2 &= ~(JFS_NOATIME_FL | JFS_IMMUTABLE_FL);
	inode_set_flags(inode, 0, S_NOATIME | S_IMMUTABLE);
	inode_unlock(inode);
	mark_inode_dirty(inode);
out_put:
	iput(inode);
	return err;
out:
	return dquot_quota_off(sb, type);
}
#endif

static const struct super_operations jfs_super_operations = {
	.alloc_inode	= jfs_alloc_inode,
	.destroy_inode	= jfs_destroy_inode,
	.dirty_inode	= jfs_dirty_inode,
	.write_inode	= jfs_write_inode,
	.evict_inode	= jfs_evict_inode,
	.put_super	= jfs_put_super,
	.sync_fs	= jfs_sync_fs,
	.freeze_fs	= jfs_freeze,
	.unfreeze_fs	= jfs_unfreeze,
	.statfs		= jfs_statfs,
	.remount_fs	= jfs_remount,
	.show_options	= jfs_show_options,
#ifdef CONFIG_QUOTA
	.quota_read	= jfs_quota_read,
	.quota_write	= jfs_quota_write,
	.get_dquots	= jfs_get_dquots,
#endif
};

static const struct export_operations jfs_export_operations = {
	.fh_to_dentry	= jfs_fh_to_dentry,
	.fh_to_parent	= jfs_fh_to_parent,
	.get_parent	= jfs_get_parent,
};

static struct file_system_type jfs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "jfs",
	.mount		= jfs_do_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("jfs");
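
/*
 * init_once() is the slab constructor for jfs_inode_cachep: it runs once
 * per object when a slab page is populated, not on every allocation, so it
 * only sets up the locks, lists and the embedded VFS inode.
 */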
static void init_once(void *foo)
{
	struct jfs_inode_info *jfs_ip = (struct jfs_inode_info *) foo;

	memset(jfs_ip, 0, sizeof(struct jfs_inode_info));
	INIT_LIST_HEAD(&jfs_ip->anon_inode_list);
	init_rwsem(&jfs_ip->rdwrlock);
	mutex_init(&jfs_ip->commit_mutex);
	init_rwsem(&jfs_ip->xattr_sem);
	spin_lock_init(&jfs_ip->ag_lock);
	jfs_ip->active_ag = -1;
	inode_init_once(&jfs_ip->vfs_inode);
}
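
/*
 * Module init/exit.  Bring-up order is: inode slab cache, metapage cache,
 * transaction manager, the jfsIO/jfsCommit/jfsSync kernel threads, and
 * finally filesystem registration; errors unwind in reverse.  The number of
 * commit threads defaults to the number of online CPUs (capped at
 * MAX_COMMIT_THREADS) and can be set at load time, e.g.:
 *
 *	modprobe jfs commit_threads=4
 */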
static int __init init_jfs_fs(void)
{
	int i;
	int rc;

	jfs_inode_cachep =
	    kmem_cache_create_usercopy("jfs_ip", sizeof(struct jfs_inode_info),
			0, SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_ACCOUNT,
			offsetof(struct jfs_inode_info, i_inline), IDATASIZE,
			init_once);
	if (jfs_inode_cachep == NULL)
		return -ENOMEM;

	/*
	 * Metapage initialization
	 */
	rc = metapage_init();
	if (rc) {
		jfs_err("metapage_init failed w/rc = %d", rc);
		goto free_slab;
	}

	/*
	 * Transaction Manager initialization
	 */
	rc = txInit();
	if (rc) {
		jfs_err("txInit failed w/rc = %d", rc);
		goto free_metapage;
	}

	/*
	 * I/O completion thread (endio)
	 */
	jfsIOthread = kthread_run(jfsIOWait, NULL, "jfsIO");
	if (IS_ERR(jfsIOthread)) {
		rc = PTR_ERR(jfsIOthread);
		jfs_err("init_jfs_fs: fork failed w/rc = %d", rc);
		goto end_txmngr;
	}

	if (commit_threads < 1)
		commit_threads = num_online_cpus();
	if (commit_threads > MAX_COMMIT_THREADS)
		commit_threads = MAX_COMMIT_THREADS;

	for (i = 0; i < commit_threads; i++) {
		jfsCommitThread[i] = kthread_run(jfs_lazycommit, NULL,
						 "jfsCommit");
		if (IS_ERR(jfsCommitThread[i])) {
			rc = PTR_ERR(jfsCommitThread[i]);
			jfs_err("init_jfs_fs: fork failed w/rc = %d", rc);
			commit_threads = i;
			goto kill_committask;
		}
	}

	jfsSyncThread = kthread_run(jfs_sync, NULL, "jfsSync");
	if (IS_ERR(jfsSyncThread)) {
		rc = PTR_ERR(jfsSyncThread);
		jfs_err("init_jfs_fs: fork failed w/rc = %d", rc);
		goto kill_committask;
	}

#ifdef PROC_FS_JFS
	jfs_proc_init();
#endif

	rc = register_filesystem(&jfs_fs_type);
	if (!rc)
		return 0;

#ifdef PROC_FS_JFS
	jfs_proc_clean();
#endif
	kthread_stop(jfsSyncThread);
kill_committask:
	for (i = 0; i < commit_threads; i++)
		kthread_stop(jfsCommitThread[i]);
	kthread_stop(jfsIOthread);
end_txmngr:
	txExit();
free_metapage:
	metapage_exit();
free_slab:
	kmem_cache_destroy(jfs_inode_cachep);
	return rc;
}

static void __exit exit_jfs_fs(void)
{
	int i;

	jfs_info("exit_jfs_fs called");

	txExit();
	metapage_exit();

	kthread_stop(jfsIOthread);
	for (i = 0; i < commit_threads; i++)
		kthread_stop(jfsCommitThread[i]);
	kthread_stop(jfsSyncThread);
#ifdef PROC_FS_JFS
	jfs_proc_clean();
#endif
	unregister_filesystem(&jfs_fs_type);

	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(jfs_inode_cachep);
}

module_init(init_jfs_fs)
module_exit(exit_jfs_fs)