/* fs/fuse/inode.c */
  1. /*
  2. FUSE: Filesystem in Userspace
  3. Copyright (C) 2001-2008 Miklos Szeredi <miklos@szeredi.hu>
  4. This program can be distributed under the terms of the GNU GPL.
  5. See the file COPYING.
  6. */
  7. #include "fuse_i.h"
  8. #include <linux/pagemap.h>
  9. #include <linux/slab.h>
  10. #include <linux/file.h>
  11. #include <linux/seq_file.h>
  12. #include <linux/init.h>
  13. #include <linux/module.h>
  14. #include <linux/moduleparam.h>
  15. #include <linux/parser.h>
  16. #include <linux/statfs.h>
  17. #include <linux/random.h>
  18. #include <linux/sched.h>
  19. #include <linux/exportfs.h>
  20. #include <linux/posix_acl.h>
  21. #include <linux/pid_namespace.h>
MODULE_AUTHOR("Miklos Szeredi <miklos@szeredi.hu>");
MODULE_DESCRIPTION("Filesystem in Userspace");
MODULE_LICENSE("GPL");

/* Slab cache for struct fuse_inode, allocated in fuse_alloc_inode() */
static struct kmem_cache *fuse_inode_cachep;
/* List of all fuse connections, protected by fuse_mutex */
struct list_head fuse_conn_list;
DEFINE_MUTEX(fuse_mutex);

static int set_global_limit(const char *val, const struct kernel_param *kp);

/* Writable via module parameter; sanitized in set_global_limit() */
unsigned max_user_bgreq;
module_param_call(max_user_bgreq, set_global_limit, param_get_uint,
		  &max_user_bgreq, 0644);
__MODULE_PARM_TYPE(max_user_bgreq, "uint");
MODULE_PARM_DESC(max_user_bgreq,
 "Global limit for the maximum number of backgrounded requests an "
 "unprivileged user can set");

/* Writable via module parameter; sanitized in set_global_limit() */
unsigned max_user_congthresh;
module_param_call(max_user_congthresh, set_global_limit, param_get_uint,
		  &max_user_congthresh, 0644);
__MODULE_PARM_TYPE(max_user_congthresh, "uint");
MODULE_PARM_DESC(max_user_congthresh,
 "Global limit for the maximum congestion threshold an "
 "unprivileged user can set");

/* "FUSe" in little-endian ASCII */
#define FUSE_SUPER_MAGIC 0x65735546

#define FUSE_DEFAULT_BLKSIZE 512

/** Maximum number of outstanding background requests */
#define FUSE_DEFAULT_MAX_BACKGROUND 12

/** Congestion starts at 75% of maximum */
#define FUSE_DEFAULT_CONGESTION_THRESHOLD (FUSE_DEFAULT_MAX_BACKGROUND * 3 / 4)
/*
 * Mount options parsed from the option string by parse_fuse_opt().
 * The *_present bits record which of the mandatory options were seen.
 */
struct fuse_mount_data {
	int fd;			/* value of the "fd=" option */
	unsigned rootmode;	/* value of the "rootmode=" option (octal) */
	kuid_t user_id;		/* "user_id=" mapped into the mount user ns */
	kgid_t group_id;	/* "group_id=" mapped into the mount user ns */
	unsigned fd_present:1;
	unsigned rootmode_present:1;
	unsigned user_id_present:1;
	unsigned group_id_present:1;
	unsigned default_permissions:1;	/* let the VFS check permissions */
	unsigned allow_other:1;		/* allow access by other users */
	unsigned max_read;		/* "max_read=" (defaults to ~0) */
	unsigned blksize;		/* "blksize=", block-device mounts only */
};
  63. struct fuse_forget_link *fuse_alloc_forget(void)
  64. {
  65. return kzalloc(sizeof(struct fuse_forget_link), GFP_KERNEL);
  66. }
  67. static struct inode *fuse_alloc_inode(struct super_block *sb)
  68. {
  69. struct inode *inode;
  70. struct fuse_inode *fi;
  71. inode = kmem_cache_alloc(fuse_inode_cachep, GFP_KERNEL);
  72. if (!inode)
  73. return NULL;
  74. fi = get_fuse_inode(inode);
  75. fi->i_time = 0;
  76. fi->nodeid = 0;
  77. fi->nlookup = 0;
  78. fi->attr_version = 0;
  79. fi->writectr = 0;
  80. fi->orig_ino = 0;
  81. fi->state = 0;
  82. INIT_LIST_HEAD(&fi->write_files);
  83. INIT_LIST_HEAD(&fi->queued_writes);
  84. INIT_LIST_HEAD(&fi->writepages);
  85. init_waitqueue_head(&fi->page_waitq);
  86. mutex_init(&fi->mutex);
  87. fi->forget = fuse_alloc_forget();
  88. if (!fi->forget) {
  89. kmem_cache_free(fuse_inode_cachep, inode);
  90. return NULL;
  91. }
  92. return inode;
  93. }
  94. static void fuse_i_callback(struct rcu_head *head)
  95. {
  96. struct inode *inode = container_of(head, struct inode, i_rcu);
  97. kmem_cache_free(fuse_inode_cachep, inode);
  98. }
/*
 * Super op: tear down a fuse inode.  Dirty state must be gone by now;
 * files still attached for write or queued writes indicate a bug.
 * Freeing is RCU-delayed so lockless path walks stay safe.
 */
static void fuse_destroy_inode(struct inode *inode)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	BUG_ON(!list_empty(&fi->write_files));
	BUG_ON(!list_empty(&fi->queued_writes));
	mutex_destroy(&fi->mutex);
	/* may be NULL if fuse_evict_inode() handed it to the forget queue */
	kfree(fi->forget);
	call_rcu(&inode->i_rcu, fuse_i_callback);
}
/*
 * Super op: final eviction of an inode.  On an active superblock, tell
 * the server to drop its nlookup references via the preallocated
 * forget request.
 */
static void fuse_evict_inode(struct inode *inode)
{
	truncate_inode_pages_final(&inode->i_data);
	clear_inode(inode);
	if (inode->i_sb->s_flags & SB_ACTIVE) {
		struct fuse_conn *fc = get_fuse_conn(inode);
		struct fuse_inode *fi = get_fuse_inode(inode);

		/*
		 * Ownership of fi->forget passes to the queue; clear it
		 * so fuse_destroy_inode() won't free it a second time.
		 */
		fuse_queue_forget(fc, fi->forget, fi->nodeid, fi->nlookup);
		fi->forget = NULL;
	}
}
  119. static int fuse_remount_fs(struct super_block *sb, int *flags, char *data)
  120. {
  121. sync_filesystem(sb);
  122. if (*flags & SB_MANDLOCK)
  123. return -EINVAL;
  124. return 0;
  125. }
  126. /*
  127. * ino_t is 32-bits on 32-bit arch. We have to squash the 64-bit value down
  128. * so that it will fit.
  129. */
  130. static ino_t fuse_squash_ino(u64 ino64)
  131. {
  132. ino_t ino = (ino_t) ino64;
  133. if (sizeof(ino_t) < sizeof(u64))
  134. ino ^= ino64 >> (sizeof(u64) - sizeof(ino_t)) * 8;
  135. return ino;
  136. }
/*
 * Refresh the VFS inode from a fuse_attr received from the server.
 * Called with fc->lock held by fuse_change_attributes().
 */
void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
				   u64 attr_valid)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	/* Bump the version so older in-flight replies are discarded */
	fi->attr_version = ++fc->attr_version;
	fi->i_time = attr_valid;

	inode->i_ino = fuse_squash_ino(attr->ino);
	/* Keep the file type bits; only permission bits may change */
	inode->i_mode = (inode->i_mode & S_IFMT) | (attr->mode & 07777);
	set_nlink(inode, attr->nlink);
	inode->i_uid = make_kuid(fc->user_ns, attr->uid);
	inode->i_gid = make_kgid(fc->user_ns, attr->gid);
	inode->i_blocks = attr->blocks;
	inode->i_atime.tv_sec = attr->atime;
	inode->i_atime.tv_nsec = attr->atimensec;
	/* mtime from server may be stale due to local buffered write */
	if (!fc->writeback_cache || !S_ISREG(inode->i_mode)) {
		inode->i_mtime.tv_sec = attr->mtime;
		inode->i_mtime.tv_nsec = attr->mtimensec;
		inode->i_ctime.tv_sec = attr->ctime;
		inode->i_ctime.tv_nsec = attr->ctimensec;
	}

	if (attr->blksize != 0)
		inode->i_blkbits = ilog2(attr->blksize);
	else
		inode->i_blkbits = inode->i_sb->s_blocksize_bits;

	/*
	 * Don't set the sticky bit in i_mode, unless we want the VFS
	 * to check permissions.  This prevents failures due to the
	 * check in may_delete().
	 */
	fi->orig_i_mode = inode->i_mode;
	if (!fc->default_permissions)
		inode->i_mode &= ~S_ISVTX;

	fi->orig_ino = attr->ino;
}
/*
 * Update inode attributes and, for regular files without writeback
 * caching, invalidate the page cache when the size or (in auto-inval
 * mode) the mtime changed.  attr_version == 0 means "don't check for
 * staleness".
 */
void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
			    u64 attr_valid, u64 attr_version)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	bool is_wb = fc->writeback_cache;
	loff_t oldsize;
	struct timespec64 old_mtime;

	spin_lock(&fc->lock);
	/*
	 * Ignore the reply if a newer one already updated the inode, or
	 * if a truncate/write has the size in flux.
	 */
	if ((attr_version != 0 && fi->attr_version > attr_version) ||
	    test_bit(FUSE_I_SIZE_UNSTABLE, &fi->state)) {
		spin_unlock(&fc->lock);
		return;
	}

	old_mtime = inode->i_mtime;
	fuse_change_attributes_common(inode, attr, attr_valid);

	oldsize = inode->i_size;
	/*
	 * In case of writeback_cache enabled, the cached writes beyond EOF
	 * extend local i_size without keeping userspace server in sync. So,
	 * attr->size coming from server can be stale. We cannot trust it.
	 */
	if (!is_wb || !S_ISREG(inode->i_mode))
		i_size_write(inode, attr->size);
	spin_unlock(&fc->lock);

	if (!is_wb && S_ISREG(inode->i_mode)) {
		bool inval = false;

		if (oldsize != attr->size) {
			truncate_pagecache(inode, attr->size);
			inval = true;
		} else if (fc->auto_inval_data) {
			struct timespec64 new_mtime = {
				.tv_sec = attr->mtime,
				.tv_nsec = attr->mtimensec,
			};

			/*
			 * Auto inval mode also checks and invalidates if mtime
			 * has changed.
			 */
			if (!timespec64_equal(&old_mtime, &new_mtime))
				inval = true;
		}

		if (inval)
			invalidate_inode_pages2(inode->i_mapping);
	}
}
/*
 * One-time setup of a freshly created inode from its first attributes;
 * installs the type-specific op vectors.
 */
static void fuse_init_inode(struct inode *inode, struct fuse_attr *attr)
{
	inode->i_mode = attr->mode & S_IFMT;
	inode->i_size = attr->size;
	inode->i_mtime.tv_sec = attr->mtime;
	inode->i_mtime.tv_nsec = attr->mtimensec;
	inode->i_ctime.tv_sec = attr->ctime;
	inode->i_ctime.tv_nsec = attr->ctimensec;
	if (S_ISREG(inode->i_mode)) {
		fuse_init_common(inode);
		fuse_init_file_inode(inode);
	} else if (S_ISDIR(inode->i_mode))
		fuse_init_dir(inode);
	else if (S_ISLNK(inode->i_mode))
		fuse_init_symlink(inode);
	else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
		 S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		fuse_init_common(inode);
		init_special_inode(inode, inode->i_mode,
				   new_decode_dev(attr->rdev));
	} else
		/* server sent a mode with no valid file type */
		BUG();
}
  242. int fuse_inode_eq(struct inode *inode, void *_nodeidp)
  243. {
  244. u64 nodeid = *(u64 *) _nodeidp;
  245. if (get_node_id(inode) == nodeid)
  246. return 1;
  247. else
  248. return 0;
  249. }
  250. static int fuse_inode_set(struct inode *inode, void *_nodeidp)
  251. {
  252. u64 nodeid = *(u64 *) _nodeidp;
  253. get_fuse_inode(inode)->nodeid = nodeid;
  254. return 0;
  255. }
/*
 * Look up or create the inode for @nodeid.  A new inode is initialized
 * from @attr; an existing one gets its attributes refreshed and its
 * nlookup count bumped.  If the node changed file type on the server,
 * the stale inode is poisoned and a fresh one created.
 */
struct inode *fuse_iget(struct super_block *sb, u64 nodeid,
			int generation, struct fuse_attr *attr,
			u64 attr_valid, u64 attr_version)
{
	struct inode *inode;
	struct fuse_inode *fi;
	struct fuse_conn *fc = get_fuse_conn_super(sb);

 retry:
	inode = iget5_locked(sb, nodeid, fuse_inode_eq, fuse_inode_set, &nodeid);
	if (!inode)
		return NULL;

	if ((inode->i_state & I_NEW)) {
		inode->i_flags |= S_NOATIME;
		/* with writeback cache the kernel owns c/mtime of regular files */
		if (!fc->writeback_cache || !S_ISREG(attr->mode))
			inode->i_flags |= S_NOCMTIME;
		inode->i_generation = generation;
		fuse_init_inode(inode, attr);
		unlock_new_inode(inode);
	} else if ((inode->i_mode ^ attr->mode) & S_IFMT) {
		/* Inode has changed type, any I/O on the old should fail */
		make_bad_inode(inode);
		iput(inode);
		goto retry;
	}

	fi = get_fuse_inode(inode);
	spin_lock(&fc->lock);
	fi->nlookup++;
	spin_unlock(&fc->lock);
	fuse_change_attributes(inode, attr, attr_valid, attr_version);

	return inode;
}
/*
 * Invalidate an inode's cached attributes/ACLs and, if @offset >= 0, a
 * byte range of its page cache.  @len <= 0 means "to end of file".
 * Returns -ENOENT if the inode is not currently cached.
 */
int fuse_reverse_inval_inode(struct super_block *sb, u64 nodeid,
			     loff_t offset, loff_t len)
{
	struct inode *inode;
	pgoff_t pg_start;
	pgoff_t pg_end;

	inode = ilookup5(sb, nodeid, fuse_inode_eq, &nodeid);
	if (!inode)
		return -ENOENT;

	fuse_invalidate_attr(inode);
	forget_all_cached_acls(inode);
	if (offset >= 0) {
		pg_start = offset >> PAGE_SHIFT;
		if (len <= 0)
			pg_end = -1;	/* last possible page index */
		else
			pg_end = (offset + len - 1) >> PAGE_SHIFT;
		invalidate_inode_pages2_range(inode->i_mapping,
					      pg_start, pg_end);
	}
	iput(inode);
	return 0;
}
  310. bool fuse_lock_inode(struct inode *inode)
  311. {
  312. bool locked = false;
  313. if (!get_fuse_conn(inode)->parallel_dirops) {
  314. mutex_lock(&get_fuse_inode(inode)->mutex);
  315. locked = true;
  316. }
  317. return locked;
  318. }
  319. void fuse_unlock_inode(struct inode *inode, bool locked)
  320. {
  321. if (locked)
  322. mutex_unlock(&get_fuse_inode(inode)->mutex);
  323. }
  324. static void fuse_umount_begin(struct super_block *sb)
  325. {
  326. fuse_abort_conn(get_fuse_conn_super(sb), false);
  327. }
/*
 * Send the preallocated FUSE_DESTROY request, synchronously and forced
 * (not background), but only if INIT ever completed.  fc->destroy_req
 * is cleared so this happens at most once.
 */
static void fuse_send_destroy(struct fuse_conn *fc)
{
	struct fuse_req *req = fc->destroy_req;

	if (req && fc->conn_init) {
		fc->destroy_req = NULL;
		req->in.h.opcode = FUSE_DESTROY;
		__set_bit(FR_FORCE, &req->flags);
		__clear_bit(FR_BACKGROUND, &req->flags);
		fuse_request_send(fc, req);
		fuse_put_request(fc, req);
	}
}
/*
 * Super op: unregister the connection from the global list and the
 * fuse control filesystem, then drop the superblock's reference.
 */
static void fuse_put_super(struct super_block *sb)
{
	struct fuse_conn *fc = get_fuse_conn_super(sb);

	mutex_lock(&fuse_mutex);
	list_del(&fc->entry);
	fuse_ctl_remove_conn(fc);
	mutex_unlock(&fuse_mutex);

	/* may free fc if this was the last reference */
	fuse_conn_put(fc);
}
  349. static void convert_fuse_statfs(struct kstatfs *stbuf, struct fuse_kstatfs *attr)
  350. {
  351. stbuf->f_type = FUSE_SUPER_MAGIC;
  352. stbuf->f_bsize = attr->bsize;
  353. stbuf->f_frsize = attr->frsize;
  354. stbuf->f_blocks = attr->blocks;
  355. stbuf->f_bfree = attr->bfree;
  356. stbuf->f_bavail = attr->bavail;
  357. stbuf->f_files = attr->files;
  358. stbuf->f_ffree = attr->ffree;
  359. stbuf->f_namelen = attr->namelen;
  360. /* fsid is left zero */
  361. }
/*
 * Super op: issue FUSE_STATFS.  Processes not allowed to access this
 * filesystem get dummy (mostly zero) statistics instead of an error.
 */
static int fuse_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct fuse_conn *fc = get_fuse_conn_super(sb);
	FUSE_ARGS(args);
	struct fuse_statfs_out outarg;
	int err;

	if (!fuse_allow_current_process(fc)) {
		buf->f_type = FUSE_SUPER_MAGIC;
		return 0;
	}

	memset(&outarg, 0, sizeof(outarg));
	args.in.numargs = 0;
	args.in.h.opcode = FUSE_STATFS;
	args.in.h.nodeid = get_node_id(d_inode(dentry));
	args.out.numargs = 1;
	args.out.args[0].size = sizeof(outarg);
	args.out.args[0].value = &outarg;
	err = fuse_simple_request(fc, &args);
	if (!err)
		convert_fuse_statfs(buf, &outarg.st);
	return err;
}
/* Mount option tokens */
enum {
	OPT_FD,
	OPT_ROOTMODE,
	OPT_USER_ID,
	OPT_GROUP_ID,
	OPT_DEFAULT_PERMISSIONS,
	OPT_ALLOW_OTHER,
	OPT_MAX_READ,
	OPT_BLKSIZE,
	OPT_ERR
};

/* Maps mount option strings onto the tokens above */
static const match_table_t tokens = {
	{OPT_FD,			"fd=%u"},
	{OPT_ROOTMODE,			"rootmode=%o"},
	{OPT_USER_ID,			"user_id=%u"},
	{OPT_GROUP_ID,			"group_id=%u"},
	{OPT_DEFAULT_PERMISSIONS,	"default_permissions"},
	{OPT_ALLOW_OTHER,		"allow_other"},
	{OPT_MAX_READ,			"max_read=%u"},
	{OPT_BLKSIZE,			"blksize=%u"},
	{OPT_ERR,			NULL}
};
  407. static int fuse_match_uint(substring_t *s, unsigned int *res)
  408. {
  409. int err = -ENOMEM;
  410. char *buf = match_strdup(s);
  411. if (buf) {
  412. err = kstrtouint(buf, 10, res);
  413. kfree(buf);
  414. }
  415. return err;
  416. }
/*
 * Parse the comma-separated mount option string into @d.  Returns 1 on
 * success, 0 on any parse error or when one of the mandatory options
 * (fd, rootmode, user_id, group_id) is missing.  uid/gid values must
 * map into @user_ns.
 */
static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev,
			  struct user_namespace *user_ns)
{
	char *p;

	memset(d, 0, sizeof(struct fuse_mount_data));
	d->max_read = ~0;
	d->blksize = FUSE_DEFAULT_BLKSIZE;

	while ((p = strsep(&opt, ",")) != NULL) {
		int token;
		int value;
		unsigned uv;
		substring_t args[MAX_OPT_ARGS];

		if (!*p)
			continue;

		token = match_token(p, tokens, args);
		switch (token) {
		case OPT_FD:
			if (match_int(&args[0], &value))
				return 0;
			d->fd = value;
			d->fd_present = 1;
			break;

		case OPT_ROOTMODE:
			if (match_octal(&args[0], &value))
				return 0;
			/* mode must encode a valid file type */
			if (!fuse_valid_type(value))
				return 0;
			d->rootmode = value;
			d->rootmode_present = 1;
			break;

		case OPT_USER_ID:
			if (fuse_match_uint(&args[0], &uv))
				return 0;
			d->user_id = make_kuid(user_ns, uv);
			if (!uid_valid(d->user_id))
				return 0;
			d->user_id_present = 1;
			break;

		case OPT_GROUP_ID:
			if (fuse_match_uint(&args[0], &uv))
				return 0;
			d->group_id = make_kgid(user_ns, uv);
			if (!gid_valid(d->group_id))
				return 0;
			d->group_id_present = 1;
			break;

		case OPT_DEFAULT_PERMISSIONS:
			d->default_permissions = 1;
			break;

		case OPT_ALLOW_OTHER:
			d->allow_other = 1;
			break;

		case OPT_MAX_READ:
			if (match_int(&args[0], &value))
				return 0;
			d->max_read = value;
			break;

		case OPT_BLKSIZE:
			/* blksize is only valid on block device mounts */
			if (!is_bdev || match_int(&args[0], &value))
				return 0;
			d->blksize = value;
			break;

		default:
			return 0;
		}
	}

	if (!d->fd_present || !d->rootmode_present ||
	    !d->user_id_present || !d->group_id_present)
		return 0;

	return 1;
}
/*
 * Super op: emit mount options for /proc/mounts.  Options still at
 * their defaults are suppressed.
 */
static int fuse_show_options(struct seq_file *m, struct dentry *root)
{
	struct super_block *sb = root->d_sb;
	struct fuse_conn *fc = get_fuse_conn_super(sb);

	seq_printf(m, ",user_id=%u", from_kuid_munged(fc->user_ns, fc->user_id));
	seq_printf(m, ",group_id=%u", from_kgid_munged(fc->user_ns, fc->group_id));
	if (fc->default_permissions)
		seq_puts(m, ",default_permissions");
	if (fc->allow_other)
		seq_puts(m, ",allow_other");
	if (fc->max_read != ~0)
		seq_printf(m, ",max_read=%u", fc->max_read);
	if (sb->s_bdev && sb->s_blocksize != FUSE_DEFAULT_BLKSIZE)
		seq_printf(m, ",blksize=%lu", sb->s_blocksize);
	return 0;
}
  504. static void fuse_iqueue_init(struct fuse_iqueue *fiq)
  505. {
  506. memset(fiq, 0, sizeof(struct fuse_iqueue));
  507. spin_lock_init(&fiq->lock);
  508. init_waitqueue_head(&fiq->waitq);
  509. INIT_LIST_HEAD(&fiq->pending);
  510. INIT_LIST_HEAD(&fiq->interrupts);
  511. fiq->forget_list_tail = &fiq->forget_list_head;
  512. fiq->connected = 1;
  513. }
  514. static void fuse_pqueue_init(struct fuse_pqueue *fpq)
  515. {
  516. memset(fpq, 0, sizeof(struct fuse_pqueue));
  517. spin_lock_init(&fpq->lock);
  518. INIT_LIST_HEAD(&fpq->processing);
  519. INIT_LIST_HEAD(&fpq->io);
  520. fpq->connected = 1;
  521. }
/*
 * Initialize a connection to its default state: one reference, one
 * device, empty queues, default background/congestion limits.  Takes
 * references on the current pid namespace and on @user_ns.
 */
void fuse_conn_init(struct fuse_conn *fc, struct user_namespace *user_ns)
{
	memset(fc, 0, sizeof(*fc));
	spin_lock_init(&fc->lock);
	init_rwsem(&fc->killsb);
	refcount_set(&fc->count, 1);
	atomic_set(&fc->dev_count, 1);
	init_waitqueue_head(&fc->blocked_waitq);
	init_waitqueue_head(&fc->reserved_req_waitq);
	fuse_iqueue_init(&fc->iq);
	INIT_LIST_HEAD(&fc->bg_queue);
	INIT_LIST_HEAD(&fc->entry);
	INIT_LIST_HEAD(&fc->devices);
	atomic_set(&fc->num_waiting, 0);
	fc->max_background = FUSE_DEFAULT_MAX_BACKGROUND;
	fc->congestion_threshold = FUSE_DEFAULT_CONGESTION_THRESHOLD;
	fc->khctr = 0;
	fc->polled_files = RB_ROOT;
	fc->blocked = 0;
	fc->initialized = 0;
	fc->connected = 1;
	/* starts at 1 so version 0 can mean "don't check for staleness" */
	fc->attr_version = 1;
	get_random_bytes(&fc->scramble_key, sizeof(fc->scramble_key));
	fc->pid_ns = get_pid_ns(task_active_pid_ns(current));
	fc->user_ns = get_user_ns(user_ns);
}
EXPORT_SYMBOL_GPL(fuse_conn_init);
/*
 * Drop a connection reference; on the last put release the namespace
 * references and hand the memory back via fc->release().
 */
void fuse_conn_put(struct fuse_conn *fc)
{
	if (refcount_dec_and_test(&fc->count)) {
		/* the preallocated DESTROY request may never have been sent */
		if (fc->destroy_req)
			fuse_request_free(fc->destroy_req);
		put_pid_ns(fc->pid_ns);
		put_user_ns(fc->user_ns);
		fc->release(fc);
	}
}
EXPORT_SYMBOL_GPL(fuse_conn_put);
  560. struct fuse_conn *fuse_conn_get(struct fuse_conn *fc)
  561. {
  562. refcount_inc(&fc->count);
  563. return fc;
  564. }
  565. EXPORT_SYMBOL_GPL(fuse_conn_get);
  566. static struct inode *fuse_get_root_inode(struct super_block *sb, unsigned mode)
  567. {
  568. struct fuse_attr attr;
  569. memset(&attr, 0, sizeof(attr));
  570. attr.mode = mode;
  571. attr.ino = FUSE_ROOT_ID;
  572. attr.nlink = 1;
  573. return fuse_iget(sb, 1, 0, &attr, 0, 0);
  574. }
/*
 * Decoded file handle contents used by the export operations: a node
 * id plus the generation that guards against node id reuse.
 */
struct fuse_inode_handle {
	u64 nodeid;
	u32 generation;
};
/*
 * Turn a decoded file handle into a dentry.  If the inode is not in
 * cache it is looked up on the server via a "." lookup by nodeid,
 * which requires export_support.  A generation mismatch means the node
 * id was reused for a different file -> -ESTALE.
 */
static struct dentry *fuse_get_dentry(struct super_block *sb,
				      struct fuse_inode_handle *handle)
{
	struct fuse_conn *fc = get_fuse_conn_super(sb);
	struct inode *inode;
	struct dentry *entry;
	int err = -ESTALE;

	if (handle->nodeid == 0)
		goto out_err;

	inode = ilookup5(sb, handle->nodeid, fuse_inode_eq, &handle->nodeid);
	if (!inode) {
		struct fuse_entry_out outarg;
		const struct qstr name = QSTR_INIT(".", 1);

		if (!fc->export_support)
			goto out_err;

		err = fuse_lookup_name(sb, handle->nodeid, &name, &outarg,
				       &inode);
		if (err && err != -ENOENT)
			goto out_err;
		if (err || !inode) {
			err = -ESTALE;
			goto out_err;
		}
		err = -EIO;
		/* server must return the node we asked about */
		if (get_node_id(inode) != handle->nodeid)
			goto out_iput;
	}
	err = -ESTALE;
	if (inode->i_generation != handle->generation)
		goto out_iput;

	entry = d_obtain_alias(inode);
	/* force revalidation so the next lookup goes to the server */
	if (!IS_ERR(entry) && get_node_id(inode) != FUSE_ROOT_ID)
		fuse_invalidate_entry_cache(entry);

	return entry;

 out_iput:
	iput(inode);
 out_err:
	return ERR_PTR(err);
}
/*
 * Export op: encode a file handle as nodeid (split into two 32-bit
 * words) plus i_generation, and the same again for the parent if
 * requested.  Returns the fileid type (0x81 plain, 0x82 with parent);
 * if the buffer is too small, *max_len is set to the required length
 * and FILEID_INVALID is returned.
 */
static int fuse_encode_fh(struct inode *inode, u32 *fh, int *max_len,
			  struct inode *parent)
{
	int len = parent ? 6 : 3;
	u64 nodeid;
	u32 generation;

	if (*max_len < len) {
		*max_len = len;
		return FILEID_INVALID;
	}

	nodeid = get_fuse_inode(inode)->nodeid;
	generation = inode->i_generation;

	fh[0] = (u32)(nodeid >> 32);
	fh[1] = (u32)(nodeid & 0xffffffff);
	fh[2] = generation;

	if (parent) {
		nodeid = get_fuse_inode(parent)->nodeid;
		generation = parent->i_generation;

		fh[3] = (u32)(nodeid >> 32);
		fh[4] = (u32)(nodeid & 0xffffffff);
		fh[5] = generation;
	}

	*max_len = len;
	return parent ? 0x82 : 0x81;
}
  643. static struct dentry *fuse_fh_to_dentry(struct super_block *sb,
  644. struct fid *fid, int fh_len, int fh_type)
  645. {
  646. struct fuse_inode_handle handle;
  647. if ((fh_type != 0x81 && fh_type != 0x82) || fh_len < 3)
  648. return NULL;
  649. handle.nodeid = (u64) fid->raw[0] << 32;
  650. handle.nodeid |= (u64) fid->raw[1];
  651. handle.generation = fid->raw[2];
  652. return fuse_get_dentry(sb, &handle);
  653. }
  654. static struct dentry *fuse_fh_to_parent(struct super_block *sb,
  655. struct fid *fid, int fh_len, int fh_type)
  656. {
  657. struct fuse_inode_handle parent;
  658. if (fh_type != 0x82 || fh_len < 6)
  659. return NULL;
  660. parent.nodeid = (u64) fid->raw[3] << 32;
  661. parent.nodeid |= (u64) fid->raw[4];
  662. parent.generation = fid->raw[5];
  663. return fuse_get_dentry(sb, &parent);
  664. }
/*
 * Export op: find the parent of @child by asking the server to look up
 * "..".  Requires export_support; -ESTALE if the entry is gone.
 */
static struct dentry *fuse_get_parent(struct dentry *child)
{
	struct inode *child_inode = d_inode(child);
	struct fuse_conn *fc = get_fuse_conn(child_inode);
	struct inode *inode;
	struct dentry *parent;
	struct fuse_entry_out outarg;
	const struct qstr name = QSTR_INIT("..", 2);
	int err;

	if (!fc->export_support)
		return ERR_PTR(-ESTALE);

	err = fuse_lookup_name(child_inode->i_sb, get_node_id(child_inode),
			       &name, &outarg, &inode);
	if (err) {
		if (err == -ENOENT)
			return ERR_PTR(-ESTALE);
		return ERR_PTR(err);
	}

	parent = d_obtain_alias(inode);
	/* force revalidation so the next lookup goes to the server */
	if (!IS_ERR(parent) && get_node_id(inode) != FUSE_ROOT_ID)
		fuse_invalidate_entry_cache(parent);

	return parent;
}
/* NFS export support: handles encode nodeid + generation */
static const struct export_operations fuse_export_operations = {
	.fh_to_dentry	= fuse_fh_to_dentry,
	.fh_to_parent	= fuse_fh_to_parent,
	.encode_fh	= fuse_encode_fh,
	.get_parent	= fuse_get_parent,
};
static const struct super_operations fuse_super_operations = {
	.alloc_inode	= fuse_alloc_inode,
	.destroy_inode	= fuse_destroy_inode,
	.evict_inode	= fuse_evict_inode,
	.write_inode	= fuse_write_inode,
	/* always drop inodes from cache on last iput */
	.drop_inode	= generic_delete_inode,
	.remount_fs	= fuse_remount_fs,
	.put_super	= fuse_put_super,
	.umount_begin	= fuse_umount_begin,
	.statfs		= fuse_statfs,
	.show_options	= fuse_show_options,
};
  706. static void sanitize_global_limit(unsigned *limit)
  707. {
  708. if (*limit == 0)
  709. *limit = ((totalram_pages << PAGE_SHIFT) >> 13) /
  710. sizeof(struct fuse_req);
  711. if (*limit >= 1 << 16)
  712. *limit = (1 << 16) - 1;
  713. }
  714. static int set_global_limit(const char *val, const struct kernel_param *kp)
  715. {
  716. int rv;
  717. rv = param_set_uint(val, kp);
  718. if (rv)
  719. return rv;
  720. sanitize_global_limit((unsigned *)kp->arg);
  721. return 0;
  722. }
/*
 * Apply the background request and congestion limits the server asked
 * for in its INIT reply.  Servers without CAP_SYS_ADMIN are clamped to
 * the max_user_bgreq/max_user_congthresh module limits.  Protocol
 * minors < 13 did not carry these fields.
 */
static void process_init_limits(struct fuse_conn *fc, struct fuse_init_out *arg)
{
	int cap_sys_admin = capable(CAP_SYS_ADMIN);

	if (arg->minor < 13)
		return;

	sanitize_global_limit(&max_user_bgreq);
	sanitize_global_limit(&max_user_congthresh);

	if (arg->max_background) {
		fc->max_background = arg->max_background;
		if (!cap_sys_admin && fc->max_background > max_user_bgreq)
			fc->max_background = max_user_bgreq;
	}
	if (arg->congestion_threshold) {
		fc->congestion_threshold = arg->congestion_threshold;
		if (!cap_sys_admin &&
		    fc->congestion_threshold > max_user_congthresh)
			fc->congestion_threshold = max_user_congthresh;
	}
}
/*
 * Completion callback for the FUSE_INIT request: negotiate protocol
 * features with the server, then mark the connection initialized so
 * waiting requests may proceed.
 */
static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
{
	struct fuse_init_out *arg = &req->misc.init_out;

	if (req->out.h.error || arg->major != FUSE_KERNEL_VERSION)
		fc->conn_error = 1;	/* incompatible server */
	else {
		unsigned long ra_pages;

		process_init_limits(fc, arg);

		if (arg->minor >= 6) {
			ra_pages = arg->max_readahead / PAGE_SIZE;
			if (arg->flags & FUSE_ASYNC_READ)
				fc->async_read = 1;
			if (!(arg->flags & FUSE_POSIX_LOCKS))
				fc->no_lock = 1;
			if (arg->minor >= 17) {
				if (!(arg->flags & FUSE_FLOCK_LOCKS))
					fc->no_flock = 1;
			} else {
				/* older protocol: one flag covered both lock kinds */
				if (!(arg->flags & FUSE_POSIX_LOCKS))
					fc->no_flock = 1;
			}
			if (arg->flags & FUSE_ATOMIC_O_TRUNC)
				fc->atomic_o_trunc = 1;
			if (arg->minor >= 9) {
				/* LOOKUP has dependency on proto version */
				if (arg->flags & FUSE_EXPORT_SUPPORT)
					fc->export_support = 1;
			}
			if (arg->flags & FUSE_BIG_WRITES)
				fc->big_writes = 1;
			if (arg->flags & FUSE_DONT_MASK)
				fc->dont_mask = 1;
			if (arg->flags & FUSE_AUTO_INVAL_DATA)
				fc->auto_inval_data = 1;
			if (arg->flags & FUSE_DO_READDIRPLUS) {
				fc->do_readdirplus = 1;
				if (arg->flags & FUSE_READDIRPLUS_AUTO)
					fc->readdirplus_auto = 1;
			}
			if (arg->flags & FUSE_ASYNC_DIO)
				fc->async_dio = 1;
			if (arg->flags & FUSE_WRITEBACK_CACHE)
				fc->writeback_cache = 1;
			if (arg->flags & FUSE_PARALLEL_DIROPS)
				fc->parallel_dirops = 1;
			if (arg->flags & FUSE_HANDLE_KILLPRIV)
				fc->handle_killpriv = 1;
			if (arg->time_gran && arg->time_gran <= 1000000000)
				fc->sb->s_time_gran = arg->time_gran;
			if ((arg->flags & FUSE_POSIX_ACL)) {
				/* ACL support implies VFS permission checking */
				fc->default_permissions = 1;
				fc->posix_acl = 1;
				fc->sb->s_xattr = fuse_acl_xattr_handlers;
			}
			if (arg->flags & FUSE_ABORT_ERROR)
				fc->abort_err = 1;
		} else {
			/* pre-7.6 servers sent no feature flags at all */
			ra_pages = fc->max_read / PAGE_SIZE;
			fc->no_lock = 1;
			fc->no_flock = 1;
		}

		fc->sb->s_bdi->ra_pages =
				min(fc->sb->s_bdi->ra_pages, ra_pages);
		fc->minor = arg->minor;
		fc->max_write = arg->minor < 5 ? 4096 : arg->max_write;
		fc->max_write = max_t(unsigned, 4096, fc->max_write);
		fc->conn_init = 1;
	}
	fuse_set_initialized(fc);
	/* requests blocked waiting on INIT may run now */
	wake_up_all(&fc->blocked_waitq);
}
  813. static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req)
  814. {
  815. struct fuse_init_in *arg = &req->misc.init_in;
  816. arg->major = FUSE_KERNEL_VERSION;
  817. arg->minor = FUSE_KERNEL_MINOR_VERSION;
  818. arg->max_readahead = fc->sb->s_bdi->ra_pages * PAGE_SIZE;
  819. arg->flags |= FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC |
  820. FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES | FUSE_DONT_MASK |
  821. FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ |
  822. FUSE_FLOCK_LOCKS | FUSE_HAS_IOCTL_DIR | FUSE_AUTO_INVAL_DATA |
  823. FUSE_DO_READDIRPLUS | FUSE_READDIRPLUS_AUTO | FUSE_ASYNC_DIO |
  824. FUSE_WRITEBACK_CACHE | FUSE_NO_OPEN_SUPPORT |
  825. FUSE_PARALLEL_DIROPS | FUSE_HANDLE_KILLPRIV | FUSE_POSIX_ACL |
  826. FUSE_ABORT_ERROR;
  827. req->in.h.opcode = FUSE_INIT;
  828. req->in.numargs = 1;
  829. req->in.args[0].size = sizeof(*arg);
  830. req->in.args[0].value = arg;
  831. req->out.numargs = 1;
  832. /* Variable length argument used for backward compatibility
  833. with interface version < 7.5. Rest of init_out is zeroed
  834. by do_get_request(), so a short reply is not a problem */
  835. req->out.argvar = 1;
  836. req->out.args[0].size = sizeof(struct fuse_init_out);
  837. req->out.args[0].value = &req->misc.init_out;
  838. req->end = process_init_reply;
  839. fuse_request_send_background(fc, req);
  840. }
/*
 * Release callback for the connection (installed as fc->release in
 * fuse_fill_super()): frees the fuse_conn once its last reference is
 * dropped.
 */
static void fuse_free_conn(struct fuse_conn *fc)
{
	/* All fuse_dev instances must have been torn down by now */
	WARN_ON(!list_empty(&fc->devices));
	/* RCU-delayed free: readers may still hold fc under RCU */
	kfree_rcu(fc, rcu);
}
  846. static int fuse_bdi_init(struct fuse_conn *fc, struct super_block *sb)
  847. {
  848. int err;
  849. char *suffix = "";
  850. if (sb->s_bdev) {
  851. suffix = "-fuseblk";
  852. /*
  853. * sb->s_bdi points to blkdev's bdi however we want to redirect
  854. * it to our private bdi...
  855. */
  856. bdi_put(sb->s_bdi);
  857. sb->s_bdi = &noop_backing_dev_info;
  858. }
  859. err = super_setup_bdi_name(sb, "%u:%u%s", MAJOR(fc->dev),
  860. MINOR(fc->dev), suffix);
  861. if (err)
  862. return err;
  863. sb->s_bdi->ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
  864. /* fuse does it's own writeback accounting */
  865. sb->s_bdi->capabilities = BDI_CAP_NO_ACCT_WB | BDI_CAP_STRICTLIMIT;
  866. /*
  867. * For a single fuse filesystem use max 1% of dirty +
  868. * writeback threshold.
  869. *
  870. * This gives about 1M of write buffer for memory maps on a
  871. * machine with 1G and 10% dirty_ratio, which should be more
  872. * than enough.
  873. *
  874. * Privileged users can raise it by writing to
  875. *
  876. * /sys/class/bdi/<bdi>/max_ratio
  877. */
  878. bdi_set_max_ratio(sb->s_bdi, 1);
  879. return 0;
  880. }
  881. struct fuse_dev *fuse_dev_alloc(struct fuse_conn *fc)
  882. {
  883. struct fuse_dev *fud;
  884. fud = kzalloc(sizeof(struct fuse_dev), GFP_KERNEL);
  885. if (fud) {
  886. fud->fc = fuse_conn_get(fc);
  887. fuse_pqueue_init(&fud->pq);
  888. spin_lock(&fc->lock);
  889. list_add_tail(&fud->entry, &fc->devices);
  890. spin_unlock(&fc->lock);
  891. }
  892. return fud;
  893. }
  894. EXPORT_SYMBOL_GPL(fuse_dev_alloc);
/*
 * Counterpart of fuse_dev_alloc(): unlink the fuse_dev from the
 * connection's device list (if it is still attached to one), drop the
 * connection reference taken at alloc time, and free the structure.
 */
void fuse_dev_free(struct fuse_dev *fud)
{
	struct fuse_conn *fc = fud->fc;

	if (fc) {
		/* fc->lock protects the fc->devices list */
		spin_lock(&fc->lock);
		list_del(&fud->entry);
		spin_unlock(&fc->lock);

		fuse_conn_put(fc);
	}
	kfree(fud);
}
EXPORT_SYMBOL_GPL(fuse_dev_free);
/*
 * Fill in the superblock for a fuse (or fuseblk) mount.
 *
 * @data carries the mount options; the "fd" option refers to an open
 * /dev/fuse file through which the userspace server communicates.  On
 * success the connection is registered, file->private_data points to
 * the new fuse_dev and the FUSE_INIT request has been queued.  On
 * failure all partially set up state is unwound via the goto chain at
 * the bottom.
 */
static int fuse_fill_super(struct super_block *sb, void *data, int silent)
{
	struct fuse_dev *fud;
	struct fuse_conn *fc;
	struct inode *root;
	struct fuse_mount_data d;
	struct file *file;
	struct dentry *root_dentry;
	struct fuse_req *init_req;
	int err;
	int is_bdev = sb->s_bdev != NULL;

	err = -EINVAL;
	/* Mandatory locking is rejected outright */
	if (sb->s_flags & SB_MANDLOCK)
		goto err;

	sb->s_flags &= ~(SB_NOSEC | SB_I_VERSION);

	if (!parse_fuse_opt(data, &d, is_bdev, sb->s_user_ns))
		goto err;

	if (is_bdev) {
#ifdef CONFIG_BLOCK
		err = -EINVAL;
		/* Block-backed mounts take their blocksize from the options */
		if (!sb_set_blocksize(sb, d.blksize))
			goto err;
#endif
	} else {
		sb->s_blocksize = PAGE_SIZE;
		sb->s_blocksize_bits = PAGE_SHIFT;
	}
	sb->s_magic = FUSE_SUPER_MAGIC;
	sb->s_op = &fuse_super_operations;
	sb->s_xattr = fuse_xattr_handlers;
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_time_gran = 1;
	sb->s_export_op = &fuse_export_operations;
	sb->s_iflags |= SB_I_IMA_UNVERIFIABLE_SIGNATURE;
	if (sb->s_user_ns != &init_user_ns)
		sb->s_iflags |= SB_I_UNTRUSTED_MOUNTER;

	file = fget(d.fd);
	err = -EINVAL;
	if (!file)
		goto err;

	/*
	 * Require mount to happen from the same user namespace which
	 * opened /dev/fuse to prevent potential attacks.
	 */
	if (file->f_op != &fuse_dev_operations ||
	    file->f_cred->user_ns != sb->s_user_ns)
		goto err_fput;

	/*
	 * If we are not in the initial user namespace posix
	 * acls must be translated.
	 */
	if (sb->s_user_ns != &init_user_ns)
		sb->s_xattr = fuse_no_acl_xattr_handlers;

	fc = kmalloc(sizeof(*fc), GFP_KERNEL);
	err = -ENOMEM;
	if (!fc)
		goto err_fput;

	fuse_conn_init(fc, sb->s_user_ns);
	fc->release = fuse_free_conn;

	fud = fuse_dev_alloc(fc);
	if (!fud)
		goto err_put_conn;

	fc->dev = sb->s_dev;
	fc->sb = sb;
	err = fuse_bdi_init(fc, sb);
	if (err)
		goto err_dev_free;

	/* Handle umasking inside the fuse code */
	if (sb->s_flags & SB_POSIXACL)
		fc->dont_mask = 1;
	sb->s_flags |= SB_POSIXACL;

	fc->default_permissions = d.default_permissions;
	fc->allow_other = d.allow_other;
	fc->user_id = d.user_id;
	fc->group_id = d.group_id;
	fc->max_read = max_t(unsigned, 4096, d.max_read);

	/* Used by get_root_inode() */
	sb->s_fs_info = fc;

	err = -ENOMEM;
	root = fuse_get_root_inode(sb, d.rootmode);
	sb->s_d_op = &fuse_root_dentry_operations;
	/* d_make_root() consumes "root" even on failure */
	root_dentry = d_make_root(root);
	if (!root_dentry)
		goto err_dev_free;
	/* Root dentry doesn't have .d_revalidate */
	sb->s_d_op = &fuse_dentry_operations;

	init_req = fuse_request_alloc(0);
	if (!init_req)
		goto err_put_root;
	__set_bit(FR_BACKGROUND, &init_req->flags);

	if (is_bdev) {
		/* fuseblk mounts send FUSE_DESTROY at unmount time */
		fc->destroy_req = fuse_request_alloc(0);
		if (!fc->destroy_req)
			goto err_free_init_req;
	}

	mutex_lock(&fuse_mutex);
	err = -EINVAL;
	/* A /dev/fuse file can only serve a single mount */
	if (file->private_data)
		goto err_unlock;

	err = fuse_ctl_add_conn(fc);
	if (err)
		goto err_unlock;

	list_add_tail(&fc->entry, &fuse_conn_list);
	sb->s_root = root_dentry;
	file->private_data = fud;
	mutex_unlock(&fuse_mutex);
	/*
	 * atomic_dec_and_test() in fput() provides the necessary
	 * memory barrier for file->private_data to be visible on all
	 * CPUs after this
	 */
	fput(file);

	fuse_send_init(fc, init_req);

	return 0;

 err_unlock:
	mutex_unlock(&fuse_mutex);
 err_free_init_req:
	fuse_request_free(init_req);
 err_put_root:
	dput(root_dentry);
 err_dev_free:
	fuse_dev_free(fud);
 err_put_conn:
	fuse_conn_put(fc);
	sb->s_fs_info = NULL;
 err_fput:
	fput(file);
 err:
	return err;
}
/* .mount for the plain "fuse" filesystem type: no backing device */
static struct dentry *fuse_mount(struct file_system_type *fs_type,
		       int flags, const char *dev_name,
		       void *raw_data)
{
	return mount_nodev(fs_type, flags, raw_data, fuse_fill_super);
}
/*
 * Superblock teardown shared by fuse and fuseblk: notify the server
 * (FUSE_DESTROY), abort the connection and wait for outstanding
 * requests to drain, then detach the superblock from the connection
 * under the killsb rwsem.
 */
static void fuse_sb_destroy(struct super_block *sb)
{
	struct fuse_conn *fc = get_fuse_conn_super(sb);

	if (fc) {
		fuse_send_destroy(fc);

		fuse_abort_conn(fc, false);
		fuse_wait_aborted(fc);

		/* killsb serializes against users dereferencing fc->sb */
		down_write(&fc->killsb);
		fc->sb = NULL;
		up_write(&fc->killsb);
	}
}
/* .kill_sb for "fuse": fuse teardown first, then generic teardown */
static void fuse_kill_sb_anon(struct super_block *sb)
{
	fuse_sb_destroy(sb);
	kill_anon_super(sb);
}
/* The plain "fuse" type: no block device, mountable in user namespaces */
static struct file_system_type fuse_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "fuse",
	.fs_flags	= FS_HAS_SUBTYPE | FS_USERNS_MOUNT,
	.mount		= fuse_mount,
	.kill_sb	= fuse_kill_sb_anon,
};
MODULE_ALIAS_FS("fuse");
#ifdef CONFIG_BLOCK
/* .mount for the "fuseblk" type: backed by a real block device */
static struct dentry *fuse_mount_blk(struct file_system_type *fs_type,
		       int flags, const char *dev_name,
		       void *raw_data)
{
	return mount_bdev(fs_type, flags, dev_name, raw_data, fuse_fill_super);
}

/* .kill_sb for "fuseblk": fuse teardown, then block-super teardown */
static void fuse_kill_sb_blk(struct super_block *sb)
{
	fuse_sb_destroy(sb);
	kill_block_super(sb);
}

static struct file_system_type fuseblk_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "fuseblk",
	.mount		= fuse_mount_blk,
	.kill_sb	= fuse_kill_sb_blk,
	.fs_flags	= FS_REQUIRES_DEV | FS_HAS_SUBTYPE,
};
MODULE_ALIAS_FS("fuseblk");

static inline int register_fuseblk(void)
{
	return register_filesystem(&fuseblk_fs_type);
}

static inline void unregister_fuseblk(void)
{
	unregister_filesystem(&fuseblk_fs_type);
}
#else
/* Without CONFIG_BLOCK, fuseblk (de)registration compiles to no-ops */
static inline int register_fuseblk(void)
{
	return 0;
}

static inline void unregister_fuseblk(void)
{
}
#endif
  1105. static void fuse_inode_init_once(void *foo)
  1106. {
  1107. struct inode *inode = foo;
  1108. inode_init_once(inode);
  1109. }
  1110. static int __init fuse_fs_init(void)
  1111. {
  1112. int err;
  1113. fuse_inode_cachep = kmem_cache_create("fuse_inode",
  1114. sizeof(struct fuse_inode), 0,
  1115. SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT|SLAB_RECLAIM_ACCOUNT,
  1116. fuse_inode_init_once);
  1117. err = -ENOMEM;
  1118. if (!fuse_inode_cachep)
  1119. goto out;
  1120. err = register_fuseblk();
  1121. if (err)
  1122. goto out2;
  1123. err = register_filesystem(&fuse_fs_type);
  1124. if (err)
  1125. goto out3;
  1126. return 0;
  1127. out3:
  1128. unregister_fuseblk();
  1129. out2:
  1130. kmem_cache_destroy(fuse_inode_cachep);
  1131. out:
  1132. return err;
  1133. }
/* Inverse of fuse_fs_init(): unregister both types, destroy the cache */
static void fuse_fs_cleanup(void)
{
	unregister_filesystem(&fuse_fs_type);
	unregister_fuseblk();

	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(fuse_inode_cachep);
}
  1145. static struct kobject *fuse_kobj;
  1146. static int fuse_sysfs_init(void)
  1147. {
  1148. int err;
  1149. fuse_kobj = kobject_create_and_add("fuse", fs_kobj);
  1150. if (!fuse_kobj) {
  1151. err = -ENOMEM;
  1152. goto out_err;
  1153. }
  1154. err = sysfs_create_mount_point(fuse_kobj, "connections");
  1155. if (err)
  1156. goto out_fuse_unregister;
  1157. return 0;
  1158. out_fuse_unregister:
  1159. kobject_put(fuse_kobj);
  1160. out_err:
  1161. return err;
  1162. }
/* Inverse of fuse_sysfs_init(): remove the mount point, drop the kobject */
static void fuse_sysfs_cleanup(void)
{
	sysfs_remove_mount_point(fuse_kobj, "connections");
	kobject_put(fuse_kobj);
}
/*
 * Module entry point: bring up the filesystem types, the /dev/fuse
 * device, sysfs and the control filesystem, unwinding in reverse
 * order if any step fails.
 */
static int __init fuse_init(void)
{
	int res;

	printk(KERN_INFO "fuse init (API version %i.%i)\n",
	       FUSE_KERNEL_VERSION, FUSE_KERNEL_MINOR_VERSION);

	INIT_LIST_HEAD(&fuse_conn_list);
	res = fuse_fs_init();
	if (res)
		goto err;

	res = fuse_dev_init();
	if (res)
		goto err_fs_cleanup;

	res = fuse_sysfs_init();
	if (res)
		goto err_dev_cleanup;

	res = fuse_ctl_init();
	if (res)
		goto err_sysfs_cleanup;

	/* Clamp the module-parameter background request limits */
	sanitize_global_limit(&max_user_bgreq);
	sanitize_global_limit(&max_user_congthresh);

	return 0;

 err_sysfs_cleanup:
	fuse_sysfs_cleanup();
 err_dev_cleanup:
	fuse_dev_cleanup();
 err_fs_cleanup:
	fuse_fs_cleanup();
 err:
	return res;
}
/* Module exit: tear down in reverse order of fuse_init() */
static void __exit fuse_exit(void)
{
	printk(KERN_DEBUG "fuse exit\n");

	fuse_ctl_cleanup();
	fuse_sysfs_cleanup();
	fuse_fs_cleanup();
	fuse_dev_cleanup();
}

module_init(fuse_init);
module_exit(fuse_exit);