/*
 * linux/fs/hfs/inode.c
 *
 * Copyright (C) 1995-1997  Paul H. Hargrove
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 * This file may be distributed under the terms of the GNU General Public License.
 *
 * This file contains inode-related functions which do not depend on
 * which scheme is being used to represent forks.
 *
 * Based on the minix file system code, (C) 1991, 1992 by Linus Torvalds
 */

#include <linux/pagemap.h>
#include <linux/mpage.h>
#include <linux/sched.h>
#include <linux/uio.h>

#include "hfs_fs.h"
#include "btree.h"

static const struct file_operations hfs_file_operations;
static const struct inode_operations hfs_file_inode_operations;

/*================ Variable-like macros ================*/

#define HFS_VALID_MODE_BITS  (S_IFREG | S_IFDIR | S_IRWXUGO)

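/*
 * Page I/O is delegated to the generic buffer-layer helpers; hfs_get_block()
 * maps a file block to a disk block for them.
 */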
static int hfs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, hfs_get_block, wbc);
}

static int hfs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, hfs_get_block);
}

static void hfs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		hfs_file_truncate(inode);
	}
}

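/*
 * cont_write_begin() zero-fills the gap between the current on-disk size
 * (phys_size) and the write position before the data is copied in.
 */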
static int hfs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	int ret;

	*pagep = NULL;
	ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
				hfs_get_block,
				&HFS_I(mapping->host)->phys_size);
	if (unlikely(ret))
		hfs_write_failed(mapping, pos + len);

	return ret;
}

static sector_t hfs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, hfs_get_block);
}

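/*
 * Called only for the special B-tree inodes (extents and catalog): before
 * the VM may release the page, drop any cached b-tree nodes backed by it,
 * unless one of them is still referenced.
 */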
static int hfs_releasepage(struct page *page, gfp_t mask)
{
	struct inode *inode = page->mapping->host;
	struct super_block *sb = inode->i_sb;
	struct hfs_btree *tree;
	struct hfs_bnode *node;
	u32 nidx;
	int i, res = 1;

	switch (inode->i_ino) {
	case HFS_EXT_CNID:
		tree = HFS_SB(sb)->ext_tree;
		break;
	case HFS_CAT_CNID:
		tree = HFS_SB(sb)->cat_tree;
		break;
	default:
		BUG();
		return 0;
	}

	if (!tree)
		return 0;

	if (tree->node_size >= PAGE_CACHE_SIZE) {
		nidx = page->index >> (tree->node_size_shift - PAGE_CACHE_SHIFT);
		spin_lock(&tree->hash_lock);
		node = hfs_bnode_findhash(tree, nidx);
		if (!node)
			;
		else if (atomic_read(&node->refcnt))
			res = 0;
		if (res && node) {
			hfs_bnode_unhash(node);
			hfs_bnode_free(node);
		}
		spin_unlock(&tree->hash_lock);
	} else {
		nidx = page->index << (PAGE_CACHE_SHIFT - tree->node_size_shift);
		i = 1 << (PAGE_CACHE_SHIFT - tree->node_size_shift);
		spin_lock(&tree->hash_lock);
		do {
			node = hfs_bnode_findhash(tree, nidx++);
			if (!node)
				continue;
			if (atomic_read(&node->refcnt)) {
				res = 0;
				break;
			}
			hfs_bnode_unhash(node);
			hfs_bnode_free(node);
		} while (--i && nidx < tree->node_count);
		spin_unlock(&tree->hash_lock);
	}
	return res ? try_to_free_buffers(page) : 0;
}

static ssize_t hfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
		loff_t offset)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = file_inode(file)->i_mapping->host;
	size_t count = iov_iter_count(iter);
	ssize_t ret;

	ret = blockdev_direct_IO(iocb, inode, iter, offset, hfs_get_block);

	/*
	 * In case of error extending write may have instantiated a few
	 * blocks outside i_size. Trim these off again.
	 */
	if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
		loff_t isize = i_size_read(inode);
		loff_t end = offset + count;

		if (end > isize)
			hfs_write_failed(mapping, end);
	}

	return ret;
}

static int hfs_writepages(struct address_space *mapping,
			  struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, hfs_get_block);
}

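/*
 * hfs_btree_aops is used for the extents and catalog B-tree files,
 * hfs_aops for ordinary file data.
 */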
const struct address_space_operations hfs_btree_aops = {
	.readpage	= hfs_readpage,
	.writepage	= hfs_writepage,
	.write_begin	= hfs_write_begin,
	.write_end	= generic_write_end,
	.bmap		= hfs_bmap,
	.releasepage	= hfs_releasepage,
};

const struct address_space_operations hfs_aops = {
	.readpage	= hfs_readpage,
	.writepage	= hfs_writepage,
	.write_begin	= hfs_write_begin,
	.write_end	= generic_write_end,
	.bmap		= hfs_bmap,
	.direct_IO	= hfs_direct_IO,
	.writepages	= hfs_writepages,
};

/*
 * hfs_new_inode
 */
struct inode *hfs_new_inode(struct inode *dir, struct qstr *name, umode_t mode)
{
	struct super_block *sb = dir->i_sb;
	struct inode *inode = new_inode(sb);
	if (!inode)
		return NULL;

	mutex_init(&HFS_I(inode)->extents_lock);
	INIT_LIST_HEAD(&HFS_I(inode)->open_dir_list);
	hfs_cat_build_key(sb, (btree_key *)&HFS_I(inode)->cat_key, dir->i_ino, name);
	inode->i_ino = HFS_SB(sb)->next_id++;
	inode->i_mode = mode;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	set_nlink(inode, 1);
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
	HFS_I(inode)->flags = 0;
	HFS_I(inode)->rsrc_inode = NULL;
	HFS_I(inode)->fs_blocks = 0;
	if (S_ISDIR(mode)) {
		inode->i_size = 2;
		HFS_SB(sb)->folder_count++;
		if (dir->i_ino == HFS_ROOT_CNID)
			HFS_SB(sb)->root_dirs++;
		inode->i_op = &hfs_dir_inode_operations;
		inode->i_fop = &hfs_dir_operations;
		inode->i_mode |= S_IRWXUGO;
		inode->i_mode &= ~HFS_SB(inode->i_sb)->s_dir_umask;
	} else if (S_ISREG(mode)) {
		HFS_I(inode)->clump_blocks = HFS_SB(sb)->clumpablks;
		HFS_SB(sb)->file_count++;
		if (dir->i_ino == HFS_ROOT_CNID)
			HFS_SB(sb)->root_files++;
		inode->i_op = &hfs_file_inode_operations;
		inode->i_fop = &hfs_file_operations;
		inode->i_mapping->a_ops = &hfs_aops;
		inode->i_mode |= S_IRUGO|S_IXUGO;
		if (mode & S_IWUSR)
			inode->i_mode |= S_IWUGO;
		inode->i_mode &= ~HFS_SB(inode->i_sb)->s_file_umask;
		HFS_I(inode)->phys_size = 0;
		HFS_I(inode)->alloc_blocks = 0;
		HFS_I(inode)->first_blocks = 0;
		HFS_I(inode)->cached_start = 0;
		HFS_I(inode)->cached_blocks = 0;
		memset(HFS_I(inode)->first_extents, 0, sizeof(hfs_extent_rec));
		memset(HFS_I(inode)->cached_extents, 0, sizeof(hfs_extent_rec));
	}
	insert_inode_hash(inode);
	mark_inode_dirty(inode);
	set_bit(HFS_FLG_MDB_DIRTY, &HFS_SB(sb)->flags);
	hfs_mark_mdb_dirty(sb);

	return inode;
}

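/*
 * Account for a catalog entry going away: update the folder/file counters
 * in the MDB and, for an unlinked regular file, free its allocation blocks.
 */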
void hfs_delete_inode(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	hfs_dbg(INODE, "delete_inode: %lu\n", inode->i_ino);
	if (S_ISDIR(inode->i_mode)) {
		HFS_SB(sb)->folder_count--;
		if (HFS_I(inode)->cat_key.ParID == cpu_to_be32(HFS_ROOT_CNID))
			HFS_SB(sb)->root_dirs--;
		set_bit(HFS_FLG_MDB_DIRTY, &HFS_SB(sb)->flags);
		hfs_mark_mdb_dirty(sb);
		return;
	}
	HFS_SB(sb)->file_count--;
	if (HFS_I(inode)->cat_key.ParID == cpu_to_be32(HFS_ROOT_CNID))
		HFS_SB(sb)->root_files--;
	if (S_ISREG(inode->i_mode)) {
		if (!inode->i_nlink) {
			inode->i_size = 0;
			hfs_file_truncate(inode);
		}
	}
	set_bit(HFS_FLG_MDB_DIRTY, &HFS_SB(sb)->flags);
	hfs_mark_mdb_dirty(sb);
}

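/*
 * Initialize the in-core fork state (first extent record, block counts
 * and sizes) from the on-disk fork fields of a catalog record.
 */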
void hfs_inode_read_fork(struct inode *inode, struct hfs_extent *ext,
			 __be32 __log_size, __be32 phys_size, u32 clump_size)
{
	struct super_block *sb = inode->i_sb;
	u32 log_size = be32_to_cpu(__log_size);
	u16 count;
	int i;

	memcpy(HFS_I(inode)->first_extents, ext, sizeof(hfs_extent_rec));
	for (count = 0, i = 0; i < 3; i++)
		count += be16_to_cpu(ext[i].count);
	HFS_I(inode)->first_blocks = count;

	inode->i_size = HFS_I(inode)->phys_size = log_size;
	HFS_I(inode)->fs_blocks = (log_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
	inode_set_bytes(inode, HFS_I(inode)->fs_blocks << sb->s_blocksize_bits);
	HFS_I(inode)->alloc_blocks = be32_to_cpu(phys_size) /
			HFS_SB(sb)->alloc_blksz;
	HFS_I(inode)->clump_blocks = clump_size / HFS_SB(sb)->alloc_blksz;
	if (!HFS_I(inode)->clump_blocks)
		HFS_I(inode)->clump_blocks = HFS_SB(sb)->clumpablks;
}

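/*
 * Cookie passed to iget5_locked(): hfs_test_inode() matches an inode by
 * the CNID in the catalog record, hfs_read_inode() fills a new inode
 * from it.
 */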
struct hfs_iget_data {
	struct hfs_cat_key *key;
	hfs_cat_rec *rec;
};

static int hfs_test_inode(struct inode *inode, void *data)
{
	struct hfs_iget_data *idata = data;
	hfs_cat_rec *rec;

	rec = idata->rec;
	switch (rec->type) {
	case HFS_CDR_DIR:
		return inode->i_ino == be32_to_cpu(rec->dir.DirID);
	case HFS_CDR_FIL:
		return inode->i_ino == be32_to_cpu(rec->file.FlNum);
	default:
		BUG();
		return 1;
	}
}

/*
 * hfs_read_inode
 */
static int hfs_read_inode(struct inode *inode, void *data)
{
	struct hfs_iget_data *idata = data;
	struct hfs_sb_info *hsb = HFS_SB(inode->i_sb);
	hfs_cat_rec *rec;

	HFS_I(inode)->flags = 0;
	HFS_I(inode)->rsrc_inode = NULL;
	mutex_init(&HFS_I(inode)->extents_lock);
	INIT_LIST_HEAD(&HFS_I(inode)->open_dir_list);

	/* Initialize the inode */
	inode->i_uid = hsb->s_uid;
	inode->i_gid = hsb->s_gid;
	set_nlink(inode, 1);

	if (idata->key)
		HFS_I(inode)->cat_key = *idata->key;
	else
		HFS_I(inode)->flags |= HFS_FLG_RSRC;
	HFS_I(inode)->tz_secondswest = sys_tz.tz_minuteswest * 60;

	rec = idata->rec;
	switch (rec->type) {
	case HFS_CDR_FIL:
		if (!HFS_IS_RSRC(inode)) {
			hfs_inode_read_fork(inode, rec->file.ExtRec, rec->file.LgLen,
					    rec->file.PyLen, be16_to_cpu(rec->file.ClpSize));
		} else {
			hfs_inode_read_fork(inode, rec->file.RExtRec, rec->file.RLgLen,
					    rec->file.RPyLen, be16_to_cpu(rec->file.ClpSize));
		}

		inode->i_ino = be32_to_cpu(rec->file.FlNum);
		inode->i_mode = S_IRUGO | S_IXUGO;
		if (!(rec->file.Flags & HFS_FIL_LOCK))
			inode->i_mode |= S_IWUGO;
		inode->i_mode &= ~hsb->s_file_umask;
		inode->i_mode |= S_IFREG;
		inode->i_ctime = inode->i_atime = inode->i_mtime =
				hfs_m_to_utime(rec->file.MdDat);
		inode->i_op = &hfs_file_inode_operations;
		inode->i_fop = &hfs_file_operations;
		inode->i_mapping->a_ops = &hfs_aops;
		break;
	case HFS_CDR_DIR:
		inode->i_ino = be32_to_cpu(rec->dir.DirID);
		inode->i_size = be16_to_cpu(rec->dir.Val) + 2;
		HFS_I(inode)->fs_blocks = 0;
		inode->i_mode = S_IFDIR | (S_IRWXUGO & ~hsb->s_dir_umask);
		inode->i_ctime = inode->i_atime = inode->i_mtime =
				hfs_m_to_utime(rec->dir.MdDat);
		inode->i_op = &hfs_dir_inode_operations;
		inode->i_fop = &hfs_dir_operations;
		break;
	default:
		make_bad_inode(inode);
	}
	return 0;
}

/*
 * hfs_iget()
 *
 * Given the superblock of an HFS filesystem, a catalog 'key' and the
 * matching catalog record 'rec', return the in-core inode for that
 * file or directory, or NULL on failure.
 */
struct inode *hfs_iget(struct super_block *sb, struct hfs_cat_key *key, hfs_cat_rec *rec)
{
	struct hfs_iget_data data = { key, rec };
	struct inode *inode;
	u32 cnid;

	switch (rec->type) {
	case HFS_CDR_DIR:
		cnid = be32_to_cpu(rec->dir.DirID);
		break;
	case HFS_CDR_FIL:
		cnid = be32_to_cpu(rec->file.FlNum);
		break;
	default:
		return NULL;
	}
	inode = iget5_locked(sb, cnid, hfs_test_inode, hfs_read_inode, &data);
	if (inode && (inode->i_state & I_NEW))
		unlock_new_inode(inode);
	return inode;
}

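/*
 * Copy the in-core extent record and sizes back into the on-disk
 * (big-endian) fork fields of a catalog record.
 */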
void hfs_inode_write_fork(struct inode *inode, struct hfs_extent *ext,
			  __be32 *log_size, __be32 *phys_size)
{
	memcpy(ext, HFS_I(inode)->first_extents, sizeof(hfs_extent_rec));

	if (log_size)
		*log_size = cpu_to_be32(inode->i_size);
	if (phys_size)
		*phys_size = cpu_to_be32(HFS_I(inode)->alloc_blocks *
					 HFS_SB(inode->i_sb)->alloc_blksz);
}

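/*
 * Write an inode back to its catalog record. The extents and catalog tree
 * inodes have no catalog record of their own; for them this just flushes
 * the corresponding B-tree header.
 */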
int hfs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct inode *main_inode = inode;
	struct hfs_find_data fd;
	hfs_cat_rec rec;
	int res;

	hfs_dbg(INODE, "hfs_write_inode: %lu\n", inode->i_ino);
	res = hfs_ext_write_extent(inode);
	if (res)
		return res;

	if (inode->i_ino < HFS_FIRSTUSER_CNID) {
		switch (inode->i_ino) {
		case HFS_ROOT_CNID:
			break;
		case HFS_EXT_CNID:
			hfs_btree_write(HFS_SB(inode->i_sb)->ext_tree);
			return 0;
		case HFS_CAT_CNID:
			hfs_btree_write(HFS_SB(inode->i_sb)->cat_tree);
			return 0;
		default:
			BUG();
			return -EIO;
		}
	}

	if (HFS_IS_RSRC(inode))
		main_inode = HFS_I(inode)->rsrc_inode;

	if (!main_inode->i_nlink)
		return 0;

	if (hfs_find_init(HFS_SB(main_inode->i_sb)->cat_tree, &fd))
		/* panic? */
		return -EIO;

	fd.search_key->cat = HFS_I(main_inode)->cat_key;
	if (hfs_brec_find(&fd))
		/* panic? */
		goto out;

	if (S_ISDIR(main_inode->i_mode)) {
		if (fd.entrylength < sizeof(struct hfs_cat_dir))
			/* panic? */;
		hfs_bnode_read(fd.bnode, &rec, fd.entryoffset,
			       sizeof(struct hfs_cat_dir));
		if (rec.type != HFS_CDR_DIR ||
		    be32_to_cpu(rec.dir.DirID) != inode->i_ino) {
		}

		rec.dir.MdDat = hfs_u_to_mtime(inode->i_mtime);
		rec.dir.Val = cpu_to_be16(inode->i_size - 2);

		hfs_bnode_write(fd.bnode, &rec, fd.entryoffset,
				sizeof(struct hfs_cat_dir));
	} else if (HFS_IS_RSRC(inode)) {
		hfs_bnode_read(fd.bnode, &rec, fd.entryoffset,
			       sizeof(struct hfs_cat_file));
		hfs_inode_write_fork(inode, rec.file.RExtRec,
				     &rec.file.RLgLen, &rec.file.RPyLen);
		hfs_bnode_write(fd.bnode, &rec, fd.entryoffset,
				sizeof(struct hfs_cat_file));
	} else {
		if (fd.entrylength < sizeof(struct hfs_cat_file))
			/* panic? */;
		hfs_bnode_read(fd.bnode, &rec, fd.entryoffset,
			       sizeof(struct hfs_cat_file));
		if (rec.type != HFS_CDR_FIL ||
		    be32_to_cpu(rec.file.FlNum) != inode->i_ino) {
		}

		if (inode->i_mode & S_IWUSR)
			rec.file.Flags &= ~HFS_FIL_LOCK;
		else
			rec.file.Flags |= HFS_FIL_LOCK;
		hfs_inode_write_fork(inode, rec.file.ExtRec, &rec.file.LgLen, &rec.file.PyLen);
		rec.file.MdDat = hfs_u_to_mtime(inode->i_mtime);

		hfs_bnode_write(fd.bnode, &rec, fd.entryoffset,
				sizeof(struct hfs_cat_file));
	}
out:
	hfs_find_exit(&fd);
	return 0;
}

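/*
 * Lookup of the magic "rsrc" name inside a file gives access to its
 * resource fork: a shadow inode is built from the same catalog record
 * and cross-linked with the data-fork inode.
 */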
static struct dentry *hfs_file_lookup(struct inode *dir, struct dentry *dentry,
				      unsigned int flags)
{
	struct inode *inode = NULL;
	hfs_cat_rec rec;
	struct hfs_find_data fd;
	int res;

	if (HFS_IS_RSRC(dir) || strcmp(dentry->d_name.name, "rsrc"))
		goto out;

	inode = HFS_I(dir)->rsrc_inode;
	if (inode)
		goto out;

	inode = new_inode(dir->i_sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	res = hfs_find_init(HFS_SB(dir->i_sb)->cat_tree, &fd);
	if (res) {
		iput(inode);
		return ERR_PTR(res);
	}
	fd.search_key->cat = HFS_I(dir)->cat_key;
	res = hfs_brec_read(&fd, &rec, sizeof(rec));
	if (!res) {
		struct hfs_iget_data idata = { NULL, &rec };
		hfs_read_inode(inode, &idata);
	}
	hfs_find_exit(&fd);
	if (res) {
		iput(inode);
		return ERR_PTR(res);
	}
	HFS_I(inode)->rsrc_inode = dir;
	HFS_I(dir)->rsrc_inode = inode;
	igrab(dir);
	hlist_add_fake(&inode->i_hash);
	mark_inode_dirty(inode);
out:
	d_add(dentry, inode);
	return NULL;
}

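/*
 * Final teardown of an inode: a resource-fork inode detaches itself from,
 * and drops the reference it holds on, its data-fork inode.
 */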
void hfs_evict_inode(struct inode *inode)
{
	truncate_inode_pages_final(&inode->i_data);
	clear_inode(inode);
	if (HFS_IS_RSRC(inode) && HFS_I(inode)->rsrc_inode) {
		HFS_I(HFS_I(inode)->rsrc_inode)->rsrc_inode = NULL;
		iput(HFS_I(inode)->rsrc_inode);
	}
}

static int hfs_file_open(struct inode *inode, struct file *file)
{
	if (HFS_IS_RSRC(inode))
		inode = HFS_I(inode)->rsrc_inode;
	atomic_inc(&HFS_I(inode)->opencnt);
	return 0;
}

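/*
 * On the last close of a file, truncate away any blocks that were
 * preallocated beyond its current size.
 */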
static int hfs_file_release(struct inode *inode, struct file *file)
{
	//struct super_block *sb = inode->i_sb;

	if (HFS_IS_RSRC(inode))
		inode = HFS_I(inode)->rsrc_inode;
	if (atomic_dec_and_test(&HFS_I(inode)->opencnt)) {
		mutex_lock(&inode->i_mutex);
		hfs_file_truncate(inode);
		//if (inode->i_flags & S_DEAD) {
		//	hfs_delete_cat(inode->i_ino, HFSPLUS_SB(sb).hidden_dir, NULL);
		//	hfs_delete_inode(inode);
		//}
		mutex_unlock(&inode->i_mutex);
	}
	return 0;
}

/*
 * hfs_inode_setattr()
 *
 * Based very closely on fs/msdos/inode.c by Werner Almesberger
 *
 * This is the setattr() entry in the inode_operations structure for
 * HFS file systems. The purpose is to take the changes made to an
 * inode and apply them in a filesystem-dependent manner. In this
 * case the process has a few tasks to do:
 *  1) prevent changes to the i_uid and i_gid fields.
 *  2) map file permissions to the closest allowable permissions.
 *  3) Since multiple Linux files can share the same on-disk inode under
 *     HFS (for instance the data and resource forks of a file) a change
 *     to permissions must be applied to all other in-core inodes which
 *     correspond to the same HFS file.
 */
int hfs_inode_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct hfs_sb_info *hsb = HFS_SB(inode->i_sb);
	int error;

	error = inode_change_ok(inode, attr); /* basic permission checks */
	if (error)
		return error;

	/* no uid/gid changes and limit which mode bits can be set */
	if (((attr->ia_valid & ATTR_UID) &&
	     (!uid_eq(attr->ia_uid, hsb->s_uid))) ||
	    ((attr->ia_valid & ATTR_GID) &&
	     (!gid_eq(attr->ia_gid, hsb->s_gid))) ||
	    ((attr->ia_valid & ATTR_MODE) &&
	     ((S_ISDIR(inode->i_mode) &&
	       (attr->ia_mode != inode->i_mode)) ||
	      (attr->ia_mode & ~HFS_VALID_MODE_BITS)))) {
		return hsb->s_quiet ? 0 : error;
	}

	if (attr->ia_valid & ATTR_MODE) {
		/* Only the 'w' bits can ever change and only all together. */
		if (attr->ia_mode & S_IWUSR)
			attr->ia_mode = inode->i_mode | S_IWUGO;
		else
			attr->ia_mode = inode->i_mode & ~S_IWUGO;
		attr->ia_mode &= S_ISDIR(inode->i_mode) ?
				~hsb->s_dir_umask : ~hsb->s_file_umask;
	}

	if ((attr->ia_valid & ATTR_SIZE) &&
	    attr->ia_size != i_size_read(inode)) {
		inode_dio_wait(inode);
		error = inode_newsize_ok(inode, attr->ia_size);
		if (error)
			return error;

		truncate_setsize(inode, attr->ia_size);
		hfs_file_truncate(inode);
	}

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}

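/*
 * fsync(): flush the data pages, then the inode, the delayed MDB writeback
 * work and finally the underlying block device.
 */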
static int hfs_file_fsync(struct file *filp, loff_t start, loff_t end,
			  int datasync)
{
	struct inode *inode = filp->f_mapping->host;
	struct super_block *sb;
	int ret, err;

	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (ret)
		return ret;
	mutex_lock(&inode->i_mutex);

	/* sync the inode to buffers */
	ret = write_inode_now(inode, 0);

	/* sync the superblock to buffers */
	sb = inode->i_sb;
	flush_delayed_work(&HFS_SB(sb)->mdb_work);
	/* .. finally sync the buffers to disk */
	err = sync_blockdev(sb->s_bdev);
	if (!ret)
		ret = err;
	mutex_unlock(&inode->i_mutex);
	return ret;
}

static const struct file_operations hfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= generic_file_write_iter,
	.mmap		= generic_file_mmap,
	.splice_read	= generic_file_splice_read,
	.fsync		= hfs_file_fsync,
	.open		= hfs_file_open,
	.release	= hfs_file_release,
};

static const struct inode_operations hfs_file_inode_operations = {
	.lookup		= hfs_file_lookup,
	.setattr	= hfs_inode_setattr,
	.setxattr	= hfs_setxattr,
	.getxattr	= hfs_getxattr,
	.listxattr	= hfs_listxattr,
};