/*
 *  linux/fs/affs/file.c
 *
 *  (c) 1996  Hans-Joachim Widmaier - Rewritten
 *
 *  (C) 1993  Ray Burr - Modified for Amiga FFS filesystem.
 *
 *  (C) 1992  Eric Youngdale Modified for ISO 9660 filesystem.
 *
 *  (C) 1991  Linus Torvalds - minix filesystem
 *
 *  affs regular file handling primitives
 */

#include <linux/uio.h>
#include "affs.h"

static struct buffer_head *affs_get_extblock_slow(struct inode *inode, u32 ext);

static int
affs_file_open(struct inode *inode, struct file *filp)
{
	pr_debug("open(%lu,%d)\n",
		 inode->i_ino, atomic_read(&AFFS_I(inode)->i_opencnt));
	atomic_inc(&AFFS_I(inode)->i_opencnt);
	return 0;
}

static int
affs_file_release(struct inode *inode, struct file *filp)
{
	pr_debug("release(%lu, %d)\n",
		 inode->i_ino, atomic_read(&AFFS_I(inode)->i_opencnt));

	if (atomic_dec_and_test(&AFFS_I(inode)->i_opencnt)) {
		inode_lock(inode);
		if (inode->i_size != AFFS_I(inode)->mmu_private)
			affs_truncate(inode);
		affs_free_prealloc(inode);
		inode_unlock(inode);
	}

	return 0;
}
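
/*
 * Extension blocks of a file form a singly linked list on disk, so a
 * naive seek to extension block N would have to read N blocks.  Each
 * inode therefore keeps a small two-level cache in one page: a linear
 * array holding the key of every 2^i_lc_shift-th extension block, plus
 * an associative array of recently used (ext index, key) pairs.  This
 * helper makes sure the linear cache covers index lc_idx, widening its
 * stride (i_lc_shift) whenever the file outgrows the fixed-size array.
 */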
static int
affs_grow_extcache(struct inode *inode, u32 lc_idx)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh;
	u32 lc_max;
	int i, j, key;

	if (!AFFS_I(inode)->i_lc) {
		char *ptr = (char *)get_zeroed_page(GFP_NOFS);
		if (!ptr)
			return -ENOMEM;
		AFFS_I(inode)->i_lc = (u32 *)ptr;
		AFFS_I(inode)->i_ac = (struct affs_ext_key *)(ptr + AFFS_CACHE_SIZE / 2);
	}

	lc_max = AFFS_LC_SIZE << AFFS_I(inode)->i_lc_shift;

	if (AFFS_I(inode)->i_extcnt > lc_max) {
		u32 lc_shift, lc_mask, tmp, off;

		/* need to recalculate linear cache, start from old size */
		lc_shift = AFFS_I(inode)->i_lc_shift;
		tmp = (AFFS_I(inode)->i_extcnt / AFFS_LC_SIZE) >> lc_shift;
		for (; tmp; tmp >>= 1)
			lc_shift++;
		lc_mask = (1 << lc_shift) - 1;

		/* fix idx and old size to new shift */
		lc_idx >>= (lc_shift - AFFS_I(inode)->i_lc_shift);
		AFFS_I(inode)->i_lc_size >>= (lc_shift - AFFS_I(inode)->i_lc_shift);

		/* first shrink old cache to make more space */
		off = 1 << (lc_shift - AFFS_I(inode)->i_lc_shift);
		for (i = 1, j = off; j < AFFS_LC_SIZE; i++, j += off)
			AFFS_I(inode)->i_ac[i] = AFFS_I(inode)->i_ac[j];

		AFFS_I(inode)->i_lc_shift = lc_shift;
		AFFS_I(inode)->i_lc_mask = lc_mask;
	}

	/* fill cache to the needed index */
	i = AFFS_I(inode)->i_lc_size;
	AFFS_I(inode)->i_lc_size = lc_idx + 1;
	for (; i <= lc_idx; i++) {
		if (!i) {
			AFFS_I(inode)->i_lc[0] = inode->i_ino;
			continue;
		}
		key = AFFS_I(inode)->i_lc[i - 1];
		j = AFFS_I(inode)->i_lc_mask + 1;
		// unlock cache
		for (; j > 0; j--) {
			bh = affs_bread(sb, key);
			if (!bh)
				goto err;
			key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
			affs_brelse(bh);
		}
		// lock cache
		AFFS_I(inode)->i_lc[i] = key;
	}

	return 0;

err:
	// lock cache
	return -EIO;
}
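
/*
 * Allocate and initialize a new extension block and chain it behind @bh,
 * the file's current last extension block; both blocks get their
 * checksums fixed up and are marked dirty.
 */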
static struct buffer_head *
affs_alloc_extblock(struct inode *inode, struct buffer_head *bh, u32 ext)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *new_bh;
	u32 blocknr, tmp;

	blocknr = affs_alloc_block(inode, bh->b_blocknr);
	if (!blocknr)
		return ERR_PTR(-ENOSPC);

	new_bh = affs_getzeroblk(sb, blocknr);
	if (!new_bh) {
		affs_free_block(sb, blocknr);
		return ERR_PTR(-EIO);
	}

	AFFS_HEAD(new_bh)->ptype = cpu_to_be32(T_LIST);
	AFFS_HEAD(new_bh)->key = cpu_to_be32(blocknr);
	AFFS_TAIL(sb, new_bh)->stype = cpu_to_be32(ST_FILE);
	AFFS_TAIL(sb, new_bh)->parent = cpu_to_be32(inode->i_ino);
	affs_fix_checksum(sb, new_bh);
	mark_buffer_dirty_inode(new_bh, inode);

	tmp = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
	if (tmp)
		affs_warning(sb, "alloc_ext", "previous extension set (%x)", tmp);
	AFFS_TAIL(sb, bh)->extension = cpu_to_be32(blocknr);
	affs_adjust_checksum(bh, blocknr - tmp);
	mark_buffer_dirty_inode(bh, inode);

	AFFS_I(inode)->i_extcnt++;
	mark_inode_dirty(inode);

	return new_bh;
}

static inline struct buffer_head *
affs_get_extblock(struct inode *inode, u32 ext)
{
	/* inline the simplest case: same extended block as last time */
	struct buffer_head *bh = AFFS_I(inode)->i_ext_bh;
	if (ext == AFFS_I(inode)->i_ext_last)
		get_bh(bh);
	else
		/* we have to do more (not inlined) */
		bh = affs_get_extblock_slow(inode, ext);

	return bh;
}
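
/*
 * Slow path of affs_get_extblock(): if @ext does not directly follow
 * the cached block, look its key up in the linear cache (which holds
 * every 2^i_lc_shift-th key) or in the associative cache, and otherwise
 * walk the on-disk chain from the nearest key found there.  If @ext is
 * one past the current count, a new extension block is allocated.
 */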
static struct buffer_head *
affs_get_extblock_slow(struct inode *inode, u32 ext)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh;
	u32 ext_key;
	u32 lc_idx, lc_off, ac_idx;
	u32 tmp, idx;

	if (ext == AFFS_I(inode)->i_ext_last + 1) {
		/* read the next extended block from the current one */
		bh = AFFS_I(inode)->i_ext_bh;
		ext_key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
		if (ext < AFFS_I(inode)->i_extcnt)
			goto read_ext;
		BUG_ON(ext > AFFS_I(inode)->i_extcnt);
		bh = affs_alloc_extblock(inode, bh, ext);
		if (IS_ERR(bh))
			return bh;
		goto store_ext;
	}

	if (ext == 0) {
		/* we seek back to the file header block */
		ext_key = inode->i_ino;
		goto read_ext;
	}

	if (ext >= AFFS_I(inode)->i_extcnt) {
		struct buffer_head *prev_bh;

		/* allocate a new extended block */
		BUG_ON(ext > AFFS_I(inode)->i_extcnt);

		/* get previous extended block */
		prev_bh = affs_get_extblock(inode, ext - 1);
		if (IS_ERR(prev_bh))
			return prev_bh;
		bh = affs_alloc_extblock(inode, prev_bh, ext);
		affs_brelse(prev_bh);
		if (IS_ERR(bh))
			return bh;
		goto store_ext;
	}

again:
	/* check if there is an extended cache and whether it's large enough */
	lc_idx = ext >> AFFS_I(inode)->i_lc_shift;
	lc_off = ext & AFFS_I(inode)->i_lc_mask;

	if (lc_idx >= AFFS_I(inode)->i_lc_size) {
		int err;

		err = affs_grow_extcache(inode, lc_idx);
		if (err)
			return ERR_PTR(err);
		goto again;
	}

	/* every n'th key we find in the linear cache */
	if (!lc_off) {
		ext_key = AFFS_I(inode)->i_lc[lc_idx];
		goto read_ext;
	}

	/* maybe it's still in the associative cache */
	ac_idx = (ext - lc_idx - 1) & AFFS_AC_MASK;
	if (AFFS_I(inode)->i_ac[ac_idx].ext == ext) {
		ext_key = AFFS_I(inode)->i_ac[ac_idx].key;
		goto read_ext;
	}

	/* try to find one of the previous extended blocks */
	tmp = ext;
	idx = ac_idx;
	while (--tmp, --lc_off > 0) {
		idx = (idx - 1) & AFFS_AC_MASK;
		if (AFFS_I(inode)->i_ac[idx].ext == tmp) {
			ext_key = AFFS_I(inode)->i_ac[idx].key;
			goto find_ext;
		}
	}

	/* fall back to the linear cache */
	ext_key = AFFS_I(inode)->i_lc[lc_idx];
find_ext:
	/* read all extended blocks until we find the one we need */
	//unlock cache
	do {
		bh = affs_bread(sb, ext_key);
		if (!bh)
			goto err_bread;
		ext_key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
		affs_brelse(bh);
		tmp++;
	} while (tmp < ext);
	//lock cache

	/* store it in the associative cache */
	// recalculate ac_idx?
	AFFS_I(inode)->i_ac[ac_idx].ext = ext;
	AFFS_I(inode)->i_ac[ac_idx].key = ext_key;

read_ext:
	/* finally read the right extended block */
	//unlock cache
	bh = affs_bread(sb, ext_key);
	if (!bh)
		goto err_bread;
	//lock cache

store_ext:
	/* release old cached extended block and store the new one */
	affs_brelse(AFFS_I(inode)->i_ext_bh);
	AFFS_I(inode)->i_ext_last = ext;
	AFFS_I(inode)->i_ext_bh = bh;
	get_bh(bh);

	return bh;

err_bread:
	affs_brelse(bh);
	return ERR_PTR(-EIO);
}
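
/*
 * Map the logical file block @block for the page cache.  Every extension
 * block holds AFFS_SB(sb)->s_hashsize data block pointers, so the
 * extension index is block / s_hashsize and the slot within it is the
 * remainder.  With @create set, a new data block is allocated and hooked
 * into the extension block; the file's first data block is additionally
 * recorded in the header's first_data field.
 */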
static int
affs_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *ext_bh;
	u32 ext;

	pr_debug("%s(%lu, %llu)\n", __func__, inode->i_ino,
		 (unsigned long long)block);

	BUG_ON(block > (sector_t)0x7fffffffUL);

	if (block >= AFFS_I(inode)->i_blkcnt) {
		if (block > AFFS_I(inode)->i_blkcnt || !create)
			goto err_big;
	} else
		create = 0;

	//lock cache
	affs_lock_ext(inode);

	ext = (u32)block / AFFS_SB(sb)->s_hashsize;
	block -= ext * AFFS_SB(sb)->s_hashsize;
	ext_bh = affs_get_extblock(inode, ext);
	if (IS_ERR(ext_bh))
		goto err_ext;
	map_bh(bh_result, sb, (sector_t)be32_to_cpu(AFFS_BLOCK(sb, ext_bh, block)));

	if (create) {
		u32 blocknr = affs_alloc_block(inode, ext_bh->b_blocknr);
		if (!blocknr)
			goto err_alloc;
		set_buffer_new(bh_result);
		AFFS_I(inode)->mmu_private += AFFS_SB(sb)->s_data_blksize;
		AFFS_I(inode)->i_blkcnt++;

		/* store new block */
		if (bh_result->b_blocknr)
			affs_warning(sb, "get_block",
				     "block already set (%llx)",
				     (unsigned long long)bh_result->b_blocknr);
		AFFS_BLOCK(sb, ext_bh, block) = cpu_to_be32(blocknr);
		AFFS_HEAD(ext_bh)->block_count = cpu_to_be32(block + 1);
		affs_adjust_checksum(ext_bh, blocknr - bh_result->b_blocknr + 1);
		bh_result->b_blocknr = blocknr;

		if (!block) {
			/* insert first block into header block */
			u32 tmp = be32_to_cpu(AFFS_HEAD(ext_bh)->first_data);
			if (tmp)
				affs_warning(sb, "get_block", "first block already set (%d)", tmp);
			AFFS_HEAD(ext_bh)->first_data = cpu_to_be32(blocknr);
			affs_adjust_checksum(ext_bh, blocknr - tmp);
		}
	}

	affs_brelse(ext_bh);
	//unlock cache
	affs_unlock_ext(inode);
	return 0;

err_big:
	affs_error(inode->i_sb, "get_block", "strange block request %llu",
		   (unsigned long long)block);
	return -EIO;
err_ext:
	// unlock cache
	affs_unlock_ext(inode);
	return PTR_ERR(ext_bh);
err_alloc:
	brelse(ext_bh);
	clear_buffer_mapped(bh_result);
	bh_result->b_bdev = NULL;
	// unlock cache
	affs_unlock_ext(inode);
	return -ENOSPC;
}

static int affs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, affs_get_block, wbc);
}

static int affs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, affs_get_block);
}

static void affs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		affs_truncate(inode);
	}
}

static ssize_t
affs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	size_t count = iov_iter_count(iter);
	loff_t offset = iocb->ki_pos;
	ssize_t ret;

	if (iov_iter_rw(iter) == WRITE) {
		loff_t size = offset + count;

		if (AFFS_I(inode)->mmu_private < size)
			return 0;
	}

	ret = blockdev_direct_IO(iocb, inode, iter, affs_get_block);
	if (ret < 0 && iov_iter_rw(iter) == WRITE)
		affs_write_failed(mapping, offset + count);
	return ret;
}

static int affs_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	int ret;

	*pagep = NULL;
	ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
			       affs_get_block,
			       &AFFS_I(mapping->host)->mmu_private);
	if (unlikely(ret))
		affs_write_failed(mapping, pos + len);

	return ret;
}

static sector_t _affs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, affs_get_block);
}

const struct address_space_operations affs_aops = {
	.readpage = affs_readpage,
	.writepage = affs_writepage,
	.write_begin = affs_write_begin,
	.write_end = generic_write_end,
	.direct_IO = affs_direct_IO,
	.bmap = _affs_bmap
};

static inline struct buffer_head *
affs_bread_ino(struct inode *inode, int block, int create)
{
	struct buffer_head *bh, tmp_bh;
	int err;

	tmp_bh.b_state = 0;
	err = affs_get_block(inode, block, &tmp_bh, create);
	if (!err) {
		bh = affs_bread(inode->i_sb, tmp_bh.b_blocknr);
		if (bh) {
			bh->b_state |= tmp_bh.b_state;
			return bh;
		}
		err = -EIO;
	}
	return ERR_PTR(err);
}

static inline struct buffer_head *
affs_getzeroblk_ino(struct inode *inode, int block)
{
	struct buffer_head *bh, tmp_bh;
	int err;

	tmp_bh.b_state = 0;
	err = affs_get_block(inode, block, &tmp_bh, 1);
	if (!err) {
		bh = affs_getzeroblk(inode->i_sb, tmp_bh.b_blocknr);
		if (bh) {
			bh->b_state |= tmp_bh.b_state;
			return bh;
		}
		err = -EIO;
	}
	return ERR_PTR(err);
}

static inline struct buffer_head *
affs_getemptyblk_ino(struct inode *inode, int block)
{
	struct buffer_head *bh, tmp_bh;
	int err;

	tmp_bh.b_state = 0;
	err = affs_get_block(inode, block, &tmp_bh, 1);
	if (!err) {
		bh = affs_getemptyblk(inode->i_sb, tmp_bh.b_blocknr);
		if (bh) {
			bh->b_state |= tmp_bh.b_state;
			return bh;
		}
		err = -EIO;
	}
	return ERR_PTR(err);
}
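
/*
 * The _ofs variants below serve the Amiga Old File System (OFS), where
 * each data block begins with a header (ptype, key, sequence, size and
 * a pointer to the next data block) ahead of the payload.  File data is
 * therefore neither page-aligned nor contiguous on disk and cannot be
 * mapped directly through affs_get_block(); it is copied block by block.
 */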
static int
affs_do_readpage_ofs(struct page *page, unsigned to)
{
	struct inode *inode = page->mapping->host;
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh;
	char *data;
	unsigned pos = 0;
	u32 bidx, boff, bsize;
	u32 tmp;

	pr_debug("%s(%lu, %ld, 0, %d)\n", __func__, inode->i_ino,
		 page->index, to);
	BUG_ON(to > PAGE_SIZE);
	bsize = AFFS_SB(sb)->s_data_blksize;
	tmp = page->index << PAGE_SHIFT;
	bidx = tmp / bsize;
	boff = tmp % bsize;

	while (pos < to) {
		bh = affs_bread_ino(inode, bidx, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
		tmp = min(bsize - boff, to - pos);
		BUG_ON(pos + tmp > to || tmp > bsize);
		data = kmap_atomic(page);
		memcpy(data + pos, AFFS_DATA(bh) + boff, tmp);
		kunmap_atomic(data);
		affs_brelse(bh);
		bidx++;
		pos += tmp;
		boff = 0;
	}
	flush_dcache_page(page);
	return 0;
}
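
/*
 * Grow an OFS file to @newsize: zero-fill the partial tail of the
 * current last data block, then append zeroed data blocks, keeping the
 * per-block headers, next pointers and checksums consistent throughout.
 */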
static int
affs_extent_file_ofs(struct inode *inode, u32 newsize)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh, *prev_bh;
	u32 bidx, boff;
	u32 size, bsize;
	u32 tmp;

	pr_debug("%s(%lu, %d)\n", __func__, inode->i_ino, newsize);
	bsize = AFFS_SB(sb)->s_data_blksize;
	bh = NULL;
	size = AFFS_I(inode)->mmu_private;
	bidx = size / bsize;
	boff = size % bsize;

	if (boff) {
		bh = affs_bread_ino(inode, bidx, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
		tmp = min(bsize - boff, newsize - size);
		BUG_ON(boff + tmp > bsize || tmp > bsize);
		memset(AFFS_DATA(bh) + boff, 0, tmp);
		be32_add_cpu(&AFFS_DATA_HEAD(bh)->size, tmp);
		affs_fix_checksum(sb, bh);
		mark_buffer_dirty_inode(bh, inode);
		size += tmp;
		bidx++;
	} else if (bidx) {
		bh = affs_bread_ino(inode, bidx - 1, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
	}

	while (size < newsize) {
		prev_bh = bh;
		bh = affs_getzeroblk_ino(inode, bidx);
		if (IS_ERR(bh))
			goto out;
		tmp = min(bsize, newsize - size);
		BUG_ON(tmp > bsize);
		AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
		AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
		AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
		AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
		affs_fix_checksum(sb, bh);
		bh->b_state &= ~(1UL << BH_New);
		mark_buffer_dirty_inode(bh, inode);
		if (prev_bh) {
			u32 tmp_next = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);

			if (tmp_next)
				affs_warning(sb, "extent_file_ofs",
					     "next block already set for %d (%d)",
					     bidx, tmp_next);
			AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
			affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp_next);
			mark_buffer_dirty_inode(prev_bh, inode);
			affs_brelse(prev_bh);
		}
		size += bsize;
		bidx++;
	}
	affs_brelse(bh);
	inode->i_size = AFFS_I(inode)->mmu_private = newsize;
	return 0;

out:
	inode->i_size = AFFS_I(inode)->mmu_private = newsize;
	return PTR_ERR(bh);
}

static int
affs_readpage_ofs(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	u32 to;
	int err;

	pr_debug("%s(%lu, %ld)\n", __func__, inode->i_ino, page->index);
	to = PAGE_SIZE;
	if (((page->index + 1) << PAGE_SHIFT) > inode->i_size) {
		to = inode->i_size & ~PAGE_MASK;
		memset(page_address(page) + to, 0, PAGE_SIZE - to);
	}

	err = affs_do_readpage_ofs(page, to);
	if (!err)
		SetPageUptodate(page);
	unlock_page(page);
	return err;
}
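
/*
 * write_begin/write_end for OFS: the file is first extended up to the
 * write position if necessary, the page is brought uptodate by copying
 * from the data blocks, and write_end then copies the page contents
 * back into the individual data blocks.
 */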
static int affs_write_begin_ofs(struct file *file, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned flags,
				struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct page *page;
	pgoff_t index;
	int err = 0;

	pr_debug("%s(%lu, %llu, %llu)\n", __func__, inode->i_ino, pos,
		 pos + len);
	if (pos > AFFS_I(inode)->mmu_private) {
		/* XXX: this probably leaves a too-big i_size in case of
		 * failure. Should really be updating i_size at write_end time
		 */
		err = affs_extent_file_ofs(inode, pos);
		if (err)
			return err;
	}

	index = pos >> PAGE_SHIFT;
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;
	*pagep = page;

	if (PageUptodate(page))
		return 0;

	/* XXX: inefficient but safe in the face of short writes */
	err = affs_do_readpage_ofs(page, PAGE_SIZE);
	if (err) {
		unlock_page(page);
		put_page(page);
	}
	return err;
}

static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
			      loff_t pos, unsigned len, unsigned copied,
			      struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh, *prev_bh;
	char *data;
	u32 bidx, boff, bsize;
	unsigned from, to;
	u32 tmp;
	int written;

	from = pos & (PAGE_SIZE - 1);
	to = from + len;	/* like 'from', an offset within the page */
	/*
	 * XXX: not sure if this can handle short copies (len < copied), but
	 * we don't have to, because the page should always be uptodate here,
	 * due to write_begin.
	 */
	pr_debug("%s(%lu, %llu, %llu)\n", __func__, inode->i_ino, pos,
		 pos + len);
	bsize = AFFS_SB(sb)->s_data_blksize;
	data = page_address(page);

	bh = NULL;
	written = 0;
	tmp = (page->index << PAGE_SHIFT) + from;
	bidx = tmp / bsize;
	boff = tmp % bsize;
	if (boff) {
		bh = affs_bread_ino(inode, bidx, 0);
		if (IS_ERR(bh)) {
			written = PTR_ERR(bh);
			goto err_first_bh;
		}
		tmp = min(bsize - boff, to - from);
		BUG_ON(boff + tmp > bsize || tmp > bsize);
		memcpy(AFFS_DATA(bh) + boff, data + from, tmp);
		be32_add_cpu(&AFFS_DATA_HEAD(bh)->size, tmp);
		affs_fix_checksum(sb, bh);
		mark_buffer_dirty_inode(bh, inode);
		written += tmp;
		from += tmp;
		bidx++;
	} else if (bidx) {
		bh = affs_bread_ino(inode, bidx - 1, 0);
		if (IS_ERR(bh)) {
			written = PTR_ERR(bh);
			goto err_first_bh;
		}
	}
	while (from + bsize <= to) {
		prev_bh = bh;
		bh = affs_getemptyblk_ino(inode, bidx);
		if (IS_ERR(bh))
			goto err_bh;
		memcpy(AFFS_DATA(bh), data + from, bsize);
		if (buffer_new(bh)) {
			AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
			AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
			AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
			AFFS_DATA_HEAD(bh)->size = cpu_to_be32(bsize);
			AFFS_DATA_HEAD(bh)->next = 0;
			bh->b_state &= ~(1UL << BH_New);
			if (prev_bh) {
				u32 tmp_next = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);

				if (tmp_next)
					affs_warning(sb, "commit_write_ofs",
						     "next block already set for %d (%d)",
						     bidx, tmp_next);
				AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
				affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp_next);
				mark_buffer_dirty_inode(prev_bh, inode);
			}
		}
		affs_brelse(prev_bh);
		affs_fix_checksum(sb, bh);
		mark_buffer_dirty_inode(bh, inode);
		written += bsize;
		from += bsize;
		bidx++;
	}
	if (from < to) {
		prev_bh = bh;
		bh = affs_bread_ino(inode, bidx, 1);
		if (IS_ERR(bh))
			goto err_bh;
		tmp = min(bsize, to - from);
		BUG_ON(tmp > bsize);
		memcpy(AFFS_DATA(bh), data + from, tmp);
		if (buffer_new(bh)) {
			AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
			AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
			AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
			AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
			AFFS_DATA_HEAD(bh)->next = 0;
			bh->b_state &= ~(1UL << BH_New);
			if (prev_bh) {
				u32 tmp_next = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);

				if (tmp_next)
					affs_warning(sb, "commit_write_ofs",
						     "next block already set for %d (%d)",
						     bidx, tmp_next);
				AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
				affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp_next);
				mark_buffer_dirty_inode(prev_bh, inode);
			}
		} else if (be32_to_cpu(AFFS_DATA_HEAD(bh)->size) < tmp)
			AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
		affs_brelse(prev_bh);
		affs_fix_checksum(sb, bh);
		mark_buffer_dirty_inode(bh, inode);
		written += tmp;
		from += tmp;
		bidx++;
	}
	SetPageUptodate(page);

done:
	affs_brelse(bh);
	tmp = (page->index << PAGE_SHIFT) + from;
	if (tmp > inode->i_size)
		inode->i_size = AFFS_I(inode)->mmu_private = tmp;

err_first_bh:
	unlock_page(page);
	put_page(page);

	return written;
err_bh:
	/* pick up the error code before bh is replaced by prev_bh */
	if (!written)
		written = PTR_ERR(bh);
	bh = prev_bh;
	goto done;
}

const struct address_space_operations affs_aops_ofs = {
	.readpage = affs_readpage_ofs,
	//.writepage = affs_writepage_ofs,
	.write_begin = affs_write_begin_ofs,
	.write_end = affs_write_end_ofs
};

/* Free any preallocated blocks. */

void
affs_free_prealloc(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	pr_debug("free_prealloc(ino=%lu)\n", inode->i_ino);

	while (AFFS_I(inode)->i_pa_cnt) {
		AFFS_I(inode)->i_pa_cnt--;
		affs_free_block(sb, ++AFFS_I(inode)->i_lastalloc);
	}
}
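
/*
 * Shrinking works through the extension chain: surplus block pointers in
 * the extension block covering the new last block are freed and cleared,
 * every following extension block is freed outright, and the extent
 * caches are invalidated past the new end.  Enlarging is delegated to
 * write_begin/write_end at the new size.
 */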
/* Truncate (or enlarge) a file to the requested size. */

void
affs_truncate(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	u32 ext, ext_key;
	u32 last_blk, blkcnt, blk;
	u32 size;
	struct buffer_head *ext_bh;
	int i;

	pr_debug("truncate(inode=%lu, oldsize=%llu, newsize=%llu)\n",
		 inode->i_ino, AFFS_I(inode)->mmu_private, inode->i_size);

	last_blk = 0;
	ext = 0;
	if (inode->i_size) {
		last_blk = ((u32)inode->i_size - 1) / AFFS_SB(sb)->s_data_blksize;
		ext = last_blk / AFFS_SB(sb)->s_hashsize;
	}

	if (inode->i_size > AFFS_I(inode)->mmu_private) {
		struct address_space *mapping = inode->i_mapping;
		struct page *page;
		void *fsdata;
		loff_t isize = inode->i_size;
		int res;

		res = mapping->a_ops->write_begin(NULL, mapping, isize, 0, 0, &page, &fsdata);
		if (!res)
			res = mapping->a_ops->write_end(NULL, mapping, isize, 0, 0, page, fsdata);
		else
			inode->i_size = AFFS_I(inode)->mmu_private;
		mark_inode_dirty(inode);
		return;
	} else if (inode->i_size == AFFS_I(inode)->mmu_private)
		return;

	// lock cache
	ext_bh = affs_get_extblock(inode, ext);
	if (IS_ERR(ext_bh)) {
		affs_warning(sb, "truncate",
			     "unexpected read error for ext block %u (%ld)",
			     ext, PTR_ERR(ext_bh));
		return;
	}
	if (AFFS_I(inode)->i_lc) {
		/* clear linear cache */
		i = (ext + 1) >> AFFS_I(inode)->i_lc_shift;
		if (AFFS_I(inode)->i_lc_size > i) {
			AFFS_I(inode)->i_lc_size = i;
			for (; i < AFFS_LC_SIZE; i++)
				AFFS_I(inode)->i_lc[i] = 0;
		}
		/* clear associative cache */
		for (i = 0; i < AFFS_AC_SIZE; i++)
			if (AFFS_I(inode)->i_ac[i].ext >= ext)
				AFFS_I(inode)->i_ac[i].ext = 0;
	}
	ext_key = be32_to_cpu(AFFS_TAIL(sb, ext_bh)->extension);

	blkcnt = AFFS_I(inode)->i_blkcnt;
	i = 0;
	blk = last_blk;
	if (inode->i_size) {
		i = last_blk % AFFS_SB(sb)->s_hashsize + 1;
		blk++;
	} else
		AFFS_HEAD(ext_bh)->first_data = 0;
	AFFS_HEAD(ext_bh)->block_count = cpu_to_be32(i);
	size = AFFS_SB(sb)->s_hashsize;
	if (size > blkcnt - blk + i)
		size = blkcnt - blk + i;
	for (; i < size; i++, blk++) {
		affs_free_block(sb, be32_to_cpu(AFFS_BLOCK(sb, ext_bh, i)));
		AFFS_BLOCK(sb, ext_bh, i) = 0;
	}
	AFFS_TAIL(sb, ext_bh)->extension = 0;
	affs_fix_checksum(sb, ext_bh);
	mark_buffer_dirty_inode(ext_bh, inode);
	affs_brelse(ext_bh);

	if (inode->i_size) {
		AFFS_I(inode)->i_blkcnt = last_blk + 1;
		AFFS_I(inode)->i_extcnt = ext + 1;
		if (affs_test_opt(AFFS_SB(sb)->s_flags, SF_OFS)) {
			struct buffer_head *bh = affs_bread_ino(inode, last_blk, 0);
			u32 tmp;
			if (IS_ERR(bh)) {
				affs_warning(sb, "truncate",
					     "unexpected read error for last block %u (%ld)",
					     ext, PTR_ERR(bh));
				return;
			}
			tmp = be32_to_cpu(AFFS_DATA_HEAD(bh)->next);
			AFFS_DATA_HEAD(bh)->next = 0;
			affs_adjust_checksum(bh, -tmp);
			affs_brelse(bh);
		}
	} else {
		AFFS_I(inode)->i_blkcnt = 0;
		AFFS_I(inode)->i_extcnt = 1;
	}
	AFFS_I(inode)->mmu_private = inode->i_size;
	// unlock cache

	while (ext_key) {
		ext_bh = affs_bread(sb, ext_key);
		size = AFFS_SB(sb)->s_hashsize;
		if (size > blkcnt - blk)
			size = blkcnt - blk;
		for (i = 0; i < size; i++, blk++)
			affs_free_block(sb, be32_to_cpu(AFFS_BLOCK(sb, ext_bh, i)));
		affs_free_block(sb, ext_key);
		ext_key = be32_to_cpu(AFFS_TAIL(sb, ext_bh)->extension);
		affs_brelse(ext_bh);
	}
	affs_free_prealloc(inode);
}

int affs_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = filp->f_mapping->host;
	int ret, err;

	err = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (err)
		return err;

	inode_lock(inode);
	ret = write_inode_now(inode, 0);
	err = sync_blockdev(inode->i_sb->s_bdev);
	if (!ret)
		ret = err;
	inode_unlock(inode);
	return ret;
}

const struct file_operations affs_file_operations = {
	.llseek = generic_file_llseek,
	.read_iter = generic_file_read_iter,
	.write_iter = generic_file_write_iter,
	.mmap = generic_file_mmap,
	.open = affs_file_open,
	.release = affs_file_release,
	.fsync = affs_file_fsync,
	.splice_read = generic_file_splice_read,
};

const struct inode_operations affs_file_inode_operations = {
	.setattr = affs_notify_change,
};