// SPDX-License-Identifier: GPL-2.0
/*
 * linux/drivers/staging/erofs/data.c
 *
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of the Linux
 * distribution for more details.
 */
#include "internal.h"
#include <linux/prefetch.h>

#include <trace/events/erofs.h>
static inline void read_endio(struct bio *bio)
{
	int i;
	struct bio_vec *bvec;
	const blk_status_t err = bio->bi_status;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		/* page is already locked */
		DBG_BUGON(PageUptodate(page));

		if (unlikely(err))
			SetPageError(page);
		else
			SetPageUptodate(page);

		unlock_page(page);
		/* page could be reclaimed now */
	}
	bio_put(bio);
}
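/*
 * Grab the metadata page at @blkaddr from the backing block device's page
 * cache.  The page comes back locked with an extra reference held; a racing
 * truncation simply triggers a retry, while a read failure drops the page
 * and yields ERR_PTR(-EIO) instead.
 */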
/* prio -- true is used for directories */
struct page *erofs_get_meta_page(struct super_block *sb,
				 erofs_blk_t blkaddr, bool prio)
{
	struct inode *bd_inode = sb->s_bdev->bd_inode;
	struct address_space *mapping = bd_inode->i_mapping;
	struct page *page;

repeat:
	page = find_or_create_page(mapping, blkaddr,
	/*
	 * Prefer looping in the allocator rather than here,
	 * at least that code knows what it's doing.
	 */
		mapping_gfp_constraint(mapping, ~__GFP_FS) | __GFP_NOFAIL);

	BUG_ON(!page || !PageLocked(page));

	if (!PageUptodate(page)) {
		struct bio *bio;
		int err;

		bio = prepare_bio(sb, blkaddr, 1, read_endio);
		err = bio_add_page(bio, page, PAGE_SIZE, 0);
		BUG_ON(err != PAGE_SIZE);

		__submit_bio(bio, REQ_OP_READ,
			     REQ_META | (prio ? REQ_PRIO : 0));

		lock_page(page);

		/* has the page been truncated by others? */
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			put_page(page);
			goto repeat;
		}

		/* more likely a read error */
		if (unlikely(!PageUptodate(page))) {
			unlock_page(page);
			put_page(page);

			page = ERR_PTR(-EIO);
		}
	}
	return page;
}
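/*
 * A minimal usage sketch (illustrative only, not part of this file): a
 * caller wanting to parse one on-disk metadata block might do
 *
 *	struct page *page = erofs_get_meta_page(sb, blkaddr, false);
 *
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	data = kmap(page);
 *	... parse the block at data ...
 *	kunmap(page);
 *	unlock_page(page);	// the page is returned locked
 *	put_page(page);
 */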
static int erofs_map_blocks_flatmode(struct inode *inode,
				     struct erofs_map_blocks *map,
				     int flags)
{
	int err = 0;
	erofs_blk_t nblocks, lastblk;
	u64 offset = map->m_la;
	struct erofs_vnode *vi = EROFS_V(inode);

	trace_erofs_map_blocks_flatmode_enter(inode, map, flags);

	nblocks = DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
	lastblk = nblocks - is_inode_layout_inline(inode);

	if (unlikely(offset >= inode->i_size)) {
		/* leave out-of-bound accesses unmapped */
		map->m_flags = 0;
		map->m_plen = 0;
		goto out;
	}

	/* there is no hole in flat mode */
	map->m_flags = EROFS_MAP_MAPPED;

	if (offset < blknr_to_addr(lastblk)) {
		map->m_pa = blknr_to_addr(vi->raw_blkaddr) + map->m_la;
		map->m_plen = blknr_to_addr(lastblk) - offset;
	} else if (is_inode_layout_inline(inode)) {
		/* 2 - inode inline B: inode, [xattrs], inline last blk... */
		struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);

		map->m_pa = iloc(sbi, vi->nid) + vi->inode_isize +
			vi->xattr_isize + erofs_blkoff(map->m_la);
		map->m_plen = inode->i_size - offset;

		/* inline data should be located within a single meta block */
		if (erofs_blkoff(map->m_pa) + map->m_plen > PAGE_SIZE) {
			DBG_BUGON(1);
			err = -EIO;
			goto err_out;
		}

		map->m_flags |= EROFS_MAP_META;
	} else {
		errln("internal error @ nid: %llu (size %llu), m_la 0x%llx",
		      vi->nid, inode->i_size, map->m_la);
		DBG_BUGON(1);
		err = -EIO;
		goto err_out;
	}

out:
	map->m_llen = map->m_plen;

err_out:
	trace_erofs_map_blocks_flatmode_exit(inode, map, flags, 0);
	return err;
}
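/*
 * Worked example of the flat-mode arithmetic above (assuming
 * EROFS_BLKSIZ == PAGE_SIZE == 4096): take a 10000-byte non-inline file
 * whose data starts at raw_blkaddr 100, i.e. physical offset 409600.
 * Then nblocks = lastblk = 3, and a lookup at m_la = 5000 yields
 * m_pa = 409600 + 5000 = 414600 with m_plen = 3 * 4096 - 5000 = 7288
 * (the remainder of the mapped run).  With an inline tail, lastblk drops
 * to 2, so any offset >= 8192 is served by the EROFS_MAP_META branch from
 * the space right after the inode and its xattrs.
 */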
#ifdef CONFIG_EROFS_FS_ZIP
extern int z_erofs_map_blocks_iter(struct inode *,
				   struct erofs_map_blocks *,
				   struct page **, int);
#endif
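/*
 * *mpage_ret carries the metadata page cached by the previous iteration so
 * that the compressed-mapping code can reuse it across calls; it is dropped
 * here whenever the inode turns out to be uncompressed and the plain
 * erofs_map_blocks() path is taken instead.
 */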
int erofs_map_blocks_iter(struct inode *inode,
			  struct erofs_map_blocks *map,
			  struct page **mpage_ret, int flags)
{
	/* by default, reading raw data never uses erofs_map_blocks_iter */
	if (unlikely(!is_inode_layout_compression(inode))) {
		if (*mpage_ret != NULL)
			put_page(*mpage_ret);
		*mpage_ret = NULL;

		return erofs_map_blocks(inode, map, flags);
	}

#ifdef CONFIG_EROFS_FS_ZIP
	return z_erofs_map_blocks_iter(inode, map, mpage_ret, flags);
#else
	/* data compression is not available */
	return -ENOTSUPP;
#endif
}
int erofs_map_blocks(struct inode *inode,
		     struct erofs_map_blocks *map, int flags)
{
	if (unlikely(is_inode_layout_compression(inode))) {
		struct page *mpage = NULL;
		int err;

		err = erofs_map_blocks_iter(inode, map, &mpage, flags);
		if (mpage != NULL)
			put_page(mpage);
		return err;
	}
	return erofs_map_blocks_flatmode(inode, map, flags);
}
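/*
 * Read one uncompressed page, batching physically contiguous blocks into
 * @bio.  Returns the still-open bio for the caller to keep appending to,
 * NULL once the page has been handled (any pending bio submitted), or an
 * ERR_PTR() on failure.  @ra marks readahead, where the per-page error
 * flag is deliberately left unset.
 */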
static inline struct bio *erofs_read_raw_page(struct bio *bio,
					      struct address_space *mapping,
					      struct page *page,
					      erofs_off_t *last_block,
					      unsigned int nblocks,
					      bool ra)
{
	struct inode *inode = mapping->host;
	erofs_off_t current_block = (erofs_off_t)page->index;
	int err;

	DBG_BUGON(!nblocks);

	if (PageUptodate(page)) {
		err = 0;
		goto has_updated;
	}

	if (cleancache_get_page(page) == 0) {
		err = 0;
		SetPageUptodate(page);
		goto has_updated;
	}

	/* note that for the readpage case, bio also equals NULL */
	if (bio != NULL &&
	    /* not continuous */
	    *last_block + 1 != current_block) {
submit_bio_retry:
		__submit_bio(bio, REQ_OP_READ, 0);
		bio = NULL;
	}

	if (bio == NULL) {
		struct erofs_map_blocks map = {
			.m_la = blknr_to_addr(current_block),
		};
		erofs_blk_t blknr;
		unsigned int blkoff;

		err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
		if (unlikely(err))
			goto err_out;

		/* zero out the page for file holes */
		if (unlikely(!(map.m_flags & EROFS_MAP_MAPPED))) {
			zero_user_segment(page, 0, PAGE_SIZE);
			SetPageUptodate(page);

			/* imply err = 0, see erofs_map_blocks */
			goto has_updated;
		}

		/* for RAW access mode, m_plen must be equal to m_llen */
		DBG_BUGON(map.m_plen != map.m_llen);

		blknr = erofs_blknr(map.m_pa);
		blkoff = erofs_blkoff(map.m_pa);

		/* deal with inline page */
		if (map.m_flags & EROFS_MAP_META) {
			void *vsrc, *vto;
			struct page *ipage;

			DBG_BUGON(map.m_plen > PAGE_SIZE);

			ipage = erofs_get_meta_page(inode->i_sb, blknr, 0);
			if (IS_ERR(ipage)) {
				err = PTR_ERR(ipage);
				goto err_out;
			}

			vsrc = kmap_atomic(ipage);
			vto = kmap_atomic(page);
			memcpy(vto, vsrc + blkoff, map.m_plen);
			memset(vto + map.m_plen, 0, PAGE_SIZE - map.m_plen);
			kunmap_atomic(vto);
			kunmap_atomic(vsrc);
			flush_dcache_page(page);

			SetPageUptodate(page);
			/* TODO: could we unlock the page earlier? */
			unlock_page(ipage);
			put_page(ipage);

			/* imply err = 0, see erofs_map_blocks */
			goto has_updated;
		}

		/* pa must be block-aligned for raw reading */
		DBG_BUGON(erofs_blkoff(map.m_pa));

		/* max # of continuous pages */
		if (nblocks > DIV_ROUND_UP(map.m_plen, PAGE_SIZE))
			nblocks = DIV_ROUND_UP(map.m_plen, PAGE_SIZE);
		if (nblocks > BIO_MAX_PAGES)
			nblocks = BIO_MAX_PAGES;

		bio = prepare_bio(inode->i_sb, blknr, nblocks, read_endio);
	}

	err = bio_add_page(bio, page, PAGE_SIZE, 0);
	/* out of the extent or the bio is full */
	if (err < PAGE_SIZE)
		goto submit_bio_retry;

	*last_block = current_block;

	/* submit in advance in case it is followed by too many gaps */
	if (unlikely(bio->bi_vcnt >= bio->bi_max_vecs)) {
		/* err should be reassigned to 0 after submitting */
		err = 0;
		goto submit_bio_out;
	}

	return bio;

err_out:
	/* for sync reading, set the page error immediately */
	if (!ra) {
		SetPageError(page);
		ClearPageUptodate(page);
	}

has_updated:
	unlock_page(page);

	/* if updated manually, the continuous pages have a gap */
	if (bio != NULL)
submit_bio_out:
		__submit_bio(bio, REQ_OP_READ, 0);

	return unlikely(err) ? ERR_PTR(err) : NULL;
}
/*
 * since we don't have write or truncate flows, no inode
 * locking needs to be held at the moment.
 */
static int erofs_raw_access_readpage(struct file *file, struct page *page)
{
	erofs_off_t last_block;
	struct bio *bio;

	trace_erofs_readpage(page, true);

	bio = erofs_read_raw_page(NULL, page->mapping,
				  page, &last_block, 1, false);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	DBG_BUGON(bio);	/* since we have only one bio -- it must be NULL */
	return 0;
}
static int erofs_raw_access_readpages(struct file *filp,
				      struct address_space *mapping,
				      struct list_head *pages,
				      unsigned int nr_pages)
{
	erofs_off_t last_block;
	struct bio *bio = NULL;
	gfp_t gfp = readahead_gfp_mask(mapping);
	struct page *page = list_last_entry(pages, struct page, lru);

	trace_erofs_readpages(mapping->host, page, nr_pages, true);

	for (; nr_pages; --nr_pages) {
		page = list_entry(pages->prev, struct page, lru);

		prefetchw(&page->flags);
		list_del(&page->lru);

		if (!add_to_page_cache_lru(page, mapping, page->index, gfp)) {
			bio = erofs_read_raw_page(bio, mapping, page,
						  &last_block, nr_pages, true);

			/* all page errors are ignored during readahead */
			if (IS_ERR(bio)) {
				pr_err("%s, readahead error at page %lu of nid %llu\n",
				       __func__, page->index,
				       EROFS_V(mapping->host)->nid);

				bio = NULL;
			}
		}

		/* pages could still be locked */
		put_page(page);
	}
	DBG_BUGON(!list_empty(pages));

	/* the rare case (end in gaps) */
	if (unlikely(bio != NULL))
		__submit_bio(bio, REQ_OP_READ, 0);
	return 0;
}
/* for uncompressed (aligned) files and raw access for other files */
const struct address_space_operations erofs_raw_access_aops = {
	.readpage = erofs_raw_access_readpage,
	.readpages = erofs_raw_access_readpages,
};