/* fs/cramfs/inode.c */
  1. /*
  2. * Compressed rom filesystem for Linux.
  3. *
  4. * Copyright (C) 1999 Linus Torvalds.
  5. *
  6. * This file is released under the GPL.
  7. */
  8. /*
  9. * These are the VFS interfaces to the compressed rom filesystem.
  10. * The actual compression is based on zlib, see the other files.
  11. */
  12. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  13. #include <linux/module.h>
  14. #include <linux/fs.h>
  15. #include <linux/file.h>
  16. #include <linux/pagemap.h>
  17. #include <linux/pfn_t.h>
  18. #include <linux/ramfs.h>
  19. #include <linux/init.h>
  20. #include <linux/string.h>
  21. #include <linux/blkdev.h>
  22. #include <linux/mtd/mtd.h>
  23. #include <linux/mtd/super.h>
  24. #include <linux/slab.h>
  25. #include <linux/vfs.h>
  26. #include <linux/mutex.h>
  27. #include <uapi/linux/cramfs_fs.h>
  28. #include <linux/uaccess.h>
  29. #include "internal.h"
/*
 * cramfs super-block data in memory
 */
struct cramfs_sb_info {
	unsigned long magic;		/* CRAMFS_MAGIC, cached from the on-disk superblock */
	unsigned long size;		/* total image size in bytes */
	unsigned long blocks;		/* block count (only recorded by v2 superblocks) */
	unsigned long files;		/* file count (only recorded by v2 superblocks) */
	unsigned long flags;		/* CRAMFS_FLAG_* feature bits from the superblock */
	void *linear_virt_addr;		/* kernel mapping of the image (MTD direct mode) */
	resource_size_t linear_phys_addr; /* physical base of the image (MTD direct mode) */
	size_t mtd_point_size;		/* length granted by mtd_point(); 0 if unmapped */
};
/* Return the cramfs-private data hanging off a superblock. */
static inline struct cramfs_sb_info *CRAMFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}
/* Operation tables defined later in this file. */
static const struct super_operations cramfs_ops;
static const struct inode_operations cramfs_dir_inode_operations;
static const struct file_operations cramfs_directory_operations;
static const struct file_operations cramfs_physmem_fops;
static const struct address_space_operations cramfs_aops;

/* Serializes all raw image reads and access to the shared read buffers. */
static DEFINE_MUTEX(read_mutex);

/* These macros may change in future, to provide better st_ino semantics. */
#define OFFSET(x)	((x)->i_ino)
  55. static unsigned long cramino(const struct cramfs_inode *cino, unsigned int offset)
  56. {
  57. if (!cino->offset)
  58. return offset + 1;
  59. if (!cino->size)
  60. return offset + 1;
  61. /*
  62. * The file mode test fixes buggy mkcramfs implementations where
  63. * cramfs_inode->offset is set to a non zero value for entries
  64. * which did not contain data, like devices node and fifos.
  65. */
  66. switch (cino->mode & S_IFMT) {
  67. case S_IFREG:
  68. case S_IFDIR:
  69. case S_IFLNK:
  70. return cino->offset << 2;
  71. default:
  72. break;
  73. }
  74. return offset + 1;
  75. }
/*
 * Look up (or create and initialize) the in-core inode for the given
 * on-disk cramfs inode.  Returns ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct inode *get_cramfs_inode(struct super_block *sb,
	const struct cramfs_inode *cramfs_inode, unsigned int offset)
{
	struct inode *inode;
	static struct timespec64 zerotime;	/* all cramfs timestamps are the epoch */

	inode = iget_locked(sb, cramino(cramfs_inode, offset));
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;	/* already cached and fully initialized */

	switch (cramfs_inode->mode & S_IFMT) {
	case S_IFREG:
		inode->i_fop = &generic_ro_fops;
		inode->i_data.a_ops = &cramfs_aops;
		/*
		 * Directly-mapped images with extended block pointers may
		 * be able to mmap file data straight from image memory.
		 */
		if (IS_ENABLED(CONFIG_CRAMFS_MTD) &&
		    CRAMFS_SB(sb)->flags & CRAMFS_FLAG_EXT_BLOCK_POINTERS &&
		    CRAMFS_SB(sb)->linear_phys_addr)
			inode->i_fop = &cramfs_physmem_fops;
		break;
	case S_IFDIR:
		inode->i_op = &cramfs_dir_inode_operations;
		inode->i_fop = &cramfs_directory_operations;
		break;
	case S_IFLNK:
		inode->i_op = &page_symlink_inode_operations;
		inode_nohighmem(inode);
		inode->i_data.a_ops = &cramfs_aops;
		break;
	default:
		/* Device nodes and fifos store the device number in ->size. */
		init_special_inode(inode, cramfs_inode->mode,
				   old_decode_dev(cramfs_inode->size));
	}

	inode->i_mode = cramfs_inode->mode;
	i_uid_write(inode, cramfs_inode->uid);
	i_gid_write(inode, cramfs_inode->gid);

	/* if the lower 2 bits are zero, the inode contains data */
	if (!(inode->i_ino & 3)) {
		inode->i_size = cramfs_inode->size;
		inode->i_blocks = (cramfs_inode->size - 1) / 512 + 1;
	}

	/* Struct copy intentional */
	inode->i_mtime = inode->i_atime = inode->i_ctime = zerotime;
	/* inode->i_nlink is left 1 - arguably wrong for directories,
	   but it's the best we can do without reading the directory
	   contents. 1 yields the right result in GNU find, even
	   without -noleaf option. */
	unlock_new_inode(inode);
	return inode;
}
  125. /*
  126. * We have our own block cache: don't fill up the buffer cache
  127. * with the rom-image, because the way the filesystem is set
  128. * up the accesses should be fairly regular and cached in the
  129. * page cache and dentry tree anyway..
  130. *
  131. * This also acts as a way to guarantee contiguous areas of up to
  132. * BLKS_PER_BUF*PAGE_SIZE, so that the caller doesn't need to
  133. * worry about end-of-buffer issues even when decompressing a full
  134. * page cache.
  135. *
  136. * Note: This is all optimized away at compile time when
  137. * CONFIG_CRAMFS_BLOCKDEV=n.
  138. */
#define READ_BUFFERS (2)
/* NEXT_BUFFER(): Loop over [0..(READ_BUFFERS-1)]. */
#define NEXT_BUFFER(_ix) ((_ix) ^ 1)

/*
 * BLKS_PER_BUF_SHIFT should be at least 2 to allow for "compressed"
 * data that takes up more space than the original and with unlucky
 * alignment.
 */
#define BLKS_PER_BUF_SHIFT	(2)
#define BLKS_PER_BUF		(1 << BLKS_PER_BUF_SHIFT)
#define BUFFER_SIZE		(BLKS_PER_BUF*PAGE_SIZE)

/* The cache proper: raw data plus, per buffer, its starting device page
   and owning superblock (all guarded by read_mutex in the callers). */
static unsigned char read_buffers[READ_BUFFERS][BUFFER_SIZE];
static unsigned buffer_blocknr[READ_BUFFERS];
static struct super_block *buffer_dev[READ_BUFFERS];
static int next_buffer;		/* round-robin replacement index */
/*
 * Populate our block cache and return a pointer to it.
 */
static void *cramfs_blkdev_read(struct super_block *sb, unsigned int offset,
				unsigned int len)
{
	struct address_space *mapping = sb->s_bdev->bd_inode->i_mapping;
	struct page *pages[BLKS_PER_BUF];
	unsigned i, blocknr, buffer;
	unsigned long devsize;
	char *data;

	if (!len)
		return NULL;
	/* Split the byte offset into a device page number + intra-page offset. */
	blocknr = offset >> PAGE_SHIFT;
	offset &= PAGE_SIZE - 1;

	/* Check if an existing buffer already has the data.. */
	for (i = 0; i < READ_BUFFERS; i++) {
		unsigned int blk_offset;

		if (buffer_dev[i] != sb)
			continue;
		if (blocknr < buffer_blocknr[i])
			continue;
		blk_offset = (blocknr - buffer_blocknr[i]) << PAGE_SHIFT;
		blk_offset += offset;
		/* NOTE(review): the first test seems implied by the second
		   unless blk_offset + len wraps around — confirm before
		   simplifying. */
		if (blk_offset > BUFFER_SIZE ||
		    blk_offset + len > BUFFER_SIZE)
			continue;
		return read_buffers[i] + blk_offset;
	}

	devsize = mapping->host->i_size >> PAGE_SHIFT;

	/* Ok, read in BLKS_PER_BUF pages completely first. */
	for (i = 0; i < BLKS_PER_BUF; i++) {
		struct page *page = NULL;

		if (blocknr + i < devsize) {
			page = read_mapping_page(mapping, blocknr + i, NULL);
			/* synchronous error? */
			if (IS_ERR(page))
				page = NULL;
		}
		pages[i] = page;
	}

	/* Wait for all reads to complete; drop pages that failed. */
	for (i = 0; i < BLKS_PER_BUF; i++) {
		struct page *page = pages[i];

		if (page) {
			wait_on_page_locked(page);
			if (!PageUptodate(page)) {
				/* asynchronous error */
				put_page(page);
				pages[i] = NULL;
			}
		}
	}

	/* Claim the next buffer round-robin and publish its new identity. */
	buffer = next_buffer;
	next_buffer = NEXT_BUFFER(buffer);
	buffer_blocknr[buffer] = blocknr;
	buffer_dev[buffer] = sb;

	/* Copy the page contents in; unreadable pages become zeroes. */
	data = read_buffers[buffer];
	for (i = 0; i < BLKS_PER_BUF; i++) {
		struct page *page = pages[i];

		if (page) {
			memcpy(data, kmap(page), PAGE_SIZE);
			kunmap(page);
			put_page(page);
		} else
			memset(data, 0, PAGE_SIZE);
		data += PAGE_SIZE;
	}
	return read_buffers[buffer] + offset;
}
  223. /*
  224. * Return a pointer to the linearly addressed cramfs image in memory.
  225. */
  226. static void *cramfs_direct_read(struct super_block *sb, unsigned int offset,
  227. unsigned int len)
  228. {
  229. struct cramfs_sb_info *sbi = CRAMFS_SB(sb);
  230. if (!len)
  231. return NULL;
  232. if (len > sbi->size || offset > sbi->size - len)
  233. return page_address(ZERO_PAGE(0));
  234. return sbi->linear_virt_addr + offset;
  235. }
  236. /*
  237. * Returns a pointer to a buffer containing at least LEN bytes of
  238. * filesystem starting at byte offset OFFSET into the filesystem.
  239. */
  240. static void *cramfs_read(struct super_block *sb, unsigned int offset,
  241. unsigned int len)
  242. {
  243. struct cramfs_sb_info *sbi = CRAMFS_SB(sb);
  244. if (IS_ENABLED(CONFIG_CRAMFS_MTD) && sbi->linear_virt_addr)
  245. return cramfs_direct_read(sb, offset, len);
  246. else if (IS_ENABLED(CONFIG_CRAMFS_BLOCKDEV))
  247. return cramfs_blkdev_read(sb, offset, len);
  248. else
  249. return NULL;
  250. }
/*
 * For a mapping to be possible, we need a range of uncompressed and
 * contiguous blocks. Return the offset for the first block and number of
 * valid blocks for which that is true, or zero otherwise.
 */
static u32 cramfs_get_block_range(struct inode *inode, u32 pgoff, u32 *pages)
{
	struct cramfs_sb_info *sbi = CRAMFS_SB(inode->i_sb);
	int i;
	u32 *blockptrs, first_block_addr;

	/*
	 * We can dereference memory directly here as this code may be
	 * reached only when there is a direct filesystem image mapping
	 * available in memory.
	 */
	blockptrs = (u32 *)(sbi->linear_virt_addr + OFFSET(inode) + pgoff * 4);
	first_block_addr = blockptrs[0] & ~CRAMFS_BLK_FLAGS;
	i = 0;
	do {
		/*
		 * Each successive pointer must be a direct, uncompressed
		 * pointer whose (shifted) address is exactly PAGE_SIZE
		 * past the previous block's.
		 */
		u32 block_off = i * (PAGE_SIZE >> CRAMFS_BLK_DIRECT_PTR_SHIFT);
		u32 expect = (first_block_addr + block_off) |
			     CRAMFS_BLK_FLAG_DIRECT_PTR |
			     CRAMFS_BLK_FLAG_UNCOMPRESSED;
		if (blockptrs[i] != expect) {
			pr_debug("range: block %d/%d got %#x expects %#x\n",
				 pgoff+i, pgoff + *pages - 1,
				 blockptrs[i], expect);
			if (i == 0)
				return 0;
			break;
		}
	} while (++i < *pages);

	*pages = i;	/* trimmed to the contiguous prefix actually found */
	return first_block_addr << CRAMFS_BLK_DIRECT_PTR_SHIFT;
}
  286. #ifdef CONFIG_MMU
  287. /*
  288. * Return true if the last page of a file in the filesystem image contains
  289. * some other data that doesn't belong to that file. It is assumed that the
  290. * last block is CRAMFS_BLK_FLAG_DIRECT_PTR | CRAMFS_BLK_FLAG_UNCOMPRESSED
  291. * (verified by cramfs_get_block_range() and directly accessible in memory.
  292. */
  293. static bool cramfs_last_page_is_shared(struct inode *inode)
  294. {
  295. struct cramfs_sb_info *sbi = CRAMFS_SB(inode->i_sb);
  296. u32 partial, last_page, blockaddr, *blockptrs;
  297. char *tail_data;
  298. partial = offset_in_page(inode->i_size);
  299. if (!partial)
  300. return false;
  301. last_page = inode->i_size >> PAGE_SHIFT;
  302. blockptrs = (u32 *)(sbi->linear_virt_addr + OFFSET(inode));
  303. blockaddr = blockptrs[last_page] & ~CRAMFS_BLK_FLAGS;
  304. blockaddr <<= CRAMFS_BLK_DIRECT_PTR_SHIFT;
  305. tail_data = sbi->linear_virt_addr + blockaddr + partial;
  306. return memchr_inv(tail_data, 0, PAGE_SIZE - partial) ? true : false;
  307. }
/*
 * mmap a directly-mapped file region straight from the physical image
 * when the underlying blocks are uncompressed and contiguous, falling
 * back to normal paging via cramfs_readpage() otherwise.  Once the
 * generic read-only mmap has succeeded this never fails: a bailout
 * just means no direct mapping was installed.
 */
static int cramfs_physmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	struct cramfs_sb_info *sbi = CRAMFS_SB(inode->i_sb);
	unsigned int pages, max_pages, offset;
	unsigned long address, pgoff = vma->vm_pgoff;
	char *bailout_reason;
	int ret;

	ret = generic_file_readonly_mmap(file, vma);
	if (ret)
		return ret;

	/*
	 * Now try to pre-populate ptes for this vma with a direct
	 * mapping avoiding memory allocation when possible.
	 */

	/* Could COW work here? */
	bailout_reason = "vma is writable";
	if (vma->vm_flags & VM_WRITE)
		goto bailout;

	max_pages = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	bailout_reason = "beyond file limit";
	if (pgoff >= max_pages)
		goto bailout;
	pages = min(vma_pages(vma), max_pages - pgoff);

	/* Find how many blocks from pgoff on are direct and contiguous. */
	offset = cramfs_get_block_range(inode, pgoff, &pages);
	bailout_reason = "unsuitable block layout";
	if (!offset)
		goto bailout;
	address = sbi->linear_phys_addr + offset;
	bailout_reason = "data is not page aligned";
	if (!PAGE_ALIGNED(address))
		goto bailout;

	/* Don't map the last page if it contains some other data */
	if (pgoff + pages == max_pages && cramfs_last_page_is_shared(inode)) {
		pr_debug("mmap: %s: last page is shared\n",
			 file_dentry(file)->d_name.name);
		pages--;
	}

	if (!pages) {
		bailout_reason = "no suitable block remaining";
		goto bailout;
	}

	if (pages == vma_pages(vma)) {
		/*
		 * The entire vma is mappable. remap_pfn_range() will
		 * make it distinguishable from a non-direct mapping
		 * in /proc/<pid>/maps by substituting the file offset
		 * with the actual physical address.
		 */
		ret = remap_pfn_range(vma, vma->vm_start, address >> PAGE_SHIFT,
				      pages * PAGE_SIZE, vma->vm_page_prot);
	} else {
		/*
		 * Let's create a mixed map if we can't map it all.
		 * The normal paging machinery will take care of the
		 * unpopulated ptes via cramfs_readpage().
		 */
		int i;
		vma->vm_flags |= VM_MIXEDMAP;
		/* ret is 0 here (generic mmap succeeded above). */
		for (i = 0; i < pages && !ret; i++) {
			unsigned long off = i * PAGE_SIZE;
			pfn_t pfn = phys_to_pfn_t(address + off, PFN_DEV);
			ret = vm_insert_mixed(vma, vma->vm_start + off, pfn);
		}
	}

	if (!ret)
		pr_debug("mapped %s[%lu] at 0x%08lx (%u/%lu pages) "
			 "to vma 0x%08lx, page_prot 0x%llx\n",
			 file_dentry(file)->d_name.name, pgoff,
			 address, pages, vma_pages(vma), vma->vm_start,
			 (unsigned long long)pgprot_val(vma->vm_page_prot));
	return ret;

bailout:
	pr_debug("%s[%lu]: direct mmap impossible: %s\n",
		 file_dentry(file)->d_name.name, pgoff, bailout_reason);
	/* Didn't manage any direct map, but normal paging is still possible */
	return 0;
}
  386. #else /* CONFIG_MMU */
  387. static int cramfs_physmem_mmap(struct file *file, struct vm_area_struct *vma)
  388. {
  389. return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -ENOSYS;
  390. }
/*
 * nommu: return the physical address to map when the whole requested
 * range is directly mappable, -EINVAL for out-of-file requests, or
 * -ENOSYS so the caller falls back to a copying mmap.
 */
static unsigned long cramfs_physmem_get_unmapped_area(struct file *file,
			unsigned long addr, unsigned long len,
			unsigned long pgoff, unsigned long flags)
{
	struct inode *inode = file_inode(file);
	struct super_block *sb = inode->i_sb;
	struct cramfs_sb_info *sbi = CRAMFS_SB(sb);
	unsigned int pages, block_pages, max_pages, offset;

	pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	max_pages = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (pgoff >= max_pages || pages > max_pages - pgoff)
		return -EINVAL;

	/* Unlike the MMU case, a partial direct range is of no use here. */
	block_pages = pages;
	offset = cramfs_get_block_range(inode, pgoff, &block_pages);
	if (!offset || block_pages != pages)
		return -ENOSYS;

	addr = sbi->linear_phys_addr + offset;
	pr_debug("get_unmapped for %s ofs %#lx siz %lu at 0x%08lx\n",
		 file_dentry(file)->d_name.name, pgoff*PAGE_SIZE, len, addr);
	return addr;
}
/* Advertise to the nommu mmap layer what these files support. */
static unsigned int cramfs_physmem_mmap_capabilities(struct file *file)
{
	return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT |
	       NOMMU_MAP_READ | NOMMU_MAP_EXEC;
}
  417. #endif /* CONFIG_MMU */
/* File operations for regular files on directly-mapped (MTD) images. */
static const struct file_operations cramfs_physmem_fops = {
	.llseek			= generic_file_llseek,
	.read_iter		= generic_file_read_iter,
	.splice_read		= generic_file_splice_read,
	.mmap			= cramfs_physmem_mmap,
#ifndef CONFIG_MMU
	.get_unmapped_area	= cramfs_physmem_get_unmapped_area,
	.mmap_capabilities	= cramfs_physmem_mmap_capabilities,
#endif
};
/* Tear down a mount: undo the MTD mapping or release the block device. */
static void cramfs_kill_sb(struct super_block *sb)
{
	struct cramfs_sb_info *sbi = CRAMFS_SB(sb);

	if (IS_ENABLED(CONFIG_CRAMFS_MTD) && sb->s_mtd) {
		/* Only unpoint if a mapping was actually established. */
		if (sbi && sbi->mtd_point_size)
			mtd_unpoint(sb->s_mtd, 0, sbi->mtd_point_size);
		kill_mtd_super(sb);
	} else if (IS_ENABLED(CONFIG_CRAMFS_BLOCKDEV) && sb->s_bdev) {
		kill_block_super(sb);
	}
	kfree(sbi);	/* may be NULL if fill_super failed early */
}
/* cramfs is read-only: force any remount back to RDONLY. */
static int cramfs_remount(struct super_block *sb, int *flags, char *data)
{
	sync_filesystem(sb);
	*flags |= SB_RDONLY;
	return 0;
}
/*
 * Read and validate the on-disk superblock — checked at offset 0 and,
 * failing that, at offset 512 (images written after a boot sector) —
 * then cache its parameters in the per-mount cramfs_sb_info.
 * Returns 0 or -EINVAL.
 */
static int cramfs_read_super(struct super_block *sb,
			     struct cramfs_super *super, int silent)
{
	struct cramfs_sb_info *sbi = CRAMFS_SB(sb);
	unsigned long root_offset;

	/* We don't know the real size yet */
	sbi->size = PAGE_SIZE;

	/* Read the first block and get the superblock from it */
	mutex_lock(&read_mutex);
	memcpy(super, cramfs_read(sb, 0, sizeof(*super)), sizeof(*super));
	mutex_unlock(&read_mutex);

	/* Do sanity checks on the superblock */
	if (super->magic != CRAMFS_MAGIC) {
		/* check for wrong endianness */
		if (super->magic == CRAMFS_MAGIC_WEND) {
			if (!silent)
				pr_err("wrong endianness\n");
			return -EINVAL;
		}

		/* check at 512 byte offset */
		mutex_lock(&read_mutex);
		memcpy(super,
		       cramfs_read(sb, 512, sizeof(*super)),
		       sizeof(*super));
		mutex_unlock(&read_mutex);
		if (super->magic != CRAMFS_MAGIC) {
			if (super->magic == CRAMFS_MAGIC_WEND && !silent)
				pr_err("wrong endianness\n");
			else if (!silent)
				pr_err("wrong magic\n");
			return -EINVAL;
		}
	}

	/* get feature flags first */
	if (super->flags & ~CRAMFS_SUPPORTED_FLAGS) {
		pr_err("unsupported filesystem features\n");
		return -EINVAL;
	}

	/* Check that the root inode is in a sane state */
	if (!S_ISDIR(super->root.mode)) {
		pr_err("root is not a directory\n");
		return -EINVAL;
	}
	/* correct strange, hard-coded permissions of mkcramfs */
	super->root.mode |= 0555;

	root_offset = super->root.offset << 2;
	if (super->flags & CRAMFS_FLAG_FSID_VERSION_2) {
		sbi->size = super->size;
		sbi->blocks = super->fsid.blocks;
		sbi->files = super->fsid.files;
	} else {
		/* v1 superblocks record no size: assume the format maximum. */
		sbi->size = 1<<28;
		sbi->blocks = 0;
		sbi->files = 0;
	}
	sbi->magic = super->magic;
	sbi->flags = super->flags;

	if (root_offset == 0)
		pr_info("empty filesystem");
	else if (!(super->flags & CRAMFS_FLAG_SHIFTED_ROOT_OFFSET) &&
		 ((root_offset != sizeof(struct cramfs_super)) &&
		  (root_offset != 512 + sizeof(struct cramfs_super))))
	{
		pr_err("bad root offset %lu\n", root_offset);
		return -EINVAL;
	}

	return 0;
}
/* Common tail of both mount paths: install ops and hang the root dentry. */
static int cramfs_finalize_super(struct super_block *sb,
				 struct cramfs_inode *cramfs_root)
{
	struct inode *root;

	/* Set it all up.. */
	sb->s_flags |= SB_RDONLY;
	sb->s_op = &cramfs_ops;
	root = get_cramfs_inode(sb, cramfs_root, 0);
	if (IS_ERR(root))
		return PTR_ERR(root);
	sb->s_root = d_make_root(root);	/* consumes root even on failure */
	if (!sb->s_root)
		return -ENOMEM;
	return 0;
}
/* fill_super for block-device backed images. */
static int cramfs_blkdev_fill_super(struct super_block *sb, void *data,
				    int silent)
{
	struct cramfs_sb_info *sbi;
	struct cramfs_super super;
	int i, err;

	sbi = kzalloc(sizeof(struct cramfs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;
	sb->s_fs_info = sbi;	/* freed by cramfs_kill_sb(), even on error */

	/* Invalidate the read buffers on mount: think disk change.. */
	for (i = 0; i < READ_BUFFERS; i++)
		buffer_blocknr[i] = -1;

	err = cramfs_read_super(sb, &super, silent);
	if (err)
		return err;
	return cramfs_finalize_super(sb, &super.root);
}
/*
 * fill_super for MTD-backed images: mtd_point() the whole image into
 * memory so data can be read (and possibly mmap'ed) directly.
 */
static int cramfs_mtd_fill_super(struct super_block *sb, void *data,
				 int silent)
{
	struct cramfs_sb_info *sbi;
	struct cramfs_super super;
	int err;

	sbi = kzalloc(sizeof(struct cramfs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;
	sb->s_fs_info = sbi;	/* freed by cramfs_kill_sb(), even on error */

	/* Map only one page for now. Will remap it when fs size is known. */
	err = mtd_point(sb->s_mtd, 0, PAGE_SIZE, &sbi->mtd_point_size,
			&sbi->linear_virt_addr, &sbi->linear_phys_addr);
	if (err || sbi->mtd_point_size != PAGE_SIZE) {
		pr_err("unable to get direct memory access to mtd:%s\n",
		       sb->s_mtd->name);
		return err ? : -ENODATA;	/* partial map counts as failure */
	}

	pr_info("checking physical address %pap for linear cramfs image\n",
		&sbi->linear_phys_addr);

	/* Reads below this point go through the one-page direct mapping. */
	err = cramfs_read_super(sb, &super, silent);
	if (err)
		return err;

	/* Remap the whole filesystem now */
	pr_info("linear cramfs image on mtd:%s appears to be %lu KB in size\n",
		sb->s_mtd->name, sbi->size/1024);
	mtd_unpoint(sb->s_mtd, 0, PAGE_SIZE);
	err = mtd_point(sb->s_mtd, 0, sbi->size, &sbi->mtd_point_size,
			&sbi->linear_virt_addr, &sbi->linear_phys_addr);
	if (err || sbi->mtd_point_size != sbi->size) {
		pr_err("unable to get direct memory access to mtd:%s\n",
		       sb->s_mtd->name);
		return err ? : -ENODATA;
	}

	return cramfs_finalize_super(sb, &super.root);
}
  583. static int cramfs_statfs(struct dentry *dentry, struct kstatfs *buf)
  584. {
  585. struct super_block *sb = dentry->d_sb;
  586. u64 id = 0;
  587. if (sb->s_bdev)
  588. id = huge_encode_dev(sb->s_bdev->bd_dev);
  589. else if (sb->s_dev)
  590. id = huge_encode_dev(sb->s_dev);
  591. buf->f_type = CRAMFS_MAGIC;
  592. buf->f_bsize = PAGE_SIZE;
  593. buf->f_blocks = CRAMFS_SB(sb)->blocks;
  594. buf->f_bfree = 0;
  595. buf->f_bavail = 0;
  596. buf->f_files = CRAMFS_SB(sb)->files;
  597. buf->f_ffree = 0;
  598. buf->f_fsid.val[0] = (u32)id;
  599. buf->f_fsid.val[1] = (u32)(id >> 32);
  600. buf->f_namelen = CRAMFS_MAXPATHLEN;
  601. return 0;
  602. }
/*
 * Read a cramfs directory entry.
 */
static int cramfs_readdir(struct file *file, struct dir_context *ctx)
{
	struct inode *inode = file_inode(file);
	struct super_block *sb = inode->i_sb;
	char *buf;
	unsigned int offset;

	/* Offset within the thing. */
	if (ctx->pos >= inode->i_size)
		return 0;
	offset = ctx->pos;
	/* Directory entries are always 4-byte aligned */
	if (offset & 3)
		return -EINVAL;

	/* Scratch buffer: the name must be copied out while read_mutex
	   is held, since the cramfs_read() buffer can be recycled. */
	buf = kmalloc(CRAMFS_MAXPATHLEN, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	while (offset < inode->i_size) {
		struct cramfs_inode *de;
		unsigned long nextoffset;
		char *name;
		ino_t ino;
		umode_t mode;
		int namelen;

		mutex_lock(&read_mutex);
		de = cramfs_read(sb, OFFSET(inode) + offset, sizeof(*de)+CRAMFS_MAXPATHLEN);
		name = (char *)(de+1);

		/*
		 * Namelengths on disk are shifted by two
		 * and the name padded out to 4-byte boundaries
		 * with zeroes.
		 */
		namelen = de->namelen << 2;
		memcpy(buf, name, namelen);
		ino = cramino(de, OFFSET(inode) + offset);
		mode = de->mode;
		mutex_unlock(&read_mutex);	/* de/name are invalid past here */

		nextoffset = offset + sizeof(*de) + namelen;
		/* Strip the zero padding off the end of the copied name. */
		for (;;) {
			if (!namelen) {
				/* an all-zero name means a corrupt image */
				kfree(buf);
				return -EIO;
			}
			if (buf[namelen-1])
				break;
			namelen--;
		}
		if (!dir_emit(ctx, buf, namelen, ino, mode >> 12))
			break;

		ctx->pos = offset = nextoffset;
	}
	kfree(buf);
	return 0;
}
/*
 * Lookup and fill in the inode data..
 */
static struct dentry *cramfs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
{
	unsigned int offset = 0;
	struct inode *inode = NULL;	/* NULL => negative dentry */
	int sorted;

	mutex_lock(&read_mutex);
	sorted = CRAMFS_SB(dir->i_sb)->flags & CRAMFS_FLAG_SORTED_DIRS;
	while (offset < dir->i_size) {
		struct cramfs_inode *de;
		char *name;
		int namelen, retval;
		int dir_off = OFFSET(dir) + offset;

		de = cramfs_read(dir->i_sb, dir_off, sizeof(*de)+CRAMFS_MAXPATHLEN);
		name = (char *)(de+1);

		/* Try to take advantage of sorted directories */
		if (sorted && (dentry->d_name.name[0] < name[0]))
			break;

		namelen = de->namelen << 2;
		offset += sizeof(*de) + namelen;

		/* Quick check that the name is roughly the right length */
		if (((dentry->d_name.len + 3) & ~3) != namelen)
			continue;

		/* Strip the zero padding off the end of the on-disk name. */
		for (;;) {
			if (!namelen) {
				/* an all-zero name means a corrupt image */
				inode = ERR_PTR(-EIO);
				goto out;
			}
			if (name[namelen-1])
				break;
			namelen--;
		}
		if (namelen != dentry->d_name.len)
			continue;
		retval = memcmp(dentry->d_name.name, name, namelen);
		if (retval > 0)
			continue;
		if (!retval) {
			inode = get_cramfs_inode(dir->i_sb, de, dir_off);
			break;
		}
		/* else (retval < 0) */
		if (sorted)
			break;	/* past where the name would sort: not present */
	}
out:
	mutex_unlock(&read_mutex);
	return d_splice_alias(inode, dentry);
}
/*
 * Fill a page cache page from the (possibly compressed) image data.
 * Pages beyond the data blocks are zero-filled.  Always returns 0;
 * failures are reported via the page error flag instead.
 */
static int cramfs_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	u32 maxblock;
	int bytes_filled;
	void *pgdata;

	maxblock = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	bytes_filled = 0;
	pgdata = kmap(page);

	if (page->index < maxblock) {
		struct super_block *sb = inode->i_sb;
		u32 blkptr_offset = OFFSET(inode) + page->index * 4;
		u32 block_ptr, block_start, block_len;
		bool uncompressed, direct;

		mutex_lock(&read_mutex);
		/* Fetch this page's block pointer and split off its flags. */
		block_ptr = *(u32 *) cramfs_read(sb, blkptr_offset, 4);
		uncompressed = (block_ptr & CRAMFS_BLK_FLAG_UNCOMPRESSED);
		direct = (block_ptr & CRAMFS_BLK_FLAG_DIRECT_PTR);
		block_ptr &= ~CRAMFS_BLK_FLAGS;

		if (direct) {
			/*
			 * The block pointer is an absolute start pointer,
			 * shifted by 2 bits. The size is included in the
			 * first 2 bytes of the data block when compressed,
			 * or PAGE_SIZE otherwise.
			 */
			block_start = block_ptr << CRAMFS_BLK_DIRECT_PTR_SHIFT;
			if (uncompressed) {
				block_len = PAGE_SIZE;
				/* if last block: cap to file length */
				if (page->index == maxblock - 1)
					block_len =
						offset_in_page(inode->i_size);
			} else {
				block_len = *(u16 *)
					cramfs_read(sb, block_start, 2);
				block_start += 2;
			}
		} else {
			/*
			 * The block pointer indicates one past the end of
			 * the current block (start of next block). If this
			 * is the first block then it starts where the block
			 * pointer table ends, otherwise its start comes
			 * from the previous block's pointer.
			 */
			block_start = OFFSET(inode) + maxblock * 4;
			if (page->index)
				block_start = *(u32 *)
					cramfs_read(sb, blkptr_offset - 4, 4);
			/* Beware... previous ptr might be a direct ptr */
			if (unlikely(block_start & CRAMFS_BLK_FLAG_DIRECT_PTR)) {
				/* See comments on earlier code. */
				u32 prev_start = block_start;
				block_start = prev_start & ~CRAMFS_BLK_FLAGS;
				block_start <<= CRAMFS_BLK_DIRECT_PTR_SHIFT;
				/* Skip over the previous block's data to find
				   where this block begins. */
				if (prev_start & CRAMFS_BLK_FLAG_UNCOMPRESSED) {
					block_start += PAGE_SIZE;
				} else {
					block_len = *(u16 *)
						cramfs_read(sb, block_start, 2);
					block_start += 2 + block_len;
				}
			}
			block_start &= ~CRAMFS_BLK_FLAGS;
			block_len = block_ptr - block_start;
		}

		if (block_len == 0)
			; /* hole */
		else if (unlikely(block_len > 2*PAGE_SIZE ||
				  (uncompressed && block_len > PAGE_SIZE))) {
			mutex_unlock(&read_mutex);
			pr_err("bad data blocksize %u\n", block_len);
			goto err;
		} else if (uncompressed) {
			memcpy(pgdata,
			       cramfs_read(sb, block_start, block_len),
			       block_len);
			bytes_filled = block_len;
		} else {
			bytes_filled = cramfs_uncompress_block(pgdata,
				 PAGE_SIZE,
				 cramfs_read(sb, block_start, block_len),
				 block_len);
		}
		mutex_unlock(&read_mutex);
		if (unlikely(bytes_filled < 0))
			goto err;	/* decompression failure */
	}

	/* Zero the rest of the page and hand it to the page cache. */
	memset(pgdata + bytes_filled, 0, PAGE_SIZE - bytes_filled);
	flush_dcache_page(page);
	kunmap(page);
	SetPageUptodate(page);
	unlock_page(page);
	return 0;

err:
	kunmap(page);
	ClearPageUptodate(page);
	SetPageError(page);
	unlock_page(page);
	return 0;
}
/* Page cache operations: read-only, so readpage is all we need. */
static const struct address_space_operations cramfs_aops = {
	.readpage = cramfs_readpage
};
/*
 * Our operations:
 */

/*
 * A directory can only readdir
 */
static const struct file_operations cramfs_directory_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.iterate_shared	= cramfs_readdir,
};

static const struct inode_operations cramfs_dir_inode_operations = {
	.lookup		= cramfs_lookup,
};

/* Read-only filesystem: no write/evict hooks are needed. */
static const struct super_operations cramfs_ops = {
	.remount_fs	= cramfs_remount,
	.statfs		= cramfs_statfs,
};
/* Try an MTD mount first (enables direct mapping), then fall back to a
 * block device mount.  -ENOPROTOOPT if neither backend is configured. */
static struct dentry *cramfs_mount(struct file_system_type *fs_type, int flags,
				   const char *dev_name, void *data)
{
	struct dentry *ret = ERR_PTR(-ENOPROTOOPT);

	if (IS_ENABLED(CONFIG_CRAMFS_MTD)) {
		ret = mount_mtd(fs_type, flags, dev_name, data,
				cramfs_mtd_fill_super);
		if (!IS_ERR(ret))
			return ret;
	}
	if (IS_ENABLED(CONFIG_CRAMFS_BLOCKDEV)) {
		ret = mount_bdev(fs_type, flags, dev_name, data,
				 cramfs_blkdev_fill_super);
	}
	return ret;
}
/* Filesystem type registration. */
static struct file_system_type cramfs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "cramfs",
	.mount		= cramfs_mount,
	.kill_sb	= cramfs_kill_sb,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("cramfs");
  857. static int __init init_cramfs_fs(void)
  858. {
  859. int rv;
  860. rv = cramfs_uncompress_init();
  861. if (rv < 0)
  862. return rv;
  863. rv = register_filesystem(&cramfs_fs_type);
  864. if (rv < 0)
  865. cramfs_uncompress_exit();
  866. return rv;
  867. }
/* Module exit: release the decompressor and unregister the filesystem. */
static void __exit exit_cramfs_fs(void)
{
	cramfs_uncompress_exit();
	unregister_filesystem(&cramfs_fs_type);
}

module_init(init_cramfs_fs)
module_exit(exit_cramfs_fs)
  875. MODULE_LICENSE("GPL");