// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/arm/mm/mmap.c
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shm.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/io.h>
#include <linux/personality.h>
#include <linux/random.h>

#include <asm/cachetype.h>
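
/*
 * COLOUR_ALIGN(addr, pgoff) rounds addr up to the next SHMLBA boundary,
 * then adds the cache colour of pgoff within SHMLBA, so the returned
 * address maps the requested page at the same colour as every other
 * mapping of it.  E.g. with SHMLBA = 0x4000 and PAGE_SHIFT = 12:
 * COLOUR_ALIGN(0x5000, 1) = 0x8000 + 0x1000 = 0x9000 (colour 1).
 */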
#define COLOUR_ALIGN(addr,pgoff)                \
        ((((addr)+SHMLBA-1)&~(SHMLBA-1)) +      \
         (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))

/* gap between mmap and stack */
#define MIN_GAP         (128*1024*1024UL)
#define MAX_GAP         ((STACK_TOP)/6*5)
#define STACK_RND_MASK  (0x7ff >> (PAGE_SHIFT - 12))
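
/*
 * MIN_GAP keeps at least 128MiB between the mmap base and STACK_TOP,
 * while MAX_GAP caps the reservation at 5/6 of STACK_TOP.  STACK_RND_MASK
 * matches the default ELF stack randomization range: with 4KiB pages,
 * 0x7ff pages is just under 8MiB.
 */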

static int mmap_is_legacy(struct rlimit *rlim_stack)
{
        if (current->personality & ADDR_COMPAT_LAYOUT)
                return 1;

        if (rlim_stack->rlim_cur == RLIM_INFINITY)
                return 1;

        return sysctl_legacy_va_layout;
}

static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
{
        unsigned long gap = rlim_stack->rlim_cur;
        unsigned long pad = stack_guard_gap;

        /* Account for stack randomization if necessary */
        if (current->flags & PF_RANDOMIZE)
                pad += (STACK_RND_MASK << PAGE_SHIFT);

        /* Values close to RLIM_INFINITY can overflow. */
        if (gap + pad > gap)
                gap += pad;

        if (gap < MIN_GAP)
                gap = MIN_GAP;
        else if (gap > MAX_GAP)
                gap = MAX_GAP;

        return PAGE_ALIGN(STACK_TOP - gap - rnd);
}
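
/*
 * Worked example for mmap_base(), assuming typical defaults (4KiB pages,
 * stack_guard_gap of 256 pages = 1MiB, RLIMIT_STACK = 8MiB, PF_RANDOMIZE
 * set): gap = 8MiB + 1MiB guard + ~8MiB randomization pad, which is still
 * below MIN_GAP, so the base ends up PAGE_ALIGN(STACK_TOP - 128MiB - rnd).
 */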

/*
 * We need to ensure that shared mappings are correctly aligned to
 * avoid aliasing issues with VIPT caches.  We need to ensure that
 * a specific page of an object is always mapped at a multiple of
 * SHMLBA bytes.
 *
 * We unconditionally provide this function for all cases, however
 * in the VIVT case, we optimise out the alignment rules.
 */
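/*
 * On ARM, SHMLBA is 4 * PAGE_SIZE, so with 4KiB pages every mapping of
 * a given page of a shared object must land at the same address modulo
 * 16KiB, keeping all virtual aliases on the same cache colour.
 */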
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        int do_align = 0;
        int aliasing = cache_is_vipt_aliasing();
        struct vm_unmapped_area_info info;

        /*
         * We only need to do colour alignment if either the I or D
         * caches alias.
         */
        if (aliasing)
                do_align = filp || (flags & MAP_SHARED);

        /*
         * We enforce the MAP_FIXED case.
         */
        if (flags & MAP_FIXED) {
                if (aliasing && flags & MAP_SHARED &&
                    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
                        return -EINVAL;
                return addr;
        }

        if (len > TASK_SIZE)
                return -ENOMEM;

        if (addr) {
                if (do_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
                else
                        addr = PAGE_ALIGN(addr);

                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }

        info.flags = 0;
        info.length = len;
        info.low_limit = mm->mmap_base;
        info.high_limit = TASK_SIZE;
        info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
        info.align_offset = pgoff << PAGE_SHIFT;
        return vm_unmapped_area(&info);
}
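
/*
 * Top-down variant: search for a free gap below mm->mmap_base first
 * (the default non-legacy layout), falling back to a bottom-up search
 * above it if that window is exhausted.
 */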
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                        const unsigned long len, const unsigned long pgoff,
                        const unsigned long flags)
{
        struct vm_area_struct *vma;
        struct mm_struct *mm = current->mm;
        unsigned long addr = addr0;
        int do_align = 0;
        int aliasing = cache_is_vipt_aliasing();
        struct vm_unmapped_area_info info;

        /*
         * We only need to do colour alignment if either the I or D
         * caches alias.
         */
        if (aliasing)
                do_align = filp || (flags & MAP_SHARED);

        /* requested length too big for entire address space */
        if (len > TASK_SIZE)
                return -ENOMEM;

        if (flags & MAP_FIXED) {
                if (aliasing && flags & MAP_SHARED &&
                    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
                        return -EINVAL;
                return addr;
        }

        /* requesting a specific address */
        if (addr) {
                if (do_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
                else
                        addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }

        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
        info.length = len;
        info.low_limit = FIRST_USER_ADDRESS;
        info.high_limit = mm->mmap_base;
        info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
        info.align_offset = pgoff << PAGE_SHIFT;
        addr = vm_unmapped_area(&info);

        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
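        /*
         * vm_unmapped_area() returns a page-aligned address on success;
         * bits set below PAGE_MASK therefore indicate a negative errno.
         */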
        if (addr & ~PAGE_MASK) {
                VM_BUG_ON(addr != -ENOMEM);
                info.flags = 0;
                info.low_limit = mm->mmap_base;
                info.high_limit = TASK_SIZE;
                addr = vm_unmapped_area(&info);
        }

        return addr;
}

unsigned long arch_mmap_rnd(void)
{
        unsigned long rnd;

        rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);

        return rnd << PAGE_SHIFT;
}
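
/*
 * mmap_rnd_bits is set from CONFIG_ARCH_MMAP_RND_BITS (and, where the
 * sysctl is enabled, tunable via vm.mmap_rnd_bits); with N bits of
 * entropy the mmap base moves by up to (2^N - 1) pages.
 */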

void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
        unsigned long random_factor = 0UL;

        if (current->flags & PF_RANDOMIZE)
                random_factor = arch_mmap_rnd();

        if (mmap_is_legacy(rlim_stack)) {
                mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
                mm->get_unmapped_area = arch_get_unmapped_area;
        } else {
                mm->mmap_base = mmap_base(random_factor, rlim_stack);
                mm->get_unmapped_area = arch_get_unmapped_area_topdown;
        }
}
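
/*
 * With the legacy layout, mappings are placed bottom-up starting at
 * TASK_UNMAPPED_BASE; with the default layout they are placed top-down
 * starting just below the stack gap computed by mmap_base().
 */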

/*
 * You really shouldn't be using read() or write() on /dev/mem.  This
 * might go away in the future.
 */
int valid_phys_addr_range(phys_addr_t addr, size_t size)
{
        if (addr < PHYS_OFFSET)
                return 0;
        if (addr + size > __pa(high_memory - 1) + 1)
                return 0;

        return 1;
}

/*
 * Do not allow /dev/mem mappings beyond the supported physical range.
 */
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
        return (pfn + (size >> PAGE_SHIFT)) <= (1 + (PHYS_MASK >> PAGE_SHIFT));
}
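
/*
 * The comparison above is done in page-frame numbers: the requested
 * pfn plus the number of pages must not reach past the pfn of the last
 * addressable physical page, i.e. (PHYS_MASK >> PAGE_SHIFT) + 1.
 */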

#ifdef CONFIG_STRICT_DEVMEM

#include <linux/ioport.h>

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain
 * address is valid. The argument is a physical page number.
 * We mimic x86 here by disallowing access to system RAM as well as
 * device-exclusive MMIO regions. This effectively disables read()/write()
 * on /dev/mem.
 */
int devmem_is_allowed(unsigned long pfn)
{
        if (iomem_is_exclusive(pfn << PAGE_SHIFT))
                return 0;
        if (!page_is_ram(pfn))
                return 1;
        return 0;
}

#endif