  1. /*
  2. * This file is subject to the terms and conditions of the GNU General Public
  3. * License. See the file "COPYING" in the main directory of this archive
  4. * for more details.
  5. *
  6. * Copyright (C) 2011 Wind River Systems,
  7. * written by Ralf Baechle <ralf@linux-mips.org>
  8. */
  9. #include <linux/compiler.h>
  10. #include <linux/errno.h>
  11. #include <linux/mm.h>
  12. #include <linux/mman.h>
  13. #include <linux/module.h>
  14. #include <linux/personality.h>
  15. #include <linux/random.h>
  16. #include <linux/sched.h>
/*
 * Minimum alignment required for shared mappings so that virtual aliases
 * of the same page land in the same cache colour. PAGE_SIZE - 1 imposes
 * no constraint beyond normal page alignment.
 * NOTE(review): presumably enlarged elsewhere for aliasing D-caches —
 * confirm against the CPU cache setup code.
 */
unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
EXPORT_SYMBOL(shm_align_mask);
/* gap between mmap and stack */
#define MIN_GAP (128*1024*1024UL)
#define MAX_GAP ((TASK_SIZE)/6*5)

/*
 * Decide whether this process should use the legacy (bottom-up) mmap
 * layout. True when the task asked for a compat layout via personality,
 * when the stack rlimit is unlimited (a top-down mmap base cannot be
 * placed safely below an unbounded stack), or when the legacy layout
 * was requested globally via sysctl.
 */
static int mmap_is_legacy(void)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
		return 1;

	return sysctl_legacy_va_layout;
}
  30. static unsigned long mmap_base(unsigned long rnd)
  31. {
  32. unsigned long gap = rlimit(RLIMIT_STACK);
  33. if (gap < MIN_GAP)
  34. gap = MIN_GAP;
  35. else if (gap > MAX_GAP)
  36. gap = MAX_GAP;
  37. return PAGE_ALIGN(TASK_SIZE - gap - rnd);
  38. }
/*
 * COLOUR_ALIGN(addr, pgoff): round @addr up to the next cache-colour
 * boundary, then add the colour offset implied by the file offset
 * @pgoff, so that the resulting virtual address aliases to the same
 * cache colour as the backing page.
 */
#define COLOUR_ALIGN(addr, pgoff) \
((((addr) + shm_align_mask) & ~shm_align_mask) + \
(((pgoff) << PAGE_SHIFT) & shm_align_mask))

/* Search direction used by arch_get_unmapped_area_common(). */
enum mmap_allocation_direction {UP, DOWN};
/*
 * Common worker for arch_get_unmapped_area{,_topdown}(): find a free
 * virtual range of @len bytes, honouring cache-colour alignment for
 * shared/file mappings, searching bottom-up (UP) or top-down (DOWN)
 * per @dir. Returns the chosen address, or a negative errno.
 */
static unsigned long arch_get_unmapped_area_common(struct file *filp,
	unsigned long addr0, unsigned long len, unsigned long pgoff,
	unsigned long flags, enum mmap_allocation_direction dir)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr = addr0;
	int do_color_align;
	struct vm_unmapped_area_info info;

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		/* Even MAP_FIXED mappings must reside within TASK_SIZE */
		if (TASK_SIZE - len < addr)
			return -EINVAL;

		/*
		 * We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	/* File-backed or shared mappings must be colour-aligned. */
	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;

	/* requesting a specific address: honour the hint if it fits */
	if (addr) {
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	/*
	 * length, align_mask and align_offset stay valid for the
	 * bottom-up fallback below; only flags/limits are rewritten.
	 */
	info.length = len;
	info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;

	if (dir == DOWN) {
		info.flags = VM_UNMAPPED_AREA_TOPDOWN;
		info.low_limit = PAGE_SIZE;
		info.high_limit = mm->mmap_base;
		addr = vm_unmapped_area(&info);

		/* Page-aligned result means success; errnos are unaligned. */
		if (!(addr & ~PAGE_MASK))
			return addr;

		/*
		 * A failed mmap() very likely causes application failure,
		 * so fall back to the bottom-up function here. This scenario
		 * can happen with large stack limits and large mmap()
		 * allocations.
		 */
	}

	info.flags = 0;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	return vm_unmapped_area(&info);
}
  103. unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
  104. unsigned long len, unsigned long pgoff, unsigned long flags)
  105. {
  106. return arch_get_unmapped_area_common(filp,
  107. addr0, len, pgoff, flags, UP);
  108. }
  109. /*
  110. * There is no need to export this but sched.h declares the function as
  111. * extern so making it static here results in an error.
  112. */
  113. unsigned long arch_get_unmapped_area_topdown(struct file *filp,
  114. unsigned long addr0, unsigned long len, unsigned long pgoff,
  115. unsigned long flags)
  116. {
  117. return arch_get_unmapped_area_common(filp,
  118. addr0, len, pgoff, flags, DOWN);
  119. }
  120. unsigned long arch_mmap_rnd(void)
  121. {
  122. unsigned long rnd;
  123. rnd = (unsigned long)get_random_int();
  124. rnd <<= PAGE_SHIFT;
  125. if (TASK_IS_32BIT_ADDR)
  126. rnd &= 0xfffffful;
  127. else
  128. rnd &= 0xffffffful;
  129. return rnd;
  130. }
  131. void arch_pick_mmap_layout(struct mm_struct *mm)
  132. {
  133. unsigned long random_factor = 0UL;
  134. if (current->flags & PF_RANDOMIZE)
  135. random_factor = arch_mmap_rnd();
  136. if (mmap_is_legacy()) {
  137. mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
  138. mm->get_unmapped_area = arch_get_unmapped_area;
  139. } else {
  140. mm->mmap_base = mmap_base(random_factor);
  141. mm->get_unmapped_area = arch_get_unmapped_area_topdown;
  142. }
  143. }
  144. static inline unsigned long brk_rnd(void)
  145. {
  146. unsigned long rnd = get_random_int();
  147. rnd = rnd << PAGE_SHIFT;
  148. /* 8MB for 32bit, 256MB for 64bit */
  149. if (TASK_IS_32BIT_ADDR)
  150. rnd = rnd & 0x7ffffful;
  151. else
  152. rnd = rnd & 0xffffffful;
  153. return rnd;
  154. }
  155. unsigned long arch_randomize_brk(struct mm_struct *mm)
  156. {
  157. unsigned long base = mm->brk;
  158. unsigned long ret;
  159. ret = PAGE_ALIGN(base + brk_rnd());
  160. if (ret < mm->brk)
  161. return mm->brk;
  162. return ret;
  163. }
  164. int __virt_addr_valid(const volatile void *kaddr)
  165. {
  166. return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
  167. }
  168. EXPORT_SYMBOL_GPL(__virt_addr_valid);