/*
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 *
 * This contains most of the x86 vDSO kernel-side code.
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/cpu.h>
#include <linux/ptrace.h>
#include <asm/pvclock.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/vvar.h>
#include <asm/page.h>
#include <asm/desc.h>
#include <asm/cpufeature.h>
#include <asm/mshyperv.h>

#if defined(CONFIG_X86_64)
unsigned int __read_mostly vdso64_enabled = 1;
#endif

void __init init_vdso_image(const struct vdso_image *image)
{
	BUG_ON(image->size % PAGE_SIZE != 0);

	apply_alternatives((struct alt_instr *)(image->data + image->alt),
			   (struct alt_instr *)(image->data + image->alt +
						image->alt_len));
}

struct linux_binprm;

static int vdso_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	const struct vdso_image *image = vma->vm_mm->context.vdso_image;

	if (!image || (vmf->pgoff << PAGE_SHIFT) >= image->size)
		return VM_FAULT_SIGBUS;

	vmf->page = virt_to_page(image->data + (vmf->pgoff << PAGE_SHIFT));
	get_page(vmf->page);
	return 0;
}

static void vdso_fix_landing(const struct vdso_image *image,
			     struct vm_area_struct *new_vma)
{
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	if (in_ia32_syscall() && image == &vdso_image_32) {
		struct pt_regs *regs = current_pt_regs();
		unsigned long vdso_land = image->sym_int80_landing_pad;
		unsigned long old_land_addr = vdso_land +
			(unsigned long)current->mm->context.vdso;

		/* Fixing userspace landing - look at do_fast_syscall_32 */
		if (regs->ip == old_land_addr)
			regs->ip = new_vma->vm_start + vdso_land;
	}
#endif
}
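
/*
 * A note on the fixup above: it matters when the vdso is moved while a
 * task sits at the int80 landing pad, e.g. a checkpoint/restore tool
 * relocating the vdso with mremap(); the saved user IP must follow the
 * new mapping or the task would resume at a stale address.
 */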

static int vdso_mremap(const struct vm_special_mapping *sm,
		       struct vm_area_struct *new_vma)
{
	unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
	const struct vdso_image *image = current->mm->context.vdso_image;

	if (image->size != new_size)
		return -EINVAL;

	vdso_fix_landing(image, new_vma);
	current->mm->context.vdso = (void __user *)new_vma->vm_start;

	return 0;
}

static int vvar_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	const struct vdso_image *image = vma->vm_mm->context.vdso_image;
	long sym_offset;
	int ret = -EFAULT;

	if (!image)
		return VM_FAULT_SIGBUS;

	sym_offset = (long)(vmf->pgoff << PAGE_SHIFT) +
		image->sym_vvar_start;

	/*
	 * Sanity check: a symbol offset of zero means that the page
	 * does not exist for this vdso image, not that the page is at
	 * offset zero relative to the text mapping.  This should be
	 * impossible here, because sym_offset should only be zero for
	 * the page past the end of the vvar mapping.
	 */
	if (sym_offset == 0)
		return VM_FAULT_SIGBUS;

	if (sym_offset == image->sym_vvar_page) {
		ret = vm_insert_pfn(vma, vmf->address,
				    __pa_symbol(&__vvar_page) >> PAGE_SHIFT);
	} else if (sym_offset == image->sym_pvclock_page) {
		struct pvclock_vsyscall_time_info *pvti =
			pvclock_get_pvti_cpu0_va();

		if (pvti && vclock_was_used(VCLOCK_PVCLOCK)) {
			ret = vm_insert_pfn_prot(
				vma,
				vmf->address,
				__pa(pvti) >> PAGE_SHIFT,
				pgprot_decrypted(vma->vm_page_prot));
		}
	} else if (sym_offset == image->sym_hvclock_page) {
		struct ms_hyperv_tsc_page *tsc_pg = hv_get_tsc_page();

		if (tsc_pg && vclock_was_used(VCLOCK_HVCLOCK))
			ret = vm_insert_pfn(vma, vmf->address,
					    vmalloc_to_pfn(tsc_pg));
	}
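
	/*
	 * -EBUSY from the vm_insert_pfn() calls above indicates that the
	 * PTE was already populated (e.g. by a racing fault), so the
	 * mapping is in place and NOPAGE is still the right answer.
	 */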
	if (ret == 0 || ret == -EBUSY)
		return VM_FAULT_NOPAGE;

	return VM_FAULT_SIGBUS;
}

static const struct vm_special_mapping vdso_mapping = {
	.name = "[vdso]",
	.fault = vdso_fault,
	.mremap = vdso_mremap,
};
static const struct vm_special_mapping vvar_mapping = {
	.name = "[vvar]",
	.fault = vvar_fault,
};

/*
 * Add vdso and vvar mappings to current process.
 * @image	- blob to map
 * @addr	- request a specific address (zero to map at free addr)
 */
static int map_vdso(const struct vdso_image *image, unsigned long addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long text_start;
	int ret = 0;

	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;

	addr = get_unmapped_area(NULL, addr,
				 image->size - image->sym_vvar_start, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	text_start = addr - image->sym_vvar_start;
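
	/*
	 * Layout sketch: sym_vvar_start is negative, so the vvar pages
	 * sit below the vdso text inside the single area reserved above:
	 *
	 *   addr                        text_start
	 *   |<- -sym_vvar_start bytes ->|<- image->size bytes ->|
	 *         [vvar] mapping              [vdso] text
	 */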

	/*
	 * MAYWRITE to allow gdb to COW and set breakpoints
	 */
	vma = _install_special_mapping(mm,
				       text_start,
				       image->size,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       &vdso_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

	vma = _install_special_mapping(mm,
				       addr,
				       -image->sym_vvar_start,
				       VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
				       VM_PFNMAP,
				       &vvar_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		do_munmap(mm, text_start, image->size, NULL);
	} else {
		current->mm->context.vdso = (void __user *)text_start;
		current->mm->context.vdso_image = image;
	}

up_fail:
	up_write(&mm->mmap_sem);
	return ret;
}

#ifdef CONFIG_X86_64
/*
 * Put the vdso above the (randomized) stack with another randomized
 * offset.  This way there is no hole in the middle of address space.
 * To save memory make sure it is still in the same PTE as the stack
 * top.  This doesn't give that many random bits.
 *
 * Note that this algorithm is imperfect: the distribution of the vdso
 * start address within a PMD is biased toward the end.
 *
 * Only used for the 64-bit and x32 vdsos.
 */
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
	unsigned long addr, end;
	unsigned offset;

	/*
	 * Round up the start address.  It can start out unaligned as a result
	 * of stack start randomization.
	 */
	start = PAGE_ALIGN(start);

	/* Round the lowest possible end address up to a PMD boundary. */
	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
	if (end >= TASK_SIZE_MAX)
		end = TASK_SIZE_MAX;
	end -= len;

	if (end > start) {
		offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
		addr = start + (offset << PAGE_SHIFT);
	} else {
		addr = start;
	}

	/*
	 * Forcibly align the final address in case we have a hardware
	 * issue that requires alignment for performance reasons.
	 */
	addr = align_vdso_addr(addr);

	return addr;
}
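
/*
 * Worked example for the math above, with hypothetical numbers and
 * assuming 4 KiB pages and 2 MiB PMDs: start = 0x7ffd00001000 (already
 * page-aligned) and len = 0x3000.  Then
 * end = (0x7ffd00004000 + 0x1fffff) & ~0x1fffff = 0x7ffd00200000,
 * and after "end -= len" it is 0x7ffd001fd000.  That leaves
 * ((end - start) >> PAGE_SHIFT) + 1 = 509 equally likely page-aligned
 * start addresses in [start, end].
 */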

static int map_vdso_randomized(const struct vdso_image *image)
{
	unsigned long addr = vdso_addr(current->mm->start_stack,
				       image->size - image->sym_vvar_start);

	return map_vdso(image, addr);
}
#endif
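
/*
 * map_vdso_once() below is reachable via arch_prctl(ARCH_MAP_VDSO_*);
 * e.g. checkpoint/restore uses it to map a vdso blob into a restored
 * task at a chosen address, at most once per mm.
 */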
int map_vdso_once(const struct vdso_image *image, unsigned long addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	down_write(&mm->mmap_sem);
	/*
	 * Check if we have already mapped the vdso blob - fail to prevent
	 * userspace from abusing install_special_mapping, which may not
	 * do accounting and rlimits right.
	 * We could search the VMAs near context.vdso, but this is a slow
	 * path, so let's explicitly check all VMAs to be completely sure.
	 */
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma_is_special_mapping(vma, &vdso_mapping) ||
				vma_is_special_mapping(vma, &vvar_mapping)) {
			up_write(&mm->mmap_sem);
			return -EEXIST;
		}
	}
	up_write(&mm->mmap_sem);

	return map_vdso(image, addr);
}

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
static int load_vdso32(void)
{
	if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
		return 0;

	return map_vdso(&vdso_image_32, 0);
}
#endif

#ifdef CONFIG_X86_64
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	if (!vdso64_enabled)
		return 0;

	return map_vdso_randomized(&vdso_image_64);
}

#ifdef CONFIG_COMPAT
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
				       int uses_interp)
{
#ifdef CONFIG_X86_X32_ABI
	if (test_thread_flag(TIF_X32)) {
		if (!vdso64_enabled)
			return 0;

		return map_vdso_randomized(&vdso_image_x32);
	}
#endif
#ifdef CONFIG_IA32_EMULATION
	return load_vdso32();
#else
	return 0;
#endif
}
#endif
#else
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	return load_vdso32();
}
#endif

#ifdef CONFIG_X86_64
static __init int vdso_setup(char *s)
{
	vdso64_enabled = simple_strtoul(s, NULL, 0);
	return 0;
}
__setup("vdso=", vdso_setup);
#endif

#ifdef CONFIG_X86_64
static void vgetcpu_cpu_init(void *arg)
{
	int cpu = smp_processor_id();
	struct desc_struct d = { };
	unsigned long node = 0;
#ifdef CONFIG_NUMA
	node = cpu_to_node(cpu);
#endif
	if (static_cpu_has(X86_FEATURE_RDTSCP))
		write_rdtscp_aux((node << 12) | cpu);

	/*
	 * Store cpu number in limit so that it can be loaded
	 * quickly in user space in vgetcpu. (12 bits for the CPU
	 * and 8 bits for the node)
	 */
	d.limit0 = cpu | ((node & 0xf) << 12);
	d.limit1 = node >> 4;
	d.type = 5;		/* RO data, expand down, accessed */
	d.dpl = 3;		/* Visible to user code */
	d.s = 1;		/* Not a system segment */
	d.p = 1;		/* Present */
	d.d = 1;		/* 32-bit */

	write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
}
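
/*
 * Userspace decode sketch: the vdso's getcpu either executes RDTSCP
 * (reading back the TSC_AUX value written above) or uses LSL on the
 * per-cpu segment (reading the 20-bit limit built above).  Both
 * encodings are identical, so in either case:
 *
 *	cpu  = val & 0xfff;
 *	node = val >> 12;
 */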

static int vgetcpu_online(unsigned int cpu)
{
	return smp_call_function_single(cpu, vgetcpu_cpu_init, NULL, 1);
}

static int __init init_vdso(void)
{
	init_vdso_image(&vdso_image_64);

#ifdef CONFIG_X86_X32_ABI
	init_vdso_image(&vdso_image_x32);
#endif

	/* notifier priority > KVM */
	return cpuhp_setup_state(CPUHP_AP_X86_VDSO_VMA_ONLINE,
				 "x86/vdso/vma:online", vgetcpu_online, NULL);
}
subsys_initcall(init_vdso);
#endif /* CONFIG_X86_64 */