/*
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 *
 * This contains most of the x86 vDSO kernel-side code.
 */

#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/cpu.h>
#include <linux/ptrace.h>
#include <asm/pvclock.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/vvar.h>
#include <asm/page.h>
#include <asm/desc.h>
#include <asm/cpufeature.h>
#include <asm/mshyperv.h>

#if defined(CONFIG_X86_64)
unsigned int __read_mostly vdso64_enabled = 1;
#endif
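
/*
 * Sanity-check the vdso blob and patch its alternative-instruction
 * sites once at boot.
 */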
void __init init_vdso_image(const struct vdso_image *image)
{
        BUG_ON(image->size % PAGE_SIZE != 0);

        apply_alternatives((struct alt_instr *)(image->data + image->alt),
                           (struct alt_instr *)(image->data + image->alt +
                                                image->alt_len));
}

struct linux_binprm;
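
/*
 * Fault handler for the [vdso] text mapping: return the page of the
 * vdso image that backs the faulting offset, taking a reference on it.
 */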
static int vdso_fault(const struct vm_special_mapping *sm,
                      struct vm_area_struct *vma, struct vm_fault *vmf)
{
        const struct vdso_image *image = vma->vm_mm->context.vdso_image;

        if (!image || (vmf->pgoff << PAGE_SHIFT) >= image->size)
                return VM_FAULT_SIGBUS;

        vmf->page = virt_to_page(image->data + (vmf->pgoff << PAGE_SHIFT));
        get_page(vmf->page);

        return 0;
}
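
/*
 * If a 32-bit task is sitting on the int80 landing pad while its vdso
 * is moved by mremap(), rewrite the saved user IP so the syscall
 * returns into the relocated vdso rather than the old, now-stale
 * address.
 */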
static void vdso_fix_landing(const struct vdso_image *image,
                             struct vm_area_struct *new_vma)
{
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
        if (in_ia32_syscall() && image == &vdso_image_32) {
                struct pt_regs *regs = current_pt_regs();
                unsigned long vdso_land = image->sym_int80_landing_pad;
                unsigned long old_land_addr = vdso_land +
                        (unsigned long)current->mm->context.vdso;

                /* Fix up the userspace landing address; see do_fast_syscall_32(). */
                if (regs->ip == old_land_addr)
                        regs->ip = new_vma->vm_start + vdso_land;
        }
#endif
}
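
/*
 * mremap() callback for the vdso: the mapping may only move wholesale
 * (never split or resized), so it suffices to record the new base in
 * mm->context.
 */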
static int vdso_mremap(const struct vm_special_mapping *sm,
                       struct vm_area_struct *new_vma)
{
        unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
        const struct vdso_image *image = current->mm->context.vdso_image;

        if (image->size != new_size)
                return -EINVAL;

        vdso_fix_landing(image, new_vma);
        current->mm->context.vdso = (void __user *)new_vma->vm_start;

        return 0;
}
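
/*
 * Fault handler for the [vvar] data mapping. Every page here is shared
 * kernel data (the vvar page, the pvclock page, the Hyper-V TSC page)
 * inserted by PFN, since none of them is an ordinary page-cache page.
 */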
static int vvar_fault(const struct vm_special_mapping *sm,
                      struct vm_area_struct *vma, struct vm_fault *vmf)
{
        const struct vdso_image *image = vma->vm_mm->context.vdso_image;
        long sym_offset;
        int ret = -EFAULT;

        if (!image)
                return VM_FAULT_SIGBUS;

        sym_offset = (long)(vmf->pgoff << PAGE_SHIFT) +
                image->sym_vvar_start;

        /*
         * Sanity check: a symbol offset of zero means that the page
         * does not exist for this vdso image, not that the page is at
         * offset zero relative to the text mapping. This should be
         * impossible here, because sym_offset should only be zero for
         * the page past the end of the vvar mapping.
         */
        if (sym_offset == 0)
                return VM_FAULT_SIGBUS;

        if (sym_offset == image->sym_vvar_page) {
                ret = vm_insert_pfn(vma, vmf->address,
                                    __pa_symbol(&__vvar_page) >> PAGE_SHIFT);
        } else if (sym_offset == image->sym_pvclock_page) {
                struct pvclock_vsyscall_time_info *pvti =
                        pvclock_get_pvti_cpu0_va();

                if (pvti && vclock_was_used(VCLOCK_PVCLOCK)) {
                        ret = vm_insert_pfn(vma, vmf->address,
                                            __pa(pvti) >> PAGE_SHIFT);
                }
        } else if (sym_offset == image->sym_hvclock_page) {
                struct ms_hyperv_tsc_page *tsc_pg = hv_get_tsc_page();

                if (tsc_pg && vclock_was_used(VCLOCK_HVCLOCK))
                        ret = vm_insert_pfn(vma, vmf->address,
                                            vmalloc_to_pfn(tsc_pg));
        }

        if (ret == 0 || ret == -EBUSY)
                return VM_FAULT_NOPAGE;

        return VM_FAULT_SIGBUS;
}

static const struct vm_special_mapping vdso_mapping = {
        .name = "[vdso]",
        .fault = vdso_fault,
        .mremap = vdso_mremap,
};

static const struct vm_special_mapping vvar_mapping = {
        .name = "[vvar]",
        .fault = vvar_fault,
};

/*
 * Add vdso and vvar mappings to the current process.
 * @image - blob to map
 * @addr - request a specific address (zero to map at a free address)
 */
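/*
 * Note that sym_vvar_start is negative: @addr is the base of the whole
 * region, the vvar area occupies [addr, addr - sym_vvar_start), and the
 * vdso text starts right above it at text_start.
 */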
static int map_vdso(const struct vdso_image *image, unsigned long addr)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long text_start;
        int ret = 0;

        if (down_write_killable(&mm->mmap_sem))
                return -EINTR;

        addr = get_unmapped_area(NULL, addr,
                                 image->size - image->sym_vvar_start, 0, 0);
        if (IS_ERR_VALUE(addr)) {
                ret = addr;
                goto up_fail;
        }

        text_start = addr - image->sym_vvar_start;

        /*
         * MAYWRITE to allow gdb to COW and set breakpoints
         */
        vma = _install_special_mapping(mm,
                                       text_start,
                                       image->size,
                                       VM_READ|VM_EXEC|
                                       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
                                       &vdso_mapping);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto up_fail;
        }

        vma = _install_special_mapping(mm,
                                       addr,
                                       -image->sym_vvar_start,
                                       VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
                                       VM_PFNMAP,
                                       &vvar_mapping);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                do_munmap(mm, text_start, image->size, NULL);
        } else {
                current->mm->context.vdso = (void __user *)text_start;
                current->mm->context.vdso_image = image;
        }

up_fail:
        up_write(&mm->mmap_sem);
        return ret;
}

#ifdef CONFIG_X86_64
/*
 * Put the vdso above the (randomized) stack with another randomized
 * offset. This way there is no hole in the middle of the address
 * space. To save memory, make sure it is still in the same PTE as the
 * stack top. This doesn't give that many random bits.
 *
 * Note that this algorithm is imperfect: the distribution of the vdso
 * start address within a PMD is biased toward the end.
 *
 * Only used for the 64-bit and x32 vdsos.
 */
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
        unsigned long addr, end;
        unsigned offset;

        /*
         * Round up the start address. It can start out unaligned as a result
         * of stack start randomization.
         */
        start = PAGE_ALIGN(start);

        /* Round the lowest possible end address up to a PMD boundary. */
        end = (start + len + PMD_SIZE - 1) & PMD_MASK;
        if (end >= TASK_SIZE_MAX)
                end = TASK_SIZE_MAX;
        end -= len;

        if (end > start) {
                offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
                addr = start + (offset << PAGE_SHIFT);
        } else {
                addr = start;
        }

        /*
         * Forcibly align the final address in case we have a hardware
         * issue that requires alignment for performance reasons.
         */
        addr = align_vdso_addr(addr);

        return addr;
}
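
/*
 * Worked example (illustrative numbers): for start = 0x7ffdb3f89123
 * and len = 0x3000, PAGE_ALIGN() gives start = 0x7ffdb3f8a000;
 * rounding start + len up to the next PMD boundary and subtracting
 * len gives end = 0x7ffdb3ffd000, so the base is picked from the 116
 * page-aligned slots in [start, end] - all inside the stack top's
 * 2 MB region.
 */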

static int map_vdso_randomized(const struct vdso_image *image)
{
        unsigned long addr = vdso_addr(current->mm->start_stack,
                                       image->size - image->sym_vvar_start);

        return map_vdso(image, addr);
}
#endif
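
/*
 * Map the given vdso image at most once per mm; this is the backend
 * for the arch_prctl(ARCH_MAP_VDSO_*) requests.
 */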
int map_vdso_once(const struct vdso_image *image, unsigned long addr)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;

        down_write(&mm->mmap_sem);
        /*
         * Check if we have already mapped a vdso blob - fail to prevent
         * abuse of install_special_mapping() from userspace, which may
         * not do accounting and rlimits right.
         * We could search the VMAs near context.vdso, but this is a slow
         * path, so let's explicitly check all VMAs to be completely sure.
         */
        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                if (vma_is_special_mapping(vma, &vdso_mapping) ||
                    vma_is_special_mapping(vma, &vvar_mapping)) {
                        up_write(&mm->mmap_sem);
                        return -EEXIST;
                }
        }
        up_write(&mm->mmap_sem);

        return map_vdso(image, addr);
}

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
static int load_vdso32(void)
{
        if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
                return 0;

        return map_vdso(&vdso_image_32, 0);
}
#endif
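
/*
 * Called from the ELF loader at exec time to install the vdso in the
 * new process image.
 */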
#ifdef CONFIG_X86_64
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
        if (!vdso64_enabled)
                return 0;

        return map_vdso_randomized(&vdso_image_64);
}

#ifdef CONFIG_COMPAT
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
                                       int uses_interp)
{
#ifdef CONFIG_X86_X32_ABI
        if (test_thread_flag(TIF_X32)) {
                if (!vdso64_enabled)
                        return 0;

                return map_vdso_randomized(&vdso_image_x32);
        }
#endif
#ifdef CONFIG_IA32_EMULATION
        return load_vdso32();
#else
        return 0;
#endif
}
#endif
#else
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
        return load_vdso32();
}
#endif

#ifdef CONFIG_X86_64
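/*
 * "vdso=" boot parameter: vdso=0 disables the 64-bit vdso; any nonzero
 * value (the default is 1) leaves it enabled.
 */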
static __init int vdso_setup(char *s)
{
        vdso64_enabled = simple_strtoul(s, NULL, 0);
        return 0;
}
__setup("vdso=", vdso_setup);
#endif

#ifdef CONFIG_X86_64
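/*
 * Install the per-CPU GDT segment whose 20-bit limit encodes
 * (node << 12) | cpu, so the vdso's __vdso_getcpu() can recover both
 * values via LSL on CPUs that lack RDTSCP/RDPID.
 */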
static void vgetcpu_cpu_init(void *arg)
{
        int cpu = smp_processor_id();
        struct desc_struct d = { };
        unsigned long node = 0;
#ifdef CONFIG_NUMA
        node = cpu_to_node(cpu);
#endif

        if (boot_cpu_has(X86_FEATURE_RDTSCP) || boot_cpu_has(X86_FEATURE_RDPID))
                write_rdtscp_aux((node << 12) | cpu);

        /*
         * Store cpu number in limit so that it can be loaded
         * quickly in user space in vgetcpu. (12 bits for the CPU
         * and 8 bits for the node)
         */
        d.limit0 = cpu | ((node & 0xf) << 12);
        d.limit1 = node >> 4;
        d.type = 5;     /* RO data, expand down, accessed */
        d.dpl = 3;      /* Visible to user code */
        d.s = 1;        /* Not a system segment */
        d.p = 1;        /* Present */
        d.d = 1;        /* 32-bit */

        write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
}
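
/*
 * For reference, the userspace decode in __vdso_getcpu(): the value p
 * obtained via RDTSCP, RDPID, or LSL is (node << 12) | cpu, so
 * cpu = p & 0xfff and node = p >> 12.
 */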

static int vgetcpu_online(unsigned int cpu)
{
        return smp_call_function_single(cpu, vgetcpu_cpu_init, NULL, 1);
}

static int __init init_vdso(void)
{
        init_vdso_image(&vdso_image_64);

#ifdef CONFIG_X86_X32_ABI
        init_vdso_image(&vdso_image_x32);
#endif

        /* notifier priority > KVM */
        return cpuhp_setup_state(CPUHP_AP_X86_VDSO_VMA_ONLINE,
                                 "x86/vdso/vma:online", vgetcpu_online, NULL);
}
subsys_initcall(init_vdso);
#endif /* CONFIG_X86_64 */