/*
 *  mm/userfaultfd.c
 *
 *  Copyright (C) 2015  Red Hat, Inc.
 *
 *  This work is licensed under the terms of the GNU GPL, version 2. See
 *  the COPYING file in the top-level directory.
 */

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/userfaultfd_k.h>
#include <linux/mmu_notifier.h>
#include <asm/tlbflush.h>
#include "internal.h"

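/*
 * Allocate a fresh anonymous page, fill it from the userspace source
 * buffer and map it at dst_addr under the page table lock.  If the
 * atomic copy_from_user() faults, the allocated page is handed back to
 * the caller through *pagep so the copy can be retried with a sleeping
 * copy_from_user() after mmap_sem has been dropped.
 */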
static int mcopy_atomic_pte(struct mm_struct *dst_mm,
			    pmd_t *dst_pmd,
			    struct vm_area_struct *dst_vma,
			    unsigned long dst_addr,
			    unsigned long src_addr,
			    struct page **pagep)
{
	struct mem_cgroup *memcg;
	pte_t _dst_pte, *dst_pte;
	spinlock_t *ptl;
	void *page_kaddr;
	int ret;
	struct page *page;

	if (!*pagep) {
		ret = -ENOMEM;
		page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, dst_vma, dst_addr);
		if (!page)
			goto out;

		page_kaddr = kmap_atomic(page);
		ret = copy_from_user(page_kaddr,
				     (const void __user *) src_addr,
				     PAGE_SIZE);
		kunmap_atomic(page_kaddr);

		/* fallback to copy_from_user outside mmap_sem */
		if (unlikely(ret)) {
			ret = -EFAULT;
			*pagep = page;
			/* don't free the page */
			goto out;
		}
	} else {
		page = *pagep;
		*pagep = NULL;
	}

	/*
	 * The memory barrier inside __SetPageUptodate makes sure that
	 * preceding stores to the page contents become visible before
	 * the set_pte_at() write.
	 */
	__SetPageUptodate(page);

	ret = -ENOMEM;
	if (mem_cgroup_try_charge(page, dst_mm, GFP_KERNEL, &memcg, false))
		goto out_release;

	_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
	if (dst_vma->vm_flags & VM_WRITE)
		_dst_pte = pte_mkwrite(pte_mkdirty(_dst_pte));

	ret = -EEXIST;
	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
	if (!pte_none(*dst_pte))
		goto out_release_uncharge_unlock;

	inc_mm_counter(dst_mm, MM_ANONPAGES);
	page_add_new_anon_rmap(page, dst_vma, dst_addr, false);
	mem_cgroup_commit_charge(page, memcg, false, false);
	lru_cache_add_active_or_unevictable(page, dst_vma);

	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);

	pte_unmap_unlock(dst_pte, ptl);
	ret = 0;
out:
	return ret;
out_release_uncharge_unlock:
	pte_unmap_unlock(dst_pte, ptl);
	mem_cgroup_cancel_charge(page, memcg, false);
out_release:
	put_page(page);
	goto out;
}

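/*
 * Map the shared zero page read-only at dst_addr, failing with -EEXIST
 * if a pte is already present there.
 */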
static int mfill_zeropage_pte(struct mm_struct *dst_mm,
			      pmd_t *dst_pmd,
			      struct vm_area_struct *dst_vma,
			      unsigned long dst_addr)
{
	pte_t _dst_pte, *dst_pte;
	spinlock_t *ptl;
	int ret;

	_dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
					 dst_vma->vm_page_prot));
	ret = -EEXIST;
	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
	if (!pte_none(*dst_pte))
		goto out_unlock;
	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);
	ret = 0;
out_unlock:
	pte_unmap_unlock(dst_pte, ptl);
	return ret;
}

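/*
 * Walk (and allocate where needed) the page tables down to the pmd
 * level for "address"; returns NULL if the pud or pmd allocation fails.
 */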
static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd = NULL;

	pgd = pgd_offset(mm, address);
	pud = pud_alloc(mm, pgd, address);
	if (pud)
		/*
		 * Note that we didn't run this because the pmd was
		 * missing, the *pmd may be already established and in
		 * turn it may also be a trans_huge_pmd.
		 */
		pmd = pmd_alloc(mm, pud, address);
	return pmd;
}

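/*
 * Common implementation of the UFFDIO_COPY and UFFDIO_ZEROPAGE paths:
 * validate the destination range, then fill it one page at a time,
 * retrying with a non-atomic copy_from_user() whenever
 * mcopy_atomic_pte() reports -EFAULT.  Returns the number of bytes
 * filled, or an error if nothing was copied.
 */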
static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
					      unsigned long dst_start,
					      unsigned long src_start,
					      unsigned long len,
					      bool zeropage)
{
	struct vm_area_struct *dst_vma;
	ssize_t err;
	pmd_t *dst_pmd;
	unsigned long src_addr, dst_addr;
	long copied;
	struct page *page;

	/*
	 * Sanitize the command parameters:
	 */
	BUG_ON(dst_start & ~PAGE_MASK);
	BUG_ON(len & ~PAGE_MASK);

	/* Does the address range wrap, or is the span zero-sized? */
	BUG_ON(src_start + len <= src_start);
	BUG_ON(dst_start + len <= dst_start);

	src_addr = src_start;
	dst_addr = dst_start;
	copied = 0;
	page = NULL;
retry:
	down_read(&dst_mm->mmap_sem);

	/*
	 * Make sure the vma is not shared, that the dst range is
	 * both valid and fully within a single existing vma.
	 */
	err = -EINVAL;
	dst_vma = find_vma(dst_mm, dst_start);
	if (!dst_vma || (dst_vma->vm_flags & VM_SHARED))
		goto out_unlock;
	if (dst_start < dst_vma->vm_start ||
	    dst_start + len > dst_vma->vm_end)
		goto out_unlock;

	/*
	 * Be strict and only allow __mcopy_atomic on userfaultfd
	 * registered ranges to prevent userland errors going
	 * unnoticed. As far as the VM consistency is concerned, it
	 * would be perfectly safe to remove this check, but there's
	 * no useful usage for __mcopy_atomic outside of userfaultfd
	 * registered ranges. This is after all why these are ioctls
	 * belonging to the userfaultfd and not syscalls.
	 */
	if (!dst_vma->vm_userfaultfd_ctx.ctx)
		goto out_unlock;

	/*
	 * FIXME: only allow copying on anonymous vmas, tmpfs should
	 * be added.
	 */
	if (dst_vma->vm_ops)
		goto out_unlock;

	/*
	 * Ensure the dst_vma has an anon_vma or this page
	 * would get a NULL anon_vma when moved in the
	 * dst_vma.
	 */
	err = -ENOMEM;
	if (unlikely(anon_vma_prepare(dst_vma)))
		goto out_unlock;

	while (src_addr < src_start + len) {
		pmd_t dst_pmdval;

		BUG_ON(dst_addr >= dst_start + len);

		dst_pmd = mm_alloc_pmd(dst_mm, dst_addr);
		if (unlikely(!dst_pmd)) {
			err = -ENOMEM;
			break;
		}

		dst_pmdval = pmd_read_atomic(dst_pmd);
		/*
		 * If the dst_pmd is mapped as THP don't
		 * override it and just be strict.
		 */
		if (unlikely(pmd_trans_huge(dst_pmdval))) {
			err = -EEXIST;
			break;
		}
		if (unlikely(pmd_none(dst_pmdval)) &&
		    unlikely(__pte_alloc(dst_mm, dst_pmd, dst_addr))) {
			err = -ENOMEM;
			break;
		}
		/* If a huge pmd materialized from under us, fail */
		if (unlikely(pmd_trans_huge(*dst_pmd))) {
			err = -EFAULT;
			break;
		}

		BUG_ON(pmd_none(*dst_pmd));
		BUG_ON(pmd_trans_huge(*dst_pmd));

		if (!zeropage)
			err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma,
					       dst_addr, src_addr, &page);
		else
			err = mfill_zeropage_pte(dst_mm, dst_pmd, dst_vma,
						 dst_addr);

		cond_resched();

		if (unlikely(err == -EFAULT)) {
			void *page_kaddr;

			up_read(&dst_mm->mmap_sem);
			BUG_ON(!page);

			page_kaddr = kmap(page);
			err = copy_from_user(page_kaddr,
					     (const void __user *) src_addr,
					     PAGE_SIZE);
			kunmap(page);
			if (unlikely(err)) {
				err = -EFAULT;
				goto out;
			}
			goto retry;
		} else
			BUG_ON(page);

		if (!err) {
			dst_addr += PAGE_SIZE;
			src_addr += PAGE_SIZE;
			copied += PAGE_SIZE;

			if (fatal_signal_pending(current))
				err = -EINTR;
		}
		if (err)
			break;
	}

out_unlock:
	up_read(&dst_mm->mmap_sem);
out:
	if (page)
		put_page(page);
	BUG_ON(copied < 0);
	BUG_ON(err > 0);
	BUG_ON(!copied && !err);
	return copied ? copied : err;
}

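/* External entry points: thin wrappers around __mcopy_atomic(). */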
ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
		     unsigned long src_start, unsigned long len)
{
	return __mcopy_atomic(dst_mm, dst_start, src_start, len, false);
}

ssize_t mfill_zeropage(struct mm_struct *dst_mm, unsigned long start,
		       unsigned long len)
{
	return __mcopy_atomic(dst_mm, start, 0, len, true);
}
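
/*
 * Illustrative only (not part of this file): a minimal sketch of how the
 * copy path above is reached from userspace.  It assumes a userfaultfd
 * already created with userfaultfd(2), initialised with UFFDIO_API and
 * with the faulting range registered via UFFDIO_REGISTER; "uffd",
 * "dst_page", "src_buf" and "page_size" are hypothetical names and error
 * handling is abbreviated.
 *
 *	struct uffdio_copy copy = {
 *		.dst  = (unsigned long) dst_page,   // page-aligned fault address
 *		.src  = (unsigned long) src_buf,    // source data in our own mm
 *		.len  = page_size,
 *		.mode = 0,
 *	};
 *	if (ioctl(uffd, UFFDIO_COPY, &copy) == -1)
 *		err(1, "UFFDIO_COPY");
 *	// copy.copy reports the bytes copied, mirroring mcopy_atomic()'s return
 */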