gup.c

/*
 * Lockless get_user_pages_fast for SuperH
 *
 * Copyright (C) 2009 - 2010 Paul Mundt
 *
 * Cloned from the x86 and PowerPC versions, by:
 *
 *	Copyright (C) 2008 Nick Piggin
 *	Copyright (C) 2008 Novell Inc.
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/highmem.h>
#include <asm/pgtable.h>

static inline pte_t gup_get_pte(pte_t *ptep)
{
#ifndef CONFIG_X2TLB
	return READ_ONCE(*ptep);
#else
	/*
	 * With get_user_pages_fast, we walk down the pagetables without
	 * taking any locks.  For this we would like to load the pointers
	 * atomically, but that is not possible with 64-bit PTEs.  What
	 * we do have is the guarantee that a pte will only either go
	 * from not present to present, or present to not present or both
	 * -- it will not switch to a completely different present page
	 * without a TLB flush in between; something that we are blocking
	 * by holding interrupts off.
	 *
	 * Setting ptes from not present to present goes:
	 * ptep->pte_high = h;
	 * smp_wmb();
	 * ptep->pte_low = l;
	 *
	 * And present to not present goes:
	 * ptep->pte_low = 0;
	 * smp_wmb();
	 * ptep->pte_high = 0;
	 *
	 * We must ensure here that the load of pte_low sees l iff pte_high
	 * sees h. We load pte_high *after* loading pte_low, which ensures we
	 * don't see an older value of pte_high.  *Then* we recheck pte_low,
	 * which ensures that we haven't picked up a changed pte high. We might
	 * have got rubbish values from pte_low and pte_high, but we are
	 * guaranteed that pte_low will not have the present bit set *unless*
	 * it is 'l'. And get_user_pages_fast only operates on present ptes, so
	 * we're safe.
	 *
	 * gup_get_pte should not be used or copied outside gup.c without being
	 * very careful -- it does not atomically load the pte or anything that
	 * is likely to be useful for you.
	 */
	pte_t pte;

retry:
	pte.pte_low = ptep->pte_low;
	smp_rmb();
	pte.pte_high = ptep->pte_high;
	smp_rmb();

	if (unlikely(pte.pte_low != ptep->pte_low))
		goto retry;

	return pte;
#endif
}
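
/*
 * Illustrative sketch, not part of the original file: the writer-side
 * store ordering that the comment in gup_get_pte() relies on.  The
 * helper name example_set_pte_present() is hypothetical; the
 * pte_high/pte_low layout assumes the 64-bit X2TLB PTE format.
 */
#ifdef CONFIG_X2TLB
static inline void example_set_pte_present(pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;	/* stage the high half first */
	smp_wmb();			/* pairs with smp_rmb() in gup_get_pte() */
	ptep->pte_low = pte.pte_low;	/* present bit lives in the low half */
}
#endif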

/*
 * The performance critical leaf functions are made noinline otherwise gcc
 * inlines everything into a single function which results in too much
 * register pressure.
 */
static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	u64 mask, result;
	pte_t *ptep;

#ifdef CONFIG_X2TLB
	result = _PAGE_PRESENT | _PAGE_EXT(_PAGE_EXT_KERN_READ | _PAGE_EXT_USER_READ);
	if (write)
		result |= _PAGE_EXT(_PAGE_EXT_KERN_WRITE | _PAGE_EXT_USER_WRITE);
#elif defined(CONFIG_SUPERH64)
	result = _PAGE_PRESENT | _PAGE_USER | _PAGE_READ;
	if (write)
		result |= _PAGE_WRITE;
#else
	result = _PAGE_PRESENT | _PAGE_USER;
	if (write)
		result |= _PAGE_RW;
#endif
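	/*
	 * _PAGE_SPECIAL is folded into the mask but not into the expected
	 * result, so the check below also rejects pte_special() mappings,
	 * whose pages must not be pinned with get_page().
	 */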
	mask = result | _PAGE_SPECIAL;

	ptep = pte_offset_map(&pmd, addr);
	do {
		pte_t pte = gup_get_pte(ptep);
		struct page *page;

		if ((pte_val(pte) & mask) != result) {
			pte_unmap(ptep);
			return 0;
		}
		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
		page = pte_page(pte);
		get_page(page);
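		/*
		 * SH has virtually indexed caches that can alias; flush
		 * here so the kernel-side view of the page is coherent
		 * with the user mapping before handing it to the caller.
		 */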
		__flush_anon_page(page, addr);
		flush_dcache_page(page);
		pages[*nr] = page;
		(*nr)++;

	} while (ptep++, addr += PAGE_SIZE, addr != end);
	pte_unmap(ptep - 1);

	return 1;
}

static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
		int write, struct page **pages, int *nr)
{
	unsigned long next;
	pmd_t *pmdp;

	pmdp = pmd_offset(&pud, addr);
	do {
		pmd_t pmd = *pmdp;

		next = pmd_addr_end(addr, end);
		if (pmd_none(pmd))
			return 0;
		if (!gup_pte_range(pmd, addr, next, write, pages, nr))
			return 0;
	} while (pmdp++, addr = next, addr != end);

	return 1;
}

static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
		int write, struct page **pages, int *nr)
{
	unsigned long next;
	pud_t *pudp;

	pudp = pud_offset(&pgd, addr);
	do {
		pud_t pud = *pudp;

		next = pud_addr_end(addr, end);
		if (pud_none(pud))
			return 0;
		if (!gup_pmd_range(pud, addr, next, write, pages, nr))
			return 0;
	} while (pudp++, addr = next, addr != end);

	return 1;
}

/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
 * back to the regular GUP.
 */
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr, len, end;
	unsigned long next;
	unsigned long flags;
	pgd_t *pgdp;
	int nr = 0;

	start &= PAGE_MASK;
	addr = start;
	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;
	if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
					(void __user *)start, len)))
		return 0;

	/*
	 * This doesn't prevent pagetable teardown, but does prevent
	 * the pagetables and pages from being freed.
	 */
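	/*
	 * As in the x86 and PowerPC versions this file was cloned from,
	 * disabling IRQs here blocks the IPI-based remote TLB flush that
	 * the teardown path issues, so the page tables cannot be freed
	 * out from under the walk below.
	 */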
	local_irq_save(flags);
	pgdp = pgd_offset(mm, addr);
	do {
		pgd_t pgd = *pgdp;

		next = pgd_addr_end(addr, end);
		if (pgd_none(pgd))
			break;
		if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
			break;
	} while (pgdp++, addr = next, addr != end);
	local_irq_restore(flags);

	return nr;
}

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Attempt to pin user pages in memory without taking mm->mmap_sem.
 * If not successful, it will fall back to taking the lock and
 * calling get_user_pages().
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 */
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr, len, end;
	unsigned long next;
	pgd_t *pgdp;
	int nr = 0;

	start &= PAGE_MASK;
	addr = start;
	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;
	if (end < start)
		goto slow_irqon;

	local_irq_disable();
	pgdp = pgd_offset(mm, addr);
	do {
		pgd_t pgd = *pgdp;

		next = pgd_addr_end(addr, end);
		if (pgd_none(pgd))
			goto slow;
		if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
			goto slow;
	} while (pgdp++, addr = next, addr != end);
	local_irq_enable();

	VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
	return nr;

	{
		int ret;

slow:
		local_irq_enable();
slow_irqon:
		/* Try to get the remaining pages with get_user_pages */
		start += nr << PAGE_SHIFT;
		pages += nr;

		ret = get_user_pages_unlocked(current, mm, start,
			(end - start) >> PAGE_SHIFT, write, 0, pages);

		/*
		 * Be careful with the return value: if the fast path
		 * already pinned some pages, report those even when the
		 * slow path fails; otherwise add the two counts together.
		 */
		if (nr > 0) {
			if (ret < 0)
				ret = nr;
			else
				ret += nr;
		}

		return ret;
	}
}
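
/*
 * Illustrative sketch, not part of the original file: how a caller
 * typically uses get_user_pages_fast() and releases the pages again.
 * example_pin_user_buffer() is a hypothetical function for the sake
 * of the example.
 */
static int example_pin_user_buffer(unsigned long uaddr, struct page **pages,
				   int nr_pages)
{
	int i, pinned;

	/* write=1: we intend to modify the pages */
	pinned = get_user_pages_fast(uaddr, nr_pages, 1, pages);
	if (pinned < 0)
		return pinned;		/* nothing was pinned */

	/* ... operate on pages[0..pinned-1], e.g. via kmap()/kunmap() ... */

	for (i = 0; i < pinned; i++)
		put_page(pages[i]);	/* drop the references taken above */

	return pinned;
}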