hugetlbpage.c

// SPDX-License-Identifier: GPL-2.0
/*
 * PARISC64 Huge TLB page support.
 *
 * This parisc implementation is heavily based on the SPARC and x86 code.
 *
 * Copyright (C) 2015 Helge Deller <deller@gmx.de>
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
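
/*
 * Arch hook used by hugetlbfs mmap(): validate the requested length,
 * round any hinted address up to the huge page size, and let the
 * generic arch_get_unmapped_area() pick a range with correct cache
 * colouring.
 */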
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;

	if (addr)
		addr = ALIGN(addr, huge_page_size(h));

	/* we need to make sure the colouring is OK */
	return arch_get_unmapped_area(file, addr, len, pgoff, flags);
}
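
/*
 * Walk, and allocate if necessary, the page table down to the PTE
 * level for the start of the huge page.  parisc represents a huge
 * page as a contiguous run of ordinary PTEs, so the caller gets the
 * first sub-PTE of that run.
 */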
pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	/* We must align the address, because our caller will run
	 * set_huge_pte_at() on whatever we return, which writes out
	 * all of the sub-ptes for the hugepage range.  So we have
	 * to give it the first such sub-pte.
	 */
	addr &= HPAGE_MASK;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (pud) {
		pmd = pmd_alloc(mm, pud, addr);
		if (pmd)
			pte = pte_alloc_map(mm, pmd, addr);
	}
	return pte;
}
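
/*
 * Lookup-only counterpart of huge_pte_alloc(): return the first
 * sub-PTE of the huge page, or NULL if any page-table level is still
 * empty.
 */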
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	addr &= HPAGE_MASK;

	pgd = pgd_offset(mm, addr);
	if (!pgd_none(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd))
				pte = pte_offset_map(pmd, addr);
		}
	}
	return pte;
}

/* Purge data and instruction TLB entries.  Must be called holding
 * the pa_tlb_lock.  The TLB purge instructions are slow on SMP
 * machines since the purge must be broadcast to all CPUs.
 */
static inline void purge_tlb_entries_huge(struct mm_struct *mm, unsigned long addr)
{
	int i;

	/* We may use multiple physical huge pages (e.g. 2x1 MB) to emulate
	 * Linux standard huge pages (e.g. 2 MB) */
	BUILD_BUG_ON(REAL_HPAGE_SHIFT > HPAGE_SHIFT);

	addr &= HPAGE_MASK;
	addr |= _HUGE_PAGE_SIZE_ENCODING_DEFAULT;

	for (i = 0; i < (1 << (HPAGE_SHIFT-REAL_HPAGE_SHIFT)); i++) {
		purge_tlb_entries(mm, addr);
		addr += (1UL << REAL_HPAGE_SHIFT);
	}
}
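
/*
 * Write one ordinary PTE per base page covered by the huge page,
 * stepping the physical address encoded in the PTE by PAGE_SIZE each
 * iteration, then purge the now-stale TLB entries for the whole range.
 */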
/* __set_huge_pte_at() must be called holding the pa_tlb_lock. */
static void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	unsigned long addr_start;
	int i;

	addr &= HPAGE_MASK;
	addr_start = addr;

	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
		set_pte(ptep, entry);
		ptep++;

		addr += PAGE_SIZE;
		pte_val(entry) += PAGE_SIZE;
	}

	purge_tlb_entries_huge(mm, addr_start);
}
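
/*
 * The public helpers below take the TLB purge lock themselves via
 * purge_tlb_start()/purge_tlb_end() and defer the actual PTE writes
 * to __set_huge_pte_at().
 */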
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	unsigned long flags;

	purge_tlb_start(flags);
	__set_huge_pte_at(mm, addr, ptep, entry);
	purge_tlb_end(flags);
}
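
/*
 * Read back the old PTE and replace the whole huge mapping with empty
 * PTEs, all under the TLB purge lock.
 */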
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	unsigned long flags;
	pte_t entry;

	purge_tlb_start(flags);
	entry = *ptep;
	__set_huge_pte_at(mm, addr, ptep, __pte(0));
	purge_tlb_end(flags);

	return entry;
}
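
/*
 * Rewrite the huge mapping with the write bit cleared, e.g. when a
 * private hugetlb mapping is write-protected at fork() time.
 */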
void huge_ptep_set_wrprotect(struct mm_struct *mm,
				unsigned long addr, pte_t *ptep)
{
	unsigned long flags;
	pte_t old_pte;

	purge_tlb_start(flags);
	old_pte = *ptep;
	__set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
	purge_tlb_end(flags);
}
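
/*
 * Install updated access/dirty bits computed by the fault handler.
 * Returns nonzero if the PTE actually changed; in that case the TLB
 * has already been purged by __set_huge_pte_at().
 */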
int huge_ptep_set_access_flags(struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep,
				pte_t pte, int dirty)
{
	unsigned long flags;
	int changed;

	purge_tlb_start(flags);
	changed = !pte_same(*ptep, pte);
	if (changed) {
		__set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
	}
	purge_tlb_end(flags);

	return changed;
}
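
/*
 * Huge pages on parisc live at the PTE level, so there are no leaf
 * ("huge") PMDs or PUDs to report to the generic code.
 */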
int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}