// SPDX-License-Identifier: GPL-2.0
/*
 * IA-64 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002-2004 Rohit Seth <rohit.seth@intel.com>
 * Copyright (C) 2003-2004 Ken Chen <kenneth.w.chen@intel.com>
 *
 * Sep, 2003: add numa support
 * Feb, 2004: dynamic hugetlb page size via boot parameter
 */
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/log2.h>
#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

unsigned int hpage_shift = HPAGE_SHIFT_DEFAULT;
EXPORT_SYMBOL(hpage_shift);
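
/*
 * Huge pages live in their own region (RGN_HPAGE) and are mapped through
 * ordinary PTEs: htlbpage_to_page() scales the address offset down by
 * HPAGE_SIZE/PAGE_SIZE before the pgd/pud/pmd walk, so one base-size PTE
 * is allocated per huge page.
 */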
pte_t *
huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
	unsigned long taddr = htlbpage_to_page(addr);
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, taddr);
	pud = pud_alloc(mm, pgd, taddr);
	if (pud) {
		pmd = pmd_alloc(mm, pud, taddr);
		if (pmd)
			pte = pte_alloc_map(mm, pmd, taddr);
	}
	return pte;
}
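
/*
 * Same walk as huge_pte_alloc(), but lookup only: returns NULL if any
 * level of the (scaled) page table is not present.
 */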
pte_t *
huge_pte_offset(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
	unsigned long taddr = htlbpage_to_page(addr);
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, taddr);
	if (pgd_present(*pgd)) {
		pud = pud_offset(pgd, taddr);
		if (pud_present(*pud)) {
			pmd = pmd_offset(pud, taddr);
			if (pmd_present(*pmd))
				pte = pte_offset_map(pmd, taddr);
		}
	}
	return pte;
}
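
/* Mark a PTE as a huge page mapping; _PAGE_P is the ia64 present bit. */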
#define mk_pte_huge(entry) { pte_val(entry) |= _PAGE_P; }

/*
 * Don't actually need to do any preparation, but need to make sure
 * the address is in the right region.
 */
int prepare_hugepage_range(struct file *file,
			unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;
	if (REGION_NUMBER(addr) != RGN_HPAGE)
		return -EINVAL;
	return 0;
}
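
/*
 * Translate a user address in the huge page region to its struct page.
 * The offset within the huge page is added in units of base pages, since
 * a huge page is composed of HPAGE_SIZE/PAGE_SIZE base pages.
 */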
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long addr, int write)
{
	struct page *page;
	pte_t *ptep;

	if (REGION_NUMBER(addr) != RGN_HPAGE)
		return ERR_PTR(-EINVAL);

	ptep = huge_pte_offset(mm, addr, HPAGE_SIZE);
	if (!ptep || pte_none(*ptep))
		return NULL;
	page = pte_page(*ptep);
	page += ((addr & ~HPAGE_MASK) >> PAGE_SHIFT);
	return page;
}
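
/*
 * ia64 never maps huge pages at the pmd or pud level; they go through
 * ordinary PTEs in the scaled RGN_HPAGE page tables, so both predicates
 * always report false.
 */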
int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			unsigned long addr, unsigned long end,
			unsigned long floor, unsigned long ceiling)
{
	/*
	 * This is called to free hugetlb page tables.
	 *
	 * The offset of these addresses from the base of the hugetlb
	 * region must be scaled down by HPAGE_SIZE/PAGE_SIZE so that
	 * the standard free_pgd_range will free the right page tables.
	 *
	 * If floor and ceiling are also in the hugetlb region, they
	 * must likewise be scaled down; but if outside, left unchanged.
	 */
	addr = htlbpage_to_page(addr);
	end = htlbpage_to_page(end);
	if (REGION_NUMBER(floor) == RGN_HPAGE)
		floor = htlbpage_to_page(floor);
	if (REGION_NUMBER(ceiling) == RGN_HPAGE)
		ceiling = htlbpage_to_page(ceiling);
	free_pgd_range(tlb, addr, end, floor, ceiling);
}
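
/*
 * Pick a free, HPAGE_SIZE-aligned range for a hugetlb mapping inside the
 * dedicated huge page region, between HPAGE_REGION_BASE and the region
 * mapping limit.
 */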
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct vm_unmapped_area_info info;

	if (len > RGN_MAP_LIMIT)
		return -ENOMEM;
	if (len & ~HPAGE_MASK)
		return -EINVAL;

	/* Handle MAP_FIXED */
	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	/* This code assumes that RGN_HPAGE != 0. */
	if ((REGION_NUMBER(addr) != RGN_HPAGE) || (addr & (HPAGE_SIZE - 1)))
		addr = HPAGE_REGION_BASE;

	info.flags = 0;
	info.length = len;
	info.low_limit = addr;
	info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
	info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}
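
/*
 * Parse the "hugepagesz=" boot parameter. The size must be a power of
 * two that PAL reports as insertable into the TLB, larger than the base
 * page size, and small enough for the buddy allocator (below the
 * MAX_ORDER limit).
 */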
static int __init hugetlb_setup_sz(char *str)
{
	u64 tr_pages;
	unsigned long long size;

	if (ia64_pal_vm_page_size(&tr_pages, NULL) != 0)
		/*
		 * shouldn't happen, but just in case.
		 */
		tr_pages = 0x15557000UL;

	size = memparse(str, &str);
	if (*str || !is_power_of_2(size) || !(tr_pages & size) ||
	    size <= PAGE_SIZE ||
	    size >= (1UL << PAGE_SHIFT << MAX_ORDER)) {
		printk(KERN_WARNING "Invalid huge page size specified\n");
		return 1;
	}

	hpage_shift = __ffs(size);
	/*
	 * The boot CPU has already executed ia64_mmu_init and programmed
	 * the region register with HPAGE_SHIFT_DEFAULT; override it here
	 * with the new page shift.
	 */
	ia64_set_rr(HPAGE_REGION_BASE, hpage_shift << 2);
	return 0;
}
early_param("hugepagesz", hugetlb_setup_sz);
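
/*
 * Example (assuming the size passes the checks above for the running
 * configuration): booting with "hugepagesz=256M" keeps the usual ia64
 * default, while e.g. "hugepagesz=64M" selects a smaller architecturally
 * supported size for the RGN_HPAGE region.
 */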