/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>

#include <asm/pgtable.h>
#include <asm/set_memory.h>
#include <asm/tlbflush.h>

struct page_change_data {
        pgprot_t set_mask;
        pgprot_t clear_mask;
};

static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
                        void *data)
{
        struct page_change_data *cdata = data;
        pte_t pte = READ_ONCE(*ptep);

        pte = clear_pte_bit(pte, cdata->clear_mask);
        pte = set_pte_bit(pte, cdata->set_mask);

        set_pte(ptep, pte);
        return 0;
}
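
/*
 * For reference (a sketch, not part of the original file): the
 * clear_pte_bit() and set_pte_bit() helpers from <asm/pgtable.h> reduce
 * to plain mask operations on the raw pte value, roughly:
 *
 *      static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
 *      {
 *              pte_val(pte) &= ~pgprot_val(prot);      // drop the given bits
 *              return pte;
 *      }
 *
 *      static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
 *      {
 *              pte_val(pte) |= pgprot_val(prot);       // OR in the given bits
 *              return pte;
 *      }
 */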

/*
 * This function assumes that the range is mapped with PAGE_SIZE pages.
 */
static int __change_memory_common(unsigned long start, unsigned long size,
                                pgprot_t set_mask, pgprot_t clear_mask)
{
        struct page_change_data data;
        int ret;

        data.set_mask = set_mask;
        data.clear_mask = clear_mask;

        ret = apply_to_page_range(&init_mm, start, size, change_page_range,
                                        &data);

        flush_tlb_kernel_range(start, start + size);
        return ret;
}
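
/*
 * For illustration (not part of the original file): apply_to_page_range()
 * walks init_mm's page tables over [start, start + size) and invokes
 * change_page_range() once per PTE, so e.g.
 *
 *      __change_memory_common(addr, 2 * PAGE_SIZE,
 *                             __pgprot(PTE_RDONLY), __pgprot(PTE_WRITE));
 *
 * rewrites two consecutive PTEs and then flushes the corresponding kernel
 * TLB entries in a single flush_tlb_kernel_range() call.
 */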

static int change_memory_common(unsigned long addr, int numpages,
                                pgprot_t set_mask, pgprot_t clear_mask)
{
        unsigned long start = addr;
        unsigned long size = PAGE_SIZE * numpages;
        unsigned long end = start + size;
        struct vm_struct *area;

        if (!PAGE_ALIGNED(addr)) {
                start &= PAGE_MASK;
                end = start + size;
                WARN_ON_ONCE(1);
        }

        /*
         * Kernel VA mappings are always live, and splitting live section
         * mappings into page mappings may cause TLB conflicts. This means
         * we have to ensure that changing the permission bits of the range
         * we are operating on does not result in such splitting.
         *
         * Let's restrict ourselves to mappings created by vmalloc (or vmap).
         * Those are guaranteed to consist entirely of page mappings, and
         * splitting is never needed.
         *
         * So check whether the [addr, addr + size) interval is entirely
         * covered by precisely one VM area that has the VM_ALLOC flag set.
         */
        area = find_vm_area((void *)addr);
        if (!area ||
            end > (unsigned long)area->addr + area->size ||
            !(area->flags & VM_ALLOC))
                return -EINVAL;

        if (!numpages)
                return 0;

        return __change_memory_common(start, size, set_mask, clear_mask);
}

int set_memory_ro(unsigned long addr, int numpages)
{
        return change_memory_common(addr, numpages,
                                        __pgprot(PTE_RDONLY),
                                        __pgprot(PTE_WRITE));
}

int set_memory_rw(unsigned long addr, int numpages)
{
        return change_memory_common(addr, numpages,
                                        __pgprot(PTE_WRITE),
                                        __pgprot(PTE_RDONLY));
}

int set_memory_nx(unsigned long addr, int numpages)
{
        return change_memory_common(addr, numpages,
                                        __pgprot(PTE_PXN),
                                        __pgprot(0));
}
EXPORT_SYMBOL_GPL(set_memory_nx);

int set_memory_x(unsigned long addr, int numpages)
{
        return change_memory_common(addr, numpages,
                                        __pgprot(0),
                                        __pgprot(PTE_PXN));
}
EXPORT_SYMBOL_GPL(set_memory_x);
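
/*
 * Illustrative usage (an assumed example, not part of the original file;
 * pageattr_example() is a hypothetical name): the set_memory_* calls above
 * only succeed on page-granular vmalloc()/vmap() mappings, so a typical
 * caller write-protects a vmalloc'd buffer like this.
 */
static int __maybe_unused pageattr_example(void)
{
        void *buf = vmalloc(PAGE_SIZE);         /* one page, VM_ALLOC area */
        int ret;

        if (!buf)
                return -ENOMEM;

        ret = set_memory_ro((unsigned long)buf, 1);     /* writes now fault */
        if (!ret)
                ret = set_memory_rw((unsigned long)buf, 1); /* writable again */

        vfree(buf);
        return ret;
}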

int set_memory_valid(unsigned long addr, int numpages, int enable)
{
        if (enable)
                return __change_memory_common(addr, PAGE_SIZE * numpages,
                                        __pgprot(PTE_VALID),
                                        __pgprot(0));
        else
                return __change_memory_common(addr, PAGE_SIZE * numpages,
                                        __pgprot(0),
                                        __pgprot(PTE_VALID));
}
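
/*
 * For illustration (not part of the original file): clearing PTE_VALID
 * makes any subsequent access to the range fault, which is how
 * CONFIG_DEBUG_PAGEALLOC (below) catches stray accesses to free pages:
 *
 *      set_memory_valid(addr, 1, 0);   // accesses to addr now fault
 *      set_memory_valid(addr, 1, 1);   // accesses succeed again
 */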

#ifdef CONFIG_DEBUG_PAGEALLOC
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
        set_memory_valid((unsigned long)page_address(page), numpages, enable);
}

#ifdef CONFIG_HIBERNATION
/*
 * When built with CONFIG_DEBUG_PAGEALLOC and CONFIG_HIBERNATION, this function
 * is used to determine if a linear map page has been marked as not-valid by
 * CONFIG_DEBUG_PAGEALLOC. Walk the page table and check the PTE_VALID bit.
 * This is based on kern_addr_valid(), which almost does what we need.
 *
 * Because this is only called on the kernel linear map, p?d_sect() implies
 * p?d_present(). When debug_pagealloc is enabled, section mappings are
 * disabled.
 */
bool kernel_page_present(struct page *page)
{
        pgd_t *pgdp;
        pud_t *pudp, pud;
        pmd_t *pmdp, pmd;
        pte_t *ptep;
        unsigned long addr = (unsigned long)page_address(page);

        pgdp = pgd_offset_k(addr);
        if (pgd_none(READ_ONCE(*pgdp)))
                return false;

        pudp = pud_offset(pgdp, addr);
        pud = READ_ONCE(*pudp);
        if (pud_none(pud))
                return false;
        if (pud_sect(pud))
                return true;

        pmdp = pmd_offset(pudp, addr);
        pmd = READ_ONCE(*pmdp);
        if (pmd_none(pmd))
                return false;
        if (pmd_sect(pmd))
                return true;

        ptep = pte_offset_kernel(pmdp, addr);
        return pte_valid(READ_ONCE(*ptep));
}
#endif /* CONFIG_HIBERNATION */
#endif /* CONFIG_DEBUG_PAGEALLOC */