pageattr.c

/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>

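/*
 * Bits to set and to clear in each PTE; handed to the apply_to_page_range()
 * callback below through its data pointer.
 */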
struct page_change_data {
	pgprot_t set_mask;
	pgprot_t clear_mask;
};
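
/*
 * Per-PTE callback for apply_to_page_range(): clear the requested bits,
 * set the requested bits, then write the updated PTE back.
 */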
static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
			void *data)
{
	struct page_change_data *cdata = data;
	pte_t pte = *ptep;

	pte = clear_pte_bit(pte, cdata->clear_mask);
	pte = set_pte_bit(pte, cdata->set_mask);

	set_pte(ptep, pte);
	return 0;
}

/*
 * This function assumes that the range is mapped with PAGE_SIZE pages.
 */
static int __change_memory_common(unsigned long start, unsigned long size,
				pgprot_t set_mask, pgprot_t clear_mask)
{
	struct page_change_data data;
	int ret;

	data.set_mask = set_mask;
	data.clear_mask = clear_mask;

	ret = apply_to_page_range(&init_mm, start, size, change_page_range,
					&data);

	flush_tlb_kernel_range(start, start + size);
	return ret;
}
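
/*
 * Common helper for the set_memory_* functions below: page-align the start
 * address if needed, check that the range is covered by exactly one
 * vmalloc/vmap (VM_ALLOC) area, then apply the set/clear masks to every
 * page in the range.
 */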
static int change_memory_common(unsigned long addr, int numpages,
				pgprot_t set_mask, pgprot_t clear_mask)
{
	unsigned long start = addr;
	unsigned long size = PAGE_SIZE * numpages;
	unsigned long end = start + size;
	struct vm_struct *area;

	if (!PAGE_ALIGNED(addr)) {
		start &= PAGE_MASK;
		end = start + size;
		WARN_ON_ONCE(1);
	}

	/*
	 * Kernel VA mappings are always live, and splitting live section
	 * mappings into page mappings may cause TLB conflicts. This means
	 * we have to ensure that changing the permission bits of the range
	 * we are operating on does not result in such splitting.
	 *
	 * Let's restrict ourselves to mappings created by vmalloc (or vmap).
	 * Those are guaranteed to consist entirely of page mappings, and
	 * splitting is never needed.
	 *
	 * So check whether the [addr, addr + size) interval is entirely
	 * covered by precisely one VM area that has the VM_ALLOC flag set.
	 */
	area = find_vm_area((void *)addr);
	if (!area ||
	    end > (unsigned long)area->addr + area->size ||
	    !(area->flags & VM_ALLOC))
		return -EINVAL;

	if (!numpages)
		return 0;

	return __change_memory_common(start, size, set_mask, clear_mask);
}

int set_memory_ro(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_RDONLY),
					__pgprot(PTE_WRITE));
}

int set_memory_rw(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_WRITE),
					__pgprot(PTE_RDONLY));
}

int set_memory_nx(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_PXN),
					__pgprot(0));
}
EXPORT_SYMBOL_GPL(set_memory_nx);

int set_memory_x(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(0),
					__pgprot(PTE_PXN));
}
EXPORT_SYMBOL_GPL(set_memory_x);
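
/*
 * Usage sketch (illustrative only; "buf" is a hypothetical caller's buffer,
 * not part of this file). The helpers above only accept ranges that lie
 * within a single vmalloc/vmap area, so a typical sequence is:
 *
 *	void *buf = vmalloc(PAGE_SIZE);
 *
 *	set_memory_ro((unsigned long)buf, 1);	// write-protect one page
 *	...
 *	set_memory_rw((unsigned long)buf, 1);	// make it writable again
 *	vfree(buf);
 */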

#ifdef CONFIG_DEBUG_PAGEALLOC
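/*
 * With CONFIG_DEBUG_PAGEALLOC, the page allocator unmaps pages from the
 * linear map when they are freed (enable == 0, PTE_VALID cleared) and maps
 * them again on allocation (enable != 0, PTE_VALID set), so stray accesses
 * to free pages fault instead of silently corrupting memory.
 */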
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	unsigned long addr = (unsigned long) page_address(page);

	if (enable)
		__change_memory_common(addr, PAGE_SIZE * numpages,
					__pgprot(PTE_VALID),
					__pgprot(0));
	else
		__change_memory_common(addr, PAGE_SIZE * numpages,
					__pgprot(0),
					__pgprot(PTE_VALID));
}
#ifdef CONFIG_HIBERNATION
/*
 * When built with CONFIG_DEBUG_PAGEALLOC and CONFIG_HIBERNATION, this function
 * is used to determine if a linear map page has been marked as not-valid by
 * CONFIG_DEBUG_PAGEALLOC. Walk the page table and check the PTE_VALID bit.
 * This is based on kern_addr_valid(), which almost does what we need.
 *
 * Because this is only called on the kernel linear map, p?d_sect() implies
 * p?d_present(). When debug_pagealloc is enabled, section mappings are
 * disabled.
 */
bool kernel_page_present(struct page *page)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long addr = (unsigned long)page_address(page);

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return false;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return false;
	if (pud_sect(*pud))
		return true;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return false;
	if (pmd_sect(*pmd))
		return true;

	pte = pte_offset_kernel(pmd, addr);
	return pte_valid(*pte);
}
#endif /* CONFIG_HIBERNATION */
#endif /* CONFIG_DEBUG_PAGEALLOC */