page-states.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2008
 *
 * Guest page hinting for unused pages.
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <asm/facility.h>
#include <asm/page-states.h>

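/* 0 = CMMA disabled, 1 = base page states, 2 = no-DAT states available */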
static int cmma_flag = 1;

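/* Parse the "cmma=" kernel parameter: "yes"/"on" enable, "no"/"off" disable. */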
static int __init cmma(char *str)
{
	char *parm;

	parm = strstrip(str);
	if (strcmp(parm, "yes") == 0 || strcmp(parm, "on") == 0) {
		cmma_flag = 1;
		return 1;
	}
	cmma_flag = 0;
	if (strcmp(parm, "no") == 0 || strcmp(parm, "off") == 0)
		return 1;
	return 0;
}
__setup("cmma=", cmma);

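/*
 * Probe for the ESSA instruction: execute ESSA_GET_STATE under an
 * exception table entry, so that on machines without ESSA the program
 * check is fixed up and rc keeps its initial -EOPNOTSUPP.
 */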
static inline int cmma_test_essa(void)
{
	register unsigned long tmp asm("0") = 0;
	register int rc asm("1");

	/* test ESSA_GET_STATE */
	asm volatile(
		"	.insn	rrf,0xb9ab0000,%1,%1,%2,0\n"
		"0:	la	%0,0\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "=&d" (rc), "+&d" (tmp)
		: "i" (ESSA_GET_STATE), "0" (-EOPNOTSUPP));
	return rc;
}

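/*
 * Disable CMMA when ESSA is not available; if facility 147 is
 * installed, the additional no-DAT page states can be used as well.
 */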
void __init cmma_init(void)
{
	if (!cmma_flag)
		return;
	if (cmma_test_essa()) {
		cmma_flag = 0;
		return;
	}
	if (test_facility(147))
		cmma_flag = 2;
}

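/* Read the current ESSA state of a page; the state is in the low six bits. */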
static inline unsigned char get_page_state(struct page *page)
{
	unsigned char state;

	asm volatile("	.insn	rrf,0xb9ab0000,%0,%1,%2,0"
		     : "=&d" (state)
		     : "a" (page_to_phys(page)),
		       "i" (ESSA_GET_STATE));
	return state & 0x3f;
}

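/*
 * Each helper below issues one ESSA per 4K page of the 2^order block to
 * move it to the unused, stable, or stable/no-DAT state.
 */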
static inline void set_page_unused(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_UNUSED));
}

static inline void set_page_stable_dat(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_STABLE));
}

static inline void set_page_stable_nodat(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_STABLE_NODAT));
}

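/*
 * The mark_kernel_* walkers set PG_arch_1 on every page that backs a
 * kernel page table, so that cmma_init_nodat() can skip those pages.
 * CRST tables (region and segment tables) occupy four 4K pages each,
 * page tables a single page.
 */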
static void mark_kernel_pmd(pud_t *pud, unsigned long addr, unsigned long end)
{
	unsigned long next;
	struct page *page;
	pmd_t *pmd;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd) || pmd_large(*pmd))
			continue;
		page = virt_to_page(pmd_val(*pmd));
		set_bit(PG_arch_1, &page->flags);
	} while (pmd++, addr = next, addr != end);
}

static void mark_kernel_pud(p4d_t *p4d, unsigned long addr, unsigned long end)
{
	unsigned long next;
	struct page *page;
	pud_t *pud;
	int i;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none(*pud) || pud_large(*pud))
			continue;
		if (!pud_folded(*pud)) {
			page = virt_to_page(pud_val(*pud));
			for (i = 0; i < 4; i++)
				set_bit(PG_arch_1, &page[i].flags);
		}
		mark_kernel_pmd(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

static void mark_kernel_p4d(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	unsigned long next;
	struct page *page;
	p4d_t *p4d;
	int i;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none(*p4d))
			continue;
		if (!p4d_folded(*p4d)) {
			page = virt_to_page(p4d_val(*p4d));
			for (i = 0; i < 4; i++)
				set_bit(PG_arch_1, &page[i].flags);
		}
		mark_kernel_pud(p4d, addr, next);
	} while (p4d++, addr = next, addr != end);
}

static void mark_kernel_pgd(void)
{
	unsigned long addr, next;
	struct page *page;
	pgd_t *pgd;
	int i;

	addr = 0;
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, MODULES_END);
		if (pgd_none(*pgd))
			continue;
		if (!pgd_folded(*pgd)) {
			page = virt_to_page(pgd_val(*pgd));
			for (i = 0; i < 4; i++)
				set_bit(PG_arch_1, &page[i].flags);
		}
		mark_kernel_p4d(pgd, addr, next);
	} while (pgd++, addr = next, addr != MODULES_END);
}

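/*
 * Late initialization for the no-DAT page states: flag all page table
 * pages, then set every remaining kernel page that is neither a page
 * table nor on a free list to stable/no-DAT.
 */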
void __init cmma_init_nodat(void)
{
	struct memblock_region *reg;
	struct page *page;
	unsigned long start, end, ix;

	if (cmma_flag < 2)
		return;
	/* Mark pages used in kernel page tables */
	mark_kernel_pgd();

	/* Set all kernel pages not used for page tables to stable/no-dat */
	for_each_memblock(memory, reg) {
		start = memblock_region_memory_base_pfn(reg);
		end = memblock_region_memory_end_pfn(reg);
		page = pfn_to_page(start);
		for (ix = start; ix < end; ix++, page++) {
			if (__test_and_clear_bit(PG_arch_1, &page->flags))
				continue;	/* skip page table pages */
			if (!list_empty(&page->lru))
				continue;	/* skip free pages */
			set_page_stable_nodat(page, 0);
		}
	}
}

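/*
 * Buddy allocator hooks: pages are flagged unused when they are freed
 * and stable (no-DAT if available) when they are allocated again.
 */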
void arch_free_page(struct page *page, int order)
{
	if (!cmma_flag)
		return;
	set_page_unused(page, order);
}

void arch_alloc_page(struct page *page, int order)
{
	if (!cmma_flag)
		return;
	if (cmma_flag < 2)
		set_page_stable_dat(page, order);
	else
		set_page_stable_nodat(page, order);
}

void arch_set_page_dat(struct page *page, int order)
{
	if (!cmma_flag)
		return;
	set_page_stable_dat(page, order);
}

void arch_set_page_nodat(struct page *page, int order)
{
	if (cmma_flag < 2)
		return;
	set_page_stable_nodat(page, order);
}

int arch_test_page_nodat(struct page *page)
{
	unsigned char state;

	if (cmma_flag < 2)
		return 0;
	state = get_page_state(page);
	return !!(state & 0x20);
}

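/*
 * Walk the free lists of all populated zones and flag every free page
 * stable or unused in one pass, e.g. around a hibernation cycle.
 */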
void arch_set_page_states(int make_stable)
{
	unsigned long flags, order, t;
	struct list_head *l;
	struct page *page;
	struct zone *zone;

	if (!cmma_flag)
		return;
	if (make_stable)
		drain_local_pages(NULL);
	for_each_populated_zone(zone) {
		spin_lock_irqsave(&zone->lock, flags);
		for_each_migratetype_order(order, t) {
			list_for_each(l, &zone->free_area[order].free_list[t]) {
				page = list_entry(l, struct page, lru);
				if (make_stable)
					set_page_stable_dat(page, order);
				else
					set_page_unused(page, order);
			}
		}
		spin_unlock_irqrestore(&zone->lock, flags);
	}
}