dump_pagetables.c

// SPDX-License-Identifier: GPL-2.0
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm/sections.h>
#include <asm/pgtable.h>

static unsigned long max_addr;
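
/*
 * Address markers give each named region of the kernel address space a
 * "---[ ... ]---" heading in the dump output.
 */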
struct addr_marker {
	unsigned long start_address;
	const char *name;
};

enum address_markers_idx {
	IDENTITY_NR = 0,
	KERNEL_START_NR,
	KERNEL_END_NR,
	VMEMMAP_NR,
	VMALLOC_NR,
	MODULES_NR,
};
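
/*
 * The vmemmap, vmalloc and modules start addresses are only known at boot
 * time and are filled in by pt_dump_init() below.
 */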
static struct addr_marker address_markers[] = {
	[IDENTITY_NR] = {0, "Identity Mapping"},
	[KERNEL_START_NR] = {(unsigned long)_stext, "Kernel Image Start"},
	[KERNEL_END_NR] = {(unsigned long)_end, "Kernel Image End"},
	[VMEMMAP_NR] = {0, "vmemmap Area"},
	[VMALLOC_NR] = {0, "vmalloc Area"},
	[MODULES_NR] = {0, "Modules Area"},
	{ -1, NULL }
};
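
/*
 * pg_state tracks the run of identically mapped pages currently being
 * walked: its protection bits, paging level, start address and the marker
 * region it falls into.
 */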
struct pg_state {
	int level;
	unsigned int current_prot;
	unsigned long start_address;
	unsigned long current_address;
	const struct addr_marker *marker;
};
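
/*
 * Print the paging level name followed by either "I" for an invalid entry
 * or the RO/RW and X/NX attributes.
 */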
static void print_prot(struct seq_file *m, unsigned int pr, int level)
{
	static const char * const level_name[] =
		{ "ASCE", "PGD", "PUD", "PMD", "PTE" };

	seq_printf(m, "%s ", level_name[level]);
	if (pr & _PAGE_INVALID) {
		seq_printf(m, "I\n");
		return;
	}
	seq_puts(m, (pr & _PAGE_PROTECT) ? "RO " : "RW ");
	seq_puts(m, (pr & _PAGE_NOEXEC) ? "NX\n" : "X\n");
}
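
/*
 * Called for every entry visited by the walkers below. A line is only
 * emitted once the current run ends, i.e. when the protection bits, the
 * paging level or the address space marker changes.
 */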
static void note_page(struct seq_file *m, struct pg_state *st,
		      unsigned int new_prot, int level)
{
	static const char units[] = "KMGTPE";
	int width = sizeof(unsigned long) * 2;
	const char *unit = units;
	unsigned int prot, cur;
	unsigned long delta;

	/*
	 * If we have a "break" in the series, we need to flush the state
	 * that we have now. "break" is either changing perms, levels or
	 * address space marker.
	 */
	prot = new_prot;
	cur = st->current_prot;
	if (!st->level) {
		/* First entry */
		st->current_prot = new_prot;
		st->level = level;
		st->marker = address_markers;
		seq_printf(m, "---[ %s ]---\n", st->marker->name);
	} else if (prot != cur || level != st->level ||
		   st->current_address >= st->marker[1].start_address) {
		/* Print the actual finished series */
		seq_printf(m, "0x%0*lx-0x%0*lx",
			   width, st->start_address,
			   width, st->current_address);
		delta = (st->current_address - st->start_address) >> 10;
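		/*
		 * Scale the size through K, M, G, ... while it remains an
		 * exact multiple of 1024 and a larger unit is available.
		 */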
		while (!(delta & 0x3ff) && unit[1]) {
			delta >>= 10;
			unit++;
		}
		seq_printf(m, "%9lu%c ", delta, *unit);
		print_prot(m, st->current_prot, st->level);
		if (st->current_address >= st->marker[1].start_address) {
			st->marker++;
			seq_printf(m, "---[ %s ]---\n", st->marker->name);
		}
		st->start_address = st->current_address;
		st->current_prot = new_prot;
		st->level = level;
	}
}

/*
 * The actual page table walker functions. In order to keep the
 * implementation of print_prot() short, we only check and pass
 * _PAGE_INVALID and _PAGE_PROTECT flags to note_page() if a region,
 * segment or page table entry is invalid or read-only.
 * After all it's just a hint that the current level being walked
 * contains an invalid or read-only entry.
 */
static void walk_pte_level(struct seq_file *m, struct pg_state *st,
			   pmd_t *pmd, unsigned long addr)
{
	unsigned int prot;
	pte_t *pte;
	int i;

	for (i = 0; i < PTRS_PER_PTE && addr < max_addr; i++) {
		st->current_address = addr;
		pte = pte_offset_kernel(pmd, addr);
		prot = pte_val(*pte) &
		       (_PAGE_PROTECT | _PAGE_INVALID | _PAGE_NOEXEC);
		note_page(m, st, prot, 4);
		addr += PAGE_SIZE;
	}
}
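
/*
 * Segment (PMD) and region (PUD) entries may map large pages directly; in
 * that case the protection bits of the entry itself are reported instead
 * of descending to the next level.
 */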
static void walk_pmd_level(struct seq_file *m, struct pg_state *st,
			   pud_t *pud, unsigned long addr)
{
	unsigned int prot;
	pmd_t *pmd;
	int i;

	for (i = 0; i < PTRS_PER_PMD && addr < max_addr; i++) {
		st->current_address = addr;
		pmd = pmd_offset(pud, addr);
		if (!pmd_none(*pmd)) {
			if (pmd_large(*pmd)) {
				prot = pmd_val(*pmd) &
				       (_SEGMENT_ENTRY_PROTECT |
					_SEGMENT_ENTRY_NOEXEC);
				note_page(m, st, prot, 3);
			} else
				walk_pte_level(m, st, pmd, addr);
		} else
			note_page(m, st, _PAGE_INVALID, 3);
		addr += PMD_SIZE;
	}
}

static void walk_pud_level(struct seq_file *m, struct pg_state *st,
			   p4d_t *p4d, unsigned long addr)
{
	unsigned int prot;
	pud_t *pud;
	int i;

	for (i = 0; i < PTRS_PER_PUD && addr < max_addr; i++) {
		st->current_address = addr;
		pud = pud_offset(p4d, addr);
		if (!pud_none(*pud))
			if (pud_large(*pud)) {
				prot = pud_val(*pud) &
				       (_REGION_ENTRY_PROTECT |
					_REGION_ENTRY_NOEXEC);
				note_page(m, st, prot, 2);
			} else
				walk_pmd_level(m, st, pud, addr);
		else
			note_page(m, st, _PAGE_INVALID, 2);
		addr += PUD_SIZE;
	}
}

static void walk_p4d_level(struct seq_file *m, struct pg_state *st,
			   pgd_t *pgd, unsigned long addr)
{
	p4d_t *p4d;
	int i;

	for (i = 0; i < PTRS_PER_P4D && addr < max_addr; i++) {
		st->current_address = addr;
		p4d = p4d_offset(pgd, addr);
		if (!p4d_none(*p4d))
			walk_pud_level(m, st, p4d, addr);
		else
			note_page(m, st, _PAGE_INVALID, 2);
		addr += P4D_SIZE;
	}
}
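
/*
 * Top-level walk over the kernel address space from 0 up to max_addr.
 * cond_resched() after each top-level entry keeps the walk preemptible,
 * and the final note_page() call flushes the last accumulated run.
 */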
static void walk_pgd_level(struct seq_file *m)
{
	unsigned long addr = 0;
	struct pg_state st;
	pgd_t *pgd;
	int i;

	memset(&st, 0, sizeof(st));
	for (i = 0; i < PTRS_PER_PGD && addr < max_addr; i++) {
		st.current_address = addr;
		pgd = pgd_offset_k(addr);
		if (!pgd_none(*pgd))
			walk_p4d_level(m, &st, pgd, addr);
		else
			note_page(m, &st, _PAGE_INVALID, 1);
		addr += PGDIR_SIZE;
		cond_resched();
	}
	/* Flush out the last page */
	st.current_address = max_addr;
	note_page(m, &st, 0, 0);
}
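
/*
 * debugfs/seq_file plumbing: reading the debugfs file generates the dump
 * via ptdump_show().
 */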
static int ptdump_show(struct seq_file *m, void *v)
{
	walk_pgd_level(m);
	return 0;
}

static int ptdump_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, ptdump_show, NULL);
}

static const struct file_operations ptdump_fops = {
	.open		= ptdump_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int pt_dump_init(void)
{
	/*
	 * Figure out the maximum virtual address being accessible with the
	 * kernel ASCE. We need this to keep the page table walker functions
	 * from accessing non-existent entries.
	 */
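	/*
	 * The table type encoded in the ASCE (0..3) determines the number of
	 * paging levels and thus the addressable range, (type * 11 + 31)
	 * address bits: 2 GB for a segment table, then 4 TB, 8 PB or 16 EB
	 * for region-third, -second and -first tables.
	 */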
	max_addr = (S390_lowcore.kernel_asce & _REGION_ENTRY_TYPE_MASK) >> 2;
	max_addr = 1UL << (max_addr * 11 + 31);
	address_markers[MODULES_NR].start_address = MODULES_VADDR;
	address_markers[VMEMMAP_NR].start_address = (unsigned long) vmemmap;
	address_markers[VMALLOC_NR].start_address = VMALLOC_START;
	debugfs_create_file("kernel_page_tables", 0400, NULL, NULL, &ptdump_fops);
	return 0;
}

device_initcall(pt_dump_init);
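
/*
 * Usage sketch (assuming debugfs is mounted at the usual /sys/kernel/debug
 * location): reading the file as root, e.g.
 *
 *	# cat /sys/kernel/debug/kernel_page_tables
 *
 * prints one "---[ ... ]---" heading per address marker and one line per
 * contiguous run of identically mapped memory (address range, size, level,
 * RO/RW, X/NX or I for invalid).
 */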