dump.c 10 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457
  1. /*
  2. * Debug helper to dump the current kernel pagetables of the system
  3. * so that we can see what the various memory ranges are set to.
  4. *
  5. * Derived from x86 implementation:
  6. * (C) Copyright 2008 Intel Corporation
  7. *
  8. * Author: Arjan van de Ven <arjan@linux.intel.com>
  9. *
  10. * This program is free software; you can redistribute it and/or
  11. * modify it under the terms of the GNU General Public License
  12. * as published by the Free Software Foundation; version 2
  13. * of the License.
  14. */
  15. #include <linux/debugfs.h>
  16. #include <linux/fs.h>
  17. #include <linux/mm.h>
  18. #include <linux/seq_file.h>
  19. #include <asm/domain.h>
  20. #include <asm/fixmap.h>
  21. #include <asm/memory.h>
  22. #include <asm/pgtable.h>
  23. #include <asm/ptdump.h>
/*
 * Named boundaries of the kernel virtual address space, in ascending
 * order.  note_page() prints a "---[ name ]---" header each time the
 * walk crosses into the next marker's range.
 */
static struct addr_marker address_markers[] = {
	{ MODULES_VADDR,	"Modules" },
	{ PAGE_OFFSET,		"Kernel Mapping" },
	/* start_address 0 is a placeholder: patched to VMALLOC_START in ptdump_initialize() */
	{ 0,			"vmalloc() Area" },
	{ VMALLOC_END,		"vmalloc() End" },
	{ FIXADDR_START,	"Fixmap Area" },
	{ VECTORS_BASE,	"Vectors" },
	{ VECTORS_BASE + PAGE_SIZE * 2, "Vectors End" },
	/* -1 (== ULONG_MAX) terminates the table */
	{ -1,			NULL },
};
/*
 * seq_file output helpers that quietly do nothing when no seq_file is
 * attached -- st->seq is NULL while ptdump_check_wx() walks the tables
 * purely to count W+X pages, without producing any dump output.
 */
#define pt_dump_seq_printf(m, fmt, args...) \
({ \
	if (m) \
		seq_printf(m, fmt, ##args); \
})

#define pt_dump_seq_puts(m, fmt) \
({ \
	if (m) \
		seq_printf(m, fmt); \
})
/*
 * Walker state carried through the page-table traversal.  Consecutive
 * entries with the same protection/level/domain are coalesced into one
 * run; the fields below describe the run currently being accumulated.
 */
struct pg_state {
	struct seq_file *seq;			/* output sink; NULL => W+X check only */
	const struct addr_marker *marker;	/* current region in address_markers[] */
	unsigned long start_address;		/* first address of the current run */
	unsigned level;				/* page-table level of the run (0 = none yet) */
	u64 current_prot;			/* masked protection bits of the run */
	bool check_wx;				/* true: warn on write+execute mappings */
	unsigned long wx_pages;			/* running count of W+X pages found */
	const char *current_domain;		/* domain label of the run (may be NULL) */
};
/*
 * One decodable protection field: the entry matches when
 * (prot & mask) == val.  dump_prot() then prints 'set', otherwise
 * 'clear' (a NULL string prints nothing).  ro_bit/nx_bit tag the
 * entries that ptdump_initialize() records for the W+X check.
 */
struct prot_bits {
	u64		mask;
	u64		val;
	const char	*set;
	const char	*clear;
	bool		ro_bit;		/* this entry is the read-only indicator */
	bool		nx_bit;		/* this entry is the no-execute indicator */
};
/*
 * Decoding table for Linux PTE (level 4) descriptors.  The trailing
 * L_PTE_MT_* entries all share the same mask, so at most one of them
 * matches and names the memory type of the mapping.
 */
static const struct prot_bits pte_bits[] = {
	{
		.mask	= L_PTE_USER,
		.val	= L_PTE_USER,
		.set	= "USR",
		.clear	= "   ",
	}, {
		.mask	= L_PTE_RDONLY,
		.val	= L_PTE_RDONLY,
		.set	= "ro",
		.clear	= "RW",
		.ro_bit	= true,
	}, {
		.mask	= L_PTE_XN,
		.val	= L_PTE_XN,
		.set	= "NX",
		.clear	= "x ",
		.nx_bit	= true,
	}, {
		.mask	= L_PTE_SHARED,
		.val	= L_PTE_SHARED,
		.set	= "SHD",
		.clear	= "   ",
	}, {
		.mask	= L_PTE_MT_MASK,
		.val	= L_PTE_MT_UNCACHED,
		.set	= "SO/UNCACHED",
	}, {
		.mask	= L_PTE_MT_MASK,
		.val	= L_PTE_MT_BUFFERABLE,
		.set	= "MEM/BUFFERABLE/WC",
	}, {
		.mask	= L_PTE_MT_MASK,
		.val	= L_PTE_MT_WRITETHROUGH,
		.set	= "MEM/CACHED/WT",
	}, {
		.mask	= L_PTE_MT_MASK,
		.val	= L_PTE_MT_WRITEBACK,
		.set	= "MEM/CACHED/WBRA",
/* minicache and non-shared device memory types only exist pre-LPAE */
#ifndef CONFIG_ARM_LPAE
	}, {
		.mask	= L_PTE_MT_MASK,
		.val	= L_PTE_MT_MINICACHE,
		.set	= "MEM/MINICACHE",
#endif
	}, {
		.mask	= L_PTE_MT_MASK,
		.val	= L_PTE_MT_WRITEALLOC,
		.set	= "MEM/CACHED/WBWA",
	}, {
		.mask	= L_PTE_MT_MASK,
		.val	= L_PTE_MT_DEV_SHARED,
		.set	= "DEV/SHARED",
#ifndef CONFIG_ARM_LPAE
	}, {
		.mask	= L_PTE_MT_MASK,
		.val	= L_PTE_MT_DEV_NONSHARED,
		.set	= "DEV/NONSHARED",
#endif
	}, {
		.mask	= L_PTE_MT_MASK,
		.val	= L_PTE_MT_DEV_WC,
		.set	= "DEV/WC",
	}, {
		.mask	= L_PTE_MT_MASK,
		.val	= L_PTE_MT_DEV_CACHED,
		.set	= "DEV/CACHED",
	},
};
/*
 * Decoding table for section (level 3) descriptors.  The access
 * permission encoding differs by architecture generation, hence the
 * three alternative leading entry sets; the XN and SHD entries at the
 * end are common to all of them.
 */
static const struct prot_bits section_bits[] = {
#ifdef CONFIG_ARM_LPAE
	{
		.mask	= PMD_SECT_USER,
		.val	= PMD_SECT_USER,
		.set	= "USR",
	}, {
		.mask	= L_PMD_SECT_RDONLY | PMD_SECT_AP2,
		.val	= L_PMD_SECT_RDONLY | PMD_SECT_AP2,
		.set	= "ro",
		.clear	= "RW",
		.ro_bit	= true,
#elif __LINUX_ARM_ARCH__ >= 6
	{
		.mask	= PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
		.val	= PMD_SECT_APX | PMD_SECT_AP_WRITE,
		.set	= "    ro",
		.ro_bit	= true,
	}, {
		.mask	= PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
		.val	= PMD_SECT_AP_WRITE,
		.set	= "    RW",
	}, {
		.mask	= PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
		.val	= PMD_SECT_AP_READ,
		.set	= "USR ro",
	}, {
		.mask	= PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
		.val	= PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
		.set	= "USR RW",
#else /* ARMv4/ARMv5  */
	/* These are approximate */
	{
		.mask   = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
		.val    = 0,
		.set    = "    ro",
		.ro_bit	= true,
	}, {
		.mask   = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
		.val    = PMD_SECT_AP_WRITE,
		.set    = "    RW",
	}, {
		.mask   = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
		.val    = PMD_SECT_AP_READ,
		.set    = "USR ro",
	}, {
		.mask   = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
		.val    = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
		.set    = "USR RW",
#endif
	}, {
		.mask	= PMD_SECT_XN,
		.val	= PMD_SECT_XN,
		.set	= "NX",
		.clear	= "x ",
		.nx_bit	= true,
	}, {
		.mask	= PMD_SECT_S,
		.val	= PMD_SECT_S,
		.set	= "SHD",
		.clear	= "   ",
	},
};
/*
 * Per-level decode description.  'mask' is the union of all entry
 * masks, and ro_bit/nx_bit point at the table entries flagged as such
 * -- all three are filled in by ptdump_initialize().
 */
struct pg_level {
	const struct prot_bits *bits;	/* decode table for this level (may be NULL) */
	size_t num;			/* number of entries in 'bits' */
	u64 mask;			/* union of all bits[].mask values */
	const struct prot_bits *ro_bit;	/* entry marking read-only mappings */
	const struct prot_bits *nx_bit;	/* entry marking non-executable mappings */
};
/*
 * Indexed by the 'level' argument of note_page(): 1 = pgd, 2 = pud,
 * 3 = pmd/section, 4 = pte (index 0 is unused).  Only the levels that
 * carry decodable protection bits get a table.
 */
static struct pg_level pg_level[] = {
	{
	}, { /* pgd */
	}, { /* pud */
	}, { /* pmd */
		.bits	= section_bits,
		.num	= ARRAY_SIZE(section_bits),
	}, { /* pte */
		.bits	= pte_bits,
		.num	= ARRAY_SIZE(pte_bits),
	},
};
  213. static void dump_prot(struct pg_state *st, const struct prot_bits *bits, size_t num)
  214. {
  215. unsigned i;
  216. for (i = 0; i < num; i++, bits++) {
  217. const char *s;
  218. if ((st->current_prot & bits->mask) == bits->val)
  219. s = bits->set;
  220. else
  221. s = bits->clear;
  222. if (s)
  223. pt_dump_seq_printf(st->seq, " %s", s);
  224. }
  225. }
  226. static void note_prot_wx(struct pg_state *st, unsigned long addr)
  227. {
  228. if (!st->check_wx)
  229. return;
  230. if ((st->current_prot & pg_level[st->level].ro_bit->mask) ==
  231. pg_level[st->level].ro_bit->val)
  232. return;
  233. if ((st->current_prot & pg_level[st->level].nx_bit->mask) ==
  234. pg_level[st->level].nx_bit->val)
  235. return;
  236. WARN_ONCE(1, "arm/mm: Found insecure W+X mapping at address %pS\n",
  237. (void *)st->start_address);
  238. st->wx_pages += (addr - st->start_address) / PAGE_SIZE;
  239. }
/*
 * Record one page-table entry at @addr.  Consecutive entries sharing
 * the same protection bits, level and domain are coalesced into a
 * single run; output (and the W+X check) happens only when the run is
 * broken -- by a change in any of those, or by crossing into the next
 * address marker's region.
 */
static void note_page(struct pg_state *st, unsigned long addr,
		      unsigned int level, u64 val, const char *domain)
{
	static const char units[] = "KMGTPE";
	u64 prot = val & pg_level[level].mask;

	if (!st->level) {
		/* Very first entry: open the initial run and region header. */
		st->level = level;
		st->current_prot = prot;
		st->current_domain = domain;
		pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
	} else if (prot != st->current_prot || level != st->level ||
		   domain != st->current_domain ||
		   addr >= st->marker[1].start_address) {
		const char *unit = units;
		unsigned long delta;

		/* current_prot == 0 means an unmapped run: nothing to print. */
		if (st->current_prot) {
			note_prot_wx(st, addr);
			pt_dump_seq_printf(st->seq, "0x%08lx-0x%08lx ",
					   st->start_address, addr);

			/* Scale the run size up through K/M/G/... units. */
			delta = (addr - st->start_address) >> 10;
			while (!(delta & 1023) && unit[1]) {
				delta >>= 10;
				unit++;
			}
			pt_dump_seq_printf(st->seq, "%9lu%c", delta, *unit);
			if (st->current_domain)
				pt_dump_seq_printf(st->seq, " %s",
						   st->current_domain);
			if (pg_level[st->level].bits)
				dump_prot(st, pg_level[st->level].bits,
					  pg_level[st->level].num);
			pt_dump_seq_printf(st->seq, "\n");
		}

		/* Crossed into the next named region: print its header. */
		if (addr >= st->marker[1].start_address) {
			st->marker++;
			pt_dump_seq_printf(st->seq, "---[ %s ]---\n",
					   st->marker->name);
		}

		/* Start a fresh run at this entry. */
		st->start_address = addr;
		st->current_prot = prot;
		st->current_domain = domain;
		st->level = level;
	}
}
  283. static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start,
  284. const char *domain)
  285. {
  286. pte_t *pte = pte_offset_kernel(pmd, 0);
  287. unsigned long addr;
  288. unsigned i;
  289. for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
  290. addr = start + i * PAGE_SIZE;
  291. note_page(st, addr, 4, pte_val(*pte), domain);
  292. }
  293. }
/*
 * Return a fixed-width label for the access domain encoded in a
 * first-level descriptor.  LPAE has no memory domains, so this returns
 * NULL there and the domain column is simply omitted from the dump.
 */
static const char *get_domain_name(pmd_t *pmd)
{
#ifndef CONFIG_ARM_LPAE
	switch (pmd_val(*pmd) & PMD_DOMAIN_MASK) {
	case PMD_DOMAIN(DOMAIN_KERNEL):
		return "KERNEL ";
	case PMD_DOMAIN(DOMAIN_USER):
		return "USER   ";
	case PMD_DOMAIN(DOMAIN_IO):
		return "IO     ";
	case PMD_DOMAIN(DOMAIN_VECTORS):
		return "VECTORS";
	default:
		return "unknown";
	}
#endif
	return NULL;
}
/*
 * Walk the PMD entries under @pud.  Section mappings (and empty or
 * non-present slots) are reported directly at level 3; table entries
 * recurse into walk_pte().
 */
static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
{
	pmd_t *pmd = pmd_offset(pud, 0);
	unsigned long addr;
	unsigned i;
	const char *domain;

	for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
		addr = start + i * PMD_SIZE;
		domain = get_domain_name(pmd);
		if (pmd_none(*pmd) || pmd_large(*pmd) || !pmd_present(*pmd))
			note_page(st, addr, 3, pmd_val(*pmd), domain);
		else
			walk_pte(st, pmd, addr, domain);

		/*
		 * On non-LPAE, a PMD spans two hardware section entries
		 * (SECTION_SIZE < PMD_SIZE).  If the second half of the
		 * pair is a section mapping, consume it here so it is
		 * reported with its own address and domain.
		 */
		if (SECTION_SIZE < PMD_SIZE && pmd_large(pmd[1])) {
			addr += SECTION_SIZE;
			pmd++;
			domain = get_domain_name(pmd);
			note_page(st, addr, 3, pmd_val(*pmd), domain);
		}
	}
}
  333. static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
  334. {
  335. pud_t *pud = pud_offset(pgd, 0);
  336. unsigned long addr;
  337. unsigned i;
  338. for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
  339. addr = start + i * PUD_SIZE;
  340. if (!pud_none(*pud)) {
  341. walk_pmd(st, pud, addr);
  342. } else {
  343. note_page(st, addr, 2, pud_val(*pud), NULL);
  344. }
  345. }
  346. }
  347. static void walk_pgd(struct pg_state *st, struct mm_struct *mm,
  348. unsigned long start)
  349. {
  350. pgd_t *pgd = pgd_offset(mm, 0UL);
  351. unsigned i;
  352. unsigned long addr;
  353. for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
  354. addr = start + i * PGDIR_SIZE;
  355. if (!pgd_none(*pgd)) {
  356. walk_pud(st, pgd, addr);
  357. } else {
  358. note_page(st, addr, 1, pgd_val(*pgd), NULL);
  359. }
  360. }
  361. }
  362. void ptdump_walk_pgd(struct seq_file *m, struct ptdump_info *info)
  363. {
  364. struct pg_state st = {
  365. .seq = m,
  366. .marker = info->markers,
  367. .check_wx = false,
  368. };
  369. walk_pgd(&st, info->mm, info->base_addr);
  370. note_page(&st, 0, 0, 0, NULL);
  371. }
  372. static void ptdump_initialize(void)
  373. {
  374. unsigned i, j;
  375. for (i = 0; i < ARRAY_SIZE(pg_level); i++)
  376. if (pg_level[i].bits)
  377. for (j = 0; j < pg_level[i].num; j++) {
  378. pg_level[i].mask |= pg_level[i].bits[j].mask;
  379. if (pg_level[i].bits[j].ro_bit)
  380. pg_level[i].ro_bit = &pg_level[i].bits[j];
  381. if (pg_level[i].bits[j].nx_bit)
  382. pg_level[i].nx_bit = &pg_level[i].bits[j];
  383. }
  384. address_markers[2].start_address = VMALLOC_START;
  385. }
/* Walk descriptor for the kernel's own page tables (init_mm), covering
 * the whole address space from 0 with the standard marker table. */
static struct ptdump_info kernel_ptdump_info = {
	.mm = &init_mm,
	.markers = address_markers,
	.base_addr = 0,
};
  391. void ptdump_check_wx(void)
  392. {
  393. struct pg_state st = {
  394. .seq = NULL,
  395. .marker = (struct addr_marker[]) {
  396. { 0, NULL},
  397. { -1, NULL},
  398. },
  399. .check_wx = true,
  400. };
  401. walk_pgd(&st, &init_mm, 0);
  402. note_page(&st, 0, 0, 0, NULL);
  403. if (st.wx_pages)
  404. pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found\n",
  405. st.wx_pages);
  406. else
  407. pr_info("Checked W+X mappings: passed, no W+X pages found\n");
  408. }
/*
 * Module init: finish table setup and expose the dump as the debugfs
 * file "kernel_page_tables".
 */
static int ptdump_init(void)
{
	ptdump_initialize();
	return ptdump_debugfs_register(&kernel_ptdump_info,
					"kernel_page_tables");
}
__initcall(ptdump_init);