/* mm/page_poison.c — fill freed pages with a poison pattern and verify it on allocation. */
  1. #include <linux/kernel.h>
  2. #include <linux/string.h>
  3. #include <linux/mm.h>
  4. #include <linux/highmem.h>
  5. #include <linux/page_ext.h>
  6. #include <linux/poison.h>
  7. #include <linux/ratelimit.h>
/* True once init_page_poisoning() decides poisoning is active; read on every alloc/free path. */
static bool __page_poisoning_enabled __read_mostly;
/* Set from the "page_poison" early boot parameter; consulted during init. */
static bool want_page_poisoning __read_mostly;
  10. static int early_page_poison_param(char *buf)
  11. {
  12. if (!buf)
  13. return -EINVAL;
  14. return strtobool(buf, &want_page_poisoning);
  15. }
  16. early_param("page_poison", early_page_poison_param);
/*
 * page_poisoning_enabled - report whether page poisoning is active.
 *
 * Returns the flag latched by init_page_poisoning() at boot.
 */
bool page_poisoning_enabled(void)
{
	return __page_poisoning_enabled;
}
/*
 * page_ext ->need hook: request page_ext storage only when poisoning
 * was asked for on the command line.
 */
static bool need_page_poisoning(void)
{
	return want_page_poisoning;
}
  25. static void init_page_poisoning(void)
  26. {
  27. /*
  28. * page poisoning is debug page alloc for some arches. If either
  29. * of those options are enabled, enable poisoning
  30. */
  31. if (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC)) {
  32. if (!want_page_poisoning && !debug_pagealloc_enabled())
  33. return;
  34. } else {
  35. if (!want_page_poisoning)
  36. return;
  37. }
  38. __page_poisoning_enabled = true;
  39. }
/*
 * Registered with the page_ext framework: ->need decides at boot whether
 * per-page extension storage is required, ->init latches the enable flag.
 */
struct page_ext_operations page_poisoning_ops = {
	.need = need_page_poisoning,
	.init = init_page_poisoning,
};
  44. static inline void set_page_poison(struct page *page)
  45. {
  46. struct page_ext *page_ext;
  47. page_ext = lookup_page_ext(page);
  48. if (unlikely(!page_ext))
  49. return;
  50. __set_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
  51. }
  52. static inline void clear_page_poison(struct page *page)
  53. {
  54. struct page_ext *page_ext;
  55. page_ext = lookup_page_ext(page);
  56. if (unlikely(!page_ext))
  57. return;
  58. __clear_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
  59. }
  60. bool page_is_poisoned(struct page *page)
  61. {
  62. struct page_ext *page_ext;
  63. page_ext = lookup_page_ext(page);
  64. if (unlikely(!page_ext))
  65. return false;
  66. return test_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
  67. }
  68. static void poison_page(struct page *page)
  69. {
  70. void *addr = kmap_atomic(page);
  71. set_page_poison(page);
  72. memset(addr, PAGE_POISON, PAGE_SIZE);
  73. kunmap_atomic(addr);
  74. }
  75. static void poison_pages(struct page *page, int n)
  76. {
  77. int i;
  78. for (i = 0; i < n; i++)
  79. poison_page(page + i);
  80. }
  81. static bool single_bit_flip(unsigned char a, unsigned char b)
  82. {
  83. unsigned char error = a ^ b;
  84. return error && !(error & (error - 1));
  85. }
  86. static void check_poison_mem(unsigned char *mem, size_t bytes)
  87. {
  88. static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 10);
  89. unsigned char *start;
  90. unsigned char *end;
  91. if (IS_ENABLED(CONFIG_PAGE_POISONING_NO_SANITY))
  92. return;
  93. start = memchr_inv(mem, PAGE_POISON, bytes);
  94. if (!start)
  95. return;
  96. for (end = mem + bytes - 1; end > start; end--) {
  97. if (*end != PAGE_POISON)
  98. break;
  99. }
  100. if (!__ratelimit(&ratelimit))
  101. return;
  102. else if (start == end && single_bit_flip(*start, PAGE_POISON))
  103. pr_err("pagealloc: single bit error\n");
  104. else
  105. pr_err("pagealloc: memory corruption\n");
  106. print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1, start,
  107. end - start + 1, 1);
  108. dump_stack();
  109. }
  110. static void unpoison_page(struct page *page)
  111. {
  112. void *addr;
  113. if (!page_is_poisoned(page))
  114. return;
  115. addr = kmap_atomic(page);
  116. check_poison_mem(addr, PAGE_SIZE);
  117. clear_page_poison(page);
  118. kunmap_atomic(addr);
  119. }
  120. static void unpoison_pages(struct page *page, int n)
  121. {
  122. int i;
  123. for (i = 0; i < n; i++)
  124. unpoison_page(page + i);
  125. }
  126. void kernel_poison_pages(struct page *page, int numpages, int enable)
  127. {
  128. if (!page_poisoning_enabled())
  129. return;
  130. if (enable)
  131. unpoison_pages(page, numpages);
  132. else
  133. poison_pages(page, numpages);
  134. }
#ifndef CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC
/*
 * Stub for architectures without native debug_pagealloc page-table
 * manipulation: the poison pattern written/checked by
 * kernel_poison_pages() provides the map/unmap sanity checking instead.
 */
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	/* This function does nothing, all work is done via poison pages */
}
#endif