debug-pagealloc.c

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/page-debug-flags.h>
#include <linux/poison.h>
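
/*
 * Poisoning state is tracked per page via a bit in page->debug_flags.
 * The non-atomic __set_bit()/__clear_bit() variants are used here: the
 * bit is only updated on the allocation and free paths, while no other
 * user is expected to hold a reference to the page.
 */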
static inline void set_page_poison(struct page *page)
{
        __set_bit(PAGE_DEBUG_FLAG_POISON, &page->debug_flags);
}

static inline void clear_page_poison(struct page *page)
{
        __clear_bit(PAGE_DEBUG_FLAG_POISON, &page->debug_flags);
}

static inline bool page_poison(struct page *page)
{
        return test_bit(PAGE_DEBUG_FLAG_POISON, &page->debug_flags);
}

static void poison_highpage(struct page *page)
{
        /*
         * Page poisoning for highmem pages is not implemented.
         *
         * This can be called from interrupt contexts.
         * So we need to create a new kmap_atomic slot for this
         * application and it will need interrupt protection.
         */
}

static void poison_page(struct page *page)
{
        void *addr;

        if (PageHighMem(page)) {
                poison_highpage(page);
                return;
        }
        set_page_poison(page);
        addr = page_address(page);
        memset(addr, PAGE_POISON, PAGE_SIZE);
}

static void poison_pages(struct page *page, int n)
{
        int i;

        for (i = 0; i < n; i++)
                poison_page(page + i);
}
static bool single_bit_flip(unsigned char a, unsigned char b)
{
        unsigned char error = a ^ b;

        return error && !(error & (error - 1));
}
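
/*
 * Scan a poisoned area: find the first and last bytes that no longer
 * hold PAGE_POISON and, rate-limited by printk_ratelimit(), report the
 * corrupted range with a hex dump and a stack trace.  A single damaged
 * byte that differs from PAGE_POISON by one bit is reported as a bit
 * error rather than as memory corruption.
 */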
static void check_poison_mem(unsigned char *mem, size_t bytes)
{
        unsigned char *start;
        unsigned char *end;

        for (start = mem; start < mem + bytes; start++) {
                if (*start != PAGE_POISON)
                        break;
        }
        if (start == mem + bytes)
                return;

        for (end = mem + bytes - 1; end > start; end--) {
                if (*end != PAGE_POISON)
                        break;
        }

        if (!printk_ratelimit())
                return;
        else if (start == end && single_bit_flip(*start, PAGE_POISON))
                printk(KERN_ERR "pagealloc: single bit error\n");
        else
                printk(KERN_ERR "pagealloc: memory corruption\n");

        print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1, start,
                        end - start + 1, 1);
        dump_stack();
}

static void unpoison_highpage(struct page *page)
{
        /*
         * See comment in poison_highpage().
         * Highmem pages should not be poisoned for now
         */
        BUG_ON(page_poison(page));
}
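
/*
 * Called when a previously poisoned page leaves the free lists again:
 * verify that the PAGE_POISON pattern is still intact and clear the
 * poison flag before the page is handed to its new owner.
 */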
static void unpoison_page(struct page *page)
{
        if (PageHighMem(page)) {
                unpoison_highpage(page);
                return;
        }
        if (page_poison(page)) {
                void *addr = page_address(page);

                check_poison_mem(addr, PAGE_SIZE);
                clear_page_poison(page);
        }
}

static void unpoison_pages(struct page *page, int n)
{
        int i;

        for (i = 0; i < n; i++)
                unpoison_page(page + i);
}
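
/*
 * CONFIG_DEBUG_PAGEALLOC hook called by the page allocator: enable == 0
 * when pages are freed (fill them with PAGE_POISON) and enable == 1
 * when they are allocated again (check the pattern and unpoison).  This
 * software variant poisons memory contents instead of changing page
 * table mappings, and does nothing unless debug_pagealloc_enabled is
 * set.
 */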
void kernel_map_pages(struct page *page, int numpages, int enable)
{
        if (!debug_pagealloc_enabled)
                return;

        if (enable)
                unpoison_pages(page, numpages);
        else
                poison_pages(page, numpages);
}
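
For context, a minimal sketch of the kind of bug this file catches. This is a hypothetical out-of-tree test module (poison_demo.c, not part of the kernel source): a write through a stale pointer lands in a page that debug-pagealloc has already poisoned, and the damage is reported when that page is next handed out.

/* poison_demo.c - hypothetical demo module; all names here are illustrative. */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/gfp.h>

static int __init poison_demo_init(void)
{
        unsigned long addr = __get_free_page(GFP_KERNEL);
        char *stale = (char *)addr;

        if (!addr)
                return -ENOMEM;

        /* Free the page: debug-pagealloc fills it with PAGE_POISON. */
        free_page(addr);

        /* Use-after-free write: damages the poison pattern. */
        stale[64] = 0xaa;

        /*
         * Nothing is reported yet.  The corruption (hex dump plus stack
         * trace of the allocating context) shows up the next time this
         * particular page is allocated and unpoison_page() checks it.
         */
        return 0;
}

static void __exit poison_demo_exit(void)
{
}

module_init(poison_demo_init);
module_exit(poison_demo_exit);
MODULE_LICENSE("GPL");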