kmemcheck.c

#include <linux/gfp.h>
#include <linux/mm_types.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include "slab.h"
#include <linux/kmemcheck.h>
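
/*
 * Allocate shadow pages for a freshly allocated page block and hide the
 * data pages from the MMU so that every access faults into kmemcheck for
 * analysis.
 */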
void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
{
	struct page *shadow;
	int pages;
	int i;

	pages = 1 << order;

	/*
	 * With kmemcheck enabled, we need to allocate a memory area for the
	 * shadow bits as well.
	 */
	shadow = alloc_pages_node(node, flags | __GFP_NOTRACK, order);
	if (!shadow) {
		if (printk_ratelimit())
			pr_err("kmemcheck: failed to allocate shadow bitmap\n");
		return;
	}

	for (i = 0; i < pages; ++i)
		page[i].shadow = page_address(&shadow[i]);

	/*
	 * Mark it as non-present for the MMU so that our accesses to
	 * this memory will trigger a page fault and let us analyze
	 * the memory accesses.
	 */
	kmemcheck_hide_pages(page, pages);
}
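
/*
 * Undo kmemcheck_alloc_shadow(): make the data pages visible to the MMU
 * again, clear the per-page shadow pointers and free the shadow pages.
 */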
void kmemcheck_free_shadow(struct page *page, int order)
{
	struct page *shadow;
	int pages;
	int i;

	if (!kmemcheck_page_is_tracked(page))
		return;

	pages = 1 << order;

	kmemcheck_show_pages(page, pages);

	shadow = virt_to_page(page[0].shadow);

	for (i = 0; i < pages; ++i)
		page[i].shadow = NULL;

	__free_pages(shadow, order);
}
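
/*
 * Slab allocation hook: decide whether a freshly allocated object starts
 * out as initialized or uninitialized in the shadow memory.
 */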
void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
			  size_t size)
{
	if (unlikely(!object)) /* Skip object if allocation failed */
		return;

	/*
	 * The object has already been memset(), which initializes the
	 * shadow for us as well.
	 */
	if (gfpflags & __GFP_ZERO)
		return;

	/* No need to initialize the shadow of a non-tracked slab. */
	if (s->flags & SLAB_NOTRACK)
		return;

	if (!kmemcheck_enabled || gfpflags & __GFP_NOTRACK) {
		/*
		 * Allow notracked objects to be allocated from
		 * tracked caches. Note however that these objects
		 * will still get page faults on access, they just
		 * won't ever be flagged as uninitialized. If page
		 * faults are not acceptable, the slab cache itself
		 * should be marked NOTRACK.
		 */
		kmemcheck_mark_initialized(object, size);
	} else if (!s->ctor) {
		/*
		 * New objects should be marked uninitialized before
		 * they're returned to the caller.
		 */
		kmemcheck_mark_uninitialized(object, size);
	}
}
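
/*
 * Slab free hook: mark the object as freed in the shadow memory, except
 * for caches with constructors or RCU-delayed freeing.
 */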
void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size)
{
	/* TODO: RCU freeing is unsupported for now; hide false positives. */
	if (!s->ctor && !(s->flags & SLAB_DESTROY_BY_RCU))
		kmemcheck_mark_freed(object, size);
}
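
/*
 * Page allocator hook: allocate shadow pages for tracked, non-highmem
 * allocations (with no NUMA node preference) and mark them initialized
 * or uninitialized depending on __GFP_ZERO.
 */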
void kmemcheck_pagealloc_alloc(struct page *page, unsigned int order,
			       gfp_t gfpflags)
{
	int pages;

	if (gfpflags & (__GFP_HIGHMEM | __GFP_NOTRACK))
		return;

	pages = 1 << order;

	/*
	 * NOTE: We choose to track GFP_ZERO pages too; in fact, they
	 * can become uninitialized by copying uninitialized memory
	 * into them.
	 */

	/* XXX: Can use zone->node for node? */
	kmemcheck_alloc_shadow(page, order, gfpflags, -1);

	if (gfpflags & __GFP_ZERO)
		kmemcheck_mark_initialized_pages(page, pages);
	else
		kmemcheck_mark_uninitialized_pages(page, pages);
}