//=-- lsan_common_linux.cc ------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Implementation of common leak checking functionality. Linux-specific code.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_platform.h"
#include "lsan_common.h"

#if CAN_SANITIZE_LEAKS && SANITIZER_LINUX

#include <link.h>

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_linux.h"
#include "sanitizer_common/sanitizer_stackdepot.h"

namespace __lsan {

static const char kLinkerName[] = "ld";
// We request 2 modules matching "ld", so we can print a warning if there's
// more than one match. But only the first one is actually used.
static char linker_placeholder[2 * sizeof(LoadedModule)] ALIGNED(64);
static LoadedModule *linker = 0;

static bool IsLinker(const char* full_name) {
  return LibraryNameIs(full_name, kLinkerName);
}
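
// Finds the module corresponding to the dynamic linker and stashes it in
// |linker|, so that heap chunks allocated from the linker can later be
// treated as reachable (see ProcessPlatformSpecificAllocations() below).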
void InitializePlatformSpecificModules() {
  internal_memset(linker_placeholder, 0, sizeof(linker_placeholder));
  uptr num_matches = GetListOfModules(
      reinterpret_cast<LoadedModule *>(linker_placeholder), 2, IsLinker);
  if (num_matches == 1) {
    linker = reinterpret_cast<LoadedModule *>(linker_placeholder);
    return;
  }
  if (num_matches == 0)
    VReport(1, "LeakSanitizer: Dynamic linker not found. "
               "TLS will not be handled correctly.\n");
  else if (num_matches > 1)
    VReport(1, "LeakSanitizer: Multiple modules match \"%s\". "
               "TLS will not be handled correctly.\n", kLinkerName);
  linker = 0;
}
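
// dl_iterate_phdr() callback. Scans each writable PT_LOAD segment of the
// given module for pointers into the heap, carving out the range occupied by
// our own allocator's global state.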
static int ProcessGlobalRegionsCallback(struct dl_phdr_info *info, size_t size,
                                        void *data) {
  Frontier *frontier = reinterpret_cast<Frontier *>(data);
  for (uptr j = 0; j < info->dlpi_phnum; j++) {
    const ElfW(Phdr) *phdr = &(info->dlpi_phdr[j]);
    // We're looking for .data and .bss sections, which reside in writeable,
    // loadable segments.
    if (!(phdr->p_flags & PF_W) || (phdr->p_type != PT_LOAD) ||
        (phdr->p_memsz == 0))
      continue;
    uptr begin = info->dlpi_addr + phdr->p_vaddr;
    uptr end = begin + phdr->p_memsz;
    uptr allocator_begin = 0, allocator_end = 0;
    GetAllocatorGlobalRange(&allocator_begin, &allocator_end);
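    // If the allocator's own globals fall within this segment, scan around
    // them rather than through them: the allocator's internal bookkeeping
    // points at heap chunks and would otherwise anchor them as reachable.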
    if (begin <= allocator_begin && allocator_begin < end) {
      CHECK_LE(allocator_begin, allocator_end);
      CHECK_LT(allocator_end, end);
      if (begin < allocator_begin)
        ScanRangeForPointers(begin, allocator_begin, frontier, "GLOBAL",
                             kReachable);
      if (allocator_end < end)
        ScanRangeForPointers(allocator_end, end, frontier, "GLOBAL",
                             kReachable);
    } else {
      ScanRangeForPointers(begin, end, frontier, "GLOBAL", kReachable);
    }
  }
  return 0;
}

// Scans global variables for heap pointers.
void ProcessGlobalRegions(Frontier *frontier) {
  if (!flags()->use_globals) return;
  // FIXME: dl_iterate_phdr acquires a linker lock, so we run a risk of
  // deadlocking by running this under StopTheWorld. However, the lock is
  // reentrant, so we should be able to fix this by acquiring the lock before
  // suspending threads.
  dl_iterate_phdr(ProcessGlobalRegionsCallback, frontier);
}
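
// Returns the PC of the caller of the allocation function for the stack
// trace with the given id, or 0 if the trace is too short to tell.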
static uptr GetCallerPC(u32 stack_id, StackDepotReverseMap *map) {
  CHECK(stack_id);
  StackTrace stack = map->Get(stack_id);
  // The top frame is our malloc/calloc/etc. The next frame is the caller.
  if (stack.size >= 2)
    return stack.trace[1];
  return 0;
}
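
// Bundles the arguments that ProcessPlatformSpecificAllocationsCb() receives
// through ForEachChunk()'s single opaque pointer.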
struct ProcessPlatformAllocParam {
  Frontier *frontier;
  StackDepotReverseMap *stack_depot_reverse_map;
};

// ForEachChunk callback. Identifies unreachable chunks which must be treated
// as reachable. Marks them as reachable and adds them to the frontier.
static void ProcessPlatformSpecificAllocationsCb(uptr chunk, void *arg) {
  CHECK(arg);
  ProcessPlatformAllocParam *param =
      reinterpret_cast<ProcessPlatformAllocParam *>(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kReachable) {
    u32 stack_id = m.stack_trace_id();
    uptr caller_pc = 0;
    if (stack_id > 0)
      caller_pc = GetCallerPC(stack_id, param->stack_depot_reverse_map);
    // If caller_pc is unknown, this chunk may be allocated in a coroutine.
    // Mark it as reachable, as we can't properly report its allocation stack
    // anyway.
    if (caller_pc == 0 || linker->containsAddress(caller_pc)) {
      m.set_tag(kReachable);
      param->frontier->push_back(chunk);
    }
  }
}

// Handles dynamically allocated TLS blocks by treating all chunks allocated
// from ld-linux.so as reachable.
// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
// They are allocated with a __libc_memalign() call in allocate_and_init()
// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
// blocks, but we can make sure they come from our own allocator by
// intercepting __libc_memalign(). On top of that, there is no easy way to
// reach them. Their addresses are stored in a dynamically allocated array
// (the DTV) which is referenced from the static TLS. Unfortunately, we can't
// just rely on the DTV being reachable from the static TLS, and the dynamic
// TLS being reachable from the DTV. This is because the initial DTV is
// allocated before our interception mechanism kicks in, and thus we don't
// recognize it as allocated memory. We can't special-case it either, since
// we don't know its size.
// Our solution is to include in the root set all allocations made from
// ld-linux.so (which is where allocate_and_init() is implemented). This is
// guaranteed to include all dynamic TLS blocks (and possibly other
// allocations which we don't care about).
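//
// Illustration (hypothetical example, not code from this file): a dlopen()ed
// library defining
//   __thread char big_tls_buf[1 << 16];
// gets that buffer placed in a dynamic TLS block that glibc allocates via
// __libc_memalign() on first access from each thread. No root region we scan
// points at that block directly, so without the logic below it would be
// falsely reported as leaked.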
void ProcessPlatformSpecificAllocations(Frontier *frontier) {
  if (!flags()->use_tls) return;
  if (!linker) return;
  StackDepotReverseMap stack_depot_reverse_map;
  ProcessPlatformAllocParam arg = {frontier, &stack_depot_reverse_map};
  ForEachChunk(ProcessPlatformSpecificAllocationsCb, &arg);
}

} // namespace __lsan

#endif // CAN_SANITIZE_LEAKS && SANITIZER_LINUX