//===-- tsan_sync.cc ------------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_sync.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"

namespace __tsan {

void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s);
SyncVar::SyncVar()
    : mtx(MutexTypeSyncVar, StatMtxSyncVar) {
  Reset(0);
}
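
// One-time initialization of a freshly allocated SyncVar: records the address
// it guards, its unique id and (in C++ mode) the stack at which it was
// created, and registers the mutex with the deadlock detector if enabled.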
void SyncVar::Init(ThreadState *thr, uptr pc, uptr addr, u64 uid) {
  this->addr = addr;
  this->uid = uid;
  this->next = 0;

  creation_stack_id = 0;
  if (kCppMode)  // Go does not use them
    creation_stack_id = CurrentStackId(thr, pc);
  if (common_flags()->detect_deadlocks)
    DDMutexInit(thr, pc, this);
}
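
// Returns the object to a pristine state so it can be reused for another
// address. If thr is null the caller guarantees the clocks are already empty
// (checked); otherwise they are released back to the thread's clock cache.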
void SyncVar::Reset(ThreadState *thr) {
  uid = 0;
  creation_stack_id = 0;
  owner_tid = kInvalidTid;
  last_lock = 0;
  recursion = 0;
  is_rw = 0;
  is_recursive = 0;
  is_broken = 0;
  is_linker_init = 0;

  if (thr == 0) {
    CHECK_EQ(clock.size(), 0);
    CHECK_EQ(read_clock.size(), 0);
  } else {
    clock.Reset(&thr->clock_cache);
    read_clock.Reset(&thr->clock_cache);
  }
}
MetaMap::MetaMap() {
  atomic_store(&uid_gen_, 0, memory_order_relaxed);
}
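
// Associates a heap block descriptor (size, allocating thread, allocation
// stack) with the metadata cell for address p. The cell must be empty.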
void MetaMap::AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz) {
  u32 idx = block_alloc_.Alloc(&thr->block_cache);
  MBlock *b = block_alloc_.Map(idx);
  b->siz = sz;
  b->tid = thr->tid;
  b->stk = CurrentStackId(thr, pc);
  u32 *meta = MemToMeta(p);
  DCHECK_EQ(*meta, 0);
  *meta = idx | kFlagBlock;
}
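
// Frees the block starting at p together with any sync objects attached to
// the same range. Returns the size of the freed range (rounded up to the
// meta shadow cell), or 0 if there is no block at p.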
uptr MetaMap::FreeBlock(ThreadState *thr, uptr pc, uptr p) {
  MBlock *b = GetBlock(p);
  if (b == 0)
    return 0;
  uptr sz = RoundUpTo(b->siz, kMetaShadowCell);
  FreeRange(thr, pc, p, sz);
  return sz;
}
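
// Clears every metadata cell in [p, p+sz), returning the heap block and sync
// object descriptors chained off those cells to the thread-local allocator
// caches.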
void MetaMap::FreeRange(ThreadState *thr, uptr pc, uptr p, uptr sz) {
  u32 *meta = MemToMeta(p);
  u32 *end = MemToMeta(p + sz);
  if (end == meta)
    end++;
  for (; meta < end; meta++) {
    u32 idx = *meta;
    *meta = 0;
    for (;;) {
      if (idx == 0)
        break;
      if (idx & kFlagBlock) {
        block_alloc_.Free(&thr->block_cache, idx & ~kFlagMask);
        break;
      } else if (idx & kFlagSync) {
        DCHECK(idx & kFlagSync);
        SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
        u32 next = s->next;
        s->Reset(thr);
        sync_alloc_.Free(&thr->sync_cache, idx & ~kFlagMask);
        idx = next;
      } else {
        CHECK(0);
      }
    }
  }
}
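
// Walks the metadata chain for address p and returns its heap block
// descriptor, or null if the address is not part of an allocated block.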
MBlock* MetaMap::GetBlock(uptr p) {
  u32 *meta = MemToMeta(p);
  u32 idx = *meta;
  for (;;) {
    if (idx == 0)
      return 0;
    if (idx & kFlagBlock)
      return block_alloc_.Map(idx & ~kFlagMask);
    DCHECK(idx & kFlagSync);
    SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
    idx = s->next;
  }
}
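
// Thin wrappers around GetAndLock: the first creates the sync object if it
// does not exist yet, the second only looks up an existing one (and always
// takes the write lock).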
SyncVar* MetaMap::GetOrCreateAndLock(ThreadState *thr, uptr pc,
                                     uptr addr, bool write_lock) {
  return GetAndLock(thr, pc, addr, write_lock, true);
}

SyncVar* MetaMap::GetIfExistsAndLock(uptr addr) {
  return GetAndLock(0, 0, addr, true, false);
}
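
// Looks up (and optionally creates) the SyncVar for addr and returns it with
// its mutex held. The per-cell chain is updated lock-free: a speculatively
// allocated SyncVar is published with a compare-and-swap on the metadata cell
// and is retired again if another thread installed one for addr first.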
SyncVar* MetaMap::GetAndLock(ThreadState *thr, uptr pc,
                             uptr addr, bool write_lock, bool create) {
  u32 *meta = MemToMeta(addr);
  u32 idx0 = *meta;
  u32 myidx = 0;
  SyncVar *mys = 0;
  for (;;) {
    u32 idx = idx0;
    for (;;) {
      if (idx == 0)
        break;
      if (idx & kFlagBlock)
        break;
      DCHECK(idx & kFlagSync);
      SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
      if (s->addr == addr) {
        if (myidx != 0) {
          mys->Reset(thr);
          sync_alloc_.Free(&thr->sync_cache, myidx);
        }
        if (write_lock)
          s->mtx.Lock();
        else
          s->mtx.ReadLock();
        return s;
      }
      idx = s->next;
    }
    if (!create)
      return 0;
    if (*meta != idx0) {
      idx0 = *meta;
      continue;
    }

    if (myidx == 0) {
      const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed);
      myidx = sync_alloc_.Alloc(&thr->sync_cache);
      mys = sync_alloc_.Map(myidx);
      mys->Init(thr, pc, addr, uid);
    }
    mys->next = idx0;
    if (atomic_compare_exchange_strong((atomic_uint32_t*)meta, &idx0,
        myidx | kFlagSync, memory_order_release)) {
      if (write_lock)
        mys->mtx.Lock();
      else
        mys->mtx.ReadLock();
      return mys;
    }
  }
}
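
// Moves the metadata for the range [src, src+sz) to [dst, dst+sz) and patches
// the addresses stored in the attached sync objects. Iterates backwards when
// dst > src so that overlapping ranges are handled correctly.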
void MetaMap::MoveMemory(uptr src, uptr dst, uptr sz) {
  // src and dst can overlap; the caller guarantees that there are no
  // concurrent accesses to the regions (e.g. stop-the-world).
  CHECK_NE(src, dst);
  CHECK_NE(sz, 0);
  uptr diff = dst - src;
  u32 *src_meta = MemToMeta(src);
  u32 *dst_meta = MemToMeta(dst);
  u32 *src_meta_end = MemToMeta(src + sz);
  uptr inc = 1;
  if (dst > src) {
    src_meta = MemToMeta(src + sz) - 1;
    dst_meta = MemToMeta(dst + sz) - 1;
    src_meta_end = MemToMeta(src) - 1;
    inc = -1;
  }
  for (; src_meta != src_meta_end; src_meta += inc, dst_meta += inc) {
    CHECK_EQ(*dst_meta, 0);
    u32 idx = *src_meta;
    *src_meta = 0;
    *dst_meta = idx;
    // Patch the addresses in sync objects.
    while (idx != 0) {
      if (idx & kFlagBlock)
        break;
      CHECK(idx & kFlagSync);
      SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
      s->addr += diff;
      idx = s->next;
    }
  }
}
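
// Called when a thread goes idle: flushes its local block and sync allocator
// caches back to the central allocators.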
void MetaMap::OnThreadIdle(ThreadState *thr) {
  block_alloc_.FlushCache(&thr->block_cache);
  sync_alloc_.FlushCache(&thr->sync_cache);
}

}  // namespace __tsan