tsan_rtl_report.cc

//===-- tsan_rtl_report.cc ------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"
#include "tsan_report.h"
#include "tsan_sync.h"
#include "tsan_mman.h"
#include "tsan_flags.h"
#include "tsan_fd.h"

namespace __tsan {

using namespace __sanitizer;  // NOLINT

static ReportStack *SymbolizeStack(StackTrace trace);

void TsanCheckFailed(const char *file, int line, const char *cond,
                     u64 v1, u64 v2) {
  // There is a high probability that interceptors will check-fail as well;
  // on the other hand, there is no sense in processing interceptors
  // since we are going to die soon.
  ScopedIgnoreInterceptors ignore;
  Printf("FATAL: ThreadSanitizer CHECK failed: "
         "%s:%d \"%s\" (0x%zx, 0x%zx)\n",
         file, line, cond, (uptr)v1, (uptr)v2);
  PrintCurrentStackSlow(StackTrace::GetCurrentPc());
  Die();
}

// Can be overridden by an application/test to intercept reports.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnReport(const ReportDesc *rep, bool suppressed);
#else
SANITIZER_INTERFACE_ATTRIBUTE
bool WEAK OnReport(const ReportDesc *rep, bool suppressed) {
  (void)rep;
  return suppressed;
}
#endif
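
// Strips the bottom-most frame of a report stack when it belongs to the
// runtime rather than to user code: the frame above main(), the tsan thread
// start trampoline, or the global ctors initializer.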
static void StackStripMain(ReportStack *stack) {
  ReportStack *last_frame = 0;
  ReportStack *last_frame2 = 0;
  for (ReportStack *ent = stack; ent; ent = ent->next) {
    last_frame2 = last_frame;
    last_frame = ent;
  }

  if (last_frame2 == 0)
    return;
  const char *last = last_frame->info.function;
#ifndef TSAN_GO
  const char *last2 = last_frame2->info.function;
  // Strip frame above 'main'
  if (last2 && 0 == internal_strcmp(last2, "main")) {
    last_frame2->next = 0;
  // Strip our internal thread start routine.
  } else if (last && 0 == internal_strcmp(last, "__tsan_thread_start_func")) {
    last_frame2->next = 0;
  // Strip global ctors init.
  } else if (last && 0 == internal_strcmp(last, "__do_global_ctors_aux")) {
    last_frame2->next = 0;
  // If both are 0, then we probably just failed to symbolize.
  } else if (last || last2) {
    // Ensure that we recovered the stack completely. A trimmed stack
    // can actually happen if we do not instrument some code,
    // so it's only a debug print. However, we must try hard not to miss it
    // due to our fault.
    DPrintf("Bottom stack frame of stack %zx is missed\n", stack->pc);
  }
#else
  // The last frame always points into the runtime (gosched0, goexit0,
  // runtime.main).
  last_frame2->next = 0;
  (void)last;
#endif
}

ReportStack *SymbolizeStackId(u32 stack_id) {
  if (stack_id == 0)
    return 0;
  StackTrace stack = StackDepotGet(stack_id);
  if (stack.trace == nullptr)
    return nullptr;
  return SymbolizeStack(stack);
}
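
// Symbolizes each pc in the trace (adjusting return addresses to point into
// the call instruction), chains the resulting frames with the innermost frame
// first, and strips runtime frames at the bottom via StackStripMain().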
static ReportStack *SymbolizeStack(StackTrace trace) {
  if (trace.size == 0)
    return 0;
  ReportStack *stack = 0;
  for (uptr si = 0; si < trace.size; si++) {
    const uptr pc = trace.trace[si];
#ifndef TSAN_GO
    // We obtain the return address, that is, address of the next instruction,
    // so offset it by 1 byte.
    const uptr pc1 = StackTrace::GetPreviousInstructionPc(pc);
#else
    // FIXME(dvyukov): Go sometimes uses address of a function as top pc.
    uptr pc1 = pc;
    if (si != trace.size - 1)
      pc1 -= 1;
#endif
    ReportStack *ent = SymbolizeCode(pc1);
    CHECK_NE(ent, 0);
    ReportStack *last = ent;
    while (last->next) {
      last->info.address = pc;  // restore original pc for report
      last = last->next;
    }
    last->info.address = pc;  // restore original pc for report
    last->next = stack;
    stack = ent;
  }
  StackStripMain(stack);
  return stack;
}
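
// A ScopedReport owns the report descriptor under construction. The
// constructor expects the thread registry lock to be held and acquires the
// report mutexes for the lifetime of the object.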
ScopedReport::ScopedReport(ReportType typ) {
  ctx->thread_registry->CheckLocked();
  void *mem = internal_alloc(MBlockReport, sizeof(ReportDesc));
  rep_ = new(mem) ReportDesc;
  rep_->typ = typ;
  ctx->report_mtx.Lock();
  CommonSanitizerReportMutex.Lock();
}

ScopedReport::~ScopedReport() {
  CommonSanitizerReportMutex.Unlock();
  ctx->report_mtx.Unlock();
  DestroyAndFree(rep_);
}

void ScopedReport::AddStack(StackTrace stack, bool suppressable) {
  ReportStack **rs = rep_->stacks.PushBack();
  *rs = SymbolizeStack(stack);
  (*rs)->suppressable = suppressable;
}
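
// Records one racy memory access: the accessing thread, the exact address
// range and access kind decoded from the shadow word, the symbolized stack,
// and the set of mutexes held at the time of the access.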
void ScopedReport::AddMemoryAccess(uptr addr, Shadow s, StackTrace stack,
                                   const MutexSet *mset) {
  void *mem = internal_alloc(MBlockReportMop, sizeof(ReportMop));
  ReportMop *mop = new(mem) ReportMop;
  rep_->mops.PushBack(mop);
  mop->tid = s.tid();
  mop->addr = addr + s.addr0();
  mop->size = s.size();
  mop->write = s.IsWrite();
  mop->atomic = s.IsAtomic();
  mop->stack = SymbolizeStack(stack);
  if (mop->stack)
    mop->stack->suppressable = true;
  for (uptr i = 0; i < mset->Size(); i++) {
    MutexSet::Desc d = mset->Get(i);
    u64 mid = this->AddMutex(d.id);
    ReportMopMutex mtx = {mid, d.write};
    mop->mset.PushBack(mtx);
  }
}

void ScopedReport::AddUniqueTid(int unique_tid) {
  rep_->unique_tids.PushBack(unique_tid);
}
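
// Adds the thread to the report unless it is already present, capturing its
// name, parent, running state, and symbolized creation stack.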
void ScopedReport::AddThread(const ThreadContext *tctx, bool suppressable) {
  for (uptr i = 0; i < rep_->threads.Size(); i++) {
    if ((u32)rep_->threads[i]->id == tctx->tid)
      return;
  }
  void *mem = internal_alloc(MBlockReportThread, sizeof(ReportThread));
  ReportThread *rt = new(mem) ReportThread();
  rep_->threads.PushBack(rt);
  rt->id = tctx->tid;
  rt->pid = tctx->os_id;
  rt->running = (tctx->status == ThreadStatusRunning);
  rt->name = internal_strdup(tctx->name);
  rt->parent_tid = tctx->parent_tid;
  rt->stack = 0;
  rt->stack = SymbolizeStackId(tctx->creation_stack_id);
  if (rt->stack)
    rt->stack->suppressable = suppressable;
}
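
// The helpers below are only needed in the C/C++ runtime: they look up thread
// contexts by unique id or tid and classify an address as belonging to a
// live thread's stack or TLS region. The registry lock must be held.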
#ifndef TSAN_GO
static ThreadContext *FindThreadByUidLocked(int unique_id) {
  ctx->thread_registry->CheckLocked();
  for (unsigned i = 0; i < kMaxTid; i++) {
    ThreadContext *tctx = static_cast<ThreadContext*>(
        ctx->thread_registry->GetThreadLocked(i));
    if (tctx && tctx->unique_id == (u32)unique_id) {
      return tctx;
    }
  }
  return 0;
}

static ThreadContext *FindThreadByTidLocked(int tid) {
  ctx->thread_registry->CheckLocked();
  return static_cast<ThreadContext*>(
      ctx->thread_registry->GetThreadLocked(tid));
}

static bool IsInStackOrTls(ThreadContextBase *tctx_base, void *arg) {
  uptr addr = (uptr)arg;
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  if (tctx->status != ThreadStatusRunning)
    return false;
  ThreadState *thr = tctx->thr;
  CHECK(thr);
  return ((addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size) ||
          (addr >= thr->tls_addr && addr < thr->tls_addr + thr->tls_size));
}

ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) {
  ctx->thread_registry->CheckLocked();
  ThreadContext *tctx = static_cast<ThreadContext*>(
      ctx->thread_registry->FindThreadContextLocked(IsInStackOrTls,
                                                    (void*)addr));
  if (!tctx)
    return 0;
  ThreadState *thr = tctx->thr;
  CHECK(thr);
  *is_stack = (addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size);
  return tctx;
}
#endif

void ScopedReport::AddThread(int unique_tid, bool suppressable) {
#ifndef TSAN_GO
  if (const ThreadContext *tctx = FindThreadByUidLocked(unique_tid))
    AddThread(tctx, suppressable);
#endif
}

void ScopedReport::AddMutex(const SyncVar *s) {
  for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
    if (rep_->mutexes[i]->id == s->uid)
      return;
  }
  void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
  ReportMutex *rm = new(mem) ReportMutex();
  rep_->mutexes.PushBack(rm);
  rm->id = s->uid;
  rm->addr = s->addr;
  rm->destroyed = false;
  rm->stack = SymbolizeStackId(s->creation_stack_id);
}
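
// Resolves a packed mutex id (address plus uid) back to a live SyncVar if one
// still exists at that address; otherwise the mutex is recorded as destroyed.
// Returns the id under which the mutex appears in the report.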
u64 ScopedReport::AddMutex(u64 id) {
  u64 uid = 0;
  u64 mid = id;
  uptr addr = SyncVar::SplitId(id, &uid);
  SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr);
  // Check that the mutex is still alive.
  // Another mutex can be created at the same address,
  // so check uid as well.
  if (s && s->CheckId(uid)) {
    mid = s->uid;
    AddMutex(s);
  } else {
    AddDeadMutex(id);
  }
  if (s)
    s->mtx.Unlock();
  return mid;
}

void ScopedReport::AddDeadMutex(u64 id) {
  for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
    if (rep_->mutexes[i]->id == id)
      return;
  }
  void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
  ReportMutex *rm = new(mem) ReportMutex();
  rep_->mutexes.PushBack(rm);
  rm->id = id;
  rm->addr = 0;
  rm->destroyed = true;
  rm->stack = 0;
}
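
// Classifies the racy address as a file descriptor, a heap block, a thread's
// stack/TLS, or a global, and attaches the corresponding location (plus the
// owning thread, when known) to the report.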
void ScopedReport::AddLocation(uptr addr, uptr size) {
  if (addr == 0)
    return;
#ifndef TSAN_GO
  int fd = -1;
  int creat_tid = -1;
  u32 creat_stack = 0;
  if (FdLocation(addr, &fd, &creat_tid, &creat_stack)) {
    ReportLocation *loc = ReportLocation::New(ReportLocationFD);
    loc->fd = fd;
    loc->tid = creat_tid;
    loc->stack = SymbolizeStackId(creat_stack);
    rep_->locs.PushBack(loc);
    ThreadContext *tctx = FindThreadByUidLocked(creat_tid);
    if (tctx)
      AddThread(tctx);
    return;
  }
  MBlock *b = 0;
  Allocator *a = allocator();
  if (a->PointerIsMine((void*)addr)) {
    void *block_begin = a->GetBlockBegin((void*)addr);
    if (block_begin)
      b = ctx->metamap.GetBlock((uptr)block_begin);
  }
  if (b != 0) {
    ThreadContext *tctx = FindThreadByTidLocked(b->tid);
    ReportLocation *loc = ReportLocation::New(ReportLocationHeap);
    loc->heap_chunk_start = (uptr)allocator()->GetBlockBegin((void *)addr);
    loc->heap_chunk_size = b->siz;
    loc->tid = tctx ? tctx->tid : b->tid;
    loc->stack = SymbolizeStackId(b->stk);
    rep_->locs.PushBack(loc);
    if (tctx)
      AddThread(tctx);
    return;
  }
  bool is_stack = false;
  if (ThreadContext *tctx = IsThreadStackOrTls(addr, &is_stack)) {
    ReportLocation *loc =
        ReportLocation::New(is_stack ? ReportLocationStack : ReportLocationTLS);
    loc->tid = tctx->tid;
    rep_->locs.PushBack(loc);
    AddThread(tctx);
  }
  if (ReportLocation *loc = SymbolizeData(addr)) {
    loc->suppressable = true;
    rep_->locs.PushBack(loc);
    return;
  }
#endif
}

#ifndef TSAN_GO
void ScopedReport::AddSleep(u32 stack_id) {
  rep_->sleep = SymbolizeStackId(stack_id);
}
#endif

void ScopedReport::SetCount(int count) {
  rep_->count = count;
}

const ReportDesc *ScopedReport::GetReport() const {
  return rep_;
}

void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
                  MutexSet *mset) {
  // This function restores the stack trace and mutex set for the thread/epoch.
  // It does so by getting the stack trace and mutex set at the beginning of
  // the trace part, and then replaying the trace up to the given epoch.
  ctx->thread_registry->CheckLocked();
  ThreadContext *tctx = static_cast<ThreadContext*>(
      ctx->thread_registry->GetThreadLocked(tid));
  if (tctx == 0)
    return;
  if (tctx->status != ThreadStatusRunning
      && tctx->status != ThreadStatusFinished
      && tctx->status != ThreadStatusDead)
    return;
  Trace* trace = ThreadTrace(tctx->tid);
  Lock l(&trace->mtx);
  const int partidx = (epoch / kTracePartSize) % TraceParts();
  TraceHeader* hdr = &trace->headers[partidx];
  if (epoch < hdr->epoch0)
    return;
  const u64 epoch0 = RoundDown(epoch, TraceSize());
  const u64 eend = epoch % TraceSize();
  const u64 ebegin = RoundDown(eend, kTracePartSize);
  DPrintf("#%d: RestoreStack epoch=%zu ebegin=%zu eend=%zu partidx=%d\n",
          tid, (uptr)epoch, (uptr)ebegin, (uptr)eend, partidx);
  InternalScopedBuffer<uptr> stack(kShadowStackSize);
  for (uptr i = 0; i < hdr->stack0.size; i++) {
    stack[i] = hdr->stack0.trace[i];
    DPrintf2(" #%02lu: pc=%zx\n", i, stack[i]);
  }
  if (mset)
    *mset = hdr->mset0;
  uptr pos = hdr->stack0.size;
  Event *events = (Event*)GetThreadTrace(tid);
  for (uptr i = ebegin; i <= eend; i++) {
    Event ev = events[i];
    EventType typ = (EventType)(ev >> 61);
    uptr pc = (uptr)(ev & ((1ull << 61) - 1));
    DPrintf2(" %zu typ=%d pc=%zx\n", i, typ, pc);
    if (typ == EventTypeMop) {
      stack[pos] = pc;
    } else if (typ == EventTypeFuncEnter) {
      stack[pos++] = pc;
    } else if (typ == EventTypeFuncExit) {
      if (pos > 0)
        pos--;
    }
    if (mset) {
      if (typ == EventTypeLock) {
        mset->Add(pc, true, epoch0 + i);
      } else if (typ == EventTypeUnlock) {
        mset->Del(pc, true);
      } else if (typ == EventTypeRLock) {
        mset->Add(pc, false, epoch0 + i);
      } else if (typ == EventTypeRUnlock) {
        mset->Del(pc, false);
      }
    }
    for (uptr j = 0; j <= pos; j++)
      DPrintf2(" #%zu: %zx\n", j, stack[j]);
  }
  if (pos == 0 && stack[0] == 0)
    return;
  pos++;
  stk->Init(stack.data(), pos);
}
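
// Returns true if this race duplicates one that has already been reported:
// either the pair of stack hashes or the address range matches an earlier
// report (subject to the suppress_equal_stacks/suppress_equal_addresses
// flags). The missing key is registered so that later duplicates match too.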
static bool HandleRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2],
                             uptr addr_min, uptr addr_max) {
  bool equal_stack = false;
  RacyStacks hash;
  if (flags()->suppress_equal_stacks) {
    hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
    hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
    for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) {
      if (hash == ctx->racy_stacks[i]) {
        DPrintf("ThreadSanitizer: suppressing report as doubled (stack)\n");
        equal_stack = true;
        break;
      }
    }
  }
  bool equal_address = false;
  RacyAddress ra0 = {addr_min, addr_max};
  if (flags()->suppress_equal_addresses) {
    for (uptr i = 0; i < ctx->racy_addresses.Size(); i++) {
      RacyAddress ra2 = ctx->racy_addresses[i];
      uptr maxbeg = max(ra0.addr_min, ra2.addr_min);
      uptr minend = min(ra0.addr_max, ra2.addr_max);
      if (maxbeg < minend) {
        DPrintf("ThreadSanitizer: suppressing report as doubled (addr)\n");
        equal_address = true;
        break;
      }
    }
  }
  if (equal_stack || equal_address) {
    if (!equal_stack)
      ctx->racy_stacks.PushBack(hash);
    if (!equal_address)
      ctx->racy_addresses.PushBack(ra0);
    return true;
  }
  return false;
}

static void AddRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2],
                          uptr addr_min, uptr addr_max) {
  if (flags()->suppress_equal_stacks) {
    RacyStacks hash;
    hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
    hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
    ctx->racy_stacks.PushBack(hash);
  }
  if (flags()->suppress_equal_addresses) {
    RacyAddress ra0 = {addr_min, addr_max};
    ctx->racy_addresses.PushBack(ra0);
  }
}
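
// Matches the report against user suppressions, gives the OnReport() hook a
// chance to intercept it, and prints it if nothing suppressed it. Returns
// true iff the report was actually printed.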
bool OutputReport(ThreadState *thr, const ScopedReport &srep) {
  atomic_store(&ctx->last_symbolize_time_ns, NanoTime(), memory_order_relaxed);
  const ReportDesc *rep = srep.GetReport();
  Suppression *supp = 0;
  uptr suppress_pc = 0;
  for (uptr i = 0; suppress_pc == 0 && i < rep->mops.Size(); i++)
    suppress_pc = IsSuppressed(rep->typ, rep->mops[i]->stack, &supp);
  for (uptr i = 0; suppress_pc == 0 && i < rep->stacks.Size(); i++)
    suppress_pc = IsSuppressed(rep->typ, rep->stacks[i], &supp);
  for (uptr i = 0; suppress_pc == 0 && i < rep->threads.Size(); i++)
    suppress_pc = IsSuppressed(rep->typ, rep->threads[i]->stack, &supp);
  for (uptr i = 0; suppress_pc == 0 && i < rep->locs.Size(); i++)
    suppress_pc = IsSuppressed(rep->typ, rep->locs[i], &supp);
  if (suppress_pc != 0) {
    FiredSuppression s = {srep.GetReport()->typ, suppress_pc, supp};
    ctx->fired_suppressions.push_back(s);
  }
  {
    bool old_is_freeing = thr->is_freeing;
    thr->is_freeing = false;
    bool suppressed = OnReport(rep, suppress_pc != 0);
    thr->is_freeing = old_is_freeing;
    if (suppressed)
      return false;
  }
  PrintReport(rep);
  ctx->nreported++;
  if (flags()->halt_on_error)
    internal__exit(flags()->exitcode);
  return true;
}
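
// Returns true if a suppression of this report type has already fired for
// any pc in the given trace (or for the given address in the overload below),
// bumping the suppression's hit count.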
bool IsFiredSuppression(Context *ctx, const ScopedReport &srep,
                        StackTrace trace) {
  for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
    if (ctx->fired_suppressions[k].type != srep.GetReport()->typ)
      continue;
    for (uptr j = 0; j < trace.size; j++) {
      FiredSuppression *s = &ctx->fired_suppressions[k];
      if (trace.trace[j] == s->pc) {
        if (s->supp)
          s->supp->hit_count++;
        return true;
      }
    }
  }
  return false;
}

static bool IsFiredSuppression(Context *ctx,
                               const ScopedReport &srep,
                               uptr addr) {
  for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
    if (ctx->fired_suppressions[k].type != srep.GetReport()->typ)
      continue;
    FiredSuppression *s = &ctx->fired_suppressions[k];
    if (addr == s->pc) {
      if (s->supp)
        s->supp->hit_count++;
      return true;
    }
  }
  return false;
}

bool FrameIsInternal(const ReportStack *frame) {
  if (frame == 0)
    return false;
  const char *file = frame->info.file;
  return file != 0 &&
         (internal_strstr(file, "tsan_interceptors.cc") ||
          internal_strstr(file, "sanitizer_common_interceptors.inc") ||
          internal_strstr(file, "tsan_interface_"));
}
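
// Returns true if the race must be reported even when report_atomic_races is
// disabled: either neither access is atomic, or an atomic access races with a
// heap free (use-after-free).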
static bool RaceBetweenAtomicAndFree(ThreadState *thr) {
  Shadow s0(thr->racy_state[0]);
  Shadow s1(thr->racy_state[1]);
  CHECK(!(s0.IsAtomic() && s1.IsAtomic()));
  if (!s0.IsAtomic() && !s1.IsAtomic())
    return true;
  if (s0.IsAtomic() && s1.IsFreed())
    return true;
  if (s1.IsAtomic() && thr->is_freeing)
    return true;
  return false;
}
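
// Builds and outputs a data race report for the two accesses recorded in
// thr->racy_state: restores the stack and mutex set of the second access from
// the thread trace, filters out suppressed and duplicate races, then collects
// the involved threads, mutexes, and memory location.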
void ReportRace(ThreadState *thr) {
  CheckNoLocks(thr);

  // Symbolizer makes lots of intercepted calls. If we try to process them,
  // at best it will cause deadlocks on internal mutexes.
  ScopedIgnoreInterceptors ignore;

  if (!flags()->report_bugs)
    return;
  if (!flags()->report_atomic_races && !RaceBetweenAtomicAndFree(thr))
    return;

  bool freed = false;
  {
    Shadow s(thr->racy_state[1]);
    freed = s.GetFreedAndReset();
    thr->racy_state[1] = s.raw();
  }

  uptr addr = ShadowToMem((uptr)thr->racy_shadow_addr);
  uptr addr_min = 0;
  uptr addr_max = 0;
  {
    uptr a0 = addr + Shadow(thr->racy_state[0]).addr0();
    uptr a1 = addr + Shadow(thr->racy_state[1]).addr0();
    uptr e0 = a0 + Shadow(thr->racy_state[0]).size();
    uptr e1 = a1 + Shadow(thr->racy_state[1]).size();
    addr_min = min(a0, a1);
    addr_max = max(e0, e1);
    if (IsExpectedReport(addr_min, addr_max - addr_min))
      return;
  }

  ThreadRegistryLock l0(ctx->thread_registry);

  ReportType typ = ReportTypeRace;
  if (thr->is_vptr_access && freed)
    typ = ReportTypeVptrUseAfterFree;
  else if (thr->is_vptr_access)
    typ = ReportTypeVptrRace;
  else if (freed)
    typ = ReportTypeUseAfterFree;
  ScopedReport rep(typ);
  if (IsFiredSuppression(ctx, rep, addr))
    return;
  const uptr kMop = 2;
  VarSizeStackTrace traces[kMop];
  const uptr toppc = TraceTopPC(thr);
  ObtainCurrentStack(thr, toppc, &traces[0]);
  if (IsFiredSuppression(ctx, rep, traces[0]))
    return;
  InternalScopedBuffer<MutexSet> mset2(1);
  new(mset2.data()) MutexSet();
  Shadow s2(thr->racy_state[1]);
  RestoreStack(s2.tid(), s2.epoch(), &traces[1], mset2.data());
  if (IsFiredSuppression(ctx, rep, traces[1]))
    return;

  if (HandleRacyStacks(thr, traces, addr_min, addr_max))
    return;

  for (uptr i = 0; i < kMop; i++) {
    Shadow s(thr->racy_state[i]);
    rep.AddMemoryAccess(addr, s, traces[i],
                        i == 0 ? &thr->mset : mset2.data());
  }

  for (uptr i = 0; i < kMop; i++) {
    FastState s(thr->racy_state[i]);
    ThreadContext *tctx = static_cast<ThreadContext*>(
        ctx->thread_registry->GetThreadLocked(s.tid()));
    if (s.epoch() < tctx->epoch0 || s.epoch() > tctx->epoch1)
      continue;
    rep.AddThread(tctx);
  }

  rep.AddLocation(addr_min, addr_max - addr_min);

#ifndef TSAN_GO
  {  // NOLINT
    Shadow s(thr->racy_state[1]);
    if (s.epoch() <= thr->last_sleep_clock.get(s.tid()))
      rep.AddSleep(thr->last_sleep_stack_id);
  }
#endif

  if (!OutputReport(thr, rep))
    return;

  AddRacyStacks(thr, traces, addr_min, addr_max);
}

void PrintCurrentStack(ThreadState *thr, uptr pc) {
  VarSizeStackTrace trace;
  ObtainCurrentStack(thr, pc, &trace);
  PrintStack(SymbolizeStack(trace));
}

void PrintCurrentStackSlow(uptr pc) {
#ifndef TSAN_GO
  BufferedStackTrace *ptrace =
      new(internal_alloc(MBlockStackTrace, sizeof(BufferedStackTrace)))
          BufferedStackTrace();
  ptrace->Unwind(kStackTraceMax, pc, 0, 0, 0, 0, false);
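  // The unwinder fills the buffer with the innermost frame first, while
  // SymbolizeStack() expects tsan's shadow-stack order (innermost frame last),
  // so reverse the buffer in place before symbolizing.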
  for (uptr i = 0; i < ptrace->size / 2; i++) {
    uptr tmp = ptrace->trace_buffer[i];
    ptrace->trace_buffer[i] = ptrace->trace_buffer[ptrace->size - i - 1];
    ptrace->trace_buffer[ptrace->size - i - 1] = tmp;
  }
  PrintStack(SymbolizeStack(*ptrace));
#endif
}

}  // namespace __tsan

using namespace __tsan;

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_print_stack_trace() {
  PrintCurrentStackSlow(StackTrace::GetCurrentPc());
}
}  // extern "C"