//===-- sanitizer_deadlock_detector2.cc -----------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Deadlock detector implementation based on adjacency lists.
//
//===----------------------------------------------------------------------===//
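// Each mutex is a node in a directed graph. When a thread that already holds
// mutex A is about to block on mutex B, the edge A->B is recorded. An
// acquisition that closes a cycle in this graph is a potential deadlock, and
// the cycle itself is the lock-order inversion reported to the user.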
#include "sanitizer_deadlock_detector_interface.h"
#include "sanitizer_common.h"
#include "sanitizer_allocator_internal.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_mutex.h"

#if SANITIZER_DEADLOCK_DETECTOR_VERSION == 2

namespace __sanitizer {
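// Tuning constants. Mutex descriptors live in a two-level table of kL1Size
// pages with kL2Size entries each, so at most kMaxMutex mutexes are tracked.
// Each mutex keeps at most kMaxLink outgoing edges, and a thread may hold at
// most kMaxNesting mutexes at a time.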
const int kMaxNesting = 64;
const u32 kNoId = -1;
const u32 kEndId = -2;
const int kMaxLink = 8;
const int kL1Size = 1024;
const int kL2Size = 1024;
const int kMaxMutex = kL1Size * kL2Size;
struct Id {
  u32 id;
  u32 seq;

  explicit Id(u32 id = 0, u32 seq = 0)
      : id(id)
      , seq(seq) {
  }
};
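// A Link is a directed edge in the mutex graph pointing at mutex link.id.
// seq is the target's sequence number at the time the edge was added; it is
// used to detect stale edges (the target was destroyed and its id reused).
// tid and stk0/stk1 record which thread created the edge and the stacks of
// the two acquisitions, for reporting.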
struct Link {
  u32 id;
  u32 seq;
  u32 tid;
  u32 stk0;
  u32 stk1;

  explicit Link(u32 id = 0, u32 seq = 0, u32 tid = 0, u32 s0 = 0, u32 s1 = 0)
      : id(id)
      , seq(seq)
      , tid(tid)
      , stk0(s0)
      , stk1(s1) {
  }
};
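// Per-OS-thread scratch state: the pending report plus the worklist, path and
// visited set used by the iterative graph walk in CycleCheck.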
struct DDPhysicalThread {
  DDReport rep;
  bool report_pending;
  bool visited[kMaxMutex];
  Link pending[kMaxMutex];
  Link path[kMaxMutex];
};
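// Per-logical-thread state: the stack of currently held mutexes (id plus the
// acquisition stack when the second_deadlock_stack flag is enabled).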
struct ThreadMutex {
  u32 id;
  u32 stk;
};

struct DDLogicalThread {
  u64 ctx;
  ThreadMutex locked[kMaxNesting];
  int nlocked;
};
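// Global per-mutex node. seq is bumped on destruction to invalidate edges
// that still point at this id; link[] is the adjacency list of outgoing
// edges, protected by mtx.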
struct Mutex {
  StaticSpinMutex mtx;
  u32 seq;
  int nlink;
  Link link[kMaxLink];
};

struct DD : public DDetector {
  explicit DD(const DDFlags *flags);

  DDPhysicalThread *CreatePhysicalThread();
  void DestroyPhysicalThread(DDPhysicalThread *pt);

  DDLogicalThread *CreateLogicalThread(u64 ctx);
  void DestroyLogicalThread(DDLogicalThread *lt);

  void MutexInit(DDCallback *cb, DDMutex *m);
  void MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock);
  void MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock,
      bool trylock);
  void MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock);
  void MutexDestroy(DDCallback *cb, DDMutex *m);

  DDReport *GetReport(DDCallback *cb);

  void CycleCheck(DDPhysicalThread *pt, DDLogicalThread *lt, DDMutex *mtx);
  void Report(DDPhysicalThread *pt, DDLogicalThread *lt, int npath);
  u32 allocateId(DDCallback *cb);
  Mutex *getMutex(u32 id);
  u32 getMutexId(Mutex *m);

  DDFlags flags;

  Mutex *mutex[kL1Size];

  SpinMutex mtx;
  InternalMmapVector<u32> free_id;
  int id_gen;
};
DDetector *DDetector::Create(const DDFlags *flags) {
  void *mem = MmapOrDie(sizeof(DD), "deadlock detector");
  return new(mem) DD(flags);
}

DD::DD(const DDFlags *flags)
    : flags(*flags)
    , free_id(1024) {
  id_gen = 0;
}
DDPhysicalThread* DD::CreatePhysicalThread() {
  DDPhysicalThread *pt = (DDPhysicalThread*)MmapOrDie(sizeof(DDPhysicalThread),
      "deadlock detector (physical thread)");
  return pt;
}

void DD::DestroyPhysicalThread(DDPhysicalThread *pt) {
  pt->~DDPhysicalThread();
  UnmapOrDie(pt, sizeof(DDPhysicalThread));
}

DDLogicalThread* DD::CreateLogicalThread(u64 ctx) {
  DDLogicalThread *lt = (DDLogicalThread*)InternalAlloc(
      sizeof(DDLogicalThread));
  lt->ctx = ctx;
  lt->nlocked = 0;
  return lt;
}

void DD::DestroyLogicalThread(DDLogicalThread *lt) {
  lt->~DDLogicalThread();
  InternalFree(lt);
}
void DD::MutexInit(DDCallback *cb, DDMutex *m) {
  VPrintf(2, "#%llu: DD::MutexInit(%p)\n", cb->lt->ctx, m);
  m->id = kNoId;
  m->recursion = 0;
  atomic_store(&m->owner, 0, memory_order_relaxed);
}
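// Maps a mutex id to its slot in the two-level table: the high part of the
// id selects the L1 page, the low part the entry within that page.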
Mutex *DD::getMutex(u32 id) {
  return &mutex[id / kL2Size][id % kL2Size];
}

u32 DD::getMutexId(Mutex *m) {
  for (int i = 0; i < kL1Size; i++) {
    Mutex *tab = mutex[i];
    if (tab == 0)
      break;
    if (m >= tab && m < tab + kL2Size)
      return i * kL2Size + (m - tab);
  }
  return -1;
}
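// Returns a fresh mutex id, preferring ids recycled by MutexDestroy. A new
// L2 page is mapped lazily whenever id_gen crosses a page boundary.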
u32 DD::allocateId(DDCallback *cb) {
  u32 id = -1;
  SpinMutexLock l(&mtx);
  if (free_id.size() > 0) {
    id = free_id.back();
    free_id.pop_back();
  } else {
    CHECK_LT(id_gen, kMaxMutex);
    if ((id_gen % kL2Size) == 0) {
      mutex[id_gen / kL2Size] = (Mutex*)MmapOrDie(kL2Size * sizeof(Mutex),
          "deadlock detector (mutex table)");
    }
    id = id_gen++;
  }
  CHECK_LT(id, kMaxMutex);  // Valid ids are 0..kMaxMutex-1.
  VPrintf(3, "#%llu: DD::allocateId assign id %d\n", cb->lt->ctx, id);
  return id;
}
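// Called before a potentially blocking acquisition of m. Records the edge
// held->m for every mutex the thread already holds (deduplicated by target
// id and revalidated via seq), then runs the cycle check only if a new edge
// actually appeared.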
void DD::MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock) {
  VPrintf(2, "#%llu: DD::MutexBeforeLock(%p, wlock=%d) nlocked=%d\n",
      cb->lt->ctx, m, wlock, cb->lt->nlocked);
  DDPhysicalThread *pt = cb->pt;
  DDLogicalThread *lt = cb->lt;

  uptr owner = atomic_load(&m->owner, memory_order_relaxed);
  if (owner == (uptr)cb->lt) {
    VPrintf(3, "#%llu: DD::MutexBeforeLock recursive\n",
        cb->lt->ctx);
    return;
  }

  CHECK_LT(lt->nlocked, kMaxNesting);  // locked[] holds kMaxNesting entries.

  // FIXME(dvyukov): don't allocate id if lt->nlocked == 0?
  if (m->id == kNoId)
    m->id = allocateId(cb);

  ThreadMutex *tm = &lt->locked[lt->nlocked++];
  tm->id = m->id;
  if (flags.second_deadlock_stack)
    tm->stk = cb->Unwind();
  if (lt->nlocked == 1) {
    VPrintf(3, "#%llu: DD::MutexBeforeLock first mutex\n",
        cb->lt->ctx);
    return;
  }

  bool added = false;
  Mutex *mtx = getMutex(m->id);
  for (int i = 0; i < lt->nlocked - 1; i++) {
    u32 id1 = lt->locked[i].id;
    u32 stk1 = lt->locked[i].stk;
    Mutex *mtx1 = getMutex(id1);
    SpinMutexLock l(&mtx1->mtx);
    if (mtx1->nlink == kMaxLink) {
      // FIXME(dvyukov): check stale links
      continue;
    }
    int li = 0;
    for (; li < mtx1->nlink; li++) {
      Link *link = &mtx1->link[li];
      if (link->id == m->id) {
        if (link->seq != mtx->seq) {
          link->seq = mtx->seq;
          link->tid = lt->ctx;
          link->stk0 = stk1;
          link->stk1 = cb->Unwind();
          added = true;
          VPrintf(3, "#%llu: DD::MutexBeforeLock added %d->%d link\n",
              cb->lt->ctx, getMutexId(mtx1), m->id);
        }
        break;
      }
    }
    if (li == mtx1->nlink) {
      // FIXME(dvyukov): check stale links
      Link *link = &mtx1->link[mtx1->nlink++];
      link->id = m->id;
      link->seq = mtx->seq;
      link->tid = lt->ctx;
      link->stk0 = stk1;
      link->stk1 = cb->Unwind();
      added = true;
      VPrintf(3, "#%llu: DD::MutexBeforeLock added %d->%d link\n",
          cb->lt->ctx, getMutexId(mtx1), m->id);
    }
  }

  if (!added || mtx->nlink == 0) {
    VPrintf(3, "#%llu: DD::MutexBeforeLock don't check\n",
        cb->lt->ctx);
    return;
  }

  CycleCheck(pt, lt, m);
}
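// Called after a successful acquisition. Recursive and write locks update the
// owner/recursion bookkeeping. Trylock acquisitions do not pass through
// MutexBeforeLock (a failed trylock cannot block), so for them the held-mutex
// list is updated here instead; no edges are recorded.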
void DD::MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock,
    bool trylock) {
  VPrintf(2, "#%llu: DD::MutexAfterLock(%p, wlock=%d, try=%d) nlocked=%d\n",
      cb->lt->ctx, m, wlock, trylock, cb->lt->nlocked);
  DDLogicalThread *lt = cb->lt;

  uptr owner = atomic_load(&m->owner, memory_order_relaxed);
  if (owner == (uptr)cb->lt) {
    VPrintf(3, "#%llu: DD::MutexAfterLock recursive\n", cb->lt->ctx);
    CHECK(wlock);
    m->recursion++;
    return;
  }
  CHECK_EQ(owner, 0);
  if (wlock) {
    VPrintf(3, "#%llu: DD::MutexAfterLock set owner\n", cb->lt->ctx);
    CHECK_EQ(m->recursion, 0);
    m->recursion = 1;
    atomic_store(&m->owner, (uptr)cb->lt, memory_order_relaxed);
  }

  if (!trylock)
    return;

  CHECK_LT(lt->nlocked, kMaxNesting);  // locked[] holds kMaxNesting entries.
  if (m->id == kNoId)
    m->id = allocateId(cb);
  ThreadMutex *tm = &lt->locked[lt->nlocked++];
  tm->id = m->id;
  if (flags.second_deadlock_stack)
    tm->stk = cb->Unwind();
}
void DD::MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) {
  VPrintf(2, "#%llu: DD::MutexBeforeUnlock(%p, wlock=%d) nlocked=%d\n",
      cb->lt->ctx, m, wlock, cb->lt->nlocked);
  DDLogicalThread *lt = cb->lt;

  uptr owner = atomic_load(&m->owner, memory_order_relaxed);
  if (owner == (uptr)cb->lt) {
    VPrintf(3, "#%llu: DD::MutexBeforeUnlock recursive\n", cb->lt->ctx);
    if (--m->recursion > 0)
      return;
    VPrintf(3, "#%llu: DD::MutexBeforeUnlock reset owner\n", cb->lt->ctx);
    atomic_store(&m->owner, 0, memory_order_relaxed);
  }
  CHECK_NE(m->id, kNoId);

  // Remove the mutex from the list of currently held mutexes
  // (swap with the last element and shrink).
  int last = lt->nlocked - 1;
  for (int i = last; i >= 0; i--) {
    if (lt->locked[i].id == m->id) {
      lt->locked[i] = lt->locked[last];
      lt->nlocked--;
      break;
    }
  }
}
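// Destruction invalidates the mutex descriptor: bumping seq makes all edges
// that still reference this id stale, and the id goes back to the free cache
// for reuse.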
void DD::MutexDestroy(DDCallback *cb, DDMutex *m) {
  VPrintf(2, "#%llu: DD::MutexDestroy(%p)\n",
      cb->lt->ctx, m);
  DDLogicalThread *lt = cb->lt;
  if (m->id == kNoId)
    return;

  // Remove the mutex from lt->locked if there.
  int last = lt->nlocked - 1;
  for (int i = last; i >= 0; i--) {
    if (lt->locked[i].id == m->id) {
      lt->locked[i] = lt->locked[last];
      lt->nlocked--;
      break;
    }
  }

  // Clear and invalidate the mutex descriptor.
  {
    Mutex *mtx = getMutex(m->id);
    SpinMutexLock l(&mtx->mtx);
    mtx->seq++;
    mtx->nlink = 0;
  }

  // Return id to cache.
  {
    SpinMutexLock l(&mtx);
    free_id.push_back(m->id);
  }
}
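// Iterative depth-first search for a cycle through m. pending is an explicit
// worklist; a kEndId sentinel pushed after a node's children pops that node
// off path when its subtree is exhausted. Edges whose seq no longer matches
// the target mutex are stale (the target was destroyed) and are skipped.
// Reaching m->id again means the newly added edges closed a cycle.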
void DD::CycleCheck(DDPhysicalThread *pt, DDLogicalThread *lt,
    DDMutex *m) {
  internal_memset(pt->visited, 0, sizeof(pt->visited));
  int npath = 0;
  int npending = 0;
  {
    Mutex *mtx = getMutex(m->id);
    SpinMutexLock l(&mtx->mtx);
    for (int li = 0; li < mtx->nlink; li++)
      pt->pending[npending++] = mtx->link[li];
  }
  while (npending > 0) {
    Link link = pt->pending[--npending];
    if (link.id == kEndId) {
      npath--;
      continue;
    }
    if (pt->visited[link.id])
      continue;
    Mutex *mtx1 = getMutex(link.id);
    SpinMutexLock l(&mtx1->mtx);
    if (mtx1->seq != link.seq)
      continue;
    pt->visited[link.id] = true;
    if (mtx1->nlink == 0)
      continue;
    pt->path[npath++] = link;
    pt->pending[npending++] = Link(kEndId);
    if (link.id == m->id)
      return Report(pt, lt, npath);  // Bingo!
    for (int li = 0; li < mtx1->nlink; li++) {
      Link *link1 = &mtx1->link[li];
      // Mutex *mtx2 = getMutex(link->id);
      // FIXME(dvyukov): fast seq check
      // FIXME(dvyukov): fast nlink != 0 check
      // FIXME(dvyukov): fast pending check?
      // FIXME(dvyukov): npending can be larger than kMaxMutex
      pt->pending[npending++] = *link1;
    }
  }
}
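// Copies the discovered cycle from pt->path into the report: entry i records
// the thread that created the edge and the pair of mutexes involved, with the
// previous entry in the (circular) path supplying the first mutex.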
void DD::Report(DDPhysicalThread *pt, DDLogicalThread *lt, int npath) {
  DDReport *rep = &pt->rep;
  rep->n = npath;
  for (int i = 0; i < npath; i++) {
    Link *link = &pt->path[i];
    Link *link0 = &pt->path[i ? i - 1 : npath - 1];
    rep->loop[i].thr_ctx = link->tid;
    rep->loop[i].mtx_ctx0 = link0->id;
    rep->loop[i].mtx_ctx1 = link->id;
    rep->loop[i].stk[0] = flags.second_deadlock_stack ? link->stk0 : 0;
    rep->loop[i].stk[1] = link->stk1;
  }
  pt->report_pending = true;
}
DDReport *DD::GetReport(DDCallback *cb) {
  if (!cb->pt->report_pending)
    return 0;
  cb->pt->report_pending = false;
  return &cb->pt->rep;
}

}  // namespace __sanitizer
#endif  // #if SANITIZER_DEADLOCK_DETECTOR_VERSION == 2