//===-- sanitizer_addrhashmap.h ---------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Concurrent uptr->T hashmap.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ADDRHASHMAP_H
#define SANITIZER_ADDRHASHMAP_H

#include "sanitizer_common.h"
#include "sanitizer_mutex.h"
#include "sanitizer_atomic.h"
#include "sanitizer_allocator_internal.h"

namespace __sanitizer {

// Concurrent uptr->T hashmap.
// T must be a POD type, kSize is preferably a prime but can be any number.
// Usage example:
//
// typedef AddrHashMap<uptr, 11> Map;
// Map m;
// {
//   Map::Handle h(&m, addr);
//   use h.operator->() to access the data
//   if h.created() then the element was just created, and the current thread
//     has exclusive access to it
//   otherwise the current thread has only read access to the data
// }
// {
//   Map::Handle h(&m, addr, true);
//   this will remove the data from the map in Handle dtor
//   the current thread has exclusive access to the data
//   if !h.exists() then the element never existed
// }
template<typename T, uptr kSize>
class AddrHashMap {
 private:
  // A single key/value slot. 'addr' doubles as the occupancy flag:
  // zero means the cell is free, any non-zero value is the key.
  struct Cell {
    atomic_uintptr_t addr;
    T val;
  };

  // Overflow storage for a bucket, used once all embedded cells are taken.
  // Heap-allocated with trailing space for extra cells and grown on demand;
  // only accessed under the bucket mutex (read or write).
  struct AddBucket {
    uptr cap;       // number of cells[] slots the current allocation can hold
    uptr size;      // number of cells[] slots currently in use
    Cell cells[1];  // variable len
  };

  static const uptr kBucketSize = 3;

  // One hash bucket: a few embedded cells that readers may probe lock-free,
  // plus an optional overflow array ('add') guarded by 'mtx'.
  struct Bucket {
    RWMutex mtx;
    atomic_uintptr_t add;      // AddBucket*; stored relaxed, under write lock
    Cell cells[kBucketSize];
  };

 public:
  AddrHashMap();

  // RAII accessor to one map element. Lookup/insertion happens in the ctor
  // (acquire), lock release and deferred removal/publication in the dtor
  // (release). See the usage example above the class.
  class Handle {
   public:
    Handle(AddrHashMap<T, kSize> *map, uptr addr);
    Handle(AddrHashMap<T, kSize> *map, uptr addr, bool remove);
    Handle(AddrHashMap<T, kSize> *map, uptr addr, bool remove, bool create);
    ~Handle();
    T *operator->();
    bool created() const;
    bool exists() const;

   private:
    friend AddrHashMap<T, kSize>;
    AddrHashMap<T, kSize> *map_;
    Bucket *bucket_;
    Cell *cell_;        // the element, or 0 if it does not exist
    uptr addr_;
    uptr addidx_;       // index into the add array, or -1U for an embed cell
    bool created_;
    bool remove_;
    bool create_;
  };

 private:
  friend class Handle;
  Bucket *table_;       // kSize buckets, mmapped in the ctor

  void acquire(Handle *h);
  void release(Handle *h);
  uptr calcHash(uptr addr);
};
  84. template<typename T, uptr kSize>
  85. AddrHashMap<T, kSize>::Handle::Handle(AddrHashMap<T, kSize> *map, uptr addr) {
  86. map_ = map;
  87. addr_ = addr;
  88. remove_ = false;
  89. create_ = true;
  90. map_->acquire(this);
  91. }
  92. template<typename T, uptr kSize>
  93. AddrHashMap<T, kSize>::Handle::Handle(AddrHashMap<T, kSize> *map, uptr addr,
  94. bool remove) {
  95. map_ = map;
  96. addr_ = addr;
  97. remove_ = remove;
  98. create_ = true;
  99. map_->acquire(this);
  100. }
  101. template<typename T, uptr kSize>
  102. AddrHashMap<T, kSize>::Handle::Handle(AddrHashMap<T, kSize> *map, uptr addr,
  103. bool remove, bool create) {
  104. map_ = map;
  105. addr_ = addr;
  106. remove_ = remove;
  107. create_ = create;
  108. map_->acquire(this);
  109. }
template<typename T, uptr kSize>
AddrHashMap<T, kSize>::Handle::~Handle() {
  // Drops any lock taken by acquire() and performs the deferred
  // removal/publication of the element.
  map_->release(this);
}
template <typename T, uptr kSize>
T *AddrHashMap<T, kSize>::Handle::operator->() {
  // Precondition: the element exists (cell_ != 0). Callers must check
  // exists() first whenever the lookup may fail (remove/create==false modes).
  return &cell_->val;
}
template<typename T, uptr kSize>
bool AddrHashMap<T, kSize>::Handle::created() const {
  // True if acquire() inserted a new element; the current thread then holds
  // the bucket write lock (exclusive access) until the handle is destroyed.
  return created_;
}
template<typename T, uptr kSize>
bool AddrHashMap<T, kSize>::Handle::exists() const {
  // Non-null cell_ means the element was found or created by acquire().
  return cell_ != 0;
}
  126. template<typename T, uptr kSize>
  127. AddrHashMap<T, kSize>::AddrHashMap() {
  128. table_ = (Bucket*)MmapOrDie(kSize * sizeof(table_[0]), "AddrHashMap");
  129. }
// Looks up h->addr_ in the bucket it hashes to, optionally creating the
// element. Locking protocol on return:
//  - found lock-free in an embed cell: no lock held;
//  - found in the add array (non-remove): bucket read lock held;
//  - found for removal, or just created: bucket write lock held;
//  - not found and not created: no lock held, h->cell_ == 0.
// release() undoes exactly this state.
template<typename T, uptr kSize>
void AddrHashMap<T, kSize>::acquire(Handle *h) {
  uptr addr = h->addr_;
  uptr hash = calcHash(addr);
  Bucket *b = &table_[hash];
  h->created_ = false;
  h->addidx_ = -1U;  // -1U marks "embed cell"; otherwise index into add array
  h->bucket_ = b;
  h->cell_ = 0;
  // If we want to remove the element, we need exclusive access to the bucket,
  // so skip the lock-free phase.
  if (h->remove_)
    goto locked;
 retry:
  // First try to find an existing element w/o read mutex.
  CHECK(!h->remove_);
  // Check the embed cells.
  for (uptr i = 0; i < kBucketSize; i++) {
    Cell *c = &b->cells[i];
    // Acquire pairs with the release store of c->addr in release(), making
    // the element's val visible once its addr is published.
    uptr addr1 = atomic_load(&c->addr, memory_order_acquire);
    if (addr1 == addr) {
      h->cell_ = c;
      return;  // found lock-free; no lock held
    }
  }
  // Check the add cells with read lock.
  if (atomic_load(&b->add, memory_order_relaxed)) {
    b->mtx.ReadLock();
    AddBucket *add = (AddBucket*)atomic_load(&b->add, memory_order_relaxed);
    for (uptr i = 0; i < add->size; i++) {
      Cell *c = &add->cells[i];
      uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
      if (addr1 == addr) {
        h->addidx_ = i;
        h->cell_ = c;
        return;  // found; read lock stays held until release()
      }
    }
    b->mtx.ReadUnlock();
  }
 locked:
  // Re-check existence under write lock.
  // Embed cells.
  b->mtx.Lock();
  for (uptr i = 0; i < kBucketSize; i++) {
    Cell *c = &b->cells[i];
    uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
    if (addr1 == addr) {
      if (h->remove_) {
        h->cell_ = c;
        return;  // removal proceeds under the write lock held here
      }
      // Element appeared concurrently; re-find it via the lock-free path so
      // the handle ends up in the normal (unlocked/read-locked) state.
      b->mtx.Unlock();
      goto retry;
    }
  }
  // Add cells.
  AddBucket *add = (AddBucket*)atomic_load(&b->add, memory_order_relaxed);
  if (add) {
    for (uptr i = 0; i < add->size; i++) {
      Cell *c = &add->cells[i];
      uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
      if (addr1 == addr) {
        if (h->remove_) {
          h->addidx_ = i;
          h->cell_ = c;
          return;  // removal proceeds under the write lock held here
        }
        b->mtx.Unlock();
        goto retry;
      }
    }
  }
  // The element does not exist, no need to create it if we want to remove.
  if (h->remove_ || !h->create_) {
    b->mtx.Unlock();
    return;  // h->cell_ stays 0 -> exists() is false
  }
  // Now try to create it under the mutex.
  // The write lock is kept until release(), which publishes c->addr after the
  // caller has filled in the value.
  h->created_ = true;
  // See if we have a free embed cell.
  for (uptr i = 0; i < kBucketSize; i++) {
    Cell *c = &b->cells[i];
    uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
    if (addr1 == 0) {
      h->cell_ = c;
      return;
    }
  }
  // Store in the add cells.
  if (add == 0) {
    // Allocate a new add array.
    const uptr kInitSize = 64;
    // NOTE(review): AddBucket embeds one Cell (which contains a T); if
    // sizeof(*add) ever exceeds kInitSize the unsigned subtraction below
    // wraps around -- verify T stays small for all instantiations.
    add = (AddBucket*)InternalAlloc(kInitSize);
    internal_memset(add, 0, kInitSize);
    // cap counts the embedded cells[1] slot plus whatever fits in the
    // remaining kInitSize bytes.
    add->cap = (kInitSize - sizeof(*add)) / sizeof(add->cells[0]) + 1;
    add->size = 0;
    atomic_store(&b->add, (uptr)add, memory_order_relaxed);
  }
  if (add->size == add->cap) {
    // Grow existing add array.
    uptr oldsize = sizeof(*add) + (add->cap - 1) * sizeof(add->cells[0]);
    uptr newsize = oldsize * 2;
    AddBucket *add1 = (AddBucket*)InternalAlloc(newsize);
    internal_memset(add1, 0, newsize);
    add1->cap = (newsize - sizeof(*add)) / sizeof(add->cells[0]) + 1;
    add1->size = add->size;
    internal_memcpy(add1->cells, add->cells, add->size * sizeof(add->cells[0]));
    InternalFree(add);
    atomic_store(&b->add, (uptr)add1, memory_order_relaxed);
    add = add1;
  }
  // Store.
  uptr i = add->size++;
  Cell *c = &add->cells[i];
  // The slot must be free: compaction in release() zeroes vacated cells.
  CHECK_EQ(atomic_load(&c->addr, memory_order_relaxed), 0);
  h->addidx_ = i;
  h->cell_ = c;
}
// Finishes the operation started by acquire() and drops whatever lock it
// left held (see the locking protocol documented on acquire()).
template<typename T, uptr kSize>
void AddrHashMap<T, kSize>::release(Handle *h) {
  if (h->cell_ == 0)
    return;  // element never existed and was not created; no lock is held
  Bucket *b = h->bucket_;
  Cell *c = h->cell_;
  uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
  if (h->created_) {
    // Still holding the write lock taken in acquire().
    // Denote completion of insertion.
    CHECK_EQ(addr1, 0);
    // After the following store, the element becomes available
    // for lock-free reads.
    atomic_store(&c->addr, h->addr_, memory_order_release);
    b->mtx.Unlock();
  } else if (h->remove_) {
    // Still holding the write lock taken in acquire().
    // Denote that the cell is empty now.
    CHECK_EQ(addr1, h->addr_);
    atomic_store(&c->addr, 0, memory_order_release);
    // See if we need to compact the bucket.
    AddBucket *add = (AddBucket*)atomic_load(&b->add, memory_order_relaxed);
    if (h->addidx_ == -1U) {
      // Removed from embed array, move an add element into the freed cell.
      if (add && add->size != 0) {
        uptr last = --add->size;
        Cell *c1 = &add->cells[last];
        // Write val first, then publish addr with release, so lock-free
        // readers of the embed cell never see addr without a valid val.
        c->val = c1->val;
        uptr addr1 = atomic_load(&c1->addr, memory_order_relaxed);  // shadows outer addr1
        atomic_store(&c->addr, addr1, memory_order_release);
        atomic_store(&c1->addr, 0, memory_order_release);
      }
    } else {
      // Removed from add array, compact it by moving the last element into
      // the vacated slot (add cells are only read under the mutex).
      uptr last = --add->size;
      Cell *c1 = &add->cells[last];
      if (c != c1) {
        *c = *c1;
        atomic_store(&c1->addr, 0, memory_order_relaxed);
      }
    }
    if (add && add->size == 0) {
      // FIXME(dvyukov): free add?
    }
    b->mtx.Unlock();
  } else {
    // Plain lookup: only add-array hits hold the read lock (addidx_ != -1U);
    // embed-cell hits were found lock-free.
    CHECK_EQ(addr1, h->addr_);
    if (h->addidx_ != -1U)
      b->mtx.ReadUnlock();
  }
}
  298. template<typename T, uptr kSize>
  299. uptr AddrHashMap<T, kSize>::calcHash(uptr addr) {
  300. addr += addr << 10;
  301. addr ^= addr >> 6;
  302. return addr % kSize;
  303. }
}  // namespace __sanitizer

#endif  // SANITIZER_ADDRHASHMAP_H