// method-ml.cc

/* Copyright (C) 2012-2015 Free Software Foundation, Inc.
   Contributed by Torvald Riegel <triegel@redhat.com>.

   This file is part of the GNU Transactional Memory Library (libitm).

   Libitm is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   Libitm is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

#include "libitm_i.h"

using namespace GTM;

namespace {

// This group consists of all TM methods that synchronize via multiple locks
// (or ownership records).
struct ml_mg : public method_group
{
  static const gtm_word LOCK_BIT = (~(gtm_word)0 >> 1) + 1;
  static const gtm_word INCARNATION_BITS = 3;
  static const gtm_word INCARNATION_MASK = 7;
  // Maximum time is all bits except the lock bit, the overflow reserve bit,
  // and the incarnation bits.
  static const gtm_word TIME_MAX = (~(gtm_word)0 >> (2 + INCARNATION_BITS));
  // The overflow reserve bit is the MSB of the timestamp part of an orec,
  // so we can have TIME_MAX+1 pending timestamp increases before we overflow.
  static const gtm_word OVERFLOW_RESERVE = TIME_MAX + 1;
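
  // For orientation, assuming a 64-bit gtm_word, the constants above imply
  // the following layout of an orec word:
  //   bit  63     LOCK_BIT; when set, the lower bits hold the owning
  //               transaction's pointer shifted right by one (see
  //               set_locked() below)
  //   bits 3..62  timestamp, whose most significant bit (bit 62) is the
  //               overflow reserve bit
  //   bits 0..2   incarnation number (the INCARNATION_BITS low-order bits)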
  static bool is_locked(gtm_word o) { return o & LOCK_BIT; }
  static gtm_word set_locked(gtm_thread *tx)
  {
    return ((uintptr_t)tx >> 1) | LOCK_BIT;
  }
  // Returns a time that includes the lock bit, which is required by both
  // validate() and is_more_recent_or_locked().
  static gtm_word get_time(gtm_word o) { return o >> INCARNATION_BITS; }
  static gtm_word set_time(gtm_word time) { return time << INCARNATION_BITS; }
  static bool is_more_recent_or_locked(gtm_word o, gtm_word than_time)
  {
    // LOCK_BIT is the MSB; thus, if O is locked, it is larger than TIME_MAX.
    return get_time(o) > than_time;
  }
  static bool has_incarnation_left(gtm_word o)
  {
    return (o & INCARNATION_MASK) < INCARNATION_MASK;
  }
  static gtm_word inc_incarnation(gtm_word o) { return o + 1; }

  // The shared time base.
  atomic<gtm_word> time __attribute__((aligned(HW_CACHELINE_SIZE)));

  // The array of ownership records.
  atomic<gtm_word>* orecs __attribute__((aligned(HW_CACHELINE_SIZE)));
  char tailpadding[HW_CACHELINE_SIZE - sizeof(atomic<gtm_word>*)];

  // Location-to-orec mapping.  Stripes of 16B mapped to 2^19 orecs.
  static const gtm_word L2O_ORECS = 1 << 19;
  static const gtm_word L2O_SHIFT = 4;
  static size_t get_orec(const void* addr)
  {
    return ((uintptr_t)addr >> L2O_SHIFT) & (L2O_ORECS - 1);
  }
  static size_t get_next_orec(size_t orec)
  {
    return (orec + 1) & (L2O_ORECS - 1);
  }
  // Returns the next orec after the region.
  static size_t get_orec_end(const void* addr, size_t len)
  {
    return (((uintptr_t)addr + len + (1 << L2O_SHIFT) - 1) >> L2O_SHIFT)
        & (L2O_ORECS - 1);
  }
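
  // For illustration: with a 16-byte stripe size (L2O_SHIFT == 4), a 4-byte
  // access at address 0x100c maps to
  //   get_orec(0x100c)        == (0x100c >> 4) & (L2O_ORECS - 1) == 0x100
  //   get_orec_end(0x100c, 4) == 0x101
  // i.e. the one-past-the-end index of the single orec covering that access.
  // An access that crosses a stripe boundary covers several consecutive
  // indices (wrapping around modulo L2O_ORECS).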

  virtual void init()
  {
    // We assume that an atomic<gtm_word> is backed by just a gtm_word, so
    // starting with zeroed memory is fine.
    orecs = (atomic<gtm_word>*) xcalloc(
        sizeof(atomic<gtm_word>) * L2O_ORECS, true);
    // This store is only executed while holding the serial lock, so relaxed
    // memory order is sufficient here.
    time.store(0, memory_order_relaxed);
  }

  virtual void fini()
  {
    free(orecs);
  }

  // We only re-initialize when our time base overflows.  Thus, only reset
  // the time base and the orecs but do not re-allocate the orec array.
  virtual void reinit()
  {
    // This store is only executed while holding the serial lock, so relaxed
    // memory order is sufficient here.  Same holds for the memset.
    time.store(0, memory_order_relaxed);
    memset(orecs, 0, sizeof(atomic<gtm_word>) * L2O_ORECS);
  }
};

static ml_mg o_ml_mg;


// The multiple lock, write-through TM method.
// Maps each memory location to one of the orecs in the orec array, and then
// acquires the associated orec eagerly before writing through.
// Writes require undo-logging because we are dealing with several locks/orecs
// and need to resolve deadlocks if necessary by aborting one of the
// transactions.
// Reads do time-based validation with snapshot time extensions.  Incarnation
// numbers are used to decrease contention on the time base (with those,
// aborted transactions do not need to acquire a new version number for the
// data that has been previously written in the transaction and needs to be
// rolled back).
// gtm_thread::shared_state is used to store a transaction's current
// snapshot time (or commit time).  The serial lock uses ~0 for inactive
// transactions and 0 for active ones.  Thus, we always have a meaningful
// timestamp in shared_state that can be used to implement quiescence-based
// privatization safety.
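//
// Roughly, the barriers below work as follows: transactional loads call
// pre_load() to check and log the orecs covering the accessed region, read
// the data, and then call post_load() to re-check those orecs and detect
// racing updates; transactional stores call pre_write() to acquire the
// covering orecs and undo-log the old contents, and then store the new
// value in place (write-through).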
class ml_wt_dispatch : public abi_dispatch
{
protected:
  static void pre_write(gtm_thread *tx, const void *addr, size_t len)
  {
    gtm_word snapshot = tx->shared_state.load(memory_order_relaxed);
    gtm_word locked_by_tx = ml_mg::set_locked(tx);

    // Lock all orecs that cover the region.
    size_t orec = ml_mg::get_orec(addr);
    size_t orec_end = ml_mg::get_orec_end(addr, len);
    do
      {
        // Load the orec.  Relaxed memory order is sufficient here because
        // either we have acquired the orec or we will try to acquire it with
        // a CAS with stronger memory order.
        gtm_word o = o_ml_mg.orecs[orec].load(memory_order_relaxed);

        // Check whether we have acquired the orec already.
        if (likely (locked_by_tx != o))
          {
            // If not, acquire.  Make sure that our snapshot time is greater
            // than or equal to the orec's version to avoid masking
            // invalidations of our snapshot with our own writes.
            if (unlikely (ml_mg::is_locked(o)))
              tx->restart(RESTART_LOCKED_WRITE);

            if (unlikely (ml_mg::get_time(o) > snapshot))
              {
                // We only need to extend the snapshot if we have indeed read
                // from this orec before.  Given that we are an update
                // transaction, we will have to extend anyway during commit.
                // ??? Scan the read log instead, aborting if we have read
                // from data covered by this orec before?
                snapshot = extend(tx);
              }
            // We need acquire memory order here to synchronize with other
            // (ownership) releases of the orec.  We do not need acq_rel order
            // because whenever another thread reads from this CAS'
            // modification, then it will abort anyway and does not rely on
            // any further happens-before relation to be established.
            if (unlikely (!o_ml_mg.orecs[orec].compare_exchange_strong(
                o, locked_by_tx, memory_order_acquire)))
              tx->restart(RESTART_LOCKED_WRITE);

            // We use an explicit fence here to avoid having to use release
            // memory order for all subsequent data stores.  This fence will
            // synchronize with loads of the data with acquire memory order.
            // See post_load() for why this is necessary.
            // Adding release memory order to the prior CAS is not sufficient,
            // at least according to the Batty et al. formalization of the
            // memory model.
            atomic_thread_fence(memory_order_release);

            // We log the previous value here to be able to use incarnation
            // numbers when we have to roll back.
            // ??? Reserve capacity early to avoid capacity checks here?
            gtm_rwlog_entry *e = tx->writelog.push();
            e->orec = o_ml_mg.orecs + orec;
            e->value = o;
          }
        orec = o_ml_mg.get_next_orec(orec);
      }
    while (orec != orec_end);

    // Do undo logging.  We do not know which region prior writes logged
    // (even if orecs have been acquired), so just log everything.
    tx->undolog.log(addr, len);
  }

  static void pre_write(const void *addr, size_t len)
  {
    gtm_thread *tx = gtm_thr();
    pre_write(tx, addr, len);
  }

  // Returns true iff all the orecs in our read log still have the same time
  // or have been locked by the transaction itself.
  static bool validate(gtm_thread *tx)
  {
    gtm_word locked_by_tx = ml_mg::set_locked(tx);
    // ??? This might get called from pre_load() via extend().  In that case,
    // we don't really need to check the new entries that pre_load() is
    // adding.  Stop earlier?
    for (gtm_rwlog_entry *i = tx->readlog.begin(), *ie = tx->readlog.end();
         i != ie; i++)
      {
        // Relaxed memory order is sufficient here because we do not need to
        // establish any new synchronizes-with relationships.  We only need
        // to read a value that is at least as current as enforced by the
        // callers: extend() loads global time with acquire, and trycommit()
        // increments global time with acquire.  Therefore, we will see the
        // most recent orec updates before the global time that we load.
        gtm_word o = i->orec->load(memory_order_relaxed);
        // We compare only the time stamp and the lock bit here.  We know that
        // we have read only committed data before, so we can ignore
        // intermediate yet rolled-back updates presented by the incarnation
        // number bits.
        if (ml_mg::get_time(o) != ml_mg::get_time(i->value)
            && o != locked_by_tx)
          return false;
      }
    return true;
  }

  // Tries to extend the snapshot to a more recent time.  Returns the new
  // snapshot time and updates TX->SHARED_STATE.  If the snapshot cannot be
  // extended to the current global time, TX is restarted.
  static gtm_word extend(gtm_thread *tx)
  {
    // We read global time here, even if this isn't strictly necessary
    // because we could just return the maximum of the timestamps that
    // validate sees.  However, the potential cache miss on global time is
    // probably a reasonable price to pay for avoiding unnecessary extensions
    // in the future.
    // We need acquire memory order because we have to synchronize with the
    // increment of global time by update transactions, whose lock
    // acquisitions we have to observe (also see trycommit()).
    gtm_word snapshot = o_ml_mg.time.load(memory_order_acquire);
    if (!validate(tx))
      tx->restart(RESTART_VALIDATE_READ);

    // Update our public snapshot time.  Probably useful to decrease waiting
    // due to quiescence-based privatization safety.
    // Use release memory order to establish synchronizes-with with the
    // privatizers; prior data loads should happen before the privatizers
    // potentially modify anything.
    tx->shared_state.store(snapshot, memory_order_release);
    return snapshot;
  }

  // First pass over orecs.  Load and check all orecs that cover the region.
  // Write to read log, extend snapshot time if necessary.
  static gtm_rwlog_entry* pre_load(gtm_thread *tx, const void* addr,
      size_t len)
  {
    // Don't obtain an iterator yet because the log might get resized.
    size_t log_start = tx->readlog.size();
    gtm_word snapshot = tx->shared_state.load(memory_order_relaxed);
    gtm_word locked_by_tx = ml_mg::set_locked(tx);

    size_t orec = ml_mg::get_orec(addr);
    size_t orec_end = ml_mg::get_orec_end(addr, len);
    do
      {
        // We need acquire memory order here so that this load will
        // synchronize with the store that releases the orec in trycommit().
        // In turn, this makes sure that subsequent data loads will read from
        // a visible sequence of side effects that starts with the most recent
        // store to the data right before the release of the orec.
        gtm_word o = o_ml_mg.orecs[orec].load(memory_order_acquire);

        if (likely (!ml_mg::is_more_recent_or_locked(o, snapshot)))
          {
          success:
            gtm_rwlog_entry *e = tx->readlog.push();
            e->orec = o_ml_mg.orecs + orec;
            e->value = o;
          }
        else if (!ml_mg::is_locked(o))
          {
            // We cannot read this part of the region because it has been
            // updated more recently than our snapshot time.  If we can extend
            // our snapshot, then we can read.
            snapshot = extend(tx);
            goto success;
          }
        else
          {
            // If the orec is locked by us, just skip it because we can just
            // read from it.  Otherwise, restart the transaction.
            if (o != locked_by_tx)
              tx->restart(RESTART_LOCKED_READ);
          }
        orec = o_ml_mg.get_next_orec(orec);
      }
    while (orec != orec_end);

    return &tx->readlog[log_start];
  }

  // Second pass over orecs, verifying that we had a consistent read.
  // Restart the transaction if any of the orecs is locked by another
  // transaction.
  static void post_load(gtm_thread *tx, gtm_rwlog_entry* log)
  {
    for (gtm_rwlog_entry *end = tx->readlog.end(); log != end; log++)
      {
        // Check that the snapshot is consistent.  We expect the previous data
        // load to have acquire memory order, or be atomic and followed by an
        // acquire fence.
        // As a result, the data load will synchronize with the release fence
        // issued by the transactions whose data updates the data load has
        // read from.  This forces the orec load to read from a visible
        // sequence of side effects that starts with the other updating
        // transaction's store that acquired the orec and set it to locked.
        // We therefore either read a value with the locked bit set (and
        // restart) or read an orec value that was written after the data had
        // been written.  Either will allow us to detect inconsistent reads
        // because it will have a higher/different value.
        // Also note that, in contrast to validate(), we compare the raw value
        // of the orec here, including incarnation numbers.  We must prevent
        // returning uncommitted data from loads (whereas when validating, we
        // already performed a consistent load).
        gtm_word o = log->orec->load(memory_order_relaxed);
        if (log->value != o)
          tx->restart(RESTART_VALIDATE_READ);
      }
  }

  template <typename V> static V load(const V* addr, ls_modifier mod)
  {
    // Read-for-write should be unlikely, but we need to handle it or we will
    // break later WaW optimizations.
    if (unlikely(mod == RfW))
      {
        pre_write(addr, sizeof(V));
        return *addr;
      }
    if (unlikely(mod == RaW))
      return *addr;
    // ??? Optimize for RaR?

    gtm_thread *tx = gtm_thr();
    gtm_rwlog_entry* log = pre_load(tx, addr, sizeof(V));

    // Load the data.
    // This needs to have acquire memory order (see post_load()).
    // Alternatively, we can put an acquire fence after the data load but this
    // is probably less efficient.
    // FIXME We would need an atomic load with acquire memory order here but
    // we can't just forge an atomic load for nonatomic data because this
    // might not work on all implementations of atomics.  However, we need
    // the acquire memory order and we can only establish this if we link
    // it to the matching release using a reads-from relation between atomic
    // loads.  Also, the compiler is allowed to optimize nonatomic accesses
    // differently than atomic accesses (e.g., if the load would be moved to
    // after the fence, we potentially don't synchronize properly anymore).
    // Instead of the following, just use an ordinary load followed by an
    // acquire fence, and hope that this is good enough for now:
    // V v = atomic_load_explicit((atomic<V>*)addr, memory_order_acquire);
    V v = *addr;
    atomic_thread_fence(memory_order_acquire);

    // ??? Retry the whole load if it wasn't consistent?
    post_load(tx, log);
    return v;
  }

  template <typename V> static void store(V* addr, const V value,
      ls_modifier mod)
  {
    if (likely(mod != WaW))
      pre_write(addr, sizeof(V));

    // FIXME We would need an atomic store here but we can't just forge an
    // atomic store for nonatomic data because this might not work on all
    // implementations of atomics.  However, we need this store to link the
    // release fence in pre_write() to the acquire operation in load, which
    // is only guaranteed if we have a reads-from relation between atomic
    // accesses.  Also, the compiler is allowed to optimize nonatomic accesses
    // differently than atomic accesses (e.g., if the store would be moved
    // to before the release fence in pre_write(), things could go wrong).
    // atomic_store_explicit((atomic<V>*)addr, value, memory_order_relaxed);
    *addr = value;
  }

public:
  static void memtransfer_static(void *dst, const void* src, size_t size,
      bool may_overlap, ls_modifier dst_mod, ls_modifier src_mod)
  {
    gtm_rwlog_entry* log = 0;
    gtm_thread *tx = 0;

    if (src_mod == RfW)
      {
        tx = gtm_thr();
        pre_write(tx, src, size);
      }
    else if (src_mod != RaW && src_mod != NONTXNAL)
      {
        tx = gtm_thr();
        log = pre_load(tx, src, size);
      }
    // ??? Optimize for RaR?

    if (dst_mod != NONTXNAL && dst_mod != WaW)
      {
        if (src_mod != RfW && (src_mod == RaW || src_mod == NONTXNAL))
          tx = gtm_thr();
        pre_write(tx, dst, size);
      }

    // FIXME We should use atomics here (see store()).  Let's just hope that
    // memcpy/memmove are good enough.
    if (!may_overlap)
      ::memcpy(dst, src, size);
    else
      ::memmove(dst, src, size);

    // ??? Retry the whole memtransfer if it wasn't consistent?
    if (src_mod != RfW && src_mod != RaW && src_mod != NONTXNAL)
      {
        // See load() for why we need the acquire fence here.
        atomic_thread_fence(memory_order_acquire);
        post_load(tx, log);
      }
  }

  static void memset_static(void *dst, int c, size_t size, ls_modifier mod)
  {
    if (mod != WaW)
      pre_write(dst, size);
    // FIXME We should use atomics here (see store()).  Let's just hope that
    // memset is good enough.
    ::memset(dst, c, size);
  }

  virtual gtm_restart_reason begin_or_restart()
  {
    // We don't need to do anything for nested transactions.
    gtm_thread *tx = gtm_thr();
    if (tx->parent_txns.size() > 0)
      return NO_RESTART;

    // Read the current time, which becomes our snapshot time.
    // Use acquire memory order so that we see the lock acquisitions by update
    // transactions that incremented the global time (see trycommit()).
    gtm_word snapshot = o_ml_mg.time.load(memory_order_acquire);
    // Re-initialize method group on time overflow.
    if (snapshot >= o_ml_mg.TIME_MAX)
      return RESTART_INIT_METHOD_GROUP;

    // We don't need to enforce any ordering for the following store.  There
    // are no earlier data loads in this transaction, so the store cannot
    // become visible before those (which could lead to the violation of
    // privatization safety).  The store can become visible after later loads
    // but this does not matter because the previous value will have been
    // smaller or equal (the serial lock will set shared_state to zero when
    // marking the transaction as active, and restarts enforce immediate
    // visibility of a smaller or equal value with a barrier (see
    // rollback()).
    tx->shared_state.store(snapshot, memory_order_relaxed);
    return NO_RESTART;
  }

  virtual bool trycommit(gtm_word& priv_time)
  {
    gtm_thread* tx = gtm_thr();

    // If we haven't updated anything, we can commit.
    if (!tx->writelog.size())
      {
        tx->readlog.clear();
        return true;
      }

    // Get a commit time.
    // Overflow of o_ml_mg.time is prevented in begin_or_restart().
    // We need acq_rel here because (1) the acquire part is required for our
    // own subsequent call to validate(), and (2) the release part is
    // necessary to make other threads' validate() work as explained there
    // and in extend().
    gtm_word ct = o_ml_mg.time.fetch_add(1, memory_order_acq_rel) + 1;

    // Extend our snapshot time to at least our commit time.
    // Note that we do not need to validate if our snapshot time is right
    // before the commit time because we are never sharing the same commit
    // time with other transactions.
    // No need to reset shared_state, which will be modified by the serial
    // lock right after our commit anyway.
    gtm_word snapshot = tx->shared_state.load(memory_order_relaxed);
    if (snapshot < ct - 1 && !validate(tx))
      return false;

    // Release orecs.
    // See pre_load() / post_load() for why we need release memory order.
    // ??? Can we use a release fence and relaxed stores?
    gtm_word v = ml_mg::set_time(ct);
    for (gtm_rwlog_entry *i = tx->writelog.begin(), *ie = tx->writelog.end();
         i != ie; i++)
      i->orec->store(v, memory_order_release);

    // We're done, clear the logs.
    tx->writelog.clear();
    tx->readlog.clear();

    // Need to ensure privatization safety.  Every other transaction must
    // have a snapshot time that is at least as high as our commit time
    // (i.e., our commit must be visible to them).
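    // (Presumably the caller uses this returned value to wait until every
    // other active transaction's shared_state snapshot has reached at least
    // priv_time before privatized data is accessed nontransactionally.)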
    priv_time = ct;
    return true;
  }

  virtual void rollback(gtm_transaction_cp *cp)
  {
    // We don't do anything for rollbacks of nested transactions.
    // ??? We could release locks here if we snapshot writelog size.  readlog
    // is similar.  This is just a performance optimization though.  Nested
    // aborts should be rather infrequent, so the additional save/restore
    // overhead for the checkpoints could be higher.
    if (cp != 0)
      return;

    gtm_thread *tx = gtm_thr();
    gtm_word overflow_value = 0;

    // Release orecs.
    for (gtm_rwlog_entry *i = tx->writelog.begin(), *ie = tx->writelog.end();
         i != ie; i++)
      {
        // If possible, just increase the incarnation number.
        // See pre_load() / post_load() for why we need release memory order.
        // ??? Can we use a release fence and relaxed stores?  (Same below.)
        if (ml_mg::has_incarnation_left(i->value))
          i->orec->store(ml_mg::inc_incarnation(i->value),
                         memory_order_release);
        else
          {
            // We have an incarnation overflow.  Acquire a new timestamp, and
            // use it from now on as value for each orec whose incarnation
            // number cannot be increased.
            // Overflow of o_ml_mg.time is prevented in begin_or_restart().
            // See pre_load() / post_load() for why we need release memory
            // order.
            if (!overflow_value)
              // Release memory order is both sufficient and required here.
              // In contrast to the increment in trycommit(), we need release
              // for the same reason but do not need the acquire because we
              // do not validate subsequently.
              overflow_value = ml_mg::set_time(
                  o_ml_mg.time.fetch_add(1, memory_order_release) + 1);
            i->orec->store(overflow_value, memory_order_release);
          }
      }

    // We need this release fence to ensure that privatizers see the
    // rolled-back original state (not any uncommitted values) when they read
    // the new snapshot time that we write in begin_or_restart().
    atomic_thread_fence(memory_order_release);

    // We're done, clear the logs.
    tx->writelog.clear();
    tx->readlog.clear();
  }

  virtual bool supports(unsigned number_of_threads)
  {
    // Each txn can commit and fail and rollback once before checking for
    // overflow, so this bounds the number of threads that we can support.
    // In practice, this won't be a problem but we check it anyway so that
    // we never break in the occasional weird situation.
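    // (For scale: with a 64-bit gtm_word, OVERFLOW_RESERVE is 2^59, and with
    // a 32-bit gtm_word it is 2^27, so this check only trips for absurdly
    // large thread counts.)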
    return (number_of_threads * 2 <= ml_mg::OVERFLOW_RESERVE);
  }

  CREATE_DISPATCH_METHODS(virtual, )
  CREATE_DISPATCH_METHODS_MEM()

  ml_wt_dispatch() : abi_dispatch(false, true, false, false, 0, &o_ml_mg)
  { }
};

} // anon namespace

static const ml_wt_dispatch o_ml_wt_dispatch;

abi_dispatch *
GTM::dispatch_ml_wt ()
{
  return const_cast<ml_wt_dispatch *>(&o_ml_wt_dispatch);
}