/* Copyright (C) 2008-2016 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU Transactional Memory Library (libitm).

   Libitm is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   Libitm is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

25#include "libitm_i.h"
26#include <pthread.h>
27
28
29using namespace GTM;
30
31#if !defined(HAVE_ARCH_GTM_THREAD) || !defined(HAVE_ARCH_GTM_THREAD_DISP)
32extern __thread gtm_thread_tls _gtm_thr_tls;
33#endif
34
35gtm_rwlock GTM::gtm_thread::serial_lock;
36gtm_thread *GTM::gtm_thread::list_of_threads = 0;
37unsigned GTM::gtm_thread::number_of_threads = 0;
38
Aldy Hernandez0a355132011-11-08 11:13:41 +000039/* ??? Move elsewhere when we figure out library initialization. */
40uint64_t GTM::gtm_spin_count_var = 1000;
41
Richard Henderson36cfbee2011-12-13 11:11:25 -080042#ifdef HAVE_64BIT_SYNC_BUILTINS
43static atomic<_ITM_transactionId_t> global_tid;
44#else
Aldy Hernandez0a355132011-11-08 11:13:41 +000045static _ITM_transactionId_t global_tid;
Richard Henderson36cfbee2011-12-13 11:11:25 -080046static pthread_mutex_t global_tid_lock = PTHREAD_MUTEX_INITIALIZER;
47#endif
48
Aldy Hernandez0a355132011-11-08 11:13:41 +000049
50// Provides a on-thread-exit callback used to release per-thread data.
51static pthread_key_t thr_release_key;
52static pthread_once_t thr_release_once = PTHREAD_ONCE_INIT;
53
Torvald Riegel64fbcc72012-11-09 17:04:40 +000054// See gtm_thread::begin_transaction.
55uint32_t GTM::htm_fastpath = 0;
Aldy Hernandez0a355132011-11-08 11:13:41 +000056
57/* Allocate a transaction structure. */
58void *
59GTM::gtm_thread::operator new (size_t s)
60{
61 void *tx;
62
63 assert(s == sizeof(gtm_thread));
64
65 tx = xmalloc (sizeof (gtm_thread), true);
66 memset (tx, 0, sizeof (gtm_thread));
67
68 return tx;
69}
70
71/* Free the given transaction. Raises an error if the transaction is still
72 in use. */
73void
74GTM::gtm_thread::operator delete(void *tx)
75{
76 free(tx);
77}
78
79static void
80thread_exit_handler(void *)
81{
82 gtm_thread *thr = gtm_thr();
83 if (thr)
84 delete thr;
85 set_gtm_thr(0);
86}
87
88static void
89thread_exit_init()
90{
91 if (pthread_key_create(&thr_release_key, thread_exit_handler))
92 GTM_fatal("Creating thread release TLS key failed.");
93}
94
95
96GTM::gtm_thread::~gtm_thread()
97{
98 if (nesting > 0)
99 GTM_fatal("Thread exit while a transaction is still active.");
100
101 // Deregister this transaction.
102 serial_lock.write_lock ();
103 gtm_thread **prev = &list_of_threads;
104 for (; *prev; prev = &(*prev)->next_thread)
105 {
106 if (*prev == this)
107 {
108 *prev = (*prev)->next_thread;
109 break;
110 }
111 }
112 number_of_threads--;
113 number_of_threads_changed(number_of_threads + 1, number_of_threads);
114 serial_lock.write_unlock ();
115}
116
117GTM::gtm_thread::gtm_thread ()
118{
119 // This object's memory has been set to zero by operator new, so no need
120 // to initialize any of the other primitive-type members that do not have
121 // constructors.
Richard Henderson36cfbee2011-12-13 11:11:25 -0800122 shared_state.store(-1, memory_order_relaxed);
Aldy Hernandez0a355132011-11-08 11:13:41 +0000123
124 // Register this transaction with the list of all threads' transactions.
125 serial_lock.write_lock ();
126 next_thread = list_of_threads;
127 list_of_threads = this;
128 number_of_threads++;
129 number_of_threads_changed(number_of_threads - 1, number_of_threads);
130 serial_lock.write_unlock ();
131
Torvald Riegel258c1d02015-11-20 00:10:08 +0000132 init_cpp_exceptions ();
133
Aldy Hernandez0a355132011-11-08 11:13:41 +0000134 if (pthread_once(&thr_release_once, thread_exit_init))
135 GTM_fatal("Initializing thread release TLS key failed.");
136 // Any non-null value is sufficient to trigger destruction of this
137 // transaction when the current thread terminates.
138 if (pthread_setspecific(thr_release_key, this))
139 GTM_fatal("Setting thread release TLS key failed.");
140}
141
Richard Henderson36cfbee2011-12-13 11:11:25 -0800142static inline uint32_t
143choose_code_path(uint32_t prop, abi_dispatch *disp)
Aldy Hernandez0a355132011-11-08 11:13:41 +0000144{
145 if ((prop & pr_uninstrumentedCode) && disp->can_run_uninstrumented_code())
146 return a_runUninstrumentedCode;
147 else
148 return a_runInstrumentedCode;
149}
150
151uint32_t
152GTM::gtm_thread::begin_transaction (uint32_t prop, const gtm_jmpbuf *jb)
153{
154 static const _ITM_transactionId_t tid_block_size = 1 << 16;
155
156 gtm_thread *tx;
157 abi_dispatch *disp;
158 uint32_t ret;
159
160 // ??? pr_undoLogCode is not properly defined in the ABI. Are barriers
161 // omitted because they are not necessary (e.g., a transaction on thread-
162 // local data) or because the compiler thinks that some kind of global
163 // synchronization might perform better?
164 if (unlikely(prop & pr_undoLogCode))
165 GTM_fatal("pr_undoLogCode not supported");
166
Torvald Riegelbec9ec32013-08-30 10:33:41 +0000167#ifdef USE_HTM_FASTPATH
Torvald Riegel64fbcc72012-11-09 17:04:40 +0000168 // HTM fastpath. Only chosen in the absence of transaction_cancel to allow
169 // using an uninstrumented code path.
170 // The fastpath is enabled only by dispatch_htm's method group, which uses
171 // serial-mode methods as fallback. Serial-mode transactions cannot execute
172 // concurrently with HW transactions because the latter monitor the serial
173 // lock's writer flag and thus abort if another thread is or becomes a
174 // serial transaction. Therefore, if the fastpath is enabled, then a
175 // transaction is not executing as a HW transaction iff the serial lock is
176 // write-locked. This allows us to use htm_fastpath and the serial lock's
177 // writer flag to reliable determine whether the current thread runs a HW
178 // transaction, and thus we do not need to maintain this information in
179 // per-thread state.
180 // If an uninstrumented code path is not available, we can still run
181 // instrumented code from a HW transaction because the HTM fastpath kicks
182 // in early in both begin and commit, and the transaction is not canceled.
183 // HW transactions might get requests to switch to serial-irrevocable mode,
184 // but these can be ignored because the HTM provides all necessary
185 // correctness guarantees. Transactions cannot detect whether they are
186 // indeed in serial mode, and HW transactions should never need serial mode
187 // for any internal changes (e.g., they never abort visibly to the STM code
188 // and thus do not trigger the standard retry handling).
Torvald Riegelbec9ec32013-08-30 10:33:41 +0000189#ifndef HTM_CUSTOM_FASTPATH
Torvald Riegel64fbcc72012-11-09 17:04:40 +0000190 if (likely(htm_fastpath && (prop & pr_hasNoAbort)))
191 {
192 for (uint32_t t = htm_fastpath; t; t--)
193 {
194 uint32_t ret = htm_begin();
195 if (htm_begin_success(ret))
196 {
197 // We are executing a transaction now.
198 // Monitor the writer flag in the serial-mode lock, and abort
199 // if there is an active or waiting serial-mode transaction.
Torvald Riegelb1db4572013-06-20 16:40:38 +0000200 // Note that this can also happen due to an enclosing
201 // serial-mode transaction; we handle this case below.
Torvald Riegel64fbcc72012-11-09 17:04:40 +0000202 if (unlikely(serial_lock.is_write_locked()))
203 htm_abort();
204 else
205 // We do not need to set a_saveLiveVariables because of HTM.
206 return (prop & pr_uninstrumentedCode) ?
207 a_runUninstrumentedCode : a_runInstrumentedCode;
208 }
209 // The transaction has aborted. Don't retry if it's unlikely that
210 // retrying the transaction will be successful.
211 if (!htm_abort_should_retry(ret))
212 break;
213 // Wait until any concurrent serial-mode transactions have finished.
214 // This is an empty critical section, but won't be elided.
215 if (serial_lock.is_write_locked())
216 {
217 tx = gtm_thr();
218 if (unlikely(tx == NULL))
219 {
220 // See below.
221 tx = new gtm_thread();
222 set_gtm_thr(tx);
223 }
Torvald Riegelb1db4572013-06-20 16:40:38 +0000224 // Check whether there is an enclosing serial-mode transaction;
225 // if so, we just continue as a nested transaction and don't
226 // try to use the HTM fastpath. This case can happen when an
227 // outermost relaxed transaction calls unsafe code that starts
228 // a transaction.
229 if (tx->nesting > 0)
230 break;
231 // Another thread is running a serial-mode transaction. Wait.
Torvald Riegel64fbcc72012-11-09 17:04:40 +0000232 serial_lock.read_lock(tx);
233 serial_lock.read_unlock(tx);
234 // TODO We should probably reset the retry count t here, unless
235 // we have retried so often that we should go serial to avoid
236 // starvation.
237 }
238 }
239 }
Torvald Riegelbec9ec32013-08-30 10:33:41 +0000240#else
241 // If we have a custom HTM fastpath in ITM_beginTransaction, we implement
242 // just the retry policy here. We communicate with the custom fastpath
243 // through additional property bits and return codes, and either transfer
244 // control back to the custom fastpath or run the fallback mechanism. The
245 // fastpath synchronization algorithm itself is the same.
246 // pr_HTMRetryableAbort states that a HW transaction started by the custom
247 // HTM fastpath aborted, and that we thus have to decide whether to retry
248 // the fastpath (returning a_tryHTMFastPath) or just proceed with the
249 // fallback method.
250 if (likely(htm_fastpath && (prop & pr_HTMRetryableAbort)))
251 {
252 tx = gtm_thr();
253 if (unlikely(tx == NULL))
254 {
255 // See below.
256 tx = new gtm_thread();
257 set_gtm_thr(tx);
258 }
259 // If this is the first abort, reset the retry count. We abuse
260 // restart_total for the retry count, which is fine because our only
261 // other fallback will use serial transactions, which don't use
262 // restart_total but will reset it when committing.
263 if (!(prop & pr_HTMRetriedAfterAbort))
264 tx->restart_total = htm_fastpath;
265
266 if (--tx->restart_total > 0)
267 {
268 // Wait until any concurrent serial-mode transactions have finished.
269 // Essentially the same code as above.
270 if (serial_lock.is_write_locked())
271 {
272 if (tx->nesting > 0)
273 goto stop_custom_htm_fastpath;
274 serial_lock.read_lock(tx);
275 serial_lock.read_unlock(tx);
276 }
277 // Let ITM_beginTransaction retry the custom HTM fastpath.
278 return a_tryHTMFastPath;
279 }
280 }
281 stop_custom_htm_fastpath:
282#endif
Torvald Riegel64fbcc72012-11-09 17:04:40 +0000283#endif
284
Aldy Hernandez0a355132011-11-08 11:13:41 +0000285 tx = gtm_thr();
286 if (unlikely(tx == NULL))
287 {
288 // Create the thread object. The constructor will also set up automatic
289 // deletion on thread termination.
290 tx = new gtm_thread();
291 set_gtm_thr(tx);
292 }
293
294 if (tx->nesting > 0)
295 {
296 // This is a nested transaction.
297 // Check prop compatibility:
298 // The ABI requires pr_hasNoFloatUpdate, pr_hasNoVectorUpdate,
299 // pr_hasNoIrrevocable, pr_aWBarriersOmitted, pr_RaRBarriersOmitted, and
300 // pr_hasNoSimpleReads to hold for the full dynamic scope of a
301 // transaction. We could check that these are set for the nested
302 // transaction if they are also set for the parent transaction, but the
303 // ABI does not require these flags to be set if they could be set,
304 // so the check could be too strict.
305 // ??? For pr_readOnly, lexical or dynamic scope is unspecified.
306
307 if (prop & pr_hasNoAbort)
308 {
309 // We can use flat nesting, so elide this transaction.
310 if (!(prop & pr_instrumentedCode))
311 {
312 if (!(tx->state & STATE_SERIAL) ||
313 !(tx->state & STATE_IRREVOCABLE))
314 tx->serialirr_mode();
315 }
316 // Increment nesting level after checking that we have a method that
317 // allows us to continue.
318 tx->nesting++;
319 return choose_code_path(prop, abi_disp());
320 }
321
322 // The transaction might abort, so use closed nesting if possible.
323 // pr_hasNoAbort has lexical scope, so the compiler should really have
324 // generated an instrumented code path.
325 assert(prop & pr_instrumentedCode);
326
327 // Create a checkpoint of the current transaction.
328 gtm_transaction_cp *cp = tx->parent_txns.push();
329 cp->save(tx);
330 new (&tx->alloc_actions) aa_tree<uintptr_t, gtm_alloc_action>();
331
332 // Check whether the current method actually supports closed nesting.
333 // If we can switch to another one, do so.
334 // If not, we assume that actual aborts are infrequent, and rather
335 // restart in _ITM_abortTransaction when we really have to.
336 disp = abi_disp();
337 if (!disp->closed_nesting())
338 {
339 // ??? Should we elide the transaction if there is no alternative
340 // method that supports closed nesting? If we do, we need to set
341 // some flag to prevent _ITM_abortTransaction from aborting the
342 // wrong transaction (i.e., some parent transaction).
343 abi_dispatch *cn_disp = disp->closed_nesting_alternative();
344 if (cn_disp)
345 {
346 disp = cn_disp;
347 set_abi_disp(disp);
348 }
349 }
350 }
351 else
352 {
353 // Outermost transaction
354 disp = tx->decide_begin_dispatch (prop);
Aldy Hernandez0a355132011-11-08 11:13:41 +0000355 set_abi_disp (disp);
356 }
357
358 // Initialization that is common for outermost and nested transactions.
359 tx->prop = prop;
360 tx->nesting++;
361
362 tx->jb = *jb;
363
364 // As long as we have not exhausted a previously allocated block of TIDs,
365 // we can avoid an atomic operation on a shared cacheline.
366 if (tx->local_tid & (tid_block_size - 1))
367 tx->id = tx->local_tid++;
368 else
369 {
370#ifdef HAVE_64BIT_SYNC_BUILTINS
Torvald Riegel799142b2011-12-24 01:42:20 +0000371 // We don't really care which block of TIDs we get but only that we
372 // acquire one atomically; therefore, relaxed memory order is
373 // sufficient.
Richard Henderson36cfbee2011-12-13 11:11:25 -0800374 tx->id = global_tid.fetch_add(tid_block_size, memory_order_relaxed);
Aldy Hernandez0a355132011-11-08 11:13:41 +0000375 tx->local_tid = tx->id + 1;
376#else
377 pthread_mutex_lock (&global_tid_lock);
378 global_tid += tid_block_size;
379 tx->id = global_tid;
380 tx->local_tid = tx->id + 1;
381 pthread_mutex_unlock (&global_tid_lock);
382#endif
383 }
384
Torvald Riegel258c1d02015-11-20 00:10:08 +0000385 // Log the number of uncaught exceptions if we might have to roll back this
386 // state.
387 if (tx->cxa_uncaught_count_ptr != 0)
388 tx->cxa_uncaught_count = *tx->cxa_uncaught_count_ptr;
389
Aldy Hernandez0a355132011-11-08 11:13:41 +0000390 // Run dispatch-specific restart code. Retry until we succeed.
391 GTM::gtm_restart_reason rr;
392 while ((rr = disp->begin_or_restart()) != NO_RESTART)
393 {
394 tx->decide_retry_strategy(rr);
395 disp = abi_disp();
396 }
397
398 // Determine the code path to run. Only irrevocable transactions cannot be
399 // restarted, so all other transactions need to save live variables.
400 ret = choose_code_path(prop, disp);
401 if (!(tx->state & STATE_IRREVOCABLE))
402 ret |= a_saveLiveVariables;
403 return ret;
404}
405
406
407void
408GTM::gtm_transaction_cp::save(gtm_thread* tx)
409{
410 // Save everything that we might have to restore on restarts or aborts.
411 jb = tx->jb;
412 undolog_size = tx->undolog.size();
413 memcpy(&alloc_actions, &tx->alloc_actions, sizeof(alloc_actions));
414 user_actions_size = tx->user_actions.size();
415 id = tx->id;
416 prop = tx->prop;
417 cxa_catch_count = tx->cxa_catch_count;
Torvald Riegel258c1d02015-11-20 00:10:08 +0000418 cxa_uncaught_count = tx->cxa_uncaught_count;
Aldy Hernandez0a355132011-11-08 11:13:41 +0000419 disp = abi_disp();
420 nesting = tx->nesting;
421}
422
423void
424GTM::gtm_transaction_cp::commit(gtm_thread* tx)
425{
426 // Restore state that is not persistent across commits. Exception handling,
427 // information, nesting level, and any logs do not need to be restored on
428 // commits of nested transactions. Allocation actions must be committed
429 // before committing the snapshot.
430 tx->jb = jb;
431 memcpy(&tx->alloc_actions, &alloc_actions, sizeof(alloc_actions));
432 tx->id = id;
433 tx->prop = prop;
434}
435
436
437void
438GTM::gtm_thread::rollback (gtm_transaction_cp *cp, bool aborting)
439{
440 // The undo log is special in that it used for both thread-local and shared
441 // data. Because of the latter, we have to roll it back before any
442 // dispatch-specific rollback (which handles synchronization with other
443 // transactions).
Torvald Riegel07b66422012-01-13 23:45:06 +0000444 undolog.rollback (this, cp ? cp->undolog_size : 0);
Aldy Hernandez0a355132011-11-08 11:13:41 +0000445
446 // Perform dispatch-specific rollback.
447 abi_disp()->rollback (cp);
448
449 // Roll back all actions that are supposed to happen around the transaction.
450 rollback_user_actions (cp ? cp->user_actions_size : 0);
451 commit_allocations (true, (cp ? &cp->alloc_actions : 0));
452 revert_cpp_exceptions (cp);
453
454 if (cp)
455 {
456 // We do not yet handle restarts of nested transactions. To do that, we
457 // would have to restore some state (jb, id, prop, nesting) not to the
458 // checkpoint but to the transaction that was started from this
459 // checkpoint (e.g., nesting = cp->nesting + 1);
460 assert(aborting);
461 // Roll back the rest of the state to the checkpoint.
462 jb = cp->jb;
463 id = cp->id;
464 prop = cp->prop;
465 if (cp->disp != abi_disp())
466 set_abi_disp(cp->disp);
467 memcpy(&alloc_actions, &cp->alloc_actions, sizeof(alloc_actions));
468 nesting = cp->nesting;
469 }
470 else
471 {
472 // Roll back to the outermost transaction.
473 // Restore the jump buffer and transaction properties, which we will
474 // need for the longjmp used to restart or abort the transaction.
475 if (parent_txns.size() > 0)
476 {
477 jb = parent_txns[0].jb;
478 id = parent_txns[0].id;
479 prop = parent_txns[0].prop;
480 }
481 // Reset the transaction. Do not reset this->state, which is handled by
482 // the callers. Note that if we are not aborting, we reset the
483 // transaction to the point after having executed begin_transaction
484 // (we will return from it), so the nesting level must be one, not zero.
485 nesting = (aborting ? 0 : 1);
486 parent_txns.clear();
487 }
488
489 if (this->eh_in_flight)
490 {
491 _Unwind_DeleteException ((_Unwind_Exception *) this->eh_in_flight);
492 this->eh_in_flight = NULL;
493 }
494}
495
496void ITM_REGPARM
497_ITM_abortTransaction (_ITM_abortReason reason)
498{
499 gtm_thread *tx = gtm_thr();
500
501 assert (reason == userAbort || reason == (userAbort | outerAbort));
502 assert ((tx->prop & pr_hasNoAbort) == 0);
503
504 if (tx->state & gtm_thread::STATE_IRREVOCABLE)
505 abort ();
506
507 // Roll back to innermost transaction.
508 if (tx->parent_txns.size() > 0 && !(reason & outerAbort))
509 {
510 // If the current method does not support closed nesting but we are
511 // nested and must only roll back the innermost transaction, then
512 // restart with a method that supports closed nesting.
513 abi_dispatch *disp = abi_disp();
514 if (!disp->closed_nesting())
515 tx->restart(RESTART_CLOSED_NESTING);
516
517 // The innermost transaction is a closed nested transaction.
518 gtm_transaction_cp *cp = tx->parent_txns.pop();
519 uint32_t longjmp_prop = tx->prop;
520 gtm_jmpbuf longjmp_jb = tx->jb;
521
522 tx->rollback (cp, true);
523
524 // Jump to nested transaction (use the saved jump buffer).
Richard Henderson062f93f2011-11-30 14:29:33 -0800525 GTM_longjmp (a_abortTransaction | a_restoreLiveVariables,
526 &longjmp_jb, longjmp_prop);
Aldy Hernandez0a355132011-11-08 11:13:41 +0000527 }
528 else
529 {
530 // There is no nested transaction or an abort of the outermost
531 // transaction was requested, so roll back to the outermost transaction.
532 tx->rollback (0, true);
533
534 // Aborting an outermost transaction finishes execution of the whole
535 // transaction. Therefore, reset transaction state.
536 if (tx->state & gtm_thread::STATE_SERIAL)
537 gtm_thread::serial_lock.write_unlock ();
538 else
539 gtm_thread::serial_lock.read_unlock (tx);
540 tx->state = 0;
541
Richard Henderson062f93f2011-11-30 14:29:33 -0800542 GTM_longjmp (a_abortTransaction | a_restoreLiveVariables,
543 &tx->jb, tx->prop);
Aldy Hernandez0a355132011-11-08 11:13:41 +0000544 }
545}
546
547bool
548GTM::gtm_thread::trycommit ()
549{
550 nesting--;
551
552 // Skip any real commit for elided transactions.
553 if (nesting > 0 && (parent_txns.size() == 0 ||
554 nesting > parent_txns[parent_txns.size() - 1].nesting))
555 return true;
556
557 if (nesting > 0)
558 {
559 // Commit of a closed-nested transaction. Remove one checkpoint and add
560 // any effects of this transaction to the parent transaction.
561 gtm_transaction_cp *cp = parent_txns.pop();
562 commit_allocations(false, &cp->alloc_actions);
563 cp->commit(this);
564 return true;
565 }
566
567 // Commit of an outermost transaction.
568 gtm_word priv_time = 0;
569 if (abi_disp()->trycommit (priv_time))
570 {
Torvald Riegel629e4722016-01-13 12:40:34 +0000571 // The transaction is now finished but we will still access some shared
572 // data if we have to ensure privatization safety.
573 bool do_read_unlock = false;
Aldy Hernandez0a355132011-11-08 11:13:41 +0000574 if (state & gtm_thread::STATE_SERIAL)
Torvald Riegelc898f7b2011-12-24 01:42:48 +0000575 {
576 gtm_thread::serial_lock.write_unlock ();
577 // There are no other active transactions, so there's no need to
578 // enforce privatization safety.
579 priv_time = 0;
580 }
Aldy Hernandez0a355132011-11-08 11:13:41 +0000581 else
Torvald Riegel629e4722016-01-13 12:40:34 +0000582 {
583 // If we have to ensure privatization safety, we must not yet
584 // release the read lock and become inactive because (1) we still
585 // have to go through the list of all transactions, which can be
586 // modified by serial mode threads, and (2) we interpret each
587 // transactions' shared_state in the context of what we believe to
588 // be the current method group (and serial mode transactions can
589 // change the method group). Therefore, if we have to ensure
590 // privatization safety, delay becoming inactive but set a maximum
591 // snapshot time (we have committed and thus have an empty snapshot,
592 // so it will always be most recent). Use release MO so that this
593 // synchronizes with other threads observing our snapshot time.
594 if (priv_time)
595 {
596 do_read_unlock = true;
597 shared_state.store((~(typeof gtm_thread::shared_state)0) - 1,
598 memory_order_release);
599 }
600 else
601 gtm_thread::serial_lock.read_unlock (this);
602 }
Aldy Hernandez0a355132011-11-08 11:13:41 +0000603 state = 0;
604
605 // We can commit the undo log after dispatch-specific commit and after
606 // making the transaction inactive because we only have to reset
607 // gtm_thread state.
Torvald Riegel11f30bb2012-01-08 14:13:49 +0000608 undolog.commit ();
Aldy Hernandez0a355132011-11-08 11:13:41 +0000609 // Reset further transaction state.
610 cxa_catch_count = 0;
Aldy Hernandez0a355132011-11-08 11:13:41 +0000611 restart_total = 0;
612
613 // Ensure privatization safety, if necessary.
614 if (priv_time)
615 {
Torvald Riegel799142b2011-12-24 01:42:20 +0000616 // There must be a seq_cst fence between the following loads of the
617 // other transactions' shared_state and the dispatch-specific stores
618 // that signal updates by this transaction (e.g., lock
619 // acquisitions). This ensures that if we read prior to other
620 // reader transactions setting their shared_state to 0, then those
621 // readers will observe our updates. We can reuse the seq_cst fence
622 // in serial_lock.read_unlock() however, so we don't need another
623 // one here.
Aldy Hernandez0a355132011-11-08 11:13:41 +0000624 // TODO Don't just spin but also block using cond vars / futexes
625 // here. Should probably be integrated with the serial lock code.
Aldy Hernandez0a355132011-11-08 11:13:41 +0000626 for (gtm_thread *it = gtm_thread::list_of_threads; it != 0;
627 it = it->next_thread)
628 {
629 if (it == this) continue;
Torvald Riegel799142b2011-12-24 01:42:20 +0000630 // We need to load other threads' shared_state using acquire
631 // semantics (matching the release semantics of the respective
632 // updates). This is necessary to ensure that the other
633 // threads' memory accesses happen before our actions that
634 // assume privatization safety.
635 // TODO Are there any platform-specific optimizations (e.g.,
636 // merging barriers)?
637 while (it->shared_state.load(memory_order_acquire) < priv_time)
Aldy Hernandez0a355132011-11-08 11:13:41 +0000638 cpu_relax();
639 }
640 }
641
Torvald Riegel629e4722016-01-13 12:40:34 +0000642 // After ensuring privatization safety, we are now truly inactive and
643 // thus can release the read lock. We will also execute potentially
644 // privatizing actions (e.g., calling free()). User actions are first.
645 if (do_read_unlock)
646 gtm_thread::serial_lock.read_unlock (this);
Aldy Hernandez0a355132011-11-08 11:13:41 +0000647 commit_user_actions ();
648 commit_allocations (false, 0);
649
650 return true;
651 }
652 return false;
653}
654
655void ITM_NORETURN
Torvald Riegel610e3902011-12-24 01:42:35 +0000656GTM::gtm_thread::restart (gtm_restart_reason r, bool finish_serial_upgrade)
Aldy Hernandez0a355132011-11-08 11:13:41 +0000657{
658 // Roll back to outermost transaction. Do not reset transaction state because
659 // we will continue executing this transaction.
660 rollback ();
Torvald Riegel610e3902011-12-24 01:42:35 +0000661
662 // If we have to restart while an upgrade of the serial lock is happening,
663 // we need to finish this here, after rollback (to ensure privatization
664 // safety despite undo writes) and before deciding about the retry strategy
665 // (which could switch to/from serial mode).
666 if (finish_serial_upgrade)
667 gtm_thread::serial_lock.write_upgrade_finish(this);
668
Aldy Hernandez0a355132011-11-08 11:13:41 +0000669 decide_retry_strategy (r);
670
671 // Run dispatch-specific restart code. Retry until we succeed.
672 abi_dispatch* disp = abi_disp();
673 GTM::gtm_restart_reason rr;
674 while ((rr = disp->begin_or_restart()) != NO_RESTART)
675 {
676 decide_retry_strategy(rr);
677 disp = abi_disp();
678 }
679
Richard Henderson062f93f2011-11-30 14:29:33 -0800680 GTM_longjmp (choose_code_path(prop, disp) | a_restoreLiveVariables,
681 &jb, prop);
Aldy Hernandez0a355132011-11-08 11:13:41 +0000682}
683
684void ITM_REGPARM
685_ITM_commitTransaction(void)
686{
Torvald Riegel64fbcc72012-11-09 17:04:40 +0000687#if defined(USE_HTM_FASTPATH)
688 // HTM fastpath. If we are not executing a HW transaction, then we will be
689 // a serial-mode transaction. If we are, then there will be no other
690 // concurrent serial-mode transaction.
691 // See gtm_thread::begin_transaction.
692 if (likely(htm_fastpath && !gtm_thread::serial_lock.is_write_locked()))
693 {
694 htm_commit();
695 return;
696 }
697#endif
Aldy Hernandez0a355132011-11-08 11:13:41 +0000698 gtm_thread *tx = gtm_thr();
699 if (!tx->trycommit ())
700 tx->restart (RESTART_VALIDATE_COMMIT);
701}
702
703void ITM_REGPARM
704_ITM_commitTransactionEH(void *exc_ptr)
705{
Torvald Riegel64fbcc72012-11-09 17:04:40 +0000706#if defined(USE_HTM_FASTPATH)
707 // See _ITM_commitTransaction.
708 if (likely(htm_fastpath && !gtm_thread::serial_lock.is_write_locked()))
709 {
710 htm_commit();
711 return;
712 }
713#endif
Aldy Hernandez0a355132011-11-08 11:13:41 +0000714 gtm_thread *tx = gtm_thr();
715 if (!tx->trycommit ())
716 {
717 tx->eh_in_flight = exc_ptr;
718 tx->restart (RESTART_VALIDATE_COMMIT);
719 }
720}