/* Copyright (C) 2008-2016 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU Transactional Memory Library (libitm).

   Libitm is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   Libitm is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

#include "libitm_i.h"
#include <pthread.h>


using namespace GTM;

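// Note: generic TLS slot used by gtm_thr()/set_gtm_thr() (and, depending on
// configuration, abi_disp()/set_abi_disp()) when the target does not provide
// its own arch-specific way to access this thread-local state.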
#if !defined(HAVE_ARCH_GTM_THREAD) || !defined(HAVE_ARCH_GTM_THREAD_DISP)
extern __thread gtm_thread_tls _gtm_thr_tls;
#endif

// Put this at the start of a cacheline so that serial_lock's writers and
// htm_fastpath fields are on the same cacheline, so that HW transactions
// only have to pay one cacheline capacity to monitor both.
gtm_rwlock GTM::gtm_thread::serial_lock
  __attribute__((aligned(HW_CACHELINE_SIZE)));
gtm_thread *GTM::gtm_thread::list_of_threads = 0;
unsigned GTM::gtm_thread::number_of_threads = 0;

/* ??? Move elsewhere when we figure out library initialization.  */
uint64_t GTM::gtm_spin_count_var = 1000;

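// Source of unique transaction IDs.  IDs are handed out to threads in blocks
// of tid_block_size (see begin_transaction below) so that this shared counter
// is only rarely touched.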
#ifdef HAVE_64BIT_SYNC_BUILTINS
static atomic<_ITM_transactionId_t> global_tid;
#else
static _ITM_transactionId_t global_tid;
static pthread_mutex_t global_tid_lock = PTHREAD_MUTEX_INITIALIZER;
#endif


// Provides an on-thread-exit callback used to release per-thread data.
static pthread_key_t thr_release_key;
static pthread_once_t thr_release_once = PTHREAD_ONCE_INIT;

/* Allocate a transaction structure.  */
void *
GTM::gtm_thread::operator new (size_t s)
{
  void *tx;

  assert(s == sizeof(gtm_thread));

  tx = xmalloc (sizeof (gtm_thread), true);
  memset (tx, 0, sizeof (gtm_thread));

  return tx;
}

/* Free the given transaction.  Raises an error if the transaction is still
   in use.  */
void
GTM::gtm_thread::operator delete(void *tx)
{
  free(tx);
}

static void
thread_exit_handler(void *)
{
  gtm_thread *thr = gtm_thr();
  if (thr)
    delete thr;
  set_gtm_thr(0);
}

static void
thread_exit_init()
{
  if (pthread_key_create(&thr_release_key, thread_exit_handler))
    GTM_fatal("Creating thread release TLS key failed.");
}


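// Deregister this thread's transaction descriptor on thread exit.  Reached
// via thread_exit_handler(), which runs as the destructor associated with
// thr_release_key.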
GTM::gtm_thread::~gtm_thread()
{
  if (nesting > 0)
    GTM_fatal("Thread exit while a transaction is still active.");

  // Deregister this transaction.
  serial_lock.write_lock ();
  gtm_thread **prev = &list_of_threads;
  for (; *prev; prev = &(*prev)->next_thread)
    {
      if (*prev == this)
        {
          *prev = (*prev)->next_thread;
          break;
        }
    }
  number_of_threads--;
  number_of_threads_changed(number_of_threads + 1, number_of_threads);
  serial_lock.write_unlock ();
}

GTM::gtm_thread::gtm_thread ()
{
  // This object's memory has been set to zero by operator new, so no need
  // to initialize any of the other primitive-type members that do not have
  // constructors.
  shared_state.store(-1, memory_order_relaxed);

  // Register this transaction with the list of all threads' transactions.
  serial_lock.write_lock ();
  next_thread = list_of_threads;
  list_of_threads = this;
  number_of_threads++;
  number_of_threads_changed(number_of_threads - 1, number_of_threads);
  serial_lock.write_unlock ();

  init_cpp_exceptions ();

  if (pthread_once(&thr_release_once, thread_exit_init))
    GTM_fatal("Initializing thread release TLS key failed.");
  // Any non-null value is sufficient to trigger destruction of this
  // transaction when the current thread terminates.
  if (pthread_setspecific(thr_release_key, this))
    GTM_fatal("Setting thread release TLS key failed.");
}

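// Pick the code path the caller should execute: the uninstrumented one if the
// compiler provided it and the current method allows running uninstrumented
// code, otherwise the instrumented one.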
static inline uint32_t
choose_code_path(uint32_t prop, abi_dispatch *disp)
{
  if ((prop & pr_uninstrumentedCode) && disp->can_run_uninstrumented_code())
    return a_runUninstrumentedCode;
  else
    return a_runInstrumentedCode;
}

uint32_t
GTM::gtm_thread::begin_transaction (uint32_t prop, const gtm_jmpbuf *jb)
{
  static const _ITM_transactionId_t tid_block_size = 1 << 16;

  gtm_thread *tx;
  abi_dispatch *disp;
  uint32_t ret;

  // ??? pr_undoLogCode is not properly defined in the ABI.  Are barriers
  // omitted because they are not necessary (e.g., a transaction on thread-
  // local data) or because the compiler thinks that some kind of global
  // synchronization might perform better?
  if (unlikely(prop & pr_undoLogCode))
    GTM_fatal("pr_undoLogCode not supported");

#ifdef USE_HTM_FASTPATH
  // HTM fastpath.  Only chosen in the absence of transaction_cancel to allow
  // using an uninstrumented code path.
  // The fastpath is enabled only by dispatch_htm's method group, which uses
  // serial-mode methods as fallback.  Serial-mode transactions cannot execute
  // concurrently with HW transactions because the latter monitor the serial
  // lock's writer flag and thus abort if another thread is or becomes a
  // serial transaction.  Therefore, if the fastpath is enabled, then a
  // transaction is not executing as a HW transaction iff the serial lock is
  // write-locked.  Also, HW transactions monitor the fastpath control
  // variable, so that they will only execute if dispatch_htm is still the
  // current method group.  This allows us to use htm_fastpath and the serial
  // lock's writers flag to reliably determine whether the current thread runs
  // a HW transaction, and thus we do not need to maintain this information in
  // per-thread state.
  // If an uninstrumented code path is not available, we can still run
  // instrumented code from a HW transaction because the HTM fastpath kicks
  // in early in both begin and commit, and the transaction is not canceled.
  // HW transactions might get requests to switch to serial-irrevocable mode,
  // but these can be ignored because the HTM provides all necessary
  // correctness guarantees.  Transactions cannot detect whether they are
  // indeed in serial mode, and HW transactions should never need serial mode
  // for any internal changes (e.g., they never abort visibly to the STM code
  // and thus do not trigger the standard retry handling).
#ifndef HTM_CUSTOM_FASTPATH
  if (likely(serial_lock.get_htm_fastpath() && (prop & pr_hasNoAbort)))
    {
      // Note that the snapshot of htm_fastpath that we take here could be
      // outdated, and a different method group than dispatch_htm may have
      // been chosen in the meantime.  Therefore, take care not to touch
      // anything besides the serial lock, which is independent of method
      // groups.
      for (uint32_t t = serial_lock.get_htm_fastpath(); t; t--)
        {
          uint32_t ret = htm_begin();
          if (htm_begin_success(ret))
            {
              // We are executing a transaction now.
              // Monitor the writer flag in the serial-mode lock, and abort
              // if there is an active or waiting serial-mode transaction.
              // Also checks that htm_fastpath is still nonzero and thus
              // HW transactions are allowed to run.
              // Note that this can also happen due to an enclosing
              // serial-mode transaction; we handle this case below.
              if (unlikely(serial_lock.htm_fastpath_disabled()))
                htm_abort();
              else
                // We do not need to set a_saveLiveVariables because of HTM.
                return (prop & pr_uninstrumentedCode) ?
                    a_runUninstrumentedCode : a_runInstrumentedCode;
            }
          // The transaction has aborted.  Don't retry if it's unlikely that
          // retrying the transaction will be successful.
          if (!htm_abort_should_retry(ret))
            break;
          // Check whether the HTM fastpath has been disabled.
          if (!serial_lock.get_htm_fastpath())
            break;
          // Wait until any concurrent serial-mode transactions have finished.
          // This is an empty critical section, but won't be elided.
          if (serial_lock.htm_fastpath_disabled())
            {
              tx = gtm_thr();
              if (unlikely(tx == NULL))
                {
                  // See below.
                  tx = new gtm_thread();
                  set_gtm_thr(tx);
                }
              // Check whether there is an enclosing serial-mode transaction;
              // if so, we just continue as a nested transaction and don't
              // try to use the HTM fastpath.  This case can happen when an
              // outermost relaxed transaction calls unsafe code that starts
              // a transaction.
              if (tx->nesting > 0)
                break;
              // Another thread is running a serial-mode transaction.  Wait.
              serial_lock.read_lock(tx);
              serial_lock.read_unlock(tx);
              // TODO We should probably reset the retry count t here, unless
              // we have retried so often that we should go serial to avoid
              // starvation.
            }
        }
    }
#else
  // If we have a custom HTM fastpath in ITM_beginTransaction, we implement
  // just the retry policy here.  We communicate with the custom fastpath
  // through additional property bits and return codes, and either transfer
  // control back to the custom fastpath or run the fallback mechanism.  The
  // fastpath synchronization algorithm itself is the same.
  // pr_HTMRetryableAbort states that a HW transaction started by the custom
  // HTM fastpath aborted, and that we thus have to decide whether to retry
  // the fastpath (returning a_tryHTMFastPath) or just proceed with the
  // fallback method.
  if (likely(serial_lock.get_htm_fastpath() && (prop & pr_HTMRetryableAbort)))
    {
      tx = gtm_thr();
      if (unlikely(tx == NULL))
        {
          // See below.
          tx = new gtm_thread();
          set_gtm_thr(tx);
        }
      // If this is the first abort, reset the retry count.  We abuse
      // restart_total for the retry count, which is fine because our only
      // other fallback will use serial transactions, which don't use
      // restart_total but will reset it when committing.
      if (!(prop & pr_HTMRetriedAfterAbort))
        tx->restart_total = gtm_thread::serial_lock.get_htm_fastpath();

      if (--tx->restart_total > 0)
        {
          // Wait until any concurrent serial-mode transactions have finished.
          // Essentially the same code as above.
          if (!serial_lock.get_htm_fastpath())
            goto stop_custom_htm_fastpath;
          if (serial_lock.htm_fastpath_disabled())
            {
              if (tx->nesting > 0)
                goto stop_custom_htm_fastpath;
              serial_lock.read_lock(tx);
              serial_lock.read_unlock(tx);
            }
          // Let ITM_beginTransaction retry the custom HTM fastpath.
          return a_tryHTMFastPath;
        }
    }
 stop_custom_htm_fastpath:
#endif
#endif

  tx = gtm_thr();
  if (unlikely(tx == NULL))
    {
      // Create the thread object.  The constructor will also set up automatic
      // deletion on thread termination.
      tx = new gtm_thread();
      set_gtm_thr(tx);
    }

  if (tx->nesting > 0)
    {
      // This is a nested transaction.
      // Check prop compatibility:
      // The ABI requires pr_hasNoFloatUpdate, pr_hasNoVectorUpdate,
      // pr_hasNoIrrevocable, pr_aWBarriersOmitted, pr_RaRBarriersOmitted, and
      // pr_hasNoSimpleReads to hold for the full dynamic scope of a
      // transaction.  We could check that these are set for the nested
      // transaction if they are also set for the parent transaction, but the
      // ABI does not require these flags to be set if they could be set,
      // so the check could be too strict.
      // ??? For pr_readOnly, lexical or dynamic scope is unspecified.

      if (prop & pr_hasNoAbort)
        {
          // We can use flat nesting, so elide this transaction.
          if (!(prop & pr_instrumentedCode))
            {
              if (!(tx->state & STATE_SERIAL) ||
                  !(tx->state & STATE_IRREVOCABLE))
                tx->serialirr_mode();
            }
          // Increment nesting level after checking that we have a method that
          // allows us to continue.
          tx->nesting++;
          return choose_code_path(prop, abi_disp());
        }

      // The transaction might abort, so use closed nesting if possible.
      // pr_hasNoAbort has lexical scope, so the compiler should really have
      // generated an instrumented code path.
      assert(prop & pr_instrumentedCode);

      // Create a checkpoint of the current transaction.
      gtm_transaction_cp *cp = tx->parent_txns.push();
      cp->save(tx);
      new (&tx->alloc_actions) aa_tree<uintptr_t, gtm_alloc_action>();

      // Check whether the current method actually supports closed nesting.
      // If we can switch to another one, do so.
      // If not, we assume that actual aborts are infrequent, and rather
      // restart in _ITM_abortTransaction when we really have to.
      disp = abi_disp();
      if (!disp->closed_nesting())
        {
          // ??? Should we elide the transaction if there is no alternative
          // method that supports closed nesting?  If we do, we need to set
          // some flag to prevent _ITM_abortTransaction from aborting the
          // wrong transaction (i.e., some parent transaction).
          abi_dispatch *cn_disp = disp->closed_nesting_alternative();
          if (cn_disp)
            {
              disp = cn_disp;
              set_abi_disp(disp);
            }
        }
    }
  else
    {
      // Outermost transaction
      disp = tx->decide_begin_dispatch (prop);
      set_abi_disp (disp);
    }

  // Initialization that is common for outermost and nested transactions.
  tx->prop = prop;
  tx->nesting++;

  tx->jb = *jb;

  // As long as we have not exhausted a previously allocated block of TIDs,
  // we can avoid an atomic operation on a shared cacheline.
  if (tx->local_tid & (tid_block_size - 1))
    tx->id = tx->local_tid++;
  else
    {
#ifdef HAVE_64BIT_SYNC_BUILTINS
      // We don't really care which block of TIDs we get but only that we
      // acquire one atomically; therefore, relaxed memory order is
      // sufficient.
      tx->id = global_tid.fetch_add(tid_block_size, memory_order_relaxed);
      tx->local_tid = tx->id + 1;
#else
      pthread_mutex_lock (&global_tid_lock);
      global_tid += tid_block_size;
      tx->id = global_tid;
      tx->local_tid = tx->id + 1;
      pthread_mutex_unlock (&global_tid_lock);
#endif
    }

  // Log the number of uncaught exceptions if we might have to roll back this
  // state.
  if (tx->cxa_uncaught_count_ptr != 0)
    tx->cxa_uncaught_count = *tx->cxa_uncaught_count_ptr;

  // Run dispatch-specific restart code.  Retry until we succeed.
  GTM::gtm_restart_reason rr;
  while ((rr = disp->begin_or_restart()) != NO_RESTART)
    {
      tx->decide_retry_strategy(rr);
      disp = abi_disp();
    }

  // Determine the code path to run.  Only irrevocable transactions cannot be
  // restarted, so all other transactions need to save live variables.
  ret = choose_code_path(prop, disp);
  if (!(tx->state & STATE_IRREVOCABLE))
    ret |= a_saveLiveVariables;
  return ret;
}


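// Capture the state of the currently active transaction in this checkpoint so
// that a closed-nested child transaction can later be committed into or rolled
// back to its parent.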
void
GTM::gtm_transaction_cp::save(gtm_thread* tx)
{
  // Save everything that we might have to restore on restarts or aborts.
  jb = tx->jb;
  undolog_size = tx->undolog.size();
  memcpy(&alloc_actions, &tx->alloc_actions, sizeof(alloc_actions));
  user_actions_size = tx->user_actions.size();
  id = tx->id;
  prop = tx->prop;
  cxa_catch_count = tx->cxa_catch_count;
  cxa_uncaught_count = tx->cxa_uncaught_count;
  disp = abi_disp();
  nesting = tx->nesting;
}

void
GTM::gtm_transaction_cp::commit(gtm_thread* tx)
{
  // Restore state that is not persistent across commits.  Exception handling
  // information, nesting level, and any logs do not need to be restored on
  // commits of nested transactions.  Allocation actions must be committed
  // before committing the snapshot.
  tx->jb = jb;
  memcpy(&tx->alloc_actions, &alloc_actions, sizeof(alloc_actions));
  tx->id = id;
  tx->prop = prop;
}


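// Roll the current transaction back to the given checkpoint, or, if cp is
// null, to the outermost transaction.  'aborting' tells whether we are
// aborting rather than restarting the transaction.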
void
GTM::gtm_thread::rollback (gtm_transaction_cp *cp, bool aborting)
{
  // The undo log is special in that it is used for both thread-local and
  // shared data.  Because of the latter, we have to roll it back before any
  // dispatch-specific rollback (which handles synchronization with other
  // transactions).
  undolog.rollback (this, cp ? cp->undolog_size : 0);

  // Perform dispatch-specific rollback.
  abi_disp()->rollback (cp);

  // Roll back all actions that are supposed to happen around the transaction.
  rollback_user_actions (cp ? cp->user_actions_size : 0);
  commit_allocations (true, (cp ? &cp->alloc_actions : 0));
  revert_cpp_exceptions (cp);

  if (cp)
    {
      // We do not yet handle restarts of nested transactions.  To do that, we
      // would have to restore some state (jb, id, prop, nesting) not to the
      // checkpoint but to the transaction that was started from this
      // checkpoint (e.g., nesting = cp->nesting + 1);
      assert(aborting);
      // Roll back the rest of the state to the checkpoint.
      jb = cp->jb;
      id = cp->id;
      prop = cp->prop;
      if (cp->disp != abi_disp())
        set_abi_disp(cp->disp);
      memcpy(&alloc_actions, &cp->alloc_actions, sizeof(alloc_actions));
      nesting = cp->nesting;
    }
  else
    {
      // Roll back to the outermost transaction.
      // Restore the jump buffer and transaction properties, which we will
      // need for the longjmp used to restart or abort the transaction.
      if (parent_txns.size() > 0)
        {
          jb = parent_txns[0].jb;
          id = parent_txns[0].id;
          prop = parent_txns[0].prop;
        }
      // Reset the transaction.  Do not reset this->state, which is handled by
      // the callers.  Note that if we are not aborting, we reset the
      // transaction to the point after having executed begin_transaction
      // (we will return from it), so the nesting level must be one, not zero.
      nesting = (aborting ? 0 : 1);
      parent_txns.clear();
    }

  if (this->eh_in_flight)
    {
      _Unwind_DeleteException ((_Unwind_Exception *) this->eh_in_flight);
      this->eh_in_flight = NULL;
    }
}

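// ABI entry point: abort the innermost transaction (or, if outerAbort is set,
// the outermost one) at the user's request and longjmp back to its begin.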
void ITM_REGPARM
_ITM_abortTransaction (_ITM_abortReason reason)
{
  gtm_thread *tx = gtm_thr();

  assert (reason == userAbort || reason == (userAbort | outerAbort));
  assert ((tx->prop & pr_hasNoAbort) == 0);

  if (tx->state & gtm_thread::STATE_IRREVOCABLE)
    abort ();

  // Roll back to innermost transaction.
  if (tx->parent_txns.size() > 0 && !(reason & outerAbort))
    {
      // If the current method does not support closed nesting but we are
      // nested and must only roll back the innermost transaction, then
      // restart with a method that supports closed nesting.
      abi_dispatch *disp = abi_disp();
      if (!disp->closed_nesting())
        tx->restart(RESTART_CLOSED_NESTING);

      // The innermost transaction is a closed nested transaction.
      gtm_transaction_cp *cp = tx->parent_txns.pop();
      uint32_t longjmp_prop = tx->prop;
      gtm_jmpbuf longjmp_jb = tx->jb;

      tx->rollback (cp, true);

      // Jump to nested transaction (use the saved jump buffer).
      GTM_longjmp (a_abortTransaction | a_restoreLiveVariables,
                   &longjmp_jb, longjmp_prop);
    }
  else
    {
      // There is no nested transaction or an abort of the outermost
      // transaction was requested, so roll back to the outermost transaction.
      tx->rollback (0, true);

      // Aborting an outermost transaction finishes execution of the whole
      // transaction.  Therefore, reset transaction state.
      if (tx->state & gtm_thread::STATE_SERIAL)
        gtm_thread::serial_lock.write_unlock ();
      else
        gtm_thread::serial_lock.read_unlock (tx);
      tx->state = 0;

      GTM_longjmp (a_abortTransaction | a_restoreLiveVariables,
                   &tx->jb, tx->prop);
    }
}

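// Try to commit the current transaction.  Returns false if the commit did not
// succeed and the transaction must be restarted; commits of flat-nested or
// closed-nested transactions always succeed.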
bool
GTM::gtm_thread::trycommit ()
{
  nesting--;

  // Skip any real commit for elided transactions.
  if (nesting > 0 && (parent_txns.size() == 0 ||
      nesting > parent_txns[parent_txns.size() - 1].nesting))
    return true;

  if (nesting > 0)
    {
      // Commit of a closed-nested transaction.  Remove one checkpoint and add
      // any effects of this transaction to the parent transaction.
      gtm_transaction_cp *cp = parent_txns.pop();
      commit_allocations(false, &cp->alloc_actions);
      cp->commit(this);
      return true;
    }

  // Commit of an outermost transaction.
  gtm_word priv_time = 0;
  if (abi_disp()->trycommit (priv_time))
    {
      // The transaction is now finished but we will still access some shared
      // data if we have to ensure privatization safety.
      bool do_read_unlock = false;
      if (state & gtm_thread::STATE_SERIAL)
        {
          gtm_thread::serial_lock.write_unlock ();
          // There are no other active transactions, so there's no need to
          // enforce privatization safety.
          priv_time = 0;
        }
      else
        {
          // If we have to ensure privatization safety, we must not yet
          // release the read lock and become inactive because (1) we still
          // have to go through the list of all transactions, which can be
          // modified by serial mode threads, and (2) we interpret each
          // transaction's shared_state in the context of what we believe to
          // be the current method group (and serial mode transactions can
          // change the method group).  Therefore, if we have to ensure
          // privatization safety, delay becoming inactive but set a maximum
          // snapshot time (we have committed and thus have an empty snapshot,
          // so it will always be most recent).  Use release MO so that this
          // synchronizes with other threads observing our snapshot time.
          if (priv_time)
            {
              do_read_unlock = true;
              shared_state.store((~(typeof gtm_thread::shared_state)0) - 1,
                  memory_order_release);
            }
          else
            gtm_thread::serial_lock.read_unlock (this);
        }
      state = 0;

      // We can commit the undo log after dispatch-specific commit and after
      // making the transaction inactive because we only have to reset
      // gtm_thread state.
      undolog.commit ();
      // Reset further transaction state.
      cxa_catch_count = 0;
      restart_total = 0;

      // Ensure privatization safety, if necessary.
      if (priv_time)
        {
          // There must be a seq_cst fence between the following loads of the
          // other transactions' shared_state and the dispatch-specific stores
          // that signal updates by this transaction (e.g., lock
          // acquisitions).  This ensures that if we read prior to other
          // reader transactions setting their shared_state to 0, then those
          // readers will observe our updates.  We can reuse the seq_cst fence
          // in serial_lock.read_unlock() if we performed that; if not, we
          // issue the fence.
          if (do_read_unlock)
            atomic_thread_fence (memory_order_seq_cst);
          // TODO Don't just spin but also block using cond vars / futexes
          // here.  Should probably be integrated with the serial lock code.
          for (gtm_thread *it = gtm_thread::list_of_threads; it != 0;
              it = it->next_thread)
            {
              if (it == this) continue;
              // We need to load other threads' shared_state using acquire
              // semantics (matching the release semantics of the respective
              // updates).  This is necessary to ensure that the other
              // threads' memory accesses happen before our actions that
              // assume privatization safety.
              // TODO Are there any platform-specific optimizations (e.g.,
              // merging barriers)?
              while (it->shared_state.load(memory_order_acquire) < priv_time)
                cpu_relax();
            }
        }

      // After ensuring privatization safety, we are now truly inactive and
      // thus can release the read lock.  We will also execute potentially
      // privatizing actions (e.g., calling free()).  User actions are first.
      if (do_read_unlock)
        gtm_thread::serial_lock.read_unlock (this);
      commit_user_actions ();
      commit_allocations (false, 0);

      return true;
    }
  return false;
}

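// Restart the current (outermost) transaction: roll it back, decide on a new
// retry strategy, and longjmp back to the transaction's begin.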
void ITM_NORETURN
GTM::gtm_thread::restart (gtm_restart_reason r, bool finish_serial_upgrade)
{
  // Roll back to outermost transaction.  Do not reset transaction state
  // because we will continue executing this transaction.
  rollback ();

  // If we have to restart while an upgrade of the serial lock is happening,
  // we need to finish this here, after rollback (to ensure privatization
  // safety despite undo writes) and before deciding about the retry strategy
  // (which could switch to/from serial mode).
  if (finish_serial_upgrade)
    gtm_thread::serial_lock.write_upgrade_finish(this);

  decide_retry_strategy (r);

  // Run dispatch-specific restart code.  Retry until we succeed.
  abi_dispatch* disp = abi_disp();
  GTM::gtm_restart_reason rr;
  while ((rr = disp->begin_or_restart()) != NO_RESTART)
    {
      decide_retry_strategy(rr);
      disp = abi_disp();
    }

  GTM_longjmp (choose_code_path(prop, disp) | a_restoreLiveVariables,
               &jb, prop);
}

void ITM_REGPARM
_ITM_commitTransaction(void)
{
#if defined(USE_HTM_FASTPATH)
  // HTM fastpath.  If we are not executing a HW transaction, then we will be
  // a serial-mode transaction.  If we are, then there will be no other
  // concurrent serial-mode transaction.
  // See gtm_thread::begin_transaction.
  if (likely(!gtm_thread::serial_lock.htm_fastpath_disabled()))
    {
      htm_commit();
      return;
    }
#endif
  gtm_thread *tx = gtm_thr();
  if (!tx->trycommit ())
    tx->restart (RESTART_VALIDATE_COMMIT);
}

void ITM_REGPARM
_ITM_commitTransactionEH(void *exc_ptr)
{
#if defined(USE_HTM_FASTPATH)
  // See _ITM_commitTransaction.
  if (likely(!gtm_thread::serial_lock.htm_fastpath_disabled()))
    {
      htm_commit();
      return;
    }
#endif
  gtm_thread *tx = gtm_thr();
  if (!tx->trycommit ())
    {
      tx->eh_in_flight = exc_ptr;
      tx->restart (RESTART_VALIDATE_COMMIT);
    }
}