Wed, 31 Dec 2014 06:09:35 +0100
Cloned upstream origin tor-browser at tor-browser-31.3.0esr-4.5-1-build1,
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f, for hacking purposes.
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file is an internal atomic implementation for compiler-based
// ThreadSanitizer. Use base/atomicops.h instead.

#ifndef BASE_ATOMICOPS_INTERNALS_TSAN_H_
#define BASE_ATOMICOPS_INTERNALS_TSAN_H_

#include "base/base_export.h"

// This struct is not part of the public API of this module; clients may not
// use it. (However, it's exported via BASE_EXPORT because clients implicitly
// do use it at link time by inlining these functions.)
// Features of this x86 CPU. Values may not be correct before main() is run,
// but are set conservatively.
struct AtomicOps_x86CPUFeatureStruct {
  bool has_amd_lock_mb_bug;  // Processor has AMD memory-barrier bug; do lfence
                             // after acquire compare-and-swap.
  bool has_sse2;             // Processor has SSE2.
};
BASE_EXPORT extern struct AtomicOps_x86CPUFeatureStruct
    AtomicOps_Internalx86CPUFeatures;

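// Illustrative only (a hedged sketch, not code from this header): the
// non-TSan x86 implementations consult this struct to work around the AMD
// bug noted above; "DoAcquireCAS" and its body are hypothetical.
//
//   Atomic32 DoAcquireCAS(volatile Atomic32 *ptr,
//                         Atomic32 old_value, Atomic32 new_value) {
//     Atomic32 prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
//     if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug)
//       __asm__ __volatile__("lfence" : : : "memory");  // extra barrier
//     return prev;
//   }
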
#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")

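// Note: ATOMICOPS_COMPILER_BARRIER() constrains only the compiler, not the
// CPU. The empty asm with a "memory" clobber forbids GCC/Clang from moving
// memory accesses across it, but it emits no fence instruction. Hedged
// sketch (g_payload and g_ready are hypothetical names):
//
//   g_payload = ComputePayload();   // must not be sunk below the barrier
//   ATOMICOPS_COMPILER_BARRIER();
//   g_ready = 1;                    // must not be hoisted above it
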
namespace base {
namespace subtle {

#ifndef TSAN_INTERFACE_ATOMIC_H
#define TSAN_INTERFACE_ATOMIC_H

#ifdef __cplusplus
extern "C" {
#endif

typedef char __tsan_atomic8;
typedef short __tsan_atomic16;  // NOLINT
typedef int __tsan_atomic32;
typedef long __tsan_atomic64;  // NOLINT

#if defined(__SIZEOF_INT128__) \
    || (__clang_major__ * 100 + __clang_minor__ >= 302)
typedef __int128 __tsan_atomic128;
#define __TSAN_HAS_INT128 1
#else
typedef char __tsan_atomic128;
#define __TSAN_HAS_INT128 0
#endif

typedef enum {
  __tsan_memory_order_relaxed,
  __tsan_memory_order_consume,
  __tsan_memory_order_acquire,
  __tsan_memory_order_release,
  __tsan_memory_order_acq_rel,
  __tsan_memory_order_seq_cst,
} __tsan_memory_order;

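// These constants mirror C++11 std::memory_order one-for-one
// (relaxed/consume/acquire/release/acq_rel/seq_cst), so the usual C++11
// ordering rules apply to every "mo" parameter declared below.
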
__tsan_atomic8 __tsan_atomic8_load(const volatile __tsan_atomic8 *a,
    __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_load(const volatile __tsan_atomic16 *a,
    __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32 *a,
    __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64 *a,
    __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_load(const volatile __tsan_atomic128 *a,
    __tsan_memory_order mo);

void __tsan_atomic8_store(volatile __tsan_atomic8 *a, __tsan_atomic8 v,
    __tsan_memory_order mo);
void __tsan_atomic16_store(volatile __tsan_atomic16 *a, __tsan_atomic16 v,
    __tsan_memory_order mo);
void __tsan_atomic32_store(volatile __tsan_atomic32 *a, __tsan_atomic32 v,
    __tsan_memory_order mo);
void __tsan_atomic64_store(volatile __tsan_atomic64 *a, __tsan_atomic64 v,
    __tsan_memory_order mo);
void __tsan_atomic128_store(volatile __tsan_atomic128 *a, __tsan_atomic128 v,
    __tsan_memory_order mo);

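// Illustrative usage (a hedged sketch; "g_flag" is a hypothetical variable):
// a release store paired with an acquire load of the same location, the
// standard publication idiom under these orderings.
//
//   volatile __tsan_atomic32 g_flag;
//   __tsan_atomic32_store(&g_flag, 1, __tsan_memory_order_release);
//   ...
//   if (__tsan_atomic32_load(&g_flag, __tsan_memory_order_acquire)) { ... }
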
__tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8 *a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_exchange(volatile __tsan_atomic16 *a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32 *a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64 *a,
    __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_exchange(volatile __tsan_atomic128 *a,
    __tsan_atomic128 v, __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8 *a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_add(volatile __tsan_atomic16 *a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32 *a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64 *a,
    __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_fetch_add(volatile __tsan_atomic128 *a,
    __tsan_atomic128 v, __tsan_memory_order mo);

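// All fetch_* operations here return the value the location held *before*
// the update, matching C11 atomic_fetch_* semantics. Hedged sketch
// ("g_count" is a hypothetical variable):
//
//   __tsan_atomic32 old =
//       __tsan_atomic32_fetch_add(&g_count, 1, __tsan_memory_order_relaxed);
//   // old is the prior value; the location now holds old + 1.
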
__tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8 *a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_and(volatile __tsan_atomic16 *a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32 *a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64 *a,
    __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_fetch_and(volatile __tsan_atomic128 *a,
    __tsan_atomic128 v, __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8 *a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_or(volatile __tsan_atomic16 *a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32 *a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64 *a,
    __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_fetch_or(volatile __tsan_atomic128 *a,
    __tsan_atomic128 v, __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8 *a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_xor(volatile __tsan_atomic16 *a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32 *a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64 *a,
    __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_fetch_xor(volatile __tsan_atomic128 *a,
    __tsan_atomic128 v, __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_fetch_nand(volatile __tsan_atomic8 *a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_nand(volatile __tsan_atomic16 *a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_nand(volatile __tsan_atomic32 *a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_nand(volatile __tsan_atomic64 *a,
    __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_fetch_nand(volatile __tsan_atomic128 *a,
    __tsan_atomic128 v, __tsan_memory_order mo);

int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8 *a,
    __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16 *a,
    __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32 *a,
    __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64 *a,
    __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic128_compare_exchange_weak(volatile __tsan_atomic128 *a,
    __tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);

int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8 *a,
    __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16 *a,
    __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32 *a,
    __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64 *a,
    __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic128_compare_exchange_strong(volatile __tsan_atomic128 *a,
    __tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);

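// The compare-exchange variants follow C11: on success they write v and
// return nonzero; on failure they return zero and update *c with the value
// actually observed. The _weak forms may additionally fail spuriously, so
// they belong in a retry loop. Hedged sketch ("g_val" and "expected" are
// hypothetical):
//
//   __tsan_atomic32 expected =
//       __tsan_atomic32_load(&g_val, __tsan_memory_order_relaxed);
//   while (!__tsan_atomic32_compare_exchange_weak(
//              &g_val, &expected, expected + 1,
//              __tsan_memory_order_acq_rel, __tsan_memory_order_relaxed)) {
//     // expected was refreshed with the current value; just retry.
//   }
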
__tsan_atomic8 __tsan_atomic8_compare_exchange_val(
    volatile __tsan_atomic8 *a, __tsan_atomic8 c, __tsan_atomic8 v,
    __tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic16 __tsan_atomic16_compare_exchange_val(
    volatile __tsan_atomic16 *a, __tsan_atomic16 c, __tsan_atomic16 v,
    __tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic32 __tsan_atomic32_compare_exchange_val(
    volatile __tsan_atomic32 *a, __tsan_atomic32 c, __tsan_atomic32 v,
    __tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic64 __tsan_atomic64_compare_exchange_val(
    volatile __tsan_atomic64 *a, __tsan_atomic64 c, __tsan_atomic64 v,
    __tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic128 __tsan_atomic128_compare_exchange_val(
    volatile __tsan_atomic128 *a, __tsan_atomic128 c, __tsan_atomic128 v,
    __tsan_memory_order mo, __tsan_memory_order fail_mo);

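// Unlike the _weak/_strong forms above, the _val forms take the expected
// value by value and return the value observed at *a; the exchange took
// effect iff the return value equals c. This is the same convention as
// GCC's __sync_val_compare_and_swap.
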
void __tsan_atomic_thread_fence(__tsan_memory_order mo);
void __tsan_atomic_signal_fence(__tsan_memory_order mo);

#ifdef __cplusplus
}  // extern "C"
#endif

#endif  // #ifndef TSAN_INTERFACE_ATOMIC_H

inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 cmp = old_value;
  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_relaxed, __tsan_memory_order_relaxed);
  return cmp;
}

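// Note the return convention: the CompareAndSwap wrappers return the value
// observed at *ptr, not a success flag, so the swap succeeded iff the
// result equals old_value. Hedged sketch ("g_owner" and "my_id" are
// hypothetical):
//
//   if (NoBarrier_CompareAndSwap(&g_owner, 0, my_id) == 0) {
//     // We installed my_id; the slot previously held 0.
//   }
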
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr,
                                         Atomic32 new_value) {
  return __tsan_atomic32_exchange(ptr, new_value,
      __tsan_memory_order_relaxed);
}

inline Atomic32 Acquire_AtomicExchange(volatile Atomic32 *ptr,
                                       Atomic32 new_value) {
  return __tsan_atomic32_exchange(ptr, new_value,
      __tsan_memory_order_acquire);
}

inline Atomic32 Release_AtomicExchange(volatile Atomic32 *ptr,
                                       Atomic32 new_value) {
  return __tsan_atomic32_exchange(ptr, new_value,
      __tsan_memory_order_release);
}

inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32 *ptr,
                                          Atomic32 increment) {
  return increment + __tsan_atomic32_fetch_add(ptr, increment,
      __tsan_memory_order_relaxed);
}

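// Unlike the raw fetch_add, the AtomicIncrement wrappers return the *new*
// value (old + increment), the historical atomicops contract. Hedged
// sketch ("g_count" is hypothetical):
//
//   Atomic32 n = NoBarrier_AtomicIncrement(&g_count, 1);  // n == new count
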
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32 *ptr,
                                        Atomic32 increment) {
  return increment + __tsan_atomic32_fetch_add(ptr, increment,
      __tsan_memory_order_acq_rel);
}

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 cmp = old_value;
  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_acquire, __tsan_memory_order_acquire);
  return cmp;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 cmp = old_value;
  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_release, __tsan_memory_order_relaxed);
  return cmp;
}

inline void NoBarrier_Store(volatile Atomic32 *ptr, Atomic32 value) {
  __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
}

inline void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value) {
  __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}

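// "Acquire_Store" and "Release_Load" are legacy atomicops names with no
// direct C++11 counterpart (C++11 has no acquire store or release load).
// They are emulated here as a relaxed access combined with a full seq_cst
// fence, which is at least as strong as the historical contract requires;
// Release_Load below and the 64-bit versions mirror the same pattern.
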
inline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) {
  __tsan_atomic32_store(ptr, value, __tsan_memory_order_release);
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32 *ptr) {
  return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
}

inline Atomic32 Acquire_Load(volatile const Atomic32 *ptr) {
  return __tsan_atomic32_load(ptr, __tsan_memory_order_acquire);
}

inline Atomic32 Release_Load(volatile const Atomic32 *ptr) {
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
  return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
}

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  Atomic64 cmp = old_value;
  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_relaxed, __tsan_memory_order_relaxed);
  return cmp;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr,
                                         Atomic64 new_value) {
  return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_relaxed);
}

inline Atomic64 Acquire_AtomicExchange(volatile Atomic64 *ptr,
                                       Atomic64 new_value) {
  return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_acquire);
}

inline Atomic64 Release_AtomicExchange(volatile Atomic64 *ptr,
                                       Atomic64 new_value) {
  return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_release);
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64 *ptr,
                                          Atomic64 increment) {
  return increment + __tsan_atomic64_fetch_add(ptr, increment,
      __tsan_memory_order_relaxed);
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64 *ptr,
                                        Atomic64 increment) {
  return increment + __tsan_atomic64_fetch_add(ptr, increment,
      __tsan_memory_order_acq_rel);
}

inline void NoBarrier_Store(volatile Atomic64 *ptr, Atomic64 value) {
  __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
}

inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {
  __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}

inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
  __tsan_atomic64_store(ptr, value, __tsan_memory_order_release);
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64 *ptr) {
  return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
}

inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) {
  return __tsan_atomic64_load(ptr, __tsan_memory_order_acquire);
}

inline Atomic64 Release_Load(volatile const Atomic64 *ptr) {
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
  return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 cmp = old_value;
  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_acquire, __tsan_memory_order_acquire);
  return cmp;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64 *ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 cmp = old_value;
  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_release, __tsan_memory_order_relaxed);
  return cmp;
}

inline void MemoryBarrier() {
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}

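// MemoryBarrier() is a full (sequentially consistent) fence, equivalent to
// std::atomic_thread_fence(std::memory_order_seq_cst). A classic use is
// publishing data through plain NoBarrier operations. Hedged sketch
// ("g_data", "g_ready", and "payload" are hypothetical):
//
//   NoBarrier_Store(&g_data, payload);
//   MemoryBarrier();              // order the data store before the flag
//   NoBarrier_Store(&g_ready, 1);
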
}  // namespace base::subtle
}  // namespace base

#undef ATOMICOPS_COMPILER_BARRIER

#endif  // BASE_ATOMICOPS_INTERNALS_TSAN_H_