security/sandbox/chromium/base/atomicops_internals_tsan.h

changeset 0
6474c204b198
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/security/sandbox/chromium/base/atomicops_internals_tsan.h	Wed Dec 31 06:09:35 2014 +0100
     1.3 @@ -0,0 +1,378 @@
     1.4 +// Copyright (c) 2012 The Chromium Authors. All rights reserved.
     1.5 +// Use of this source code is governed by a BSD-style license that can be
     1.6 +// found in the LICENSE file.
     1.7 +
     1.8 +// This file is an internal atomic implementation for compiler-based
     1.9 +// ThreadSanitizer. Use base/atomicops.h instead.
    1.10 +
    1.11 +#ifndef BASE_ATOMICOPS_INTERNALS_TSAN_H_
    1.12 +#define BASE_ATOMICOPS_INTERNALS_TSAN_H_
    1.13 +
    1.14 +#include "base/base_export.h"
    1.15 +
    1.16 +// This struct is not part of the public API of this module; clients may not
    1.17 +// use it.  (However, it's exported via BASE_EXPORT because clients implicitly
    1.18 +// do use it at link time by inlining these functions.)
    1.19 +// Features of this x86.  Values may not be correct before main() is run,
    1.20 +// but are set conservatively.
// Describes quirks/capabilities of the host x86 CPU that the atomic-ops
// implementation consults.  Not part of this module's public API; it is
// exported (BASE_EXPORT) only because clients reference it implicitly at
// link time through the inlined functions below.
struct AtomicOps_x86CPUFeatureStruct {
  bool has_amd_lock_mb_bug;  // Processor has AMD memory-barrier bug; do lfence
                             // after acquire compare-and-swap.
  bool has_sse2;             // Processor has SSE2.
};
// The single global instance; defined in the accompanying .cc file and
// populated at startup (values are conservative before main() runs).
BASE_EXPORT extern struct AtomicOps_x86CPUFeatureStruct
    AtomicOps_Internalx86CPUFeatures;
    1.28 +
    1.29 +#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
    1.30 +
    1.31 +namespace base {
    1.32 +namespace subtle {
    1.33 +
    1.34 +#ifndef TSAN_INTERFACE_ATOMIC_H
    1.35 +#define TSAN_INTERFACE_ATOMIC_H
    1.36 +
    1.37 +#ifdef __cplusplus
    1.38 +extern "C" {
    1.39 +#endif
    1.40 +
    1.41 +typedef char  __tsan_atomic8;
    1.42 +typedef short __tsan_atomic16;  // NOLINT
    1.43 +typedef int   __tsan_atomic32;
    1.44 +typedef long  __tsan_atomic64;  // NOLINT
    1.45 +
    1.46 +#if defined(__SIZEOF_INT128__) \
    1.47 +    || (__clang_major__ * 100 + __clang_minor__ >= 302)
    1.48 +typedef __int128 __tsan_atomic128;
    1.49 +#define __TSAN_HAS_INT128 1
    1.50 +#else
    1.51 +typedef char     __tsan_atomic128;
    1.52 +#define __TSAN_HAS_INT128 0
    1.53 +#endif
    1.54 +
    1.55 +typedef enum {
    1.56 +  __tsan_memory_order_relaxed,
    1.57 +  __tsan_memory_order_consume,
    1.58 +  __tsan_memory_order_acquire,
    1.59 +  __tsan_memory_order_release,
    1.60 +  __tsan_memory_order_acq_rel,
    1.61 +  __tsan_memory_order_seq_cst,
    1.62 +} __tsan_memory_order;
    1.63 +
    1.64 +__tsan_atomic8 __tsan_atomic8_load(const volatile __tsan_atomic8 *a,
    1.65 +    __tsan_memory_order mo);
    1.66 +__tsan_atomic16 __tsan_atomic16_load(const volatile __tsan_atomic16 *a,
    1.67 +    __tsan_memory_order mo);
    1.68 +__tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32 *a,
    1.69 +    __tsan_memory_order mo);
    1.70 +__tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64 *a,
    1.71 +    __tsan_memory_order mo);
    1.72 +__tsan_atomic128 __tsan_atomic128_load(const volatile __tsan_atomic128 *a,
    1.73 +    __tsan_memory_order mo);
    1.74 +
    1.75 +void __tsan_atomic8_store(volatile __tsan_atomic8 *a, __tsan_atomic8 v,
    1.76 +    __tsan_memory_order mo);
    1.77 +void __tsan_atomic16_store(volatile __tsan_atomic16 *a, __tsan_atomic16 v,
    1.78 +    __tsan_memory_order mo);
    1.79 +void __tsan_atomic32_store(volatile __tsan_atomic32 *a, __tsan_atomic32 v,
    1.80 +    __tsan_memory_order mo);
    1.81 +void __tsan_atomic64_store(volatile __tsan_atomic64 *a, __tsan_atomic64 v,
    1.82 +    __tsan_memory_order mo);
    1.83 +void __tsan_atomic128_store(volatile __tsan_atomic128 *a, __tsan_atomic128 v,
    1.84 +    __tsan_memory_order mo);
    1.85 +
    1.86 +__tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8 *a,
    1.87 +    __tsan_atomic8 v, __tsan_memory_order mo);
    1.88 +__tsan_atomic16 __tsan_atomic16_exchange(volatile __tsan_atomic16 *a,
    1.89 +    __tsan_atomic16 v, __tsan_memory_order mo);
    1.90 +__tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32 *a,
    1.91 +    __tsan_atomic32 v, __tsan_memory_order mo);
    1.92 +__tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64 *a,
    1.93 +    __tsan_atomic64 v, __tsan_memory_order mo);
    1.94 +__tsan_atomic128 __tsan_atomic128_exchange(volatile __tsan_atomic128 *a,
    1.95 +    __tsan_atomic128 v, __tsan_memory_order mo);
    1.96 +
    1.97 +__tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8 *a,
    1.98 +    __tsan_atomic8 v, __tsan_memory_order mo);
    1.99 +__tsan_atomic16 __tsan_atomic16_fetch_add(volatile __tsan_atomic16 *a,
   1.100 +    __tsan_atomic16 v, __tsan_memory_order mo);
   1.101 +__tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32 *a,
   1.102 +    __tsan_atomic32 v, __tsan_memory_order mo);
   1.103 +__tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64 *a,
   1.104 +    __tsan_atomic64 v, __tsan_memory_order mo);
   1.105 +__tsan_atomic128 __tsan_atomic128_fetch_add(volatile __tsan_atomic128 *a,
   1.106 +    __tsan_atomic128 v, __tsan_memory_order mo);
   1.107 +
   1.108 +__tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8 *a,
   1.109 +    __tsan_atomic8 v, __tsan_memory_order mo);
   1.110 +__tsan_atomic16 __tsan_atomic16_fetch_and(volatile __tsan_atomic16 *a,
   1.111 +    __tsan_atomic16 v, __tsan_memory_order mo);
   1.112 +__tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32 *a,
   1.113 +    __tsan_atomic32 v, __tsan_memory_order mo);
   1.114 +__tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64 *a,
   1.115 +    __tsan_atomic64 v, __tsan_memory_order mo);
   1.116 +__tsan_atomic128 __tsan_atomic128_fetch_and(volatile __tsan_atomic128 *a,
   1.117 +    __tsan_atomic128 v, __tsan_memory_order mo);
   1.118 +
   1.119 +__tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8 *a,
   1.120 +    __tsan_atomic8 v, __tsan_memory_order mo);
   1.121 +__tsan_atomic16 __tsan_atomic16_fetch_or(volatile __tsan_atomic16 *a,
   1.122 +    __tsan_atomic16 v, __tsan_memory_order mo);
   1.123 +__tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32 *a,
   1.124 +    __tsan_atomic32 v, __tsan_memory_order mo);
   1.125 +__tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64 *a,
   1.126 +    __tsan_atomic64 v, __tsan_memory_order mo);
   1.127 +__tsan_atomic128 __tsan_atomic128_fetch_or(volatile __tsan_atomic128 *a,
   1.128 +    __tsan_atomic128 v, __tsan_memory_order mo);
   1.129 +
   1.130 +__tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8 *a,
   1.131 +    __tsan_atomic8 v, __tsan_memory_order mo);
   1.132 +__tsan_atomic16 __tsan_atomic16_fetch_xor(volatile __tsan_atomic16 *a,
   1.133 +    __tsan_atomic16 v, __tsan_memory_order mo);
   1.134 +__tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32 *a,
   1.135 +    __tsan_atomic32 v, __tsan_memory_order mo);
   1.136 +__tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64 *a,
   1.137 +    __tsan_atomic64 v, __tsan_memory_order mo);
   1.138 +__tsan_atomic128 __tsan_atomic128_fetch_xor(volatile __tsan_atomic128 *a,
   1.139 +    __tsan_atomic128 v, __tsan_memory_order mo);
   1.140 +
   1.141 +__tsan_atomic8 __tsan_atomic8_fetch_nand(volatile __tsan_atomic8 *a,
   1.142 +    __tsan_atomic8 v, __tsan_memory_order mo);
   1.143 +__tsan_atomic16 __tsan_atomic16_fetch_nand(volatile __tsan_atomic16 *a,
   1.144 +    __tsan_atomic16 v, __tsan_memory_order mo);
   1.145 +__tsan_atomic32 __tsan_atomic32_fetch_nand(volatile __tsan_atomic32 *a,
   1.146 +    __tsan_atomic32 v, __tsan_memory_order mo);
   1.147 +__tsan_atomic64 __tsan_atomic64_fetch_nand(volatile __tsan_atomic64 *a,
   1.148 +    __tsan_atomic64 v, __tsan_memory_order mo);
   1.149 +__tsan_atomic128 __tsan_atomic128_fetch_nand(volatile __tsan_atomic128 *a,
   1.150 +    __tsan_atomic128 v, __tsan_memory_order mo);
   1.151 +
   1.152 +int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8 *a,
   1.153 +    __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo,
   1.154 +    __tsan_memory_order fail_mo);
   1.155 +int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16 *a,
   1.156 +    __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo,
   1.157 +    __tsan_memory_order fail_mo);
   1.158 +int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32 *a,
   1.159 +    __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo,
   1.160 +    __tsan_memory_order fail_mo);
   1.161 +int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64 *a,
   1.162 +    __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo,
   1.163 +    __tsan_memory_order fail_mo);
   1.164 +int __tsan_atomic128_compare_exchange_weak(volatile __tsan_atomic128 *a,
   1.165 +    __tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo,
   1.166 +    __tsan_memory_order fail_mo);
   1.167 +
   1.168 +int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8 *a,
   1.169 +    __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo,
   1.170 +    __tsan_memory_order fail_mo);
   1.171 +int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16 *a,
   1.172 +    __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo,
   1.173 +    __tsan_memory_order fail_mo);
   1.174 +int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32 *a,
   1.175 +    __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo,
   1.176 +    __tsan_memory_order fail_mo);
   1.177 +int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64 *a,
   1.178 +    __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo,
   1.179 +    __tsan_memory_order fail_mo);
   1.180 +int __tsan_atomic128_compare_exchange_strong(volatile __tsan_atomic128 *a,
   1.181 +    __tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo,
   1.182 +    __tsan_memory_order fail_mo);
   1.183 +
   1.184 +__tsan_atomic8 __tsan_atomic8_compare_exchange_val(
   1.185 +    volatile __tsan_atomic8 *a, __tsan_atomic8 c, __tsan_atomic8 v,
   1.186 +    __tsan_memory_order mo, __tsan_memory_order fail_mo);
   1.187 +__tsan_atomic16 __tsan_atomic16_compare_exchange_val(
   1.188 +    volatile __tsan_atomic16 *a, __tsan_atomic16 c, __tsan_atomic16 v,
   1.189 +    __tsan_memory_order mo, __tsan_memory_order fail_mo);
   1.190 +__tsan_atomic32 __tsan_atomic32_compare_exchange_val(
   1.191 +    volatile __tsan_atomic32 *a, __tsan_atomic32 c, __tsan_atomic32 v,
   1.192 +    __tsan_memory_order mo, __tsan_memory_order fail_mo);
   1.193 +__tsan_atomic64 __tsan_atomic64_compare_exchange_val(
   1.194 +    volatile __tsan_atomic64 *a, __tsan_atomic64 c, __tsan_atomic64 v,
   1.195 +    __tsan_memory_order mo, __tsan_memory_order fail_mo);
   1.196 +__tsan_atomic128 __tsan_atomic128_compare_exchange_val(
   1.197 +    volatile __tsan_atomic128 *a, __tsan_atomic128 c, __tsan_atomic128 v,
   1.198 +    __tsan_memory_order mo, __tsan_memory_order fail_mo);
   1.199 +
   1.200 +void __tsan_atomic_thread_fence(__tsan_memory_order mo);
   1.201 +void __tsan_atomic_signal_fence(__tsan_memory_order mo);
   1.202 +
   1.203 +#ifdef __cplusplus
   1.204 +}  // extern "C"
   1.205 +#endif
   1.206 +
   1.207 +#endif  // #ifndef TSAN_INTERFACE_ATOMIC_H
   1.208 +
   1.209 +inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr,
   1.210 +                                  Atomic32 old_value,
   1.211 +                                  Atomic32 new_value) {
   1.212 +  Atomic32 cmp = old_value;
   1.213 +  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
   1.214 +      __tsan_memory_order_relaxed, __tsan_memory_order_relaxed);
   1.215 +  return cmp;
   1.216 +}
   1.217 +
   1.218 +inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr,
   1.219 +                                  Atomic32 new_value) {
   1.220 +  return __tsan_atomic32_exchange(ptr, new_value,
   1.221 +      __tsan_memory_order_relaxed);
   1.222 +}
   1.223 +
   1.224 +inline Atomic32 Acquire_AtomicExchange(volatile Atomic32 *ptr,
   1.225 +                                Atomic32 new_value) {
   1.226 +  return __tsan_atomic32_exchange(ptr, new_value,
   1.227 +      __tsan_memory_order_acquire);
   1.228 +}
   1.229 +
   1.230 +inline Atomic32 Release_AtomicExchange(volatile Atomic32 *ptr,
   1.231 +                                Atomic32 new_value) {
   1.232 +  return __tsan_atomic32_exchange(ptr, new_value,
   1.233 +      __tsan_memory_order_release);
   1.234 +}
   1.235 +
   1.236 +inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32 *ptr,
   1.237 +                                   Atomic32 increment) {
   1.238 +  return increment + __tsan_atomic32_fetch_add(ptr, increment,
   1.239 +      __tsan_memory_order_relaxed);
   1.240 +}
   1.241 +
   1.242 +inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32 *ptr,
   1.243 +                                 Atomic32 increment) {
   1.244 +  return increment + __tsan_atomic32_fetch_add(ptr, increment,
   1.245 +      __tsan_memory_order_acq_rel);
   1.246 +}
   1.247 +
   1.248 +inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr,
   1.249 +                                Atomic32 old_value,
   1.250 +                                Atomic32 new_value) {
   1.251 +  Atomic32 cmp = old_value;
   1.252 +  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
   1.253 +      __tsan_memory_order_acquire, __tsan_memory_order_acquire);
   1.254 +  return cmp;
   1.255 +}
   1.256 +
   1.257 +inline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr,
   1.258 +                                Atomic32 old_value,
   1.259 +                                Atomic32 new_value) {
   1.260 +  Atomic32 cmp = old_value;
   1.261 +  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
   1.262 +      __tsan_memory_order_release, __tsan_memory_order_relaxed);
   1.263 +  return cmp;
   1.264 +}
   1.265 +
   1.266 +inline void NoBarrier_Store(volatile Atomic32 *ptr, Atomic32 value) {
   1.267 +  __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
   1.268 +}
   1.269 +
   1.270 +inline void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value) {
   1.271 +  __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
   1.272 +  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
   1.273 +}
   1.274 +
   1.275 +inline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) {
   1.276 +  __tsan_atomic32_store(ptr, value, __tsan_memory_order_release);
   1.277 +}
   1.278 +
   1.279 +inline Atomic32 NoBarrier_Load(volatile const Atomic32 *ptr) {
   1.280 +  return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
   1.281 +}
   1.282 +
   1.283 +inline Atomic32 Acquire_Load(volatile const Atomic32 *ptr) {
   1.284 +  return __tsan_atomic32_load(ptr, __tsan_memory_order_acquire);
   1.285 +}
   1.286 +
   1.287 +inline Atomic32 Release_Load(volatile const Atomic32 *ptr) {
   1.288 +  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
   1.289 +  return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
   1.290 +}
   1.291 +
   1.292 +inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr,
   1.293 +                                      Atomic64 old_value,
   1.294 +                                      Atomic64 new_value) {
   1.295 +  Atomic64 cmp = old_value;
   1.296 +  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
   1.297 +      __tsan_memory_order_relaxed, __tsan_memory_order_relaxed);
   1.298 +  return cmp;
   1.299 +}
   1.300 +
   1.301 +inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr,
   1.302 +                                      Atomic64 new_value) {
   1.303 +  return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_relaxed);
   1.304 +}
   1.305 +
   1.306 +inline Atomic64 Acquire_AtomicExchange(volatile Atomic64 *ptr,
   1.307 +                                    Atomic64 new_value) {
   1.308 +  return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_acquire);
   1.309 +}
   1.310 +
   1.311 +inline Atomic64 Release_AtomicExchange(volatile Atomic64 *ptr,
   1.312 +                                    Atomic64 new_value) {
   1.313 +  return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_release);
   1.314 +}
   1.315 +
   1.316 +inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64 *ptr,
   1.317 +                                       Atomic64 increment) {
   1.318 +  return increment + __tsan_atomic64_fetch_add(ptr, increment,
   1.319 +      __tsan_memory_order_relaxed);
   1.320 +}
   1.321 +
   1.322 +inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64 *ptr,
   1.323 +                                     Atomic64 increment) {
   1.324 +  return increment + __tsan_atomic64_fetch_add(ptr, increment,
   1.325 +      __tsan_memory_order_acq_rel);
   1.326 +}
   1.327 +
   1.328 +inline void NoBarrier_Store(volatile Atomic64 *ptr, Atomic64 value) {
   1.329 +  __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
   1.330 +}
   1.331 +
   1.332 +inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {
   1.333 +  __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
   1.334 +  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
   1.335 +}
   1.336 +
   1.337 +inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
   1.338 +  __tsan_atomic64_store(ptr, value, __tsan_memory_order_release);
   1.339 +}
   1.340 +
   1.341 +inline Atomic64 NoBarrier_Load(volatile const Atomic64 *ptr) {
   1.342 +  return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
   1.343 +}
   1.344 +
   1.345 +inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) {
   1.346 +  return __tsan_atomic64_load(ptr, __tsan_memory_order_acquire);
   1.347 +}
   1.348 +
   1.349 +inline Atomic64 Release_Load(volatile const Atomic64 *ptr) {
   1.350 +  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
   1.351 +  return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
   1.352 +}
   1.353 +
   1.354 +inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr,
   1.355 +                                    Atomic64 old_value,
   1.356 +                                    Atomic64 new_value) {
   1.357 +  Atomic64 cmp = old_value;
   1.358 +  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
   1.359 +      __tsan_memory_order_acquire, __tsan_memory_order_acquire);
   1.360 +  return cmp;
   1.361 +}
   1.362 +
   1.363 +inline Atomic64 Release_CompareAndSwap(volatile Atomic64 *ptr,
   1.364 +                                    Atomic64 old_value,
   1.365 +                                    Atomic64 new_value) {
   1.366 +  Atomic64 cmp = old_value;
   1.367 +  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
   1.368 +      __tsan_memory_order_release, __tsan_memory_order_relaxed);
   1.369 +  return cmp;
   1.370 +}
   1.371 +
   1.372 +inline void MemoryBarrier() {
   1.373 +  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
   1.374 +}
   1.375 +
   1.376 +}  // namespace base::subtle
   1.377 +}  // namespace base
   1.378 +
   1.379 +#undef ATOMICOPS_COMPILER_BARRIER
   1.380 +
   1.381 +#endif  // BASE_ATOMICOPS_INTERNALS_TSAN_H_

mercurial