// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file is an internal atomic implementation for compiler-based
// ThreadSanitizer. Use base/atomicops.h instead.

#ifndef BASE_ATOMICOPS_INTERNALS_TSAN_H_
#define BASE_ATOMICOPS_INTERNALS_TSAN_H_

#include "base/base_export.h"

// This struct is not part of the public API of this module; clients may not
// use it. (However, it's exported via BASE_EXPORT because clients implicitly
// do use it at link time by inlining these functions.)
// Features of this x86 CPU. Values may not be correct before main() is run,
// but are set conservatively.
struct AtomicOps_x86CPUFeatureStruct {
  bool has_amd_lock_mb_bug;  // Processor has AMD memory-barrier bug; do lfence
                             // after acquire compare-and-swap.
  bool has_sse2;             // Processor has SSE2.
};
BASE_EXPORT extern struct AtomicOps_x86CPUFeatureStruct
    AtomicOps_Internalx86CPUFeatures;

#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")

namespace base {
namespace subtle {

#ifndef TSAN_INTERFACE_ATOMIC_H
#define TSAN_INTERFACE_ATOMIC_H

#ifdef __cplusplus
extern "C" {
#endif

typedef char  __tsan_atomic8;
typedef short __tsan_atomic16;  // NOLINT
typedef int   __tsan_atomic32;
typedef long  __tsan_atomic64;  // NOLINT

#if defined(__SIZEOF_INT128__) \
    || (__clang_major__ * 100 + __clang_minor__ >= 302)
typedef __int128 __tsan_atomic128;
#define __TSAN_HAS_INT128 1
#else
typedef char     __tsan_atomic128;
#define __TSAN_HAS_INT128 0
#endif

typedef enum {
  __tsan_memory_order_relaxed,
  __tsan_memory_order_consume,
  __tsan_memory_order_acquire,
  __tsan_memory_order_release,
  __tsan_memory_order_acq_rel,
  __tsan_memory_order_seq_cst,
} __tsan_memory_order;

__tsan_atomic8 __tsan_atomic8_load(const volatile __tsan_atomic8 *a,
    __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_load(const volatile __tsan_atomic16 *a,
    __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32 *a,
    __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64 *a,
    __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_load(const volatile __tsan_atomic128 *a,
    __tsan_memory_order mo);

void __tsan_atomic8_store(volatile __tsan_atomic8 *a, __tsan_atomic8 v,
    __tsan_memory_order mo);
void __tsan_atomic16_store(volatile __tsan_atomic16 *a, __tsan_atomic16 v,
    __tsan_memory_order mo);
void __tsan_atomic32_store(volatile __tsan_atomic32 *a, __tsan_atomic32 v,
    __tsan_memory_order mo);
void __tsan_atomic64_store(volatile __tsan_atomic64 *a, __tsan_atomic64 v,
    __tsan_memory_order mo);
void __tsan_atomic128_store(volatile __tsan_atomic128 *a, __tsan_atomic128 v,
    __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8 *a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_exchange(volatile __tsan_atomic16 *a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32 *a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64 *a,
    __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_exchange(volatile __tsan_atomic128 *a,
    __tsan_atomic128 v, __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8 *a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_add(volatile __tsan_atomic16 *a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32 *a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64 *a,
    __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_fetch_add(volatile __tsan_atomic128 *a,
    __tsan_atomic128 v, __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8 *a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_and(volatile __tsan_atomic16 *a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32 *a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64 *a,
    __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_fetch_and(volatile __tsan_atomic128 *a,
    __tsan_atomic128 v, __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8 *a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_or(volatile __tsan_atomic16 *a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32 *a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64 *a,
    __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_fetch_or(volatile __tsan_atomic128 *a,
    __tsan_atomic128 v, __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8 *a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_xor(volatile __tsan_atomic16 *a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32 *a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64 *a,
    __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_fetch_xor(volatile __tsan_atomic128 *a,
    __tsan_atomic128 v, __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_fetch_nand(volatile __tsan_atomic8 *a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_nand(volatile __tsan_atomic16 *a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_nand(volatile __tsan_atomic32 *a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_nand(volatile __tsan_atomic64 *a,
    __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_fetch_nand(volatile __tsan_atomic128 *a,
    __tsan_atomic128 v, __tsan_memory_order mo);

int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8 *a,
    __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16 *a,
    __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32 *a,
    __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64 *a,
    __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic128_compare_exchange_weak(volatile __tsan_atomic128 *a,
    __tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);

int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8 *a,
    __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16 *a,
    __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32 *a,
    __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64 *a,
    __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic128_compare_exchange_strong(volatile __tsan_atomic128 *a,
    __tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);

__tsan_atomic8 __tsan_atomic8_compare_exchange_val(
    volatile __tsan_atomic8 *a, __tsan_atomic8 c, __tsan_atomic8 v,
    __tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic16 __tsan_atomic16_compare_exchange_val(
    volatile __tsan_atomic16 *a, __tsan_atomic16 c, __tsan_atomic16 v,
    __tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic32 __tsan_atomic32_compare_exchange_val(
    volatile __tsan_atomic32 *a, __tsan_atomic32 c, __tsan_atomic32 v,
    __tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic64 __tsan_atomic64_compare_exchange_val(
    volatile __tsan_atomic64 *a, __tsan_atomic64 c, __tsan_atomic64 v,
    __tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic128 __tsan_atomic128_compare_exchange_val(
    volatile __tsan_atomic128 *a, __tsan_atomic128 c, __tsan_atomic128 v,
    __tsan_memory_order mo, __tsan_memory_order fail_mo);

void __tsan_atomic_thread_fence(__tsan_memory_order mo);
void __tsan_atomic_signal_fence(__tsan_memory_order mo);

#ifdef __cplusplus
}  // extern "C"
#endif

#endif  // #ifndef TSAN_INTERFACE_ATOMIC_H

inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 cmp = old_value;
  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_relaxed, __tsan_memory_order_relaxed);
  return cmp;
}

inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr,
                                         Atomic32 new_value) {
  return __tsan_atomic32_exchange(ptr, new_value,
      __tsan_memory_order_relaxed);
}

inline Atomic32 Acquire_AtomicExchange(volatile Atomic32 *ptr,
                                       Atomic32 new_value) {
  return __tsan_atomic32_exchange(ptr, new_value,
      __tsan_memory_order_acquire);
}

inline Atomic32 Release_AtomicExchange(volatile Atomic32 *ptr,
                                       Atomic32 new_value) {
  return __tsan_atomic32_exchange(ptr, new_value,
      __tsan_memory_order_release);
}

inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32 *ptr,
                                          Atomic32 increment) {
  return increment + __tsan_atomic32_fetch_add(ptr, increment,
      __tsan_memory_order_relaxed);
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32 *ptr,
                                        Atomic32 increment) {
  return increment + __tsan_atomic32_fetch_add(ptr, increment,
      __tsan_memory_order_acq_rel);
}

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 cmp = old_value;
  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_acquire, __tsan_memory_order_acquire);
  return cmp;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 cmp = old_value;
  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_release, __tsan_memory_order_relaxed);
  return cmp;
}

inline void NoBarrier_Store(volatile Atomic32 *ptr, Atomic32 value) {
  __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
}

inline void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value) {
  __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}

inline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) {
  __tsan_atomic32_store(ptr, value, __tsan_memory_order_release);
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32 *ptr) {
  return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
}

inline Atomic32 Acquire_Load(volatile const Atomic32 *ptr) {
  return __tsan_atomic32_load(ptr, __tsan_memory_order_acquire);
}

inline Atomic32 Release_Load(volatile const Atomic32 *ptr) {
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
  return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
}

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  Atomic64 cmp = old_value;
  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_relaxed, __tsan_memory_order_relaxed);
  return cmp;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr,
                                         Atomic64 new_value) {
  return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_relaxed);
}

inline Atomic64 Acquire_AtomicExchange(volatile Atomic64 *ptr,
                                       Atomic64 new_value) {
  return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_acquire);
}

inline Atomic64 Release_AtomicExchange(volatile Atomic64 *ptr,
                                       Atomic64 new_value) {
  return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_release);
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64 *ptr,
                                          Atomic64 increment) {
  return increment + __tsan_atomic64_fetch_add(ptr, increment,
      __tsan_memory_order_relaxed);
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64 *ptr,
                                        Atomic64 increment) {
  return increment + __tsan_atomic64_fetch_add(ptr, increment,
      __tsan_memory_order_acq_rel);
}

inline void NoBarrier_Store(volatile Atomic64 *ptr, Atomic64 value) {
  __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
}

inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {
  __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}

inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
  __tsan_atomic64_store(ptr, value, __tsan_memory_order_release);
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64 *ptr) {
  return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
}

inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) {
  return __tsan_atomic64_load(ptr, __tsan_memory_order_acquire);
}

inline Atomic64 Release_Load(volatile const Atomic64 *ptr) {
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
  return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 cmp = old_value;
  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_acquire, __tsan_memory_order_acquire);
  return cmp;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64 *ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 cmp = old_value;
  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_release, __tsan_memory_order_relaxed);
  return cmp;
}

inline void MemoryBarrier() {
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}
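
// ---------------------------------------------------------------------------
// Illustrative sketch only -- not part of the upstream Chromium header.  It
// shows how client code (normally reached through base/atomicops.h) would
// typically pair Release_Store with Acquire_Load to publish a value from one
// thread to another.  The names ExamplePublish / ExampleTryConsume and the
// payload/ready-flag parameters are hypothetical.
// ---------------------------------------------------------------------------
inline void ExamplePublish(volatile Atomic32 *payload,
                           volatile Atomic32 *ready_flag,
                           Atomic32 value) {
  NoBarrier_Store(payload, value);  // Write the data first (no ordering yet).
  Release_Store(ready_flag, 1);     // Release-store the flag: the payload
                                    // write is ordered before the flag write.
}

inline bool ExampleTryConsume(volatile const Atomic32 *payload,
                              volatile const Atomic32 *ready_flag,
                              Atomic32 *value_out) {
  // The acquire-load pairs with the release-store above: if the flag is
  // observed as set, the payload write is guaranteed to be visible.
  if (Acquire_Load(ready_flag) == 0)
    return false;
  *value_out = NoBarrier_Load(payload);
  return true;
}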

}  // namespace base::subtle
}  // namespace base

#undef ATOMICOPS_COMPILER_BARRIER

#endif  // BASE_ATOMICOPS_INTERNALS_TSAN_H_