// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// For atomic operations on reference counts, see atomic_refcount.h.
// For atomic operations on sequence numbers, see atomic_sequence_num.h.

// The routines exported by this module are subtle.  If you use them, even if
// you get the code right, it will depend on careful reasoning about atomicity
// and memory ordering; it will be less readable, and harder to maintain.  If
// you plan to use these routines, you should have a good reason, such as solid
// evidence that performance would otherwise suffer, or there being no
// alternative.  You should assume only properties explicitly guaranteed by the
// specifications in this file.  You are almost certainly _not_ writing code
// just for the x86; if you assume x86 semantics, x86 hardware bugs and
// implementations on other architectures will cause your code to break.  If
// you do not know what you are doing, avoid these routines, and use a Mutex.
//
// It is incorrect to make direct assignments to/from an atomic variable.
// You should use one of the Load or Store routines.  The NoBarrier
// versions are provided when no barriers are needed:
//   NoBarrier_Store()
//   NoBarrier_Load()
// Although there is currently no compiler enforcement, you are encouraged
// to use these.
//

#ifndef BASE_ATOMICOPS_H_
#define BASE_ATOMICOPS_H_

#include "base/basictypes.h"
#include "base/port.h"

namespace base {
namespace subtle {

// Bug 1308991.  We need this for /Wp64, to mark it safe for AtomicWord casting.
#ifndef OS_WIN
#define __w64
#endif
typedef __w64 int32_t Atomic32;
#ifdef ARCH_CPU_64_BITS
typedef int64_t Atomic64;
#endif

// Use AtomicWord for a machine-sized pointer.  It will use the Atomic32 or
// Atomic64 routines below, depending on your architecture.
#ifdef OS_OPENBSD
#ifdef ARCH_CPU_64_BITS
typedef Atomic64 AtomicWord;
#else
typedef Atomic32 AtomicWord;
#endif  // ARCH_CPU_64_BITS
#else
typedef intptr_t AtomicWord;
#endif  // OS_OPENBSD

// Atomically execute:
//   result = *ptr;
//   if (*ptr == old_value)
//     *ptr = new_value;
//   return result;
//
// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
// Always return the old value of "*ptr".
//
// This routine implies no memory barriers.
Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                  Atomic32 old_value,
                                  Atomic32 new_value);

// Atomically store new_value into *ptr, returning the previous value held in
// *ptr.  This routine implies no memory barriers.
Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value);
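// An illustrative sketch of the compare-and-swap retry loop these routines
// enable.  It is not part of this interface; "IncrementIfBelow" and "limit"
// are hypothetical names invented for the example, and a real use would pick
// the Acquire/Release variants below as its ordering needs dictate.  The loop
// retries until the swap observes the same value it just read, i.e. until no
// other thread intervened between the load and the swap:
//
//   // Atomically increment *ptr, but never past "limit".  Returns the value
//   // actually held in *ptr afterwards.
//   Atomic32 IncrementIfBelow(volatile Atomic32* ptr, Atomic32 limit) {
//     for (;;) {
//       Atomic32 old_value = NoBarrier_Load(ptr);
//       if (old_value >= limit)
//         return old_value;  // Already at the limit; store nothing.
//       Atomic32 prev =
//           NoBarrier_CompareAndSwap(ptr, old_value, old_value + 1);
//       if (prev == old_value)
//         return old_value + 1;  // Swap succeeded.
//       // Another thread changed *ptr between the load and the swap; retry.
//     }
//   }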
// Atomically increment *ptr by "increment".  Returns the new value of
// *ptr with the increment applied.  This routine implies no memory barriers.
Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, Atomic32 increment);

Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                 Atomic32 increment);

// The following lower-level operations are typically useful only to people
// implementing higher-level synchronization operations like spinlocks,
// mutexes, and condition-variables.  They combine CompareAndSwap(), a load, or
// a store with appropriate memory-ordering instructions.  "Acquire" operations
// ensure that no later memory access can be reordered ahead of the operation.
// "Release" operations ensure that no previous memory access can be reordered
// after the operation.  "Barrier" operations have both "Acquire" and "Release"
// semantics.  A MemoryBarrier() has "Barrier" semantics, but does no memory
// access.
Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                Atomic32 old_value,
                                Atomic32 new_value);
Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                Atomic32 old_value,
                                Atomic32 new_value);

void MemoryBarrier();
void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value);
void Acquire_Store(volatile Atomic32* ptr, Atomic32 value);
void Release_Store(volatile Atomic32* ptr, Atomic32 value);

Atomic32 NoBarrier_Load(volatile const Atomic32* ptr);
Atomic32 Acquire_Load(volatile const Atomic32* ptr);
Atomic32 Release_Load(volatile const Atomic32* ptr);
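// A sketch of the message-passing idiom the Acquire/Release pairs above are
// designed for.  It is illustrative only; "g_payload" and "g_ready" are
// hypothetical globals invented for the example.  Per the semantics documented
// above, Release_Store() keeps the payload write from being reordered after
// the flag write, and Acquire_Load() keeps the payload read from being
// reordered ahead of the flag read, so a consumer that sees the flag set also
// sees the payload:
//
//   Atomic32 g_payload = 0;
//   Atomic32 g_ready = 0;
//
//   void Producer() {
//     NoBarrier_Store(&g_payload, 42);  // Write the data first...
//     Release_Store(&g_ready, 1);       // ...then publish the flag.
//   }
//
//   void Consumer() {
//     if (Acquire_Load(&g_ready)) {     // Observe the flag...
//       Atomic32 value = NoBarrier_Load(&g_payload);  // ...then safely read.
//     }
//   }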
// 64-bit atomic operations (only available on 64-bit processors).
#ifdef ARCH_CPU_64_BITS
Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                  Atomic64 old_value,
                                  Atomic64 new_value);
Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value);
Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);

Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                Atomic64 old_value,
                                Atomic64 new_value);
Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                Atomic64 old_value,
                                Atomic64 new_value);
void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value);
void Acquire_Store(volatile Atomic64* ptr, Atomic64 value);
void Release_Store(volatile Atomic64* ptr, Atomic64 value);
Atomic64 NoBarrier_Load(volatile const Atomic64* ptr);
Atomic64 Acquire_Load(volatile const Atomic64* ptr);
Atomic64 Release_Load(volatile const Atomic64* ptr);
#endif  // ARCH_CPU_64_BITS

}  // namespace subtle
}  // namespace base

// Include our platform-specific implementation.
#if defined(OS_WIN) && defined(ARCH_CPU_X86_FAMILY)
#include "base/atomicops_internals_x86_msvc.h"
#elif defined(OS_MACOSX) && defined(ARCH_CPU_X86_FAMILY)
#include "base/atomicops_internals_x86_macosx.h"
#elif defined(COMPILER_GCC) && defined(ARCH_CPU_X86_FAMILY)
#include "base/atomicops_internals_x86_gcc.h"
#elif defined(COMPILER_GCC) && defined(ARCH_CPU_ARM_FAMILY)
#include "base/atomicops_internals_arm_gcc.h"
#elif defined(COMPILER_GCC) && defined(ARCH_CPU_MIPS)
#include "base/atomicops_internals_mips_gcc.h"
#else
#include "base/atomicops_internals_mutex.h"
#endif

#endif  // BASE_ATOMICOPS_H_