ipc/chromium/src/base/atomicops.h

changeset 0:6474c204b198
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/ipc/chromium/src/base/atomicops.h	Wed Dec 31 06:09:35 2014 +0100
     1.3 @@ -0,0 +1,149 @@
     1.4 +// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
     1.5 +// Use of this source code is governed by a BSD-style license that can be
     1.6 +// found in the LICENSE file.
     1.7 +
     1.8 +// For atomic operations on reference counts, see atomic_refcount.h.
     1.9 +// For atomic operations on sequence numbers, see atomic_sequence_num.h.
    1.10 +
    1.11 +// The routines exported by this module are subtle.  If you use them, even if
    1.12 +// you get the code right, it will depend on careful reasoning about atomicity
    1.13 +// and memory ordering; it will be less readable, and harder to maintain.  If
    1.14 +// you plan to use these routines, you should have a good reason, such as solid
    1.15 +// evidence that performance would otherwise suffer, or there being no
    1.16 +// alternative.  You should assume only properties explicitly guaranteed by the
    1.17 +// specifications in this file.  You are almost certainly _not_ writing code
    1.18 +// just for the x86; if you assume x86 semantics, x86 hardware bugs and
     1.19 +// implementations on other architectures will cause your code to break.  If you
    1.20 +// do not know what you are doing, avoid these routines, and use a Mutex.
    1.21 +//
    1.22 +// It is incorrect to make direct assignments to/from an atomic variable.
    1.23 +// You should use one of the Load or Store routines.  The NoBarrier
    1.24 +// versions are provided when no barriers are needed:
    1.25 +//   NoBarrier_Store()
    1.26 +//   NoBarrier_Load()
     1.27 +// Although there is currently no compiler enforcement, you are encouraged
    1.28 +// to use these.
    1.29 +//
    1.30 +
    1.31 +#ifndef BASE_ATOMICOPS_H_
    1.32 +#define BASE_ATOMICOPS_H_
    1.33 +
    1.34 +#include "base/basictypes.h"
    1.35 +#include "base/port.h"
    1.36 +
    1.37 +namespace base {
    1.38 +namespace subtle {
    1.39 +
    1.40 +// Bug 1308991.  We need this for /Wp64, to mark it safe for AtomicWord casting.
    1.41 +#ifndef OS_WIN
    1.42 +#define __w64
    1.43 +#endif
    1.44 +typedef __w64 int32_t Atomic32;
    1.45 +#ifdef ARCH_CPU_64_BITS
    1.46 +typedef int64_t Atomic64;
    1.47 +#endif
    1.48 +
    1.49 +// Use AtomicWord for a machine-sized pointer.  It will use the Atomic32 or
    1.50 +// Atomic64 routines below, depending on your architecture.
    1.51 +#ifdef OS_OPENBSD
    1.52 +#ifdef ARCH_CPU_64_BITS
    1.53 +typedef Atomic64 AtomicWord;
    1.54 +#else
    1.55 +typedef Atomic32 AtomicWord;
    1.56 +#endif // ARCH_CPU_64_BITS
    1.57 +#else
    1.58 +typedef intptr_t AtomicWord;
    1.59 +#endif // OS_OPENBSD
    1.60 +
    1.61 +// Atomically execute:
    1.62 +//      result = *ptr;
    1.63 +//      if (*ptr == old_value)
    1.64 +//        *ptr = new_value;
    1.65 +//      return result;
    1.66 +//
    1.67 +// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
    1.68 +// Always return the old value of "*ptr"
    1.69 +//
    1.70 +// This routine implies no memory barriers.
    1.71 +Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
    1.72 +                                  Atomic32 old_value,
    1.73 +                                  Atomic32 new_value);
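
A compare-and-swap like this is normally driven from a retry loop: reload on failure and try again. A minimal sketch, assuming this header is included and using made-up names (UpdateMax, max_seen) that are not part of the header:

#include "base/atomicops.h"

// Raises *max_seen to |candidate| if it is larger, retrying until the CAS
// either succeeds or the stored value is already >= candidate.
void UpdateMax(volatile base::subtle::Atomic32* max_seen,
               base::subtle::Atomic32 candidate) {
  base::subtle::Atomic32 current = base::subtle::NoBarrier_Load(max_seen);
  while (candidate > current) {
    base::subtle::Atomic32 prev =
        base::subtle::NoBarrier_CompareAndSwap(max_seen, current, candidate);
    if (prev == current)
      return;         // Our value was installed.
    current = prev;   // Lost the race; retry against the value that won.
  }
}
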
    1.74 +
    1.75 +// Atomically store new_value into *ptr, returning the previous value held in
    1.76 +// *ptr.  This routine implies no memory barriers.
    1.77 +Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value);
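
A typical no-barrier use of the exchange is handing a value off exactly once, e.g. reading and resetting a statistics counter in one step. Sketch only; the function name is an assumption:

// Returns the events recorded since the last call and resets the counter
// atomically, so no event is ever reported twice or lost.
base::subtle::Atomic32 DrainEventCount(
    volatile base::subtle::Atomic32* counter) {
  return base::subtle::NoBarrier_AtomicExchange(counter, 0);
}
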
    1.78 +
    1.79 +// Atomically increment *ptr by "increment".  Returns the new value of
    1.80 +// *ptr with the increment applied.  This routine implies no memory barriers.
    1.81 +Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, Atomic32 increment);
    1.82 +
    1.83 +Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
    1.84 +                                 Atomic32 increment);
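
The two increment flavours roughly correspond to a plain statistics counter, where no ordering is needed, versus a reference-count decrement, where the barrier keeps the last owner's writes visible before the object is freed (similar in spirit to how the companion atomic_refcount.h uses it). An illustrative sketch, not part of this header:

// Statistics counter: only the final sum matters, so no barrier is needed.
void RecordEvent(volatile base::subtle::Atomic32* events) {
  base::subtle::NoBarrier_AtomicIncrement(events, 1);
}

// Reference-count drop: returns true when this call released the last
// reference; the barrier orders prior writes before the count reaches zero.
bool ReleaseRef(volatile base::subtle::Atomic32* refcount) {
  return base::subtle::Barrier_AtomicIncrement(refcount, -1) == 0;
}
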
    1.85 +
     1.86 +// The following lower-level operations are typically useful only to people
    1.87 +// implementing higher-level synchronization operations like spinlocks,
    1.88 +// mutexes, and condition-variables.  They combine CompareAndSwap(), a load, or
    1.89 +// a store with appropriate memory-ordering instructions.  "Acquire" operations
    1.90 +// ensure that no later memory access can be reordered ahead of the operation.
    1.91 +// "Release" operations ensure that no previous memory access can be reordered
    1.92 +// after the operation.  "Barrier" operations have both "Acquire" and "Release"
     1.93 +// semantics.  A MemoryBarrier() has "Barrier" semantics, but does no memory
    1.94 +// access.
    1.95 +Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
    1.96 +                                Atomic32 old_value,
    1.97 +                                Atomic32 new_value);
    1.98 +Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
    1.99 +                                Atomic32 old_value,
   1.100 +                                Atomic32 new_value);
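
As the comment above says, these are building blocks for higher-level primitives; for instance, a bare-bones spinlock can be sketched as below (illustrative only, production code should use a proper mutex):

// 0 = free, 1 = held.  The acquire on lock and the release on unlock keep
// the critical section from being reordered outside the lock.
void SpinAcquire(volatile base::subtle::Atomic32* lock) {
  while (base::subtle::Acquire_CompareAndSwap(lock, 0, 1) != 0) {
    // Busy-wait; a real lock would yield or pause the CPU here.
  }
}

void SpinRelease(volatile base::subtle::Atomic32* lock) {
  base::subtle::Release_Store(lock, 0);
}
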
   1.101 +
   1.102 +void MemoryBarrier();
   1.103 +void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value);
   1.104 +void Acquire_Store(volatile Atomic32* ptr, Atomic32 value);
   1.105 +void Release_Store(volatile Atomic32* ptr, Atomic32 value);
   1.106 +
   1.107 +Atomic32 NoBarrier_Load(volatile const Atomic32* ptr);
   1.108 +Atomic32 Acquire_Load(volatile const Atomic32* ptr);
   1.109 +Atomic32 Release_Load(volatile const Atomic32* ptr);
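
The acquire/release load/store pairs support the usual publication pattern: write the payload, then Release_Store a flag; a reader that observes the flag via Acquire_Load is guaranteed to see the payload. A sketch with assumed names (g_payload, g_ready):

int g_payload = 0;                    // Ordinary data being published.
base::subtle::Atomic32 g_ready = 0;   // 0 = not yet published, 1 = published.

void Publish(int value) {
  g_payload = value;                          // 1. Write the data.
  base::subtle::Release_Store(&g_ready, 1);   // 2. Then publish the flag.
}

bool TryConsume(int* out) {
  if (base::subtle::Acquire_Load(&g_ready) == 0)
    return false;             // Nothing published yet.
  *out = g_payload;           // Safe: this acquire pairs with the release above.
  return true;
}
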
   1.110 +
   1.111 +// 64-bit atomic operations (only available on 64-bit processors).
   1.112 +#ifdef ARCH_CPU_64_BITS
   1.113 +Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
   1.114 +                                  Atomic64 old_value,
   1.115 +                                  Atomic64 new_value);
   1.116 +Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value);
   1.117 +Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
   1.118 +Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
   1.119 +
   1.120 +Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
   1.121 +                                Atomic64 old_value,
   1.122 +                                Atomic64 new_value);
   1.123 +Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
   1.124 +                                Atomic64 old_value,
   1.125 +                                Atomic64 new_value);
   1.126 +void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value);
   1.127 +void Acquire_Store(volatile Atomic64* ptr, Atomic64 value);
   1.128 +void Release_Store(volatile Atomic64* ptr, Atomic64 value);
   1.129 +Atomic64 NoBarrier_Load(volatile const Atomic64* ptr);
   1.130 +Atomic64 Acquire_Load(volatile const Atomic64* ptr);
   1.131 +Atomic64 Release_Load(volatile const Atomic64* ptr);
    1.132 +#endif  // ARCH_CPU_64_BITS
   1.133 +
   1.134 +}  // namespace base::subtle
   1.135 +}  // namespace base
   1.136 +
   1.137 +// Include our platform specific implementation.
   1.138 +#if defined(OS_WIN) && defined(ARCH_CPU_X86_FAMILY)
   1.139 +#include "base/atomicops_internals_x86_msvc.h"
   1.140 +#elif defined(OS_MACOSX) && defined(ARCH_CPU_X86_FAMILY)
   1.141 +#include "base/atomicops_internals_x86_macosx.h"
   1.142 +#elif defined(COMPILER_GCC) && defined(ARCH_CPU_X86_FAMILY)
   1.143 +#include "base/atomicops_internals_x86_gcc.h"
   1.144 +#elif defined(COMPILER_GCC) && defined(ARCH_CPU_ARM_FAMILY)
   1.145 +#include "base/atomicops_internals_arm_gcc.h"
   1.146 +#elif defined(COMPILER_GCC) && defined(ARCH_CPU_MIPS)
   1.147 +#include "base/atomicops_internals_mips_gcc.h"
   1.148 +#else
   1.149 +#include "base/atomicops_internals_mutex.h"
   1.150 +#endif
   1.151 +
   1.152 +#endif  // BASE_ATOMICOPS_H_
