ipc/chromium/src/base/atomicops_internals_x86_macosx.h

changeset 0:6474c204b198
// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file is an internal atomic implementation, use base/atomicops.h instead.

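//
// A minimal usage sketch (illustrative only: the refcount variable and the
// AddRef/ReleaseRef helpers below are not part of this file, and callers are
// expected to include base/atomicops.h, which pulls in this header on x86
// Mac builds):
//
//   #include "base/atomicops.h"
//
//   static base::subtle::Atomic32 refcount = 1;
//
//   static void AddRef() {
//     base::subtle::NoBarrier_AtomicIncrement(&refcount, 1);
//   }
//
//   static bool ReleaseRef() {
//     // The Barrier variant publishes earlier writes before a possible
//     // delete; AtomicIncrement returns the new value, so zero means the
//     // last reference was dropped.
//     return base::subtle::Barrier_AtomicIncrement(&refcount, -1) == 0;
//   }
//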
#ifndef BASE_ATOMICOPS_INTERNALS_X86_MACOSX_H_
#define BASE_ATOMICOPS_INTERNALS_X86_MACOSX_H_

#include <libkern/OSAtomic.h>

namespace base {
namespace subtle {

inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 prev_value;
  do {
    if (OSAtomicCompareAndSwap32(old_value, new_value,
                                 const_cast<Atomic32*>(ptr))) {
      return old_value;
    }
    // The CAS failed.  Reread the value: if it still differs from old_value,
    // report it as the previous value; if it has raced back to old_value,
    // retry the swap.
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr,
                                         Atomic32 new_value) {
  Atomic32 old_value;
  // Retry the CAS until new_value is installed over the value just read,
  // which makes the read/CAS pair behave as an atomic exchange.
  do {
    old_value = *ptr;
  } while (!OSAtomicCompareAndSwap32(old_value, new_value,
                                     const_cast<Atomic32*>(ptr)));
  return old_value;
}

inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32 *ptr,
                                          Atomic32 increment) {
  return OSAtomicAdd32(increment, const_cast<Atomic32*>(ptr));
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32 *ptr,
                                        Atomic32 increment) {
  return OSAtomicAdd32Barrier(increment, const_cast<Atomic32*>(ptr));
}

inline void MemoryBarrier() {
  OSMemoryBarrier();
}

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 prev_value;
  do {
    if (OSAtomicCompareAndSwap32Barrier(old_value, new_value,
                                        const_cast<Atomic32*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  // The lib kern interface does not distinguish between
  // Acquire and Release memory barriers; they are equivalent.
  return Acquire_CompareAndSwap(ptr, old_value, new_value);
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) {
  MemoryBarrier();
  *ptr = value;
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

inline Atomic32 Acquire_Load(volatile const Atomic32 *ptr) {
  Atomic32 value = *ptr;
  MemoryBarrier();
  return value;
}

inline Atomic32 Release_Load(volatile const Atomic32 *ptr) {
  MemoryBarrier();
  return *ptr;
}

#ifdef __LP64__

// 64-bit implementation on 64-bit platform

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  Atomic64 prev_value;
  do {
    if (OSAtomicCompareAndSwap64(old_value, new_value,
                                 const_cast<Atomic64*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr,
                                         Atomic64 new_value) {
  Atomic64 old_value;
  do {
    old_value = *ptr;
  } while (!OSAtomicCompareAndSwap64(old_value, new_value,
                                     const_cast<Atomic64*>(ptr)));
  return old_value;
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64 *ptr,
                                          Atomic64 increment) {
  return OSAtomicAdd64(increment, const_cast<Atomic64*>(ptr));
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64 *ptr,
                                        Atomic64 increment) {
  return OSAtomicAdd64Barrier(increment, const_cast<Atomic64*>(ptr));
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 prev_value;
  do {
    if (OSAtomicCompareAndSwap64Barrier(old_value, new_value,
                                        const_cast<Atomic64*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64 *ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  // The lib kern interface does not distinguish between
  // Acquire and Release memory barriers; they are equivalent.
  return Acquire_CompareAndSwap(ptr, old_value, new_value);
}
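// Note: the "Barrier" flavours of the OSAtomic primitives used above insert a
// full memory barrier, which is stronger than either acquire or release
// semantics alone, so mapping both Acquire_ and Release_ variants onto them
// is conservative but correct.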

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
  MemoryBarrier();
  *ptr = value;
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return *ptr;
}

inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) {
  Atomic64 value = *ptr;
  MemoryBarrier();
  return value;
}

inline Atomic64 Release_Load(volatile const Atomic64 *ptr) {
  MemoryBarrier();
  return *ptr;
}

#endif  // defined(__LP64__)

// MacOS uses long for intptr_t, so AtomicWord and Atomic32 are always
// different types on the Mac, even when they are the same size.  We need to
// explicitly cast from AtomicWord to Atomic32/64 to implement the AtomicWord
// interface.
#ifdef __LP64__
#define AtomicWordCastType Atomic64
#else
#define AtomicWordCastType Atomic32
#endif
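// Illustration (a sketch, not compiled here): on a 32-bit Mac build
// AtomicWord is long while Atomic32 is int32_t, so even though the two have
// the same size they are distinct types, and the wrappers below are needed
// for overload resolution:
//
//   AtomicWord counter = 0;
//   // Resolves to the AtomicWord overload below, which reinterpret_casts the
//   // pointer to AtomicWordCastType* before calling the OSAtomic-based
//   // implementation above.
//   base::subtle::NoBarrier_AtomicIncrement(&counter, 1);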

inline AtomicWord NoBarrier_CompareAndSwap(volatile AtomicWord* ptr,
                                           AtomicWord old_value,
                                           AtomicWord new_value) {
  return NoBarrier_CompareAndSwap(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr),
      old_value, new_value);
}

inline AtomicWord NoBarrier_AtomicExchange(volatile AtomicWord* ptr,
                                           AtomicWord new_value) {
  return NoBarrier_AtomicExchange(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), new_value);
}

inline AtomicWord NoBarrier_AtomicIncrement(volatile AtomicWord* ptr,
                                            AtomicWord increment) {
  return NoBarrier_AtomicIncrement(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), increment);
}

inline AtomicWord Barrier_AtomicIncrement(volatile AtomicWord* ptr,
                                          AtomicWord increment) {
  return Barrier_AtomicIncrement(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), increment);
}

inline AtomicWord Acquire_CompareAndSwap(volatile AtomicWord* ptr,
                                         AtomicWord old_value,
                                         AtomicWord new_value) {
  return base::subtle::Acquire_CompareAndSwap(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr),
      old_value, new_value);
}

inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr,
                                         AtomicWord old_value,
                                         AtomicWord new_value) {
  return base::subtle::Release_CompareAndSwap(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr),
      old_value, new_value);
}

inline void NoBarrier_Store(volatile AtomicWord *ptr, AtomicWord value) {
  NoBarrier_Store(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
}

inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) {
  return base::subtle::Acquire_Store(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
}

inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) {
  return base::subtle::Release_Store(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
}

inline AtomicWord NoBarrier_Load(volatile const AtomicWord *ptr) {
  return NoBarrier_Load(
      reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
}

inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) {
  return base::subtle::Acquire_Load(
      reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
}

inline AtomicWord Release_Load(volatile const AtomicWord* ptr) {
  return base::subtle::Release_Load(
      reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
}

#undef AtomicWordCastType

}   // namespace base::subtle
}   // namespace base

#endif  // BASE_ATOMICOPS_INTERNALS_X86_MACOSX_H_
