ipc/chromium/src/base/atomicops_internals_x86_msvc.h

changeset 0:6474c204b198
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/ipc/chromium/src/base/atomicops_internals_x86_msvc.h	Wed Dec 31 06:09:35 2014 +0100
@@ -0,0 +1,179 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is an internal atomic implementation, use base/atomicops.h instead.
+
+#ifndef BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
+#define BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
+
+#include <windows.h>
+
+namespace base {
+namespace subtle {
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+                                         Atomic32 old_value,
+                                         Atomic32 new_value) {
+  LONG result = InterlockedCompareExchange(
+      reinterpret_cast<volatile LONG*>(ptr),
+      static_cast<LONG>(new_value),
+      static_cast<LONG>(old_value));
+  return static_cast<Atomic32>(result);
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+                                         Atomic32 new_value) {
+  LONG result = InterlockedExchange(
+      reinterpret_cast<volatile LONG*>(ptr),
+      static_cast<LONG>(new_value));
+  return static_cast<Atomic32>(result);
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+                                        Atomic32 increment) {
+  return InterlockedExchangeAdd(
+      reinterpret_cast<volatile LONG*>(ptr),
+      static_cast<LONG>(increment)) + increment;
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+                                          Atomic32 increment) {
+  return Barrier_AtomicIncrement(ptr, increment);
+}
+
+#if defined(_MSC_VER) && (_MSC_VER < 1400)
+#error "We require at least vs2005 for MemoryBarrier"
+#endif
+inline void MemoryBarrier() {
+  // We use MemoryBarrier from WinNT.h
+  ::MemoryBarrier();
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+  NoBarrier_AtomicExchange(ptr, value);
+  // acts as a barrier in this implementation
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value; // works w/o barrier for current Intel chips as of June 2005
+  // See comments in Atomic64 version of Release_Store() below.
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+  return *ptr;
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+  Atomic32 value = *ptr;
+  return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+
+#if defined(_WIN64)
+
+// 64-bit low-level operations on 64-bit platform.
+
+COMPILE_ASSERT(sizeof(Atomic64) == sizeof(PVOID), atomic_word_is_atomic);
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+                                         Atomic64 old_value,
+                                         Atomic64 new_value) {
+  PVOID result = InterlockedCompareExchangePointer(
+    reinterpret_cast<volatile PVOID*>(ptr),
+    reinterpret_cast<PVOID>(new_value), reinterpret_cast<PVOID>(old_value));
+  return reinterpret_cast<Atomic64>(result);
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+                                         Atomic64 new_value) {
+  PVOID result = InterlockedExchangePointer(
+    reinterpret_cast<volatile PVOID*>(ptr),
+    reinterpret_cast<PVOID>(new_value));
+  return reinterpret_cast<Atomic64>(result);
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+                                        Atomic64 increment) {
+  return InterlockedExchangeAdd64(
+      reinterpret_cast<volatile LONGLONG*>(ptr),
+      static_cast<LONGLONG>(increment)) + increment;
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+                                          Atomic64 increment) {
+  return Barrier_AtomicIncrement(ptr, increment);
+}
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+  *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+  NoBarrier_AtomicExchange(ptr, value);
+  // acts as a barrier in this implementation
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+  *ptr = value; // works w/o barrier for current Intel chips as of June 2005
+
+  // When new chips come out, check:
+  //  IA-32 Intel Architecture Software Developer's Manual, Volume 3:
+  //  System Programming Guide, Chapter 7: Multiple-processor management,
+  //  Section 7.2, Memory Ordering.
+  // Last seen at:
+  //   http://developer.intel.com/design/pentium4/manuals/index_new.htm
+}
   1.162 +
   1.163 +inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
   1.164 +  return *ptr;
   1.165 +}
   1.166 +
   1.167 +inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
   1.168 +  Atomic64 value = *ptr;
   1.169 +  return value;
   1.170 +}
   1.171 +
   1.172 +inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
   1.173 +  MemoryBarrier();
   1.174 +  return *ptr;
   1.175 +}
   1.176 +
   1.177 +#endif  // defined(_WIN64)
   1.178 +
   1.179 +}  // namespace base::subtle
   1.180 +}  // namespace base
   1.181 +
   1.182 +#endif  // BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
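
Callers are expected to reach these primitives through base/atomicops.h rather than this internal header, per the comment at the top of the file; the names live in namespace base::subtle. A minimal sketch of the acquire/release pairing these functions are built for, assuming the usual publish/consume pattern (g_payload, g_ready, Producer and Consumer are illustrative names, not part of this file):

    #include "base/atomicops.h"

    using base::subtle::Atomic32;

    // Hypothetical shared state: the payload is published before the flag.
    static Atomic32 g_payload = 0;
    static Atomic32 g_ready = 0;

    void Producer() {
      base::subtle::NoBarrier_Store(&g_payload, 42);
      // Release_Store guarantees the payload write above is visible
      // before any thread can observe g_ready == 1.
      base::subtle::Release_Store(&g_ready, 1);
    }

    Atomic32 Consumer() {
      // Acquire_Load guarantees that once g_ready == 1 is seen, the
      // payload written before the matching Release_Store is visible too.
      while (base::subtle::Acquire_Load(&g_ready) == 0) {
        // Spin; a real consumer would yield or block instead.
      }
      return base::subtle::NoBarrier_Load(&g_payload);
    }

On this x86/MSVC port, both Release_Store and Acquire_Load compile to plain volatile accesses: x86 hardware already forbids the load and store reorderings that would break this pattern, and MSVC does not reorder across volatile accesses, which is why the function bodies above carry no explicit barrier.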
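The compare-and-swap functions return the value that was actually in memory, which is what makes lock-free retry loops possible: if the returned value differs from old_value, another thread raced in and the loop retries against what it observed. A sketch of such a loop (AtomicFetchMax is a hypothetical helper, not part of this file):

    #include "base/atomicops.h"

    using base::subtle::Atomic32;

    // Sets *ptr to the maximum of its current value and |candidate| and
    // returns the value observed before the update. Illustrative only.
    Atomic32 AtomicFetchMax(volatile Atomic32* ptr, Atomic32 candidate) {
      Atomic32 old_value = base::subtle::NoBarrier_Load(ptr);
      while (old_value < candidate) {
        Atomic32 observed = base::subtle::NoBarrier_CompareAndSwap(
            ptr, old_value, candidate);
        if (observed == old_value)
          break;  // The swap took effect.
        old_value = observed;  // Another thread won; retry against its value.
      }
      return old_value;
    }

Note that even the NoBarrier_ compare-and-swap and exchange variants compile to Interlocked* calls here, which act as full memory barriers on x86; the NoBarrier_ name only describes the weakest ordering that portable callers may rely on.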
