security/sandbox/chromium/base/atomicops_internals_x86_msvc.h

changeset 0:6474c204b198
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/security/sandbox/chromium/base/atomicops_internals_x86_msvc.h	Wed Dec 31 06:09:35 2014 +0100
@@ -0,0 +1,194 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is an internal atomic implementation, use base/atomicops.h instead.
+
+#ifndef BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
+#define BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
+
+#include <windows.h>
+
+#if defined(ARCH_CPU_64_BITS)
+// windows.h #defines this (only on x64). This causes problems because the
+// public API also uses MemoryBarrier as the public name for this fence. So, on
+// X64, undef it, and call its documented
+// (http://msdn.microsoft.com/en-us/library/windows/desktop/ms684208.aspx)
+// implementation directly.
+#undef MemoryBarrier
+#endif
+
+namespace base {
+namespace subtle {
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+                                         Atomic32 old_value,
+                                         Atomic32 new_value) {
+  LONG result = InterlockedCompareExchange(
+      reinterpret_cast<volatile LONG*>(ptr),
+      static_cast<LONG>(new_value),
+      static_cast<LONG>(old_value));
+  return static_cast<Atomic32>(result);
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+                                         Atomic32 new_value) {
+  LONG result = InterlockedExchange(
+      reinterpret_cast<volatile LONG*>(ptr),
+      static_cast<LONG>(new_value));
+  return static_cast<Atomic32>(result);
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+                                        Atomic32 increment) {
+  return InterlockedExchangeAdd(
+      reinterpret_cast<volatile LONG*>(ptr),
+      static_cast<LONG>(increment)) + increment;
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+                                          Atomic32 increment) {
+  return Barrier_AtomicIncrement(ptr, increment);
+}
+
+#if !(defined(_MSC_VER) && _MSC_VER >= 1400)
+#error "We require at least vs2005 for MemoryBarrier"
+#endif
+inline void MemoryBarrier() {
+#if defined(ARCH_CPU_64_BITS)
+  // See #undef and note at the top of this file.
+  __faststorefence();
+#else
+  // We use MemoryBarrier from WinNT.h
+  ::MemoryBarrier();
+#endif
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+  NoBarrier_AtomicExchange(ptr, value);
+              // acts as a barrier in this implementation
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value; // works w/o barrier for current Intel chips as of June 2005
+  // See comments in Atomic64 version of Release_Store() below.
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+  return *ptr;
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+  Atomic32 value = *ptr;
+  return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+
+#if defined(_WIN64)
+
+// 64-bit low-level operations on 64-bit platform.
+
+COMPILE_ASSERT(sizeof(Atomic64) == sizeof(PVOID), atomic_word_is_atomic);
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+                                         Atomic64 old_value,
+                                         Atomic64 new_value) {
+  PVOID result = InterlockedCompareExchangePointer(
+    reinterpret_cast<volatile PVOID*>(ptr),
+    reinterpret_cast<PVOID>(new_value), reinterpret_cast<PVOID>(old_value));
+  return reinterpret_cast<Atomic64>(result);
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+                                         Atomic64 new_value) {
+  PVOID result = InterlockedExchangePointer(
+    reinterpret_cast<volatile PVOID*>(ptr),
+    reinterpret_cast<PVOID>(new_value));
+  return reinterpret_cast<Atomic64>(result);
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+                                        Atomic64 increment) {
+  return InterlockedExchangeAdd64(
+      reinterpret_cast<volatile LONGLONG*>(ptr),
+      static_cast<LONGLONG>(increment)) + increment;
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+                                          Atomic64 increment) {
+  return Barrier_AtomicIncrement(ptr, increment);
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+  *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+  NoBarrier_AtomicExchange(ptr, value);
+              // acts as a barrier in this implementation
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+  *ptr = value; // works w/o barrier for current Intel chips as of June 2005
+
+  // When new chips come out, check:
+  //  IA-32 Intel Architecture Software Developer's Manual, Volume 3:
+//  System Programming Guide, Chapter 7: Multiple-processor management,
+//  Section 7.2, Memory Ordering.
+// Last seen at:
+//   http://developer.intel.com/design/pentium4/manuals/index_new.htm
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+  return *ptr;
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+  Atomic64 value = *ptr;
+  return value;
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+
+#endif  // defined(_WIN64)
+
+}  // namespace base::subtle
+}  // namespace base
+
+#endif  // BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
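
For context, the functions added above are the MSVC/x86 backend of the base/atomicops.h API that the header's top comment points callers to. Below is a minimal sketch, not part of the changeset, of the intended acquire/release pairing: a writer publishes a payload with Release_Store, and a reader that observes the flag via Acquire_Load is guaranteed to also observe the payload. The g_payload/g_ready globals and the Publish/TryConsume helpers are illustrative names only.

// Illustrative sketch only; assumes the public wrapper base/atomicops.h,
// which this file implements, provides Atomic32 and the functions below.
#include "base/atomicops.h"

namespace {

base::subtle::Atomic32 g_payload = 0;  // data written before the flag
base::subtle::Atomic32 g_ready = 0;    // publication flag

// Writer thread: store the payload, then release-store the flag so that a
// reader which observes g_ready == 1 also observes the payload store.
void Publish(base::subtle::Atomic32 value) {
  base::subtle::NoBarrier_Store(&g_payload, value);
  base::subtle::Release_Store(&g_ready, 1);
}

// Reader thread: acquire-load the flag; if it is set, the payload written
// before the matching Release_Store is visible and safe to read.
bool TryConsume(base::subtle::Atomic32* out) {
  if (base::subtle::Acquire_Load(&g_ready) == 0)
    return false;
  *out = base::subtle::NoBarrier_Load(&g_payload);
  return true;
}

}  // namespace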
