security/sandbox/chromium/base/atomicops_internals_x86_msvc.h

Wed, 31 Dec 2014 06:09:35 +0100

author
Michael Schloh von Bennewitz <michael@schloh.com>
date
Wed, 31 Dec 2014 06:09:35 +0100
changeset 0
6474c204b198
permissions
-rw-r--r--

Cloned from upstream origin tor-browser at tag tor-browser-31.3.0esr-4.5-1-build1,
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f, for hacking purposes.

     1 // Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
     2 // Use of this source code is governed by a BSD-style license that can be
     3 // found in the LICENSE file.
     5 // This file is an internal atomic implementation, use base/atomicops.h instead.
     7 #ifndef BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
     8 #define BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
    10 #include <windows.h>
    12 #if defined(ARCH_CPU_64_BITS)
    13 // windows.h #defines this (only on x64). This causes problems because the
     14 // public API also uses MemoryBarrier as the public name for this fence. So, on
    15 // X64, undef it, and call its documented
    16 // (http://msdn.microsoft.com/en-us/library/windows/desktop/ms684208.aspx)
    17 // implementation directly.
    18 #undef MemoryBarrier
    19 #endif
    21 namespace base {
    22 namespace subtle {
    24 inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
    25                                          Atomic32 old_value,
    26                                          Atomic32 new_value) {
    27   LONG result = InterlockedCompareExchange(
    28       reinterpret_cast<volatile LONG*>(ptr),
    29       static_cast<LONG>(new_value),
    30       static_cast<LONG>(old_value));
    31   return static_cast<Atomic32>(result);
    32 }
    34 inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
    35                                          Atomic32 new_value) {
    36   LONG result = InterlockedExchange(
    37       reinterpret_cast<volatile LONG*>(ptr),
    38       static_cast<LONG>(new_value));
    39   return static_cast<Atomic32>(result);
    40 }
    42 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
    43                                         Atomic32 increment) {
    44   return InterlockedExchangeAdd(
    45       reinterpret_cast<volatile LONG*>(ptr),
    46       static_cast<LONG>(increment)) + increment;
    47 }
    49 inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
    50                                           Atomic32 increment) {
    51   return Barrier_AtomicIncrement(ptr, increment);
    52 }
    54 #if !(defined(_MSC_VER) && _MSC_VER >= 1400)
    55 #error "We require at least vs2005 for MemoryBarrier"
    56 #endif
    57 inline void MemoryBarrier() {
    58 #if defined(ARCH_CPU_64_BITS)
    59   // See #undef and note at the top of this file.
    60   __faststorefence();
    61 #else
    62   // We use MemoryBarrier from WinNT.h
    63   ::MemoryBarrier();
    64 #endif
    65 }
    67 inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
    68                                        Atomic32 old_value,
    69                                        Atomic32 new_value) {
    70   return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
    71 }
    73 inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
    74                                        Atomic32 old_value,
    75                                        Atomic32 new_value) {
    76   return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
    77 }
    79 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
    80   *ptr = value;
    81 }
    83 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
    84   NoBarrier_AtomicExchange(ptr, value);
    85               // acts as a barrier in this implementation
    86 }
    88 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
    89   *ptr = value; // works w/o barrier for current Intel chips as of June 2005
    90   // See comments in Atomic64 version of Release_Store() below.
    91 }
    93 inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
    94   return *ptr;
    95 }
    97 inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
    98   Atomic32 value = *ptr;
    99   return value;
   100 }
   102 inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
   103   MemoryBarrier();
   104   return *ptr;
   105 }
   107 #if defined(_WIN64)
   109 // 64-bit low-level operations on 64-bit platform.
   111 COMPILE_ASSERT(sizeof(Atomic64) == sizeof(PVOID), atomic_word_is_atomic);
   113 inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
   114                                          Atomic64 old_value,
   115                                          Atomic64 new_value) {
   116   PVOID result = InterlockedCompareExchangePointer(
   117     reinterpret_cast<volatile PVOID*>(ptr),
   118     reinterpret_cast<PVOID>(new_value), reinterpret_cast<PVOID>(old_value));
   119   return reinterpret_cast<Atomic64>(result);
   120 }
   122 inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
   123                                          Atomic64 new_value) {
   124   PVOID result = InterlockedExchangePointer(
   125     reinterpret_cast<volatile PVOID*>(ptr),
   126     reinterpret_cast<PVOID>(new_value));
   127   return reinterpret_cast<Atomic64>(result);
   128 }
   130 inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
   131                                         Atomic64 increment) {
   132   return InterlockedExchangeAdd64(
   133       reinterpret_cast<volatile LONGLONG*>(ptr),
   134       static_cast<LONGLONG>(increment)) + increment;
   135 }
   137 inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
   138                                           Atomic64 increment) {
   139   return Barrier_AtomicIncrement(ptr, increment);
   140 }
   142 inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
   143   *ptr = value;
   144 }
   146 inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
   147   NoBarrier_AtomicExchange(ptr, value);
   148               // acts as a barrier in this implementation
   149 }
   151 inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
   152   *ptr = value; // works w/o barrier for current Intel chips as of June 2005
   154   // When new chips come out, check:
   155   //  IA-32 Intel Architecture Software Developer's Manual, Volume 3:
   156   //  System Programming Guide, Chatper 7: Multiple-processor management,
   157   //  Section 7.2, Memory Ordering.
   158   // Last seen at:
   159   //   http://developer.intel.com/design/pentium4/manuals/index_new.htm
   160 }
   162 inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
   163   return *ptr;
   164 }
   166 inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
   167   Atomic64 value = *ptr;
   168   return value;
   169 }
   171 inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
   172   MemoryBarrier();
   173   return *ptr;
   174 }
   176 inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
   177                                        Atomic64 old_value,
   178                                        Atomic64 new_value) {
   179   return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
   180 }
   182 inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
   183                                        Atomic64 old_value,
   184                                        Atomic64 new_value) {
   185   return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
   186 }
   189 #endif  // defined(_WIN64)
   191 }  // namespace base::subtle
   192 }  // namespace base
   194 #endif  // BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_

mercurial