Wed, 31 Dec 2014 06:09:35 +0100
Cloned from upstream origin tor-browser at tag tor-browser-31.3.0esr-4.5-1-build1,
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f, for hacking purposes.
// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
5 // This file is an internal atomic implementation, use base/atomicops.h instead.
7 #ifndef BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
8 #define BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
10 #include <windows.h>
12 namespace base {
13 namespace subtle {
15 inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
16 Atomic32 old_value,
17 Atomic32 new_value) {
18 LONG result = InterlockedCompareExchange(
19 reinterpret_cast<volatile LONG*>(ptr),
20 static_cast<LONG>(new_value),
21 static_cast<LONG>(old_value));
22 return static_cast<Atomic32>(result);
23 }
25 inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
26 Atomic32 new_value) {
27 LONG result = InterlockedExchange(
28 reinterpret_cast<volatile LONG*>(ptr),
29 static_cast<LONG>(new_value));
30 return static_cast<Atomic32>(result);
31 }
33 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
34 Atomic32 increment) {
35 return InterlockedExchangeAdd(
36 reinterpret_cast<volatile LONG*>(ptr),
37 static_cast<LONG>(increment)) + increment;
38 }
40 inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
41 Atomic32 increment) {
42 return Barrier_AtomicIncrement(ptr, increment);
43 }
#if defined(_MSC_VER) && (_MSC_VER < 1400)
#error "We require at least vs2005 for MemoryBarrier"
#endif

// Full memory barrier. Forwards to MemoryBarrier from WinNT.h, which is
// only available from VS2005 on — hence the compiler-version check above.
inline void MemoryBarrier() {
  ::MemoryBarrier();
}
53 inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
54 Atomic32 old_value,
55 Atomic32 new_value) {
56 return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
57 }
59 inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
60 Atomic32 old_value,
61 Atomic32 new_value) {
62 return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
63 }
65 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
66 *ptr = value;
67 }
69 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
70 NoBarrier_AtomicExchange(ptr, value);
71 // acts as a barrier in this implementation
72 }
74 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
75 *ptr = value; // works w/o barrier for current Intel chips as of June 2005
76 // See comments in Atomic64 version of Release_Store() below.
77 }
79 inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
80 return *ptr;
81 }
83 inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
84 Atomic32 value = *ptr;
85 return value;
86 }
88 inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
89 MemoryBarrier();
90 return *ptr;
91 }
93 #if defined(_WIN64)
95 // 64-bit low-level operations on 64-bit platform.
97 COMPILE_ASSERT(sizeof(Atomic64) == sizeof(PVOID), atomic_word_is_atomic);
99 inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
100 Atomic64 old_value,
101 Atomic64 new_value) {
102 PVOID result = InterlockedCompareExchangePointer(
103 reinterpret_cast<volatile PVOID*>(ptr),
104 reinterpret_cast<PVOID>(new_value), reinterpret_cast<PVOID>(old_value));
105 return reinterpret_cast<Atomic64>(result);
106 }
108 inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
109 Atomic64 new_value) {
110 PVOID result = InterlockedExchangePointer(
111 reinterpret_cast<volatile PVOID*>(ptr),
112 reinterpret_cast<PVOID>(new_value));
113 return reinterpret_cast<Atomic64>(result);
114 }
116 inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
117 Atomic64 increment) {
118 return InterlockedExchangeAdd64(
119 reinterpret_cast<volatile LONGLONG*>(ptr),
120 static_cast<LONGLONG>(increment)) + increment;
121 }
123 inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
124 Atomic64 increment) {
125 return Barrier_AtomicIncrement(ptr, increment);
126 }
128 inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
129 Atomic64 old_value,
130 Atomic64 new_value) {
131 return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
132 }
134 inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
135 Atomic64 old_value,
136 Atomic64 new_value) {
137 return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
138 }
140 inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
141 *ptr = value;
142 }
144 inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
145 NoBarrier_AtomicExchange(ptr, value);
146 // acts as a barrier in this implementation
147 }
149 inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
150 *ptr = value; // works w/o barrier for current Intel chips as of June 2005
152 // When new chips come out, check:
153 // IA-32 Intel Architecture Software Developer's Manual, Volume 3:
154 // System Programming Guide, Chatper 7: Multiple-processor management,
155 // Section 7.2, Memory Ordering.
156 // Last seen at:
157 // http://developer.intel.com/design/pentium4/manuals/index_new.htm
158 }
160 inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
161 return *ptr;
162 }
164 inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
165 Atomic64 value = *ptr;
166 return value;
167 }
169 inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
170 MemoryBarrier();
171 return *ptr;
172 }
174 #endif // defined(_WIN64)
176 } // namespace base::subtle
177 } // namespace base
179 #endif // BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_