// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file is an internal atomic implementation, use base/atomicops.h instead.

#ifndef BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
#define BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_

#include <windows.h>

#if defined(ARCH_CPU_64_BITS)
// windows.h #defines MemoryBarrier (only on x64). This causes problems
// because the public API also uses MemoryBarrier as the public name for this
// fence. So, on x64, undef it here and call its documented
// (http://msdn.microsoft.com/en-us/library/windows/desktop/ms684208.aspx)
// implementation directly.
#undef MemoryBarrier
#endif

namespace base {
namespace subtle {

inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  LONG result = InterlockedCompareExchange(
      reinterpret_cast<volatile LONG*>(ptr),
      static_cast<LONG>(new_value),
      static_cast<LONG>(old_value));
  return static_cast<Atomic32>(result);
}
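
// Illustrative use, as a sketch only (|value| is a hypothetical shared
// Atomic32): atomically replace 0 with 1 and learn whether we won the race.
//   Atomic32 prev = NoBarrier_CompareAndSwap(&value, 0, 1);
//   // |prev| holds the old contents: 0 means the swap happened; anything
//   // else means another thread got there first and |value| was untouched.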

inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  LONG result = InterlockedExchange(
      reinterpret_cast<volatile LONG*>(ptr),
      static_cast<LONG>(new_value));
  return static_cast<Atomic32>(result);
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  return InterlockedExchangeAdd(
      reinterpret_cast<volatile LONG*>(ptr),
      static_cast<LONG>(increment)) + increment;
}
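
// Illustrative use, as a sketch only (|counter| is a hypothetical shared
// Atomic32). InterlockedExchangeAdd returns the pre-increment value, so the
// wrapper above adds |increment| once more to return the new value:
//   Atomic32 new_count = Barrier_AtomicIncrement(&counter, 1);
//   // |new_count| is the post-increment value; the LOCK'd add also acts as
//   // a full memory barrier on x86.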

inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  return Barrier_AtomicIncrement(ptr, increment);
}

#if !(defined(_MSC_VER) && _MSC_VER >= 1400)
#error "We require at least vs2005 for MemoryBarrier"
#endif
inline void MemoryBarrier() {
#if defined(ARCH_CPU_64_BITS)
  // See the #undef and note at the top of this file.
  __faststorefence();
#else
  // We use the MemoryBarrier macro from WinNT.h.
  ::MemoryBarrier();
#endif
}
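
// Illustrative use, as a sketch only (|flag_a| and |flag_b| are hypothetical
// shared Atomic32 values). Store-load reordering is the one reordering x86
// permits, and a full fence is what prevents it:
//   NoBarrier_Store(&flag_a, 1);
//   MemoryBarrier();  // The store above cannot pass the load below.
//   Atomic32 b = NoBarrier_Load(&flag_b);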

// The Interlocked* primitives used above are full memory barriers, so the
// acquire and release variants of CompareAndSwap need nothing beyond the
// barrier-free version.
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  NoBarrier_AtomicExchange(ptr, value);
  // The exchange above is a LOCK'd instruction and acts as a full barrier
  // in this implementation.
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;  // Works without a barrier for current Intel chips as of
                 // June 2005. See the comments in the Atomic64 version of
                 // Release_Store() below.
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  // A barrier-free load suffices here: x86 loads already have acquire
  // semantics at the hardware level, and VS2005+ (required by the #error
  // above) gives volatile reads acquire semantics at the compiler level.
  Atomic32 value = *ptr;
  return value;
}

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return *ptr;
}

#if defined(_WIN64)

// 64-bit low-level operations on 64-bit platform.

// The compare-and-swap and exchange operations below use the pointer-width
// Interlocked*Pointer functions, so Atomic64 must be exactly pointer-sized.
COMPILE_ASSERT(sizeof(Atomic64) == sizeof(PVOID), atomic_word_is_atomic);

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  PVOID result = InterlockedCompareExchangePointer(
      reinterpret_cast<volatile PVOID*>(ptr),
      reinterpret_cast<PVOID>(new_value),
      reinterpret_cast<PVOID>(old_value));
  return reinterpret_cast<Atomic64>(result);
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  PVOID result = InterlockedExchangePointer(
      reinterpret_cast<volatile PVOID*>(ptr),
      reinterpret_cast<PVOID>(new_value));
  return reinterpret_cast<Atomic64>(result);
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  return InterlockedExchangeAdd64(
      reinterpret_cast<volatile LONGLONG*>(ptr),
      static_cast<LONGLONG>(increment)) + increment;
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  return Barrier_AtomicIncrement(ptr, increment);
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  NoBarrier_AtomicExchange(ptr, value);
  // The exchange above is a LOCK'd instruction and acts as a full barrier
  // in this implementation.
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;  // Works without a barrier for current Intel chips as of
                 // June 2005.

  // When new chips come out, check:
  //   IA-32 Intel Architecture Software Developer's Manual, Volume 3:
  //   System Programming Guide, Chapter 7: Multiple-processor management,
  //   Section 7.2, Memory Ordering.
  // Last seen at:
  //   http://developer.intel.com/design/pentium4/manuals/index_new.htm
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return *ptr;
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  // See the comment in the Atomic32 version of Acquire_Load() above.
  Atomic64 value = *ptr;
  return value;
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  MemoryBarrier();
  return *ptr;
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

#endif  // defined(_WIN64)

}  // namespace subtle
}  // namespace base

#endif  // BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_