// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file is an internal atomic implementation, use base/atomicops.h instead.

#ifndef BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
#define BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_

#include <windows.h>

namespace base {
namespace subtle {

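// Atomic32 and, on 64-bit builds, Atomic64 are defined in base/atomicops.h,
// which is the header that includes this file.  Note that the Win32
// Interlocked* routines used below are documented to act as full memory
// barriers, so several of the "NoBarrier" operations here are in practice
// stronger than their names require.
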
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  LONG result = InterlockedCompareExchange(
      reinterpret_cast<volatile LONG*>(ptr),
      static_cast<LONG>(new_value),
      static_cast<LONG>(old_value));
  return static_cast<Atomic32>(result);
}

inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  LONG result = InterlockedExchange(
      reinterpret_cast<volatile LONG*>(ptr),
      static_cast<LONG>(new_value));
  return static_cast<Atomic32>(result);
}

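// InterlockedExchangeAdd returns the value the addend held before the
// addition, so the increment is added back in to yield the new value that
// Barrier_AtomicIncrement is expected to return.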
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  return InterlockedExchangeAdd(
      reinterpret_cast<volatile LONG*>(ptr),
      static_cast<LONG>(increment)) + increment;
}

inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  return Barrier_AtomicIncrement(ptr, increment);
}

#if defined(_MSC_VER) && (_MSC_VER < 1400)
#error "We require at least vs2005 for MemoryBarrier"
#endif
inline void MemoryBarrier() {
  // We use MemoryBarrier from WinNT.h
  ::MemoryBarrier();
}

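// The Interlocked compare-and-swap above is already a full barrier on x86,
// so the Acquire/Release variants can simply forward to the NoBarrier
// implementation.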
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  NoBarrier_AtomicExchange(ptr, value);
                            // acts as a barrier in this implementation
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;  // works w/o barrier for current Intel chips as of June 2005
  // See comments in Atomic64 version of Release_Store() below.
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

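// A plain volatile read is assumed to be sufficient here: x86 loads already
// have acquire semantics at the hardware level, and MSVC's traditional
// treatment of volatile keeps later accesses from moving above the read.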
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;
  return value;
}

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return *ptr;
}
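
// Illustrative sketch (not part of the atomicops API): the 32-bit operations
// above compose into a minimal spin lock.  Lock, Unlock and lock_word are
// hypothetical names used only to show intended usage.
//
//   Atomic32 lock_word = 0;
//   void Lock() {
//     // Spin until the previous value was 0, i.e. we took the lock.
//     while (Acquire_CompareAndSwap(&lock_word, 0, 1) != 0) {}
//   }
//   void Unlock() {
//     Release_Store(&lock_word, 0);  // publish writes made under the lock
//   }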

#if defined(_WIN64)

// 64-bit low-level operations on 64-bit platform.

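// COMPILE_ASSERT is Chromium's compile-time assertion macro; the check below
// confirms that Atomic64 is pointer-sized, which is what allows the
// pointer-flavored Interlocked routines to implement the 64-bit operations.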
COMPILE_ASSERT(sizeof(Atomic64) == sizeof(PVOID), atomic_word_is_atomic);

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  PVOID result = InterlockedCompareExchangePointer(
      reinterpret_cast<volatile PVOID*>(ptr),
      reinterpret_cast<PVOID>(new_value), reinterpret_cast<PVOID>(old_value));
  return reinterpret_cast<Atomic64>(result);
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  PVOID result = InterlockedExchangePointer(
      reinterpret_cast<volatile PVOID*>(ptr),
      reinterpret_cast<PVOID>(new_value));
  return reinterpret_cast<Atomic64>(result);
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  return InterlockedExchangeAdd64(
      reinterpret_cast<volatile LONGLONG*>(ptr),
      static_cast<LONGLONG>(increment)) + increment;
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  return Barrier_AtomicIncrement(ptr, increment);
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  NoBarrier_AtomicExchange(ptr, value);
                            // acts as a barrier in this implementation
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;  // works w/o barrier for current Intel chips as of June 2005

  // When new chips come out, check:
  //  IA-32 Intel Architecture Software Developer's Manual, Volume 3:
  //  System Programming Guide, Chapter 7: Multiple-processor management,
  //  Section 7.2, Memory Ordering.
  // Last seen at:
  //   http://developer.intel.com/design/pentium4/manuals/index_new.htm
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return *ptr;
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  Atomic64 value = *ptr;
  return value;
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  MemoryBarrier();
  return *ptr;
}

#endif  // defined(_WIN64)

}  // namespace base::subtle
}  // namespace base

#endif  // BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_