// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file is an internal atomic implementation; use base/atomicops.h instead.

#ifndef BASE_ATOMICOPS_INTERNALS_X86_MACOSX_H_
#define BASE_ATOMICOPS_INTERNALS_X86_MACOSX_H_

#include <libkern/OSAtomic.h>

namespace base {
namespace subtle {
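
// Atomically compares *ptr against old_value and, if they are equal, stores
// new_value. Returns old_value on success; otherwise returns the value that
// was observed at *ptr. The loop retries as long as a failed swap still
// observes old_value at *ptr.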
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 prev_value;
  do {
    if (OSAtomicCompareAndSwap32(old_value, new_value,
                                 const_cast<Atomic32*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}
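
// Atomically stores new_value into *ptr and returns the value that was there
// before, implemented as a compare-and-swap loop that retries until the swap
// succeeds against the most recently read value.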
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr,
                                         Atomic32 new_value) {
  Atomic32 old_value;
  do {
    old_value = *ptr;
  } while (!OSAtomicCompareAndSwap32(old_value, new_value,
                                     const_cast<Atomic32*>(ptr)));
  return old_value;
}
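
// Atomically adds increment to *ptr and returns the incremented (new) value.
// The Barrier variant below also acts as a memory barrier.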
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32 *ptr,
                                          Atomic32 increment) {
  return OSAtomicAdd32(increment, const_cast<Atomic32*>(ptr));
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32 *ptr,
                                        Atomic32 increment) {
  return OSAtomicAdd32Barrier(increment, const_cast<Atomic32*>(ptr));
}

inline void MemoryBarrier() {
  OSMemoryBarrier();
}
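
// Like NoBarrier_CompareAndSwap, but uses OSAtomicCompareAndSwap32Barrier,
// which also acts as a memory barrier.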
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 prev_value;
  do {
    if (OSAtomicCompareAndSwap32Barrier(old_value, new_value,
                                        const_cast<Atomic32*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  // The lib kern interface does not distinguish between Acquire and Release
  // memory barriers; they are equivalent.
  return Acquire_CompareAndSwap(ptr, old_value, new_value);
}
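
// The Store/Load variants below differ only in barrier placement:
//   Acquire_Store: store, then barrier.   Release_Store: barrier, then store.
//   Acquire_Load:  load, then barrier.    Release_Load:  barrier, then load.
//
// A minimal publish/consume sketch (hypothetical names; normally written
// against base/atomicops.h rather than this internal header, with `ready`
// an Atomic32): a producer fills a payload and then Release_Store()s a ready
// flag; a consumer Acquire_Load()s the flag before reading the payload.
//
//   // Producer:
//   payload = ComputePayload();              // plain write
//   base::subtle::Release_Store(&ready, 1);
//
//   // Consumer:
//   if (base::subtle::Acquire_Load(&ready) == 1)
//     Use(payload);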
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) {
  MemoryBarrier();
  *ptr = value;
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

inline Atomic32 Acquire_Load(volatile const Atomic32 *ptr) {
  Atomic32 value = *ptr;
  MemoryBarrier();
  return value;
}

inline Atomic32 Release_Load(volatile const Atomic32 *ptr) {
  MemoryBarrier();
  return *ptr;
}

#ifdef __LP64__

// 64-bit implementation on 64-bit platform
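// These mirror the Atomic32 operations above, using the 64-bit libkern
// primitives (OSAtomicCompareAndSwap64, OSAtomicAdd64 and their Barrier
// variants).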

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  Atomic64 prev_value;
  do {
    if (OSAtomicCompareAndSwap64(old_value, new_value,
                                 const_cast<Atomic64*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr,
                                         Atomic64 new_value) {
  Atomic64 old_value;
  do {
    old_value = *ptr;
  } while (!OSAtomicCompareAndSwap64(old_value, new_value,
                                     const_cast<Atomic64*>(ptr)));
  return old_value;
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64 *ptr,
                                          Atomic64 increment) {
  return OSAtomicAdd64(increment, const_cast<Atomic64*>(ptr));
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64 *ptr,
                                        Atomic64 increment) {
  return OSAtomicAdd64Barrier(increment, const_cast<Atomic64*>(ptr));
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 prev_value;
  do {
    if (OSAtomicCompareAndSwap64Barrier(old_value, new_value,
                                        const_cast<Atomic64*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64 *ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  // The lib kern interface does not distinguish between Acquire and Release
  // memory barriers; they are equivalent.
  return Acquire_CompareAndSwap(ptr, old_value, new_value);
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
  MemoryBarrier();
  *ptr = value;
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return *ptr;
}

inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) {
  Atomic64 value = *ptr;
  MemoryBarrier();
  return value;
}

inline Atomic64 Release_Load(volatile const Atomic64 *ptr) {
  MemoryBarrier();
  return *ptr;
}

#endif  // defined(__LP64__)

// MacOS uses long for intptr_t, so AtomicWord and Atomic32 are always
// distinct types on the Mac, even when they are the same size. We need to
// explicitly cast from AtomicWord to Atomic32/64 to implement the AtomicWord
// interface.
#ifdef __LP64__
#define AtomicWordCastType Atomic64
#else
#define AtomicWordCastType Atomic32
#endif
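
// The AtomicWord operations below simply forward to the Atomic32 or Atomic64
// implementations above, as selected by AtomicWordCastType.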
inline AtomicWord NoBarrier_CompareAndSwap(volatile AtomicWord* ptr,
                                           AtomicWord old_value,
                                           AtomicWord new_value) {
  return NoBarrier_CompareAndSwap(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr),
      old_value, new_value);
}

inline AtomicWord NoBarrier_AtomicExchange(volatile AtomicWord* ptr,
                                           AtomicWord new_value) {
  return NoBarrier_AtomicExchange(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), new_value);
}

inline AtomicWord NoBarrier_AtomicIncrement(volatile AtomicWord* ptr,
                                            AtomicWord increment) {
  return NoBarrier_AtomicIncrement(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), increment);
}

inline AtomicWord Barrier_AtomicIncrement(volatile AtomicWord* ptr,
                                          AtomicWord increment) {
  return Barrier_AtomicIncrement(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), increment);
}

inline AtomicWord Acquire_CompareAndSwap(volatile AtomicWord* ptr,
                                         AtomicWord old_value,
                                         AtomicWord new_value) {
  return base::subtle::Acquire_CompareAndSwap(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr),
      old_value, new_value);
}

inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr,
                                         AtomicWord old_value,
                                         AtomicWord new_value) {
  return base::subtle::Release_CompareAndSwap(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr),
      old_value, new_value);
}

inline void NoBarrier_Store(volatile AtomicWord *ptr, AtomicWord value) {
  NoBarrier_Store(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
}

inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) {
  return base::subtle::Acquire_Store(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
}

inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) {
  return base::subtle::Release_Store(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
}

inline AtomicWord NoBarrier_Load(volatile const AtomicWord *ptr) {
  return NoBarrier_Load(
      reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
}

inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) {
  return base::subtle::Acquire_Load(
      reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
}

inline AtomicWord Release_Load(volatile const AtomicWord* ptr) {
  return base::subtle::Release_Load(
      reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
}

#undef AtomicWordCastType

}  // namespace base::subtle
}  // namespace base

#endif  // BASE_ATOMICOPS_INTERNALS_X86_MACOSX_H_