/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

/*
** PR Atomic operations
*/

#include "pratom.h"
#include "primpl.h"

#include <string.h>
#include <stdlib.h>  /* for getenv() and atoi() */

/*
 * The following is a fallback that emulates atomic operations with
 * locks, for platforms that lack native atomic operations. A platform
 * that has atomic operations should define the macro
 * _PR_HAVE_ATOMIC_OPS, in which case none of this is compiled in.
 */

#if !defined(_PR_HAVE_ATOMIC_OPS)

#if defined(_PR_PTHREADS) && !defined(_PR_DCETHREADS)
/*
 * PR_AtomicDecrement() is used in NSPR's thread-specific data
 * destructor. Because thread-specific data destructors may be
 * invoked after a PR_Cleanup() call, we need an implementation
 * of the atomic routines that doesn't require NSPR to be initialized.
 */

/*
 * We use a set of locks for all the emulated atomic operations.
 * Hashing on the address of the integer to be locked spreads the
 * operations across the locks and lessens contention between threads.
 *
 * The number of atomic locks can be set with the environment variable
 * NSPR_ATOMIC_HASH_LOCKS.
 */

/*
 * The lock count must be a power of 2.
 */
#define DEFAULT_ATOMIC_LOCKS 16  /* must be in sync with the number of
                                  * initializers below */
#define MAX_ATOMIC_LOCKS (4 * 1024)

static pthread_mutex_t static_atomic_locks[DEFAULT_ATOMIC_LOCKS] = {
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER };

#ifdef DEBUG
static PRInt32 static_hash_lock_counts[DEFAULT_ATOMIC_LOCKS];
static PRInt32 *hash_lock_counts = static_hash_lock_counts;
#endif

static PRUint32 num_atomic_locks = DEFAULT_ATOMIC_LOCKS;
static pthread_mutex_t *atomic_locks = static_atomic_locks;
static PRUint32 atomic_hash_mask = DEFAULT_ATOMIC_LOCKS - 1;

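/*
 * Map the address of the target integer to a lock index: shifting the
 * address right by 2 drops the low bits (a PRInt32 is normally 4-byte
 * aligned, so they carry no information), XOR-ing in the address
 * shifted by 8 mixes in higher-order bits, and the mask keeps the
 * result within the power-of-2 lock count.
 */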
#define _PR_HASH_FOR_LOCK(ptr) \
    ((PRUint32) (((PRUptrdiff) (ptr) >> 2) ^ \
                 ((PRUptrdiff) (ptr) >> 8)) & \
     atomic_hash_mask)

void _PR_MD_INIT_ATOMIC(void)
{
    char *eval;
    int index;

    PR_ASSERT(PR_FloorLog2(MAX_ATOMIC_LOCKS) ==
              PR_CeilingLog2(MAX_ATOMIC_LOCKS));

    PR_ASSERT(PR_FloorLog2(DEFAULT_ATOMIC_LOCKS) ==
              PR_CeilingLog2(DEFAULT_ATOMIC_LOCKS));

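    /*
     * Honor NSPR_ATOMIC_HASH_LOCKS when it requests something other than
     * the default: clamp the count to [1, MAX_ATOMIC_LOCKS] and round it
     * down to a power of 2 so that the mask arithmetic stays valid.
     */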
    if (((eval = getenv("NSPR_ATOMIC_HASH_LOCKS")) != NULL) &&
        ((num_atomic_locks = atoi(eval)) != DEFAULT_ATOMIC_LOCKS)) {

        if (num_atomic_locks > MAX_ATOMIC_LOCKS)
            num_atomic_locks = MAX_ATOMIC_LOCKS;
        else if (num_atomic_locks < 1)
            num_atomic_locks = 1;
        else {
            num_atomic_locks = PR_FloorLog2(num_atomic_locks);
            num_atomic_locks = 1L << num_atomic_locks;
        }
        atomic_locks = (pthread_mutex_t *) PR_Malloc(sizeof(pthread_mutex_t) *
                                                     num_atomic_locks);
        if (atomic_locks) {
            for (index = 0; index < num_atomic_locks; index++) {
                if (pthread_mutex_init(&atomic_locks[index], NULL)) {
                    PR_DELETE(atomic_locks);
                    atomic_locks = NULL;
                    break;
                }
            }
        }
#ifdef DEBUG
        if (atomic_locks) {
            hash_lock_counts = PR_CALLOC(num_atomic_locks * sizeof(PRInt32));
            if (hash_lock_counts == NULL) {
                PR_DELETE(atomic_locks);
                atomic_locks = NULL;
            }
        }
#endif
        if (atomic_locks == NULL) {
            /*
             * Use statically allocated locks
             */
            atomic_locks = static_atomic_locks;
            num_atomic_locks = DEFAULT_ATOMIC_LOCKS;
#ifdef DEBUG
            hash_lock_counts = static_hash_lock_counts;
#endif
        }
        atomic_hash_mask = num_atomic_locks - 1;
    }
    PR_ASSERT(PR_FloorLog2(num_atomic_locks) ==
              PR_CeilingLog2(num_atomic_locks));
}

PRInt32
_PR_MD_ATOMIC_INCREMENT(PRInt32 *val)
{
    PRInt32 rv;
    PRInt32 idx = _PR_HASH_FOR_LOCK(val);

    pthread_mutex_lock(&atomic_locks[idx]);
    rv = ++(*val);
#ifdef DEBUG
    hash_lock_counts[idx]++;
#endif
    pthread_mutex_unlock(&atomic_locks[idx]);
    return rv;
}

PRInt32
_PR_MD_ATOMIC_ADD(PRInt32 *ptr, PRInt32 val)
{
    PRInt32 rv;
    PRInt32 idx = _PR_HASH_FOR_LOCK(ptr);

    pthread_mutex_lock(&atomic_locks[idx]);
    rv = ((*ptr) += val);
#ifdef DEBUG
    hash_lock_counts[idx]++;
#endif
    pthread_mutex_unlock(&atomic_locks[idx]);
    return rv;
}

PRInt32
_PR_MD_ATOMIC_DECREMENT(PRInt32 *val)
{
    PRInt32 rv;
    PRInt32 idx = _PR_HASH_FOR_LOCK(val);

    pthread_mutex_lock(&atomic_locks[idx]);
    rv = --(*val);
#ifdef DEBUG
    hash_lock_counts[idx]++;
#endif
    pthread_mutex_unlock(&atomic_locks[idx]);
    return rv;
}

PRInt32
_PR_MD_ATOMIC_SET(PRInt32 *val, PRInt32 newval)
{
    PRInt32 rv;
    PRInt32 idx = _PR_HASH_FOR_LOCK(val);

    pthread_mutex_lock(&atomic_locks[idx]);
    rv = *val;
    *val = newval;
#ifdef DEBUG
    hash_lock_counts[idx]++;
#endif
    pthread_mutex_unlock(&atomic_locks[idx]);
    return rv;
}
#else  /* _PR_PTHREADS && !_PR_DCETHREADS */
/*
 * We use a single lock for all the emulated atomic operations.
 * The lock contention should be acceptable.
 */
static PRLock *atomic_lock = NULL;

void _PR_MD_INIT_ATOMIC(void)
{
    if (atomic_lock == NULL) {
        atomic_lock = PR_NewLock();
    }
}
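
/*
 * Each operation below first checks _pr_initialized and performs the
 * implicit initialization if needed, so atomic_lock is guaranteed to
 * exist before PR_Lock() is called on it.
 */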

PRInt32
_PR_MD_ATOMIC_INCREMENT(PRInt32 *val)
{
    PRInt32 rv;

    if (!_pr_initialized) {
        _PR_ImplicitInitialization();
    }
    PR_Lock(atomic_lock);
    rv = ++(*val);
    PR_Unlock(atomic_lock);
    return rv;
}

PRInt32
_PR_MD_ATOMIC_ADD(PRInt32 *ptr, PRInt32 val)
{
    PRInt32 rv;

    if (!_pr_initialized) {
        _PR_ImplicitInitialization();
    }
    PR_Lock(atomic_lock);
    rv = ((*ptr) += val);
    PR_Unlock(atomic_lock);
    return rv;
}

PRInt32
_PR_MD_ATOMIC_DECREMENT(PRInt32 *val)
{
    PRInt32 rv;

    if (!_pr_initialized) {
        _PR_ImplicitInitialization();
    }
    PR_Lock(atomic_lock);
    rv = --(*val);
    PR_Unlock(atomic_lock);
    return rv;
}

PRInt32
_PR_MD_ATOMIC_SET(PRInt32 *val, PRInt32 newval)
{
    PRInt32 rv;

    if (!_pr_initialized) {
        _PR_ImplicitInitialization();
    }
    PR_Lock(atomic_lock);
    rv = *val;
    *val = newval;
    PR_Unlock(atomic_lock);
    return rv;
}
#endif  /* _PR_PTHREADS && !_PR_DCETHREADS */

#endif  /* !_PR_HAVE_ATOMIC_OPS */

void _PR_InitAtomic(void)
{
    _PR_MD_INIT_ATOMIC();
}

PR_IMPLEMENT(PRInt32)
PR_AtomicIncrement(PRInt32 *val)
{
    return _PR_MD_ATOMIC_INCREMENT(val);
}

PR_IMPLEMENT(PRInt32)
PR_AtomicDecrement(PRInt32 *val)
{
    return _PR_MD_ATOMIC_DECREMENT(val);
}

PR_IMPLEMENT(PRInt32)
PR_AtomicSet(PRInt32 *val, PRInt32 newval)
{
    return _PR_MD_ATOMIC_SET(val, newval);
}

PR_IMPLEMENT(PRInt32)
PR_AtomicAdd(PRInt32 *ptr, PRInt32 val)
{
    return _PR_MD_ATOMIC_ADD(ptr, val);
}
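
/*
 * Example (illustrative only, not part of the original file): the
 * increment, decrement, and add wrappers return the value after the
 * operation, while PR_AtomicSet() returns the previous value.
 *
 *   PRInt32 counter = 0;
 *   PR_AtomicIncrement(&counter);             // counter == 1, returns 1
 *   PR_AtomicAdd(&counter, 5);                // counter == 6, returns 6
 *   PR_AtomicDecrement(&counter);             // counter == 5, returns 5
 *   PRInt32 old = PR_AtomicSet(&counter, 0);  // old == 5, counter == 0
 */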

/*
 * For platforms that don't support a CAS (compare-and-swap) instruction
 * (or an equivalent), the stack operations are implemented with a PRLock.
 */
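
/*
 * Conversely, when _PR_HAVE_ATOMIC_CAS is defined, the lock-based
 * PR_StackPush()/PR_StackPop() below are compiled out (a CAS-based
 * implementation is expected to be provided elsewhere) and the stack's
 * prstk_lock is never created.
 */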

PR_IMPLEMENT(PRStack *)
PR_CreateStack(const char *stack_name)
{
    PRStack *stack;

    if (!_pr_initialized) {
        _PR_ImplicitInitialization();
    }

    if ((stack = PR_NEW(PRStack)) == NULL) {
        return NULL;
    }
    if (stack_name) {
        stack->prstk_name = (char *) PR_Malloc(strlen(stack_name) + 1);
        if (stack->prstk_name == NULL) {
            PR_DELETE(stack);
            return NULL;
        }
        strcpy(stack->prstk_name, stack_name);
    } else
        stack->prstk_name = NULL;

#ifndef _PR_HAVE_ATOMIC_CAS
    stack->prstk_lock = PR_NewLock();
    if (stack->prstk_lock == NULL) {
        PR_Free(stack->prstk_name);
        PR_DELETE(stack);
        return NULL;
    }
#endif /* !_PR_HAVE_ATOMIC_CAS */

    stack->prstk_head.prstk_elem_next = NULL;

    return stack;
}

PR_IMPLEMENT(PRStatus)
PR_DestroyStack(PRStack *stack)
{
    if (stack->prstk_head.prstk_elem_next != NULL) {
        PR_SetError(PR_INVALID_STATE_ERROR, 0);
        return PR_FAILURE;
    }

    if (stack->prstk_name)
        PR_Free(stack->prstk_name);
#ifndef _PR_HAVE_ATOMIC_CAS
    PR_DestroyLock(stack->prstk_lock);
#endif /* !_PR_HAVE_ATOMIC_CAS */
    PR_DELETE(stack);

    return PR_SUCCESS;
}

#ifndef _PR_HAVE_ATOMIC_CAS

PR_IMPLEMENT(void)
PR_StackPush(PRStack *stack, PRStackElem *stack_elem)
{
    PR_Lock(stack->prstk_lock);
    stack_elem->prstk_elem_next = stack->prstk_head.prstk_elem_next;
    stack->prstk_head.prstk_elem_next = stack_elem;
    PR_Unlock(stack->prstk_lock);
    return;
}

PR_IMPLEMENT(PRStackElem *)
PR_StackPop(PRStack *stack)
{
    PRStackElem *element;

    PR_Lock(stack->prstk_lock);
    element = stack->prstk_head.prstk_elem_next;
    if (element != NULL) {
        stack->prstk_head.prstk_elem_next = element->prstk_elem_next;
        element->prstk_elem_next = NULL;  /* debugging aid */
    }
    PR_Unlock(stack->prstk_lock);
    return element;
}
#endif /* !_PR_HAVE_ATOMIC_CAS */
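
/*
 * Illustrative usage sketch (not part of the original file). It assumes
 * the caller embeds a PRStackElem as the first member of its own node
 * type, so the pointer returned by PR_StackPop() can be cast back;
 * MyNode, "my-stack", and the data field are hypothetical names.
 *
 *   typedef struct MyNode {
 *       PRStackElem elem;   // must be first for the cast below to work
 *       int data;
 *   } MyNode;
 *
 *   PRStack *stack = PR_CreateStack("my-stack");
 *   MyNode *node = PR_NEW(MyNode);
 *   node->data = 42;
 *   PR_StackPush(stack, &node->elem);
 *   MyNode *top = (MyNode *) PR_StackPop(stack);  // top == node
 *   PR_Free(top);
 *   PR_DestroyStack(stack);  // succeeds only once the stack is empty
 */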