Wed, 31 Dec 2014 06:09:35 +0100
Cloned upstream origin tor-browser at tor-browser-31.3.0esr-4.5-1-build1,
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f, for hacking purposes.
Below: NSPR's atomic-operations source, nsprpub/pr/src/misc/pratom.c.
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

/*
** PR Atomic operations
*/

#include "pratom.h"
#include "primpl.h"

#include <string.h>

/*
 * The following is a fallback implementation that emulates
 * atomic operations for platforms without atomic operations.
 * If a platform has atomic operations, it should define the
 * macro _PR_HAVE_ATOMIC_OPS, and the following will not be
 * compiled in.
 */
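For context: on platforms that do define _PR_HAVE_ATOMIC_OPS, the real implementations
come from platform-specific code, not from this file. A minimal sketch of what such a
native path could look like, assuming a GCC/Clang-style compiler with __atomic builtins
(illustrative only, not NSPR's actual machine-dependent code):

/* Illustrative sketch only; not part of NSPR. Assumes GCC/Clang __atomic builtins. */
static PRInt32 native_atomic_increment(PRInt32 *val)
{
    return __atomic_add_fetch(val, 1, __ATOMIC_SEQ_CST);       /* returns the new value */
}

static PRInt32 native_atomic_add(PRInt32 *ptr, PRInt32 val)
{
    return __atomic_add_fetch(ptr, val, __ATOMIC_SEQ_CST);     /* returns the new value */
}

static PRInt32 native_atomic_set(PRInt32 *val, PRInt32 newval)
{
    return __atomic_exchange_n(val, newval, __ATOMIC_SEQ_CST); /* returns the previous value */
}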

#if !defined(_PR_HAVE_ATOMIC_OPS)

#if defined(_PR_PTHREADS) && !defined(_PR_DCETHREADS)
/*
 * PR_AtomicDecrement() is used in NSPR's thread-specific data
 * destructor. Because thread-specific data destructors may be
 * invoked after a PR_Cleanup() call, we need an implementation
 * of the atomic routines that doesn't need NSPR to be initialized.
 */

/*
 * We use a set of locks for all the emulated atomic operations.
 * By hashing on the address of the integer to be locked the
 * contention between multiple threads should be lessened.
 *
 * The number of atomic locks can be set by the environment variable
 * NSPR_ATOMIC_HASH_LOCKS
 */
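The variable is read exactly once, in _PR_MD_INIT_ATOMIC() below, so it has to be set
before NSPR initializes. A minimal sketch of doing that from the program itself
(POSIX setenv; the surrounding program is hypothetical):

#include <stdlib.h>
#include "nspr.h"

int main(void)
{
    /* Must be set before the first NSPR call triggers initialization. */
    setenv("NSPR_ATOMIC_HASH_LOCKS", "64", 1);   /* ask for 64 hash locks */
    PR_Init(PR_USER_THREAD, PR_PRIORITY_NORMAL, 0);

    /* ... PR_AtomicIncrement() and friends now stripe over 64 locks
       (only relevant when the emulated path is compiled in) ... */

    return (PR_Cleanup() == PR_SUCCESS) ? 0 : 1;
}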

/*
 * lock counts should be a power of 2
 */
#define DEFAULT_ATOMIC_LOCKS    16        /* should be in sync with the number of
                                             initializers below */
#define MAX_ATOMIC_LOCKS        (4 * 1024)

static pthread_mutex_t static_atomic_locks[DEFAULT_ATOMIC_LOCKS] = {
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER };

#ifdef DEBUG
static PRInt32 static_hash_lock_counts[DEFAULT_ATOMIC_LOCKS];
static PRInt32 *hash_lock_counts = static_hash_lock_counts;
#endif

static PRUint32 num_atomic_locks = DEFAULT_ATOMIC_LOCKS;
static pthread_mutex_t *atomic_locks = static_atomic_locks;
static PRUint32 atomic_hash_mask = DEFAULT_ATOMIC_LOCKS - 1;

#define _PR_HASH_FOR_LOCK(ptr)                          \
        ((PRUint32) (((PRUptrdiff) (ptr) >> 2) ^        \
                     ((PRUptrdiff) (ptr) >> 8)) &       \
                     atomic_hash_mask)
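A worked example of the hash, assuming the default mask of 0xF (16 locks):

/*
 * Worked example with the default atomic_hash_mask of 0xF (16 locks):
 *   ptr = 0x1000:  (0x1000 >> 2) ^ (0x1000 >> 8) = 0x400 ^ 0x10 = 0x410, & 0xF -> bucket 0
 *   ptr = 0x1004:  (0x1004 >> 2) ^ (0x1004 >> 8) = 0x401 ^ 0x10 = 0x411, & 0xF -> bucket 1
 * so neighbouring PRInt32s normally hash to different locks, spreading contention.
 */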

void _PR_MD_INIT_ATOMIC()
{
    char *eval;
    int index;

    PR_ASSERT(PR_FloorLog2(MAX_ATOMIC_LOCKS) ==
              PR_CeilingLog2(MAX_ATOMIC_LOCKS));

    PR_ASSERT(PR_FloorLog2(DEFAULT_ATOMIC_LOCKS) ==
              PR_CeilingLog2(DEFAULT_ATOMIC_LOCKS));

    if (((eval = getenv("NSPR_ATOMIC_HASH_LOCKS")) != NULL) &&
        ((num_atomic_locks = atoi(eval)) != DEFAULT_ATOMIC_LOCKS)) {

        if (num_atomic_locks > MAX_ATOMIC_LOCKS)
            num_atomic_locks = MAX_ATOMIC_LOCKS;
        else if (num_atomic_locks < 1)
            num_atomic_locks = 1;
        else {
            num_atomic_locks = PR_FloorLog2(num_atomic_locks);
            num_atomic_locks = 1L << num_atomic_locks;
        }
        atomic_locks = (pthread_mutex_t *) PR_Malloc(sizeof(pthread_mutex_t) *
                                                     num_atomic_locks);
        if (atomic_locks) {
            for (index = 0; index < num_atomic_locks; index++) {
                if (pthread_mutex_init(&atomic_locks[index], NULL)) {
                    PR_DELETE(atomic_locks);
                    atomic_locks = NULL;
                    break;
                }
            }
        }
#ifdef DEBUG
        if (atomic_locks) {
            hash_lock_counts = PR_CALLOC(num_atomic_locks * sizeof(PRInt32));
            if (hash_lock_counts == NULL) {
                PR_DELETE(atomic_locks);
                atomic_locks = NULL;
            }
        }
#endif
        if (atomic_locks == NULL) {
            /*
             * Use statically allocated locks
             */
            atomic_locks = static_atomic_locks;
            num_atomic_locks = DEFAULT_ATOMIC_LOCKS;
#ifdef DEBUG
            hash_lock_counts = static_hash_lock_counts;
#endif
        }
        atomic_hash_mask = num_atomic_locks - 1;
    }
    PR_ASSERT(PR_FloorLog2(num_atomic_locks) ==
              PR_CeilingLog2(num_atomic_locks));
}
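To make the clamping and rounding concrete, a few example values (derived directly from
the code above):

/*
 * NSPR_ATOMIC_HASH_LOCKS=100  -> PR_FloorLog2(100) = 6 -> 1 << 6 = 64 locks, mask 0x3F
 * NSPR_ATOMIC_HASH_LOCKS=8192 -> clamped to MAX_ATOMIC_LOCKS = 4096 locks, mask 0xFFF
 * NSPR_ATOMIC_HASH_LOCKS=0    -> clamped to 1 lock, mask 0x0 (every address shares one lock)
 */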

PRInt32
_PR_MD_ATOMIC_INCREMENT(PRInt32 *val)
{
    PRInt32 rv;
    PRInt32 idx = _PR_HASH_FOR_LOCK(val);

    pthread_mutex_lock(&atomic_locks[idx]);
    rv = ++(*val);
#ifdef DEBUG
    hash_lock_counts[idx]++;
#endif
    pthread_mutex_unlock(&atomic_locks[idx]);
    return rv;
}

PRInt32
_PR_MD_ATOMIC_ADD(PRInt32 *ptr, PRInt32 val)
{
    PRInt32 rv;
    PRInt32 idx = _PR_HASH_FOR_LOCK(ptr);

    pthread_mutex_lock(&atomic_locks[idx]);
    rv = ((*ptr) += val);
#ifdef DEBUG
    hash_lock_counts[idx]++;
#endif
    pthread_mutex_unlock(&atomic_locks[idx]);
    return rv;
}

PRInt32
_PR_MD_ATOMIC_DECREMENT(PRInt32 *val)
{
    PRInt32 rv;
    PRInt32 idx = _PR_HASH_FOR_LOCK(val);

    pthread_mutex_lock(&atomic_locks[idx]);
    rv = --(*val);
#ifdef DEBUG
    hash_lock_counts[idx]++;
#endif
    pthread_mutex_unlock(&atomic_locks[idx]);
    return rv;
}

PRInt32
_PR_MD_ATOMIC_SET(PRInt32 *val, PRInt32 newval)
{
    PRInt32 rv;
    PRInt32 idx = _PR_HASH_FOR_LOCK(val);

    pthread_mutex_lock(&atomic_locks[idx]);
    rv = *val;
    *val = newval;
#ifdef DEBUG
    hash_lock_counts[idx]++;
#endif
    pthread_mutex_unlock(&atomic_locks[idx]);
    return rv;
}
#else  /* _PR_PTHREADS && !_PR_DCETHREADS */
/*
 * We use a single lock for all the emulated atomic operations.
 * The lock contention should be acceptable.
 */
static PRLock *atomic_lock = NULL;
void _PR_MD_INIT_ATOMIC(void)
{
    if (atomic_lock == NULL) {
        atomic_lock = PR_NewLock();
    }
}

PRInt32
_PR_MD_ATOMIC_INCREMENT(PRInt32 *val)
{
    PRInt32 rv;

    if (!_pr_initialized) {
        _PR_ImplicitInitialization();
    }
    PR_Lock(atomic_lock);
    rv = ++(*val);
    PR_Unlock(atomic_lock);
    return rv;
}

PRInt32
_PR_MD_ATOMIC_ADD(PRInt32 *ptr, PRInt32 val)
{
    PRInt32 rv;

    if (!_pr_initialized) {
        _PR_ImplicitInitialization();
    }
    PR_Lock(atomic_lock);
    rv = ((*ptr) += val);
    PR_Unlock(atomic_lock);
    return rv;
}

PRInt32
_PR_MD_ATOMIC_DECREMENT(PRInt32 *val)
{
    PRInt32 rv;

    if (!_pr_initialized) {
        _PR_ImplicitInitialization();
    }
    PR_Lock(atomic_lock);
    rv = --(*val);
    PR_Unlock(atomic_lock);
    return rv;
}

PRInt32
_PR_MD_ATOMIC_SET(PRInt32 *val, PRInt32 newval)
{
    PRInt32 rv;

    if (!_pr_initialized) {
        _PR_ImplicitInitialization();
    }
    PR_Lock(atomic_lock);
    rv = *val;
    *val = newval;
    PR_Unlock(atomic_lock);
    return rv;
}
#endif  /* _PR_PTHREADS && !_PR_DCETHREADS */

#endif  /* !_PR_HAVE_ATOMIC_OPS */

void _PR_InitAtomic(void)
{
    _PR_MD_INIT_ATOMIC();
}

PR_IMPLEMENT(PRInt32)
PR_AtomicIncrement(PRInt32 *val)
{
    return _PR_MD_ATOMIC_INCREMENT(val);
}

PR_IMPLEMENT(PRInt32)
PR_AtomicDecrement(PRInt32 *val)
{
    return _PR_MD_ATOMIC_DECREMENT(val);
}

PR_IMPLEMENT(PRInt32)
PR_AtomicSet(PRInt32 *val, PRInt32 newval)
{
    return _PR_MD_ATOMIC_SET(val, newval);
}

PR_IMPLEMENT(PRInt32)
PR_AtomicAdd(PRInt32 *ptr, PRInt32 val)
{
    return _PR_MD_ATOMIC_ADD(ptr, val);
}
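Typical use of these public wrappers is reference counting. A minimal sketch with a
hypothetical refcounted struct; note that PR_AtomicIncrement/Decrement/Add return the
resulting value, while PR_AtomicSet returns the previous value:

/* Illustrative sketch only; hypothetical refcounted object using the API above. */
typedef struct MyObject {
    PRInt32 refcnt;
    /* ... payload ... */
} MyObject;

static void my_object_addref(MyObject *obj)
{
    PR_AtomicIncrement(&obj->refcnt);             /* returns the new count */
}

static void my_object_release(MyObject *obj)
{
    if (PR_AtomicDecrement(&obj->refcnt) == 0) {  /* last reference dropped */
        PR_Free(obj);
    }
}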
/*
 * For platforms that don't support the CAS (compare-and-swap) instruction
 * (or an equivalent), the stack operations are implemented by using a PRLock.
 */

PR_IMPLEMENT(PRStack *)
PR_CreateStack(const char *stack_name)
{
    PRStack *stack;

    if (!_pr_initialized) {
        _PR_ImplicitInitialization();
    }

    if ((stack = PR_NEW(PRStack)) == NULL) {
        return NULL;
    }
    if (stack_name) {
        stack->prstk_name = (char *) PR_Malloc(strlen(stack_name) + 1);
        if (stack->prstk_name == NULL) {
            PR_DELETE(stack);
            return NULL;
        }
        strcpy(stack->prstk_name, stack_name);
    } else
        stack->prstk_name = NULL;

#ifndef _PR_HAVE_ATOMIC_CAS
    stack->prstk_lock = PR_NewLock();
    if (stack->prstk_lock == NULL) {
        PR_Free(stack->prstk_name);
        PR_DELETE(stack);
        return NULL;
    }
#endif /* !_PR_HAVE_ATOMIC_CAS */

    stack->prstk_head.prstk_elem_next = NULL;

    return stack;
}

PR_IMPLEMENT(PRStatus)
PR_DestroyStack(PRStack *stack)
{
    if (stack->prstk_head.prstk_elem_next != NULL) {
        PR_SetError(PR_INVALID_STATE_ERROR, 0);
        return PR_FAILURE;
    }

    if (stack->prstk_name)
        PR_Free(stack->prstk_name);
#ifndef _PR_HAVE_ATOMIC_CAS
    PR_DestroyLock(stack->prstk_lock);
#endif /* !_PR_HAVE_ATOMIC_CAS */
    PR_DELETE(stack);

    return PR_SUCCESS;
}

#ifndef _PR_HAVE_ATOMIC_CAS

PR_IMPLEMENT(void)
PR_StackPush(PRStack *stack, PRStackElem *stack_elem)
{
    PR_Lock(stack->prstk_lock);
    stack_elem->prstk_elem_next = stack->prstk_head.prstk_elem_next;
    stack->prstk_head.prstk_elem_next = stack_elem;
    PR_Unlock(stack->prstk_lock);
    return;
}

PR_IMPLEMENT(PRStackElem *)
PR_StackPop(PRStack *stack)
{
    PRStackElem *element;

    PR_Lock(stack->prstk_lock);
    element = stack->prstk_head.prstk_elem_next;
    if (element != NULL) {
        stack->prstk_head.prstk_elem_next = element->prstk_elem_next;
        element->prstk_elem_next = NULL;    /* debugging aid */
    }
    PR_Unlock(stack->prstk_lock);
    return element;
}
#endif /* !_PR_HAVE_ATOMIC_CAS */
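On platforms that do define _PR_HAVE_ATOMIC_CAS, PR_StackPush and PR_StackPop come from
platform-specific code instead of the lock-based versions above. As a rough sketch of the
idea only (not NSPR's actual code), a CAS-based push could look like this, again assuming
GCC/Clang __atomic builtins:

/* Illustrative sketch of a CAS-based push; not NSPR's machine-dependent code. */
static void cas_stack_push(PRStack *stack, PRStackElem *elem)
{
    PRStackElem *old_head;

    do {
        old_head = stack->prstk_head.prstk_elem_next;
        elem->prstk_elem_next = old_head;
        /* Retry if another thread changed the head between the read and the swap. */
    } while (!__atomic_compare_exchange_n(&stack->prstk_head.prstk_elem_next,
                                          &old_head, elem,
                                          0 /* strong */,
                                          __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE));
}

A matching lock-free pop is harder because of the ABA problem, which is one reason real
implementations add version counters or rely on platform-specific tricks.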