Wed, 31 Dec 2014 06:09:35 +0100
Cloned upstream origin tor-browser at tor-browser-31.3.0esr-4.5-1-build1
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f for hacking purposes.
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "primpl.h"

#include <string.h>

#if defined(HPUX) && defined(_PR_PTHREADS) && !defined(_PR_DCETHREADS)

#include <pthread.h>
#define HAVE_UNIX98_RWLOCK
#define RWLOCK_T pthread_rwlock_t
#define RWLOCK_INIT(lock) pthread_rwlock_init(lock, NULL)
#define RWLOCK_DESTROY(lock) pthread_rwlock_destroy(lock)
#define RWLOCK_RDLOCK(lock) pthread_rwlock_rdlock(lock)
#define RWLOCK_WRLOCK(lock) pthread_rwlock_wrlock(lock)
#define RWLOCK_UNLOCK(lock) pthread_rwlock_unlock(lock)

#elif defined(SOLARIS) && (defined(_PR_PTHREADS) \
        || defined(_PR_GLOBAL_THREADS_ONLY))

#include <synch.h>
#define HAVE_UI_RWLOCK
#define RWLOCK_T rwlock_t
#define RWLOCK_INIT(lock) rwlock_init(lock, USYNC_THREAD, NULL)
#define RWLOCK_DESTROY(lock) rwlock_destroy(lock)
#define RWLOCK_RDLOCK(lock) rw_rdlock(lock)
#define RWLOCK_WRLOCK(lock) rw_wrlock(lock)
#define RWLOCK_UNLOCK(lock) rw_unlock(lock)

#endif

/*
 * Reader-writer lock
 */
struct PRRWLock {
    char *rw_name;                  /* lock name */
    PRUint32 rw_rank;               /* rank of the lock */

#if defined(HAVE_UNIX98_RWLOCK) || defined(HAVE_UI_RWLOCK)
    RWLOCK_T rw_lock;
#else
    PRLock *rw_lock;
    PRInt32 rw_lock_cnt;            /* == 0, if unlocked */
                                    /* == -1, if write-locked */
                                    /* > 0 , # of read locks */
    PRUint32 rw_reader_cnt;         /* number of waiting readers */
    PRUint32 rw_writer_cnt;         /* number of waiting writers */
    PRCondVar *rw_reader_waitq;     /* cvar for readers */
    PRCondVar *rw_writer_waitq;     /* cvar for writers */
#ifdef DEBUG
    PRThread *rw_owner;             /* lock owner for write-lock */
#endif
#endif
};

#ifdef DEBUG
#define _PR_RWLOCK_RANK_ORDER_DEBUG /* enable deadlock detection using
                                       rank-order for locks
                                     */
#endif
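
/*
 * Illustrative sketch (not part of the upstream file; the lock names and
 * ranks below are hypothetical): how rank ordering catches deadlocks when
 * _PR_RWLOCK_RANK_ORDER_DEBUG is enabled.  The debug checks assert that
 * every newly acquired lock has a rank >= the highest rank already held by
 * the calling thread (locks created with PR_RWLOCK_RANK_NONE are exempt),
 * so all threads end up acquiring ranked locks in one global order.
 */
#if 0
    PRRWLock *dbLock    = PR_NewRWLock(1, "db");     /* lower rank  */
    PRRWLock *cacheLock = PR_NewRWLock(2, "cache");  /* higher rank */

    PR_RWLock_Rlock(dbLock);        /* rank 1: OK, nothing held         */
    PR_RWLock_Rlock(cacheLock);     /* rank 2 >= 1: OK                  */
    PR_RWLock_Unlock(cacheLock);
    PR_RWLock_Unlock(dbLock);

    PR_RWLock_Wlock(cacheLock);     /* rank 2: OK                       */
    PR_RWLock_Rlock(dbLock);        /* rank 1 < 2: asserts in a DEBUG
                                       build, flagging the inversion    */
#endif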

#ifdef _PR_RWLOCK_RANK_ORDER_DEBUG

static PRUintn pr_thread_rwlock_key;            /* TPD key for lock stack */
static PRUintn pr_thread_rwlock_alloc_failed;

#define _PR_RWLOCK_RANK_ORDER_LIMIT 10

typedef struct thread_rwlock_stack {
    PRInt32 trs_index;                                  /* top of stack */
    PRRWLock *trs_stack[_PR_RWLOCK_RANK_ORDER_LIMIT];   /* stack of lock
                                                           pointers */
} thread_rwlock_stack;

static void _PR_SET_THREAD_RWLOCK_RANK(PRRWLock *rwlock);
static PRUint32 _PR_GET_THREAD_RWLOCK_RANK(void);
static void _PR_UNSET_THREAD_RWLOCK_RANK(PRRWLock *rwlock);
static void _PR_RELEASE_LOCK_STACK(void *lock_stack);

#endif

/*
 * Reader/Writer Locks
 */

/*
 * PR_NewRWLock
 *      Create a reader-writer lock, with the given lock rank and lock name.
 */

PR_IMPLEMENT(PRRWLock *)
PR_NewRWLock(PRUint32 lock_rank, const char *lock_name)
{
    PRRWLock *rwlock;
#if defined(HAVE_UNIX98_RWLOCK) || defined(HAVE_UI_RWLOCK)
    int err;
#endif

    if (!_pr_initialized) _PR_ImplicitInitialization();

    rwlock = PR_NEWZAP(PRRWLock);
    if (rwlock == NULL)
        return NULL;

    rwlock->rw_rank = lock_rank;
    if (lock_name != NULL) {
        rwlock->rw_name = (char*) PR_Malloc(strlen(lock_name) + 1);
        if (rwlock->rw_name == NULL) {
            PR_DELETE(rwlock);
            return(NULL);
        }
        strcpy(rwlock->rw_name, lock_name);
    } else {
        rwlock->rw_name = NULL;
    }

#if defined(HAVE_UNIX98_RWLOCK) || defined(HAVE_UI_RWLOCK)
    err = RWLOCK_INIT(&rwlock->rw_lock);
    if (err != 0) {
        PR_SetError(PR_UNKNOWN_ERROR, err);
        PR_Free(rwlock->rw_name);
        PR_DELETE(rwlock);
        return NULL;
    }
    return rwlock;
#else
    rwlock->rw_lock = PR_NewLock();
    if (rwlock->rw_lock == NULL) {
        goto failed;
    }
    rwlock->rw_reader_waitq = PR_NewCondVar(rwlock->rw_lock);
    if (rwlock->rw_reader_waitq == NULL) {
        goto failed;
    }
    rwlock->rw_writer_waitq = PR_NewCondVar(rwlock->rw_lock);
    if (rwlock->rw_writer_waitq == NULL) {
        goto failed;
    }
    rwlock->rw_reader_cnt = 0;
    rwlock->rw_writer_cnt = 0;
    rwlock->rw_lock_cnt = 0;
    return rwlock;

failed:
    if (rwlock->rw_reader_waitq != NULL) {
        PR_DestroyCondVar(rwlock->rw_reader_waitq);
    }
    if (rwlock->rw_lock != NULL) {
        PR_DestroyLock(rwlock->rw_lock);
    }
    PR_Free(rwlock->rw_name);
    PR_DELETE(rwlock);
    return NULL;
#endif
}
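
/*
 * Illustrative usage sketch (the lock name and the surrounding code are
 * hypothetical, not from the upstream file): the typical lifecycle of a
 * PRRWLock.  Multiple threads may hold the read lock concurrently; the
 * write lock gives one thread exclusive access.
 */
#if 0
    PRRWLock *lock = PR_NewRWLock(PR_RWLOCK_RANK_NONE, "config");
    if (lock == NULL) {
        return;                     /* allocation/initialization failed */
    }

    PR_RWLock_Rlock(lock);          /* shared (read) access             */
    /* ... read the protected data ... */
    PR_RWLock_Unlock(lock);

    PR_RWLock_Wlock(lock);          /* exclusive (write) access         */
    /* ... modify the protected data ... */
    PR_RWLock_Unlock(lock);

    PR_DestroyRWLock(lock);         /* no thread may still hold it      */
#endif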

/*
** Destroy the given RWLock "lock".
*/
PR_IMPLEMENT(void)
PR_DestroyRWLock(PRRWLock *rwlock)
{
#if defined(HAVE_UNIX98_RWLOCK) || defined(HAVE_UI_RWLOCK)
    int err;
    err = RWLOCK_DESTROY(&rwlock->rw_lock);
    PR_ASSERT(err == 0);
#else
    PR_ASSERT(rwlock->rw_reader_cnt == 0);
    PR_DestroyCondVar(rwlock->rw_reader_waitq);
    PR_DestroyCondVar(rwlock->rw_writer_waitq);
    PR_DestroyLock(rwlock->rw_lock);
#endif
    if (rwlock->rw_name != NULL)
        PR_Free(rwlock->rw_name);
    PR_DELETE(rwlock);
}

/*
** Read-lock the RWLock.
*/
PR_IMPLEMENT(void)
PR_RWLock_Rlock(PRRWLock *rwlock)
{
#if defined(HAVE_UNIX98_RWLOCK) || defined(HAVE_UI_RWLOCK)
    int err;
#endif

#ifdef _PR_RWLOCK_RANK_ORDER_DEBUG
    /*
     * assert that rank ordering is not violated; the rank of 'rwlock' should
     * be equal to or greater than the highest rank of all the locks held by
     * the thread.
     */
    PR_ASSERT((rwlock->rw_rank == PR_RWLOCK_RANK_NONE) ||
              (rwlock->rw_rank >= _PR_GET_THREAD_RWLOCK_RANK()));
#endif

#if defined(HAVE_UNIX98_RWLOCK) || defined(HAVE_UI_RWLOCK)
    err = RWLOCK_RDLOCK(&rwlock->rw_lock);
    PR_ASSERT(err == 0);
#else
    PR_Lock(rwlock->rw_lock);
    /*
     * wait if write-locked or if a writer is waiting; preference for writers
     */
    while ((rwlock->rw_lock_cnt < 0) ||
           (rwlock->rw_writer_cnt > 0)) {
        rwlock->rw_reader_cnt++;
        PR_WaitCondVar(rwlock->rw_reader_waitq, PR_INTERVAL_NO_TIMEOUT);
        rwlock->rw_reader_cnt--;
    }
    /*
     * increment read-lock count
     */
    rwlock->rw_lock_cnt++;

    PR_Unlock(rwlock->rw_lock);
#endif

#ifdef _PR_RWLOCK_RANK_ORDER_DEBUG
    /*
     * update thread's lock rank
     */
    if (rwlock->rw_rank != PR_RWLOCK_RANK_NONE)
        _PR_SET_THREAD_RWLOCK_RANK(rwlock);
#endif
}

/*
** Write-lock the RWLock.
*/
PR_IMPLEMENT(void)
PR_RWLock_Wlock(PRRWLock *rwlock)
{
#if defined(DEBUG)
    PRThread *me = PR_GetCurrentThread();
#endif
#if defined(HAVE_UNIX98_RWLOCK) || defined(HAVE_UI_RWLOCK)
    int err;
#endif

#ifdef _PR_RWLOCK_RANK_ORDER_DEBUG
    /*
     * assert that rank ordering is not violated; the rank of 'rwlock' should
     * be equal to or greater than the highest rank of all the locks held by
     * the thread.
     */
    PR_ASSERT((rwlock->rw_rank == PR_RWLOCK_RANK_NONE) ||
              (rwlock->rw_rank >= _PR_GET_THREAD_RWLOCK_RANK()));
#endif

#if defined(HAVE_UNIX98_RWLOCK) || defined(HAVE_UI_RWLOCK)
    err = RWLOCK_WRLOCK(&rwlock->rw_lock);
    PR_ASSERT(err == 0);
#else
    PR_Lock(rwlock->rw_lock);
    /*
     * wait while read- or write-locked
     */
    while (rwlock->rw_lock_cnt != 0) {
        rwlock->rw_writer_cnt++;
        PR_WaitCondVar(rwlock->rw_writer_waitq, PR_INTERVAL_NO_TIMEOUT);
        rwlock->rw_writer_cnt--;
    }
    /*
     * apply write lock
     */
    rwlock->rw_lock_cnt--;
    PR_ASSERT(rwlock->rw_lock_cnt == -1);
#ifdef DEBUG
    PR_ASSERT(me != NULL);
    rwlock->rw_owner = me;
#endif
    PR_Unlock(rwlock->rw_lock);
#endif

#ifdef _PR_RWLOCK_RANK_ORDER_DEBUG
    /*
     * update thread's lock rank
     */
    if (rwlock->rw_rank != PR_RWLOCK_RANK_NONE)
        _PR_SET_THREAD_RWLOCK_RANK(rwlock);
#endif
}

/*
** Unlock the RWLock.
*/
PR_IMPLEMENT(void)
PR_RWLock_Unlock(PRRWLock *rwlock)
{
#if defined(DEBUG)
    PRThread *me = PR_GetCurrentThread();
#endif
#if defined(HAVE_UNIX98_RWLOCK) || defined(HAVE_UI_RWLOCK)
    int err;
#endif

#if defined(HAVE_UNIX98_RWLOCK) || defined(HAVE_UI_RWLOCK)
    err = RWLOCK_UNLOCK(&rwlock->rw_lock);
    PR_ASSERT(err == 0);
#else
    PR_Lock(rwlock->rw_lock);
    /*
     * lock must be read- or write-locked
     */
    PR_ASSERT(rwlock->rw_lock_cnt != 0);
    if (rwlock->rw_lock_cnt > 0) {
        /*
         * decrement read-lock count
         */
        rwlock->rw_lock_cnt--;
        if (rwlock->rw_lock_cnt == 0) {
            /*
             * lock is not read-locked anymore; wakeup a waiting writer
             */
            if (rwlock->rw_writer_cnt > 0)
                PR_NotifyCondVar(rwlock->rw_writer_waitq);
        }
    } else {
        PR_ASSERT(rwlock->rw_lock_cnt == -1);

        rwlock->rw_lock_cnt = 0;
#ifdef DEBUG
        PR_ASSERT(rwlock->rw_owner == me);
        rwlock->rw_owner = NULL;
#endif
        /*
         * wakeup a writer, if present; preference for writers
         */
        if (rwlock->rw_writer_cnt > 0)
            PR_NotifyCondVar(rwlock->rw_writer_waitq);
        /*
         * else, wakeup all readers, if any
         */
        else if (rwlock->rw_reader_cnt > 0)
            PR_NotifyAllCondVar(rwlock->rw_reader_waitq);
    }
    PR_Unlock(rwlock->rw_lock);
#endif

#ifdef _PR_RWLOCK_RANK_ORDER_DEBUG
    /*
     * update thread's lock rank
     */
    if (rwlock->rw_rank != PR_RWLOCK_RANK_NONE)
        _PR_UNSET_THREAD_RWLOCK_RANK(rwlock);
#endif
    return;
}
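
/*
 * Worked scenario for the generic (non-UNIX98/UI) writer-preference policy
 * above (illustration; thread names are hypothetical): threads R1 and R2
 * hold read locks, so rw_lock_cnt == 2.  Writer W calls PR_RWLock_Wlock()
 * and blocks on rw_writer_waitq with rw_writer_cnt == 1.  A new reader R3
 * calling PR_RWLock_Rlock() now also blocks, because rw_writer_cnt > 0.
 * When R1 and R2 unlock, rw_lock_cnt drops to 0 and the last unlock
 * notifies rw_writer_waitq, so W acquires the write lock next; when W
 * unlocks, it wakes the next waiting writer or, if none remains, all
 * waiting readers (here R3).
 */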

#ifndef _PR_RWLOCK_RANK_ORDER_DEBUG

void _PR_InitRWLocks(void) { }

#else

void _PR_InitRWLocks(void)
{
    /*
     * allocate a thread-private-data index for the per-thread lock stack
     */
    if (PR_NewThreadPrivateIndex(&pr_thread_rwlock_key,
                                 _PR_RELEASE_LOCK_STACK) == PR_FAILURE) {
        pr_thread_rwlock_alloc_failed = 1;
        return;
    }
}

/*
 * _PR_SET_THREAD_RWLOCK_RANK
 *      Set a thread's lock rank, which is the highest of the ranks of all
 *      the locks held by the thread. Pointers to the locks are added to a
 *      per-thread list, which is anchored off a thread-private data key.
 */

static void
_PR_SET_THREAD_RWLOCK_RANK(PRRWLock *rwlock)
{
    thread_rwlock_stack *lock_stack;
    PRStatus rv;

    /*
     * allocate a lock stack
     */
    if ((lock_stack = PR_GetThreadPrivate(pr_thread_rwlock_key)) == NULL) {
        lock_stack = (thread_rwlock_stack *)
            PR_CALLOC(1 * sizeof(thread_rwlock_stack));
        if (lock_stack) {
            rv = PR_SetThreadPrivate(pr_thread_rwlock_key, lock_stack);
            if (rv == PR_FAILURE) {
                PR_DELETE(lock_stack);
                pr_thread_rwlock_alloc_failed = 1;
                return;
            }
        } else {
            pr_thread_rwlock_alloc_failed = 1;
            return;
        }
    }
    /*
     * add rwlock to the lock stack, if the limit is not exceeded
     */
    if (lock_stack) {
        if (lock_stack->trs_index < _PR_RWLOCK_RANK_ORDER_LIMIT)
            lock_stack->trs_stack[lock_stack->trs_index++] = rwlock;
    }
}

static void
_PR_RELEASE_LOCK_STACK(void *lock_stack)
{
    PR_ASSERT(lock_stack);
    PR_DELETE(lock_stack);
}

/*
 * _PR_GET_THREAD_RWLOCK_RANK
 *
 *      Return the thread's lock rank. If thread-private data for the lock
 *      stack is not allocated, return PR_RWLOCK_RANK_NONE.
 */

static PRUint32
_PR_GET_THREAD_RWLOCK_RANK(void)
{
    thread_rwlock_stack *lock_stack;

    lock_stack = PR_GetThreadPrivate(pr_thread_rwlock_key);
    if (lock_stack == NULL || lock_stack->trs_index == 0)
        return (PR_RWLOCK_RANK_NONE);
    else
        return(lock_stack->trs_stack[lock_stack->trs_index - 1]->rw_rank);
}

/*
 * _PR_UNSET_THREAD_RWLOCK_RANK
 *
 *      Remove the rwlock from the lock stack. Since locks may not be
 *      released in LIFO (reverse-acquisition) order, the entire lock
 *      stack is searched.
 */

static void
_PR_UNSET_THREAD_RWLOCK_RANK(PRRWLock *rwlock)
{
    thread_rwlock_stack *lock_stack;
    int new_index = 0, index, done = 0;

    lock_stack = PR_GetThreadPrivate(pr_thread_rwlock_key);

    PR_ASSERT(lock_stack != NULL);

    for (index = lock_stack->trs_index - 1; index >= 0; index--) {
        if (!done && (lock_stack->trs_stack[index] == rwlock)) {
            /*
             * reset the slot for rwlock
             */
            lock_stack->trs_stack[index] = NULL;
            done = 1;
        }
        /*
         * search for the lowest-numbered empty slot, above which there are
         * no non-empty slots
         */
        if (!new_index && (lock_stack->trs_stack[index] != NULL))
            new_index = index + 1;
        if (done && new_index)
            break;
    }
    /*
     * set the top of the stack to the lowest-numbered empty slot above
     * which there are no non-empty slots
     */
    lock_stack->trs_index = new_index;
}
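
/*
 * Worked example of the stack maintenance above (illustration): with locks
 * acquired in the order A, B, C the per-thread stack is [A, B, C] and
 * trs_index == 3.  Releasing B out of LIFO order clears its slot, giving
 * [A, NULL, C]; the highest non-empty slot still holds C, so trs_index
 * stays 3 and _PR_GET_THREAD_RWLOCK_RANK() keeps returning C's rank.
 * Releasing C next yields [A, NULL, NULL] and trs_index == 1, so the
 * thread's effective rank falls back to A's rank.
 */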

#endif /* _PR_RWLOCK_RANK_ORDER_DEBUG */