Wed, 31 Dec 2014 06:09:35 +0100
Cloned from upstream origin tor-browser at tag tor-browser-31.3.0esr-4.5-1-build1,
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f, for hacking purposes.
michael@0 | 1 | /* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ |
michael@0 | 2 | /* This Source Code Form is subject to the terms of the Mozilla Public |
michael@0 | 3 | * License, v. 2.0. If a copy of the MPL was not distributed with this |
michael@0 | 4 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
michael@0 | 5 | |
michael@0 | 6 | #include "primpl.h" |
michael@0 | 7 | |
michael@0 | 8 | #if defined(WIN95) |
michael@0 | 9 | /* |
michael@0 | 10 | ** Some local variables report warnings on Win95 because the code paths |
michael@0 | 11 | ** using them are conditioned on HAVE_CUSTOM_USER_THREADS. |
michael@0 | 12 | ** The pragma suppresses the warning. |
michael@0 | 13 | ** |
michael@0 | 14 | */ |
michael@0 | 15 | #pragma warning(disable : 4101) |
michael@0 | 16 | #endif |
michael@0 | 17 | |
michael@0 | 18 | |
/*
** Initialize the machine-dependent lock machinery.
** Must run before any PRLock is created or used.
*/
void _PR_InitLocks(void)
{
    _PR_MD_INIT_LOCKS();
}
michael@0 | 23 | |
michael@0 | 24 | /* |
michael@0 | 25 | ** Deal with delayed interrupts/requested reschedule during interrupt |
michael@0 | 26 | ** re-enables. |
michael@0 | 27 | */ |
michael@0 | 28 | void _PR_IntsOn(_PRCPU *cpu) |
michael@0 | 29 | { |
michael@0 | 30 | PRUintn missed, pri, i; |
michael@0 | 31 | _PRInterruptTable *it; |
michael@0 | 32 | PRThread *me; |
michael@0 | 33 | |
michael@0 | 34 | PR_ASSERT(cpu); /* Global threads don't have CPUs */ |
michael@0 | 35 | PR_ASSERT(_PR_MD_GET_INTSOFF() > 0); |
michael@0 | 36 | me = _PR_MD_CURRENT_THREAD(); |
michael@0 | 37 | PR_ASSERT(!(me->flags & _PR_IDLE_THREAD)); |
michael@0 | 38 | |
michael@0 | 39 | /* |
michael@0 | 40 | ** Process delayed interrupts. This logic is kinda scary because we |
michael@0 | 41 | ** need to avoid losing an interrupt (it's ok to delay an interrupt |
michael@0 | 42 | ** until later). |
michael@0 | 43 | ** |
michael@0 | 44 | ** There are two missed state words. _pr_ints.where indicates to the |
michael@0 | 45 | ** interrupt handler which state word is currently safe for |
michael@0 | 46 | ** modification. |
michael@0 | 47 | ** |
michael@0 | 48 | ** This code scans both interrupt state words, using the where flag |
michael@0 | 49 | ** to indicate to the interrupt which state word is safe for writing. |
michael@0 | 50 | ** If an interrupt comes in during a scan the other word will be |
michael@0 | 51 | ** modified. This modification will be noticed during the next |
michael@0 | 52 | ** iteration of the loop or during the next call to this routine. |
michael@0 | 53 | */ |
michael@0 | 54 | for (i = 0; i < 2; i++) { |
michael@0 | 55 | cpu->where = (1 - i); |
michael@0 | 56 | missed = cpu->u.missed[i]; |
michael@0 | 57 | if (missed != 0) { |
michael@0 | 58 | cpu->u.missed[i] = 0; |
michael@0 | 59 | for (it = _pr_interruptTable; it->name; it++) { |
michael@0 | 60 | if (missed & it->missed_bit) { |
michael@0 | 61 | PR_LOG(_pr_sched_lm, PR_LOG_MIN, |
michael@0 | 62 | ("IntsOn[0]: %s intr", it->name)); |
michael@0 | 63 | (*it->handler)(); |
michael@0 | 64 | } |
michael@0 | 65 | } |
michael@0 | 66 | } |
michael@0 | 67 | } |
michael@0 | 68 | |
michael@0 | 69 | if (cpu->u.missed[3] != 0) { |
michael@0 | 70 | _PRCPU *cpu; |
michael@0 | 71 | |
michael@0 | 72 | _PR_THREAD_LOCK(me); |
michael@0 | 73 | me->state = _PR_RUNNABLE; |
michael@0 | 74 | pri = me->priority; |
michael@0 | 75 | |
michael@0 | 76 | cpu = me->cpu; |
michael@0 | 77 | _PR_RUNQ_LOCK(cpu); |
michael@0 | 78 | _PR_ADD_RUNQ(me, cpu, pri); |
michael@0 | 79 | _PR_RUNQ_UNLOCK(cpu); |
michael@0 | 80 | _PR_THREAD_UNLOCK(me); |
michael@0 | 81 | _PR_MD_SWITCH_CONTEXT(me); |
michael@0 | 82 | } |
michael@0 | 83 | } |
michael@0 | 84 | |
michael@0 | 85 | /* |
michael@0 | 86 | ** Unblock the first runnable waiting thread. Skip over |
michael@0 | 87 | ** threads that are trying to be suspended |
michael@0 | 88 | ** Note: Caller must hold _PR_LOCK_LOCK() |
michael@0 | 89 | */ |
/*
** Unblock the first runnable waiting thread. Skip over
** threads that are trying to be suspended.
** Note: Caller must hold _PR_LOCK_LOCK()
**
** At most one waiter is woken; the scan stops at the first thread
** that is not in the middle of being suspended.
*/
void _PR_UnblockLockWaiter(PRLock *lock)
{
    PRThread *t = NULL;
    PRThread *me;
    PRCList *q;

    q = lock->waitQ.next;
    /* Caller guarantees the waitQ is non-empty (see PR_Unlock). */
    PR_ASSERT(q != &lock->waitQ);
    while (q != &lock->waitQ) {
        /* Unblock first waiter */
        t = _PR_THREAD_CONDQ_PTR(q);

        /*
        ** We are about to change the thread's state to runnable and for local
        ** threads, we are going to assign a cpu to it. So, protect thread's
        ** data structure.
        */
        _PR_THREAD_LOCK(t);

        /* A thread being suspended cannot take the lock; try the next waiter. */
        if (t->flags & _PR_SUSPENDING) {
            q = q->next;
            _PR_THREAD_UNLOCK(t);
            continue;
        }

        /* Found a runnable thread */
        PR_ASSERT(t->state == _PR_LOCK_WAIT);
        PR_ASSERT(t->wait.lock == lock);
        /* Clear the wait record before making the thread runnable. */
        t->wait.lock = 0;
        PR_REMOVE_LINK(&t->waitQLinks); /* take it off lock's waitQ */

        /*
        ** If this is a native thread, nothing else to do except to wake it
        ** up by calling the machine dependent wakeup routine.
        **
        ** If this is a local thread, we need to assign it a cpu and
        ** put the thread on that cpu's run queue. There are two cases to
        ** take care of. If the currently running thread is also a local
        ** thread, we just assign our own cpu to that thread and put it on
        ** the cpu's run queue. If the currently running thread is a
        ** native thread, we assign the primordial cpu to it (on NT,
        ** MD_WAKEUP handles the cpu assignment).
        */

        if ( !_PR_IS_NATIVE_THREAD(t) ) {

            t->state = _PR_RUNNABLE;

            me = _PR_MD_CURRENT_THREAD();

            _PR_AddThreadToRunQ(me, t);
            _PR_THREAD_UNLOCK(t);
        } else {
            t->state = _PR_RUNNING;
            _PR_THREAD_UNLOCK(t);
        }
        /* Wake exactly this one thread, then stop scanning. */
        _PR_MD_WAKEUP_WAITER(t);
        break;
    }
    return;
}
michael@0 | 151 | |
michael@0 | 152 | /************************************************************************/ |
michael@0 | 153 | |
michael@0 | 154 | |
michael@0 | 155 | PR_IMPLEMENT(PRLock*) PR_NewLock(void) |
michael@0 | 156 | { |
michael@0 | 157 | PRLock *lock; |
michael@0 | 158 | |
michael@0 | 159 | if (!_pr_initialized) _PR_ImplicitInitialization(); |
michael@0 | 160 | |
michael@0 | 161 | lock = PR_NEWZAP(PRLock); |
michael@0 | 162 | if (lock) { |
michael@0 | 163 | if (_PR_InitLock(lock) != PR_SUCCESS) { |
michael@0 | 164 | PR_DELETE(lock); |
michael@0 | 165 | return NULL; |
michael@0 | 166 | } |
michael@0 | 167 | } |
michael@0 | 168 | return lock; |
michael@0 | 169 | } |
michael@0 | 170 | |
michael@0 | 171 | PRStatus _PR_InitLock(PRLock *lock) |
michael@0 | 172 | { |
michael@0 | 173 | if (_PR_MD_NEW_LOCK(&lock->ilock) != PR_SUCCESS) { |
michael@0 | 174 | return PR_FAILURE; |
michael@0 | 175 | } |
michael@0 | 176 | PR_INIT_CLIST(&lock->links); |
michael@0 | 177 | PR_INIT_CLIST(&lock->waitQ); |
michael@0 | 178 | return PR_SUCCESS; |
michael@0 | 179 | } |
michael@0 | 180 | |
michael@0 | 181 | /* |
michael@0 | 182 | ** Destroy the given lock "lock". There is no point in making this race |
michael@0 | 183 | ** free because if some other thread has the pointer to this lock all |
michael@0 | 184 | ** bets are off. |
michael@0 | 185 | */ |
/*
** Destroy the given lock "lock". There is no point in making this race
** free because if some other thread has the pointer to this lock all
** bets are off.
**
** The lock must not be held (asserted inside _PR_FreeLock).
*/
PR_IMPLEMENT(void) PR_DestroyLock(PRLock *lock)
{
    /* Release the MD lock first, then the PRLock storage itself. */
    _PR_FreeLock(lock);
    PR_DELETE(lock);
}
michael@0 | 191 | |
/*
** Release the machine-dependent portion of a lock. The caller must
** ensure the lock is not owned and is responsible for freeing the
** PRLock structure itself (see PR_DestroyLock).
*/
void _PR_FreeLock(PRLock *lock)
{
    PR_ASSERT(lock->owner == 0);
    _PR_MD_FREE_LOCK(&lock->ilock);
}
michael@0 | 197 | |
michael@0 | 198 | extern PRThread *suspendAllThread; |
michael@0 | 199 | /* |
michael@0 | 200 | ** Lock the lock. |
michael@0 | 201 | */ |
michael@0 | 202 | PR_IMPLEMENT(void) PR_Lock(PRLock *lock) |
michael@0 | 203 | { |
michael@0 | 204 | PRThread *me = _PR_MD_CURRENT_THREAD(); |
michael@0 | 205 | PRIntn is; |
michael@0 | 206 | PRThread *t; |
michael@0 | 207 | PRCList *q; |
michael@0 | 208 | |
michael@0 | 209 | PR_ASSERT(me != suspendAllThread); |
michael@0 | 210 | PR_ASSERT(!(me->flags & _PR_IDLE_THREAD)); |
michael@0 | 211 | PR_ASSERT(lock != NULL); |
michael@0 | 212 | #ifdef _PR_GLOBAL_THREADS_ONLY |
michael@0 | 213 | _PR_MD_LOCK(&lock->ilock); |
michael@0 | 214 | PR_ASSERT(lock->owner == 0); |
michael@0 | 215 | lock->owner = me; |
michael@0 | 216 | return; |
michael@0 | 217 | #else /* _PR_GLOBAL_THREADS_ONLY */ |
michael@0 | 218 | |
michael@0 | 219 | if (_native_threads_only) { |
michael@0 | 220 | _PR_MD_LOCK(&lock->ilock); |
michael@0 | 221 | PR_ASSERT(lock->owner == 0); |
michael@0 | 222 | lock->owner = me; |
michael@0 | 223 | return; |
michael@0 | 224 | } |
michael@0 | 225 | |
michael@0 | 226 | if (!_PR_IS_NATIVE_THREAD(me)) |
michael@0 | 227 | _PR_INTSOFF(is); |
michael@0 | 228 | |
michael@0 | 229 | PR_ASSERT(_PR_IS_NATIVE_THREAD(me) || _PR_MD_GET_INTSOFF() != 0); |
michael@0 | 230 | |
michael@0 | 231 | retry: |
michael@0 | 232 | _PR_LOCK_LOCK(lock); |
michael@0 | 233 | if (lock->owner == 0) { |
michael@0 | 234 | /* Just got the lock */ |
michael@0 | 235 | lock->owner = me; |
michael@0 | 236 | lock->priority = me->priority; |
michael@0 | 237 | /* Add the granted lock to this owning thread's lock list */ |
michael@0 | 238 | PR_APPEND_LINK(&lock->links, &me->lockList); |
michael@0 | 239 | _PR_LOCK_UNLOCK(lock); |
michael@0 | 240 | if (!_PR_IS_NATIVE_THREAD(me)) |
michael@0 | 241 | _PR_FAST_INTSON(is); |
michael@0 | 242 | return; |
michael@0 | 243 | } |
michael@0 | 244 | |
michael@0 | 245 | /* If this thread already owns this lock, then it is a deadlock */ |
michael@0 | 246 | PR_ASSERT(lock->owner != me); |
michael@0 | 247 | |
michael@0 | 248 | PR_ASSERT(_PR_IS_NATIVE_THREAD(me) || _PR_MD_GET_INTSOFF() != 0); |
michael@0 | 249 | |
michael@0 | 250 | #if 0 |
michael@0 | 251 | if (me->priority > lock->owner->priority) { |
michael@0 | 252 | /* |
michael@0 | 253 | ** Give the lock owner a priority boost until we get the |
michael@0 | 254 | ** lock. Record the priority we boosted it to. |
michael@0 | 255 | */ |
michael@0 | 256 | lock->boostPriority = me->priority; |
michael@0 | 257 | _PR_SetThreadPriority(lock->owner, me->priority); |
michael@0 | 258 | } |
michael@0 | 259 | #endif |
michael@0 | 260 | |
michael@0 | 261 | /* |
michael@0 | 262 | Add this thread to the asked for lock's list of waiting threads. We |
michael@0 | 263 | add this thread thread in the right priority order so when the unlock |
michael@0 | 264 | occurs, the thread with the higher priority will get the lock. |
michael@0 | 265 | */ |
michael@0 | 266 | q = lock->waitQ.next; |
michael@0 | 267 | if (q == &lock->waitQ || _PR_THREAD_CONDQ_PTR(q)->priority == |
michael@0 | 268 | _PR_THREAD_CONDQ_PTR(lock->waitQ.prev)->priority) { |
michael@0 | 269 | /* |
michael@0 | 270 | * If all the threads in the lock waitQ have the same priority, |
michael@0 | 271 | * then avoid scanning the list: insert the element at the end. |
michael@0 | 272 | */ |
michael@0 | 273 | q = &lock->waitQ; |
michael@0 | 274 | } else { |
michael@0 | 275 | /* Sort thread into lock's waitQ at appropriate point */ |
michael@0 | 276 | /* Now scan the list for where to insert this entry */ |
michael@0 | 277 | while (q != &lock->waitQ) { |
michael@0 | 278 | t = _PR_THREAD_CONDQ_PTR(lock->waitQ.next); |
michael@0 | 279 | if (me->priority > t->priority) { |
michael@0 | 280 | /* Found a lower priority thread to insert in front of */ |
michael@0 | 281 | break; |
michael@0 | 282 | } |
michael@0 | 283 | q = q->next; |
michael@0 | 284 | } |
michael@0 | 285 | } |
michael@0 | 286 | PR_INSERT_BEFORE(&me->waitQLinks, q); |
michael@0 | 287 | |
michael@0 | 288 | /* |
michael@0 | 289 | Now grab the threadLock since we are about to change the state. We have |
michael@0 | 290 | to do this since a PR_Suspend or PR_SetThreadPriority type call that takes |
michael@0 | 291 | a PRThread* as an argument could be changing the state of this thread from |
michael@0 | 292 | a thread running on a different cpu. |
michael@0 | 293 | */ |
michael@0 | 294 | |
michael@0 | 295 | _PR_THREAD_LOCK(me); |
michael@0 | 296 | me->state = _PR_LOCK_WAIT; |
michael@0 | 297 | me->wait.lock = lock; |
michael@0 | 298 | _PR_THREAD_UNLOCK(me); |
michael@0 | 299 | |
michael@0 | 300 | _PR_LOCK_UNLOCK(lock); |
michael@0 | 301 | |
michael@0 | 302 | _PR_MD_WAIT(me, PR_INTERVAL_NO_TIMEOUT); |
michael@0 | 303 | goto retry; |
michael@0 | 304 | |
michael@0 | 305 | #endif /* _PR_GLOBAL_THREADS_ONLY */ |
michael@0 | 306 | } |
michael@0 | 307 | |
michael@0 | 308 | /* |
michael@0 | 309 | ** Unlock the lock. |
michael@0 | 310 | */ |
/*
** Unlock the lock.
**
** The calling thread must own "lock". Returns PR_FAILURE (leaving the
** lock untouched) if it does not; PR_SUCCESS otherwise. If a priority
** boost was received while the lock was held, the owner's priority is
** recomputed from the locks it still holds before a waiter is woken.
*/
PR_IMPLEMENT(PRStatus) PR_Unlock(PRLock *lock)
{
    PRCList *q;
    PRThreadPriority pri, boost;
    PRIntn is;
    PRThread *me = _PR_MD_CURRENT_THREAD();

    PR_ASSERT(lock != NULL);
    PR_ASSERT(lock->owner == me);
    PR_ASSERT(me != suspendAllThread);
    PR_ASSERT(!(me->flags & _PR_IDLE_THREAD));
    /* Release-path guard for non-debug builds: refuse to unlock a lock
    ** this thread does not own. */
    if (lock->owner != me) {
        return PR_FAILURE;
    }

#ifdef _PR_GLOBAL_THREADS_ONLY
    lock->owner = 0;
    _PR_MD_UNLOCK(&lock->ilock);
    return PR_SUCCESS;
#else /* _PR_GLOBAL_THREADS_ONLY */

    if (_native_threads_only) {
        lock->owner = 0;
        _PR_MD_UNLOCK(&lock->ilock);
        return PR_SUCCESS;
    }

    /* Local threads must manipulate the lock state with interrupts masked. */
    if (!_PR_IS_NATIVE_THREAD(me))
        _PR_INTSOFF(is);
    _PR_LOCK_LOCK(lock);

    /* Remove the lock from the owning thread's lock list */
    PR_REMOVE_LINK(&lock->links);
    pri = lock->priority;
    boost = lock->boostPriority;
    if (boost > pri) {
        /*
        ** We received a priority boost during the time we held the lock.
        ** We need to figure out what priority to move to by scanning
        ** down our list of lock's that we are still holding and using
        ** the highest boosted priority found.
        */
        q = me->lockList.next;
        while (q != &me->lockList) {
            PRLock *ll = _PR_LOCK_PTR(q);
            if (ll->boostPriority > pri) {
                pri = ll->boostPriority;
            }
            q = q->next;
        }
        /* Only touch the scheduler if the priority actually changes. */
        if (pri != me->priority) {
            _PR_SetThreadPriority(me, pri);
        }
    }

    /* Unblock the first waiting thread */
    q = lock->waitQ.next;
    if (q != &lock->waitQ)
        _PR_UnblockLockWaiter(lock);
    /* Reset boost bookkeeping and release ownership before dropping
    ** the internal lock. */
    lock->boostPriority = PR_PRIORITY_LOW;
    lock->owner = 0;
    _PR_LOCK_UNLOCK(lock);
    if (!_PR_IS_NATIVE_THREAD(me))
        _PR_INTSON(is);
    return PR_SUCCESS;
#endif /* _PR_GLOBAL_THREADS_ONLY */
}
michael@0 | 378 | |
michael@0 | 379 | /* |
michael@0 | 380 | ** If the current thread owns |lock|, this assertion is guaranteed to |
michael@0 | 381 | ** succeed. Otherwise, the behavior of this function is undefined. |
michael@0 | 382 | */ |
michael@0 | 383 | PR_IMPLEMENT(void) PR_AssertCurrentThreadOwnsLock(PRLock *lock) |
michael@0 | 384 | { |
michael@0 | 385 | PRThread *me = _PR_MD_CURRENT_THREAD(); |
michael@0 | 386 | PR_ASSERT(lock->owner == me); |
michael@0 | 387 | } |
michael@0 | 388 | |
michael@0 | 389 | /* |
michael@0 | 390 | ** Test and then lock the lock if it's not already locked by some other |
michael@0 | 391 | ** thread. Return PR_FALSE if some other thread owned the lock at the |
michael@0 | 392 | ** time of the call. |
michael@0 | 393 | */ |
michael@0 | 394 | PR_IMPLEMENT(PRBool) PR_TestAndLock(PRLock *lock) |
michael@0 | 395 | { |
michael@0 | 396 | PRThread *me = _PR_MD_CURRENT_THREAD(); |
michael@0 | 397 | PRBool rv = PR_FALSE; |
michael@0 | 398 | PRIntn is; |
michael@0 | 399 | |
michael@0 | 400 | #ifdef _PR_GLOBAL_THREADS_ONLY |
michael@0 | 401 | is = _PR_MD_TEST_AND_LOCK(&lock->ilock); |
michael@0 | 402 | if (is == 0) { |
michael@0 | 403 | lock->owner = me; |
michael@0 | 404 | return PR_TRUE; |
michael@0 | 405 | } |
michael@0 | 406 | return PR_FALSE; |
michael@0 | 407 | #else /* _PR_GLOBAL_THREADS_ONLY */ |
michael@0 | 408 | |
michael@0 | 409 | #ifndef _PR_LOCAL_THREADS_ONLY |
michael@0 | 410 | if (_native_threads_only) { |
michael@0 | 411 | is = _PR_MD_TEST_AND_LOCK(&lock->ilock); |
michael@0 | 412 | if (is == 0) { |
michael@0 | 413 | lock->owner = me; |
michael@0 | 414 | return PR_TRUE; |
michael@0 | 415 | } |
michael@0 | 416 | return PR_FALSE; |
michael@0 | 417 | } |
michael@0 | 418 | #endif |
michael@0 | 419 | |
michael@0 | 420 | if (!_PR_IS_NATIVE_THREAD(me)) |
michael@0 | 421 | _PR_INTSOFF(is); |
michael@0 | 422 | |
michael@0 | 423 | _PR_LOCK_LOCK(lock); |
michael@0 | 424 | if (lock->owner == 0) { |
michael@0 | 425 | /* Just got the lock */ |
michael@0 | 426 | lock->owner = me; |
michael@0 | 427 | lock->priority = me->priority; |
michael@0 | 428 | /* Add the granted lock to this owning thread's lock list */ |
michael@0 | 429 | PR_APPEND_LINK(&lock->links, &me->lockList); |
michael@0 | 430 | rv = PR_TRUE; |
michael@0 | 431 | } |
michael@0 | 432 | _PR_LOCK_UNLOCK(lock); |
michael@0 | 433 | |
michael@0 | 434 | if (!_PR_IS_NATIVE_THREAD(me)) |
michael@0 | 435 | _PR_INTSON(is); |
michael@0 | 436 | return rv; |
michael@0 | 437 | #endif /* _PR_GLOBAL_THREADS_ONLY */ |
michael@0 | 438 | } |
michael@0 | 439 | |
michael@0 | 440 | /************************************************************************/ |
michael@0 | 441 | /************************************************************************/ |
michael@0 | 442 | /***********************ROUTINES FOR DCE EMULATION***********************/ |
michael@0 | 443 | /************************************************************************/ |
michael@0 | 444 | /************************************************************************/ |
michael@0 | 445 | PR_IMPLEMENT(PRStatus) PRP_TryLock(PRLock *lock) |
michael@0 | 446 | { return (PR_TestAndLock(lock)) ? PR_SUCCESS : PR_FAILURE; } |