Wed, 31 Dec 2014 06:09:35 +0100
Cloned from upstream origin tor-browser at tag tor-browser-31.3.0esr-4.5-1-build1
(revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f) for hacking purposes.
michael@0 | 1 | /* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ |
michael@0 | 2 | /* This Source Code Form is subject to the terms of the Mozilla Public |
michael@0 | 3 | * License, v. 2.0. If a copy of the MPL was not distributed with this |
michael@0 | 4 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
michael@0 | 5 | |
michael@0 | 6 | |
michael@0 | 7 | #include "primpl.h" |
michael@0 | 8 | #include "prinrval.h" |
michael@0 | 9 | #include "prtypes.h" |
michael@0 | 10 | |
michael@0 | 11 | #if defined(WIN95) |
michael@0 | 12 | /* |
michael@0 | 13 | ** Some local variables report warnings on Win95 because the code paths |
michael@0 | 14 | ** using them are conditioned on HAVE_CUSTOM_USER_THREADS. |
michael@0 | 15 | ** The pragma suppresses the warning. |
michael@0 | 16 | ** |
michael@0 | 17 | */ |
michael@0 | 18 | #pragma warning(disable : 4101) |
michael@0 | 19 | #endif |
michael@0 | 20 | |
michael@0 | 21 | |
/*
** Notify one thread that it has finished waiting on a condition variable.
** Caller must hold the _PR_CVAR_LOCK(cv).
**
** thread - the waiting thread to wake up
** me     - the calling (notifying) thread
**
** Returns PR_TRUE if the thread was actually woken here (its wait.cvar
** was still set), PR_FALSE if it had already been notified elsewhere.
*/
PRBool _PR_NotifyThread (PRThread *thread, PRThread *me)
{
    PRBool rv;

    /* A non-native caller must have interrupts disabled already. */
    PR_ASSERT(_PR_IS_NATIVE_THREAD(me) || _PR_MD_GET_INTSOFF() != 0);

    _PR_THREAD_LOCK(thread);
    PR_ASSERT(!(thread->flags & _PR_IDLE_THREAD));
    if ( !_PR_IS_NATIVE_THREAD(thread) ) {
        if (thread->wait.cvar != NULL) {
            /* Clearing wait.cvar marks this thread as "already notified". */
            thread->wait.cvar = NULL;

            _PR_SLEEPQ_LOCK(thread->cpu);
            /* The notify and timeout can collide; in which case both may
             * attempt to delete from the sleepQ; only let one do it.
             */
            if (thread->flags & (_PR_ON_SLEEPQ|_PR_ON_PAUSEQ))
                _PR_DEL_SLEEPQ(thread, PR_TRUE);
            _PR_SLEEPQ_UNLOCK(thread->cpu);

            if (thread->flags & _PR_SUSPENDING) {
                /*
                 * set thread state to SUSPENDED; a Resume operation
                 * on the thread will move it to the runQ
                 */
                thread->state = _PR_SUSPENDED;
                _PR_MISCQ_LOCK(thread->cpu);
                _PR_ADD_SUSPENDQ(thread, thread->cpu);
                _PR_MISCQ_UNLOCK(thread->cpu);
                _PR_THREAD_UNLOCK(thread);
            } else {
                /* Make thread runnable */
                thread->state = _PR_RUNNABLE;
                _PR_THREAD_UNLOCK(thread);

                _PR_AddThreadToRunQ(me, thread);
                _PR_MD_WAKEUP_WAITER(thread);
            }

            rv = PR_TRUE;
        } else {
            /* Thread has already been notified */
            _PR_THREAD_UNLOCK(thread);
            rv = PR_FALSE;
        }
    } else { /* If the thread is a native thread */
        if (thread->wait.cvar) {
            thread->wait.cvar = NULL;

            if (thread->flags & _PR_SUSPENDING) {
                /*
                 * set thread state to SUSPENDED; a Resume operation
                 * on the thread will enable the thread to run
                 */
                thread->state = _PR_SUSPENDED;
            } else
                thread->state = _PR_RUNNING;
            _PR_THREAD_UNLOCK(thread);
            /* Native threads block in the MD layer; wake them there. */
            _PR_MD_WAKEUP_WAITER(thread);
            rv = PR_TRUE;
        } else {
            _PR_THREAD_UNLOCK(thread);
            rv = PR_FALSE;
        }
    }

    return rv;
}
michael@0 | 94 | |
michael@0 | 95 | /* |
michael@0 | 96 | * Notify thread waiting on cvar; called when thread is interrupted |
michael@0 | 97 | * The thread lock is held on entry and released before return |
michael@0 | 98 | */ |
michael@0 | 99 | void _PR_NotifyLockedThread (PRThread *thread) |
michael@0 | 100 | { |
michael@0 | 101 | PRThread *me = _PR_MD_CURRENT_THREAD(); |
michael@0 | 102 | PRCondVar *cvar; |
michael@0 | 103 | PRThreadPriority pri; |
michael@0 | 104 | |
michael@0 | 105 | if ( !_PR_IS_NATIVE_THREAD(me)) |
michael@0 | 106 | PR_ASSERT(_PR_MD_GET_INTSOFF() != 0); |
michael@0 | 107 | |
michael@0 | 108 | cvar = thread->wait.cvar; |
michael@0 | 109 | thread->wait.cvar = NULL; |
michael@0 | 110 | _PR_THREAD_UNLOCK(thread); |
michael@0 | 111 | |
michael@0 | 112 | _PR_CVAR_LOCK(cvar); |
michael@0 | 113 | _PR_THREAD_LOCK(thread); |
michael@0 | 114 | |
michael@0 | 115 | if (!_PR_IS_NATIVE_THREAD(thread)) { |
michael@0 | 116 | _PR_SLEEPQ_LOCK(thread->cpu); |
michael@0 | 117 | /* The notify and timeout can collide; in which case both may |
michael@0 | 118 | * attempt to delete from the sleepQ; only let one do it. |
michael@0 | 119 | */ |
michael@0 | 120 | if (thread->flags & (_PR_ON_SLEEPQ|_PR_ON_PAUSEQ)) |
michael@0 | 121 | _PR_DEL_SLEEPQ(thread, PR_TRUE); |
michael@0 | 122 | _PR_SLEEPQ_UNLOCK(thread->cpu); |
michael@0 | 123 | |
michael@0 | 124 | /* Make thread runnable */ |
michael@0 | 125 | pri = thread->priority; |
michael@0 | 126 | thread->state = _PR_RUNNABLE; |
michael@0 | 127 | |
michael@0 | 128 | PR_ASSERT(!(thread->flags & _PR_IDLE_THREAD)); |
michael@0 | 129 | |
michael@0 | 130 | _PR_AddThreadToRunQ(me, thread); |
michael@0 | 131 | _PR_THREAD_UNLOCK(thread); |
michael@0 | 132 | |
michael@0 | 133 | _PR_MD_WAKEUP_WAITER(thread); |
michael@0 | 134 | } else { |
michael@0 | 135 | if (thread->flags & _PR_SUSPENDING) { |
michael@0 | 136 | /* |
michael@0 | 137 | * set thread state to SUSPENDED; a Resume operation |
michael@0 | 138 | * on the thread will enable the thread to run |
michael@0 | 139 | */ |
michael@0 | 140 | thread->state = _PR_SUSPENDED; |
michael@0 | 141 | } else |
michael@0 | 142 | thread->state = _PR_RUNNING; |
michael@0 | 143 | _PR_THREAD_UNLOCK(thread); |
michael@0 | 144 | _PR_MD_WAKEUP_WAITER(thread); |
michael@0 | 145 | } |
michael@0 | 146 | |
michael@0 | 147 | _PR_CVAR_UNLOCK(cvar); |
michael@0 | 148 | return; |
michael@0 | 149 | } |
michael@0 | 150 | |
/*
** Make the given thread wait for the given condition variable.
**
** thread  - the calling thread (asserted to be the current thread)
** cvar    - the condition variable to wait on
** lock    - the lock associated with cvar; held by the caller on entry,
**           released during the wait, and re-acquired before returning
** timeout - interval handed to the sleep queue / MD wait
**
** Returns PR_FAILURE (with PR_PENDING_INTERRUPT_ERROR set) if the thread
** had a pending interrupt; otherwise the status of the MD wait.
*/
PRStatus _PR_WaitCondVar(
    PRThread *thread, PRCondVar *cvar, PRLock *lock, PRIntervalTime timeout)
{
    PRIntn is;
    PRStatus rv = PR_SUCCESS;

    PR_ASSERT(thread == _PR_MD_CURRENT_THREAD());
    PR_ASSERT(!(thread->flags & _PR_IDLE_THREAD));

#ifdef _PR_GLOBAL_THREADS_ONLY
    /* All threads are native: hand the whole wait to the MD layer. */
    if (_PR_PENDING_INTERRUPT(thread)) {
        PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0);
        thread->flags &= ~_PR_INTERRUPT;
        return PR_FAILURE;
    }

    /* Ownership of "lock" is given up for the duration of the MD wait
    ** and restored afterwards.
    */
    thread->wait.cvar = cvar;
    lock->owner = NULL;
    _PR_MD_WAIT_CV(&cvar->md,&lock->ilock, timeout);
    thread->wait.cvar = NULL;
    lock->owner = thread;
    if (_PR_PENDING_INTERRUPT(thread)) {
        PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0);
        thread->flags &= ~_PR_INTERRUPT;
        return PR_FAILURE;
    }

    return PR_SUCCESS;
#else /* _PR_GLOBAL_THREADS_ONLY */

    if ( !_PR_IS_NATIVE_THREAD(thread))
        _PR_INTSOFF(is);

    _PR_CVAR_LOCK(cvar);
    _PR_THREAD_LOCK(thread);

    /* Check for an interrupt delivered before we committed to sleeping. */
    if (_PR_PENDING_INTERRUPT(thread)) {
        PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0);
        thread->flags &= ~_PR_INTERRUPT;
        _PR_CVAR_UNLOCK(cvar);
        _PR_THREAD_UNLOCK(thread);
        if ( !_PR_IS_NATIVE_THREAD(thread))
            _PR_INTSON(is);
        return PR_FAILURE;
    }

    thread->state = _PR_COND_WAIT;
    thread->wait.cvar = cvar;

    /*
    ** Put the caller thread on the condition variable's wait Q
    */
    PR_APPEND_LINK(&thread->waitQLinks, &cvar->condQ);

    /* Note- for global scope threads, we don't put them on the
     * global sleepQ, so each global thread must put itself
     * to sleep only for the time it wants to.
     */
    if ( !_PR_IS_NATIVE_THREAD(thread) ) {
        _PR_SLEEPQ_LOCK(thread->cpu);
        _PR_ADD_SLEEPQ(thread, timeout);
        _PR_SLEEPQ_UNLOCK(thread->cpu);
    }
    _PR_CVAR_UNLOCK(cvar);
    _PR_THREAD_UNLOCK(thread);

    /*
    ** Release lock protecting the condition variable and thereby giving time
    ** to the next thread which can potentially notify on the condition variable
    */
    PR_Unlock(lock);

    PR_LOG(_pr_cvar_lm, PR_LOG_MIN,
           ("PR_Wait: cvar=%p waiting for %d", cvar, timeout));

    rv = _PR_MD_WAIT(thread, timeout);

    /* Woken by notify, timeout, or interrupt: unlink from the condQ
    ** unconditionally (a timed-out waiter is still linked there).
    */
    _PR_CVAR_LOCK(cvar);
    PR_REMOVE_LINK(&thread->waitQLinks);
    _PR_CVAR_UNLOCK(cvar);

    PR_LOG(_pr_cvar_lm, PR_LOG_MIN,
           ("PR_Wait: cvar=%p done waiting", cvar));

    if ( !_PR_IS_NATIVE_THREAD(thread))
        _PR_INTSON(is);

    /* Acquire lock again that we had just relinquished */
    PR_Lock(lock);

    if (_PR_PENDING_INTERRUPT(thread)) {
        PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0);
        thread->flags &= ~_PR_INTERRUPT;
        return PR_FAILURE;
    }

    return rv;
#endif /* _PR_GLOBAL_THREADS_ONLY */
}
michael@0 | 253 | |
michael@0 | 254 | void _PR_NotifyCondVar(PRCondVar *cvar, PRThread *me) |
michael@0 | 255 | { |
michael@0 | 256 | #ifdef _PR_GLOBAL_THREADS_ONLY |
michael@0 | 257 | _PR_MD_NOTIFY_CV(&cvar->md, &cvar->lock->ilock); |
michael@0 | 258 | #else /* _PR_GLOBAL_THREADS_ONLY */ |
michael@0 | 259 | |
michael@0 | 260 | PRCList *q; |
michael@0 | 261 | PRIntn is; |
michael@0 | 262 | |
michael@0 | 263 | if ( !_PR_IS_NATIVE_THREAD(me)) |
michael@0 | 264 | _PR_INTSOFF(is); |
michael@0 | 265 | PR_ASSERT(_PR_IS_NATIVE_THREAD(me) || _PR_MD_GET_INTSOFF() != 0); |
michael@0 | 266 | |
michael@0 | 267 | _PR_CVAR_LOCK(cvar); |
michael@0 | 268 | q = cvar->condQ.next; |
michael@0 | 269 | while (q != &cvar->condQ) { |
michael@0 | 270 | PR_LOG(_pr_cvar_lm, PR_LOG_MIN, ("_PR_NotifyCondVar: cvar=%p", cvar)); |
michael@0 | 271 | if (_PR_THREAD_CONDQ_PTR(q)->wait.cvar) { |
michael@0 | 272 | if (_PR_NotifyThread(_PR_THREAD_CONDQ_PTR(q), me) == PR_TRUE) |
michael@0 | 273 | break; |
michael@0 | 274 | } |
michael@0 | 275 | q = q->next; |
michael@0 | 276 | } |
michael@0 | 277 | _PR_CVAR_UNLOCK(cvar); |
michael@0 | 278 | |
michael@0 | 279 | if ( !_PR_IS_NATIVE_THREAD(me)) |
michael@0 | 280 | _PR_INTSON(is); |
michael@0 | 281 | |
michael@0 | 282 | #endif /* _PR_GLOBAL_THREADS_ONLY */ |
michael@0 | 283 | } |
michael@0 | 284 | |
michael@0 | 285 | /* |
michael@0 | 286 | ** Cndition variable debugging log info. |
michael@0 | 287 | */ |
michael@0 | 288 | PRUint32 _PR_CondVarToString(PRCondVar *cvar, char *buf, PRUint32 buflen) |
michael@0 | 289 | { |
michael@0 | 290 | PRUint32 nb; |
michael@0 | 291 | |
michael@0 | 292 | if (cvar->lock->owner) { |
michael@0 | 293 | nb = PR_snprintf(buf, buflen, "[%p] owner=%ld[%p]", |
michael@0 | 294 | cvar, cvar->lock->owner->id, cvar->lock->owner); |
michael@0 | 295 | } else { |
michael@0 | 296 | nb = PR_snprintf(buf, buflen, "[%p]", cvar); |
michael@0 | 297 | } |
michael@0 | 298 | return nb; |
michael@0 | 299 | } |
michael@0 | 300 | |
/*
** Expire condition variable waits that are ready to expire, charging the
** time elapsed since this CPU's last clock tick against the sleep queue.
** Must run with interrupts disabled (asserted below).
*/
void _PR_ClockInterrupt(void)
{
    PRThread *thread, *me = _PR_MD_CURRENT_THREAD();
    _PRCPU *cpu = me->cpu;
    PRIntervalTime elapsed, now;

    PR_ASSERT(_PR_MD_GET_INTSOFF() != 0);
    /* Figure out how much time elapsed since the last clock tick */
    now = PR_IntervalNow();
    elapsed = now - cpu->last_clock;
    cpu->last_clock = now;

    PR_LOG(_pr_clock_lm, PR_LOG_MAX,
           ("ExpireWaits: elapsed=%lld usec", elapsed));

    while(1) {
        _PR_SLEEPQ_LOCK(cpu);
        if (_PR_SLEEPQ(cpu).next == &_PR_SLEEPQ(cpu)) {
            /* Sleep queue is empty; nothing left to expire. */
            _PR_SLEEPQ_UNLOCK(cpu);
            break;
        }

        /* Head of the sleep queue is the earliest-expiring sleeper. */
        thread = _PR_THREAD_PTR(_PR_SLEEPQ(cpu).next);
        PR_ASSERT(thread->cpu == cpu);

        if (elapsed < thread->sleep) {
            /* Head has time left: charge the elapsed slice and stop. */
            thread->sleep -= elapsed;
            _PR_SLEEPQMAX(thread->cpu) -= elapsed;
            _PR_SLEEPQ_UNLOCK(cpu);
            break;
        }
        _PR_SLEEPQ_UNLOCK(cpu);

        PR_ASSERT(!_PR_IS_NATIVE_THREAD(thread));

        _PR_THREAD_LOCK(thread);

        if (thread->cpu != cpu) {
            /*
            ** The thread was switched to another CPU
            ** between the time we unlocked the sleep
            ** queue and the time we acquired the thread
            ** lock, so it is none of our business now.
            */
            _PR_THREAD_UNLOCK(thread);
            continue;
        }

        /*
        ** Consume this sleeper's amount of elapsed time from the elapsed
        ** time value. The next remaining piece of elapsed time will be
        ** available for the next sleeping thread's timer.
        */
        _PR_SLEEPQ_LOCK(cpu);
        PR_ASSERT(!(thread->flags & _PR_ON_PAUSEQ));
        if (thread->flags & _PR_ON_SLEEPQ) {
            _PR_DEL_SLEEPQ(thread, PR_FALSE);
            elapsed -= thread->sleep;
            _PR_SLEEPQ_UNLOCK(cpu);
        } else {
            /* Thread was already handled; Go get another one */
            _PR_SLEEPQ_UNLOCK(cpu);
            _PR_THREAD_UNLOCK(thread);
            continue;
        }

        /* Notify the thread waiting on the condition variable */
        if (thread->flags & _PR_SUSPENDING) {
            PR_ASSERT((thread->state == _PR_IO_WAIT) ||
                      (thread->state == _PR_COND_WAIT));
            /*
            ** Thread is suspended and its condition timeout
            ** expired. Transfer thread from sleepQ to suspendQ.
            */
            thread->wait.cvar = NULL;
            _PR_MISCQ_LOCK(cpu);
            thread->state = _PR_SUSPENDED;
            _PR_ADD_SUSPENDQ(thread, cpu);
            _PR_MISCQ_UNLOCK(cpu);
        } else {
            if (thread->wait.cvar) {
                PRThreadPriority pri;

                /* Do work very similar to what _PR_NotifyThread does */
                PR_ASSERT( !_PR_IS_NATIVE_THREAD(thread) );

                /* Make thread runnable */
                pri = thread->priority;
                thread->state = _PR_RUNNABLE;
                PR_ASSERT(!(thread->flags & _PR_IDLE_THREAD));

                PR_ASSERT(thread->cpu == cpu);
                _PR_RUNQ_LOCK(cpu);
                _PR_ADD_RUNQ(thread, cpu, pri);
                _PR_RUNQ_UNLOCK(cpu);

                /* A higher-priority wakeup preempts the current thread. */
                if (pri > me->priority)
                    _PR_SET_RESCHED_FLAG();

                thread->wait.cvar = NULL;

                _PR_MD_WAKEUP_WAITER(thread);

            } else if (thread->io_pending == PR_TRUE) {
                /* Need to put IO sleeper back on runq */
                int pri = thread->priority;

                thread->io_suspended = PR_TRUE;
#ifdef WINNT
                /*
                 * For NT, record the cpu on which I/O was issued
                 * I/O cancellation is done on the same cpu
                 */
                thread->md.thr_bound_cpu = cpu;
#endif

                PR_ASSERT(!(thread->flags & _PR_IDLE_THREAD));
                PR_ASSERT(thread->cpu == cpu);
                thread->state = _PR_RUNNABLE;
                _PR_RUNQ_LOCK(cpu);
                _PR_ADD_RUNQ(thread, cpu, pri);
                _PR_RUNQ_UNLOCK(cpu);
            }
        }
        _PR_THREAD_UNLOCK(thread);
    }
}
michael@0 | 432 | |
michael@0 | 433 | /************************************************************************/ |
michael@0 | 434 | |
michael@0 | 435 | /* |
michael@0 | 436 | ** Create a new condition variable. |
michael@0 | 437 | ** "lock" is the lock to use with the condition variable. |
michael@0 | 438 | ** |
michael@0 | 439 | ** Condition variables are synchronization objects that threads can use |
michael@0 | 440 | ** to wait for some condition to occur. |
michael@0 | 441 | ** |
michael@0 | 442 | ** This may fail if memory is tight or if some operating system resource |
michael@0 | 443 | ** is low. |
michael@0 | 444 | */ |
michael@0 | 445 | PR_IMPLEMENT(PRCondVar*) PR_NewCondVar(PRLock *lock) |
michael@0 | 446 | { |
michael@0 | 447 | PRCondVar *cvar; |
michael@0 | 448 | |
michael@0 | 449 | cvar = PR_NEWZAP(PRCondVar); |
michael@0 | 450 | if (cvar) { |
michael@0 | 451 | if (_PR_InitCondVar(cvar, lock) != PR_SUCCESS) { |
michael@0 | 452 | PR_DELETE(cvar); |
michael@0 | 453 | return NULL; |
michael@0 | 454 | } |
michael@0 | 455 | } else { |
michael@0 | 456 | PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0); |
michael@0 | 457 | } |
michael@0 | 458 | return cvar; |
michael@0 | 459 | } |
michael@0 | 460 | |
/*
** Initialize an already-allocated condition variable and bind it to "lock".
** Returns PR_FAILURE (with PR_INSUFFICIENT_RESOURCES_ERROR set) if an
** underlying machine-dependent object cannot be created.
*/
PRStatus _PR_InitCondVar(PRCondVar *cvar, PRLock *lock)
{
    PR_ASSERT(lock != NULL);

#ifdef _PR_GLOBAL_THREADS_ONLY
    /* Global-threads builds also need a machine-dependent cv object. */
    if(_PR_MD_NEW_CV(&cvar->md)) {
        PR_SetError(PR_INSUFFICIENT_RESOURCES_ERROR, 0);
        return PR_FAILURE;
    }
#endif
    if (_PR_MD_NEW_LOCK(&(cvar->ilock)) != PR_SUCCESS) {
        PR_SetError(PR_INSUFFICIENT_RESOURCES_ERROR, 0);
        return PR_FAILURE;
    }
    cvar->lock = lock;
    PR_INIT_CLIST(&cvar->condQ);  /* start with an empty waiter queue */
    return PR_SUCCESS;
}
michael@0 | 479 | |
/*
** Destroy a condition variable. There must be no thread
** waiting on the condvar. The caller is responsible for guaranteeing
** that the condvar is no longer in use.
**
*/
PR_IMPLEMENT(void) PR_DestroyCondVar(PRCondVar *cvar)
{
    /* Release internal resources, then the PRCondVar itself. */
    _PR_FreeCondVar(cvar);
    PR_DELETE(cvar);
}
michael@0 | 491 | |
/*
** Release the internal resources of a condition variable without freeing
** the PRCondVar structure itself. The waiter queue must be empty
** (asserted below).
*/
void _PR_FreeCondVar(PRCondVar *cvar)
{
    PR_ASSERT(cvar->condQ.next == &cvar->condQ);

#ifdef _PR_GLOBAL_THREADS_ONLY
    _PR_MD_FREE_CV(&cvar->md);
#endif
    _PR_MD_FREE_LOCK(&(cvar->ilock));
}
michael@0 | 501 | |
/*
** Wait for a notify on the condition variable. Sleep for "timeout" amount
** of ticks (if "timeout" is zero then the sleep is indefinite). While
** the thread is waiting it unlocks lock. When the wait has
** finished the thread regains control of the condition variable after
** locking the associated lock.
**
** NOTE(review): elsewhere in NSPR an indefinite wait is requested with
** PR_INTERVAL_NO_TIMEOUT rather than zero — confirm the "zero" wording
** above against prinrval.h.
**
** The thread waiting on the condvar will be resumed when the condvar is
** notified (assuming the thread is the next in line to receive the
** notify) or when the timeout elapses.
**
** Returns PR_FAILURE if the caller has not locked the lock associated
** with the condition variable or the thread has been interrupted.
*/
extern PRThread *suspendAllThread;  /* used here only for the sanity assert */
PR_IMPLEMENT(PRStatus) PR_WaitCondVar(PRCondVar *cvar, PRIntervalTime timeout)
{
    PRThread *me = _PR_MD_CURRENT_THREAD();

    PR_ASSERT(cvar->lock->owner == me);
    PR_ASSERT(me != suspendAllThread);
    /* In non-debug builds, fail gracefully instead of asserting. */
    if (cvar->lock->owner != me) return PR_FAILURE;

    return _PR_WaitCondVar(me, cvar, cvar->lock, timeout);
}
michael@0 | 527 | |
/*
** Notify the highest priority thread waiting on the condition
** variable. If a thread is waiting on the condition variable (using
** PR_Wait) then it is awakened and begins waiting on the lock.
**
** Returns PR_FAILURE if the caller does not own the lock associated
** with the condition variable.
*/
PR_IMPLEMENT(PRStatus) PR_NotifyCondVar(PRCondVar *cvar)
{
    PRThread *me = _PR_MD_CURRENT_THREAD();

    PR_ASSERT(cvar->lock->owner == me);
    PR_ASSERT(me != suspendAllThread);
    /* In non-debug builds, fail gracefully instead of asserting. */
    if (cvar->lock->owner != me) return PR_FAILURE;

    _PR_NotifyCondVar(cvar, me);
    return PR_SUCCESS;
}
michael@0 | 544 | |
/*
** Notify all of the threads waiting on the condition variable. All of
** threads are notified in turn. The highest priority thread will
** probably acquire the lock.
**
** Returns PR_FAILURE if the caller does not own the associated lock.
*/
PR_IMPLEMENT(PRStatus) PR_NotifyAllCondVar(PRCondVar *cvar)
{
    PRCList *q;
    PRIntn is;
    PRThread *me = _PR_MD_CURRENT_THREAD();

    PR_ASSERT(cvar->lock->owner == me);
    if (cvar->lock->owner != me) return PR_FAILURE;

#ifdef _PR_GLOBAL_THREADS_ONLY
    /* All threads are native: broadcast via the MD layer. */
    _PR_MD_NOTIFYALL_CV(&cvar->md, &cvar->lock->ilock);
    return PR_SUCCESS;
#else /* _PR_GLOBAL_THREADS_ONLY */
    if ( !_PR_IS_NATIVE_THREAD(me))
        _PR_INTSOFF(is);
    _PR_CVAR_LOCK(cvar);
    /* Unlike _PR_NotifyCondVar, notify every queued waiter, not just one. */
    q = cvar->condQ.next;
    while (q != &cvar->condQ) {
        PR_LOG(_pr_cvar_lm, PR_LOG_MIN, ("PR_NotifyAll: cvar=%p", cvar));
        _PR_NotifyThread(_PR_THREAD_CONDQ_PTR(q), me);
        q = q->next;
    }
    _PR_CVAR_UNLOCK(cvar);
    if (!_PR_IS_NATIVE_THREAD(me))
        _PR_INTSON(is);

    return PR_SUCCESS;
#endif /* _PR_GLOBAL_THREADS_ONLY */
}
michael@0 | 579 | |
michael@0 | 580 | |
michael@0 | 581 | /*********************************************************************/ |
michael@0 | 582 | /*********************************************************************/ |
michael@0 | 583 | /********************ROUTINES FOR DCE EMULATION***********************/ |
michael@0 | 584 | /*********************************************************************/ |
michael@0 | 585 | /*********************************************************************/ |
michael@0 | 586 | #include "prpdce.h" |
michael@0 | 587 | |
michael@0 | 588 | PR_IMPLEMENT(PRCondVar*) PRP_NewNakedCondVar(void) |
michael@0 | 589 | { |
michael@0 | 590 | PRCondVar *cvar = PR_NEWZAP(PRCondVar); |
michael@0 | 591 | if (NULL != cvar) |
michael@0 | 592 | { |
michael@0 | 593 | if (_PR_MD_NEW_LOCK(&(cvar->ilock)) == PR_FAILURE) |
michael@0 | 594 | { |
michael@0 | 595 | PR_DELETE(cvar); cvar = NULL; |
michael@0 | 596 | } |
michael@0 | 597 | else |
michael@0 | 598 | { |
michael@0 | 599 | PR_INIT_CLIST(&cvar->condQ); |
michael@0 | 600 | cvar->lock = _PR_NAKED_CV_LOCK; |
michael@0 | 601 | } |
michael@0 | 602 | |
michael@0 | 603 | } |
michael@0 | 604 | return cvar; |
michael@0 | 605 | } |
michael@0 | 606 | |
/*
** Destroy a naked condition variable created by PRP_NewNakedCondVar.
** The waiter queue must be empty, and the cvar must really be naked
** (both asserted).
*/
PR_IMPLEMENT(void) PRP_DestroyNakedCondVar(PRCondVar *cvar)
{
    PR_ASSERT(cvar->condQ.next == &cvar->condQ);
    PR_ASSERT(_PR_NAKED_CV_LOCK == cvar->lock);

    _PR_MD_FREE_LOCK(&(cvar->ilock));

    PR_DELETE(cvar);
}
michael@0 | 616 | |
michael@0 | 617 | PR_IMPLEMENT(PRStatus) PRP_NakedWait( |
michael@0 | 618 | PRCondVar *cvar, PRLock *lock, PRIntervalTime timeout) |
michael@0 | 619 | { |
michael@0 | 620 | PRThread *me = _PR_MD_CURRENT_THREAD(); |
michael@0 | 621 | PR_ASSERT(_PR_NAKED_CV_LOCK == cvar->lock); |
michael@0 | 622 | return _PR_WaitCondVar(me, cvar, lock, timeout); |
michael@0 | 623 | } /* PRP_NakedWait */ |
michael@0 | 624 | |
michael@0 | 625 | PR_IMPLEMENT(PRStatus) PRP_NakedNotify(PRCondVar *cvar) |
michael@0 | 626 | { |
michael@0 | 627 | PRThread *me = _PR_MD_CURRENT_THREAD(); |
michael@0 | 628 | PR_ASSERT(_PR_NAKED_CV_LOCK == cvar->lock); |
michael@0 | 629 | |
michael@0 | 630 | _PR_NotifyCondVar(cvar, me); |
michael@0 | 631 | |
michael@0 | 632 | return PR_SUCCESS; |
michael@0 | 633 | } /* PRP_NakedNotify */ |
michael@0 | 634 | |
/*
** Wake every waiter on a naked condition variable. The cvar's internal
** lock (ilock) is taken directly here since a naked cvar has no
** associated PRLock. Always returns PR_SUCCESS.
*/
PR_IMPLEMENT(PRStatus) PRP_NakedBroadcast(PRCondVar *cvar)
{
    PRCList *q;
    PRIntn is;
    PRThread *me = _PR_MD_CURRENT_THREAD();
    PR_ASSERT(_PR_NAKED_CV_LOCK == cvar->lock);

    if ( !_PR_IS_NATIVE_THREAD(me)) _PR_INTSOFF(is);
    _PR_MD_LOCK( &(cvar->ilock) );
    /* Notify every queued waiter in turn. */
    q = cvar->condQ.next;
    while (q != &cvar->condQ) {
        PR_LOG(_pr_cvar_lm, PR_LOG_MIN, ("PR_NotifyAll: cvar=%p", cvar));
        _PR_NotifyThread(_PR_THREAD_CONDQ_PTR(q), me);
        q = q->next;
    }
    _PR_MD_UNLOCK( &(cvar->ilock) );
    if (!_PR_IS_NATIVE_THREAD(me)) _PR_INTSON(is);

    return PR_SUCCESS;
}  /* PRP_NakedBroadcast */
michael@0 | 655 |