Wed, 31 Dec 2014 06:09:35 +0100
Cloned upstream origin tor-browser at tor-browser-31.3.0esr-4.5-1-build1,
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f, for hacking purposes.
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "primpl.h"
#include <signal.h>
#include <string.h>

#if defined(WIN95)
/*
** Some local variables report warnings on Win95 because the code paths
** using them are conditioned on HAVE_CUSTOM_USER_THREADS.
** The pragma suppresses the warning.
*/
#pragma warning(disable : 4101)
#endif

/* _pr_activeLock protects the following global variables */
PRLock *_pr_activeLock;
PRInt32 _pr_primordialExitCount;   /* In PR_Cleanup(), the primordial thread
                                    * waits until all other user (non-system)
                                    * threads have terminated before it exits.
                                    * So whenever we decrement _pr_userActive,
                                    * it is compared with
                                    * _pr_primordialExitCount.
                                    * If the primordial thread is a system
                                    * thread, then _pr_primordialExitCount
                                    * is 0. If the primordial thread is
                                    * itself a user thread, then
                                    * _pr_primordialExitCount is 1.
                                    */
PRCondVar *_pr_primordialExitCVar; /* When _pr_userActive is decremented to
                                    * _pr_primordialExitCount, this condition
                                    * variable is notified.
                                    */
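
/*
** Illustrative sketch (not from the original file): how PR_Cleanup() is
** expected to use the two globals above. A minimal sketch, assuming the
** wait loop in prinit.c looks roughly like this; the helper name is
** hypothetical.
*/
#if 0
static void _PR_WaitForUserThreadsSketch(void)
{
    PR_Lock(_pr_activeLock);
    /* Block until only the primordial user thread (if any) remains. */
    while (_pr_userActive > _pr_primordialExitCount) {
        PR_WaitCondVar(_pr_primordialExitCVar, PR_INTERVAL_NO_TIMEOUT);
    }
    PR_Unlock(_pr_activeLock);
}
#endif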

PRLock *_pr_deadQLock;
PRUint32 _pr_numNativeDead;
PRUint32 _pr_numUserDead;
PRCList _pr_deadNativeQ;
PRCList _pr_deadUserQ;

PRUint32 _pr_join_counter;

PRUint32 _pr_local_threads;
PRUint32 _pr_global_threads;

PRBool suspendAllOn = PR_FALSE;
PRThread *suspendAllThread = NULL;

extern PRCList _pr_active_global_threadQ;
extern PRCList _pr_active_local_threadQ;

static void _PR_DecrActiveThreadCount(PRThread *thread);
static PRThread *_PR_AttachThread(PRThreadType, PRThreadPriority, PRThreadStack *);
static void _PR_InitializeNativeStack(PRThreadStack *ts);
static void _PR_InitializeRecycledThread(PRThread *thread);
static void _PR_UserRunThread(void);

void _PR_InitThreads(PRThreadType type, PRThreadPriority priority,
                     PRUintn maxPTDs)
{
    PRThread *thread;
    PRThreadStack *stack;

    PR_ASSERT(priority == PR_PRIORITY_NORMAL);

    _pr_terminationCVLock = PR_NewLock();
    _pr_activeLock = PR_NewLock();

#ifndef HAVE_CUSTOM_USER_THREADS
    stack = PR_NEWZAP(PRThreadStack);
#ifdef HAVE_STACK_GROWING_UP
    stack->stackTop = (char*) ((((long)&type) >> _pr_pageShift)
                               << _pr_pageShift);
#else
#if defined(SOLARIS) || defined (UNIXWARE) && defined (USR_SVR4_THREADS)
    stack->stackTop = (char*) &thread;
#else
    stack->stackTop = (char*) ((((long)&type + _pr_pageSize - 1)
                                >> _pr_pageShift) << _pr_pageShift);
#endif
#endif
#else
    /* If stack is NULL, we're using custom user threads like NT fibers. */
    stack = PR_NEWZAP(PRThreadStack);
    if (stack) {
        stack->stackSize = 0;
        _PR_InitializeNativeStack(stack);
    }
#endif /* HAVE_CUSTOM_USER_THREADS */

    thread = _PR_AttachThread(type, priority, stack);
    if (thread) {
        _PR_MD_SET_CURRENT_THREAD(thread);

        if (type == PR_SYSTEM_THREAD) {
            thread->flags = _PR_SYSTEM;
            _pr_systemActive++;
            _pr_primordialExitCount = 0;
        } else {
            _pr_userActive++;
            _pr_primordialExitCount = 1;
        }
        thread->no_sched = 1;
        _pr_primordialExitCVar = PR_NewCondVar(_pr_activeLock);
    }

    if (!thread) PR_Abort();
#ifdef _PR_LOCAL_THREADS_ONLY
    thread->flags |= _PR_PRIMORDIAL;
#else
    thread->flags |= _PR_PRIMORDIAL | _PR_GLOBAL_SCOPE;
#endif

    /*
     * Needs _PR_PRIMORDIAL flag set before calling
     * _PR_MD_INIT_THREAD()
     */
    if (_PR_MD_INIT_THREAD(thread) == PR_FAILURE) {
        /*
         * XXX do what?
         */
    }

    if (_PR_IS_NATIVE_THREAD(thread)) {
        PR_APPEND_LINK(&thread->active, &_PR_ACTIVE_GLOBAL_THREADQ());
        _pr_global_threads++;
    } else {
        PR_APPEND_LINK(&thread->active, &_PR_ACTIVE_LOCAL_THREADQ());
        _pr_local_threads++;
    }

    _pr_recycleThreads = 0;
    _pr_deadQLock = PR_NewLock();
    _pr_numNativeDead = 0;
    _pr_numUserDead = 0;
    PR_INIT_CLIST(&_pr_deadNativeQ);
    PR_INIT_CLIST(&_pr_deadUserQ);
}

void _PR_CleanupThreads(void)
{
    if (_pr_terminationCVLock) {
        PR_DestroyLock(_pr_terminationCVLock);
        _pr_terminationCVLock = NULL;
    }
    if (_pr_activeLock) {
        PR_DestroyLock(_pr_activeLock);
        _pr_activeLock = NULL;
    }
    if (_pr_primordialExitCVar) {
        PR_DestroyCondVar(_pr_primordialExitCVar);
        _pr_primordialExitCVar = NULL;
    }
    /* TODO _pr_dead{Native,User}Q need to be deleted */
    if (_pr_deadQLock) {
        PR_DestroyLock(_pr_deadQLock);
        _pr_deadQLock = NULL;
    }
}

/*
** Initialize a stack for a native thread
*/
static void _PR_InitializeNativeStack(PRThreadStack *ts)
{
    if( ts && (ts->stackTop == 0) ) {
        ts->allocSize = ts->stackSize;

        /*
        ** Setup stackTop and stackBottom values.
        */
#ifdef HAVE_STACK_GROWING_UP
        ts->allocBase = (char*) ((((long)&ts) >> _pr_pageShift)
                                 << _pr_pageShift);
        ts->stackBottom = ts->allocBase + ts->stackSize;
        ts->stackTop = ts->allocBase;
#else
        ts->allocBase = (char*) ((((long)&ts + _pr_pageSize - 1)
                                  >> _pr_pageShift) << _pr_pageShift);
        ts->stackTop = ts->allocBase;
        ts->stackBottom = ts->allocBase - ts->stackSize;
#endif
    }
}
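
/*
** Illustrative sketch (not from the original file): the rounding
** arithmetic above aligns an address to a page boundary. For example,
** with _pr_pageSize == 4096 (_pr_pageShift == 12), rounding 0x12345 up
** gives 0x13000, while shifting it down gives 0x12000. The helper below
** is hypothetical and mirrors the round-up form used above.
*/
#if 0
static char *_PR_RoundUpToPageSketch(char *addr)
{
    return (char*) ((((long)addr + _pr_pageSize - 1)
                     >> _pr_pageShift) << _pr_pageShift);
}
#endif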

void _PR_NotifyJoinWaiters(PRThread *thread)
{
    /*
    ** Handle joinable threads. Change the state to waiting for join.
    ** Remove from our run Q and put it on global waiting to join Q.
    ** Notify on our "termination" condition variable so that joining
    ** thread will know about our termination. Switch our context and
    ** come back later on to continue the cleanup.
    */
    PR_ASSERT(thread == _PR_MD_CURRENT_THREAD());
    if (thread->term != NULL) {
        PR_Lock(_pr_terminationCVLock);
        _PR_THREAD_LOCK(thread);
        thread->state = _PR_JOIN_WAIT;
        if ( !_PR_IS_NATIVE_THREAD(thread) ) {
            _PR_MISCQ_LOCK(thread->cpu);
            _PR_ADD_JOINQ(thread, thread->cpu);
            _PR_MISCQ_UNLOCK(thread->cpu);
        }
        _PR_THREAD_UNLOCK(thread);
        PR_NotifyCondVar(thread->term);
        PR_Unlock(_pr_terminationCVLock);
        _PR_MD_WAIT(thread, PR_INTERVAL_NO_TIMEOUT);
        PR_ASSERT(thread->state != _PR_JOIN_WAIT);
    }
}
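
/*
** Illustrative sketch (not from the original file): the joining side of
** the protocol above, loosely modeled on PR_JoinThread(). A minimal
** sketch under that assumption; the real code also reclaims the peer
** after the wait, which is omitted here.
*/
#if 0
static void _PR_JoinSketch(PRThread *peer)
{
    PR_Lock(_pr_terminationCVLock);
    /* Sleep on the peer's "termination" condition variable until it
     * has reached _PR_JOIN_WAIT and notified us. */
    while (peer->state != _PR_JOIN_WAIT) {
        PR_WaitCondVar(peer->term, PR_INTERVAL_NO_TIMEOUT);
    }
    PR_Unlock(_pr_terminationCVLock);
}
#endif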

/*
 * Zero some of the data members of a recycled thread.
 *
 * Note that we can do this either when a dead thread is added to
 * the dead thread queue or when it is reused. Here, we are doing
 * this lazily, when the thread is reused in _PR_CreateThread().
 */
static void _PR_InitializeRecycledThread(PRThread *thread)
{
    /*
     * Assert that the following data members are already zeroed
     * by _PR_CleanupThread().
     */
#ifdef DEBUG
    if (thread->privateData) {
        unsigned int i;
        for (i = 0; i < thread->tpdLength; i++) {
            PR_ASSERT(thread->privateData[i] == NULL);
        }
    }
#endif
    PR_ASSERT(thread->dumpArg == 0 && thread->dump == 0);
    PR_ASSERT(thread->errorString == 0 && thread->errorStringSize == 0);
    PR_ASSERT(thread->errorStringLength == 0);
    PR_ASSERT(thread->name == 0);

    /* Reset data members in thread structure */
    thread->errorCode = thread->osErrorCode = 0;
    thread->io_pending = thread->io_suspended = PR_FALSE;
    thread->environment = 0;
    PR_INIT_CLIST(&thread->lockList);
}

PRStatus _PR_RecycleThread(PRThread *thread)
{
    if ( _PR_IS_NATIVE_THREAD(thread) &&
         _PR_NUM_DEADNATIVE < _pr_recycleThreads) {
        _PR_DEADQ_LOCK;
        PR_APPEND_LINK(&thread->links, &_PR_DEADNATIVEQ);
        _PR_INC_DEADNATIVE;
        _PR_DEADQ_UNLOCK;
        return (PR_SUCCESS);
    } else if ( !_PR_IS_NATIVE_THREAD(thread) &&
                _PR_NUM_DEADUSER < _pr_recycleThreads) {
        _PR_DEADQ_LOCK;
        PR_APPEND_LINK(&thread->links, &_PR_DEADUSERQ);
        _PR_INC_DEADUSER;
        _PR_DEADQ_UNLOCK;
        return (PR_SUCCESS);
    }
    return (PR_FAILURE);
}

/*
 * Decrement the active thread count, either _pr_systemActive or
 * _pr_userActive, depending on whether the thread is a system thread
 * or a user thread. If all the user threads, except possibly
 * the primordial thread, have terminated, we notify the primordial
 * thread of this condition.
 *
 * Since this function will lock _pr_activeLock, do not call this
 * function while holding the _pr_activeLock lock, as this will result
 * in a deadlock.
 */

static void
_PR_DecrActiveThreadCount(PRThread *thread)
{
    PR_Lock(_pr_activeLock);
    if (thread->flags & _PR_SYSTEM) {
        _pr_systemActive--;
    } else {
        _pr_userActive--;
        if (_pr_userActive == _pr_primordialExitCount) {
            PR_NotifyCondVar(_pr_primordialExitCVar);
        }
    }
    PR_Unlock(_pr_activeLock);
}
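
/*
** Illustrative sketch (not from the original file): because the function
** above takes _pr_activeLock itself, a caller holding that lock must
** release it first. A hypothetical caller would look like this.
*/
#if 0
    PR_Lock(_pr_activeLock);
    /* ... examine _pr_userActive / _pr_systemActive ... */
    PR_Unlock(_pr_activeLock);           /* drop the lock first */
    _PR_DecrActiveThreadCount(thread);   /* re-acquires it internally */
#endif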

/*
** Detach thread structure
*/
static void
_PR_DestroyThread(PRThread *thread)
{
    _PR_MD_FREE_LOCK(&thread->threadLock);
    PR_DELETE(thread);
}

void
_PR_NativeDestroyThread(PRThread *thread)
{
    if(thread->term) {
        PR_DestroyCondVar(thread->term);
        thread->term = 0;
    }
    if (NULL != thread->privateData) {
        PR_ASSERT(0 != thread->tpdLength);
        PR_DELETE(thread->privateData);
        thread->tpdLength = 0;
    }
    PR_DELETE(thread->stack);
    _PR_DestroyThread(thread);
}

void
_PR_UserDestroyThread(PRThread *thread)
{
    if(thread->term) {
        PR_DestroyCondVar(thread->term);
        thread->term = 0;
    }
    if (NULL != thread->privateData) {
        PR_ASSERT(0 != thread->tpdLength);
        PR_DELETE(thread->privateData);
        thread->tpdLength = 0;
    }
    _PR_MD_FREE_LOCK(&thread->threadLock);
    if (thread->threadAllocatedOnStack == 1) {
        _PR_MD_CLEAN_THREAD(thread);
        /*
         * Because the no_sched field is set, this thread/stack will
         * not be re-used until the flag is cleared by the thread
         * we will context switch to.
         */
        _PR_FreeStack(thread->stack);
    } else {
#ifdef WINNT
        _PR_MD_CLEAN_THREAD(thread);
#else
        /*
         * This assertion does not apply to NT. On NT, every fiber
         * has its threadAllocatedOnStack equal to 0. Elsewhere,
         * only the primordial thread has its threadAllocatedOnStack
         * equal to 0.
         */
        PR_ASSERT(thread->flags & _PR_PRIMORDIAL);
#endif
    }
}

/*
** Run a thread's start function. When the start function returns the
** thread is done executing and no longer needs the CPU. If there are no
** more user threads running then we can exit the program.
*/
void _PR_NativeRunThread(void *arg)
{
    PRThread *thread = (PRThread *)arg;

    _PR_MD_SET_CURRENT_THREAD(thread);

    _PR_MD_SET_CURRENT_CPU(NULL);

    /* Set up the thread stack information */
    _PR_InitializeNativeStack(thread->stack);

    /* Set up the thread md information */
    if (_PR_MD_INIT_THREAD(thread) == PR_FAILURE) {
        /*
         * thread failed to initialize itself, possibly due to
         * failure to allocate per-thread resources
         */
        return;
    }

    while(1) {
        thread->state = _PR_RUNNING;

        /*
         * Add to list of active threads
         */
        PR_Lock(_pr_activeLock);
        PR_APPEND_LINK(&thread->active, &_PR_ACTIVE_GLOBAL_THREADQ());
        _pr_global_threads++;
        PR_Unlock(_pr_activeLock);

        (*thread->startFunc)(thread->arg);

        /*
         * The following two assertions are meant for NT asynch io.
         *
         * The thread should have no asynch io in progress when it
         * exits, otherwise the overlapped buffer, which is part of
         * the thread structure, would become invalid.
         */
        PR_ASSERT(thread->io_pending == PR_FALSE);
        /*
         * This assertion enforces the programming guideline that
         * if an io function times out or is interrupted, the thread
         * should close the fd to force the asynch io to abort
         * before it exits. Right now, closing the fd is the only
         * way to clear the io_suspended flag.
         */
        PR_ASSERT(thread->io_suspended == PR_FALSE);

        /*
         * remove thread from list of active threads
         */
        PR_Lock(_pr_activeLock);
        PR_REMOVE_LINK(&thread->active);
        _pr_global_threads--;
        PR_Unlock(_pr_activeLock);

        PR_LOG(_pr_thread_lm, PR_LOG_MIN, ("thread exiting"));

        /* All done, time to go away */
        _PR_CleanupThread(thread);

        _PR_NotifyJoinWaiters(thread);

        _PR_DecrActiveThreadCount(thread);

        thread->state = _PR_DEAD_STATE;

        if (!_pr_recycleThreads || (_PR_RecycleThread(thread) ==
                                    PR_FAILURE)) {
            /*
             * thread not recycled
             * platform-specific thread exit processing
             *   - for stuff like releasing native-thread resources, etc.
             */
            _PR_MD_EXIT_THREAD(thread);
            /*
             * Free memory allocated for the thread
             */
            _PR_NativeDestroyThread(thread);
            /*
             * thread gone, cannot de-reference thread now
             */
            return;
        }

        /* Now wait for someone to activate us again... */
        _PR_MD_WAIT(thread, PR_INTERVAL_NO_TIMEOUT);
    }
}

static void _PR_UserRunThread(void)
{
    PRThread *thread = _PR_MD_CURRENT_THREAD();
    PRIntn is;

    if (_MD_LAST_THREAD())
        _MD_LAST_THREAD()->no_sched = 0;

#ifdef HAVE_CUSTOM_USER_THREADS
    if (thread->stack == NULL) {
        thread->stack = PR_NEWZAP(PRThreadStack);
        _PR_InitializeNativeStack(thread->stack);
    }
#endif /* HAVE_CUSTOM_USER_THREADS */

    while(1) {
        /* Run thread main */
        if ( !_PR_IS_NATIVE_THREAD(thread)) _PR_MD_SET_INTSOFF(0);

        /*
         * Add to list of active threads
         */
        if (!(thread->flags & _PR_IDLE_THREAD)) {
            PR_Lock(_pr_activeLock);
            PR_APPEND_LINK(&thread->active, &_PR_ACTIVE_LOCAL_THREADQ());
            _pr_local_threads++;
            PR_Unlock(_pr_activeLock);
        }

        (*thread->startFunc)(thread->arg);

        /*
         * The following two assertions are meant for NT asynch io.
         *
         * The thread should have no asynch io in progress when it
         * exits, otherwise the overlapped buffer, which is part of
         * the thread structure, would become invalid.
         */
        PR_ASSERT(thread->io_pending == PR_FALSE);
        /*
         * This assertion enforces the programming guideline that
         * if an io function times out or is interrupted, the thread
         * should close the fd to force the asynch io to abort
         * before it exits. Right now, closing the fd is the only
         * way to clear the io_suspended flag.
         */
        PR_ASSERT(thread->io_suspended == PR_FALSE);

        PR_Lock(_pr_activeLock);
        /*
         * remove thread from list of active threads
         */
        if (!(thread->flags & _PR_IDLE_THREAD)) {
            PR_REMOVE_LINK(&thread->active);
            _pr_local_threads--;
        }
        PR_Unlock(_pr_activeLock);
        PR_LOG(_pr_thread_lm, PR_LOG_MIN, ("thread exiting"));

        /* All done, time to go away */
        _PR_CleanupThread(thread);

        _PR_INTSOFF(is);

        _PR_NotifyJoinWaiters(thread);

        _PR_DecrActiveThreadCount(thread);

        thread->state = _PR_DEAD_STATE;

        if (!_pr_recycleThreads || (_PR_RecycleThread(thread) ==
                                    PR_FAILURE)) {
            /*
            ** Destroy the thread resources
            */
            _PR_UserDestroyThread(thread);
        }

        /*
        ** Find another user thread to run. This cpu has finished the
        ** previous thread's main and is now ready to run another thread.
        */
        {
            PRInt32 is;
            _PR_INTSOFF(is);
            _PR_MD_SWITCH_CONTEXT(thread);
        }

        /* Will land here when we get scheduled again if we are recycling... */
    }
}

void _PR_SetThreadPriority(PRThread *thread, PRThreadPriority newPri)
{
    PRThread *me = _PR_MD_CURRENT_THREAD();
    PRIntn is;

    if ( _PR_IS_NATIVE_THREAD(thread) ) {
        _PR_MD_SET_PRIORITY(&(thread->md), newPri);
        return;
    }

    if (!_PR_IS_NATIVE_THREAD(me))
        _PR_INTSOFF(is);
    _PR_THREAD_LOCK(thread);
    if (newPri != thread->priority) {
        _PRCPU *cpu = thread->cpu;

        switch (thread->state) {
        case _PR_RUNNING:
            /* Change my priority */

            _PR_RUNQ_LOCK(cpu);
            thread->priority = newPri;
            if (_PR_RUNQREADYMASK(cpu) >> (newPri + 1)) {
                if (!_PR_IS_NATIVE_THREAD(me))
                    _PR_SET_RESCHED_FLAG();
            }
            _PR_RUNQ_UNLOCK(cpu);
            break;

        case _PR_RUNNABLE:

            _PR_RUNQ_LOCK(cpu);
            /* Move to different runQ */
            _PR_DEL_RUNQ(thread);
            thread->priority = newPri;
            PR_ASSERT(!(thread->flags & _PR_IDLE_THREAD));
            _PR_ADD_RUNQ(thread, cpu, newPri);
            _PR_RUNQ_UNLOCK(cpu);

            if (newPri > me->priority) {
                if (!_PR_IS_NATIVE_THREAD(me))
                    _PR_SET_RESCHED_FLAG();
            }

            break;

        case _PR_LOCK_WAIT:
        case _PR_COND_WAIT:
        case _PR_IO_WAIT:
        case _PR_SUSPENDED:

            thread->priority = newPri;
            break;
        }
    }
    _PR_THREAD_UNLOCK(thread);
    if (!_PR_IS_NATIVE_THREAD(me))
        _PR_INTSON(is);
}

/*
** Suspend the named thread
*/
static void _PR_Suspend(PRThread *thread)
{
    PRIntn is;
    PRThread *me = _PR_MD_CURRENT_THREAD();

    PR_ASSERT(thread != me);
    PR_ASSERT(!_PR_IS_NATIVE_THREAD(thread) || (!thread->cpu));

    if (!_PR_IS_NATIVE_THREAD(me))
        _PR_INTSOFF(is);
    _PR_THREAD_LOCK(thread);
    switch (thread->state) {
    case _PR_RUNNABLE:
        if (!_PR_IS_NATIVE_THREAD(thread)) {
            _PR_RUNQ_LOCK(thread->cpu);
            _PR_DEL_RUNQ(thread);
            _PR_RUNQ_UNLOCK(thread->cpu);

            _PR_MISCQ_LOCK(thread->cpu);
            _PR_ADD_SUSPENDQ(thread, thread->cpu);
            _PR_MISCQ_UNLOCK(thread->cpu);
        } else {
            /*
             * Only LOCAL threads are suspended by _PR_Suspend
             */
            PR_ASSERT(0);
        }
        thread->state = _PR_SUSPENDED;
        break;

    case _PR_RUNNING:
        /*
         * The thread being suspended should be a LOCAL thread with
         * _pr_numCPUs == 1. Hence, the thread cannot be in RUNNING state
         */
        PR_ASSERT(0);
        break;

    case _PR_LOCK_WAIT:
    case _PR_IO_WAIT:
    case _PR_COND_WAIT:
        if (_PR_IS_NATIVE_THREAD(thread)) {
            _PR_MD_SUSPEND_THREAD(thread);
        }
        thread->flags |= _PR_SUSPENDING;
        break;

    default:
        PR_Abort();
    }
    _PR_THREAD_UNLOCK(thread);
    if (!_PR_IS_NATIVE_THREAD(me))
        _PR_INTSON(is);
}

static void _PR_Resume(PRThread *thread)
{
    PRThreadPriority pri;
    PRIntn is;
    PRThread *me = _PR_MD_CURRENT_THREAD();

    if (!_PR_IS_NATIVE_THREAD(me))
        _PR_INTSOFF(is);
    _PR_THREAD_LOCK(thread);
    switch (thread->state) {
    case _PR_SUSPENDED:
        thread->state = _PR_RUNNABLE;
        thread->flags &= ~_PR_SUSPENDING;
        if (!_PR_IS_NATIVE_THREAD(thread)) {
            _PR_MISCQ_LOCK(thread->cpu);
            _PR_DEL_SUSPENDQ(thread);
            _PR_MISCQ_UNLOCK(thread->cpu);

            pri = thread->priority;

            _PR_RUNQ_LOCK(thread->cpu);
            _PR_ADD_RUNQ(thread, thread->cpu, pri);
            _PR_RUNQ_UNLOCK(thread->cpu);

            if (pri > _PR_MD_CURRENT_THREAD()->priority) {
                if (!_PR_IS_NATIVE_THREAD(me))
                    _PR_SET_RESCHED_FLAG();
            }
        } else {
            PR_ASSERT(0);
        }
        break;

    case _PR_IO_WAIT:
    case _PR_COND_WAIT:
        thread->flags &= ~_PR_SUSPENDING;
        /* PR_ASSERT(thread->wait.monitor->stickyCount == 0); */
        break;

    case _PR_LOCK_WAIT:
    {
        PRLock *wLock = thread->wait.lock;

        thread->flags &= ~_PR_SUSPENDING;

        _PR_LOCK_LOCK(wLock);
        if (thread->wait.lock->owner == 0) {
            _PR_UnblockLockWaiter(thread->wait.lock);
        }
        _PR_LOCK_UNLOCK(wLock);
        break;
    }
    case _PR_RUNNABLE:
        break;
    case _PR_RUNNING:
        /*
         * The thread being resumed should be a LOCAL thread with
         * _pr_numCPUs == 1. Hence, the thread cannot be in RUNNING state
         */
        PR_ASSERT(0);
        break;

    default:
        /*
         * thread should have been in one of the above-listed blocked states
         * (_PR_JOIN_WAIT, _PR_IO_WAIT, _PR_UNBORN, _PR_DEAD_STATE)
         */
        PR_Abort();
    }
    _PR_THREAD_UNLOCK(thread);
    if (!_PR_IS_NATIVE_THREAD(me))
        _PR_INTSON(is);
}

#if !defined(_PR_LOCAL_THREADS_ONLY) && defined(XP_UNIX)
static PRThread *get_thread(_PRCPU *cpu, PRBool *wakeup_cpus)
{
    PRThread *thread;
    PRIntn pri;
    PRUint32 r;
    PRCList *qp;
    PRIntn priMin, priMax;

    _PR_RUNQ_LOCK(cpu);
    r = _PR_RUNQREADYMASK(cpu);
    if (r==0) {
        priMin = priMax = PR_PRIORITY_FIRST;
    } else if (r == (1<<PR_PRIORITY_NORMAL) ) {
        priMin = priMax = PR_PRIORITY_NORMAL;
    } else {
        priMin = PR_PRIORITY_FIRST;
        priMax = PR_PRIORITY_LAST;
    }
    thread = NULL;
    for (pri = priMax; pri >= priMin ; pri-- ) {
        if (r & (1 << pri)) {
            for (qp = _PR_RUNQ(cpu)[pri].next;
                 qp != &_PR_RUNQ(cpu)[pri];
                 qp = qp->next) {
                thread = _PR_THREAD_PTR(qp);
                /*
                 * skip non-schedulable threads
                 */
                PR_ASSERT(!(thread->flags & _PR_IDLE_THREAD));
                if (thread->no_sched) {
                    thread = NULL;
                    /*
                     * Need to wake up the cpus to avoid missing a
                     * runnable thread.
                     * Waking up all CPUs needs to happen only once.
                     */
                    *wakeup_cpus = PR_TRUE;
                    continue;
                } else if (thread->flags & _PR_BOUND_THREAD) {
                    /*
                     * Thread bound to cpu 0
                     */
                    thread = NULL;
#ifdef IRIX
                    _PR_MD_WAKEUP_PRIMORDIAL_CPU();
#endif
                    continue;
                } else if (thread->io_pending == PR_TRUE) {
                    /*
                     * A thread that is blocked for I/O needs to run
                     * on the same cpu on which it was blocked. This is because
                     * the cpu's ioq is accessed without lock protection and scheduling
                     * the thread on a different cpu would preclude this optimization.
                     */
                    thread = NULL;
                    continue;
                } else {
                    /* Pull thread off of its run queue */
                    _PR_DEL_RUNQ(thread);
                    _PR_RUNQ_UNLOCK(cpu);
                    return(thread);
                }
            }
        }
        thread = NULL;
    }
    _PR_RUNQ_UNLOCK(cpu);
    return(thread);
}
#endif /* !defined(_PR_LOCAL_THREADS_ONLY) && defined(XP_UNIX) */

/*
** Schedule this native thread by finding the highest priority nspr
** thread that is ready to run.
**
** Note: everyone really needs to call _PR_MD_SWITCH_CONTEXT() (which
** calls _PR_Schedule()) rather than calling _PR_Schedule() directly.
** Otherwise, if there is initialization required for switching from
** SWITCH_CONTEXT, it will not get done!
*/
void _PR_Schedule(void)
{
    PRThread *thread, *me = _PR_MD_CURRENT_THREAD();
    _PRCPU *cpu = _PR_MD_CURRENT_CPU();
    PRIntn pri;
    PRUint32 r;
    PRCList *qp;
    PRIntn priMin, priMax;
#if !defined(_PR_LOCAL_THREADS_ONLY) && defined(XP_UNIX)
    PRBool wakeup_cpus;
#endif

    /* Interrupts must be disabled */
    PR_ASSERT(_PR_IS_NATIVE_THREAD(me) || _PR_MD_GET_INTSOFF() != 0);

    /* Since we are rescheduling, we no longer want the resched flag set */
    _PR_CLEAR_RESCHED_FLAG();

    /*
    ** Find highest priority thread to run. Bigger priority numbers are
    ** higher priority threads
    */
    _PR_RUNQ_LOCK(cpu);
    /*
     * if we are in SuspendAll mode, can schedule only the thread
     * that called PR_SuspendAll
     *
     * The thread may be ready to run now, after completing an I/O
     * operation, for example
     */
    if ((thread = suspendAllThread) != 0) {
        if ((!(thread->no_sched)) && (thread->state == _PR_RUNNABLE)) {
            /* Pull thread off of its run queue */
            _PR_DEL_RUNQ(thread);
            _PR_RUNQ_UNLOCK(cpu);
            goto found_thread;
        } else {
            thread = NULL;
            _PR_RUNQ_UNLOCK(cpu);
            goto idle_thread;
        }
    }
    r = _PR_RUNQREADYMASK(cpu);
    if (r==0) {
        priMin = priMax = PR_PRIORITY_FIRST;
    } else if (r == (1<<PR_PRIORITY_NORMAL) ) {
        priMin = priMax = PR_PRIORITY_NORMAL;
    } else {
        priMin = PR_PRIORITY_FIRST;
        priMax = PR_PRIORITY_LAST;
    }
    thread = NULL;
    for (pri = priMax; pri >= priMin ; pri-- ) {
        if (r & (1 << pri)) {
            for (qp = _PR_RUNQ(cpu)[pri].next;
                 qp != &_PR_RUNQ(cpu)[pri];
                 qp = qp->next) {
                thread = _PR_THREAD_PTR(qp);
                /*
                 * skip non-schedulable threads
                 */
                PR_ASSERT(!(thread->flags & _PR_IDLE_THREAD));
                if ((thread->no_sched) && (me != thread)){
                    thread = NULL;
                    continue;
                } else {
                    /* Pull thread off of its run queue */
                    _PR_DEL_RUNQ(thread);
                    _PR_RUNQ_UNLOCK(cpu);
                    goto found_thread;
                }
            }
        }
        thread = NULL;
    }
    _PR_RUNQ_UNLOCK(cpu);

#if !defined(_PR_LOCAL_THREADS_ONLY) && defined(XP_UNIX)

    wakeup_cpus = PR_FALSE;
    _PR_CPU_LIST_LOCK();
    for (qp = _PR_CPUQ().next; qp != &_PR_CPUQ(); qp = qp->next) {
        if (cpu != _PR_CPU_PTR(qp)) {
            if ((thread = get_thread(_PR_CPU_PTR(qp), &wakeup_cpus))
                != NULL) {
                thread->cpu = cpu;
                _PR_CPU_LIST_UNLOCK();
                if (wakeup_cpus == PR_TRUE)
                    _PR_MD_WAKEUP_CPUS();
                goto found_thread;
            }
        }
    }
    _PR_CPU_LIST_UNLOCK();
    if (wakeup_cpus == PR_TRUE)
        _PR_MD_WAKEUP_CPUS();

#endif /* !defined(_PR_LOCAL_THREADS_ONLY) && defined(XP_UNIX) */

idle_thread:
    /*
    ** There are no threads to run. Switch to the idle thread
    */
    PR_LOG(_pr_sched_lm, PR_LOG_MAX, ("pausing"));
    thread = _PR_MD_CURRENT_CPU()->idle_thread;

found_thread:
    PR_ASSERT((me == thread) || ((thread->state == _PR_RUNNABLE) &&
                                 (!(thread->no_sched))));

    /* Resume the thread */
    PR_LOG(_pr_sched_lm, PR_LOG_MAX,
           ("switching to %d[%p]", thread->id, thread));
    PR_ASSERT(thread->state != _PR_RUNNING);
    thread->state = _PR_RUNNING;

    /* If we are on the runq, it just means that we went to sleep on some
     * resource, and by the time we got here another real native thread had
     * already given us the resource and put us back on the runqueue
     */
    PR_ASSERT(thread->cpu == _PR_MD_CURRENT_CPU());
    if (thread != me)
        _PR_MD_RESTORE_CONTEXT(thread);
#if 0
    /* XXXMB; with setjmp/longjmp it is impossible to land here, but
     * it is not with fibers... Is this a bad thing? I believe it is
     * still safe.
     */
    PR_NOT_REACHED("impossible return from schedule");
#endif
}
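
/*
** Illustrative sketch (not from the original file): the calling
** convention the note above prescribes. Disable interrupts and go
** through _PR_MD_SWITCH_CONTEXT() instead of calling _PR_Schedule()
** directly; this mirrors the pattern already used in
** _PR_UserRunThread() earlier in this file.
*/
#if 0
    {
        PRIntn is;
        _PR_INTSOFF(is);
        _PR_MD_SWITCH_CONTEXT(me);  /* does the setup, then reaches _PR_Schedule() */
    }
#endif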

/*
** Attaches a thread.
** Does not set the _PR_MD_CURRENT_THREAD.
** Does not specify the scope of the thread.
*/
static PRThread *
_PR_AttachThread(PRThreadType type, PRThreadPriority priority,
                 PRThreadStack *stack)
{
    PRThread *thread;
    char *mem;

    if (priority > PR_PRIORITY_LAST) {
        priority = PR_PRIORITY_LAST;
    } else if (priority < PR_PRIORITY_FIRST) {
        priority = PR_PRIORITY_FIRST;
    }

    mem = (char*) PR_CALLOC(sizeof(PRThread));
    if (mem) {
        thread = (PRThread*) mem;
        thread->priority = priority;
        thread->stack = stack;
        thread->state = _PR_RUNNING;
        PR_INIT_CLIST(&thread->lockList);
        if (_PR_MD_NEW_LOCK(&thread->threadLock) == PR_FAILURE) {
            PR_DELETE(thread);
            return 0;
        }

        return thread;
    }
    return 0;
}


PR_IMPLEMENT(PRThread*)
_PR_NativeCreateThread(PRThreadType type,
                       void (*start)(void *arg),
                       void *arg,
                       PRThreadPriority priority,
                       PRThreadScope scope,
                       PRThreadState state,
                       PRUint32 stackSize,
                       PRUint32 flags)
{
    PRThread *thread;

    thread = _PR_AttachThread(type, priority, NULL);

    if (thread) {
        PR_Lock(_pr_activeLock);
        thread->flags = (flags | _PR_GLOBAL_SCOPE);
        thread->id = ++_pr_utid;
        if (type == PR_SYSTEM_THREAD) {
            thread->flags |= _PR_SYSTEM;
            _pr_systemActive++;
        } else {
            _pr_userActive++;
        }
        PR_Unlock(_pr_activeLock);

        thread->stack = PR_NEWZAP(PRThreadStack);
        if (!thread->stack) {
            PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
            goto done;
        }
        thread->stack->stackSize = stackSize?stackSize:_MD_DEFAULT_STACK_SIZE;
        thread->stack->thr = thread;
        thread->startFunc = start;
        thread->arg = arg;

        /*
          Set thread flags related to scope and joinable state. If joinable
          thread, allocate a "termination" condition variable.
        */
        if (state == PR_JOINABLE_THREAD) {
            thread->term = PR_NewCondVar(_pr_terminationCVLock);
            if (thread->term == NULL) {
                PR_DELETE(thread->stack);
                goto done;
            }
        }

        thread->state = _PR_RUNNING;
        if (_PR_MD_CREATE_THREAD(thread, _PR_NativeRunThread, priority,
                                 scope,state,stackSize) == PR_SUCCESS) {
            return thread;
        }
        if (thread->term) {
            PR_DestroyCondVar(thread->term);
            thread->term = NULL;
        }
        PR_DELETE(thread->stack);
    }

done:
    if (thread) {
        _PR_DecrActiveThreadCount(thread);
        _PR_DestroyThread(thread);
    }
    return NULL;
}

/************************************************************************/

PR_IMPLEMENT(PRThread*) _PR_CreateThread(PRThreadType type,
                                         void (*start)(void *arg),
                                         void *arg,
                                         PRThreadPriority priority,
                                         PRThreadScope scope,
                                         PRThreadState state,
                                         PRUint32 stackSize,
                                         PRUint32 flags)
{
    PRThread *me;
    PRThread *thread = NULL;
    PRThreadStack *stack;
    char *top;
    PRIntn is;
    PRIntn native = 0;
    PRIntn useRecycled = 0;
    PRBool status;

    /*
      First, pin down the priority. Not all compilers catch passing an out of
      range enum here. If we let bad values through, priority queues won't work.
    */
    if (priority > PR_PRIORITY_LAST) {
        priority = PR_PRIORITY_LAST;
    } else if (priority < PR_PRIORITY_FIRST) {
        priority = PR_PRIORITY_FIRST;
    }

    if (!_pr_initialized) _PR_ImplicitInitialization();

    if (! (flags & _PR_IDLE_THREAD))
        me = _PR_MD_CURRENT_THREAD();

#if defined(_PR_GLOBAL_THREADS_ONLY)
    /*
     * can create global threads only
     */
    if (scope == PR_LOCAL_THREAD)
        scope = PR_GLOBAL_THREAD;
#endif

    if (_native_threads_only)
        scope = PR_GLOBAL_THREAD;

    native = (((scope == PR_GLOBAL_THREAD)|| (scope == PR_GLOBAL_BOUND_THREAD))
              && _PR_IS_NATIVE_THREAD_SUPPORTED());

    _PR_ADJUST_STACKSIZE(stackSize);

    if (native) {
        /*
         * clear the IDLE_THREAD flag which applies to LOCAL
         * threads only
         */
        flags &= ~_PR_IDLE_THREAD;
        flags |= _PR_GLOBAL_SCOPE;
        if (_PR_NUM_DEADNATIVE > 0) {
            _PR_DEADQ_LOCK;

            if (_PR_NUM_DEADNATIVE == 0) { /* Thread safe check */
                _PR_DEADQ_UNLOCK;
            } else {
                thread = _PR_THREAD_PTR(_PR_DEADNATIVEQ.next);
                PR_REMOVE_LINK(&thread->links);
                _PR_DEC_DEADNATIVE;
                _PR_DEADQ_UNLOCK;

                _PR_InitializeRecycledThread(thread);
                thread->startFunc = start;
                thread->arg = arg;
                thread->flags = (flags | _PR_GLOBAL_SCOPE);
                if (type == PR_SYSTEM_THREAD)
                {
                    thread->flags |= _PR_SYSTEM;
                    PR_ATOMIC_INCREMENT(&_pr_systemActive);
                }
                else PR_ATOMIC_INCREMENT(&_pr_userActive);

                if (state == PR_JOINABLE_THREAD) {
                    if (!thread->term)
                        thread->term = PR_NewCondVar(_pr_terminationCVLock);
                }
                else {
                    if(thread->term) {
                        PR_DestroyCondVar(thread->term);
                        thread->term = 0;
                    }
                }

                thread->priority = priority;
                _PR_MD_SET_PRIORITY(&(thread->md), priority);
                /* XXX what about stackSize? */
                thread->state = _PR_RUNNING;
                _PR_MD_WAKEUP_WAITER(thread);
                return thread;
            }
        }
        thread = _PR_NativeCreateThread(type, start, arg, priority,
                                        scope, state, stackSize, flags);
    } else {
        if (_PR_NUM_DEADUSER > 0) {
            _PR_DEADQ_LOCK;

            if (_PR_NUM_DEADUSER == 0) { /* thread safe check */
                _PR_DEADQ_UNLOCK;
            } else {
                PRCList *ptr;

                /* Go down list checking for a recycled thread with a
                 * large enough stack. XXXMB - this has a bad degenerate case.
                 */
                ptr = _PR_DEADUSERQ.next;
                while( ptr != &_PR_DEADUSERQ ) {
                    thread = _PR_THREAD_PTR(ptr);
                    if ((thread->stack->stackSize >= stackSize) &&
                        (!thread->no_sched)) {
                        PR_REMOVE_LINK(&thread->links);
                        _PR_DEC_DEADUSER;
                        break;
                    } else {
                        ptr = ptr->next;
                        thread = NULL;
                    }
                }

                _PR_DEADQ_UNLOCK;

                if (thread) {
                    _PR_InitializeRecycledThread(thread);
                    thread->startFunc = start;
                    thread->arg = arg;
                    thread->priority = priority;
                    if (state == PR_JOINABLE_THREAD) {
                        if (!thread->term)
                            thread->term = PR_NewCondVar(_pr_terminationCVLock);
                    } else {
                        if(thread->term) {
                            PR_DestroyCondVar(thread->term);
                            thread->term = 0;
                        }
                    }
                    useRecycled++;
                }
            }
        }
        if (thread == NULL) {
#ifndef HAVE_CUSTOM_USER_THREADS
            stack = _PR_NewStack(stackSize);
            if (!stack) {
                PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
                return NULL;
            }

            /* Allocate thread object and per-thread data off the top of the stack */
            top = stack->stackTop;
#ifdef HAVE_STACK_GROWING_UP
            thread = (PRThread*) top;
            top = top + sizeof(PRThread);
            /*
             * Make stack 64-byte aligned
             */
            if ((PRUptrdiff)top & 0x3f) {
                top = (char*)(((PRUptrdiff)top + 0x40) & ~0x3f);
            }
#else
            top = top - sizeof(PRThread);
            thread = (PRThread*) top;
            /*
             * Make stack 64-byte aligned
             */
            if ((PRUptrdiff)top & 0x3f) {
                top = (char*)((PRUptrdiff)top & ~0x3f);
            }
#endif
            stack->thr = thread;
            memset(thread, 0, sizeof(PRThread));
            thread->threadAllocatedOnStack = 1;
#else
            thread = _PR_MD_CREATE_USER_THREAD(stackSize, start, arg);
            if (!thread) {
                PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
                return NULL;
            }
            thread->threadAllocatedOnStack = 0;
            stack = NULL;
            top = NULL;
#endif

            /* Initialize thread */
            thread->tpdLength = 0;
            thread->privateData = NULL;
            thread->stack = stack;
            thread->priority = priority;
            thread->startFunc = start;
            thread->arg = arg;
            PR_INIT_CLIST(&thread->lockList);

            if (_PR_MD_INIT_THREAD(thread) == PR_FAILURE) {
                if (thread->threadAllocatedOnStack == 1)
                    _PR_FreeStack(thread->stack);
                else {
                    PR_DELETE(thread);
                }
                PR_SetError(PR_INSUFFICIENT_RESOURCES_ERROR, 0);
                return NULL;
            }

            if (_PR_MD_NEW_LOCK(&thread->threadLock) == PR_FAILURE) {
                if (thread->threadAllocatedOnStack == 1)
                    _PR_FreeStack(thread->stack);
                else {
                    PR_DELETE(thread->privateData);
                    PR_DELETE(thread);
                }
                PR_SetError(PR_INSUFFICIENT_RESOURCES_ERROR, 0);
                return NULL;
            }

            _PR_MD_INIT_CONTEXT(thread, top, _PR_UserRunThread, &status);

            if (status == PR_FALSE) {
                _PR_MD_FREE_LOCK(&thread->threadLock);
                if (thread->threadAllocatedOnStack == 1)
                    _PR_FreeStack(thread->stack);
                else {
                    PR_DELETE(thread->privateData);
                    PR_DELETE(thread);
                }
                return NULL;
            }

            /*
              Set thread flags related to scope and joinable state. If joinable
              thread, allocate a "termination" condition variable.
            */
            if (state == PR_JOINABLE_THREAD) {
                thread->term = PR_NewCondVar(_pr_terminationCVLock);
                if (thread->term == NULL) {
                    _PR_MD_FREE_LOCK(&thread->threadLock);
                    if (thread->threadAllocatedOnStack == 1)
                        _PR_FreeStack(thread->stack);
                    else {
                        PR_DELETE(thread->privateData);
                        PR_DELETE(thread);
                    }
                    return NULL;
                }
            }

        }
michael@0 | 1315 | |
michael@0 | 1316 | /* Update thread type counter */ |
michael@0 | 1317 | PR_Lock(_pr_activeLock); |
michael@0 | 1318 | thread->flags = flags; |
michael@0 | 1319 | thread->id = ++_pr_utid; |
michael@0 | 1320 | if (type == PR_SYSTEM_THREAD) { |
michael@0 | 1321 | thread->flags |= _PR_SYSTEM; |
michael@0 | 1322 | _pr_systemActive++; |
michael@0 | 1323 | } else { |
michael@0 | 1324 | _pr_userActive++; |
michael@0 | 1325 | } |
michael@0 | 1326 | |
michael@0 | 1327 | /* Make thread runnable */ |
michael@0 | 1328 | thread->state = _PR_RUNNABLE; |
michael@0 | 1329 | /* |
michael@0 | 1330 | * Add to list of active threads |
michael@0 | 1331 | */ |
michael@0 | 1332 | PR_Unlock(_pr_activeLock); |
michael@0 | 1333 | |
michael@0 | 1334 | if ((! (thread->flags & _PR_IDLE_THREAD)) && _PR_IS_NATIVE_THREAD(me) ) |
michael@0 | 1335 | thread->cpu = _PR_GetPrimordialCPU(); |
michael@0 | 1336 | else |
michael@0 | 1337 | thread->cpu = _PR_MD_CURRENT_CPU(); |
michael@0 | 1338 | |
michael@0 | 1339 | PR_ASSERT(!_PR_IS_NATIVE_THREAD(thread)); |
michael@0 | 1340 | |
michael@0 | 1341 | if ((! (thread->flags & _PR_IDLE_THREAD)) && !_PR_IS_NATIVE_THREAD(me)) { |
michael@0 | 1342 | _PR_INTSOFF(is); |
michael@0 | 1343 | _PR_RUNQ_LOCK(thread->cpu); |
michael@0 | 1344 | _PR_ADD_RUNQ(thread, thread->cpu, priority); |
michael@0 | 1345 | _PR_RUNQ_UNLOCK(thread->cpu); |
michael@0 | 1346 | } |
michael@0 | 1347 | |
michael@0 | 1348 | if (thread->flags & _PR_IDLE_THREAD) { |
michael@0 | 1349 | /* |
michael@0 | 1350 | ** If the creating thread is a kernel thread, we need to |
michael@0 | 1351 | ** awaken the user-level idle thread somehow; it could be |
michael@0 | 1352 | ** sleeping in its idle loop, and we need to poke it. |
michael@0 | 1353 | ** To do so, wake the idle thread... |
michael@0 | 1354 | */ |
michael@0 | 1355 | _PR_MD_WAKEUP_WAITER(NULL); |
michael@0 | 1356 | } else if (_PR_IS_NATIVE_THREAD(me)) { |
michael@0 | 1357 | _PR_MD_WAKEUP_WAITER(thread); |
michael@0 | 1358 | } |
michael@0 | 1359 | if ((! (thread->flags & _PR_IDLE_THREAD)) && !_PR_IS_NATIVE_THREAD(me) ) |
michael@0 | 1360 | _PR_INTSON(is); |
michael@0 | 1361 | } |
michael@0 | 1362 | |
michael@0 | 1363 | return thread; |
michael@0 | 1364 | } |
michael@0 | 1365 | |
michael@0 | 1366 | PR_IMPLEMENT(PRThread*) PR_CreateThread(PRThreadType type, |
michael@0 | 1367 | void (*start)(void *arg), |
michael@0 | 1368 | void *arg, |
michael@0 | 1369 | PRThreadPriority priority, |
michael@0 | 1370 | PRThreadScope scope, |
michael@0 | 1371 | PRThreadState state, |
michael@0 | 1372 | PRUint32 stackSize) |
michael@0 | 1373 | { |
michael@0 | 1374 | return _PR_CreateThread(type, start, arg, priority, scope, state, |
michael@0 | 1375 | stackSize, 0); |
michael@0 | 1376 | } |
michael@0 | 1377 | |
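A minimal usage sketch of the public entry point above, assuming only the prthread.h API; the start routine, its argument, and the wrapper are hypothetical names:

    #include "nspr.h"

    static void example_start(void *arg)        /* hypothetical start routine */
    {
        int *value = (int *)arg;                /* arg as passed to PR_CreateThread */
        (*value)++;
    }

    static void example_create_and_join(void)  /* hypothetical caller */
    {
        int data = 41;
        PRThread *t = PR_CreateThread(PR_USER_THREAD, example_start, &data,
                                      PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD,
                                      PR_JOINABLE_THREAD, 0 /* default stack */);
        if (t != NULL)
            (void) PR_JoinThread(t);            /* wait for example_start to return */
    }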
michael@0 | 1378 | /* |
michael@0 | 1379 | ** Associate a thread object with an existing native thread. |
michael@0 | 1380 | ** "type" is the type of thread object to attach |
michael@0 | 1381 | ** "priority" is the priority to assign to the thread |
michael@0 | 1382 | ** "stack" defines the shape of the thread's stack |
michael@0 | 1383 | ** |
michael@0 | 1384 | ** This can return NULL if some kind of error occurs, or if memory is |
michael@0 | 1385 | ** tight. |
michael@0 | 1386 | ** |
michael@0 | 1387 | ** This call is not normally needed unless you create your own native |
michael@0 | 1388 | ** thread. PR_Init does this automatically for the primordial thread. |
michael@0 | 1389 | */ |
michael@0 | 1390 | PRThread* _PRI_AttachThread(PRThreadType type, |
michael@0 | 1391 | PRThreadPriority priority, PRThreadStack *stack, PRUint32 flags) |
michael@0 | 1392 | { |
michael@0 | 1393 | PRThread *thread; |
michael@0 | 1394 | |
michael@0 | 1395 | if ((thread = _PR_MD_GET_ATTACHED_THREAD()) != NULL) { |
michael@0 | 1396 | return thread; |
michael@0 | 1397 | } |
michael@0 | 1398 | _PR_MD_SET_CURRENT_THREAD(NULL); |
michael@0 | 1399 | |
michael@0 | 1400 | /* Clear out any state if this thread was attached before */ |
michael@0 | 1401 | _PR_MD_SET_CURRENT_CPU(NULL); |
michael@0 | 1402 | |
michael@0 | 1403 | thread = _PR_AttachThread(type, priority, stack); |
michael@0 | 1404 | if (thread) { |
michael@0 | 1405 | PRIntn is; |
michael@0 | 1406 | |
michael@0 | 1407 | _PR_MD_SET_CURRENT_THREAD(thread); |
michael@0 | 1408 | |
michael@0 | 1409 | thread->flags = flags | _PR_GLOBAL_SCOPE | _PR_ATTACHED; |
michael@0 | 1410 | |
michael@0 | 1411 | if (!stack) { |
michael@0 | 1412 | thread->stack = PR_NEWZAP(PRThreadStack); |
michael@0 | 1413 | if (!thread->stack) { |
michael@0 | 1414 | _PR_DestroyThread(thread); |
michael@0 | 1415 | return NULL; |
michael@0 | 1416 | } |
michael@0 | 1417 | thread->stack->stackSize = _MD_DEFAULT_STACK_SIZE; |
michael@0 | 1418 | } |
michael@0 | 1419 | PR_INIT_CLIST(&thread->links); |
michael@0 | 1420 | |
michael@0 | 1421 | if (_PR_MD_INIT_ATTACHED_THREAD(thread) == PR_FAILURE) { |
michael@0 | 1422 | PR_DELETE(thread->stack); |
michael@0 | 1423 | _PR_DestroyThread(thread); |
michael@0 | 1424 | return NULL; |
michael@0 | 1425 | } |
michael@0 | 1426 | |
michael@0 | 1427 | _PR_MD_SET_CURRENT_CPU(NULL); |
michael@0 | 1428 | |
michael@0 | 1429 | if (_PR_MD_CURRENT_CPU()) { |
michael@0 | 1430 | _PR_INTSOFF(is); |
michael@0 | 1431 | PR_Lock(_pr_activeLock); |
michael@0 | 1432 | } |
michael@0 | 1433 | if (type == PR_SYSTEM_THREAD) { |
michael@0 | 1434 | thread->flags |= _PR_SYSTEM; |
michael@0 | 1435 | _pr_systemActive++; |
michael@0 | 1436 | } else { |
michael@0 | 1437 | _pr_userActive++; |
michael@0 | 1438 | } |
michael@0 | 1439 | if (_PR_MD_CURRENT_CPU()) { |
michael@0 | 1440 | PR_Unlock(_pr_activeLock); |
michael@0 | 1441 | _PR_INTSON(is); |
michael@0 | 1442 | } |
michael@0 | 1443 | } |
michael@0 | 1444 | return thread; |
michael@0 | 1445 | } |
michael@0 | 1446 | |
michael@0 | 1447 | PR_IMPLEMENT(PRThread*) PR_AttachThread(PRThreadType type, |
michael@0 | 1448 | PRThreadPriority priority, PRThreadStack *stack) |
michael@0 | 1449 | { |
michael@0 | 1450 | return PR_GetCurrentThread(); |
michael@0 | 1451 | } |
michael@0 | 1452 | |
michael@0 | 1453 | PR_IMPLEMENT(void) PR_DetachThread(void) |
michael@0 | 1454 | { |
michael@0 | 1455 | /* |
michael@0 | 1456 | * On IRIX, Solaris, and Windows, foreign threads are detached when |
michael@0 | 1457 | * they terminate. |
michael@0 | 1458 | */ |
michael@0 | 1459 | #if !defined(IRIX) && !defined(WIN32) \ |
michael@0 | 1460 | && !(defined(SOLARIS) && defined(_PR_GLOBAL_THREADS_ONLY)) |
michael@0 | 1461 | PRThread *me; |
michael@0 | 1462 | if (_pr_initialized) { |
michael@0 | 1463 | me = _PR_MD_GET_ATTACHED_THREAD(); |
michael@0 | 1464 | if ((me != NULL) && (me->flags & _PR_ATTACHED)) |
michael@0 | 1465 | _PRI_DetachThread(); |
michael@0 | 1466 | } |
michael@0 | 1467 | #endif |
michael@0 | 1468 | } |
michael@0 | 1469 | |
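A sketch of the attach/detach bracket for a foreign (natively created) thread, assuming the public API; as the stub above shows, PR_AttachThread now simply returns the current PRThread because attachment happens implicitly, and on the platforms listed the detach also happens automatically at thread exit. The work in the middle is hypothetical:

    static void foreign_thread_main(void)       /* hypothetical native thread body */
    {
        PRThread *self = PR_AttachThread(PR_USER_THREAD,
                                         PR_PRIORITY_NORMAL, NULL);
        if (self != NULL) {
            /* ... call into NSPR here ... */
            PR_DetachThread();                  /* no-op where detach-at-exit is automatic */
        }
    }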
michael@0 | 1470 | void _PRI_DetachThread(void) |
michael@0 | 1471 | { |
michael@0 | 1472 | PRThread *me = _PR_MD_CURRENT_THREAD(); |
michael@0 | 1473 | |
michael@0 | 1474 | if (me->flags & _PR_PRIMORDIAL) { |
michael@0 | 1475 | /* |
michael@0 | 1476 | * ignore if this is the primordial thread |
michael@0 | 1477 | */ |
michael@0 | 1478 | return; |
michael@0 | 1479 | } |
michael@0 | 1480 | PR_ASSERT(me->flags & _PR_ATTACHED); |
michael@0 | 1481 | PR_ASSERT(_PR_IS_NATIVE_THREAD(me)); |
michael@0 | 1482 | _PR_CleanupThread(me); |
michael@0 | 1483 | PR_DELETE(me->privateData); |
michael@0 | 1484 | |
michael@0 | 1485 | _PR_DecrActiveThreadCount(me); |
michael@0 | 1486 | |
michael@0 | 1487 | _PR_MD_CLEAN_THREAD(me); |
michael@0 | 1488 | _PR_MD_SET_CURRENT_THREAD(NULL); |
michael@0 | 1489 | if (!me->threadAllocatedOnStack) |
michael@0 | 1490 | PR_DELETE(me->stack); |
michael@0 | 1491 | _PR_MD_FREE_LOCK(&me->threadLock); |
michael@0 | 1492 | PR_DELETE(me); |
michael@0 | 1493 | } |
michael@0 | 1494 | |
michael@0 | 1495 | /* |
michael@0 | 1496 | ** Wait for thread termination: |
michael@0 | 1497 | ** "thread" is the target thread |
michael@0 | 1498 | ** |
michael@0 | 1499 | ** This can return PR_FAILURE if no joinable thread could be found |
michael@0 | 1500 | ** corresponding to the specified target thread. |
michael@0 | 1501 | ** |
michael@0 | 1502 | ** The calling thread is suspended until the target thread completes. |
michael@0 | 1503 | ** Multiple threads cannot wait for the same thread to complete; one caller |
michael@0 | 1504 | ** will complete successfully and the others will fail with PR_FAILURE. |
michael@0 | 1505 | ** The calling thread will not be blocked if the target thread has already |
michael@0 | 1506 | ** terminated. |
michael@0 | 1507 | */ |
michael@0 | 1508 | PR_IMPLEMENT(PRStatus) PR_JoinThread(PRThread *thread) |
michael@0 | 1509 | { |
michael@0 | 1510 | PRIntn is; |
michael@0 | 1511 | PRCondVar *term; |
michael@0 | 1512 | PRThread *me = _PR_MD_CURRENT_THREAD(); |
michael@0 | 1513 | |
michael@0 | 1514 | if (!_PR_IS_NATIVE_THREAD(me)) |
michael@0 | 1515 | _PR_INTSOFF(is); |
michael@0 | 1516 | term = thread->term; |
michael@0 | 1517 | /* can't join a non-joinable thread */ |
michael@0 | 1518 | if (term == NULL) { |
michael@0 | 1519 | PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0); |
michael@0 | 1520 | goto ErrorExit; |
michael@0 | 1521 | } |
michael@0 | 1522 | |
michael@0 | 1523 | /* multiple threads can't wait on the same joinable thread */ |
michael@0 | 1524 | if (term->condQ.next != &term->condQ) { |
michael@0 | 1525 | goto ErrorExit; |
michael@0 | 1526 | } |
michael@0 | 1527 | if (!_PR_IS_NATIVE_THREAD(me)) |
michael@0 | 1528 | _PR_INTSON(is); |
michael@0 | 1529 | |
michael@0 | 1530 | /* wait on the termination condition variable until the target reaches _PR_JOIN_WAIT */ |
michael@0 | 1531 | PR_Lock (_pr_terminationCVLock); |
michael@0 | 1532 | while (thread->state != _PR_JOIN_WAIT) { |
michael@0 | 1533 | (void) PR_WaitCondVar(term, PR_INTERVAL_NO_TIMEOUT); |
michael@0 | 1534 | } |
michael@0 | 1535 | (void) PR_Unlock (_pr_terminationCVLock); |
michael@0 | 1536 | |
michael@0 | 1537 | /* |
michael@0 | 1538 | Remove target thread from global waiting to join Q; make it runnable |
michael@0 | 1539 | again and put it back on its run Q. When it gets scheduled later in |
michael@0 | 1540 | _PR_RunThread code, it will clean up its stack. |
michael@0 | 1541 | */ |
michael@0 | 1542 | if (!_PR_IS_NATIVE_THREAD(me)) |
michael@0 | 1543 | _PR_INTSOFF(is); |
michael@0 | 1544 | thread->state = _PR_RUNNABLE; |
michael@0 | 1545 | if ( !_PR_IS_NATIVE_THREAD(thread) ) { |
michael@0 | 1546 | _PR_THREAD_LOCK(thread); |
michael@0 | 1547 | |
michael@0 | 1548 | _PR_MISCQ_LOCK(thread->cpu); |
michael@0 | 1549 | _PR_DEL_JOINQ(thread); |
michael@0 | 1550 | _PR_MISCQ_UNLOCK(thread->cpu); |
michael@0 | 1551 | |
michael@0 | 1552 | _PR_AddThreadToRunQ(me, thread); |
michael@0 | 1553 | _PR_THREAD_UNLOCK(thread); |
michael@0 | 1554 | } |
michael@0 | 1555 | if (!_PR_IS_NATIVE_THREAD(me)) |
michael@0 | 1556 | _PR_INTSON(is); |
michael@0 | 1557 | |
michael@0 | 1558 | _PR_MD_WAKEUP_WAITER(thread); |
michael@0 | 1559 | |
michael@0 | 1560 | return PR_SUCCESS; |
michael@0 | 1561 | |
michael@0 | 1562 | ErrorExit: |
michael@0 | 1563 | if ( !_PR_IS_NATIVE_THREAD(me)) _PR_INTSON(is); |
michael@0 | 1564 | return PR_FAILURE; |
michael@0 | 1565 | } |
michael@0 | 1566 | |
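A sketch of the non-joinable error path above, assuming the public API. Caveat: an unjoinable thread's PRThread may be reclaimed once the thread exits, so this only illustrates the error semantics and is not a safe pattern for real code:

    static void noop_start(void *arg) {}        /* hypothetical start routine */

    static void example_join_failure(void)      /* hypothetical caller */
    {
        PRThread *t = PR_CreateThread(PR_USER_THREAD, noop_start, NULL,
                                      PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD,
                                      PR_UNJOINABLE_THREAD, 0);
        /* thread->term is NULL for unjoinable threads, so the join is refused */
        if (t != NULL && PR_JoinThread(t) == PR_FAILURE)
            PR_ASSERT(PR_GetError() == PR_INVALID_ARGUMENT_ERROR);
    }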
michael@0 | 1567 | PR_IMPLEMENT(void) PR_SetThreadPriority(PRThread *thread, |
michael@0 | 1568 | PRThreadPriority newPri) |
michael@0 | 1569 | { |
michael@0 | 1570 | |
michael@0 | 1571 | /* |
michael@0 | 1572 | First, pin down the priority. Not all compilers catch passing an out-of- |
michael@0 | 1573 | range enum here. If we let bad values through, priority queues won't work. |
michael@0 | 1574 | */ |
michael@0 | 1575 | if ((PRIntn)newPri > (PRIntn)PR_PRIORITY_LAST) { |
michael@0 | 1576 | newPri = PR_PRIORITY_LAST; |
michael@0 | 1577 | } else if ((PRIntn)newPri < (PRIntn)PR_PRIORITY_FIRST) { |
michael@0 | 1578 | newPri = PR_PRIORITY_FIRST; |
michael@0 | 1579 | } |
michael@0 | 1580 | |
michael@0 | 1581 | if ( _PR_IS_NATIVE_THREAD(thread) ) { |
michael@0 | 1582 | thread->priority = newPri; |
michael@0 | 1583 | _PR_MD_SET_PRIORITY(&(thread->md), newPri); |
michael@0 | 1584 | } else _PR_SetThreadPriority(thread, newPri); |
michael@0 | 1585 | } |
michael@0 | 1586 | |
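The clamping above coerces out-of-range values rather than rejecting them; a small sketch, assuming PR_GetThreadPriority from the public API:

    /* An out-of-range value is clamped to PR_PRIORITY_LAST, not rejected. */
    PRThread *self = PR_GetCurrentThread();
    PR_SetThreadPriority(self, (PRThreadPriority)(PR_PRIORITY_LAST + 1));
    PR_ASSERT(PR_GetThreadPriority(self) == PR_PRIORITY_LAST);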
michael@0 | 1587 | PR_IMPLEMENT(PRStatus) PR_SetCurrentThreadName(const char *name) |
michael@0 | 1588 | { |
michael@0 | 1589 | PRThread *thread; |
michael@0 | 1590 | size_t nameLen; |
michael@0 | 1591 | |
michael@0 | 1592 | if (!name) { |
michael@0 | 1593 | PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0); |
michael@0 | 1594 | return PR_FAILURE; |
michael@0 | 1595 | } |
michael@0 | 1596 | |
michael@0 | 1597 | thread = PR_GetCurrentThread(); |
michael@0 | 1598 | if (!thread) |
michael@0 | 1599 | return PR_FAILURE; |
michael@0 | 1600 | |
michael@0 | 1601 | PR_Free(thread->name); |
michael@0 | 1602 | nameLen = strlen(name); |
michael@0 | 1603 | thread->name = (char *)PR_Malloc(nameLen + 1); |
michael@0 | 1604 | if (!thread->name) |
michael@0 | 1605 | return PR_FAILURE; |
michael@0 | 1606 | memcpy(thread->name, name, nameLen + 1); |
michael@0 | 1607 | _PR_MD_SET_CURRENT_THREAD_NAME(thread->name); |
michael@0 | 1608 | return PR_SUCCESS; |
michael@0 | 1609 | } |
michael@0 | 1610 | |
michael@0 | 1611 | PR_IMPLEMENT(const char *) PR_GetThreadName(const PRThread *thread) |
michael@0 | 1612 | { |
michael@0 | 1613 | if (!thread) |
michael@0 | 1614 | return NULL; |
michael@0 | 1615 | return thread->name; |
michael@0 | 1616 | } |
michael@0 | 1617 | |
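A sketch pairing the two naming calls above; the name itself is arbitrary, and strcmp comes from <string.h>, which this file already includes:

    if (PR_SetCurrentThreadName("worker-1") == PR_SUCCESS) {
        const char *n = PR_GetThreadName(PR_GetCurrentThread());
        PR_ASSERT(n != NULL && strcmp(n, "worker-1") == 0);  /* the name was copied */
    }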
michael@0 | 1618 | |
michael@0 | 1619 | /* |
michael@0 | 1620 | ** This routine prevents all other threads from running. This call is needed by |
michael@0 | 1621 | ** the garbage collector. |
michael@0 | 1622 | */ |
michael@0 | 1623 | PR_IMPLEMENT(void) PR_SuspendAll(void) |
michael@0 | 1624 | { |
michael@0 | 1625 | PRThread *me = _PR_MD_CURRENT_THREAD(); |
michael@0 | 1626 | PRCList *qp; |
michael@0 | 1627 | |
michael@0 | 1628 | /* |
michael@0 | 1629 | * Stop all user and native threads that are marked GCable. |
michael@0 | 1630 | */ |
michael@0 | 1631 | PR_Lock(_pr_activeLock); |
michael@0 | 1632 | suspendAllOn = PR_TRUE; |
michael@0 | 1633 | suspendAllThread = _PR_MD_CURRENT_THREAD(); |
michael@0 | 1634 | _PR_MD_BEGIN_SUSPEND_ALL(); |
michael@0 | 1635 | for (qp = _PR_ACTIVE_LOCAL_THREADQ().next; |
michael@0 | 1636 | qp != &_PR_ACTIVE_LOCAL_THREADQ(); qp = qp->next) { |
michael@0 | 1637 | if ((me != _PR_ACTIVE_THREAD_PTR(qp)) && |
michael@0 | 1638 | _PR_IS_GCABLE_THREAD(_PR_ACTIVE_THREAD_PTR(qp))) { |
michael@0 | 1639 | _PR_Suspend(_PR_ACTIVE_THREAD_PTR(qp)); |
michael@0 | 1640 | PR_ASSERT((_PR_ACTIVE_THREAD_PTR(qp))->state != _PR_RUNNING); |
michael@0 | 1641 | } |
michael@0 | 1642 | } |
michael@0 | 1643 | for (qp = _PR_ACTIVE_GLOBAL_THREADQ().next; |
michael@0 | 1644 | qp != &_PR_ACTIVE_GLOBAL_THREADQ(); qp = qp->next) { |
michael@0 | 1645 | if ((me != _PR_ACTIVE_THREAD_PTR(qp)) && |
michael@0 | 1646 | _PR_IS_GCABLE_THREAD(_PR_ACTIVE_THREAD_PTR(qp))) |
michael@0 | 1647 | /* PR_Suspend(_PR_ACTIVE_THREAD_PTR(qp)); */ |
michael@0 | 1648 | _PR_MD_SUSPEND_THREAD(_PR_ACTIVE_THREAD_PTR(qp)); |
michael@0 | 1649 | } |
michael@0 | 1650 | _PR_MD_END_SUSPEND_ALL(); |
michael@0 | 1651 | } |
michael@0 | 1652 | |
michael@0 | 1653 | /* |
michael@0 | 1654 | ** This routine unblocks all other threads that were suspended from running by |
michael@0 | 1655 | ** PR_SuspendAll(). This call is needed by the garbage collector. |
michael@0 | 1656 | */ |
michael@0 | 1657 | PR_IMPLEMENT(void) PR_ResumeAll(void) |
michael@0 | 1658 | { |
michael@0 | 1659 | PRThread *me = _PR_MD_CURRENT_THREAD(); |
michael@0 | 1660 | PRCList *qp; |
michael@0 | 1661 | |
michael@0 | 1662 | /* |
michael@0 | 1663 | * Resume all user and native threads that are marked GCable. |
michael@0 | 1664 | */ |
michael@0 | 1665 | _PR_MD_BEGIN_RESUME_ALL(); |
michael@0 | 1666 | for (qp = _PR_ACTIVE_LOCAL_THREADQ().next; |
michael@0 | 1667 | qp != &_PR_ACTIVE_LOCAL_THREADQ(); qp = qp->next) { |
michael@0 | 1668 | if ((me != _PR_ACTIVE_THREAD_PTR(qp)) && |
michael@0 | 1669 | _PR_IS_GCABLE_THREAD(_PR_ACTIVE_THREAD_PTR(qp))) |
michael@0 | 1670 | _PR_Resume(_PR_ACTIVE_THREAD_PTR(qp)); |
michael@0 | 1671 | } |
michael@0 | 1672 | for (qp = _PR_ACTIVE_GLOBAL_THREADQ().next; |
michael@0 | 1673 | qp != &_PR_ACTIVE_GLOBAL_THREADQ(); qp = qp->next) { |
michael@0 | 1674 | if ((me != _PR_ACTIVE_THREAD_PTR(qp)) && |
michael@0 | 1675 | _PR_IS_GCABLE_THREAD(_PR_ACTIVE_THREAD_PTR(qp))) |
michael@0 | 1676 | _PR_MD_RESUME_THREAD(_PR_ACTIVE_THREAD_PTR(qp)); |
michael@0 | 1677 | } |
michael@0 | 1678 | _PR_MD_END_RESUME_ALL(); |
michael@0 | 1679 | suspendAllThread = NULL; |
michael@0 | 1680 | suspendAllOn = PR_FALSE; |
michael@0 | 1681 | PR_Unlock(_pr_activeLock); |
michael@0 | 1682 | } |
michael@0 | 1683 | |
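The intended calling pattern for the two routines above is a stop-the-world bracket; a sketch, with the scanning step hypothetical. Note that PR_SuspendAll() acquires _pr_activeLock and PR_ResumeAll() releases it, so the bracket must not be nested or left open:

    PR_SuspendAll();    /* stop every other GCable thread; _pr_activeLock is held */
    /* ... scan stacks and mark roots while the world is stopped (hypothetical) ... */
    PR_ResumeAll();     /* restart the suspended threads; _pr_activeLock is released */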
michael@0 | 1684 | PR_IMPLEMENT(PRStatus) PR_EnumerateThreads(PREnumerator func, void *arg) |
michael@0 | 1685 | { |
michael@0 | 1686 | PRCList *qp, *qp_next; |
michael@0 | 1687 | PRIntn i = 0; |
michael@0 | 1688 | PRStatus rv = PR_SUCCESS; |
michael@0 | 1689 | PRThread* t; |
michael@0 | 1690 | |
michael@0 | 1691 | /* |
michael@0 | 1692 | ** Currently, enumerating threads happens only with all threads suspended |
michael@0 | 1693 | ** and _pr_activeLock held. |
michael@0 | 1694 | */ |
michael@0 | 1695 | PR_ASSERT(suspendAllOn); |
michael@0 | 1696 | |
michael@0 | 1697 | /* Steve Morse, 4-23-97: Note that we can't walk a queue by taking |
michael@0 | 1698 | * qp->next after applying the function "func". In particular, "func" |
michael@0 | 1699 | * might remove the thread from the queue and put it into another one in |
michael@0 | 1700 | * which case qp->next no longer points to the next entry in the original |
michael@0 | 1701 | * queue. |
michael@0 | 1702 | * |
michael@0 | 1703 | * To get around this problem, we save qp->next in qp_next before applying |
michael@0 | 1704 | * "func" and use that saved value as the next value after applying "func". |
michael@0 | 1705 | */ |
michael@0 | 1706 | |
michael@0 | 1707 | /* |
michael@0 | 1708 | * Traverse the list of local and global threads |
michael@0 | 1709 | */ |
michael@0 | 1710 | for (qp = _PR_ACTIVE_LOCAL_THREADQ().next; |
michael@0 | 1711 | qp != &_PR_ACTIVE_LOCAL_THREADQ(); qp = qp_next) |
michael@0 | 1712 | { |
michael@0 | 1713 | qp_next = qp->next; |
michael@0 | 1714 | t = _PR_ACTIVE_THREAD_PTR(qp); |
michael@0 | 1715 | if (_PR_IS_GCABLE_THREAD(t)) |
michael@0 | 1716 | { |
michael@0 | 1717 | rv = (*func)(t, i, arg); |
michael@0 | 1718 | if (rv != PR_SUCCESS) |
michael@0 | 1719 | return rv; |
michael@0 | 1720 | i++; |
michael@0 | 1721 | } |
michael@0 | 1722 | } |
michael@0 | 1723 | for (qp = _PR_ACTIVE_GLOBAL_THREADQ().next; |
michael@0 | 1724 | qp != &_PR_ACTIVE_GLOBAL_THREADQ(); qp = qp_next) |
michael@0 | 1725 | { |
michael@0 | 1726 | qp_next = qp->next; |
michael@0 | 1727 | t = _PR_ACTIVE_THREAD_PTR(qp); |
michael@0 | 1728 | if (_PR_IS_GCABLE_THREAD(t)) |
michael@0 | 1729 | { |
michael@0 | 1730 | rv = (*func)(t, i, arg); |
michael@0 | 1731 | if (rv != PR_SUCCESS) |
michael@0 | 1732 | return rv; |
michael@0 | 1733 | i++; |
michael@0 | 1734 | } |
michael@0 | 1735 | } |
michael@0 | 1736 | return rv; |
michael@0 | 1737 | } |
michael@0 | 1738 | |
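A sketch of an enumerator callback matching the (*func)(t, i, arg) calls above; the callback name and the counter are hypothetical, the index parameter is assumed to be PRIntn as in the loop, and per the assertion it must run only between PR_SuspendAll() and PR_ResumeAll():

    static PRStatus count_gcable(PRThread *t, PRIntn i, void *arg)
    {
        PRUint32 *count = (PRUint32 *)arg;  /* hypothetical accumulator */
        (*count)++;
        return PR_SUCCESS;                  /* any other status aborts the walk */
    }

    /* inside the PR_SuspendAll()/PR_ResumeAll() bracket: */
    PRUint32 n = 0;
    (void) PR_EnumerateThreads(count_gcable, &n);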
michael@0 | 1739 | /* FUNCTION: _PR_AddSleepQ |
michael@0 | 1740 | ** DESCRIPTION: |
michael@0 | 1741 | ** Adds a thread to the sleep/pauseQ. |
michael@0 | 1742 | ** RESTRICTIONS: |
michael@0 | 1743 | ** Caller must have the RUNQ lock. |
michael@0 | 1744 | ** Caller must be a user-level thread. |
michael@0 | 1745 | */ |
michael@0 | 1746 | PR_IMPLEMENT(void) |
michael@0 | 1747 | _PR_AddSleepQ(PRThread *thread, PRIntervalTime timeout) |
michael@0 | 1748 | { |
michael@0 | 1749 | _PRCPU *cpu = thread->cpu; |
michael@0 | 1750 | |
michael@0 | 1751 | if (timeout == PR_INTERVAL_NO_TIMEOUT) { |
michael@0 | 1752 | /* append the thread to the global pause Q */ |
michael@0 | 1753 | PR_APPEND_LINK(&thread->links, &_PR_PAUSEQ(thread->cpu)); |
michael@0 | 1754 | thread->flags |= _PR_ON_PAUSEQ; |
michael@0 | 1755 | } else { |
michael@0 | 1756 | PRIntervalTime sleep; |
michael@0 | 1757 | PRCList *q; |
michael@0 | 1758 | PRThread *t; |
michael@0 | 1759 | |
michael@0 | 1760 | /* sort onto global sleepQ */ |
michael@0 | 1761 | sleep = timeout; |
michael@0 | 1762 | |
michael@0 | 1763 | /* Check if we have the longest timeout */ |
michael@0 | 1764 | if (timeout >= _PR_SLEEPQMAX(cpu)) { |
michael@0 | 1765 | PR_INSERT_BEFORE(&thread->links, &_PR_SLEEPQ(cpu)); |
michael@0 | 1766 | thread->sleep = timeout - _PR_SLEEPQMAX(cpu); |
michael@0 | 1767 | _PR_SLEEPQMAX(cpu) = timeout; |
michael@0 | 1768 | } else { |
michael@0 | 1769 | /* Sort thread into global sleepQ at appropriate point */ |
michael@0 | 1770 | q = _PR_SLEEPQ(cpu).next; |
michael@0 | 1771 | |
michael@0 | 1772 | /* Now scan the list for where to insert this entry */ |
michael@0 | 1773 | while (q != &_PR_SLEEPQ(cpu)) { |
michael@0 | 1774 | t = _PR_THREAD_PTR(q); |
michael@0 | 1775 | if (sleep < t->sleep) { |
michael@0 | 1776 | /* Found sleeper to insert in front of */ |
michael@0 | 1777 | break; |
michael@0 | 1778 | } |
michael@0 | 1779 | sleep -= t->sleep; |
michael@0 | 1780 | q = q->next; |
michael@0 | 1781 | } |
michael@0 | 1782 | thread->sleep = sleep; |
michael@0 | 1783 | PR_INSERT_BEFORE(&thread->links, q); |
michael@0 | 1784 | |
michael@0 | 1785 | /* |
michael@0 | 1786 | ** Subtract our sleep time from the sleeper that follows us (there |
michael@0 | 1787 | ** must be one) so that they remain relative to us. |
michael@0 | 1788 | */ |
michael@0 | 1789 | PR_ASSERT (thread->links.next != &_PR_SLEEPQ(cpu)); |
michael@0 | 1790 | |
michael@0 | 1791 | t = _PR_THREAD_PTR(thread->links.next); |
michael@0 | 1792 | PR_ASSERT(_PR_THREAD_PTR(t->links.prev) == thread); |
michael@0 | 1793 | t->sleep -= sleep; |
michael@0 | 1794 | } |
michael@0 | 1795 | |
michael@0 | 1796 | thread->flags |= _PR_ON_SLEEPQ; |
michael@0 | 1797 | } |
michael@0 | 1798 | } |
michael@0 | 1799 | |
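A worked illustration of the delta encoding implemented above, with hypothetical numbers. Three sleepers queued with absolute timeouts of 10, 25, and 40 ticks store the deltas 10, 15, 15, and _PR_SLEEPQMAX is 40; inserting a fourth sleeper with a 30-tick timeout follows the scan loop exactly:

    /*
    ** before:     deltas [10, 15, 15]     absolutes {10, 25, 40}      max = 40
    ** insert 30:  30 - 10 = 20;  20 - 15 = 5;  5 < 15 -> insert before the tail
    ** after:      deltas [10, 15, 5, 10]  absolutes {10, 25, 30, 40}  max = 40
    **
    ** The follower's delta shrinks from 15 to 10, so its absolute wakeup
    ** time is unchanged.
    */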
michael@0 | 1800 | /* FUNCTION: _PR_DelSleepQ |
michael@0 | 1801 | ** DESCRIPTION: |
michael@0 | 1802 | ** Removes a thread from the sleep/pauseQ. |
michael@0 | 1803 | ** INPUTS: |
michael@0 | 1804 | ** If propagate_time is true, then the thread following the deleted |
michael@0 | 1805 | ** thread will inherit the deleted thread's sleep time. This is used |
michael@0 | 1806 | ** when deleting a sleeper that has not timed out. |
michael@0 | 1807 | ** RESTRICTIONS: |
michael@0 | 1808 | ** Caller must have the RUNQ lock. |
michael@0 | 1809 | ** Caller must be a user-level thread. |
michael@0 | 1810 | */ |
michael@0 | 1811 | PR_IMPLEMENT(void) |
michael@0 | 1812 | _PR_DelSleepQ(PRThread *thread, PRBool propagate_time) |
michael@0 | 1813 | { |
michael@0 | 1814 | _PRCPU *cpu = thread->cpu; |
michael@0 | 1815 | |
michael@0 | 1816 | /* Remove from pauseQ/sleepQ */ |
michael@0 | 1817 | if (thread->flags & (_PR_ON_PAUSEQ|_PR_ON_SLEEPQ)) { |
michael@0 | 1818 | if (thread->flags & _PR_ON_SLEEPQ) { |
michael@0 | 1819 | PRCList *q = thread->links.next; |
michael@0 | 1820 | if (q != &_PR_SLEEPQ(cpu)) { |
michael@0 | 1821 | if (propagate_time == PR_TRUE) { |
michael@0 | 1822 | PRThread *after = _PR_THREAD_PTR(q); |
michael@0 | 1823 | after->sleep += thread->sleep; |
michael@0 | 1824 | } else |
michael@0 | 1825 | _PR_SLEEPQMAX(cpu) -= thread->sleep; |
michael@0 | 1826 | } else { |
michael@0 | 1827 | /* Check if prev is the beginning of the list; if so, |
michael@0 | 1828 | * we are the only element on the list. |
michael@0 | 1829 | */ |
michael@0 | 1830 | if (thread->links.prev != &_PR_SLEEPQ(cpu)) |
michael@0 | 1831 | _PR_SLEEPQMAX(cpu) -= thread->sleep; |
michael@0 | 1832 | else |
michael@0 | 1833 | _PR_SLEEPQMAX(cpu) = 0; |
michael@0 | 1834 | } |
michael@0 | 1835 | thread->flags &= ~_PR_ON_SLEEPQ; |
michael@0 | 1836 | } else { |
michael@0 | 1837 | thread->flags &= ~_PR_ON_PAUSEQ; |
michael@0 | 1838 | } |
michael@0 | 1839 | PR_REMOVE_LINK(&thread->links); |
michael@0 | 1840 | } else |
michael@0 | 1841 | PR_ASSERT(0); |
michael@0 | 1842 | } |
michael@0 | 1843 | |
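Continuing that illustration for the two deletion paths above, again with hypothetical numbers:

    /*
    ** start:  deltas [10, 15, 5, 10] from the insertion example, max = 40
    ** remove the 5-delta sleeper early, propagate_time == PR_TRUE:
    **     follower inherits it: 10 + 5 = 15  ->  deltas [10, 15, 15], max = 40
    ** then remove the tail (delta 15; it has no follower):
    **     _PR_SLEEPQMAX: 40 - 15 = 25        ->  deltas [10, 15],     max = 25
    */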
michael@0 | 1844 | void |
michael@0 | 1845 | _PR_AddThreadToRunQ( |
michael@0 | 1846 | PRThread *me, /* the current thread */ |
michael@0 | 1847 | PRThread *thread) /* the local thread to be added to a run queue */ |
michael@0 | 1848 | { |
michael@0 | 1849 | PRThreadPriority pri = thread->priority; |
michael@0 | 1850 | _PRCPU *cpu = thread->cpu; |
michael@0 | 1851 | |
michael@0 | 1852 | PR_ASSERT(!_PR_IS_NATIVE_THREAD(thread)); |
michael@0 | 1853 | |
michael@0 | 1854 | #if defined(WINNT) |
michael@0 | 1855 | /* |
michael@0 | 1856 | * On NT, we can only reliably know that the current CPU |
michael@0 | 1857 | * is not idle. We add the awakened thread to the run |
michael@0 | 1858 | * queue of its CPU if its CPU is the current CPU. |
michael@0 | 1859 | * For any other CPU, we don't really know whether it |
michael@0 | 1860 | * is busy or idle. So in all other cases, we just |
michael@0 | 1861 | * "post" the awakened thread to the IO completion port |
michael@0 | 1862 | * for the next idle CPU to execute (this is done in |
michael@0 | 1863 | * _PR_MD_WAKEUP_WAITER). |
michael@0 | 1864 | * Threads with a suspended I/O operation remain bound to |
michael@0 | 1865 | * the same CPU until the I/O is cancelled. |
michael@0 | 1866 | * |
michael@0 | 1867 | * NOTE: the boolean expression below must be the exact |
michael@0 | 1868 | * opposite of the corresponding boolean expression in |
michael@0 | 1869 | * _PR_MD_WAKEUP_WAITER. |
michael@0 | 1870 | */ |
michael@0 | 1871 | if ((!_PR_IS_NATIVE_THREAD(me) && (cpu == me->cpu)) || |
michael@0 | 1872 | (thread->md.thr_bound_cpu)) { |
michael@0 | 1873 | PR_ASSERT(!thread->md.thr_bound_cpu || |
michael@0 | 1874 | (thread->md.thr_bound_cpu == cpu)); |
michael@0 | 1875 | _PR_RUNQ_LOCK(cpu); |
michael@0 | 1876 | _PR_ADD_RUNQ(thread, cpu, pri); |
michael@0 | 1877 | _PR_RUNQ_UNLOCK(cpu); |
michael@0 | 1878 | } |
michael@0 | 1879 | #else |
michael@0 | 1880 | _PR_RUNQ_LOCK(cpu); |
michael@0 | 1881 | _PR_ADD_RUNQ(thread, cpu, pri); |
michael@0 | 1882 | _PR_RUNQ_UNLOCK(cpu); |
michael@0 | 1883 | if (!_PR_IS_NATIVE_THREAD(me) && (cpu == me->cpu)) { |
michael@0 | 1884 | if (pri > me->priority) { |
michael@0 | 1885 | _PR_SET_RESCHED_FLAG(); |
michael@0 | 1886 | } |
michael@0 | 1887 | } |
michael@0 | 1888 | #endif |
michael@0 | 1889 | } |