nsprpub/pr/src/bthreads/btthread.c

Wed, 31 Dec 2014 06:09:35 +0100

author
Michael Schloh von Bennewitz <michael@schloh.com>
date
Wed, 31 Dec 2014 06:09:35 +0100
changeset 0
6474c204b198
permissions
-rw-r--r--

Cloned from the upstream tor-browser origin at tag tor-browser-31.3.0esr-4.5-1-build1,
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f, for hacking purposes.

michael@0 1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
michael@0 2 /* This Source Code Form is subject to the terms of the Mozilla Public
michael@0 3 * License, v. 2.0. If a copy of the MPL was not distributed with this
michael@0 4 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
michael@0 5
michael@0 6 #include <kernel/OS.h>
michael@0 7 #include <support/TLS.h>
michael@0 8
michael@0 9 #include "prlog.h"
michael@0 10 #include "primpl.h"
michael@0 11 #include "prcvar.h"
michael@0 12 #include "prpdce.h"
michael@0 13
michael@0 14 #include <stdlib.h>
michael@0 15 #include <string.h>
michael@0 16 #include <signal.h>
michael@0 17
/* values for PRThread.state (bit flags, OR-ed together) */
#define BT_THREAD_PRIMORD   0x01    /* this is the primordial thread */
#define BT_THREAD_SYSTEM    0x02    /* this is a system thread */
#define BT_THREAD_JOINABLE  0x04    /* this is a joinable thread */

/* Process-wide bookkeeping shared by all threads.  (The tag spelling
   "Bookeeping" is historical; left unchanged in case it is referenced
   elsewhere.) */
struct _BT_Bookeeping
{
    PRLock *ml;             /* a lock to protect ourselves */
    sem_id cleanUpSem;      /* the primordial thread will block on this
                               sem while waiting for the user threads;
                               B_ERROR means "not allocated" */
    PRInt32 threadCount;    /* user thread count */

} bt_book = { NULL, B_ERROR, 0 };


#define BT_TPD_LIMIT 128    /* number of TPD slots we'll provide (arbitrary) */

/* these will be used to map an index returned by PR_NewThreadPrivateIndex()
   to the corresponding beos native TLS slot number, and to the destructor
   for that slot - note that, because it is allocated globally, this data
   will be automatically zeroed for us when the program begins */
static int32 tpd_beosTLSSlots[BT_TPD_LIMIT];
static PRThreadPrivateDTOR tpd_dtors[BT_TPD_LIMIT];

static vint32 tpd_slotsUsed=0; /* number of currently-allocated TPD slots
                                  (vint32: updated with atomic_add()) */
static int32 tls_prThreadSlot; /* TLS slot in which PRThread will be stored */

/* this mutex will be used to synchronize access to every
   PRThread.md.joinSem and PRThread.md.is_joining (we could
   actually allocate one per thread, but that seems a bit excessive,
   especially considering that there will probably be little
   contention, PR_JoinThread() is allowed to block anyway, and the code
   protected by the mutex is short/fast) */
static PRLock *joinSemLock;

/* internal helpers, defined below */
static PRUint32 _bt_MapNSPRToNativePriority( PRThreadPriority priority );
static PRThreadPriority _bt_MapNativeToNSPRPriority( PRUint32 priority );
static void _bt_CleanupThread(void *arg);
static PRThread *_bt_AttachThread();
michael@0 57
/*
** One-time thread-system initialization, run on the primordial thread.
** Builds a PRThread for the (already-running) current thread, applies
** the requested priority, allocates the global locks, and reserves the
** native TLS slot used by PR_GetCurrentThread().
**
** Errors are reported via PR_SetError() only; the function returns void.
** NOTE(review): on the later failure paths, objects allocated earlier
** (joinSemLock, primordialThread) are not released - presumably
** acceptable because a failed init is fatal to NSPR anyway; confirm.
*/
void
_PR_InitThreads (PRThreadType type, PRThreadPriority priority,
                 PRUintn maxPTDs)
{
    PRThread *primordialThread;
    PRUint32 beThreadPriority;

    /* allocate joinSem mutex */
    joinSemLock = PR_NewLock();
    if (joinSemLock == NULL)
    {
        PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
        return;
    }

    /*
    ** Create and initialize NSPR structure for our primordial thread.
    */

    primordialThread = PR_NEWZAP(PRThread);
    if( NULL == primordialThread )
    {
        PR_SetError( PR_OUT_OF_MEMORY_ERROR, 0 );
        return;
    }

    /* B_ERROR marks the join sem as "not yet allocated" */
    primordialThread->md.joinSem = B_ERROR;

    /*
    ** Set the priority to the desired level.
    */

    beThreadPriority = _bt_MapNSPRToNativePriority( priority );

    /* find_thread(NULL) yields the calling (primordial) thread's id */
    set_thread_priority( find_thread( NULL ), beThreadPriority );

    primordialThread->priority = priority;


    /* set the thread's state - note that the thread is not joinable */
    primordialThread->state |= BT_THREAD_PRIMORD;
    if (type == PR_SYSTEM_THREAD)
        primordialThread->state |= BT_THREAD_SYSTEM;

    /*
    ** Allocate a TLS slot for the PRThread structure (just using
    ** native TLS, as opposed to NSPR TPD, will make PR_GetCurrentThread()
    ** somewhat faster, and will leave one more TPD slot for our client)
    */

    tls_prThreadSlot = tls_allocate();

    /*
    ** Stuff our new PRThread structure into our thread specific
    ** slot.
    */

    tls_set(tls_prThreadSlot, primordialThread);

    /* allocate lock for bt_book */
    bt_book.ml = PR_NewLock();
    if( NULL == bt_book.ml )
    {
        PR_SetError( PR_OUT_OF_MEMORY_ERROR, 0 );
        return;
    }
}
michael@0 125
michael@0 126 PRUint32
michael@0 127 _bt_MapNSPRToNativePriority( PRThreadPriority priority )
michael@0 128 {
michael@0 129 switch( priority )
michael@0 130 {
michael@0 131 case PR_PRIORITY_LOW: return( B_LOW_PRIORITY );
michael@0 132 case PR_PRIORITY_NORMAL: return( B_NORMAL_PRIORITY );
michael@0 133 case PR_PRIORITY_HIGH: return( B_DISPLAY_PRIORITY );
michael@0 134 case PR_PRIORITY_URGENT: return( B_URGENT_DISPLAY_PRIORITY );
michael@0 135 default: return( B_NORMAL_PRIORITY );
michael@0 136 }
michael@0 137 }
michael@0 138
michael@0 139 PRThreadPriority
michael@0 140 _bt_MapNativeToNSPRPriority(PRUint32 priority)
michael@0 141 {
michael@0 142 if (priority < B_NORMAL_PRIORITY)
michael@0 143 return PR_PRIORITY_LOW;
michael@0 144 if (priority < B_DISPLAY_PRIORITY)
michael@0 145 return PR_PRIORITY_NORMAL;
michael@0 146 if (priority < B_URGENT_DISPLAY_PRIORITY)
michael@0 147 return PR_PRIORITY_HIGH;
michael@0 148 return PR_PRIORITY_URGENT;
michael@0 149 }
michael@0 150
/*
** NOTE(review): despite its lowercase-m "NativeToNSPR" name and int32
** parameter, this function is a duplicate of
** _bt_MapNSPRToNativePriority(): it switches on NSPR PR_PRIORITY_*
** constants and returns BeOS B_* priorities.  It matches no prototype
** above and appears to be dead/vestigial code.  Kept byte-identical
** here because removing or renaming it would change the file's symbol
** set - TODO confirm it is unreferenced and delete it.
*/
PRUint32
_bt_mapNativeToNSPRPriority( int32 priority )
{
    switch( priority )
    {
        case PR_PRIORITY_LOW:    return( B_LOW_PRIORITY );
        case PR_PRIORITY_NORMAL: return( B_NORMAL_PRIORITY );
        case PR_PRIORITY_HIGH:   return( B_DISPLAY_PRIORITY );
        case PR_PRIORITY_URGENT: return( B_URGENT_DISPLAY_PRIORITY );
        default:                 return( B_NORMAL_PRIORITY );
    }
}
michael@0 163
/* This method is called by all NSPR threads as they exit: it runs the
   thread-private-data destructors, performs the joinable-thread
   handshake with any PR_JoinThread() caller, updates the user-thread
   count that PR_Cleanup() waits on, and frees the PRThread.
   The joinSem protocol mirrors PR_JoinThread(): whichever side arrives
   first creates the sem and blocks on it; the other side deletes it,
   which unblocks the waiter. */
void _bt_CleanupThread(void *arg)
{
    PRThread *me = PR_GetCurrentThread();
    int32 i;

    /* first, clean up all thread-private data */
    for (i = 0; i < tpd_slotsUsed; i++)
    {
        void *oldValue = tls_get(tpd_beosTLSSlots[i]);
        if ( oldValue != NULL && tpd_dtors[i] != NULL )
            (*tpd_dtors[i])(oldValue);
    }

    /* if this thread is joinable, wait for someone to join it */
    if (me->state & BT_THREAD_JOINABLE)
    {
        /* protect access to our joinSem */
        PR_Lock(joinSemLock);

        if (me->md.is_joining)
        {
            /* someone is already waiting to join us (they've
               allocated a joinSem for us) - let them know we're
               ready (deleting the sem aborts their acquire_sem()) */
            delete_sem(me->md.joinSem);

            PR_Unlock(joinSemLock);

        }
        else
        {
            /* no one is currently waiting for our demise - it
               is our responsibility to allocate the joinSem
               and block on it */
            me->md.joinSem = create_sem(0, "join sem");

            /* we're done accessing our joinSem */
            PR_Unlock(joinSemLock);

            /* wait for someone to join us (B_INTERRUPTED just means a
               signal arrived; keep waiting) */
            while (acquire_sem(me->md.joinSem) == B_INTERRUPTED);
        }
    }

    /* if this is a user thread, we must update our books */
    if ((me->state & BT_THREAD_SYSTEM) == 0)
    {
        /* synchronize access to bt_book */
        PR_Lock( bt_book.ml );

        /* decrement the number of currently-alive user threads */
        bt_book.threadCount--;

        if (bt_book.threadCount == 0 && bt_book.cleanUpSem != B_ERROR) {
            /* we are the last user thread, and the primordial thread is
               blocked in PR_Cleanup() waiting for us to finish - notify
               it */
            delete_sem(bt_book.cleanUpSem);
        }

        PR_Unlock( bt_book.ml );
    }

    /* finally, delete this thread's PRThread */
    PR_DELETE(me);
}
michael@0 231
michael@0 232 /**
michael@0 233 * This is a wrapper that all threads invoke that allows us to set some
michael@0 234 * things up prior to a thread's invocation and clean up after a thread has
michael@0 235 * exited.
michael@0 236 */
michael@0 237 static void*
michael@0 238 _bt_root (void* arg)
michael@0 239 {
michael@0 240 PRThread *thred = (PRThread*)arg;
michael@0 241 PRIntn rv;
michael@0 242 void *privData;
michael@0 243 status_t result;
michael@0 244 int i;
michael@0 245
michael@0 246 /* save our PRThread object into our TLS */
michael@0 247 tls_set(tls_prThreadSlot, thred);
michael@0 248
michael@0 249 thred->startFunc(thred->arg); /* run the dang thing */
michael@0 250
michael@0 251 /* clean up */
michael@0 252 _bt_CleanupThread(NULL);
michael@0 253
michael@0 254 return 0;
michael@0 255 }
michael@0 256
michael@0 257 PR_IMPLEMENT(PRThread*)
michael@0 258 PR_CreateThread (PRThreadType type, void (*start)(void* arg), void* arg,
michael@0 259 PRThreadPriority priority, PRThreadScope scope,
michael@0 260 PRThreadState state, PRUint32 stackSize)
michael@0 261 {
michael@0 262 PRUint32 bePriority;
michael@0 263
michael@0 264 PRThread* thred;
michael@0 265
michael@0 266 if (!_pr_initialized) _PR_ImplicitInitialization();
michael@0 267
michael@0 268 thred = PR_NEWZAP(PRThread);
michael@0 269 if (thred == NULL)
michael@0 270 {
michael@0 271 PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
michael@0 272 return NULL;
michael@0 273 }
michael@0 274
michael@0 275 thred->md.joinSem = B_ERROR;
michael@0 276
michael@0 277 thred->arg = arg;
michael@0 278 thred->startFunc = start;
michael@0 279 thred->priority = priority;
michael@0 280
michael@0 281 if( state == PR_JOINABLE_THREAD )
michael@0 282 {
michael@0 283 thred->state |= BT_THREAD_JOINABLE;
michael@0 284 }
michael@0 285
michael@0 286 /* keep some books */
michael@0 287
michael@0 288 PR_Lock( bt_book.ml );
michael@0 289
michael@0 290 if (type == PR_USER_THREAD)
michael@0 291 {
michael@0 292 bt_book.threadCount++;
michael@0 293 }
michael@0 294
michael@0 295 PR_Unlock( bt_book.ml );
michael@0 296
michael@0 297 bePriority = _bt_MapNSPRToNativePriority( priority );
michael@0 298
michael@0 299 thred->md.tid = spawn_thread((thread_func)_bt_root, "moz-thread",
michael@0 300 bePriority, thred);
michael@0 301 if (thred->md.tid < B_OK) {
michael@0 302 PR_SetError(PR_UNKNOWN_ERROR, thred->md.tid);
michael@0 303 PR_DELETE(thred);
michael@0 304 return NULL;
michael@0 305 }
michael@0 306
michael@0 307 if (resume_thread(thred->md.tid) < B_OK) {
michael@0 308 PR_SetError(PR_UNKNOWN_ERROR, 0);
michael@0 309 PR_DELETE(thred);
michael@0 310 return NULL;
michael@0 311 }
michael@0 312
michael@0 313 return thred;
michael@0 314 }
michael@0 315
/*
** Adopt the calling native thread into NSPR.  The type/priority/stack
** arguments are ignored here: PR_GetCurrentThread() lazily attaches any
** thread that has no PRThread yet (see _bt_AttachThread()).
*/
PR_IMPLEMENT(PRThread*)
PR_AttachThread(PRThreadType type, PRThreadPriority priority,
                PRThreadStack *stack)
{
    /* PR_GetCurrentThread() will attach a thread if necessary */
    return PR_GetCurrentThread();
}
michael@0 323
/*
** No-op on this platform: attached threads are torn down by
** _bt_CleanupThread() when the native thread exits (see the
** on_exit_thread() registration in _bt_AttachThread()).
*/
PR_IMPLEMENT(void)
PR_DetachThread()
{
    /* we don't support detaching */
}
michael@0 329
michael@0 330 PR_IMPLEMENT(PRStatus)
michael@0 331 PR_JoinThread (PRThread* thred)
michael@0 332 {
michael@0 333 status_t eval, status;
michael@0 334
michael@0 335 PR_ASSERT(thred != NULL);
michael@0 336
michael@0 337 if ((thred->state & BT_THREAD_JOINABLE) == 0)
michael@0 338 {
michael@0 339 PR_SetError( PR_INVALID_ARGUMENT_ERROR, 0 );
michael@0 340 return( PR_FAILURE );
michael@0 341 }
michael@0 342
michael@0 343 /* synchronize access to the thread's joinSem */
michael@0 344 PR_Lock(joinSemLock);
michael@0 345
michael@0 346 if (thred->md.is_joining)
michael@0 347 {
michael@0 348 /* another thread is already waiting to join the specified
michael@0 349 thread - we must fail */
michael@0 350 PR_Unlock(joinSemLock);
michael@0 351 return PR_FAILURE;
michael@0 352 }
michael@0 353
michael@0 354 /* let others know we are waiting to join */
michael@0 355 thred->md.is_joining = PR_TRUE;
michael@0 356
michael@0 357 if (thred->md.joinSem == B_ERROR)
michael@0 358 {
michael@0 359 /* the thread hasn't finished yet - it is our responsibility to
michael@0 360 allocate a joinSem and wait on it */
michael@0 361 thred->md.joinSem = create_sem(0, "join sem");
michael@0 362
michael@0 363 /* we're done changing the joinSem now */
michael@0 364 PR_Unlock(joinSemLock);
michael@0 365
michael@0 366 /* wait for the thread to finish */
michael@0 367 while (acquire_sem(thred->md.joinSem) == B_INTERRUPTED);
michael@0 368
michael@0 369 }
michael@0 370 else
michael@0 371 {
michael@0 372 /* the thread has already finished, and has allocated the
michael@0 373 joinSem itself - let it know it can finally die */
michael@0 374 delete_sem(thred->md.joinSem);
michael@0 375
michael@0 376 PR_Unlock(joinSemLock);
michael@0 377 }
michael@0 378
michael@0 379 /* make sure the thread is dead */
michael@0 380 wait_for_thread(thred->md.tid, &eval);
michael@0 381
michael@0 382 return PR_SUCCESS;
michael@0 383 }
michael@0 384
michael@0 385 PR_IMPLEMENT(PRThread*)
michael@0 386 PR_GetCurrentThread ()
michael@0 387 {
michael@0 388 PRThread* thred;
michael@0 389
michael@0 390 if (!_pr_initialized) _PR_ImplicitInitialization();
michael@0 391
michael@0 392 thred = (PRThread *)tls_get( tls_prThreadSlot);
michael@0 393 if (thred == NULL)
michael@0 394 {
michael@0 395 /* this thread doesn't have a PRThread structure (it must be
michael@0 396 a native thread not created by the NSPR) - assimilate it */
michael@0 397 thred = _bt_AttachThread();
michael@0 398 }
michael@0 399 PR_ASSERT(NULL != thred);
michael@0 400
michael@0 401 return thred;
michael@0 402 }
michael@0 403
michael@0 404 PR_IMPLEMENT(PRThreadScope)
michael@0 405 PR_GetThreadScope (const PRThread* thred)
michael@0 406 {
michael@0 407 PR_ASSERT(thred != NULL);
michael@0 408 return PR_GLOBAL_THREAD;
michael@0 409 }
michael@0 410
michael@0 411 PR_IMPLEMENT(PRThreadType)
michael@0 412 PR_GetThreadType (const PRThread* thred)
michael@0 413 {
michael@0 414 PR_ASSERT(thred != NULL);
michael@0 415 return (thred->state & BT_THREAD_SYSTEM) ?
michael@0 416 PR_SYSTEM_THREAD : PR_USER_THREAD;
michael@0 417 }
michael@0 418
michael@0 419 PR_IMPLEMENT(PRThreadState)
michael@0 420 PR_GetThreadState (const PRThread* thred)
michael@0 421 {
michael@0 422 PR_ASSERT(thred != NULL);
michael@0 423 return (thred->state & BT_THREAD_JOINABLE)?
michael@0 424 PR_JOINABLE_THREAD: PR_UNJOINABLE_THREAD;
michael@0 425 }
michael@0 426
/*
** Return the thread's cached NSPR priority - the value recorded by
** PR_CreateThread()/PR_SetThreadPriority(); the kernel is not queried.
*/
PR_IMPLEMENT(PRThreadPriority)
PR_GetThreadPriority (const PRThread* thred)
{
    PR_ASSERT(thred != NULL);
    return thred->priority;
} /* PR_GetThreadPriority */
michael@0 433
michael@0 434 PR_IMPLEMENT(void) PR_SetThreadPriority(PRThread *thred,
michael@0 435 PRThreadPriority newPri)
michael@0 436 {
michael@0 437 PRUint32 bePriority;
michael@0 438
michael@0 439 PR_ASSERT( thred != NULL );
michael@0 440
michael@0 441 thred->priority = newPri;
michael@0 442 bePriority = _bt_MapNSPRToNativePriority( newPri );
michael@0 443 set_thread_priority( thred->md.tid, bePriority );
michael@0 444 }
michael@0 445
/*
** Allocate a new thread-private-data index, backed by a fresh native
** TLS slot, and remember its destructor.  Fails with
** PR_TPD_RANGE_ERROR once all BT_TPD_LIMIT slots are taken.
**
** NOTE(review): the slot is published (tpd_slotsUsed incremented)
** before tpd_beosTLSSlots[index] is filled in, so a concurrent
** PR_Get/SetThreadPrivate() on the brand-new index could briefly see
** slot 0 - presumably benign in practice, but worth confirming.
*/
PR_IMPLEMENT(PRStatus)
PR_NewThreadPrivateIndex (PRUintn* newIndex,
                          PRThreadPrivateDTOR destructor)
{
    int32 index;

    if (!_pr_initialized) _PR_ImplicitInitialization();

    /* reserve the next available tpd slot */
    index = atomic_add( &tpd_slotsUsed, 1 );
    if (index >= BT_TPD_LIMIT)
    {
        /* no slots left - decrement value, then fail */
        atomic_add( &tpd_slotsUsed, -1 );
        PR_SetError( PR_TPD_RANGE_ERROR, 0 );
        return( PR_FAILURE );
    }

    /* allocate a beos-native TLS slot for this index (the new slot
       automatically contains NULL) */
    tpd_beosTLSSlots[index] = tls_allocate();

    /* remember the destructor */
    tpd_dtors[index] = destructor;

    *newIndex = (PRUintn)index;

    return( PR_SUCCESS );
}
michael@0 475
michael@0 476 PR_IMPLEMENT(PRStatus)
michael@0 477 PR_SetThreadPrivate (PRUintn index, void* priv)
michael@0 478 {
michael@0 479 void *oldValue;
michael@0 480
michael@0 481 /*
michael@0 482 ** Sanity checking
michael@0 483 */
michael@0 484
michael@0 485 if(index < 0 || index >= tpd_slotsUsed || index >= BT_TPD_LIMIT)
michael@0 486 {
michael@0 487 PR_SetError( PR_TPD_RANGE_ERROR, 0 );
michael@0 488 return( PR_FAILURE );
michael@0 489 }
michael@0 490
michael@0 491 /* if the old value isn't NULL, and the dtor for this slot isn't
michael@0 492 NULL, we must destroy the data */
michael@0 493 oldValue = tls_get(tpd_beosTLSSlots[index]);
michael@0 494 if (oldValue != NULL && tpd_dtors[index] != NULL)
michael@0 495 (*tpd_dtors[index])(oldValue);
michael@0 496
michael@0 497 /* save new value */
michael@0 498 tls_set(tpd_beosTLSSlots[index], priv);
michael@0 499
michael@0 500 return( PR_SUCCESS );
michael@0 501 }
michael@0 502
michael@0 503 PR_IMPLEMENT(void*)
michael@0 504 PR_GetThreadPrivate (PRUintn index)
michael@0 505 {
michael@0 506 /* make sure the index is valid */
michael@0 507 if (index < 0 || index >= tpd_slotsUsed || index >= BT_TPD_LIMIT)
michael@0 508 {
michael@0 509 PR_SetError( PR_TPD_RANGE_ERROR, 0 );
michael@0 510 return NULL;
michael@0 511 }
michael@0 512
michael@0 513 /* return the value */
michael@0 514 return tls_get( tpd_beosTLSSlots[index] );
michael@0 515 }
michael@0 516
michael@0 517
michael@0 518 PR_IMPLEMENT(PRStatus)
michael@0 519 PR_Interrupt (PRThread* thred)
michael@0 520 {
michael@0 521 PRIntn rv;
michael@0 522
michael@0 523 PR_ASSERT(thred != NULL);
michael@0 524
michael@0 525 /*
michael@0 526 ** there seems to be a bug in beos R5 in which calling
michael@0 527 ** resume_thread() on a blocked thread returns B_OK instead
michael@0 528 ** of B_BAD_THREAD_STATE (beos bug #20000422-19095). as such,
michael@0 529 ** to interrupt a thread, we will simply suspend then resume it
michael@0 530 ** (no longer call resume_thread(), check for B_BAD_THREAD_STATE,
michael@0 531 ** the suspend/resume to wake up a blocked thread). this wakes
michael@0 532 ** up blocked threads properly, and doesn't hurt unblocked threads
michael@0 533 ** (they simply get stopped then re-started immediately)
michael@0 534 */
michael@0 535
michael@0 536 rv = suspend_thread( thred->md.tid );
michael@0 537 if( rv != B_NO_ERROR )
michael@0 538 {
michael@0 539 /* this doesn't appear to be a valid thread_id */
michael@0 540 PR_SetError( PR_UNKNOWN_ERROR, rv );
michael@0 541 return PR_FAILURE;
michael@0 542 }
michael@0 543
michael@0 544 rv = resume_thread( thred->md.tid );
michael@0 545 if( rv != B_NO_ERROR )
michael@0 546 {
michael@0 547 PR_SetError( PR_UNKNOWN_ERROR, rv );
michael@0 548 return PR_FAILURE;
michael@0 549 }
michael@0 550
michael@0 551 return PR_SUCCESS;
michael@0 552 }
michael@0 553
/*
** No-op: this implementation keeps no pending-interrupt state
** (PR_Interrupt() wakes the target by suspend/resume instead), so
** there is nothing to clear.
*/
PR_IMPLEMENT(void)
PR_ClearInterrupt ()
{
}
michael@0 558
michael@0 559 PR_IMPLEMENT(PRStatus)
michael@0 560 PR_Yield ()
michael@0 561 {
michael@0 562 /* we just sleep for long enough to cause a reschedule (100
michael@0 563 microseconds) */
michael@0 564 snooze(100);
michael@0 565 }
michael@0 566
michael@0 567 #define BT_MILLION 1000000UL
michael@0 568
michael@0 569 PR_IMPLEMENT(PRStatus)
michael@0 570 PR_Sleep (PRIntervalTime ticks)
michael@0 571 {
michael@0 572 bigtime_t tps;
michael@0 573 status_t status;
michael@0 574
michael@0 575 if (!_pr_initialized) _PR_ImplicitInitialization();
michael@0 576
michael@0 577 tps = PR_IntervalToMicroseconds( ticks );
michael@0 578
michael@0 579 status = snooze(tps);
michael@0 580 if (status == B_NO_ERROR) return PR_SUCCESS;
michael@0 581
michael@0 582 PR_SetError(PR_NOT_IMPLEMENTED_ERROR, status);
michael@0 583 return PR_FAILURE;
michael@0 584 }
michael@0 585
/*
** Called by the primordial thread to shut NSPR down: blocks until all
** user threads have run _bt_CleanupThread().  The last exiting user
** thread deletes cleanUpSem, which aborts our acquire_sem() below.
** Returns PR_FAILURE if called from any other thread.
*/
PR_IMPLEMENT(PRStatus)
PR_Cleanup ()
{
    PRThread *me = PR_GetCurrentThread();

    PR_ASSERT(me->state & BT_THREAD_PRIMORD);
    if ((me->state & BT_THREAD_PRIMORD) == 0) {
        return PR_FAILURE;
    }

    PR_Lock( bt_book.ml );

    if (bt_book.threadCount != 0)
    {
        /* we'll have to wait for some threads to finish - create a
           sem to block on */
        bt_book.cleanUpSem = create_sem(0, "cleanup sem");
    }

    PR_Unlock( bt_book.ml );

    /* note that, if all the user threads were already dead, we
       wouldn't have created a sem above, so this acquire_sem()
       will fail immediately (cleanUpSem is still B_ERROR); only a
       signal (B_INTERRUPTED) keeps us looping */
    while (acquire_sem(bt_book.cleanUpSem) == B_INTERRUPTED);

    return PR_SUCCESS;
}
michael@0 614
/*
** Terminate the whole process with the given status via the C library
** exit(); no NSPR per-thread cleanup is performed.
*/
PR_IMPLEMENT(void)
PR_ProcessExit (PRIntn status)
{
    exit(status);
}
michael@0 620
/*
** Adopt a native BeOS thread that was not created through NSPR:
** allocate and initialize a PRThread for it, bump the user-thread
** count, publish the PRThread in TLS, and register _bt_CleanupThread()
** to run at thread exit.  Returns the new PRThread, or NULL with
** PR_OUT_OF_MEMORY_ERROR set.
*/
PRThread *_bt_AttachThread()
{
    PRThread *thread;
    thread_info tInfo;

    /* make sure this thread doesn't already have a PRThread structure */
    PR_ASSERT(tls_get(tls_prThreadSlot) == NULL);

    /* allocate a PRThread structure for this thread */
    thread = PR_NEWZAP(PRThread);
    if (thread == NULL)
    {
        PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
        return NULL;
    }

    /* get the native thread's current state */
    get_thread_info(find_thread(NULL), &tInfo);

    /* initialize new PRThread */
    thread->md.tid = tInfo.thread;
    thread->md.joinSem = B_ERROR;   /* no join sem allocated yet */
    thread->priority = _bt_MapNativeToNSPRPriority(tInfo.priority);

    /* attached threads are always non-joinable user threads */
    thread->state = 0;

    /* increment user thread count */
    PR_Lock(bt_book.ml);
    bt_book.threadCount++;
    PR_Unlock(bt_book.ml);

    /* store this thread's PRThread */
    tls_set(tls_prThreadSlot, thread);

    /* the thread must call _bt_CleanupThread() before it dies, in order
       to clean up its PRThread, synchronize with the primordial thread,
       etc. */
    on_exit_thread(_bt_CleanupThread, NULL);

    return thread;
}

mercurial