/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include <kernel/OS.h>
#include <support/TLS.h>

#include "prlog.h"
#include "primpl.h"
#include "prcvar.h"
#include "prpdce.h"

#include <stdlib.h>
#include <string.h>
#include <signal.h>
|

/* values for PRThread.state */
#define BT_THREAD_PRIMORD   0x01    /* this is the primordial thread */
#define BT_THREAD_SYSTEM    0x02    /* this is a system thread */
#define BT_THREAD_JOINABLE  0x04    /* this is a joinable thread */

struct _BT_Bookeeping
{
    PRLock *ml;             /* a lock to protect ourselves */
    sem_id cleanUpSem;      /* the primordial thread will block on this
                               sem while waiting for the user threads */
    PRInt32 threadCount;    /* user thread count */
} bt_book = { NULL, B_ERROR, 0 };
|

#define BT_TPD_LIMIT 128    /* number of TPD slots we'll provide (arbitrary) */

/* these will be used to map an index returned by PR_NewThreadPrivateIndex()
   to the corresponding BeOS native TLS slot number, and to the destructor
   for that slot - note that, because it is allocated globally, this data
   will be automatically zeroed for us when the program begins */
static int32 tpd_beosTLSSlots[BT_TPD_LIMIT];
static PRThreadPrivateDTOR tpd_dtors[BT_TPD_LIMIT];
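
/* for illustration: if PR_NewThreadPrivateIndex() hands out index 2, the
   value for that index lives in the native slot tpd_beosTLSSlots[2], and
   tpd_dtors[2] (if non-NULL) is run on the non-NULL value at thread exit
   or when the value is replaced */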
|

static vint32 tpd_slotsUsed = 0;    /* number of currently-allocated TPD slots */
static int32 tls_prThreadSlot;      /* TLS slot in which PRThread will be stored */

/* this mutex will be used to synchronize access to every
   PRThread.md.joinSem and PRThread.md.is_joining (we could
   actually allocate one per thread, but that seems a bit excessive,
   especially considering that there will probably be little
   contention, PR_JoinThread() is allowed to block anyway, and the code
   protected by the mutex is short/fast) */
static PRLock *joinSemLock;

static PRUint32 _bt_MapNSPRToNativePriority(PRThreadPriority priority);
static PRThreadPriority _bt_MapNativeToNSPRPriority(PRUint32 priority);
static void _bt_CleanupThread(void *arg);
static PRThread *_bt_AttachThread(void);
|

void
_PR_InitThreads (PRThreadType type, PRThreadPriority priority,
                 PRUintn maxPTDs)
{
    PRThread *primordialThread;
    PRUint32 beThreadPriority;

    /* allocate joinSem mutex */
    joinSemLock = PR_NewLock();
    if (joinSemLock == NULL)
    {
        PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
        return;
    }

    /*
    ** Create and initialize NSPR structure for our primordial thread.
    */

    primordialThread = PR_NEWZAP(PRThread);
    if (NULL == primordialThread)
    {
        PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
        return;
    }

    primordialThread->md.joinSem = B_ERROR;

    /*
    ** Set the priority to the desired level.
    */

    beThreadPriority = _bt_MapNSPRToNativePriority(priority);

    set_thread_priority(find_thread(NULL), beThreadPriority);

    primordialThread->priority = priority;

    /* set the thread's state - note that the thread is not joinable */
    primordialThread->state |= BT_THREAD_PRIMORD;
    if (type == PR_SYSTEM_THREAD)
        primordialThread->state |= BT_THREAD_SYSTEM;

    /*
    ** Allocate a TLS slot for the PRThread structure (just using
    ** native TLS, as opposed to NSPR TPD, will make PR_GetCurrentThread()
    ** somewhat faster, and will leave one more TPD slot for our client)
    */

    tls_prThreadSlot = tls_allocate();

    /*
    ** Stuff our new PRThread structure into our thread specific
    ** slot.
    */

    tls_set(tls_prThreadSlot, primordialThread);

    /* allocate lock for bt_book */
    bt_book.ml = PR_NewLock();
    if (NULL == bt_book.ml)
    {
        PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
        return;
    }
}
|

static PRUint32
_bt_MapNSPRToNativePriority (PRThreadPriority priority)
{
    switch (priority)
    {
        case PR_PRIORITY_LOW:    return B_LOW_PRIORITY;
        case PR_PRIORITY_NORMAL: return B_NORMAL_PRIORITY;
        case PR_PRIORITY_HIGH:   return B_DISPLAY_PRIORITY;
        case PR_PRIORITY_URGENT: return B_URGENT_DISPLAY_PRIORITY;
        default:                 return B_NORMAL_PRIORITY;
    }
}

static PRThreadPriority
_bt_MapNativeToNSPRPriority (PRUint32 priority)
{
    if (priority < B_NORMAL_PRIORITY)
        return PR_PRIORITY_LOW;
    if (priority < B_DISPLAY_PRIORITY)
        return PR_PRIORITY_NORMAL;
    if (priority < B_URGENT_DISPLAY_PRIORITY)
        return PR_PRIORITY_HIGH;
    return PR_PRIORITY_URGENT;
}
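
/* the thresholds above rely on BeOS's scheduler constants being ordered
   B_LOW_PRIORITY (5) < B_NORMAL_PRIORITY (10) < B_DISPLAY_PRIORITY (15)
   < B_URGENT_DISPLAY_PRIORITY (20), so e.g. a native priority of 12 maps
   back to PR_PRIORITY_NORMAL */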
|
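
/*
** A note on the join handshake used below and in PR_JoinThread():
** whichever side gets to the rendezvous first (the dying thread in
** _bt_CleanupThread(), or the joiner in PR_JoinThread()) creates
** md.joinSem and blocks in acquire_sem(); the side that arrives second
** calls delete_sem() instead, which releases the blocked acquire_sem()
** (it returns B_BAD_SEM_ID once the sem is deleted, ending the retry
** loop, which only continues on B_INTERRUPTED). joinSemLock makes the
** check-then-create/delete sequences atomic. bt_book.cleanUpSem uses
** the same delete_sem()-as-notification trick in PR_Cleanup().
*/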
|

/* This method is called by all NSPR threads as they exit */
static void _bt_CleanupThread(void *arg)
{
    PRThread *me = PR_GetCurrentThread();
    int32 i;

    /* first, clean up all thread-private data */
    for (i = 0; i < tpd_slotsUsed; i++)
    {
        void *oldValue = tls_get(tpd_beosTLSSlots[i]);
        if (oldValue != NULL && tpd_dtors[i] != NULL)
            (*tpd_dtors[i])(oldValue);
    }

    /* if this thread is joinable, wait for someone to join it */
    if (me->state & BT_THREAD_JOINABLE)
    {
        /* protect access to our joinSem */
        PR_Lock(joinSemLock);

        if (me->md.is_joining)
        {
            /* someone is already waiting to join us (they've
               allocated a joinSem for us) - let them know we're
               ready */
            delete_sem(me->md.joinSem);

            PR_Unlock(joinSemLock);
        }
        else
        {
            /* no one is currently waiting for our demise - it
               is our responsibility to allocate the joinSem
               and block on it */
            me->md.joinSem = create_sem(0, "join sem");

            /* we're done accessing our joinSem */
            PR_Unlock(joinSemLock);

            /* wait for someone to join us (the joiner's delete_sem()
               ends this loop; only B_INTERRUPTED is retried) */
            while (acquire_sem(me->md.joinSem) == B_INTERRUPTED);
        }
    }

    /* if this is a user thread, we must update our books */
    if ((me->state & BT_THREAD_SYSTEM) == 0)
    {
        /* synchronize access to bt_book */
        PR_Lock(bt_book.ml);

        /* decrement the number of currently-alive user threads */
        bt_book.threadCount--;

        if (bt_book.threadCount == 0 && bt_book.cleanUpSem != B_ERROR) {
            /* we are the last user thread, and the primordial thread is
               blocked in PR_Cleanup() waiting for us to finish - notify
               it */
            delete_sem(bt_book.cleanUpSem);
        }

        PR_Unlock(bt_book.ml);
    }

    /* finally, delete this thread's PRThread */
    PR_DELETE(me);
}
|

/**
 * This is a wrapper that all threads invoke that allows us to set some
 * things up prior to a thread's invocation and clean up after a thread has
 * exited.
 */
static void*
_bt_root (void* arg)
{
    PRThread *thred = (PRThread*)arg;

    /* save our PRThread object into our TLS */
    tls_set(tls_prThreadSlot, thred);

    thred->startFunc(thred->arg);    /* run the dang thing */

    /* clean up */
    _bt_CleanupThread(NULL);

    return NULL;
}
|

PR_IMPLEMENT(PRThread*)
PR_CreateThread (PRThreadType type, void (*start)(void* arg), void* arg,
                 PRThreadPriority priority, PRThreadScope scope,
                 PRThreadState state, PRUint32 stackSize)
{
    PRUint32 bePriority;
    PRThread* thred;

    if (!_pr_initialized) _PR_ImplicitInitialization();

    thred = PR_NEWZAP(PRThread);
    if (thred == NULL)
    {
        PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
        return NULL;
    }

    thred->md.joinSem = B_ERROR;

    thred->arg = arg;
    thred->startFunc = start;
    thred->priority = priority;

    if (state == PR_JOINABLE_THREAD)
    {
        thred->state |= BT_THREAD_JOINABLE;
    }

    /* flag system threads, so that _bt_CleanupThread() and
       PR_GetThreadType() can tell them apart from user threads */
    if (type == PR_SYSTEM_THREAD)
    {
        thred->state |= BT_THREAD_SYSTEM;
    }

    /* keep some books */

    PR_Lock(bt_book.ml);

    if (type == PR_USER_THREAD)
    {
        bt_book.threadCount++;
    }

    PR_Unlock(bt_book.ml);

    bePriority = _bt_MapNSPRToNativePriority(priority);

    thred->md.tid = spawn_thread((thread_func)_bt_root, "moz-thread",
                                 bePriority, thred);
    if (thred->md.tid < B_OK) {
        PR_SetError(PR_UNKNOWN_ERROR, thred->md.tid);
        goto failure;
    }

    if (resume_thread(thred->md.tid) < B_OK) {
        PR_SetError(PR_UNKNOWN_ERROR, 0);
        goto failure;
    }

    return thred;

failure:
    /* undo the bookkeeping from above, so that PR_Cleanup() isn't left
       waiting for a thread that never ran */
    if (type == PR_USER_THREAD)
    {
        PR_Lock(bt_book.ml);
        bt_book.threadCount--;
        PR_Unlock(bt_book.ml);
    }
    PR_DELETE(thred);
    return NULL;
}
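
/* illustrative usage (a sketch, not compiled as part of this file):
 *
 *     static void run(void *arg)
 *     {
 *         ...
 *     }
 *
 *     PRThread *t = PR_CreateThread(PR_USER_THREAD, run, NULL,
 *                                   PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD,
 *                                   PR_JOINABLE_THREAD, 0);
 *     if (t != NULL)
 *         PR_JoinThread(t);    (blocks until run() returns)
 */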
|

PR_IMPLEMENT(PRThread*)
PR_AttachThread(PRThreadType type, PRThreadPriority priority,
                PRThreadStack *stack)
{
    /* PR_GetCurrentThread() will attach a thread if necessary */
    return PR_GetCurrentThread();
}

PR_IMPLEMENT(void)
PR_DetachThread()
{
    /* we don't support detaching */
}

PR_IMPLEMENT(PRStatus)
PR_JoinThread (PRThread* thred)
{
    status_t eval;

    PR_ASSERT(thred != NULL);

    if ((thred->state & BT_THREAD_JOINABLE) == 0)
    {
        PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0);
        return PR_FAILURE;
    }

    /* synchronize access to the thread's joinSem */
    PR_Lock(joinSemLock);

    if (thred->md.is_joining)
    {
        /* another thread is already waiting to join the specified
           thread - we must fail */
        PR_Unlock(joinSemLock);
        return PR_FAILURE;
    }

    /* let others know we are waiting to join */
    thred->md.is_joining = PR_TRUE;

    if (thred->md.joinSem == B_ERROR)
    {
        /* the thread hasn't finished yet - it is our responsibility to
           allocate a joinSem and wait on it */
        thred->md.joinSem = create_sem(0, "join sem");

        /* we're done changing the joinSem now */
        PR_Unlock(joinSemLock);

        /* wait for the thread to finish (its delete_sem() in
           _bt_CleanupThread() ends this loop) */
        while (acquire_sem(thred->md.joinSem) == B_INTERRUPTED);
    }
    else
    {
        /* the thread has already finished, and has allocated the
           joinSem itself - let it know it can finally die */
        delete_sem(thred->md.joinSem);

        PR_Unlock(joinSemLock);
    }

    /* make sure the thread is dead */
    wait_for_thread(thred->md.tid, &eval);

    return PR_SUCCESS;
}
|

PR_IMPLEMENT(PRThread*)
PR_GetCurrentThread ()
{
    PRThread* thred;

    if (!_pr_initialized) _PR_ImplicitInitialization();

    thred = (PRThread *)tls_get(tls_prThreadSlot);
    if (thred == NULL)
    {
        /* this thread doesn't have a PRThread structure (it must be
           a native thread not created by the NSPR) - assimilate it */
        thred = _bt_AttachThread();
    }
    PR_ASSERT(NULL != thred);

    return thred;
}

PR_IMPLEMENT(PRThreadScope)
PR_GetThreadScope (const PRThread* thred)
{
    PR_ASSERT(thred != NULL);
    return PR_GLOBAL_THREAD;
}

PR_IMPLEMENT(PRThreadType)
PR_GetThreadType (const PRThread* thred)
{
    PR_ASSERT(thred != NULL);
    return (thred->state & BT_THREAD_SYSTEM) ?
        PR_SYSTEM_THREAD : PR_USER_THREAD;
}

PR_IMPLEMENT(PRThreadState)
PR_GetThreadState (const PRThread* thred)
{
    PR_ASSERT(thred != NULL);
    return (thred->state & BT_THREAD_JOINABLE) ?
        PR_JOINABLE_THREAD : PR_UNJOINABLE_THREAD;
}

PR_IMPLEMENT(PRThreadPriority)
PR_GetThreadPriority (const PRThread* thred)
{
    PR_ASSERT(thred != NULL);
    return thred->priority;
}  /* PR_GetThreadPriority */

PR_IMPLEMENT(void) PR_SetThreadPriority(PRThread *thred,
                                        PRThreadPriority newPri)
{
    PRUint32 bePriority;

    PR_ASSERT(thred != NULL);

    thred->priority = newPri;
    bePriority = _bt_MapNSPRToNativePriority(newPri);
    set_thread_priority(thred->md.tid, bePriority);
}
|

PR_IMPLEMENT(PRStatus)
PR_NewThreadPrivateIndex (PRUintn* newIndex,
                          PRThreadPrivateDTOR destructor)
{
    int32 index;

    if (!_pr_initialized) _PR_ImplicitInitialization();

    /* reserve the next available tpd slot (atomic_add() returns the
       value the counter held before the addition) */
    index = atomic_add(&tpd_slotsUsed, 1);
    if (index >= BT_TPD_LIMIT)
    {
        /* no slots left - decrement value, then fail */
        atomic_add(&tpd_slotsUsed, -1);
        PR_SetError(PR_TPD_RANGE_ERROR, 0);
        return PR_FAILURE;
    }

    /* allocate a BeOS-native TLS slot for this index (the new slot
       automatically contains NULL) */
    tpd_beosTLSSlots[index] = tls_allocate();

    /* remember the destructor */
    tpd_dtors[index] = destructor;

    *newIndex = (PRUintn)index;

    return PR_SUCCESS;
}
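
/* illustrative usage (a sketch, not compiled as part of this file):
 *
 *     PRUintn slot;
 *
 *     if (PR_NewThreadPrivateIndex(&slot, free) == PR_SUCCESS)
 *     {
 *         PR_SetThreadPrivate(slot, strdup("per-thread data"));
 *         ...
 *         char *data = (char *)PR_GetThreadPrivate(slot);
 *     }
 *
 * here free() is run automatically on each thread's non-NULL value when
 * the thread exits, or when the value is replaced */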
|

PR_IMPLEMENT(PRStatus)
PR_SetThreadPrivate (PRUintn index, void* priv)
{
    void *oldValue;

    /*
    ** Sanity checking (index is unsigned, so only the upper bounds
    ** need to be checked)
    */

    if (index >= tpd_slotsUsed || index >= BT_TPD_LIMIT)
    {
        PR_SetError(PR_TPD_RANGE_ERROR, 0);
        return PR_FAILURE;
    }

    /* if the old value isn't NULL, and the dtor for this slot isn't
       NULL, we must destroy the data */
    oldValue = tls_get(tpd_beosTLSSlots[index]);
    if (oldValue != NULL && tpd_dtors[index] != NULL)
        (*tpd_dtors[index])(oldValue);

    /* save new value */
    tls_set(tpd_beosTLSSlots[index], priv);

    return PR_SUCCESS;
}

PR_IMPLEMENT(void*)
PR_GetThreadPrivate (PRUintn index)
{
    /* make sure the index is valid (index is unsigned, so only the
       upper bounds need to be checked) */
    if (index >= tpd_slotsUsed || index >= BT_TPD_LIMIT)
    {
        PR_SetError(PR_TPD_RANGE_ERROR, 0);
        return NULL;
    }

    /* return the value */
    return tls_get(tpd_beosTLSSlots[index]);
}
|

PR_IMPLEMENT(PRStatus)
PR_Interrupt (PRThread* thred)
{
    status_t rv;

    PR_ASSERT(thred != NULL);

    /*
    ** there seems to be a bug in BeOS R5 in which calling
    ** resume_thread() on a blocked thread returns B_OK instead
    ** of B_BAD_THREAD_STATE (BeOS bug #20000422-19095). as such,
    ** we can't call resume_thread() and check for B_BAD_THREAD_STATE
    ** to wake up a blocked thread; instead, to interrupt a thread, we
    ** simply suspend then resume it. this wakes up blocked threads
    ** properly, and doesn't hurt unblocked threads (they simply get
    ** stopped then re-started immediately)
    */

    rv = suspend_thread(thred->md.tid);
    if (rv != B_NO_ERROR)
    {
        /* this doesn't appear to be a valid thread_id */
        PR_SetError(PR_UNKNOWN_ERROR, rv);
        return PR_FAILURE;
    }

    rv = resume_thread(thred->md.tid);
    if (rv != B_NO_ERROR)
    {
        PR_SetError(PR_UNKNOWN_ERROR, rv);
        return PR_FAILURE;
    }

    return PR_SUCCESS;
}
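
/* note that this implementation only wakes the target out of a kernel
   wait; it does not record a pending-interrupt state anywhere, so a
   thread that isn't currently blocked won't observe the interrupt */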
|

PR_IMPLEMENT(void)
PR_ClearInterrupt ()
{
}

PR_IMPLEMENT(PRStatus)
PR_Yield ()
{
    /* we just sleep for long enough to cause a reschedule (100
       microseconds) */
    snooze(100);
    return PR_SUCCESS;
}
|

#define BT_MILLION 1000000UL

PR_IMPLEMENT(PRStatus)
PR_Sleep (PRIntervalTime ticks)
{
    bigtime_t tps;
    status_t status;

    if (!_pr_initialized) _PR_ImplicitInitialization();

    tps = PR_IntervalToMicroseconds(ticks);

    status = snooze(tps);
    if (status == B_NO_ERROR) return PR_SUCCESS;

    PR_SetError(PR_NOT_IMPLEMENTED_ERROR, status);
    return PR_FAILURE;
}
|

PR_IMPLEMENT(PRStatus)
PR_Cleanup ()
{
    PRThread *me = PR_GetCurrentThread();

    PR_ASSERT(me->state & BT_THREAD_PRIMORD);
    if ((me->state & BT_THREAD_PRIMORD) == 0) {
        return PR_FAILURE;
    }

    PR_Lock(bt_book.ml);

    if (bt_book.threadCount != 0)
    {
        /* we'll have to wait for some threads to finish - create a
           sem to block on */
        bt_book.cleanUpSem = create_sem(0, "cleanup sem");
    }

    PR_Unlock(bt_book.ml);

    /* note that, if all the user threads were already dead, we
       wouldn't have created a sem above, so this acquire_sem()
       will fail immediately (cleanUpSem is still B_ERROR, which is
       not a valid sem_id); otherwise it blocks until the last user
       thread's delete_sem() in _bt_CleanupThread() */
    while (acquire_sem(bt_book.cleanUpSem) == B_INTERRUPTED);

    return PR_SUCCESS;
}
|

PR_IMPLEMENT(void)
PR_ProcessExit (PRIntn status)
{
    exit(status);
}
|
621 PRThread *_bt_AttachThread() |
|
622 { |
|
623 PRThread *thread; |
|
624 thread_info tInfo; |
|
625 |
|
626 /* make sure this thread doesn't already have a PRThread structure */ |
|
627 PR_ASSERT(tls_get(tls_prThreadSlot) == NULL); |
|
628 |
|
629 /* allocate a PRThread structure for this thread */ |
|
630 thread = PR_NEWZAP(PRThread); |
|
631 if (thread == NULL) |
|
632 { |
|
633 PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0); |
|
634 return NULL; |
|
635 } |
|
636 |
|
637 /* get the native thread's current state */ |
|
638 get_thread_info(find_thread(NULL), &tInfo); |
|
639 |
|
640 /* initialize new PRThread */ |
|
641 thread->md.tid = tInfo.thread; |
|
642 thread->md.joinSem = B_ERROR; |
|
643 thread->priority = _bt_MapNativeToNSPRPriority(tInfo.priority); |
|
644 |
|
645 /* attached threads are always non-joinable user threads */ |
|
646 thread->state = 0; |
|
647 |
|
648 /* increment user thread count */ |
|
649 PR_Lock(bt_book.ml); |
|
650 bt_book.threadCount++; |
|
651 PR_Unlock(bt_book.ml); |
|
652 |
|
653 /* store this thread's PRThread */ |
|
654 tls_set(tls_prThreadSlot, thread); |
|
655 |
|
656 /* the thread must call _bt_CleanupThread() before it dies, in order |
|
657 to clean up its PRThread, synchronize with the primordial thread, |
|
658 etc. */ |
|
659 on_exit_thread(_bt_CleanupThread, NULL); |
|
660 |
|
661 return thread; |
|
662 } |