|
1 /* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ |
|
2 /* This Source Code Form is subject to the terms of the Mozilla Public |
|
3 * License, v. 2.0. If a copy of the MPL was not distributed with this |
|
4 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
|
5 |
|
6 #include "primpl.h" |
|
7 |
|
8 #if !defined (USE_SVR4_THREADS) |
|
9 |
|
10 /* |
|
11 * using only NSPR threads here |
|
12 */ |
|
13 |
|
14 #include <setjmp.h> |
|
15 |
|
/* No machine-dependent early initialization is needed when running on
 * pure NSPR user-level threads. */
void _MD_EarlyInit(void)
{
}
|
19 |
|
/*
** Give the garbage collector access to thread t's saved register set so
** it can be scanned for roots.  If t is the calling thread, snapshot the
** live registers first via setjmp() into the thread's jmp_buf (the
** CONTEXT() macro).  On return, *np holds the register area's size in
** PRWords and the return value points at its first word.
*/
PRWord *_MD_HomeGCRegisters(PRThread *t, int isCurrent, int *np)
{
    if (isCurrent) {
        (void) setjmp(CONTEXT(t));
    }
    *np = sizeof(CONTEXT(t)) / sizeof(PRWord);
    return (PRWord *) CONTEXT(t);
}
|
28 |
|
#ifdef ALARMS_BREAK_TCP /* I don't think they do */

/*
** Issue connect(2) with clock (SIGALRM) interrupts blocked so the call
** cannot be spuriously interrupted.  Returns whatever _connect() returned.
** NOTE(review): the 'timeout' parameter is accepted but unused here.
*/
PRInt32 _MD_connect(PRInt32 osfd, const PRNetAddr *addr, PRInt32 addrlen,
                    PRIntervalTime timeout)
{
    PRInt32 rv;

    _MD_BLOCK_CLOCK_INTERRUPTS();
    rv = _connect(osfd,addr,addrlen);
    _MD_UNBLOCK_CLOCK_INTERRUPTS();
    /* BUG FIX: rv was computed but the function fell off the end without
     * returning it -- undefined behavior for a non-void function whose
     * result is used. */
    return(rv);
}

/*
** Issue accept(2) with clock interrupts blocked, mirroring _MD_connect.
** NOTE(review): the 'timeout' parameter is accepted but unused here.
*/
PRInt32 _MD_accept(PRInt32 osfd, PRNetAddr *addr, PRInt32 addrlen,
                   PRIntervalTime timeout)
{
    PRInt32 rv;

    _MD_BLOCK_CLOCK_INTERRUPTS();
    rv = _accept(osfd,addr,addrlen);
    _MD_UNBLOCK_CLOCK_INTERRUPTS();
    return(rv);
}
#endif
|
52 |
|
53 /* |
|
54 * These are also implemented in pratom.c using NSPR locks. Any reason |
|
55 * this might be better or worse? If you like this better, define |
|
56 * _PR_HAVE_ATOMIC_OPS in include/md/unixware.h |
|
57 */ |
|
#ifdef _PR_HAVE_ATOMIC_OPS
/* Atomic operations */
#include <stdio.h>
/* Dummy stdio stream; its flockfile() lock serializes the pseudo-atomic
 * operations below (poor man's process-wide mutex). */
static FILE *_uw_semf;

/*
** Create the temporary file whose stream lock is used as the mutex for
** the pseudo-atomic operations below.
*/
void
_MD_INIT_ATOMIC(void)
{
    /* Sigh. Sure wish SYSV semaphores weren't such a pain to use */
    if ((_uw_semf = tmpfile()) == NULL)
        PR_ASSERT(0);   /* NOTE(review): in a non-DEBUG build a tmpfile()
                         * failure is silently ignored, leaving _uw_semf
                         * NULL -- the operations below would then crash. */

    return;
}

/* Increment *val while holding the stream lock. */
void
_MD_ATOMIC_INCREMENT(PRInt32 *val)
{
    flockfile(_uw_semf);
    (*val)++;
    unflockfile(_uw_semf);  /* NOTE(review): POSIX names this funlockfile();
                             * 'unflockfile' is presumably a UnixWare-specific
                             * spelling -- confirm against platform headers. */
}

/* Add val to *ptr while holding the stream lock. */
void
_MD_ATOMIC_ADD(PRInt32 *ptr, PRInt32 val)
{
    flockfile(_uw_semf);
    (*ptr) += val;
    unflockfile(_uw_semf);
}

/* Decrement *val while holding the stream lock. */
void
_MD_ATOMIC_DECREMENT(PRInt32 *val)
{
    flockfile(_uw_semf);
    (*val)--;
    unflockfile(_uw_semf);
}

/* Store newval into *val while holding the stream lock. */
void
_MD_ATOMIC_SET(PRInt32 *val, PRInt32 newval)
{
    flockfile(_uw_semf);
    *val = newval;
    unflockfile(_uw_semf);
}
#endif
|
105 |
|
106 void |
|
107 _MD_SET_PRIORITY(_MDThread *thread, PRUintn newPri) |
|
108 { |
|
109 return; |
|
110 } |
|
111 |
|
/* No per-thread machine-dependent state to set up for user-level threads;
 * always succeeds. */
PRStatus
_MD_InitializeThread(PRThread *thread)
{
    return PR_SUCCESS;
}
|
117 |
|
/*
** Block the (local-scope) calling thread by switching to another NSPR
** thread.  The 'ticks' timeout is handled by the scheduler, not here.
*/
PRStatus
_MD_WAIT(PRThread *thread, PRIntervalTime ticks)
{
    /* only user-level (local scope) threads may be context-switched here */
    PR_ASSERT(!(thread->flags & _PR_GLOBAL_SCOPE));
    _PR_MD_SWITCH_CONTEXT(thread);
    return PR_SUCCESS;
}
|
125 |
|
126 PRStatus |
|
127 _MD_WAKEUP_WAITER(PRThread *thread) |
|
128 { |
|
129 if (thread) { |
|
130 PR_ASSERT(!(thread->flags & _PR_GLOBAL_SCOPE)); |
|
131 } |
|
132 return PR_SUCCESS; |
|
133 } |
|
134 |
|
135 /* These functions should not be called for Unixware */ |
|
/* Should never be reached in this configuration: yielding is handled by
 * the NSPR user-level scheduler, not the MD layer. */
void
_MD_YIELD(void)
{
    PR_NOT_REACHED("_MD_YIELD should not be called for Unixware.");
}
|
141 |
|
142 PRStatus |
|
143 _MD_CREATE_THREAD( |
|
144 PRThread *thread, |
|
145 void (*start) (void *), |
|
146 PRThreadPriority priority, |
|
147 PRThreadScope scope, |
|
148 PRThreadState state, |
|
149 PRUint32 stackSize) |
|
150 { |
|
151 PR_NOT_REACHED("_MD_CREATE_THREAD should not be called for Unixware."); |
|
152 } |
|
153 |
|
154 #else /* USE_SVR4_THREADS */ |
|
155 |
|
156 /* NOTE: |
|
157 * SPARC v9 (Ultras) do have an atomic test-and-set operation. But |
|
158 * SPARC v8 doesn't. We should detect in the init if we are running on |
|
159 * v8 or v9, and then use assembly where we can. |
|
160 */ |
|
161 |
|
162 #include <thread.h> |
|
163 #include <synch.h> |
|
164 |
|
165 static mutex_t _unixware_atomic = DEFAULTMUTEX; |
|
166 |
|
167 #define TEST_THEN_ADD(where, inc) \ |
|
168 if (mutex_lock(&_unixware_atomic) != 0)\ |
|
169 PR_ASSERT(0);\ |
|
170 *where += inc;\ |
|
171 if (mutex_unlock(&_unixware_atomic) != 0)\ |
|
172 PR_ASSERT(0); |
|
173 |
|
174 #define TEST_THEN_SET(where, val) \ |
|
175 if (mutex_lock(&_unixware_atomic) != 0)\ |
|
176 PR_ASSERT(0);\ |
|
177 *where = val;\ |
|
178 if (mutex_unlock(&_unixware_atomic) != 0)\ |
|
179 PR_ASSERT(0); |
|
180 |
|
/* Nothing to initialize: _unixware_atomic is statically initialized with
 * DEFAULTMUTEX. */
void
_MD_INIT_ATOMIC(void)
{
}
|
185 |
|
/* Atomically increment *val (serialized by _unixware_atomic). */
void
_MD_ATOMIC_INCREMENT(PRInt32 *val)
{
    TEST_THEN_ADD(val, 1);
}
|
191 |
|
/* Atomically add val to *ptr (serialized by _unixware_atomic). */
void
_MD_ATOMIC_ADD(PRInt32 *ptr, PRInt32 val)
{
    TEST_THEN_ADD(ptr, val);
}
|
197 |
|
198 void |
|
199 _MD_ATOMIC_DECREMENT(PRInt32 *val) |
|
200 { |
|
201 TEST_THEN_ADD(val, 0xffffffff); |
|
202 } |
|
203 |
|
/* Atomically store newval into *val (serialized by _unixware_atomic). */
void
_MD_ATOMIC_SET(PRInt32 *val, PRInt32 newval)
{
    TEST_THEN_SET(val, newval);
}
|
209 |
|
210 #include <signal.h> |
|
211 #include <errno.h> |
|
212 #include <fcntl.h> |
|
213 |
|
214 #include <sys/lwp.h> |
|
215 #include <sys/procfs.h> |
|
216 #include <sys/syscall.h> |
|
217 |
|
218 |
|
/* Thread-specific-data keys: the current PRThread, the current CPU
 * structure, and the last thread that ran (created in _MD_EarlyInit). */
THREAD_KEY_T threadid_key;
THREAD_KEY_T cpuid_key;
THREAD_KEY_T last_thread_key;
/* 'set' holds SIGALRM (built in _MD_EarlyInit); 'oldset' is scratch space
 * for the previous mask while native threads are created. */
static sigset_t set, oldset;
|
223 |
|
/*
** Early machine-dependent initialization: create the TSD keys used by
** the fast current-thread/CPU lookups and build the SIGALRM mask that
** _MD_CREATE_THREAD blocks around thr_create().
*/
void _MD_EarlyInit(void)
{
    THR_KEYCREATE(&threadid_key, NULL);
    THR_KEYCREATE(&cpuid_key, NULL);
    THR_KEYCREATE(&last_thread_key, NULL);
    sigemptyset(&set);
    sigaddset(&set, SIGALRM);
}
|
232 |
|
/*
** Create a native thread for 'thread' via thr_create().
** The thread is created THR_SUSPENDED so its NSPR bookkeeping (lwpid,
** waiter semaphore, scope flags, priority) can be completed before it
** runs; it is released at the end with thr_continue().
** Returns PR_SUCCESS, or PR_FAILURE if thr_create()/thr_continue() fail.
*/
PRStatus _MD_CREATE_THREAD(PRThread *thread,
                           void (*start)(void *),
                           PRThreadPriority priority,
                           PRThreadScope scope,
                           PRThreadState state,
                           PRUint32 stackSize)
{
    long flags;

    /* mask out SIGALRM for native thread creation */
    thr_sigsetmask(SIG_BLOCK, &set, &oldset);

    /* always start suspended; detach unless the caller intends to join */
    flags = (state == PR_JOINABLE_THREAD ? THR_SUSPENDED/*|THR_NEW_LWP*/
             : THR_SUSPENDED|THR_DETACHED/*|THR_NEW_LWP*/);
    /* GC-able and explicitly bound threads are bound to an LWP */
    if (_PR_IS_GCABLE_THREAD(thread) ||
        (scope == PR_GLOBAL_BOUND_THREAD))
        flags |= THR_BOUND;

    if (thr_create(NULL, thread->stack->stackSize,
                   (void *(*)(void *)) start, (void *) thread,
                   flags,
                   &thread->md.handle)) {
        /* restore the signal mask before reporting failure */
        thr_sigsetmask(SIG_SETMASK, &oldset, NULL);
        return PR_FAILURE;
    }

    /* When the thread starts running, then the lwpid is set to the right
     * value. Until then we want to mark this as 'uninit' so that
     * its register state is initialized properly for GC */

    thread->md.lwpid = -1;
    thr_sigsetmask(SIG_SETMASK, &oldset, NULL);
    _MD_NEW_SEM(&thread->md.waiter_sem, 0);

    if ((scope == PR_GLOBAL_THREAD) || (scope == PR_GLOBAL_BOUND_THREAD)) {
        thread->flags |= _PR_GLOBAL_SCOPE;
    }

    /*
    ** Set the thread priority. This will also place the thread on
    ** the runQ.
    **
    ** Force PR_SetThreadPriority to set the priority by
    ** setting thread->priority to 100.
    */
    {
        int pri;
        pri = thread->priority;
        thread->priority = 100;
        PR_SetThreadPriority( thread, pri );

        PR_LOG(_pr_thread_lm, PR_LOG_MIN,
               ("(0X%x)[Start]: on to runq at priority %d",
                thread, thread->priority));
    }

    /* Activate the thread */
    if (thr_continue( thread->md.handle ) ) {
        /* NOTE(review): waiter_sem created above is not destroyed on this
         * failure path -- looks like a leak; confirm against callers. */
        return PR_FAILURE;
    }
    return PR_SUCCESS;
}
|
296 |
|
297 void _MD_cleanup_thread(PRThread *thread) |
|
298 { |
|
299 thread_t hdl; |
|
300 PRMonitor *mon; |
|
301 |
|
302 hdl = thread->md.handle; |
|
303 |
|
304 /* |
|
305 ** First, suspend the thread (unless it's the active one) |
|
306 ** Because we suspend it first, we don't have to use LOCK_SCHEDULER to |
|
307 ** prevent both of us modifying the thread structure at the same time. |
|
308 */ |
|
309 if ( thread != _PR_MD_CURRENT_THREAD() ) { |
|
310 thr_suspend(hdl); |
|
311 } |
|
312 PR_LOG(_pr_thread_lm, PR_LOG_MIN, |
|
313 ("(0X%x)[DestroyThread]\n", thread)); |
|
314 |
|
315 _MD_DESTROY_SEM(&thread->md.waiter_sem); |
|
316 } |
|
317 |
|
/*
** Set the underlying native thread's priority via thr_setprio().
** Failure is only logged; the NSPR-level priority is managed by the caller.
*/
void _MD_SET_PRIORITY(_MDThread *md_thread, PRUintn newPri)
{
    if(thr_setprio((thread_t)md_thread->handle, newPri)) {
        PR_LOG(_pr_thread_lm, PR_LOG_MIN,
               ("_PR_SetThreadPriority: can't set thread priority\n"));
    }
}
|
325 |
|
326 void _MD_WAIT_CV( |
|
327 struct _MDCVar *md_cv, struct _MDLock *md_lock, PRIntervalTime timeout) |
|
328 { |
|
329 struct timespec tt; |
|
330 PRUint32 msec; |
|
331 int rv; |
|
332 PRThread *me = _PR_MD_CURRENT_THREAD(); |
|
333 |
|
334 msec = PR_IntervalToMilliseconds(timeout); |
|
335 |
|
336 GETTIME (&tt); |
|
337 |
|
338 tt.tv_sec += msec / PR_MSEC_PER_SEC; |
|
339 tt.tv_nsec += (msec % PR_MSEC_PER_SEC) * PR_NSEC_PER_MSEC; |
|
340 /* Check for nsec overflow - otherwise we'll get an EINVAL */ |
|
341 if (tt.tv_nsec >= PR_NSEC_PER_SEC) { |
|
342 tt.tv_sec++; |
|
343 tt.tv_nsec -= PR_NSEC_PER_SEC; |
|
344 } |
|
345 me->md.sp = unixware_getsp(); |
|
346 |
|
347 |
|
348 /* XXX Solaris 2.5.x gives back EINTR occasionally for no reason |
|
349 * hence ignore EINTR for now */ |
|
350 |
|
351 COND_TIMEDWAIT(&md_cv->cv, &md_lock->lock, &tt); |
|
352 } |
|
353 |
|
/* Acquire a machine-dependent lock (thin wrapper over mutex_lock). */
void _MD_lock(struct _MDLock *md_lock)
{
    mutex_lock(&md_lock->lock);
}
|
358 |
|
359 void _MD_unlock(struct _MDLock *md_lock) |
|
360 { |
|
361 mutex_unlock(&((md_lock)->lock)); |
|
362 } |
|
363 |
|
364 |
|
365 PRThread *_pr_current_thread_tls() |
|
366 { |
|
367 PRThread *ret; |
|
368 |
|
369 thr_getspecific(threadid_key, (void **)&ret); |
|
370 return ret; |
|
371 } |
|
372 |
|
/*
** Block the calling thread on its waiter semaphore until another thread
** posts it via _MD_WAKEUP_WAITER.
** NOTE(review): 'ticks' is not used here -- the wait appears unbounded;
** presumably timeouts are enforced elsewhere.  Confirm against callers.
*/
PRStatus
_MD_WAIT(PRThread *thread, PRIntervalTime ticks)
{
    _MD_WAIT_SEM(&thread->md.waiter_sem);
    return PR_SUCCESS;
}
|
379 |
|
380 PRStatus |
|
381 _MD_WAKEUP_WAITER(PRThread *thread) |
|
382 { |
|
383 if (thread == NULL) { |
|
384 return PR_SUCCESS; |
|
385 } |
|
386 _MD_POST_SEM(&thread->md.waiter_sem); |
|
387 return PR_SUCCESS; |
|
388 } |
|
389 |
|
390 _PRCPU *_pr_current_cpu_tls() |
|
391 { |
|
392 _PRCPU *ret; |
|
393 |
|
394 thr_getspecific(cpuid_key, (void **)&ret); |
|
395 return ret; |
|
396 } |
|
397 |
|
398 PRThread *_pr_last_thread_tls() |
|
399 { |
|
400 PRThread *ret; |
|
401 |
|
402 thr_getspecific(last_thread_key, (void **)&ret); |
|
403 return ret; |
|
404 } |
|
405 |
|
/* Lock protecting the NSPR I/O queue on this platform. */
_MDLock _pr_ioq_lock;

/* Machine-dependent I/O initialization: create the I/O queue lock. */
void _MD_INIT_IO (void)
{
    _MD_NEW_LOCK(&_pr_ioq_lock);
}
|
412 |
|
413 PRStatus _MD_InitializeThread(PRThread *thread) |
|
414 { |
|
415 if (!_PR_IS_NATIVE_THREAD(thread)) |
|
416 return; |
|
417 /* prime the sp; substract 4 so we don't hit the assert that |
|
418 * curr sp > base_stack |
|
419 */ |
|
420 thread->md.sp = (uint_t) thread->stack->allocBase - sizeof(long); |
|
421 thread->md.lwpid = _lwp_self(); |
|
422 thread->md.handle = THR_SELF(); |
|
423 |
|
424 /* all threads on Solaris are global threads from NSPR's perspective |
|
425 * since all of them are mapped to Solaris threads. |
|
426 */ |
|
427 thread->flags |= _PR_GLOBAL_SCOPE; |
|
428 |
|
429 /* For primordial/attached thread, we don't create an underlying native thread. |
|
430 * So, _MD_CREATE_THREAD() does not get called. We need to do initialization |
|
431 * like allocating thread's synchronization variables and set the underlying |
|
432 * native thread's priority. |
|
433 */ |
|
434 if (thread->flags & (_PR_PRIMORDIAL | _PR_ATTACHED)) { |
|
435 _MD_NEW_SEM(&thread->md.waiter_sem, 0); |
|
436 _MD_SET_PRIORITY(&(thread->md), thread->priority); |
|
437 } |
|
438 return PR_SUCCESS; |
|
439 } |
|
440 |
|
static sigset_t old_mask;      /* store away original gc thread sigmask */
static int gcprio;             /* store away original gc thread priority */
static lwpid_t *all_lwps=NULL; /* list of lwps that we suspended */
static int num_lwps ;          /* number of entries in all_lwps */
static int suspendAllOn = 0;   /* nonzero while a suspend-all is in progress */

/* True if sp lies strictly inside the stack bounds (bottom, top). */
#define VALID_SP(sp, bottom, top) \
    (((uint_t)(sp)) > ((uint_t)(bottom)) && ((uint_t)(sp)) < ((uint_t)(top)))
|
449 |
|
450 void unixware_preempt_off() |
|
451 { |
|
452 sigset_t set; |
|
453 (void)sigfillset(&set); |
|
454 sigprocmask (SIG_SETMASK, &set, &old_mask); |
|
455 } |
|
456 |
|
/* Re-enable preemption by restoring the signal mask saved by
 * unixware_preempt_off(). */
void unixware_preempt_on()
{
    sigprocmask (SIG_SETMASK, &old_mask, NULL);
}
|
461 |
|
/*
** Prepare to suspend all threads for GC: block all signals, then raise
** this thread's priority to the maximum (saving the old priority in
** 'gcprio') so it cannot be preempted while suspending others.
*/
void _MD_Begin_SuspendAll()
{
    unixware_preempt_off();

    PR_LOG(_pr_gc_lm, PR_LOG_ALWAYS, ("Begin_SuspendAll\n"));
    /* run at highest prio so I cannot be preempted */
    thr_getprio(thr_self(), &gcprio);
    thr_setprio(thr_self(), 0x7fffffff);
    suspendAllOn = 1;
}
|
472 |
|
/* Nothing to do between suspending and resuming all threads; cleanup
 * happens in _MD_End_ResumeAll. */
void _MD_End_SuspendAll()
{
}
|
476 |
|
/*
** Finish a suspend/resume-all cycle: restore the GC thread's original
** priority and signal mask, and clear the suspend-all flag.
** NOTE(review): suspendAllOn is cleared after preemption is re-enabled --
** presumably safe because only the GC thread runs this; confirm.
*/
void _MD_End_ResumeAll()
{
    PR_LOG(_pr_gc_lm, PR_LOG_ALWAYS, ("End_ResumeAll\n"));
    thr_setprio(thr_self(), gcprio);
    unixware_preempt_on();
    suspendAllOn = 0;
}
|
484 |
|
485 void _MD_Suspend(PRThread *thr) |
|
486 { |
|
487 int lwp_fd, result; |
|
488 int lwp_main_proc_fd = 0; |
|
489 |
|
490 thr_suspend(thr->md.handle); |
|
491 if (!_PR_IS_GCABLE_THREAD(thr)) |
|
492 return; |
|
493 /* XXX Primordial thread can't be bound to an lwp, hence there is no |
|
494 * way we can assume that we can get the lwp status for primordial |
|
495 * thread reliably. Hence we skip this for primordial thread, hoping |
|
496 * that the SP is saved during lock and cond. wait. |
|
497 * XXX - Again this is concern only for java interpreter, not for the |
|
498 * server, 'cause primordial thread in the server does not do java work |
|
499 */ |
|
500 if (thr->flags & _PR_PRIMORDIAL) |
|
501 return; |
|
502 |
|
503 /* if the thread is not started yet then don't do anything */ |
|
504 if (!suspendAllOn || thr->md.lwpid == -1) |
|
505 return; |
|
506 |
|
507 } |
|
/*
** Resume a thread previously stopped by _MD_Suspend.  Outside a
** suspend-all (or for non-GC-able threads) the thread-library
** thr_continue() is used; during a suspend-all the lwp is resumed
** directly with _lwp_continue().
*/
void _MD_Resume(PRThread *thr)
{
    if (!_PR_IS_GCABLE_THREAD(thr) || !suspendAllOn){
        /*XXX When the suspendAllOn is set, we will be trying to do lwp_suspend
         * during that time we can't call any thread lib or libc calls. Hence
         * make sure that no resume is requested for Non gcable thread
         * during suspendAllOn */
        PR_ASSERT(!suspendAllOn);
        thr_continue(thr->md.handle);
        return;
    }
    /* thread has not started yet -- there is no lwp to resume */
    if (thr->md.lwpid == -1)
        return;

    if ( _lwp_continue(thr->md.lwpid) < 0) {
        PR_ASSERT(0);  /* ARGH, we are hosed! */
    }
}
|
526 |
|
527 |
|
/*
** Give the garbage collector access to thread t's general registers.
** If t is the calling thread, capture its live registers with
** getcontext() first.  *np receives the register count (NGREG).  For a
** thread that has not started yet (lwpid == -1) the registers are zeroed
** so the GC does not scan stale values.
*/
PRWord *_MD_HomeGCRegisters(PRThread *t, int isCurrent, int *np)
{
    if (isCurrent) {
        (void) getcontext(CONTEXT(t));  /* XXX tune me: set md_IRIX.c */
    }
    *np = NGREG;
    if (t->md.lwpid == -1)
        memset(&t->md.context.uc_mcontext.gregs[0], 0, NGREG * sizeof(PRWord));
    return (PRWord*) &t->md.context.uc_mcontext.gregs[0];
}
|
538 |
|
/*
** Fill *tp with the current wall-clock time, translated from
** gettimeofday()'s microsecond resolution into a timespec.
** Always returns 0.
*/
int
_pr_unixware_clock_gettime (struct timespec *tp)
{
    struct timeval now;

    gettimeofday(&now, NULL);
    tp->tv_nsec = now.tv_usec * 1000;
    tp->tv_sec = now.tv_sec;
    return 0;
}
|
549 |
|
550 |
|
551 #endif /* USE_SVR4_THREADS */ |