Wed, 31 Dec 2014 06:09:35 +0100
Cloned upstream origin tor-browser at tor-browser-31.3.0esr-4.5-1-build1
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f for hacking purposes.
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "primpl.h"

#if !defined (USE_SVR4_THREADS)

/*
 * using only NSPR threads here
 */

#include <setjmp.h>

void _MD_EarlyInit(void)
{
}

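/*
 * Save the calling thread's register state via setjmp so the garbage
 * collector can scan it; returns a pointer to the saved context and the
 * number of PRWords it occupies in *np.
 */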
PRWord *_MD_HomeGCRegisters(PRThread *t, int isCurrent, int *np)
{
    if (isCurrent) {
        (void) setjmp(CONTEXT(t));
    }
    *np = sizeof(CONTEXT(t)) / sizeof(PRWord);
    return (PRWord *) CONTEXT(t);
}

#ifdef ALARMS_BREAK_TCP /* I don't think they do */

PRInt32 _MD_connect(PRInt32 osfd, const PRNetAddr *addr, PRInt32 addrlen,
                    PRIntervalTime timeout)
{
    PRInt32 rv;

    _MD_BLOCK_CLOCK_INTERRUPTS();
    rv = _connect(osfd,addr,addrlen);
    _MD_UNBLOCK_CLOCK_INTERRUPTS();
    return(rv);
}

PRInt32 _MD_accept(PRInt32 osfd, PRNetAddr *addr, PRInt32 addrlen,
                   PRIntervalTime timeout)
{
    PRInt32 rv;

    _MD_BLOCK_CLOCK_INTERRUPTS();
    rv = _accept(osfd,addr,addrlen);
    _MD_UNBLOCK_CLOCK_INTERRUPTS();
    return(rv);
}
#endif

/*
 * These are also implemented in pratom.c using NSPR locks. Any reason
 * this might be better or worse? If you like this better, define
 * _PR_HAVE_ATOMIC_OPS in include/md/unixware.h
 */
#ifdef _PR_HAVE_ATOMIC_OPS
/* Atomic operations */
#include <stdio.h>
static FILE *_uw_semf;

void
_MD_INIT_ATOMIC(void)
{
    /* Sigh. Sure wish SYSV semaphores weren't such a pain to use */
    if ((_uw_semf = tmpfile()) == NULL)
        PR_ASSERT(0);

    return;
}

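/*
 * The "atomic" operations below are serialized by taking the stdio lock of
 * the temporary file opened above (flockfile/unflockfile), which acts as a
 * process-wide mutex.
 */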
void
_MD_ATOMIC_INCREMENT(PRInt32 *val)
{
    flockfile(_uw_semf);
    (*val)++;
    unflockfile(_uw_semf);
}

void
_MD_ATOMIC_ADD(PRInt32 *ptr, PRInt32 val)
{
    flockfile(_uw_semf);
    (*ptr) += val;
    unflockfile(_uw_semf);
}

void
_MD_ATOMIC_DECREMENT(PRInt32 *val)
{
    flockfile(_uw_semf);
    (*val)--;
    unflockfile(_uw_semf);
}

void
_MD_ATOMIC_SET(PRInt32 *val, PRInt32 newval)
{
    flockfile(_uw_semf);
    *val = newval;
    unflockfile(_uw_semf);
}
#endif

void
_MD_SET_PRIORITY(_MDThread *thread, PRUintn newPri)
{
    return;
}

PRStatus
_MD_InitializeThread(PRThread *thread)
{
    return PR_SUCCESS;
}

PRStatus
_MD_WAIT(PRThread *thread, PRIntervalTime ticks)
{
    PR_ASSERT(!(thread->flags & _PR_GLOBAL_SCOPE));
    _PR_MD_SWITCH_CONTEXT(thread);
    return PR_SUCCESS;
}

PRStatus
_MD_WAKEUP_WAITER(PRThread *thread)
{
    if (thread) {
        PR_ASSERT(!(thread->flags & _PR_GLOBAL_SCOPE));
    }
    return PR_SUCCESS;
}

/* These functions should not be called for Unixware */
void
_MD_YIELD(void)
{
    PR_NOT_REACHED("_MD_YIELD should not be called for Unixware.");
}

PRStatus
_MD_CREATE_THREAD(
    PRThread *thread,
    void (*start) (void *),
    PRThreadPriority priority,
    PRThreadScope scope,
    PRThreadState state,
    PRUint32 stackSize)
{
    PR_NOT_REACHED("_MD_CREATE_THREAD should not be called for Unixware.");
    return PR_FAILURE;
}

#else /* USE_SVR4_THREADS */

/* NOTE:
 * SPARC v9 (Ultras) do have an atomic test-and-set operation. But
 * SPARC v8 doesn't. We should detect in the init if we are running on
 * v8 or v9, and then use assembly where we can.
 */

#include <thread.h>
#include <synch.h>

static mutex_t _unixware_atomic = DEFAULTMUTEX;

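/*
 * Emulate atomic arithmetic by funnelling every update through a single
 * process-wide mutex.
 */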
#define TEST_THEN_ADD(where, inc) \
    if (mutex_lock(&_unixware_atomic) != 0)\
        PR_ASSERT(0);\
    *where += inc;\
    if (mutex_unlock(&_unixware_atomic) != 0)\
        PR_ASSERT(0);

#define TEST_THEN_SET(where, val) \
    if (mutex_lock(&_unixware_atomic) != 0)\
        PR_ASSERT(0);\
    *where = val;\
    if (mutex_unlock(&_unixware_atomic) != 0)\
        PR_ASSERT(0);

void
_MD_INIT_ATOMIC(void)
{
}

void
_MD_ATOMIC_INCREMENT(PRInt32 *val)
{
    TEST_THEN_ADD(val, 1);
}

void
_MD_ATOMIC_ADD(PRInt32 *ptr, PRInt32 val)
{
    TEST_THEN_ADD(ptr, val);
}

void
_MD_ATOMIC_DECREMENT(PRInt32 *val)
{
    TEST_THEN_ADD(val, 0xffffffff);
}

void
_MD_ATOMIC_SET(PRInt32 *val, PRInt32 newval)
{
    TEST_THEN_SET(val, newval);
}

#include <signal.h>
#include <errno.h>
#include <fcntl.h>

#include <sys/lwp.h>
#include <sys/procfs.h>
#include <sys/syscall.h>


THREAD_KEY_T threadid_key;
THREAD_KEY_T cpuid_key;
THREAD_KEY_T last_thread_key;
static sigset_t set, oldset;

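/*
 * Create the TLS keys used to look up the current thread, current CPU and
 * last-run thread, and build the SIGALRM mask that is blocked around native
 * thread creation.
 */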
void _MD_EarlyInit(void)
{
    THR_KEYCREATE(&threadid_key, NULL);
    THR_KEYCREATE(&cpuid_key, NULL);
    THR_KEYCREATE(&last_thread_key, NULL);
    sigemptyset(&set);
    sigaddset(&set, SIGALRM);
}

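/*
 * Create the native thread suspended, record its handle and mark its lwpid
 * as uninitialized, set its NSPR priority, and only then let it run with
 * thr_continue().
 */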
PRStatus _MD_CREATE_THREAD(PRThread *thread,
                           void (*start)(void *),
                           PRThreadPriority priority,
                           PRThreadScope scope,
                           PRThreadState state,
                           PRUint32 stackSize)
{
    long flags;

    /* mask out SIGALRM for native thread creation */
    thr_sigsetmask(SIG_BLOCK, &set, &oldset);

    flags = (state == PR_JOINABLE_THREAD ? THR_SUSPENDED/*|THR_NEW_LWP*/
                                         : THR_SUSPENDED|THR_DETACHED/*|THR_NEW_LWP*/);
    if (_PR_IS_GCABLE_THREAD(thread) ||
        (scope == PR_GLOBAL_BOUND_THREAD))
        flags |= THR_BOUND;

    if (thr_create(NULL, thread->stack->stackSize,
                   (void *(*)(void *)) start, (void *) thread,
                   flags,
                   &thread->md.handle)) {
        thr_sigsetmask(SIG_SETMASK, &oldset, NULL);
        return PR_FAILURE;
    }

    /* When the thread starts running, then the lwpid is set to the right
     * value. Until then we want to mark this as 'uninit' so that
     * its register state is initialized properly for GC */

    thread->md.lwpid = -1;
    thr_sigsetmask(SIG_SETMASK, &oldset, NULL);
    _MD_NEW_SEM(&thread->md.waiter_sem, 0);

    if ((scope == PR_GLOBAL_THREAD) || (scope == PR_GLOBAL_BOUND_THREAD)) {
        thread->flags |= _PR_GLOBAL_SCOPE;
    }

    /*
    ** Set the thread priority. This will also place the thread on
    ** the runQ.
    **
    ** Force PR_SetThreadPriority to set the priority by
    ** setting thread->priority to 100.
    */
    {
        int pri;
        pri = thread->priority;
        thread->priority = 100;
        PR_SetThreadPriority( thread, pri );

        PR_LOG(_pr_thread_lm, PR_LOG_MIN,
               ("(0X%x)[Start]: on to runq at priority %d",
                thread, thread->priority));
    }

    /* Activate the thread */
    if (thr_continue( thread->md.handle ) ) {
        return PR_FAILURE;
    }
    return PR_SUCCESS;
}

void _MD_cleanup_thread(PRThread *thread)
{
    thread_t hdl;
    PRMonitor *mon;

    hdl = thread->md.handle;

    /*
    ** First, suspend the thread (unless it's the active one)
    ** Because we suspend it first, we don't have to use LOCK_SCHEDULER to
    ** prevent both of us modifying the thread structure at the same time.
    */
    if ( thread != _PR_MD_CURRENT_THREAD() ) {
        thr_suspend(hdl);
    }
    PR_LOG(_pr_thread_lm, PR_LOG_MIN,
           ("(0X%x)[DestroyThread]\n", thread));

    _MD_DESTROY_SEM(&thread->md.waiter_sem);
}

void _MD_SET_PRIORITY(_MDThread *md_thread, PRUintn newPri)
{
    if(thr_setprio((thread_t)md_thread->handle, newPri)) {
        PR_LOG(_pr_thread_lm, PR_LOG_MIN,
               ("_PR_SetThreadPriority: can't set thread priority\n"));
    }
}

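/*
 * Convert the NSPR interval into an absolute timespec, record the current
 * stack pointer, and block on the native condition variable with
 * COND_TIMEDWAIT.
 */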
void _MD_WAIT_CV(
    struct _MDCVar *md_cv, struct _MDLock *md_lock, PRIntervalTime timeout)
{
    struct timespec tt;
    PRUint32 msec;
    int rv;
    PRThread *me = _PR_MD_CURRENT_THREAD();

    msec = PR_IntervalToMilliseconds(timeout);

    GETTIME (&tt);

    tt.tv_sec += msec / PR_MSEC_PER_SEC;
    tt.tv_nsec += (msec % PR_MSEC_PER_SEC) * PR_NSEC_PER_MSEC;
    /* Check for nsec overflow - otherwise we'll get an EINVAL */
    if (tt.tv_nsec >= PR_NSEC_PER_SEC) {
        tt.tv_sec++;
        tt.tv_nsec -= PR_NSEC_PER_SEC;
    }
    me->md.sp = unixware_getsp();

    /* XXX Solaris 2.5.x gives back EINTR occasionally for no reason
     * hence ignore EINTR for now */

    COND_TIMEDWAIT(&md_cv->cv, &md_lock->lock, &tt);
}

void _MD_lock(struct _MDLock *md_lock)
{
    mutex_lock(&md_lock->lock);
}

void _MD_unlock(struct _MDLock *md_lock)
{
    mutex_unlock(&((md_lock)->lock));
}


PRThread *_pr_current_thread_tls()
{
    PRThread *ret;

    thr_getspecific(threadid_key, (void **)&ret);
    return ret;
}

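/* Block on the per-thread semaphore until _MD_WAKEUP_WAITER posts it. */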
PRStatus
_MD_WAIT(PRThread *thread, PRIntervalTime ticks)
{
    _MD_WAIT_SEM(&thread->md.waiter_sem);
    return PR_SUCCESS;
}

PRStatus
_MD_WAKEUP_WAITER(PRThread *thread)
{
    if (thread == NULL) {
        return PR_SUCCESS;
    }
    _MD_POST_SEM(&thread->md.waiter_sem);
    return PR_SUCCESS;
}

_PRCPU *_pr_current_cpu_tls()
{
    _PRCPU *ret;

    thr_getspecific(cpuid_key, (void **)&ret);
    return ret;
}

PRThread *_pr_last_thread_tls()
{
    PRThread *ret;

    thr_getspecific(last_thread_key, (void **)&ret);
    return ret;
}

_MDLock _pr_ioq_lock;

void _MD_INIT_IO (void)
{
    _MD_NEW_LOCK(&_pr_ioq_lock);
}

PRStatus _MD_InitializeThread(PRThread *thread)
{
    if (!_PR_IS_NATIVE_THREAD(thread))
        return PR_SUCCESS;
    /* prime the sp; subtract 4 so we don't hit the assert that
     * curr sp > base_stack
     */
    thread->md.sp = (uint_t) thread->stack->allocBase - sizeof(long);
    thread->md.lwpid = _lwp_self();
    thread->md.handle = THR_SELF();

    /* all threads on Solaris are global threads from NSPR's perspective
     * since all of them are mapped to Solaris threads.
     */
    thread->flags |= _PR_GLOBAL_SCOPE;

    /* For the primordial/attached thread, we don't create an underlying native
     * thread, so _MD_CREATE_THREAD() does not get called. We need to do the
     * initialization here, such as allocating the thread's synchronization
     * variables and setting the underlying native thread's priority.
     */
    if (thread->flags & (_PR_PRIMORDIAL | _PR_ATTACHED)) {
        _MD_NEW_SEM(&thread->md.waiter_sem, 0);
        _MD_SET_PRIORITY(&(thread->md), thread->priority);
    }
    return PR_SUCCESS;
}

static sigset_t old_mask;        /* store away original gc thread sigmask */
static int gcprio;               /* store away original gc thread priority */
static lwpid_t *all_lwps=NULL;   /* list of lwps that we suspended */
static int num_lwps;
static int suspendAllOn = 0;

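/* Check that a saved stack pointer lies strictly within the given stack bounds. */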
#define VALID_SP(sp, bottom, top) \
    (((uint_t)(sp)) > ((uint_t)(bottom)) && ((uint_t)(sp)) < ((uint_t)(top)))

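/*
 * Block all signals (and later restore the previous mask) so the GC thread
 * cannot be preempted by the clock interrupt while it suspends other threads.
 */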
void unixware_preempt_off()
{
    sigset_t set;
    (void)sigfillset(&set);
    sigprocmask (SIG_SETMASK, &set, &old_mask);
}

void unixware_preempt_on()
{
    sigprocmask (SIG_SETMASK, &old_mask, NULL);
}

void _MD_Begin_SuspendAll()
{
    unixware_preempt_off();

    PR_LOG(_pr_gc_lm, PR_LOG_ALWAYS, ("Begin_SuspendAll\n"));
    /* run at highest prio so I cannot be preempted */
    thr_getprio(thr_self(), &gcprio);
    thr_setprio(thr_self(), 0x7fffffff);
    suspendAllOn = 1;
}

void _MD_End_SuspendAll()
{
}

void _MD_End_ResumeAll()
{
    PR_LOG(_pr_gc_lm, PR_LOG_ALWAYS, ("End_ResumeAll\n"));
    thr_setprio(thr_self(), gcprio);
    unixware_preempt_on();
    suspendAllOn = 0;
}

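/*
 * Stop a single thread with thr_suspend(); the remaining checks bail out
 * early for non-GCable threads, the primordial thread, and threads whose
 * lwpid has not been set yet.
 */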
void _MD_Suspend(PRThread *thr)
{
    int lwp_fd, result;
    int lwp_main_proc_fd = 0;

    thr_suspend(thr->md.handle);
    if (!_PR_IS_GCABLE_THREAD(thr))
        return;
    /* XXX The primordial thread can't be bound to an lwp, hence there is no
     * way we can assume that we can get the lwp status for the primordial
     * thread reliably. So we skip this for the primordial thread, hoping
     * that the SP is saved during lock and cond. wait.
     * XXX - Again, this is a concern only for the java interpreter, not for
     * the server, 'cause the primordial thread in the server does not do
     * java work.
     */
    if (thr->flags & _PR_PRIMORDIAL)
        return;

    /* if the thread is not started yet then don't do anything */
    if (!suspendAllOn || thr->md.lwpid == -1)
        return;

}

void _MD_Resume(PRThread *thr)
{
    if (!_PR_IS_GCABLE_THREAD(thr) || !suspendAllOn){
        /* XXX While suspendAllOn is set we may be in the middle of
         * lwp_suspend, and during that time we can't make any thread library
         * or libc calls. Hence make sure that no resume is requested for a
         * non-GCable thread while suspendAllOn is set. */
        PR_ASSERT(!suspendAllOn);
        thr_continue(thr->md.handle);
        return;
    }
    if (thr->md.lwpid == -1)
        return;

    if ( _lwp_continue(thr->md.lwpid) < 0) {
        PR_ASSERT(0);    /* ARGH, we are hosed! */
    }
}


PRWord *_MD_HomeGCRegisters(PRThread *t, int isCurrent, int *np)
{
    if (isCurrent) {
        (void) getcontext(CONTEXT(t));    /* XXX tune me: set md_IRIX.c */
    }
    *np = NGREG;
    if (t->md.lwpid == -1)
        memset(&t->md.context.uc_mcontext.gregs[0], 0, NGREG * sizeof(PRWord));
    return (PRWord*) &t->md.context.uc_mcontext.gregs[0];
}

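/* Emulate clock_gettime() using gettimeofday(), so the result only has
 * microsecond resolution. */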
int
_pr_unixware_clock_gettime (struct timespec *tp)
{
    struct timeval tv;

    gettimeofday(&tv, NULL);
    tp->tv_sec = tv.tv_sec;
    tp->tv_nsec = tv.tv_usec * 1000;
    return 0;
}


#endif /* USE_SVR4_THREADS */