nsprpub/pr/src/md/unix/pthreads_user.c

Wed, 31 Dec 2014 06:09:35 +0100

author
Michael Schloh von Bennewitz <michael@schloh.com>
date
Wed, 31 Dec 2014 06:09:35 +0100
changeset 0
6474c204b198
permissions
-rw-r--r--

Cloned from upstream origin tor-browser at tag tor-browser-31.3.0esr-4.5-1-build1
(revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f) for hacking purposes.

michael@0 1 /* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
michael@0 2 /* This Source Code Form is subject to the terms of the Mozilla Public
michael@0 3 * License, v. 2.0. If a copy of the MPL was not distributed with this
michael@0 4 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
michael@0 5
michael@0 6 #include "primpl.h"
michael@0 7 #include <sys/types.h>
michael@0 8 #include <unistd.h>
michael@0 9 #include <signal.h>
michael@0 10 #include <pthread.h>
michael@0 11
michael@0 12
/* Signals treated as "interrupts" by the user-level scheduler; blocked
 * while interrupts are off (set populated in _MD_EarlyInit). */
sigset_t ints_off;
/* Serializes NSPR heap operations across pthreads. */
pthread_mutex_t _pr_heapLock;
/* Thread-specific-data keys used by the user-level threading machinery. */
pthread_key_t current_thread_key;
pthread_key_t current_cpu_key;
pthread_key_t last_thread_key;
pthread_key_t intsoff_key;


/* Counters: pthreads created / failed, and the live pthread count
 * (initialized to 1 — presumably counting the primordial thread). */
PRInt32 _pr_md_pthreads_created, _pr_md_pthreads_failed;
PRInt32 _pr_md_pthreads = 1;
michael@0 23
michael@0 24 void _MD_EarlyInit(void)
michael@0 25 {
michael@0 26 extern PRInt32 _nspr_noclock;
michael@0 27
michael@0 28 if (pthread_key_create(&current_thread_key, NULL) != 0) {
michael@0 29 perror("pthread_key_create failed");
michael@0 30 exit(1);
michael@0 31 }
michael@0 32 if (pthread_key_create(&current_cpu_key, NULL) != 0) {
michael@0 33 perror("pthread_key_create failed");
michael@0 34 exit(1);
michael@0 35 }
michael@0 36 if (pthread_key_create(&last_thread_key, NULL) != 0) {
michael@0 37 perror("pthread_key_create failed");
michael@0 38 exit(1);
michael@0 39 }
michael@0 40 if (pthread_key_create(&intsoff_key, NULL) != 0) {
michael@0 41 perror("pthread_key_create failed");
michael@0 42 exit(1);
michael@0 43 }
michael@0 44
michael@0 45 sigemptyset(&ints_off);
michael@0 46 sigaddset(&ints_off, SIGALRM);
michael@0 47 sigaddset(&ints_off, SIGIO);
michael@0 48 sigaddset(&ints_off, SIGCLD);
michael@0 49
michael@0 50 /*
michael@0 51 * disable clock interrupts
michael@0 52 */
michael@0 53 _nspr_noclock = 1;
michael@0 54
michael@0 55 }
michael@0 56
michael@0 57 void _MD_InitLocks()
michael@0 58 {
michael@0 59 if (pthread_mutex_init(&_pr_heapLock, NULL) != 0) {
michael@0 60 perror("pthread_mutex_init failed");
michael@0 61 exit(1);
michael@0 62 }
michael@0 63 }
michael@0 64
michael@0 65 PR_IMPLEMENT(void) _MD_FREE_LOCK(struct _MDLock *lockp)
michael@0 66 {
michael@0 67 PRIntn _is;
michael@0 68 PRThread *me = _PR_MD_CURRENT_THREAD();
michael@0 69
michael@0 70 if (me && !_PR_IS_NATIVE_THREAD(me))
michael@0 71 _PR_INTSOFF(_is);
michael@0 72 pthread_mutex_destroy(&lockp->mutex);
michael@0 73 if (me && !_PR_IS_NATIVE_THREAD(me))
michael@0 74 _PR_FAST_INTSON(_is);
michael@0 75 }
michael@0 76
michael@0 77
michael@0 78
michael@0 79 PR_IMPLEMENT(PRStatus) _MD_NEW_LOCK(struct _MDLock *lockp)
michael@0 80 {
michael@0 81 PRStatus rv;
michael@0 82 PRIntn is;
michael@0 83 PRThread *me = _PR_MD_CURRENT_THREAD();
michael@0 84
michael@0 85 if (me && !_PR_IS_NATIVE_THREAD(me))
michael@0 86 _PR_INTSOFF(is);
michael@0 87 rv = pthread_mutex_init(&lockp->mutex, NULL);
michael@0 88 if (me && !_PR_IS_NATIVE_THREAD(me))
michael@0 89 _PR_FAST_INTSON(is);
michael@0 90 return (rv == 0) ? PR_SUCCESS : PR_FAILURE;
michael@0 91 }
michael@0 92
michael@0 93
/*
 * Hand the garbage collector the saved register context of thread |t|
 * as a word array: returns a pointer to the context buffer and stores
 * its length (in PRWords) through |np|.
 *
 * When |t| is the calling thread, setjmp() is used to spill the live
 * registers into CONTEXT(t) first, so the GC scans current values.
 */
PRWord *_MD_HomeGCRegisters(PRThread *t, int isCurrent, int *np)
{
    if (isCurrent) {
        (void) setjmp(CONTEXT(t));
    }
    *np = sizeof(CONTEXT(t)) / sizeof(PRWord);
    return (PRWord *) CONTEXT(t);
}
michael@0 102
michael@0 103 PR_IMPLEMENT(void)
michael@0 104 _MD_SetPriority(_MDThread *thread, PRThreadPriority newPri)
michael@0 105 {
michael@0 106 /*
michael@0 107 * XXX - to be implemented
michael@0 108 */
michael@0 109 return;
michael@0 110 }
michael@0 111
michael@0 112 PR_IMPLEMENT(PRStatus) _MD_InitThread(struct PRThread *thread)
michael@0 113 {
michael@0 114 struct sigaction sigact;
michael@0 115
michael@0 116 if (thread->flags & _PR_GLOBAL_SCOPE) {
michael@0 117 thread->md.pthread = pthread_self();
michael@0 118 #if 0
michael@0 119 /*
michael@0 120 * set up SIGUSR1 handler; this is used to save state
michael@0 121 * during PR_SuspendAll
michael@0 122 */
michael@0 123 sigact.sa_handler = save_context_and_block;
michael@0 124 sigact.sa_flags = SA_RESTART;
michael@0 125 /*
michael@0 126 * Must mask clock interrupts
michael@0 127 */
michael@0 128 sigact.sa_mask = timer_set;
michael@0 129 sigaction(SIGUSR1, &sigact, 0);
michael@0 130 #endif
michael@0 131 }
michael@0 132
michael@0 133 return PR_SUCCESS;
michael@0 134 }
michael@0 135
/*
 * Per-thread machine-dependent teardown, run as the thread exits.
 * Only global-scope (native pthread) threads carry md state: their
 * mutex/condvar are destroyed and the current-thread TLS slot is
 * cleared so later lookups cannot see a dead thread.
 */
PR_IMPLEMENT(void) _MD_ExitThread(struct PRThread *thread)
{
    if (thread->flags & _PR_GLOBAL_SCOPE) {
        _MD_CLEAN_THREAD(thread);
        _MD_SET_CURRENT_THREAD(NULL);
    }
}
michael@0 143
michael@0 144 PR_IMPLEMENT(void) _MD_CleanThread(struct PRThread *thread)
michael@0 145 {
michael@0 146 if (thread->flags & _PR_GLOBAL_SCOPE) {
michael@0 147 pthread_mutex_destroy(&thread->md.pthread_mutex);
michael@0 148 pthread_cond_destroy(&thread->md.pthread_cond);
michael@0 149 }
michael@0 150 }
michael@0 151
michael@0 152 PR_IMPLEMENT(void) _MD_SuspendThread(struct PRThread *thread)
michael@0 153 {
michael@0 154 PRInt32 rv;
michael@0 155
michael@0 156 PR_ASSERT((thread->flags & _PR_GLOBAL_SCOPE) &&
michael@0 157 _PR_IS_GCABLE_THREAD(thread));
michael@0 158 #if 0
michael@0 159 thread->md.suspending_id = getpid();
michael@0 160 rv = kill(thread->md.id, SIGUSR1);
michael@0 161 PR_ASSERT(rv == 0);
michael@0 162 /*
michael@0 163 * now, block the current thread/cpu until woken up by the suspended
michael@0 164 * thread from it's SIGUSR1 signal handler
michael@0 165 */
michael@0 166 blockproc(getpid());
michael@0 167 #endif
michael@0 168 }
michael@0 169
michael@0 170 PR_IMPLEMENT(void) _MD_ResumeThread(struct PRThread *thread)
michael@0 171 {
michael@0 172 PRInt32 rv;
michael@0 173
michael@0 174 PR_ASSERT((thread->flags & _PR_GLOBAL_SCOPE) &&
michael@0 175 _PR_IS_GCABLE_THREAD(thread));
michael@0 176 #if 0
michael@0 177 rv = unblockproc(thread->md.id);
michael@0 178 #endif
michael@0 179 }
michael@0 180
michael@0 181 PR_IMPLEMENT(void) _MD_SuspendCPU(struct _PRCPU *thread)
michael@0 182 {
michael@0 183 PRInt32 rv;
michael@0 184
michael@0 185 #if 0
michael@0 186 cpu->md.suspending_id = getpid();
michael@0 187 rv = kill(cpu->md.id, SIGUSR1);
michael@0 188 PR_ASSERT(rv == 0);
michael@0 189 /*
michael@0 190 * now, block the current thread/cpu until woken up by the suspended
michael@0 191 * thread from it's SIGUSR1 signal handler
michael@0 192 */
michael@0 193 blockproc(getpid());
michael@0 194 #endif
michael@0 195 }
michael@0 196
michael@0 197 PR_IMPLEMENT(void) _MD_ResumeCPU(struct _PRCPU *thread)
michael@0 198 {
michael@0 199 #if 0
michael@0 200 unblockproc(cpu->md.id);
michael@0 201 #endif
michael@0 202 }
michael@0 203
michael@0 204
#define PT_NANOPERMICRO 1000UL
#define PT_BILLION 1000000000UL

/*
 * Block the calling (global-scope) thread on its per-thread condition
 * variable until _MD_WakeupWaiter signals it or, when |timeout| is not
 * PR_INTERVAL_NO_TIMEOUT, until the absolute deadline passes.
 *
 * thread->md.wait is a counted wakeup token guarded by the per-thread
 * mutex: the waiter decrements it and only sleeps if the result is
 * negative, so a wakeup posted before the wait (leaving the count
 * non-negative) is not lost.  A failed/timed-out wait undoes the
 * decrement.
 *
 * Returns PR_SUCCESS if woken normally, PR_FAILURE on timeout/error.
 */
PR_IMPLEMENT(PRStatus)
_pt_wait(PRThread *thread, PRIntervalTime timeout)
{
    int rv;
    struct timeval now;
    struct timespec tmo;
    PRUint32 ticks = PR_TicksPerSecond();


    if (timeout != PR_INTERVAL_NO_TIMEOUT) {
        /* Split the interval into whole seconds plus leftover ticks,
         * then convert the leftover ticks to nanoseconds (scaled by
         * 1000 before the ticks->microseconds conversion). */
        tmo.tv_sec = timeout / ticks;
        tmo.tv_nsec = timeout - (tmo.tv_sec * ticks);
        tmo.tv_nsec = PR_IntervalToMicroseconds(PT_NANOPERMICRO *
            tmo.tv_nsec);

        /* pthreads wants this in absolute time, off we go ... */
        (void)GETTIMEOFDAY(&now);
        /* that one's usecs, this one's nsecs - grrrr! */
        tmo.tv_sec += now.tv_sec;
        tmo.tv_nsec += (PT_NANOPERMICRO * now.tv_usec);
        /* Normalize so tv_nsec stays below one billion. */
        tmo.tv_sec += tmo.tv_nsec / PT_BILLION;
        tmo.tv_nsec %= PT_BILLION;
    }

    pthread_mutex_lock(&thread->md.pthread_mutex);
    thread->md.wait--;
    if (thread->md.wait < 0) {
        if (timeout != PR_INTERVAL_NO_TIMEOUT) {
            rv = pthread_cond_timedwait(&thread->md.pthread_cond,
                &thread->md.pthread_mutex, &tmo);
        }
        else
            rv = pthread_cond_wait(&thread->md.pthread_cond,
                &thread->md.pthread_mutex);
        if (rv != 0) {
            /* Wait failed (e.g. ETIMEDOUT): give the token back. */
            thread->md.wait++;
        }
    } else
        rv = 0;
    pthread_mutex_unlock(&thread->md.pthread_mutex);

    return (rv == 0) ? PR_SUCCESS : PR_FAILURE;
}
michael@0 251
/*
 * Machine-dependent wait for |thread|, up to |ticks| interval units.
 *
 * Local threads simply switch context back to the scheduler.  Global
 * threads block in _pt_wait; if that wait fails (timed out), there is
 * a race window where a waker may already have committed to waking us:
 *   - wait.cvar still set  -> genuine timeout; clear it, mark RUNNING
 *     (the thread removes itself from the cvar's waitQ later, in
 *     _PR_WaitCondVar).
 *   - wait.cvar cleared    -> a wakeup is in flight; wait again without
 *     a timeout to consume the pending signal.
 * Always returns PR_SUCCESS.
 */
PR_IMPLEMENT(PRStatus)
_MD_wait(PRThread *thread, PRIntervalTime ticks)
{
    if ( thread->flags & _PR_GLOBAL_SCOPE ) {
        _MD_CHECK_FOR_EXIT();
        if (_pt_wait(thread, ticks) == PR_FAILURE) {
            _MD_CHECK_FOR_EXIT();
            /*
             * wait timed out
             */
            _PR_THREAD_LOCK(thread);
            if (thread->wait.cvar) {
                /*
                 * The thread will remove itself from the waitQ
                 * of the cvar in _PR_WaitCondVar
                 */
                thread->wait.cvar = NULL;
                thread->state = _PR_RUNNING;
                _PR_THREAD_UNLOCK(thread);
            } else {
                /* Waker already claimed us: absorb its signal. */
                _pt_wait(thread, PR_INTERVAL_NO_TIMEOUT);
                _PR_THREAD_UNLOCK(thread);
            }
        }
    } else {
        _PR_MD_SWITCH_CONTEXT(thread);
    }
    return PR_SUCCESS;
}
michael@0 281
michael@0 282 PR_IMPLEMENT(PRStatus)
michael@0 283 _MD_WakeupWaiter(PRThread *thread)
michael@0 284 {
michael@0 285 PRThread *me = _PR_MD_CURRENT_THREAD();
michael@0 286 PRInt32 pid, rv;
michael@0 287 PRIntn is;
michael@0 288
michael@0 289 PR_ASSERT(_pr_md_idle_cpus >= 0);
michael@0 290 if (thread == NULL) {
michael@0 291 if (_pr_md_idle_cpus)
michael@0 292 _MD_Wakeup_CPUs();
michael@0 293 } else if (!_PR_IS_NATIVE_THREAD(thread)) {
michael@0 294 /*
michael@0 295 * If the thread is on my cpu's runq there is no need to
michael@0 296 * wakeup any cpus
michael@0 297 */
michael@0 298 if (!_PR_IS_NATIVE_THREAD(me)) {
michael@0 299 if (me->cpu != thread->cpu) {
michael@0 300 if (_pr_md_idle_cpus)
michael@0 301 _MD_Wakeup_CPUs();
michael@0 302 }
michael@0 303 } else {
michael@0 304 if (_pr_md_idle_cpus)
michael@0 305 _MD_Wakeup_CPUs();
michael@0 306 }
michael@0 307 } else {
michael@0 308 PR_ASSERT(_PR_IS_NATIVE_THREAD(thread));
michael@0 309 if (!_PR_IS_NATIVE_THREAD(me))
michael@0 310 _PR_INTSOFF(is);
michael@0 311
michael@0 312 pthread_mutex_lock(&thread->md.pthread_mutex);
michael@0 313 thread->md.wait++;
michael@0 314 rv = pthread_cond_signal(&thread->md.pthread_cond);
michael@0 315 PR_ASSERT(rv == 0);
michael@0 316 pthread_mutex_unlock(&thread->md.pthread_mutex);
michael@0 317
michael@0 318 if (!_PR_IS_NATIVE_THREAD(me))
michael@0 319 _PR_FAST_INTSON(is);
michael@0 320 }
michael@0 321 return PR_SUCCESS;
michael@0 322 }
michael@0 323
/* These functions should not be called for AIX */
/*
 * NOTE(review): the AIX wording looks copied from the AIX port — this
 * file is the generic user-level pthreads port; confirm the intent.
 * Either way, reaching this function is treated as a fatal logic error.
 */
PR_IMPLEMENT(void)
_MD_YIELD(void)
{
    PR_NOT_REACHED("_MD_YIELD should not be called for AIX.");
}
michael@0 330
michael@0 331 PR_IMPLEMENT(PRStatus)
michael@0 332 _MD_CreateThread(
michael@0 333 PRThread *thread,
michael@0 334 void (*start) (void *),
michael@0 335 PRThreadPriority priority,
michael@0 336 PRThreadScope scope,
michael@0 337 PRThreadState state,
michael@0 338 PRUint32 stackSize)
michael@0 339 {
michael@0 340 PRIntn is;
michael@0 341 int rv;
michael@0 342 PRThread *me = _PR_MD_CURRENT_THREAD();
michael@0 343 pthread_attr_t attr;
michael@0 344
michael@0 345 if (!_PR_IS_NATIVE_THREAD(me))
michael@0 346 _PR_INTSOFF(is);
michael@0 347
michael@0 348 if (pthread_mutex_init(&thread->md.pthread_mutex, NULL) != 0) {
michael@0 349 if (!_PR_IS_NATIVE_THREAD(me))
michael@0 350 _PR_FAST_INTSON(is);
michael@0 351 return PR_FAILURE;
michael@0 352 }
michael@0 353
michael@0 354 if (pthread_cond_init(&thread->md.pthread_cond, NULL) != 0) {
michael@0 355 pthread_mutex_destroy(&thread->md.pthread_mutex);
michael@0 356 if (!_PR_IS_NATIVE_THREAD(me))
michael@0 357 _PR_FAST_INTSON(is);
michael@0 358 return PR_FAILURE;
michael@0 359 }
michael@0 360 thread->flags |= _PR_GLOBAL_SCOPE;
michael@0 361
michael@0 362 pthread_attr_init(&attr); /* initialize attr with default attributes */
michael@0 363 if (pthread_attr_setstacksize(&attr, (size_t) stackSize) != 0) {
michael@0 364 pthread_mutex_destroy(&thread->md.pthread_mutex);
michael@0 365 pthread_cond_destroy(&thread->md.pthread_cond);
michael@0 366 pthread_attr_destroy(&attr);
michael@0 367 if (!_PR_IS_NATIVE_THREAD(me))
michael@0 368 _PR_FAST_INTSON(is);
michael@0 369 return PR_FAILURE;
michael@0 370 }
michael@0 371
michael@0 372 thread->md.wait = 0;
michael@0 373 rv = pthread_create(&thread->md.pthread, &attr, start, (void *)thread);
michael@0 374 if (0 == rv) {
michael@0 375 _MD_ATOMIC_INCREMENT(&_pr_md_pthreads_created);
michael@0 376 _MD_ATOMIC_INCREMENT(&_pr_md_pthreads);
michael@0 377 if (!_PR_IS_NATIVE_THREAD(me))
michael@0 378 _PR_FAST_INTSON(is);
michael@0 379 return PR_SUCCESS;
michael@0 380 } else {
michael@0 381 pthread_mutex_destroy(&thread->md.pthread_mutex);
michael@0 382 pthread_cond_destroy(&thread->md.pthread_cond);
michael@0 383 pthread_attr_destroy(&attr);
michael@0 384 _MD_ATOMIC_INCREMENT(&_pr_md_pthreads_failed);
michael@0 385 if (!_PR_IS_NATIVE_THREAD(me))
michael@0 386 _PR_FAST_INTSON(is);
michael@0 387 PR_SetError(PR_INSUFFICIENT_RESOURCES_ERROR, rv);
michael@0 388 return PR_FAILURE;
michael@0 389 }
michael@0 390 }
michael@0 391
/*
 * Machine-dependent setup for a CPU (virtual processor) structure,
 * run on the pthread that will host it.  Records the pthread identity
 * and, if the cross-CPU wakeup pipe exists, registers its read end in
 * this CPU's I/O wait set so _MD_Wakeup_CPUs can interrupt a blocked
 * select (FD_SET path) or poll.
 */
PR_IMPLEMENT(void)
_MD_InitRunningCPU(struct _PRCPU *cpu)
{
    extern int _pr_md_pipefd[2];

    _MD_unix_init_running_cpu(cpu);
    cpu->md.pthread = pthread_self();
    if (_pr_md_pipefd[0] >= 0) {
        _PR_IOQ_MAX_OSFD(cpu) = _pr_md_pipefd[0];
#ifndef _PR_USE_POLL
        FD_SET(_pr_md_pipefd[0], &_PR_FD_READ_SET(cpu));
#endif
    }
}
michael@0 406
michael@0 407
/*
 * Final machine-dependent cleanup before process exit.  The entire
 * body (IRIX-style: shut down extra CPUs, then post to each thread on
 * the dead-native-thread recycle queue so it can exit) is disabled,
 * so this is currently a no-op on this port.
 */
void
_MD_CleanupBeforeExit(void)
{
#if 0
    extern PRInt32 _pr_cpus_exit;

    _pr_irix_exit_now = 1;
    if (_pr_numCPU > 1) {
        /*
         * Set a global flag, and wakeup all cpus which will notice the flag
         * and exit.
         */
        _pr_cpus_exit = getpid();
        _MD_Wakeup_CPUs();
        while(_pr_numCPU > 1) {
            _PR_WAIT_SEM(_pr_irix_exit_sem);
            _pr_numCPU--;
        }
    }
    /*
     * cause global threads on the recycle list to exit
     */
    _PR_DEADQ_LOCK;
    if (_PR_NUM_DEADNATIVE != 0) {
        PRThread *thread;
        PRCList *ptr;

        ptr = _PR_DEADNATIVEQ.next;
        while( ptr != &_PR_DEADNATIVEQ ) {
            thread = _PR_THREAD_PTR(ptr);
            _MD_CVAR_POST_SEM(thread);
            ptr = ptr->next;
        }
    }
    _PR_DEADQ_UNLOCK;
    while(_PR_NUM_DEADNATIVE > 1) {
        _PR_WAIT_SEM(_pr_irix_exit_sem);
        _PR_DEC_DEADNATIVE;
    }
#endif
}

mercurial