/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "primpl.h"
#include <sys/types.h>
#include <unistd.h>
#include <signal.h>
#include <pthread.h>


sigset_t ints_off;
pthread_mutex_t _pr_heapLock;
pthread_key_t current_thread_key;
pthread_key_t current_cpu_key;
pthread_key_t last_thread_key;
pthread_key_t intsoff_key;


PRInt32 _pr_md_pthreads_created, _pr_md_pthreads_failed;
PRInt32 _pr_md_pthreads = 1;

void _MD_EarlyInit(void)
{
    extern PRInt32 _nspr_noclock;

    if (pthread_key_create(&current_thread_key, NULL) != 0) {
        perror("pthread_key_create failed");
        exit(1);
    }
    if (pthread_key_create(&current_cpu_key, NULL) != 0) {
        perror("pthread_key_create failed");
        exit(1);
    }
    if (pthread_key_create(&last_thread_key, NULL) != 0) {
        perror("pthread_key_create failed");
        exit(1);
    }
    if (pthread_key_create(&intsoff_key, NULL) != 0) {
        perror("pthread_key_create failed");
        exit(1);
    }

    sigemptyset(&ints_off);
    sigaddset(&ints_off, SIGALRM);
    sigaddset(&ints_off, SIGIO);
    sigaddset(&ints_off, SIGCLD);

    /*
     * disable clock interrupts
     */
    _nspr_noclock = 1;
}

void _MD_InitLocks()
{
    if (pthread_mutex_init(&_pr_heapLock, NULL) != 0) {
        perror("pthread_mutex_init failed");
        exit(1);
    }
}

PR_IMPLEMENT(void) _MD_FREE_LOCK(struct _MDLock *lockp)
{
    PRIntn _is;
    PRThread *me = _PR_MD_CURRENT_THREAD();

    if (me && !_PR_IS_NATIVE_THREAD(me))
        _PR_INTSOFF(_is);
    pthread_mutex_destroy(&lockp->mutex);
    if (me && !_PR_IS_NATIVE_THREAD(me))
        _PR_FAST_INTSON(_is);
}


PR_IMPLEMENT(PRStatus) _MD_NEW_LOCK(struct _MDLock *lockp)
{
    PRStatus rv;
    PRIntn is;
    PRThread *me = _PR_MD_CURRENT_THREAD();

    if (me && !_PR_IS_NATIVE_THREAD(me))
        _PR_INTSOFF(is);
    rv = pthread_mutex_init(&lockp->mutex, NULL);
    if (me && !_PR_IS_NATIVE_THREAD(me))
        _PR_FAST_INTSON(is);
    return (rv == 0) ? PR_SUCCESS : PR_FAILURE;
}
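
/*
 * A minimal usage sketch for the two MD lock primitives above; illustrative
 * only, so it is kept disabled.  It assumes no more about struct _MDLock
 * than the code above does, namely that the pthread mutex is the 'mutex'
 * member.
 */
#if 0
static void example_md_lock_usage(void)
{
    struct _MDLock lock;

    if (_MD_NEW_LOCK(&lock) != PR_SUCCESS)
        return;                          /* pthread_mutex_init failed */
    pthread_mutex_lock(&lock.mutex);     /* critical section */
    pthread_mutex_unlock(&lock.mutex);
    _MD_FREE_LOCK(&lock);                /* destroys the underlying mutex */
}
#endif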

PRWord *_MD_HomeGCRegisters(PRThread *t, int isCurrent, int *np)
{
    if (isCurrent) {
        (void) setjmp(CONTEXT(t));
    }
    *np = sizeof(CONTEXT(t)) / sizeof(PRWord);
    return (PRWord *) CONTEXT(t);
}

PR_IMPLEMENT(void)
_MD_SetPriority(_MDThread *thread, PRThreadPriority newPri)
{
    /*
     * XXX - to be implemented
     */
    return;
}

PR_IMPLEMENT(PRStatus) _MD_InitThread(struct PRThread *thread)
{
    struct sigaction sigact;

    if (thread->flags & _PR_GLOBAL_SCOPE) {
        thread->md.pthread = pthread_self();
#if 0
        /*
         * set up SIGUSR1 handler; this is used to save state
         * during PR_SuspendAll
         */
        sigact.sa_handler = save_context_and_block;
        sigact.sa_flags = SA_RESTART;
        /*
         * Must mask clock interrupts
         */
        sigact.sa_mask = timer_set;
        sigaction(SIGUSR1, &sigact, 0);
#endif
    }

    return PR_SUCCESS;
}

PR_IMPLEMENT(void) _MD_ExitThread(struct PRThread *thread)
{
    if (thread->flags & _PR_GLOBAL_SCOPE) {
        _MD_CLEAN_THREAD(thread);
        _MD_SET_CURRENT_THREAD(NULL);
    }
}

PR_IMPLEMENT(void) _MD_CleanThread(struct PRThread *thread)
{
    if (thread->flags & _PR_GLOBAL_SCOPE) {
        pthread_mutex_destroy(&thread->md.pthread_mutex);
        pthread_cond_destroy(&thread->md.pthread_cond);
    }
}

PR_IMPLEMENT(void) _MD_SuspendThread(struct PRThread *thread)
{
    PRInt32 rv;

    PR_ASSERT((thread->flags & _PR_GLOBAL_SCOPE) &&
        _PR_IS_GCABLE_THREAD(thread));
#if 0
    thread->md.suspending_id = getpid();
    rv = kill(thread->md.id, SIGUSR1);
    PR_ASSERT(rv == 0);
    /*
     * now, block the current thread/cpu until woken up by the suspended
     * thread from its SIGUSR1 signal handler
     */
    blockproc(getpid());
#endif
}

PR_IMPLEMENT(void) _MD_ResumeThread(struct PRThread *thread)
{
    PRInt32 rv;

    PR_ASSERT((thread->flags & _PR_GLOBAL_SCOPE) &&
        _PR_IS_GCABLE_THREAD(thread));
#if 0
    rv = unblockproc(thread->md.id);
#endif
}

PR_IMPLEMENT(void) _MD_SuspendCPU(struct _PRCPU *thread)
{
    PRInt32 rv;

#if 0
    cpu->md.suspending_id = getpid();
    rv = kill(cpu->md.id, SIGUSR1);
    PR_ASSERT(rv == 0);
    /*
     * now, block the current thread/cpu until woken up by the suspended
     * thread from its SIGUSR1 signal handler
     */
    blockproc(getpid());
#endif
}

PR_IMPLEMENT(void) _MD_ResumeCPU(struct _PRCPU *thread)
{
#if 0
    unblockproc(cpu->md.id);
#endif
}


#define PT_NANOPERMICRO 1000UL
#define PT_BILLION 1000000000UL
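
/*
 * _pt_wait() below turns a relative PRIntervalTime into the absolute
 * timespec that pthread_cond_timedwait() requires.  The remainder of the
 * interval, in ticks, is scaled by PT_NANOPERMICRO and then passed through
 * PR_IntervalToMicroseconds(), which together convert it to nanoseconds.
 * For example, if PR_TicksPerSecond() returned 100000 and timeout were
 * 250000 ticks (2.5 s), tv_sec would be 2 and the remaining 50000 ticks
 * would become PR_IntervalToMicroseconds(1000 * 50000) == 500000000, i.e.
 * 0.5 s expressed in nanoseconds; the current time of day is then added to
 * make the deadline absolute.
 */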
PR_IMPLEMENT(PRStatus)
_pt_wait(PRThread *thread, PRIntervalTime timeout)
{
    int rv;
    struct timeval now;
    struct timespec tmo;
    PRUint32 ticks = PR_TicksPerSecond();


    if (timeout != PR_INTERVAL_NO_TIMEOUT) {
        tmo.tv_sec = timeout / ticks;
        tmo.tv_nsec = timeout - (tmo.tv_sec * ticks);
        tmo.tv_nsec = PR_IntervalToMicroseconds(PT_NANOPERMICRO *
            tmo.tv_nsec);

        /* pthreads wants this in absolute time, off we go ... */
        (void)GETTIMEOFDAY(&now);
        /* that one's usecs, this one's nsecs - grrrr! */
        tmo.tv_sec += now.tv_sec;
        tmo.tv_nsec += (PT_NANOPERMICRO * now.tv_usec);
        tmo.tv_sec += tmo.tv_nsec / PT_BILLION;
        tmo.tv_nsec %= PT_BILLION;
    }

    pthread_mutex_lock(&thread->md.pthread_mutex);
    thread->md.wait--;
    if (thread->md.wait < 0) {
        if (timeout != PR_INTERVAL_NO_TIMEOUT) {
            rv = pthread_cond_timedwait(&thread->md.pthread_cond,
                &thread->md.pthread_mutex, &tmo);
        }
        else
            rv = pthread_cond_wait(&thread->md.pthread_cond,
                &thread->md.pthread_mutex);
        if (rv != 0) {
            thread->md.wait++;
        }
    } else
        rv = 0;
    pthread_mutex_unlock(&thread->md.pthread_mutex);

    return (rv == 0) ? PR_SUCCESS : PR_FAILURE;
}

PR_IMPLEMENT(PRStatus)
_MD_wait(PRThread *thread, PRIntervalTime ticks)
{
    if ( thread->flags & _PR_GLOBAL_SCOPE ) {
        _MD_CHECK_FOR_EXIT();
        if (_pt_wait(thread, ticks) == PR_FAILURE) {
            _MD_CHECK_FOR_EXIT();
            /*
             * wait timed out
             */
            _PR_THREAD_LOCK(thread);
            if (thread->wait.cvar) {
                /*
                 * The thread will remove itself from the waitQ
                 * of the cvar in _PR_WaitCondVar
                 */
                thread->wait.cvar = NULL;
                thread->state = _PR_RUNNING;
                _PR_THREAD_UNLOCK(thread);
            } else {
                _pt_wait(thread, PR_INTERVAL_NO_TIMEOUT);
                _PR_THREAD_UNLOCK(thread);
            }
        }
    } else {
        _PR_MD_SWITCH_CONTEXT(thread);
    }
    return PR_SUCCESS;
}
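
/*
 * The md.wait counter is a small signal/wait handshake shared by _pt_wait()
 * above and _MD_WakeupWaiter() below: the waiter decrements the counter and
 * blocks on the condition variable only when the count goes negative, while
 * the waker increments it before signalling.  A wakeup delivered before the
 * matching wait therefore leaves the counter positive, so the next
 * _pt_wait() returns immediately instead of losing the signal.
 */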
PR_IMPLEMENT(PRStatus)
_MD_WakeupWaiter(PRThread *thread)
{
    PRThread *me = _PR_MD_CURRENT_THREAD();
    PRInt32 pid, rv;
    PRIntn is;

    PR_ASSERT(_pr_md_idle_cpus >= 0);
    if (thread == NULL) {
        if (_pr_md_idle_cpus)
            _MD_Wakeup_CPUs();
    } else if (!_PR_IS_NATIVE_THREAD(thread)) {
        /*
         * If the thread is on my cpu's runq there is no need to
         * wakeup any cpus
         */
        if (!_PR_IS_NATIVE_THREAD(me)) {
            if (me->cpu != thread->cpu) {
                if (_pr_md_idle_cpus)
                    _MD_Wakeup_CPUs();
            }
        } else {
            if (_pr_md_idle_cpus)
                _MD_Wakeup_CPUs();
        }
    } else {
        PR_ASSERT(_PR_IS_NATIVE_THREAD(thread));
        if (!_PR_IS_NATIVE_THREAD(me))
            _PR_INTSOFF(is);

        pthread_mutex_lock(&thread->md.pthread_mutex);
        thread->md.wait++;
        rv = pthread_cond_signal(&thread->md.pthread_cond);
        PR_ASSERT(rv == 0);
        pthread_mutex_unlock(&thread->md.pthread_mutex);

        if (!_PR_IS_NATIVE_THREAD(me))
            _PR_FAST_INTSON(is);
    }
    return PR_SUCCESS;
}

/* These functions should not be called for AIX */
PR_IMPLEMENT(void)
_MD_YIELD(void)
{
    PR_NOT_REACHED("_MD_YIELD should not be called for AIX.");
}

PR_IMPLEMENT(PRStatus)
_MD_CreateThread(
    PRThread *thread,
    void (*start) (void *),
    PRThreadPriority priority,
    PRThreadScope scope,
    PRThreadState state,
    PRUint32 stackSize)
{
    PRIntn is;
    int rv;
    PRThread *me = _PR_MD_CURRENT_THREAD();
    pthread_attr_t attr;

    if (!_PR_IS_NATIVE_THREAD(me))
        _PR_INTSOFF(is);

    if (pthread_mutex_init(&thread->md.pthread_mutex, NULL) != 0) {
        if (!_PR_IS_NATIVE_THREAD(me))
            _PR_FAST_INTSON(is);
        return PR_FAILURE;
    }

    if (pthread_cond_init(&thread->md.pthread_cond, NULL) != 0) {
        pthread_mutex_destroy(&thread->md.pthread_mutex);
        if (!_PR_IS_NATIVE_THREAD(me))
            _PR_FAST_INTSON(is);
        return PR_FAILURE;
    }
    thread->flags |= _PR_GLOBAL_SCOPE;

    pthread_attr_init(&attr); /* initialize attr with default attributes */
    if (pthread_attr_setstacksize(&attr, (size_t) stackSize) != 0) {
        pthread_mutex_destroy(&thread->md.pthread_mutex);
        pthread_cond_destroy(&thread->md.pthread_cond);
        pthread_attr_destroy(&attr);
        if (!_PR_IS_NATIVE_THREAD(me))
            _PR_FAST_INTSON(is);
        return PR_FAILURE;
    }

    thread->md.wait = 0;
    rv = pthread_create(&thread->md.pthread, &attr, start, (void *)thread);
    if (0 == rv) {
        _MD_ATOMIC_INCREMENT(&_pr_md_pthreads_created);
        _MD_ATOMIC_INCREMENT(&_pr_md_pthreads);
        if (!_PR_IS_NATIVE_THREAD(me))
            _PR_FAST_INTSON(is);
        return PR_SUCCESS;
    } else {
        pthread_mutex_destroy(&thread->md.pthread_mutex);
        pthread_cond_destroy(&thread->md.pthread_cond);
        pthread_attr_destroy(&attr);
        _MD_ATOMIC_INCREMENT(&_pr_md_pthreads_failed);
        if (!_PR_IS_NATIVE_THREAD(me))
            _PR_FAST_INTSON(is);
        PR_SetError(PR_INSUFFICIENT_RESOURCES_ERROR, rv);
        return PR_FAILURE;
    }
}

PR_IMPLEMENT(void)
_MD_InitRunningCPU(struct _PRCPU *cpu)
{
    extern int _pr_md_pipefd[2];

    _MD_unix_init_running_cpu(cpu);
    cpu->md.pthread = pthread_self();
    if (_pr_md_pipefd[0] >= 0) {
        _PR_IOQ_MAX_OSFD(cpu) = _pr_md_pipefd[0];
#ifndef _PR_USE_POLL
        FD_SET(_pr_md_pipefd[0], &_PR_FD_READ_SET(cpu));
#endif
    }
}
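
/*
 * The shutdown logic below is compiled out.  If enabled, it would wake the
 * other CPU threads so they can notice the exit flag, then post to each
 * global thread parked on the dead-native (recycle) queue so those pthreads
 * can exit before the process does; the _pr_irix_* names suggest it was
 * carried over from the IRIX port.
 */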
void
_MD_CleanupBeforeExit(void)
{
#if 0
    extern PRInt32 _pr_cpus_exit;

    _pr_irix_exit_now = 1;
    if (_pr_numCPU > 1) {
        /*
         * Set a global flag, and wakeup all cpus which will notice the flag
         * and exit.
         */
        _pr_cpus_exit = getpid();
        _MD_Wakeup_CPUs();
        while(_pr_numCPU > 1) {
            _PR_WAIT_SEM(_pr_irix_exit_sem);
            _pr_numCPU--;
        }
    }
    /*
     * cause global threads on the recycle list to exit
     */
    _PR_DEADQ_LOCK;
    if (_PR_NUM_DEADNATIVE != 0) {
        PRThread *thread;
        PRCList *ptr;

        ptr = _PR_DEADNATIVEQ.next;
        while( ptr != &_PR_DEADNATIVEQ ) {
            thread = _PR_THREAD_PTR(ptr);
            _MD_CVAR_POST_SEM(thread);
            ptr = ptr->next;
        }
    }
    _PR_DEADQ_UNLOCK;
    while(_PR_NUM_DEADNATIVE > 1) {
        _PR_WAIT_SEM(_pr_irix_exit_sem);
        _PR_DEC_DEADNATIVE;
    }
#endif
}