michael@0: /* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ michael@0: /* This Source Code Form is subject to the terms of the Mozilla Public michael@0: * License, v. 2.0. If a copy of the MPL was not distributed with this michael@0: * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ michael@0: michael@0: #include "primpl.h" michael@0: michael@0: #include michael@0: michael@0: #include michael@0: #include michael@0: #include michael@0: #include michael@0: #include michael@0: #include michael@0: #include michael@0: #include michael@0: #include michael@0: #include michael@0: #include michael@0: #include michael@0: #include michael@0: #include michael@0: #include michael@0: #include michael@0: #include michael@0: michael@0: static void _MD_IrixIntervalInit(void); michael@0: michael@0: #if defined(_PR_PTHREADS) michael@0: /* michael@0: * for compatibility with classic nspr michael@0: */ michael@0: void _PR_IRIX_CHILD_PROCESS() michael@0: { michael@0: } michael@0: #else /* defined(_PR_PTHREADS) */ michael@0: michael@0: static void irix_detach_sproc(void); michael@0: char *_nspr_sproc_private; /* ptr. 
to private region in every sproc */ michael@0: michael@0: extern PRUintn _pr_numCPU; michael@0: michael@0: typedef struct nspr_arena { michael@0: PRCList links; michael@0: usptr_t *usarena; michael@0: } nspr_arena; michael@0: michael@0: #define ARENA_PTR(qp) \ michael@0: ((nspr_arena *) ((char*) (qp) - offsetof(nspr_arena , links))) michael@0: michael@0: static usptr_t *alloc_new_arena(void); michael@0: michael@0: PRCList arena_list = PR_INIT_STATIC_CLIST(&arena_list); michael@0: ulock_t arena_list_lock; michael@0: nspr_arena first_arena; michael@0: int _nspr_irix_arena_cnt = 1; michael@0: michael@0: PRCList sproc_list = PR_INIT_STATIC_CLIST(&sproc_list); michael@0: ulock_t sproc_list_lock; michael@0: michael@0: typedef struct sproc_data { michael@0: void (*entry) (void *, size_t); michael@0: unsigned inh; michael@0: void *arg; michael@0: caddr_t sp; michael@0: size_t len; michael@0: int *pid; michael@0: int creator_pid; michael@0: } sproc_data; michael@0: michael@0: typedef struct sproc_params { michael@0: PRCList links; michael@0: sproc_data sd; michael@0: } sproc_params; michael@0: michael@0: #define SPROC_PARAMS_PTR(qp) \ michael@0: ((sproc_params *) ((char*) (qp) - offsetof(sproc_params , links))) michael@0: michael@0: long _nspr_irix_lock_cnt = 0; michael@0: long _nspr_irix_sem_cnt = 0; michael@0: long _nspr_irix_pollsem_cnt = 0; michael@0: michael@0: usptr_t *_pr_usArena; michael@0: ulock_t _pr_heapLock; michael@0: michael@0: usema_t *_pr_irix_exit_sem; michael@0: PRInt32 _pr_irix_exit_now = 0; michael@0: PRInt32 _pr_irix_process_exit_code = 0; /* exit code for PR_ProcessExit */ michael@0: PRInt32 _pr_irix_process_exit = 0; /* process exiting due to call to michael@0: PR_ProcessExit */ michael@0: michael@0: int _pr_irix_primoridal_cpu_fd[2] = { -1, -1 }; michael@0: static void (*libc_exit)(int) = NULL; michael@0: static void *libc_handle = NULL; michael@0: michael@0: #define _NSPR_DEF_INITUSERS 100 /* default value of CONF_INITUSERS */ michael@0: #define 
_NSPR_DEF_INITSIZE (4 * 1024 * 1024) /* 4 MB */

int _irix_initusers = _NSPR_DEF_INITUSERS;
int _irix_initsize = _NSPR_DEF_INITSIZE;

PRIntn _pr_io_in_progress, _pr_clock_in_progress;

/* bookkeeping counters for sprocs created/failed, and a list of them */
PRInt32 _pr_md_irix_sprocs_created, _pr_md_irix_sprocs_failed;
PRInt32 _pr_md_irix_sprocs = 1;
PRCList _pr_md_irix_sproc_list =
PR_INIT_STATIC_CLIST(&_pr_md_irix_sproc_list);

sigset_t ints_off;
extern sigset_t timer_set;

#if !defined(PR_SETABORTSIG)
#define PR_SETABORTSIG 18
#endif
/*
 * terminate the entire application if any sproc exits abnormally
 */
PRBool _nspr_terminate_on_error = PR_TRUE;

/*
 * exported interface to set the shared arena parameters
 * (CONF_INITUSERS / CONF_INITSIZE); must be called before the arena is
 * created in _MD_InitLocks to have any effect
 */
void _PR_Irix_Set_Arena_Params(PRInt32 initusers, PRInt32 initsize)
{
    _irix_initusers = initusers;
    _irix_initsize = initsize;
}

/* create an additional shared arena backed by /dev/zero */
static usptr_t *alloc_new_arena()
{
    return(usinit("/dev/zero"));
}

/*
 * new_poll_sem
 *  Allocate a pollable semaphore (initial value 'val') for mdthr out of
 *  one of the shared arenas on arena_list, growing the arena list when
 *  every existing arena is full (usnewpollsema fails with ENOMEM).
 *  On success stores the semaphore and its owning arena in mdthr and
 *  bumps _nspr_irix_pollsem_cnt.  Interrupts are masked around the
 *  arena-list walk for local (non-native) threads.
 */
static PRStatus new_poll_sem(struct _MDThread *mdthr, int val)
{
    PRIntn _is;
    PRStatus rv = PR_SUCCESS;
    usema_t *sem = NULL;
    PRCList *qp;
    nspr_arena *arena;
    usptr_t *irix_arena;
    PRThread *me = _MD_GET_ATTACHED_THREAD();

    if (me && !_PR_IS_NATIVE_THREAD(me))
        _PR_INTSOFF(_is);
    _PR_LOCK(arena_list_lock);
    /* first try every arena already on the list */
    for (qp = arena_list.next; qp != &arena_list; qp = qp->next) {
        arena = ARENA_PTR(qp);
        sem = usnewpollsema(arena->usarena, val);
        if (sem != NULL) {
            mdthr->cvar_pollsem = sem;
            mdthr->pollsem_arena = arena->usarena;
            break;
        }
    }
    if (sem == NULL) {
        /*
         * If no space left in the arena allocate a new one.
         */
        if (errno == ENOMEM) {
            arena = PR_NEWZAP(nspr_arena);
            if (arena != NULL) {
                irix_arena = alloc_new_arena();
                if (irix_arena) {
                    PR_APPEND_LINK(&arena->links, &arena_list);
                    _nspr_irix_arena_cnt++;
                    arena->usarena = irix_arena;
                    sem = usnewpollsema(arena->usarena, val);
                    if (sem != NULL) {
                        mdthr->cvar_pollsem = sem;
                        mdthr->pollsem_arena = arena->usarena;
                    } else
                        rv = PR_FAILURE;
                } else {
                    PR_DELETE(arena);
                    rv = PR_FAILURE;
                }

            } else
                rv = PR_FAILURE;
        } else
            rv = PR_FAILURE;
    }
    _PR_UNLOCK(arena_list_lock);
    if (me && !_PR_IS_NATIVE_THREAD(me))
        _PR_FAST_INTSON(_is);
    if (rv == PR_SUCCESS)
        _MD_ATOMIC_INCREMENT(&_nspr_irix_pollsem_cnt);
    return rv;
}

/*
 * free_poll_sem
 *  Release a pollable semaphore previously obtained from new_poll_sem
 *  back to its owning arena and decrement the global count.
 */
static void free_poll_sem(struct _MDThread *mdthr)
{
    PRIntn _is;
    PRThread *me = _MD_GET_ATTACHED_THREAD();

    if (me && !_PR_IS_NATIVE_THREAD(me))
        _PR_INTSOFF(_is);
    usfreepollsema(mdthr->cvar_pollsem, mdthr->pollsem_arena);
    if (me && !_PR_IS_NATIVE_THREAD(me))
        _PR_FAST_INTSON(_is);
    _MD_ATOMIC_DECREMENT(&_nspr_irix_pollsem_cnt);
}

/*
 * new_lock
 *  Allocate a usnewlock-based lock for lockp from the shared arenas,
 *  growing the arena list on ENOMEM exactly like new_poll_sem above.
 *  Stores the lock and its arena in lockp; bumps _nspr_irix_lock_cnt on
 *  success.
 */
static PRStatus new_lock(struct _MDLock *lockp)
{
    PRIntn _is;
    PRStatus rv = PR_SUCCESS;
    ulock_t lock = NULL;
    PRCList *qp;
    nspr_arena *arena;
    usptr_t *irix_arena;
    PRThread *me = _MD_GET_ATTACHED_THREAD();

    if (me && !_PR_IS_NATIVE_THREAD(me))
        _PR_INTSOFF(_is);
    _PR_LOCK(arena_list_lock);
    for (qp = arena_list.next; qp != &arena_list; qp = qp->next) {
        arena = ARENA_PTR(qp);
        lock = usnewlock(arena->usarena);
        if (lock != NULL) {
            lockp->lock = lock;
            lockp->arena = arena->usarena;
            break;
        }
    }
    if (lock == NULL) {
        /*
         * If no space left in the arena allocate a new one.
         */
        if (errno == ENOMEM) {
            arena = PR_NEWZAP(nspr_arena);
            if (arena != NULL) {
                irix_arena = alloc_new_arena();
                if (irix_arena) {
                    PR_APPEND_LINK(&arena->links, &arena_list);
                    _nspr_irix_arena_cnt++;
                    arena->usarena = irix_arena;
                    lock = usnewlock(irix_arena);
                    if (lock != NULL) {
                        lockp->lock = lock;
                        lockp->arena = arena->usarena;
                    } else
                        rv = PR_FAILURE;
                } else {
                    PR_DELETE(arena);
                    rv = PR_FAILURE;
                }

            } else
                rv = PR_FAILURE;
        } else
            rv = PR_FAILURE;
    }
    _PR_UNLOCK(arena_list_lock);
    if (me && !_PR_IS_NATIVE_THREAD(me))
        _PR_FAST_INTSON(_is);
    if (rv == PR_SUCCESS)
        _MD_ATOMIC_INCREMENT(&_nspr_irix_lock_cnt);
    return rv;
}

/* return a lock allocated by new_lock to its arena */
static void free_lock(struct _MDLock *lockp)
{
    PRIntn _is;
    PRThread *me = _MD_GET_ATTACHED_THREAD();

    if (me && !_PR_IS_NATIVE_THREAD(me))
        _PR_INTSOFF(_is);
    usfreelock(lockp->lock, lockp->arena);
    if (me && !_PR_IS_NATIVE_THREAD(me))
        _PR_FAST_INTSON(_is);
    _MD_ATOMIC_DECREMENT(&_nspr_irix_lock_cnt);
}

/*
 * _MD_FREE_LOCK
 *  Public MD entry point wrapping free_lock.
 *  NOTE(review): interrupts are masked here AND again inside free_lock;
 *  presumably _PR_INTSOFF/_PR_FAST_INTSON nest safely -- confirm before
 *  changing.
 */
void _MD_FREE_LOCK(struct _MDLock *lockp)
{
    PRIntn _is;
    PRThread *me = _MD_GET_ATTACHED_THREAD();

    if (me && !_PR_IS_NATIVE_THREAD(me))
        _PR_INTSOFF(_is);
    free_lock(lockp);
    if (me && !_PR_IS_NATIVE_THREAD(me))
        _PR_FAST_INTSON(_is);
}

/*
 * _MD_get_attached_thread
 *  Return the thread pointer of the current thread if it is attached.
 *
 *  This function is needed for Irix because the thread-local-storage is
 *  implemented by mmapin'g a page with the MAP_LOCAL flag. This causes the
 *  sproc-private page to inherit contents of the page of the caller of
 *  sproc(); comparing the recorded sproc pid against get_pid() detects a
 *  stale inherited page.
 */
PRThread *_MD_get_attached_thread(void)
{

    if (_MD_GET_SPROC_PID() == get_pid())
        return _MD_THIS_THREAD();
    else
        return 0;
}

/*
 * _MD_get_current_thread
 *  Return the thread pointer of the current thread (attaching it if
 *  necessary).
 */
PRThread *_MD_get_current_thread(void)
{
    PRThread *me;

    me = _MD_GET_ATTACHED_THREAD();
    if (NULL == me) {
        me = _PRI_AttachThread(
            PR_USER_THREAD, PR_PRIORITY_NORMAL, NULL, 0);
    }
    PR_ASSERT(me != NULL);
    return(me);
}

/*
 * irix_detach_sproc
 *  auto-detach a sproc when it exits (only if this code attached it)
 */
void irix_detach_sproc(void)
{
    PRThread *me;

    me = _MD_GET_ATTACHED_THREAD();
    if ((me != NULL) && (me->flags & _PR_ATTACHED)) {
        _PRI_DetachThread();
    }
}


/* public MD entry point wrapping new_lock (see interrupt-nesting note
 * on _MD_FREE_LOCK above) */
PRStatus _MD_NEW_LOCK(struct _MDLock *lockp)
{
    PRStatus rv;
    PRIntn is;
    PRThread *me = _MD_GET_ATTACHED_THREAD();

    if (me && !_PR_IS_NATIVE_THREAD(me))
        _PR_INTSOFF(is);
    rv = new_lock(lockp);
    if (me && !_PR_IS_NATIVE_THREAD(me))
        _PR_FAST_INTSON(is);
    return rv;
}

/*
 * sigchld_handler
 *  SIGCLD handler: reap exited sprocs; if any sproc died on one of the
 *  fatal signals below, arrange for SIGKILL to be delivered to the whole
 *  share group and exit, terminating the application.
 */
static void
sigchld_handler(int sig)
{
    pid_t pid;
    int status;

    /*
     * If an sproc exited abnormally send a SIGKILL signal to all the
     * sprocs in the process to terminate the application
     */
    while ((pid = waitpid(0, &status, WNOHANG)) > 0) {
        if (WIFSIGNALED(status) && ((WTERMSIG(status) == SIGSEGV) ||
            (WTERMSIG(status) == SIGBUS) ||
            (WTERMSIG(status) == SIGABRT) ||
            (WTERMSIG(status) == SIGILL))) {

            prctl(PR_SETEXITSIG, SIGKILL);
            /* NOTE(review): passes the raw waitpid status, not an exit
             * code, to _exit -- looks intentional but verify */
            _exit(status);
        }
    }
}

/*
 * save_context_and_block
 *  SIGUSR1 handler used by _MD_SuspendCPU: save this sproc's register
 *  context in its jmp_buf, unblock the suspending sproc, then block
 *  ourselves until resumed via unblockproc.
 */
static void save_context_and_block(int sig)
{
    PRThread *me = _PR_MD_CURRENT_THREAD();
    _PRCPU *cpu = _PR_MD_CURRENT_CPU();

    /*
     * save context
     */
    (void) setjmp(me->md.jb);
    /*
     * unblock the suspending thread
     */
    if (me->cpu) {
        /*
         * I am a cpu thread, not a user-created GLOBAL thread
         */
        unblockproc(cpu->md.suspending_id);
    } else {
        unblockproc(me->md.suspending_id);
    }
    /*
     * now, block current thread
     */
    blockproc(getpid());
}

/*
** The irix kernel has a bug in it which causes async connect's which are
** interrupted by a signal to fail terribly (EADDRINUSE is returned).
** We work around the bug by blocking signals during the async connect
** attempt.
michael@0: */ michael@0: PRInt32 _MD_irix_connect( michael@0: PRInt32 osfd, const PRNetAddr *addr, PRInt32 addrlen, PRIntervalTime timeout) michael@0: { michael@0: PRInt32 rv; michael@0: sigset_t oldset; michael@0: michael@0: sigprocmask(SIG_BLOCK, &ints_off, &oldset); michael@0: rv = connect(osfd, addr, addrlen); michael@0: sigprocmask(SIG_SETMASK, &oldset, 0); michael@0: michael@0: return(rv); michael@0: } michael@0: michael@0: #include "prprf.h" michael@0: michael@0: /********************************************************************/ michael@0: /********************************************************************/ michael@0: /*************** Various thread like things for IRIX ****************/ michael@0: /********************************************************************/ michael@0: /********************************************************************/ michael@0: michael@0: void *_MD_GetSP(PRThread *t) michael@0: { michael@0: PRThread *me = _PR_MD_CURRENT_THREAD(); michael@0: void *sp; michael@0: michael@0: if (me == t) michael@0: (void) setjmp(t->md.jb); michael@0: michael@0: sp = (void *)(t->md.jb[JB_SP]); michael@0: PR_ASSERT((sp >= (void *) t->stack->stackBottom) && michael@0: (sp <= (void *) (t->stack->stackBottom + t->stack->stackSize))); michael@0: return(sp); michael@0: } michael@0: michael@0: void _MD_InitLocks() michael@0: { michael@0: char buf[200]; michael@0: char *init_users, *init_size; michael@0: michael@0: PR_snprintf(buf, sizeof(buf), "/dev/zero"); michael@0: michael@0: if (init_users = getenv("_NSPR_IRIX_INITUSERS")) michael@0: _irix_initusers = atoi(init_users); michael@0: michael@0: if (init_size = getenv("_NSPR_IRIX_INITSIZE")) michael@0: _irix_initsize = atoi(init_size); michael@0: michael@0: usconfig(CONF_INITUSERS, _irix_initusers); michael@0: usconfig(CONF_INITSIZE, _irix_initsize); michael@0: usconfig(CONF_AUTOGROW, 1); michael@0: usconfig(CONF_AUTORESV, 1); michael@0: if (usconfig(CONF_ARENATYPE, US_SHAREDONLY) < 0) { michael@0: 
perror("PR_Init: unable to config mutex arena"); michael@0: exit(-1); michael@0: } michael@0: michael@0: _pr_usArena = usinit(buf); michael@0: if (!_pr_usArena) { michael@0: fprintf(stderr, michael@0: "PR_Init: Error - unable to create lock/monitor arena\n"); michael@0: exit(-1); michael@0: } michael@0: _pr_heapLock = usnewlock(_pr_usArena); michael@0: _nspr_irix_lock_cnt++; michael@0: michael@0: arena_list_lock = usnewlock(_pr_usArena); michael@0: _nspr_irix_lock_cnt++; michael@0: michael@0: sproc_list_lock = usnewlock(_pr_usArena); michael@0: _nspr_irix_lock_cnt++; michael@0: michael@0: _pr_irix_exit_sem = usnewsema(_pr_usArena, 0); michael@0: _nspr_irix_sem_cnt = 1; michael@0: michael@0: first_arena.usarena = _pr_usArena; michael@0: PR_INIT_CLIST(&first_arena.links); michael@0: PR_APPEND_LINK(&first_arena.links, &arena_list); michael@0: } michael@0: michael@0: /* _PR_IRIX_CHILD_PROCESS is a private API for Server group */ michael@0: void _PR_IRIX_CHILD_PROCESS() michael@0: { michael@0: extern PRUint32 _pr_global_threads; michael@0: michael@0: PR_ASSERT(_PR_MD_CURRENT_CPU() == _pr_primordialCPU); michael@0: PR_ASSERT(_pr_numCPU == 1); michael@0: PR_ASSERT(_pr_global_threads == 0); michael@0: /* michael@0: * save the new pid michael@0: */ michael@0: _pr_primordialCPU->md.id = getpid(); michael@0: _MD_SET_SPROC_PID(getpid()); michael@0: } michael@0: michael@0: static PRStatus pr_cvar_wait_sem(PRThread *thread, PRIntervalTime timeout) michael@0: { michael@0: int rv; michael@0: michael@0: #ifdef _PR_USE_POLL michael@0: struct pollfd pfd; michael@0: int msecs; michael@0: michael@0: if (timeout == PR_INTERVAL_NO_TIMEOUT) michael@0: msecs = -1; michael@0: else michael@0: msecs = PR_IntervalToMilliseconds(timeout); michael@0: #else michael@0: struct timeval tv, *tvp; michael@0: fd_set rd; michael@0: michael@0: if(timeout == PR_INTERVAL_NO_TIMEOUT) michael@0: tvp = NULL; michael@0: else { michael@0: tv.tv_sec = PR_IntervalToSeconds(timeout); michael@0: tv.tv_usec = 
PR_IntervalToMicroseconds( michael@0: timeout - PR_SecondsToInterval(tv.tv_sec)); michael@0: tvp = &tv; michael@0: } michael@0: FD_ZERO(&rd); michael@0: FD_SET(thread->md.cvar_pollsemfd, &rd); michael@0: #endif michael@0: michael@0: /* michael@0: * call uspsema only if a previous select call on this semaphore michael@0: * did not timeout michael@0: */ michael@0: if (!thread->md.cvar_pollsem_select) { michael@0: rv = _PR_WAIT_SEM(thread->md.cvar_pollsem); michael@0: PR_ASSERT(rv >= 0); michael@0: } else michael@0: rv = 0; michael@0: again: michael@0: if(!rv) { michael@0: #ifdef _PR_USE_POLL michael@0: pfd.events = POLLIN; michael@0: pfd.fd = thread->md.cvar_pollsemfd; michael@0: rv = _MD_POLL(&pfd, 1, msecs); michael@0: #else michael@0: rv = _MD_SELECT(thread->md.cvar_pollsemfd + 1, &rd, NULL,NULL,tvp); michael@0: #endif michael@0: if ((rv == -1) && (errno == EINTR)) { michael@0: rv = 0; michael@0: goto again; michael@0: } michael@0: PR_ASSERT(rv >= 0); michael@0: } michael@0: michael@0: if (rv > 0) { michael@0: /* michael@0: * acquired the semaphore, call uspsema next time michael@0: */ michael@0: thread->md.cvar_pollsem_select = 0; michael@0: return PR_SUCCESS; michael@0: } else { michael@0: /* michael@0: * select timed out; must call select, not uspsema, when trying michael@0: * to acquire the semaphore the next time michael@0: */ michael@0: thread->md.cvar_pollsem_select = 1; michael@0: return PR_FAILURE; michael@0: } michael@0: } michael@0: michael@0: PRStatus _MD_wait(PRThread *thread, PRIntervalTime ticks) michael@0: { michael@0: if ( thread->flags & _PR_GLOBAL_SCOPE ) { michael@0: _MD_CHECK_FOR_EXIT(); michael@0: if (pr_cvar_wait_sem(thread, ticks) == PR_FAILURE) { michael@0: _MD_CHECK_FOR_EXIT(); michael@0: /* michael@0: * wait timed out michael@0: */ michael@0: _PR_THREAD_LOCK(thread); michael@0: if (thread->wait.cvar) { michael@0: /* michael@0: * The thread will remove itself from the waitQ michael@0: * of the cvar in _PR_WaitCondVar michael@0: */ 
michael@0: thread->wait.cvar = NULL; michael@0: thread->state = _PR_RUNNING; michael@0: _PR_THREAD_UNLOCK(thread); michael@0: } else { michael@0: _PR_THREAD_UNLOCK(thread); michael@0: /* michael@0: * This thread was woken up by a notifying thread michael@0: * at the same time as a timeout; so, consume the michael@0: * extra post operation on the semaphore michael@0: */ michael@0: _MD_CHECK_FOR_EXIT(); michael@0: pr_cvar_wait_sem(thread, PR_INTERVAL_NO_TIMEOUT); michael@0: } michael@0: _MD_CHECK_FOR_EXIT(); michael@0: } michael@0: } else { michael@0: _PR_MD_SWITCH_CONTEXT(thread); michael@0: } michael@0: return PR_SUCCESS; michael@0: } michael@0: michael@0: PRStatus _MD_WakeupWaiter(PRThread *thread) michael@0: { michael@0: PRThread *me = _PR_MD_CURRENT_THREAD(); michael@0: PRIntn is; michael@0: michael@0: PR_ASSERT(_pr_md_idle_cpus >= 0); michael@0: if (thread == NULL) { michael@0: if (_pr_md_idle_cpus) michael@0: _MD_Wakeup_CPUs(); michael@0: } else if (!_PR_IS_NATIVE_THREAD(thread)) { michael@0: if (_pr_md_idle_cpus) michael@0: _MD_Wakeup_CPUs(); michael@0: } else { michael@0: PR_ASSERT(_PR_IS_NATIVE_THREAD(thread)); michael@0: if (!_PR_IS_NATIVE_THREAD(me)) michael@0: _PR_INTSOFF(is); michael@0: _MD_CVAR_POST_SEM(thread); michael@0: if (!_PR_IS_NATIVE_THREAD(me)) michael@0: _PR_FAST_INTSON(is); michael@0: } michael@0: return PR_SUCCESS; michael@0: } michael@0: michael@0: void create_sproc (void (*entry) (void *, size_t), unsigned inh, michael@0: void *arg, caddr_t sp, size_t len, int *pid) michael@0: { michael@0: sproc_params sparams; michael@0: char data; michael@0: int rv; michael@0: PRThread *me = _PR_MD_CURRENT_THREAD(); michael@0: michael@0: if (!_PR_IS_NATIVE_THREAD(me) && (_PR_MD_CURRENT_CPU()->id == 0)) { michael@0: *pid = sprocsp(entry, /* startup func */ michael@0: inh, /* attribute flags */ michael@0: arg, /* thread param */ michael@0: sp, /* stack address */ michael@0: len); /* stack size */ michael@0: } else { michael@0: sparams.sd.entry = entry; 
michael@0: sparams.sd.inh = inh; michael@0: sparams.sd.arg = arg; michael@0: sparams.sd.sp = sp; michael@0: sparams.sd.len = len; michael@0: sparams.sd.pid = pid; michael@0: sparams.sd.creator_pid = getpid(); michael@0: _PR_LOCK(sproc_list_lock); michael@0: PR_APPEND_LINK(&sparams.links, &sproc_list); michael@0: rv = write(_pr_irix_primoridal_cpu_fd[1], &data, 1); michael@0: PR_ASSERT(rv == 1); michael@0: _PR_UNLOCK(sproc_list_lock); michael@0: blockproc(getpid()); michael@0: } michael@0: } michael@0: michael@0: /* michael@0: * _PR_MD_WAKEUP_PRIMORDIAL_CPU michael@0: * michael@0: * wakeup cpu 0 michael@0: */ michael@0: michael@0: void _PR_MD_WAKEUP_PRIMORDIAL_CPU() michael@0: { michael@0: char data = '0'; michael@0: int rv; michael@0: michael@0: rv = write(_pr_irix_primoridal_cpu_fd[1], &data, 1); michael@0: PR_ASSERT(rv == 1); michael@0: } michael@0: michael@0: /* michael@0: * _PR_MD_primordial_cpu michael@0: * michael@0: * process events that need to executed by the primordial cpu on each michael@0: * iteration through the idle loop michael@0: */ michael@0: michael@0: void _PR_MD_primordial_cpu() michael@0: { michael@0: PRCList *qp; michael@0: sproc_params *sp; michael@0: int pid; michael@0: michael@0: _PR_LOCK(sproc_list_lock); michael@0: while ((qp = sproc_list.next) != &sproc_list) { michael@0: sp = SPROC_PARAMS_PTR(qp); michael@0: PR_REMOVE_LINK(&sp->links); michael@0: pid = sp->sd.creator_pid; michael@0: (*(sp->sd.pid)) = sprocsp(sp->sd.entry, /* startup func */ michael@0: sp->sd.inh, /* attribute flags */ michael@0: sp->sd.arg, /* thread param */ michael@0: sp->sd.sp, /* stack address */ michael@0: sp->sd.len); /* stack size */ michael@0: unblockproc(pid); michael@0: } michael@0: _PR_UNLOCK(sproc_list_lock); michael@0: } michael@0: michael@0: PRStatus _MD_CreateThread(PRThread *thread, michael@0: void (*start)(void *), michael@0: PRThreadPriority priority, michael@0: PRThreadScope scope, michael@0: PRThreadState state, michael@0: PRUint32 stackSize) 
{
    typedef void (*SprocEntry) (void *, size_t);
    SprocEntry spentry = (SprocEntry)start;
    PRIntn is;
    PRThread *me = _PR_MD_CURRENT_THREAD();
    PRInt32 pid;
    PRStatus rv;

    if (!_PR_IS_NATIVE_THREAD(me))
        _PR_INTSOFF(is);
    thread->md.cvar_pollsem_select = 0;
    thread->flags |= _PR_GLOBAL_SCOPE;

    thread->md.cvar_pollsemfd = -1;
    /* semaphore used to block/wake this thread in _MD_wait */
    if (new_poll_sem(&thread->md,0) == PR_FAILURE) {
        if (!_PR_IS_NATIVE_THREAD(me))
            _PR_FAST_INTSON(is);
        return PR_FAILURE;
    }
    thread->md.cvar_pollsemfd =
        _PR_OPEN_POLL_SEM(thread->md.cvar_pollsem);
    if ((thread->md.cvar_pollsemfd < 0)) {
        free_poll_sem(&thread->md);
        if (!_PR_IS_NATIVE_THREAD(me))
            _PR_FAST_INTSON(is);
        return PR_FAILURE;
    }

    create_sproc(spentry,       /* startup func */
        PR_SALL,                /* attribute flags */
        (void *)thread,         /* thread param */
        NULL,                   /* stack address */
        stackSize, &pid);       /* stack size */
    if (pid > 0) {
        _MD_ATOMIC_INCREMENT(&_pr_md_irix_sprocs_created);
        _MD_ATOMIC_INCREMENT(&_pr_md_irix_sprocs);
        rv = PR_SUCCESS;
        if (!_PR_IS_NATIVE_THREAD(me))
            _PR_FAST_INTSON(is);
        return rv;
    } else {
        /* sproc creation failed: release the semaphore resources */
        close(thread->md.cvar_pollsemfd);
        thread->md.cvar_pollsemfd = -1;
        free_poll_sem(&thread->md);
        thread->md.cvar_pollsem = NULL;
        _MD_ATOMIC_INCREMENT(&_pr_md_irix_sprocs_failed);
        if (!_PR_IS_NATIVE_THREAD(me))
            _PR_FAST_INTSON(is);
        return PR_FAILURE;
    }
}

/*
 * _MD_CleanThread
 *  Release the per-thread cvar semaphore resources of a global-scope
 *  thread (no-op for local threads).
 */
void _MD_CleanThread(PRThread *thread)
{
    if (thread->flags & _PR_GLOBAL_SCOPE) {
        close(thread->md.cvar_pollsemfd);
        thread->md.cvar_pollsemfd = -1;
        free_poll_sem(&thread->md);
        thread->md.cvar_pollsem = NULL;
    }
}

/* thread priorities are not supported by this MD implementation */
void _MD_SetPriority(_MDThread *thread, PRThreadPriority newPri)
{
    return;
}

extern void _MD_unix_terminate_waitpid_daemon(void);

/*
 * _MD_CleanupBeforeExit
 *  Orderly shutdown before process exit: stop the waitpid daemon, signal
 *  all other cpus to exit (waiting on _pr_irix_exit_sem for each), then
 *  wake every recycled global thread on the dead-native queue so it can
 *  exit, again waiting for all but one.
 */
void
_MD_CleanupBeforeExit(void)
{
    extern PRInt32 _pr_cpus_exit;

    _MD_unix_terminate_waitpid_daemon();

    _pr_irix_exit_now = 1;
    if (_pr_numCPU > 1) {
        /*
         * Set a global flag, and wakeup all cpus which will notice the flag
         * and exit.
         */
        _pr_cpus_exit = getpid();
        _MD_Wakeup_CPUs();
        while(_pr_numCPU > 1) {
            _PR_WAIT_SEM(_pr_irix_exit_sem);
            _pr_numCPU--;
        }
    }
    /*
     * cause global threads on the recycle list to exit
     */
    _PR_DEADQ_LOCK;
    if (_PR_NUM_DEADNATIVE != 0) {
        PRThread *thread;
        PRCList *ptr;

        ptr = _PR_DEADNATIVEQ.next;
        while( ptr != &_PR_DEADNATIVEQ ) {
            thread = _PR_THREAD_PTR(ptr);
            _MD_CVAR_POST_SEM(thread);
            ptr = ptr->next;
        }
    }
    _PR_DEADQ_UNLOCK;
    while(_PR_NUM_DEADNATIVE > 1) {
        _PR_WAIT_SEM(_pr_irix_exit_sem);
        _PR_DEC_DEADNATIVE;
    }
}

#ifdef _PR_HAVE_SGI_PRDA_PROCMASK
extern void __sgi_prda_procmask(int);
#endif

/*
 * _MD_InitAttachedThread
 *  MD initialization for a thread attached from outside nspr: allocate
 *  and open its cvar semaphore, then run the common _MD_InitThread,
 *  rolling everything back on failure.
 */
PRStatus
_MD_InitAttachedThread(PRThread *thread, PRBool wakeup_parent)
{
    PRStatus rv = PR_SUCCESS;

    if (thread->flags & _PR_GLOBAL_SCOPE) {
        if (new_poll_sem(&thread->md,0) == PR_FAILURE) {
            return PR_FAILURE;
        }
        thread->md.cvar_pollsemfd =
            _PR_OPEN_POLL_SEM(thread->md.cvar_pollsem);
        if ((thread->md.cvar_pollsemfd < 0)) {
            free_poll_sem(&thread->md);
            return PR_FAILURE;
        }
        if (_MD_InitThread(thread, PR_FALSE) == PR_FAILURE) {
            close(thread->md.cvar_pollsemfd);
            thread->md.cvar_pollsemfd = -1;
            free_poll_sem(&thread->md);
            thread->md.cvar_pollsem = NULL;
            return PR_FAILURE;
        }
    }
    return rv;
}

/*
 * _MD_InitThread
 *  Per-sproc MD initialization for a global-scope thread: record the
 *  pid, install the SIGUSR1 suspend handler and the SIGCLD reaper, and
 *  request a share-group-wide abort signal via prctl(PR_SETABORTSIG).
 */
PRStatus
_MD_InitThread(PRThread *thread, PRBool wakeup_parent)
{
    struct sigaction sigact;
    PRStatus rv = PR_SUCCESS;

    if (thread->flags & _PR_GLOBAL_SCOPE) {
        thread->md.id = getpid();
        setblockproccnt(thread->md.id, 0);
        _MD_SET_SPROC_PID(getpid());
#ifdef _PR_HAVE_SGI_PRDA_PROCMASK
        /*
         * enable user-level processing of sigprocmask(); this is an
         * undocumented feature available in Irix 6.2, 6.3, 6.4 and 6.5
         */
        __sgi_prda_procmask(USER_LEVEL);
#endif
        /*
         * set up SIGUSR1 handler; this is used to save state
         */
        sigact.sa_handler = save_context_and_block;
        sigact.sa_flags = SA_RESTART;
        /*
         * Must mask clock interrupts
         */
        sigact.sa_mask = timer_set;
        sigaction(SIGUSR1, &sigact, 0);


        /*
         * PR_SETABORTSIG is a new command implemented in a patch to
         * Irix 6.2, 6.3 and 6.4. This causes a signal to be sent to all
         * sprocs in the process when one of them terminates abnormally
         *
         */
        if (prctl(PR_SETABORTSIG, SIGKILL) < 0) {
            /*
             * if (errno == EINVAL)
             *
             *  PR_SETABORTSIG not supported under this OS.
             *  You may want to get a recent kernel rollup patch that
             *  supports this feature.
             */
        }
        /*
         * SIGCLD handler for detecting abnormally-terminating
         * sprocs and for reaping sprocs
         */
        sigact.sa_handler = sigchld_handler;
        sigact.sa_flags = SA_RESTART;
        sigact.sa_mask = ints_off;
        sigaction(SIGCLD, &sigact, NULL);
    }
    return rv;
}

/*
 * PR_Cleanup should be executed on the primordial sproc; migrate the thread
 * to the primordial cpu (cpu 0) by re-queueing it on cpu 0's run queue and
 * switching context, then returning once it is running there
 */

void _PR_MD_PRE_CLEANUP(PRThread *me)
{
    PRIntn is;
    _PRCPU *cpu = _pr_primordialCPU;

    PR_ASSERT(cpu);

    me->flags |= _PR_BOUND_THREAD;

    if (me->cpu->id != 0) {
        _PR_INTSOFF(is);
        _PR_RUNQ_LOCK(cpu);
        me->cpu = cpu;
        me->state = _PR_RUNNABLE;
        _PR_ADD_RUNQ(me, cpu, me->priority);
        _PR_RUNQ_UNLOCK(cpu);
        _MD_Wakeup_CPUs();

        _PR_MD_SWITCH_CONTEXT(me);

        _PR_FAST_INTSON(is);
        PR_ASSERT(me->cpu->id == 0);
    }
}

/*
 * process exiting
 *  On the primordial sproc: arrange SIGKILL for the share group and
 *  _exit directly.  On any other sproc: record the exit code, wake the
 *  primordial cpu through its pipe, and wait to be killed.
 */
PR_EXTERN(void ) _MD_exit(PRIntn status)
{
    PRThread *me = _PR_MD_CURRENT_THREAD();

    /*
     * the exit code of the process is the exit code of the primordial
     * sproc
     */
    if (!_PR_IS_NATIVE_THREAD(me) && (_PR_MD_CURRENT_CPU()->id == 0)) {
        /*
         * primordial sproc case: call _exit directly
         * Cause SIGKILL to be sent to other sprocs
         */
        prctl(PR_SETEXITSIG, SIGKILL);
        _exit(status);
    } else {
        int rv;
        char data;
        sigset_t set;

        /*
         * non-primordial sproc case: cause the primordial sproc, cpu 0,
         * to wakeup and call _exit
         */
        _pr_irix_process_exit = 1;
        _pr_irix_process_exit_code = status;
        rv = write(_pr_irix_primoridal_cpu_fd[1], &data, 1);
        PR_ASSERT(rv == 1);
        /*
         * block all signals and wait for SIGKILL to terminate this sproc
         */
        sigfillset(&set);
        sigsuspend(&set);
        /*
         * this code doesn't (shouldn't) execute
         */
        prctl(PR_SETEXITSIG, SIGKILL);
        _exit(status);
    }
}

/*
 * Override the exit() function in libc to cause the process to exit
 * when the primordial/main nspr thread calls exit. Calls to exit by any
 * other thread simply result in a call to the exit function in libc.
 * The exit code of the process is the exit code of the primordial
 * sproc.
michael@0: */ michael@0: michael@0: void exit(int status) michael@0: { michael@0: PRThread *me, *thr; michael@0: PRCList *qp; michael@0: michael@0: if (!_pr_initialized) { michael@0: if (!libc_exit) { michael@0: michael@0: if (!libc_handle) michael@0: libc_handle = dlopen("libc.so",RTLD_NOW); michael@0: if (libc_handle) michael@0: libc_exit = (void (*)(int)) dlsym(libc_handle, "exit"); michael@0: } michael@0: if (libc_exit) michael@0: (*libc_exit)(status); michael@0: else michael@0: _exit(status); michael@0: } michael@0: michael@0: me = _PR_MD_CURRENT_THREAD(); michael@0: michael@0: if (me == NULL) /* detached thread */ michael@0: (*libc_exit)(status); michael@0: michael@0: PR_ASSERT(_PR_IS_NATIVE_THREAD(me) || michael@0: (_PR_MD_CURRENT_CPU())->id == me->cpu->id); michael@0: michael@0: if (me->flags & _PR_PRIMORDIAL) { michael@0: michael@0: me->flags |= _PR_BOUND_THREAD; michael@0: michael@0: PR_ASSERT((_PR_MD_CURRENT_CPU())->id == me->cpu->id); michael@0: if (me->cpu->id != 0) { michael@0: _PRCPU *cpu = _pr_primordialCPU; michael@0: PRIntn is; michael@0: michael@0: _PR_INTSOFF(is); michael@0: _PR_RUNQ_LOCK(cpu); michael@0: me->cpu = cpu; michael@0: me->state = _PR_RUNNABLE; michael@0: _PR_ADD_RUNQ(me, cpu, me->priority); michael@0: _PR_RUNQ_UNLOCK(cpu); michael@0: _MD_Wakeup_CPUs(); michael@0: michael@0: _PR_MD_SWITCH_CONTEXT(me); michael@0: michael@0: _PR_FAST_INTSON(is); michael@0: } michael@0: michael@0: PR_ASSERT((_PR_MD_CURRENT_CPU())->id == 0); michael@0: michael@0: if (prctl(PR_GETNSHARE) > 1) { michael@0: #define SPROC_EXIT_WAIT_TIME 5 michael@0: int sleep_cnt = SPROC_EXIT_WAIT_TIME; michael@0: michael@0: /* michael@0: * sprocs still running; caue cpus and recycled global threads michael@0: * to exit michael@0: */ michael@0: _pr_irix_exit_now = 1; michael@0: if (_pr_numCPU > 1) { michael@0: _MD_Wakeup_CPUs(); michael@0: } michael@0: _PR_DEADQ_LOCK; michael@0: if (_PR_NUM_DEADNATIVE != 0) { michael@0: PRThread *thread; michael@0: PRCList *ptr; michael@0: 
michael@0: ptr = _PR_DEADNATIVEQ.next; michael@0: while( ptr != &_PR_DEADNATIVEQ ) { michael@0: thread = _PR_THREAD_PTR(ptr); michael@0: _MD_CVAR_POST_SEM(thread); michael@0: ptr = ptr->next; michael@0: } michael@0: } michael@0: michael@0: while (sleep_cnt-- > 0) { michael@0: if (waitpid(0, NULL, WNOHANG) >= 0) michael@0: sleep(1); michael@0: else michael@0: break; michael@0: } michael@0: prctl(PR_SETEXITSIG, SIGKILL); michael@0: } michael@0: (*libc_exit)(status); michael@0: } else { michael@0: /* michael@0: * non-primordial thread; simply call exit in libc. michael@0: */ michael@0: (*libc_exit)(status); michael@0: } michael@0: } michael@0: michael@0: michael@0: void michael@0: _MD_InitRunningCPU(_PRCPU *cpu) michael@0: { michael@0: extern int _pr_md_pipefd[2]; michael@0: michael@0: _MD_unix_init_running_cpu(cpu); michael@0: cpu->md.id = getpid(); michael@0: _MD_SET_SPROC_PID(getpid()); michael@0: if (_pr_md_pipefd[0] >= 0) { michael@0: _PR_IOQ_MAX_OSFD(cpu) = _pr_md_pipefd[0]; michael@0: #ifndef _PR_USE_POLL michael@0: FD_SET(_pr_md_pipefd[0], &_PR_FD_READ_SET(cpu)); michael@0: #endif michael@0: } michael@0: } michael@0: michael@0: void michael@0: _MD_ExitThread(PRThread *thread) michael@0: { michael@0: if (thread->flags & _PR_GLOBAL_SCOPE) { michael@0: _MD_ATOMIC_DECREMENT(&_pr_md_irix_sprocs); michael@0: _MD_CLEAN_THREAD(thread); michael@0: _MD_SET_CURRENT_THREAD(NULL); michael@0: } michael@0: } michael@0: michael@0: void michael@0: _MD_SuspendCPU(_PRCPU *cpu) michael@0: { michael@0: PRInt32 rv; michael@0: michael@0: cpu->md.suspending_id = getpid(); michael@0: rv = kill(cpu->md.id, SIGUSR1); michael@0: PR_ASSERT(rv == 0); michael@0: /* michael@0: * now, block the current thread/cpu until woken up by the suspended michael@0: * thread from it's SIGUSR1 signal handler michael@0: */ michael@0: blockproc(getpid()); michael@0: michael@0: } michael@0: michael@0: void michael@0: _MD_ResumeCPU(_PRCPU *cpu) michael@0: { michael@0: unblockproc(cpu->md.id); michael@0: } 
michael@0: michael@0: #if 0 michael@0: /* michael@0: * save the register context of a suspended sproc michael@0: */ michael@0: void get_context(PRThread *thr) michael@0: { michael@0: int len, fd; michael@0: char pidstr[24]; michael@0: char path[24]; michael@0: michael@0: /* michael@0: * open the file corresponding to this process in procfs michael@0: */ michael@0: sprintf(path,"/proc/%s","00000"); michael@0: len = strlen(path); michael@0: sprintf(pidstr,"%d",thr->md.id); michael@0: len -= strlen(pidstr); michael@0: sprintf(path + len,"%s",pidstr); michael@0: fd = open(path,O_RDONLY); michael@0: if (fd >= 0) { michael@0: (void) ioctl(fd, PIOCGREG, thr->md.gregs); michael@0: close(fd); michael@0: } michael@0: return; michael@0: } michael@0: #endif /* 0 */ michael@0: michael@0: void michael@0: _MD_SuspendThread(PRThread *thread) michael@0: { michael@0: PRInt32 rv; michael@0: michael@0: PR_ASSERT((thread->flags & _PR_GLOBAL_SCOPE) && michael@0: _PR_IS_GCABLE_THREAD(thread)); michael@0: michael@0: thread->md.suspending_id = getpid(); michael@0: rv = kill(thread->md.id, SIGUSR1); michael@0: PR_ASSERT(rv == 0); michael@0: /* michael@0: * now, block the current thread/cpu until woken up by the suspended michael@0: * thread from it's SIGUSR1 signal handler michael@0: */ michael@0: blockproc(getpid()); michael@0: } michael@0: michael@0: void michael@0: _MD_ResumeThread(PRThread *thread) michael@0: { michael@0: PR_ASSERT((thread->flags & _PR_GLOBAL_SCOPE) && michael@0: _PR_IS_GCABLE_THREAD(thread)); michael@0: (void)unblockproc(thread->md.id); michael@0: } michael@0: michael@0: /* michael@0: * return the set of processors available for scheduling procs in the michael@0: * "mask" argument michael@0: */ michael@0: PRInt32 _MD_GetThreadAffinityMask(PRThread *unused, PRUint32 *mask) michael@0: { michael@0: PRInt32 nprocs, rv; michael@0: struct pda_stat *pstat; michael@0: #define MAX_PROCESSORS 32 michael@0: michael@0: nprocs = sysmp(MP_NPROCS); michael@0: if (nprocs < 0) 
michael@0: return(-1); michael@0: pstat = (struct pda_stat*)PR_MALLOC(sizeof(struct pda_stat) * nprocs); michael@0: if (pstat == NULL) michael@0: return(-1); michael@0: rv = sysmp(MP_STAT, pstat); michael@0: if (rv < 0) { michael@0: PR_DELETE(pstat); michael@0: return(-1); michael@0: } michael@0: /* michael@0: * look at the first 32 cpus michael@0: */ michael@0: nprocs = (nprocs > MAX_PROCESSORS) ? MAX_PROCESSORS : nprocs; michael@0: *mask = 0; michael@0: while (nprocs) { michael@0: if ((pstat->p_flags & PDAF_ENABLED) && michael@0: !(pstat->p_flags & PDAF_ISOLATED)) { michael@0: *mask |= (1 << pstat->p_cpuid); michael@0: } michael@0: nprocs--; michael@0: pstat++; michael@0: } michael@0: return 0; michael@0: } michael@0: michael@0: static char *_thr_state[] = { michael@0: "UNBORN", michael@0: "RUNNABLE", michael@0: "RUNNING", michael@0: "LOCK_WAIT", michael@0: "COND_WAIT", michael@0: "JOIN_WAIT", michael@0: "IO_WAIT", michael@0: "SUSPENDED", michael@0: "DEAD" michael@0: }; michael@0: michael@0: void _PR_List_Threads() michael@0: { michael@0: PRThread *thr; michael@0: void *handle; michael@0: struct _PRCPU *cpu; michael@0: PRCList *qp; michael@0: int len, fd; michael@0: char pidstr[24]; michael@0: char path[24]; michael@0: prpsinfo_t pinfo; michael@0: michael@0: michael@0: printf("\n%s %-s\n"," ","LOCAL Threads"); michael@0: printf("%s %-s\n"," ","----- -------"); michael@0: printf("%s %-14s %-10s %-12s %-3s %-10s %-10s %-12s\n\n"," ", michael@0: "Thread", "State", "Wait-Handle", michael@0: "Cpu","Stk-Base","Stk-Sz","SP"); michael@0: for (qp = _PR_ACTIVE_LOCAL_THREADQ().next; michael@0: qp != &_PR_ACTIVE_LOCAL_THREADQ(); qp = qp->next) { michael@0: thr = _PR_ACTIVE_THREAD_PTR(qp); michael@0: printf("%s 0x%-12x %-10s "," ",thr,_thr_state[thr->state]); michael@0: if (thr->state == _PR_LOCK_WAIT) michael@0: handle = thr->wait.lock; michael@0: else if (thr->state == _PR_COND_WAIT) michael@0: handle = thr->wait.cvar; michael@0: else michael@0: handle = NULL; michael@0: if 
(handle) michael@0: printf("0x%-10x ",handle); michael@0: else michael@0: printf("%-12s "," "); michael@0: printf("%-3d ",thr->cpu->id); michael@0: printf("0x%-8x ",thr->stack->stackBottom); michael@0: printf("0x%-8x ",thr->stack->stackSize); michael@0: printf("0x%-10x\n",thr->md.jb[JB_SP]); michael@0: } michael@0: michael@0: printf("\n%s %-s\n"," ","GLOBAL Threads"); michael@0: printf("%s %-s\n"," ","------ -------"); michael@0: printf("%s %-14s %-6s %-12s %-12s %-12s %-12s\n\n"," ","Thread", michael@0: "Pid","State","Wait-Handle", michael@0: "Stk-Base","Stk-Sz"); michael@0: michael@0: for (qp = _PR_ACTIVE_GLOBAL_THREADQ().next; michael@0: qp != &_PR_ACTIVE_GLOBAL_THREADQ(); qp = qp->next) { michael@0: thr = _PR_ACTIVE_THREAD_PTR(qp); michael@0: if (thr->cpu != NULL) michael@0: continue; /* it is a cpu thread */ michael@0: printf("%s 0x%-12x %-6d "," ",thr,thr->md.id); michael@0: /* michael@0: * check if the sproc is still running michael@0: * first call prctl(PR_GETSHMASK,pid) to check if michael@0: * the process is part of the share group (the pid michael@0: * could have been recycled by the OS) michael@0: */ michael@0: if (prctl(PR_GETSHMASK,thr->md.id) < 0) { michael@0: printf("%-12s\n","TERMINATED"); michael@0: continue; michael@0: } michael@0: /* michael@0: * Now, check if the sproc terminated and is in zombie michael@0: * state michael@0: */ michael@0: sprintf(path,"/proc/pinfo/%s","00000"); michael@0: len = strlen(path); michael@0: sprintf(pidstr,"%d",thr->md.id); michael@0: len -= strlen(pidstr); michael@0: sprintf(path + len,"%s",pidstr); michael@0: fd = open(path,O_RDONLY); michael@0: if (fd >= 0) { michael@0: if (ioctl(fd, PIOCPSINFO, &pinfo) < 0) michael@0: printf("%-12s ","TERMINATED"); michael@0: else if (pinfo.pr_zomb) michael@0: printf("%-12s ","TERMINATED"); michael@0: else michael@0: printf("%-12s ",_thr_state[thr->state]); michael@0: close(fd); michael@0: } else { michael@0: printf("%-12s ","TERMINATED"); michael@0: } michael@0: michael@0: if 
(thr->state == _PR_LOCK_WAIT) michael@0: handle = thr->wait.lock; michael@0: else if (thr->state == _PR_COND_WAIT) michael@0: handle = thr->wait.cvar; michael@0: else michael@0: handle = NULL; michael@0: if (handle) michael@0: printf("%-12x ",handle); michael@0: else michael@0: printf("%-12s "," "); michael@0: printf("0x%-10x ",thr->stack->stackBottom); michael@0: printf("0x%-10x\n",thr->stack->stackSize); michael@0: } michael@0: michael@0: printf("\n%s %-s\n"," ","CPUs"); michael@0: printf("%s %-s\n"," ","----"); michael@0: printf("%s %-14s %-6s %-12s \n\n"," ","Id","Pid","State"); michael@0: michael@0: michael@0: for (qp = _PR_CPUQ().next; qp != &_PR_CPUQ(); qp = qp->next) { michael@0: cpu = _PR_CPU_PTR(qp); michael@0: printf("%s %-14d %-6d "," ",cpu->id,cpu->md.id); michael@0: /* michael@0: * check if the sproc is still running michael@0: * first call prctl(PR_GETSHMASK,pid) to check if michael@0: * the process is part of the share group (the pid michael@0: * could have been recycled by the OS) michael@0: */ michael@0: if (prctl(PR_GETSHMASK,cpu->md.id) < 0) { michael@0: printf("%-12s\n","TERMINATED"); michael@0: continue; michael@0: } michael@0: /* michael@0: * Now, check if the sproc terminated and is in zombie michael@0: * state michael@0: */ michael@0: sprintf(path,"/proc/pinfo/%s","00000"); michael@0: len = strlen(path); michael@0: sprintf(pidstr,"%d",cpu->md.id); michael@0: len -= strlen(pidstr); michael@0: sprintf(path + len,"%s",pidstr); michael@0: fd = open(path,O_RDONLY); michael@0: if (fd >= 0) { michael@0: if (ioctl(fd, PIOCPSINFO, &pinfo) < 0) michael@0: printf("%-12s\n","TERMINATED"); michael@0: else if (pinfo.pr_zomb) michael@0: printf("%-12s\n","TERMINATED"); michael@0: else michael@0: printf("%-12s\n","RUNNING"); michael@0: close(fd); michael@0: } else { michael@0: printf("%-12s\n","TERMINATED"); michael@0: } michael@0: michael@0: } michael@0: fflush(stdout); michael@0: } michael@0: #endif /* defined(_PR_PTHREADS) */ michael@0: michael@0: PRWord 
*_MD_HomeGCRegisters(PRThread *t, int isCurrent, int *np) michael@0: { michael@0: #if !defined(_PR_PTHREADS) michael@0: if (isCurrent) { michael@0: (void) setjmp(t->md.jb); michael@0: } michael@0: *np = sizeof(t->md.jb) / sizeof(PRWord); michael@0: return (PRWord *) (t->md.jb); michael@0: #else michael@0: *np = 0; michael@0: return NULL; michael@0: #endif michael@0: } michael@0: michael@0: void _MD_EarlyInit(void) michael@0: { michael@0: #if !defined(_PR_PTHREADS) michael@0: char *eval; michael@0: int fd; michael@0: extern int __ateachexit(void (*func)(void)); michael@0: michael@0: sigemptyset(&ints_off); michael@0: sigaddset(&ints_off, SIGALRM); michael@0: sigaddset(&ints_off, SIGIO); michael@0: sigaddset(&ints_off, SIGCLD); michael@0: michael@0: if (eval = getenv("_NSPR_TERMINATE_ON_ERROR")) michael@0: _nspr_terminate_on_error = (0 == atoi(eval) == 0) ? PR_FALSE : PR_TRUE; michael@0: michael@0: fd = open("/dev/zero",O_RDWR , 0); michael@0: if (fd < 0) { michael@0: perror("open /dev/zero failed"); michael@0: exit(1); michael@0: } michael@0: /* michael@0: * Set up the sproc private data area. michael@0: * This region exists at the same address, _nspr_sproc_private, for michael@0: * every sproc, but each sproc gets a private copy of the region. 
michael@0: */ michael@0: _nspr_sproc_private = (char*)mmap(0, _pr_pageSize, PROT_READ | PROT_WRITE, michael@0: MAP_PRIVATE| MAP_LOCAL, fd, 0); michael@0: if (_nspr_sproc_private == (void*)-1) { michael@0: perror("mmap /dev/zero failed"); michael@0: exit(1); michael@0: } michael@0: _MD_SET_SPROC_PID(getpid()); michael@0: close(fd); michael@0: __ateachexit(irix_detach_sproc); michael@0: #endif michael@0: _MD_IrixIntervalInit(); michael@0: } /* _MD_EarlyInit */ michael@0: michael@0: void _MD_IrixInit(void) michael@0: { michael@0: #if !defined(_PR_PTHREADS) michael@0: struct sigaction sigact; michael@0: PRThread *me = _PR_MD_CURRENT_THREAD(); michael@0: int rv; michael@0: michael@0: #ifdef _PR_HAVE_SGI_PRDA_PROCMASK michael@0: /* michael@0: * enable user-level processing of sigprocmask(); this is an undocumented michael@0: * feature available in Irix 6.2, 6.3, 6.4 and 6.5 michael@0: */ michael@0: __sgi_prda_procmask(USER_LEVEL); michael@0: #endif michael@0: michael@0: /* michael@0: * set up SIGUSR1 handler; this is used to save state michael@0: * during PR_SuspendAll michael@0: */ michael@0: sigact.sa_handler = save_context_and_block; michael@0: sigact.sa_flags = SA_RESTART; michael@0: sigact.sa_mask = ints_off; michael@0: sigaction(SIGUSR1, &sigact, 0); michael@0: michael@0: /* michael@0: * Change the name of the core file from core to core.pid, michael@0: * This is inherited by the sprocs created by this process michael@0: */ michael@0: #ifdef PR_COREPID michael@0: prctl(PR_COREPID, 0, 1); michael@0: #endif michael@0: /* michael@0: * Irix-specific terminate on error processing michael@0: */ michael@0: /* michael@0: * PR_SETABORTSIG is a new command implemented in a patch to michael@0: * Irix 6.2, 6.3 and 6.4. 
This causes a signal to be sent to all michael@0: * sprocs in the process when one of them terminates abnormally michael@0: * michael@0: */ michael@0: if (prctl(PR_SETABORTSIG, SIGKILL) < 0) { michael@0: /* michael@0: * if (errno == EINVAL) michael@0: * michael@0: * PR_SETABORTSIG not supported under this OS. michael@0: * You may want to get a recent kernel rollup patch that michael@0: * supports this feature. michael@0: * michael@0: */ michael@0: } michael@0: /* michael@0: * PR_SETEXITSIG - send the SIGCLD signal to the parent michael@0: * sproc when any sproc terminates michael@0: * michael@0: * This is used to cause the entire application to michael@0: * terminate when any sproc terminates abnormally by michael@0: * receipt of a SIGSEGV, SIGBUS or SIGABRT signal. michael@0: * If this is not done, the application may seem michael@0: * "hung" to the user because the other sprocs may be michael@0: * waiting for resources held by the michael@0: * abnormally-terminating sproc. michael@0: */ michael@0: prctl(PR_SETEXITSIG, 0); michael@0: michael@0: sigact.sa_handler = sigchld_handler; michael@0: sigact.sa_flags = SA_RESTART; michael@0: sigact.sa_mask = ints_off; michael@0: sigaction(SIGCLD, &sigact, NULL); michael@0: michael@0: /* michael@0: * setup stack fields for the primordial thread michael@0: */ michael@0: me->stack->stackSize = prctl(PR_GETSTACKSIZE); michael@0: me->stack->stackBottom = me->stack->stackTop - me->stack->stackSize; michael@0: michael@0: rv = pipe(_pr_irix_primoridal_cpu_fd); michael@0: PR_ASSERT(rv == 0); michael@0: #ifndef _PR_USE_POLL michael@0: _PR_IOQ_MAX_OSFD(me->cpu) = _pr_irix_primoridal_cpu_fd[0]; michael@0: FD_SET(_pr_irix_primoridal_cpu_fd[0], &_PR_FD_READ_SET(me->cpu)); michael@0: #endif michael@0: michael@0: libc_handle = dlopen("libc.so",RTLD_NOW); michael@0: PR_ASSERT(libc_handle != NULL); michael@0: libc_exit = (void (*)(int)) dlsym(libc_handle, "exit"); michael@0: PR_ASSERT(libc_exit != NULL); michael@0: /* dlclose(libc_handle); 
 */

#endif /* _PR_PTHREADS */

    _PR_UnixInit();
}

/**************************************************************************/
/************** code and such for NSPR 2.0's interval times ***************/
/**************************************************************************/

#define PR_PSEC_PER_SEC 1000000000000ULL /* 10^12 */

#ifndef SGI_CYCLECNTR_SIZE
#define SGI_CYCLECNTR_SIZE 165 /* Size user needs to use to read CC */
#endif

static PRIntn mmem_fd = -1;              /* /dev/mmem fd; -1 => no cycle counter */
static PRIntn clock_width = 0;           /* cycle counter width in bits (32 or 64) */
static void *iotimer_addr = NULL;        /* mapped address of the cycle counter */
static PRUint32 pr_clock_mask = 0;       /* low-order bits below one tick */
static PRUint32 pr_clock_shift = 0;      /* counter-increments -> ticks shift */
static PRIntervalTime pr_ticks = 0;      /* accumulated ticks (32-bit path) */
static PRUint32 pr_clock_granularity = 1;/* counter increments per tick */
static PRUint32 pr_previous = 0, pr_residual = 0; /* 32-bit rollover state */
static PRUint32 pr_ticks_per_second = 0;

extern PRIntervalTime _PR_UNIX_GetInterval(void);
extern PRIntervalTime _PR_UNIX_TicksPerSecond(void);

/*
 * Map the SGI free-running cycle counter (if the hardware has a usable
 * one) and derive a tick rate of roughly 100 microseconds from its
 * picosecond period; otherwise fall back to the generic Unix clock.
 */
static void _MD_IrixIntervalInit(void)
{
    /*
     * As much as I would like, the service available through this
     * interface on R3000's (aka, IP12) just isn't going to make it.
     * The register is only 24 bits wide, and rolls over at a ferocious
     * rate.
     */
    PRUint32 one_tick = 0;
    struct utsname utsinfo;
    uname(&utsinfo);
    if ((strncmp("IP12", utsinfo.machine, 4) != 0)
        && ((mmem_fd = open("/dev/mmem", O_RDONLY)) != -1))
    {
        int poffmask = getpagesize() - 1;
        __psunsigned_t phys_addr, raddr, cycleval;

        /* map the page containing the counter's physical address */
        phys_addr = syssgi(SGI_QUERY_CYCLECNTR, &cycleval);
        raddr = phys_addr & ~poffmask;
        iotimer_addr = mmap(
            0, poffmask, PROT_READ, MAP_PRIVATE, mmem_fd, (__psint_t)raddr);

        clock_width = syssgi(SGI_CYCLECNTR_SIZE);
        if (clock_width < 0)
        {
            /*
             * We must be executing on a 6.0 or earlier system, since the
             * SGI_CYCLECNTR_SIZE call is not supported.
             *
             * The only pre-6.1 platforms with 64-bit counters are
             * IP19 and IP21 (Challenge, PowerChallenge, Onyx).
             */
            if (!strncmp(utsinfo.machine, "IP19", 4) ||
                !strncmp(utsinfo.machine, "IP21", 4))
                clock_width = 64;
            else
                clock_width = 32;
        }

        /*
         * 'cycleval' is picoseconds / increment of the counter.
         * I'm pushing for a tick to be 100 microseconds, 10^(-4).
         * That leaves 10^(-8) left over, or 10^8 / cycleval.
         * Did I do that right?
         */

        one_tick = 100000000UL / cycleval; /* 100 microseconds */

        /* round the tick up to a power-of-two number of increments */
        while (0 != one_tick)
        {
            pr_clock_shift += 1;
            one_tick = one_tick >> 1;
            pr_clock_granularity = pr_clock_granularity << 1;
        }
        pr_clock_mask = pr_clock_granularity - 1; /* to make a mask out of it */
        pr_ticks_per_second = PR_PSEC_PER_SEC
            / ((PRUint64)pr_clock_granularity * (PRUint64)cycleval);

        /* adjust to the counter's offset within the mapped page */
        iotimer_addr = (void*)
            ((__psunsigned_t)iotimer_addr + (phys_addr & poffmask));
    }
    else
    {
        pr_ticks_per_second = _PR_UNIX_TicksPerSecond();
    }
} /* _MD_IrixIntervalInit */

/* Ticks per second as computed by _MD_IrixIntervalInit. */
PRIntervalTime _MD_IrixIntervalPerSec(void)
{
    return pr_ticks_per_second;
}

/*
 * Read the current interval time.  With a mapped cycle counter, a
 * 64-bit counter is simply shifted down to ticks; a 32-bit counter is
 * accumulated into pr_ticks, carrying sub-tick increments in
 * pr_residual across calls so rollover does not lose time.
 *
 * NOTE(review): updates to pr_previous/pr_residual/pr_ticks are not
 * synchronized here — presumably callers serialize interval reads;
 * confirm before using from concurrent contexts.
 */
PRIntervalTime _MD_IrixGetInterval(void)
{
    if (mmem_fd != -1)
    {
        if (64 == clock_width)
        {
            PRUint64 temp = *(PRUint64*)iotimer_addr;
            pr_ticks = (PRIntervalTime)(temp >> pr_clock_shift);
        }
        else
        {
            PRIntervalTime ticks = pr_ticks;
            PRUint32 now = *(PRUint32*)iotimer_addr, temp;
            PRUint32 residual = pr_residual, previous = pr_previous;

            temp = now - previous + residual;
            residual = temp & pr_clock_mask;
            ticks += temp >> pr_clock_shift;

            pr_previous = now;
            pr_residual = residual;
            pr_ticks = ticks;
        }
    }
    else
    {
        /*
         * No fast access. Use the time of day clock. This isn't the
         * right answer since this clock can get set back, tick at odd
         * rates, and it's expensive to acquire.
         */
        pr_ticks = _PR_UNIX_GetInterval();
    }
    return pr_ticks;
} /* _MD_IrixGetInterval */