nsprpub/pr/src/md/unix/irix.c

author       Michael Schloh von Bennewitz <michael@schloh.com>
date         Wed, 31 Dec 2014 06:09:35 +0100
changeset    0:6474c204b198
permissions  -rw-r--r--

Cloned upstream origin tor-browser at tor-browser-31.3.0esr-4.5-1-build1,
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f, for hacking purposes.

     1 /* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
     2 /* This Source Code Form is subject to the terms of the Mozilla Public
     3  * License, v. 2.0. If a copy of the MPL was not distributed with this
     4  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
     6 #include "primpl.h"
     8 #include <signal.h>
    10 #include <sys/types.h>
    11 #include <fcntl.h>
    12 #include <unistd.h>
    13 #include <string.h>
    14 #include <sys/mman.h>
    15 #include <sys/syssgi.h>
    16 #include <sys/time.h>
    17 #include <sys/immu.h>
    18 #include <sys/utsname.h>
    19 #include <sys/sysmp.h>
    20 #include <sys/pda.h>
    21 #include <sys/prctl.h>
    22 #include <sys/wait.h>
    23 #include <sys/resource.h>
    24 #include <sys/procfs.h>
    25 #include <task.h>
    26 #include <dlfcn.h>
    28 static void _MD_IrixIntervalInit(void);
    30 #if defined(_PR_PTHREADS)
    31 /*
    32  * for compatibility with classic nspr
    33  */
    34 void _PR_IRIX_CHILD_PROCESS()
    35 {
    36 }
    37 #else  /* defined(_PR_PTHREADS) */
    39 static void irix_detach_sproc(void);
    40 char *_nspr_sproc_private;    /* ptr. to private region in every sproc */
    42 extern PRUintn    _pr_numCPU;
    44 typedef struct nspr_arena {
    45 	PRCList links;
    46 	usptr_t *usarena;
    47 } nspr_arena;
    49 #define ARENA_PTR(qp) \
    50 	((nspr_arena *) ((char*) (qp) - offsetof(nspr_arena , links)))
    52 static usptr_t *alloc_new_arena(void);
    54 PRCList arena_list = PR_INIT_STATIC_CLIST(&arena_list);
    55 ulock_t arena_list_lock;
    56 nspr_arena first_arena;
    57 int	_nspr_irix_arena_cnt = 1;
    59 PRCList sproc_list = PR_INIT_STATIC_CLIST(&sproc_list);
    60 ulock_t sproc_list_lock;
    62 typedef struct sproc_data {
    63 	void (*entry) (void *, size_t);
    64 	unsigned inh;
    65 	void *arg;
    66 	caddr_t sp;
    67 	size_t len;
    68 	int *pid;
    69 	int creator_pid;
    70 } sproc_data;
    72 typedef struct sproc_params {
    73 	PRCList links;
    74 	sproc_data sd;
    75 } sproc_params;
    77 #define SPROC_PARAMS_PTR(qp) \
    78 	((sproc_params *) ((char*) (qp) - offsetof(sproc_params , links)))
    80 long	_nspr_irix_lock_cnt = 0;
    81 long	_nspr_irix_sem_cnt = 0;
    82 long	_nspr_irix_pollsem_cnt = 0;
    84 usptr_t *_pr_usArena;
    85 ulock_t _pr_heapLock;
    87 usema_t *_pr_irix_exit_sem;
    88 PRInt32 _pr_irix_exit_now = 0;
    89 PRInt32 _pr_irix_process_exit_code = 0;	/* exit code for PR_ProcessExit */
    90 PRInt32 _pr_irix_process_exit = 0; /* process exiting due to call to
    91 										   PR_ProcessExit */
    93 int _pr_irix_primoridal_cpu_fd[2] = { -1, -1 };
    94 static void (*libc_exit)(int) = NULL;
    95 static void *libc_handle = NULL;
    97 #define _NSPR_DEF_INITUSERS		100	/* default value of CONF_INITUSERS */
    98 #define _NSPR_DEF_INITSIZE		(4 * 1024 * 1024)	/* 4 MB */
   100 int _irix_initusers = _NSPR_DEF_INITUSERS;
   101 int _irix_initsize = _NSPR_DEF_INITSIZE;
   103 PRIntn _pr_io_in_progress, _pr_clock_in_progress;
   105 PRInt32 _pr_md_irix_sprocs_created, _pr_md_irix_sprocs_failed;
   106 PRInt32 _pr_md_irix_sprocs = 1;
   107 PRCList _pr_md_irix_sproc_list =
   108 PR_INIT_STATIC_CLIST(&_pr_md_irix_sproc_list);
   110 sigset_t ints_off;
   111 extern sigset_t timer_set;
   113 #if !defined(PR_SETABORTSIG)
   114 #define PR_SETABORTSIG 18
   115 #endif
   116 /*
   117  * terminate the entire application if any sproc exits abnormally
   118  */
   119 PRBool _nspr_terminate_on_error = PR_TRUE;
   121 /*
   122  * exported interface to set the shared arena parameters
   123  */
   124 void _PR_Irix_Set_Arena_Params(PRInt32 initusers, PRInt32 initsize)
   125 {
   126     _irix_initusers = initusers;
   127     _irix_initsize = initsize;
   128 }
   130 static usptr_t *alloc_new_arena()
   131 {
   132     return(usinit("/dev/zero"));
   133 }
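/*
 * new_poll_sem() and new_lock() below share one allocation strategy: walk
 * the global arena_list and try to carve the primitive out of an existing
 * shared arena; if every arena is exhausted (usnewpollsema()/usnewlock()
 * fails with ENOMEM), append a fresh arena obtained from alloc_new_arena()
 * and retry once in it.  arena_list_lock serializes the walk and the append.
 */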
   135 static PRStatus new_poll_sem(struct _MDThread *mdthr, int val)
   136 {
   137 PRIntn _is;
   138 PRStatus rv = PR_SUCCESS;
   139 usema_t *sem = NULL;
   140 PRCList *qp;
   141 nspr_arena *arena;
   142 usptr_t *irix_arena;
   143 PRThread *me = _MD_GET_ATTACHED_THREAD();	
   145 	if (me && !_PR_IS_NATIVE_THREAD(me))
   146 		_PR_INTSOFF(_is); 
   147 	_PR_LOCK(arena_list_lock);
   148 	for (qp = arena_list.next; qp != &arena_list; qp = qp->next) {
   149 		arena = ARENA_PTR(qp);
   150 		sem = usnewpollsema(arena->usarena, val);
   151 		if (sem != NULL) {
   152 			mdthr->cvar_pollsem = sem;
   153 			mdthr->pollsem_arena = arena->usarena;
   154 			break;
   155 		}
   156 	}
   157 	if (sem == NULL) {
   158 		/*
   159 		 * If no space left in the arena allocate a new one.
   160 		 */
   161 		if (errno == ENOMEM) {
   162 			arena = PR_NEWZAP(nspr_arena);
   163 			if (arena != NULL) {
   164 				irix_arena = alloc_new_arena();
   165 				if (irix_arena) {
   166 					PR_APPEND_LINK(&arena->links, &arena_list);
   167 					_nspr_irix_arena_cnt++;
   168 					arena->usarena = irix_arena;
   169 					sem = usnewpollsema(arena->usarena, val);
   170 					if (sem != NULL) {
   171 						mdthr->cvar_pollsem = sem;
   172 						mdthr->pollsem_arena = arena->usarena;
   173 					} else
   174 						rv = PR_FAILURE;
   175 				} else {
   176 					PR_DELETE(arena);
   177 					rv = PR_FAILURE;
   178 				}
   180 			} else
   181 				rv = PR_FAILURE;
   182 		} else
   183 			rv = PR_FAILURE;
   184 	}
   185 	_PR_UNLOCK(arena_list_lock);
   186 	if (me && !_PR_IS_NATIVE_THREAD(me))
   187 		_PR_FAST_INTSON(_is);
   188 	if (rv == PR_SUCCESS)
   189 		_MD_ATOMIC_INCREMENT(&_nspr_irix_pollsem_cnt);
   190 	return rv;
   191 }
   193 static void free_poll_sem(struct _MDThread *mdthr)
   194 {
   195 PRIntn _is;
   196 PRThread *me = _MD_GET_ATTACHED_THREAD();	
   198 	if (me && !_PR_IS_NATIVE_THREAD(me))
   199 		_PR_INTSOFF(_is); 
   200 	usfreepollsema(mdthr->cvar_pollsem, mdthr->pollsem_arena);
   201 	if (me && !_PR_IS_NATIVE_THREAD(me))
   202 		_PR_FAST_INTSON(_is);
   203 	_MD_ATOMIC_DECREMENT(&_nspr_irix_pollsem_cnt);
   204 }
   206 static PRStatus new_lock(struct _MDLock *lockp)
   207 {
   208 PRIntn _is;
   209 PRStatus rv = PR_SUCCESS;
   210 ulock_t lock = NULL;
   211 PRCList *qp;
   212 nspr_arena *arena;
   213 usptr_t *irix_arena;
   214 PRThread *me = _MD_GET_ATTACHED_THREAD();	
   216 	if (me && !_PR_IS_NATIVE_THREAD(me))
   217 		_PR_INTSOFF(_is); 
   218 	_PR_LOCK(arena_list_lock);
   219 	for (qp = arena_list.next; qp != &arena_list; qp = qp->next) {
   220 		arena = ARENA_PTR(qp);
   221 		lock = usnewlock(arena->usarena);
   222 		if (lock != NULL) {
   223 			lockp->lock = lock;
   224 			lockp->arena = arena->usarena;
   225 			break;
   226 		}
   227 	}
   228 	if (lock == NULL) {
   229 		/*
   230 		 * If no space left in the arena allocate a new one.
   231 		 */
   232 		if (errno == ENOMEM) {
   233 			arena = PR_NEWZAP(nspr_arena);
   234 			if (arena != NULL) {
   235 				irix_arena = alloc_new_arena();
   236 				if (irix_arena) {
   237 					PR_APPEND_LINK(&arena->links, &arena_list);
   238 					_nspr_irix_arena_cnt++;
   239 					arena->usarena = irix_arena;
   240 					lock = usnewlock(irix_arena);
   241 					if (lock != NULL) {
   242 						lockp->lock = lock;
   243 						lockp->arena = arena->usarena;
   244 					} else
   245 						rv = PR_FAILURE;
   246 				} else {
   247 					PR_DELETE(arena);
   248 					rv = PR_FAILURE;
   249 				}
   251 			} else
   252 				rv = PR_FAILURE;
   253 		} else
   254 			rv = PR_FAILURE;
   255 	}
   256 	_PR_UNLOCK(arena_list_lock);
   257 	if (me && !_PR_IS_NATIVE_THREAD(me))
   258 		_PR_FAST_INTSON(_is);
   259 	if (rv == PR_SUCCESS)
   260 		_MD_ATOMIC_INCREMENT(&_nspr_irix_lock_cnt);
   261 	return rv;
   262 }
   264 static void free_lock(struct _MDLock *lockp)
   265 {
   266 PRIntn _is;
   267 PRThread *me = _MD_GET_ATTACHED_THREAD();	
   269 	if (me && !_PR_IS_NATIVE_THREAD(me))
   270 		_PR_INTSOFF(_is); 
   271 	usfreelock(lockp->lock, lockp->arena);
   272 	if (me && !_PR_IS_NATIVE_THREAD(me))
   273 		_PR_FAST_INTSON(_is);
   274 	_MD_ATOMIC_DECREMENT(&_nspr_irix_lock_cnt);
   275 }
   277 void _MD_FREE_LOCK(struct _MDLock *lockp)
   278 {
   279 	PRIntn _is;
   280 	PRThread *me = _MD_GET_ATTACHED_THREAD();	
   282 	if (me && !_PR_IS_NATIVE_THREAD(me))
   283 		_PR_INTSOFF(_is); 
   284 	free_lock(lockp);
   285 	if (me && !_PR_IS_NATIVE_THREAD(me))
   286 		_PR_FAST_INTSON(_is);
   287 }
   289 /*
   290  * _MD_get_attached_thread
   291  *		Return the thread pointer of the current thread if it is attached.
   292  *
   293  *		This function is needed for Irix because the thread-local-storage is
   294  *		implemented by mmapin'g a page with the MAP_LOCAL flag. This causes the
   295  *		sproc-private page to inherit contents of the page of the caller of sproc().
   296  */
   297 PRThread *_MD_get_attached_thread(void)
   298 {
   300 	if (_MD_GET_SPROC_PID() == get_pid())
   301 		return _MD_THIS_THREAD();
   302 	else
   303 		return 0;
   304 }
   306 /*
   307  * _MD_get_current_thread
   308  *		Return the thread pointer of the current thread (attaching it if
   309  *		necessary)
   310  */
   311 PRThread *_MD_get_current_thread(void)
   312 {
   313 PRThread *me;
   315 	me = _MD_GET_ATTACHED_THREAD();
   316     if (NULL == me) {
   317         me = _PRI_AttachThread(
   318             PR_USER_THREAD, PR_PRIORITY_NORMAL, NULL, 0);
   319     }
   320     PR_ASSERT(me != NULL);
   321 	return(me);
   322 }
   324 /*
   325  * irix_detach_sproc
   326  *		auto-detach a sproc when it exits
   327  */
   328 void irix_detach_sproc(void)
   329 {
   330 PRThread *me;
   332 	me = _MD_GET_ATTACHED_THREAD();
   333 	if ((me != NULL) && (me->flags & _PR_ATTACHED)) {
   334 		_PRI_DetachThread();
   335 	}
   336 }
   339 PRStatus _MD_NEW_LOCK(struct _MDLock *lockp)
   340 {
   341     PRStatus rv;
   342     PRIntn is;
   343     PRThread *me = _MD_GET_ATTACHED_THREAD();	
   345 	if (me && !_PR_IS_NATIVE_THREAD(me))
   346 		_PR_INTSOFF(is);
   347 	rv = new_lock(lockp);
   348 	if (me && !_PR_IS_NATIVE_THREAD(me))
   349 		_PR_FAST_INTSON(is);
   350 	return rv;
   351 }
   353 static void
   354 sigchld_handler(int sig)
   355 {
   356     pid_t pid;
   357     int status;
   359     /*
   360      * If an sproc exited abnormally send a SIGKILL signal to all the
   361      * sprocs in the process to terminate the application
   362      */
   363     while ((pid = waitpid(0, &status, WNOHANG)) > 0) {
   364         if (WIFSIGNALED(status) && ((WTERMSIG(status) == SIGSEGV) ||
   365             (WTERMSIG(status) == SIGBUS) ||
   366             (WTERMSIG(status) == SIGABRT) ||
   367             (WTERMSIG(status) == SIGILL))) {
   369 				prctl(PR_SETEXITSIG, SIGKILL);
   370 				_exit(status);
   371 			}
   372     }
   373 }
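/*
 * save_context_and_block() is the SIGUSR1 handler used by _MD_SuspendCPU()
 * and _MD_SuspendThread() further below: the suspended sproc records its
 * registers with setjmp(), wakes the suspender via unblockproc(), and then
 * parks itself in blockproc() until _MD_ResumeCPU()/_MD_ResumeThread()
 * unblocks it.
 */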
   375 static void save_context_and_block(int sig)
   376 {
   377 PRThread *me = _PR_MD_CURRENT_THREAD();
   378 _PRCPU *cpu = _PR_MD_CURRENT_CPU();
   380 	/*
   381 	 * save context
   382 	 */
   383 	(void) setjmp(me->md.jb);
   384 	/*
   385 	 * unblock the suspending thread
   386 	 */
   387 	if (me->cpu) {
   388 		/*
   389 		 * I am a cpu thread, not a user-created GLOBAL thread
   390 		 */
   391 		unblockproc(cpu->md.suspending_id);	
   392 	} else {
   393 		unblockproc(me->md.suspending_id);	
   394 	}
   395 	/*
   396 	 * now, block current thread
   397 	 */
   398 	blockproc(getpid());
   399 }
   401 /*
   402 ** The irix kernel has a bug in it which causes async connect's which are
   403 ** interrupted by a signal to fail terribly (EADDRINUSE is returned). 
   404 ** We work around the bug by blocking signals during the async connect
   405 ** attempt.
   406 */
   407 PRInt32 _MD_irix_connect(
   408     PRInt32 osfd, const PRNetAddr *addr, PRInt32 addrlen, PRIntervalTime timeout)
   409 {
   410     PRInt32 rv;
   411     sigset_t oldset;
   413     sigprocmask(SIG_BLOCK, &ints_off, &oldset);
   414     rv = connect(osfd, addr, addrlen);
   415     sigprocmask(SIG_SETMASK, &oldset, 0);
   417     return(rv);
   418 }
   420 #include "prprf.h"
   422 /********************************************************************/
   423 /********************************************************************/
   424 /*************** Various thread like things for IRIX ****************/
   425 /********************************************************************/
   426 /********************************************************************/
   428 void *_MD_GetSP(PRThread *t)
   429 {
   430     PRThread *me = _PR_MD_CURRENT_THREAD();
   431     void *sp;
   433     if (me == t)
   434         (void) setjmp(t->md.jb);
   436     sp = (void *)(t->md.jb[JB_SP]);
   437     PR_ASSERT((sp >= (void *) t->stack->stackBottom) &&
   438         (sp <= (void *) (t->stack->stackBottom + t->stack->stackSize)));
   439     return(sp);
   440 }
   442 void _MD_InitLocks()
   443 {
   444     char buf[200];
   445     char *init_users, *init_size;
   447     PR_snprintf(buf, sizeof(buf), "/dev/zero");
   449     if (init_users = getenv("_NSPR_IRIX_INITUSERS"))
   450         _irix_initusers = atoi(init_users);
   452     if (init_size = getenv("_NSPR_IRIX_INITSIZE"))
   453         _irix_initsize = atoi(init_size);
   455     usconfig(CONF_INITUSERS, _irix_initusers);
   456     usconfig(CONF_INITSIZE, _irix_initsize);
   457     usconfig(CONF_AUTOGROW, 1);
   458     usconfig(CONF_AUTORESV, 1);
   459 	if (usconfig(CONF_ARENATYPE, US_SHAREDONLY) < 0) {
   460 		perror("PR_Init: unable to config mutex arena");
   461 		exit(-1);
   462 	}
   464     _pr_usArena = usinit(buf);
   465     if (!_pr_usArena) {
   466         fprintf(stderr,
   467             "PR_Init: Error - unable to create lock/monitor arena\n");
   468         exit(-1);
   469     }
   470     _pr_heapLock = usnewlock(_pr_usArena);
   471 	_nspr_irix_lock_cnt++;
   473     arena_list_lock = usnewlock(_pr_usArena);
   474 	_nspr_irix_lock_cnt++;
   476     sproc_list_lock = usnewlock(_pr_usArena);
   477 	_nspr_irix_lock_cnt++;
   479 	_pr_irix_exit_sem = usnewsema(_pr_usArena, 0);
   480 	_nspr_irix_sem_cnt = 1;
   482 	first_arena.usarena = _pr_usArena;
   483 	PR_INIT_CLIST(&first_arena.links);
   484 	PR_APPEND_LINK(&first_arena.links, &arena_list);
   485 }
   487 /* _PR_IRIX_CHILD_PROCESS is a private API for Server group */
   488 void _PR_IRIX_CHILD_PROCESS()
   489 {
   490 extern PRUint32 _pr_global_threads;
   492     PR_ASSERT(_PR_MD_CURRENT_CPU() == _pr_primordialCPU);
   493     PR_ASSERT(_pr_numCPU == 1);
   494     PR_ASSERT(_pr_global_threads == 0);
   495     /*
   496      * save the new pid
   497      */
   498     _pr_primordialCPU->md.id = getpid();
   499 	_MD_SET_SPROC_PID(getpid());	
   500 }
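/*
 * pr_cvar_wait_sem() waits on the thread's pollable semaphore with an
 * optional timeout.  uspsema() (via _PR_WAIT_SEM) is only called when the
 * previous wait did not time out; otherwise the semaphore fd is re-polled
 * directly, so a timed-out wait never consumes an extra post.  The timeout
 * is mapped to poll() milliseconds or a select() timeval depending on
 * _PR_USE_POLL.
 */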
   502 static PRStatus pr_cvar_wait_sem(PRThread *thread, PRIntervalTime timeout)
   503 {
   504     int rv;
   506 #ifdef _PR_USE_POLL
   507 	struct pollfd pfd;
   508 	int msecs;
   510 	if (timeout == PR_INTERVAL_NO_TIMEOUT)
   511 		msecs = -1;
   512 	else
   513 		msecs  = PR_IntervalToMilliseconds(timeout);
   514 #else
   515     struct timeval tv, *tvp;
   516     fd_set rd;
   518 	if(timeout == PR_INTERVAL_NO_TIMEOUT)
   519 		tvp = NULL;
   520 	else {
   521 		tv.tv_sec = PR_IntervalToSeconds(timeout);
   522 		tv.tv_usec = PR_IntervalToMicroseconds(
   523 		timeout - PR_SecondsToInterval(tv.tv_sec));
   524 		tvp = &tv;
   525 	}
   526 	FD_ZERO(&rd);
   527 	FD_SET(thread->md.cvar_pollsemfd, &rd);
   528 #endif
   530     /*
   531      * call uspsema only if a previous select call on this semaphore
   532      * did not timeout
   533      */
   534     if (!thread->md.cvar_pollsem_select) {
   535         rv = _PR_WAIT_SEM(thread->md.cvar_pollsem);
   536 		PR_ASSERT(rv >= 0);
   537 	} else
   538         rv = 0;
   539 again:
   540     if(!rv) {
   541 #ifdef _PR_USE_POLL
   542 		pfd.events = POLLIN;
   543 		pfd.fd = thread->md.cvar_pollsemfd;
   544 		rv = _MD_POLL(&pfd, 1, msecs);
   545 #else
   546 		rv = _MD_SELECT(thread->md.cvar_pollsemfd + 1, &rd, NULL,NULL,tvp);
   547 #endif
   548         if ((rv == -1) && (errno == EINTR)) {
   549 			rv = 0;
   550 			goto again;
   551 		}
   552 		PR_ASSERT(rv >= 0);
   553 	}
   555     if (rv > 0) {
   556         /*
   557          * acquired the semaphore, call uspsema next time
   558          */
   559         thread->md.cvar_pollsem_select = 0;
   560         return PR_SUCCESS;
   561     } else {
   562         /*
   563          * select timed out; must call select, not uspsema, when trying
   564          * to acquire the semaphore the next time
   565          */
   566         thread->md.cvar_pollsem_select = 1;
   567         return PR_FAILURE;
   568     }
   569 }
   571 PRStatus _MD_wait(PRThread *thread, PRIntervalTime ticks)
   572 {
   573     if ( thread->flags & _PR_GLOBAL_SCOPE ) {
   574 	_MD_CHECK_FOR_EXIT();
   575         if (pr_cvar_wait_sem(thread, ticks) == PR_FAILURE) {
   576 	    _MD_CHECK_FOR_EXIT();
   577             /*
   578              * wait timed out
   579              */
   580             _PR_THREAD_LOCK(thread);
   581             if (thread->wait.cvar) {
   582                 /*
   583                  * The thread will remove itself from the waitQ
   584                  * of the cvar in _PR_WaitCondVar
   585                  */
   586                 thread->wait.cvar = NULL;
   587                 thread->state =  _PR_RUNNING;
   588                 _PR_THREAD_UNLOCK(thread);
   589             }  else {
   590                 _PR_THREAD_UNLOCK(thread);
   591                 /*
   592              * This thread was woken up by a notifying thread
   593              * at the same time as a timeout; so, consume the
   594              * extra post operation on the semaphore
   595              */
   596 	        _MD_CHECK_FOR_EXIT();
   597             pr_cvar_wait_sem(thread, PR_INTERVAL_NO_TIMEOUT);
   598             }
   599 	    _MD_CHECK_FOR_EXIT();
   600         }
   601     } else {
   602         _PR_MD_SWITCH_CONTEXT(thread);
   603     }
   604     return PR_SUCCESS;
   605 }
   607 PRStatus _MD_WakeupWaiter(PRThread *thread)
   608 {
   609     PRThread *me = _PR_MD_CURRENT_THREAD();
   610     PRIntn is;
   612 	PR_ASSERT(_pr_md_idle_cpus >= 0);
   613     if (thread == NULL) {
   614 		if (_pr_md_idle_cpus)
   615         	_MD_Wakeup_CPUs();
   616     } else if (!_PR_IS_NATIVE_THREAD(thread)) {
   617 		if (_pr_md_idle_cpus)
   618        		_MD_Wakeup_CPUs();
   619     } else {
   620 		PR_ASSERT(_PR_IS_NATIVE_THREAD(thread));
   621 		if (!_PR_IS_NATIVE_THREAD(me))
   622 			_PR_INTSOFF(is);
   623 		_MD_CVAR_POST_SEM(thread);
   624 		if (!_PR_IS_NATIVE_THREAD(me))
   625 			_PR_FAST_INTSON(is);
   626     } 
   627     return PR_SUCCESS;
   628 }
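/*
 * create_sproc() funnels all sproc creation through the primordial CPU.
 * When called from a local thread running on cpu 0 it calls sprocsp()
 * directly; otherwise it queues the request on sproc_list, pokes cpu 0
 * through the _pr_irix_primoridal_cpu_fd pipe, and blocks until
 * _PR_MD_primordial_cpu() has performed the sprocsp() and unblocked it.
 */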
   630 void create_sproc (void (*entry) (void *, size_t), unsigned inh,
   631 					void *arg, caddr_t sp, size_t len, int *pid)
   632 {
   633 sproc_params sparams;
   634 char data;
   635 int rv;
   636 PRThread *me = _PR_MD_CURRENT_THREAD();
   638 	if (!_PR_IS_NATIVE_THREAD(me) && (_PR_MD_CURRENT_CPU()->id == 0)) {
   639 		*pid = sprocsp(entry,		/* startup func		*/
   640 						inh,        /* attribute flags	*/
   641 						arg,     	/* thread param		*/
   642 						sp,         /* stack address	*/
   643 						len);       /* stack size		*/
   644 	} else {
   645 		sparams.sd.entry = entry;
   646 		sparams.sd.inh = inh;
   647 		sparams.sd.arg = arg;
   648 		sparams.sd.sp = sp;
   649 		sparams.sd.len = len;
   650 		sparams.sd.pid = pid;
   651 		sparams.sd.creator_pid = getpid();
   652 		_PR_LOCK(sproc_list_lock);
   653 		PR_APPEND_LINK(&sparams.links, &sproc_list);
   654 		rv = write(_pr_irix_primoridal_cpu_fd[1], &data, 1);
   655 		PR_ASSERT(rv == 1);
   656 		_PR_UNLOCK(sproc_list_lock);
   657 		blockproc(getpid());
   658 	}
   659 }
   661 /*
   662  * _PR_MD_WAKEUP_PRIMORDIAL_CPU
   663  *
   664  *		wakeup cpu 0
   665  */
   667 void _PR_MD_WAKEUP_PRIMORDIAL_CPU()
   668 {
   669 char data = '0';
   670 int rv;
   672 	rv = write(_pr_irix_primoridal_cpu_fd[1], &data, 1);
   673 	PR_ASSERT(rv == 1);
   674 }
   676 /*
   677  * _PR_MD_primordial_cpu
   678  *
    679  *		process events that need to be executed by the primordial cpu on each
   680  *		iteration through the idle loop
   681  */
   683 void _PR_MD_primordial_cpu()
   684 {
   685 PRCList *qp;
   686 sproc_params *sp;
   687 int pid;
   689 	_PR_LOCK(sproc_list_lock);
   690 	while ((qp = sproc_list.next) != &sproc_list) {
   691 		sp = SPROC_PARAMS_PTR(qp);
   692 		PR_REMOVE_LINK(&sp->links);
   693 		pid = sp->sd.creator_pid;
   694 		(*(sp->sd.pid)) = sprocsp(sp->sd.entry,		/* startup func    */
   695 							sp->sd.inh,            	/* attribute flags     */
   696 							sp->sd.arg,     		/* thread param     */
   697 							sp->sd.sp,             	/* stack address    */
   698 							sp->sd.len);         	/* stack size     */
   699 		unblockproc(pid);
   700 	}
   701 	_PR_UNLOCK(sproc_list_lock);
   702 }
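/*
 * _MD_CreateThread() backs global-scope (PR_GLOBAL_THREAD) thread creation
 * on classic (non-pthreads) IRIX: it allocates the per-thread pollable
 * semaphore used for condition-variable waits, opens its descriptor, and
 * hands the start function to create_sproc().  Illustrative caller through
 * the public NSPR API (not part of this file):
 *
 *     PRThread *t = PR_CreateThread(PR_USER_THREAD, worker, arg,
 *                                   PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD,
 *                                   PR_JOINABLE_THREAD, 0);
 */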
   704 PRStatus _MD_CreateThread(PRThread *thread, 
   705 void (*start)(void *), 
   706 PRThreadPriority priority, 
   707 PRThreadScope scope, 
   708 PRThreadState state, 
   709 PRUint32 stackSize)
   710 {
   711     typedef void (*SprocEntry) (void *, size_t);
   712     SprocEntry spentry = (SprocEntry)start;
   713     PRIntn is;
   714 	PRThread *me = _PR_MD_CURRENT_THREAD();	
   715 	PRInt32 pid;
   716 	PRStatus rv;
   718 	if (!_PR_IS_NATIVE_THREAD(me))
   719 		_PR_INTSOFF(is);
   720     thread->md.cvar_pollsem_select = 0;
   721     thread->flags |= _PR_GLOBAL_SCOPE;
   723 	thread->md.cvar_pollsemfd = -1;
   724 	if (new_poll_sem(&thread->md,0) == PR_FAILURE) {
   725 		if (!_PR_IS_NATIVE_THREAD(me))
   726 			_PR_FAST_INTSON(is);
   727 		return PR_FAILURE;
   728 	}
   729 	thread->md.cvar_pollsemfd =
   730 		_PR_OPEN_POLL_SEM(thread->md.cvar_pollsem);
   731 	if ((thread->md.cvar_pollsemfd < 0)) {
   732 		free_poll_sem(&thread->md);
   733 		if (!_PR_IS_NATIVE_THREAD(me))
   734 			_PR_FAST_INTSON(is);
   735 		return PR_FAILURE;
   736 	}
   738     create_sproc(spentry,            /* startup func    */
   739     			PR_SALL,            /* attribute flags     */
   740     			(void *)thread,     /* thread param     */
   741     			NULL,               /* stack address    */
   742     			stackSize, &pid);         /* stack size     */
   743     if (pid > 0) {
   744         _MD_ATOMIC_INCREMENT(&_pr_md_irix_sprocs_created);
   745         _MD_ATOMIC_INCREMENT(&_pr_md_irix_sprocs);
   746 		rv = PR_SUCCESS;
   747 		if (!_PR_IS_NATIVE_THREAD(me))
   748 			_PR_FAST_INTSON(is);
   749         return rv;
   750     } else {
   751         close(thread->md.cvar_pollsemfd);
   752         thread->md.cvar_pollsemfd = -1;
   753 		free_poll_sem(&thread->md);
   754         thread->md.cvar_pollsem = NULL;
   755         _MD_ATOMIC_INCREMENT(&_pr_md_irix_sprocs_failed);
   756 		if (!_PR_IS_NATIVE_THREAD(me))
   757 			_PR_FAST_INTSON(is);
   758         return PR_FAILURE;
   759     }
   760 }
   762 void _MD_CleanThread(PRThread *thread)
   763 {
   764     if (thread->flags & _PR_GLOBAL_SCOPE) {
   765         close(thread->md.cvar_pollsemfd);
   766         thread->md.cvar_pollsemfd = -1;
   767 		free_poll_sem(&thread->md);
   768         thread->md.cvar_pollsem = NULL;
   769     }
   770 }
   772 void _MD_SetPriority(_MDThread *thread, PRThreadPriority newPri)
   773 {
   774     return;
   775 }
   777 extern void _MD_unix_terminate_waitpid_daemon(void);
   779 void
   780 _MD_CleanupBeforeExit(void)
   781 {
   782     extern PRInt32    _pr_cpus_exit;
   784     _MD_unix_terminate_waitpid_daemon();
   786 	_pr_irix_exit_now = 1;
   787     if (_pr_numCPU > 1) {
   788         /*
   789          * Set a global flag, and wakeup all cpus which will notice the flag
   790          * and exit.
   791          */
   792         _pr_cpus_exit = getpid();
   793         _MD_Wakeup_CPUs();
   794         while(_pr_numCPU > 1) {
   795             _PR_WAIT_SEM(_pr_irix_exit_sem);
   796             _pr_numCPU--;
   797         }
   798     }
   799     /*
   800      * cause global threads on the recycle list to exit
   801      */
   802      _PR_DEADQ_LOCK;
   803      if (_PR_NUM_DEADNATIVE != 0) {
   804 	PRThread *thread;
   805     	PRCList *ptr;
   807         ptr = _PR_DEADNATIVEQ.next;
   808         while( ptr != &_PR_DEADNATIVEQ ) {
   809         	thread = _PR_THREAD_PTR(ptr);
   810 		_MD_CVAR_POST_SEM(thread);
   811                 ptr = ptr->next;
   812         } 
   813      }
   814      _PR_DEADQ_UNLOCK;
   815      while(_PR_NUM_DEADNATIVE > 1) {
   816 	_PR_WAIT_SEM(_pr_irix_exit_sem);
   817 	_PR_DEC_DEADNATIVE;
   818      }
   819 }
   821 #ifdef _PR_HAVE_SGI_PRDA_PROCMASK
   822 extern void __sgi_prda_procmask(int);
   823 #endif
   825 PRStatus
   826 _MD_InitAttachedThread(PRThread *thread, PRBool wakeup_parent)
   827 {
   828 	PRStatus rv = PR_SUCCESS;
   830     if (thread->flags & _PR_GLOBAL_SCOPE) {
   831 		if (new_poll_sem(&thread->md,0) == PR_FAILURE) {
   832 			return PR_FAILURE;
   833 		}
   834 		thread->md.cvar_pollsemfd =
   835 			_PR_OPEN_POLL_SEM(thread->md.cvar_pollsem);
   836 		if ((thread->md.cvar_pollsemfd < 0)) {
   837 			free_poll_sem(&thread->md);
   838 			return PR_FAILURE;
   839 		}
   840 		if (_MD_InitThread(thread, PR_FALSE) == PR_FAILURE) {
   841 			close(thread->md.cvar_pollsemfd);
   842 			thread->md.cvar_pollsemfd = -1;
   843 			free_poll_sem(&thread->md);
   844 			thread->md.cvar_pollsem = NULL;
   845 			return PR_FAILURE;
   846 		}
   847     }
   848 	return rv;
   849 }
   851 PRStatus
   852 _MD_InitThread(PRThread *thread, PRBool wakeup_parent)
   853 {
   854     struct sigaction sigact;
   855 	PRStatus rv = PR_SUCCESS;
   857     if (thread->flags & _PR_GLOBAL_SCOPE) {
   858 		thread->md.id = getpid();
   859         setblockproccnt(thread->md.id, 0);
   860 		_MD_SET_SPROC_PID(getpid());	
   861 #ifdef _PR_HAVE_SGI_PRDA_PROCMASK
   862 		/*
   863 		 * enable user-level processing of sigprocmask(); this is an
   864 		 * undocumented feature available in Irix 6.2, 6.3, 6.4 and 6.5
   865 		 */
   866 		__sgi_prda_procmask(USER_LEVEL);
   867 #endif
   868 		/*
   869 		 * set up SIGUSR1 handler; this is used to save state
   870 		 */
   871 		sigact.sa_handler = save_context_and_block;
   872 		sigact.sa_flags = SA_RESTART;
   873 		/*
   874 		 * Must mask clock interrupts
   875 		 */
   876 		sigact.sa_mask = timer_set;
   877 		sigaction(SIGUSR1, &sigact, 0);
   880 		/*
   881 		 * PR_SETABORTSIG is a new command implemented in a patch to
   882 		 * Irix 6.2, 6.3 and 6.4. This causes a signal to be sent to all
   883 		 * sprocs in the process when one of them terminates abnormally
   884 		 *
   885 		 */
   886 		if (prctl(PR_SETABORTSIG, SIGKILL) < 0) {
   887 			/*
   888 			 *  if (errno == EINVAL)
   889 			 *
   890 			 *	PR_SETABORTSIG not supported under this OS.
   891 			 *	You may want to get a recent kernel rollup patch that
   892 			 *	supports this feature.
   893 			 */
   894 		}
   895 		/*
    896 		 * SIGCLD handler for detecting abnormally-terminating
   897 		 * sprocs and for reaping sprocs
   898 		 */
   899 		sigact.sa_handler = sigchld_handler;
   900 		sigact.sa_flags = SA_RESTART;
   901 		sigact.sa_mask = ints_off;
   902 		sigaction(SIGCLD, &sigact, NULL);
   903     }
   904 	return rv;
   905 }
   907 /*
   908  * PR_Cleanup should be executed on the primordial sproc; migrate the thread
   909  * to the primordial cpu
   910  */
   912 void _PR_MD_PRE_CLEANUP(PRThread *me)
   913 {
   914 PRIntn is;
   915 _PRCPU *cpu = _pr_primordialCPU;
   917 	PR_ASSERT(cpu);
   919 	me->flags |= _PR_BOUND_THREAD;	
   921 	if (me->cpu->id != 0) {
   922 		_PR_INTSOFF(is);
   923 		_PR_RUNQ_LOCK(cpu);
   924 		me->cpu = cpu;
   925 		me->state = _PR_RUNNABLE;
   926 		_PR_ADD_RUNQ(me, cpu, me->priority);
   927 		_PR_RUNQ_UNLOCK(cpu);
   928 		_MD_Wakeup_CPUs();
   930 		_PR_MD_SWITCH_CONTEXT(me);
   932 		_PR_FAST_INTSON(is);
   933 		PR_ASSERT(me->cpu->id == 0);
   934 	}
   935 }
   937 /*
   938  * process exiting
   939  */
   940 PR_EXTERN(void ) _MD_exit(PRIntn status)
   941 {
   942 PRThread *me = _PR_MD_CURRENT_THREAD();
   944 	/*
   945 	 * the exit code of the process is the exit code of the primordial
   946 	 * sproc
   947 	 */
   948 	if (!_PR_IS_NATIVE_THREAD(me) && (_PR_MD_CURRENT_CPU()->id == 0)) {
   949 		/*
   950 		 * primordial sproc case: call _exit directly
   951 		 * Cause SIGKILL to be sent to other sprocs
   952 		 */
   953 		prctl(PR_SETEXITSIG, SIGKILL);
   954 		_exit(status);
   955 	} else {
   956 		int rv;
   957 		char data;
   958 		sigset_t set;
   960 		/*
   961 		 * non-primordial sproc case: cause the primordial sproc, cpu 0,
   962 		 * to wakeup and call _exit
   963 		 */
   964 		_pr_irix_process_exit = 1;
   965 		_pr_irix_process_exit_code = status;
   966 		rv = write(_pr_irix_primoridal_cpu_fd[1], &data, 1);
   967 		PR_ASSERT(rv == 1);
   968 		/*
   969 		 * block all signals and wait for SIGKILL to terminate this sproc
   970 		 */
   971 		sigfillset(&set);
   972 		sigsuspend(&set);
   973 		/*
   974 		 * this code doesn't (shouldn't) execute
   975 		 */
   976 		prctl(PR_SETEXITSIG, SIGKILL);
   977 		_exit(status);
   978 	}
   979 }
   981 /*
   982  * Override the exit() function in libc to cause the process to exit
    983  * when the primordial/main nspr thread calls exit. Calls to exit by any
   984  * other thread simply result in a call to the exit function in libc.
   985  * The exit code of the process is the exit code of the primordial
   986  * sproc.
   987  */
   989 void exit(int status)
   990 {
   991 PRThread *me, *thr;
   992 PRCList *qp;
   994 	if (!_pr_initialized)  {
   995 		if (!libc_exit) {
   997 			if (!libc_handle)
   998 				libc_handle = dlopen("libc.so",RTLD_NOW);
   999 			if (libc_handle)
   1000 				libc_exit = (void (*)(int)) dlsym(libc_handle, "exit");
   1001 		}
   1002 		if (libc_exit)
   1003 			(*libc_exit)(status);
   1004 		else
   1005 			_exit(status);
   1006 	}
  1008 	me = _PR_MD_CURRENT_THREAD();
  1010 	if (me == NULL) 		/* detached thread */
  1011 		(*libc_exit)(status);
  1013 	PR_ASSERT(_PR_IS_NATIVE_THREAD(me) ||
  1014 						(_PR_MD_CURRENT_CPU())->id == me->cpu->id);
  1016 	if (me->flags & _PR_PRIMORDIAL) {
  1018 		me->flags |= _PR_BOUND_THREAD;	
  1020 		PR_ASSERT((_PR_MD_CURRENT_CPU())->id == me->cpu->id);
  1021 		if (me->cpu->id != 0) {
  1022 			_PRCPU *cpu = _pr_primordialCPU;
  1023 			PRIntn is;
  1025 			_PR_INTSOFF(is);
  1026 			_PR_RUNQ_LOCK(cpu);
  1027 			me->cpu = cpu;
  1028 			me->state = _PR_RUNNABLE;
  1029 			_PR_ADD_RUNQ(me, cpu, me->priority);
  1030 			_PR_RUNQ_UNLOCK(cpu);
  1031 			_MD_Wakeup_CPUs();
  1033 			_PR_MD_SWITCH_CONTEXT(me);
   1035 			_PR_FAST_INTSON(is);
   1036 		}
  1038 		PR_ASSERT((_PR_MD_CURRENT_CPU())->id == 0);
  1040 		if (prctl(PR_GETNSHARE) > 1) {
  1041 #define SPROC_EXIT_WAIT_TIME 5
  1042 			int sleep_cnt = SPROC_EXIT_WAIT_TIME;
  1044 			/*
   1045 			 * sprocs still running; cause cpus and recycled global threads
  1046 			 * to exit
  1047 			 */
  1048 			_pr_irix_exit_now = 1;
  1049 			if (_pr_numCPU > 1) {
   1050 				_MD_Wakeup_CPUs();
   1051 			}
  1052 			 _PR_DEADQ_LOCK;
  1053 			 if (_PR_NUM_DEADNATIVE != 0) {
  1054 				PRThread *thread;
  1055 				PRCList *ptr;
  1057 				ptr = _PR_DEADNATIVEQ.next;
  1058 				while( ptr != &_PR_DEADNATIVEQ ) {
  1059 					thread = _PR_THREAD_PTR(ptr);
  1060 					_MD_CVAR_POST_SEM(thread);
   1061 					ptr = ptr->next;
   1062 				}
   1063 			 }
   1064 			 _PR_DEADQ_UNLOCK;
  1065 			while (sleep_cnt-- > 0) {
  1066 				if (waitpid(0, NULL, WNOHANG) >= 0) 
  1067 					sleep(1);
  1068 				else
   1069 					break;
   1070 			}
   1071 			prctl(PR_SETEXITSIG, SIGKILL);
   1072 		}
  1073 		(*libc_exit)(status);
  1074 	} else {
  1075 		/*
  1076 		 * non-primordial thread; simply call exit in libc.
  1077 		 */
   1078 		(*libc_exit)(status);
   1079 	}
   1080 }
  1083 void
   1084 _MD_InitRunningCPU(_PRCPU *cpu)
   1085 {
   1086     extern int _pr_md_pipefd[2];
  1088     _MD_unix_init_running_cpu(cpu);
  1089     cpu->md.id = getpid();
  1090 	_MD_SET_SPROC_PID(getpid());	
  1091 	if (_pr_md_pipefd[0] >= 0) {
  1092     	_PR_IOQ_MAX_OSFD(cpu) = _pr_md_pipefd[0];
  1093 #ifndef _PR_USE_POLL
  1094     	FD_SET(_pr_md_pipefd[0], &_PR_FD_READ_SET(cpu));
   1095 #endif
   1096 	}
   1097 }
  1099 void
   1100 _MD_ExitThread(PRThread *thread)
   1101 {
  1102     if (thread->flags & _PR_GLOBAL_SCOPE) {
  1103         _MD_ATOMIC_DECREMENT(&_pr_md_irix_sprocs);
  1104         _MD_CLEAN_THREAD(thread);
   1105         _MD_SET_CURRENT_THREAD(NULL);
   1106     }
   1107 }
  1109 void
   1110 _MD_SuspendCPU(_PRCPU *cpu)
   1111 {
  1112     PRInt32 rv;
  1114 	cpu->md.suspending_id = getpid();
  1115 	rv = kill(cpu->md.id, SIGUSR1);
  1116 	PR_ASSERT(rv == 0);
  1117 	/*
  1118 	 * now, block the current thread/cpu until woken up by the suspended
  1119 	 * thread from it's SIGUSR1 signal handler
  1120 	 */
   1121 	blockproc(getpid());
   1122 }
  1125 void
   1126 _MD_ResumeCPU(_PRCPU *cpu)
   1127 {
   1128     unblockproc(cpu->md.id);
   1129 }
  1131 #if 0
  1132 /*
  1133  * save the register context of a suspended sproc
  1134  */
   1135 void get_context(PRThread *thr)
   1136 {
  1137     int len, fd;
  1138     char pidstr[24];
  1139     char path[24];
  1141     /*
  1142      * open the file corresponding to this process in procfs
  1143      */
  1144     sprintf(path,"/proc/%s","00000");
  1145     len = strlen(path);
  1146     sprintf(pidstr,"%d",thr->md.id);
  1147     len -= strlen(pidstr);
  1148     sprintf(path + len,"%s",pidstr);
  1149     fd = open(path,O_RDONLY);
  1150     if (fd >= 0) {
  1151         (void) ioctl(fd, PIOCGREG, thr->md.gregs);
   1152         close(fd);
   1153     }
   1154     return;
   1155 }
  1156 #endif	/* 0 */
  1158 void
   1159 _MD_SuspendThread(PRThread *thread)
   1160 {
  1161     PRInt32 rv;
  1163     PR_ASSERT((thread->flags & _PR_GLOBAL_SCOPE) &&
  1164         _PR_IS_GCABLE_THREAD(thread));
  1166 	thread->md.suspending_id = getpid();
  1167 	rv = kill(thread->md.id, SIGUSR1);
  1168 	PR_ASSERT(rv == 0);
  1169 	/*
  1170 	 * now, block the current thread/cpu until woken up by the suspended
  1171 	 * thread from it's SIGUSR1 signal handler
  1172 	 */
   1173 	blockproc(getpid());
   1174 }
  1176 void
   1177 _MD_ResumeThread(PRThread *thread)
   1178 {
  1179     PR_ASSERT((thread->flags & _PR_GLOBAL_SCOPE) &&
  1180         _PR_IS_GCABLE_THREAD(thread));
   1181     (void)unblockproc(thread->md.id);
   1182 }
  1184 /*
  1185  * return the set of processors available for scheduling procs in the
  1186  * "mask" argument
  1187  */
   1188 PRInt32 _MD_GetThreadAffinityMask(PRThread *unused, PRUint32 *mask)
   1189 {
  1190     PRInt32 nprocs, rv;
  1191     struct pda_stat *pstat;
  1192 #define MAX_PROCESSORS    32
  1194     nprocs = sysmp(MP_NPROCS);
  1195     if (nprocs < 0)
  1196         return(-1);
  1197     pstat = (struct pda_stat*)PR_MALLOC(sizeof(struct pda_stat) * nprocs);
  1198     if (pstat == NULL)
  1199         return(-1);
  1200     rv = sysmp(MP_STAT, pstat);
  1201     if (rv < 0) {
  1202         PR_DELETE(pstat);
   1203         return(-1);
   1204     }
  1205     /*
  1206      * look at the first 32 cpus
  1207      */
  1208     nprocs = (nprocs > MAX_PROCESSORS) ? MAX_PROCESSORS : nprocs;
  1209     *mask = 0;
  1210     while (nprocs) {
  1211         if ((pstat->p_flags & PDAF_ENABLED) &&
  1212             !(pstat->p_flags & PDAF_ISOLATED)) {
   1213             *mask |= (1 << pstat->p_cpuid);
   1214         }
  1215         nprocs--;
   1216         pstat++;
   1217     }
   1218     return 0;
   1219 }
  1221 static char *_thr_state[] = {
  1222     "UNBORN",
  1223     "RUNNABLE",
  1224     "RUNNING",
  1225     "LOCK_WAIT",
  1226     "COND_WAIT",
  1227     "JOIN_WAIT",
  1228     "IO_WAIT",
  1229     "SUSPENDED",
  1230     "DEAD"
  1231 };
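/*
 * _PR_List_Threads() is a debugging aid: it dumps the local threads, the
 * global (sproc-backed) threads and the CPUs to stdout.  For each sproc it
 * first checks prctl(PR_GETSHMASK, pid) to see whether the pid still
 * belongs to the share group, then reads /proc/pinfo/<pid> with PIOCPSINFO
 * to tell a live sproc from a zombie.
 */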
   1233 void _PR_List_Threads()
   1234 {
  1235     PRThread *thr;
  1236     void *handle;
  1237     struct _PRCPU *cpu;
  1238     PRCList *qp;
  1239     int len, fd;
  1240     char pidstr[24];
  1241     char path[24];
  1242     prpsinfo_t pinfo;
  1245     printf("\n%s %-s\n"," ","LOCAL Threads");
  1246     printf("%s %-s\n"," ","----- -------");
  1247     printf("%s %-14s %-10s %-12s %-3s %-10s %-10s %-12s\n\n"," ",
  1248         "Thread", "State", "Wait-Handle",
  1249         "Cpu","Stk-Base","Stk-Sz","SP");
  1250     for (qp = _PR_ACTIVE_LOCAL_THREADQ().next;
  1251         qp != &_PR_ACTIVE_LOCAL_THREADQ(); qp = qp->next) {
  1252         thr = _PR_ACTIVE_THREAD_PTR(qp);
  1253         printf("%s 0x%-12x %-10s "," ",thr,_thr_state[thr->state]);
  1254         if (thr->state == _PR_LOCK_WAIT)
  1255             handle = thr->wait.lock;
  1256         else if (thr->state == _PR_COND_WAIT)
  1257             handle = thr->wait.cvar;
  1258         else
  1259             handle = NULL;
  1260         if (handle)
  1261             printf("0x%-10x ",handle);
  1262         else
  1263             printf("%-12s "," ");
  1264         printf("%-3d ",thr->cpu->id);
  1265         printf("0x%-8x ",thr->stack->stackBottom);
  1266         printf("0x%-8x ",thr->stack->stackSize);
   1267         printf("0x%-10x\n",thr->md.jb[JB_SP]);
   1268     }
  1270     printf("\n%s %-s\n"," ","GLOBAL Threads");
  1271     printf("%s %-s\n"," ","------ -------");
  1272     printf("%s %-14s %-6s %-12s %-12s %-12s %-12s\n\n"," ","Thread",
  1273         "Pid","State","Wait-Handle",
  1274         "Stk-Base","Stk-Sz");
  1276     for (qp = _PR_ACTIVE_GLOBAL_THREADQ().next;
  1277         qp != &_PR_ACTIVE_GLOBAL_THREADQ(); qp = qp->next) {
  1278         thr = _PR_ACTIVE_THREAD_PTR(qp);
  1279         if (thr->cpu != NULL)
  1280             continue;        /* it is a cpu thread */
  1281         printf("%s 0x%-12x %-6d "," ",thr,thr->md.id);
  1282         /*
  1283          * check if the sproc is still running
  1284          * first call prctl(PR_GETSHMASK,pid) to check if
  1285          * the process is part of the share group (the pid
  1286          * could have been recycled by the OS)
  1287          */
  1288         if (prctl(PR_GETSHMASK,thr->md.id) < 0) {
  1289             printf("%-12s\n","TERMINATED");
   1290             continue;
   1291         }
  1292         /*
  1293          * Now, check if the sproc terminated and is in zombie
  1294          * state
  1295          */
  1296         sprintf(path,"/proc/pinfo/%s","00000");
  1297         len = strlen(path);
  1298         sprintf(pidstr,"%d",thr->md.id);
  1299         len -= strlen(pidstr);
  1300         sprintf(path + len,"%s",pidstr);
  1301         fd = open(path,O_RDONLY);
  1302         if (fd >= 0) {
  1303             if (ioctl(fd, PIOCPSINFO, &pinfo) < 0)
  1304                 printf("%-12s ","TERMINATED");
  1305             else if (pinfo.pr_zomb)
  1306                 printf("%-12s ","TERMINATED");
  1307             else
  1308                 printf("%-12s ",_thr_state[thr->state]);
  1309             close(fd);
  1310         } else {
   1311             printf("%-12s ","TERMINATED");
   1312         }
  1314         if (thr->state == _PR_LOCK_WAIT)
  1315             handle = thr->wait.lock;
  1316         else if (thr->state == _PR_COND_WAIT)
  1317             handle = thr->wait.cvar;
  1318         else
  1319             handle = NULL;
  1320         if (handle)
  1321             printf("%-12x ",handle);
  1322         else
  1323             printf("%-12s "," ");
  1324         printf("0x%-10x ",thr->stack->stackBottom);
   1325         printf("0x%-10x\n",thr->stack->stackSize);
   1326     }
  1328     printf("\n%s %-s\n"," ","CPUs");
  1329     printf("%s %-s\n"," ","----");
  1330     printf("%s %-14s %-6s %-12s \n\n"," ","Id","Pid","State");
  1333     for (qp = _PR_CPUQ().next; qp != &_PR_CPUQ(); qp = qp->next) {
  1334         cpu = _PR_CPU_PTR(qp);
  1335         printf("%s %-14d %-6d "," ",cpu->id,cpu->md.id);
  1336         /*
  1337          * check if the sproc is still running
  1338          * first call prctl(PR_GETSHMASK,pid) to check if
  1339          * the process is part of the share group (the pid
  1340          * could have been recycled by the OS)
  1341          */
  1342         if (prctl(PR_GETSHMASK,cpu->md.id) < 0) {
  1343             printf("%-12s\n","TERMINATED");
   1344             continue;
   1345         }
  1346         /*
  1347          * Now, check if the sproc terminated and is in zombie
  1348          * state
  1349          */
  1350         sprintf(path,"/proc/pinfo/%s","00000");
  1351         len = strlen(path);
  1352         sprintf(pidstr,"%d",cpu->md.id);
  1353         len -= strlen(pidstr);
  1354         sprintf(path + len,"%s",pidstr);
  1355         fd = open(path,O_RDONLY);
  1356         if (fd >= 0) {
  1357             if (ioctl(fd, PIOCPSINFO, &pinfo) < 0)
  1358                 printf("%-12s\n","TERMINATED");
  1359             else if (pinfo.pr_zomb)
  1360                 printf("%-12s\n","TERMINATED");
  1361             else
  1362                 printf("%-12s\n","RUNNING");
  1363             close(fd);
  1364         } else {
   1365             printf("%-12s\n","TERMINATED");
   1366         }
   1367     }
   1369     fflush(stdout);
   1370 }
  1371 #endif /* defined(_PR_PTHREADS) */ 
   1373 PRWord *_MD_HomeGCRegisters(PRThread *t, int isCurrent, int *np)
   1374 {
  1375 #if !defined(_PR_PTHREADS)
  1376     if (isCurrent) {
   1377         (void) setjmp(t->md.jb);
   1378     }
  1379     *np = sizeof(t->md.jb) / sizeof(PRWord);
  1380     return (PRWord *) (t->md.jb);
  1381 #else
  1382 	*np = 0;
  1383 	return NULL;
   1384 #endif
   1385 }
   1387 void _MD_EarlyInit(void)
   1388 {
  1389 #if !defined(_PR_PTHREADS)
  1390     char *eval;
  1391     int fd;
  1392 	extern int __ateachexit(void (*func)(void));
  1394     sigemptyset(&ints_off);
  1395     sigaddset(&ints_off, SIGALRM);
  1396     sigaddset(&ints_off, SIGIO);
  1397     sigaddset(&ints_off, SIGCLD);
  1399     if (eval = getenv("_NSPR_TERMINATE_ON_ERROR"))
  1400         _nspr_terminate_on_error = (0 == atoi(eval) == 0) ? PR_FALSE : PR_TRUE;
  1402     fd = open("/dev/zero",O_RDWR , 0);
  1403     if (fd < 0) {
  1404         perror("open /dev/zero failed");
   1405         exit(1);
   1406     }
  1407     /*
  1408      * Set up the sproc private data area.
  1409      * This region exists at the same address, _nspr_sproc_private, for
  1410      * every sproc, but each sproc gets a private copy of the region.
  1411      */
  1412     _nspr_sproc_private = (char*)mmap(0, _pr_pageSize, PROT_READ | PROT_WRITE,
  1413         MAP_PRIVATE| MAP_LOCAL, fd, 0);
  1414     if (_nspr_sproc_private == (void*)-1) {
  1415         perror("mmap /dev/zero failed");
   1416         exit(1);
   1417     }
  1418 	_MD_SET_SPROC_PID(getpid());	
  1419     close(fd);
  1420 	__ateachexit(irix_detach_sproc);
  1421 #endif
  1422     _MD_IrixIntervalInit();
  1423 }  /* _MD_EarlyInit */
   1425 void _MD_IrixInit(void)
   1426 {
  1427 #if !defined(_PR_PTHREADS)
  1428     struct sigaction sigact;
  1429     PRThread *me = _PR_MD_CURRENT_THREAD();
  1430 	int rv;
  1432 #ifdef _PR_HAVE_SGI_PRDA_PROCMASK
  1433 	/*
  1434 	 * enable user-level processing of sigprocmask(); this is an undocumented
  1435 	 * feature available in Irix 6.2, 6.3, 6.4 and 6.5
  1436 	 */
  1437 	__sgi_prda_procmask(USER_LEVEL);
  1438 #endif
  1440 	/*
  1441 	 * set up SIGUSR1 handler; this is used to save state
  1442 	 * during PR_SuspendAll
  1443 	 */
  1444 	sigact.sa_handler = save_context_and_block;
  1445 	sigact.sa_flags = SA_RESTART;
  1446 	sigact.sa_mask = ints_off;
  1447 	sigaction(SIGUSR1, &sigact, 0);
  1449     /*
  1450      * Change the name of the core file from core to core.pid,
  1451      * This is inherited by the sprocs created by this process
  1452      */
  1453 #ifdef PR_COREPID
  1454     prctl(PR_COREPID, 0, 1);
  1455 #endif
  1456     /*
  1457      * Irix-specific terminate on error processing
  1458      */
  1459 	/*
  1460 	 * PR_SETABORTSIG is a new command implemented in a patch to
  1461 	 * Irix 6.2, 6.3 and 6.4. This causes a signal to be sent to all
  1462 	 * sprocs in the process when one of them terminates abnormally
  1464 	 */
  1465 	if (prctl(PR_SETABORTSIG, SIGKILL) < 0) {
  1466 		/*
  1467 		 *  if (errno == EINVAL)
  1469 		 *	PR_SETABORTSIG not supported under this OS.
  1470 		 *	You may want to get a recent kernel rollup patch that
  1471 		 *	supports this feature.
   1473 		 */
   1474 	}
  1475 	/*
  1476 	 * PR_SETEXITSIG -  send the SIGCLD signal to the parent
  1477 	 *            sproc when any sproc terminates
  1479 	 *    This is used to cause the entire application to
  1480 	 *    terminate when    any sproc terminates abnormally by
  1481 	 *     receipt of a SIGSEGV, SIGBUS or SIGABRT signal.
  1482 	 *    If this is not done, the application may seem
  1483 	 *     "hung" to the user because the other sprocs may be
  1484 	 *    waiting for resources held by the
  1485 	 *    abnormally-terminating sproc.
  1486 	 */
  1487 	prctl(PR_SETEXITSIG, 0);
  1489 	sigact.sa_handler = sigchld_handler;
  1490 	sigact.sa_flags = SA_RESTART;
  1491 	sigact.sa_mask = ints_off;
  1492 	sigaction(SIGCLD, &sigact, NULL);
  1494     /*
  1495      * setup stack fields for the primordial thread
  1496      */
  1497     me->stack->stackSize = prctl(PR_GETSTACKSIZE);
  1498     me->stack->stackBottom = me->stack->stackTop - me->stack->stackSize;
  1500     rv = pipe(_pr_irix_primoridal_cpu_fd);
  1501     PR_ASSERT(rv == 0);
  1502 #ifndef _PR_USE_POLL
  1503     _PR_IOQ_MAX_OSFD(me->cpu) = _pr_irix_primoridal_cpu_fd[0];
  1504     FD_SET(_pr_irix_primoridal_cpu_fd[0], &_PR_FD_READ_SET(me->cpu));
  1505 #endif
  1507 	libc_handle = dlopen("libc.so",RTLD_NOW);
  1508 	PR_ASSERT(libc_handle != NULL);
  1509 	libc_exit = (void (*)(int)) dlsym(libc_handle, "exit");
  1510 	PR_ASSERT(libc_exit != NULL);
  1511 	/* dlclose(libc_handle); */
  1513 #endif /* _PR_PTHREADS */
   1515     _PR_UnixInit();
   1516 }
  1518 /**************************************************************************/
  1519 /************** code and such for NSPR 2.0's interval times ***************/
  1520 /**************************************************************************/
  1522 #define PR_PSEC_PER_SEC 1000000000000ULL  /* 10^12 */
  1524 #ifndef SGI_CYCLECNTR_SIZE
  1525 #define SGI_CYCLECNTR_SIZE      165     /* Size user needs to use to read CC */
  1526 #endif
  1528 static PRIntn mmem_fd = -1;
  1529 static PRIntn clock_width = 0;
  1530 static void *iotimer_addr = NULL;
  1531 static PRUint32 pr_clock_mask = 0;
  1532 static PRUint32 pr_clock_shift = 0;
  1533 static PRIntervalTime pr_ticks = 0;
  1534 static PRUint32 pr_clock_granularity = 1;
  1535 static PRUint32 pr_previous = 0, pr_residual = 0;
  1536 static PRUint32 pr_ticks_per_second = 0;
  1538 extern PRIntervalTime _PR_UNIX_GetInterval(void);
  1539 extern PRIntervalTime _PR_UNIX_TicksPerSecond(void);
   1541 static void _MD_IrixIntervalInit(void)
   1542 {
  1543     /*
  1544      * As much as I would like, the service available through this
  1545      * interface on R3000's (aka, IP12) just isn't going to make it.
   1546      * The register is only 24 bits wide, and rolls over at a voracious
  1547      * rate.
  1548      */
  1549     PRUint32 one_tick = 0;
  1550     struct utsname utsinfo;
  1551     uname(&utsinfo);
  1552     if ((strncmp("IP12", utsinfo.machine, 4) != 0)
   1553         && ((mmem_fd = open("/dev/mmem", O_RDONLY)) != -1))
   1554     {
   1555         int poffmask = getpagesize() - 1;
  1556         __psunsigned_t phys_addr, raddr, cycleval;
  1558         phys_addr = syssgi(SGI_QUERY_CYCLECNTR, &cycleval);
  1559         raddr = phys_addr & ~poffmask;
  1560         iotimer_addr = mmap(
  1561             0, poffmask, PROT_READ, MAP_PRIVATE, mmem_fd, (__psint_t)raddr);
  1563         clock_width = syssgi(SGI_CYCLECNTR_SIZE);
   1564         if (clock_width < 0)
   1565         {
   1566             /* 
  1567              * We must be executing on a 6.0 or earlier system, since the
  1568              * SGI_CYCLECNTR_SIZE call is not supported.
  1570              * The only pre-6.1 platforms with 64-bit counters are
  1571              * IP19 and IP21 (Challenge, PowerChallenge, Onyx).
  1572              */
  1573             if (!strncmp(utsinfo.machine, "IP19", 4) ||
  1574                 !strncmp(utsinfo.machine, "IP21", 4))
  1575                 clock_width = 64;
  1576             else
   1577                 clock_width = 32;
   1578         }
   1580         /*
  1581          * 'cycleval' is picoseconds / increment of the counter.
  1582          * I'm pushing for a tick to be 100 microseconds, 10^(-4).
  1583          * That leaves 10^(-8) left over, or 10^8 / cycleval.
  1584          * Did I do that right?
  1585          */
  1587         one_tick =  100000000UL / cycleval ;  /* 100 microseconds */
   1589         while (0 != one_tick)
   1590         {
   1591             pr_clock_shift += 1;
   1592             one_tick = one_tick >> 1;
   1593             pr_clock_granularity = pr_clock_granularity << 1;
   1594         }
   1595         pr_clock_mask = pr_clock_granularity - 1;  /* to make a mask out of it */
  1596         pr_ticks_per_second = PR_PSEC_PER_SEC
  1597                 / ((PRUint64)pr_clock_granularity * (PRUint64)cycleval);
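        /*
         * Worked example with a hypothetical cycleval of 40000 (a counter
         * that increments every 40000 picoseconds, i.e. 25 MHz):
         * one_tick = 10^8 / 40000 = 2500 increments per 100 microseconds,
         * so the loop above runs 12 times, giving pr_clock_shift = 12 and
         * pr_clock_granularity = 4096.  Then pr_ticks_per_second =
         * 10^12 / (4096 * 40000) = 6103, i.e. one NSPR tick covers roughly
         * 164 microseconds of counter time.
         */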
  1599         iotimer_addr = (void*)
   1600             ((__psunsigned_t)iotimer_addr + (phys_addr & poffmask));
   1601     }
   1602     else
   1603     {
   1604         pr_ticks_per_second = _PR_UNIX_TicksPerSecond();
   1605     }
   1606 }  /* _MD_IrixIntervalInit */
   1608 PRIntervalTime _MD_IrixIntervalPerSec(void)
   1609 {
   1610     return pr_ticks_per_second;
   1611 }
   1613 PRIntervalTime _MD_IrixGetInterval(void)
   1614 {
   1615     if (mmem_fd != -1)
   1616     {
   1617         if (64 == clock_width)
   1618         {
   1619             PRUint64 temp = *(PRUint64*)iotimer_addr;
   1620             pr_ticks = (PRIntervalTime)(temp >> pr_clock_shift);
   1621         }
   1622         else
   1623         {
   1624             PRIntervalTime ticks = pr_ticks;
   1625             PRUint32 now = *(PRUint32*)iotimer_addr, temp;
   1626             PRUint32 residual = pr_residual, previous = pr_previous;
   1628             temp = now - previous + residual;
   1629             residual = temp & pr_clock_mask;
   1630             ticks += temp >> pr_clock_shift;
   1632             pr_previous = now;
   1633             pr_residual = residual;
   1634             pr_ticks = ticks;
   1635         }
   1636     }
   1637     else
   1638     {
   1639         /*
   1640          * No fast access. Use the time of day clock. This isn't the
   1641          * right answer since this clock can get set back, tick at odd
   1642          * rates, and it's expensive to acquire.
   1643          */
   1644         pr_ticks = _PR_UNIX_GetInterval();
   1645     }
   1646     return pr_ticks;
   1647 }  /* _MD_IrixGetInterval */
