nsprpub/pr/src/md/unix/irix.c

Wed, 31 Dec 2014 06:09:35 +0100

author
Michael Schloh von Bennewitz <michael@schloh.com>
date
Wed, 31 Dec 2014 06:09:35 +0100
changeset 0
6474c204b198
permissions
-rw-r--r--

Cloned from the upstream tor-browser origin at tag tor-browser-31.3.0esr-4.5-1-build1,
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f, for development purposes.

michael@0 1 /* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
michael@0 2 /* This Source Code Form is subject to the terms of the Mozilla Public
michael@0 3 * License, v. 2.0. If a copy of the MPL was not distributed with this
michael@0 4 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
michael@0 5
michael@0 6 #include "primpl.h"
michael@0 7
michael@0 8 #include <signal.h>
michael@0 9
michael@0 10 #include <sys/types.h>
michael@0 11 #include <fcntl.h>
michael@0 12 #include <unistd.h>
michael@0 13 #include <string.h>
michael@0 14 #include <sys/mman.h>
michael@0 15 #include <sys/syssgi.h>
michael@0 16 #include <sys/time.h>
michael@0 17 #include <sys/immu.h>
michael@0 18 #include <sys/utsname.h>
michael@0 19 #include <sys/sysmp.h>
michael@0 20 #include <sys/pda.h>
michael@0 21 #include <sys/prctl.h>
michael@0 22 #include <sys/wait.h>
michael@0 23 #include <sys/resource.h>
michael@0 24 #include <sys/procfs.h>
michael@0 25 #include <task.h>
michael@0 26 #include <dlfcn.h>
michael@0 27
michael@0 28 static void _MD_IrixIntervalInit(void);
michael@0 29
michael@0 30 #if defined(_PR_PTHREADS)
michael@0 31 /*
michael@0 32 * for compatibility with classic nspr
michael@0 33 */
/*
 * No-op under pthreads: the sproc bookkeeping done by the classic-nspr
 * version of this function (below) is unnecessary, but the symbol is
 * kept so existing callers continue to link.
 */
void _PR_IRIX_CHILD_PROCESS()
{
}
michael@0 37 #else /* defined(_PR_PTHREADS) */
michael@0 38
static void irix_detach_sproc(void);
char *_nspr_sproc_private;    /* ptr. to private region in every sproc */

extern PRUintn _pr_numCPU;

/*
 * One shared-memory arena plus its link in the global arena list.
 * Locks and poll semaphores are carved out of these arenas; a new
 * arena is appended when the existing ones run out of space.
 */
typedef struct nspr_arena {
    PRCList links;
    usptr_t *usarena;
} nspr_arena;

/* Recover the enclosing nspr_arena from its embedded list link. */
#define ARENA_PTR(qp) \
    ((nspr_arena *) ((char*) (qp) - offsetof(nspr_arena , links)))

static usptr_t *alloc_new_arena(void);

PRCList arena_list = PR_INIT_STATIC_CLIST(&arena_list);
ulock_t arena_list_lock;    /* guards arena_list */
nspr_arena first_arena;     /* primary arena, set up in _MD_InitLocks */
int _nspr_irix_arena_cnt = 1;

/* Queue of deferred sproc-creation requests (see create_sproc). */
PRCList sproc_list = PR_INIT_STATIC_CLIST(&sproc_list);
ulock_t sproc_list_lock;    /* guards sproc_list */

/* Saved arguments for a sprocsp() call performed by the primordial cpu. */
typedef struct sproc_data {
    void (*entry) (void *, size_t);    /* sproc startup function */
    unsigned inh;                      /* sproc attribute flags */
    void *arg;                         /* thread parameter */
    caddr_t sp;                        /* stack address */
    size_t len;                        /* stack size */
    int *pid;                          /* out: pid of the created sproc */
    int creator_pid;                   /* requester; unblocked when done */
} sproc_data;

typedef struct sproc_params {
    PRCList links;
    sproc_data sd;
} sproc_params;

/* Recover the enclosing sproc_params from its embedded list link. */
#define SPROC_PARAMS_PTR(qp) \
    ((sproc_params *) ((char*) (qp) - offsetof(sproc_params , links)))

/* Live-object counters for locks/semaphores allocated from the arenas. */
long _nspr_irix_lock_cnt = 0;
long _nspr_irix_sem_cnt = 0;
long _nspr_irix_pollsem_cnt = 0;

usptr_t *_pr_usArena;       /* the primary shared arena */
ulock_t _pr_heapLock;

usema_t *_pr_irix_exit_sem; /* posted by cpus/threads as they shut down */
PRInt32 _pr_irix_exit_now = 0;          /* nonzero once shutdown begins */
PRInt32 _pr_irix_process_exit_code = 0; /* exit code for PR_ProcessExit */
PRInt32 _pr_irix_process_exit = 0; /* process exiting due to call to
                                      PR_ProcessExit */

/* Pipe used to wake cpu 0; NB: "primoridal" misspelling is part of the name. */
int _pr_irix_primoridal_cpu_fd[2] = { -1, -1 };
static void (*libc_exit)(int) = NULL;   /* libc exit(), resolved via dlsym */
static void *libc_handle = NULL;        /* dlopen handle used for the above */

#define _NSPR_DEF_INITUSERS     100 /* default value of CONF_INITUSERS */
#define _NSPR_DEF_INITSIZE  (4 * 1024 * 1024)   /* 4 MB */

int _irix_initusers = _NSPR_DEF_INITUSERS;
int _irix_initsize = _NSPR_DEF_INITSIZE;

PRIntn _pr_io_in_progress, _pr_clock_in_progress;

/* sproc creation statistics and the list of live sprocs. */
PRInt32 _pr_md_irix_sprocs_created, _pr_md_irix_sprocs_failed;
PRInt32 _pr_md_irix_sprocs = 1;
PRCList _pr_md_irix_sproc_list =
PR_INIT_STATIC_CLIST(&_pr_md_irix_sproc_list);

sigset_t ints_off;          /* signal set blocked in critical regions */
extern sigset_t timer_set;

#if !defined(PR_SETABORTSIG)
#define PR_SETABORTSIG 18
#endif
/*
 * terminate the entire application if any sproc exits abnormally
 */
PRBool _nspr_terminate_on_error = PR_TRUE;
michael@0 120
michael@0 121 /*
michael@0 122 * exported interface to set the shared arena parameters
michael@0 123 */
/*
 * Exported interface to set the shared arena parameters.
 * Must be called before _MD_InitLocks() reads these globals to have
 * any effect; the values may still be overridden by the
 * _NSPR_IRIX_INITUSERS/_NSPR_IRIX_INITSIZE environment variables.
 */
void _PR_Irix_Set_Arena_Params(PRInt32 initusers, PRInt32 initsize)
{
    _irix_initusers = initusers;
    _irix_initsize = initsize;
}
michael@0 129
/*
 * Create a fresh shared arena backed by /dev/zero (anonymous shared
 * memory).  Returns NULL on failure; inherits the usconfig() settings
 * established in _MD_InitLocks().
 */
static usptr_t *alloc_new_arena()
{
    return(usinit("/dev/zero"));
}
michael@0 134
/*
 * Allocate a pollable semaphore (initial value |val|) for |mdthr| out
 * of one of the shared arenas.  If every existing arena is full
 * (usnewpollsema fails with ENOMEM) a new arena is created, appended
 * to arena_list, and the allocation retried once.
 *
 * On PR_SUCCESS fills in mdthr->cvar_pollsem and mdthr->pollsem_arena
 * and bumps _nspr_irix_pollsem_cnt; returns PR_FAILURE otherwise.
 */
static PRStatus new_poll_sem(struct _MDThread *mdthr, int val)
{
    PRIntn _is;
    PRStatus rv = PR_SUCCESS;
    usema_t *sem = NULL;
    PRCList *qp;
    nspr_arena *arena;
    usptr_t *irix_arena;
    PRThread *me = _MD_GET_ATTACHED_THREAD();

    /* local (user-level) threads must hold the lock with interrupts off */
    if (me && !_PR_IS_NATIVE_THREAD(me))
        _PR_INTSOFF(_is);
    _PR_LOCK(arena_list_lock);
    /* try each existing arena first */
    for (qp = arena_list.next; qp != &arena_list; qp = qp->next) {
        arena = ARENA_PTR(qp);
        sem = usnewpollsema(arena->usarena, val);
        if (sem != NULL) {
            mdthr->cvar_pollsem = sem;
            mdthr->pollsem_arena = arena->usarena;
            break;
        }
    }
    if (sem == NULL) {
        /*
         * If no space left in the arena allocate a new one.
         */
        if (errno == ENOMEM) {
            arena = PR_NEWZAP(nspr_arena);
            if (arena != NULL) {
                irix_arena = alloc_new_arena();
                if (irix_arena) {
                    /* keep the new arena for future allocations too */
                    PR_APPEND_LINK(&arena->links, &arena_list);
                    _nspr_irix_arena_cnt++;
                    arena->usarena = irix_arena;
                    sem = usnewpollsema(arena->usarena, val);
                    if (sem != NULL) {
                        mdthr->cvar_pollsem = sem;
                        mdthr->pollsem_arena = arena->usarena;
                    } else
                        rv = PR_FAILURE;
                } else {
                    PR_DELETE(arena);
                    rv = PR_FAILURE;
                }

            } else
                rv = PR_FAILURE;
        } else
            rv = PR_FAILURE;
    }
    _PR_UNLOCK(arena_list_lock);
    if (me && !_PR_IS_NATIVE_THREAD(me))
        _PR_FAST_INTSON(_is);
    if (rv == PR_SUCCESS)
        _MD_ATOMIC_INCREMENT(&_nspr_irix_pollsem_cnt);
    return rv;
}
michael@0 192
/*
 * Release the poll semaphore allocated by new_poll_sem() back to its
 * arena and decrement the live-semaphore count.
 */
static void free_poll_sem(struct _MDThread *mdthr)
{
    PRIntn _is;
    PRThread *me = _MD_GET_ATTACHED_THREAD();

    /* local (user-level) threads free with interrupts off */
    if (me && !_PR_IS_NATIVE_THREAD(me))
        _PR_INTSOFF(_is);
    usfreepollsema(mdthr->cvar_pollsem, mdthr->pollsem_arena);
    if (me && !_PR_IS_NATIVE_THREAD(me))
        _PR_FAST_INTSON(_is);
    _MD_ATOMIC_DECREMENT(&_nspr_irix_pollsem_cnt);
}
michael@0 205
/*
 * Allocate a usnewlock() lock for |lockp| out of one of the shared
 * arenas, growing the arena list if every existing arena is full
 * (ENOMEM) — same strategy as new_poll_sem() above.
 *
 * On PR_SUCCESS fills in lockp->lock and lockp->arena and bumps
 * _nspr_irix_lock_cnt; returns PR_FAILURE otherwise.
 */
static PRStatus new_lock(struct _MDLock *lockp)
{
    PRIntn _is;
    PRStatus rv = PR_SUCCESS;
    ulock_t lock = NULL;
    PRCList *qp;
    nspr_arena *arena;
    usptr_t *irix_arena;
    PRThread *me = _MD_GET_ATTACHED_THREAD();

    /* local (user-level) threads must hold the lock with interrupts off */
    if (me && !_PR_IS_NATIVE_THREAD(me))
        _PR_INTSOFF(_is);
    _PR_LOCK(arena_list_lock);
    /* try each existing arena first */
    for (qp = arena_list.next; qp != &arena_list; qp = qp->next) {
        arena = ARENA_PTR(qp);
        lock = usnewlock(arena->usarena);
        if (lock != NULL) {
            lockp->lock = lock;
            lockp->arena = arena->usarena;
            break;
        }
    }
    if (lock == NULL) {
        /*
         * If no space left in the arena allocate a new one.
         */
        if (errno == ENOMEM) {
            arena = PR_NEWZAP(nspr_arena);
            if (arena != NULL) {
                irix_arena = alloc_new_arena();
                if (irix_arena) {
                    /* keep the new arena for future allocations too */
                    PR_APPEND_LINK(&arena->links, &arena_list);
                    _nspr_irix_arena_cnt++;
                    arena->usarena = irix_arena;
                    lock = usnewlock(irix_arena);
                    if (lock != NULL) {
                        lockp->lock = lock;
                        lockp->arena = arena->usarena;
                    } else
                        rv = PR_FAILURE;
                } else {
                    PR_DELETE(arena);
                    rv = PR_FAILURE;
                }

            } else
                rv = PR_FAILURE;
        } else
            rv = PR_FAILURE;
    }
    _PR_UNLOCK(arena_list_lock);
    if (me && !_PR_IS_NATIVE_THREAD(me))
        _PR_FAST_INTSON(_is);
    if (rv == PR_SUCCESS)
        _MD_ATOMIC_INCREMENT(&_nspr_irix_lock_cnt);
    return rv;
}
michael@0 263
/*
 * Release the lock allocated by new_lock() back to its arena and
 * decrement the live-lock count.
 */
static void free_lock(struct _MDLock *lockp)
{
    PRIntn _is;
    PRThread *me = _MD_GET_ATTACHED_THREAD();

    /* local (user-level) threads free with interrupts off */
    if (me && !_PR_IS_NATIVE_THREAD(me))
        _PR_INTSOFF(_is);
    usfreelock(lockp->lock, lockp->arena);
    if (me && !_PR_IS_NATIVE_THREAD(me))
        _PR_FAST_INTSON(_is);
    _MD_ATOMIC_DECREMENT(&_nspr_irix_lock_cnt);
}
michael@0 276
/*
 * Machine-dependent entry point to destroy an _MDLock.
 *
 * NOTE(review): free_lock() performs its own _PR_INTSOFF/_PR_FAST_INTSON
 * pair, so the outer pair here looks redundant and may re-enable
 * interrupts earlier than intended — confirm against the _PR_INTSOFF
 * implementation before changing.
 */
void _MD_FREE_LOCK(struct _MDLock *lockp)
{
    PRIntn _is;
    PRThread *me = _MD_GET_ATTACHED_THREAD();

    if (me && !_PR_IS_NATIVE_THREAD(me))
        _PR_INTSOFF(_is);
    free_lock(lockp);
    if (me && !_PR_IS_NATIVE_THREAD(me))
        _PR_FAST_INTSON(_is);
}
michael@0 288
michael@0 289 /*
michael@0 290 * _MD_get_attached_thread
michael@0 291 * Return the thread pointer of the current thread if it is attached.
michael@0 292 *
michael@0 293 * This function is needed for Irix because the thread-local-storage is
michael@0 294 * implemented by mmapin'g a page with the MAP_LOCAL flag. This causes the
michael@0 295 * sproc-private page to inherit contents of the page of the caller of sproc().
michael@0 296 */
michael@0 297 PRThread *_MD_get_attached_thread(void)
michael@0 298 {
michael@0 299
michael@0 300 if (_MD_GET_SPROC_PID() == get_pid())
michael@0 301 return _MD_THIS_THREAD();
michael@0 302 else
michael@0 303 return 0;
michael@0 304 }
michael@0 305
michael@0 306 /*
michael@0 307 * _MD_get_current_thread
michael@0 308 * Return the thread pointer of the current thread (attaching it if
michael@0 309 * necessary)
michael@0 310 */
michael@0 311 PRThread *_MD_get_current_thread(void)
michael@0 312 {
michael@0 313 PRThread *me;
michael@0 314
michael@0 315 me = _MD_GET_ATTACHED_THREAD();
michael@0 316 if (NULL == me) {
michael@0 317 me = _PRI_AttachThread(
michael@0 318 PR_USER_THREAD, PR_PRIORITY_NORMAL, NULL, 0);
michael@0 319 }
michael@0 320 PR_ASSERT(me != NULL);
michael@0 321 return(me);
michael@0 322 }
michael@0 323
michael@0 324 /*
michael@0 325 * irix_detach_sproc
michael@0 326 * auto-detach a sproc when it exits
michael@0 327 */
michael@0 328 void irix_detach_sproc(void)
michael@0 329 {
michael@0 330 PRThread *me;
michael@0 331
michael@0 332 me = _MD_GET_ATTACHED_THREAD();
michael@0 333 if ((me != NULL) && (me->flags & _PR_ATTACHED)) {
michael@0 334 _PRI_DetachThread();
michael@0 335 }
michael@0 336 }
michael@0 337
michael@0 338
/*
 * Machine-dependent entry point to create an _MDLock.
 *
 * NOTE(review): new_lock() performs its own _PR_INTSOFF/_PR_FAST_INTSON
 * pair, so the outer pair here appears redundant (cf. _MD_FREE_LOCK) —
 * confirm against the _PR_INTSOFF implementation before changing.
 */
PRStatus _MD_NEW_LOCK(struct _MDLock *lockp)
{
    PRStatus rv;
    PRIntn is;
    PRThread *me = _MD_GET_ATTACHED_THREAD();

    if (me && !_PR_IS_NATIVE_THREAD(me))
        _PR_INTSOFF(is);
    rv = new_lock(lockp);
    if (me && !_PR_IS_NATIVE_THREAD(me))
        _PR_FAST_INTSON(is);
    return rv;
}
michael@0 352
/*
 * SIGCLD handler: reap exited sprocs and, if one died from a crash
 * signal, take the whole application down.
 *
 * NOTE(review): _exit() is passed the raw wait status, not an exit
 * code extracted with WEXITSTATUS — presumably intentional upstream,
 * but worth confirming.
 */
static void
sigchld_handler(int sig)
{
    pid_t pid;
    int status;

    /*
     * If an sproc exited abnormally send a SIGKILL signal to all the
     * sprocs in the process to terminate the application
     */
    while ((pid = waitpid(0, &status, WNOHANG)) > 0) {
        if (WIFSIGNALED(status) && ((WTERMSIG(status) == SIGSEGV) ||
            (WTERMSIG(status) == SIGBUS) ||
            (WTERMSIG(status) == SIGABRT) ||
            (WTERMSIG(status) == SIGILL))) {

            /* arrange for SIGKILL to fan out to every sproc on exit */
            prctl(PR_SETEXITSIG, SIGKILL);
            _exit(status);
        }
    }
}
michael@0 374
/*
 * SIGUSR1 handler used to suspend a sproc: save the current register
 * context, signal the suspending thread that the context is captured,
 * then block until resumed with unblockproc().
 *
 * The order matters: the context must be saved in me->md.jb before the
 * suspender is released, and blockproc() must come last.
 */
static void save_context_and_block(int sig)
{
    PRThread *me = _PR_MD_CURRENT_THREAD();
    _PRCPU *cpu = _PR_MD_CURRENT_CPU();

    /*
     * save context
     */
    (void) setjmp(me->md.jb);
    /*
     * unblock the suspending thread
     */
    if (me->cpu) {
        /*
         * I am a cpu thread, not a user-created GLOBAL thread
         */
        unblockproc(cpu->md.suspending_id);
    } else {
        unblockproc(me->md.suspending_id);
    }
    /*
     * now, block current thread
     */
    blockproc(getpid());
}
michael@0 400
/*
** The irix kernel has a bug in it which causes async connect's which are
** interrupted by a signal to fail terribly (EADDRINUSE is returned).
** We work around the bug by blocking signals during the async connect
** attempt.
**
** Note: the timeout parameter is accepted for interface compatibility
** but is not used here; the connect itself is non-blocking/async.
*/
PRInt32 _MD_irix_connect(
    PRInt32 osfd, const PRNetAddr *addr, PRInt32 addrlen, PRIntervalTime timeout)
{
    PRInt32 rv;
    sigset_t oldset;

    /* block the NSPR interrupt signals for the duration of connect() */
    sigprocmask(SIG_BLOCK, &ints_off, &oldset);
    rv = connect(osfd, addr, addrlen);
    sigprocmask(SIG_SETMASK, &oldset, 0);

    return(rv);
}
michael@0 419
michael@0 420 #include "prprf.h"
michael@0 421
michael@0 422 /********************************************************************/
michael@0 423 /********************************************************************/
michael@0 424 /*************** Various thread like things for IRIX ****************/
michael@0 425 /********************************************************************/
michael@0 426 /********************************************************************/
michael@0 427
/*
 * Return thread |t|'s saved stack pointer.  If |t| is the calling
 * thread its context is refreshed with setjmp() first so the value is
 * current.  Asserts that the result lies within t's stack bounds.
 */
void *_MD_GetSP(PRThread *t)
{
    PRThread *me = _PR_MD_CURRENT_THREAD();
    void *sp;

    if (me == t)
        (void) setjmp(t->md.jb);

    /* JB_SP is the stack-pointer slot of the jmp_buf on IRIX */
    sp = (void *)(t->md.jb[JB_SP]);
    PR_ASSERT((sp >= (void *) t->stack->stackBottom) &&
        (sp <= (void *) (t->stack->stackBottom + t->stack->stackSize)));
    return(sp);
}
michael@0 441
michael@0 442 void _MD_InitLocks()
michael@0 443 {
michael@0 444 char buf[200];
michael@0 445 char *init_users, *init_size;
michael@0 446
michael@0 447 PR_snprintf(buf, sizeof(buf), "/dev/zero");
michael@0 448
michael@0 449 if (init_users = getenv("_NSPR_IRIX_INITUSERS"))
michael@0 450 _irix_initusers = atoi(init_users);
michael@0 451
michael@0 452 if (init_size = getenv("_NSPR_IRIX_INITSIZE"))
michael@0 453 _irix_initsize = atoi(init_size);
michael@0 454
michael@0 455 usconfig(CONF_INITUSERS, _irix_initusers);
michael@0 456 usconfig(CONF_INITSIZE, _irix_initsize);
michael@0 457 usconfig(CONF_AUTOGROW, 1);
michael@0 458 usconfig(CONF_AUTORESV, 1);
michael@0 459 if (usconfig(CONF_ARENATYPE, US_SHAREDONLY) < 0) {
michael@0 460 perror("PR_Init: unable to config mutex arena");
michael@0 461 exit(-1);
michael@0 462 }
michael@0 463
michael@0 464 _pr_usArena = usinit(buf);
michael@0 465 if (!_pr_usArena) {
michael@0 466 fprintf(stderr,
michael@0 467 "PR_Init: Error - unable to create lock/monitor arena\n");
michael@0 468 exit(-1);
michael@0 469 }
michael@0 470 _pr_heapLock = usnewlock(_pr_usArena);
michael@0 471 _nspr_irix_lock_cnt++;
michael@0 472
michael@0 473 arena_list_lock = usnewlock(_pr_usArena);
michael@0 474 _nspr_irix_lock_cnt++;
michael@0 475
michael@0 476 sproc_list_lock = usnewlock(_pr_usArena);
michael@0 477 _nspr_irix_lock_cnt++;
michael@0 478
michael@0 479 _pr_irix_exit_sem = usnewsema(_pr_usArena, 0);
michael@0 480 _nspr_irix_sem_cnt = 1;
michael@0 481
michael@0 482 first_arena.usarena = _pr_usArena;
michael@0 483 PR_INIT_CLIST(&first_arena.links);
michael@0 484 PR_APPEND_LINK(&first_arena.links, &arena_list);
michael@0 485 }
michael@0 486
/* _PR_IRIX_CHILD_PROCESS is a private API for Server group */
/*
 * After fork(), record the child's pid in the primordial cpu and in
 * the sproc-private TLS page.  Only legal when no extra cpus or
 * global threads exist (asserted below).
 */
void _PR_IRIX_CHILD_PROCESS()
{
    extern PRUint32 _pr_global_threads;

    PR_ASSERT(_PR_MD_CURRENT_CPU() == _pr_primordialCPU);
    PR_ASSERT(_pr_numCPU == 1);
    PR_ASSERT(_pr_global_threads == 0);
    /*
     * save the new pid
     */
    _pr_primordialCPU->md.id = getpid();
    _MD_SET_SPROC_PID(getpid());
}
michael@0 501
/*
 * Wait for |thread|'s cvar poll semaphore to be posted, with an
 * optional timeout.
 *
 * Protocol: uspsema (via _PR_WAIT_SEM) is only called when the
 * previous wait did NOT time out; after a timeout the thread is
 * already queued on the semaphore, so the next wait must go straight
 * to poll/select on cvar_pollsemfd.  thread->md.cvar_pollsem_select
 * records which state we are in across calls.
 *
 * Returns PR_SUCCESS if the semaphore was acquired, PR_FAILURE on
 * timeout.
 */
static PRStatus pr_cvar_wait_sem(PRThread *thread, PRIntervalTime timeout)
{
    int rv;

#ifdef _PR_USE_POLL
    struct pollfd pfd;
    int msecs;

    if (timeout == PR_INTERVAL_NO_TIMEOUT)
        msecs = -1;    /* poll: negative timeout means wait forever */
    else
        msecs = PR_IntervalToMilliseconds(timeout);
#else
    struct timeval tv, *tvp;
    fd_set rd;

    if(timeout == PR_INTERVAL_NO_TIMEOUT)
        tvp = NULL;    /* select: NULL timeout means wait forever */
    else {
        tv.tv_sec = PR_IntervalToSeconds(timeout);
        tv.tv_usec = PR_IntervalToMicroseconds(
            timeout - PR_SecondsToInterval(tv.tv_sec));
        tvp = &tv;
    }
    FD_ZERO(&rd);
    FD_SET(thread->md.cvar_pollsemfd, &rd);
#endif

    /*
     * call uspsema only if a previous select call on this semaphore
     * did not timeout
     */
    if (!thread->md.cvar_pollsem_select) {
        rv = _PR_WAIT_SEM(thread->md.cvar_pollsem);
        PR_ASSERT(rv >= 0);
    } else
        rv = 0;
again:
    if(!rv) {
#ifdef _PR_USE_POLL
        pfd.events = POLLIN;
        pfd.fd = thread->md.cvar_pollsemfd;
        rv = _MD_POLL(&pfd, 1, msecs);
#else
        rv = _MD_SELECT(thread->md.cvar_pollsemfd + 1, &rd, NULL,NULL,tvp);
#endif
        /* retry if interrupted by a signal */
        if ((rv == -1) && (errno == EINTR)) {
            rv = 0;
            goto again;
        }
        PR_ASSERT(rv >= 0);
    }

    if (rv > 0) {
        /*
         * acquired the semaphore, call uspsema next time
         */
        thread->md.cvar_pollsem_select = 0;
        return PR_SUCCESS;
    } else {
        /*
         * select timed out; must call select, not uspsema, when trying
         * to acquire the semaphore the next time
         */
        thread->md.cvar_pollsem_select = 1;
        return PR_FAILURE;
    }
}
michael@0 570
/*
 * Block the calling thread for up to |ticks|.  Global (sproc-backed)
 * threads wait on their cvar poll semaphore; local threads simply
 * switch context back to the scheduler.
 */
PRStatus _MD_wait(PRThread *thread, PRIntervalTime ticks)
{
    if ( thread->flags & _PR_GLOBAL_SCOPE ) {
        _MD_CHECK_FOR_EXIT();
        if (pr_cvar_wait_sem(thread, ticks) == PR_FAILURE) {
            _MD_CHECK_FOR_EXIT();
            /*
             * wait timed out
             */
            _PR_THREAD_LOCK(thread);
            if (thread->wait.cvar) {
                /*
                 * The thread will remove itself from the waitQ
                 * of the cvar in _PR_WaitCondVar
                 */
                thread->wait.cvar = NULL;
                thread->state = _PR_RUNNING;
                _PR_THREAD_UNLOCK(thread);
            } else {
                _PR_THREAD_UNLOCK(thread);
                /*
                 * This thread was woken up by a notifying thread
                 * at the same time as a timeout; so, consume the
                 * extra post operation on the semaphore
                 */
                _MD_CHECK_FOR_EXIT();
                pr_cvar_wait_sem(thread, PR_INTERVAL_NO_TIMEOUT);
            }
            _MD_CHECK_FOR_EXIT();
        }
    } else {
        /* local thread: yield back to the user-level scheduler */
        _PR_MD_SWITCH_CONTEXT(thread);
    }
    return PR_SUCCESS;
}
michael@0 606
michael@0 607 PRStatus _MD_WakeupWaiter(PRThread *thread)
michael@0 608 {
michael@0 609 PRThread *me = _PR_MD_CURRENT_THREAD();
michael@0 610 PRIntn is;
michael@0 611
michael@0 612 PR_ASSERT(_pr_md_idle_cpus >= 0);
michael@0 613 if (thread == NULL) {
michael@0 614 if (_pr_md_idle_cpus)
michael@0 615 _MD_Wakeup_CPUs();
michael@0 616 } else if (!_PR_IS_NATIVE_THREAD(thread)) {
michael@0 617 if (_pr_md_idle_cpus)
michael@0 618 _MD_Wakeup_CPUs();
michael@0 619 } else {
michael@0 620 PR_ASSERT(_PR_IS_NATIVE_THREAD(thread));
michael@0 621 if (!_PR_IS_NATIVE_THREAD(me))
michael@0 622 _PR_INTSOFF(is);
michael@0 623 _MD_CVAR_POST_SEM(thread);
michael@0 624 if (!_PR_IS_NATIVE_THREAD(me))
michael@0 625 _PR_FAST_INTSON(is);
michael@0 626 }
michael@0 627 return PR_SUCCESS;
michael@0 628 }
michael@0 629
/*
 * Create a new sproc.  sprocsp() may only be called from the
 * primordial cpu (cpu 0): if we are already a local thread on cpu 0 we
 * call it directly; otherwise the request is queued on sproc_list, the
 * primordial cpu is woken via its pipe, and we block until it has
 * performed the call (see _PR_MD_primordial_cpu).
 *
 * On return *pid holds the new sproc's pid (or sprocsp's error value).
 */
void create_sproc (void (*entry) (void *, size_t), unsigned inh,
                   void *arg, caddr_t sp, size_t len, int *pid)
{
    sproc_params sparams;
    char data;
    int rv;
    PRThread *me = _PR_MD_CURRENT_THREAD();

    if (!_PR_IS_NATIVE_THREAD(me) && (_PR_MD_CURRENT_CPU()->id == 0)) {
        *pid = sprocsp(entry,        /* startup func */
                       inh,          /* attribute flags */
                       arg,          /* thread param */
                       sp,           /* stack address */
                       len);         /* stack size */
    } else {
        /* package the request for the primordial cpu */
        sparams.sd.entry = entry;
        sparams.sd.inh = inh;
        sparams.sd.arg = arg;
        sparams.sd.sp = sp;
        sparams.sd.len = len;
        sparams.sd.pid = pid;
        sparams.sd.creator_pid = getpid();
        _PR_LOCK(sproc_list_lock);
        PR_APPEND_LINK(&sparams.links, &sproc_list);
        /* wake cpu 0; it services sproc_list in its idle loop */
        rv = write(_pr_irix_primoridal_cpu_fd[1], &data, 1);
        PR_ASSERT(rv == 1);
        _PR_UNLOCK(sproc_list_lock);
        /* wait until cpu 0 has created the sproc and unblocks us */
        blockproc(getpid());
    }
}
michael@0 660
/*
 * _PR_MD_WAKEUP_PRIMORDIAL_CPU
 *
 * wakeup cpu 0 by writing a byte to the pipe it polls in its idle loop
 */

void _PR_MD_WAKEUP_PRIMORDIAL_CPU()
{
    char data = '0';
    int rv;

    rv = write(_pr_irix_primoridal_cpu_fd[1], &data, 1);
    PR_ASSERT(rv == 1);
}
michael@0 675
/*
 * _PR_MD_primordial_cpu
 *
 * process events that need to executed by the primordial cpu on each
 * iteration through the idle loop: drain the queue of deferred
 * sprocsp() requests posted by create_sproc(), then unblock each
 * requester.
 */

void _PR_MD_primordial_cpu()
{
    PRCList *qp;
    sproc_params *sp;
    int pid;

    _PR_LOCK(sproc_list_lock);
    while ((qp = sproc_list.next) != &sproc_list) {
        sp = SPROC_PARAMS_PTR(qp);
        PR_REMOVE_LINK(&sp->links);
        pid = sp->sd.creator_pid;
        (*(sp->sd.pid)) = sprocsp(sp->sd.entry,  /* startup func */
                                  sp->sd.inh,    /* attribute flags */
                                  sp->sd.arg,    /* thread param */
                                  sp->sd.sp,     /* stack address */
                                  sp->sd.len);   /* stack size */
        /* release the thread blocked in create_sproc() */
        unblockproc(pid);
    }
    _PR_UNLOCK(sproc_list_lock);
}
michael@0 703
/*
 * Create a native (global-scope) thread backed by a new sproc.
 *
 * Sets up the thread's cvar poll semaphore and its fd, then creates
 * the sproc via create_sproc().  On any failure the semaphore/fd are
 * torn down in reverse order and PR_FAILURE is returned.  The new
 * sproc starts in |start| with |thread| as its argument.
 */
PRStatus _MD_CreateThread(PRThread *thread,
                          void (*start)(void *),
                          PRThreadPriority priority,
                          PRThreadScope scope,
                          PRThreadState state,
                          PRUint32 stackSize)
{
    typedef void (*SprocEntry) (void *, size_t);
    SprocEntry spentry = (SprocEntry)start;
    PRIntn is;
    PRThread *me = _PR_MD_CURRENT_THREAD();
    PRInt32 pid;
    PRStatus rv;

    if (!_PR_IS_NATIVE_THREAD(me))
        _PR_INTSOFF(is);
    thread->md.cvar_pollsem_select = 0;
    thread->flags |= _PR_GLOBAL_SCOPE;

    thread->md.cvar_pollsemfd = -1;
    if (new_poll_sem(&thread->md,0) == PR_FAILURE) {
        if (!_PR_IS_NATIVE_THREAD(me))
            _PR_FAST_INTSON(is);
        return PR_FAILURE;
    }
    /* open an fd on the poll semaphore so it can be poll/select'ed */
    thread->md.cvar_pollsemfd =
        _PR_OPEN_POLL_SEM(thread->md.cvar_pollsem);
    if ((thread->md.cvar_pollsemfd < 0)) {
        free_poll_sem(&thread->md);
        if (!_PR_IS_NATIVE_THREAD(me))
            _PR_FAST_INTSON(is);
        return PR_FAILURE;
    }

    create_sproc(spentry,        /* startup func */
                 PR_SALL,        /* attribute flags */
                 (void *)thread, /* thread param */
                 NULL,           /* stack address */
                 stackSize, &pid);  /* stack size */
    if (pid > 0) {
        _MD_ATOMIC_INCREMENT(&_pr_md_irix_sprocs_created);
        _MD_ATOMIC_INCREMENT(&_pr_md_irix_sprocs);
        rv = PR_SUCCESS;
        if (!_PR_IS_NATIVE_THREAD(me))
            _PR_FAST_INTSON(is);
        return rv;
    } else {
        /* sproc creation failed: undo the semaphore setup */
        close(thread->md.cvar_pollsemfd);
        thread->md.cvar_pollsemfd = -1;
        free_poll_sem(&thread->md);
        thread->md.cvar_pollsem = NULL;
        _MD_ATOMIC_INCREMENT(&_pr_md_irix_sprocs_failed);
        if (!_PR_IS_NATIVE_THREAD(me))
            _PR_FAST_INTSON(is);
        return PR_FAILURE;
    }
}
michael@0 761
michael@0 762 void _MD_CleanThread(PRThread *thread)
michael@0 763 {
michael@0 764 if (thread->flags & _PR_GLOBAL_SCOPE) {
michael@0 765 close(thread->md.cvar_pollsemfd);
michael@0 766 thread->md.cvar_pollsemfd = -1;
michael@0 767 free_poll_sem(&thread->md);
michael@0 768 thread->md.cvar_pollsem = NULL;
michael@0 769 }
michael@0 770 }
michael@0 771
michael@0 772 void _MD_SetPriority(_MDThread *thread, PRThreadPriority newPri)
michael@0 773 {
michael@0 774 return;
michael@0 775 }
michael@0 776
michael@0 777 extern void _MD_unix_terminate_waitpid_daemon(void);
michael@0 778
/*
 * Run on the primordial sproc before process exit: stop the waitpid
 * daemon, tell every other cpu to exit and wait for each to post
 * _pr_irix_exit_sem, then wake the recycled native threads on the
 * dead queue so they too can exit, again waiting on the semaphore.
 */
void
_MD_CleanupBeforeExit(void)
{
    extern PRInt32 _pr_cpus_exit;

    _MD_unix_terminate_waitpid_daemon();

    _pr_irix_exit_now = 1;
    if (_pr_numCPU > 1) {
        /*
         * Set a global flag, and wakeup all cpus which will notice the flag
         * and exit.
         */
        _pr_cpus_exit = getpid();
        _MD_Wakeup_CPUs();
        /* each exiting cpu posts _pr_irix_exit_sem once */
        while(_pr_numCPU > 1) {
            _PR_WAIT_SEM(_pr_irix_exit_sem);
            _pr_numCPU--;
        }
    }
    /*
     * cause global threads on the recycle list to exit
     */
    _PR_DEADQ_LOCK;
    if (_PR_NUM_DEADNATIVE != 0) {
        PRThread *thread;
        PRCList *ptr;

        ptr = _PR_DEADNATIVEQ.next;
        while( ptr != &_PR_DEADNATIVEQ ) {
            thread = _PR_THREAD_PTR(ptr);
            _MD_CVAR_POST_SEM(thread);
            ptr = ptr->next;
        }
    }
    _PR_DEADQ_UNLOCK;
    /* wait until all but one of the dead natives have gone */
    while(_PR_NUM_DEADNATIVE > 1) {
        _PR_WAIT_SEM(_pr_irix_exit_sem);
        _PR_DEC_DEADNATIVE;
    }
}
michael@0 820
michael@0 821 #ifdef _PR_HAVE_SGI_PRDA_PROCMASK
michael@0 822 extern void __sgi_prda_procmask(int);
michael@0 823 #endif
michael@0 824
/*
 * Initialize the MD state of a thread that attached itself to NSPR
 * (as opposed to one created via _MD_CreateThread).  For global-scope
 * threads this creates the cvar poll semaphore and its fd and then
 * runs the common _MD_InitThread(); each failure unwinds whatever was
 * set up before it.
 */
PRStatus
_MD_InitAttachedThread(PRThread *thread, PRBool wakeup_parent)
{
    PRStatus rv = PR_SUCCESS;

    if (thread->flags & _PR_GLOBAL_SCOPE) {
        if (new_poll_sem(&thread->md,0) == PR_FAILURE) {
            return PR_FAILURE;
        }
        thread->md.cvar_pollsemfd =
            _PR_OPEN_POLL_SEM(thread->md.cvar_pollsem);
        if ((thread->md.cvar_pollsemfd < 0)) {
            free_poll_sem(&thread->md);
            return PR_FAILURE;
        }
        if (_MD_InitThread(thread, PR_FALSE) == PR_FAILURE) {
            close(thread->md.cvar_pollsemfd);
            thread->md.cvar_pollsemfd = -1;
            free_poll_sem(&thread->md);
            thread->md.cvar_pollsem = NULL;
            return PR_FAILURE;
        }
    }
    return rv;
}
michael@0 850
michael@0 851 PRStatus
michael@0 852 _MD_InitThread(PRThread *thread, PRBool wakeup_parent)
michael@0 853 {
michael@0 854 struct sigaction sigact;
michael@0 855 PRStatus rv = PR_SUCCESS;
michael@0 856
michael@0 857 if (thread->flags & _PR_GLOBAL_SCOPE) {
michael@0 858 thread->md.id = getpid();
michael@0 859 setblockproccnt(thread->md.id, 0);
michael@0 860 _MD_SET_SPROC_PID(getpid());
michael@0 861 #ifdef _PR_HAVE_SGI_PRDA_PROCMASK
michael@0 862 /*
michael@0 863 * enable user-level processing of sigprocmask(); this is an
michael@0 864 * undocumented feature available in Irix 6.2, 6.3, 6.4 and 6.5
michael@0 865 */
michael@0 866 __sgi_prda_procmask(USER_LEVEL);
michael@0 867 #endif
michael@0 868 /*
michael@0 869 * set up SIGUSR1 handler; this is used to save state
michael@0 870 */
michael@0 871 sigact.sa_handler = save_context_and_block;
michael@0 872 sigact.sa_flags = SA_RESTART;
michael@0 873 /*
michael@0 874 * Must mask clock interrupts
michael@0 875 */
michael@0 876 sigact.sa_mask = timer_set;
michael@0 877 sigaction(SIGUSR1, &sigact, 0);
michael@0 878
michael@0 879
michael@0 880 /*
michael@0 881 * PR_SETABORTSIG is a new command implemented in a patch to
michael@0 882 * Irix 6.2, 6.3 and 6.4. This causes a signal to be sent to all
michael@0 883 * sprocs in the process when one of them terminates abnormally
michael@0 884 *
michael@0 885 */
michael@0 886 if (prctl(PR_SETABORTSIG, SIGKILL) < 0) {
michael@0 887 /*
michael@0 888 * if (errno == EINVAL)
michael@0 889 *
michael@0 890 * PR_SETABORTSIG not supported under this OS.
michael@0 891 * You may want to get a recent kernel rollup patch that
michael@0 892 * supports this feature.
michael@0 893 */
michael@0 894 }
michael@0 895 /*
michael@0 896 * SIGCLD handler for detecting abormally-terminating
michael@0 897 * sprocs and for reaping sprocs
michael@0 898 */
michael@0 899 sigact.sa_handler = sigchld_handler;
michael@0 900 sigact.sa_flags = SA_RESTART;
michael@0 901 sigact.sa_mask = ints_off;
michael@0 902 sigaction(SIGCLD, &sigact, NULL);
michael@0 903 }
michael@0 904 return rv;
michael@0 905 }
michael@0 906
michael@0 907 /*
michael@0 908 * PR_Cleanup should be executed on the primordial sproc; migrate the thread
michael@0 909 * to the primordial cpu
michael@0 910 */
michael@0 911
michael@0 912 void _PR_MD_PRE_CLEANUP(PRThread *me)
michael@0 913 {
michael@0 914 PRIntn is;
michael@0 915 _PRCPU *cpu = _pr_primordialCPU;
michael@0 916
michael@0 917 PR_ASSERT(cpu);
michael@0 918
michael@0 919 me->flags |= _PR_BOUND_THREAD;
michael@0 920
michael@0 921 if (me->cpu->id != 0) {
michael@0 922 _PR_INTSOFF(is);
michael@0 923 _PR_RUNQ_LOCK(cpu);
michael@0 924 me->cpu = cpu;
michael@0 925 me->state = _PR_RUNNABLE;
michael@0 926 _PR_ADD_RUNQ(me, cpu, me->priority);
michael@0 927 _PR_RUNQ_UNLOCK(cpu);
michael@0 928 _MD_Wakeup_CPUs();
michael@0 929
michael@0 930 _PR_MD_SWITCH_CONTEXT(me);
michael@0 931
michael@0 932 _PR_FAST_INTSON(is);
michael@0 933 PR_ASSERT(me->cpu->id == 0);
michael@0 934 }
michael@0 935 }
michael@0 936
michael@0 937 /*
michael@0 938 * process exiting
michael@0 939 */
michael@0 940 PR_EXTERN(void ) _MD_exit(PRIntn status)
michael@0 941 {
michael@0 942 PRThread *me = _PR_MD_CURRENT_THREAD();
michael@0 943
michael@0 944 /*
michael@0 945 * the exit code of the process is the exit code of the primordial
michael@0 946 * sproc
michael@0 947 */
michael@0 948 if (!_PR_IS_NATIVE_THREAD(me) && (_PR_MD_CURRENT_CPU()->id == 0)) {
michael@0 949 /*
michael@0 950 * primordial sproc case: call _exit directly
michael@0 951 * Cause SIGKILL to be sent to other sprocs
michael@0 952 */
michael@0 953 prctl(PR_SETEXITSIG, SIGKILL);
michael@0 954 _exit(status);
michael@0 955 } else {
michael@0 956 int rv;
michael@0 957 char data;
michael@0 958 sigset_t set;
michael@0 959
michael@0 960 /*
michael@0 961 * non-primordial sproc case: cause the primordial sproc, cpu 0,
michael@0 962 * to wakeup and call _exit
michael@0 963 */
michael@0 964 _pr_irix_process_exit = 1;
michael@0 965 _pr_irix_process_exit_code = status;
michael@0 966 rv = write(_pr_irix_primoridal_cpu_fd[1], &data, 1);
michael@0 967 PR_ASSERT(rv == 1);
michael@0 968 /*
michael@0 969 * block all signals and wait for SIGKILL to terminate this sproc
michael@0 970 */
michael@0 971 sigfillset(&set);
michael@0 972 sigsuspend(&set);
michael@0 973 /*
michael@0 974 * this code doesn't (shouldn't) execute
michael@0 975 */
michael@0 976 prctl(PR_SETEXITSIG, SIGKILL);
michael@0 977 _exit(status);
michael@0 978 }
michael@0 979 }
michael@0 980
michael@0 981 /*
michael@0 982 * Override the exit() function in libc to cause the process to exit
michael@0 983 * when the primodial/main nspr thread calls exit. Calls to exit by any
michael@0 984 * other thread simply result in a call to the exit function in libc.
michael@0 985 * The exit code of the process is the exit code of the primordial
michael@0 986 * sproc.
michael@0 987 */
michael@0 988
michael@0 989 void exit(int status)
michael@0 990 {
michael@0 991 PRThread *me, *thr;
michael@0 992 PRCList *qp;
michael@0 993
michael@0 994 if (!_pr_initialized) {
michael@0 995 if (!libc_exit) {
michael@0 996
michael@0 997 if (!libc_handle)
michael@0 998 libc_handle = dlopen("libc.so",RTLD_NOW);
michael@0 999 if (libc_handle)
michael@0 1000 libc_exit = (void (*)(int)) dlsym(libc_handle, "exit");
michael@0 1001 }
michael@0 1002 if (libc_exit)
michael@0 1003 (*libc_exit)(status);
michael@0 1004 else
michael@0 1005 _exit(status);
michael@0 1006 }
michael@0 1007
michael@0 1008 me = _PR_MD_CURRENT_THREAD();
michael@0 1009
michael@0 1010 if (me == NULL) /* detached thread */
michael@0 1011 (*libc_exit)(status);
michael@0 1012
michael@0 1013 PR_ASSERT(_PR_IS_NATIVE_THREAD(me) ||
michael@0 1014 (_PR_MD_CURRENT_CPU())->id == me->cpu->id);
michael@0 1015
michael@0 1016 if (me->flags & _PR_PRIMORDIAL) {
michael@0 1017
michael@0 1018 me->flags |= _PR_BOUND_THREAD;
michael@0 1019
michael@0 1020 PR_ASSERT((_PR_MD_CURRENT_CPU())->id == me->cpu->id);
michael@0 1021 if (me->cpu->id != 0) {
michael@0 1022 _PRCPU *cpu = _pr_primordialCPU;
michael@0 1023 PRIntn is;
michael@0 1024
michael@0 1025 _PR_INTSOFF(is);
michael@0 1026 _PR_RUNQ_LOCK(cpu);
michael@0 1027 me->cpu = cpu;
michael@0 1028 me->state = _PR_RUNNABLE;
michael@0 1029 _PR_ADD_RUNQ(me, cpu, me->priority);
michael@0 1030 _PR_RUNQ_UNLOCK(cpu);
michael@0 1031 _MD_Wakeup_CPUs();
michael@0 1032
michael@0 1033 _PR_MD_SWITCH_CONTEXT(me);
michael@0 1034
michael@0 1035 _PR_FAST_INTSON(is);
michael@0 1036 }
michael@0 1037
michael@0 1038 PR_ASSERT((_PR_MD_CURRENT_CPU())->id == 0);
michael@0 1039
michael@0 1040 if (prctl(PR_GETNSHARE) > 1) {
michael@0 1041 #define SPROC_EXIT_WAIT_TIME 5
michael@0 1042 int sleep_cnt = SPROC_EXIT_WAIT_TIME;
michael@0 1043
michael@0 1044 /*
michael@0 1045 * sprocs still running; caue cpus and recycled global threads
michael@0 1046 * to exit
michael@0 1047 */
michael@0 1048 _pr_irix_exit_now = 1;
michael@0 1049 if (_pr_numCPU > 1) {
michael@0 1050 _MD_Wakeup_CPUs();
michael@0 1051 }
michael@0 1052 _PR_DEADQ_LOCK;
michael@0 1053 if (_PR_NUM_DEADNATIVE != 0) {
michael@0 1054 PRThread *thread;
michael@0 1055 PRCList *ptr;
michael@0 1056
michael@0 1057 ptr = _PR_DEADNATIVEQ.next;
michael@0 1058 while( ptr != &_PR_DEADNATIVEQ ) {
michael@0 1059 thread = _PR_THREAD_PTR(ptr);
michael@0 1060 _MD_CVAR_POST_SEM(thread);
michael@0 1061 ptr = ptr->next;
michael@0 1062 }
michael@0 1063 }
michael@0 1064
michael@0 1065 while (sleep_cnt-- > 0) {
michael@0 1066 if (waitpid(0, NULL, WNOHANG) >= 0)
michael@0 1067 sleep(1);
michael@0 1068 else
michael@0 1069 break;
michael@0 1070 }
michael@0 1071 prctl(PR_SETEXITSIG, SIGKILL);
michael@0 1072 }
michael@0 1073 (*libc_exit)(status);
michael@0 1074 } else {
michael@0 1075 /*
michael@0 1076 * non-primordial thread; simply call exit in libc.
michael@0 1077 */
michael@0 1078 (*libc_exit)(status);
michael@0 1079 }
michael@0 1080 }
michael@0 1081
michael@0 1082
michael@0 1083 void
michael@0 1084 _MD_InitRunningCPU(_PRCPU *cpu)
michael@0 1085 {
michael@0 1086 extern int _pr_md_pipefd[2];
michael@0 1087
michael@0 1088 _MD_unix_init_running_cpu(cpu);
michael@0 1089 cpu->md.id = getpid();
michael@0 1090 _MD_SET_SPROC_PID(getpid());
michael@0 1091 if (_pr_md_pipefd[0] >= 0) {
michael@0 1092 _PR_IOQ_MAX_OSFD(cpu) = _pr_md_pipefd[0];
michael@0 1093 #ifndef _PR_USE_POLL
michael@0 1094 FD_SET(_pr_md_pipefd[0], &_PR_FD_READ_SET(cpu));
michael@0 1095 #endif
michael@0 1096 }
michael@0 1097 }
michael@0 1098
michael@0 1099 void
michael@0 1100 _MD_ExitThread(PRThread *thread)
michael@0 1101 {
michael@0 1102 if (thread->flags & _PR_GLOBAL_SCOPE) {
michael@0 1103 _MD_ATOMIC_DECREMENT(&_pr_md_irix_sprocs);
michael@0 1104 _MD_CLEAN_THREAD(thread);
michael@0 1105 _MD_SET_CURRENT_THREAD(NULL);
michael@0 1106 }
michael@0 1107 }
michael@0 1108
michael@0 1109 void
michael@0 1110 _MD_SuspendCPU(_PRCPU *cpu)
michael@0 1111 {
michael@0 1112 PRInt32 rv;
michael@0 1113
michael@0 1114 cpu->md.suspending_id = getpid();
michael@0 1115 rv = kill(cpu->md.id, SIGUSR1);
michael@0 1116 PR_ASSERT(rv == 0);
michael@0 1117 /*
michael@0 1118 * now, block the current thread/cpu until woken up by the suspended
michael@0 1119 * thread from it's SIGUSR1 signal handler
michael@0 1120 */
michael@0 1121 blockproc(getpid());
michael@0 1122
michael@0 1123 }
michael@0 1124
michael@0 1125 void
michael@0 1126 _MD_ResumeCPU(_PRCPU *cpu)
michael@0 1127 {
michael@0 1128 unblockproc(cpu->md.id);
michael@0 1129 }
michael@0 1130
michael@0 1131 #if 0
michael@0 1132 /*
michael@0 1133 * save the register context of a suspended sproc
michael@0 1134 */
michael@0 1135 void get_context(PRThread *thr)
michael@0 1136 {
michael@0 1137 int len, fd;
michael@0 1138 char pidstr[24];
michael@0 1139 char path[24];
michael@0 1140
michael@0 1141 /*
michael@0 1142 * open the file corresponding to this process in procfs
michael@0 1143 */
michael@0 1144 sprintf(path,"/proc/%s","00000");
michael@0 1145 len = strlen(path);
michael@0 1146 sprintf(pidstr,"%d",thr->md.id);
michael@0 1147 len -= strlen(pidstr);
michael@0 1148 sprintf(path + len,"%s",pidstr);
michael@0 1149 fd = open(path,O_RDONLY);
michael@0 1150 if (fd >= 0) {
michael@0 1151 (void) ioctl(fd, PIOCGREG, thr->md.gregs);
michael@0 1152 close(fd);
michael@0 1153 }
michael@0 1154 return;
michael@0 1155 }
michael@0 1156 #endif /* 0 */
michael@0 1157
michael@0 1158 void
michael@0 1159 _MD_SuspendThread(PRThread *thread)
michael@0 1160 {
michael@0 1161 PRInt32 rv;
michael@0 1162
michael@0 1163 PR_ASSERT((thread->flags & _PR_GLOBAL_SCOPE) &&
michael@0 1164 _PR_IS_GCABLE_THREAD(thread));
michael@0 1165
michael@0 1166 thread->md.suspending_id = getpid();
michael@0 1167 rv = kill(thread->md.id, SIGUSR1);
michael@0 1168 PR_ASSERT(rv == 0);
michael@0 1169 /*
michael@0 1170 * now, block the current thread/cpu until woken up by the suspended
michael@0 1171 * thread from it's SIGUSR1 signal handler
michael@0 1172 */
michael@0 1173 blockproc(getpid());
michael@0 1174 }
michael@0 1175
michael@0 1176 void
michael@0 1177 _MD_ResumeThread(PRThread *thread)
michael@0 1178 {
michael@0 1179 PR_ASSERT((thread->flags & _PR_GLOBAL_SCOPE) &&
michael@0 1180 _PR_IS_GCABLE_THREAD(thread));
michael@0 1181 (void)unblockproc(thread->md.id);
michael@0 1182 }
michael@0 1183
michael@0 1184 /*
michael@0 1185 * return the set of processors available for scheduling procs in the
michael@0 1186 * "mask" argument
michael@0 1187 */
michael@0 1188 PRInt32 _MD_GetThreadAffinityMask(PRThread *unused, PRUint32 *mask)
michael@0 1189 {
michael@0 1190 PRInt32 nprocs, rv;
michael@0 1191 struct pda_stat *pstat;
michael@0 1192 #define MAX_PROCESSORS 32
michael@0 1193
michael@0 1194 nprocs = sysmp(MP_NPROCS);
michael@0 1195 if (nprocs < 0)
michael@0 1196 return(-1);
michael@0 1197 pstat = (struct pda_stat*)PR_MALLOC(sizeof(struct pda_stat) * nprocs);
michael@0 1198 if (pstat == NULL)
michael@0 1199 return(-1);
michael@0 1200 rv = sysmp(MP_STAT, pstat);
michael@0 1201 if (rv < 0) {
michael@0 1202 PR_DELETE(pstat);
michael@0 1203 return(-1);
michael@0 1204 }
michael@0 1205 /*
michael@0 1206 * look at the first 32 cpus
michael@0 1207 */
michael@0 1208 nprocs = (nprocs > MAX_PROCESSORS) ? MAX_PROCESSORS : nprocs;
michael@0 1209 *mask = 0;
michael@0 1210 while (nprocs) {
michael@0 1211 if ((pstat->p_flags & PDAF_ENABLED) &&
michael@0 1212 !(pstat->p_flags & PDAF_ISOLATED)) {
michael@0 1213 *mask |= (1 << pstat->p_cpuid);
michael@0 1214 }
michael@0 1215 nprocs--;
michael@0 1216 pstat++;
michael@0 1217 }
michael@0 1218 return 0;
michael@0 1219 }
michael@0 1220
michael@0 1221 static char *_thr_state[] = {
michael@0 1222 "UNBORN",
michael@0 1223 "RUNNABLE",
michael@0 1224 "RUNNING",
michael@0 1225 "LOCK_WAIT",
michael@0 1226 "COND_WAIT",
michael@0 1227 "JOIN_WAIT",
michael@0 1228 "IO_WAIT",
michael@0 1229 "SUSPENDED",
michael@0 1230 "DEAD"
michael@0 1231 };
michael@0 1232
michael@0 1233 void _PR_List_Threads()
michael@0 1234 {
michael@0 1235 PRThread *thr;
michael@0 1236 void *handle;
michael@0 1237 struct _PRCPU *cpu;
michael@0 1238 PRCList *qp;
michael@0 1239 int len, fd;
michael@0 1240 char pidstr[24];
michael@0 1241 char path[24];
michael@0 1242 prpsinfo_t pinfo;
michael@0 1243
michael@0 1244
michael@0 1245 printf("\n%s %-s\n"," ","LOCAL Threads");
michael@0 1246 printf("%s %-s\n"," ","----- -------");
michael@0 1247 printf("%s %-14s %-10s %-12s %-3s %-10s %-10s %-12s\n\n"," ",
michael@0 1248 "Thread", "State", "Wait-Handle",
michael@0 1249 "Cpu","Stk-Base","Stk-Sz","SP");
michael@0 1250 for (qp = _PR_ACTIVE_LOCAL_THREADQ().next;
michael@0 1251 qp != &_PR_ACTIVE_LOCAL_THREADQ(); qp = qp->next) {
michael@0 1252 thr = _PR_ACTIVE_THREAD_PTR(qp);
michael@0 1253 printf("%s 0x%-12x %-10s "," ",thr,_thr_state[thr->state]);
michael@0 1254 if (thr->state == _PR_LOCK_WAIT)
michael@0 1255 handle = thr->wait.lock;
michael@0 1256 else if (thr->state == _PR_COND_WAIT)
michael@0 1257 handle = thr->wait.cvar;
michael@0 1258 else
michael@0 1259 handle = NULL;
michael@0 1260 if (handle)
michael@0 1261 printf("0x%-10x ",handle);
michael@0 1262 else
michael@0 1263 printf("%-12s "," ");
michael@0 1264 printf("%-3d ",thr->cpu->id);
michael@0 1265 printf("0x%-8x ",thr->stack->stackBottom);
michael@0 1266 printf("0x%-8x ",thr->stack->stackSize);
michael@0 1267 printf("0x%-10x\n",thr->md.jb[JB_SP]);
michael@0 1268 }
michael@0 1269
michael@0 1270 printf("\n%s %-s\n"," ","GLOBAL Threads");
michael@0 1271 printf("%s %-s\n"," ","------ -------");
michael@0 1272 printf("%s %-14s %-6s %-12s %-12s %-12s %-12s\n\n"," ","Thread",
michael@0 1273 "Pid","State","Wait-Handle",
michael@0 1274 "Stk-Base","Stk-Sz");
michael@0 1275
michael@0 1276 for (qp = _PR_ACTIVE_GLOBAL_THREADQ().next;
michael@0 1277 qp != &_PR_ACTIVE_GLOBAL_THREADQ(); qp = qp->next) {
michael@0 1278 thr = _PR_ACTIVE_THREAD_PTR(qp);
michael@0 1279 if (thr->cpu != NULL)
michael@0 1280 continue; /* it is a cpu thread */
michael@0 1281 printf("%s 0x%-12x %-6d "," ",thr,thr->md.id);
michael@0 1282 /*
michael@0 1283 * check if the sproc is still running
michael@0 1284 * first call prctl(PR_GETSHMASK,pid) to check if
michael@0 1285 * the process is part of the share group (the pid
michael@0 1286 * could have been recycled by the OS)
michael@0 1287 */
michael@0 1288 if (prctl(PR_GETSHMASK,thr->md.id) < 0) {
michael@0 1289 printf("%-12s\n","TERMINATED");
michael@0 1290 continue;
michael@0 1291 }
michael@0 1292 /*
michael@0 1293 * Now, check if the sproc terminated and is in zombie
michael@0 1294 * state
michael@0 1295 */
michael@0 1296 sprintf(path,"/proc/pinfo/%s","00000");
michael@0 1297 len = strlen(path);
michael@0 1298 sprintf(pidstr,"%d",thr->md.id);
michael@0 1299 len -= strlen(pidstr);
michael@0 1300 sprintf(path + len,"%s",pidstr);
michael@0 1301 fd = open(path,O_RDONLY);
michael@0 1302 if (fd >= 0) {
michael@0 1303 if (ioctl(fd, PIOCPSINFO, &pinfo) < 0)
michael@0 1304 printf("%-12s ","TERMINATED");
michael@0 1305 else if (pinfo.pr_zomb)
michael@0 1306 printf("%-12s ","TERMINATED");
michael@0 1307 else
michael@0 1308 printf("%-12s ",_thr_state[thr->state]);
michael@0 1309 close(fd);
michael@0 1310 } else {
michael@0 1311 printf("%-12s ","TERMINATED");
michael@0 1312 }
michael@0 1313
michael@0 1314 if (thr->state == _PR_LOCK_WAIT)
michael@0 1315 handle = thr->wait.lock;
michael@0 1316 else if (thr->state == _PR_COND_WAIT)
michael@0 1317 handle = thr->wait.cvar;
michael@0 1318 else
michael@0 1319 handle = NULL;
michael@0 1320 if (handle)
michael@0 1321 printf("%-12x ",handle);
michael@0 1322 else
michael@0 1323 printf("%-12s "," ");
michael@0 1324 printf("0x%-10x ",thr->stack->stackBottom);
michael@0 1325 printf("0x%-10x\n",thr->stack->stackSize);
michael@0 1326 }
michael@0 1327
michael@0 1328 printf("\n%s %-s\n"," ","CPUs");
michael@0 1329 printf("%s %-s\n"," ","----");
michael@0 1330 printf("%s %-14s %-6s %-12s \n\n"," ","Id","Pid","State");
michael@0 1331
michael@0 1332
michael@0 1333 for (qp = _PR_CPUQ().next; qp != &_PR_CPUQ(); qp = qp->next) {
michael@0 1334 cpu = _PR_CPU_PTR(qp);
michael@0 1335 printf("%s %-14d %-6d "," ",cpu->id,cpu->md.id);
michael@0 1336 /*
michael@0 1337 * check if the sproc is still running
michael@0 1338 * first call prctl(PR_GETSHMASK,pid) to check if
michael@0 1339 * the process is part of the share group (the pid
michael@0 1340 * could have been recycled by the OS)
michael@0 1341 */
michael@0 1342 if (prctl(PR_GETSHMASK,cpu->md.id) < 0) {
michael@0 1343 printf("%-12s\n","TERMINATED");
michael@0 1344 continue;
michael@0 1345 }
michael@0 1346 /*
michael@0 1347 * Now, check if the sproc terminated and is in zombie
michael@0 1348 * state
michael@0 1349 */
michael@0 1350 sprintf(path,"/proc/pinfo/%s","00000");
michael@0 1351 len = strlen(path);
michael@0 1352 sprintf(pidstr,"%d",cpu->md.id);
michael@0 1353 len -= strlen(pidstr);
michael@0 1354 sprintf(path + len,"%s",pidstr);
michael@0 1355 fd = open(path,O_RDONLY);
michael@0 1356 if (fd >= 0) {
michael@0 1357 if (ioctl(fd, PIOCPSINFO, &pinfo) < 0)
michael@0 1358 printf("%-12s\n","TERMINATED");
michael@0 1359 else if (pinfo.pr_zomb)
michael@0 1360 printf("%-12s\n","TERMINATED");
michael@0 1361 else
michael@0 1362 printf("%-12s\n","RUNNING");
michael@0 1363 close(fd);
michael@0 1364 } else {
michael@0 1365 printf("%-12s\n","TERMINATED");
michael@0 1366 }
michael@0 1367
michael@0 1368 }
michael@0 1369 fflush(stdout);
michael@0 1370 }
michael@0 1371 #endif /* defined(_PR_PTHREADS) */
michael@0 1372
michael@0 1373 PRWord *_MD_HomeGCRegisters(PRThread *t, int isCurrent, int *np)
michael@0 1374 {
michael@0 1375 #if !defined(_PR_PTHREADS)
michael@0 1376 if (isCurrent) {
michael@0 1377 (void) setjmp(t->md.jb);
michael@0 1378 }
michael@0 1379 *np = sizeof(t->md.jb) / sizeof(PRWord);
michael@0 1380 return (PRWord *) (t->md.jb);
michael@0 1381 #else
michael@0 1382 *np = 0;
michael@0 1383 return NULL;
michael@0 1384 #endif
michael@0 1385 }
michael@0 1386
michael@0 1387 void _MD_EarlyInit(void)
michael@0 1388 {
michael@0 1389 #if !defined(_PR_PTHREADS)
michael@0 1390 char *eval;
michael@0 1391 int fd;
michael@0 1392 extern int __ateachexit(void (*func)(void));
michael@0 1393
michael@0 1394 sigemptyset(&ints_off);
michael@0 1395 sigaddset(&ints_off, SIGALRM);
michael@0 1396 sigaddset(&ints_off, SIGIO);
michael@0 1397 sigaddset(&ints_off, SIGCLD);
michael@0 1398
michael@0 1399 if (eval = getenv("_NSPR_TERMINATE_ON_ERROR"))
michael@0 1400 _nspr_terminate_on_error = (0 == atoi(eval) == 0) ? PR_FALSE : PR_TRUE;
michael@0 1401
michael@0 1402 fd = open("/dev/zero",O_RDWR , 0);
michael@0 1403 if (fd < 0) {
michael@0 1404 perror("open /dev/zero failed");
michael@0 1405 exit(1);
michael@0 1406 }
michael@0 1407 /*
michael@0 1408 * Set up the sproc private data area.
michael@0 1409 * This region exists at the same address, _nspr_sproc_private, for
michael@0 1410 * every sproc, but each sproc gets a private copy of the region.
michael@0 1411 */
michael@0 1412 _nspr_sproc_private = (char*)mmap(0, _pr_pageSize, PROT_READ | PROT_WRITE,
michael@0 1413 MAP_PRIVATE| MAP_LOCAL, fd, 0);
michael@0 1414 if (_nspr_sproc_private == (void*)-1) {
michael@0 1415 perror("mmap /dev/zero failed");
michael@0 1416 exit(1);
michael@0 1417 }
michael@0 1418 _MD_SET_SPROC_PID(getpid());
michael@0 1419 close(fd);
michael@0 1420 __ateachexit(irix_detach_sproc);
michael@0 1421 #endif
michael@0 1422 _MD_IrixIntervalInit();
michael@0 1423 } /* _MD_EarlyInit */
michael@0 1424
michael@0 1425 void _MD_IrixInit(void)
michael@0 1426 {
michael@0 1427 #if !defined(_PR_PTHREADS)
michael@0 1428 struct sigaction sigact;
michael@0 1429 PRThread *me = _PR_MD_CURRENT_THREAD();
michael@0 1430 int rv;
michael@0 1431
michael@0 1432 #ifdef _PR_HAVE_SGI_PRDA_PROCMASK
michael@0 1433 /*
michael@0 1434 * enable user-level processing of sigprocmask(); this is an undocumented
michael@0 1435 * feature available in Irix 6.2, 6.3, 6.4 and 6.5
michael@0 1436 */
michael@0 1437 __sgi_prda_procmask(USER_LEVEL);
michael@0 1438 #endif
michael@0 1439
michael@0 1440 /*
michael@0 1441 * set up SIGUSR1 handler; this is used to save state
michael@0 1442 * during PR_SuspendAll
michael@0 1443 */
michael@0 1444 sigact.sa_handler = save_context_and_block;
michael@0 1445 sigact.sa_flags = SA_RESTART;
michael@0 1446 sigact.sa_mask = ints_off;
michael@0 1447 sigaction(SIGUSR1, &sigact, 0);
michael@0 1448
michael@0 1449 /*
michael@0 1450 * Change the name of the core file from core to core.pid,
michael@0 1451 * This is inherited by the sprocs created by this process
michael@0 1452 */
michael@0 1453 #ifdef PR_COREPID
michael@0 1454 prctl(PR_COREPID, 0, 1);
michael@0 1455 #endif
michael@0 1456 /*
michael@0 1457 * Irix-specific terminate on error processing
michael@0 1458 */
michael@0 1459 /*
michael@0 1460 * PR_SETABORTSIG is a new command implemented in a patch to
michael@0 1461 * Irix 6.2, 6.3 and 6.4. This causes a signal to be sent to all
michael@0 1462 * sprocs in the process when one of them terminates abnormally
michael@0 1463 *
michael@0 1464 */
michael@0 1465 if (prctl(PR_SETABORTSIG, SIGKILL) < 0) {
michael@0 1466 /*
michael@0 1467 * if (errno == EINVAL)
michael@0 1468 *
michael@0 1469 * PR_SETABORTSIG not supported under this OS.
michael@0 1470 * You may want to get a recent kernel rollup patch that
michael@0 1471 * supports this feature.
michael@0 1472 *
michael@0 1473 */
michael@0 1474 }
michael@0 1475 /*
michael@0 1476 * PR_SETEXITSIG - send the SIGCLD signal to the parent
michael@0 1477 * sproc when any sproc terminates
michael@0 1478 *
michael@0 1479 * This is used to cause the entire application to
michael@0 1480 * terminate when any sproc terminates abnormally by
michael@0 1481 * receipt of a SIGSEGV, SIGBUS or SIGABRT signal.
michael@0 1482 * If this is not done, the application may seem
michael@0 1483 * "hung" to the user because the other sprocs may be
michael@0 1484 * waiting for resources held by the
michael@0 1485 * abnormally-terminating sproc.
michael@0 1486 */
michael@0 1487 prctl(PR_SETEXITSIG, 0);
michael@0 1488
michael@0 1489 sigact.sa_handler = sigchld_handler;
michael@0 1490 sigact.sa_flags = SA_RESTART;
michael@0 1491 sigact.sa_mask = ints_off;
michael@0 1492 sigaction(SIGCLD, &sigact, NULL);
michael@0 1493
michael@0 1494 /*
michael@0 1495 * setup stack fields for the primordial thread
michael@0 1496 */
michael@0 1497 me->stack->stackSize = prctl(PR_GETSTACKSIZE);
michael@0 1498 me->stack->stackBottom = me->stack->stackTop - me->stack->stackSize;
michael@0 1499
michael@0 1500 rv = pipe(_pr_irix_primoridal_cpu_fd);
michael@0 1501 PR_ASSERT(rv == 0);
michael@0 1502 #ifndef _PR_USE_POLL
michael@0 1503 _PR_IOQ_MAX_OSFD(me->cpu) = _pr_irix_primoridal_cpu_fd[0];
michael@0 1504 FD_SET(_pr_irix_primoridal_cpu_fd[0], &_PR_FD_READ_SET(me->cpu));
michael@0 1505 #endif
michael@0 1506
michael@0 1507 libc_handle = dlopen("libc.so",RTLD_NOW);
michael@0 1508 PR_ASSERT(libc_handle != NULL);
michael@0 1509 libc_exit = (void (*)(int)) dlsym(libc_handle, "exit");
michael@0 1510 PR_ASSERT(libc_exit != NULL);
michael@0 1511 /* dlclose(libc_handle); */
michael@0 1512
michael@0 1513 #endif /* _PR_PTHREADS */
michael@0 1514
michael@0 1515 _PR_UnixInit();
michael@0 1516 }
michael@0 1517
michael@0 1518 /**************************************************************************/
michael@0 1519 /************** code and such for NSPR 2.0's interval times ***************/
michael@0 1520 /**************************************************************************/
michael@0 1521
michael@0 1522 #define PR_PSEC_PER_SEC 1000000000000ULL /* 10^12 */
michael@0 1523
michael@0 1524 #ifndef SGI_CYCLECNTR_SIZE
michael@0 1525 #define SGI_CYCLECNTR_SIZE 165 /* Size user needs to use to read CC */
michael@0 1526 #endif
michael@0 1527
michael@0 1528 static PRIntn mmem_fd = -1;
michael@0 1529 static PRIntn clock_width = 0;
michael@0 1530 static void *iotimer_addr = NULL;
michael@0 1531 static PRUint32 pr_clock_mask = 0;
michael@0 1532 static PRUint32 pr_clock_shift = 0;
michael@0 1533 static PRIntervalTime pr_ticks = 0;
michael@0 1534 static PRUint32 pr_clock_granularity = 1;
michael@0 1535 static PRUint32 pr_previous = 0, pr_residual = 0;
michael@0 1536 static PRUint32 pr_ticks_per_second = 0;
michael@0 1537
michael@0 1538 extern PRIntervalTime _PR_UNIX_GetInterval(void);
michael@0 1539 extern PRIntervalTime _PR_UNIX_TicksPerSecond(void);
michael@0 1540
michael@0 1541 static void _MD_IrixIntervalInit(void)
michael@0 1542 {
michael@0 1543 /*
michael@0 1544 * As much as I would like, the service available through this
michael@0 1545 * interface on R3000's (aka, IP12) just isn't going to make it.
michael@0 1546 * The register is only 24 bits wide, and rolls over at a verocious
michael@0 1547 * rate.
michael@0 1548 */
michael@0 1549 PRUint32 one_tick = 0;
michael@0 1550 struct utsname utsinfo;
michael@0 1551 uname(&utsinfo);
michael@0 1552 if ((strncmp("IP12", utsinfo.machine, 4) != 0)
michael@0 1553 && ((mmem_fd = open("/dev/mmem", O_RDONLY)) != -1))
michael@0 1554 {
michael@0 1555 int poffmask = getpagesize() - 1;
michael@0 1556 __psunsigned_t phys_addr, raddr, cycleval;
michael@0 1557
michael@0 1558 phys_addr = syssgi(SGI_QUERY_CYCLECNTR, &cycleval);
michael@0 1559 raddr = phys_addr & ~poffmask;
michael@0 1560 iotimer_addr = mmap(
michael@0 1561 0, poffmask, PROT_READ, MAP_PRIVATE, mmem_fd, (__psint_t)raddr);
michael@0 1562
michael@0 1563 clock_width = syssgi(SGI_CYCLECNTR_SIZE);
michael@0 1564 if (clock_width < 0)
michael@0 1565 {
michael@0 1566 /*
michael@0 1567 * We must be executing on a 6.0 or earlier system, since the
michael@0 1568 * SGI_CYCLECNTR_SIZE call is not supported.
michael@0 1569 *
michael@0 1570 * The only pre-6.1 platforms with 64-bit counters are
michael@0 1571 * IP19 and IP21 (Challenge, PowerChallenge, Onyx).
michael@0 1572 */
michael@0 1573 if (!strncmp(utsinfo.machine, "IP19", 4) ||
michael@0 1574 !strncmp(utsinfo.machine, "IP21", 4))
michael@0 1575 clock_width = 64;
michael@0 1576 else
michael@0 1577 clock_width = 32;
michael@0 1578 }
michael@0 1579
michael@0 1580 /*
michael@0 1581 * 'cycleval' is picoseconds / increment of the counter.
michael@0 1582 * I'm pushing for a tick to be 100 microseconds, 10^(-4).
michael@0 1583 * That leaves 10^(-8) left over, or 10^8 / cycleval.
michael@0 1584 * Did I do that right?
michael@0 1585 */
michael@0 1586
michael@0 1587 one_tick = 100000000UL / cycleval ; /* 100 microseconds */
michael@0 1588
michael@0 1589 while (0 != one_tick)
michael@0 1590 {
michael@0 1591 pr_clock_shift += 1;
michael@0 1592 one_tick = one_tick >> 1;
michael@0 1593 pr_clock_granularity = pr_clock_granularity << 1;
michael@0 1594 }
michael@0 1595 pr_clock_mask = pr_clock_granularity - 1; /* to make a mask out of it */
michael@0 1596 pr_ticks_per_second = PR_PSEC_PER_SEC
michael@0 1597 / ((PRUint64)pr_clock_granularity * (PRUint64)cycleval);
michael@0 1598
michael@0 1599 iotimer_addr = (void*)
michael@0 1600 ((__psunsigned_t)iotimer_addr + (phys_addr & poffmask));
michael@0 1601 }
michael@0 1602 else
michael@0 1603 {
michael@0 1604 pr_ticks_per_second = _PR_UNIX_TicksPerSecond();
michael@0 1605 }
michael@0 1606 } /* _MD_IrixIntervalInit */
michael@0 1607
michael@0 1608 PRIntervalTime _MD_IrixIntervalPerSec(void)
michael@0 1609 {
michael@0 1610 return pr_ticks_per_second;
michael@0 1611 }
michael@0 1612
michael@0 1613 PRIntervalTime _MD_IrixGetInterval(void)
michael@0 1614 {
michael@0 1615 if (mmem_fd != -1)
michael@0 1616 {
michael@0 1617 if (64 == clock_width)
michael@0 1618 {
michael@0 1619 PRUint64 temp = *(PRUint64*)iotimer_addr;
michael@0 1620 pr_ticks = (PRIntervalTime)(temp >> pr_clock_shift);
michael@0 1621 }
michael@0 1622 else
michael@0 1623 {
michael@0 1624 PRIntervalTime ticks = pr_ticks;
michael@0 1625 PRUint32 now = *(PRUint32*)iotimer_addr, temp;
michael@0 1626 PRUint32 residual = pr_residual, previous = pr_previous;
michael@0 1627
michael@0 1628 temp = now - previous + residual;
michael@0 1629 residual = temp & pr_clock_mask;
michael@0 1630 ticks += temp >> pr_clock_shift;
michael@0 1631
michael@0 1632 pr_previous = now;
michael@0 1633 pr_residual = residual;
michael@0 1634 pr_ticks = ticks;
michael@0 1635 }
michael@0 1636 }
michael@0 1637 else
michael@0 1638 {
michael@0 1639 /*
michael@0 1640 * No fast access. Use the time of day clock. This isn't the
michael@0 1641 * right answer since this clock can get set back, tick at odd
michael@0 1642 * rates, and it's expensive to acqurie.
michael@0 1643 */
michael@0 1644 pr_ticks = _PR_UNIX_GetInterval();
michael@0 1645 }
michael@0 1646 return pr_ticks;
michael@0 1647 } /* _MD_IrixGetInterval */
michael@0 1648

mercurial