nsprpub/pr/src/md/unix/unix.c

author       Michael Schloh von Bennewitz <michael@schloh.com>
date         Wed, 31 Dec 2014 06:09:35 +0100
changeset    0:6474c204b198
permissions  -rw-r--r--

Cloned upstream origin tor-browser at tor-browser-31.3.0esr-4.5-1-build1,
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f, for hacking purposes.

/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "primpl.h"

#include <string.h>
#include <signal.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <sys/utsname.h>

#ifdef _PR_POLL_AVAILABLE
#include <poll.h>
#endif

/* To get FIONREAD */
#if defined(UNIXWARE)
#include <sys/filio.h>
#endif

#if defined(NTO)
#include <sys/statvfs.h>
#endif

/*
 * Make sure _PRSockLen_t is 32-bit, because we will cast a PRUint32* or
 * PRInt32* pointer to a _PRSockLen_t* pointer.
 */
#if defined(HAVE_SOCKLEN_T) \
    || (defined(__GLIBC__) && __GLIBC__ >= 2)
#define _PRSockLen_t socklen_t
#elif defined(IRIX) || defined(HPUX) || defined(OSF1) || defined(SOLARIS) \
    || defined(AIX4_1) || defined(LINUX) \
    || defined(BSDI) || defined(SCO) \
    || defined(DARWIN) \
    || defined(QNX)
#define _PRSockLen_t int
#elif (defined(AIX) && !defined(AIX4_1)) || defined(FREEBSD) \
    || defined(NETBSD) || defined(OPENBSD) || defined(UNIXWARE) \
    || defined(DGUX) || defined(NTO) || defined(RISCOS)
#define _PRSockLen_t size_t
#else
#error "Cannot determine architecture"
#endif
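
/*
 * For example (illustrative only), the wrappers below pass NSPR's PRUint32
 * lengths to the socket calls by casting through _PRSockLen_t, roughly:
 *
 *     PRUint32 addrlen = PR_NETADDR_SIZE(addr);
 *     getsockname(osfd, (struct sockaddr *) addr, (_PRSockLen_t *) &addrlen);
 *
 * which is why the definition above must match the width the kernel expects.
 */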

/*
** Global lock variable used to bracket calls into rusty libraries that
** aren't thread safe (like libc, libX, etc).
*/
static PRLock *_pr_rename_lock = NULL;
static PRMonitor *_pr_Xfe_mon = NULL;

static PRInt64 minus_one;

sigset_t timer_set;

#if !defined(_PR_PTHREADS)

static sigset_t empty_set;

#ifdef SOLARIS
#include <sys/file.h>
#include <sys/filio.h>
#endif

#ifndef PIPE_BUF
#define PIPE_BUF 512
#endif

/*
 * _nspr_noclock - if set clock interrupts are disabled
 */
int _nspr_noclock = 1;

#ifdef IRIX
extern PRInt32 _nspr_terminate_on_error;
#endif

/*
 * There is an assertion in this code that NSPR's definition of PRIOVec
 * is bit compatible with UNIX' definition of a struct iovec. This is
 * applicable to the 'writev()' operations where the types are casually
 * cast to avoid warnings.
 */
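
/*
 * A sketch of how that layout assumption could be spot-checked at compile
 * time (assuming PR_STATIC_ASSERT from prlog.h is available in this tree):
 *
 *     PR_STATIC_ASSERT(sizeof(PRIOVec) == sizeof(struct iovec));
 */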

int _pr_md_pipefd[2] = { -1, -1 };
static char _pr_md_pipebuf[PIPE_BUF];
static PRInt32 local_io_wait(PRInt32 osfd, PRInt32 wait_flag,
    PRIntervalTime timeout);

_PRInterruptTable _pr_interruptTable[] = {
    {
        "clock", _PR_MISSED_CLOCK, _PR_ClockInterrupt, },
    {
        0 }
};

void _MD_unix_init_running_cpu(_PRCPU *cpu)
{
    PR_INIT_CLIST(&(cpu->md.md_unix.ioQ));
    cpu->md.md_unix.ioq_max_osfd = -1;
    cpu->md.md_unix.ioq_timeout = PR_INTERVAL_NO_TIMEOUT;
}

PRStatus _MD_open_dir(_MDDir *d, const char *name)
{
    int err;

    d->d = opendir(name);
    if (!d->d) {
        err = _MD_ERRNO();
        _PR_MD_MAP_OPENDIR_ERROR(err);
        return PR_FAILURE;
    }
    return PR_SUCCESS;
}

PRInt32 _MD_close_dir(_MDDir *d)
{
    int rv = 0, err;

    if (d->d) {
        rv = closedir(d->d);
        if (rv == -1) {
            err = _MD_ERRNO();
            _PR_MD_MAP_CLOSEDIR_ERROR(err);
        }
    }
    return rv;
}

char * _MD_read_dir(_MDDir *d, PRIntn flags)
{
    struct dirent *de;
    int err;

    for (;;) {
        /*
         * XXX: readdir() is not MT-safe. There is an MT-safe version
         * readdir_r() on some systems.
         */
        _MD_ERRNO() = 0;
        de = readdir(d->d);
        if (!de) {
            err = _MD_ERRNO();
            _PR_MD_MAP_READDIR_ERROR(err);
            return 0;
        }
        if ((flags & PR_SKIP_DOT) &&
            (de->d_name[0] == '.') && (de->d_name[1] == 0))
            continue;
        if ((flags & PR_SKIP_DOT_DOT) &&
            (de->d_name[0] == '.') && (de->d_name[1] == '.') &&
            (de->d_name[2] == 0))
            continue;
        if ((flags & PR_SKIP_HIDDEN) && (de->d_name[0] == '.'))
            continue;
        break;
    }
    return de->d_name;
}

PRInt32 _MD_delete(const char *name)
{
    PRInt32 rv, err;
#ifdef UNIXWARE
    sigset_t set, oset;
#endif

#ifdef UNIXWARE
    sigfillset(&set);
    sigprocmask(SIG_SETMASK, &set, &oset);
#endif
    rv = unlink(name);
#ifdef UNIXWARE
    sigprocmask(SIG_SETMASK, &oset, NULL);
#endif
    if (rv == -1) {
        err = _MD_ERRNO();
        _PR_MD_MAP_UNLINK_ERROR(err);
    }
    return(rv);
}

PRInt32 _MD_rename(const char *from, const char *to)
{
    PRInt32 rv = -1, err;

    /*
    ** This is trying to enforce the semantics of WINDOZE' rename
    ** operation. That means one is not allowed to rename over top
    ** of an existing file. Holding a lock across these two functions
    ** and the open function is known to be a bad idea, but ....
    */
    if (NULL != _pr_rename_lock)
        PR_Lock(_pr_rename_lock);
    if (0 == access(to, F_OK))
        PR_SetError(PR_FILE_EXISTS_ERROR, 0);
    else
    {
        rv = rename(from, to);
        if (rv < 0) {
            err = _MD_ERRNO();
            _PR_MD_MAP_RENAME_ERROR(err);
        }
    }
    if (NULL != _pr_rename_lock)
        PR_Unlock(_pr_rename_lock);
    return rv;
}

PRInt32 _MD_access(const char *name, PRAccessHow how)
{
    PRInt32 rv, err;
    int amode;

    switch (how) {
        case PR_ACCESS_WRITE_OK:
            amode = W_OK;
            break;
        case PR_ACCESS_READ_OK:
            amode = R_OK;
            break;
        case PR_ACCESS_EXISTS:
            amode = F_OK;
            break;
        default:
            PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0);
            rv = -1;
            goto done;
    }
    rv = access(name, amode);

    if (rv < 0) {
        err = _MD_ERRNO();
        _PR_MD_MAP_ACCESS_ERROR(err);
    }

done:
    return(rv);
}

PRInt32 _MD_mkdir(const char *name, PRIntn mode)
{
    int rv, err;

    /*
    ** This lock is used to enforce rename semantics as described
    ** in PR_Rename. Look there for more fun details.
    */
    if (NULL !=_pr_rename_lock)
        PR_Lock(_pr_rename_lock);
    rv = mkdir(name, mode);
    if (rv < 0) {
        err = _MD_ERRNO();
        _PR_MD_MAP_MKDIR_ERROR(err);
    }
    if (NULL !=_pr_rename_lock)
        PR_Unlock(_pr_rename_lock);
    return rv;
}

PRInt32 _MD_rmdir(const char *name)
{
    int rv, err;

    rv = rmdir(name);
    if (rv == -1) {
        err = _MD_ERRNO();
        _PR_MD_MAP_RMDIR_ERROR(err);
    }
    return rv;
}

PRInt32 _MD_read(PRFileDesc *fd, void *buf, PRInt32 amount)
{
    PRThread *me = _PR_MD_CURRENT_THREAD();
    PRInt32 rv, err;
#ifndef _PR_USE_POLL
    fd_set rd;
#else
    struct pollfd pfd;
#endif /* _PR_USE_POLL */
    PRInt32 osfd = fd->secret->md.osfd;

#ifndef _PR_USE_POLL
    FD_ZERO(&rd);
    FD_SET(osfd, &rd);
#else
    pfd.fd = osfd;
    pfd.events = POLLIN;
#endif /* _PR_USE_POLL */
    while ((rv = read(osfd,buf,amount)) == -1) {
        err = _MD_ERRNO();
        if ((err == EAGAIN) || (err == EWOULDBLOCK)) {
            if (fd->secret->nonblocking) {
                break;
            }
            if (!_PR_IS_NATIVE_THREAD(me)) {
                if ((rv = local_io_wait(osfd, _PR_UNIX_POLL_READ,
                    PR_INTERVAL_NO_TIMEOUT)) < 0)
                    goto done;
            } else {
#ifndef _PR_USE_POLL
                while ((rv = _MD_SELECT(osfd + 1, &rd, NULL, NULL, NULL))
                    == -1 && (err = _MD_ERRNO()) == EINTR) {
                    /* retry _MD_SELECT() if it is interrupted */
                }
#else /* _PR_USE_POLL */
                while ((rv = _MD_POLL(&pfd, 1, -1))
                    == -1 && (err = _MD_ERRNO()) == EINTR) {
                    /* retry _MD_POLL() if it is interrupted */
                }
#endif /* _PR_USE_POLL */
                if (rv == -1) {
                    break;
                }
            }
            if (_PR_PENDING_INTERRUPT(me))
                break;
        } else if ((err == EINTR) && (!_PR_PENDING_INTERRUPT(me))){
            continue;
        } else {
            break;
        }
    }
    if (rv < 0) {
        if (_PR_PENDING_INTERRUPT(me)) {
            me->flags &= ~_PR_INTERRUPT;
            PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0);
        } else {
            _PR_MD_MAP_READ_ERROR(err);
        }
    }
done:
    return(rv);
}

PRInt32 _MD_write(PRFileDesc *fd, const void *buf, PRInt32 amount)
{
    PRThread *me = _PR_MD_CURRENT_THREAD();
    PRInt32 rv, err;
#ifndef _PR_USE_POLL
    fd_set wd;
#else
    struct pollfd pfd;
#endif /* _PR_USE_POLL */
    PRInt32 osfd = fd->secret->md.osfd;

#ifndef _PR_USE_POLL
    FD_ZERO(&wd);
    FD_SET(osfd, &wd);
#else
    pfd.fd = osfd;
    pfd.events = POLLOUT;
#endif /* _PR_USE_POLL */
    while ((rv = write(osfd,buf,amount)) == -1) {
        err = _MD_ERRNO();
        if ((err == EAGAIN) || (err == EWOULDBLOCK)) {
            if (fd->secret->nonblocking) {
                break;
            }
            if (!_PR_IS_NATIVE_THREAD(me)) {
                if ((rv = local_io_wait(osfd, _PR_UNIX_POLL_WRITE,
                    PR_INTERVAL_NO_TIMEOUT)) < 0)
                    goto done;
            } else {
#ifndef _PR_USE_POLL
                while ((rv = _MD_SELECT(osfd + 1, NULL, &wd, NULL, NULL))
                    == -1 && (err = _MD_ERRNO()) == EINTR) {
                    /* retry _MD_SELECT() if it is interrupted */
                }
#else /* _PR_USE_POLL */
                while ((rv = _MD_POLL(&pfd, 1, -1))
                    == -1 && (err = _MD_ERRNO()) == EINTR) {
                    /* retry _MD_POLL() if it is interrupted */
                }
#endif /* _PR_USE_POLL */
                if (rv == -1) {
                    break;
                }
            }
            if (_PR_PENDING_INTERRUPT(me))
                break;
        } else if ((err == EINTR) && (!_PR_PENDING_INTERRUPT(me))){
            continue;
        } else {
            break;
        }
    }
    if (rv < 0) {
        if (_PR_PENDING_INTERRUPT(me)) {
            me->flags &= ~_PR_INTERRUPT;
            PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0);
        } else {
            _PR_MD_MAP_WRITE_ERROR(err);
        }
    }
done:
    return(rv);
}

PRInt32 _MD_fsync(PRFileDesc *fd)
{
    PRInt32 rv, err;

    rv = fsync(fd->secret->md.osfd);
    if (rv == -1) {
        err = _MD_ERRNO();
        _PR_MD_MAP_FSYNC_ERROR(err);
    }
    return(rv);
}

PRInt32 _MD_close(PRInt32 osfd)
{
    PRInt32 rv, err;

    rv = close(osfd);
    if (rv == -1) {
        err = _MD_ERRNO();
        _PR_MD_MAP_CLOSE_ERROR(err);
    }
    return(rv);
}

PRInt32 _MD_socket(PRInt32 domain, PRInt32 type, PRInt32 proto)
{
    PRInt32 osfd, err;

    osfd = socket(domain, type, proto);

    if (osfd == -1) {
        err = _MD_ERRNO();
        _PR_MD_MAP_SOCKET_ERROR(err);
        return(osfd);
    }

    return(osfd);
}

PRInt32 _MD_socketavailable(PRFileDesc *fd)
{
    PRInt32 result;

    if (ioctl(fd->secret->md.osfd, FIONREAD, &result) < 0) {
        _PR_MD_MAP_SOCKETAVAILABLE_ERROR(_MD_ERRNO());
        return -1;
    }
    return result;
}

PRInt64 _MD_socketavailable64(PRFileDesc *fd)
{
    PRInt64 result;
    LL_I2L(result, _MD_socketavailable(fd));
    return result;
} /* _MD_socketavailable64 */

#define READ_FD 1
#define WRITE_FD 2

/*
 * socket_io_wait --
 *
 * wait for socket i/o, periodically checking for interrupt
 *
 * The first implementation uses select(), for platforms without
 * poll(). The second (preferred) implementation uses poll().
 */
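
/*
 * Both implementations report readiness the same way: a return value of 1
 * means the descriptor became ready and the blocked operation should be
 * retried, while a negative return value means an error or timeout with the
 * NSPR error already set (see callers such as _MD_recv below).
 */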

#ifndef _PR_USE_POLL

static PRInt32 socket_io_wait(PRInt32 osfd, PRInt32 fd_type,
    PRIntervalTime timeout)
{
    PRInt32 rv = -1;
    struct timeval tv;
    PRThread *me = _PR_MD_CURRENT_THREAD();
    PRIntervalTime epoch, now, elapsed, remaining;
    PRBool wait_for_remaining;
    PRInt32 syserror;
    fd_set rd_wr;

    switch (timeout) {
        case PR_INTERVAL_NO_WAIT:
            PR_SetError(PR_IO_TIMEOUT_ERROR, 0);
            break;
        case PR_INTERVAL_NO_TIMEOUT:
            /*
             * This is a special case of the 'default' case below.
             * Please see the comments there.
             */
            tv.tv_sec = _PR_INTERRUPT_CHECK_INTERVAL_SECS;
            tv.tv_usec = 0;
            FD_ZERO(&rd_wr);
            do {
                FD_SET(osfd, &rd_wr);
                if (fd_type == READ_FD)
                    rv = _MD_SELECT(osfd + 1, &rd_wr, NULL, NULL, &tv);
                else
                    rv = _MD_SELECT(osfd + 1, NULL, &rd_wr, NULL, &tv);
                if (rv == -1 && (syserror = _MD_ERRNO()) != EINTR) {
                    _PR_MD_MAP_SELECT_ERROR(syserror);
                    break;
                }
                if (_PR_PENDING_INTERRUPT(me)) {
                    me->flags &= ~_PR_INTERRUPT;
                    PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0);
                    rv = -1;
                    break;
                }
            } while (rv == 0 || (rv == -1 && syserror == EINTR));
            break;
        default:
            now = epoch = PR_IntervalNow();
            remaining = timeout;
            FD_ZERO(&rd_wr);
            do {
                /*
                 * We block in _MD_SELECT for at most
                 * _PR_INTERRUPT_CHECK_INTERVAL_SECS seconds,
                 * so that there is an upper limit on the delay
                 * before the interrupt bit is checked.
                 */
                wait_for_remaining = PR_TRUE;
                tv.tv_sec = PR_IntervalToSeconds(remaining);
                if (tv.tv_sec > _PR_INTERRUPT_CHECK_INTERVAL_SECS) {
                    wait_for_remaining = PR_FALSE;
                    tv.tv_sec = _PR_INTERRUPT_CHECK_INTERVAL_SECS;
                    tv.tv_usec = 0;
                } else {
                    tv.tv_usec = PR_IntervalToMicroseconds(
                        remaining -
                        PR_SecondsToInterval(tv.tv_sec));
                }
                FD_SET(osfd, &rd_wr);
                if (fd_type == READ_FD)
                    rv = _MD_SELECT(osfd + 1, &rd_wr, NULL, NULL, &tv);
                else
                    rv = _MD_SELECT(osfd + 1, NULL, &rd_wr, NULL, &tv);
                /*
                 * we don't consider EINTR a real error
                 */
                if (rv == -1 && (syserror = _MD_ERRNO()) != EINTR) {
                    _PR_MD_MAP_SELECT_ERROR(syserror);
                    break;
                }
                if (_PR_PENDING_INTERRUPT(me)) {
                    me->flags &= ~_PR_INTERRUPT;
                    PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0);
                    rv = -1;
                    break;
                }
                /*
                 * We loop again if _MD_SELECT timed out or got interrupted
                 * by a signal, and the timeout deadline has not passed yet.
                 */
                if (rv == 0 || (rv == -1 && syserror == EINTR)) {
                    /*
                     * If _MD_SELECT timed out, we know how much time
                     * we spent in blocking, so we can avoid a
                     * PR_IntervalNow() call.
                     */
                    if (rv == 0) {
                        if (wait_for_remaining) {
                            now += remaining;
                        } else {
                            now += PR_SecondsToInterval(tv.tv_sec)
                                + PR_MicrosecondsToInterval(tv.tv_usec);
                        }
                    } else {
                        now = PR_IntervalNow();
                    }
                    elapsed = (PRIntervalTime) (now - epoch);
                    if (elapsed >= timeout) {
                        PR_SetError(PR_IO_TIMEOUT_ERROR, 0);
                        rv = -1;
                        break;
                    } else {
                        remaining = timeout - elapsed;
                    }
                }
            } while (rv == 0 || (rv == -1 && syserror == EINTR));
            break;
    }
    return(rv);
}

#else /* _PR_USE_POLL */

static PRInt32 socket_io_wait(PRInt32 osfd, PRInt32 fd_type,
    PRIntervalTime timeout)
{
    PRInt32 rv = -1;
    int msecs;
    PRThread *me = _PR_MD_CURRENT_THREAD();
    PRIntervalTime epoch, now, elapsed, remaining;
    PRBool wait_for_remaining;
    PRInt32 syserror;
    struct pollfd pfd;

    switch (timeout) {
        case PR_INTERVAL_NO_WAIT:
            PR_SetError(PR_IO_TIMEOUT_ERROR, 0);
            break;
        case PR_INTERVAL_NO_TIMEOUT:
            /*
             * This is a special case of the 'default' case below.
             * Please see the comments there.
             */
            msecs = _PR_INTERRUPT_CHECK_INTERVAL_SECS * 1000;
            pfd.fd = osfd;
            if (fd_type == READ_FD) {
                pfd.events = POLLIN;
            } else {
                pfd.events = POLLOUT;
            }
            do {
                rv = _MD_POLL(&pfd, 1, msecs);
                if (rv == -1 && (syserror = _MD_ERRNO()) != EINTR) {
                    _PR_MD_MAP_POLL_ERROR(syserror);
                    break;
                }
                /*
                 * If POLLERR is set, don't process it; retry the operation
                 */
                if ((rv == 1) && (pfd.revents & (POLLHUP | POLLNVAL))) {
                    rv = -1;
                    _PR_MD_MAP_POLL_REVENTS_ERROR(pfd.revents);
                    break;
                }
                if (_PR_PENDING_INTERRUPT(me)) {
                    me->flags &= ~_PR_INTERRUPT;
                    PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0);
                    rv = -1;
                    break;
                }
            } while (rv == 0 || (rv == -1 && syserror == EINTR));
            break;
        default:
            now = epoch = PR_IntervalNow();
            remaining = timeout;
            pfd.fd = osfd;
            if (fd_type == READ_FD) {
                pfd.events = POLLIN;
            } else {
                pfd.events = POLLOUT;
            }
            do {
                /*
                 * We block in _MD_POLL for at most
                 * _PR_INTERRUPT_CHECK_INTERVAL_SECS seconds,
                 * so that there is an upper limit on the delay
                 * before the interrupt bit is checked.
                 */
                wait_for_remaining = PR_TRUE;
                msecs = PR_IntervalToMilliseconds(remaining);
                if (msecs > _PR_INTERRUPT_CHECK_INTERVAL_SECS * 1000) {
                    wait_for_remaining = PR_FALSE;
                    msecs = _PR_INTERRUPT_CHECK_INTERVAL_SECS * 1000;
                }
                rv = _MD_POLL(&pfd, 1, msecs);
                /*
                 * we don't consider EINTR a real error
                 */
                if (rv == -1 && (syserror = _MD_ERRNO()) != EINTR) {
                    _PR_MD_MAP_POLL_ERROR(syserror);
                    break;
                }
                if (_PR_PENDING_INTERRUPT(me)) {
                    me->flags &= ~_PR_INTERRUPT;
                    PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0);
                    rv = -1;
                    break;
                }
                /*
                 * If POLLERR is set, don't process it; retry the operation
                 */
                if ((rv == 1) && (pfd.revents & (POLLHUP | POLLNVAL))) {
                    rv = -1;
                    _PR_MD_MAP_POLL_REVENTS_ERROR(pfd.revents);
                    break;
                }
                /*
                 * We loop again if _MD_POLL timed out or got interrupted
                 * by a signal, and the timeout deadline has not passed yet.
                 */
                if (rv == 0 || (rv == -1 && syserror == EINTR)) {
                    /*
                     * If _MD_POLL timed out, we know how much time
                     * we spent in blocking, so we can avoid a
                     * PR_IntervalNow() call.
                     */
                    if (rv == 0) {
                        if (wait_for_remaining) {
                            now += remaining;
                        } else {
                            now += PR_MillisecondsToInterval(msecs);
                        }
                    } else {
                        now = PR_IntervalNow();
                    }
                    elapsed = (PRIntervalTime) (now - epoch);
                    if (elapsed >= timeout) {
                        PR_SetError(PR_IO_TIMEOUT_ERROR, 0);
                        rv = -1;
                        break;
                    } else {
                        remaining = timeout - elapsed;
                    }
                }
            } while (rv == 0 || (rv == -1 && syserror == EINTR));
            break;
    }
    return(rv);
}

#endif /* _PR_USE_POLL */

static PRInt32 local_io_wait(
    PRInt32 osfd,
    PRInt32 wait_flag,
    PRIntervalTime timeout)
{
    _PRUnixPollDesc pd;
    PRInt32 rv;

    PR_LOG(_pr_io_lm, PR_LOG_MIN,
        ("waiting to %s on osfd=%d",
        (wait_flag == _PR_UNIX_POLL_READ) ? "read" : "write",
        osfd));

    if (timeout == PR_INTERVAL_NO_WAIT) return 0;

    pd.osfd = osfd;
    pd.in_flags = wait_flag;
    pd.out_flags = 0;

    rv = _PR_WaitForMultipleFDs(&pd, 1, timeout);

    if (rv == 0) {
        PR_SetError(PR_IO_TIMEOUT_ERROR, 0);
        rv = -1;
    }
    return rv;
}
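
/*
 * Note on the two wait paths used throughout this file: local (user-level)
 * threads go through local_io_wait(), which hands the descriptor to
 * _PR_WaitForMultipleFDs() so the NSPR scheduler can run other local threads
 * while the fd is pending; native threads call socket_io_wait() above and
 * simply block in select()/poll() with periodic interrupt checks.
 */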


PRInt32 _MD_recv(PRFileDesc *fd, void *buf, PRInt32 amount,
    PRInt32 flags, PRIntervalTime timeout)
{
    PRInt32 osfd = fd->secret->md.osfd;
    PRInt32 rv, err;
    PRThread *me = _PR_MD_CURRENT_THREAD();

    /*
     * Many OS's (Solaris, Unixware) have a broken recv which won't read
     * from socketpairs. As long as we don't use flags on socketpairs, this
     * is a decent fix. - mikep
     */
#if defined(UNIXWARE) || defined(SOLARIS)
    while ((rv = read(osfd,buf,amount)) == -1) {
#else
    while ((rv = recv(osfd,buf,amount,flags)) == -1) {
#endif
        err = _MD_ERRNO();
        if ((err == EAGAIN) || (err == EWOULDBLOCK)) {
            if (fd->secret->nonblocking) {
                break;
            }
            if (!_PR_IS_NATIVE_THREAD(me)) {
                if ((rv = local_io_wait(osfd,_PR_UNIX_POLL_READ,timeout)) < 0)
                    goto done;
            } else {
                if ((rv = socket_io_wait(osfd, READ_FD, timeout)) < 0)
                    goto done;
            }
        } else if ((err == EINTR) && (!_PR_PENDING_INTERRUPT(me))){
            continue;
        } else {
            break;
        }
    }
    if (rv < 0) {
        _PR_MD_MAP_RECV_ERROR(err);
    }
done:
    return(rv);
}

PRInt32 _MD_recvfrom(PRFileDesc *fd, void *buf, PRInt32 amount,
    PRIntn flags, PRNetAddr *addr, PRUint32 *addrlen,
    PRIntervalTime timeout)
{
    PRInt32 osfd = fd->secret->md.osfd;
    PRInt32 rv, err;
    PRThread *me = _PR_MD_CURRENT_THREAD();

    while ((*addrlen = PR_NETADDR_SIZE(addr)),
        ((rv = recvfrom(osfd, buf, amount, flags,
        (struct sockaddr *) addr, (_PRSockLen_t *)addrlen)) == -1)) {
        err = _MD_ERRNO();
        if ((err == EAGAIN) || (err == EWOULDBLOCK)) {
            if (fd->secret->nonblocking) {
                break;
            }
            if (!_PR_IS_NATIVE_THREAD(me)) {
                if ((rv = local_io_wait(osfd, _PR_UNIX_POLL_READ, timeout)) < 0)
                    goto done;
            } else {
                if ((rv = socket_io_wait(osfd, READ_FD, timeout)) < 0)
                    goto done;
            }
        } else if ((err == EINTR) && (!_PR_PENDING_INTERRUPT(me))){
            continue;
        } else {
            break;
        }
    }
    if (rv < 0) {
        _PR_MD_MAP_RECVFROM_ERROR(err);
    }
done:
#ifdef _PR_HAVE_SOCKADDR_LEN
    if (rv != -1) {
        /* ignore the sa_len field of struct sockaddr */
        if (addr) {
            addr->raw.family = ((struct sockaddr *) addr)->sa_family;
        }
    }
#endif /* _PR_HAVE_SOCKADDR_LEN */
    return(rv);
}

PRInt32 _MD_send(PRFileDesc *fd, const void *buf, PRInt32 amount,
    PRInt32 flags, PRIntervalTime timeout)
{
    PRInt32 osfd = fd->secret->md.osfd;
    PRInt32 rv, err;
    PRThread *me = _PR_MD_CURRENT_THREAD();
#if defined(SOLARIS)
    PRInt32 tmp_amount = amount;
#endif

    /*
     * On pre-2.6 Solaris, send() is much slower than write().
     * On 2.6 and beyond, with in-kernel sockets, send() and
     * write() are fairly equivalent in performance.
     */
#if defined(SOLARIS)
    PR_ASSERT(0 == flags);
    while ((rv = write(osfd,buf,tmp_amount)) == -1) {
#else
    while ((rv = send(osfd,buf,amount,flags)) == -1) {
#endif
        err = _MD_ERRNO();
        if ((err == EAGAIN) || (err == EWOULDBLOCK)) {
            if (fd->secret->nonblocking) {
                break;
            }
            if (!_PR_IS_NATIVE_THREAD(me)) {
                if ((rv = local_io_wait(osfd, _PR_UNIX_POLL_WRITE, timeout)) < 0)
                    goto done;
            } else {
                if ((rv = socket_io_wait(osfd, WRITE_FD, timeout))< 0)
                    goto done;
            }
        } else if ((err == EINTR) && (!_PR_PENDING_INTERRUPT(me))){
            continue;
        } else {
#if defined(SOLARIS)
            /*
             * The write system call has been reported to return the ERANGE
             * error on occasion. Try to write in smaller chunks to work around
             * this bug.
             */
            if (err == ERANGE) {
                if (tmp_amount > 1) {
                    tmp_amount = tmp_amount/2; /* half the bytes */
                    continue;
                }
            }
#endif
            break;
        }
    }
    /*
     * optimization; if bytes sent is less than "amount" call
     * select before returning. This is because it is likely that
     * the next send() call will return EWOULDBLOCK.
     */
    if ((!fd->secret->nonblocking) && (rv > 0) && (rv < amount)
        && (timeout != PR_INTERVAL_NO_WAIT)) {
        if (_PR_IS_NATIVE_THREAD(me)) {
            if (socket_io_wait(osfd, WRITE_FD, timeout)< 0) {
                rv = -1;
                goto done;
            }
        } else {
            if (local_io_wait(osfd, _PR_UNIX_POLL_WRITE, timeout) < 0) {
                rv = -1;
                goto done;
            }
        }
    }
    if (rv < 0) {
        _PR_MD_MAP_SEND_ERROR(err);
    }
done:
    return(rv);
}

PRInt32 _MD_sendto(
    PRFileDesc *fd, const void *buf, PRInt32 amount, PRIntn flags,
    const PRNetAddr *addr, PRUint32 addrlen, PRIntervalTime timeout)
{
    PRInt32 osfd = fd->secret->md.osfd;
    PRInt32 rv, err;
    PRThread *me = _PR_MD_CURRENT_THREAD();
#ifdef _PR_HAVE_SOCKADDR_LEN
    PRNetAddr addrCopy;

    addrCopy = *addr;
    ((struct sockaddr *) &addrCopy)->sa_len = addrlen;
    ((struct sockaddr *) &addrCopy)->sa_family = addr->raw.family;

    while ((rv = sendto(osfd, buf, amount, flags,
        (struct sockaddr *) &addrCopy, addrlen)) == -1) {
#else
    while ((rv = sendto(osfd, buf, amount, flags,
        (struct sockaddr *) addr, addrlen)) == -1) {
#endif
        err = _MD_ERRNO();
        if ((err == EAGAIN) || (err == EWOULDBLOCK)) {
            if (fd->secret->nonblocking) {
                break;
            }
            if (!_PR_IS_NATIVE_THREAD(me)) {
                if ((rv = local_io_wait(osfd, _PR_UNIX_POLL_WRITE, timeout)) < 0)
                    goto done;
            } else {
                if ((rv = socket_io_wait(osfd, WRITE_FD, timeout))< 0)
                    goto done;
            }
        } else if ((err == EINTR) && (!_PR_PENDING_INTERRUPT(me))){
            continue;
        } else {
            break;
        }
    }
    if (rv < 0) {
        _PR_MD_MAP_SENDTO_ERROR(err);
    }
done:
    return(rv);
}

PRInt32 _MD_writev(
    PRFileDesc *fd, const PRIOVec *iov,
    PRInt32 iov_size, PRIntervalTime timeout)
{
    PRInt32 rv, err;
    PRThread *me = _PR_MD_CURRENT_THREAD();
    PRInt32 index, amount = 0;
    PRInt32 osfd = fd->secret->md.osfd;

    /*
     * Calculate the total number of bytes to be sent; needed for
     * optimization later.
     * We could avoid this if this number was passed in; but it is
     * probably not a big deal because iov_size is usually small (less than
     * 3)
     */
    if (!fd->secret->nonblocking) {
        for (index=0; index<iov_size; index++) {
            amount += iov[index].iov_len;
        }
    }

    while ((rv = writev(osfd, (const struct iovec*)iov, iov_size)) == -1) {
        err = _MD_ERRNO();
        if ((err == EAGAIN) || (err == EWOULDBLOCK)) {
            if (fd->secret->nonblocking) {
                break;
            }
            if (!_PR_IS_NATIVE_THREAD(me)) {
                if ((rv = local_io_wait(osfd, _PR_UNIX_POLL_WRITE, timeout)) < 0)
                    goto done;
            } else {
                if ((rv = socket_io_wait(osfd, WRITE_FD, timeout))<0)
                    goto done;
            }
        } else if ((err == EINTR) && (!_PR_PENDING_INTERRUPT(me))){
            continue;
        } else {
            break;
        }
    }
    /*
     * optimization; if bytes sent is less than "amount" call
     * select before returning. This is because it is likely that
     * the next writev() call will return EWOULDBLOCK.
     */
    if ((!fd->secret->nonblocking) && (rv > 0) && (rv < amount)
        && (timeout != PR_INTERVAL_NO_WAIT)) {
        if (_PR_IS_NATIVE_THREAD(me)) {
            if (socket_io_wait(osfd, WRITE_FD, timeout) < 0) {
                rv = -1;
                goto done;
            }
        } else {
            if (local_io_wait(osfd, _PR_UNIX_POLL_WRITE, timeout) < 0) {
                rv = -1;
                goto done;
            }
        }
    }
    if (rv < 0) {
        _PR_MD_MAP_WRITEV_ERROR(err);
    }
done:
    return(rv);
}

PRInt32 _MD_accept(PRFileDesc *fd, PRNetAddr *addr,
    PRUint32 *addrlen, PRIntervalTime timeout)
{
    PRInt32 osfd = fd->secret->md.osfd;
    PRInt32 rv, err;
    PRThread *me = _PR_MD_CURRENT_THREAD();

    while ((rv = accept(osfd, (struct sockaddr *) addr,
        (_PRSockLen_t *)addrlen)) == -1) {
        err = _MD_ERRNO();
        if ((err == EAGAIN) || (err == EWOULDBLOCK) || (err == ECONNABORTED)) {
            if (fd->secret->nonblocking) {
                break;
            }
            if (!_PR_IS_NATIVE_THREAD(me)) {
                if ((rv = local_io_wait(osfd, _PR_UNIX_POLL_READ, timeout)) < 0)
                    goto done;
            } else {
                if ((rv = socket_io_wait(osfd, READ_FD, timeout)) < 0)
                    goto done;
            }
        } else if ((err == EINTR) && (!_PR_PENDING_INTERRUPT(me))){
            continue;
        } else {
            break;
        }
    }
    if (rv < 0) {
        _PR_MD_MAP_ACCEPT_ERROR(err);
    }
done:
#ifdef _PR_HAVE_SOCKADDR_LEN
    if (rv != -1) {
        /* ignore the sa_len field of struct sockaddr */
        if (addr) {
            addr->raw.family = ((struct sockaddr *) addr)->sa_family;
        }
    }
#endif /* _PR_HAVE_SOCKADDR_LEN */
    return(rv);
}

extern int _connect (int s, const struct sockaddr *name, int namelen);
PRInt32 _MD_connect(
    PRFileDesc *fd, const PRNetAddr *addr, PRUint32 addrlen, PRIntervalTime timeout)
{
    PRInt32 rv, err;
    PRThread *me = _PR_MD_CURRENT_THREAD();
    PRInt32 osfd = fd->secret->md.osfd;
#ifdef IRIX
    extern PRInt32 _MD_irix_connect(
        PRInt32 osfd, const PRNetAddr *addr, PRInt32 addrlen, PRIntervalTime timeout);
#endif
#ifdef _PR_HAVE_SOCKADDR_LEN
    PRNetAddr addrCopy;

    addrCopy = *addr;
    ((struct sockaddr *) &addrCopy)->sa_len = addrlen;
    ((struct sockaddr *) &addrCopy)->sa_family = addr->raw.family;
#endif

    /*
     * We initiate the connection setup by making a nonblocking connect()
     * call. If the connect() call fails, there are two cases we handle
     * specially:
     * 1. The connect() call was interrupted by a signal. In this case
     * we simply retry connect().
     * 2. The NSPR socket is nonblocking and connect() fails with
     * EINPROGRESS. We first wait until the socket becomes writable.
     * Then we try to find out whether the connection setup succeeded
     * or failed.
     */
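
    /*
     * For reference (sketch only): on most Unix platforms the pending
     * connect() status checked below via
     * _MD_unix_get_nonblocking_connect_error() boils down to reading
     * SO_ERROR, roughly:
     *
     *     int err;
     *     _PRSockLen_t optlen = sizeof(err);
     *     getsockopt(osfd, SOL_SOCKET, SO_ERROR, (char *) &err, &optlen);
     *
     * returning 0 on success or the deferred errno value.
     */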

retry:
#ifdef IRIX
    if ((rv = _MD_irix_connect(osfd, addr, addrlen, timeout)) == -1) {
#else
#ifdef _PR_HAVE_SOCKADDR_LEN
    if ((rv = connect(osfd, (struct sockaddr *)&addrCopy, addrlen)) == -1) {
#else
    if ((rv = connect(osfd, (struct sockaddr *)addr, addrlen)) == -1) {
#endif
#endif
        err = _MD_ERRNO();

        if (err == EINTR) {
            if (_PR_PENDING_INTERRUPT(me)) {
                me->flags &= ~_PR_INTERRUPT;
                PR_SetError( PR_PENDING_INTERRUPT_ERROR, 0);
                return -1;
            }
            goto retry;
        }

        if (!fd->secret->nonblocking && (err == EINPROGRESS)) {
            if (!_PR_IS_NATIVE_THREAD(me)) {

                if ((rv = local_io_wait(osfd, _PR_UNIX_POLL_WRITE, timeout)) < 0)
                    return -1;
            } else {
                /*
                 * socket_io_wait() may return -1 or 1.
                 */

                rv = socket_io_wait(osfd, WRITE_FD, timeout);
                if (rv == -1) {
                    return -1;
                }
            }

            PR_ASSERT(rv == 1);
            if (_PR_PENDING_INTERRUPT(me)) {
                me->flags &= ~_PR_INTERRUPT;
                PR_SetError( PR_PENDING_INTERRUPT_ERROR, 0);
                return -1;
            }
            err = _MD_unix_get_nonblocking_connect_error(osfd);
            if (err != 0) {
                _PR_MD_MAP_CONNECT_ERROR(err);
                return -1;
            }
            return 0;
        }

        _PR_MD_MAP_CONNECT_ERROR(err);
    }

    return rv;
} /* _MD_connect */

PRInt32 _MD_bind(PRFileDesc *fd, const PRNetAddr *addr, PRUint32 addrlen)
{
    PRInt32 rv, err;
#ifdef _PR_HAVE_SOCKADDR_LEN
    PRNetAddr addrCopy;

    addrCopy = *addr;
    ((struct sockaddr *) &addrCopy)->sa_len = addrlen;
    ((struct sockaddr *) &addrCopy)->sa_family = addr->raw.family;
    rv = bind(fd->secret->md.osfd, (struct sockaddr *) &addrCopy, (int )addrlen);
#else
    rv = bind(fd->secret->md.osfd, (struct sockaddr *) addr, (int )addrlen);
#endif
    if (rv < 0) {
        err = _MD_ERRNO();
        _PR_MD_MAP_BIND_ERROR(err);
    }
    return(rv);
}

PRInt32 _MD_listen(PRFileDesc *fd, PRIntn backlog)
{
    PRInt32 rv, err;

    rv = listen(fd->secret->md.osfd, backlog);
    if (rv < 0) {
        err = _MD_ERRNO();
        _PR_MD_MAP_LISTEN_ERROR(err);
    }
    return(rv);
}

PRInt32 _MD_shutdown(PRFileDesc *fd, PRIntn how)
{
    PRInt32 rv, err;

    rv = shutdown(fd->secret->md.osfd, how);
    if (rv < 0) {
        err = _MD_ERRNO();
        _PR_MD_MAP_SHUTDOWN_ERROR(err);
    }
    return(rv);
}

PRInt32 _MD_socketpair(int af, int type, int flags,
    PRInt32 *osfd)
{
    PRInt32 rv, err;

    rv = socketpair(af, type, flags, osfd);
    if (rv < 0) {
        err = _MD_ERRNO();
        _PR_MD_MAP_SOCKETPAIR_ERROR(err);
    }
    return rv;
}

PRStatus _MD_getsockname(PRFileDesc *fd, PRNetAddr *addr,
    PRUint32 *addrlen)
{
    PRInt32 rv, err;

    rv = getsockname(fd->secret->md.osfd,
        (struct sockaddr *) addr, (_PRSockLen_t *)addrlen);
#ifdef _PR_HAVE_SOCKADDR_LEN
    if (rv == 0) {
        /* ignore the sa_len field of struct sockaddr */
        if (addr) {
            addr->raw.family = ((struct sockaddr *) addr)->sa_family;
        }
    }
#endif /* _PR_HAVE_SOCKADDR_LEN */
    if (rv < 0) {
        err = _MD_ERRNO();
        _PR_MD_MAP_GETSOCKNAME_ERROR(err);
    }
    return rv==0?PR_SUCCESS:PR_FAILURE;
}

PRStatus _MD_getpeername(PRFileDesc *fd, PRNetAddr *addr,
    PRUint32 *addrlen)
{
    PRInt32 rv, err;

    rv = getpeername(fd->secret->md.osfd,
        (struct sockaddr *) addr, (_PRSockLen_t *)addrlen);
#ifdef _PR_HAVE_SOCKADDR_LEN
    if (rv == 0) {
        /* ignore the sa_len field of struct sockaddr */
        if (addr) {
            addr->raw.family = ((struct sockaddr *) addr)->sa_family;
        }
    }
#endif /* _PR_HAVE_SOCKADDR_LEN */
    if (rv < 0) {
        err = _MD_ERRNO();
        _PR_MD_MAP_GETPEERNAME_ERROR(err);
    }
    return rv==0?PR_SUCCESS:PR_FAILURE;
}

PRStatus _MD_getsockopt(PRFileDesc *fd, PRInt32 level,
    PRInt32 optname, char* optval, PRInt32* optlen)
{
    PRInt32 rv, err;

    rv = getsockopt(fd->secret->md.osfd, level, optname, optval, (_PRSockLen_t *)optlen);
    if (rv < 0) {
        err = _MD_ERRNO();
        _PR_MD_MAP_GETSOCKOPT_ERROR(err);
    }
    return rv==0?PR_SUCCESS:PR_FAILURE;
}

PRStatus _MD_setsockopt(PRFileDesc *fd, PRInt32 level,
    PRInt32 optname, const char* optval, PRInt32 optlen)
{
    PRInt32 rv, err;

    rv = setsockopt(fd->secret->md.osfd, level, optname, optval, optlen);
    if (rv < 0) {
        err = _MD_ERRNO();
        _PR_MD_MAP_SETSOCKOPT_ERROR(err);
    }
    return rv==0?PR_SUCCESS:PR_FAILURE;
}

PRStatus _MD_set_fd_inheritable(PRFileDesc *fd, PRBool inheritable)
{
    int rv;

    rv = fcntl(fd->secret->md.osfd, F_SETFD, inheritable ? 0 : FD_CLOEXEC);
    if (-1 == rv) {
        PR_SetError(PR_UNKNOWN_ERROR, _MD_ERRNO());
        return PR_FAILURE;
    }
    return PR_SUCCESS;
}
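
/*
 * Background: F_SETFD with FD_CLOEXEC marks the descriptor close-on-exec, so
 * passing 0 (the inheritable case above) lets the fd survive exec(). The
 * debug check in _MD_init_fd_inheritable() below reads the flag back with
 * F_GETFD to confirm the default state.
 */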

void _MD_init_fd_inheritable(PRFileDesc *fd, PRBool imported)
{
    if (imported) {
        fd->secret->inheritable = _PR_TRI_UNKNOWN;
    } else {
        /* By default, a Unix fd is not closed on exec. */
#ifdef DEBUG
        {
            int flags = fcntl(fd->secret->md.osfd, F_GETFD, 0);
            PR_ASSERT(0 == flags);
        }
#endif
        fd->secret->inheritable = _PR_TRI_TRUE;
    }
}

/************************************************************************/
#if !defined(_PR_USE_POLL)

/*
** Scan through io queue and find any bad fd's that triggered the error
** from _MD_SELECT
*/
static void FindBadFDs(void)
{
    PRCList *q;
    PRThread *me = _MD_CURRENT_THREAD();

    PR_ASSERT(!_PR_IS_NATIVE_THREAD(me));
    q = (_PR_IOQ(me->cpu)).next;
    _PR_IOQ_MAX_OSFD(me->cpu) = -1;
    _PR_IOQ_TIMEOUT(me->cpu) = PR_INTERVAL_NO_TIMEOUT;
    while (q != &_PR_IOQ(me->cpu)) {
        PRPollQueue *pq = _PR_POLLQUEUE_PTR(q);
        PRBool notify = PR_FALSE;
        _PRUnixPollDesc *pds = pq->pds;
        _PRUnixPollDesc *epds = pds + pq->npds;
        PRInt32 pq_max_osfd = -1;

        q = q->next;
        for (; pds < epds; pds++) {
            PRInt32 osfd = pds->osfd;
            pds->out_flags = 0;
            PR_ASSERT(osfd >= 0 || pds->in_flags == 0);
            if (pds->in_flags == 0) {
                continue; /* skip this fd */
            }
            if (fcntl(osfd, F_GETFL, 0) == -1) {
                /* Found a bad descriptor, remove it from the fd_sets. */
                PR_LOG(_pr_io_lm, PR_LOG_MAX,
                    ("file descriptor %d is bad", osfd));
                pds->out_flags = _PR_UNIX_POLL_NVAL;
                notify = PR_TRUE;
            }
            if (osfd > pq_max_osfd) {
                pq_max_osfd = osfd;
            }
        }

        if (notify) {
            PRIntn pri;
            PR_REMOVE_LINK(&pq->links);
            pq->on_ioq = PR_FALSE;

            /*
             * Decrement the count of descriptors for each descriptor/event
             * because this I/O request is being removed from the
             * ioq
             */
            pds = pq->pds;
            for (; pds < epds; pds++) {
                PRInt32 osfd = pds->osfd;
                PRInt16 in_flags = pds->in_flags;
                PR_ASSERT(osfd >= 0 || in_flags == 0);
                if (in_flags & _PR_UNIX_POLL_READ) {
                    if (--(_PR_FD_READ_CNT(me->cpu))[osfd] == 0)
                        FD_CLR(osfd, &_PR_FD_READ_SET(me->cpu));
                }
                if (in_flags & _PR_UNIX_POLL_WRITE) {
                    if (--(_PR_FD_WRITE_CNT(me->cpu))[osfd] == 0)
                        FD_CLR(osfd, &_PR_FD_WRITE_SET(me->cpu));
                }
                if (in_flags & _PR_UNIX_POLL_EXCEPT) {
                    if (--(_PR_FD_EXCEPTION_CNT(me->cpu))[osfd] == 0)
                        FD_CLR(osfd, &_PR_FD_EXCEPTION_SET(me->cpu));
                }
            }

            _PR_THREAD_LOCK(pq->thr);
            if (pq->thr->flags & (_PR_ON_PAUSEQ|_PR_ON_SLEEPQ)) {
                _PRCPU *cpu = pq->thr->cpu;
                _PR_SLEEPQ_LOCK(pq->thr->cpu);
                _PR_DEL_SLEEPQ(pq->thr, PR_TRUE);
                _PR_SLEEPQ_UNLOCK(pq->thr->cpu);

                if (pq->thr->flags & _PR_SUSPENDING) {
                    /*
                     * set thread state to SUSPENDED;
                     * a Resume operation on the thread
                     * will move it to the runQ
                     */
                    pq->thr->state = _PR_SUSPENDED;
                    _PR_MISCQ_LOCK(pq->thr->cpu);
                    _PR_ADD_SUSPENDQ(pq->thr, pq->thr->cpu);
                    _PR_MISCQ_UNLOCK(pq->thr->cpu);
                } else {
                    pri = pq->thr->priority;
                    pq->thr->state = _PR_RUNNABLE;

                    _PR_RUNQ_LOCK(cpu);
                    _PR_ADD_RUNQ(pq->thr, cpu, pri);
                    _PR_RUNQ_UNLOCK(cpu);
                }
            }
            _PR_THREAD_UNLOCK(pq->thr);
        } else {
            if (pq->timeout < _PR_IOQ_TIMEOUT(me->cpu))
                _PR_IOQ_TIMEOUT(me->cpu) = pq->timeout;
            if (_PR_IOQ_MAX_OSFD(me->cpu) < pq_max_osfd)
                _PR_IOQ_MAX_OSFD(me->cpu) = pq_max_osfd;
        }
    }
    if (_PR_IS_NATIVE_THREAD_SUPPORTED()) {
        if (_PR_IOQ_MAX_OSFD(me->cpu) < _pr_md_pipefd[0])
            _PR_IOQ_MAX_OSFD(me->cpu) = _pr_md_pipefd[0];
    }
}
#endif /* !defined(_PR_USE_POLL) */

/************************************************************************/

/*
** Called by the scheduler when there is nothing to do. This means that
** all threads are blocked on some monitor somewhere.
**
** Note: this code doesn't release the scheduler lock.
*/
/*
** Pause the current CPU. longjmp to the cpu's pause stack
**
** This must be called with the scheduler locked
*/
void _MD_PauseCPU(PRIntervalTime ticks)
{
    PRThread *me = _MD_CURRENT_THREAD();
#ifdef _PR_USE_POLL
    int timeout;
    struct pollfd *pollfds; /* an array of pollfd structures */
    struct pollfd *pollfdPtr; /* a pointer that steps through the array */
    unsigned long npollfds; /* number of pollfd structures in array */
    unsigned long pollfds_size;
    int nfd; /* to hold the return value of poll() */
#else
    struct timeval timeout, *tvp;
    fd_set r, w, e;
    fd_set *rp, *wp, *ep;
    PRInt32 max_osfd, nfd;
#endif /* _PR_USE_POLL */
    PRInt32 rv;
    PRCList *q;
    PRUint32 min_timeout;
    sigset_t oldset;
#ifdef IRIX
    extern sigset_t ints_off;
#endif

    PR_ASSERT(_PR_MD_GET_INTSOFF() != 0);

    _PR_MD_IOQ_LOCK();

#ifdef _PR_USE_POLL
    /* Build up the pollfd structure array to wait on */

    /* Find out how many pollfd structures are needed */
    npollfds = _PR_IOQ_OSFD_CNT(me->cpu);
    PR_ASSERT(npollfds >= 0);

    /*
     * We use a pipe to wake up a native thread. An fd is needed
     * for the pipe and we poll it for reading.
     */
    if (_PR_IS_NATIVE_THREAD_SUPPORTED()) {
        npollfds++;
#ifdef IRIX
        /*
         * On Irix, a second pipe is used to cause the primordial cpu to
         * wake up and exit, when the process is exiting because of a call
         * to exit/PR_ProcessExit.
         */
        if (me->cpu->id == 0) {
            npollfds++;
        }
#endif
    }

    /*
     * if the cpu's pollfd array is not big enough, release it and allocate a new one
     */
    if (npollfds > _PR_IOQ_POLLFDS_SIZE(me->cpu)) {
        if (_PR_IOQ_POLLFDS(me->cpu) != NULL)
            PR_DELETE(_PR_IOQ_POLLFDS(me->cpu));
        pollfds_size = PR_MAX(_PR_IOQ_MIN_POLLFDS_SIZE(me->cpu), npollfds);
        pollfds = (struct pollfd *) PR_MALLOC(pollfds_size * sizeof(struct pollfd));
        _PR_IOQ_POLLFDS(me->cpu) = pollfds;
        _PR_IOQ_POLLFDS_SIZE(me->cpu) = pollfds_size;
    } else {
        pollfds = _PR_IOQ_POLLFDS(me->cpu);
    }
    pollfdPtr = pollfds;

    /*
     * If we need to poll the pipe for waking up a native thread,
     * the pipe's fd is the first element in the pollfds array.
     */
    if (_PR_IS_NATIVE_THREAD_SUPPORTED()) {
        pollfdPtr->fd = _pr_md_pipefd[0];
        pollfdPtr->events = POLLIN;
        pollfdPtr++;
#ifdef IRIX
        /*
         * On Irix, the second element is the exit pipe
         */
        if (me->cpu->id == 0) {
            pollfdPtr->fd = _pr_irix_primoridal_cpu_fd[0];
            pollfdPtr->events = POLLIN;
            pollfdPtr++;
        }
#endif
    }

    min_timeout = PR_INTERVAL_NO_TIMEOUT;
    for (q = _PR_IOQ(me->cpu).next; q != &_PR_IOQ(me->cpu); q = q->next) {
        PRPollQueue *pq = _PR_POLLQUEUE_PTR(q);
        _PRUnixPollDesc *pds = pq->pds;
        _PRUnixPollDesc *epds = pds + pq->npds;

        if (pq->timeout < min_timeout) {
            min_timeout = pq->timeout;
        }
        for (; pds < epds; pds++, pollfdPtr++) {
            /*
             * Assert that the pollfdPtr pointer does not go
             * beyond the end of the pollfds array
             */
            PR_ASSERT(pollfdPtr < pollfds + npollfds);
            pollfdPtr->fd = pds->osfd;
            /* direct copy of poll flags */
            pollfdPtr->events = pds->in_flags;
        }
    }
    _PR_IOQ_TIMEOUT(me->cpu) = min_timeout;
#else
    /*
     * assignment of fd_sets
     */
    r = _PR_FD_READ_SET(me->cpu);
    w = _PR_FD_WRITE_SET(me->cpu);
    e = _PR_FD_EXCEPTION_SET(me->cpu);

    rp = &r;
    wp = &w;
    ep = &e;

    max_osfd = _PR_IOQ_MAX_OSFD(me->cpu) + 1;
    min_timeout = _PR_IOQ_TIMEOUT(me->cpu);
#endif /* _PR_USE_POLL */
    /*
    ** Compute the minimum timeout value: make it the smaller of the
    ** timeouts specified by the i/o pollers or the timeout of the first
    ** sleeping thread.
    */
    q = _PR_SLEEPQ(me->cpu).next;

    if (q != &_PR_SLEEPQ(me->cpu)) {
        PRThread *t = _PR_THREAD_PTR(q);

        if (t->sleep < min_timeout) {
            min_timeout = t->sleep;
        }
    }
    if (min_timeout > ticks) {
        min_timeout = ticks;
    }

#ifdef _PR_USE_POLL
    if (min_timeout == PR_INTERVAL_NO_TIMEOUT)
        timeout = -1;
    else
        timeout = PR_IntervalToMilliseconds(min_timeout);
#else
    if (min_timeout == PR_INTERVAL_NO_TIMEOUT) {
        tvp = NULL;
    } else {
        timeout.tv_sec = PR_IntervalToSeconds(min_timeout);
        timeout.tv_usec = PR_IntervalToMicroseconds(min_timeout)
            % PR_USEC_PER_SEC;
        tvp = &timeout;
    }
#endif /* _PR_USE_POLL */

    _PR_MD_IOQ_UNLOCK();
    _MD_CHECK_FOR_EXIT();
    /*
     * check for i/o operations
     */
#ifndef _PR_NO_CLOCK_TIMER
    /*
     * Disable the clock interrupts while we are in select, if clock interrupts
     * are enabled. Otherwise, when the select/poll calls are interrupted, the
     * timer value starts ticking from zero again when the system call is restarted.
     */
#ifdef IRIX
    /*
michael@0 1616 	 * SIGCHLD signal is used on Irix to detect the termination of an
michael@0 1617 * sproc by SIGSEGV, SIGBUS or SIGABRT signals when
michael@0 1618 * _nspr_terminate_on_error is set.
michael@0 1619 */
michael@0 1620 if ((!_nspr_noclock) || (_nspr_terminate_on_error))
michael@0 1621 #else
michael@0 1622 if (!_nspr_noclock)
michael@0 1623 #endif /* IRIX */
michael@0 1624 #ifdef IRIX
michael@0 1625 sigprocmask(SIG_BLOCK, &ints_off, &oldset);
michael@0 1626 #else
michael@0 1627 PR_ASSERT(sigismember(&timer_set, SIGALRM));
michael@0 1628 sigprocmask(SIG_BLOCK, &timer_set, &oldset);
michael@0 1629 #endif /* IRIX */
michael@0 1630 #endif /* !_PR_NO_CLOCK_TIMER */
michael@0 1631
michael@0 1632 #ifndef _PR_USE_POLL
michael@0 1633 PR_ASSERT(FD_ISSET(_pr_md_pipefd[0],rp));
michael@0 1634 nfd = _MD_SELECT(max_osfd, rp, wp, ep, tvp);
michael@0 1635 #else
michael@0 1636 nfd = _MD_POLL(pollfds, npollfds, timeout);
michael@0 1637 #endif /* !_PR_USE_POLL */
michael@0 1638
michael@0 1639 #ifndef _PR_NO_CLOCK_TIMER
michael@0 1640 #ifdef IRIX
michael@0 1641 if ((!_nspr_noclock) || (_nspr_terminate_on_error))
michael@0 1642 #else
michael@0 1643 if (!_nspr_noclock)
michael@0 1644 #endif /* IRIX */
michael@0 1645 sigprocmask(SIG_SETMASK, &oldset, 0);
michael@0 1646 #endif /* !_PR_NO_CLOCK_TIMER */
michael@0 1647
michael@0 1648 _MD_CHECK_FOR_EXIT();
michael@0 1649
michael@0 1650 #ifdef IRIX
michael@0 1651 _PR_MD_primordial_cpu();
michael@0 1652 #endif
michael@0 1653
michael@0 1654 _PR_MD_IOQ_LOCK();
michael@0 1655 /*
michael@0 1656 ** Notify monitors that are associated with the selected descriptors.
michael@0 1657 */
michael@0 1658 #ifdef _PR_USE_POLL
michael@0 1659 if (nfd > 0) {
michael@0 1660 pollfdPtr = pollfds;
michael@0 1661 if (_PR_IS_NATIVE_THREAD_SUPPORTED()) {
michael@0 1662 /*
michael@0 1663 * Assert that the pipe is the first element in the
michael@0 1664 * pollfds array.
michael@0 1665 */
michael@0 1666 PR_ASSERT(pollfds[0].fd == _pr_md_pipefd[0]);
michael@0 1667 if ((pollfds[0].revents & POLLIN) && (nfd == 1)) {
michael@0 1668 /*
michael@0 1669 * woken up by another thread; read all the data
michael@0 1670 * in the pipe to empty the pipe
michael@0 1671 */
michael@0 1672 while ((rv = read(_pr_md_pipefd[0], _pr_md_pipebuf,
michael@0 1673 PIPE_BUF)) == PIPE_BUF){
michael@0 1674 }
michael@0 1675 PR_ASSERT((rv > 0) || ((rv == -1) && (errno == EAGAIN)));
michael@0 1676 }
michael@0 1677 pollfdPtr++;
michael@0 1678 #ifdef IRIX
michael@0 1679 /*
michael@0 1680 * On Irix, check to see if the primordial cpu needs to exit
michael@0 1681 * to cause the process to terminate
michael@0 1682 */
michael@0 1683 if (me->cpu->id == 0) {
michael@0 1684 PR_ASSERT(pollfds[1].fd == _pr_irix_primoridal_cpu_fd[0]);
michael@0 1685 if (pollfdPtr->revents & POLLIN) {
michael@0 1686 if (_pr_irix_process_exit) {
michael@0 1687 /*
michael@0 1688 * process exit due to a call to PR_ProcessExit
michael@0 1689 */
michael@0 1690 prctl(PR_SETEXITSIG, SIGKILL);
michael@0 1691 _exit(_pr_irix_process_exit_code);
michael@0 1692 } else {
michael@0 1693 while ((rv = read(_pr_irix_primoridal_cpu_fd[0],
michael@0 1694 _pr_md_pipebuf, PIPE_BUF)) == PIPE_BUF) {
michael@0 1695 }
michael@0 1696 PR_ASSERT(rv > 0);
michael@0 1697 }
michael@0 1698 }
michael@0 1699 pollfdPtr++;
michael@0 1700 }
michael@0 1701 #endif
michael@0 1702 }
michael@0 1703 for (q = _PR_IOQ(me->cpu).next; q != &_PR_IOQ(me->cpu); q = q->next) {
michael@0 1704 PRPollQueue *pq = _PR_POLLQUEUE_PTR(q);
michael@0 1705 PRBool notify = PR_FALSE;
michael@0 1706 _PRUnixPollDesc *pds = pq->pds;
michael@0 1707 _PRUnixPollDesc *epds = pds + pq->npds;
michael@0 1708
michael@0 1709 for (; pds < epds; pds++, pollfdPtr++) {
michael@0 1710 /*
michael@0 1711 * Assert that the pollfdPtr pointer does not go beyond
michael@0 1712 * the end of the pollfds array.
michael@0 1713 */
michael@0 1714 PR_ASSERT(pollfdPtr < pollfds + npollfds);
michael@0 1715 /*
michael@0 1716 * Assert that the fd's in the pollfds array (stepped
michael@0 1717 * through by pollfdPtr) are in the same order as
michael@0 1718 * the fd's in _PR_IOQ() (stepped through by q and pds).
michael@0 1719 * This is how the pollfds array was created earlier.
michael@0 1720 */
michael@0 1721 PR_ASSERT(pollfdPtr->fd == pds->osfd);
michael@0 1722 pds->out_flags = pollfdPtr->revents;
michael@0 1723 /* Negative fd's are ignored by poll() */
michael@0 1724 if (pds->osfd >= 0 && pds->out_flags) {
michael@0 1725 notify = PR_TRUE;
michael@0 1726 }
michael@0 1727 }
michael@0 1728 if (notify) {
michael@0 1729 PRIntn pri;
michael@0 1730 PRThread *thred;
michael@0 1731
michael@0 1732 PR_REMOVE_LINK(&pq->links);
michael@0 1733 pq->on_ioq = PR_FALSE;
michael@0 1734
michael@0 1735 thred = pq->thr;
michael@0 1736 _PR_THREAD_LOCK(thred);
michael@0 1737 if (pq->thr->flags & (_PR_ON_PAUSEQ|_PR_ON_SLEEPQ)) {
michael@0 1738 _PRCPU *cpu = pq->thr->cpu;
michael@0 1739 _PR_SLEEPQ_LOCK(pq->thr->cpu);
michael@0 1740 _PR_DEL_SLEEPQ(pq->thr, PR_TRUE);
michael@0 1741 _PR_SLEEPQ_UNLOCK(pq->thr->cpu);
michael@0 1742
michael@0 1743 if (pq->thr->flags & _PR_SUSPENDING) {
michael@0 1744 /*
michael@0 1745 * set thread state to SUSPENDED;
michael@0 1746 * a Resume operation on the thread
michael@0 1747 * will move it to the runQ
michael@0 1748 */
michael@0 1749 pq->thr->state = _PR_SUSPENDED;
michael@0 1750 _PR_MISCQ_LOCK(pq->thr->cpu);
michael@0 1751 _PR_ADD_SUSPENDQ(pq->thr, pq->thr->cpu);
michael@0 1752 _PR_MISCQ_UNLOCK(pq->thr->cpu);
michael@0 1753 } else {
michael@0 1754 pri = pq->thr->priority;
michael@0 1755 pq->thr->state = _PR_RUNNABLE;
michael@0 1756
michael@0 1757 _PR_RUNQ_LOCK(cpu);
michael@0 1758 _PR_ADD_RUNQ(pq->thr, cpu, pri);
michael@0 1759 _PR_RUNQ_UNLOCK(cpu);
michael@0 1760 if (_pr_md_idle_cpus > 1)
michael@0 1761 _PR_MD_WAKEUP_WAITER(thred);
michael@0 1762 }
michael@0 1763 }
michael@0 1764 _PR_THREAD_UNLOCK(thred);
michael@0 1765 _PR_IOQ_OSFD_CNT(me->cpu) -= pq->npds;
michael@0 1766 PR_ASSERT(_PR_IOQ_OSFD_CNT(me->cpu) >= 0);
michael@0 1767 }
michael@0 1768 }
michael@0 1769 } else if (nfd == -1) {
michael@0 1770 PR_LOG(_pr_io_lm, PR_LOG_MAX, ("poll() failed with errno %d", errno));
michael@0 1771 }
michael@0 1772
michael@0 1773 #else
michael@0 1774 if (nfd > 0) {
michael@0 1775 q = _PR_IOQ(me->cpu).next;
michael@0 1776 _PR_IOQ_MAX_OSFD(me->cpu) = -1;
michael@0 1777 _PR_IOQ_TIMEOUT(me->cpu) = PR_INTERVAL_NO_TIMEOUT;
michael@0 1778 while (q != &_PR_IOQ(me->cpu)) {
michael@0 1779 PRPollQueue *pq = _PR_POLLQUEUE_PTR(q);
michael@0 1780 PRBool notify = PR_FALSE;
michael@0 1781 _PRUnixPollDesc *pds = pq->pds;
michael@0 1782 _PRUnixPollDesc *epds = pds + pq->npds;
michael@0 1783 PRInt32 pq_max_osfd = -1;
michael@0 1784
michael@0 1785 q = q->next;
michael@0 1786 for (; pds < epds; pds++) {
michael@0 1787 PRInt32 osfd = pds->osfd;
michael@0 1788 PRInt16 in_flags = pds->in_flags;
michael@0 1789 PRInt16 out_flags = 0;
michael@0 1790 PR_ASSERT(osfd >= 0 || in_flags == 0);
michael@0 1791 if ((in_flags & _PR_UNIX_POLL_READ) && FD_ISSET(osfd, rp)) {
michael@0 1792 out_flags |= _PR_UNIX_POLL_READ;
michael@0 1793 }
michael@0 1794 if ((in_flags & _PR_UNIX_POLL_WRITE) && FD_ISSET(osfd, wp)) {
michael@0 1795 out_flags |= _PR_UNIX_POLL_WRITE;
michael@0 1796 }
michael@0 1797 if ((in_flags & _PR_UNIX_POLL_EXCEPT) && FD_ISSET(osfd, ep)) {
michael@0 1798 out_flags |= _PR_UNIX_POLL_EXCEPT;
michael@0 1799 }
michael@0 1800 pds->out_flags = out_flags;
michael@0 1801 if (out_flags) {
michael@0 1802 notify = PR_TRUE;
michael@0 1803 }
michael@0 1804 if (osfd > pq_max_osfd) {
michael@0 1805 pq_max_osfd = osfd;
michael@0 1806 }
michael@0 1807 }
michael@0 1808 if (notify == PR_TRUE) {
michael@0 1809 PRIntn pri;
michael@0 1810 PRThread *thred;
michael@0 1811
michael@0 1812 PR_REMOVE_LINK(&pq->links);
michael@0 1813 pq->on_ioq = PR_FALSE;
michael@0 1814
michael@0 1815 /*
michael@0 1816 	             * Decrement the count of descriptors for each descriptor/event
michael@0 1817 * because this I/O request is being removed from the
michael@0 1818 * ioq
michael@0 1819 */
michael@0 1820 pds = pq->pds;
michael@0 1821 for (; pds < epds; pds++) {
michael@0 1822 PRInt32 osfd = pds->osfd;
michael@0 1823 PRInt16 in_flags = pds->in_flags;
michael@0 1824 PR_ASSERT(osfd >= 0 || in_flags == 0);
michael@0 1825 if (in_flags & _PR_UNIX_POLL_READ) {
michael@0 1826 if (--(_PR_FD_READ_CNT(me->cpu))[osfd] == 0)
michael@0 1827 FD_CLR(osfd, &_PR_FD_READ_SET(me->cpu));
michael@0 1828 }
michael@0 1829 if (in_flags & _PR_UNIX_POLL_WRITE) {
michael@0 1830 if (--(_PR_FD_WRITE_CNT(me->cpu))[osfd] == 0)
michael@0 1831 FD_CLR(osfd, &_PR_FD_WRITE_SET(me->cpu));
michael@0 1832 }
michael@0 1833 if (in_flags & _PR_UNIX_POLL_EXCEPT) {
michael@0 1834 if (--(_PR_FD_EXCEPTION_CNT(me->cpu))[osfd] == 0)
michael@0 1835 FD_CLR(osfd, &_PR_FD_EXCEPTION_SET(me->cpu));
michael@0 1836 }
michael@0 1837 }
michael@0 1838
michael@0 1839 /*
michael@0 1840 * Because this thread can run on a different cpu right
michael@0 1841 * after being added to the run queue, do not dereference
michael@0 1842 * pq
michael@0 1843 */
michael@0 1844 thred = pq->thr;
michael@0 1845 _PR_THREAD_LOCK(thred);
michael@0 1846 if (pq->thr->flags & (_PR_ON_PAUSEQ|_PR_ON_SLEEPQ)) {
michael@0 1847 _PRCPU *cpu = thred->cpu;
michael@0 1848 _PR_SLEEPQ_LOCK(pq->thr->cpu);
michael@0 1849 _PR_DEL_SLEEPQ(pq->thr, PR_TRUE);
michael@0 1850 _PR_SLEEPQ_UNLOCK(pq->thr->cpu);
michael@0 1851
michael@0 1852 if (pq->thr->flags & _PR_SUSPENDING) {
michael@0 1853 /*
michael@0 1854 * set thread state to SUSPENDED;
michael@0 1855 * a Resume operation on the thread
michael@0 1856 * will move it to the runQ
michael@0 1857 */
michael@0 1858 pq->thr->state = _PR_SUSPENDED;
michael@0 1859 _PR_MISCQ_LOCK(pq->thr->cpu);
michael@0 1860 _PR_ADD_SUSPENDQ(pq->thr, pq->thr->cpu);
michael@0 1861 _PR_MISCQ_UNLOCK(pq->thr->cpu);
michael@0 1862 } else {
michael@0 1863 pri = pq->thr->priority;
michael@0 1864 pq->thr->state = _PR_RUNNABLE;
michael@0 1865
michael@0 1866 pq->thr->cpu = cpu;
michael@0 1867 _PR_RUNQ_LOCK(cpu);
michael@0 1868 _PR_ADD_RUNQ(pq->thr, cpu, pri);
michael@0 1869 _PR_RUNQ_UNLOCK(cpu);
michael@0 1870 if (_pr_md_idle_cpus > 1)
michael@0 1871 _PR_MD_WAKEUP_WAITER(thred);
michael@0 1872 }
michael@0 1873 }
michael@0 1874 _PR_THREAD_UNLOCK(thred);
michael@0 1875 } else {
michael@0 1876 if (pq->timeout < _PR_IOQ_TIMEOUT(me->cpu))
michael@0 1877 _PR_IOQ_TIMEOUT(me->cpu) = pq->timeout;
michael@0 1878 if (_PR_IOQ_MAX_OSFD(me->cpu) < pq_max_osfd)
michael@0 1879 _PR_IOQ_MAX_OSFD(me->cpu) = pq_max_osfd;
michael@0 1880 }
michael@0 1881 }
michael@0 1882 if (_PR_IS_NATIVE_THREAD_SUPPORTED()) {
michael@0 1883 if ((FD_ISSET(_pr_md_pipefd[0], rp)) && (nfd == 1)) {
michael@0 1884 /*
michael@0 1885 * woken up by another thread; read all the data
michael@0 1886 * in the pipe to empty the pipe
michael@0 1887 */
michael@0 1888 while ((rv =
michael@0 1889 read(_pr_md_pipefd[0], _pr_md_pipebuf, PIPE_BUF))
michael@0 1890 == PIPE_BUF){
michael@0 1891 }
michael@0 1892 PR_ASSERT((rv > 0) ||
michael@0 1893 ((rv == -1) && (errno == EAGAIN)));
michael@0 1894 }
michael@0 1895 if (_PR_IOQ_MAX_OSFD(me->cpu) < _pr_md_pipefd[0])
michael@0 1896 _PR_IOQ_MAX_OSFD(me->cpu) = _pr_md_pipefd[0];
michael@0 1897 #ifdef IRIX
michael@0 1898 if ((me->cpu->id == 0) &&
michael@0 1899 (FD_ISSET(_pr_irix_primoridal_cpu_fd[0], rp))) {
michael@0 1900 if (_pr_irix_process_exit) {
michael@0 1901 /*
michael@0 1902 * process exit due to a call to PR_ProcessExit
michael@0 1903 */
michael@0 1904 prctl(PR_SETEXITSIG, SIGKILL);
michael@0 1905 _exit(_pr_irix_process_exit_code);
michael@0 1906 } else {
michael@0 1907 while ((rv = read(_pr_irix_primoridal_cpu_fd[0],
michael@0 1908 _pr_md_pipebuf, PIPE_BUF)) == PIPE_BUF) {
michael@0 1909 }
michael@0 1910 PR_ASSERT(rv > 0);
michael@0 1911 }
michael@0 1912 }
michael@0 1913 if (me->cpu->id == 0) {
michael@0 1914 if (_PR_IOQ_MAX_OSFD(me->cpu) < _pr_irix_primoridal_cpu_fd[0])
michael@0 1915 _PR_IOQ_MAX_OSFD(me->cpu) = _pr_irix_primoridal_cpu_fd[0];
michael@0 1916 }
michael@0 1917 #endif
michael@0 1918 }
michael@0 1919 } else if (nfd < 0) {
michael@0 1920 if (errno == EBADF) {
michael@0 1921 FindBadFDs();
michael@0 1922 } else {
michael@0 1923 PR_LOG(_pr_io_lm, PR_LOG_MAX, ("select() failed with errno %d",
michael@0 1924 errno));
michael@0 1925 }
michael@0 1926 } else {
michael@0 1927 PR_ASSERT(nfd == 0);
michael@0 1928 /*
michael@0 1929 * compute the new value of _PR_IOQ_TIMEOUT
michael@0 1930 */
michael@0 1931 q = _PR_IOQ(me->cpu).next;
michael@0 1932 _PR_IOQ_MAX_OSFD(me->cpu) = -1;
michael@0 1933 _PR_IOQ_TIMEOUT(me->cpu) = PR_INTERVAL_NO_TIMEOUT;
michael@0 1934 while (q != &_PR_IOQ(me->cpu)) {
michael@0 1935 PRPollQueue *pq = _PR_POLLQUEUE_PTR(q);
michael@0 1936 _PRUnixPollDesc *pds = pq->pds;
michael@0 1937 _PRUnixPollDesc *epds = pds + pq->npds;
michael@0 1938 PRInt32 pq_max_osfd = -1;
michael@0 1939
michael@0 1940 q = q->next;
michael@0 1941 for (; pds < epds; pds++) {
michael@0 1942 if (pds->osfd > pq_max_osfd) {
michael@0 1943 pq_max_osfd = pds->osfd;
michael@0 1944 }
michael@0 1945 }
michael@0 1946 if (pq->timeout < _PR_IOQ_TIMEOUT(me->cpu))
michael@0 1947 _PR_IOQ_TIMEOUT(me->cpu) = pq->timeout;
michael@0 1948 if (_PR_IOQ_MAX_OSFD(me->cpu) < pq_max_osfd)
michael@0 1949 _PR_IOQ_MAX_OSFD(me->cpu) = pq_max_osfd;
michael@0 1950 }
michael@0 1951 if (_PR_IS_NATIVE_THREAD_SUPPORTED()) {
michael@0 1952 if (_PR_IOQ_MAX_OSFD(me->cpu) < _pr_md_pipefd[0])
michael@0 1953 _PR_IOQ_MAX_OSFD(me->cpu) = _pr_md_pipefd[0];
michael@0 1954 }
michael@0 1955 }
michael@0 1956 #endif /* _PR_USE_POLL */
michael@0 1957 _PR_MD_IOQ_UNLOCK();
michael@0 1958 }
michael@0 1959
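/*
** _MD_Wakeup_CPUs --
**     Wake up a cpu that is blocked in select()/poll() by writing one
**     byte to the write end of the wakeup pipe.  If the pipe is full
**     (write fails with EAGAIN), drain the read end and retry the write.
*/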
michael@0 1960 void _MD_Wakeup_CPUs()
michael@0 1961 {
michael@0 1962 PRInt32 rv, data;
michael@0 1963
michael@0 1964 data = 0;
michael@0 1965 rv = write(_pr_md_pipefd[1], &data, 1);
michael@0 1966
michael@0 1967 while ((rv < 0) && (errno == EAGAIN)) {
michael@0 1968 /*
michael@0 1969 * pipe full, read all data in pipe to empty it
michael@0 1970 */
michael@0 1971 while ((rv =
michael@0 1972 read(_pr_md_pipefd[0], _pr_md_pipebuf, PIPE_BUF))
michael@0 1973 == PIPE_BUF) {
michael@0 1974 }
michael@0 1975 PR_ASSERT((rv > 0) ||
michael@0 1976 ((rv == -1) && (errno == EAGAIN)));
michael@0 1977 rv = write(_pr_md_pipefd[1], &data, 1);
michael@0 1978 }
michael@0 1979 }
michael@0 1980
michael@0 1981
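/*
** _MD_InitCPUS --
**     Create the wakeup pipe used by _MD_Wakeup_CPUs, record its read
**     end in the current cpu's fd bookkeeping (and in the select() read
**     set when poll() is not used), and make both ends non-blocking.
*/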
michael@0 1982 void _MD_InitCPUS()
michael@0 1983 {
michael@0 1984 PRInt32 rv, flags;
michael@0 1985 PRThread *me = _MD_CURRENT_THREAD();
michael@0 1986
michael@0 1987 rv = pipe(_pr_md_pipefd);
michael@0 1988 PR_ASSERT(rv == 0);
michael@0 1989 _PR_IOQ_MAX_OSFD(me->cpu) = _pr_md_pipefd[0];
michael@0 1990 #ifndef _PR_USE_POLL
michael@0 1991 FD_SET(_pr_md_pipefd[0], &_PR_FD_READ_SET(me->cpu));
michael@0 1992 #endif
michael@0 1993
michael@0 1994 flags = fcntl(_pr_md_pipefd[0], F_GETFL, 0);
michael@0 1995 fcntl(_pr_md_pipefd[0], F_SETFL, flags | O_NONBLOCK);
michael@0 1996 flags = fcntl(_pr_md_pipefd[1], F_GETFL, 0);
michael@0 1997 fcntl(_pr_md_pipefd[1], F_SETFL, flags | O_NONBLOCK);
michael@0 1998 }
michael@0 1999
michael@0 2000 /*
michael@0 2001 ** Unix SIGALRM (clock) signal handler
michael@0 2002 */
michael@0 2003 static void ClockInterruptHandler()
michael@0 2004 {
michael@0 2005 int olderrno;
michael@0 2006 PRUintn pri;
michael@0 2007 _PRCPU *cpu = _PR_MD_CURRENT_CPU();
michael@0 2008 PRThread *me = _MD_CURRENT_THREAD();
michael@0 2009
michael@0 2010 #ifdef SOLARIS
michael@0 2011 if (!me || _PR_IS_NATIVE_THREAD(me)) {
michael@0 2012 _pr_primordialCPU->u.missed[_pr_primordialCPU->where] |= _PR_MISSED_CLOCK;
michael@0 2013 return;
michael@0 2014 }
michael@0 2015 #endif
michael@0 2016
michael@0 2017 if (_PR_MD_GET_INTSOFF() != 0) {
michael@0 2018 cpu->u.missed[cpu->where] |= _PR_MISSED_CLOCK;
michael@0 2019 return;
michael@0 2020 }
michael@0 2021 _PR_MD_SET_INTSOFF(1);
michael@0 2022
michael@0 2023 olderrno = errno;
michael@0 2024 _PR_ClockInterrupt();
michael@0 2025 errno = olderrno;
michael@0 2026
michael@0 2027 /*
michael@0 2028 ** If the interrupt wants a resched or if some other thread at
michael@0 2029 ** the same priority needs the cpu, reschedule.
michael@0 2030 */
michael@0 2031 pri = me->priority;
michael@0 2032 if ((cpu->u.missed[3] || (_PR_RUNQREADYMASK(me->cpu) >> pri))) {
michael@0 2033 #ifdef _PR_NO_PREEMPT
michael@0 2034 cpu->resched = PR_TRUE;
michael@0 2035 if (pr_interruptSwitchHook) {
michael@0 2036 (*pr_interruptSwitchHook)(pr_interruptSwitchHookArg);
michael@0 2037 }
michael@0 2038 #else /* _PR_NO_PREEMPT */
michael@0 2039 /*
michael@0 2040 ** Re-enable unix interrupts (so that we can use
michael@0 2041 ** setjmp/longjmp for context switching without having to
michael@0 2042 ** worry about the signal state)
michael@0 2043 */
michael@0 2044 sigprocmask(SIG_SETMASK, &empty_set, 0);
michael@0 2045 PR_LOG(_pr_sched_lm, PR_LOG_MIN, ("clock caused context switch"));
michael@0 2046
michael@0 2047 if(!(me->flags & _PR_IDLE_THREAD)) {
michael@0 2048 _PR_THREAD_LOCK(me);
michael@0 2049 me->state = _PR_RUNNABLE;
michael@0 2050 me->cpu = cpu;
michael@0 2051 _PR_RUNQ_LOCK(cpu);
michael@0 2052 _PR_ADD_RUNQ(me, cpu, pri);
michael@0 2053 _PR_RUNQ_UNLOCK(cpu);
michael@0 2054 _PR_THREAD_UNLOCK(me);
michael@0 2055 } else
michael@0 2056 me->state = _PR_RUNNABLE;
michael@0 2057 _MD_SWITCH_CONTEXT(me);
michael@0 2058 PR_LOG(_pr_sched_lm, PR_LOG_MIN, ("clock back from context switch"));
michael@0 2059 #endif /* _PR_NO_PREEMPT */
michael@0 2060 }
michael@0 2061 /*
michael@0 2062 * Because this thread could be running on a different cpu after
michael@0 2063 	 * a context switch, the current cpu should be re-read and the cached
michael@0 2064 	 * value of the 'cpu' variable should not be used.
michael@0 2065 */
michael@0 2066 _PR_MD_SET_INTSOFF(0);
michael@0 2067 }
michael@0 2068
michael@0 2069 /*
michael@0 2070 * On HP-UX 9, we have to use the sigvector() interface to restart
michael@0 2071 * interrupted system calls, because sigaction() does not have the
michael@0 2072 * SA_RESTART flag.
michael@0 2073 */
michael@0 2074
michael@0 2075 #ifdef HPUX9
michael@0 2076 static void HPUX9_ClockInterruptHandler(
michael@0 2077 int sig,
michael@0 2078 int code,
michael@0 2079 struct sigcontext *scp)
michael@0 2080 {
michael@0 2081 ClockInterruptHandler();
michael@0 2082 scp->sc_syscall_action = SIG_RESTART;
michael@0 2083 }
michael@0 2084 #endif /* HPUX9 */
michael@0 2085
michael@0 2086 /* # of milliseconds per clock tick that we will use */
michael@0 2087 #define MSEC_PER_TICK 50
michael@0 2088
michael@0 2089
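/*
** _MD_StartInterrupts --
**     Honor the NSPR_NOCLOCK environment variable and, unless the clock
**     is disabled, enable the SIGALRM-driven clock interrupts.
*/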
michael@0 2090 void _MD_StartInterrupts()
michael@0 2091 {
michael@0 2092 char *eval;
michael@0 2093
michael@0 2094 if ((eval = getenv("NSPR_NOCLOCK")) != NULL) {
michael@0 2095 if (atoi(eval) == 0)
michael@0 2096 _nspr_noclock = 0;
michael@0 2097 else
michael@0 2098 _nspr_noclock = 1;
michael@0 2099 }
michael@0 2100
michael@0 2101 #ifndef _PR_NO_CLOCK_TIMER
michael@0 2102 if (!_nspr_noclock) {
michael@0 2103 _MD_EnableClockInterrupts();
michael@0 2104 }
michael@0 2105 #endif
michael@0 2106 }
michael@0 2107
michael@0 2108 void _MD_StopInterrupts()
michael@0 2109 {
michael@0 2110 sigprocmask(SIG_BLOCK, &timer_set, 0);
michael@0 2111 }
michael@0 2112
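/*
** _MD_EnableClockInterrupts --
**     Install ClockInterruptHandler for SIGALRM (through sigvector() on
**     HP-UX 9, which lacks SA_RESTART, and through sigaction() elsewhere)
**     and program a recurring ITIMER_REAL timer that fires every
**     MSEC_PER_TICK milliseconds.
*/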
michael@0 2113 void _MD_EnableClockInterrupts()
michael@0 2114 {
michael@0 2115 struct itimerval itval;
michael@0 2116 extern PRUintn _pr_numCPU;
michael@0 2117 #ifdef HPUX9
michael@0 2118 struct sigvec vec;
michael@0 2119
michael@0 2120 vec.sv_handler = (void (*)()) HPUX9_ClockInterruptHandler;
michael@0 2121 vec.sv_mask = 0;
michael@0 2122 vec.sv_flags = 0;
michael@0 2123 sigvector(SIGALRM, &vec, 0);
michael@0 2124 #else
michael@0 2125 struct sigaction vtact;
michael@0 2126
michael@0 2127 vtact.sa_handler = (void (*)()) ClockInterruptHandler;
michael@0 2128 sigemptyset(&vtact.sa_mask);
michael@0 2129 vtact.sa_flags = SA_RESTART;
michael@0 2130 sigaction(SIGALRM, &vtact, 0);
michael@0 2131 #endif /* HPUX9 */
michael@0 2132
michael@0 2133 PR_ASSERT(_pr_numCPU == 1);
michael@0 2134 itval.it_interval.tv_sec = 0;
michael@0 2135 itval.it_interval.tv_usec = MSEC_PER_TICK * PR_USEC_PER_MSEC;
michael@0 2136 itval.it_value = itval.it_interval;
michael@0 2137 setitimer(ITIMER_REAL, &itval, 0);
michael@0 2138 }
michael@0 2139
michael@0 2140 void _MD_DisableClockInterrupts()
michael@0 2141 {
michael@0 2142 struct itimerval itval;
michael@0 2143 extern PRUintn _pr_numCPU;
michael@0 2144
michael@0 2145 PR_ASSERT(_pr_numCPU == 1);
michael@0 2146 itval.it_interval.tv_sec = 0;
michael@0 2147 itval.it_interval.tv_usec = 0;
michael@0 2148 itval.it_value = itval.it_interval;
michael@0 2149 setitimer(ITIMER_REAL, &itval, 0);
michael@0 2150 }
michael@0 2151
michael@0 2152 void _MD_BlockClockInterrupts()
michael@0 2153 {
michael@0 2154 sigprocmask(SIG_BLOCK, &timer_set, 0);
michael@0 2155 }
michael@0 2156
michael@0 2157 void _MD_UnblockClockInterrupts()
michael@0 2158 {
michael@0 2159 sigprocmask(SIG_UNBLOCK, &timer_set, 0);
michael@0 2160 }
michael@0 2161
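/*
** _MD_MakeNonblock --
**     Put the file descriptor underlying 'fd' into non-blocking mode.
**     Descriptors 0-2 (stdin, stdout, stderr) are left untouched.
*/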
michael@0 2162 void _MD_MakeNonblock(PRFileDesc *fd)
michael@0 2163 {
michael@0 2164 PRInt32 osfd = fd->secret->md.osfd;
michael@0 2165 int flags;
michael@0 2166
michael@0 2167 if (osfd <= 2) {
michael@0 2168 /* Don't mess around with stdin, stdout or stderr */
michael@0 2169 return;
michael@0 2170 }
michael@0 2171 flags = fcntl(osfd, F_GETFL, 0);
michael@0 2172
michael@0 2173 /*
michael@0 2174 * Use O_NONBLOCK (POSIX-style non-blocking I/O) whenever possible.
michael@0 2175 * On SunOS 4, we must use FNDELAY (BSD-style non-blocking I/O),
michael@0 2176 * otherwise connect() still blocks and can be interrupted by SIGALRM.
michael@0 2177 */
michael@0 2178
michael@0 2179 fcntl(osfd, F_SETFL, flags | O_NONBLOCK);
michael@0 2180 }
michael@0 2181
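/*
** _MD_open --
**     Translate the NSPR PR_* open flags into the corresponding POSIX
**     O_* flags, hold _pr_rename_lock across creations to preserve the
**     semantics of PR_Rename, open the file through _md_iovector._open64,
**     and map errno to an NSPR error on failure.
*/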
michael@0 2182 PRInt32 _MD_open(const char *name, PRIntn flags, PRIntn mode)
michael@0 2183 {
michael@0 2184 PRInt32 osflags;
michael@0 2185 PRInt32 rv, err;
michael@0 2186
michael@0 2187 if (flags & PR_RDWR) {
michael@0 2188 osflags = O_RDWR;
michael@0 2189 } else if (flags & PR_WRONLY) {
michael@0 2190 osflags = O_WRONLY;
michael@0 2191 } else {
michael@0 2192 osflags = O_RDONLY;
michael@0 2193 }
michael@0 2194
michael@0 2195 if (flags & PR_EXCL)
michael@0 2196 osflags |= O_EXCL;
michael@0 2197 if (flags & PR_APPEND)
michael@0 2198 osflags |= O_APPEND;
michael@0 2199 if (flags & PR_TRUNCATE)
michael@0 2200 osflags |= O_TRUNC;
michael@0 2201 if (flags & PR_SYNC) {
michael@0 2202 #if defined(O_SYNC)
michael@0 2203 osflags |= O_SYNC;
michael@0 2204 #elif defined(O_FSYNC)
michael@0 2205 osflags |= O_FSYNC;
michael@0 2206 #else
michael@0 2207 #error "Neither O_SYNC nor O_FSYNC is defined on this platform"
michael@0 2208 #endif
michael@0 2209 }
michael@0 2210
michael@0 2211 /*
michael@0 2212 ** On creations we hold the 'create' lock in order to enforce
michael@0 2213 ** the semantics of PR_Rename. (see the latter for more details)
michael@0 2214 */
michael@0 2215 if (flags & PR_CREATE_FILE)
michael@0 2216 {
michael@0 2217 osflags |= O_CREAT;
michael@0 2218 if (NULL !=_pr_rename_lock)
michael@0 2219 PR_Lock(_pr_rename_lock);
michael@0 2220 }
michael@0 2221
michael@0 2222 #if defined(ANDROID)
michael@0 2223 osflags |= O_LARGEFILE;
michael@0 2224 #endif
michael@0 2225
michael@0 2226 rv = _md_iovector._open64(name, osflags, mode);
michael@0 2227
michael@0 2228 if (rv < 0) {
michael@0 2229 err = _MD_ERRNO();
michael@0 2230 _PR_MD_MAP_OPEN_ERROR(err);
michael@0 2231 }
michael@0 2232
michael@0 2233 if ((flags & PR_CREATE_FILE) && (NULL !=_pr_rename_lock))
michael@0 2234 PR_Unlock(_pr_rename_lock);
michael@0 2235 return rv;
michael@0 2236 }
michael@0 2237
michael@0 2238 PRIntervalTime intr_timeout_ticks;
michael@0 2239
michael@0 2240 #if defined(SOLARIS) || defined(IRIX)
michael@0 2241 static void sigsegvhandler() {
michael@0 2242 fprintf(stderr,"Received SIGSEGV\n");
michael@0 2243 fflush(stderr);
michael@0 2244 pause();
michael@0 2245 }
michael@0 2246
michael@0 2247 static void sigaborthandler() {
michael@0 2248 fprintf(stderr,"Received SIGABRT\n");
michael@0 2249 fflush(stderr);
michael@0 2250 pause();
michael@0 2251 }
michael@0 2252
michael@0 2253 static void sigbushandler() {
michael@0 2254 fprintf(stderr,"Received SIGBUS\n");
michael@0 2255 fflush(stderr);
michael@0 2256 pause();
michael@0 2257 }
michael@0 2258 #endif /* SOLARIS, IRIX */
michael@0 2259
michael@0 2260 #endif /* !defined(_PR_PTHREADS) */
michael@0 2261
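/*
** _MD_query_fd_inheritable --
**     Query the FD_CLOEXEC flag with fcntl(F_GETFD) and cache the result
**     in fd->secret->inheritable.
*/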
michael@0 2262 void _MD_query_fd_inheritable(PRFileDesc *fd)
michael@0 2263 {
michael@0 2264 int flags;
michael@0 2265
michael@0 2266 PR_ASSERT(_PR_TRI_UNKNOWN == fd->secret->inheritable);
michael@0 2267 flags = fcntl(fd->secret->md.osfd, F_GETFD, 0);
michael@0 2268 PR_ASSERT(-1 != flags);
michael@0 2269 fd->secret->inheritable = (flags & FD_CLOEXEC) ?
michael@0 2270 _PR_TRI_FALSE : _PR_TRI_TRUE;
michael@0 2271 }
michael@0 2272
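/*
** _MD_lseek, _MD_lseek64 --
**     Translate a PRSeekWhence value into SEEK_SET/SEEK_CUR/SEEK_END,
**     reposition the file offset with lseek() (or the 64-bit entry in
**     _md_iovector), and map errno to an NSPR error on failure.
*/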
michael@0 2273 PROffset32 _MD_lseek(PRFileDesc *fd, PROffset32 offset, PRSeekWhence whence)
michael@0 2274 {
michael@0 2275 PROffset32 rv, where;
michael@0 2276
michael@0 2277 switch (whence) {
michael@0 2278 case PR_SEEK_SET:
michael@0 2279 where = SEEK_SET;
michael@0 2280 break;
michael@0 2281 case PR_SEEK_CUR:
michael@0 2282 where = SEEK_CUR;
michael@0 2283 break;
michael@0 2284 case PR_SEEK_END:
michael@0 2285 where = SEEK_END;
michael@0 2286 break;
michael@0 2287 default:
michael@0 2288 PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0);
michael@0 2289 rv = -1;
michael@0 2290 goto done;
michael@0 2291 }
michael@0 2292 rv = lseek(fd->secret->md.osfd,offset,where);
michael@0 2293 if (rv == -1)
michael@0 2294 {
michael@0 2295 PRInt32 syserr = _MD_ERRNO();
michael@0 2296 _PR_MD_MAP_LSEEK_ERROR(syserr);
michael@0 2297 }
michael@0 2298 done:
michael@0 2299 return(rv);
michael@0 2300 }
michael@0 2301
michael@0 2302 PROffset64 _MD_lseek64(PRFileDesc *fd, PROffset64 offset, PRSeekWhence whence)
michael@0 2303 {
michael@0 2304 PRInt32 where;
michael@0 2305 PROffset64 rv;
michael@0 2306
michael@0 2307 switch (whence)
michael@0 2308 {
michael@0 2309 case PR_SEEK_SET:
michael@0 2310 where = SEEK_SET;
michael@0 2311 break;
michael@0 2312 case PR_SEEK_CUR:
michael@0 2313 where = SEEK_CUR;
michael@0 2314 break;
michael@0 2315 case PR_SEEK_END:
michael@0 2316 where = SEEK_END;
michael@0 2317 break;
michael@0 2318 default:
michael@0 2319 PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0);
michael@0 2320 rv = minus_one;
michael@0 2321 goto done;
michael@0 2322 }
michael@0 2323 rv = _md_iovector._lseek64(fd->secret->md.osfd, offset, where);
michael@0 2324 if (LL_EQ(rv, minus_one))
michael@0 2325 {
michael@0 2326 PRInt32 syserr = _MD_ERRNO();
michael@0 2327 _PR_MD_MAP_LSEEK_ERROR(syserr);
michael@0 2328 }
michael@0 2329 done:
michael@0 2330 return rv;
michael@0 2331 } /* _MD_lseek64 */
michael@0 2332
michael@0 2333 /*
michael@0 2334 ** _MD_set_fileinfo_times --
michael@0 2335 ** Set the modifyTime and creationTime of the PRFileInfo
michael@0 2336 ** structure using the values in struct stat.
michael@0 2337 **
michael@0 2338 ** _MD_set_fileinfo64_times --
michael@0 2339 ** Set the modifyTime and creationTime of the PRFileInfo64
michael@0 2340 ** structure using the values in _MDStat64.
michael@0 2341 */
michael@0 2342
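/*
** Note: every variant below computes the same quantity, namely file
** times expressed in microseconds since the epoch, i.e. roughly
**
**     modifyTime   = mtime_seconds * PR_USEC_PER_SEC + mtime_nanoseconds / 1000
**     creationTime = ctime_seconds * PR_USEC_PER_SEC + ctime_nanoseconds / 1000
**
** (the nanosecond term is dropped when struct stat only carries time_t
** fields).  The LL_* macros perform this arithmetic portably on platforms
** without native 64-bit integers; only the struct stat field names
** (st_mtim, st_mtim.st__tim, st_mtimespec, st_mtime) differ, and the
** #if ladder below selects the right ones.
*/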
michael@0 2343 #if defined(_PR_STAT_HAS_ST_ATIM)
michael@0 2344 /*
michael@0 2345 ** struct stat has st_atim, st_mtim, and st_ctim fields of
michael@0 2346 ** type timestruc_t.
michael@0 2347 */
michael@0 2348 static void _MD_set_fileinfo_times(
michael@0 2349 const struct stat *sb,
michael@0 2350 PRFileInfo *info)
michael@0 2351 {
michael@0 2352 PRInt64 us, s2us;
michael@0 2353
michael@0 2354 LL_I2L(s2us, PR_USEC_PER_SEC);
michael@0 2355 LL_I2L(info->modifyTime, sb->st_mtim.tv_sec);
michael@0 2356 LL_MUL(info->modifyTime, info->modifyTime, s2us);
michael@0 2357 LL_I2L(us, sb->st_mtim.tv_nsec / 1000);
michael@0 2358 LL_ADD(info->modifyTime, info->modifyTime, us);
michael@0 2359 LL_I2L(info->creationTime, sb->st_ctim.tv_sec);
michael@0 2360 LL_MUL(info->creationTime, info->creationTime, s2us);
michael@0 2361 LL_I2L(us, sb->st_ctim.tv_nsec / 1000);
michael@0 2362 LL_ADD(info->creationTime, info->creationTime, us);
michael@0 2363 }
michael@0 2364
michael@0 2365 static void _MD_set_fileinfo64_times(
michael@0 2366 const _MDStat64 *sb,
michael@0 2367 PRFileInfo64 *info)
michael@0 2368 {
michael@0 2369 PRInt64 us, s2us;
michael@0 2370
michael@0 2371 LL_I2L(s2us, PR_USEC_PER_SEC);
michael@0 2372 LL_I2L(info->modifyTime, sb->st_mtim.tv_sec);
michael@0 2373 LL_MUL(info->modifyTime, info->modifyTime, s2us);
michael@0 2374 LL_I2L(us, sb->st_mtim.tv_nsec / 1000);
michael@0 2375 LL_ADD(info->modifyTime, info->modifyTime, us);
michael@0 2376 LL_I2L(info->creationTime, sb->st_ctim.tv_sec);
michael@0 2377 LL_MUL(info->creationTime, info->creationTime, s2us);
michael@0 2378 LL_I2L(us, sb->st_ctim.tv_nsec / 1000);
michael@0 2379 LL_ADD(info->creationTime, info->creationTime, us);
michael@0 2380 }
michael@0 2381 #elif defined(_PR_STAT_HAS_ST_ATIM_UNION)
michael@0 2382 /*
michael@0 2383 ** The st_atim, st_mtim, and st_ctim fields in struct stat are
michael@0 2384 ** unions with a st__tim union member of type timestruc_t.
michael@0 2385 */
michael@0 2386 static void _MD_set_fileinfo_times(
michael@0 2387 const struct stat *sb,
michael@0 2388 PRFileInfo *info)
michael@0 2389 {
michael@0 2390 PRInt64 us, s2us;
michael@0 2391
michael@0 2392 LL_I2L(s2us, PR_USEC_PER_SEC);
michael@0 2393 LL_I2L(info->modifyTime, sb->st_mtim.st__tim.tv_sec);
michael@0 2394 LL_MUL(info->modifyTime, info->modifyTime, s2us);
michael@0 2395 LL_I2L(us, sb->st_mtim.st__tim.tv_nsec / 1000);
michael@0 2396 LL_ADD(info->modifyTime, info->modifyTime, us);
michael@0 2397 LL_I2L(info->creationTime, sb->st_ctim.st__tim.tv_sec);
michael@0 2398 LL_MUL(info->creationTime, info->creationTime, s2us);
michael@0 2399 LL_I2L(us, sb->st_ctim.st__tim.tv_nsec / 1000);
michael@0 2400 LL_ADD(info->creationTime, info->creationTime, us);
michael@0 2401 }
michael@0 2402
michael@0 2403 static void _MD_set_fileinfo64_times(
michael@0 2404 const _MDStat64 *sb,
michael@0 2405 PRFileInfo64 *info)
michael@0 2406 {
michael@0 2407 PRInt64 us, s2us;
michael@0 2408
michael@0 2409 LL_I2L(s2us, PR_USEC_PER_SEC);
michael@0 2410 LL_I2L(info->modifyTime, sb->st_mtim.st__tim.tv_sec);
michael@0 2411 LL_MUL(info->modifyTime, info->modifyTime, s2us);
michael@0 2412 LL_I2L(us, sb->st_mtim.st__tim.tv_nsec / 1000);
michael@0 2413 LL_ADD(info->modifyTime, info->modifyTime, us);
michael@0 2414 LL_I2L(info->creationTime, sb->st_ctim.st__tim.tv_sec);
michael@0 2415 LL_MUL(info->creationTime, info->creationTime, s2us);
michael@0 2416 LL_I2L(us, sb->st_ctim.st__tim.tv_nsec / 1000);
michael@0 2417 LL_ADD(info->creationTime, info->creationTime, us);
michael@0 2418 }
michael@0 2419 #elif defined(_PR_STAT_HAS_ST_ATIMESPEC)
michael@0 2420 /*
michael@0 2421 ** struct stat has st_atimespec, st_mtimespec, and st_ctimespec
michael@0 2422 ** fields of type struct timespec.
michael@0 2423 */
michael@0 2424 #if defined(_PR_TIMESPEC_HAS_TS_SEC)
michael@0 2425 static void _MD_set_fileinfo_times(
michael@0 2426 const struct stat *sb,
michael@0 2427 PRFileInfo *info)
michael@0 2428 {
michael@0 2429 PRInt64 us, s2us;
michael@0 2430
michael@0 2431 LL_I2L(s2us, PR_USEC_PER_SEC);
michael@0 2432 LL_I2L(info->modifyTime, sb->st_mtimespec.ts_sec);
michael@0 2433 LL_MUL(info->modifyTime, info->modifyTime, s2us);
michael@0 2434 LL_I2L(us, sb->st_mtimespec.ts_nsec / 1000);
michael@0 2435 LL_ADD(info->modifyTime, info->modifyTime, us);
michael@0 2436 LL_I2L(info->creationTime, sb->st_ctimespec.ts_sec);
michael@0 2437 LL_MUL(info->creationTime, info->creationTime, s2us);
michael@0 2438 LL_I2L(us, sb->st_ctimespec.ts_nsec / 1000);
michael@0 2439 LL_ADD(info->creationTime, info->creationTime, us);
michael@0 2440 }
michael@0 2441
michael@0 2442 static void _MD_set_fileinfo64_times(
michael@0 2443 const _MDStat64 *sb,
michael@0 2444 PRFileInfo64 *info)
michael@0 2445 {
michael@0 2446 PRInt64 us, s2us;
michael@0 2447
michael@0 2448 LL_I2L(s2us, PR_USEC_PER_SEC);
michael@0 2449 LL_I2L(info->modifyTime, sb->st_mtimespec.ts_sec);
michael@0 2450 LL_MUL(info->modifyTime, info->modifyTime, s2us);
michael@0 2451 LL_I2L(us, sb->st_mtimespec.ts_nsec / 1000);
michael@0 2452 LL_ADD(info->modifyTime, info->modifyTime, us);
michael@0 2453 LL_I2L(info->creationTime, sb->st_ctimespec.ts_sec);
michael@0 2454 LL_MUL(info->creationTime, info->creationTime, s2us);
michael@0 2455 LL_I2L(us, sb->st_ctimespec.ts_nsec / 1000);
michael@0 2456 LL_ADD(info->creationTime, info->creationTime, us);
michael@0 2457 }
michael@0 2458 #else /* _PR_TIMESPEC_HAS_TS_SEC */
michael@0 2459 /*
michael@0 2460 ** The POSIX timespec structure has tv_sec and tv_nsec.
michael@0 2461 */
michael@0 2462 static void _MD_set_fileinfo_times(
michael@0 2463 const struct stat *sb,
michael@0 2464 PRFileInfo *info)
michael@0 2465 {
michael@0 2466 PRInt64 us, s2us;
michael@0 2467
michael@0 2468 LL_I2L(s2us, PR_USEC_PER_SEC);
michael@0 2469 LL_I2L(info->modifyTime, sb->st_mtimespec.tv_sec);
michael@0 2470 LL_MUL(info->modifyTime, info->modifyTime, s2us);
michael@0 2471 LL_I2L(us, sb->st_mtimespec.tv_nsec / 1000);
michael@0 2472 LL_ADD(info->modifyTime, info->modifyTime, us);
michael@0 2473 LL_I2L(info->creationTime, sb->st_ctimespec.tv_sec);
michael@0 2474 LL_MUL(info->creationTime, info->creationTime, s2us);
michael@0 2475 LL_I2L(us, sb->st_ctimespec.tv_nsec / 1000);
michael@0 2476 LL_ADD(info->creationTime, info->creationTime, us);
michael@0 2477 }
michael@0 2478
michael@0 2479 static void _MD_set_fileinfo64_times(
michael@0 2480 const _MDStat64 *sb,
michael@0 2481 PRFileInfo64 *info)
michael@0 2482 {
michael@0 2483 PRInt64 us, s2us;
michael@0 2484
michael@0 2485 LL_I2L(s2us, PR_USEC_PER_SEC);
michael@0 2486 LL_I2L(info->modifyTime, sb->st_mtimespec.tv_sec);
michael@0 2487 LL_MUL(info->modifyTime, info->modifyTime, s2us);
michael@0 2488 LL_I2L(us, sb->st_mtimespec.tv_nsec / 1000);
michael@0 2489 LL_ADD(info->modifyTime, info->modifyTime, us);
michael@0 2490 LL_I2L(info->creationTime, sb->st_ctimespec.tv_sec);
michael@0 2491 LL_MUL(info->creationTime, info->creationTime, s2us);
michael@0 2492 LL_I2L(us, sb->st_ctimespec.tv_nsec / 1000);
michael@0 2493 LL_ADD(info->creationTime, info->creationTime, us);
michael@0 2494 }
michael@0 2495 #endif /* _PR_TIMESPEC_HAS_TS_SEC */
michael@0 2496 #elif defined(_PR_STAT_HAS_ONLY_ST_ATIME)
michael@0 2497 /*
michael@0 2498 ** struct stat only has st_atime, st_mtime, and st_ctime fields
michael@0 2499 ** of type time_t.
michael@0 2500 */
michael@0 2501 static void _MD_set_fileinfo_times(
michael@0 2502 const struct stat *sb,
michael@0 2503 PRFileInfo *info)
michael@0 2504 {
michael@0 2505 PRInt64 s, s2us;
michael@0 2506 LL_I2L(s2us, PR_USEC_PER_SEC);
michael@0 2507 LL_I2L(s, sb->st_mtime);
michael@0 2508 LL_MUL(s, s, s2us);
michael@0 2509 info->modifyTime = s;
michael@0 2510 LL_I2L(s, sb->st_ctime);
michael@0 2511 LL_MUL(s, s, s2us);
michael@0 2512 info->creationTime = s;
michael@0 2513 }
michael@0 2514
michael@0 2515 static void _MD_set_fileinfo64_times(
michael@0 2516 const _MDStat64 *sb,
michael@0 2517 PRFileInfo64 *info)
michael@0 2518 {
michael@0 2519 PRInt64 s, s2us;
michael@0 2520 LL_I2L(s2us, PR_USEC_PER_SEC);
michael@0 2521 LL_I2L(s, sb->st_mtime);
michael@0 2522 LL_MUL(s, s, s2us);
michael@0 2523 info->modifyTime = s;
michael@0 2524 LL_I2L(s, sb->st_ctime);
michael@0 2525 LL_MUL(s, s, s2us);
michael@0 2526 info->creationTime = s;
michael@0 2527 }
michael@0 2528 #else
michael@0 2529 #error "I don't know yet"
michael@0 2530 #endif
michael@0 2531
michael@0 2532 static int _MD_convert_stat_to_fileinfo(
michael@0 2533 const struct stat *sb,
michael@0 2534 PRFileInfo *info)
michael@0 2535 {
michael@0 2536 if (S_IFREG & sb->st_mode)
michael@0 2537 info->type = PR_FILE_FILE;
michael@0 2538 else if (S_IFDIR & sb->st_mode)
michael@0 2539 info->type = PR_FILE_DIRECTORY;
michael@0 2540 else
michael@0 2541 info->type = PR_FILE_OTHER;
michael@0 2542
michael@0 2543 #if defined(_PR_HAVE_LARGE_OFF_T)
michael@0 2544 if (0x7fffffffL < sb->st_size)
michael@0 2545 {
michael@0 2546 PR_SetError(PR_FILE_TOO_BIG_ERROR, 0);
michael@0 2547 return -1;
michael@0 2548 }
michael@0 2549 #endif /* defined(_PR_HAVE_LARGE_OFF_T) */
michael@0 2550 info->size = sb->st_size;
michael@0 2551
michael@0 2552 _MD_set_fileinfo_times(sb, info);
michael@0 2553 return 0;
michael@0 2554 } /* _MD_convert_stat_to_fileinfo */
michael@0 2555
michael@0 2556 static int _MD_convert_stat64_to_fileinfo64(
michael@0 2557 const _MDStat64 *sb,
michael@0 2558 PRFileInfo64 *info)
michael@0 2559 {
michael@0 2560 if (S_IFREG & sb->st_mode)
michael@0 2561 info->type = PR_FILE_FILE;
michael@0 2562 else if (S_IFDIR & sb->st_mode)
michael@0 2563 info->type = PR_FILE_DIRECTORY;
michael@0 2564 else
michael@0 2565 info->type = PR_FILE_OTHER;
michael@0 2566
michael@0 2567 LL_I2L(info->size, sb->st_size);
michael@0 2568
michael@0 2569 _MD_set_fileinfo64_times(sb, info);
michael@0 2570 return 0;
michael@0 2571 } /* _MD_convert_stat64_to_fileinfo64 */
michael@0 2572
michael@0 2573 PRInt32 _MD_getfileinfo(const char *fn, PRFileInfo *info)
michael@0 2574 {
michael@0 2575 PRInt32 rv;
michael@0 2576 struct stat sb;
michael@0 2577
michael@0 2578 rv = stat(fn, &sb);
michael@0 2579 if (rv < 0)
michael@0 2580 _PR_MD_MAP_STAT_ERROR(_MD_ERRNO());
michael@0 2581 else if (NULL != info)
michael@0 2582 rv = _MD_convert_stat_to_fileinfo(&sb, info);
michael@0 2583 return rv;
michael@0 2584 }
michael@0 2585
michael@0 2586 PRInt32 _MD_getfileinfo64(const char *fn, PRFileInfo64 *info)
michael@0 2587 {
michael@0 2588 _MDStat64 sb;
michael@0 2589 PRInt32 rv = _md_iovector._stat64(fn, &sb);
michael@0 2590 if (rv < 0)
michael@0 2591 _PR_MD_MAP_STAT_ERROR(_MD_ERRNO());
michael@0 2592 else if (NULL != info)
michael@0 2593 rv = _MD_convert_stat64_to_fileinfo64(&sb, info);
michael@0 2594 return rv;
michael@0 2595 }
michael@0 2596
michael@0 2597 PRInt32 _MD_getopenfileinfo(const PRFileDesc *fd, PRFileInfo *info)
michael@0 2598 {
michael@0 2599 struct stat sb;
michael@0 2600 PRInt32 rv = fstat(fd->secret->md.osfd, &sb);
michael@0 2601 if (rv < 0)
michael@0 2602 _PR_MD_MAP_FSTAT_ERROR(_MD_ERRNO());
michael@0 2603 else if (NULL != info)
michael@0 2604 rv = _MD_convert_stat_to_fileinfo(&sb, info);
michael@0 2605 return rv;
michael@0 2606 }
michael@0 2607
michael@0 2608 PRInt32 _MD_getopenfileinfo64(const PRFileDesc *fd, PRFileInfo64 *info)
michael@0 2609 {
michael@0 2610 _MDStat64 sb;
michael@0 2611 PRInt32 rv = _md_iovector._fstat64(fd->secret->md.osfd, &sb);
michael@0 2612 if (rv < 0)
michael@0 2613 _PR_MD_MAP_FSTAT_ERROR(_MD_ERRNO());
michael@0 2614 else if (NULL != info)
michael@0 2615 rv = _MD_convert_stat64_to_fileinfo64(&sb, info);
michael@0 2616 return rv;
michael@0 2617 }
michael@0 2618
michael@0 2619 /*
michael@0 2620 * _md_iovector._open64 must be initialized to 'open' so that _PR_InitLog can
michael@0 2621 * open the log file during NSPR initialization, before _md_iovector is
michael@0 2622 * initialized by _PR_MD_FINAL_INIT. This means the log file cannot be a
michael@0 2623 * large file on some platforms.
michael@0 2624 */
michael@0 2625 #ifdef SYMBIAN
michael@0 2626 struct _MD_IOVector _md_iovector; /* Will crash if NSPR_LOG_FILE is set. */
michael@0 2627 #else
michael@0 2628 struct _MD_IOVector _md_iovector = { open };
michael@0 2629 #endif
michael@0 2630
michael@0 2631 /*
michael@0 2632 ** These implementations are to emulate large file routines on systems that
michael@0 2633 ** don't have them. Their goal is to detect cases where an overflow would occur;
michael@0 2634 ** otherwise they just operate as normal using the 32-bit file routines.
michael@0 2635 **
michael@0 2636 ** The checking might be pre- or post-op, depending on the semantics.
michael@0 2637 */
michael@0 2638
michael@0 2639 #if defined(SOLARIS2_5)
michael@0 2640
michael@0 2641 static PRIntn _MD_solaris25_fstat64(PRIntn osfd, _MDStat64 *buf)
michael@0 2642 {
michael@0 2643 PRInt32 rv;
michael@0 2644 struct stat sb;
michael@0 2645
michael@0 2646 rv = fstat(osfd, &sb);
michael@0 2647 if (rv >= 0)
michael@0 2648 {
michael@0 2649 /*
michael@0 2650 ** I'm only copying the fields that are immediately needed.
michael@0 2651 ** If somebody else calls this function, some of the fields
michael@0 2652 ** may not be defined.
michael@0 2653 */
michael@0 2654 (void)memset(buf, 0, sizeof(_MDStat64));
michael@0 2655 buf->st_mode = sb.st_mode;
michael@0 2656 buf->st_ctim = sb.st_ctim;
michael@0 2657 buf->st_mtim = sb.st_mtim;
michael@0 2658 buf->st_size = sb.st_size;
michael@0 2659 }
michael@0 2660 return rv;
michael@0 2661 } /* _MD_solaris25_fstat64 */
michael@0 2662
michael@0 2663 static PRIntn _MD_solaris25_stat64(const char *fn, _MDStat64 *buf)
michael@0 2664 {
michael@0 2665 PRInt32 rv;
michael@0 2666 struct stat sb;
michael@0 2667
michael@0 2668 rv = stat(fn, &sb);
michael@0 2669 if (rv >= 0)
michael@0 2670 {
michael@0 2671 /*
michael@0 2672 ** I'm only copying the fields that are immediately needed.
michael@0 2673 ** If somebody else calls this function, some of the fields
michael@0 2674 ** may not be defined.
michael@0 2675 */
michael@0 2676 (void)memset(buf, 0, sizeof(_MDStat64));
michael@0 2677 buf->st_mode = sb.st_mode;
michael@0 2678 buf->st_ctim = sb.st_ctim;
michael@0 2679 buf->st_mtim = sb.st_mtim;
michael@0 2680 buf->st_size = sb.st_size;
michael@0 2681 }
michael@0 2682 return rv;
michael@0 2683 } /* _MD_solaris25_stat64 */
michael@0 2684 #endif /* defined(SOLARIS2_5) */
michael@0 2685
michael@0 2686 #if defined(_PR_NO_LARGE_FILES) || defined(SOLARIS2_5)
michael@0 2687
michael@0 2688 static PROffset64 _MD_Unix_lseek64(PRIntn osfd, PROffset64 offset, PRIntn whence)
michael@0 2689 {
michael@0 2690 PRUint64 maxoff;
michael@0 2691 PROffset64 rv = minus_one;
michael@0 2692 LL_I2L(maxoff, 0x7fffffff);
michael@0 2693 if (LL_CMP(offset, <=, maxoff))
michael@0 2694 {
michael@0 2695 off_t off;
michael@0 2696 LL_L2I(off, offset);
michael@0 2697 LL_I2L(rv, lseek(osfd, off, whence));
michael@0 2698 }
michael@0 2699 else errno = EFBIG; /* we can't go there */
michael@0 2700 return rv;
michael@0 2701 } /* _MD_Unix_lseek64 */
michael@0 2702
michael@0 2703 static void* _MD_Unix_mmap64(
michael@0 2704 void *addr, PRSize len, PRIntn prot, PRIntn flags,
michael@0 2705 PRIntn fildes, PRInt64 offset)
michael@0 2706 {
michael@0 2707 PR_SetError(PR_FILE_TOO_BIG_ERROR, 0);
michael@0 2708 return NULL;
michael@0 2709 } /* _MD_Unix_mmap64 */
michael@0 2710 #endif /* defined(_PR_NO_LARGE_FILES) || defined(SOLARIS2_5) */
michael@0 2711
michael@0 2712 /* Android doesn't have mmap64. */
michael@0 2713 #if defined(ANDROID)
michael@0 2714 extern void *__mmap2(void *, size_t, int, int, int, size_t);
michael@0 2715
michael@0 2716 #define ANDROID_PAGE_SIZE 4096
michael@0 2717
michael@0 2718 static void *
michael@0 2719 mmap64(void *addr, size_t len, int prot, int flags, int fd, loff_t offset)
michael@0 2720 {
michael@0 2721 if (offset & (ANDROID_PAGE_SIZE - 1)) {
michael@0 2722 errno = EINVAL;
michael@0 2723 return MAP_FAILED;
michael@0 2724 }
michael@0 2725 return __mmap2(addr, len, prot, flags, fd, offset / ANDROID_PAGE_SIZE);
michael@0 2726 }
michael@0 2727 #endif
michael@0 2728
michael@0 2729 #if defined(OSF1) && defined(__GNUC__)
michael@0 2730
michael@0 2731 /*
michael@0 2732 * On OSF1 V5.0A, <sys/stat.h> defines stat and fstat as
michael@0 2733 * macros when compiled under gcc, so it is rather tricky to
michael@0 2734  * take the addresses of the real functions the macros expand
michael@0 2735 * to. A simple solution is to define forwarder functions
michael@0 2736 * and take the addresses of the forwarder functions instead.
michael@0 2737 */
michael@0 2738
michael@0 2739 static int stat_forwarder(const char *path, struct stat *buffer)
michael@0 2740 {
michael@0 2741 return stat(path, buffer);
michael@0 2742 }
michael@0 2743
michael@0 2744 static int fstat_forwarder(int filedes, struct stat *buffer)
michael@0 2745 {
michael@0 2746 return fstat(filedes, buffer);
michael@0 2747 }
michael@0 2748
michael@0 2749 #endif
michael@0 2750
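/*
** _PR_InitIOV --
**     Select the 64-bit file I/O entry points for this platform and
**     store them in _md_iovector.  On Solaris 2.5 the large-file symbols
**     (open64, mmap64, fstat64, stat64, lseek64) are looked up
**     dynamically at run time, with 32-bit emulations as a fallback.
*/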
michael@0 2751 static void _PR_InitIOV(void)
michael@0 2752 {
michael@0 2753 #if defined(SOLARIS2_5)
michael@0 2754 PRLibrary *lib;
michael@0 2755 void *open64_func;
michael@0 2756
michael@0 2757 open64_func = PR_FindSymbolAndLibrary("open64", &lib);
michael@0 2758 if (NULL != open64_func)
michael@0 2759 {
michael@0 2760 PR_ASSERT(NULL != lib);
michael@0 2761 _md_iovector._open64 = (_MD_Open64)open64_func;
michael@0 2762 _md_iovector._mmap64 = (_MD_Mmap64)PR_FindSymbol(lib, "mmap64");
michael@0 2763 _md_iovector._fstat64 = (_MD_Fstat64)PR_FindSymbol(lib, "fstat64");
michael@0 2764 _md_iovector._stat64 = (_MD_Stat64)PR_FindSymbol(lib, "stat64");
michael@0 2765 _md_iovector._lseek64 = (_MD_Lseek64)PR_FindSymbol(lib, "lseek64");
michael@0 2766 (void)PR_UnloadLibrary(lib);
michael@0 2767 }
michael@0 2768 else
michael@0 2769 {
michael@0 2770 _md_iovector._open64 = open;
michael@0 2771 _md_iovector._mmap64 = _MD_Unix_mmap64;
michael@0 2772 _md_iovector._fstat64 = _MD_solaris25_fstat64;
michael@0 2773 _md_iovector._stat64 = _MD_solaris25_stat64;
michael@0 2774 _md_iovector._lseek64 = _MD_Unix_lseek64;
michael@0 2775 }
michael@0 2776 #elif defined(_PR_NO_LARGE_FILES)
michael@0 2777 _md_iovector._open64 = open;
michael@0 2778 _md_iovector._mmap64 = _MD_Unix_mmap64;
michael@0 2779 _md_iovector._fstat64 = fstat;
michael@0 2780 _md_iovector._stat64 = stat;
michael@0 2781 _md_iovector._lseek64 = _MD_Unix_lseek64;
michael@0 2782 #elif defined(_PR_HAVE_OFF64_T)
michael@0 2783 #if defined(IRIX5_3) || defined(ANDROID)
michael@0 2784 /*
michael@0 2785 * Android doesn't have open64. We pass the O_LARGEFILE flag to open
michael@0 2786 * in _MD_open.
michael@0 2787 */
michael@0 2788 _md_iovector._open64 = open;
michael@0 2789 #else
michael@0 2790 _md_iovector._open64 = open64;
michael@0 2791 #endif
michael@0 2792 _md_iovector._mmap64 = mmap64;
michael@0 2793 _md_iovector._fstat64 = fstat64;
michael@0 2794 _md_iovector._stat64 = stat64;
michael@0 2795 _md_iovector._lseek64 = lseek64;
michael@0 2796 #elif defined(_PR_HAVE_LARGE_OFF_T)
michael@0 2797 _md_iovector._open64 = open;
michael@0 2798 _md_iovector._mmap64 = mmap;
michael@0 2799 #if defined(OSF1) && defined(__GNUC__)
michael@0 2800 _md_iovector._fstat64 = fstat_forwarder;
michael@0 2801 _md_iovector._stat64 = stat_forwarder;
michael@0 2802 #else
michael@0 2803 _md_iovector._fstat64 = fstat;
michael@0 2804 _md_iovector._stat64 = stat;
michael@0 2805 #endif
michael@0 2806 _md_iovector._lseek64 = lseek;
michael@0 2807 #else
michael@0 2808 #error "I don't know yet"
michael@0 2809 #endif
michael@0 2810 LL_I2L(minus_one, -1);
michael@0 2811 } /* _PR_InitIOV */
michael@0 2812
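/*
** _PR_UnixInit --
**     Process-wide Unix initialization: set up the timer (SIGALRM)
**     signal mask, optionally install the debugging handlers for
**     SIGSEGV, SIGABRT and SIGBUS (Solaris/Irix, controlled by
**     environment variables), ignore SIGPIPE, create the rename lock
**     and the Xfe monitor, and initialize the 64-bit I/O vector.
*/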
michael@0 2813 void _PR_UnixInit(void)
michael@0 2814 {
michael@0 2815 struct sigaction sigact;
michael@0 2816 int rv;
michael@0 2817
michael@0 2818 sigemptyset(&timer_set);
michael@0 2819
michael@0 2820 #if !defined(_PR_PTHREADS)
michael@0 2821
michael@0 2822 sigaddset(&timer_set, SIGALRM);
michael@0 2823 sigemptyset(&empty_set);
michael@0 2824 intr_timeout_ticks =
michael@0 2825 PR_SecondsToInterval(_PR_INTERRUPT_CHECK_INTERVAL_SECS);
michael@0 2826
michael@0 2827 #if defined(SOLARIS) || defined(IRIX)
michael@0 2828
michael@0 2829 if (getenv("NSPR_SIGSEGV_HANDLE")) {
michael@0 2830 sigact.sa_handler = sigsegvhandler;
michael@0 2831 sigact.sa_flags = 0;
michael@0 2832 sigact.sa_mask = timer_set;
michael@0 2833 sigaction(SIGSEGV, &sigact, 0);
michael@0 2834 }
michael@0 2835
michael@0 2836 if (getenv("NSPR_SIGABRT_HANDLE")) {
michael@0 2837 sigact.sa_handler = sigaborthandler;
michael@0 2838 sigact.sa_flags = 0;
michael@0 2839 sigact.sa_mask = timer_set;
michael@0 2840 sigaction(SIGABRT, &sigact, 0);
michael@0 2841 }
michael@0 2842
michael@0 2843 if (getenv("NSPR_SIGBUS_HANDLE")) {
michael@0 2844 sigact.sa_handler = sigbushandler;
michael@0 2845 sigact.sa_flags = 0;
michael@0 2846 sigact.sa_mask = timer_set;
michael@0 2847 sigaction(SIGBUS, &sigact, 0);
michael@0 2848 }
michael@0 2849
michael@0 2850 #endif
michael@0 2851 #endif /* !defined(_PR_PTHREADS) */
michael@0 2852
michael@0 2853 /*
michael@0 2854 * Under HP-UX DCE threads, sigaction() installs a per-thread
michael@0 2855 * handler, so we use sigvector() to install a process-wide
michael@0 2856 * handler.
michael@0 2857 */
michael@0 2858 #if defined(HPUX) && defined(_PR_DCETHREADS)
michael@0 2859 {
michael@0 2860 struct sigvec vec;
michael@0 2861
michael@0 2862 vec.sv_handler = SIG_IGN;
michael@0 2863 vec.sv_mask = 0;
michael@0 2864 vec.sv_flags = 0;
michael@0 2865 rv = sigvector(SIGPIPE, &vec, NULL);
michael@0 2866 PR_ASSERT(0 == rv);
michael@0 2867 }
michael@0 2868 #else
michael@0 2869 sigact.sa_handler = SIG_IGN;
michael@0 2870 sigemptyset(&sigact.sa_mask);
michael@0 2871 sigact.sa_flags = 0;
michael@0 2872 rv = sigaction(SIGPIPE, &sigact, 0);
michael@0 2873 PR_ASSERT(0 == rv);
michael@0 2874 #endif /* HPUX && _PR_DCETHREADS */
michael@0 2875
michael@0 2876 _pr_rename_lock = PR_NewLock();
michael@0 2877 PR_ASSERT(NULL != _pr_rename_lock);
michael@0 2878 _pr_Xfe_mon = PR_NewMonitor();
michael@0 2879 PR_ASSERT(NULL != _pr_Xfe_mon);
michael@0 2880
michael@0 2881 _PR_InitIOV(); /* one last hack */
michael@0 2882 }
michael@0 2883
michael@0 2884 void _PR_UnixCleanup(void)
michael@0 2885 {
michael@0 2886 if (_pr_rename_lock) {
michael@0 2887 PR_DestroyLock(_pr_rename_lock);
michael@0 2888 _pr_rename_lock = NULL;
michael@0 2889 }
michael@0 2890 if (_pr_Xfe_mon) {
michael@0 2891 PR_DestroyMonitor(_pr_Xfe_mon);
michael@0 2892 _pr_Xfe_mon = NULL;
michael@0 2893 }
michael@0 2894 }
michael@0 2895
michael@0 2896 #if !defined(_PR_PTHREADS)
michael@0 2897
michael@0 2898 /*
michael@0 2899 * Variables used by the GC code, initialized in _MD_InitSegs().
michael@0 2900 */
michael@0 2901 static PRInt32 _pr_zero_fd = -1;
michael@0 2902 static PRLock *_pr_md_lock = NULL;
michael@0 2903
michael@0 2904 /*
michael@0 2905 * _MD_InitSegs --
michael@0 2906 *
michael@0 2907 * This is Unix's version of _PR_MD_INIT_SEGS(), which is
michael@0 2908 * called by _PR_InitSegs(), which in turn is called by
michael@0 2909 * PR_Init().
michael@0 2910 */
michael@0 2911 void _MD_InitSegs(void)
michael@0 2912 {
michael@0 2913 #ifdef DEBUG
michael@0 2914 /*
michael@0 2915 ** Disable using mmap(2) if NSPR_NO_MMAP is set
michael@0 2916 */
michael@0 2917 if (getenv("NSPR_NO_MMAP")) {
michael@0 2918 _pr_zero_fd = -2;
michael@0 2919 return;
michael@0 2920 }
michael@0 2921 #endif
michael@0 2922 _pr_zero_fd = open("/dev/zero",O_RDWR , 0);
michael@0 2923 /* Prevent the fd from being inherited by child processes */
michael@0 2924 fcntl(_pr_zero_fd, F_SETFD, FD_CLOEXEC);
michael@0 2925 _pr_md_lock = PR_NewLock();
michael@0 2926 }
michael@0 2927
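/*
** _MD_AllocSegment --
**     Allocate a segment (typically a thread stack), preferably by
**     mmap()ing /dev/zero at a hinted address; fall back to heap
**     allocation with PR_MALLOC if /dev/zero is unavailable or the
**     mmap fails.
*/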
michael@0 2928 PRStatus _MD_AllocSegment(PRSegment *seg, PRUint32 size, void *vaddr)
michael@0 2929 {
michael@0 2930 static char *lastaddr = (char*) _PR_STACK_VMBASE;
michael@0 2931 PRStatus retval = PR_SUCCESS;
michael@0 2932 int prot;
michael@0 2933 void *rv;
michael@0 2934
michael@0 2935 PR_ASSERT(seg != 0);
michael@0 2936 PR_ASSERT(size != 0);
michael@0 2937
michael@0 2938 PR_Lock(_pr_md_lock);
michael@0 2939 if (_pr_zero_fd < 0) {
michael@0 2940 from_heap:
michael@0 2941 seg->vaddr = PR_MALLOC(size);
michael@0 2942 if (!seg->vaddr) {
michael@0 2943 retval = PR_FAILURE;
michael@0 2944 }
michael@0 2945 else {
michael@0 2946 seg->size = size;
michael@0 2947 }
michael@0 2948 goto exit;
michael@0 2949 }
michael@0 2950
michael@0 2951 prot = PROT_READ|PROT_WRITE;
michael@0 2952 /*
michael@0 2953 * On Alpha Linux, the user-level thread stack needs
michael@0 2954 * to be made executable because longjmp/signal seem
michael@0 2955 * to put machine instructions on the stack.
michael@0 2956 */
michael@0 2957 #if defined(LINUX) && defined(__alpha)
michael@0 2958 prot |= PROT_EXEC;
michael@0 2959 #endif
michael@0 2960 rv = mmap((vaddr != 0) ? vaddr : lastaddr, size, prot,
michael@0 2961 _MD_MMAP_FLAGS,
michael@0 2962 _pr_zero_fd, 0);
michael@0 2963 if (rv == (void*)-1) {
michael@0 2964 goto from_heap;
michael@0 2965 }
michael@0 2966 lastaddr += size;
michael@0 2967 seg->vaddr = rv;
michael@0 2968 seg->size = size;
michael@0 2969 seg->flags = _PR_SEG_VM;
michael@0 2970
michael@0 2971 exit:
michael@0 2972 PR_Unlock(_pr_md_lock);
michael@0 2973 return retval;
michael@0 2974 }
michael@0 2975
michael@0 2976 void _MD_FreeSegment(PRSegment *seg)
michael@0 2977 {
michael@0 2978 if (seg->flags & _PR_SEG_VM)
michael@0 2979 (void) munmap(seg->vaddr, seg->size);
michael@0 2980 else
michael@0 2981 PR_DELETE(seg->vaddr);
michael@0 2982 }
michael@0 2983
michael@0 2984 #endif /* _PR_PTHREADS */
michael@0 2985
michael@0 2986 /*
michael@0 2987 *-----------------------------------------------------------------------
michael@0 2988 *
michael@0 2989 * PR_Now --
michael@0 2990 *
michael@0 2991 * Returns the current time in microseconds since the epoch.
michael@0 2992 * The epoch is midnight January 1, 1970 GMT.
michael@0 2993 * The implementation is machine dependent. This is the Unix
michael@0 2994 * implementation.
michael@0 2995 * Cf. time_t time(time_t *tp)
michael@0 2996 *
michael@0 2997 *-----------------------------------------------------------------------
michael@0 2998 */
michael@0 2999
michael@0 3000 PR_IMPLEMENT(PRTime)
michael@0 3001 PR_Now(void)
michael@0 3002 {
michael@0 3003 struct timeval tv;
michael@0 3004 PRInt64 s, us, s2us;
michael@0 3005
michael@0 3006 GETTIMEOFDAY(&tv);
michael@0 3007 LL_I2L(s2us, PR_USEC_PER_SEC);
michael@0 3008 LL_I2L(s, tv.tv_sec);
michael@0 3009 LL_I2L(us, tv.tv_usec);
michael@0 3010 LL_MUL(s, s, s2us);
michael@0 3011 LL_ADD(s, s, us);
michael@0 3012 return s;
michael@0 3013 }
michael@0 3014
michael@0 3015 #if defined(_MD_INTERVAL_USE_GTOD)
michael@0 3016 /*
michael@0 3017 * This version of interval times is based on the time of day
michael@0 3018 * capability offered by the system. This isn't valid for two reasons:
michael@0 3019  * 1) The time of day is neither linear nor monotonically increasing
michael@0 3020 * 2) The units here are milliseconds. That's not appropriate for our use.
michael@0 3021 */
michael@0 3022 PRIntervalTime _PR_UNIX_GetInterval()
michael@0 3023 {
michael@0 3024 struct timeval time;
michael@0 3025 PRIntervalTime ticks;
michael@0 3026
michael@0 3027     (void)GETTIMEOFDAY(&time);  /* fallacy of course */
michael@0 3028 ticks = (PRUint32)time.tv_sec * PR_MSEC_PER_SEC; /* that's in milliseconds */
michael@0 3029 ticks += (PRUint32)time.tv_usec / PR_USEC_PER_MSEC; /* so's that */
michael@0 3030 return ticks;
michael@0 3031 } /* _PR_UNIX_GetInterval */
michael@0 3032
michael@0 3033 PRIntervalTime _PR_UNIX_TicksPerSecond()
michael@0 3034 {
michael@0 3035 return 1000; /* this needs some work :) */
michael@0 3036 }
michael@0 3037 #endif
michael@0 3038
michael@0 3039 #if defined(HAVE_CLOCK_MONOTONIC)
michael@0 3040 PRIntervalTime _PR_UNIX_GetInterval2()
michael@0 3041 {
michael@0 3042 struct timespec time;
michael@0 3043 PRIntervalTime ticks;
michael@0 3044
michael@0 3045 if (clock_gettime(CLOCK_MONOTONIC, &time) != 0) {
michael@0 3046 fprintf(stderr, "clock_gettime failed: %d\n", errno);
michael@0 3047 abort();
michael@0 3048 }
michael@0 3049
michael@0 3050 ticks = (PRUint32)time.tv_sec * PR_MSEC_PER_SEC;
michael@0 3051 ticks += (PRUint32)time.tv_nsec / PR_NSEC_PER_MSEC;
michael@0 3052 return ticks;
michael@0 3053 }
michael@0 3054
michael@0 3055 PRIntervalTime _PR_UNIX_TicksPerSecond2()
michael@0 3056 {
michael@0 3057 return 1000;
michael@0 3058 }
michael@0 3059 #endif
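
/*
 * Illustrative sketch (not compiled): callers consume these interval
 * sources through PR_IntervalNow() and the PR_*Interval conversion
 * helpers rather than calling the _PR_UNIX_* functions directly.  The
 * work() callback is hypothetical and exists only for this example.
 */
#if 0
static PRUint32 example_elapsed_ms(void (*work)(void))
{
    PRIntervalTime start = PR_IntervalNow();
    work();
    /* Interval arithmetic stays in ticks; convert only for display. */
    return PR_IntervalToMilliseconds((PRIntervalTime)(PR_IntervalNow() - start));
}
#endif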
michael@0 3060
michael@0 3061 #if !defined(_PR_PTHREADS)
michael@0 3062 /*
michael@0 3063 * Wait for I/O on multiple descriptors.
michael@0 3064 *
michael@0 3065 * Return 0 if timed out, return -1 if interrupted,
michael@0 3066 * else return the number of ready descriptors.
michael@0 3067 */
michael@0 3068 PRInt32 _PR_WaitForMultipleFDs(
michael@0 3069 _PRUnixPollDesc *unixpds,
michael@0 3070 PRInt32 pdcnt,
michael@0 3071 PRIntervalTime timeout)
michael@0 3072 {
michael@0 3073 PRPollQueue pq;
michael@0 3074 PRIntn is;
michael@0 3075 PRInt32 rv;
michael@0 3076 _PRCPU *io_cpu;
michael@0 3077 _PRUnixPollDesc *unixpd, *eunixpd;
michael@0 3078 PRThread *me = _PR_MD_CURRENT_THREAD();
michael@0 3079
michael@0 3080 PR_ASSERT(!(me->flags & _PR_IDLE_THREAD));
michael@0 3081
michael@0 3082 if (_PR_PENDING_INTERRUPT(me)) {
michael@0 3083 me->flags &= ~_PR_INTERRUPT;
michael@0 3084 PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0);
michael@0 3085 return -1;
michael@0 3086 }
michael@0 3087
michael@0 3088 pq.pds = unixpds;
michael@0 3089 pq.npds = pdcnt;
michael@0 3090
michael@0 3091 _PR_INTSOFF(is);
michael@0 3092 _PR_MD_IOQ_LOCK();
michael@0 3093 _PR_THREAD_LOCK(me);
michael@0 3094
michael@0 3095 pq.thr = me;
michael@0 3096 io_cpu = me->cpu;
michael@0 3097 pq.on_ioq = PR_TRUE;
michael@0 3098 pq.timeout = timeout;
michael@0 3099 _PR_ADD_TO_IOQ(pq, me->cpu);
michael@0 3100
michael@0 3101 #if !defined(_PR_USE_POLL)
michael@0 3102 eunixpd = unixpds + pdcnt;
michael@0 3103 for (unixpd = unixpds; unixpd < eunixpd; unixpd++) {
michael@0 3104 PRInt32 osfd = unixpd->osfd;
michael@0 3105 if (unixpd->in_flags & _PR_UNIX_POLL_READ) {
michael@0 3106 FD_SET(osfd, &_PR_FD_READ_SET(me->cpu));
michael@0 3107 _PR_FD_READ_CNT(me->cpu)[osfd]++;
michael@0 3108 }
michael@0 3109 if (unixpd->in_flags & _PR_UNIX_POLL_WRITE) {
michael@0 3110 FD_SET(osfd, &_PR_FD_WRITE_SET(me->cpu));
michael@0 3111 (_PR_FD_WRITE_CNT(me->cpu))[osfd]++;
michael@0 3112 }
michael@0 3113 if (unixpd->in_flags & _PR_UNIX_POLL_EXCEPT) {
michael@0 3114 FD_SET(osfd, &_PR_FD_EXCEPTION_SET(me->cpu));
michael@0 3115 (_PR_FD_EXCEPTION_CNT(me->cpu))[osfd]++;
michael@0 3116 }
michael@0 3117 if (osfd > _PR_IOQ_MAX_OSFD(me->cpu)) {
michael@0 3118 _PR_IOQ_MAX_OSFD(me->cpu) = osfd;
michael@0 3119 }
michael@0 3120 }
michael@0 3121 #endif /* !defined(_PR_USE_POLL) */
michael@0 3122
michael@0 3123 if (_PR_IOQ_TIMEOUT(me->cpu) > timeout) {
michael@0 3124 _PR_IOQ_TIMEOUT(me->cpu) = timeout;
michael@0 3125 }
michael@0 3126
michael@0 3127 _PR_IOQ_OSFD_CNT(me->cpu) += pdcnt;
michael@0 3128
michael@0 3129 _PR_SLEEPQ_LOCK(me->cpu);
michael@0 3130 _PR_ADD_SLEEPQ(me, timeout);
michael@0 3131 me->state = _PR_IO_WAIT;
michael@0 3132 me->io_pending = PR_TRUE;
michael@0 3133 me->io_suspended = PR_FALSE;
michael@0 3134 _PR_SLEEPQ_UNLOCK(me->cpu);
michael@0 3135 _PR_THREAD_UNLOCK(me);
michael@0 3136 _PR_MD_IOQ_UNLOCK();
michael@0 3137
michael@0 3138 _PR_MD_WAIT(me, timeout);
michael@0 3139
michael@0 3140 me->io_pending = PR_FALSE;
michael@0 3141 me->io_suspended = PR_FALSE;
michael@0 3142
michael@0 3143 /*
michael@0 3144 * This thread should run on the same cpu on which it was blocked; when
michael@0 3145 * the IO request times out the fd sets and fd counts for the
michael@0 3146 * cpu are updated below.
michael@0 3147 */
michael@0 3148 PR_ASSERT(me->cpu == io_cpu);
michael@0 3149
michael@0 3150 /*
michael@0 3151 ** If we timed out the pollq might still be on the ioq. Remove it
michael@0 3152 ** before continuing.
michael@0 3153 */
michael@0 3154 if (pq.on_ioq) {
michael@0 3155 _PR_MD_IOQ_LOCK();
michael@0 3156 /*
michael@0 3157 * Need to check pq.on_ioq again
michael@0 3158 */
michael@0 3159 if (pq.on_ioq) {
michael@0 3160 PR_REMOVE_LINK(&pq.links);
michael@0 3161 #ifndef _PR_USE_POLL
michael@0 3162 eunixpd = unixpds + pdcnt;
michael@0 3163 for (unixpd = unixpds; unixpd < eunixpd; unixpd++) {
michael@0 3164 PRInt32 osfd = unixpd->osfd;
michael@0 3165 PRInt16 in_flags = unixpd->in_flags;
michael@0 3166
michael@0 3167 if (in_flags & _PR_UNIX_POLL_READ) {
michael@0 3168 if (--(_PR_FD_READ_CNT(me->cpu))[osfd] == 0)
michael@0 3169 FD_CLR(osfd, &_PR_FD_READ_SET(me->cpu));
michael@0 3170 }
michael@0 3171 if (in_flags & _PR_UNIX_POLL_WRITE) {
michael@0 3172 if (--(_PR_FD_WRITE_CNT(me->cpu))[osfd] == 0)
michael@0 3173 FD_CLR(osfd, &_PR_FD_WRITE_SET(me->cpu));
michael@0 3174 }
michael@0 3175 if (in_flags & _PR_UNIX_POLL_EXCEPT) {
michael@0 3176 if (--(_PR_FD_EXCEPTION_CNT(me->cpu))[osfd] == 0)
michael@0 3177 FD_CLR(osfd, &_PR_FD_EXCEPTION_SET(me->cpu));
michael@0 3178 }
michael@0 3179 }
michael@0 3180 #endif /* _PR_USE_POLL */
michael@0 3181 PR_ASSERT(pq.npds == pdcnt);
michael@0 3182 _PR_IOQ_OSFD_CNT(me->cpu) -= pdcnt;
michael@0 3183 PR_ASSERT(_PR_IOQ_OSFD_CNT(me->cpu) >= 0);
michael@0 3184 }
michael@0 3185 _PR_MD_IOQ_UNLOCK();
michael@0 3186 }
michael@0 3187 /* XXX Should we use _PR_FAST_INTSON or _PR_INTSON? */
michael@0 3188 if (1 == pdcnt) {
michael@0 3189 _PR_FAST_INTSON(is);
michael@0 3190 } else {
michael@0 3191 _PR_INTSON(is);
michael@0 3192 }
michael@0 3193
michael@0 3194 if (_PR_PENDING_INTERRUPT(me)) {
michael@0 3195 me->flags &= ~_PR_INTERRUPT;
michael@0 3196 PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0);
michael@0 3197 return -1;
michael@0 3198 }
michael@0 3199
michael@0 3200 rv = 0;
michael@0 3201 if (pq.on_ioq == PR_FALSE) {
michael@0 3202 /* Count the number of ready descriptors */
michael@0 3203 while (--pdcnt >= 0) {
michael@0 3204 if (unixpds->out_flags != 0) {
michael@0 3205 rv++;
michael@0 3206 }
michael@0 3207 unixpds++;
michael@0 3208 }
michael@0 3209 }
michael@0 3210
michael@0 3211 return rv;
michael@0 3212 }
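
/*
 * Illustrative sketch (not compiled): the public-API route into this
 * wait is PR_Poll().  A minimal example, assuming an already-connected
 * PRFileDesc *sock supplied by the caller.
 */
#if 0
static PRBool example_wait_readable(PRFileDesc *sock, PRUint32 timeout_ms)
{
    PRPollDesc pd;

    pd.fd = sock;
    pd.in_flags = PR_POLL_READ;
    pd.out_flags = 0;

    /* Returns the number of ready descriptors, 0 on timeout, -1 on error. */
    if (PR_Poll(&pd, 1, PR_MillisecondsToInterval(timeout_ms)) == 1) {
        return (pd.out_flags & PR_POLL_READ) ? PR_TRUE : PR_FALSE;
    }
    return PR_FALSE;
}
#endif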
michael@0 3213
michael@0 3214 /*
michael@0 3215 * Unblock a thread that is waiting for I/O;
michael@0 3216 * used when interrupting threads.
michael@0 3217 *
michael@0 3218 * NOTE: The thread lock must be held when this function is called.
michael@0 3219 * On return, the thread lock is released.
michael@0 3220 */
michael@0 3221 void _PR_Unblock_IO_Wait(PRThread *thr)
michael@0 3222 {
michael@0 3223 int pri = thr->priority;
michael@0 3224 _PRCPU *cpu = thr->cpu;
michael@0 3225
michael@0 3226 /*
michael@0 3227 * GLOBAL threads wakeup periodically to check for interrupt
michael@0 3228 */
michael@0 3229 if (_PR_IS_NATIVE_THREAD(thr)) {
michael@0 3230 _PR_THREAD_UNLOCK(thr);
michael@0 3231 return;
michael@0 3232 }
michael@0 3233
michael@0 3234 PR_ASSERT(thr->flags & (_PR_ON_SLEEPQ | _PR_ON_PAUSEQ));
michael@0 3235 _PR_SLEEPQ_LOCK(cpu);
michael@0 3236 _PR_DEL_SLEEPQ(thr, PR_TRUE);
michael@0 3237 _PR_SLEEPQ_UNLOCK(cpu);
michael@0 3238
michael@0 3239 PR_ASSERT(!(thr->flags & _PR_IDLE_THREAD));
michael@0 3240 thr->state = _PR_RUNNABLE;
michael@0 3241 _PR_RUNQ_LOCK(cpu);
michael@0 3242 _PR_ADD_RUNQ(thr, cpu, pri);
michael@0 3243 _PR_RUNQ_UNLOCK(cpu);
michael@0 3244 _PR_THREAD_UNLOCK(thr);
michael@0 3245 _PR_MD_WAKEUP_WAITER(thr);
michael@0 3246 }
michael@0 3247 #endif /* !defined(_PR_PTHREADS) */
michael@0 3248
michael@0 3249 /*
michael@0 3250 * When a nonblocking connect has completed, determine whether it
michael@0 3251 * succeeded or failed, and if it failed, what the error code is.
michael@0 3252 *
michael@0 3253 * The function returns the error code. An error code of 0 means
michael@0 3254 * that the nonblocking connect succeeded.
michael@0 3255 */
michael@0 3256
michael@0 3257 int _MD_unix_get_nonblocking_connect_error(int osfd)
michael@0 3258 {
michael@0 3259 #if defined(NTO)
michael@0 3260 /* Neutrino does not support the SO_ERROR socket option */
michael@0 3261 PRInt32 rv;
michael@0 3262 PRNetAddr addr;
michael@0 3263 _PRSockLen_t addrlen = sizeof(addr);
michael@0 3264
michael@0 3265 /* Test to see if we are using the Tiny TCP/IP Stack or the Full one. */
michael@0 3266 struct statvfs superblock;
michael@0 3267 rv = fstatvfs(osfd, &superblock);
michael@0 3268 if (rv == 0) {
michael@0 3269 if (strcmp(superblock.f_basetype, "ttcpip") == 0) {
michael@0 3270 /* Using the Tiny Stack! */
michael@0 3271 rv = getpeername(osfd, (struct sockaddr *) &addr,
michael@0 3272 (_PRSockLen_t *) &addrlen);
michael@0 3273 if (rv == -1) {
michael@0 3274 int errno_copy = errno;  /* make a copy so errno is not
michael@0 3275 * accidentally reset by later calls */
michael@0 3276
michael@0 3277 if (errno_copy == ENOTCONN) {
michael@0 3278 struct stat StatInfo;
michael@0 3279 rv = fstat(osfd, &StatInfo);
michael@0 3280 if (rv == 0) {
michael@0 3281 time_t current_time = time(NULL);
michael@0 3282
michael@0 3283 /*
michael@0 3284 * This is a real hack: we cannot explain why it
michael@0 3285 * works, but it does.
michael@0 3286 */
michael@0 3287 if (abs(current_time - StatInfo.st_atime) < 5) {
michael@0 3288 return ECONNREFUSED;
michael@0 3289 } else {
michael@0 3290 return ETIMEDOUT;
michael@0 3291 }
michael@0 3292 } else {
michael@0 3293 return ECONNREFUSED;
michael@0 3294 }
michael@0 3295 } else {
michael@0 3296 return errno_copy;
michael@0 3297 }
michael@0 3298 } else {
michael@0 3299 /* No Error */
michael@0 3300 return 0;
michael@0 3301 }
michael@0 3302 } else {
michael@0 3303 /* Have the FULL Stack which supports SO_ERROR */
michael@0 3304 /* This code path has never been exercised or tested! */
michael@0 3305 /* Jerry.Kirk@Nexwarecorp.com */
michael@0 3306
michael@0 3307 int err;
michael@0 3308 _PRSockLen_t optlen = sizeof(err);
michael@0 3309
michael@0 3310 if (getsockopt(osfd, SOL_SOCKET, SO_ERROR,
michael@0 3311 (char *) &err, &optlen) == -1) {
michael@0 3312 return errno;
michael@0 3313 } else {
michael@0 3314 return err;
michael@0 3315 }
michael@0 3316 }
michael@0 3317 } else {
michael@0 3318 return ECONNREFUSED;
michael@0 3319 }
michael@0 3320 #elif defined(UNIXWARE)
michael@0 3321 /*
michael@0 3322 * getsockopt() fails with EPIPE, so use getmsg() instead.
michael@0 3323 */
michael@0 3324
michael@0 3325 int rv;
michael@0 3326 int flags = 0;
michael@0 3327 rv = getmsg(osfd, NULL, NULL, &flags);
michael@0 3328 PR_ASSERT(-1 == rv || 0 == rv);
michael@0 3329 if (-1 == rv && errno != EAGAIN && errno != EWOULDBLOCK) {
michael@0 3330 return errno;
michael@0 3331 }
michael@0 3332 return 0; /* no error */
michael@0 3333 #else
michael@0 3334 int err;
michael@0 3335 _PRSockLen_t optlen = sizeof(err);
michael@0 3336 if (getsockopt(osfd, SOL_SOCKET, SO_ERROR, (char *) &err, &optlen) == -1) {
michael@0 3337 return errno;
michael@0 3338 } else {
michael@0 3339 return err;
michael@0 3340 }
michael@0 3341 #endif
michael@0 3342 }
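
/*
 * Illustrative sketch (not compiled): how the default SO_ERROR branch is
 * typically driven.  After a nonblocking connect() reports EINPROGRESS,
 * wait for writability and then ask for the deferred result.  The helper
 * below, and its fixed 5 second timeout, are assumptions made only for
 * this example.
 */
#if 0
static int example_finish_nonblocking_connect(int osfd)
{
    struct pollfd pfd;

    pfd.fd = osfd;
    pfd.events = POLLOUT;
    pfd.revents = 0;
    if (poll(&pfd, 1, 5000 /* ms */) != 1) {
        return ETIMEDOUT;
    }
    /* 0 means the connect succeeded; otherwise the pending errno. */
    return _MD_unix_get_nonblocking_connect_error(osfd);
}
#endif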
michael@0 3343
michael@0 3344 /************************************************************************/
michael@0 3345
michael@0 3346 /*
michael@0 3347 ** Special hacks for xlib. Xlib/Xt/Xm is not re-entrant nor is it thread
michael@0 3348 ** safe. Unfortunately, neither is mozilla. To make these programs work
michael@0 3349 ** in a pre-emptive threaded environment, we need to use a lock.
michael@0 3350 */
michael@0 3351
michael@0 3352 void PR_XLock(void)
michael@0 3353 {
michael@0 3354 PR_EnterMonitor(_pr_Xfe_mon);
michael@0 3355 }
michael@0 3356
michael@0 3357 void PR_XUnlock(void)
michael@0 3358 {
michael@0 3359 PR_ExitMonitor(_pr_Xfe_mon);
michael@0 3360 }
michael@0 3361
michael@0 3362 PRBool PR_XIsLocked(void)
michael@0 3363 {
michael@0 3364 return (PR_InMonitor(_pr_Xfe_mon)) ? PR_TRUE : PR_FALSE;
michael@0 3365 }
michael@0 3366
michael@0 3367 void PR_XWait(int ms)
michael@0 3368 {
michael@0 3369 PR_Wait(_pr_Xfe_mon, PR_MillisecondsToInterval(ms));
michael@0 3370 }
michael@0 3371
michael@0 3372 void PR_XNotify(void)
michael@0 3373 {
michael@0 3374 PR_Notify(_pr_Xfe_mon);
michael@0 3375 }
michael@0 3376
michael@0 3377 void PR_XNotifyAll(void)
michael@0 3378 {
michael@0 3379 PR_NotifyAll(_pr_Xfe_mon);
michael@0 3380 }
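
/*
 * Illustrative sketch (not compiled): callers bracket every batch of
 * Xlib/Xt/Xm calls with PR_XLock()/PR_XUnlock() so that only one thread
 * is inside the non-thread-safe X libraries at a time.  XFlush() is just
 * a stand-in for any Xlib call; the sketch assumes the Xlib headers.
 */
#if 0
static void example_bracketed_x_call(Display *dpy)
{
    PR_XLock();
    XFlush(dpy);        /* any sequence of Xlib/Xt/Xm calls */
    PR_XUnlock();
}
#endif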
michael@0 3381
michael@0 3382 #if defined(HAVE_FCNTL_FILE_LOCKING)
michael@0 3383
michael@0 3384 PRStatus
michael@0 3385 _MD_LockFile(PRInt32 f)
michael@0 3386 {
michael@0 3387 PRInt32 rv;
michael@0 3388 struct flock arg;
michael@0 3389
michael@0 3390 arg.l_type = F_WRLCK;
michael@0 3391 arg.l_whence = SEEK_SET;
michael@0 3392 arg.l_start = 0;
michael@0 3393 arg.l_len = 0; /* until EOF */
michael@0 3394 rv = fcntl(f, F_SETLKW, &arg);
michael@0 3395 if (rv == 0)
michael@0 3396 return PR_SUCCESS;
michael@0 3397 _PR_MD_MAP_FLOCK_ERROR(_MD_ERRNO());
michael@0 3398 return PR_FAILURE;
michael@0 3399 }
michael@0 3400
michael@0 3401 PRStatus
michael@0 3402 _MD_TLockFile(PRInt32 f)
michael@0 3403 {
michael@0 3404 PRInt32 rv;
michael@0 3405 struct flock arg;
michael@0 3406
michael@0 3407 arg.l_type = F_WRLCK;
michael@0 3408 arg.l_whence = SEEK_SET;
michael@0 3409 arg.l_start = 0;
michael@0 3410 arg.l_len = 0; /* until EOF */
michael@0 3411 rv = fcntl(f, F_SETLK, &arg);
michael@0 3412 if (rv == 0)
michael@0 3413 return PR_SUCCESS;
michael@0 3414 _PR_MD_MAP_FLOCK_ERROR(_MD_ERRNO());
michael@0 3415 return PR_FAILURE;
michael@0 3416 }
michael@0 3417
michael@0 3418 PRStatus
michael@0 3419 _MD_UnlockFile(PRInt32 f)
michael@0 3420 {
michael@0 3421 PRInt32 rv;
michael@0 3422 struct flock arg;
michael@0 3423
michael@0 3424 arg.l_type = F_UNLCK;
michael@0 3425 arg.l_whence = SEEK_SET;
michael@0 3426 arg.l_start = 0;
michael@0 3427 arg.l_len = 0; /* until EOF */
michael@0 3428 rv = fcntl(f, F_SETLK, &arg);
michael@0 3429 if (rv == 0)
michael@0 3430 return PR_SUCCESS;
michael@0 3431 _PR_MD_MAP_FLOCK_ERROR(_MD_ERRNO());
michael@0 3432 return PR_FAILURE;
michael@0 3433 }
michael@0 3434
michael@0 3435 #elif defined(HAVE_BSD_FLOCK)
michael@0 3436
michael@0 3437 #include <sys/file.h>
michael@0 3438
michael@0 3439 PRStatus
michael@0 3440 _MD_LockFile(PRInt32 f)
michael@0 3441 {
michael@0 3442 PRInt32 rv;
michael@0 3443 rv = flock(f, LOCK_EX);
michael@0 3444 if (rv == 0)
michael@0 3445 return PR_SUCCESS;
michael@0 3446 _PR_MD_MAP_FLOCK_ERROR(_MD_ERRNO());
michael@0 3447 return PR_FAILURE;
michael@0 3448 }
michael@0 3449
michael@0 3450 PRStatus
michael@0 3451 _MD_TLockFile(PRInt32 f)
michael@0 3452 {
michael@0 3453 PRInt32 rv;
michael@0 3454 rv = flock(f, LOCK_EX|LOCK_NB);
michael@0 3455 if (rv == 0)
michael@0 3456 return PR_SUCCESS;
michael@0 3457 _PR_MD_MAP_FLOCK_ERROR(_MD_ERRNO());
michael@0 3458 return PR_FAILURE;
michael@0 3459 }
michael@0 3460
michael@0 3461 PRStatus
michael@0 3462 _MD_UnlockFile(PRInt32 f)
michael@0 3463 {
michael@0 3464 PRInt32 rv;
michael@0 3465 rv = flock(f, LOCK_UN);
michael@0 3466 if (rv == 0)
michael@0 3467 return PR_SUCCESS;
michael@0 3468 _PR_MD_MAP_FLOCK_ERROR(_MD_ERRNO());
michael@0 3469 return PR_FAILURE;
michael@0 3470 }
michael@0 3471 #else
michael@0 3472
michael@0 3473 PRStatus
michael@0 3474 _MD_LockFile(PRInt32 f)
michael@0 3475 {
michael@0 3476 PRInt32 rv;
michael@0 3477 rv = lockf(f, F_LOCK, 0);
michael@0 3478 if (rv == 0)
michael@0 3479 return PR_SUCCESS;
michael@0 3480 _PR_MD_MAP_LOCKF_ERROR(_MD_ERRNO());
michael@0 3481 return PR_FAILURE;
michael@0 3482 }
michael@0 3483
michael@0 3484 PRStatus
michael@0 3485 _MD_TLockFile(PRInt32 f)
michael@0 3486 {
michael@0 3487 PRInt32 rv;
michael@0 3488 rv = lockf(f, F_TLOCK, 0);
michael@0 3489 if (rv == 0)
michael@0 3490 return PR_SUCCESS;
michael@0 3491 _PR_MD_MAP_LOCKF_ERROR(_MD_ERRNO());
michael@0 3492 return PR_FAILURE;
michael@0 3493 }
michael@0 3494
michael@0 3495 PRStatus
michael@0 3496 _MD_UnlockFile(PRInt32 f)
michael@0 3497 {
michael@0 3498 PRInt32 rv;
michael@0 3499 rv = lockf(f, F_ULOCK, 0);
michael@0 3500 if (rv == 0)
michael@0 3501 return PR_SUCCESS;
michael@0 3502 _PR_MD_MAP_LOCKF_ERROR(_MD_ERRNO());
michael@0 3503 return PR_FAILURE;
michael@0 3504 }
michael@0 3505 #endif
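
/*
 * Illustrative sketch (not compiled): these primitives back the public
 * PR_LockFile()/PR_TLockFile()/PR_UnlockFile() calls.  A caller that
 * wants to serialize access to a shared file might use them as below;
 * the path and the one-byte write are assumptions for the example only.
 */
#if 0
static PRStatus example_update_shared_file(const char *path)
{
    PRStatus rv = PR_FAILURE;
    PRFileDesc *fd = PR_Open(path, PR_RDWR | PR_CREATE_FILE, 0600);

    if (fd == NULL)
        return PR_FAILURE;
    if (PR_LockFile(fd) == PR_SUCCESS) {   /* blocks until the lock is held */
        rv = (PR_Write(fd, "x", 1) == 1) ? PR_SUCCESS : PR_FAILURE;
        (void) PR_UnlockFile(fd);
    }
    (void) PR_Close(fd);
    return rv;
}
#endif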
michael@0 3506
michael@0 3507 PRStatus _MD_gethostname(char *name, PRUint32 namelen)
michael@0 3508 {
michael@0 3509 PRIntn rv;
michael@0 3510
michael@0 3511 rv = gethostname(name, namelen);
michael@0 3512 if (0 == rv) {
michael@0 3513 return PR_SUCCESS;
michael@0 3514 }
michael@0 3515 _PR_MD_MAP_GETHOSTNAME_ERROR(_MD_ERRNO());
michael@0 3516 return PR_FAILURE;
michael@0 3517 }
michael@0 3518
michael@0 3519 PRStatus _MD_getsysinfo(PRSysInfo cmd, char *name, PRUint32 namelen)
michael@0 3520 {
michael@0 3521 struct utsname info;
michael@0 3522
michael@0 3523 PR_ASSERT((cmd == PR_SI_SYSNAME) || (cmd == PR_SI_RELEASE));
michael@0 3524
michael@0 3525 if (uname(&info) == -1) {
michael@0 3526 _PR_MD_MAP_DEFAULT_ERROR(errno);
michael@0 3527 return PR_FAILURE;
michael@0 3528 }
michael@0 3529 if (PR_SI_SYSNAME == cmd)
michael@0 3530 (void)PR_snprintf(name, namelen, "%s", info.sysname);
michael@0 3531 else if (PR_SI_RELEASE == cmd)
michael@0 3532 (void)PR_snprintf(name, namelen, "%s", info.release);
michael@0 3533 else
michael@0 3534 return PR_FAILURE;
michael@0 3535 return PR_SUCCESS;
michael@0 3536 }
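
/*
 * Illustrative sketch (not compiled): applications reach this code
 * through PR_GetSystemInfo() declared in prsystem.h.
 */
#if 0
static void example_print_system_info(void)
{
    char sysname[SYS_INFO_BUFFER_LENGTH];
    char release[SYS_INFO_BUFFER_LENGTH];

    if (PR_GetSystemInfo(PR_SI_SYSNAME, sysname, sizeof(sysname)) == PR_SUCCESS &&
        PR_GetSystemInfo(PR_SI_RELEASE, release, sizeof(release)) == PR_SUCCESS) {
        fprintf(stderr, "%s %s\n", sysname, release);
    }
}
#endif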
michael@0 3537
michael@0 3538 /*
michael@0 3539 *******************************************************************
michael@0 3540 *
michael@0 3541 * Memory-mapped files
michael@0 3542 *
michael@0 3543 *******************************************************************
michael@0 3544 */
michael@0 3545
michael@0 3546 PRStatus _MD_CreateFileMap(PRFileMap *fmap, PRInt64 size)
michael@0 3547 {
michael@0 3548 PRFileInfo info;
michael@0 3549 PRUint32 sz;
michael@0 3550
michael@0 3551 LL_L2UI(sz, size);
michael@0 3552 if (sz) {
michael@0 3553 if (PR_GetOpenFileInfo(fmap->fd, &info) == PR_FAILURE) {
michael@0 3554 return PR_FAILURE;
michael@0 3555 }
michael@0 3556 if (sz > info.size) {
michael@0 3557 /*
michael@0 3558 * Need to extend the file
michael@0 3559 */
michael@0 3560 if (fmap->prot != PR_PROT_READWRITE) {
michael@0 3561 PR_SetError(PR_NO_ACCESS_RIGHTS_ERROR, 0);
michael@0 3562 return PR_FAILURE;
michael@0 3563 }
michael@0 3564 if (PR_Seek(fmap->fd, sz - 1, PR_SEEK_SET) == -1) {
michael@0 3565 return PR_FAILURE;
michael@0 3566 }
michael@0 3567 if (PR_Write(fmap->fd, "", 1) != 1) {
michael@0 3568 return PR_FAILURE;
michael@0 3569 }
michael@0 3570 }
michael@0 3571 }
michael@0 3572 if (fmap->prot == PR_PROT_READONLY) {
michael@0 3573 fmap->md.prot = PROT_READ;
michael@0 3574 #ifdef OSF1V4_MAP_PRIVATE_BUG
michael@0 3575 /*
michael@0 3576 * Use MAP_SHARED to work around a bug in OSF1 V4.0D
michael@0 3577 * (QAR 70220 in the OSF_QAR database) that results in
michael@0 3578 * corrupted data in the memory-mapped region. This
michael@0 3579 * bug is fixed in V5.0.
michael@0 3580 */
michael@0 3581 fmap->md.flags = MAP_SHARED;
michael@0 3582 #else
michael@0 3583 fmap->md.flags = MAP_PRIVATE;
michael@0 3584 #endif
michael@0 3585 } else if (fmap->prot == PR_PROT_READWRITE) {
michael@0 3586 fmap->md.prot = PROT_READ | PROT_WRITE;
michael@0 3587 fmap->md.flags = MAP_SHARED;
michael@0 3588 } else {
michael@0 3589 PR_ASSERT(fmap->prot == PR_PROT_WRITECOPY);
michael@0 3590 fmap->md.prot = PROT_READ | PROT_WRITE;
michael@0 3591 fmap->md.flags = MAP_PRIVATE;
michael@0 3592 }
michael@0 3593 return PR_SUCCESS;
michael@0 3594 }
michael@0 3595
michael@0 3596 void * _MD_MemMap(
michael@0 3597 PRFileMap *fmap,
michael@0 3598 PRInt64 offset,
michael@0 3599 PRUint32 len)
michael@0 3600 {
michael@0 3601 PRInt32 off;
michael@0 3602 void *addr;
michael@0 3603
michael@0 3604 LL_L2I(off, offset);
michael@0 3605 if ((addr = mmap(0, len, fmap->md.prot, fmap->md.flags,
michael@0 3606 fmap->fd->secret->md.osfd, off)) == (void *) -1) {
michael@0 3607 _PR_MD_MAP_MMAP_ERROR(_MD_ERRNO());
michael@0 3608 addr = NULL;
michael@0 3609 }
michael@0 3610 return addr;
michael@0 3611 }
michael@0 3612
michael@0 3613 PRStatus _MD_MemUnmap(void *addr, PRUint32 len)
michael@0 3614 {
michael@0 3615 if (munmap(addr, len) == 0) {
michael@0 3616 return PR_SUCCESS;
michael@0 3617 }
michael@0 3618 _PR_MD_MAP_DEFAULT_ERROR(errno);
michael@0 3619 return PR_FAILURE;
michael@0 3620 }
michael@0 3621
michael@0 3622 PRStatus _MD_CloseFileMap(PRFileMap *fmap)
michael@0 3623 {
michael@0 3624 if ( PR_TRUE == fmap->md.isAnonFM ) {
michael@0 3625 PRStatus rc = PR_Close( fmap->fd );
michael@0 3626 if ( PR_FAILURE == rc ) {
michael@0 3627 PR_LOG( _pr_io_lm, PR_LOG_DEBUG,
michael@0 3628 ("_MD_CloseFileMap(): error closing anonymous file map osfd"));
michael@0 3629 return PR_FAILURE;
michael@0 3630 }
michael@0 3631 }
michael@0 3632 PR_DELETE(fmap);
michael@0 3633 return PR_SUCCESS;
michael@0 3634 }
michael@0 3635
michael@0 3636 PRStatus _MD_SyncMemMap(
michael@0 3637 PRFileDesc *fd,
michael@0 3638 void *addr,
michael@0 3639 PRUint32 len)
michael@0 3640 {
michael@0 3641 /* msync(..., MS_SYNC) alone is sufficient to flush modified data to disk
michael@0 3642 * synchronously. It is not necessary to call fsync. */
michael@0 3643 if (msync(addr, len, MS_SYNC) == 0) {
michael@0 3644 return PR_SUCCESS;
michael@0 3645 }
michael@0 3646 _PR_MD_MAP_DEFAULT_ERROR(errno);
michael@0 3647 return PR_FAILURE;
michael@0 3648 }
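
/*
 * Illustrative sketch (not compiled): the _MD_* mapping primitives above
 * are reached through the public PR_CreateFileMap()/PR_MemMap() API.
 * Passing a zero size leaves the file length unchanged (see
 * _MD_CreateFileMap above); len is assumed to lie within the file.
 */
#if 0
static void example_read_mapped_file(PRFileDesc *fd, PRUint32 len)
{
    PRInt64 zero;
    PRFileMap *fmap;
    void *addr;

    LL_I2L(zero, 0);
    fmap = PR_CreateFileMap(fd, zero, PR_PROT_READONLY);
    if (fmap == NULL)
        return;
    addr = PR_MemMap(fmap, zero, len);   /* map the first len bytes */
    if (addr != NULL) {
        /* ... read from addr[0 .. len-1] ... */
        (void) PR_MemUnmap(addr, len);
    }
    (void) PR_CloseFileMap(fmap);
}
#endif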
michael@0 3649
michael@0 3650 #if defined(_PR_NEED_FAKE_POLL)
michael@0 3651
michael@0 3652 /*
michael@0 3653 * Some platforms don't have poll(). For easier porting of code
michael@0 3654 * that calls poll(), we emulate poll() using select().
michael@0 3655 */
michael@0 3656
michael@0 3657 int poll(struct pollfd *filedes, unsigned long nfds, int timeout)
michael@0 3658 {
michael@0 3659 int i;
michael@0 3660 int rv;
michael@0 3661 int maxfd;
michael@0 3662 fd_set rd, wr, ex;
michael@0 3663 struct timeval tv, *tvp;
michael@0 3664
michael@0 3665 if (timeout < 0 && timeout != -1) {
michael@0 3666 errno = EINVAL;
michael@0 3667 return -1;
michael@0 3668 }
michael@0 3669
michael@0 3670 if (timeout == -1) {
michael@0 3671 tvp = NULL;
michael@0 3672 } else {
michael@0 3673 tv.tv_sec = timeout / 1000;
michael@0 3674 tv.tv_usec = (timeout % 1000) * 1000;
michael@0 3675 tvp = &tv;
michael@0 3676 }
michael@0 3677
michael@0 3678 maxfd = -1;
michael@0 3679 FD_ZERO(&rd);
michael@0 3680 FD_ZERO(&wr);
michael@0 3681 FD_ZERO(&ex);
michael@0 3682
michael@0 3683 for (i = 0; i < nfds; i++) {
michael@0 3684 int osfd = filedes[i].fd;
michael@0 3685 int events = filedes[i].events;
michael@0 3686 PRBool fdHasEvent = PR_FALSE;
michael@0 3687
michael@0 3688 if (osfd < 0) {
michael@0 3689 continue; /* Skip this osfd. */
michael@0 3690 }
michael@0 3691
michael@0 3692 /*
michael@0 3693 * Map the poll events to the select fd_sets.
michael@0 3694 * POLLIN, POLLRDNORM ===> readable
michael@0 3695 * POLLOUT, POLLWRNORM ===> writable
michael@0 3696 * POLLPRI, POLLRDBAND ===> exception
michael@0 3697 * POLLNORM, POLLWRBAND (and POLLMSG on some platforms)
michael@0 3698 * are ignored.
michael@0 3699 *
michael@0 3700 * The output events POLLERR and POLLHUP are never turned on.
michael@0 3701 * POLLNVAL may be turned on.
michael@0 3702 */
michael@0 3703
michael@0 3704 if (events & (POLLIN | POLLRDNORM)) {
michael@0 3705 FD_SET(osfd, &rd);
michael@0 3706 fdHasEvent = PR_TRUE;
michael@0 3707 }
michael@0 3708 if (events & (POLLOUT | POLLWRNORM)) {
michael@0 3709 FD_SET(osfd, &wr);
michael@0 3710 fdHasEvent = PR_TRUE;
michael@0 3711 }
michael@0 3712 if (events & (POLLPRI | POLLRDBAND)) {
michael@0 3713 FD_SET(osfd, &ex);
michael@0 3714 fdHasEvent = PR_TRUE;
michael@0 3715 }
michael@0 3716 if (fdHasEvent && osfd > maxfd) {
michael@0 3717 maxfd = osfd;
michael@0 3718 }
michael@0 3719 }
michael@0 3720
michael@0 3721 rv = select(maxfd + 1, &rd, &wr, &ex, tvp);
michael@0 3722
michael@0 3723 /* Compute poll results */
michael@0 3724 if (rv > 0) {
michael@0 3725 rv = 0;
michael@0 3726 for (i = 0; i < nfds; i++) {
michael@0 3727 PRBool fdHasEvent = PR_FALSE;
michael@0 3728
michael@0 3729 filedes[i].revents = 0;
michael@0 3730 if (filedes[i].fd < 0) {
michael@0 3731 continue;
michael@0 3732 }
michael@0 3733 if (FD_ISSET(filedes[i].fd, &rd)) {
michael@0 3734 if (filedes[i].events & POLLIN) {
michael@0 3735 filedes[i].revents |= POLLIN;
michael@0 3736 }
michael@0 3737 if (filedes[i].events & POLLRDNORM) {
michael@0 3738 filedes[i].revents |= POLLRDNORM;
michael@0 3739 }
michael@0 3740 fdHasEvent = PR_TRUE;
michael@0 3741 }
michael@0 3742 if (FD_ISSET(filedes[i].fd, &wr)) {
michael@0 3743 if (filedes[i].events & POLLOUT) {
michael@0 3744 filedes[i].revents |= POLLOUT;
michael@0 3745 }
michael@0 3746 if (filedes[i].events & POLLWRNORM) {
michael@0 3747 filedes[i].revents |= POLLWRNORM;
michael@0 3748 }
michael@0 3749 fdHasEvent = PR_TRUE;
michael@0 3750 }
michael@0 3751 if (FD_ISSET(filedes[i].fd, &ex)) {
michael@0 3752 if (filedes[i].events & POLLPRI) {
michael@0 3753 filedes[i].revents |= POLLPRI;
michael@0 3754 }
michael@0 3755 if (filedes[i].events & POLLRDBAND) {
michael@0 3756 filedes[i].revents |= POLLRDBAND;
michael@0 3757 }
michael@0 3758 fdHasEvent = PR_TRUE;
michael@0 3759 }
michael@0 3760 if (fdHasEvent) {
michael@0 3761 rv++;
michael@0 3762 }
michael@0 3763 }
michael@0 3764 PR_ASSERT(rv > 0);
michael@0 3765 } else if (rv == -1 && errno == EBADF) {
michael@0 3766 rv = 0;
michael@0 3767 for (i = 0; i < nfds; i++) {
michael@0 3768 filedes[i].revents = 0;
michael@0 3769 if (filedes[i].fd < 0) {
michael@0 3770 continue;
michael@0 3771 }
michael@0 3772 if (fcntl(filedes[i].fd, F_GETFL, 0) == -1) {
michael@0 3773 filedes[i].revents = POLLNVAL;
michael@0 3774 rv++;
michael@0 3775 }
michael@0 3776 }
michael@0 3777 PR_ASSERT(rv > 0);
michael@0 3778 }
michael@0 3779 PR_ASSERT(-1 != timeout || rv != 0);
michael@0 3780
michael@0 3781 return rv;
michael@0 3782 }
michael@0 3783 #endif /* _PR_NEED_FAKE_POLL */
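
/*
 * Illustrative sketch (not compiled): the emulated poll() above is a
 * drop-in for the usual calling pattern, e.g. waiting for a descriptor
 * to become readable with a one second timeout.
 */
#if 0
static int example_poll_readable(int osfd)
{
    struct pollfd pfd;

    pfd.fd = osfd;
    pfd.events = POLLIN;
    pfd.revents = 0;
    /* >0: ready, 0: timed out, -1: error (errno set). */
    return poll(&pfd, 1, 1000);
}
#endif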
