--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/nsprpub/pr/src/md/unix/unix.c Wed Dec 31 06:09:35 2014 +0100
@@ -0,0 +1,3783 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "primpl.h"
+
+#include <string.h>
+#include <signal.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/time.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <unistd.h>
+#include <sys/utsname.h>
+
+#ifdef _PR_POLL_AVAILABLE
+#include <poll.h>
+#endif
+
+/* To get FIONREAD */
+#if defined(UNIXWARE)
+#include <sys/filio.h>
+#endif
+
+#if defined(NTO)
+#include <sys/statvfs.h>
+#endif
+
+/*
+ * Make sure _PRSockLen_t is 32-bit, because we will cast a PRUint32* or
+ * PRInt32* pointer to a _PRSockLen_t* pointer.
+ */
+#if defined(HAVE_SOCKLEN_T) \
+    || (defined(__GLIBC__) && __GLIBC__ >= 2)
+#define _PRSockLen_t socklen_t
+#elif defined(IRIX) || defined(HPUX) || defined(OSF1) || defined(SOLARIS) \
+    || defined(AIX4_1) || defined(LINUX) \
+    || defined(BSDI) || defined(SCO) \
+    || defined(DARWIN) \
+    || defined(QNX)
+#define _PRSockLen_t int
+#elif (defined(AIX) && !defined(AIX4_1)) || defined(FREEBSD) \
+    || defined(NETBSD) || defined(OPENBSD) || defined(UNIXWARE) \
+    || defined(DGUX) || defined(NTO) || defined(RISCOS)
+#define _PRSockLen_t size_t
+#else
+#error "Cannot determine architecture"
+#endif
+
+/*
+** Global lock variable used to bracket calls into rusty libraries that
+** aren't thread safe (like libc, libX, etc).
+*/
+static PRLock *_pr_rename_lock = NULL;
+static PRMonitor *_pr_Xfe_mon = NULL;
+
+static PRInt64 minus_one;
+
+sigset_t timer_set;
+
+#if !defined(_PR_PTHREADS)
+
+static sigset_t empty_set;
+
+#ifdef SOLARIS
+#include <sys/file.h>
+#include <sys/filio.h>
+#endif
+
+#ifndef PIPE_BUF
+#define PIPE_BUF 512
+#endif
+
+/*
+ * _nspr_noclock - if set clock interrupts are disabled
+ */
+int _nspr_noclock = 1;
+
+#ifdef IRIX
+extern PRInt32 _nspr_terminate_on_error;
+#endif
+
+/*
+ * There is an assertion in this code that NSPR's definition of PRIOVec
+ * is bit compatible with UNIX' definition of a struct iovec. This is
+ * applicable to the 'writev()' operations where the types are casually
+ * cast to avoid warnings.
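Editor's note, not part of the patch: the _PRSockLen_t block above insists on a 32-bit type because pointers to PRUint32/PRInt32 are cast to _PRSockLen_t*. A standalone compile-time check of that width assumption (on platforms that provide socklen_t) could look like the following sketch; the typedef name is hypothetical.

/* Editorial sketch -- not part of the patch. The negative array size
 * becomes a compile error if socklen_t is not 32 bits wide. */
#include <stdint.h>
#include <sys/socket.h>

typedef char assert_socklen_is_32_bit[(sizeof(socklen_t) == sizeof(uint32_t)) ? 1 : -1];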
1.95 + */ 1.96 + 1.97 +int _pr_md_pipefd[2] = { -1, -1 }; 1.98 +static char _pr_md_pipebuf[PIPE_BUF]; 1.99 +static PRInt32 local_io_wait(PRInt32 osfd, PRInt32 wait_flag, 1.100 + PRIntervalTime timeout); 1.101 + 1.102 +_PRInterruptTable _pr_interruptTable[] = { 1.103 + { 1.104 + "clock", _PR_MISSED_CLOCK, _PR_ClockInterrupt, }, 1.105 + { 1.106 + 0 } 1.107 +}; 1.108 + 1.109 +void _MD_unix_init_running_cpu(_PRCPU *cpu) 1.110 +{ 1.111 + PR_INIT_CLIST(&(cpu->md.md_unix.ioQ)); 1.112 + cpu->md.md_unix.ioq_max_osfd = -1; 1.113 + cpu->md.md_unix.ioq_timeout = PR_INTERVAL_NO_TIMEOUT; 1.114 +} 1.115 + 1.116 +PRStatus _MD_open_dir(_MDDir *d, const char *name) 1.117 +{ 1.118 +int err; 1.119 + 1.120 + d->d = opendir(name); 1.121 + if (!d->d) { 1.122 + err = _MD_ERRNO(); 1.123 + _PR_MD_MAP_OPENDIR_ERROR(err); 1.124 + return PR_FAILURE; 1.125 + } 1.126 + return PR_SUCCESS; 1.127 +} 1.128 + 1.129 +PRInt32 _MD_close_dir(_MDDir *d) 1.130 +{ 1.131 +int rv = 0, err; 1.132 + 1.133 + if (d->d) { 1.134 + rv = closedir(d->d); 1.135 + if (rv == -1) { 1.136 + err = _MD_ERRNO(); 1.137 + _PR_MD_MAP_CLOSEDIR_ERROR(err); 1.138 + } 1.139 + } 1.140 + return rv; 1.141 +} 1.142 + 1.143 +char * _MD_read_dir(_MDDir *d, PRIntn flags) 1.144 +{ 1.145 +struct dirent *de; 1.146 +int err; 1.147 + 1.148 + for (;;) { 1.149 + /* 1.150 + * XXX: readdir() is not MT-safe. There is an MT-safe version 1.151 + * readdir_r() on some systems. 1.152 + */ 1.153 + _MD_ERRNO() = 0; 1.154 + de = readdir(d->d); 1.155 + if (!de) { 1.156 + err = _MD_ERRNO(); 1.157 + _PR_MD_MAP_READDIR_ERROR(err); 1.158 + return 0; 1.159 + } 1.160 + if ((flags & PR_SKIP_DOT) && 1.161 + (de->d_name[0] == '.') && (de->d_name[1] == 0)) 1.162 + continue; 1.163 + if ((flags & PR_SKIP_DOT_DOT) && 1.164 + (de->d_name[0] == '.') && (de->d_name[1] == '.') && 1.165 + (de->d_name[2] == 0)) 1.166 + continue; 1.167 + if ((flags & PR_SKIP_HIDDEN) && (de->d_name[0] == '.')) 1.168 + continue; 1.169 + break; 1.170 + } 1.171 + return de->d_name; 1.172 +} 1.173 + 1.174 +PRInt32 _MD_delete(const char *name) 1.175 +{ 1.176 +PRInt32 rv, err; 1.177 +#ifdef UNIXWARE 1.178 + sigset_t set, oset; 1.179 +#endif 1.180 + 1.181 +#ifdef UNIXWARE 1.182 + sigfillset(&set); 1.183 + sigprocmask(SIG_SETMASK, &set, &oset); 1.184 +#endif 1.185 + rv = unlink(name); 1.186 +#ifdef UNIXWARE 1.187 + sigprocmask(SIG_SETMASK, &oset, NULL); 1.188 +#endif 1.189 + if (rv == -1) { 1.190 + err = _MD_ERRNO(); 1.191 + _PR_MD_MAP_UNLINK_ERROR(err); 1.192 + } 1.193 + return(rv); 1.194 +} 1.195 + 1.196 +PRInt32 _MD_rename(const char *from, const char *to) 1.197 +{ 1.198 + PRInt32 rv = -1, err; 1.199 + 1.200 + /* 1.201 + ** This is trying to enforce the semantics of WINDOZE' rename 1.202 + ** operation. That means one is not allowed to rename over top 1.203 + ** of an existing file. Holding a lock across these two function 1.204 + ** and the open function is known to be a bad idea, but .... 
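Editor's note, not part of the patch: the comment closing just below explains why _MD_rename refuses to overwrite an existing target (Windows-style rename semantics) and why a process-wide lock brackets the existence check and the rename. A minimal standalone sketch of that check-then-rename pattern follows, with a pthread mutex standing in for _pr_rename_lock; the access()/rename() window is still racy against other processes, which is the "known to be a bad idea" caveat the comment admits to.

/* Editorial sketch -- not part of the patch. Illustrates the
 * check-then-rename pattern used by _MD_rename below. */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>      /* rename() */
#include <unistd.h>     /* access() */

static pthread_mutex_t rename_lock = PTHREAD_MUTEX_INITIALIZER;

/* Returns 0 on success, -1 with errno set (EEXIST if 'to' already exists). */
static int rename_no_clobber(const char *from, const char *to)
{
    int rv = -1;

    pthread_mutex_lock(&rename_lock);
    if (access(to, F_OK) == 0) {
        errno = EEXIST;          /* refuse to rename over an existing file */
    } else {
        rv = rename(from, to);   /* still racy vs. other processes */
    }
    pthread_mutex_unlock(&rename_lock);
    return rv;
}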
1.205 + */ 1.206 + if (NULL != _pr_rename_lock) 1.207 + PR_Lock(_pr_rename_lock); 1.208 + if (0 == access(to, F_OK)) 1.209 + PR_SetError(PR_FILE_EXISTS_ERROR, 0); 1.210 + else 1.211 + { 1.212 + rv = rename(from, to); 1.213 + if (rv < 0) { 1.214 + err = _MD_ERRNO(); 1.215 + _PR_MD_MAP_RENAME_ERROR(err); 1.216 + } 1.217 + } 1.218 + if (NULL != _pr_rename_lock) 1.219 + PR_Unlock(_pr_rename_lock); 1.220 + return rv; 1.221 +} 1.222 + 1.223 +PRInt32 _MD_access(const char *name, PRAccessHow how) 1.224 +{ 1.225 +PRInt32 rv, err; 1.226 +int amode; 1.227 + 1.228 + switch (how) { 1.229 + case PR_ACCESS_WRITE_OK: 1.230 + amode = W_OK; 1.231 + break; 1.232 + case PR_ACCESS_READ_OK: 1.233 + amode = R_OK; 1.234 + break; 1.235 + case PR_ACCESS_EXISTS: 1.236 + amode = F_OK; 1.237 + break; 1.238 + default: 1.239 + PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0); 1.240 + rv = -1; 1.241 + goto done; 1.242 + } 1.243 + rv = access(name, amode); 1.244 + 1.245 + if (rv < 0) { 1.246 + err = _MD_ERRNO(); 1.247 + _PR_MD_MAP_ACCESS_ERROR(err); 1.248 + } 1.249 + 1.250 +done: 1.251 + return(rv); 1.252 +} 1.253 + 1.254 +PRInt32 _MD_mkdir(const char *name, PRIntn mode) 1.255 +{ 1.256 +int rv, err; 1.257 + 1.258 + /* 1.259 + ** This lock is used to enforce rename semantics as described 1.260 + ** in PR_Rename. Look there for more fun details. 1.261 + */ 1.262 + if (NULL !=_pr_rename_lock) 1.263 + PR_Lock(_pr_rename_lock); 1.264 + rv = mkdir(name, mode); 1.265 + if (rv < 0) { 1.266 + err = _MD_ERRNO(); 1.267 + _PR_MD_MAP_MKDIR_ERROR(err); 1.268 + } 1.269 + if (NULL !=_pr_rename_lock) 1.270 + PR_Unlock(_pr_rename_lock); 1.271 + return rv; 1.272 +} 1.273 + 1.274 +PRInt32 _MD_rmdir(const char *name) 1.275 +{ 1.276 +int rv, err; 1.277 + 1.278 + rv = rmdir(name); 1.279 + if (rv == -1) { 1.280 + err = _MD_ERRNO(); 1.281 + _PR_MD_MAP_RMDIR_ERROR(err); 1.282 + } 1.283 + return rv; 1.284 +} 1.285 + 1.286 +PRInt32 _MD_read(PRFileDesc *fd, void *buf, PRInt32 amount) 1.287 +{ 1.288 +PRThread *me = _PR_MD_CURRENT_THREAD(); 1.289 +PRInt32 rv, err; 1.290 +#ifndef _PR_USE_POLL 1.291 +fd_set rd; 1.292 +#else 1.293 +struct pollfd pfd; 1.294 +#endif /* _PR_USE_POLL */ 1.295 +PRInt32 osfd = fd->secret->md.osfd; 1.296 + 1.297 +#ifndef _PR_USE_POLL 1.298 + FD_ZERO(&rd); 1.299 + FD_SET(osfd, &rd); 1.300 +#else 1.301 + pfd.fd = osfd; 1.302 + pfd.events = POLLIN; 1.303 +#endif /* _PR_USE_POLL */ 1.304 + while ((rv = read(osfd,buf,amount)) == -1) { 1.305 + err = _MD_ERRNO(); 1.306 + if ((err == EAGAIN) || (err == EWOULDBLOCK)) { 1.307 + if (fd->secret->nonblocking) { 1.308 + break; 1.309 + } 1.310 + if (!_PR_IS_NATIVE_THREAD(me)) { 1.311 + if ((rv = local_io_wait(osfd, _PR_UNIX_POLL_READ, 1.312 + PR_INTERVAL_NO_TIMEOUT)) < 0) 1.313 + goto done; 1.314 + } else { 1.315 +#ifndef _PR_USE_POLL 1.316 + while ((rv = _MD_SELECT(osfd + 1, &rd, NULL, NULL, NULL)) 1.317 + == -1 && (err = _MD_ERRNO()) == EINTR) { 1.318 + /* retry _MD_SELECT() if it is interrupted */ 1.319 + } 1.320 +#else /* _PR_USE_POLL */ 1.321 + while ((rv = _MD_POLL(&pfd, 1, -1)) 1.322 + == -1 && (err = _MD_ERRNO()) == EINTR) { 1.323 + /* retry _MD_POLL() if it is interrupted */ 1.324 + } 1.325 +#endif /* _PR_USE_POLL */ 1.326 + if (rv == -1) { 1.327 + break; 1.328 + } 1.329 + } 1.330 + if (_PR_PENDING_INTERRUPT(me)) 1.331 + break; 1.332 + } else if ((err == EINTR) && (!_PR_PENDING_INTERRUPT(me))){ 1.333 + continue; 1.334 + } else { 1.335 + break; 1.336 + } 1.337 + } 1.338 + if (rv < 0) { 1.339 + if (_PR_PENDING_INTERRUPT(me)) { 1.340 + me->flags &= ~_PR_INTERRUPT; 1.341 + 
PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0); 1.342 + } else { 1.343 + _PR_MD_MAP_READ_ERROR(err); 1.344 + } 1.345 + } 1.346 +done: 1.347 + return(rv); 1.348 +} 1.349 + 1.350 +PRInt32 _MD_write(PRFileDesc *fd, const void *buf, PRInt32 amount) 1.351 +{ 1.352 +PRThread *me = _PR_MD_CURRENT_THREAD(); 1.353 +PRInt32 rv, err; 1.354 +#ifndef _PR_USE_POLL 1.355 +fd_set wd; 1.356 +#else 1.357 +struct pollfd pfd; 1.358 +#endif /* _PR_USE_POLL */ 1.359 +PRInt32 osfd = fd->secret->md.osfd; 1.360 + 1.361 +#ifndef _PR_USE_POLL 1.362 + FD_ZERO(&wd); 1.363 + FD_SET(osfd, &wd); 1.364 +#else 1.365 + pfd.fd = osfd; 1.366 + pfd.events = POLLOUT; 1.367 +#endif /* _PR_USE_POLL */ 1.368 + while ((rv = write(osfd,buf,amount)) == -1) { 1.369 + err = _MD_ERRNO(); 1.370 + if ((err == EAGAIN) || (err == EWOULDBLOCK)) { 1.371 + if (fd->secret->nonblocking) { 1.372 + break; 1.373 + } 1.374 + if (!_PR_IS_NATIVE_THREAD(me)) { 1.375 + if ((rv = local_io_wait(osfd, _PR_UNIX_POLL_WRITE, 1.376 + PR_INTERVAL_NO_TIMEOUT)) < 0) 1.377 + goto done; 1.378 + } else { 1.379 +#ifndef _PR_USE_POLL 1.380 + while ((rv = _MD_SELECT(osfd + 1, NULL, &wd, NULL, NULL)) 1.381 + == -1 && (err = _MD_ERRNO()) == EINTR) { 1.382 + /* retry _MD_SELECT() if it is interrupted */ 1.383 + } 1.384 +#else /* _PR_USE_POLL */ 1.385 + while ((rv = _MD_POLL(&pfd, 1, -1)) 1.386 + == -1 && (err = _MD_ERRNO()) == EINTR) { 1.387 + /* retry _MD_POLL() if it is interrupted */ 1.388 + } 1.389 +#endif /* _PR_USE_POLL */ 1.390 + if (rv == -1) { 1.391 + break; 1.392 + } 1.393 + } 1.394 + if (_PR_PENDING_INTERRUPT(me)) 1.395 + break; 1.396 + } else if ((err == EINTR) && (!_PR_PENDING_INTERRUPT(me))){ 1.397 + continue; 1.398 + } else { 1.399 + break; 1.400 + } 1.401 + } 1.402 + if (rv < 0) { 1.403 + if (_PR_PENDING_INTERRUPT(me)) { 1.404 + me->flags &= ~_PR_INTERRUPT; 1.405 + PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0); 1.406 + } else { 1.407 + _PR_MD_MAP_WRITE_ERROR(err); 1.408 + } 1.409 + } 1.410 +done: 1.411 + return(rv); 1.412 +} 1.413 + 1.414 +PRInt32 _MD_fsync(PRFileDesc *fd) 1.415 +{ 1.416 +PRInt32 rv, err; 1.417 + 1.418 + rv = fsync(fd->secret->md.osfd); 1.419 + if (rv == -1) { 1.420 + err = _MD_ERRNO(); 1.421 + _PR_MD_MAP_FSYNC_ERROR(err); 1.422 + } 1.423 + return(rv); 1.424 +} 1.425 + 1.426 +PRInt32 _MD_close(PRInt32 osfd) 1.427 +{ 1.428 +PRInt32 rv, err; 1.429 + 1.430 + rv = close(osfd); 1.431 + if (rv == -1) { 1.432 + err = _MD_ERRNO(); 1.433 + _PR_MD_MAP_CLOSE_ERROR(err); 1.434 + } 1.435 + return(rv); 1.436 +} 1.437 + 1.438 +PRInt32 _MD_socket(PRInt32 domain, PRInt32 type, PRInt32 proto) 1.439 +{ 1.440 + PRInt32 osfd, err; 1.441 + 1.442 + osfd = socket(domain, type, proto); 1.443 + 1.444 + if (osfd == -1) { 1.445 + err = _MD_ERRNO(); 1.446 + _PR_MD_MAP_SOCKET_ERROR(err); 1.447 + return(osfd); 1.448 + } 1.449 + 1.450 + return(osfd); 1.451 +} 1.452 + 1.453 +PRInt32 _MD_socketavailable(PRFileDesc *fd) 1.454 +{ 1.455 + PRInt32 result; 1.456 + 1.457 + if (ioctl(fd->secret->md.osfd, FIONREAD, &result) < 0) { 1.458 + _PR_MD_MAP_SOCKETAVAILABLE_ERROR(_MD_ERRNO()); 1.459 + return -1; 1.460 + } 1.461 + return result; 1.462 +} 1.463 + 1.464 +PRInt64 _MD_socketavailable64(PRFileDesc *fd) 1.465 +{ 1.466 + PRInt64 result; 1.467 + LL_I2L(result, _MD_socketavailable(fd)); 1.468 + return result; 1.469 +} /* _MD_socketavailable64 */ 1.470 + 1.471 +#define READ_FD 1 1.472 +#define WRITE_FD 2 1.473 + 1.474 +/* 1.475 + * socket_io_wait -- 1.476 + * 1.477 + * wait for socket i/o, periodically checking for interrupt 1.478 + * 1.479 + * The first implementation uses select(), for 
platforms without 1.480 + * poll(). The second (preferred) implementation uses poll(). 1.481 + */ 1.482 + 1.483 +#ifndef _PR_USE_POLL 1.484 + 1.485 +static PRInt32 socket_io_wait(PRInt32 osfd, PRInt32 fd_type, 1.486 + PRIntervalTime timeout) 1.487 +{ 1.488 + PRInt32 rv = -1; 1.489 + struct timeval tv; 1.490 + PRThread *me = _PR_MD_CURRENT_THREAD(); 1.491 + PRIntervalTime epoch, now, elapsed, remaining; 1.492 + PRBool wait_for_remaining; 1.493 + PRInt32 syserror; 1.494 + fd_set rd_wr; 1.495 + 1.496 + switch (timeout) { 1.497 + case PR_INTERVAL_NO_WAIT: 1.498 + PR_SetError(PR_IO_TIMEOUT_ERROR, 0); 1.499 + break; 1.500 + case PR_INTERVAL_NO_TIMEOUT: 1.501 + /* 1.502 + * This is a special case of the 'default' case below. 1.503 + * Please see the comments there. 1.504 + */ 1.505 + tv.tv_sec = _PR_INTERRUPT_CHECK_INTERVAL_SECS; 1.506 + tv.tv_usec = 0; 1.507 + FD_ZERO(&rd_wr); 1.508 + do { 1.509 + FD_SET(osfd, &rd_wr); 1.510 + if (fd_type == READ_FD) 1.511 + rv = _MD_SELECT(osfd + 1, &rd_wr, NULL, NULL, &tv); 1.512 + else 1.513 + rv = _MD_SELECT(osfd + 1, NULL, &rd_wr, NULL, &tv); 1.514 + if (rv == -1 && (syserror = _MD_ERRNO()) != EINTR) { 1.515 + _PR_MD_MAP_SELECT_ERROR(syserror); 1.516 + break; 1.517 + } 1.518 + if (_PR_PENDING_INTERRUPT(me)) { 1.519 + me->flags &= ~_PR_INTERRUPT; 1.520 + PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0); 1.521 + rv = -1; 1.522 + break; 1.523 + } 1.524 + } while (rv == 0 || (rv == -1 && syserror == EINTR)); 1.525 + break; 1.526 + default: 1.527 + now = epoch = PR_IntervalNow(); 1.528 + remaining = timeout; 1.529 + FD_ZERO(&rd_wr); 1.530 + do { 1.531 + /* 1.532 + * We block in _MD_SELECT for at most 1.533 + * _PR_INTERRUPT_CHECK_INTERVAL_SECS seconds, 1.534 + * so that there is an upper limit on the delay 1.535 + * before the interrupt bit is checked. 1.536 + */ 1.537 + wait_for_remaining = PR_TRUE; 1.538 + tv.tv_sec = PR_IntervalToSeconds(remaining); 1.539 + if (tv.tv_sec > _PR_INTERRUPT_CHECK_INTERVAL_SECS) { 1.540 + wait_for_remaining = PR_FALSE; 1.541 + tv.tv_sec = _PR_INTERRUPT_CHECK_INTERVAL_SECS; 1.542 + tv.tv_usec = 0; 1.543 + } else { 1.544 + tv.tv_usec = PR_IntervalToMicroseconds( 1.545 + remaining - 1.546 + PR_SecondsToInterval(tv.tv_sec)); 1.547 + } 1.548 + FD_SET(osfd, &rd_wr); 1.549 + if (fd_type == READ_FD) 1.550 + rv = _MD_SELECT(osfd + 1, &rd_wr, NULL, NULL, &tv); 1.551 + else 1.552 + rv = _MD_SELECT(osfd + 1, NULL, &rd_wr, NULL, &tv); 1.553 + /* 1.554 + * we don't consider EINTR a real error 1.555 + */ 1.556 + if (rv == -1 && (syserror = _MD_ERRNO()) != EINTR) { 1.557 + _PR_MD_MAP_SELECT_ERROR(syserror); 1.558 + break; 1.559 + } 1.560 + if (_PR_PENDING_INTERRUPT(me)) { 1.561 + me->flags &= ~_PR_INTERRUPT; 1.562 + PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0); 1.563 + rv = -1; 1.564 + break; 1.565 + } 1.566 + /* 1.567 + * We loop again if _MD_SELECT timed out or got interrupted 1.568 + * by a signal, and the timeout deadline has not passed yet. 1.569 + */ 1.570 + if (rv == 0 || (rv == -1 && syserror == EINTR)) { 1.571 + /* 1.572 + * If _MD_SELECT timed out, we know how much time 1.573 + * we spent in blocking, so we can avoid a 1.574 + * PR_IntervalNow() call. 
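Editor's note, not part of the patch: the select() branch of socket_io_wait above never blocks for more than _PR_INTERRUPT_CHECK_INTERVAL_SECS at a time, so a pending interrupt is noticed within a bounded delay, and it keeps a running "remaining" budget across iterations. A stripped-down standalone sketch of that slicing logic follows, using milliseconds instead of PRIntervalTime and a caller-supplied interrupt_flag in place of _PR_PENDING_INTERRUPT.

/* Editorial sketch -- not part of the patch. Waits for 'fd' to become
 * readable, but blocks in select() for at most 'slice_ms' per call so
 * the interrupt flag can be polled between slices. */
#include <errno.h>
#include <sys/select.h>
#include <sys/time.h>

static int wait_readable(int fd, int timeout_ms, int slice_ms,
                         const volatile int *interrupt_flag)
{
    int remaining = timeout_ms;

    while (remaining > 0) {
        int chunk = remaining < slice_ms ? remaining : slice_ms;
        struct timeval tv;
        fd_set rd;
        int rv;

        tv.tv_sec = chunk / 1000;
        tv.tv_usec = (chunk % 1000) * 1000;
        FD_ZERO(&rd);
        FD_SET(fd, &rd);

        rv = select(fd + 1, &rd, NULL, NULL, &tv);
        if (rv > 0)
            return 1;                   /* readable */
        if (rv < 0 && errno != EINTR)
            return -1;                  /* real select() failure */
        if (*interrupt_flag)
            return -1;                  /* caller asked us to give up */
        remaining -= chunk;             /* timed out (or EINTR): charge one slice */
    }
    return 0;                           /* overall timeout expired */
}

The original is more careful on the EINTR path: it re-reads the interval clock instead of assuming a whole slice elapsed, which is what the bookkeeping around PR_IntervalNow() in the code above is doing.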
1.575 + */ 1.576 + if (rv == 0) { 1.577 + if (wait_for_remaining) { 1.578 + now += remaining; 1.579 + } else { 1.580 + now += PR_SecondsToInterval(tv.tv_sec) 1.581 + + PR_MicrosecondsToInterval(tv.tv_usec); 1.582 + } 1.583 + } else { 1.584 + now = PR_IntervalNow(); 1.585 + } 1.586 + elapsed = (PRIntervalTime) (now - epoch); 1.587 + if (elapsed >= timeout) { 1.588 + PR_SetError(PR_IO_TIMEOUT_ERROR, 0); 1.589 + rv = -1; 1.590 + break; 1.591 + } else { 1.592 + remaining = timeout - elapsed; 1.593 + } 1.594 + } 1.595 + } while (rv == 0 || (rv == -1 && syserror == EINTR)); 1.596 + break; 1.597 + } 1.598 + return(rv); 1.599 +} 1.600 + 1.601 +#else /* _PR_USE_POLL */ 1.602 + 1.603 +static PRInt32 socket_io_wait(PRInt32 osfd, PRInt32 fd_type, 1.604 + PRIntervalTime timeout) 1.605 +{ 1.606 + PRInt32 rv = -1; 1.607 + int msecs; 1.608 + PRThread *me = _PR_MD_CURRENT_THREAD(); 1.609 + PRIntervalTime epoch, now, elapsed, remaining; 1.610 + PRBool wait_for_remaining; 1.611 + PRInt32 syserror; 1.612 + struct pollfd pfd; 1.613 + 1.614 + switch (timeout) { 1.615 + case PR_INTERVAL_NO_WAIT: 1.616 + PR_SetError(PR_IO_TIMEOUT_ERROR, 0); 1.617 + break; 1.618 + case PR_INTERVAL_NO_TIMEOUT: 1.619 + /* 1.620 + * This is a special case of the 'default' case below. 1.621 + * Please see the comments there. 1.622 + */ 1.623 + msecs = _PR_INTERRUPT_CHECK_INTERVAL_SECS * 1000; 1.624 + pfd.fd = osfd; 1.625 + if (fd_type == READ_FD) { 1.626 + pfd.events = POLLIN; 1.627 + } else { 1.628 + pfd.events = POLLOUT; 1.629 + } 1.630 + do { 1.631 + rv = _MD_POLL(&pfd, 1, msecs); 1.632 + if (rv == -1 && (syserror = _MD_ERRNO()) != EINTR) { 1.633 + _PR_MD_MAP_POLL_ERROR(syserror); 1.634 + break; 1.635 + } 1.636 + /* 1.637 + * If POLLERR is set, don't process it; retry the operation 1.638 + */ 1.639 + if ((rv == 1) && (pfd.revents & (POLLHUP | POLLNVAL))) { 1.640 + rv = -1; 1.641 + _PR_MD_MAP_POLL_REVENTS_ERROR(pfd.revents); 1.642 + break; 1.643 + } 1.644 + if (_PR_PENDING_INTERRUPT(me)) { 1.645 + me->flags &= ~_PR_INTERRUPT; 1.646 + PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0); 1.647 + rv = -1; 1.648 + break; 1.649 + } 1.650 + } while (rv == 0 || (rv == -1 && syserror == EINTR)); 1.651 + break; 1.652 + default: 1.653 + now = epoch = PR_IntervalNow(); 1.654 + remaining = timeout; 1.655 + pfd.fd = osfd; 1.656 + if (fd_type == READ_FD) { 1.657 + pfd.events = POLLIN; 1.658 + } else { 1.659 + pfd.events = POLLOUT; 1.660 + } 1.661 + do { 1.662 + /* 1.663 + * We block in _MD_POLL for at most 1.664 + * _PR_INTERRUPT_CHECK_INTERVAL_SECS seconds, 1.665 + * so that there is an upper limit on the delay 1.666 + * before the interrupt bit is checked. 
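Editor's note, not part of the patch: the poll() branch above treats POLLHUP and POLLNVAL in revents as hard failures, deliberately leaves a bare POLLERR for the subsequent I/O call to surface as a real errno, and otherwise only cares that the descriptor became ready. A small standalone sketch of that revents triage:

/* Editorial sketch -- not part of the patch. Classifies the result of a
 * single-descriptor poll() the way socket_io_wait does. */
#include <poll.h>

enum wait_result { WAIT_ERROR = -1, WAIT_TIMEOUT = 0, WAIT_READY = 1 };

static enum wait_result classify_poll(const struct pollfd *pfd, int rv)
{
    if (rv == 0)
        return WAIT_TIMEOUT;
    if (rv < 0)
        return WAIT_ERROR;                  /* poll() itself failed */
    if (pfd->revents & (POLLHUP | POLLNVAL))
        return WAIT_ERROR;                  /* peer hung up or fd is not open */
    return WAIT_READY;                      /* POLLIN/POLLOUT (or POLLERR): retry the op */
}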
1.667 + */ 1.668 + wait_for_remaining = PR_TRUE; 1.669 + msecs = PR_IntervalToMilliseconds(remaining); 1.670 + if (msecs > _PR_INTERRUPT_CHECK_INTERVAL_SECS * 1000) { 1.671 + wait_for_remaining = PR_FALSE; 1.672 + msecs = _PR_INTERRUPT_CHECK_INTERVAL_SECS * 1000; 1.673 + } 1.674 + rv = _MD_POLL(&pfd, 1, msecs); 1.675 + /* 1.676 + * we don't consider EINTR a real error 1.677 + */ 1.678 + if (rv == -1 && (syserror = _MD_ERRNO()) != EINTR) { 1.679 + _PR_MD_MAP_POLL_ERROR(syserror); 1.680 + break; 1.681 + } 1.682 + if (_PR_PENDING_INTERRUPT(me)) { 1.683 + me->flags &= ~_PR_INTERRUPT; 1.684 + PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0); 1.685 + rv = -1; 1.686 + break; 1.687 + } 1.688 + /* 1.689 + * If POLLERR is set, don't process it; retry the operation 1.690 + */ 1.691 + if ((rv == 1) && (pfd.revents & (POLLHUP | POLLNVAL))) { 1.692 + rv = -1; 1.693 + _PR_MD_MAP_POLL_REVENTS_ERROR(pfd.revents); 1.694 + break; 1.695 + } 1.696 + /* 1.697 + * We loop again if _MD_POLL timed out or got interrupted 1.698 + * by a signal, and the timeout deadline has not passed yet. 1.699 + */ 1.700 + if (rv == 0 || (rv == -1 && syserror == EINTR)) { 1.701 + /* 1.702 + * If _MD_POLL timed out, we know how much time 1.703 + * we spent in blocking, so we can avoid a 1.704 + * PR_IntervalNow() call. 1.705 + */ 1.706 + if (rv == 0) { 1.707 + if (wait_for_remaining) { 1.708 + now += remaining; 1.709 + } else { 1.710 + now += PR_MillisecondsToInterval(msecs); 1.711 + } 1.712 + } else { 1.713 + now = PR_IntervalNow(); 1.714 + } 1.715 + elapsed = (PRIntervalTime) (now - epoch); 1.716 + if (elapsed >= timeout) { 1.717 + PR_SetError(PR_IO_TIMEOUT_ERROR, 0); 1.718 + rv = -1; 1.719 + break; 1.720 + } else { 1.721 + remaining = timeout - elapsed; 1.722 + } 1.723 + } 1.724 + } while (rv == 0 || (rv == -1 && syserror == EINTR)); 1.725 + break; 1.726 + } 1.727 + return(rv); 1.728 +} 1.729 + 1.730 +#endif /* _PR_USE_POLL */ 1.731 + 1.732 +static PRInt32 local_io_wait( 1.733 + PRInt32 osfd, 1.734 + PRInt32 wait_flag, 1.735 + PRIntervalTime timeout) 1.736 +{ 1.737 + _PRUnixPollDesc pd; 1.738 + PRInt32 rv; 1.739 + 1.740 + PR_LOG(_pr_io_lm, PR_LOG_MIN, 1.741 + ("waiting to %s on osfd=%d", 1.742 + (wait_flag == _PR_UNIX_POLL_READ) ? "read" : "write", 1.743 + osfd)); 1.744 + 1.745 + if (timeout == PR_INTERVAL_NO_WAIT) return 0; 1.746 + 1.747 + pd.osfd = osfd; 1.748 + pd.in_flags = wait_flag; 1.749 + pd.out_flags = 0; 1.750 + 1.751 + rv = _PR_WaitForMultipleFDs(&pd, 1, timeout); 1.752 + 1.753 + if (rv == 0) { 1.754 + PR_SetError(PR_IO_TIMEOUT_ERROR, 0); 1.755 + rv = -1; 1.756 + } 1.757 + return rv; 1.758 +} 1.759 + 1.760 + 1.761 +PRInt32 _MD_recv(PRFileDesc *fd, void *buf, PRInt32 amount, 1.762 + PRInt32 flags, PRIntervalTime timeout) 1.763 +{ 1.764 + PRInt32 osfd = fd->secret->md.osfd; 1.765 + PRInt32 rv, err; 1.766 + PRThread *me = _PR_MD_CURRENT_THREAD(); 1.767 + 1.768 +/* 1.769 + * Many OS's (Solaris, Unixware) have a broken recv which won't read 1.770 + * from socketpairs. As long as we don't use flags on socketpairs, this 1.771 + * is a decent fix. 
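Editor's note, not part of the patch: the receive path that follows retries transparently on EINTR and, for a blocking NSPR descriptor (the underlying OS socket stays nonblocking), waits for readability when the call reports EAGAIN/EWOULDBLOCK. A condensed standalone version of that loop is sketched below with poll(); it uses read() rather than recv(), which is equivalent when no flags are passed, the same observation the "broken recv" workaround above relies on.

/* Editorial sketch -- not part of the patch. Condensed form of the
 * retry loop used by the recv/send wrappers: retry on EINTR, wait for
 * readiness on EAGAIN/EWOULDBLOCK, fail on anything else. */
#include <errno.h>
#include <poll.h>
#include <unistd.h>

static ssize_t read_retry(int fd, void *buf, size_t len, int timeout_ms)
{
    for (;;) {
        ssize_t rv = read(fd, buf, len);    /* == recv(fd, buf, len, 0) on sockets */
        if (rv >= 0)
            return rv;
        if (errno == EINTR)
            continue;                       /* interrupted: just retry */
        if (errno == EAGAIN || errno == EWOULDBLOCK) {
            struct pollfd pfd;
            pfd.fd = fd;
            pfd.events = POLLIN;
            if (poll(&pfd, 1, timeout_ms) <= 0)
                return -1;                  /* timeout or poll() error */
            continue;                       /* readiness reported: retry the read */
        }
        return -1;                          /* real error */
    }
}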
- mikep 1.772 + */ 1.773 +#if defined(UNIXWARE) || defined(SOLARIS) 1.774 + while ((rv = read(osfd,buf,amount)) == -1) { 1.775 +#else 1.776 + while ((rv = recv(osfd,buf,amount,flags)) == -1) { 1.777 +#endif 1.778 + err = _MD_ERRNO(); 1.779 + if ((err == EAGAIN) || (err == EWOULDBLOCK)) { 1.780 + if (fd->secret->nonblocking) { 1.781 + break; 1.782 + } 1.783 + if (!_PR_IS_NATIVE_THREAD(me)) { 1.784 + if ((rv = local_io_wait(osfd,_PR_UNIX_POLL_READ,timeout)) < 0) 1.785 + goto done; 1.786 + } else { 1.787 + if ((rv = socket_io_wait(osfd, READ_FD, timeout)) < 0) 1.788 + goto done; 1.789 + } 1.790 + } else if ((err == EINTR) && (!_PR_PENDING_INTERRUPT(me))){ 1.791 + continue; 1.792 + } else { 1.793 + break; 1.794 + } 1.795 + } 1.796 + if (rv < 0) { 1.797 + _PR_MD_MAP_RECV_ERROR(err); 1.798 + } 1.799 +done: 1.800 + return(rv); 1.801 +} 1.802 + 1.803 +PRInt32 _MD_recvfrom(PRFileDesc *fd, void *buf, PRInt32 amount, 1.804 + PRIntn flags, PRNetAddr *addr, PRUint32 *addrlen, 1.805 + PRIntervalTime timeout) 1.806 +{ 1.807 + PRInt32 osfd = fd->secret->md.osfd; 1.808 + PRInt32 rv, err; 1.809 + PRThread *me = _PR_MD_CURRENT_THREAD(); 1.810 + 1.811 + while ((*addrlen = PR_NETADDR_SIZE(addr)), 1.812 + ((rv = recvfrom(osfd, buf, amount, flags, 1.813 + (struct sockaddr *) addr, (_PRSockLen_t *)addrlen)) == -1)) { 1.814 + err = _MD_ERRNO(); 1.815 + if ((err == EAGAIN) || (err == EWOULDBLOCK)) { 1.816 + if (fd->secret->nonblocking) { 1.817 + break; 1.818 + } 1.819 + if (!_PR_IS_NATIVE_THREAD(me)) { 1.820 + if ((rv = local_io_wait(osfd, _PR_UNIX_POLL_READ, timeout)) < 0) 1.821 + goto done; 1.822 + } else { 1.823 + if ((rv = socket_io_wait(osfd, READ_FD, timeout)) < 0) 1.824 + goto done; 1.825 + } 1.826 + } else if ((err == EINTR) && (!_PR_PENDING_INTERRUPT(me))){ 1.827 + continue; 1.828 + } else { 1.829 + break; 1.830 + } 1.831 + } 1.832 + if (rv < 0) { 1.833 + _PR_MD_MAP_RECVFROM_ERROR(err); 1.834 + } 1.835 +done: 1.836 +#ifdef _PR_HAVE_SOCKADDR_LEN 1.837 + if (rv != -1) { 1.838 + /* ignore the sa_len field of struct sockaddr */ 1.839 + if (addr) { 1.840 + addr->raw.family = ((struct sockaddr *) addr)->sa_family; 1.841 + } 1.842 + } 1.843 +#endif /* _PR_HAVE_SOCKADDR_LEN */ 1.844 + return(rv); 1.845 +} 1.846 + 1.847 +PRInt32 _MD_send(PRFileDesc *fd, const void *buf, PRInt32 amount, 1.848 + PRInt32 flags, PRIntervalTime timeout) 1.849 +{ 1.850 + PRInt32 osfd = fd->secret->md.osfd; 1.851 + PRInt32 rv, err; 1.852 + PRThread *me = _PR_MD_CURRENT_THREAD(); 1.853 +#if defined(SOLARIS) 1.854 + PRInt32 tmp_amount = amount; 1.855 +#endif 1.856 + 1.857 + /* 1.858 + * On pre-2.6 Solaris, send() is much slower than write(). 1.859 + * On 2.6 and beyond, with in-kernel sockets, send() and 1.860 + * write() are fairly equivalent in performance. 
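Editor's note, not part of the patch: on Solaris the wrapper above substitutes write() for send() and, per the comment inside its loop, works around occasional ERANGE failures by halving the request size and retrying. A standalone sketch of that halving strategy (ERANGE from write() is a platform quirk, not portable behaviour):

/* Editorial sketch -- not part of the patch. Mirrors the Solaris ERANGE
 * workaround in _MD_send: on ERANGE, retry with half the byte count
 * until the write succeeds or the request shrinks to a single byte. */
#include <errno.h>
#include <unistd.h>

static ssize_t write_shrink_on_erange(int fd, const void *buf, size_t len)
{
    size_t attempt = len;

    for (;;) {
        ssize_t rv = write(fd, buf, attempt);
        if (rv >= 0)
            return rv;                      /* may be a partial write */
        if (errno == ERANGE && attempt > 1) {
            attempt /= 2;                   /* half the bytes and retry */
            continue;
        }
        return -1;
    }
}

Either way the caller still has to cope with a short write, which is why _MD_send waits for writability again when fewer than "amount" bytes went out before returning.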
1.861 + */ 1.862 +#if defined(SOLARIS) 1.863 + PR_ASSERT(0 == flags); 1.864 + while ((rv = write(osfd,buf,tmp_amount)) == -1) { 1.865 +#else 1.866 + while ((rv = send(osfd,buf,amount,flags)) == -1) { 1.867 +#endif 1.868 + err = _MD_ERRNO(); 1.869 + if ((err == EAGAIN) || (err == EWOULDBLOCK)) { 1.870 + if (fd->secret->nonblocking) { 1.871 + break; 1.872 + } 1.873 + if (!_PR_IS_NATIVE_THREAD(me)) { 1.874 + if ((rv = local_io_wait(osfd, _PR_UNIX_POLL_WRITE, timeout)) < 0) 1.875 + goto done; 1.876 + } else { 1.877 + if ((rv = socket_io_wait(osfd, WRITE_FD, timeout))< 0) 1.878 + goto done; 1.879 + } 1.880 + } else if ((err == EINTR) && (!_PR_PENDING_INTERRUPT(me))){ 1.881 + continue; 1.882 + } else { 1.883 +#if defined(SOLARIS) 1.884 + /* 1.885 + * The write system call has been reported to return the ERANGE 1.886 + * error on occasion. Try to write in smaller chunks to workaround 1.887 + * this bug. 1.888 + */ 1.889 + if (err == ERANGE) { 1.890 + if (tmp_amount > 1) { 1.891 + tmp_amount = tmp_amount/2; /* half the bytes */ 1.892 + continue; 1.893 + } 1.894 + } 1.895 +#endif 1.896 + break; 1.897 + } 1.898 + } 1.899 + /* 1.900 + * optimization; if bytes sent is less than "amount" call 1.901 + * select before returning. This is because it is likely that 1.902 + * the next send() call will return EWOULDBLOCK. 1.903 + */ 1.904 + if ((!fd->secret->nonblocking) && (rv > 0) && (rv < amount) 1.905 + && (timeout != PR_INTERVAL_NO_WAIT)) { 1.906 + if (_PR_IS_NATIVE_THREAD(me)) { 1.907 + if (socket_io_wait(osfd, WRITE_FD, timeout)< 0) { 1.908 + rv = -1; 1.909 + goto done; 1.910 + } 1.911 + } else { 1.912 + if (local_io_wait(osfd, _PR_UNIX_POLL_WRITE, timeout) < 0) { 1.913 + rv = -1; 1.914 + goto done; 1.915 + } 1.916 + } 1.917 + } 1.918 + if (rv < 0) { 1.919 + _PR_MD_MAP_SEND_ERROR(err); 1.920 + } 1.921 +done: 1.922 + return(rv); 1.923 +} 1.924 + 1.925 +PRInt32 _MD_sendto( 1.926 + PRFileDesc *fd, const void *buf, PRInt32 amount, PRIntn flags, 1.927 + const PRNetAddr *addr, PRUint32 addrlen, PRIntervalTime timeout) 1.928 +{ 1.929 + PRInt32 osfd = fd->secret->md.osfd; 1.930 + PRInt32 rv, err; 1.931 + PRThread *me = _PR_MD_CURRENT_THREAD(); 1.932 +#ifdef _PR_HAVE_SOCKADDR_LEN 1.933 + PRNetAddr addrCopy; 1.934 + 1.935 + addrCopy = *addr; 1.936 + ((struct sockaddr *) &addrCopy)->sa_len = addrlen; 1.937 + ((struct sockaddr *) &addrCopy)->sa_family = addr->raw.family; 1.938 + 1.939 + while ((rv = sendto(osfd, buf, amount, flags, 1.940 + (struct sockaddr *) &addrCopy, addrlen)) == -1) { 1.941 +#else 1.942 + while ((rv = sendto(osfd, buf, amount, flags, 1.943 + (struct sockaddr *) addr, addrlen)) == -1) { 1.944 +#endif 1.945 + err = _MD_ERRNO(); 1.946 + if ((err == EAGAIN) || (err == EWOULDBLOCK)) { 1.947 + if (fd->secret->nonblocking) { 1.948 + break; 1.949 + } 1.950 + if (!_PR_IS_NATIVE_THREAD(me)) { 1.951 + if ((rv = local_io_wait(osfd, _PR_UNIX_POLL_WRITE, timeout)) < 0) 1.952 + goto done; 1.953 + } else { 1.954 + if ((rv = socket_io_wait(osfd, WRITE_FD, timeout))< 0) 1.955 + goto done; 1.956 + } 1.957 + } else if ((err == EINTR) && (!_PR_PENDING_INTERRUPT(me))){ 1.958 + continue; 1.959 + } else { 1.960 + break; 1.961 + } 1.962 + } 1.963 + if (rv < 0) { 1.964 + _PR_MD_MAP_SENDTO_ERROR(err); 1.965 + } 1.966 +done: 1.967 + return(rv); 1.968 +} 1.969 + 1.970 +PRInt32 _MD_writev( 1.971 + PRFileDesc *fd, const PRIOVec *iov, 1.972 + PRInt32 iov_size, PRIntervalTime timeout) 1.973 +{ 1.974 + PRInt32 rv, err; 1.975 + PRThread *me = _PR_MD_CURRENT_THREAD(); 1.976 + PRInt32 index, amount = 0; 1.977 + PRInt32 osfd = 
fd->secret->md.osfd; 1.978 + 1.979 + /* 1.980 + * Calculate the total number of bytes to be sent; needed for 1.981 + * optimization later. 1.982 + * We could avoid this if this number was passed in; but it is 1.983 + * probably not a big deal because iov_size is usually small (less than 1.984 + * 3) 1.985 + */ 1.986 + if (!fd->secret->nonblocking) { 1.987 + for (index=0; index<iov_size; index++) { 1.988 + amount += iov[index].iov_len; 1.989 + } 1.990 + } 1.991 + 1.992 + while ((rv = writev(osfd, (const struct iovec*)iov, iov_size)) == -1) { 1.993 + err = _MD_ERRNO(); 1.994 + if ((err == EAGAIN) || (err == EWOULDBLOCK)) { 1.995 + if (fd->secret->nonblocking) { 1.996 + break; 1.997 + } 1.998 + if (!_PR_IS_NATIVE_THREAD(me)) { 1.999 + if ((rv = local_io_wait(osfd, _PR_UNIX_POLL_WRITE, timeout)) < 0) 1.1000 + goto done; 1.1001 + } else { 1.1002 + if ((rv = socket_io_wait(osfd, WRITE_FD, timeout))<0) 1.1003 + goto done; 1.1004 + } 1.1005 + } else if ((err == EINTR) && (!_PR_PENDING_INTERRUPT(me))){ 1.1006 + continue; 1.1007 + } else { 1.1008 + break; 1.1009 + } 1.1010 + } 1.1011 + /* 1.1012 + * optimization; if bytes sent is less than "amount" call 1.1013 + * select before returning. This is because it is likely that 1.1014 + * the next writev() call will return EWOULDBLOCK. 1.1015 + */ 1.1016 + if ((!fd->secret->nonblocking) && (rv > 0) && (rv < amount) 1.1017 + && (timeout != PR_INTERVAL_NO_WAIT)) { 1.1018 + if (_PR_IS_NATIVE_THREAD(me)) { 1.1019 + if (socket_io_wait(osfd, WRITE_FD, timeout) < 0) { 1.1020 + rv = -1; 1.1021 + goto done; 1.1022 + } 1.1023 + } else { 1.1024 + if (local_io_wait(osfd, _PR_UNIX_POLL_WRITE, timeout) < 0) { 1.1025 + rv = -1; 1.1026 + goto done; 1.1027 + } 1.1028 + } 1.1029 + } 1.1030 + if (rv < 0) { 1.1031 + _PR_MD_MAP_WRITEV_ERROR(err); 1.1032 + } 1.1033 +done: 1.1034 + return(rv); 1.1035 +} 1.1036 + 1.1037 +PRInt32 _MD_accept(PRFileDesc *fd, PRNetAddr *addr, 1.1038 + PRUint32 *addrlen, PRIntervalTime timeout) 1.1039 +{ 1.1040 + PRInt32 osfd = fd->secret->md.osfd; 1.1041 + PRInt32 rv, err; 1.1042 + PRThread *me = _PR_MD_CURRENT_THREAD(); 1.1043 + 1.1044 + while ((rv = accept(osfd, (struct sockaddr *) addr, 1.1045 + (_PRSockLen_t *)addrlen)) == -1) { 1.1046 + err = _MD_ERRNO(); 1.1047 + if ((err == EAGAIN) || (err == EWOULDBLOCK) || (err == ECONNABORTED)) { 1.1048 + if (fd->secret->nonblocking) { 1.1049 + break; 1.1050 + } 1.1051 + if (!_PR_IS_NATIVE_THREAD(me)) { 1.1052 + if ((rv = local_io_wait(osfd, _PR_UNIX_POLL_READ, timeout)) < 0) 1.1053 + goto done; 1.1054 + } else { 1.1055 + if ((rv = socket_io_wait(osfd, READ_FD, timeout)) < 0) 1.1056 + goto done; 1.1057 + } 1.1058 + } else if ((err == EINTR) && (!_PR_PENDING_INTERRUPT(me))){ 1.1059 + continue; 1.1060 + } else { 1.1061 + break; 1.1062 + } 1.1063 + } 1.1064 + if (rv < 0) { 1.1065 + _PR_MD_MAP_ACCEPT_ERROR(err); 1.1066 + } 1.1067 +done: 1.1068 +#ifdef _PR_HAVE_SOCKADDR_LEN 1.1069 + if (rv != -1) { 1.1070 + /* ignore the sa_len field of struct sockaddr */ 1.1071 + if (addr) { 1.1072 + addr->raw.family = ((struct sockaddr *) addr)->sa_family; 1.1073 + } 1.1074 + } 1.1075 +#endif /* _PR_HAVE_SOCKADDR_LEN */ 1.1076 + return(rv); 1.1077 +} 1.1078 + 1.1079 +extern int _connect (int s, const struct sockaddr *name, int namelen); 1.1080 +PRInt32 _MD_connect( 1.1081 + PRFileDesc *fd, const PRNetAddr *addr, PRUint32 addrlen, PRIntervalTime timeout) 1.1082 +{ 1.1083 + PRInt32 rv, err; 1.1084 + PRThread *me = _PR_MD_CURRENT_THREAD(); 1.1085 + PRInt32 osfd = fd->secret->md.osfd; 1.1086 +#ifdef IRIX 1.1087 +extern PRInt32 
_MD_irix_connect( 1.1088 + PRInt32 osfd, const PRNetAddr *addr, PRInt32 addrlen, PRIntervalTime timeout); 1.1089 +#endif 1.1090 +#ifdef _PR_HAVE_SOCKADDR_LEN 1.1091 + PRNetAddr addrCopy; 1.1092 + 1.1093 + addrCopy = *addr; 1.1094 + ((struct sockaddr *) &addrCopy)->sa_len = addrlen; 1.1095 + ((struct sockaddr *) &addrCopy)->sa_family = addr->raw.family; 1.1096 +#endif 1.1097 + 1.1098 + /* 1.1099 + * We initiate the connection setup by making a nonblocking connect() 1.1100 + * call. If the connect() call fails, there are two cases we handle 1.1101 + * specially: 1.1102 + * 1. The connect() call was interrupted by a signal. In this case 1.1103 + * we simply retry connect(). 1.1104 + * 2. The NSPR socket is nonblocking and connect() fails with 1.1105 + * EINPROGRESS. We first wait until the socket becomes writable. 1.1106 + * Then we try to find out whether the connection setup succeeded 1.1107 + * or failed. 1.1108 + */ 1.1109 + 1.1110 +retry: 1.1111 +#ifdef IRIX 1.1112 + if ((rv = _MD_irix_connect(osfd, addr, addrlen, timeout)) == -1) { 1.1113 +#else 1.1114 +#ifdef _PR_HAVE_SOCKADDR_LEN 1.1115 + if ((rv = connect(osfd, (struct sockaddr *)&addrCopy, addrlen)) == -1) { 1.1116 +#else 1.1117 + if ((rv = connect(osfd, (struct sockaddr *)addr, addrlen)) == -1) { 1.1118 +#endif 1.1119 +#endif 1.1120 + err = _MD_ERRNO(); 1.1121 + 1.1122 + if (err == EINTR) { 1.1123 + if (_PR_PENDING_INTERRUPT(me)) { 1.1124 + me->flags &= ~_PR_INTERRUPT; 1.1125 + PR_SetError( PR_PENDING_INTERRUPT_ERROR, 0); 1.1126 + return -1; 1.1127 + } 1.1128 + goto retry; 1.1129 + } 1.1130 + 1.1131 + if (!fd->secret->nonblocking && (err == EINPROGRESS)) { 1.1132 + if (!_PR_IS_NATIVE_THREAD(me)) { 1.1133 + 1.1134 + if ((rv = local_io_wait(osfd, _PR_UNIX_POLL_WRITE, timeout)) < 0) 1.1135 + return -1; 1.1136 + } else { 1.1137 + /* 1.1138 + * socket_io_wait() may return -1 or 1. 
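Editor's note, not part of the patch: this connect path follows the classic nonblocking-connect recipe described in the comment above: issue connect(), and on EINPROGRESS wait for the socket to become writable, then ask the socket what actually happened. The code below does that via _MD_unix_get_nonblocking_connect_error(); on most Unixes that boils down to getsockopt(SO_ERROR), which is what this hedged standalone sketch uses.

/* Editorial sketch -- not part of the patch. The usual nonblocking
 * connect sequence: connect(), wait for writability on EINPROGRESS,
 * then read the deferred result with getsockopt(SO_ERROR). */
#include <errno.h>
#include <poll.h>
#include <sys/socket.h>

static int connect_nonblocking(int fd, const struct sockaddr *sa,
                               socklen_t salen, int timeout_ms)
{
    if (connect(fd, sa, salen) == 0)
        return 0;                           /* connected immediately */
    if (errno != EINPROGRESS)
        return -1;                          /* immediate failure */

    {
        struct pollfd pfd;
        int soerr = 0;
        socklen_t len = sizeof(soerr);

        pfd.fd = fd;
        pfd.events = POLLOUT;
        if (poll(&pfd, 1, timeout_ms) != 1)
            return -1;                      /* timeout or poll() error */
        if (getsockopt(fd, SOL_SOCKET, SO_ERROR, &soerr, &len) != 0)
            return -1;
        if (soerr != 0) {
            errno = soerr;                  /* the real connect() failure */
            return -1;
        }
    }
    return 0;                               /* connection established */
}

The original additionally retries the connect() call itself when it is interrupted by a signal, as long as no NSPR interrupt is pending.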
1.1139 + */ 1.1140 + 1.1141 + rv = socket_io_wait(osfd, WRITE_FD, timeout); 1.1142 + if (rv == -1) { 1.1143 + return -1; 1.1144 + } 1.1145 + } 1.1146 + 1.1147 + PR_ASSERT(rv == 1); 1.1148 + if (_PR_PENDING_INTERRUPT(me)) { 1.1149 + me->flags &= ~_PR_INTERRUPT; 1.1150 + PR_SetError( PR_PENDING_INTERRUPT_ERROR, 0); 1.1151 + return -1; 1.1152 + } 1.1153 + err = _MD_unix_get_nonblocking_connect_error(osfd); 1.1154 + if (err != 0) { 1.1155 + _PR_MD_MAP_CONNECT_ERROR(err); 1.1156 + return -1; 1.1157 + } 1.1158 + return 0; 1.1159 + } 1.1160 + 1.1161 + _PR_MD_MAP_CONNECT_ERROR(err); 1.1162 + } 1.1163 + 1.1164 + return rv; 1.1165 +} /* _MD_connect */ 1.1166 + 1.1167 +PRInt32 _MD_bind(PRFileDesc *fd, const PRNetAddr *addr, PRUint32 addrlen) 1.1168 +{ 1.1169 + PRInt32 rv, err; 1.1170 +#ifdef _PR_HAVE_SOCKADDR_LEN 1.1171 + PRNetAddr addrCopy; 1.1172 + 1.1173 + addrCopy = *addr; 1.1174 + ((struct sockaddr *) &addrCopy)->sa_len = addrlen; 1.1175 + ((struct sockaddr *) &addrCopy)->sa_family = addr->raw.family; 1.1176 + rv = bind(fd->secret->md.osfd, (struct sockaddr *) &addrCopy, (int )addrlen); 1.1177 +#else 1.1178 + rv = bind(fd->secret->md.osfd, (struct sockaddr *) addr, (int )addrlen); 1.1179 +#endif 1.1180 + if (rv < 0) { 1.1181 + err = _MD_ERRNO(); 1.1182 + _PR_MD_MAP_BIND_ERROR(err); 1.1183 + } 1.1184 + return(rv); 1.1185 +} 1.1186 + 1.1187 +PRInt32 _MD_listen(PRFileDesc *fd, PRIntn backlog) 1.1188 +{ 1.1189 + PRInt32 rv, err; 1.1190 + 1.1191 + rv = listen(fd->secret->md.osfd, backlog); 1.1192 + if (rv < 0) { 1.1193 + err = _MD_ERRNO(); 1.1194 + _PR_MD_MAP_LISTEN_ERROR(err); 1.1195 + } 1.1196 + return(rv); 1.1197 +} 1.1198 + 1.1199 +PRInt32 _MD_shutdown(PRFileDesc *fd, PRIntn how) 1.1200 +{ 1.1201 + PRInt32 rv, err; 1.1202 + 1.1203 + rv = shutdown(fd->secret->md.osfd, how); 1.1204 + if (rv < 0) { 1.1205 + err = _MD_ERRNO(); 1.1206 + _PR_MD_MAP_SHUTDOWN_ERROR(err); 1.1207 + } 1.1208 + return(rv); 1.1209 +} 1.1210 + 1.1211 +PRInt32 _MD_socketpair(int af, int type, int flags, 1.1212 + PRInt32 *osfd) 1.1213 +{ 1.1214 + PRInt32 rv, err; 1.1215 + 1.1216 + rv = socketpair(af, type, flags, osfd); 1.1217 + if (rv < 0) { 1.1218 + err = _MD_ERRNO(); 1.1219 + _PR_MD_MAP_SOCKETPAIR_ERROR(err); 1.1220 + } 1.1221 + return rv; 1.1222 +} 1.1223 + 1.1224 +PRStatus _MD_getsockname(PRFileDesc *fd, PRNetAddr *addr, 1.1225 + PRUint32 *addrlen) 1.1226 +{ 1.1227 + PRInt32 rv, err; 1.1228 + 1.1229 + rv = getsockname(fd->secret->md.osfd, 1.1230 + (struct sockaddr *) addr, (_PRSockLen_t *)addrlen); 1.1231 +#ifdef _PR_HAVE_SOCKADDR_LEN 1.1232 + if (rv == 0) { 1.1233 + /* ignore the sa_len field of struct sockaddr */ 1.1234 + if (addr) { 1.1235 + addr->raw.family = ((struct sockaddr *) addr)->sa_family; 1.1236 + } 1.1237 + } 1.1238 +#endif /* _PR_HAVE_SOCKADDR_LEN */ 1.1239 + if (rv < 0) { 1.1240 + err = _MD_ERRNO(); 1.1241 + _PR_MD_MAP_GETSOCKNAME_ERROR(err); 1.1242 + } 1.1243 + return rv==0?PR_SUCCESS:PR_FAILURE; 1.1244 +} 1.1245 + 1.1246 +PRStatus _MD_getpeername(PRFileDesc *fd, PRNetAddr *addr, 1.1247 + PRUint32 *addrlen) 1.1248 +{ 1.1249 + PRInt32 rv, err; 1.1250 + 1.1251 + rv = getpeername(fd->secret->md.osfd, 1.1252 + (struct sockaddr *) addr, (_PRSockLen_t *)addrlen); 1.1253 +#ifdef _PR_HAVE_SOCKADDR_LEN 1.1254 + if (rv == 0) { 1.1255 + /* ignore the sa_len field of struct sockaddr */ 1.1256 + if (addr) { 1.1257 + addr->raw.family = ((struct sockaddr *) addr)->sa_family; 1.1258 + } 1.1259 + } 1.1260 +#endif /* _PR_HAVE_SOCKADDR_LEN */ 1.1261 + if (rv < 0) { 1.1262 + err = _MD_ERRNO(); 1.1263 + 
_PR_MD_MAP_GETPEERNAME_ERROR(err); 1.1264 + } 1.1265 + return rv==0?PR_SUCCESS:PR_FAILURE; 1.1266 +} 1.1267 + 1.1268 +PRStatus _MD_getsockopt(PRFileDesc *fd, PRInt32 level, 1.1269 + PRInt32 optname, char* optval, PRInt32* optlen) 1.1270 +{ 1.1271 + PRInt32 rv, err; 1.1272 + 1.1273 + rv = getsockopt(fd->secret->md.osfd, level, optname, optval, (_PRSockLen_t *)optlen); 1.1274 + if (rv < 0) { 1.1275 + err = _MD_ERRNO(); 1.1276 + _PR_MD_MAP_GETSOCKOPT_ERROR(err); 1.1277 + } 1.1278 + return rv==0?PR_SUCCESS:PR_FAILURE; 1.1279 +} 1.1280 + 1.1281 +PRStatus _MD_setsockopt(PRFileDesc *fd, PRInt32 level, 1.1282 + PRInt32 optname, const char* optval, PRInt32 optlen) 1.1283 +{ 1.1284 + PRInt32 rv, err; 1.1285 + 1.1286 + rv = setsockopt(fd->secret->md.osfd, level, optname, optval, optlen); 1.1287 + if (rv < 0) { 1.1288 + err = _MD_ERRNO(); 1.1289 + _PR_MD_MAP_SETSOCKOPT_ERROR(err); 1.1290 + } 1.1291 + return rv==0?PR_SUCCESS:PR_FAILURE; 1.1292 +} 1.1293 + 1.1294 +PRStatus _MD_set_fd_inheritable(PRFileDesc *fd, PRBool inheritable) 1.1295 +{ 1.1296 + int rv; 1.1297 + 1.1298 + rv = fcntl(fd->secret->md.osfd, F_SETFD, inheritable ? 0 : FD_CLOEXEC); 1.1299 + if (-1 == rv) { 1.1300 + PR_SetError(PR_UNKNOWN_ERROR, _MD_ERRNO()); 1.1301 + return PR_FAILURE; 1.1302 + } 1.1303 + return PR_SUCCESS; 1.1304 +} 1.1305 + 1.1306 +void _MD_init_fd_inheritable(PRFileDesc *fd, PRBool imported) 1.1307 +{ 1.1308 + if (imported) { 1.1309 + fd->secret->inheritable = _PR_TRI_UNKNOWN; 1.1310 + } else { 1.1311 + /* By default, a Unix fd is not closed on exec. */ 1.1312 +#ifdef DEBUG 1.1313 + { 1.1314 + int flags = fcntl(fd->secret->md.osfd, F_GETFD, 0); 1.1315 + PR_ASSERT(0 == flags); 1.1316 + } 1.1317 +#endif 1.1318 + fd->secret->inheritable = _PR_TRI_TRUE; 1.1319 + } 1.1320 +} 1.1321 + 1.1322 +/************************************************************************/ 1.1323 +#if !defined(_PR_USE_POLL) 1.1324 + 1.1325 +/* 1.1326 +** Scan through io queue and find any bad fd's that triggered the error 1.1327 +** from _MD_SELECT 1.1328 +*/ 1.1329 +static void FindBadFDs(void) 1.1330 +{ 1.1331 + PRCList *q; 1.1332 + PRThread *me = _MD_CURRENT_THREAD(); 1.1333 + 1.1334 + PR_ASSERT(!_PR_IS_NATIVE_THREAD(me)); 1.1335 + q = (_PR_IOQ(me->cpu)).next; 1.1336 + _PR_IOQ_MAX_OSFD(me->cpu) = -1; 1.1337 + _PR_IOQ_TIMEOUT(me->cpu) = PR_INTERVAL_NO_TIMEOUT; 1.1338 + while (q != &_PR_IOQ(me->cpu)) { 1.1339 + PRPollQueue *pq = _PR_POLLQUEUE_PTR(q); 1.1340 + PRBool notify = PR_FALSE; 1.1341 + _PRUnixPollDesc *pds = pq->pds; 1.1342 + _PRUnixPollDesc *epds = pds + pq->npds; 1.1343 + PRInt32 pq_max_osfd = -1; 1.1344 + 1.1345 + q = q->next; 1.1346 + for (; pds < epds; pds++) { 1.1347 + PRInt32 osfd = pds->osfd; 1.1348 + pds->out_flags = 0; 1.1349 + PR_ASSERT(osfd >= 0 || pds->in_flags == 0); 1.1350 + if (pds->in_flags == 0) { 1.1351 + continue; /* skip this fd */ 1.1352 + } 1.1353 + if (fcntl(osfd, F_GETFL, 0) == -1) { 1.1354 + /* Found a bad descriptor, remove it from the fd_sets. 
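Editor's note, not part of the patch: FindBadFDs, which starts just above, pinpoints the descriptor that made select() fail with EBADF by probing each queued fd with fcntl(F_GETFL). The probe itself is tiny and standalone:

/* Editorial sketch -- not part of the patch. The probe FindBadFDs uses:
 * an fd is "bad" if even fcntl(F_GETFL) rejects it. */
#include <fcntl.h>

static int fd_is_bad(int fd)
{
    return fcntl(fd, F_GETFL, 0) == -1;     /* typically EBADF for a stale/closed fd */
}

Descriptors flagged this way get _PR_UNIX_POLL_NVAL in their out_flags, so the waiting thread is woken with an error instead of being left to block forever.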
*/ 1.1355 + PR_LOG(_pr_io_lm, PR_LOG_MAX, 1.1356 + ("file descriptor %d is bad", osfd)); 1.1357 + pds->out_flags = _PR_UNIX_POLL_NVAL; 1.1358 + notify = PR_TRUE; 1.1359 + } 1.1360 + if (osfd > pq_max_osfd) { 1.1361 + pq_max_osfd = osfd; 1.1362 + } 1.1363 + } 1.1364 + 1.1365 + if (notify) { 1.1366 + PRIntn pri; 1.1367 + PR_REMOVE_LINK(&pq->links); 1.1368 + pq->on_ioq = PR_FALSE; 1.1369 + 1.1370 + /* 1.1371 + * Decrement the count of descriptors for each desciptor/event 1.1372 + * because this I/O request is being removed from the 1.1373 + * ioq 1.1374 + */ 1.1375 + pds = pq->pds; 1.1376 + for (; pds < epds; pds++) { 1.1377 + PRInt32 osfd = pds->osfd; 1.1378 + PRInt16 in_flags = pds->in_flags; 1.1379 + PR_ASSERT(osfd >= 0 || in_flags == 0); 1.1380 + if (in_flags & _PR_UNIX_POLL_READ) { 1.1381 + if (--(_PR_FD_READ_CNT(me->cpu))[osfd] == 0) 1.1382 + FD_CLR(osfd, &_PR_FD_READ_SET(me->cpu)); 1.1383 + } 1.1384 + if (in_flags & _PR_UNIX_POLL_WRITE) { 1.1385 + if (--(_PR_FD_WRITE_CNT(me->cpu))[osfd] == 0) 1.1386 + FD_CLR(osfd, &_PR_FD_WRITE_SET(me->cpu)); 1.1387 + } 1.1388 + if (in_flags & _PR_UNIX_POLL_EXCEPT) { 1.1389 + if (--(_PR_FD_EXCEPTION_CNT(me->cpu))[osfd] == 0) 1.1390 + FD_CLR(osfd, &_PR_FD_EXCEPTION_SET(me->cpu)); 1.1391 + } 1.1392 + } 1.1393 + 1.1394 + _PR_THREAD_LOCK(pq->thr); 1.1395 + if (pq->thr->flags & (_PR_ON_PAUSEQ|_PR_ON_SLEEPQ)) { 1.1396 + _PRCPU *cpu = pq->thr->cpu; 1.1397 + _PR_SLEEPQ_LOCK(pq->thr->cpu); 1.1398 + _PR_DEL_SLEEPQ(pq->thr, PR_TRUE); 1.1399 + _PR_SLEEPQ_UNLOCK(pq->thr->cpu); 1.1400 + 1.1401 + if (pq->thr->flags & _PR_SUSPENDING) { 1.1402 + /* 1.1403 + * set thread state to SUSPENDED; 1.1404 + * a Resume operation on the thread 1.1405 + * will move it to the runQ 1.1406 + */ 1.1407 + pq->thr->state = _PR_SUSPENDED; 1.1408 + _PR_MISCQ_LOCK(pq->thr->cpu); 1.1409 + _PR_ADD_SUSPENDQ(pq->thr, pq->thr->cpu); 1.1410 + _PR_MISCQ_UNLOCK(pq->thr->cpu); 1.1411 + } else { 1.1412 + pri = pq->thr->priority; 1.1413 + pq->thr->state = _PR_RUNNABLE; 1.1414 + 1.1415 + _PR_RUNQ_LOCK(cpu); 1.1416 + _PR_ADD_RUNQ(pq->thr, cpu, pri); 1.1417 + _PR_RUNQ_UNLOCK(cpu); 1.1418 + } 1.1419 + } 1.1420 + _PR_THREAD_UNLOCK(pq->thr); 1.1421 + } else { 1.1422 + if (pq->timeout < _PR_IOQ_TIMEOUT(me->cpu)) 1.1423 + _PR_IOQ_TIMEOUT(me->cpu) = pq->timeout; 1.1424 + if (_PR_IOQ_MAX_OSFD(me->cpu) < pq_max_osfd) 1.1425 + _PR_IOQ_MAX_OSFD(me->cpu) = pq_max_osfd; 1.1426 + } 1.1427 + } 1.1428 + if (_PR_IS_NATIVE_THREAD_SUPPORTED()) { 1.1429 + if (_PR_IOQ_MAX_OSFD(me->cpu) < _pr_md_pipefd[0]) 1.1430 + _PR_IOQ_MAX_OSFD(me->cpu) = _pr_md_pipefd[0]; 1.1431 + } 1.1432 +} 1.1433 +#endif /* !defined(_PR_USE_POLL) */ 1.1434 + 1.1435 +/************************************************************************/ 1.1436 + 1.1437 +/* 1.1438 +** Called by the scheduler when there is nothing to do. This means that 1.1439 +** all threads are blocked on some monitor somewhere. 1.1440 +** 1.1441 +** Note: this code doesn't release the scheduler lock. 1.1442 +*/ 1.1443 +/* 1.1444 +** Pause the current CPU. 
longjmp to the cpu's pause stack 1.1445 +** 1.1446 +** This must be called with the scheduler locked 1.1447 +*/ 1.1448 +void _MD_PauseCPU(PRIntervalTime ticks) 1.1449 +{ 1.1450 + PRThread *me = _MD_CURRENT_THREAD(); 1.1451 +#ifdef _PR_USE_POLL 1.1452 + int timeout; 1.1453 + struct pollfd *pollfds; /* an array of pollfd structures */ 1.1454 + struct pollfd *pollfdPtr; /* a pointer that steps through the array */ 1.1455 + unsigned long npollfds; /* number of pollfd structures in array */ 1.1456 + unsigned long pollfds_size; 1.1457 + int nfd; /* to hold the return value of poll() */ 1.1458 +#else 1.1459 + struct timeval timeout, *tvp; 1.1460 + fd_set r, w, e; 1.1461 + fd_set *rp, *wp, *ep; 1.1462 + PRInt32 max_osfd, nfd; 1.1463 +#endif /* _PR_USE_POLL */ 1.1464 + PRInt32 rv; 1.1465 + PRCList *q; 1.1466 + PRUint32 min_timeout; 1.1467 + sigset_t oldset; 1.1468 +#ifdef IRIX 1.1469 +extern sigset_t ints_off; 1.1470 +#endif 1.1471 + 1.1472 + PR_ASSERT(_PR_MD_GET_INTSOFF() != 0); 1.1473 + 1.1474 + _PR_MD_IOQ_LOCK(); 1.1475 + 1.1476 +#ifdef _PR_USE_POLL 1.1477 + /* Build up the pollfd structure array to wait on */ 1.1478 + 1.1479 + /* Find out how many pollfd structures are needed */ 1.1480 + npollfds = _PR_IOQ_OSFD_CNT(me->cpu); 1.1481 + PR_ASSERT(npollfds >= 0); 1.1482 + 1.1483 + /* 1.1484 + * We use a pipe to wake up a native thread. An fd is needed 1.1485 + * for the pipe and we poll it for reading. 1.1486 + */ 1.1487 + if (_PR_IS_NATIVE_THREAD_SUPPORTED()) { 1.1488 + npollfds++; 1.1489 +#ifdef IRIX 1.1490 + /* 1.1491 + * On Irix, a second pipe is used to cause the primordial cpu to 1.1492 + * wakeup and exit, when the process is exiting because of a call 1.1493 + * to exit/PR_ProcessExit. 1.1494 + */ 1.1495 + if (me->cpu->id == 0) { 1.1496 + npollfds++; 1.1497 + } 1.1498 +#endif 1.1499 + } 1.1500 + 1.1501 + /* 1.1502 + * if the cpu's pollfd array is not big enough, release it and allocate a new one 1.1503 + */ 1.1504 + if (npollfds > _PR_IOQ_POLLFDS_SIZE(me->cpu)) { 1.1505 + if (_PR_IOQ_POLLFDS(me->cpu) != NULL) 1.1506 + PR_DELETE(_PR_IOQ_POLLFDS(me->cpu)); 1.1507 + pollfds_size = PR_MAX(_PR_IOQ_MIN_POLLFDS_SIZE(me->cpu), npollfds); 1.1508 + pollfds = (struct pollfd *) PR_MALLOC(pollfds_size * sizeof(struct pollfd)); 1.1509 + _PR_IOQ_POLLFDS(me->cpu) = pollfds; 1.1510 + _PR_IOQ_POLLFDS_SIZE(me->cpu) = pollfds_size; 1.1511 + } else { 1.1512 + pollfds = _PR_IOQ_POLLFDS(me->cpu); 1.1513 + } 1.1514 + pollfdPtr = pollfds; 1.1515 + 1.1516 + /* 1.1517 + * If we need to poll the pipe for waking up a native thread, 1.1518 + * the pipe's fd is the first element in the pollfds array. 
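Editor's note, not part of the patch: as the comment above says, a pipe is what lets another (native) thread knock the idle CPU out of poll()/select(): the pipe's read end sits first in the pollfd array, and a waker writes a byte into the write end. A condensed standalone sketch of the mechanism, matching _MD_InitCPUS and _MD_Wakeup_CPUs later in this file (both ends nonblocking, the waker tolerates a full pipe, the sleeper drains it); the names are hypothetical.

/* Editorial sketch -- not part of the patch. The self-pipe wakeup used
 * by the idle loop: poll the pipe's read end alongside real work fds,
 * write a byte to wake the poller, drain the pipe once woken. */
#include <fcntl.h>
#include <unistd.h>

static int wake_fds[2];                      /* [0] = read end, [1] = write end */

static int wakeup_init(void)
{
    if (pipe(wake_fds) != 0)
        return -1;
    fcntl(wake_fds[0], F_SETFL, fcntl(wake_fds[0], F_GETFL, 0) | O_NONBLOCK);
    fcntl(wake_fds[1], F_SETFL, fcntl(wake_fds[1], F_GETFL, 0) | O_NONBLOCK);
    return 0;
}

static void wakeup_poller(void)
{
    char byte = 0;
    /* EAGAIN here just means the pipe is already full of pending wakeups. */
    (void) write(wake_fds[1], &byte, 1);
}

static void drain_wakeups(void)
{
    char buf[512];
    while (read(wake_fds[0], buf, sizeof buf) > 0)
        ;                                    /* empty the pipe; EAGAIN ends the loop */
}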
1.1519 + */ 1.1520 + if (_PR_IS_NATIVE_THREAD_SUPPORTED()) { 1.1521 + pollfdPtr->fd = _pr_md_pipefd[0]; 1.1522 + pollfdPtr->events = POLLIN; 1.1523 + pollfdPtr++; 1.1524 +#ifdef IRIX 1.1525 + /* 1.1526 + * On Irix, the second element is the exit pipe 1.1527 + */ 1.1528 + if (me->cpu->id == 0) { 1.1529 + pollfdPtr->fd = _pr_irix_primoridal_cpu_fd[0]; 1.1530 + pollfdPtr->events = POLLIN; 1.1531 + pollfdPtr++; 1.1532 + } 1.1533 +#endif 1.1534 + } 1.1535 + 1.1536 + min_timeout = PR_INTERVAL_NO_TIMEOUT; 1.1537 + for (q = _PR_IOQ(me->cpu).next; q != &_PR_IOQ(me->cpu); q = q->next) { 1.1538 + PRPollQueue *pq = _PR_POLLQUEUE_PTR(q); 1.1539 + _PRUnixPollDesc *pds = pq->pds; 1.1540 + _PRUnixPollDesc *epds = pds + pq->npds; 1.1541 + 1.1542 + if (pq->timeout < min_timeout) { 1.1543 + min_timeout = pq->timeout; 1.1544 + } 1.1545 + for (; pds < epds; pds++, pollfdPtr++) { 1.1546 + /* 1.1547 + * Assert that the pollfdPtr pointer does not go 1.1548 + * beyond the end of the pollfds array 1.1549 + */ 1.1550 + PR_ASSERT(pollfdPtr < pollfds + npollfds); 1.1551 + pollfdPtr->fd = pds->osfd; 1.1552 + /* direct copy of poll flags */ 1.1553 + pollfdPtr->events = pds->in_flags; 1.1554 + } 1.1555 + } 1.1556 + _PR_IOQ_TIMEOUT(me->cpu) = min_timeout; 1.1557 +#else 1.1558 + /* 1.1559 + * assigment of fd_sets 1.1560 + */ 1.1561 + r = _PR_FD_READ_SET(me->cpu); 1.1562 + w = _PR_FD_WRITE_SET(me->cpu); 1.1563 + e = _PR_FD_EXCEPTION_SET(me->cpu); 1.1564 + 1.1565 + rp = &r; 1.1566 + wp = &w; 1.1567 + ep = &e; 1.1568 + 1.1569 + max_osfd = _PR_IOQ_MAX_OSFD(me->cpu) + 1; 1.1570 + min_timeout = _PR_IOQ_TIMEOUT(me->cpu); 1.1571 +#endif /* _PR_USE_POLL */ 1.1572 + /* 1.1573 + ** Compute the minimum timeout value: make it the smaller of the 1.1574 + ** timeouts specified by the i/o pollers or the timeout of the first 1.1575 + ** sleeping thread. 1.1576 + */ 1.1577 + q = _PR_SLEEPQ(me->cpu).next; 1.1578 + 1.1579 + if (q != &_PR_SLEEPQ(me->cpu)) { 1.1580 + PRThread *t = _PR_THREAD_PTR(q); 1.1581 + 1.1582 + if (t->sleep < min_timeout) { 1.1583 + min_timeout = t->sleep; 1.1584 + } 1.1585 + } 1.1586 + if (min_timeout > ticks) { 1.1587 + min_timeout = ticks; 1.1588 + } 1.1589 + 1.1590 +#ifdef _PR_USE_POLL 1.1591 + if (min_timeout == PR_INTERVAL_NO_TIMEOUT) 1.1592 + timeout = -1; 1.1593 + else 1.1594 + timeout = PR_IntervalToMilliseconds(min_timeout); 1.1595 +#else 1.1596 + if (min_timeout == PR_INTERVAL_NO_TIMEOUT) { 1.1597 + tvp = NULL; 1.1598 + } else { 1.1599 + timeout.tv_sec = PR_IntervalToSeconds(min_timeout); 1.1600 + timeout.tv_usec = PR_IntervalToMicroseconds(min_timeout) 1.1601 + % PR_USEC_PER_SEC; 1.1602 + tvp = &timeout; 1.1603 + } 1.1604 +#endif /* _PR_USE_POLL */ 1.1605 + 1.1606 + _PR_MD_IOQ_UNLOCK(); 1.1607 + _MD_CHECK_FOR_EXIT(); 1.1608 + /* 1.1609 + * check for i/o operations 1.1610 + */ 1.1611 +#ifndef _PR_NO_CLOCK_TIMER 1.1612 + /* 1.1613 + * Disable the clock interrupts while we are in select, if clock interrupts 1.1614 + * are enabled. Otherwise, when the select/poll calls are interrupted, the 1.1615 + * timer value starts ticking from zero again when the system call is restarted. 1.1616 + */ 1.1617 +#ifdef IRIX 1.1618 + /* 1.1619 + * SIGCHLD signal is used on Irix to detect he termination of an 1.1620 + * sproc by SIGSEGV, SIGBUS or SIGABRT signals when 1.1621 + * _nspr_terminate_on_error is set. 
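Editor's note, not part of the patch: the code just below blocks the SIGALRM-driven clock while it sits in select()/poll(); as its comment explains, if the interval timer kept interrupting the call, the restarted system call would start timing from zero again. A minimal standalone sketch of bracketing a blocking call with sigprocmask (appropriate here because this is the non-pthreads code path):

/* Editorial sketch -- not part of the patch. Blocks SIGALRM around a
 * blocking poll() and restores the previous mask afterwards, the way
 * the idle loop below brackets _MD_SELECT/_MD_POLL. */
#include <poll.h>
#include <signal.h>

static int poll_without_clock(struct pollfd *fds, nfds_t nfds, int timeout_ms)
{
    sigset_t block, oldset;
    int rv;

    sigemptyset(&block);
    sigaddset(&block, SIGALRM);
    sigprocmask(SIG_BLOCK, &block, &oldset);

    rv = poll(fds, nfds, timeout_ms);

    sigprocmask(SIG_SETMASK, &oldset, NULL);
    return rv;
}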
1.1622 + */ 1.1623 + if ((!_nspr_noclock) || (_nspr_terminate_on_error)) 1.1624 +#else 1.1625 + if (!_nspr_noclock) 1.1626 +#endif /* IRIX */ 1.1627 +#ifdef IRIX 1.1628 + sigprocmask(SIG_BLOCK, &ints_off, &oldset); 1.1629 +#else 1.1630 + PR_ASSERT(sigismember(&timer_set, SIGALRM)); 1.1631 + sigprocmask(SIG_BLOCK, &timer_set, &oldset); 1.1632 +#endif /* IRIX */ 1.1633 +#endif /* !_PR_NO_CLOCK_TIMER */ 1.1634 + 1.1635 +#ifndef _PR_USE_POLL 1.1636 + PR_ASSERT(FD_ISSET(_pr_md_pipefd[0],rp)); 1.1637 + nfd = _MD_SELECT(max_osfd, rp, wp, ep, tvp); 1.1638 +#else 1.1639 + nfd = _MD_POLL(pollfds, npollfds, timeout); 1.1640 +#endif /* !_PR_USE_POLL */ 1.1641 + 1.1642 +#ifndef _PR_NO_CLOCK_TIMER 1.1643 +#ifdef IRIX 1.1644 + if ((!_nspr_noclock) || (_nspr_terminate_on_error)) 1.1645 +#else 1.1646 + if (!_nspr_noclock) 1.1647 +#endif /* IRIX */ 1.1648 + sigprocmask(SIG_SETMASK, &oldset, 0); 1.1649 +#endif /* !_PR_NO_CLOCK_TIMER */ 1.1650 + 1.1651 + _MD_CHECK_FOR_EXIT(); 1.1652 + 1.1653 +#ifdef IRIX 1.1654 + _PR_MD_primordial_cpu(); 1.1655 +#endif 1.1656 + 1.1657 + _PR_MD_IOQ_LOCK(); 1.1658 + /* 1.1659 + ** Notify monitors that are associated with the selected descriptors. 1.1660 + */ 1.1661 +#ifdef _PR_USE_POLL 1.1662 + if (nfd > 0) { 1.1663 + pollfdPtr = pollfds; 1.1664 + if (_PR_IS_NATIVE_THREAD_SUPPORTED()) { 1.1665 + /* 1.1666 + * Assert that the pipe is the first element in the 1.1667 + * pollfds array. 1.1668 + */ 1.1669 + PR_ASSERT(pollfds[0].fd == _pr_md_pipefd[0]); 1.1670 + if ((pollfds[0].revents & POLLIN) && (nfd == 1)) { 1.1671 + /* 1.1672 + * woken up by another thread; read all the data 1.1673 + * in the pipe to empty the pipe 1.1674 + */ 1.1675 + while ((rv = read(_pr_md_pipefd[0], _pr_md_pipebuf, 1.1676 + PIPE_BUF)) == PIPE_BUF){ 1.1677 + } 1.1678 + PR_ASSERT((rv > 0) || ((rv == -1) && (errno == EAGAIN))); 1.1679 + } 1.1680 + pollfdPtr++; 1.1681 +#ifdef IRIX 1.1682 + /* 1.1683 + * On Irix, check to see if the primordial cpu needs to exit 1.1684 + * to cause the process to terminate 1.1685 + */ 1.1686 + if (me->cpu->id == 0) { 1.1687 + PR_ASSERT(pollfds[1].fd == _pr_irix_primoridal_cpu_fd[0]); 1.1688 + if (pollfdPtr->revents & POLLIN) { 1.1689 + if (_pr_irix_process_exit) { 1.1690 + /* 1.1691 + * process exit due to a call to PR_ProcessExit 1.1692 + */ 1.1693 + prctl(PR_SETEXITSIG, SIGKILL); 1.1694 + _exit(_pr_irix_process_exit_code); 1.1695 + } else { 1.1696 + while ((rv = read(_pr_irix_primoridal_cpu_fd[0], 1.1697 + _pr_md_pipebuf, PIPE_BUF)) == PIPE_BUF) { 1.1698 + } 1.1699 + PR_ASSERT(rv > 0); 1.1700 + } 1.1701 + } 1.1702 + pollfdPtr++; 1.1703 + } 1.1704 +#endif 1.1705 + } 1.1706 + for (q = _PR_IOQ(me->cpu).next; q != &_PR_IOQ(me->cpu); q = q->next) { 1.1707 + PRPollQueue *pq = _PR_POLLQUEUE_PTR(q); 1.1708 + PRBool notify = PR_FALSE; 1.1709 + _PRUnixPollDesc *pds = pq->pds; 1.1710 + _PRUnixPollDesc *epds = pds + pq->npds; 1.1711 + 1.1712 + for (; pds < epds; pds++, pollfdPtr++) { 1.1713 + /* 1.1714 + * Assert that the pollfdPtr pointer does not go beyond 1.1715 + * the end of the pollfds array. 1.1716 + */ 1.1717 + PR_ASSERT(pollfdPtr < pollfds + npollfds); 1.1718 + /* 1.1719 + * Assert that the fd's in the pollfds array (stepped 1.1720 + * through by pollfdPtr) are in the same order as 1.1721 + * the fd's in _PR_IOQ() (stepped through by q and pds). 1.1722 + * This is how the pollfds array was created earlier. 
1.1723 + */ 1.1724 + PR_ASSERT(pollfdPtr->fd == pds->osfd); 1.1725 + pds->out_flags = pollfdPtr->revents; 1.1726 + /* Negative fd's are ignored by poll() */ 1.1727 + if (pds->osfd >= 0 && pds->out_flags) { 1.1728 + notify = PR_TRUE; 1.1729 + } 1.1730 + } 1.1731 + if (notify) { 1.1732 + PRIntn pri; 1.1733 + PRThread *thred; 1.1734 + 1.1735 + PR_REMOVE_LINK(&pq->links); 1.1736 + pq->on_ioq = PR_FALSE; 1.1737 + 1.1738 + thred = pq->thr; 1.1739 + _PR_THREAD_LOCK(thred); 1.1740 + if (pq->thr->flags & (_PR_ON_PAUSEQ|_PR_ON_SLEEPQ)) { 1.1741 + _PRCPU *cpu = pq->thr->cpu; 1.1742 + _PR_SLEEPQ_LOCK(pq->thr->cpu); 1.1743 + _PR_DEL_SLEEPQ(pq->thr, PR_TRUE); 1.1744 + _PR_SLEEPQ_UNLOCK(pq->thr->cpu); 1.1745 + 1.1746 + if (pq->thr->flags & _PR_SUSPENDING) { 1.1747 + /* 1.1748 + * set thread state to SUSPENDED; 1.1749 + * a Resume operation on the thread 1.1750 + * will move it to the runQ 1.1751 + */ 1.1752 + pq->thr->state = _PR_SUSPENDED; 1.1753 + _PR_MISCQ_LOCK(pq->thr->cpu); 1.1754 + _PR_ADD_SUSPENDQ(pq->thr, pq->thr->cpu); 1.1755 + _PR_MISCQ_UNLOCK(pq->thr->cpu); 1.1756 + } else { 1.1757 + pri = pq->thr->priority; 1.1758 + pq->thr->state = _PR_RUNNABLE; 1.1759 + 1.1760 + _PR_RUNQ_LOCK(cpu); 1.1761 + _PR_ADD_RUNQ(pq->thr, cpu, pri); 1.1762 + _PR_RUNQ_UNLOCK(cpu); 1.1763 + if (_pr_md_idle_cpus > 1) 1.1764 + _PR_MD_WAKEUP_WAITER(thred); 1.1765 + } 1.1766 + } 1.1767 + _PR_THREAD_UNLOCK(thred); 1.1768 + _PR_IOQ_OSFD_CNT(me->cpu) -= pq->npds; 1.1769 + PR_ASSERT(_PR_IOQ_OSFD_CNT(me->cpu) >= 0); 1.1770 + } 1.1771 + } 1.1772 + } else if (nfd == -1) { 1.1773 + PR_LOG(_pr_io_lm, PR_LOG_MAX, ("poll() failed with errno %d", errno)); 1.1774 + } 1.1775 + 1.1776 +#else 1.1777 + if (nfd > 0) { 1.1778 + q = _PR_IOQ(me->cpu).next; 1.1779 + _PR_IOQ_MAX_OSFD(me->cpu) = -1; 1.1780 + _PR_IOQ_TIMEOUT(me->cpu) = PR_INTERVAL_NO_TIMEOUT; 1.1781 + while (q != &_PR_IOQ(me->cpu)) { 1.1782 + PRPollQueue *pq = _PR_POLLQUEUE_PTR(q); 1.1783 + PRBool notify = PR_FALSE; 1.1784 + _PRUnixPollDesc *pds = pq->pds; 1.1785 + _PRUnixPollDesc *epds = pds + pq->npds; 1.1786 + PRInt32 pq_max_osfd = -1; 1.1787 + 1.1788 + q = q->next; 1.1789 + for (; pds < epds; pds++) { 1.1790 + PRInt32 osfd = pds->osfd; 1.1791 + PRInt16 in_flags = pds->in_flags; 1.1792 + PRInt16 out_flags = 0; 1.1793 + PR_ASSERT(osfd >= 0 || in_flags == 0); 1.1794 + if ((in_flags & _PR_UNIX_POLL_READ) && FD_ISSET(osfd, rp)) { 1.1795 + out_flags |= _PR_UNIX_POLL_READ; 1.1796 + } 1.1797 + if ((in_flags & _PR_UNIX_POLL_WRITE) && FD_ISSET(osfd, wp)) { 1.1798 + out_flags |= _PR_UNIX_POLL_WRITE; 1.1799 + } 1.1800 + if ((in_flags & _PR_UNIX_POLL_EXCEPT) && FD_ISSET(osfd, ep)) { 1.1801 + out_flags |= _PR_UNIX_POLL_EXCEPT; 1.1802 + } 1.1803 + pds->out_flags = out_flags; 1.1804 + if (out_flags) { 1.1805 + notify = PR_TRUE; 1.1806 + } 1.1807 + if (osfd > pq_max_osfd) { 1.1808 + pq_max_osfd = osfd; 1.1809 + } 1.1810 + } 1.1811 + if (notify == PR_TRUE) { 1.1812 + PRIntn pri; 1.1813 + PRThread *thred; 1.1814 + 1.1815 + PR_REMOVE_LINK(&pq->links); 1.1816 + pq->on_ioq = PR_FALSE; 1.1817 + 1.1818 + /* 1.1819 + * Decrement the count of descriptors for each desciptor/event 1.1820 + * because this I/O request is being removed from the 1.1821 + * ioq 1.1822 + */ 1.1823 + pds = pq->pds; 1.1824 + for (; pds < epds; pds++) { 1.1825 + PRInt32 osfd = pds->osfd; 1.1826 + PRInt16 in_flags = pds->in_flags; 1.1827 + PR_ASSERT(osfd >= 0 || in_flags == 0); 1.1828 + if (in_flags & _PR_UNIX_POLL_READ) { 1.1829 + if (--(_PR_FD_READ_CNT(me->cpu))[osfd] == 0) 1.1830 + FD_CLR(osfd, &_PR_FD_READ_SET(me->cpu)); 
1.1831 + } 1.1832 + if (in_flags & _PR_UNIX_POLL_WRITE) { 1.1833 + if (--(_PR_FD_WRITE_CNT(me->cpu))[osfd] == 0) 1.1834 + FD_CLR(osfd, &_PR_FD_WRITE_SET(me->cpu)); 1.1835 + } 1.1836 + if (in_flags & _PR_UNIX_POLL_EXCEPT) { 1.1837 + if (--(_PR_FD_EXCEPTION_CNT(me->cpu))[osfd] == 0) 1.1838 + FD_CLR(osfd, &_PR_FD_EXCEPTION_SET(me->cpu)); 1.1839 + } 1.1840 + } 1.1841 + 1.1842 + /* 1.1843 + * Because this thread can run on a different cpu right 1.1844 + * after being added to the run queue, do not dereference 1.1845 + * pq 1.1846 + */ 1.1847 + thred = pq->thr; 1.1848 + _PR_THREAD_LOCK(thred); 1.1849 + if (pq->thr->flags & (_PR_ON_PAUSEQ|_PR_ON_SLEEPQ)) { 1.1850 + _PRCPU *cpu = thred->cpu; 1.1851 + _PR_SLEEPQ_LOCK(pq->thr->cpu); 1.1852 + _PR_DEL_SLEEPQ(pq->thr, PR_TRUE); 1.1853 + _PR_SLEEPQ_UNLOCK(pq->thr->cpu); 1.1854 + 1.1855 + if (pq->thr->flags & _PR_SUSPENDING) { 1.1856 + /* 1.1857 + * set thread state to SUSPENDED; 1.1858 + * a Resume operation on the thread 1.1859 + * will move it to the runQ 1.1860 + */ 1.1861 + pq->thr->state = _PR_SUSPENDED; 1.1862 + _PR_MISCQ_LOCK(pq->thr->cpu); 1.1863 + _PR_ADD_SUSPENDQ(pq->thr, pq->thr->cpu); 1.1864 + _PR_MISCQ_UNLOCK(pq->thr->cpu); 1.1865 + } else { 1.1866 + pri = pq->thr->priority; 1.1867 + pq->thr->state = _PR_RUNNABLE; 1.1868 + 1.1869 + pq->thr->cpu = cpu; 1.1870 + _PR_RUNQ_LOCK(cpu); 1.1871 + _PR_ADD_RUNQ(pq->thr, cpu, pri); 1.1872 + _PR_RUNQ_UNLOCK(cpu); 1.1873 + if (_pr_md_idle_cpus > 1) 1.1874 + _PR_MD_WAKEUP_WAITER(thred); 1.1875 + } 1.1876 + } 1.1877 + _PR_THREAD_UNLOCK(thred); 1.1878 + } else { 1.1879 + if (pq->timeout < _PR_IOQ_TIMEOUT(me->cpu)) 1.1880 + _PR_IOQ_TIMEOUT(me->cpu) = pq->timeout; 1.1881 + if (_PR_IOQ_MAX_OSFD(me->cpu) < pq_max_osfd) 1.1882 + _PR_IOQ_MAX_OSFD(me->cpu) = pq_max_osfd; 1.1883 + } 1.1884 + } 1.1885 + if (_PR_IS_NATIVE_THREAD_SUPPORTED()) { 1.1886 + if ((FD_ISSET(_pr_md_pipefd[0], rp)) && (nfd == 1)) { 1.1887 + /* 1.1888 + * woken up by another thread; read all the data 1.1889 + * in the pipe to empty the pipe 1.1890 + */ 1.1891 + while ((rv = 1.1892 + read(_pr_md_pipefd[0], _pr_md_pipebuf, PIPE_BUF)) 1.1893 + == PIPE_BUF){ 1.1894 + } 1.1895 + PR_ASSERT((rv > 0) || 1.1896 + ((rv == -1) && (errno == EAGAIN))); 1.1897 + } 1.1898 + if (_PR_IOQ_MAX_OSFD(me->cpu) < _pr_md_pipefd[0]) 1.1899 + _PR_IOQ_MAX_OSFD(me->cpu) = _pr_md_pipefd[0]; 1.1900 +#ifdef IRIX 1.1901 + if ((me->cpu->id == 0) && 1.1902 + (FD_ISSET(_pr_irix_primoridal_cpu_fd[0], rp))) { 1.1903 + if (_pr_irix_process_exit) { 1.1904 + /* 1.1905 + * process exit due to a call to PR_ProcessExit 1.1906 + */ 1.1907 + prctl(PR_SETEXITSIG, SIGKILL); 1.1908 + _exit(_pr_irix_process_exit_code); 1.1909 + } else { 1.1910 + while ((rv = read(_pr_irix_primoridal_cpu_fd[0], 1.1911 + _pr_md_pipebuf, PIPE_BUF)) == PIPE_BUF) { 1.1912 + } 1.1913 + PR_ASSERT(rv > 0); 1.1914 + } 1.1915 + } 1.1916 + if (me->cpu->id == 0) { 1.1917 + if (_PR_IOQ_MAX_OSFD(me->cpu) < _pr_irix_primoridal_cpu_fd[0]) 1.1918 + _PR_IOQ_MAX_OSFD(me->cpu) = _pr_irix_primoridal_cpu_fd[0]; 1.1919 + } 1.1920 +#endif 1.1921 + } 1.1922 + } else if (nfd < 0) { 1.1923 + if (errno == EBADF) { 1.1924 + FindBadFDs(); 1.1925 + } else { 1.1926 + PR_LOG(_pr_io_lm, PR_LOG_MAX, ("select() failed with errno %d", 1.1927 + errno)); 1.1928 + } 1.1929 + } else { 1.1930 + PR_ASSERT(nfd == 0); 1.1931 + /* 1.1932 + * compute the new value of _PR_IOQ_TIMEOUT 1.1933 + */ 1.1934 + q = _PR_IOQ(me->cpu).next; 1.1935 + _PR_IOQ_MAX_OSFD(me->cpu) = -1; 1.1936 + _PR_IOQ_TIMEOUT(me->cpu) = PR_INTERVAL_NO_TIMEOUT; 1.1937 + while (q != 
&_PR_IOQ(me->cpu)) { 1.1938 + PRPollQueue *pq = _PR_POLLQUEUE_PTR(q); 1.1939 + _PRUnixPollDesc *pds = pq->pds; 1.1940 + _PRUnixPollDesc *epds = pds + pq->npds; 1.1941 + PRInt32 pq_max_osfd = -1; 1.1942 + 1.1943 + q = q->next; 1.1944 + for (; pds < epds; pds++) { 1.1945 + if (pds->osfd > pq_max_osfd) { 1.1946 + pq_max_osfd = pds->osfd; 1.1947 + } 1.1948 + } 1.1949 + if (pq->timeout < _PR_IOQ_TIMEOUT(me->cpu)) 1.1950 + _PR_IOQ_TIMEOUT(me->cpu) = pq->timeout; 1.1951 + if (_PR_IOQ_MAX_OSFD(me->cpu) < pq_max_osfd) 1.1952 + _PR_IOQ_MAX_OSFD(me->cpu) = pq_max_osfd; 1.1953 + } 1.1954 + if (_PR_IS_NATIVE_THREAD_SUPPORTED()) { 1.1955 + if (_PR_IOQ_MAX_OSFD(me->cpu) < _pr_md_pipefd[0]) 1.1956 + _PR_IOQ_MAX_OSFD(me->cpu) = _pr_md_pipefd[0]; 1.1957 + } 1.1958 + } 1.1959 +#endif /* _PR_USE_POLL */ 1.1960 + _PR_MD_IOQ_UNLOCK(); 1.1961 +} 1.1962 + 1.1963 +void _MD_Wakeup_CPUs() 1.1964 +{ 1.1965 + PRInt32 rv, data; 1.1966 + 1.1967 + data = 0; 1.1968 + rv = write(_pr_md_pipefd[1], &data, 1); 1.1969 + 1.1970 + while ((rv < 0) && (errno == EAGAIN)) { 1.1971 + /* 1.1972 + * pipe full, read all data in pipe to empty it 1.1973 + */ 1.1974 + while ((rv = 1.1975 + read(_pr_md_pipefd[0], _pr_md_pipebuf, PIPE_BUF)) 1.1976 + == PIPE_BUF) { 1.1977 + } 1.1978 + PR_ASSERT((rv > 0) || 1.1979 + ((rv == -1) && (errno == EAGAIN))); 1.1980 + rv = write(_pr_md_pipefd[1], &data, 1); 1.1981 + } 1.1982 +} 1.1983 + 1.1984 + 1.1985 +void _MD_InitCPUS() 1.1986 +{ 1.1987 + PRInt32 rv, flags; 1.1988 + PRThread *me = _MD_CURRENT_THREAD(); 1.1989 + 1.1990 + rv = pipe(_pr_md_pipefd); 1.1991 + PR_ASSERT(rv == 0); 1.1992 + _PR_IOQ_MAX_OSFD(me->cpu) = _pr_md_pipefd[0]; 1.1993 +#ifndef _PR_USE_POLL 1.1994 + FD_SET(_pr_md_pipefd[0], &_PR_FD_READ_SET(me->cpu)); 1.1995 +#endif 1.1996 + 1.1997 + flags = fcntl(_pr_md_pipefd[0], F_GETFL, 0); 1.1998 + fcntl(_pr_md_pipefd[0], F_SETFL, flags | O_NONBLOCK); 1.1999 + flags = fcntl(_pr_md_pipefd[1], F_GETFL, 0); 1.2000 + fcntl(_pr_md_pipefd[1], F_SETFL, flags | O_NONBLOCK); 1.2001 +} 1.2002 + 1.2003 +/* 1.2004 +** Unix SIGALRM (clock) signal handler 1.2005 +*/ 1.2006 +static void ClockInterruptHandler() 1.2007 +{ 1.2008 + int olderrno; 1.2009 + PRUintn pri; 1.2010 + _PRCPU *cpu = _PR_MD_CURRENT_CPU(); 1.2011 + PRThread *me = _MD_CURRENT_THREAD(); 1.2012 + 1.2013 +#ifdef SOLARIS 1.2014 + if (!me || _PR_IS_NATIVE_THREAD(me)) { 1.2015 + _pr_primordialCPU->u.missed[_pr_primordialCPU->where] |= _PR_MISSED_CLOCK; 1.2016 + return; 1.2017 + } 1.2018 +#endif 1.2019 + 1.2020 + if (_PR_MD_GET_INTSOFF() != 0) { 1.2021 + cpu->u.missed[cpu->where] |= _PR_MISSED_CLOCK; 1.2022 + return; 1.2023 + } 1.2024 + _PR_MD_SET_INTSOFF(1); 1.2025 + 1.2026 + olderrno = errno; 1.2027 + _PR_ClockInterrupt(); 1.2028 + errno = olderrno; 1.2029 + 1.2030 + /* 1.2031 + ** If the interrupt wants a resched or if some other thread at 1.2032 + ** the same priority needs the cpu, reschedule. 
1.2033 + */ 1.2034 + pri = me->priority; 1.2035 + if ((cpu->u.missed[3] || (_PR_RUNQREADYMASK(me->cpu) >> pri))) { 1.2036 +#ifdef _PR_NO_PREEMPT 1.2037 + cpu->resched = PR_TRUE; 1.2038 + if (pr_interruptSwitchHook) { 1.2039 + (*pr_interruptSwitchHook)(pr_interruptSwitchHookArg); 1.2040 + } 1.2041 +#else /* _PR_NO_PREEMPT */ 1.2042 + /* 1.2043 + ** Re-enable unix interrupts (so that we can use 1.2044 + ** setjmp/longjmp for context switching without having to 1.2045 + ** worry about the signal state) 1.2046 + */ 1.2047 + sigprocmask(SIG_SETMASK, &empty_set, 0); 1.2048 + PR_LOG(_pr_sched_lm, PR_LOG_MIN, ("clock caused context switch")); 1.2049 + 1.2050 + if(!(me->flags & _PR_IDLE_THREAD)) { 1.2051 + _PR_THREAD_LOCK(me); 1.2052 + me->state = _PR_RUNNABLE; 1.2053 + me->cpu = cpu; 1.2054 + _PR_RUNQ_LOCK(cpu); 1.2055 + _PR_ADD_RUNQ(me, cpu, pri); 1.2056 + _PR_RUNQ_UNLOCK(cpu); 1.2057 + _PR_THREAD_UNLOCK(me); 1.2058 + } else 1.2059 + me->state = _PR_RUNNABLE; 1.2060 + _MD_SWITCH_CONTEXT(me); 1.2061 + PR_LOG(_pr_sched_lm, PR_LOG_MIN, ("clock back from context switch")); 1.2062 +#endif /* _PR_NO_PREEMPT */ 1.2063 + } 1.2064 + /* 1.2065 + * Because this thread could be running on a different cpu after 1.2066 + * a context switch the current cpu should be accessed and the 1.2067 + * value of the 'cpu' variable should not be used. 1.2068 + */ 1.2069 + _PR_MD_SET_INTSOFF(0); 1.2070 +} 1.2071 + 1.2072 +/* 1.2073 + * On HP-UX 9, we have to use the sigvector() interface to restart 1.2074 + * interrupted system calls, because sigaction() does not have the 1.2075 + * SA_RESTART flag. 1.2076 + */ 1.2077 + 1.2078 +#ifdef HPUX9 1.2079 +static void HPUX9_ClockInterruptHandler( 1.2080 + int sig, 1.2081 + int code, 1.2082 + struct sigcontext *scp) 1.2083 +{ 1.2084 + ClockInterruptHandler(); 1.2085 + scp->sc_syscall_action = SIG_RESTART; 1.2086 +} 1.2087 +#endif /* HPUX9 */ 1.2088 + 1.2089 +/* # of milliseconds per clock tick that we will use */ 1.2090 +#define MSEC_PER_TICK 50 1.2091 + 1.2092 + 1.2093 +void _MD_StartInterrupts() 1.2094 +{ 1.2095 + char *eval; 1.2096 + 1.2097 + if ((eval = getenv("NSPR_NOCLOCK")) != NULL) { 1.2098 + if (atoi(eval) == 0) 1.2099 + _nspr_noclock = 0; 1.2100 + else 1.2101 + _nspr_noclock = 1; 1.2102 + } 1.2103 + 1.2104 +#ifndef _PR_NO_CLOCK_TIMER 1.2105 + if (!_nspr_noclock) { 1.2106 + _MD_EnableClockInterrupts(); 1.2107 + } 1.2108 +#endif 1.2109 +} 1.2110 + 1.2111 +void _MD_StopInterrupts() 1.2112 +{ 1.2113 + sigprocmask(SIG_BLOCK, &timer_set, 0); 1.2114 +} 1.2115 + 1.2116 +void _MD_EnableClockInterrupts() 1.2117 +{ 1.2118 + struct itimerval itval; 1.2119 + extern PRUintn _pr_numCPU; 1.2120 +#ifdef HPUX9 1.2121 + struct sigvec vec; 1.2122 + 1.2123 + vec.sv_handler = (void (*)()) HPUX9_ClockInterruptHandler; 1.2124 + vec.sv_mask = 0; 1.2125 + vec.sv_flags = 0; 1.2126 + sigvector(SIGALRM, &vec, 0); 1.2127 +#else 1.2128 + struct sigaction vtact; 1.2129 + 1.2130 + vtact.sa_handler = (void (*)()) ClockInterruptHandler; 1.2131 + sigemptyset(&vtact.sa_mask); 1.2132 + vtact.sa_flags = SA_RESTART; 1.2133 + sigaction(SIGALRM, &vtact, 0); 1.2134 +#endif /* HPUX9 */ 1.2135 + 1.2136 + PR_ASSERT(_pr_numCPU == 1); 1.2137 + itval.it_interval.tv_sec = 0; 1.2138 + itval.it_interval.tv_usec = MSEC_PER_TICK * PR_USEC_PER_MSEC; 1.2139 + itval.it_value = itval.it_interval; 1.2140 + setitimer(ITIMER_REAL, &itval, 0); 1.2141 +} 1.2142 + 1.2143 +void _MD_DisableClockInterrupts() 1.2144 +{ 1.2145 + struct itimerval itval; 1.2146 + extern PRUintn _pr_numCPU; 1.2147 + 1.2148 + PR_ASSERT(_pr_numCPU == 1); 1.2149 
+ itval.it_interval.tv_sec = 0; 1.2150 + itval.it_interval.tv_usec = 0; 1.2151 + itval.it_value = itval.it_interval; 1.2152 + setitimer(ITIMER_REAL, &itval, 0); 1.2153 +} 1.2154 + 1.2155 +void _MD_BlockClockInterrupts() 1.2156 +{ 1.2157 + sigprocmask(SIG_BLOCK, &timer_set, 0); 1.2158 +} 1.2159 + 1.2160 +void _MD_UnblockClockInterrupts() 1.2161 +{ 1.2162 + sigprocmask(SIG_UNBLOCK, &timer_set, 0); 1.2163 +} 1.2164 + 1.2165 +void _MD_MakeNonblock(PRFileDesc *fd) 1.2166 +{ 1.2167 + PRInt32 osfd = fd->secret->md.osfd; 1.2168 + int flags; 1.2169 + 1.2170 + if (osfd <= 2) { 1.2171 + /* Don't mess around with stdin, stdout or stderr */ 1.2172 + return; 1.2173 + } 1.2174 + flags = fcntl(osfd, F_GETFL, 0); 1.2175 + 1.2176 + /* 1.2177 + * Use O_NONBLOCK (POSIX-style non-blocking I/O) whenever possible. 1.2178 + * On SunOS 4, we must use FNDELAY (BSD-style non-blocking I/O), 1.2179 + * otherwise connect() still blocks and can be interrupted by SIGALRM. 1.2180 + */ 1.2181 + 1.2182 + fcntl(osfd, F_SETFL, flags | O_NONBLOCK); 1.2183 + } 1.2184 + 1.2185 +PRInt32 _MD_open(const char *name, PRIntn flags, PRIntn mode) 1.2186 +{ 1.2187 + PRInt32 osflags; 1.2188 + PRInt32 rv, err; 1.2189 + 1.2190 + if (flags & PR_RDWR) { 1.2191 + osflags = O_RDWR; 1.2192 + } else if (flags & PR_WRONLY) { 1.2193 + osflags = O_WRONLY; 1.2194 + } else { 1.2195 + osflags = O_RDONLY; 1.2196 + } 1.2197 + 1.2198 + if (flags & PR_EXCL) 1.2199 + osflags |= O_EXCL; 1.2200 + if (flags & PR_APPEND) 1.2201 + osflags |= O_APPEND; 1.2202 + if (flags & PR_TRUNCATE) 1.2203 + osflags |= O_TRUNC; 1.2204 + if (flags & PR_SYNC) { 1.2205 +#if defined(O_SYNC) 1.2206 + osflags |= O_SYNC; 1.2207 +#elif defined(O_FSYNC) 1.2208 + osflags |= O_FSYNC; 1.2209 +#else 1.2210 +#error "Neither O_SYNC nor O_FSYNC is defined on this platform" 1.2211 +#endif 1.2212 + } 1.2213 + 1.2214 + /* 1.2215 + ** On creations we hold the 'create' lock in order to enforce 1.2216 + ** the semantics of PR_Rename. (see the latter for more details) 1.2217 + */ 1.2218 + if (flags & PR_CREATE_FILE) 1.2219 + { 1.2220 + osflags |= O_CREAT; 1.2221 + if (NULL !=_pr_rename_lock) 1.2222 + PR_Lock(_pr_rename_lock); 1.2223 + } 1.2224 + 1.2225 +#if defined(ANDROID) 1.2226 + osflags |= O_LARGEFILE; 1.2227 +#endif 1.2228 + 1.2229 + rv = _md_iovector._open64(name, osflags, mode); 1.2230 + 1.2231 + if (rv < 0) { 1.2232 + err = _MD_ERRNO(); 1.2233 + _PR_MD_MAP_OPEN_ERROR(err); 1.2234 + } 1.2235 + 1.2236 + if ((flags & PR_CREATE_FILE) && (NULL !=_pr_rename_lock)) 1.2237 + PR_Unlock(_pr_rename_lock); 1.2238 + return rv; 1.2239 +} 1.2240 + 1.2241 +PRIntervalTime intr_timeout_ticks; 1.2242 + 1.2243 +#if defined(SOLARIS) || defined(IRIX) 1.2244 +static void sigsegvhandler() { 1.2245 + fprintf(stderr,"Received SIGSEGV\n"); 1.2246 + fflush(stderr); 1.2247 + pause(); 1.2248 +} 1.2249 + 1.2250 +static void sigaborthandler() { 1.2251 + fprintf(stderr,"Received SIGABRT\n"); 1.2252 + fflush(stderr); 1.2253 + pause(); 1.2254 +} 1.2255 + 1.2256 +static void sigbushandler() { 1.2257 + fprintf(stderr,"Received SIGBUS\n"); 1.2258 + fflush(stderr); 1.2259 + pause(); 1.2260 +} 1.2261 +#endif /* SOLARIS, IRIX */ 1.2262 + 1.2263 +#endif /* !defined(_PR_PTHREADS) */ 1.2264 + 1.2265 +void _MD_query_fd_inheritable(PRFileDesc *fd) 1.2266 +{ 1.2267 + int flags; 1.2268 + 1.2269 + PR_ASSERT(_PR_TRI_UNKNOWN == fd->secret->inheritable); 1.2270 + flags = fcntl(fd->secret->md.osfd, F_GETFD, 0); 1.2271 + PR_ASSERT(-1 != flags); 1.2272 + fd->secret->inheritable = (flags & FD_CLOEXEC) ? 
1.2273 + _PR_TRI_FALSE : _PR_TRI_TRUE; 1.2274 +} 1.2275 + 1.2276 +PROffset32 _MD_lseek(PRFileDesc *fd, PROffset32 offset, PRSeekWhence whence) 1.2277 +{ 1.2278 + PROffset32 rv, where; 1.2279 + 1.2280 + switch (whence) { 1.2281 + case PR_SEEK_SET: 1.2282 + where = SEEK_SET; 1.2283 + break; 1.2284 + case PR_SEEK_CUR: 1.2285 + where = SEEK_CUR; 1.2286 + break; 1.2287 + case PR_SEEK_END: 1.2288 + where = SEEK_END; 1.2289 + break; 1.2290 + default: 1.2291 + PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0); 1.2292 + rv = -1; 1.2293 + goto done; 1.2294 + } 1.2295 + rv = lseek(fd->secret->md.osfd,offset,where); 1.2296 + if (rv == -1) 1.2297 + { 1.2298 + PRInt32 syserr = _MD_ERRNO(); 1.2299 + _PR_MD_MAP_LSEEK_ERROR(syserr); 1.2300 + } 1.2301 +done: 1.2302 + return(rv); 1.2303 +} 1.2304 + 1.2305 +PROffset64 _MD_lseek64(PRFileDesc *fd, PROffset64 offset, PRSeekWhence whence) 1.2306 +{ 1.2307 + PRInt32 where; 1.2308 + PROffset64 rv; 1.2309 + 1.2310 + switch (whence) 1.2311 + { 1.2312 + case PR_SEEK_SET: 1.2313 + where = SEEK_SET; 1.2314 + break; 1.2315 + case PR_SEEK_CUR: 1.2316 + where = SEEK_CUR; 1.2317 + break; 1.2318 + case PR_SEEK_END: 1.2319 + where = SEEK_END; 1.2320 + break; 1.2321 + default: 1.2322 + PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0); 1.2323 + rv = minus_one; 1.2324 + goto done; 1.2325 + } 1.2326 + rv = _md_iovector._lseek64(fd->secret->md.osfd, offset, where); 1.2327 + if (LL_EQ(rv, minus_one)) 1.2328 + { 1.2329 + PRInt32 syserr = _MD_ERRNO(); 1.2330 + _PR_MD_MAP_LSEEK_ERROR(syserr); 1.2331 + } 1.2332 +done: 1.2333 + return rv; 1.2334 +} /* _MD_lseek64 */ 1.2335 + 1.2336 +/* 1.2337 +** _MD_set_fileinfo_times -- 1.2338 +** Set the modifyTime and creationTime of the PRFileInfo 1.2339 +** structure using the values in struct stat. 1.2340 +** 1.2341 +** _MD_set_fileinfo64_times -- 1.2342 +** Set the modifyTime and creationTime of the PRFileInfo64 1.2343 +** structure using the values in _MDStat64. 1.2344 +*/ 1.2345 + 1.2346 +#if defined(_PR_STAT_HAS_ST_ATIM) 1.2347 +/* 1.2348 +** struct stat has st_atim, st_mtim, and st_ctim fields of 1.2349 +** type timestruc_t. 
1.2350 +*/ 1.2351 +static void _MD_set_fileinfo_times( 1.2352 + const struct stat *sb, 1.2353 + PRFileInfo *info) 1.2354 +{ 1.2355 + PRInt64 us, s2us; 1.2356 + 1.2357 + LL_I2L(s2us, PR_USEC_PER_SEC); 1.2358 + LL_I2L(info->modifyTime, sb->st_mtim.tv_sec); 1.2359 + LL_MUL(info->modifyTime, info->modifyTime, s2us); 1.2360 + LL_I2L(us, sb->st_mtim.tv_nsec / 1000); 1.2361 + LL_ADD(info->modifyTime, info->modifyTime, us); 1.2362 + LL_I2L(info->creationTime, sb->st_ctim.tv_sec); 1.2363 + LL_MUL(info->creationTime, info->creationTime, s2us); 1.2364 + LL_I2L(us, sb->st_ctim.tv_nsec / 1000); 1.2365 + LL_ADD(info->creationTime, info->creationTime, us); 1.2366 +} 1.2367 + 1.2368 +static void _MD_set_fileinfo64_times( 1.2369 + const _MDStat64 *sb, 1.2370 + PRFileInfo64 *info) 1.2371 +{ 1.2372 + PRInt64 us, s2us; 1.2373 + 1.2374 + LL_I2L(s2us, PR_USEC_PER_SEC); 1.2375 + LL_I2L(info->modifyTime, sb->st_mtim.tv_sec); 1.2376 + LL_MUL(info->modifyTime, info->modifyTime, s2us); 1.2377 + LL_I2L(us, sb->st_mtim.tv_nsec / 1000); 1.2378 + LL_ADD(info->modifyTime, info->modifyTime, us); 1.2379 + LL_I2L(info->creationTime, sb->st_ctim.tv_sec); 1.2380 + LL_MUL(info->creationTime, info->creationTime, s2us); 1.2381 + LL_I2L(us, sb->st_ctim.tv_nsec / 1000); 1.2382 + LL_ADD(info->creationTime, info->creationTime, us); 1.2383 +} 1.2384 +#elif defined(_PR_STAT_HAS_ST_ATIM_UNION) 1.2385 +/* 1.2386 +** The st_atim, st_mtim, and st_ctim fields in struct stat are 1.2387 +** unions with a st__tim union member of type timestruc_t. 1.2388 +*/ 1.2389 +static void _MD_set_fileinfo_times( 1.2390 + const struct stat *sb, 1.2391 + PRFileInfo *info) 1.2392 +{ 1.2393 + PRInt64 us, s2us; 1.2394 + 1.2395 + LL_I2L(s2us, PR_USEC_PER_SEC); 1.2396 + LL_I2L(info->modifyTime, sb->st_mtim.st__tim.tv_sec); 1.2397 + LL_MUL(info->modifyTime, info->modifyTime, s2us); 1.2398 + LL_I2L(us, sb->st_mtim.st__tim.tv_nsec / 1000); 1.2399 + LL_ADD(info->modifyTime, info->modifyTime, us); 1.2400 + LL_I2L(info->creationTime, sb->st_ctim.st__tim.tv_sec); 1.2401 + LL_MUL(info->creationTime, info->creationTime, s2us); 1.2402 + LL_I2L(us, sb->st_ctim.st__tim.tv_nsec / 1000); 1.2403 + LL_ADD(info->creationTime, info->creationTime, us); 1.2404 +} 1.2405 + 1.2406 +static void _MD_set_fileinfo64_times( 1.2407 + const _MDStat64 *sb, 1.2408 + PRFileInfo64 *info) 1.2409 +{ 1.2410 + PRInt64 us, s2us; 1.2411 + 1.2412 + LL_I2L(s2us, PR_USEC_PER_SEC); 1.2413 + LL_I2L(info->modifyTime, sb->st_mtim.st__tim.tv_sec); 1.2414 + LL_MUL(info->modifyTime, info->modifyTime, s2us); 1.2415 + LL_I2L(us, sb->st_mtim.st__tim.tv_nsec / 1000); 1.2416 + LL_ADD(info->modifyTime, info->modifyTime, us); 1.2417 + LL_I2L(info->creationTime, sb->st_ctim.st__tim.tv_sec); 1.2418 + LL_MUL(info->creationTime, info->creationTime, s2us); 1.2419 + LL_I2L(us, sb->st_ctim.st__tim.tv_nsec / 1000); 1.2420 + LL_ADD(info->creationTime, info->creationTime, us); 1.2421 +} 1.2422 +#elif defined(_PR_STAT_HAS_ST_ATIMESPEC) 1.2423 +/* 1.2424 +** struct stat has st_atimespec, st_mtimespec, and st_ctimespec 1.2425 +** fields of type struct timespec. 
1.2426 +*/ 1.2427 +#if defined(_PR_TIMESPEC_HAS_TS_SEC) 1.2428 +static void _MD_set_fileinfo_times( 1.2429 + const struct stat *sb, 1.2430 + PRFileInfo *info) 1.2431 +{ 1.2432 + PRInt64 us, s2us; 1.2433 + 1.2434 + LL_I2L(s2us, PR_USEC_PER_SEC); 1.2435 + LL_I2L(info->modifyTime, sb->st_mtimespec.ts_sec); 1.2436 + LL_MUL(info->modifyTime, info->modifyTime, s2us); 1.2437 + LL_I2L(us, sb->st_mtimespec.ts_nsec / 1000); 1.2438 + LL_ADD(info->modifyTime, info->modifyTime, us); 1.2439 + LL_I2L(info->creationTime, sb->st_ctimespec.ts_sec); 1.2440 + LL_MUL(info->creationTime, info->creationTime, s2us); 1.2441 + LL_I2L(us, sb->st_ctimespec.ts_nsec / 1000); 1.2442 + LL_ADD(info->creationTime, info->creationTime, us); 1.2443 +} 1.2444 + 1.2445 +static void _MD_set_fileinfo64_times( 1.2446 + const _MDStat64 *sb, 1.2447 + PRFileInfo64 *info) 1.2448 +{ 1.2449 + PRInt64 us, s2us; 1.2450 + 1.2451 + LL_I2L(s2us, PR_USEC_PER_SEC); 1.2452 + LL_I2L(info->modifyTime, sb->st_mtimespec.ts_sec); 1.2453 + LL_MUL(info->modifyTime, info->modifyTime, s2us); 1.2454 + LL_I2L(us, sb->st_mtimespec.ts_nsec / 1000); 1.2455 + LL_ADD(info->modifyTime, info->modifyTime, us); 1.2456 + LL_I2L(info->creationTime, sb->st_ctimespec.ts_sec); 1.2457 + LL_MUL(info->creationTime, info->creationTime, s2us); 1.2458 + LL_I2L(us, sb->st_ctimespec.ts_nsec / 1000); 1.2459 + LL_ADD(info->creationTime, info->creationTime, us); 1.2460 +} 1.2461 +#else /* _PR_TIMESPEC_HAS_TS_SEC */ 1.2462 +/* 1.2463 +** The POSIX timespec structure has tv_sec and tv_nsec. 1.2464 +*/ 1.2465 +static void _MD_set_fileinfo_times( 1.2466 + const struct stat *sb, 1.2467 + PRFileInfo *info) 1.2468 +{ 1.2469 + PRInt64 us, s2us; 1.2470 + 1.2471 + LL_I2L(s2us, PR_USEC_PER_SEC); 1.2472 + LL_I2L(info->modifyTime, sb->st_mtimespec.tv_sec); 1.2473 + LL_MUL(info->modifyTime, info->modifyTime, s2us); 1.2474 + LL_I2L(us, sb->st_mtimespec.tv_nsec / 1000); 1.2475 + LL_ADD(info->modifyTime, info->modifyTime, us); 1.2476 + LL_I2L(info->creationTime, sb->st_ctimespec.tv_sec); 1.2477 + LL_MUL(info->creationTime, info->creationTime, s2us); 1.2478 + LL_I2L(us, sb->st_ctimespec.tv_nsec / 1000); 1.2479 + LL_ADD(info->creationTime, info->creationTime, us); 1.2480 +} 1.2481 + 1.2482 +static void _MD_set_fileinfo64_times( 1.2483 + const _MDStat64 *sb, 1.2484 + PRFileInfo64 *info) 1.2485 +{ 1.2486 + PRInt64 us, s2us; 1.2487 + 1.2488 + LL_I2L(s2us, PR_USEC_PER_SEC); 1.2489 + LL_I2L(info->modifyTime, sb->st_mtimespec.tv_sec); 1.2490 + LL_MUL(info->modifyTime, info->modifyTime, s2us); 1.2491 + LL_I2L(us, sb->st_mtimespec.tv_nsec / 1000); 1.2492 + LL_ADD(info->modifyTime, info->modifyTime, us); 1.2493 + LL_I2L(info->creationTime, sb->st_ctimespec.tv_sec); 1.2494 + LL_MUL(info->creationTime, info->creationTime, s2us); 1.2495 + LL_I2L(us, sb->st_ctimespec.tv_nsec / 1000); 1.2496 + LL_ADD(info->creationTime, info->creationTime, us); 1.2497 +} 1.2498 +#endif /* _PR_TIMESPEC_HAS_TS_SEC */ 1.2499 +#elif defined(_PR_STAT_HAS_ONLY_ST_ATIME) 1.2500 +/* 1.2501 +** struct stat only has st_atime, st_mtime, and st_ctime fields 1.2502 +** of type time_t. 
1.2503 +*/ 1.2504 +static void _MD_set_fileinfo_times( 1.2505 + const struct stat *sb, 1.2506 + PRFileInfo *info) 1.2507 +{ 1.2508 + PRInt64 s, s2us; 1.2509 + LL_I2L(s2us, PR_USEC_PER_SEC); 1.2510 + LL_I2L(s, sb->st_mtime); 1.2511 + LL_MUL(s, s, s2us); 1.2512 + info->modifyTime = s; 1.2513 + LL_I2L(s, sb->st_ctime); 1.2514 + LL_MUL(s, s, s2us); 1.2515 + info->creationTime = s; 1.2516 +} 1.2517 + 1.2518 +static void _MD_set_fileinfo64_times( 1.2519 + const _MDStat64 *sb, 1.2520 + PRFileInfo64 *info) 1.2521 +{ 1.2522 + PRInt64 s, s2us; 1.2523 + LL_I2L(s2us, PR_USEC_PER_SEC); 1.2524 + LL_I2L(s, sb->st_mtime); 1.2525 + LL_MUL(s, s, s2us); 1.2526 + info->modifyTime = s; 1.2527 + LL_I2L(s, sb->st_ctime); 1.2528 + LL_MUL(s, s, s2us); 1.2529 + info->creationTime = s; 1.2530 +} 1.2531 +#else 1.2532 +#error "I don't know yet" 1.2533 +#endif 1.2534 + 1.2535 +static int _MD_convert_stat_to_fileinfo( 1.2536 + const struct stat *sb, 1.2537 + PRFileInfo *info) 1.2538 +{ 1.2539 + if (S_IFREG & sb->st_mode) 1.2540 + info->type = PR_FILE_FILE; 1.2541 + else if (S_IFDIR & sb->st_mode) 1.2542 + info->type = PR_FILE_DIRECTORY; 1.2543 + else 1.2544 + info->type = PR_FILE_OTHER; 1.2545 + 1.2546 +#if defined(_PR_HAVE_LARGE_OFF_T) 1.2547 + if (0x7fffffffL < sb->st_size) 1.2548 + { 1.2549 + PR_SetError(PR_FILE_TOO_BIG_ERROR, 0); 1.2550 + return -1; 1.2551 + } 1.2552 +#endif /* defined(_PR_HAVE_LARGE_OFF_T) */ 1.2553 + info->size = sb->st_size; 1.2554 + 1.2555 + _MD_set_fileinfo_times(sb, info); 1.2556 + return 0; 1.2557 +} /* _MD_convert_stat_to_fileinfo */ 1.2558 + 1.2559 +static int _MD_convert_stat64_to_fileinfo64( 1.2560 + const _MDStat64 *sb, 1.2561 + PRFileInfo64 *info) 1.2562 +{ 1.2563 + if (S_IFREG & sb->st_mode) 1.2564 + info->type = PR_FILE_FILE; 1.2565 + else if (S_IFDIR & sb->st_mode) 1.2566 + info->type = PR_FILE_DIRECTORY; 1.2567 + else 1.2568 + info->type = PR_FILE_OTHER; 1.2569 + 1.2570 + LL_I2L(info->size, sb->st_size); 1.2571 + 1.2572 + _MD_set_fileinfo64_times(sb, info); 1.2573 + return 0; 1.2574 +} /* _MD_convert_stat64_to_fileinfo64 */ 1.2575 + 1.2576 +PRInt32 _MD_getfileinfo(const char *fn, PRFileInfo *info) 1.2577 +{ 1.2578 + PRInt32 rv; 1.2579 + struct stat sb; 1.2580 + 1.2581 + rv = stat(fn, &sb); 1.2582 + if (rv < 0) 1.2583 + _PR_MD_MAP_STAT_ERROR(_MD_ERRNO()); 1.2584 + else if (NULL != info) 1.2585 + rv = _MD_convert_stat_to_fileinfo(&sb, info); 1.2586 + return rv; 1.2587 +} 1.2588 + 1.2589 +PRInt32 _MD_getfileinfo64(const char *fn, PRFileInfo64 *info) 1.2590 +{ 1.2591 + _MDStat64 sb; 1.2592 + PRInt32 rv = _md_iovector._stat64(fn, &sb); 1.2593 + if (rv < 0) 1.2594 + _PR_MD_MAP_STAT_ERROR(_MD_ERRNO()); 1.2595 + else if (NULL != info) 1.2596 + rv = _MD_convert_stat64_to_fileinfo64(&sb, info); 1.2597 + return rv; 1.2598 +} 1.2599 + 1.2600 +PRInt32 _MD_getopenfileinfo(const PRFileDesc *fd, PRFileInfo *info) 1.2601 +{ 1.2602 + struct stat sb; 1.2603 + PRInt32 rv = fstat(fd->secret->md.osfd, &sb); 1.2604 + if (rv < 0) 1.2605 + _PR_MD_MAP_FSTAT_ERROR(_MD_ERRNO()); 1.2606 + else if (NULL != info) 1.2607 + rv = _MD_convert_stat_to_fileinfo(&sb, info); 1.2608 + return rv; 1.2609 +} 1.2610 + 1.2611 +PRInt32 _MD_getopenfileinfo64(const PRFileDesc *fd, PRFileInfo64 *info) 1.2612 +{ 1.2613 + _MDStat64 sb; 1.2614 + PRInt32 rv = _md_iovector._fstat64(fd->secret->md.osfd, &sb); 1.2615 + if (rv < 0) 1.2616 + _PR_MD_MAP_FSTAT_ERROR(_MD_ERRNO()); 1.2617 + else if (NULL != info) 1.2618 + rv = _MD_convert_stat64_to_fileinfo64(&sb, info); 1.2619 + return rv; 1.2620 +} 1.2621 + 1.2622 +/* 1.2623 + * 
_md_iovector._open64 must be initialized to 'open' so that _PR_InitLog can 1.2624 + * open the log file during NSPR initialization, before _md_iovector is 1.2625 + * initialized by _PR_MD_FINAL_INIT. This means the log file cannot be a 1.2626 + * large file on some platforms. 1.2627 + */ 1.2628 +#ifdef SYMBIAN 1.2629 +struct _MD_IOVector _md_iovector; /* Will crash if NSPR_LOG_FILE is set. */ 1.2630 +#else 1.2631 +struct _MD_IOVector _md_iovector = { open }; 1.2632 +#endif 1.2633 + 1.2634 +/* 1.2635 +** These implementations are to emulate large file routines on systems that 1.2636 +** don't have them. Their goal is to check in case overflow occurs. Otherwise 1.2637 +** they will just operate as normal using 32-bit file routines. 1.2638 +** 1.2639 +** The checking might be pre- or post-op, depending on the semantics. 1.2640 +*/ 1.2641 + 1.2642 +#if defined(SOLARIS2_5) 1.2643 + 1.2644 +static PRIntn _MD_solaris25_fstat64(PRIntn osfd, _MDStat64 *buf) 1.2645 +{ 1.2646 + PRInt32 rv; 1.2647 + struct stat sb; 1.2648 + 1.2649 + rv = fstat(osfd, &sb); 1.2650 + if (rv >= 0) 1.2651 + { 1.2652 + /* 1.2653 + ** I'm only copying the fields that are immediately needed. 1.2654 + ** If somebody else calls this function, some of the fields 1.2655 + ** may not be defined. 1.2656 + */ 1.2657 + (void)memset(buf, 0, sizeof(_MDStat64)); 1.2658 + buf->st_mode = sb.st_mode; 1.2659 + buf->st_ctim = sb.st_ctim; 1.2660 + buf->st_mtim = sb.st_mtim; 1.2661 + buf->st_size = sb.st_size; 1.2662 + } 1.2663 + return rv; 1.2664 +} /* _MD_solaris25_fstat64 */ 1.2665 + 1.2666 +static PRIntn _MD_solaris25_stat64(const char *fn, _MDStat64 *buf) 1.2667 +{ 1.2668 + PRInt32 rv; 1.2669 + struct stat sb; 1.2670 + 1.2671 + rv = stat(fn, &sb); 1.2672 + if (rv >= 0) 1.2673 + { 1.2674 + /* 1.2675 + ** I'm only copying the fields that are immediately needed. 1.2676 + ** If somebody else calls this function, some of the fields 1.2677 + ** may not be defined. 1.2678 + */ 1.2679 + (void)memset(buf, 0, sizeof(_MDStat64)); 1.2680 + buf->st_mode = sb.st_mode; 1.2681 + buf->st_ctim = sb.st_ctim; 1.2682 + buf->st_mtim = sb.st_mtim; 1.2683 + buf->st_size = sb.st_size; 1.2684 + } 1.2685 + return rv; 1.2686 +} /* _MD_solaris25_stat64 */ 1.2687 +#endif /* defined(SOLARIS2_5) */ 1.2688 + 1.2689 +#if defined(_PR_NO_LARGE_FILES) || defined(SOLARIS2_5) 1.2690 + 1.2691 +static PROffset64 _MD_Unix_lseek64(PRIntn osfd, PROffset64 offset, PRIntn whence) 1.2692 +{ 1.2693 + PRUint64 maxoff; 1.2694 + PROffset64 rv = minus_one; 1.2695 + LL_I2L(maxoff, 0x7fffffff); 1.2696 + if (LL_CMP(offset, <=, maxoff)) 1.2697 + { 1.2698 + off_t off; 1.2699 + LL_L2I(off, offset); 1.2700 + LL_I2L(rv, lseek(osfd, off, whence)); 1.2701 + } 1.2702 + else errno = EFBIG; /* we can't go there */ 1.2703 + return rv; 1.2704 +} /* _MD_Unix_lseek64 */ 1.2705 + 1.2706 +static void* _MD_Unix_mmap64( 1.2707 + void *addr, PRSize len, PRIntn prot, PRIntn flags, 1.2708 + PRIntn fildes, PRInt64 offset) 1.2709 +{ 1.2710 + PR_SetError(PR_FILE_TOO_BIG_ERROR, 0); 1.2711 + return NULL; 1.2712 +} /* _MD_Unix_mmap64 */ 1.2713 +#endif /* defined(_PR_NO_LARGE_FILES) || defined(SOLARIS2_5) */ 1.2714 + 1.2715 +/* Android doesn't have mmap64. 
*/ 1.2716 +#if defined(ANDROID) 1.2717 +extern void *__mmap2(void *, size_t, int, int, int, size_t); 1.2718 + 1.2719 +#define ANDROID_PAGE_SIZE 4096 1.2720 + 1.2721 +static void * 1.2722 +mmap64(void *addr, size_t len, int prot, int flags, int fd, loff_t offset) 1.2723 +{ 1.2724 + if (offset & (ANDROID_PAGE_SIZE - 1)) { 1.2725 + errno = EINVAL; 1.2726 + return MAP_FAILED; 1.2727 + } 1.2728 + return __mmap2(addr, len, prot, flags, fd, offset / ANDROID_PAGE_SIZE); 1.2729 +} 1.2730 +#endif 1.2731 + 1.2732 +#if defined(OSF1) && defined(__GNUC__) 1.2733 + 1.2734 +/* 1.2735 + * On OSF1 V5.0A, <sys/stat.h> defines stat and fstat as 1.2736 + * macros when compiled under gcc, so it is rather tricky to 1.2737 + * take the addresses of the real functions the macros expend 1.2738 + * to. A simple solution is to define forwarder functions 1.2739 + * and take the addresses of the forwarder functions instead. 1.2740 + */ 1.2741 + 1.2742 +static int stat_forwarder(const char *path, struct stat *buffer) 1.2743 +{ 1.2744 + return stat(path, buffer); 1.2745 +} 1.2746 + 1.2747 +static int fstat_forwarder(int filedes, struct stat *buffer) 1.2748 +{ 1.2749 + return fstat(filedes, buffer); 1.2750 +} 1.2751 + 1.2752 +#endif 1.2753 + 1.2754 +static void _PR_InitIOV(void) 1.2755 +{ 1.2756 +#if defined(SOLARIS2_5) 1.2757 + PRLibrary *lib; 1.2758 + void *open64_func; 1.2759 + 1.2760 + open64_func = PR_FindSymbolAndLibrary("open64", &lib); 1.2761 + if (NULL != open64_func) 1.2762 + { 1.2763 + PR_ASSERT(NULL != lib); 1.2764 + _md_iovector._open64 = (_MD_Open64)open64_func; 1.2765 + _md_iovector._mmap64 = (_MD_Mmap64)PR_FindSymbol(lib, "mmap64"); 1.2766 + _md_iovector._fstat64 = (_MD_Fstat64)PR_FindSymbol(lib, "fstat64"); 1.2767 + _md_iovector._stat64 = (_MD_Stat64)PR_FindSymbol(lib, "stat64"); 1.2768 + _md_iovector._lseek64 = (_MD_Lseek64)PR_FindSymbol(lib, "lseek64"); 1.2769 + (void)PR_UnloadLibrary(lib); 1.2770 + } 1.2771 + else 1.2772 + { 1.2773 + _md_iovector._open64 = open; 1.2774 + _md_iovector._mmap64 = _MD_Unix_mmap64; 1.2775 + _md_iovector._fstat64 = _MD_solaris25_fstat64; 1.2776 + _md_iovector._stat64 = _MD_solaris25_stat64; 1.2777 + _md_iovector._lseek64 = _MD_Unix_lseek64; 1.2778 + } 1.2779 +#elif defined(_PR_NO_LARGE_FILES) 1.2780 + _md_iovector._open64 = open; 1.2781 + _md_iovector._mmap64 = _MD_Unix_mmap64; 1.2782 + _md_iovector._fstat64 = fstat; 1.2783 + _md_iovector._stat64 = stat; 1.2784 + _md_iovector._lseek64 = _MD_Unix_lseek64; 1.2785 +#elif defined(_PR_HAVE_OFF64_T) 1.2786 +#if defined(IRIX5_3) || defined(ANDROID) 1.2787 + /* 1.2788 + * Android doesn't have open64. We pass the O_LARGEFILE flag to open 1.2789 + * in _MD_open. 
1.2790 + */ 1.2791 + _md_iovector._open64 = open; 1.2792 +#else 1.2793 + _md_iovector._open64 = open64; 1.2794 +#endif 1.2795 + _md_iovector._mmap64 = mmap64; 1.2796 + _md_iovector._fstat64 = fstat64; 1.2797 + _md_iovector._stat64 = stat64; 1.2798 + _md_iovector._lseek64 = lseek64; 1.2799 +#elif defined(_PR_HAVE_LARGE_OFF_T) 1.2800 + _md_iovector._open64 = open; 1.2801 + _md_iovector._mmap64 = mmap; 1.2802 +#if defined(OSF1) && defined(__GNUC__) 1.2803 + _md_iovector._fstat64 = fstat_forwarder; 1.2804 + _md_iovector._stat64 = stat_forwarder; 1.2805 +#else 1.2806 + _md_iovector._fstat64 = fstat; 1.2807 + _md_iovector._stat64 = stat; 1.2808 +#endif 1.2809 + _md_iovector._lseek64 = lseek; 1.2810 +#else 1.2811 +#error "I don't know yet" 1.2812 +#endif 1.2813 + LL_I2L(minus_one, -1); 1.2814 +} /* _PR_InitIOV */ 1.2815 + 1.2816 +void _PR_UnixInit(void) 1.2817 +{ 1.2818 + struct sigaction sigact; 1.2819 + int rv; 1.2820 + 1.2821 + sigemptyset(&timer_set); 1.2822 + 1.2823 +#if !defined(_PR_PTHREADS) 1.2824 + 1.2825 + sigaddset(&timer_set, SIGALRM); 1.2826 + sigemptyset(&empty_set); 1.2827 + intr_timeout_ticks = 1.2828 + PR_SecondsToInterval(_PR_INTERRUPT_CHECK_INTERVAL_SECS); 1.2829 + 1.2830 +#if defined(SOLARIS) || defined(IRIX) 1.2831 + 1.2832 + if (getenv("NSPR_SIGSEGV_HANDLE")) { 1.2833 + sigact.sa_handler = sigsegvhandler; 1.2834 + sigact.sa_flags = 0; 1.2835 + sigact.sa_mask = timer_set; 1.2836 + sigaction(SIGSEGV, &sigact, 0); 1.2837 + } 1.2838 + 1.2839 + if (getenv("NSPR_SIGABRT_HANDLE")) { 1.2840 + sigact.sa_handler = sigaborthandler; 1.2841 + sigact.sa_flags = 0; 1.2842 + sigact.sa_mask = timer_set; 1.2843 + sigaction(SIGABRT, &sigact, 0); 1.2844 + } 1.2845 + 1.2846 + if (getenv("NSPR_SIGBUS_HANDLE")) { 1.2847 + sigact.sa_handler = sigbushandler; 1.2848 + sigact.sa_flags = 0; 1.2849 + sigact.sa_mask = timer_set; 1.2850 + sigaction(SIGBUS, &sigact, 0); 1.2851 + } 1.2852 + 1.2853 +#endif 1.2854 +#endif /* !defined(_PR_PTHREADS) */ 1.2855 + 1.2856 + /* 1.2857 + * Under HP-UX DCE threads, sigaction() installs a per-thread 1.2858 + * handler, so we use sigvector() to install a process-wide 1.2859 + * handler. 1.2860 + */ 1.2861 +#if defined(HPUX) && defined(_PR_DCETHREADS) 1.2862 + { 1.2863 + struct sigvec vec; 1.2864 + 1.2865 + vec.sv_handler = SIG_IGN; 1.2866 + vec.sv_mask = 0; 1.2867 + vec.sv_flags = 0; 1.2868 + rv = sigvector(SIGPIPE, &vec, NULL); 1.2869 + PR_ASSERT(0 == rv); 1.2870 + } 1.2871 +#else 1.2872 + sigact.sa_handler = SIG_IGN; 1.2873 + sigemptyset(&sigact.sa_mask); 1.2874 + sigact.sa_flags = 0; 1.2875 + rv = sigaction(SIGPIPE, &sigact, 0); 1.2876 + PR_ASSERT(0 == rv); 1.2877 +#endif /* HPUX && _PR_DCETHREADS */ 1.2878 + 1.2879 + _pr_rename_lock = PR_NewLock(); 1.2880 + PR_ASSERT(NULL != _pr_rename_lock); 1.2881 + _pr_Xfe_mon = PR_NewMonitor(); 1.2882 + PR_ASSERT(NULL != _pr_Xfe_mon); 1.2883 + 1.2884 + _PR_InitIOV(); /* one last hack */ 1.2885 +} 1.2886 + 1.2887 +void _PR_UnixCleanup(void) 1.2888 +{ 1.2889 + if (_pr_rename_lock) { 1.2890 + PR_DestroyLock(_pr_rename_lock); 1.2891 + _pr_rename_lock = NULL; 1.2892 + } 1.2893 + if (_pr_Xfe_mon) { 1.2894 + PR_DestroyMonitor(_pr_Xfe_mon); 1.2895 + _pr_Xfe_mon = NULL; 1.2896 + } 1.2897 +} 1.2898 + 1.2899 +#if !defined(_PR_PTHREADS) 1.2900 + 1.2901 +/* 1.2902 + * Variables used by the GC code, initialized in _MD_InitSegs(). 
1.2903 + */ 1.2904 +static PRInt32 _pr_zero_fd = -1; 1.2905 +static PRLock *_pr_md_lock = NULL; 1.2906 + 1.2907 +/* 1.2908 + * _MD_InitSegs -- 1.2909 + * 1.2910 + * This is Unix's version of _PR_MD_INIT_SEGS(), which is 1.2911 + * called by _PR_InitSegs(), which in turn is called by 1.2912 + * PR_Init(). 1.2913 + */ 1.2914 +void _MD_InitSegs(void) 1.2915 +{ 1.2916 +#ifdef DEBUG 1.2917 + /* 1.2918 + ** Disable using mmap(2) if NSPR_NO_MMAP is set 1.2919 + */ 1.2920 + if (getenv("NSPR_NO_MMAP")) { 1.2921 + _pr_zero_fd = -2; 1.2922 + return; 1.2923 + } 1.2924 +#endif 1.2925 + _pr_zero_fd = open("/dev/zero",O_RDWR , 0); 1.2926 + /* Prevent the fd from being inherited by child processes */ 1.2927 + fcntl(_pr_zero_fd, F_SETFD, FD_CLOEXEC); 1.2928 + _pr_md_lock = PR_NewLock(); 1.2929 +} 1.2930 + 1.2931 +PRStatus _MD_AllocSegment(PRSegment *seg, PRUint32 size, void *vaddr) 1.2932 +{ 1.2933 + static char *lastaddr = (char*) _PR_STACK_VMBASE; 1.2934 + PRStatus retval = PR_SUCCESS; 1.2935 + int prot; 1.2936 + void *rv; 1.2937 + 1.2938 + PR_ASSERT(seg != 0); 1.2939 + PR_ASSERT(size != 0); 1.2940 + 1.2941 + PR_Lock(_pr_md_lock); 1.2942 + if (_pr_zero_fd < 0) { 1.2943 +from_heap: 1.2944 + seg->vaddr = PR_MALLOC(size); 1.2945 + if (!seg->vaddr) { 1.2946 + retval = PR_FAILURE; 1.2947 + } 1.2948 + else { 1.2949 + seg->size = size; 1.2950 + } 1.2951 + goto exit; 1.2952 + } 1.2953 + 1.2954 + prot = PROT_READ|PROT_WRITE; 1.2955 + /* 1.2956 + * On Alpha Linux, the user-level thread stack needs 1.2957 + * to be made executable because longjmp/signal seem 1.2958 + * to put machine instructions on the stack. 1.2959 + */ 1.2960 +#if defined(LINUX) && defined(__alpha) 1.2961 + prot |= PROT_EXEC; 1.2962 +#endif 1.2963 + rv = mmap((vaddr != 0) ? vaddr : lastaddr, size, prot, 1.2964 + _MD_MMAP_FLAGS, 1.2965 + _pr_zero_fd, 0); 1.2966 + if (rv == (void*)-1) { 1.2967 + goto from_heap; 1.2968 + } 1.2969 + lastaddr += size; 1.2970 + seg->vaddr = rv; 1.2971 + seg->size = size; 1.2972 + seg->flags = _PR_SEG_VM; 1.2973 + 1.2974 +exit: 1.2975 + PR_Unlock(_pr_md_lock); 1.2976 + return retval; 1.2977 +} 1.2978 + 1.2979 +void _MD_FreeSegment(PRSegment *seg) 1.2980 +{ 1.2981 + if (seg->flags & _PR_SEG_VM) 1.2982 + (void) munmap(seg->vaddr, seg->size); 1.2983 + else 1.2984 + PR_DELETE(seg->vaddr); 1.2985 +} 1.2986 + 1.2987 +#endif /* _PR_PTHREADS */ 1.2988 + 1.2989 +/* 1.2990 + *----------------------------------------------------------------------- 1.2991 + * 1.2992 + * PR_Now -- 1.2993 + * 1.2994 + * Returns the current time in microseconds since the epoch. 1.2995 + * The epoch is midnight January 1, 1970 GMT. 1.2996 + * The implementation is machine dependent. This is the Unix 1.2997 + * implementation. 1.2998 + * Cf. time_t time(time_t *tp) 1.2999 + * 1.3000 + *----------------------------------------------------------------------- 1.3001 + */ 1.3002 + 1.3003 +PR_IMPLEMENT(PRTime) 1.3004 +PR_Now(void) 1.3005 +{ 1.3006 + struct timeval tv; 1.3007 + PRInt64 s, us, s2us; 1.3008 + 1.3009 + GETTIMEOFDAY(&tv); 1.3010 + LL_I2L(s2us, PR_USEC_PER_SEC); 1.3011 + LL_I2L(s, tv.tv_sec); 1.3012 + LL_I2L(us, tv.tv_usec); 1.3013 + LL_MUL(s, s, s2us); 1.3014 + LL_ADD(s, s, us); 1.3015 + return s; 1.3016 +} 1.3017 + 1.3018 +#if defined(_MD_INTERVAL_USE_GTOD) 1.3019 +/* 1.3020 + * This version of interval times is based on the time of day 1.3021 + * capability offered by the system. This isn't valid for two reasons: 1.3022 + * 1) The time of day is neither linear nor montonically increasing 1.3023 + * 2) The units here are milliseconds. 
That's not appropriate for our use. 1.3024 + */ 1.3025 +PRIntervalTime _PR_UNIX_GetInterval() 1.3026 +{ 1.3027 + struct timeval time; 1.3028 + PRIntervalTime ticks; 1.3029 + 1.3030 + (void)GETTIMEOFDAY(&time); /* fallicy of course */ 1.3031 + ticks = (PRUint32)time.tv_sec * PR_MSEC_PER_SEC; /* that's in milliseconds */ 1.3032 + ticks += (PRUint32)time.tv_usec / PR_USEC_PER_MSEC; /* so's that */ 1.3033 + return ticks; 1.3034 +} /* _PR_UNIX_GetInterval */ 1.3035 + 1.3036 +PRIntervalTime _PR_UNIX_TicksPerSecond() 1.3037 +{ 1.3038 + return 1000; /* this needs some work :) */ 1.3039 +} 1.3040 +#endif 1.3041 + 1.3042 +#if defined(HAVE_CLOCK_MONOTONIC) 1.3043 +PRIntervalTime _PR_UNIX_GetInterval2() 1.3044 +{ 1.3045 + struct timespec time; 1.3046 + PRIntervalTime ticks; 1.3047 + 1.3048 + if (clock_gettime(CLOCK_MONOTONIC, &time) != 0) { 1.3049 + fprintf(stderr, "clock_gettime failed: %d\n", errno); 1.3050 + abort(); 1.3051 + } 1.3052 + 1.3053 + ticks = (PRUint32)time.tv_sec * PR_MSEC_PER_SEC; 1.3054 + ticks += (PRUint32)time.tv_nsec / PR_NSEC_PER_MSEC; 1.3055 + return ticks; 1.3056 +} 1.3057 + 1.3058 +PRIntervalTime _PR_UNIX_TicksPerSecond2() 1.3059 +{ 1.3060 + return 1000; 1.3061 +} 1.3062 +#endif 1.3063 + 1.3064 +#if !defined(_PR_PTHREADS) 1.3065 +/* 1.3066 + * Wait for I/O on multiple descriptors. 1.3067 + * 1.3068 + * Return 0 if timed out, return -1 if interrupted, 1.3069 + * else return the number of ready descriptors. 1.3070 + */ 1.3071 +PRInt32 _PR_WaitForMultipleFDs( 1.3072 + _PRUnixPollDesc *unixpds, 1.3073 + PRInt32 pdcnt, 1.3074 + PRIntervalTime timeout) 1.3075 +{ 1.3076 + PRPollQueue pq; 1.3077 + PRIntn is; 1.3078 + PRInt32 rv; 1.3079 + _PRCPU *io_cpu; 1.3080 + _PRUnixPollDesc *unixpd, *eunixpd; 1.3081 + PRThread *me = _PR_MD_CURRENT_THREAD(); 1.3082 + 1.3083 + PR_ASSERT(!(me->flags & _PR_IDLE_THREAD)); 1.3084 + 1.3085 + if (_PR_PENDING_INTERRUPT(me)) { 1.3086 + me->flags &= ~_PR_INTERRUPT; 1.3087 + PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0); 1.3088 + return -1; 1.3089 + } 1.3090 + 1.3091 + pq.pds = unixpds; 1.3092 + pq.npds = pdcnt; 1.3093 + 1.3094 + _PR_INTSOFF(is); 1.3095 + _PR_MD_IOQ_LOCK(); 1.3096 + _PR_THREAD_LOCK(me); 1.3097 + 1.3098 + pq.thr = me; 1.3099 + io_cpu = me->cpu; 1.3100 + pq.on_ioq = PR_TRUE; 1.3101 + pq.timeout = timeout; 1.3102 + _PR_ADD_TO_IOQ(pq, me->cpu); 1.3103 + 1.3104 +#if !defined(_PR_USE_POLL) 1.3105 + eunixpd = unixpds + pdcnt; 1.3106 + for (unixpd = unixpds; unixpd < eunixpd; unixpd++) { 1.3107 + PRInt32 osfd = unixpd->osfd; 1.3108 + if (unixpd->in_flags & _PR_UNIX_POLL_READ) { 1.3109 + FD_SET(osfd, &_PR_FD_READ_SET(me->cpu)); 1.3110 + _PR_FD_READ_CNT(me->cpu)[osfd]++; 1.3111 + } 1.3112 + if (unixpd->in_flags & _PR_UNIX_POLL_WRITE) { 1.3113 + FD_SET(osfd, &_PR_FD_WRITE_SET(me->cpu)); 1.3114 + (_PR_FD_WRITE_CNT(me->cpu))[osfd]++; 1.3115 + } 1.3116 + if (unixpd->in_flags & _PR_UNIX_POLL_EXCEPT) { 1.3117 + FD_SET(osfd, &_PR_FD_EXCEPTION_SET(me->cpu)); 1.3118 + (_PR_FD_EXCEPTION_CNT(me->cpu))[osfd]++; 1.3119 + } 1.3120 + if (osfd > _PR_IOQ_MAX_OSFD(me->cpu)) { 1.3121 + _PR_IOQ_MAX_OSFD(me->cpu) = osfd; 1.3122 + } 1.3123 + } 1.3124 +#endif /* !defined(_PR_USE_POLL) */ 1.3125 + 1.3126 + if (_PR_IOQ_TIMEOUT(me->cpu) > timeout) { 1.3127 + _PR_IOQ_TIMEOUT(me->cpu) = timeout; 1.3128 + } 1.3129 + 1.3130 + _PR_IOQ_OSFD_CNT(me->cpu) += pdcnt; 1.3131 + 1.3132 + _PR_SLEEPQ_LOCK(me->cpu); 1.3133 + _PR_ADD_SLEEPQ(me, timeout); 1.3134 + me->state = _PR_IO_WAIT; 1.3135 + me->io_pending = PR_TRUE; 1.3136 + me->io_suspended = PR_FALSE; 1.3137 + _PR_SLEEPQ_UNLOCK(me->cpu); 
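                /*
                 * The thread is now on the sleep queue in _PR_IO_WAIT state
                 * with io_pending set.  Once the thread and ioq locks are
                 * released below, _PR_MD_WAIT blocks it; the select/poll
                 * processing earlier in this file (or the timeout) later
                 * moves it back to a run queue.
                 */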
1.3138 + _PR_THREAD_UNLOCK(me); 1.3139 + _PR_MD_IOQ_UNLOCK(); 1.3140 + 1.3141 + _PR_MD_WAIT(me, timeout); 1.3142 + 1.3143 + me->io_pending = PR_FALSE; 1.3144 + me->io_suspended = PR_FALSE; 1.3145 + 1.3146 + /* 1.3147 + * This thread should run on the same cpu on which it was blocked; when 1.3148 + * the IO request times out the fd sets and fd counts for the 1.3149 + * cpu are updated below. 1.3150 + */ 1.3151 + PR_ASSERT(me->cpu == io_cpu); 1.3152 + 1.3153 + /* 1.3154 + ** If we timed out the pollq might still be on the ioq. Remove it 1.3155 + ** before continuing. 1.3156 + */ 1.3157 + if (pq.on_ioq) { 1.3158 + _PR_MD_IOQ_LOCK(); 1.3159 + /* 1.3160 + * Need to check pq.on_ioq again 1.3161 + */ 1.3162 + if (pq.on_ioq) { 1.3163 + PR_REMOVE_LINK(&pq.links); 1.3164 +#ifndef _PR_USE_POLL 1.3165 + eunixpd = unixpds + pdcnt; 1.3166 + for (unixpd = unixpds; unixpd < eunixpd; unixpd++) { 1.3167 + PRInt32 osfd = unixpd->osfd; 1.3168 + PRInt16 in_flags = unixpd->in_flags; 1.3169 + 1.3170 + if (in_flags & _PR_UNIX_POLL_READ) { 1.3171 + if (--(_PR_FD_READ_CNT(me->cpu))[osfd] == 0) 1.3172 + FD_CLR(osfd, &_PR_FD_READ_SET(me->cpu)); 1.3173 + } 1.3174 + if (in_flags & _PR_UNIX_POLL_WRITE) { 1.3175 + if (--(_PR_FD_WRITE_CNT(me->cpu))[osfd] == 0) 1.3176 + FD_CLR(osfd, &_PR_FD_WRITE_SET(me->cpu)); 1.3177 + } 1.3178 + if (in_flags & _PR_UNIX_POLL_EXCEPT) { 1.3179 + if (--(_PR_FD_EXCEPTION_CNT(me->cpu))[osfd] == 0) 1.3180 + FD_CLR(osfd, &_PR_FD_EXCEPTION_SET(me->cpu)); 1.3181 + } 1.3182 + } 1.3183 +#endif /* _PR_USE_POLL */ 1.3184 + PR_ASSERT(pq.npds == pdcnt); 1.3185 + _PR_IOQ_OSFD_CNT(me->cpu) -= pdcnt; 1.3186 + PR_ASSERT(_PR_IOQ_OSFD_CNT(me->cpu) >= 0); 1.3187 + } 1.3188 + _PR_MD_IOQ_UNLOCK(); 1.3189 + } 1.3190 + /* XXX Should we use _PR_FAST_INTSON or _PR_INTSON? */ 1.3191 + if (1 == pdcnt) { 1.3192 + _PR_FAST_INTSON(is); 1.3193 + } else { 1.3194 + _PR_INTSON(is); 1.3195 + } 1.3196 + 1.3197 + if (_PR_PENDING_INTERRUPT(me)) { 1.3198 + me->flags &= ~_PR_INTERRUPT; 1.3199 + PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0); 1.3200 + return -1; 1.3201 + } 1.3202 + 1.3203 + rv = 0; 1.3204 + if (pq.on_ioq == PR_FALSE) { 1.3205 + /* Count the number of ready descriptors */ 1.3206 + while (--pdcnt >= 0) { 1.3207 + if (unixpds->out_flags != 0) { 1.3208 + rv++; 1.3209 + } 1.3210 + unixpds++; 1.3211 + } 1.3212 + } 1.3213 + 1.3214 + return rv; 1.3215 +} 1.3216 + 1.3217 +/* 1.3218 + * Unblock threads waiting for I/O 1.3219 + * used when interrupting threads 1.3220 + * 1.3221 + * NOTE: The thread lock should held when this function is called. 1.3222 + * On return, the thread lock is released. 
1.3223 + */ 1.3224 +void _PR_Unblock_IO_Wait(PRThread *thr) 1.3225 +{ 1.3226 + int pri = thr->priority; 1.3227 + _PRCPU *cpu = thr->cpu; 1.3228 + 1.3229 + /* 1.3230 + * GLOBAL threads wakeup periodically to check for interrupt 1.3231 + */ 1.3232 + if (_PR_IS_NATIVE_THREAD(thr)) { 1.3233 + _PR_THREAD_UNLOCK(thr); 1.3234 + return; 1.3235 + } 1.3236 + 1.3237 + PR_ASSERT(thr->flags & (_PR_ON_SLEEPQ | _PR_ON_PAUSEQ)); 1.3238 + _PR_SLEEPQ_LOCK(cpu); 1.3239 + _PR_DEL_SLEEPQ(thr, PR_TRUE); 1.3240 + _PR_SLEEPQ_UNLOCK(cpu); 1.3241 + 1.3242 + PR_ASSERT(!(thr->flags & _PR_IDLE_THREAD)); 1.3243 + thr->state = _PR_RUNNABLE; 1.3244 + _PR_RUNQ_LOCK(cpu); 1.3245 + _PR_ADD_RUNQ(thr, cpu, pri); 1.3246 + _PR_RUNQ_UNLOCK(cpu); 1.3247 + _PR_THREAD_UNLOCK(thr); 1.3248 + _PR_MD_WAKEUP_WAITER(thr); 1.3249 +} 1.3250 +#endif /* !defined(_PR_PTHREADS) */ 1.3251 + 1.3252 +/* 1.3253 + * When a nonblocking connect has completed, determine whether it 1.3254 + * succeeded or failed, and if it failed, what the error code is. 1.3255 + * 1.3256 + * The function returns the error code. An error code of 0 means 1.3257 + * that the nonblocking connect succeeded. 1.3258 + */ 1.3259 + 1.3260 +int _MD_unix_get_nonblocking_connect_error(int osfd) 1.3261 +{ 1.3262 +#if defined(NTO) 1.3263 + /* Neutrino does not support the SO_ERROR socket option */ 1.3264 + PRInt32 rv; 1.3265 + PRNetAddr addr; 1.3266 + _PRSockLen_t addrlen = sizeof(addr); 1.3267 + 1.3268 + /* Test to see if we are using the Tiny TCP/IP Stack or the Full one. */ 1.3269 + struct statvfs superblock; 1.3270 + rv = fstatvfs(osfd, &superblock); 1.3271 + if (rv == 0) { 1.3272 + if (strcmp(superblock.f_basetype, "ttcpip") == 0) { 1.3273 + /* Using the Tiny Stack! */ 1.3274 + rv = getpeername(osfd, (struct sockaddr *) &addr, 1.3275 + (_PRSockLen_t *) &addrlen); 1.3276 + if (rv == -1) { 1.3277 + int errno_copy = errno; /* make a copy so I don't 1.3278 + * accidentally reset */ 1.3279 + 1.3280 + if (errno_copy == ENOTCONN) { 1.3281 + struct stat StatInfo; 1.3282 + rv = fstat(osfd, &StatInfo); 1.3283 + if (rv == 0) { 1.3284 + time_t current_time = time(NULL); 1.3285 + 1.3286 + /* 1.3287 + * this is a real hack, can't explain why it 1.3288 + * works it just does 1.3289 + */ 1.3290 + if (abs(current_time - StatInfo.st_atime) < 5) { 1.3291 + return ECONNREFUSED; 1.3292 + } else { 1.3293 + return ETIMEDOUT; 1.3294 + } 1.3295 + } else { 1.3296 + return ECONNREFUSED; 1.3297 + } 1.3298 + } else { 1.3299 + return errno_copy; 1.3300 + } 1.3301 + } else { 1.3302 + /* No Error */ 1.3303 + return 0; 1.3304 + } 1.3305 + } else { 1.3306 + /* Have the FULL Stack which supports SO_ERROR */ 1.3307 + /* Hasn't been written yet, never been tested! */ 1.3308 + /* Jerry.Kirk@Nexwarecorp.com */ 1.3309 + 1.3310 + int err; 1.3311 + _PRSockLen_t optlen = sizeof(err); 1.3312 + 1.3313 + if (getsockopt(osfd, SOL_SOCKET, SO_ERROR, 1.3314 + (char *) &err, &optlen) == -1) { 1.3315 + return errno; 1.3316 + } else { 1.3317 + return err; 1.3318 + } 1.3319 + } 1.3320 + } else { 1.3321 + return ECONNREFUSED; 1.3322 + } 1.3323 +#elif defined(UNIXWARE) 1.3324 + /* 1.3325 + * getsockopt() fails with EPIPE, so use getmsg() instead. 
1.3326 + */ 1.3327 + 1.3328 + int rv; 1.3329 + int flags = 0; 1.3330 + rv = getmsg(osfd, NULL, NULL, &flags); 1.3331 + PR_ASSERT(-1 == rv || 0 == rv); 1.3332 + if (-1 == rv && errno != EAGAIN && errno != EWOULDBLOCK) { 1.3333 + return errno; 1.3334 + } 1.3335 + return 0; /* no error */ 1.3336 +#else 1.3337 + int err; 1.3338 + _PRSockLen_t optlen = sizeof(err); 1.3339 + if (getsockopt(osfd, SOL_SOCKET, SO_ERROR, (char *) &err, &optlen) == -1) { 1.3340 + return errno; 1.3341 + } else { 1.3342 + return err; 1.3343 + } 1.3344 +#endif 1.3345 +} 1.3346 + 1.3347 +/************************************************************************/ 1.3348 + 1.3349 +/* 1.3350 +** Special hacks for xlib. Xlib/Xt/Xm is not re-entrant nor is it thread 1.3351 +** safe. Unfortunately, neither is mozilla. To make these programs work 1.3352 +** in a pre-emptive threaded environment, we need to use a lock. 1.3353 +*/ 1.3354 + 1.3355 +void PR_XLock(void) 1.3356 +{ 1.3357 + PR_EnterMonitor(_pr_Xfe_mon); 1.3358 +} 1.3359 + 1.3360 +void PR_XUnlock(void) 1.3361 +{ 1.3362 + PR_ExitMonitor(_pr_Xfe_mon); 1.3363 +} 1.3364 + 1.3365 +PRBool PR_XIsLocked(void) 1.3366 +{ 1.3367 + return (PR_InMonitor(_pr_Xfe_mon)) ? PR_TRUE : PR_FALSE; 1.3368 +} 1.3369 + 1.3370 +void PR_XWait(int ms) 1.3371 +{ 1.3372 + PR_Wait(_pr_Xfe_mon, PR_MillisecondsToInterval(ms)); 1.3373 +} 1.3374 + 1.3375 +void PR_XNotify(void) 1.3376 +{ 1.3377 + PR_Notify(_pr_Xfe_mon); 1.3378 +} 1.3379 + 1.3380 +void PR_XNotifyAll(void) 1.3381 +{ 1.3382 + PR_NotifyAll(_pr_Xfe_mon); 1.3383 +} 1.3384 + 1.3385 +#if defined(HAVE_FCNTL_FILE_LOCKING) 1.3386 + 1.3387 +PRStatus 1.3388 +_MD_LockFile(PRInt32 f) 1.3389 +{ 1.3390 + PRInt32 rv; 1.3391 + struct flock arg; 1.3392 + 1.3393 + arg.l_type = F_WRLCK; 1.3394 + arg.l_whence = SEEK_SET; 1.3395 + arg.l_start = 0; 1.3396 + arg.l_len = 0; /* until EOF */ 1.3397 + rv = fcntl(f, F_SETLKW, &arg); 1.3398 + if (rv == 0) 1.3399 + return PR_SUCCESS; 1.3400 + _PR_MD_MAP_FLOCK_ERROR(_MD_ERRNO()); 1.3401 + return PR_FAILURE; 1.3402 +} 1.3403 + 1.3404 +PRStatus 1.3405 +_MD_TLockFile(PRInt32 f) 1.3406 +{ 1.3407 + PRInt32 rv; 1.3408 + struct flock arg; 1.3409 + 1.3410 + arg.l_type = F_WRLCK; 1.3411 + arg.l_whence = SEEK_SET; 1.3412 + arg.l_start = 0; 1.3413 + arg.l_len = 0; /* until EOF */ 1.3414 + rv = fcntl(f, F_SETLK, &arg); 1.3415 + if (rv == 0) 1.3416 + return PR_SUCCESS; 1.3417 + _PR_MD_MAP_FLOCK_ERROR(_MD_ERRNO()); 1.3418 + return PR_FAILURE; 1.3419 +} 1.3420 + 1.3421 +PRStatus 1.3422 +_MD_UnlockFile(PRInt32 f) 1.3423 +{ 1.3424 + PRInt32 rv; 1.3425 + struct flock arg; 1.3426 + 1.3427 + arg.l_type = F_UNLCK; 1.3428 + arg.l_whence = SEEK_SET; 1.3429 + arg.l_start = 0; 1.3430 + arg.l_len = 0; /* until EOF */ 1.3431 + rv = fcntl(f, F_SETLK, &arg); 1.3432 + if (rv == 0) 1.3433 + return PR_SUCCESS; 1.3434 + _PR_MD_MAP_FLOCK_ERROR(_MD_ERRNO()); 1.3435 + return PR_FAILURE; 1.3436 +} 1.3437 + 1.3438 +#elif defined(HAVE_BSD_FLOCK) 1.3439 + 1.3440 +#include <sys/file.h> 1.3441 + 1.3442 +PRStatus 1.3443 +_MD_LockFile(PRInt32 f) 1.3444 +{ 1.3445 + PRInt32 rv; 1.3446 + rv = flock(f, LOCK_EX); 1.3447 + if (rv == 0) 1.3448 + return PR_SUCCESS; 1.3449 + _PR_MD_MAP_FLOCK_ERROR(_MD_ERRNO()); 1.3450 + return PR_FAILURE; 1.3451 +} 1.3452 + 1.3453 +PRStatus 1.3454 +_MD_TLockFile(PRInt32 f) 1.3455 +{ 1.3456 + PRInt32 rv; 1.3457 + rv = flock(f, LOCK_EX|LOCK_NB); 1.3458 + if (rv == 0) 1.3459 + return PR_SUCCESS; 1.3460 + _PR_MD_MAP_FLOCK_ERROR(_MD_ERRNO()); 1.3461 + return PR_FAILURE; 1.3462 +} 1.3463 + 1.3464 +PRStatus 1.3465 +_MD_UnlockFile(PRInt32 f) 1.3466 
+{ 1.3467 + PRInt32 rv; 1.3468 + rv = flock(f, LOCK_UN); 1.3469 + if (rv == 0) 1.3470 + return PR_SUCCESS; 1.3471 + _PR_MD_MAP_FLOCK_ERROR(_MD_ERRNO()); 1.3472 + return PR_FAILURE; 1.3473 +} 1.3474 +#else 1.3475 + 1.3476 +PRStatus 1.3477 +_MD_LockFile(PRInt32 f) 1.3478 +{ 1.3479 + PRInt32 rv; 1.3480 + rv = lockf(f, F_LOCK, 0); 1.3481 + if (rv == 0) 1.3482 + return PR_SUCCESS; 1.3483 + _PR_MD_MAP_LOCKF_ERROR(_MD_ERRNO()); 1.3484 + return PR_FAILURE; 1.3485 +} 1.3486 + 1.3487 +PRStatus 1.3488 +_MD_TLockFile(PRInt32 f) 1.3489 +{ 1.3490 + PRInt32 rv; 1.3491 + rv = lockf(f, F_TLOCK, 0); 1.3492 + if (rv == 0) 1.3493 + return PR_SUCCESS; 1.3494 + _PR_MD_MAP_LOCKF_ERROR(_MD_ERRNO()); 1.3495 + return PR_FAILURE; 1.3496 +} 1.3497 + 1.3498 +PRStatus 1.3499 +_MD_UnlockFile(PRInt32 f) 1.3500 +{ 1.3501 + PRInt32 rv; 1.3502 + rv = lockf(f, F_ULOCK, 0); 1.3503 + if (rv == 0) 1.3504 + return PR_SUCCESS; 1.3505 + _PR_MD_MAP_LOCKF_ERROR(_MD_ERRNO()); 1.3506 + return PR_FAILURE; 1.3507 +} 1.3508 +#endif 1.3509 + 1.3510 +PRStatus _MD_gethostname(char *name, PRUint32 namelen) 1.3511 +{ 1.3512 + PRIntn rv; 1.3513 + 1.3514 + rv = gethostname(name, namelen); 1.3515 + if (0 == rv) { 1.3516 + return PR_SUCCESS; 1.3517 + } 1.3518 + _PR_MD_MAP_GETHOSTNAME_ERROR(_MD_ERRNO()); 1.3519 + return PR_FAILURE; 1.3520 +} 1.3521 + 1.3522 +PRStatus _MD_getsysinfo(PRSysInfo cmd, char *name, PRUint32 namelen) 1.3523 +{ 1.3524 + struct utsname info; 1.3525 + 1.3526 + PR_ASSERT((cmd == PR_SI_SYSNAME) || (cmd == PR_SI_RELEASE)); 1.3527 + 1.3528 + if (uname(&info) == -1) { 1.3529 + _PR_MD_MAP_DEFAULT_ERROR(errno); 1.3530 + return PR_FAILURE; 1.3531 + } 1.3532 + if (PR_SI_SYSNAME == cmd) 1.3533 + (void)PR_snprintf(name, namelen, info.sysname); 1.3534 + else if (PR_SI_RELEASE == cmd) 1.3535 + (void)PR_snprintf(name, namelen, info.release); 1.3536 + else 1.3537 + return PR_FAILURE; 1.3538 + return PR_SUCCESS; 1.3539 +} 1.3540 + 1.3541 +/* 1.3542 + ******************************************************************* 1.3543 + * 1.3544 + * Memory-mapped files 1.3545 + * 1.3546 + ******************************************************************* 1.3547 + */ 1.3548 + 1.3549 +PRStatus _MD_CreateFileMap(PRFileMap *fmap, PRInt64 size) 1.3550 +{ 1.3551 + PRFileInfo info; 1.3552 + PRUint32 sz; 1.3553 + 1.3554 + LL_L2UI(sz, size); 1.3555 + if (sz) { 1.3556 + if (PR_GetOpenFileInfo(fmap->fd, &info) == PR_FAILURE) { 1.3557 + return PR_FAILURE; 1.3558 + } 1.3559 + if (sz > info.size) { 1.3560 + /* 1.3561 + * Need to extend the file 1.3562 + */ 1.3563 + if (fmap->prot != PR_PROT_READWRITE) { 1.3564 + PR_SetError(PR_NO_ACCESS_RIGHTS_ERROR, 0); 1.3565 + return PR_FAILURE; 1.3566 + } 1.3567 + if (PR_Seek(fmap->fd, sz - 1, PR_SEEK_SET) == -1) { 1.3568 + return PR_FAILURE; 1.3569 + } 1.3570 + if (PR_Write(fmap->fd, "", 1) != 1) { 1.3571 + return PR_FAILURE; 1.3572 + } 1.3573 + } 1.3574 + } 1.3575 + if (fmap->prot == PR_PROT_READONLY) { 1.3576 + fmap->md.prot = PROT_READ; 1.3577 +#ifdef OSF1V4_MAP_PRIVATE_BUG 1.3578 + /* 1.3579 + * Use MAP_SHARED to work around a bug in OSF1 V4.0D 1.3580 + * (QAR 70220 in the OSF_QAR database) that results in 1.3581 + * corrupted data in the memory-mapped region. This 1.3582 + * bug is fixed in V5.0. 
1.3583 + */ 1.3584 + fmap->md.flags = MAP_SHARED; 1.3585 +#else 1.3586 + fmap->md.flags = MAP_PRIVATE; 1.3587 +#endif 1.3588 + } else if (fmap->prot == PR_PROT_READWRITE) { 1.3589 + fmap->md.prot = PROT_READ | PROT_WRITE; 1.3590 + fmap->md.flags = MAP_SHARED; 1.3591 + } else { 1.3592 + PR_ASSERT(fmap->prot == PR_PROT_WRITECOPY); 1.3593 + fmap->md.prot = PROT_READ | PROT_WRITE; 1.3594 + fmap->md.flags = MAP_PRIVATE; 1.3595 + } 1.3596 + return PR_SUCCESS; 1.3597 +} 1.3598 + 1.3599 +void * _MD_MemMap( 1.3600 + PRFileMap *fmap, 1.3601 + PRInt64 offset, 1.3602 + PRUint32 len) 1.3603 +{ 1.3604 + PRInt32 off; 1.3605 + void *addr; 1.3606 + 1.3607 + LL_L2I(off, offset); 1.3608 + if ((addr = mmap(0, len, fmap->md.prot, fmap->md.flags, 1.3609 + fmap->fd->secret->md.osfd, off)) == (void *) -1) { 1.3610 + _PR_MD_MAP_MMAP_ERROR(_MD_ERRNO()); 1.3611 + addr = NULL; 1.3612 + } 1.3613 + return addr; 1.3614 +} 1.3615 + 1.3616 +PRStatus _MD_MemUnmap(void *addr, PRUint32 len) 1.3617 +{ 1.3618 + if (munmap(addr, len) == 0) { 1.3619 + return PR_SUCCESS; 1.3620 + } 1.3621 + _PR_MD_MAP_DEFAULT_ERROR(errno); 1.3622 + return PR_FAILURE; 1.3623 +} 1.3624 + 1.3625 +PRStatus _MD_CloseFileMap(PRFileMap *fmap) 1.3626 +{ 1.3627 + if ( PR_TRUE == fmap->md.isAnonFM ) { 1.3628 + PRStatus rc = PR_Close( fmap->fd ); 1.3629 + if ( PR_FAILURE == rc ) { 1.3630 + PR_LOG( _pr_io_lm, PR_LOG_DEBUG, 1.3631 + ("_MD_CloseFileMap(): error closing anonymnous file map osfd")); 1.3632 + return PR_FAILURE; 1.3633 + } 1.3634 + } 1.3635 + PR_DELETE(fmap); 1.3636 + return PR_SUCCESS; 1.3637 +} 1.3638 + 1.3639 +PRStatus _MD_SyncMemMap( 1.3640 + PRFileDesc *fd, 1.3641 + void *addr, 1.3642 + PRUint32 len) 1.3643 +{ 1.3644 + /* msync(..., MS_SYNC) alone is sufficient to flush modified data to disk 1.3645 + * synchronously. It is not necessary to call fsync. */ 1.3646 + if (msync(addr, len, MS_SYNC) == 0) { 1.3647 + return PR_SUCCESS; 1.3648 + } 1.3649 + _PR_MD_MAP_DEFAULT_ERROR(errno); 1.3650 + return PR_FAILURE; 1.3651 +} 1.3652 + 1.3653 +#if defined(_PR_NEED_FAKE_POLL) 1.3654 + 1.3655 +/* 1.3656 + * Some platforms don't have poll(). For easier porting of code 1.3657 + * that calls poll(), we emulate poll() using select(). 1.3658 + */ 1.3659 + 1.3660 +int poll(struct pollfd *filedes, unsigned long nfds, int timeout) 1.3661 +{ 1.3662 + int i; 1.3663 + int rv; 1.3664 + int maxfd; 1.3665 + fd_set rd, wr, ex; 1.3666 + struct timeval tv, *tvp; 1.3667 + 1.3668 + if (timeout < 0 && timeout != -1) { 1.3669 + errno = EINVAL; 1.3670 + return -1; 1.3671 + } 1.3672 + 1.3673 + if (timeout == -1) { 1.3674 + tvp = NULL; 1.3675 + } else { 1.3676 + tv.tv_sec = timeout / 1000; 1.3677 + tv.tv_usec = (timeout % 1000) * 1000; 1.3678 + tvp = &tv; 1.3679 + } 1.3680 + 1.3681 + maxfd = -1; 1.3682 + FD_ZERO(&rd); 1.3683 + FD_ZERO(&wr); 1.3684 + FD_ZERO(&ex); 1.3685 + 1.3686 + for (i = 0; i < nfds; i++) { 1.3687 + int osfd = filedes[i].fd; 1.3688 + int events = filedes[i].events; 1.3689 + PRBool fdHasEvent = PR_FALSE; 1.3690 + 1.3691 + if (osfd < 0) { 1.3692 + continue; /* Skip this osfd. */ 1.3693 + } 1.3694 + 1.3695 + /* 1.3696 + * Map the poll events to the select fd_sets. 1.3697 + * POLLIN, POLLRDNORM ===> readable 1.3698 + * POLLOUT, POLLWRNORM ===> writable 1.3699 + * POLLPRI, POLLRDBAND ===> exception 1.3700 + * POLLNORM, POLLWRBAND (and POLLMSG on some platforms) 1.3701 + * are ignored. 1.3702 + * 1.3703 + * The output events POLLERR and POLLHUP are never turned on. 1.3704 + * POLLNVAL may be turned on. 
1.3705 + */ 1.3706 + 1.3707 + if (events & (POLLIN | POLLRDNORM)) { 1.3708 + FD_SET(osfd, &rd); 1.3709 + fdHasEvent = PR_TRUE; 1.3710 + } 1.3711 + if (events & (POLLOUT | POLLWRNORM)) { 1.3712 + FD_SET(osfd, &wr); 1.3713 + fdHasEvent = PR_TRUE; 1.3714 + } 1.3715 + if (events & (POLLPRI | POLLRDBAND)) { 1.3716 + FD_SET(osfd, &ex); 1.3717 + fdHasEvent = PR_TRUE; 1.3718 + } 1.3719 + if (fdHasEvent && osfd > maxfd) { 1.3720 + maxfd = osfd; 1.3721 + } 1.3722 + } 1.3723 + 1.3724 + rv = select(maxfd + 1, &rd, &wr, &ex, tvp); 1.3725 + 1.3726 + /* Compute poll results */ 1.3727 + if (rv > 0) { 1.3728 + rv = 0; 1.3729 + for (i = 0; i < nfds; i++) { 1.3730 + PRBool fdHasEvent = PR_FALSE; 1.3731 + 1.3732 + filedes[i].revents = 0; 1.3733 + if (filedes[i].fd < 0) { 1.3734 + continue; 1.3735 + } 1.3736 + if (FD_ISSET(filedes[i].fd, &rd)) { 1.3737 + if (filedes[i].events & POLLIN) { 1.3738 + filedes[i].revents |= POLLIN; 1.3739 + } 1.3740 + if (filedes[i].events & POLLRDNORM) { 1.3741 + filedes[i].revents |= POLLRDNORM; 1.3742 + } 1.3743 + fdHasEvent = PR_TRUE; 1.3744 + } 1.3745 + if (FD_ISSET(filedes[i].fd, &wr)) { 1.3746 + if (filedes[i].events & POLLOUT) { 1.3747 + filedes[i].revents |= POLLOUT; 1.3748 + } 1.3749 + if (filedes[i].events & POLLWRNORM) { 1.3750 + filedes[i].revents |= POLLWRNORM; 1.3751 + } 1.3752 + fdHasEvent = PR_TRUE; 1.3753 + } 1.3754 + if (FD_ISSET(filedes[i].fd, &ex)) { 1.3755 + if (filedes[i].events & POLLPRI) { 1.3756 + filedes[i].revents |= POLLPRI; 1.3757 + } 1.3758 + if (filedes[i].events & POLLRDBAND) { 1.3759 + filedes[i].revents |= POLLRDBAND; 1.3760 + } 1.3761 + fdHasEvent = PR_TRUE; 1.3762 + } 1.3763 + if (fdHasEvent) { 1.3764 + rv++; 1.3765 + } 1.3766 + } 1.3767 + PR_ASSERT(rv > 0); 1.3768 + } else if (rv == -1 && errno == EBADF) { 1.3769 + rv = 0; 1.3770 + for (i = 0; i < nfds; i++) { 1.3771 + filedes[i].revents = 0; 1.3772 + if (filedes[i].fd < 0) { 1.3773 + continue; 1.3774 + } 1.3775 + if (fcntl(filedes[i].fd, F_GETFL, 0) == -1) { 1.3776 + filedes[i].revents = POLLNVAL; 1.3777 + rv++; 1.3778 + } 1.3779 + } 1.3780 + PR_ASSERT(rv > 0); 1.3781 + } 1.3782 + PR_ASSERT(-1 != timeout || rv != 0); 1.3783 + 1.3784 + return rv; 1.3785 +} 1.3786 +#endif /* _PR_NEED_FAKE_POLL */
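The emulated poll() above keeps the native calling convention, so callers written against poll() need no changes. As a rough illustration of the pattern that the generic SO_ERROR branch of _MD_unix_get_nonblocking_connect_error serves, the following standalone sketch (not part of the patch; the helper name wait_until_writable and its timeout parameter are invented for the example) shows the usual way a non-blocking connect() is completed: wait for the socket to become writable, then read the pending error with getsockopt(SO_ERROR). The Neutrino tiny-stack and UnixWare special cases earlier in the file exist precisely because that option is unusable there and the same answer has to be approximated by other means.

#include <errno.h>
#include <poll.h>
#include <sys/socket.h>

/*
 * Wait until osfd becomes writable, then fetch the deferred connect
 * error.  Returns 0 if the nonblocking connect succeeded, otherwise an
 * errno value.  Mirrors the generic SO_ERROR branch above.
 */
static int wait_until_writable(int osfd, int timeout_ms)
{
    struct pollfd pd;
    int n;
    int err = 0;
    socklen_t optlen = sizeof(err);

    pd.fd = osfd;
    pd.events = POLLOUT;
    pd.revents = 0;

    n = poll(&pd, 1, timeout_ms);
    if (n == 0) {
        return ETIMEDOUT;        /* timed out waiting for writability */
    }
    if (n < 0) {
        return errno;            /* poll itself failed */
    }
    if (getsockopt(osfd, SOL_SOCKET, SO_ERROR, (char *)&err, &optlen) == -1) {
        return errno;            /* could not query the socket error */
    }
    return err;                  /* 0 means the connect completed cleanly */
}

A caller would put the socket in non-blocking mode, call connect(), and invoke the helper only when connect() fails with EINPROGRESS.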