michael@0: /* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ michael@0: /* This Source Code Form is subject to the terms of the Mozilla Public michael@0: * License, v. 2.0. If a copy of the MPL was not distributed with this michael@0: * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ michael@0: michael@0: #include "primpl.h" michael@0: michael@0: #include michael@0: #include michael@0: #include michael@0: #include michael@0: #include michael@0: #include michael@0: #include michael@0: #include michael@0: #include michael@0: #include michael@0: #include michael@0: michael@0: #ifdef _PR_POLL_AVAILABLE michael@0: #include michael@0: #endif michael@0: michael@0: /* To get FIONREAD */ michael@0: #if defined(UNIXWARE) michael@0: #include michael@0: #endif michael@0: michael@0: #if defined(NTO) michael@0: #include michael@0: #endif michael@0: michael@0: /* michael@0: * Make sure _PRSockLen_t is 32-bit, because we will cast a PRUint32* or michael@0: * PRInt32* pointer to a _PRSockLen_t* pointer. michael@0: */ michael@0: #if defined(HAVE_SOCKLEN_T) \ michael@0: || (defined(__GLIBC__) && __GLIBC__ >= 2) michael@0: #define _PRSockLen_t socklen_t michael@0: #elif defined(IRIX) || defined(HPUX) || defined(OSF1) || defined(SOLARIS) \ michael@0: || defined(AIX4_1) || defined(LINUX) \ michael@0: || defined(BSDI) || defined(SCO) \ michael@0: || defined(DARWIN) \ michael@0: || defined(QNX) michael@0: #define _PRSockLen_t int michael@0: #elif (defined(AIX) && !defined(AIX4_1)) || defined(FREEBSD) \ michael@0: || defined(NETBSD) || defined(OPENBSD) || defined(UNIXWARE) \ michael@0: || defined(DGUX) || defined(NTO) || defined(RISCOS) michael@0: #define _PRSockLen_t size_t michael@0: #else michael@0: #error "Cannot determine architecture" michael@0: #endif michael@0: michael@0: /* michael@0: ** Global lock variable used to bracket calls into rusty libraries that michael@0: ** aren't thread safe (like libc, libX, etc). 
michael@0: */ michael@0: static PRLock *_pr_rename_lock = NULL; michael@0: static PRMonitor *_pr_Xfe_mon = NULL; michael@0: michael@0: static PRInt64 minus_one; michael@0: michael@0: sigset_t timer_set; michael@0: michael@0: #if !defined(_PR_PTHREADS) michael@0: michael@0: static sigset_t empty_set; michael@0: michael@0: #ifdef SOLARIS michael@0: #include michael@0: #include michael@0: #endif michael@0: michael@0: #ifndef PIPE_BUF michael@0: #define PIPE_BUF 512 michael@0: #endif michael@0: michael@0: /* michael@0: * _nspr_noclock - if set clock interrupts are disabled michael@0: */ michael@0: int _nspr_noclock = 1; michael@0: michael@0: #ifdef IRIX michael@0: extern PRInt32 _nspr_terminate_on_error; michael@0: #endif michael@0: michael@0: /* michael@0: * There is an assertion in this code that NSPR's definition of PRIOVec michael@0: * is bit compatible with UNIX' definition of a struct iovec. This is michael@0: * applicable to the 'writev()' operations where the types are casually michael@0: * cast to avoid warnings. 
michael@0: */ michael@0: michael@0: int _pr_md_pipefd[2] = { -1, -1 }; michael@0: static char _pr_md_pipebuf[PIPE_BUF]; michael@0: static PRInt32 local_io_wait(PRInt32 osfd, PRInt32 wait_flag, michael@0: PRIntervalTime timeout); michael@0: michael@0: _PRInterruptTable _pr_interruptTable[] = { michael@0: { michael@0: "clock", _PR_MISSED_CLOCK, _PR_ClockInterrupt, }, michael@0: { michael@0: 0 } michael@0: }; michael@0: michael@0: void _MD_unix_init_running_cpu(_PRCPU *cpu) michael@0: { michael@0: PR_INIT_CLIST(&(cpu->md.md_unix.ioQ)); michael@0: cpu->md.md_unix.ioq_max_osfd = -1; michael@0: cpu->md.md_unix.ioq_timeout = PR_INTERVAL_NO_TIMEOUT; michael@0: } michael@0: michael@0: PRStatus _MD_open_dir(_MDDir *d, const char *name) michael@0: { michael@0: int err; michael@0: michael@0: d->d = opendir(name); michael@0: if (!d->d) { michael@0: err = _MD_ERRNO(); michael@0: _PR_MD_MAP_OPENDIR_ERROR(err); michael@0: return PR_FAILURE; michael@0: } michael@0: return PR_SUCCESS; michael@0: } michael@0: michael@0: PRInt32 _MD_close_dir(_MDDir *d) michael@0: { michael@0: int rv = 0, err; michael@0: michael@0: if (d->d) { michael@0: rv = closedir(d->d); michael@0: if (rv == -1) { michael@0: err = _MD_ERRNO(); michael@0: _PR_MD_MAP_CLOSEDIR_ERROR(err); michael@0: } michael@0: } michael@0: return rv; michael@0: } michael@0: michael@0: char * _MD_read_dir(_MDDir *d, PRIntn flags) michael@0: { michael@0: struct dirent *de; michael@0: int err; michael@0: michael@0: for (;;) { michael@0: /* michael@0: * XXX: readdir() is not MT-safe. There is an MT-safe version michael@0: * readdir_r() on some systems. 
michael@0: */ michael@0: _MD_ERRNO() = 0; michael@0: de = readdir(d->d); michael@0: if (!de) { michael@0: err = _MD_ERRNO(); michael@0: _PR_MD_MAP_READDIR_ERROR(err); michael@0: return 0; michael@0: } michael@0: if ((flags & PR_SKIP_DOT) && michael@0: (de->d_name[0] == '.') && (de->d_name[1] == 0)) michael@0: continue; michael@0: if ((flags & PR_SKIP_DOT_DOT) && michael@0: (de->d_name[0] == '.') && (de->d_name[1] == '.') && michael@0: (de->d_name[2] == 0)) michael@0: continue; michael@0: if ((flags & PR_SKIP_HIDDEN) && (de->d_name[0] == '.')) michael@0: continue; michael@0: break; michael@0: } michael@0: return de->d_name; michael@0: } michael@0: michael@0: PRInt32 _MD_delete(const char *name) michael@0: { michael@0: PRInt32 rv, err; michael@0: #ifdef UNIXWARE michael@0: sigset_t set, oset; michael@0: #endif michael@0: michael@0: #ifdef UNIXWARE michael@0: sigfillset(&set); michael@0: sigprocmask(SIG_SETMASK, &set, &oset); michael@0: #endif michael@0: rv = unlink(name); michael@0: #ifdef UNIXWARE michael@0: sigprocmask(SIG_SETMASK, &oset, NULL); michael@0: #endif michael@0: if (rv == -1) { michael@0: err = _MD_ERRNO(); michael@0: _PR_MD_MAP_UNLINK_ERROR(err); michael@0: } michael@0: return(rv); michael@0: } michael@0: michael@0: PRInt32 _MD_rename(const char *from, const char *to) michael@0: { michael@0: PRInt32 rv = -1, err; michael@0: michael@0: /* michael@0: ** This is trying to enforce the semantics of WINDOZE' rename michael@0: ** operation. That means one is not allowed to rename over top michael@0: ** of an existing file. Holding a lock across these two function michael@0: ** and the open function is known to be a bad idea, but .... 
michael@0: */ michael@0: if (NULL != _pr_rename_lock) michael@0: PR_Lock(_pr_rename_lock); michael@0: if (0 == access(to, F_OK)) michael@0: PR_SetError(PR_FILE_EXISTS_ERROR, 0); michael@0: else michael@0: { michael@0: rv = rename(from, to); michael@0: if (rv < 0) { michael@0: err = _MD_ERRNO(); michael@0: _PR_MD_MAP_RENAME_ERROR(err); michael@0: } michael@0: } michael@0: if (NULL != _pr_rename_lock) michael@0: PR_Unlock(_pr_rename_lock); michael@0: return rv; michael@0: } michael@0: michael@0: PRInt32 _MD_access(const char *name, PRAccessHow how) michael@0: { michael@0: PRInt32 rv, err; michael@0: int amode; michael@0: michael@0: switch (how) { michael@0: case PR_ACCESS_WRITE_OK: michael@0: amode = W_OK; michael@0: break; michael@0: case PR_ACCESS_READ_OK: michael@0: amode = R_OK; michael@0: break; michael@0: case PR_ACCESS_EXISTS: michael@0: amode = F_OK; michael@0: break; michael@0: default: michael@0: PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0); michael@0: rv = -1; michael@0: goto done; michael@0: } michael@0: rv = access(name, amode); michael@0: michael@0: if (rv < 0) { michael@0: err = _MD_ERRNO(); michael@0: _PR_MD_MAP_ACCESS_ERROR(err); michael@0: } michael@0: michael@0: done: michael@0: return(rv); michael@0: } michael@0: michael@0: PRInt32 _MD_mkdir(const char *name, PRIntn mode) michael@0: { michael@0: int rv, err; michael@0: michael@0: /* michael@0: ** This lock is used to enforce rename semantics as described michael@0: ** in PR_Rename. Look there for more fun details. 
michael@0: */ michael@0: if (NULL !=_pr_rename_lock) michael@0: PR_Lock(_pr_rename_lock); michael@0: rv = mkdir(name, mode); michael@0: if (rv < 0) { michael@0: err = _MD_ERRNO(); michael@0: _PR_MD_MAP_MKDIR_ERROR(err); michael@0: } michael@0: if (NULL !=_pr_rename_lock) michael@0: PR_Unlock(_pr_rename_lock); michael@0: return rv; michael@0: } michael@0: michael@0: PRInt32 _MD_rmdir(const char *name) michael@0: { michael@0: int rv, err; michael@0: michael@0: rv = rmdir(name); michael@0: if (rv == -1) { michael@0: err = _MD_ERRNO(); michael@0: _PR_MD_MAP_RMDIR_ERROR(err); michael@0: } michael@0: return rv; michael@0: } michael@0: michael@0: PRInt32 _MD_read(PRFileDesc *fd, void *buf, PRInt32 amount) michael@0: { michael@0: PRThread *me = _PR_MD_CURRENT_THREAD(); michael@0: PRInt32 rv, err; michael@0: #ifndef _PR_USE_POLL michael@0: fd_set rd; michael@0: #else michael@0: struct pollfd pfd; michael@0: #endif /* _PR_USE_POLL */ michael@0: PRInt32 osfd = fd->secret->md.osfd; michael@0: michael@0: #ifndef _PR_USE_POLL michael@0: FD_ZERO(&rd); michael@0: FD_SET(osfd, &rd); michael@0: #else michael@0: pfd.fd = osfd; michael@0: pfd.events = POLLIN; michael@0: #endif /* _PR_USE_POLL */ michael@0: while ((rv = read(osfd,buf,amount)) == -1) { michael@0: err = _MD_ERRNO(); michael@0: if ((err == EAGAIN) || (err == EWOULDBLOCK)) { michael@0: if (fd->secret->nonblocking) { michael@0: break; michael@0: } michael@0: if (!_PR_IS_NATIVE_THREAD(me)) { michael@0: if ((rv = local_io_wait(osfd, _PR_UNIX_POLL_READ, michael@0: PR_INTERVAL_NO_TIMEOUT)) < 0) michael@0: goto done; michael@0: } else { michael@0: #ifndef _PR_USE_POLL michael@0: while ((rv = _MD_SELECT(osfd + 1, &rd, NULL, NULL, NULL)) michael@0: == -1 && (err = _MD_ERRNO()) == EINTR) { michael@0: /* retry _MD_SELECT() if it is interrupted */ michael@0: } michael@0: #else /* _PR_USE_POLL */ michael@0: while ((rv = _MD_POLL(&pfd, 1, -1)) michael@0: == -1 && (err = _MD_ERRNO()) == EINTR) { michael@0: /* retry _MD_POLL() if it is 
interrupted */ michael@0: } michael@0: #endif /* _PR_USE_POLL */ michael@0: if (rv == -1) { michael@0: break; michael@0: } michael@0: } michael@0: if (_PR_PENDING_INTERRUPT(me)) michael@0: break; michael@0: } else if ((err == EINTR) && (!_PR_PENDING_INTERRUPT(me))){ michael@0: continue; michael@0: } else { michael@0: break; michael@0: } michael@0: } michael@0: if (rv < 0) { michael@0: if (_PR_PENDING_INTERRUPT(me)) { michael@0: me->flags &= ~_PR_INTERRUPT; michael@0: PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0); michael@0: } else { michael@0: _PR_MD_MAP_READ_ERROR(err); michael@0: } michael@0: } michael@0: done: michael@0: return(rv); michael@0: } michael@0: michael@0: PRInt32 _MD_write(PRFileDesc *fd, const void *buf, PRInt32 amount) michael@0: { michael@0: PRThread *me = _PR_MD_CURRENT_THREAD(); michael@0: PRInt32 rv, err; michael@0: #ifndef _PR_USE_POLL michael@0: fd_set wd; michael@0: #else michael@0: struct pollfd pfd; michael@0: #endif /* _PR_USE_POLL */ michael@0: PRInt32 osfd = fd->secret->md.osfd; michael@0: michael@0: #ifndef _PR_USE_POLL michael@0: FD_ZERO(&wd); michael@0: FD_SET(osfd, &wd); michael@0: #else michael@0: pfd.fd = osfd; michael@0: pfd.events = POLLOUT; michael@0: #endif /* _PR_USE_POLL */ michael@0: while ((rv = write(osfd,buf,amount)) == -1) { michael@0: err = _MD_ERRNO(); michael@0: if ((err == EAGAIN) || (err == EWOULDBLOCK)) { michael@0: if (fd->secret->nonblocking) { michael@0: break; michael@0: } michael@0: if (!_PR_IS_NATIVE_THREAD(me)) { michael@0: if ((rv = local_io_wait(osfd, _PR_UNIX_POLL_WRITE, michael@0: PR_INTERVAL_NO_TIMEOUT)) < 0) michael@0: goto done; michael@0: } else { michael@0: #ifndef _PR_USE_POLL michael@0: while ((rv = _MD_SELECT(osfd + 1, NULL, &wd, NULL, NULL)) michael@0: == -1 && (err = _MD_ERRNO()) == EINTR) { michael@0: /* retry _MD_SELECT() if it is interrupted */ michael@0: } michael@0: #else /* _PR_USE_POLL */ michael@0: while ((rv = _MD_POLL(&pfd, 1, -1)) michael@0: == -1 && (err = _MD_ERRNO()) == EINTR) { 
michael@0: /* retry _MD_POLL() if it is interrupted */ michael@0: } michael@0: #endif /* _PR_USE_POLL */ michael@0: if (rv == -1) { michael@0: break; michael@0: } michael@0: } michael@0: if (_PR_PENDING_INTERRUPT(me)) michael@0: break; michael@0: } else if ((err == EINTR) && (!_PR_PENDING_INTERRUPT(me))){ michael@0: continue; michael@0: } else { michael@0: break; michael@0: } michael@0: } michael@0: if (rv < 0) { michael@0: if (_PR_PENDING_INTERRUPT(me)) { michael@0: me->flags &= ~_PR_INTERRUPT; michael@0: PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0); michael@0: } else { michael@0: _PR_MD_MAP_WRITE_ERROR(err); michael@0: } michael@0: } michael@0: done: michael@0: return(rv); michael@0: } michael@0: michael@0: PRInt32 _MD_fsync(PRFileDesc *fd) michael@0: { michael@0: PRInt32 rv, err; michael@0: michael@0: rv = fsync(fd->secret->md.osfd); michael@0: if (rv == -1) { michael@0: err = _MD_ERRNO(); michael@0: _PR_MD_MAP_FSYNC_ERROR(err); michael@0: } michael@0: return(rv); michael@0: } michael@0: michael@0: PRInt32 _MD_close(PRInt32 osfd) michael@0: { michael@0: PRInt32 rv, err; michael@0: michael@0: rv = close(osfd); michael@0: if (rv == -1) { michael@0: err = _MD_ERRNO(); michael@0: _PR_MD_MAP_CLOSE_ERROR(err); michael@0: } michael@0: return(rv); michael@0: } michael@0: michael@0: PRInt32 _MD_socket(PRInt32 domain, PRInt32 type, PRInt32 proto) michael@0: { michael@0: PRInt32 osfd, err; michael@0: michael@0: osfd = socket(domain, type, proto); michael@0: michael@0: if (osfd == -1) { michael@0: err = _MD_ERRNO(); michael@0: _PR_MD_MAP_SOCKET_ERROR(err); michael@0: return(osfd); michael@0: } michael@0: michael@0: return(osfd); michael@0: } michael@0: michael@0: PRInt32 _MD_socketavailable(PRFileDesc *fd) michael@0: { michael@0: PRInt32 result; michael@0: michael@0: if (ioctl(fd->secret->md.osfd, FIONREAD, &result) < 0) { michael@0: _PR_MD_MAP_SOCKETAVAILABLE_ERROR(_MD_ERRNO()); michael@0: return -1; michael@0: } michael@0: return result; michael@0: } michael@0: michael@0: 
PRInt64 _MD_socketavailable64(PRFileDesc *fd) michael@0: { michael@0: PRInt64 result; michael@0: LL_I2L(result, _MD_socketavailable(fd)); michael@0: return result; michael@0: } /* _MD_socketavailable64 */ michael@0: michael@0: #define READ_FD 1 michael@0: #define WRITE_FD 2 michael@0: michael@0: /* michael@0: * socket_io_wait -- michael@0: * michael@0: * wait for socket i/o, periodically checking for interrupt michael@0: * michael@0: * The first implementation uses select(), for platforms without michael@0: * poll(). The second (preferred) implementation uses poll(). michael@0: */ michael@0: michael@0: #ifndef _PR_USE_POLL michael@0: michael@0: static PRInt32 socket_io_wait(PRInt32 osfd, PRInt32 fd_type, michael@0: PRIntervalTime timeout) michael@0: { michael@0: PRInt32 rv = -1; michael@0: struct timeval tv; michael@0: PRThread *me = _PR_MD_CURRENT_THREAD(); michael@0: PRIntervalTime epoch, now, elapsed, remaining; michael@0: PRBool wait_for_remaining; michael@0: PRInt32 syserror; michael@0: fd_set rd_wr; michael@0: michael@0: switch (timeout) { michael@0: case PR_INTERVAL_NO_WAIT: michael@0: PR_SetError(PR_IO_TIMEOUT_ERROR, 0); michael@0: break; michael@0: case PR_INTERVAL_NO_TIMEOUT: michael@0: /* michael@0: * This is a special case of the 'default' case below. michael@0: * Please see the comments there. 
michael@0: */ michael@0: tv.tv_sec = _PR_INTERRUPT_CHECK_INTERVAL_SECS; michael@0: tv.tv_usec = 0; michael@0: FD_ZERO(&rd_wr); michael@0: do { michael@0: FD_SET(osfd, &rd_wr); michael@0: if (fd_type == READ_FD) michael@0: rv = _MD_SELECT(osfd + 1, &rd_wr, NULL, NULL, &tv); michael@0: else michael@0: rv = _MD_SELECT(osfd + 1, NULL, &rd_wr, NULL, &tv); michael@0: if (rv == -1 && (syserror = _MD_ERRNO()) != EINTR) { michael@0: _PR_MD_MAP_SELECT_ERROR(syserror); michael@0: break; michael@0: } michael@0: if (_PR_PENDING_INTERRUPT(me)) { michael@0: me->flags &= ~_PR_INTERRUPT; michael@0: PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0); michael@0: rv = -1; michael@0: break; michael@0: } michael@0: } while (rv == 0 || (rv == -1 && syserror == EINTR)); michael@0: break; michael@0: default: michael@0: now = epoch = PR_IntervalNow(); michael@0: remaining = timeout; michael@0: FD_ZERO(&rd_wr); michael@0: do { michael@0: /* michael@0: * We block in _MD_SELECT for at most michael@0: * _PR_INTERRUPT_CHECK_INTERVAL_SECS seconds, michael@0: * so that there is an upper limit on the delay michael@0: * before the interrupt bit is checked. 
michael@0: */ michael@0: wait_for_remaining = PR_TRUE; michael@0: tv.tv_sec = PR_IntervalToSeconds(remaining); michael@0: if (tv.tv_sec > _PR_INTERRUPT_CHECK_INTERVAL_SECS) { michael@0: wait_for_remaining = PR_FALSE; michael@0: tv.tv_sec = _PR_INTERRUPT_CHECK_INTERVAL_SECS; michael@0: tv.tv_usec = 0; michael@0: } else { michael@0: tv.tv_usec = PR_IntervalToMicroseconds( michael@0: remaining - michael@0: PR_SecondsToInterval(tv.tv_sec)); michael@0: } michael@0: FD_SET(osfd, &rd_wr); michael@0: if (fd_type == READ_FD) michael@0: rv = _MD_SELECT(osfd + 1, &rd_wr, NULL, NULL, &tv); michael@0: else michael@0: rv = _MD_SELECT(osfd + 1, NULL, &rd_wr, NULL, &tv); michael@0: /* michael@0: * we don't consider EINTR a real error michael@0: */ michael@0: if (rv == -1 && (syserror = _MD_ERRNO()) != EINTR) { michael@0: _PR_MD_MAP_SELECT_ERROR(syserror); michael@0: break; michael@0: } michael@0: if (_PR_PENDING_INTERRUPT(me)) { michael@0: me->flags &= ~_PR_INTERRUPT; michael@0: PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0); michael@0: rv = -1; michael@0: break; michael@0: } michael@0: /* michael@0: * We loop again if _MD_SELECT timed out or got interrupted michael@0: * by a signal, and the timeout deadline has not passed yet. michael@0: */ michael@0: if (rv == 0 || (rv == -1 && syserror == EINTR)) { michael@0: /* michael@0: * If _MD_SELECT timed out, we know how much time michael@0: * we spent in blocking, so we can avoid a michael@0: * PR_IntervalNow() call. 
michael@0: */ michael@0: if (rv == 0) { michael@0: if (wait_for_remaining) { michael@0: now += remaining; michael@0: } else { michael@0: now += PR_SecondsToInterval(tv.tv_sec) michael@0: + PR_MicrosecondsToInterval(tv.tv_usec); michael@0: } michael@0: } else { michael@0: now = PR_IntervalNow(); michael@0: } michael@0: elapsed = (PRIntervalTime) (now - epoch); michael@0: if (elapsed >= timeout) { michael@0: PR_SetError(PR_IO_TIMEOUT_ERROR, 0); michael@0: rv = -1; michael@0: break; michael@0: } else { michael@0: remaining = timeout - elapsed; michael@0: } michael@0: } michael@0: } while (rv == 0 || (rv == -1 && syserror == EINTR)); michael@0: break; michael@0: } michael@0: return(rv); michael@0: } michael@0: michael@0: #else /* _PR_USE_POLL */ michael@0: michael@0: static PRInt32 socket_io_wait(PRInt32 osfd, PRInt32 fd_type, michael@0: PRIntervalTime timeout) michael@0: { michael@0: PRInt32 rv = -1; michael@0: int msecs; michael@0: PRThread *me = _PR_MD_CURRENT_THREAD(); michael@0: PRIntervalTime epoch, now, elapsed, remaining; michael@0: PRBool wait_for_remaining; michael@0: PRInt32 syserror; michael@0: struct pollfd pfd; michael@0: michael@0: switch (timeout) { michael@0: case PR_INTERVAL_NO_WAIT: michael@0: PR_SetError(PR_IO_TIMEOUT_ERROR, 0); michael@0: break; michael@0: case PR_INTERVAL_NO_TIMEOUT: michael@0: /* michael@0: * This is a special case of the 'default' case below. michael@0: * Please see the comments there. 
michael@0: */ michael@0: msecs = _PR_INTERRUPT_CHECK_INTERVAL_SECS * 1000; michael@0: pfd.fd = osfd; michael@0: if (fd_type == READ_FD) { michael@0: pfd.events = POLLIN; michael@0: } else { michael@0: pfd.events = POLLOUT; michael@0: } michael@0: do { michael@0: rv = _MD_POLL(&pfd, 1, msecs); michael@0: if (rv == -1 && (syserror = _MD_ERRNO()) != EINTR) { michael@0: _PR_MD_MAP_POLL_ERROR(syserror); michael@0: break; michael@0: } michael@0: /* michael@0: * If POLLERR is set, don't process it; retry the operation michael@0: */ michael@0: if ((rv == 1) && (pfd.revents & (POLLHUP | POLLNVAL))) { michael@0: rv = -1; michael@0: _PR_MD_MAP_POLL_REVENTS_ERROR(pfd.revents); michael@0: break; michael@0: } michael@0: if (_PR_PENDING_INTERRUPT(me)) { michael@0: me->flags &= ~_PR_INTERRUPT; michael@0: PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0); michael@0: rv = -1; michael@0: break; michael@0: } michael@0: } while (rv == 0 || (rv == -1 && syserror == EINTR)); michael@0: break; michael@0: default: michael@0: now = epoch = PR_IntervalNow(); michael@0: remaining = timeout; michael@0: pfd.fd = osfd; michael@0: if (fd_type == READ_FD) { michael@0: pfd.events = POLLIN; michael@0: } else { michael@0: pfd.events = POLLOUT; michael@0: } michael@0: do { michael@0: /* michael@0: * We block in _MD_POLL for at most michael@0: * _PR_INTERRUPT_CHECK_INTERVAL_SECS seconds, michael@0: * so that there is an upper limit on the delay michael@0: * before the interrupt bit is checked. 
michael@0: */ michael@0: wait_for_remaining = PR_TRUE; michael@0: msecs = PR_IntervalToMilliseconds(remaining); michael@0: if (msecs > _PR_INTERRUPT_CHECK_INTERVAL_SECS * 1000) { michael@0: wait_for_remaining = PR_FALSE; michael@0: msecs = _PR_INTERRUPT_CHECK_INTERVAL_SECS * 1000; michael@0: } michael@0: rv = _MD_POLL(&pfd, 1, msecs); michael@0: /* michael@0: * we don't consider EINTR a real error michael@0: */ michael@0: if (rv == -1 && (syserror = _MD_ERRNO()) != EINTR) { michael@0: _PR_MD_MAP_POLL_ERROR(syserror); michael@0: break; michael@0: } michael@0: if (_PR_PENDING_INTERRUPT(me)) { michael@0: me->flags &= ~_PR_INTERRUPT; michael@0: PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0); michael@0: rv = -1; michael@0: break; michael@0: } michael@0: /* michael@0: * If POLLERR is set, don't process it; retry the operation michael@0: */ michael@0: if ((rv == 1) && (pfd.revents & (POLLHUP | POLLNVAL))) { michael@0: rv = -1; michael@0: _PR_MD_MAP_POLL_REVENTS_ERROR(pfd.revents); michael@0: break; michael@0: } michael@0: /* michael@0: * We loop again if _MD_POLL timed out or got interrupted michael@0: * by a signal, and the timeout deadline has not passed yet. michael@0: */ michael@0: if (rv == 0 || (rv == -1 && syserror == EINTR)) { michael@0: /* michael@0: * If _MD_POLL timed out, we know how much time michael@0: * we spent in blocking, so we can avoid a michael@0: * PR_IntervalNow() call. 
michael@0: */ michael@0: if (rv == 0) { michael@0: if (wait_for_remaining) { michael@0: now += remaining; michael@0: } else { michael@0: now += PR_MillisecondsToInterval(msecs); michael@0: } michael@0: } else { michael@0: now = PR_IntervalNow(); michael@0: } michael@0: elapsed = (PRIntervalTime) (now - epoch); michael@0: if (elapsed >= timeout) { michael@0: PR_SetError(PR_IO_TIMEOUT_ERROR, 0); michael@0: rv = -1; michael@0: break; michael@0: } else { michael@0: remaining = timeout - elapsed; michael@0: } michael@0: } michael@0: } while (rv == 0 || (rv == -1 && syserror == EINTR)); michael@0: break; michael@0: } michael@0: return(rv); michael@0: } michael@0: michael@0: #endif /* _PR_USE_POLL */ michael@0: michael@0: static PRInt32 local_io_wait( michael@0: PRInt32 osfd, michael@0: PRInt32 wait_flag, michael@0: PRIntervalTime timeout) michael@0: { michael@0: _PRUnixPollDesc pd; michael@0: PRInt32 rv; michael@0: michael@0: PR_LOG(_pr_io_lm, PR_LOG_MIN, michael@0: ("waiting to %s on osfd=%d", michael@0: (wait_flag == _PR_UNIX_POLL_READ) ? "read" : "write", michael@0: osfd)); michael@0: michael@0: if (timeout == PR_INTERVAL_NO_WAIT) return 0; michael@0: michael@0: pd.osfd = osfd; michael@0: pd.in_flags = wait_flag; michael@0: pd.out_flags = 0; michael@0: michael@0: rv = _PR_WaitForMultipleFDs(&pd, 1, timeout); michael@0: michael@0: if (rv == 0) { michael@0: PR_SetError(PR_IO_TIMEOUT_ERROR, 0); michael@0: rv = -1; michael@0: } michael@0: return rv; michael@0: } michael@0: michael@0: michael@0: PRInt32 _MD_recv(PRFileDesc *fd, void *buf, PRInt32 amount, michael@0: PRInt32 flags, PRIntervalTime timeout) michael@0: { michael@0: PRInt32 osfd = fd->secret->md.osfd; michael@0: PRInt32 rv, err; michael@0: PRThread *me = _PR_MD_CURRENT_THREAD(); michael@0: michael@0: /* michael@0: * Many OS's (Solaris, Unixware) have a broken recv which won't read michael@0: * from socketpairs. As long as we don't use flags on socketpairs, this michael@0: * is a decent fix. 
- mikep michael@0: */ michael@0: #if defined(UNIXWARE) || defined(SOLARIS) michael@0: while ((rv = read(osfd,buf,amount)) == -1) { michael@0: #else michael@0: while ((rv = recv(osfd,buf,amount,flags)) == -1) { michael@0: #endif michael@0: err = _MD_ERRNO(); michael@0: if ((err == EAGAIN) || (err == EWOULDBLOCK)) { michael@0: if (fd->secret->nonblocking) { michael@0: break; michael@0: } michael@0: if (!_PR_IS_NATIVE_THREAD(me)) { michael@0: if ((rv = local_io_wait(osfd,_PR_UNIX_POLL_READ,timeout)) < 0) michael@0: goto done; michael@0: } else { michael@0: if ((rv = socket_io_wait(osfd, READ_FD, timeout)) < 0) michael@0: goto done; michael@0: } michael@0: } else if ((err == EINTR) && (!_PR_PENDING_INTERRUPT(me))){ michael@0: continue; michael@0: } else { michael@0: break; michael@0: } michael@0: } michael@0: if (rv < 0) { michael@0: _PR_MD_MAP_RECV_ERROR(err); michael@0: } michael@0: done: michael@0: return(rv); michael@0: } michael@0: michael@0: PRInt32 _MD_recvfrom(PRFileDesc *fd, void *buf, PRInt32 amount, michael@0: PRIntn flags, PRNetAddr *addr, PRUint32 *addrlen, michael@0: PRIntervalTime timeout) michael@0: { michael@0: PRInt32 osfd = fd->secret->md.osfd; michael@0: PRInt32 rv, err; michael@0: PRThread *me = _PR_MD_CURRENT_THREAD(); michael@0: michael@0: while ((*addrlen = PR_NETADDR_SIZE(addr)), michael@0: ((rv = recvfrom(osfd, buf, amount, flags, michael@0: (struct sockaddr *) addr, (_PRSockLen_t *)addrlen)) == -1)) { michael@0: err = _MD_ERRNO(); michael@0: if ((err == EAGAIN) || (err == EWOULDBLOCK)) { michael@0: if (fd->secret->nonblocking) { michael@0: break; michael@0: } michael@0: if (!_PR_IS_NATIVE_THREAD(me)) { michael@0: if ((rv = local_io_wait(osfd, _PR_UNIX_POLL_READ, timeout)) < 0) michael@0: goto done; michael@0: } else { michael@0: if ((rv = socket_io_wait(osfd, READ_FD, timeout)) < 0) michael@0: goto done; michael@0: } michael@0: } else if ((err == EINTR) && (!_PR_PENDING_INTERRUPT(me))){ michael@0: continue; michael@0: } else { michael@0: 
break; michael@0: } michael@0: } michael@0: if (rv < 0) { michael@0: _PR_MD_MAP_RECVFROM_ERROR(err); michael@0: } michael@0: done: michael@0: #ifdef _PR_HAVE_SOCKADDR_LEN michael@0: if (rv != -1) { michael@0: /* ignore the sa_len field of struct sockaddr */ michael@0: if (addr) { michael@0: addr->raw.family = ((struct sockaddr *) addr)->sa_family; michael@0: } michael@0: } michael@0: #endif /* _PR_HAVE_SOCKADDR_LEN */ michael@0: return(rv); michael@0: } michael@0: michael@0: PRInt32 _MD_send(PRFileDesc *fd, const void *buf, PRInt32 amount, michael@0: PRInt32 flags, PRIntervalTime timeout) michael@0: { michael@0: PRInt32 osfd = fd->secret->md.osfd; michael@0: PRInt32 rv, err; michael@0: PRThread *me = _PR_MD_CURRENT_THREAD(); michael@0: #if defined(SOLARIS) michael@0: PRInt32 tmp_amount = amount; michael@0: #endif michael@0: michael@0: /* michael@0: * On pre-2.6 Solaris, send() is much slower than write(). michael@0: * On 2.6 and beyond, with in-kernel sockets, send() and michael@0: * write() are fairly equivalent in performance. michael@0: */ michael@0: #if defined(SOLARIS) michael@0: PR_ASSERT(0 == flags); michael@0: while ((rv = write(osfd,buf,tmp_amount)) == -1) { michael@0: #else michael@0: while ((rv = send(osfd,buf,amount,flags)) == -1) { michael@0: #endif michael@0: err = _MD_ERRNO(); michael@0: if ((err == EAGAIN) || (err == EWOULDBLOCK)) { michael@0: if (fd->secret->nonblocking) { michael@0: break; michael@0: } michael@0: if (!_PR_IS_NATIVE_THREAD(me)) { michael@0: if ((rv = local_io_wait(osfd, _PR_UNIX_POLL_WRITE, timeout)) < 0) michael@0: goto done; michael@0: } else { michael@0: if ((rv = socket_io_wait(osfd, WRITE_FD, timeout))< 0) michael@0: goto done; michael@0: } michael@0: } else if ((err == EINTR) && (!_PR_PENDING_INTERRUPT(me))){ michael@0: continue; michael@0: } else { michael@0: #if defined(SOLARIS) michael@0: /* michael@0: * The write system call has been reported to return the ERANGE michael@0: * error on occasion. 
Try to write in smaller chunks to workaround michael@0: * this bug. michael@0: */ michael@0: if (err == ERANGE) { michael@0: if (tmp_amount > 1) { michael@0: tmp_amount = tmp_amount/2; /* half the bytes */ michael@0: continue; michael@0: } michael@0: } michael@0: #endif michael@0: break; michael@0: } michael@0: } michael@0: /* michael@0: * optimization; if bytes sent is less than "amount" call michael@0: * select before returning. This is because it is likely that michael@0: * the next send() call will return EWOULDBLOCK. michael@0: */ michael@0: if ((!fd->secret->nonblocking) && (rv > 0) && (rv < amount) michael@0: && (timeout != PR_INTERVAL_NO_WAIT)) { michael@0: if (_PR_IS_NATIVE_THREAD(me)) { michael@0: if (socket_io_wait(osfd, WRITE_FD, timeout)< 0) { michael@0: rv = -1; michael@0: goto done; michael@0: } michael@0: } else { michael@0: if (local_io_wait(osfd, _PR_UNIX_POLL_WRITE, timeout) < 0) { michael@0: rv = -1; michael@0: goto done; michael@0: } michael@0: } michael@0: } michael@0: if (rv < 0) { michael@0: _PR_MD_MAP_SEND_ERROR(err); michael@0: } michael@0: done: michael@0: return(rv); michael@0: } michael@0: michael@0: PRInt32 _MD_sendto( michael@0: PRFileDesc *fd, const void *buf, PRInt32 amount, PRIntn flags, michael@0: const PRNetAddr *addr, PRUint32 addrlen, PRIntervalTime timeout) michael@0: { michael@0: PRInt32 osfd = fd->secret->md.osfd; michael@0: PRInt32 rv, err; michael@0: PRThread *me = _PR_MD_CURRENT_THREAD(); michael@0: #ifdef _PR_HAVE_SOCKADDR_LEN michael@0: PRNetAddr addrCopy; michael@0: michael@0: addrCopy = *addr; michael@0: ((struct sockaddr *) &addrCopy)->sa_len = addrlen; michael@0: ((struct sockaddr *) &addrCopy)->sa_family = addr->raw.family; michael@0: michael@0: while ((rv = sendto(osfd, buf, amount, flags, michael@0: (struct sockaddr *) &addrCopy, addrlen)) == -1) { michael@0: #else michael@0: while ((rv = sendto(osfd, buf, amount, flags, michael@0: (struct sockaddr *) addr, addrlen)) == -1) { michael@0: #endif michael@0: err = 
_MD_ERRNO(); michael@0: if ((err == EAGAIN) || (err == EWOULDBLOCK)) { michael@0: if (fd->secret->nonblocking) { michael@0: break; michael@0: } michael@0: if (!_PR_IS_NATIVE_THREAD(me)) { michael@0: if ((rv = local_io_wait(osfd, _PR_UNIX_POLL_WRITE, timeout)) < 0) michael@0: goto done; michael@0: } else { michael@0: if ((rv = socket_io_wait(osfd, WRITE_FD, timeout))< 0) michael@0: goto done; michael@0: } michael@0: } else if ((err == EINTR) && (!_PR_PENDING_INTERRUPT(me))){ michael@0: continue; michael@0: } else { michael@0: break; michael@0: } michael@0: } michael@0: if (rv < 0) { michael@0: _PR_MD_MAP_SENDTO_ERROR(err); michael@0: } michael@0: done: michael@0: return(rv); michael@0: } michael@0: michael@0: PRInt32 _MD_writev( michael@0: PRFileDesc *fd, const PRIOVec *iov, michael@0: PRInt32 iov_size, PRIntervalTime timeout) michael@0: { michael@0: PRInt32 rv, err; michael@0: PRThread *me = _PR_MD_CURRENT_THREAD(); michael@0: PRInt32 index, amount = 0; michael@0: PRInt32 osfd = fd->secret->md.osfd; michael@0: michael@0: /* michael@0: * Calculate the total number of bytes to be sent; needed for michael@0: * optimization later. michael@0: * We could avoid this if this number was passed in; but it is michael@0: * probably not a big deal because iov_size is usually small (less than michael@0: * 3) michael@0: */ michael@0: if (!fd->secret->nonblocking) { michael@0: for (index=0; indexsecret->nonblocking) { michael@0: break; michael@0: } michael@0: if (!_PR_IS_NATIVE_THREAD(me)) { michael@0: if ((rv = local_io_wait(osfd, _PR_UNIX_POLL_WRITE, timeout)) < 0) michael@0: goto done; michael@0: } else { michael@0: if ((rv = socket_io_wait(osfd, WRITE_FD, timeout))<0) michael@0: goto done; michael@0: } michael@0: } else if ((err == EINTR) && (!_PR_PENDING_INTERRUPT(me))){ michael@0: continue; michael@0: } else { michael@0: break; michael@0: } michael@0: } michael@0: /* michael@0: * optimization; if bytes sent is less than "amount" call michael@0: * select before returning. 
This is because it is likely that michael@0: * the next writev() call will return EWOULDBLOCK. michael@0: */ michael@0: if ((!fd->secret->nonblocking) && (rv > 0) && (rv < amount) michael@0: && (timeout != PR_INTERVAL_NO_WAIT)) { michael@0: if (_PR_IS_NATIVE_THREAD(me)) { michael@0: if (socket_io_wait(osfd, WRITE_FD, timeout) < 0) { michael@0: rv = -1; michael@0: goto done; michael@0: } michael@0: } else { michael@0: if (local_io_wait(osfd, _PR_UNIX_POLL_WRITE, timeout) < 0) { michael@0: rv = -1; michael@0: goto done; michael@0: } michael@0: } michael@0: } michael@0: if (rv < 0) { michael@0: _PR_MD_MAP_WRITEV_ERROR(err); michael@0: } michael@0: done: michael@0: return(rv); michael@0: } michael@0: michael@0: PRInt32 _MD_accept(PRFileDesc *fd, PRNetAddr *addr, michael@0: PRUint32 *addrlen, PRIntervalTime timeout) michael@0: { michael@0: PRInt32 osfd = fd->secret->md.osfd; michael@0: PRInt32 rv, err; michael@0: PRThread *me = _PR_MD_CURRENT_THREAD(); michael@0: michael@0: while ((rv = accept(osfd, (struct sockaddr *) addr, michael@0: (_PRSockLen_t *)addrlen)) == -1) { michael@0: err = _MD_ERRNO(); michael@0: if ((err == EAGAIN) || (err == EWOULDBLOCK) || (err == ECONNABORTED)) { michael@0: if (fd->secret->nonblocking) { michael@0: break; michael@0: } michael@0: if (!_PR_IS_NATIVE_THREAD(me)) { michael@0: if ((rv = local_io_wait(osfd, _PR_UNIX_POLL_READ, timeout)) < 0) michael@0: goto done; michael@0: } else { michael@0: if ((rv = socket_io_wait(osfd, READ_FD, timeout)) < 0) michael@0: goto done; michael@0: } michael@0: } else if ((err == EINTR) && (!_PR_PENDING_INTERRUPT(me))){ michael@0: continue; michael@0: } else { michael@0: break; michael@0: } michael@0: } michael@0: if (rv < 0) { michael@0: _PR_MD_MAP_ACCEPT_ERROR(err); michael@0: } michael@0: done: michael@0: #ifdef _PR_HAVE_SOCKADDR_LEN michael@0: if (rv != -1) { michael@0: /* ignore the sa_len field of struct sockaddr */ michael@0: if (addr) { michael@0: addr->raw.family = ((struct sockaddr *) 
addr)->sa_family; michael@0: } michael@0: } michael@0: #endif /* _PR_HAVE_SOCKADDR_LEN */ michael@0: return(rv); michael@0: } michael@0: michael@0: extern int _connect (int s, const struct sockaddr *name, int namelen); michael@0: PRInt32 _MD_connect( michael@0: PRFileDesc *fd, const PRNetAddr *addr, PRUint32 addrlen, PRIntervalTime timeout) michael@0: { michael@0: PRInt32 rv, err; michael@0: PRThread *me = _PR_MD_CURRENT_THREAD(); michael@0: PRInt32 osfd = fd->secret->md.osfd; michael@0: #ifdef IRIX michael@0: extern PRInt32 _MD_irix_connect( michael@0: PRInt32 osfd, const PRNetAddr *addr, PRInt32 addrlen, PRIntervalTime timeout); michael@0: #endif michael@0: #ifdef _PR_HAVE_SOCKADDR_LEN michael@0: PRNetAddr addrCopy; michael@0: michael@0: addrCopy = *addr; michael@0: ((struct sockaddr *) &addrCopy)->sa_len = addrlen; michael@0: ((struct sockaddr *) &addrCopy)->sa_family = addr->raw.family; michael@0: #endif michael@0: michael@0: /* michael@0: * We initiate the connection setup by making a nonblocking connect() michael@0: * call. If the connect() call fails, there are two cases we handle michael@0: * specially: michael@0: * 1. The connect() call was interrupted by a signal. In this case michael@0: * we simply retry connect(). michael@0: * 2. The NSPR socket is nonblocking and connect() fails with michael@0: * EINPROGRESS. We first wait until the socket becomes writable. michael@0: * Then we try to find out whether the connection setup succeeded michael@0: * or failed. 
michael@0: */ michael@0: michael@0: retry: michael@0: #ifdef IRIX michael@0: if ((rv = _MD_irix_connect(osfd, addr, addrlen, timeout)) == -1) { michael@0: #else michael@0: #ifdef _PR_HAVE_SOCKADDR_LEN michael@0: if ((rv = connect(osfd, (struct sockaddr *)&addrCopy, addrlen)) == -1) { michael@0: #else michael@0: if ((rv = connect(osfd, (struct sockaddr *)addr, addrlen)) == -1) { michael@0: #endif michael@0: #endif michael@0: err = _MD_ERRNO(); michael@0: michael@0: if (err == EINTR) { michael@0: if (_PR_PENDING_INTERRUPT(me)) { michael@0: me->flags &= ~_PR_INTERRUPT; michael@0: PR_SetError( PR_PENDING_INTERRUPT_ERROR, 0); michael@0: return -1; michael@0: } michael@0: goto retry; michael@0: } michael@0: michael@0: if (!fd->secret->nonblocking && (err == EINPROGRESS)) { michael@0: if (!_PR_IS_NATIVE_THREAD(me)) { michael@0: michael@0: if ((rv = local_io_wait(osfd, _PR_UNIX_POLL_WRITE, timeout)) < 0) michael@0: return -1; michael@0: } else { michael@0: /* michael@0: * socket_io_wait() may return -1 or 1. 
michael@0: */ michael@0: michael@0: rv = socket_io_wait(osfd, WRITE_FD, timeout); michael@0: if (rv == -1) { michael@0: return -1; michael@0: } michael@0: } michael@0: michael@0: PR_ASSERT(rv == 1); michael@0: if (_PR_PENDING_INTERRUPT(me)) { michael@0: me->flags &= ~_PR_INTERRUPT; michael@0: PR_SetError( PR_PENDING_INTERRUPT_ERROR, 0); michael@0: return -1; michael@0: } michael@0: err = _MD_unix_get_nonblocking_connect_error(osfd); michael@0: if (err != 0) { michael@0: _PR_MD_MAP_CONNECT_ERROR(err); michael@0: return -1; michael@0: } michael@0: return 0; michael@0: } michael@0: michael@0: _PR_MD_MAP_CONNECT_ERROR(err); michael@0: } michael@0: michael@0: return rv; michael@0: } /* _MD_connect */ michael@0: michael@0: PRInt32 _MD_bind(PRFileDesc *fd, const PRNetAddr *addr, PRUint32 addrlen) michael@0: { michael@0: PRInt32 rv, err; michael@0: #ifdef _PR_HAVE_SOCKADDR_LEN michael@0: PRNetAddr addrCopy; michael@0: michael@0: addrCopy = *addr; michael@0: ((struct sockaddr *) &addrCopy)->sa_len = addrlen; michael@0: ((struct sockaddr *) &addrCopy)->sa_family = addr->raw.family; michael@0: rv = bind(fd->secret->md.osfd, (struct sockaddr *) &addrCopy, (int )addrlen); michael@0: #else michael@0: rv = bind(fd->secret->md.osfd, (struct sockaddr *) addr, (int )addrlen); michael@0: #endif michael@0: if (rv < 0) { michael@0: err = _MD_ERRNO(); michael@0: _PR_MD_MAP_BIND_ERROR(err); michael@0: } michael@0: return(rv); michael@0: } michael@0: michael@0: PRInt32 _MD_listen(PRFileDesc *fd, PRIntn backlog) michael@0: { michael@0: PRInt32 rv, err; michael@0: michael@0: rv = listen(fd->secret->md.osfd, backlog); michael@0: if (rv < 0) { michael@0: err = _MD_ERRNO(); michael@0: _PR_MD_MAP_LISTEN_ERROR(err); michael@0: } michael@0: return(rv); michael@0: } michael@0: michael@0: PRInt32 _MD_shutdown(PRFileDesc *fd, PRIntn how) michael@0: { michael@0: PRInt32 rv, err; michael@0: michael@0: rv = shutdown(fd->secret->md.osfd, how); michael@0: if (rv < 0) { michael@0: err = _MD_ERRNO(); 
michael@0: _PR_MD_MAP_SHUTDOWN_ERROR(err); michael@0: } michael@0: return(rv); michael@0: } michael@0: michael@0: PRInt32 _MD_socketpair(int af, int type, int flags, michael@0: PRInt32 *osfd) michael@0: { michael@0: PRInt32 rv, err; michael@0: michael@0: rv = socketpair(af, type, flags, osfd); michael@0: if (rv < 0) { michael@0: err = _MD_ERRNO(); michael@0: _PR_MD_MAP_SOCKETPAIR_ERROR(err); michael@0: } michael@0: return rv; michael@0: } michael@0: michael@0: PRStatus _MD_getsockname(PRFileDesc *fd, PRNetAddr *addr, michael@0: PRUint32 *addrlen) michael@0: { michael@0: PRInt32 rv, err; michael@0: michael@0: rv = getsockname(fd->secret->md.osfd, michael@0: (struct sockaddr *) addr, (_PRSockLen_t *)addrlen); michael@0: #ifdef _PR_HAVE_SOCKADDR_LEN michael@0: if (rv == 0) { michael@0: /* ignore the sa_len field of struct sockaddr */ michael@0: if (addr) { michael@0: addr->raw.family = ((struct sockaddr *) addr)->sa_family; michael@0: } michael@0: } michael@0: #endif /* _PR_HAVE_SOCKADDR_LEN */ michael@0: if (rv < 0) { michael@0: err = _MD_ERRNO(); michael@0: _PR_MD_MAP_GETSOCKNAME_ERROR(err); michael@0: } michael@0: return rv==0?PR_SUCCESS:PR_FAILURE; michael@0: } michael@0: michael@0: PRStatus _MD_getpeername(PRFileDesc *fd, PRNetAddr *addr, michael@0: PRUint32 *addrlen) michael@0: { michael@0: PRInt32 rv, err; michael@0: michael@0: rv = getpeername(fd->secret->md.osfd, michael@0: (struct sockaddr *) addr, (_PRSockLen_t *)addrlen); michael@0: #ifdef _PR_HAVE_SOCKADDR_LEN michael@0: if (rv == 0) { michael@0: /* ignore the sa_len field of struct sockaddr */ michael@0: if (addr) { michael@0: addr->raw.family = ((struct sockaddr *) addr)->sa_family; michael@0: } michael@0: } michael@0: #endif /* _PR_HAVE_SOCKADDR_LEN */ michael@0: if (rv < 0) { michael@0: err = _MD_ERRNO(); michael@0: _PR_MD_MAP_GETPEERNAME_ERROR(err); michael@0: } michael@0: return rv==0?PR_SUCCESS:PR_FAILURE; michael@0: } michael@0: michael@0: PRStatus _MD_getsockopt(PRFileDesc *fd, PRInt32 level, 
michael@0: PRInt32 optname, char* optval, PRInt32* optlen) michael@0: { michael@0: PRInt32 rv, err; michael@0: michael@0: rv = getsockopt(fd->secret->md.osfd, level, optname, optval, (_PRSockLen_t *)optlen); michael@0: if (rv < 0) { michael@0: err = _MD_ERRNO(); michael@0: _PR_MD_MAP_GETSOCKOPT_ERROR(err); michael@0: } michael@0: return rv==0?PR_SUCCESS:PR_FAILURE; michael@0: } michael@0: michael@0: PRStatus _MD_setsockopt(PRFileDesc *fd, PRInt32 level, michael@0: PRInt32 optname, const char* optval, PRInt32 optlen) michael@0: { michael@0: PRInt32 rv, err; michael@0: michael@0: rv = setsockopt(fd->secret->md.osfd, level, optname, optval, optlen); michael@0: if (rv < 0) { michael@0: err = _MD_ERRNO(); michael@0: _PR_MD_MAP_SETSOCKOPT_ERROR(err); michael@0: } michael@0: return rv==0?PR_SUCCESS:PR_FAILURE; michael@0: } michael@0: michael@0: PRStatus _MD_set_fd_inheritable(PRFileDesc *fd, PRBool inheritable) michael@0: { michael@0: int rv; michael@0: michael@0: rv = fcntl(fd->secret->md.osfd, F_SETFD, inheritable ? 0 : FD_CLOEXEC); michael@0: if (-1 == rv) { michael@0: PR_SetError(PR_UNKNOWN_ERROR, _MD_ERRNO()); michael@0: return PR_FAILURE; michael@0: } michael@0: return PR_SUCCESS; michael@0: } michael@0: michael@0: void _MD_init_fd_inheritable(PRFileDesc *fd, PRBool imported) michael@0: { michael@0: if (imported) { michael@0: fd->secret->inheritable = _PR_TRI_UNKNOWN; michael@0: } else { michael@0: /* By default, a Unix fd is not closed on exec. 
*/ michael@0: #ifdef DEBUG michael@0: { michael@0: int flags = fcntl(fd->secret->md.osfd, F_GETFD, 0); michael@0: PR_ASSERT(0 == flags); michael@0: } michael@0: #endif michael@0: fd->secret->inheritable = _PR_TRI_TRUE; michael@0: } michael@0: } michael@0: michael@0: /************************************************************************/ michael@0: #if !defined(_PR_USE_POLL) michael@0: michael@0: /* michael@0: ** Scan through io queue and find any bad fd's that triggered the error michael@0: ** from _MD_SELECT michael@0: */ michael@0: static void FindBadFDs(void) michael@0: { michael@0: PRCList *q; michael@0: PRThread *me = _MD_CURRENT_THREAD(); michael@0: michael@0: PR_ASSERT(!_PR_IS_NATIVE_THREAD(me)); michael@0: q = (_PR_IOQ(me->cpu)).next; michael@0: _PR_IOQ_MAX_OSFD(me->cpu) = -1; michael@0: _PR_IOQ_TIMEOUT(me->cpu) = PR_INTERVAL_NO_TIMEOUT; michael@0: while (q != &_PR_IOQ(me->cpu)) { michael@0: PRPollQueue *pq = _PR_POLLQUEUE_PTR(q); michael@0: PRBool notify = PR_FALSE; michael@0: _PRUnixPollDesc *pds = pq->pds; michael@0: _PRUnixPollDesc *epds = pds + pq->npds; michael@0: PRInt32 pq_max_osfd = -1; michael@0: michael@0: q = q->next; michael@0: for (; pds < epds; pds++) { michael@0: PRInt32 osfd = pds->osfd; michael@0: pds->out_flags = 0; michael@0: PR_ASSERT(osfd >= 0 || pds->in_flags == 0); michael@0: if (pds->in_flags == 0) { michael@0: continue; /* skip this fd */ michael@0: } michael@0: if (fcntl(osfd, F_GETFL, 0) == -1) { michael@0: /* Found a bad descriptor, remove it from the fd_sets. 
*/ michael@0: PR_LOG(_pr_io_lm, PR_LOG_MAX, michael@0: ("file descriptor %d is bad", osfd)); michael@0: pds->out_flags = _PR_UNIX_POLL_NVAL; michael@0: notify = PR_TRUE; michael@0: } michael@0: if (osfd > pq_max_osfd) { michael@0: pq_max_osfd = osfd; michael@0: } michael@0: } michael@0: michael@0: if (notify) { michael@0: PRIntn pri; michael@0: PR_REMOVE_LINK(&pq->links); michael@0: pq->on_ioq = PR_FALSE; michael@0: michael@0: /* michael@0: * Decrement the count of descriptors for each desciptor/event michael@0: * because this I/O request is being removed from the michael@0: * ioq michael@0: */ michael@0: pds = pq->pds; michael@0: for (; pds < epds; pds++) { michael@0: PRInt32 osfd = pds->osfd; michael@0: PRInt16 in_flags = pds->in_flags; michael@0: PR_ASSERT(osfd >= 0 || in_flags == 0); michael@0: if (in_flags & _PR_UNIX_POLL_READ) { michael@0: if (--(_PR_FD_READ_CNT(me->cpu))[osfd] == 0) michael@0: FD_CLR(osfd, &_PR_FD_READ_SET(me->cpu)); michael@0: } michael@0: if (in_flags & _PR_UNIX_POLL_WRITE) { michael@0: if (--(_PR_FD_WRITE_CNT(me->cpu))[osfd] == 0) michael@0: FD_CLR(osfd, &_PR_FD_WRITE_SET(me->cpu)); michael@0: } michael@0: if (in_flags & _PR_UNIX_POLL_EXCEPT) { michael@0: if (--(_PR_FD_EXCEPTION_CNT(me->cpu))[osfd] == 0) michael@0: FD_CLR(osfd, &_PR_FD_EXCEPTION_SET(me->cpu)); michael@0: } michael@0: } michael@0: michael@0: _PR_THREAD_LOCK(pq->thr); michael@0: if (pq->thr->flags & (_PR_ON_PAUSEQ|_PR_ON_SLEEPQ)) { michael@0: _PRCPU *cpu = pq->thr->cpu; michael@0: _PR_SLEEPQ_LOCK(pq->thr->cpu); michael@0: _PR_DEL_SLEEPQ(pq->thr, PR_TRUE); michael@0: _PR_SLEEPQ_UNLOCK(pq->thr->cpu); michael@0: michael@0: if (pq->thr->flags & _PR_SUSPENDING) { michael@0: /* michael@0: * set thread state to SUSPENDED; michael@0: * a Resume operation on the thread michael@0: * will move it to the runQ michael@0: */ michael@0: pq->thr->state = _PR_SUSPENDED; michael@0: _PR_MISCQ_LOCK(pq->thr->cpu); michael@0: _PR_ADD_SUSPENDQ(pq->thr, pq->thr->cpu); michael@0: 
_PR_MISCQ_UNLOCK(pq->thr->cpu); michael@0: } else { michael@0: pri = pq->thr->priority; michael@0: pq->thr->state = _PR_RUNNABLE; michael@0: michael@0: _PR_RUNQ_LOCK(cpu); michael@0: _PR_ADD_RUNQ(pq->thr, cpu, pri); michael@0: _PR_RUNQ_UNLOCK(cpu); michael@0: } michael@0: } michael@0: _PR_THREAD_UNLOCK(pq->thr); michael@0: } else { michael@0: if (pq->timeout < _PR_IOQ_TIMEOUT(me->cpu)) michael@0: _PR_IOQ_TIMEOUT(me->cpu) = pq->timeout; michael@0: if (_PR_IOQ_MAX_OSFD(me->cpu) < pq_max_osfd) michael@0: _PR_IOQ_MAX_OSFD(me->cpu) = pq_max_osfd; michael@0: } michael@0: } michael@0: if (_PR_IS_NATIVE_THREAD_SUPPORTED()) { michael@0: if (_PR_IOQ_MAX_OSFD(me->cpu) < _pr_md_pipefd[0]) michael@0: _PR_IOQ_MAX_OSFD(me->cpu) = _pr_md_pipefd[0]; michael@0: } michael@0: } michael@0: #endif /* !defined(_PR_USE_POLL) */ michael@0: michael@0: /************************************************************************/ michael@0: michael@0: /* michael@0: ** Called by the scheduler when there is nothing to do. This means that michael@0: ** all threads are blocked on some monitor somewhere. michael@0: ** michael@0: ** Note: this code doesn't release the scheduler lock. michael@0: */ michael@0: /* michael@0: ** Pause the current CPU. 
longjmp to the cpu's pause stack michael@0: ** michael@0: ** This must be called with the scheduler locked michael@0: */ michael@0: void _MD_PauseCPU(PRIntervalTime ticks) michael@0: { michael@0: PRThread *me = _MD_CURRENT_THREAD(); michael@0: #ifdef _PR_USE_POLL michael@0: int timeout; michael@0: struct pollfd *pollfds; /* an array of pollfd structures */ michael@0: struct pollfd *pollfdPtr; /* a pointer that steps through the array */ michael@0: unsigned long npollfds; /* number of pollfd structures in array */ michael@0: unsigned long pollfds_size; michael@0: int nfd; /* to hold the return value of poll() */ michael@0: #else michael@0: struct timeval timeout, *tvp; michael@0: fd_set r, w, e; michael@0: fd_set *rp, *wp, *ep; michael@0: PRInt32 max_osfd, nfd; michael@0: #endif /* _PR_USE_POLL */ michael@0: PRInt32 rv; michael@0: PRCList *q; michael@0: PRUint32 min_timeout; michael@0: sigset_t oldset; michael@0: #ifdef IRIX michael@0: extern sigset_t ints_off; michael@0: #endif michael@0: michael@0: PR_ASSERT(_PR_MD_GET_INTSOFF() != 0); michael@0: michael@0: _PR_MD_IOQ_LOCK(); michael@0: michael@0: #ifdef _PR_USE_POLL michael@0: /* Build up the pollfd structure array to wait on */ michael@0: michael@0: /* Find out how many pollfd structures are needed */ michael@0: npollfds = _PR_IOQ_OSFD_CNT(me->cpu); michael@0: PR_ASSERT(npollfds >= 0); michael@0: michael@0: /* michael@0: * We use a pipe to wake up a native thread. An fd is needed michael@0: * for the pipe and we poll it for reading. michael@0: */ michael@0: if (_PR_IS_NATIVE_THREAD_SUPPORTED()) { michael@0: npollfds++; michael@0: #ifdef IRIX michael@0: /* michael@0: * On Irix, a second pipe is used to cause the primordial cpu to michael@0: * wakeup and exit, when the process is exiting because of a call michael@0: * to exit/PR_ProcessExit. 
michael@0: */ michael@0: if (me->cpu->id == 0) { michael@0: npollfds++; michael@0: } michael@0: #endif michael@0: } michael@0: michael@0: /* michael@0: * if the cpu's pollfd array is not big enough, release it and allocate a new one michael@0: */ michael@0: if (npollfds > _PR_IOQ_POLLFDS_SIZE(me->cpu)) { michael@0: if (_PR_IOQ_POLLFDS(me->cpu) != NULL) michael@0: PR_DELETE(_PR_IOQ_POLLFDS(me->cpu)); michael@0: pollfds_size = PR_MAX(_PR_IOQ_MIN_POLLFDS_SIZE(me->cpu), npollfds); michael@0: pollfds = (struct pollfd *) PR_MALLOC(pollfds_size * sizeof(struct pollfd)); michael@0: _PR_IOQ_POLLFDS(me->cpu) = pollfds; michael@0: _PR_IOQ_POLLFDS_SIZE(me->cpu) = pollfds_size; michael@0: } else { michael@0: pollfds = _PR_IOQ_POLLFDS(me->cpu); michael@0: } michael@0: pollfdPtr = pollfds; michael@0: michael@0: /* michael@0: * If we need to poll the pipe for waking up a native thread, michael@0: * the pipe's fd is the first element in the pollfds array. michael@0: */ michael@0: if (_PR_IS_NATIVE_THREAD_SUPPORTED()) { michael@0: pollfdPtr->fd = _pr_md_pipefd[0]; michael@0: pollfdPtr->events = POLLIN; michael@0: pollfdPtr++; michael@0: #ifdef IRIX michael@0: /* michael@0: * On Irix, the second element is the exit pipe michael@0: */ michael@0: if (me->cpu->id == 0) { michael@0: pollfdPtr->fd = _pr_irix_primoridal_cpu_fd[0]; michael@0: pollfdPtr->events = POLLIN; michael@0: pollfdPtr++; michael@0: } michael@0: #endif michael@0: } michael@0: michael@0: min_timeout = PR_INTERVAL_NO_TIMEOUT; michael@0: for (q = _PR_IOQ(me->cpu).next; q != &_PR_IOQ(me->cpu); q = q->next) { michael@0: PRPollQueue *pq = _PR_POLLQUEUE_PTR(q); michael@0: _PRUnixPollDesc *pds = pq->pds; michael@0: _PRUnixPollDesc *epds = pds + pq->npds; michael@0: michael@0: if (pq->timeout < min_timeout) { michael@0: min_timeout = pq->timeout; michael@0: } michael@0: for (; pds < epds; pds++, pollfdPtr++) { michael@0: /* michael@0: * Assert that the pollfdPtr pointer does not go michael@0: * beyond the end of the pollfds 
array michael@0: */ michael@0: PR_ASSERT(pollfdPtr < pollfds + npollfds); michael@0: pollfdPtr->fd = pds->osfd; michael@0: /* direct copy of poll flags */ michael@0: pollfdPtr->events = pds->in_flags; michael@0: } michael@0: } michael@0: _PR_IOQ_TIMEOUT(me->cpu) = min_timeout; michael@0: #else michael@0: /* michael@0: * assigment of fd_sets michael@0: */ michael@0: r = _PR_FD_READ_SET(me->cpu); michael@0: w = _PR_FD_WRITE_SET(me->cpu); michael@0: e = _PR_FD_EXCEPTION_SET(me->cpu); michael@0: michael@0: rp = &r; michael@0: wp = &w; michael@0: ep = &e; michael@0: michael@0: max_osfd = _PR_IOQ_MAX_OSFD(me->cpu) + 1; michael@0: min_timeout = _PR_IOQ_TIMEOUT(me->cpu); michael@0: #endif /* _PR_USE_POLL */ michael@0: /* michael@0: ** Compute the minimum timeout value: make it the smaller of the michael@0: ** timeouts specified by the i/o pollers or the timeout of the first michael@0: ** sleeping thread. michael@0: */ michael@0: q = _PR_SLEEPQ(me->cpu).next; michael@0: michael@0: if (q != &_PR_SLEEPQ(me->cpu)) { michael@0: PRThread *t = _PR_THREAD_PTR(q); michael@0: michael@0: if (t->sleep < min_timeout) { michael@0: min_timeout = t->sleep; michael@0: } michael@0: } michael@0: if (min_timeout > ticks) { michael@0: min_timeout = ticks; michael@0: } michael@0: michael@0: #ifdef _PR_USE_POLL michael@0: if (min_timeout == PR_INTERVAL_NO_TIMEOUT) michael@0: timeout = -1; michael@0: else michael@0: timeout = PR_IntervalToMilliseconds(min_timeout); michael@0: #else michael@0: if (min_timeout == PR_INTERVAL_NO_TIMEOUT) { michael@0: tvp = NULL; michael@0: } else { michael@0: timeout.tv_sec = PR_IntervalToSeconds(min_timeout); michael@0: timeout.tv_usec = PR_IntervalToMicroseconds(min_timeout) michael@0: % PR_USEC_PER_SEC; michael@0: tvp = &timeout; michael@0: } michael@0: #endif /* _PR_USE_POLL */ michael@0: michael@0: _PR_MD_IOQ_UNLOCK(); michael@0: _MD_CHECK_FOR_EXIT(); michael@0: /* michael@0: * check for i/o operations michael@0: */ michael@0: #ifndef _PR_NO_CLOCK_TIMER 
michael@0: /* michael@0: * Disable the clock interrupts while we are in select, if clock interrupts michael@0: * are enabled. Otherwise, when the select/poll calls are interrupted, the michael@0: * timer value starts ticking from zero again when the system call is restarted. michael@0: */ michael@0: #ifdef IRIX michael@0: /* michael@0: * SIGCHLD signal is used on Irix to detect he termination of an michael@0: * sproc by SIGSEGV, SIGBUS or SIGABRT signals when michael@0: * _nspr_terminate_on_error is set. michael@0: */ michael@0: if ((!_nspr_noclock) || (_nspr_terminate_on_error)) michael@0: #else michael@0: if (!_nspr_noclock) michael@0: #endif /* IRIX */ michael@0: #ifdef IRIX michael@0: sigprocmask(SIG_BLOCK, &ints_off, &oldset); michael@0: #else michael@0: PR_ASSERT(sigismember(&timer_set, SIGALRM)); michael@0: sigprocmask(SIG_BLOCK, &timer_set, &oldset); michael@0: #endif /* IRIX */ michael@0: #endif /* !_PR_NO_CLOCK_TIMER */ michael@0: michael@0: #ifndef _PR_USE_POLL michael@0: PR_ASSERT(FD_ISSET(_pr_md_pipefd[0],rp)); michael@0: nfd = _MD_SELECT(max_osfd, rp, wp, ep, tvp); michael@0: #else michael@0: nfd = _MD_POLL(pollfds, npollfds, timeout); michael@0: #endif /* !_PR_USE_POLL */ michael@0: michael@0: #ifndef _PR_NO_CLOCK_TIMER michael@0: #ifdef IRIX michael@0: if ((!_nspr_noclock) || (_nspr_terminate_on_error)) michael@0: #else michael@0: if (!_nspr_noclock) michael@0: #endif /* IRIX */ michael@0: sigprocmask(SIG_SETMASK, &oldset, 0); michael@0: #endif /* !_PR_NO_CLOCK_TIMER */ michael@0: michael@0: _MD_CHECK_FOR_EXIT(); michael@0: michael@0: #ifdef IRIX michael@0: _PR_MD_primordial_cpu(); michael@0: #endif michael@0: michael@0: _PR_MD_IOQ_LOCK(); michael@0: /* michael@0: ** Notify monitors that are associated with the selected descriptors. 
michael@0: */ michael@0: #ifdef _PR_USE_POLL michael@0: if (nfd > 0) { michael@0: pollfdPtr = pollfds; michael@0: if (_PR_IS_NATIVE_THREAD_SUPPORTED()) { michael@0: /* michael@0: * Assert that the pipe is the first element in the michael@0: * pollfds array. michael@0: */ michael@0: PR_ASSERT(pollfds[0].fd == _pr_md_pipefd[0]); michael@0: if ((pollfds[0].revents & POLLIN) && (nfd == 1)) { michael@0: /* michael@0: * woken up by another thread; read all the data michael@0: * in the pipe to empty the pipe michael@0: */ michael@0: while ((rv = read(_pr_md_pipefd[0], _pr_md_pipebuf, michael@0: PIPE_BUF)) == PIPE_BUF){ michael@0: } michael@0: PR_ASSERT((rv > 0) || ((rv == -1) && (errno == EAGAIN))); michael@0: } michael@0: pollfdPtr++; michael@0: #ifdef IRIX michael@0: /* michael@0: * On Irix, check to see if the primordial cpu needs to exit michael@0: * to cause the process to terminate michael@0: */ michael@0: if (me->cpu->id == 0) { michael@0: PR_ASSERT(pollfds[1].fd == _pr_irix_primoridal_cpu_fd[0]); michael@0: if (pollfdPtr->revents & POLLIN) { michael@0: if (_pr_irix_process_exit) { michael@0: /* michael@0: * process exit due to a call to PR_ProcessExit michael@0: */ michael@0: prctl(PR_SETEXITSIG, SIGKILL); michael@0: _exit(_pr_irix_process_exit_code); michael@0: } else { michael@0: while ((rv = read(_pr_irix_primoridal_cpu_fd[0], michael@0: _pr_md_pipebuf, PIPE_BUF)) == PIPE_BUF) { michael@0: } michael@0: PR_ASSERT(rv > 0); michael@0: } michael@0: } michael@0: pollfdPtr++; michael@0: } michael@0: #endif michael@0: } michael@0: for (q = _PR_IOQ(me->cpu).next; q != &_PR_IOQ(me->cpu); q = q->next) { michael@0: PRPollQueue *pq = _PR_POLLQUEUE_PTR(q); michael@0: PRBool notify = PR_FALSE; michael@0: _PRUnixPollDesc *pds = pq->pds; michael@0: _PRUnixPollDesc *epds = pds + pq->npds; michael@0: michael@0: for (; pds < epds; pds++, pollfdPtr++) { michael@0: /* michael@0: * Assert that the pollfdPtr pointer does not go beyond michael@0: * the end of the pollfds array. 
michael@0: */ michael@0: PR_ASSERT(pollfdPtr < pollfds + npollfds); michael@0: /* michael@0: * Assert that the fd's in the pollfds array (stepped michael@0: * through by pollfdPtr) are in the same order as michael@0: * the fd's in _PR_IOQ() (stepped through by q and pds). michael@0: * This is how the pollfds array was created earlier. michael@0: */ michael@0: PR_ASSERT(pollfdPtr->fd == pds->osfd); michael@0: pds->out_flags = pollfdPtr->revents; michael@0: /* Negative fd's are ignored by poll() */ michael@0: if (pds->osfd >= 0 && pds->out_flags) { michael@0: notify = PR_TRUE; michael@0: } michael@0: } michael@0: if (notify) { michael@0: PRIntn pri; michael@0: PRThread *thred; michael@0: michael@0: PR_REMOVE_LINK(&pq->links); michael@0: pq->on_ioq = PR_FALSE; michael@0: michael@0: thred = pq->thr; michael@0: _PR_THREAD_LOCK(thred); michael@0: if (pq->thr->flags & (_PR_ON_PAUSEQ|_PR_ON_SLEEPQ)) { michael@0: _PRCPU *cpu = pq->thr->cpu; michael@0: _PR_SLEEPQ_LOCK(pq->thr->cpu); michael@0: _PR_DEL_SLEEPQ(pq->thr, PR_TRUE); michael@0: _PR_SLEEPQ_UNLOCK(pq->thr->cpu); michael@0: michael@0: if (pq->thr->flags & _PR_SUSPENDING) { michael@0: /* michael@0: * set thread state to SUSPENDED; michael@0: * a Resume operation on the thread michael@0: * will move it to the runQ michael@0: */ michael@0: pq->thr->state = _PR_SUSPENDED; michael@0: _PR_MISCQ_LOCK(pq->thr->cpu); michael@0: _PR_ADD_SUSPENDQ(pq->thr, pq->thr->cpu); michael@0: _PR_MISCQ_UNLOCK(pq->thr->cpu); michael@0: } else { michael@0: pri = pq->thr->priority; michael@0: pq->thr->state = _PR_RUNNABLE; michael@0: michael@0: _PR_RUNQ_LOCK(cpu); michael@0: _PR_ADD_RUNQ(pq->thr, cpu, pri); michael@0: _PR_RUNQ_UNLOCK(cpu); michael@0: if (_pr_md_idle_cpus > 1) michael@0: _PR_MD_WAKEUP_WAITER(thred); michael@0: } michael@0: } michael@0: _PR_THREAD_UNLOCK(thred); michael@0: _PR_IOQ_OSFD_CNT(me->cpu) -= pq->npds; michael@0: PR_ASSERT(_PR_IOQ_OSFD_CNT(me->cpu) >= 0); michael@0: } michael@0: } michael@0: } else if (nfd == -1) { 
michael@0: PR_LOG(_pr_io_lm, PR_LOG_MAX, ("poll() failed with errno %d", errno)); michael@0: } michael@0: michael@0: #else michael@0: if (nfd > 0) { michael@0: q = _PR_IOQ(me->cpu).next; michael@0: _PR_IOQ_MAX_OSFD(me->cpu) = -1; michael@0: _PR_IOQ_TIMEOUT(me->cpu) = PR_INTERVAL_NO_TIMEOUT; michael@0: while (q != &_PR_IOQ(me->cpu)) { michael@0: PRPollQueue *pq = _PR_POLLQUEUE_PTR(q); michael@0: PRBool notify = PR_FALSE; michael@0: _PRUnixPollDesc *pds = pq->pds; michael@0: _PRUnixPollDesc *epds = pds + pq->npds; michael@0: PRInt32 pq_max_osfd = -1; michael@0: michael@0: q = q->next; michael@0: for (; pds < epds; pds++) { michael@0: PRInt32 osfd = pds->osfd; michael@0: PRInt16 in_flags = pds->in_flags; michael@0: PRInt16 out_flags = 0; michael@0: PR_ASSERT(osfd >= 0 || in_flags == 0); michael@0: if ((in_flags & _PR_UNIX_POLL_READ) && FD_ISSET(osfd, rp)) { michael@0: out_flags |= _PR_UNIX_POLL_READ; michael@0: } michael@0: if ((in_flags & _PR_UNIX_POLL_WRITE) && FD_ISSET(osfd, wp)) { michael@0: out_flags |= _PR_UNIX_POLL_WRITE; michael@0: } michael@0: if ((in_flags & _PR_UNIX_POLL_EXCEPT) && FD_ISSET(osfd, ep)) { michael@0: out_flags |= _PR_UNIX_POLL_EXCEPT; michael@0: } michael@0: pds->out_flags = out_flags; michael@0: if (out_flags) { michael@0: notify = PR_TRUE; michael@0: } michael@0: if (osfd > pq_max_osfd) { michael@0: pq_max_osfd = osfd; michael@0: } michael@0: } michael@0: if (notify == PR_TRUE) { michael@0: PRIntn pri; michael@0: PRThread *thred; michael@0: michael@0: PR_REMOVE_LINK(&pq->links); michael@0: pq->on_ioq = PR_FALSE; michael@0: michael@0: /* michael@0: * Decrement the count of descriptors for each desciptor/event michael@0: * because this I/O request is being removed from the michael@0: * ioq michael@0: */ michael@0: pds = pq->pds; michael@0: for (; pds < epds; pds++) { michael@0: PRInt32 osfd = pds->osfd; michael@0: PRInt16 in_flags = pds->in_flags; michael@0: PR_ASSERT(osfd >= 0 || in_flags == 0); michael@0: if (in_flags & _PR_UNIX_POLL_READ) { 
michael@0: if (--(_PR_FD_READ_CNT(me->cpu))[osfd] == 0) michael@0: FD_CLR(osfd, &_PR_FD_READ_SET(me->cpu)); michael@0: } michael@0: if (in_flags & _PR_UNIX_POLL_WRITE) { michael@0: if (--(_PR_FD_WRITE_CNT(me->cpu))[osfd] == 0) michael@0: FD_CLR(osfd, &_PR_FD_WRITE_SET(me->cpu)); michael@0: } michael@0: if (in_flags & _PR_UNIX_POLL_EXCEPT) { michael@0: if (--(_PR_FD_EXCEPTION_CNT(me->cpu))[osfd] == 0) michael@0: FD_CLR(osfd, &_PR_FD_EXCEPTION_SET(me->cpu)); michael@0: } michael@0: } michael@0: michael@0: /* michael@0: * Because this thread can run on a different cpu right michael@0: * after being added to the run queue, do not dereference michael@0: * pq michael@0: */ michael@0: thred = pq->thr; michael@0: _PR_THREAD_LOCK(thred); michael@0: if (pq->thr->flags & (_PR_ON_PAUSEQ|_PR_ON_SLEEPQ)) { michael@0: _PRCPU *cpu = thred->cpu; michael@0: _PR_SLEEPQ_LOCK(pq->thr->cpu); michael@0: _PR_DEL_SLEEPQ(pq->thr, PR_TRUE); michael@0: _PR_SLEEPQ_UNLOCK(pq->thr->cpu); michael@0: michael@0: if (pq->thr->flags & _PR_SUSPENDING) { michael@0: /* michael@0: * set thread state to SUSPENDED; michael@0: * a Resume operation on the thread michael@0: * will move it to the runQ michael@0: */ michael@0: pq->thr->state = _PR_SUSPENDED; michael@0: _PR_MISCQ_LOCK(pq->thr->cpu); michael@0: _PR_ADD_SUSPENDQ(pq->thr, pq->thr->cpu); michael@0: _PR_MISCQ_UNLOCK(pq->thr->cpu); michael@0: } else { michael@0: pri = pq->thr->priority; michael@0: pq->thr->state = _PR_RUNNABLE; michael@0: michael@0: pq->thr->cpu = cpu; michael@0: _PR_RUNQ_LOCK(cpu); michael@0: _PR_ADD_RUNQ(pq->thr, cpu, pri); michael@0: _PR_RUNQ_UNLOCK(cpu); michael@0: if (_pr_md_idle_cpus > 1) michael@0: _PR_MD_WAKEUP_WAITER(thred); michael@0: } michael@0: } michael@0: _PR_THREAD_UNLOCK(thred); michael@0: } else { michael@0: if (pq->timeout < _PR_IOQ_TIMEOUT(me->cpu)) michael@0: _PR_IOQ_TIMEOUT(me->cpu) = pq->timeout; michael@0: if (_PR_IOQ_MAX_OSFD(me->cpu) < pq_max_osfd) michael@0: _PR_IOQ_MAX_OSFD(me->cpu) = pq_max_osfd; 
michael@0: } michael@0: } michael@0: if (_PR_IS_NATIVE_THREAD_SUPPORTED()) { michael@0: if ((FD_ISSET(_pr_md_pipefd[0], rp)) && (nfd == 1)) { michael@0: /* michael@0: * woken up by another thread; read all the data michael@0: * in the pipe to empty the pipe michael@0: */ michael@0: while ((rv = michael@0: read(_pr_md_pipefd[0], _pr_md_pipebuf, PIPE_BUF)) michael@0: == PIPE_BUF){ michael@0: } michael@0: PR_ASSERT((rv > 0) || michael@0: ((rv == -1) && (errno == EAGAIN))); michael@0: } michael@0: if (_PR_IOQ_MAX_OSFD(me->cpu) < _pr_md_pipefd[0]) michael@0: _PR_IOQ_MAX_OSFD(me->cpu) = _pr_md_pipefd[0]; michael@0: #ifdef IRIX michael@0: if ((me->cpu->id == 0) && michael@0: (FD_ISSET(_pr_irix_primoridal_cpu_fd[0], rp))) { michael@0: if (_pr_irix_process_exit) { michael@0: /* michael@0: * process exit due to a call to PR_ProcessExit michael@0: */ michael@0: prctl(PR_SETEXITSIG, SIGKILL); michael@0: _exit(_pr_irix_process_exit_code); michael@0: } else { michael@0: while ((rv = read(_pr_irix_primoridal_cpu_fd[0], michael@0: _pr_md_pipebuf, PIPE_BUF)) == PIPE_BUF) { michael@0: } michael@0: PR_ASSERT(rv > 0); michael@0: } michael@0: } michael@0: if (me->cpu->id == 0) { michael@0: if (_PR_IOQ_MAX_OSFD(me->cpu) < _pr_irix_primoridal_cpu_fd[0]) michael@0: _PR_IOQ_MAX_OSFD(me->cpu) = _pr_irix_primoridal_cpu_fd[0]; michael@0: } michael@0: #endif michael@0: } michael@0: } else if (nfd < 0) { michael@0: if (errno == EBADF) { michael@0: FindBadFDs(); michael@0: } else { michael@0: PR_LOG(_pr_io_lm, PR_LOG_MAX, ("select() failed with errno %d", michael@0: errno)); michael@0: } michael@0: } else { michael@0: PR_ASSERT(nfd == 0); michael@0: /* michael@0: * compute the new value of _PR_IOQ_TIMEOUT michael@0: */ michael@0: q = _PR_IOQ(me->cpu).next; michael@0: _PR_IOQ_MAX_OSFD(me->cpu) = -1; michael@0: _PR_IOQ_TIMEOUT(me->cpu) = PR_INTERVAL_NO_TIMEOUT; michael@0: while (q != &_PR_IOQ(me->cpu)) { michael@0: PRPollQueue *pq = _PR_POLLQUEUE_PTR(q); michael@0: _PRUnixPollDesc *pds = pq->pds; 
michael@0: _PRUnixPollDesc *epds = pds + pq->npds; michael@0: PRInt32 pq_max_osfd = -1; michael@0: michael@0: q = q->next; michael@0: for (; pds < epds; pds++) { michael@0: if (pds->osfd > pq_max_osfd) { michael@0: pq_max_osfd = pds->osfd; michael@0: } michael@0: } michael@0: if (pq->timeout < _PR_IOQ_TIMEOUT(me->cpu)) michael@0: _PR_IOQ_TIMEOUT(me->cpu) = pq->timeout; michael@0: if (_PR_IOQ_MAX_OSFD(me->cpu) < pq_max_osfd) michael@0: _PR_IOQ_MAX_OSFD(me->cpu) = pq_max_osfd; michael@0: } michael@0: if (_PR_IS_NATIVE_THREAD_SUPPORTED()) { michael@0: if (_PR_IOQ_MAX_OSFD(me->cpu) < _pr_md_pipefd[0]) michael@0: _PR_IOQ_MAX_OSFD(me->cpu) = _pr_md_pipefd[0]; michael@0: } michael@0: } michael@0: #endif /* _PR_USE_POLL */ michael@0: _PR_MD_IOQ_UNLOCK(); michael@0: } michael@0: michael@0: void _MD_Wakeup_CPUs() michael@0: { michael@0: PRInt32 rv, data; michael@0: michael@0: data = 0; michael@0: rv = write(_pr_md_pipefd[1], &data, 1); michael@0: michael@0: while ((rv < 0) && (errno == EAGAIN)) { michael@0: /* michael@0: * pipe full, read all data in pipe to empty it michael@0: */ michael@0: while ((rv = michael@0: read(_pr_md_pipefd[0], _pr_md_pipebuf, PIPE_BUF)) michael@0: == PIPE_BUF) { michael@0: } michael@0: PR_ASSERT((rv > 0) || michael@0: ((rv == -1) && (errno == EAGAIN))); michael@0: rv = write(_pr_md_pipefd[1], &data, 1); michael@0: } michael@0: } michael@0: michael@0: michael@0: void _MD_InitCPUS() michael@0: { michael@0: PRInt32 rv, flags; michael@0: PRThread *me = _MD_CURRENT_THREAD(); michael@0: michael@0: rv = pipe(_pr_md_pipefd); michael@0: PR_ASSERT(rv == 0); michael@0: _PR_IOQ_MAX_OSFD(me->cpu) = _pr_md_pipefd[0]; michael@0: #ifndef _PR_USE_POLL michael@0: FD_SET(_pr_md_pipefd[0], &_PR_FD_READ_SET(me->cpu)); michael@0: #endif michael@0: michael@0: flags = fcntl(_pr_md_pipefd[0], F_GETFL, 0); michael@0: fcntl(_pr_md_pipefd[0], F_SETFL, flags | O_NONBLOCK); michael@0: flags = fcntl(_pr_md_pipefd[1], F_GETFL, 0); michael@0: fcntl(_pr_md_pipefd[1], F_SETFL, 
flags | O_NONBLOCK); michael@0: } michael@0: michael@0: /* michael@0: ** Unix SIGALRM (clock) signal handler michael@0: */ michael@0: static void ClockInterruptHandler() michael@0: { michael@0: int olderrno; michael@0: PRUintn pri; michael@0: _PRCPU *cpu = _PR_MD_CURRENT_CPU(); michael@0: PRThread *me = _MD_CURRENT_THREAD(); michael@0: michael@0: #ifdef SOLARIS michael@0: if (!me || _PR_IS_NATIVE_THREAD(me)) { michael@0: _pr_primordialCPU->u.missed[_pr_primordialCPU->where] |= _PR_MISSED_CLOCK; michael@0: return; michael@0: } michael@0: #endif michael@0: michael@0: if (_PR_MD_GET_INTSOFF() != 0) { michael@0: cpu->u.missed[cpu->where] |= _PR_MISSED_CLOCK; michael@0: return; michael@0: } michael@0: _PR_MD_SET_INTSOFF(1); michael@0: michael@0: olderrno = errno; michael@0: _PR_ClockInterrupt(); michael@0: errno = olderrno; michael@0: michael@0: /* michael@0: ** If the interrupt wants a resched or if some other thread at michael@0: ** the same priority needs the cpu, reschedule. michael@0: */ michael@0: pri = me->priority; michael@0: if ((cpu->u.missed[3] || (_PR_RUNQREADYMASK(me->cpu) >> pri))) { michael@0: #ifdef _PR_NO_PREEMPT michael@0: cpu->resched = PR_TRUE; michael@0: if (pr_interruptSwitchHook) { michael@0: (*pr_interruptSwitchHook)(pr_interruptSwitchHookArg); michael@0: } michael@0: #else /* _PR_NO_PREEMPT */ michael@0: /* michael@0: ** Re-enable unix interrupts (so that we can use michael@0: ** setjmp/longjmp for context switching without having to michael@0: ** worry about the signal state) michael@0: */ michael@0: sigprocmask(SIG_SETMASK, &empty_set, 0); michael@0: PR_LOG(_pr_sched_lm, PR_LOG_MIN, ("clock caused context switch")); michael@0: michael@0: if(!(me->flags & _PR_IDLE_THREAD)) { michael@0: _PR_THREAD_LOCK(me); michael@0: me->state = _PR_RUNNABLE; michael@0: me->cpu = cpu; michael@0: _PR_RUNQ_LOCK(cpu); michael@0: _PR_ADD_RUNQ(me, cpu, pri); michael@0: _PR_RUNQ_UNLOCK(cpu); michael@0: _PR_THREAD_UNLOCK(me); michael@0: } else michael@0: me->state = 
_PR_RUNNABLE; michael@0: _MD_SWITCH_CONTEXT(me); michael@0: PR_LOG(_pr_sched_lm, PR_LOG_MIN, ("clock back from context switch")); michael@0: #endif /* _PR_NO_PREEMPT */ michael@0: } michael@0: /* michael@0: * Because this thread could be running on a different cpu after michael@0: * a context switch the current cpu should be accessed and the michael@0: * value of the 'cpu' variable should not be used. michael@0: */ michael@0: _PR_MD_SET_INTSOFF(0); michael@0: } michael@0: michael@0: /* michael@0: * On HP-UX 9, we have to use the sigvector() interface to restart michael@0: * interrupted system calls, because sigaction() does not have the michael@0: * SA_RESTART flag. michael@0: */ michael@0: michael@0: #ifdef HPUX9 michael@0: static void HPUX9_ClockInterruptHandler( michael@0: int sig, michael@0: int code, michael@0: struct sigcontext *scp) michael@0: { michael@0: ClockInterruptHandler(); michael@0: scp->sc_syscall_action = SIG_RESTART; michael@0: } michael@0: #endif /* HPUX9 */ michael@0: michael@0: /* # of milliseconds per clock tick that we will use */ michael@0: #define MSEC_PER_TICK 50 michael@0: michael@0: michael@0: void _MD_StartInterrupts() michael@0: { michael@0: char *eval; michael@0: michael@0: if ((eval = getenv("NSPR_NOCLOCK")) != NULL) { michael@0: if (atoi(eval) == 0) michael@0: _nspr_noclock = 0; michael@0: else michael@0: _nspr_noclock = 1; michael@0: } michael@0: michael@0: #ifndef _PR_NO_CLOCK_TIMER michael@0: if (!_nspr_noclock) { michael@0: _MD_EnableClockInterrupts(); michael@0: } michael@0: #endif michael@0: } michael@0: michael@0: void _MD_StopInterrupts() michael@0: { michael@0: sigprocmask(SIG_BLOCK, &timer_set, 0); michael@0: } michael@0: michael@0: void _MD_EnableClockInterrupts() michael@0: { michael@0: struct itimerval itval; michael@0: extern PRUintn _pr_numCPU; michael@0: #ifdef HPUX9 michael@0: struct sigvec vec; michael@0: michael@0: vec.sv_handler = (void (*)()) HPUX9_ClockInterruptHandler; michael@0: vec.sv_mask = 0; michael@0: 
vec.sv_flags = 0; michael@0: sigvector(SIGALRM, &vec, 0); michael@0: #else michael@0: struct sigaction vtact; michael@0: michael@0: vtact.sa_handler = (void (*)()) ClockInterruptHandler; michael@0: sigemptyset(&vtact.sa_mask); michael@0: vtact.sa_flags = SA_RESTART; michael@0: sigaction(SIGALRM, &vtact, 0); michael@0: #endif /* HPUX9 */ michael@0: michael@0: PR_ASSERT(_pr_numCPU == 1); michael@0: itval.it_interval.tv_sec = 0; michael@0: itval.it_interval.tv_usec = MSEC_PER_TICK * PR_USEC_PER_MSEC; michael@0: itval.it_value = itval.it_interval; michael@0: setitimer(ITIMER_REAL, &itval, 0); michael@0: } michael@0: michael@0: void _MD_DisableClockInterrupts() michael@0: { michael@0: struct itimerval itval; michael@0: extern PRUintn _pr_numCPU; michael@0: michael@0: PR_ASSERT(_pr_numCPU == 1); michael@0: itval.it_interval.tv_sec = 0; michael@0: itval.it_interval.tv_usec = 0; michael@0: itval.it_value = itval.it_interval; michael@0: setitimer(ITIMER_REAL, &itval, 0); michael@0: } michael@0: michael@0: void _MD_BlockClockInterrupts() michael@0: { michael@0: sigprocmask(SIG_BLOCK, &timer_set, 0); michael@0: } michael@0: michael@0: void _MD_UnblockClockInterrupts() michael@0: { michael@0: sigprocmask(SIG_UNBLOCK, &timer_set, 0); michael@0: } michael@0: michael@0: void _MD_MakeNonblock(PRFileDesc *fd) michael@0: { michael@0: PRInt32 osfd = fd->secret->md.osfd; michael@0: int flags; michael@0: michael@0: if (osfd <= 2) { michael@0: /* Don't mess around with stdin, stdout or stderr */ michael@0: return; michael@0: } michael@0: flags = fcntl(osfd, F_GETFL, 0); michael@0: michael@0: /* michael@0: * Use O_NONBLOCK (POSIX-style non-blocking I/O) whenever possible. michael@0: * On SunOS 4, we must use FNDELAY (BSD-style non-blocking I/O), michael@0: * otherwise connect() still blocks and can be interrupted by SIGALRM. 
michael@0: */ michael@0: michael@0: fcntl(osfd, F_SETFL, flags | O_NONBLOCK); michael@0: } michael@0: michael@0: PRInt32 _MD_open(const char *name, PRIntn flags, PRIntn mode) michael@0: { michael@0: PRInt32 osflags; michael@0: PRInt32 rv, err; michael@0: michael@0: if (flags & PR_RDWR) { michael@0: osflags = O_RDWR; michael@0: } else if (flags & PR_WRONLY) { michael@0: osflags = O_WRONLY; michael@0: } else { michael@0: osflags = O_RDONLY; michael@0: } michael@0: michael@0: if (flags & PR_EXCL) michael@0: osflags |= O_EXCL; michael@0: if (flags & PR_APPEND) michael@0: osflags |= O_APPEND; michael@0: if (flags & PR_TRUNCATE) michael@0: osflags |= O_TRUNC; michael@0: if (flags & PR_SYNC) { michael@0: #if defined(O_SYNC) michael@0: osflags |= O_SYNC; michael@0: #elif defined(O_FSYNC) michael@0: osflags |= O_FSYNC; michael@0: #else michael@0: #error "Neither O_SYNC nor O_FSYNC is defined on this platform" michael@0: #endif michael@0: } michael@0: michael@0: /* michael@0: ** On creations we hold the 'create' lock in order to enforce michael@0: ** the semantics of PR_Rename. 
(see the latter for more details) michael@0: */ michael@0: if (flags & PR_CREATE_FILE) michael@0: { michael@0: osflags |= O_CREAT; michael@0: if (NULL !=_pr_rename_lock) michael@0: PR_Lock(_pr_rename_lock); michael@0: } michael@0: michael@0: #if defined(ANDROID) michael@0: osflags |= O_LARGEFILE; michael@0: #endif michael@0: michael@0: rv = _md_iovector._open64(name, osflags, mode); michael@0: michael@0: if (rv < 0) { michael@0: err = _MD_ERRNO(); michael@0: _PR_MD_MAP_OPEN_ERROR(err); michael@0: } michael@0: michael@0: if ((flags & PR_CREATE_FILE) && (NULL !=_pr_rename_lock)) michael@0: PR_Unlock(_pr_rename_lock); michael@0: return rv; michael@0: } michael@0: michael@0: PRIntervalTime intr_timeout_ticks; michael@0: michael@0: #if defined(SOLARIS) || defined(IRIX) michael@0: static void sigsegvhandler() { michael@0: fprintf(stderr,"Received SIGSEGV\n"); michael@0: fflush(stderr); michael@0: pause(); michael@0: } michael@0: michael@0: static void sigaborthandler() { michael@0: fprintf(stderr,"Received SIGABRT\n"); michael@0: fflush(stderr); michael@0: pause(); michael@0: } michael@0: michael@0: static void sigbushandler() { michael@0: fprintf(stderr,"Received SIGBUS\n"); michael@0: fflush(stderr); michael@0: pause(); michael@0: } michael@0: #endif /* SOLARIS, IRIX */ michael@0: michael@0: #endif /* !defined(_PR_PTHREADS) */ michael@0: michael@0: void _MD_query_fd_inheritable(PRFileDesc *fd) michael@0: { michael@0: int flags; michael@0: michael@0: PR_ASSERT(_PR_TRI_UNKNOWN == fd->secret->inheritable); michael@0: flags = fcntl(fd->secret->md.osfd, F_GETFD, 0); michael@0: PR_ASSERT(-1 != flags); michael@0: fd->secret->inheritable = (flags & FD_CLOEXEC) ? 
michael@0: _PR_TRI_FALSE : _PR_TRI_TRUE; michael@0: } michael@0: michael@0: PROffset32 _MD_lseek(PRFileDesc *fd, PROffset32 offset, PRSeekWhence whence) michael@0: { michael@0: PROffset32 rv, where; michael@0: michael@0: switch (whence) { michael@0: case PR_SEEK_SET: michael@0: where = SEEK_SET; michael@0: break; michael@0: case PR_SEEK_CUR: michael@0: where = SEEK_CUR; michael@0: break; michael@0: case PR_SEEK_END: michael@0: where = SEEK_END; michael@0: break; michael@0: default: michael@0: PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0); michael@0: rv = -1; michael@0: goto done; michael@0: } michael@0: rv = lseek(fd->secret->md.osfd,offset,where); michael@0: if (rv == -1) michael@0: { michael@0: PRInt32 syserr = _MD_ERRNO(); michael@0: _PR_MD_MAP_LSEEK_ERROR(syserr); michael@0: } michael@0: done: michael@0: return(rv); michael@0: } michael@0: michael@0: PROffset64 _MD_lseek64(PRFileDesc *fd, PROffset64 offset, PRSeekWhence whence) michael@0: { michael@0: PRInt32 where; michael@0: PROffset64 rv; michael@0: michael@0: switch (whence) michael@0: { michael@0: case PR_SEEK_SET: michael@0: where = SEEK_SET; michael@0: break; michael@0: case PR_SEEK_CUR: michael@0: where = SEEK_CUR; michael@0: break; michael@0: case PR_SEEK_END: michael@0: where = SEEK_END; michael@0: break; michael@0: default: michael@0: PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0); michael@0: rv = minus_one; michael@0: goto done; michael@0: } michael@0: rv = _md_iovector._lseek64(fd->secret->md.osfd, offset, where); michael@0: if (LL_EQ(rv, minus_one)) michael@0: { michael@0: PRInt32 syserr = _MD_ERRNO(); michael@0: _PR_MD_MAP_LSEEK_ERROR(syserr); michael@0: } michael@0: done: michael@0: return rv; michael@0: } /* _MD_lseek64 */ michael@0: michael@0: /* michael@0: ** _MD_set_fileinfo_times -- michael@0: ** Set the modifyTime and creationTime of the PRFileInfo michael@0: ** structure using the values in struct stat. 
michael@0: ** michael@0: ** _MD_set_fileinfo64_times -- michael@0: ** Set the modifyTime and creationTime of the PRFileInfo64 michael@0: ** structure using the values in _MDStat64. michael@0: */ michael@0: michael@0: #if defined(_PR_STAT_HAS_ST_ATIM) michael@0: /* michael@0: ** struct stat has st_atim, st_mtim, and st_ctim fields of michael@0: ** type timestruc_t. michael@0: */ michael@0: static void _MD_set_fileinfo_times( michael@0: const struct stat *sb, michael@0: PRFileInfo *info) michael@0: { michael@0: PRInt64 us, s2us; michael@0: michael@0: LL_I2L(s2us, PR_USEC_PER_SEC); michael@0: LL_I2L(info->modifyTime, sb->st_mtim.tv_sec); michael@0: LL_MUL(info->modifyTime, info->modifyTime, s2us); michael@0: LL_I2L(us, sb->st_mtim.tv_nsec / 1000); michael@0: LL_ADD(info->modifyTime, info->modifyTime, us); michael@0: LL_I2L(info->creationTime, sb->st_ctim.tv_sec); michael@0: LL_MUL(info->creationTime, info->creationTime, s2us); michael@0: LL_I2L(us, sb->st_ctim.tv_nsec / 1000); michael@0: LL_ADD(info->creationTime, info->creationTime, us); michael@0: } michael@0: michael@0: static void _MD_set_fileinfo64_times( michael@0: const _MDStat64 *sb, michael@0: PRFileInfo64 *info) michael@0: { michael@0: PRInt64 us, s2us; michael@0: michael@0: LL_I2L(s2us, PR_USEC_PER_SEC); michael@0: LL_I2L(info->modifyTime, sb->st_mtim.tv_sec); michael@0: LL_MUL(info->modifyTime, info->modifyTime, s2us); michael@0: LL_I2L(us, sb->st_mtim.tv_nsec / 1000); michael@0: LL_ADD(info->modifyTime, info->modifyTime, us); michael@0: LL_I2L(info->creationTime, sb->st_ctim.tv_sec); michael@0: LL_MUL(info->creationTime, info->creationTime, s2us); michael@0: LL_I2L(us, sb->st_ctim.tv_nsec / 1000); michael@0: LL_ADD(info->creationTime, info->creationTime, us); michael@0: } michael@0: #elif defined(_PR_STAT_HAS_ST_ATIM_UNION) michael@0: /* michael@0: ** The st_atim, st_mtim, and st_ctim fields in struct stat are michael@0: ** unions with a st__tim union member of type timestruc_t. 
michael@0: */ michael@0: static void _MD_set_fileinfo_times( michael@0: const struct stat *sb, michael@0: PRFileInfo *info) michael@0: { michael@0: PRInt64 us, s2us; michael@0: michael@0: LL_I2L(s2us, PR_USEC_PER_SEC); michael@0: LL_I2L(info->modifyTime, sb->st_mtim.st__tim.tv_sec); michael@0: LL_MUL(info->modifyTime, info->modifyTime, s2us); michael@0: LL_I2L(us, sb->st_mtim.st__tim.tv_nsec / 1000); michael@0: LL_ADD(info->modifyTime, info->modifyTime, us); michael@0: LL_I2L(info->creationTime, sb->st_ctim.st__tim.tv_sec); michael@0: LL_MUL(info->creationTime, info->creationTime, s2us); michael@0: LL_I2L(us, sb->st_ctim.st__tim.tv_nsec / 1000); michael@0: LL_ADD(info->creationTime, info->creationTime, us); michael@0: } michael@0: michael@0: static void _MD_set_fileinfo64_times( michael@0: const _MDStat64 *sb, michael@0: PRFileInfo64 *info) michael@0: { michael@0: PRInt64 us, s2us; michael@0: michael@0: LL_I2L(s2us, PR_USEC_PER_SEC); michael@0: LL_I2L(info->modifyTime, sb->st_mtim.st__tim.tv_sec); michael@0: LL_MUL(info->modifyTime, info->modifyTime, s2us); michael@0: LL_I2L(us, sb->st_mtim.st__tim.tv_nsec / 1000); michael@0: LL_ADD(info->modifyTime, info->modifyTime, us); michael@0: LL_I2L(info->creationTime, sb->st_ctim.st__tim.tv_sec); michael@0: LL_MUL(info->creationTime, info->creationTime, s2us); michael@0: LL_I2L(us, sb->st_ctim.st__tim.tv_nsec / 1000); michael@0: LL_ADD(info->creationTime, info->creationTime, us); michael@0: } michael@0: #elif defined(_PR_STAT_HAS_ST_ATIMESPEC) michael@0: /* michael@0: ** struct stat has st_atimespec, st_mtimespec, and st_ctimespec michael@0: ** fields of type struct timespec. 
michael@0: */ michael@0: #if defined(_PR_TIMESPEC_HAS_TS_SEC) michael@0: static void _MD_set_fileinfo_times( michael@0: const struct stat *sb, michael@0: PRFileInfo *info) michael@0: { michael@0: PRInt64 us, s2us; michael@0: michael@0: LL_I2L(s2us, PR_USEC_PER_SEC); michael@0: LL_I2L(info->modifyTime, sb->st_mtimespec.ts_sec); michael@0: LL_MUL(info->modifyTime, info->modifyTime, s2us); michael@0: LL_I2L(us, sb->st_mtimespec.ts_nsec / 1000); michael@0: LL_ADD(info->modifyTime, info->modifyTime, us); michael@0: LL_I2L(info->creationTime, sb->st_ctimespec.ts_sec); michael@0: LL_MUL(info->creationTime, info->creationTime, s2us); michael@0: LL_I2L(us, sb->st_ctimespec.ts_nsec / 1000); michael@0: LL_ADD(info->creationTime, info->creationTime, us); michael@0: } michael@0: michael@0: static void _MD_set_fileinfo64_times( michael@0: const _MDStat64 *sb, michael@0: PRFileInfo64 *info) michael@0: { michael@0: PRInt64 us, s2us; michael@0: michael@0: LL_I2L(s2us, PR_USEC_PER_SEC); michael@0: LL_I2L(info->modifyTime, sb->st_mtimespec.ts_sec); michael@0: LL_MUL(info->modifyTime, info->modifyTime, s2us); michael@0: LL_I2L(us, sb->st_mtimespec.ts_nsec / 1000); michael@0: LL_ADD(info->modifyTime, info->modifyTime, us); michael@0: LL_I2L(info->creationTime, sb->st_ctimespec.ts_sec); michael@0: LL_MUL(info->creationTime, info->creationTime, s2us); michael@0: LL_I2L(us, sb->st_ctimespec.ts_nsec / 1000); michael@0: LL_ADD(info->creationTime, info->creationTime, us); michael@0: } michael@0: #else /* _PR_TIMESPEC_HAS_TS_SEC */ michael@0: /* michael@0: ** The POSIX timespec structure has tv_sec and tv_nsec. 
michael@0: */ michael@0: static void _MD_set_fileinfo_times( michael@0: const struct stat *sb, michael@0: PRFileInfo *info) michael@0: { michael@0: PRInt64 us, s2us; michael@0: michael@0: LL_I2L(s2us, PR_USEC_PER_SEC); michael@0: LL_I2L(info->modifyTime, sb->st_mtimespec.tv_sec); michael@0: LL_MUL(info->modifyTime, info->modifyTime, s2us); michael@0: LL_I2L(us, sb->st_mtimespec.tv_nsec / 1000); michael@0: LL_ADD(info->modifyTime, info->modifyTime, us); michael@0: LL_I2L(info->creationTime, sb->st_ctimespec.tv_sec); michael@0: LL_MUL(info->creationTime, info->creationTime, s2us); michael@0: LL_I2L(us, sb->st_ctimespec.tv_nsec / 1000); michael@0: LL_ADD(info->creationTime, info->creationTime, us); michael@0: } michael@0: michael@0: static void _MD_set_fileinfo64_times( michael@0: const _MDStat64 *sb, michael@0: PRFileInfo64 *info) michael@0: { michael@0: PRInt64 us, s2us; michael@0: michael@0: LL_I2L(s2us, PR_USEC_PER_SEC); michael@0: LL_I2L(info->modifyTime, sb->st_mtimespec.tv_sec); michael@0: LL_MUL(info->modifyTime, info->modifyTime, s2us); michael@0: LL_I2L(us, sb->st_mtimespec.tv_nsec / 1000); michael@0: LL_ADD(info->modifyTime, info->modifyTime, us); michael@0: LL_I2L(info->creationTime, sb->st_ctimespec.tv_sec); michael@0: LL_MUL(info->creationTime, info->creationTime, s2us); michael@0: LL_I2L(us, sb->st_ctimespec.tv_nsec / 1000); michael@0: LL_ADD(info->creationTime, info->creationTime, us); michael@0: } michael@0: #endif /* _PR_TIMESPEC_HAS_TS_SEC */ michael@0: #elif defined(_PR_STAT_HAS_ONLY_ST_ATIME) michael@0: /* michael@0: ** struct stat only has st_atime, st_mtime, and st_ctime fields michael@0: ** of type time_t. 
michael@0: */ michael@0: static void _MD_set_fileinfo_times( michael@0: const struct stat *sb, michael@0: PRFileInfo *info) michael@0: { michael@0: PRInt64 s, s2us; michael@0: LL_I2L(s2us, PR_USEC_PER_SEC); michael@0: LL_I2L(s, sb->st_mtime); michael@0: LL_MUL(s, s, s2us); michael@0: info->modifyTime = s; michael@0: LL_I2L(s, sb->st_ctime); michael@0: LL_MUL(s, s, s2us); michael@0: info->creationTime = s; michael@0: } michael@0: michael@0: static void _MD_set_fileinfo64_times( michael@0: const _MDStat64 *sb, michael@0: PRFileInfo64 *info) michael@0: { michael@0: PRInt64 s, s2us; michael@0: LL_I2L(s2us, PR_USEC_PER_SEC); michael@0: LL_I2L(s, sb->st_mtime); michael@0: LL_MUL(s, s, s2us); michael@0: info->modifyTime = s; michael@0: LL_I2L(s, sb->st_ctime); michael@0: LL_MUL(s, s, s2us); michael@0: info->creationTime = s; michael@0: } michael@0: #else michael@0: #error "I don't know yet" michael@0: #endif michael@0: michael@0: static int _MD_convert_stat_to_fileinfo( michael@0: const struct stat *sb, michael@0: PRFileInfo *info) michael@0: { michael@0: if (S_IFREG & sb->st_mode) michael@0: info->type = PR_FILE_FILE; michael@0: else if (S_IFDIR & sb->st_mode) michael@0: info->type = PR_FILE_DIRECTORY; michael@0: else michael@0: info->type = PR_FILE_OTHER; michael@0: michael@0: #if defined(_PR_HAVE_LARGE_OFF_T) michael@0: if (0x7fffffffL < sb->st_size) michael@0: { michael@0: PR_SetError(PR_FILE_TOO_BIG_ERROR, 0); michael@0: return -1; michael@0: } michael@0: #endif /* defined(_PR_HAVE_LARGE_OFF_T) */ michael@0: info->size = sb->st_size; michael@0: michael@0: _MD_set_fileinfo_times(sb, info); michael@0: return 0; michael@0: } /* _MD_convert_stat_to_fileinfo */ michael@0: michael@0: static int _MD_convert_stat64_to_fileinfo64( michael@0: const _MDStat64 *sb, michael@0: PRFileInfo64 *info) michael@0: { michael@0: if (S_IFREG & sb->st_mode) michael@0: info->type = PR_FILE_FILE; michael@0: else if (S_IFDIR & sb->st_mode) michael@0: info->type = PR_FILE_DIRECTORY; michael@0: 
else michael@0: info->type = PR_FILE_OTHER; michael@0: michael@0: LL_I2L(info->size, sb->st_size); michael@0: michael@0: _MD_set_fileinfo64_times(sb, info); michael@0: return 0; michael@0: } /* _MD_convert_stat64_to_fileinfo64 */ michael@0: michael@0: PRInt32 _MD_getfileinfo(const char *fn, PRFileInfo *info) michael@0: { michael@0: PRInt32 rv; michael@0: struct stat sb; michael@0: michael@0: rv = stat(fn, &sb); michael@0: if (rv < 0) michael@0: _PR_MD_MAP_STAT_ERROR(_MD_ERRNO()); michael@0: else if (NULL != info) michael@0: rv = _MD_convert_stat_to_fileinfo(&sb, info); michael@0: return rv; michael@0: } michael@0: michael@0: PRInt32 _MD_getfileinfo64(const char *fn, PRFileInfo64 *info) michael@0: { michael@0: _MDStat64 sb; michael@0: PRInt32 rv = _md_iovector._stat64(fn, &sb); michael@0: if (rv < 0) michael@0: _PR_MD_MAP_STAT_ERROR(_MD_ERRNO()); michael@0: else if (NULL != info) michael@0: rv = _MD_convert_stat64_to_fileinfo64(&sb, info); michael@0: return rv; michael@0: } michael@0: michael@0: PRInt32 _MD_getopenfileinfo(const PRFileDesc *fd, PRFileInfo *info) michael@0: { michael@0: struct stat sb; michael@0: PRInt32 rv = fstat(fd->secret->md.osfd, &sb); michael@0: if (rv < 0) michael@0: _PR_MD_MAP_FSTAT_ERROR(_MD_ERRNO()); michael@0: else if (NULL != info) michael@0: rv = _MD_convert_stat_to_fileinfo(&sb, info); michael@0: return rv; michael@0: } michael@0: michael@0: PRInt32 _MD_getopenfileinfo64(const PRFileDesc *fd, PRFileInfo64 *info) michael@0: { michael@0: _MDStat64 sb; michael@0: PRInt32 rv = _md_iovector._fstat64(fd->secret->md.osfd, &sb); michael@0: if (rv < 0) michael@0: _PR_MD_MAP_FSTAT_ERROR(_MD_ERRNO()); michael@0: else if (NULL != info) michael@0: rv = _MD_convert_stat64_to_fileinfo64(&sb, info); michael@0: return rv; michael@0: } michael@0: michael@0: /* michael@0: * _md_iovector._open64 must be initialized to 'open' so that _PR_InitLog can michael@0: * open the log file during NSPR initialization, before _md_iovector is michael@0: * initialized 
by _PR_MD_FINAL_INIT. This means the log file cannot be a michael@0: * large file on some platforms. michael@0: */ michael@0: #ifdef SYMBIAN michael@0: struct _MD_IOVector _md_iovector; /* Will crash if NSPR_LOG_FILE is set. */ michael@0: #else michael@0: struct _MD_IOVector _md_iovector = { open }; michael@0: #endif michael@0: michael@0: /* michael@0: ** These implementations are to emulate large file routines on systems that michael@0: ** don't have them. Their goal is to check in case overflow occurs. Otherwise michael@0: ** they will just operate as normal using 32-bit file routines. michael@0: ** michael@0: ** The checking might be pre- or post-op, depending on the semantics. michael@0: */ michael@0: michael@0: #if defined(SOLARIS2_5) michael@0: michael@0: static PRIntn _MD_solaris25_fstat64(PRIntn osfd, _MDStat64 *buf) michael@0: { michael@0: PRInt32 rv; michael@0: struct stat sb; michael@0: michael@0: rv = fstat(osfd, &sb); michael@0: if (rv >= 0) michael@0: { michael@0: /* michael@0: ** I'm only copying the fields that are immediately needed. michael@0: ** If somebody else calls this function, some of the fields michael@0: ** may not be defined. michael@0: */ michael@0: (void)memset(buf, 0, sizeof(_MDStat64)); michael@0: buf->st_mode = sb.st_mode; michael@0: buf->st_ctim = sb.st_ctim; michael@0: buf->st_mtim = sb.st_mtim; michael@0: buf->st_size = sb.st_size; michael@0: } michael@0: return rv; michael@0: } /* _MD_solaris25_fstat64 */ michael@0: michael@0: static PRIntn _MD_solaris25_stat64(const char *fn, _MDStat64 *buf) michael@0: { michael@0: PRInt32 rv; michael@0: struct stat sb; michael@0: michael@0: rv = stat(fn, &sb); michael@0: if (rv >= 0) michael@0: { michael@0: /* michael@0: ** I'm only copying the fields that are immediately needed. michael@0: ** If somebody else calls this function, some of the fields michael@0: ** may not be defined. 
michael@0: */ michael@0: (void)memset(buf, 0, sizeof(_MDStat64)); michael@0: buf->st_mode = sb.st_mode; michael@0: buf->st_ctim = sb.st_ctim; michael@0: buf->st_mtim = sb.st_mtim; michael@0: buf->st_size = sb.st_size; michael@0: } michael@0: return rv; michael@0: } /* _MD_solaris25_stat64 */ michael@0: #endif /* defined(SOLARIS2_5) */ michael@0: michael@0: #if defined(_PR_NO_LARGE_FILES) || defined(SOLARIS2_5) michael@0: michael@0: static PROffset64 _MD_Unix_lseek64(PRIntn osfd, PROffset64 offset, PRIntn whence) michael@0: { michael@0: PRUint64 maxoff; michael@0: PROffset64 rv = minus_one; michael@0: LL_I2L(maxoff, 0x7fffffff); michael@0: if (LL_CMP(offset, <=, maxoff)) michael@0: { michael@0: off_t off; michael@0: LL_L2I(off, offset); michael@0: LL_I2L(rv, lseek(osfd, off, whence)); michael@0: } michael@0: else errno = EFBIG; /* we can't go there */ michael@0: return rv; michael@0: } /* _MD_Unix_lseek64 */ michael@0: michael@0: static void* _MD_Unix_mmap64( michael@0: void *addr, PRSize len, PRIntn prot, PRIntn flags, michael@0: PRIntn fildes, PRInt64 offset) michael@0: { michael@0: PR_SetError(PR_FILE_TOO_BIG_ERROR, 0); michael@0: return NULL; michael@0: } /* _MD_Unix_mmap64 */ michael@0: #endif /* defined(_PR_NO_LARGE_FILES) || defined(SOLARIS2_5) */ michael@0: michael@0: /* Android doesn't have mmap64. 
*/ michael@0: #if defined(ANDROID) michael@0: extern void *__mmap2(void *, size_t, int, int, int, size_t); michael@0: michael@0: #define ANDROID_PAGE_SIZE 4096 michael@0: michael@0: static void * michael@0: mmap64(void *addr, size_t len, int prot, int flags, int fd, loff_t offset) michael@0: { michael@0: if (offset & (ANDROID_PAGE_SIZE - 1)) { michael@0: errno = EINVAL; michael@0: return MAP_FAILED; michael@0: } michael@0: return __mmap2(addr, len, prot, flags, fd, offset / ANDROID_PAGE_SIZE); michael@0: } michael@0: #endif michael@0: michael@0: #if defined(OSF1) && defined(__GNUC__) michael@0: michael@0: /* michael@0: * On OSF1 V5.0A, defines stat and fstat as michael@0: * macros when compiled under gcc, so it is rather tricky to michael@0: * take the addresses of the real functions the macros expend michael@0: * to. A simple solution is to define forwarder functions michael@0: * and take the addresses of the forwarder functions instead. michael@0: */ michael@0: michael@0: static int stat_forwarder(const char *path, struct stat *buffer) michael@0: { michael@0: return stat(path, buffer); michael@0: } michael@0: michael@0: static int fstat_forwarder(int filedes, struct stat *buffer) michael@0: { michael@0: return fstat(filedes, buffer); michael@0: } michael@0: michael@0: #endif michael@0: michael@0: static void _PR_InitIOV(void) michael@0: { michael@0: #if defined(SOLARIS2_5) michael@0: PRLibrary *lib; michael@0: void *open64_func; michael@0: michael@0: open64_func = PR_FindSymbolAndLibrary("open64", &lib); michael@0: if (NULL != open64_func) michael@0: { michael@0: PR_ASSERT(NULL != lib); michael@0: _md_iovector._open64 = (_MD_Open64)open64_func; michael@0: _md_iovector._mmap64 = (_MD_Mmap64)PR_FindSymbol(lib, "mmap64"); michael@0: _md_iovector._fstat64 = (_MD_Fstat64)PR_FindSymbol(lib, "fstat64"); michael@0: _md_iovector._stat64 = (_MD_Stat64)PR_FindSymbol(lib, "stat64"); michael@0: _md_iovector._lseek64 = (_MD_Lseek64)PR_FindSymbol(lib, "lseek64"); michael@0: 
(void)PR_UnloadLibrary(lib); michael@0: } michael@0: else michael@0: { michael@0: _md_iovector._open64 = open; michael@0: _md_iovector._mmap64 = _MD_Unix_mmap64; michael@0: _md_iovector._fstat64 = _MD_solaris25_fstat64; michael@0: _md_iovector._stat64 = _MD_solaris25_stat64; michael@0: _md_iovector._lseek64 = _MD_Unix_lseek64; michael@0: } michael@0: #elif defined(_PR_NO_LARGE_FILES) michael@0: _md_iovector._open64 = open; michael@0: _md_iovector._mmap64 = _MD_Unix_mmap64; michael@0: _md_iovector._fstat64 = fstat; michael@0: _md_iovector._stat64 = stat; michael@0: _md_iovector._lseek64 = _MD_Unix_lseek64; michael@0: #elif defined(_PR_HAVE_OFF64_T) michael@0: #if defined(IRIX5_3) || defined(ANDROID) michael@0: /* michael@0: * Android doesn't have open64. We pass the O_LARGEFILE flag to open michael@0: * in _MD_open. michael@0: */ michael@0: _md_iovector._open64 = open; michael@0: #else michael@0: _md_iovector._open64 = open64; michael@0: #endif michael@0: _md_iovector._mmap64 = mmap64; michael@0: _md_iovector._fstat64 = fstat64; michael@0: _md_iovector._stat64 = stat64; michael@0: _md_iovector._lseek64 = lseek64; michael@0: #elif defined(_PR_HAVE_LARGE_OFF_T) michael@0: _md_iovector._open64 = open; michael@0: _md_iovector._mmap64 = mmap; michael@0: #if defined(OSF1) && defined(__GNUC__) michael@0: _md_iovector._fstat64 = fstat_forwarder; michael@0: _md_iovector._stat64 = stat_forwarder; michael@0: #else michael@0: _md_iovector._fstat64 = fstat; michael@0: _md_iovector._stat64 = stat; michael@0: #endif michael@0: _md_iovector._lseek64 = lseek; michael@0: #else michael@0: #error "I don't know yet" michael@0: #endif michael@0: LL_I2L(minus_one, -1); michael@0: } /* _PR_InitIOV */ michael@0: michael@0: void _PR_UnixInit(void) michael@0: { michael@0: struct sigaction sigact; michael@0: int rv; michael@0: michael@0: sigemptyset(&timer_set); michael@0: michael@0: #if !defined(_PR_PTHREADS) michael@0: michael@0: sigaddset(&timer_set, SIGALRM); michael@0: 
sigemptyset(&empty_set); michael@0: intr_timeout_ticks = michael@0: PR_SecondsToInterval(_PR_INTERRUPT_CHECK_INTERVAL_SECS); michael@0: michael@0: #if defined(SOLARIS) || defined(IRIX) michael@0: michael@0: if (getenv("NSPR_SIGSEGV_HANDLE")) { michael@0: sigact.sa_handler = sigsegvhandler; michael@0: sigact.sa_flags = 0; michael@0: sigact.sa_mask = timer_set; michael@0: sigaction(SIGSEGV, &sigact, 0); michael@0: } michael@0: michael@0: if (getenv("NSPR_SIGABRT_HANDLE")) { michael@0: sigact.sa_handler = sigaborthandler; michael@0: sigact.sa_flags = 0; michael@0: sigact.sa_mask = timer_set; michael@0: sigaction(SIGABRT, &sigact, 0); michael@0: } michael@0: michael@0: if (getenv("NSPR_SIGBUS_HANDLE")) { michael@0: sigact.sa_handler = sigbushandler; michael@0: sigact.sa_flags = 0; michael@0: sigact.sa_mask = timer_set; michael@0: sigaction(SIGBUS, &sigact, 0); michael@0: } michael@0: michael@0: #endif michael@0: #endif /* !defined(_PR_PTHREADS) */ michael@0: michael@0: /* michael@0: * Under HP-UX DCE threads, sigaction() installs a per-thread michael@0: * handler, so we use sigvector() to install a process-wide michael@0: * handler. 
michael@0: */ michael@0: #if defined(HPUX) && defined(_PR_DCETHREADS) michael@0: { michael@0: struct sigvec vec; michael@0: michael@0: vec.sv_handler = SIG_IGN; michael@0: vec.sv_mask = 0; michael@0: vec.sv_flags = 0; michael@0: rv = sigvector(SIGPIPE, &vec, NULL); michael@0: PR_ASSERT(0 == rv); michael@0: } michael@0: #else michael@0: sigact.sa_handler = SIG_IGN; michael@0: sigemptyset(&sigact.sa_mask); michael@0: sigact.sa_flags = 0; michael@0: rv = sigaction(SIGPIPE, &sigact, 0); michael@0: PR_ASSERT(0 == rv); michael@0: #endif /* HPUX && _PR_DCETHREADS */ michael@0: michael@0: _pr_rename_lock = PR_NewLock(); michael@0: PR_ASSERT(NULL != _pr_rename_lock); michael@0: _pr_Xfe_mon = PR_NewMonitor(); michael@0: PR_ASSERT(NULL != _pr_Xfe_mon); michael@0: michael@0: _PR_InitIOV(); /* one last hack */ michael@0: } michael@0: michael@0: void _PR_UnixCleanup(void) michael@0: { michael@0: if (_pr_rename_lock) { michael@0: PR_DestroyLock(_pr_rename_lock); michael@0: _pr_rename_lock = NULL; michael@0: } michael@0: if (_pr_Xfe_mon) { michael@0: PR_DestroyMonitor(_pr_Xfe_mon); michael@0: _pr_Xfe_mon = NULL; michael@0: } michael@0: } michael@0: michael@0: #if !defined(_PR_PTHREADS) michael@0: michael@0: /* michael@0: * Variables used by the GC code, initialized in _MD_InitSegs(). michael@0: */ michael@0: static PRInt32 _pr_zero_fd = -1; michael@0: static PRLock *_pr_md_lock = NULL; michael@0: michael@0: /* michael@0: * _MD_InitSegs -- michael@0: * michael@0: * This is Unix's version of _PR_MD_INIT_SEGS(), which is michael@0: * called by _PR_InitSegs(), which in turn is called by michael@0: * PR_Init(). 
 */
void _MD_InitSegs(void)
{
#ifdef DEBUG
    /*
    ** Disable using mmap(2) if NSPR_NO_MMAP is set
    */
    if (getenv("NSPR_NO_MMAP")) {
        _pr_zero_fd = -2;  /* sentinel: force heap allocation in _MD_AllocSegment */
        return;
    }
#endif
    _pr_zero_fd = open("/dev/zero",O_RDWR , 0);
    /* Prevent the fd from being inherited by child processes */
    fcntl(_pr_zero_fd, F_SETFD, FD_CLOEXEC);
    _pr_md_lock = PR_NewLock();
}

/*
 * _MD_AllocSegment -- allocate a memory segment of `size` bytes.
 * Prefers an mmap of /dev/zero at `vaddr` (or at a monotonically advancing
 * hint starting at _PR_STACK_VMBASE); falls back to the heap when the
 * /dev/zero fd is unavailable or mmap fails.  Serialized by _pr_md_lock
 * because `lastaddr` is shared mutable state.
 */
PRStatus _MD_AllocSegment(PRSegment *seg, PRUint32 size, void *vaddr)
{
    static char *lastaddr = (char*) _PR_STACK_VMBASE;
    PRStatus retval = PR_SUCCESS;
    int prot;
    void *rv;

    PR_ASSERT(seg != 0);
    PR_ASSERT(size != 0);

    PR_Lock(_pr_md_lock);
    if (_pr_zero_fd < 0) {
from_heap:  /* also jumped to below when mmap fails */
        seg->vaddr = PR_MALLOC(size);
        if (!seg->vaddr) {
            retval = PR_FAILURE;
        }
        else {
            seg->size = size;
        }
        goto exit;
    }

    prot = PROT_READ|PROT_WRITE;
    /*
     * On Alpha Linux, the user-level thread stack needs
     * to be made executable because longjmp/signal seem
     * to put machine instructions on the stack.
     */
#if defined(LINUX) && defined(__alpha)
    prot |= PROT_EXEC;
#endif
    rv = mmap((vaddr != 0) ? vaddr : lastaddr, size, prot,
              _MD_MMAP_FLAGS,
              _pr_zero_fd, 0);
    if (rv == (void*)-1) {
        goto from_heap;
    }
    lastaddr += size;  /* advance the placement hint for the next segment */
    seg->vaddr = rv;
    seg->size = size;
    seg->flags = _PR_SEG_VM;

exit:
    PR_Unlock(_pr_md_lock);
    return retval;
}

/* Release a segment: munmap if it came from mmap, otherwise free the heap
 * block.  Distinguished by the _PR_SEG_VM flag set in _MD_AllocSegment. */
void _MD_FreeSegment(PRSegment *seg)
{
    if (seg->flags & _PR_SEG_VM)
        (void) munmap(seg->vaddr, seg->size);
    else
        PR_DELETE(seg->vaddr);
}

#endif /* _PR_PTHREADS */

/*
 *-----------------------------------------------------------------------
 *
 * PR_Now --
 *
 *     Returns the current time in microseconds since the epoch.
 *     The epoch is midnight January 1, 1970 GMT.
 *     The implementation is machine dependent.  This is the Unix
 *     implementation.
 *     Cf. time_t time(time_t *tp)
 *
 *-----------------------------------------------------------------------
 */
PR_IMPLEMENT(PRTime)
PR_Now(void)
{
    struct timeval tv;
    PRInt64 s, us, s2us;

    GETTIMEOFDAY(&tv);
    /* seconds * 1e6 + microseconds, done with the LL_* 64-bit macros */
    LL_I2L(s2us, PR_USEC_PER_SEC);
    LL_I2L(s, tv.tv_sec);
    LL_I2L(us, tv.tv_usec);
    LL_MUL(s, s, s2us);
    LL_ADD(s, s, us);
    return s;
}

#if defined(_MD_INTERVAL_USE_GTOD)
/*
 * This version of interval times is based on the time of day
 * capability offered by the system. This isn't valid for two reasons:
 * 1) The time of day is neither linear nor montonically increasing
 * 2) The units here are milliseconds. That's not appropriate for our use.
 */
PRIntervalTime _PR_UNIX_GetInterval()
{
    struct timeval time;
    PRIntervalTime ticks;

    (void)GETTIMEOFDAY(&time);  /* fallicy of course */
    ticks = (PRUint32)time.tv_sec * PR_MSEC_PER_SEC;  /* that's in milliseconds */
    ticks += (PRUint32)time.tv_usec / PR_USEC_PER_MSEC;  /* so's that */
    return ticks;
}  /* _PR_UNIX_GetInterval */

PRIntervalTime _PR_UNIX_TicksPerSecond()
{
    return 1000;  /* this needs some work :) */
}
#endif

#if defined(HAVE_CLOCK_MONOTONIC)
/* Monotonic-clock variant: immune to wall-clock steps, same ms units. */
PRIntervalTime _PR_UNIX_GetInterval2()
{
    struct timespec time;
    PRIntervalTime ticks;

    if (clock_gettime(CLOCK_MONOTONIC, &time) != 0) {
        fprintf(stderr, "clock_gettime failed: %d\n", errno);
        abort();
    }

    ticks = (PRUint32)time.tv_sec * PR_MSEC_PER_SEC;
    ticks += (PRUint32)time.tv_nsec / PR_NSEC_PER_MSEC;
    return ticks;
}

PRIntervalTime _PR_UNIX_TicksPerSecond2()
{
    return 1000;
}
#endif

#if !defined(_PR_PTHREADS)
/*
 * Wait for I/O on multiple descriptors.
 *
 * Return 0 if timed out, return -1 if interrupted,
 * else return the number of ready descriptors.
 */
PRInt32 _PR_WaitForMultipleFDs(
    _PRUnixPollDesc *unixpds,
    PRInt32 pdcnt,
    PRIntervalTime timeout)
{
    PRPollQueue pq;
    PRIntn is;
    PRInt32 rv;
    _PRCPU *io_cpu;
    _PRUnixPollDesc *unixpd, *eunixpd;
    PRThread *me = _PR_MD_CURRENT_THREAD();

    PR_ASSERT(!(me->flags & _PR_IDLE_THREAD));

    /* Bail out early if an interrupt is already pending on this thread. */
    if (_PR_PENDING_INTERRUPT(me)) {
        me->flags &= ~_PR_INTERRUPT;
        PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0);
        return -1;
    }

    pq.pds = unixpds;
    pq.npds = pdcnt;

    /* NOTE(review): _PR_INTSOFF presumably masks clock interrupts while the
     * i/o queue and thread are manipulated — confirm against primpl.h. */
    _PR_INTSOFF(is);
    _PR_MD_IOQ_LOCK();
    _PR_THREAD_LOCK(me);

    /* Queue this thread's poll request on the current CPU's i/o queue. */
    pq.thr = me;
    io_cpu = me->cpu;
    pq.on_ioq = PR_TRUE;
    pq.timeout = timeout;
    _PR_ADD_TO_IOQ(pq, me->cpu);

#if !defined(_PR_USE_POLL)
    /* select() path: register every descriptor in the per-CPU fd sets and
     * bump the per-fd reference counts. */
    eunixpd = unixpds + pdcnt;
    for (unixpd = unixpds; unixpd < eunixpd; unixpd++) {
        PRInt32 osfd = unixpd->osfd;
        if (unixpd->in_flags & _PR_UNIX_POLL_READ) {
            FD_SET(osfd, &_PR_FD_READ_SET(me->cpu));
            _PR_FD_READ_CNT(me->cpu)[osfd]++;
        }
        if (unixpd->in_flags & _PR_UNIX_POLL_WRITE) {
            FD_SET(osfd, &_PR_FD_WRITE_SET(me->cpu));
            (_PR_FD_WRITE_CNT(me->cpu))[osfd]++;
        }
        if (unixpd->in_flags & _PR_UNIX_POLL_EXCEPT) {
            FD_SET(osfd, &_PR_FD_EXCEPTION_SET(me->cpu));
            (_PR_FD_EXCEPTION_CNT(me->cpu))[osfd]++;
        }
        if (osfd > _PR_IOQ_MAX_OSFD(me->cpu)) {
            _PR_IOQ_MAX_OSFD(me->cpu) = osfd;
        }
    }
#endif  /* !defined(_PR_USE_POLL) */

    /* Shrink the CPU's aggregate i/o timeout if ours is sooner. */
    if (_PR_IOQ_TIMEOUT(me->cpu) > timeout) {
        _PR_IOQ_TIMEOUT(me->cpu) = timeout;
    }

    _PR_IOQ_OSFD_CNT(me->cpu) += pdcnt;

    /* Block this thread until i/o readiness, timeout, or interrupt. */
    _PR_SLEEPQ_LOCK(me->cpu);
    _PR_ADD_SLEEPQ(me, timeout);
    me->state = _PR_IO_WAIT;
    me->io_pending = PR_TRUE;
    me->io_suspended = PR_FALSE;
    _PR_SLEEPQ_UNLOCK(me->cpu);
    _PR_THREAD_UNLOCK(me);
    _PR_MD_IOQ_UNLOCK();

    _PR_MD_WAIT(me, timeout);

    me->io_pending = PR_FALSE;
    me->io_suspended = PR_FALSE;

    /*
     * This thread should run on the same cpu on which it was blocked; when
     * the IO request times out the fd sets and fd counts for the
     * cpu are updated below.
     */
    PR_ASSERT(me->cpu == io_cpu);

    /*
    ** If we timed out the pollq might still be on the ioq. Remove it
    ** before continuing.
    */
    if (pq.on_ioq) {
        _PR_MD_IOQ_LOCK();
        /*
         * Need to check pq.on_ioq again
         * (it may have been cleared before we took the lock).
         */
        if (pq.on_ioq) {
            PR_REMOVE_LINK(&pq.links);
#ifndef _PR_USE_POLL
            /* Undo the per-fd registrations made above; clear an fd from
             * the set only when its reference count drops to zero. */
            eunixpd = unixpds + pdcnt;
            for (unixpd = unixpds; unixpd < eunixpd; unixpd++) {
                PRInt32 osfd = unixpd->osfd;
                PRInt16 in_flags = unixpd->in_flags;

                if (in_flags & _PR_UNIX_POLL_READ) {
                    if (--(_PR_FD_READ_CNT(me->cpu))[osfd] == 0)
                        FD_CLR(osfd, &_PR_FD_READ_SET(me->cpu));
                }
                if (in_flags & _PR_UNIX_POLL_WRITE) {
                    if (--(_PR_FD_WRITE_CNT(me->cpu))[osfd] == 0)
                        FD_CLR(osfd, &_PR_FD_WRITE_SET(me->cpu));
                }
                if (in_flags & _PR_UNIX_POLL_EXCEPT) {
                    if (--(_PR_FD_EXCEPTION_CNT(me->cpu))[osfd] == 0)
                        FD_CLR(osfd, &_PR_FD_EXCEPTION_SET(me->cpu));
                }
            }
#endif  /* _PR_USE_POLL */
            PR_ASSERT(pq.npds == pdcnt);
            _PR_IOQ_OSFD_CNT(me->cpu) -= pdcnt;
            PR_ASSERT(_PR_IOQ_OSFD_CNT(me->cpu) >= 0);
        }
        _PR_MD_IOQ_UNLOCK();
    }
    /* XXX Should we use _PR_FAST_INTSON or _PR_INTSON? */
    if (1 == pdcnt) {
        _PR_FAST_INTSON(is);
    } else {
        _PR_INTSON(is);
    }

    /* Interrupt may have arrived while we were blocked. */
    if (_PR_PENDING_INTERRUPT(me)) {
        me->flags &= ~_PR_INTERRUPT;
        PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0);
        return -1;
    }

    rv = 0;
    /* pq.on_ioq == PR_FALSE means the i/o completed (we were not merely
     * timed out), so out_flags were filled in by the i/o dispatcher. */
    if (pq.on_ioq == PR_FALSE) {
        /* Count the number of ready descriptors */
        while (--pdcnt >= 0) {
            if (unixpds->out_flags != 0) {
                rv++;
            }
            unixpds++;
        }
    }

    return rv;
}

/*
 * Unblock threads waiting for I/O
 * used when interrupting threads
 *
 * NOTE: The thread lock should held when this function is called.
 * On return, the thread lock is released.
 */
void _PR_Unblock_IO_Wait(PRThread *thr)
{
    int pri = thr->priority;
    _PRCPU *cpu = thr->cpu;

    /*
     * GLOBAL threads wakeup periodically to check for interrupt,
     * so nothing to do here except drop the lock.
     */
    if (_PR_IS_NATIVE_THREAD(thr)) {
        _PR_THREAD_UNLOCK(thr);
        return;
    }

    /* Local (user-level) thread: pull it off the sleep queue and make it
     * runnable again on its CPU's run queue. */
    PR_ASSERT(thr->flags & (_PR_ON_SLEEPQ | _PR_ON_PAUSEQ));
    _PR_SLEEPQ_LOCK(cpu);
    _PR_DEL_SLEEPQ(thr, PR_TRUE);
    _PR_SLEEPQ_UNLOCK(cpu);

    PR_ASSERT(!(thr->flags & _PR_IDLE_THREAD));
    thr->state = _PR_RUNNABLE;
    _PR_RUNQ_LOCK(cpu);
    _PR_ADD_RUNQ(thr, cpu, pri);
    _PR_RUNQ_UNLOCK(cpu);
    _PR_THREAD_UNLOCK(thr);  /* released on behalf of the caller (see NOTE) */
    _PR_MD_WAKEUP_WAITER(thr);
}
#endif  /* !defined(_PR_PTHREADS) */

/*
 * When a nonblocking connect has completed, determine whether it
 * succeeded or failed, and if it failed, what the error code is.
 *
 * The function returns the error code.  An error code of 0 means
 * that the nonblocking connect succeeded.
 */

int _MD_unix_get_nonblocking_connect_error(int osfd)
{
#if defined(NTO)
    /* Neutrino does not support the SO_ERROR socket option */
    PRInt32 rv;
    PRNetAddr addr;
    _PRSockLen_t addrlen = sizeof(addr);

    /* Test to see if we are using the Tiny TCP/IP Stack or the Full one. */
    struct statvfs superblock;
    rv = fstatvfs(osfd, &superblock);
    if (rv == 0) {
        if (strcmp(superblock.f_basetype, "ttcpip") == 0) {
            /* Using the Tiny Stack!  Probe connectedness via getpeername. */
            rv = getpeername(osfd, (struct sockaddr *) &addr,
                             (_PRSockLen_t *) &addrlen);
            if (rv == -1) {
                int errno_copy = errno;  /* make a copy so I don't
                                          * accidentally reset */

                if (errno_copy == ENOTCONN) {
                    struct stat StatInfo;
                    rv = fstat(osfd, &StatInfo);
                    if (rv == 0) {
                        time_t current_time = time(NULL);

                        /*
                         * this is a real hack, can't explain why it
                         * works it just does
                         */
                        if (abs(current_time - StatInfo.st_atime) < 5) {
                            return ECONNREFUSED;
                        } else {
                            return ETIMEDOUT;
                        }
                    } else {
                        return ECONNREFUSED;
                    }
                } else {
                    return errno_copy;
                }
            } else {
                /* No Error */
                return 0;
            }
        } else {
            /* Have the FULL Stack which supports SO_ERROR */
            /* Hasn't been written yet, never been tested! */
            /* Jerry.Kirk@Nexwarecorp.com */

            int err;
            _PRSockLen_t optlen = sizeof(err);

            if (getsockopt(osfd, SOL_SOCKET, SO_ERROR,
                           (char *) &err, &optlen) == -1) {
                return errno;
            } else {
                return err;
            }
        }
    } else {
        return ECONNREFUSED;
    }
#elif defined(UNIXWARE)
    /*
     * getsockopt() fails with EPIPE, so use getmsg() instead.
     */

    int rv;
    int flags = 0;
    rv = getmsg(osfd, NULL, NULL, &flags);
    PR_ASSERT(-1 == rv || 0 == rv);
    if (-1 == rv && errno != EAGAIN && errno != EWOULDBLOCK) {
        return errno;
    }
    return 0;  /* no error */
#else
    /* Standard BSD-socket path: SO_ERROR yields the deferred connect(2)
     * error (0 on success). */
    int err;
    _PRSockLen_t optlen = sizeof(err);
    if (getsockopt(osfd, SOL_SOCKET, SO_ERROR, (char *) &err, &optlen) == -1) {
        return errno;
    } else {
        return err;
    }
#endif
}

/************************************************************************/

/*
** Special hacks for xlib. Xlib/Xt/Xm is not re-entrant nor is it thread
** safe.  Unfortunately, neither is mozilla. To make these programs work
** in a pre-emptive threaded environment, we need to use a lock.
*/

void PR_XLock(void)
{
    PR_EnterMonitor(_pr_Xfe_mon);
}

void PR_XUnlock(void)
{
    PR_ExitMonitor(_pr_Xfe_mon);
}

/* Is the calling thread inside the Xlib monitor? */
PRBool PR_XIsLocked(void)
{
    return (PR_InMonitor(_pr_Xfe_mon)) ?
PR_TRUE : PR_FALSE;
}

/* Wait (up to ms milliseconds) for a notify on the Xlib monitor. */
void PR_XWait(int ms)
{
    PR_Wait(_pr_Xfe_mon, PR_MillisecondsToInterval(ms));
}

void PR_XNotify(void)
{
    PR_Notify(_pr_Xfe_mon);
}

void PR_XNotifyAll(void)
{
    PR_NotifyAll(_pr_Xfe_mon);
}

/*
 * Whole-file advisory locking, in three platform flavors:
 * fcntl(F_SETLK*), BSD flock(), or lockf().  In each flavor:
 *   _MD_LockFile   - blocking exclusive lock
 *   _MD_TLockFile  - non-blocking ("try") exclusive lock
 *   _MD_UnlockFile - release the lock
 */
#if defined(HAVE_FCNTL_FILE_LOCKING)

PRStatus
_MD_LockFile(PRInt32 f)
{
    PRInt32 rv;
    struct flock arg;

    arg.l_type = F_WRLCK;
    arg.l_whence = SEEK_SET;
    arg.l_start = 0;
    arg.l_len = 0;  /* until EOF */
    rv = fcntl(f, F_SETLKW, &arg);  /* F_SETLKW: wait for the lock */
    if (rv == 0)
        return PR_SUCCESS;
    _PR_MD_MAP_FLOCK_ERROR(_MD_ERRNO());
    return PR_FAILURE;
}

PRStatus
_MD_TLockFile(PRInt32 f)
{
    PRInt32 rv;
    struct flock arg;

    arg.l_type = F_WRLCK;
    arg.l_whence = SEEK_SET;
    arg.l_start = 0;
    arg.l_len = 0;  /* until EOF */
    rv = fcntl(f, F_SETLK, &arg);  /* F_SETLK: fail instead of blocking */
    if (rv == 0)
        return PR_SUCCESS;
    _PR_MD_MAP_FLOCK_ERROR(_MD_ERRNO());
    return PR_FAILURE;
}

PRStatus
_MD_UnlockFile(PRInt32 f)
{
    PRInt32 rv;
    struct flock arg;

    arg.l_type = F_UNLCK;
    arg.l_whence = SEEK_SET;
    arg.l_start = 0;
    arg.l_len = 0;  /* until EOF */
    rv = fcntl(f, F_SETLK, &arg);
    if (rv == 0)
        return PR_SUCCESS;
    _PR_MD_MAP_FLOCK_ERROR(_MD_ERRNO());
    return PR_FAILURE;
}

#elif defined(HAVE_BSD_FLOCK)

/* NOTE(review): the header name was lost in extraction — presumably
 * <sys/file.h>, which declares flock(); confirm against the original. */
#include

PRStatus
_MD_LockFile(PRInt32 f)
{
    PRInt32 rv;
    rv = flock(f, LOCK_EX);
    if (rv == 0)
        return PR_SUCCESS;
    _PR_MD_MAP_FLOCK_ERROR(_MD_ERRNO());
    return PR_FAILURE;
}

PRStatus
_MD_TLockFile(PRInt32 f)
{
    PRInt32 rv;
    rv = flock(f, LOCK_EX|LOCK_NB);  /* LOCK_NB: fail instead of blocking */
    if (rv == 0)
        return PR_SUCCESS;
    _PR_MD_MAP_FLOCK_ERROR(_MD_ERRNO());
    return PR_FAILURE;
}

PRStatus
_MD_UnlockFile(PRInt32 f)
{
    PRInt32 rv;
    rv = flock(f, LOCK_UN);
    if (rv == 0)
        return PR_SUCCESS;
    _PR_MD_MAP_FLOCK_ERROR(_MD_ERRNO());
    return PR_FAILURE;
}
#else

PRStatus
_MD_LockFile(PRInt32 f)
{
    PRInt32 rv;
    rv = lockf(f, F_LOCK, 0);
    if (rv == 0)
        return PR_SUCCESS;
    _PR_MD_MAP_LOCKF_ERROR(_MD_ERRNO());
    return PR_FAILURE;
}

PRStatus
_MD_TLockFile(PRInt32 f)
{
    PRInt32 rv;
    rv = lockf(f, F_TLOCK, 0);  /* F_TLOCK: fail instead of blocking */
    if (rv == 0)
        return PR_SUCCESS;
    _PR_MD_MAP_LOCKF_ERROR(_MD_ERRNO());
    return PR_FAILURE;
}

PRStatus
_MD_UnlockFile(PRInt32 f)
{
    PRInt32 rv;
    rv = lockf(f, F_ULOCK, 0);
    if (rv == 0)
        return PR_SUCCESS;
    _PR_MD_MAP_LOCKF_ERROR(_MD_ERRNO());
    return PR_FAILURE;
}
#endif

/* Thin wrapper over gethostname(2), mapping errno into NSPR error codes. */
PRStatus _MD_gethostname(char *name, PRUint32 namelen)
{
    PRIntn rv;

    rv = gethostname(name, namelen);
    if (0 == rv) {
        return PR_SUCCESS;
    }
    _PR_MD_MAP_GETHOSTNAME_ERROR(_MD_ERRNO());
    return
PR_FAILURE; michael@0: } michael@0: michael@0: PRStatus _MD_getsysinfo(PRSysInfo cmd, char *name, PRUint32 namelen) michael@0: { michael@0: struct utsname info; michael@0: michael@0: PR_ASSERT((cmd == PR_SI_SYSNAME) || (cmd == PR_SI_RELEASE)); michael@0: michael@0: if (uname(&info) == -1) { michael@0: _PR_MD_MAP_DEFAULT_ERROR(errno); michael@0: return PR_FAILURE; michael@0: } michael@0: if (PR_SI_SYSNAME == cmd) michael@0: (void)PR_snprintf(name, namelen, info.sysname); michael@0: else if (PR_SI_RELEASE == cmd) michael@0: (void)PR_snprintf(name, namelen, info.release); michael@0: else michael@0: return PR_FAILURE; michael@0: return PR_SUCCESS; michael@0: } michael@0: michael@0: /* michael@0: ******************************************************************* michael@0: * michael@0: * Memory-mapped files michael@0: * michael@0: ******************************************************************* michael@0: */ michael@0: michael@0: PRStatus _MD_CreateFileMap(PRFileMap *fmap, PRInt64 size) michael@0: { michael@0: PRFileInfo info; michael@0: PRUint32 sz; michael@0: michael@0: LL_L2UI(sz, size); michael@0: if (sz) { michael@0: if (PR_GetOpenFileInfo(fmap->fd, &info) == PR_FAILURE) { michael@0: return PR_FAILURE; michael@0: } michael@0: if (sz > info.size) { michael@0: /* michael@0: * Need to extend the file michael@0: */ michael@0: if (fmap->prot != PR_PROT_READWRITE) { michael@0: PR_SetError(PR_NO_ACCESS_RIGHTS_ERROR, 0); michael@0: return PR_FAILURE; michael@0: } michael@0: if (PR_Seek(fmap->fd, sz - 1, PR_SEEK_SET) == -1) { michael@0: return PR_FAILURE; michael@0: } michael@0: if (PR_Write(fmap->fd, "", 1) != 1) { michael@0: return PR_FAILURE; michael@0: } michael@0: } michael@0: } michael@0: if (fmap->prot == PR_PROT_READONLY) { michael@0: fmap->md.prot = PROT_READ; michael@0: #ifdef OSF1V4_MAP_PRIVATE_BUG michael@0: /* michael@0: * Use MAP_SHARED to work around a bug in OSF1 V4.0D michael@0: * (QAR 70220 in the OSF_QAR database) that results in michael@0: * 
corrupted data in the memory-mapped region. This michael@0: * bug is fixed in V5.0. michael@0: */ michael@0: fmap->md.flags = MAP_SHARED; michael@0: #else michael@0: fmap->md.flags = MAP_PRIVATE; michael@0: #endif michael@0: } else if (fmap->prot == PR_PROT_READWRITE) { michael@0: fmap->md.prot = PROT_READ | PROT_WRITE; michael@0: fmap->md.flags = MAP_SHARED; michael@0: } else { michael@0: PR_ASSERT(fmap->prot == PR_PROT_WRITECOPY); michael@0: fmap->md.prot = PROT_READ | PROT_WRITE; michael@0: fmap->md.flags = MAP_PRIVATE; michael@0: } michael@0: return PR_SUCCESS; michael@0: } michael@0: michael@0: void * _MD_MemMap( michael@0: PRFileMap *fmap, michael@0: PRInt64 offset, michael@0: PRUint32 len) michael@0: { michael@0: PRInt32 off; michael@0: void *addr; michael@0: michael@0: LL_L2I(off, offset); michael@0: if ((addr = mmap(0, len, fmap->md.prot, fmap->md.flags, michael@0: fmap->fd->secret->md.osfd, off)) == (void *) -1) { michael@0: _PR_MD_MAP_MMAP_ERROR(_MD_ERRNO()); michael@0: addr = NULL; michael@0: } michael@0: return addr; michael@0: } michael@0: michael@0: PRStatus _MD_MemUnmap(void *addr, PRUint32 len) michael@0: { michael@0: if (munmap(addr, len) == 0) { michael@0: return PR_SUCCESS; michael@0: } michael@0: _PR_MD_MAP_DEFAULT_ERROR(errno); michael@0: return PR_FAILURE; michael@0: } michael@0: michael@0: PRStatus _MD_CloseFileMap(PRFileMap *fmap) michael@0: { michael@0: if ( PR_TRUE == fmap->md.isAnonFM ) { michael@0: PRStatus rc = PR_Close( fmap->fd ); michael@0: if ( PR_FAILURE == rc ) { michael@0: PR_LOG( _pr_io_lm, PR_LOG_DEBUG, michael@0: ("_MD_CloseFileMap(): error closing anonymnous file map osfd")); michael@0: return PR_FAILURE; michael@0: } michael@0: } michael@0: PR_DELETE(fmap); michael@0: return PR_SUCCESS; michael@0: } michael@0: michael@0: PRStatus _MD_SyncMemMap( michael@0: PRFileDesc *fd, michael@0: void *addr, michael@0: PRUint32 len) michael@0: { michael@0: /* msync(..., MS_SYNC) alone is sufficient to flush modified data to disk 
     * synchronously. It is not necessary to call fsync. */
    if (msync(addr, len, MS_SYNC) == 0) {
        return PR_SUCCESS;
    }
    _PR_MD_MAP_DEFAULT_ERROR(errno);
    return PR_FAILURE;
}

#if defined(_PR_NEED_FAKE_POLL)

/*
 * Some platforms don't have poll().  For easier porting of code
 * that calls poll(), we emulate poll() using select().
 *
 * NOTE(review): fds are passed to FD_SET without checking against
 * FD_SETSIZE, so descriptors >= FD_SETSIZE cannot be handled by this
 * emulation — an inherent select() limitation.
 */

int poll(struct pollfd *filedes, unsigned long nfds, int timeout)
{
    int i;  /* NOTE(review): compared against unsigned long nfds; assumes
             * nfds fits in an int */
    int rv;
    int maxfd;
    fd_set rd, wr, ex;
    struct timeval tv, *tvp;

    /* poll() timeout is in milliseconds; only -1 (infinite) is a valid
     * negative value. */
    if (timeout < 0 && timeout != -1) {
        errno = EINVAL;
        return -1;
    }

    if (timeout == -1) {
        tvp = NULL;  /* block indefinitely */
    } else {
        tv.tv_sec = timeout / 1000;
        tv.tv_usec = (timeout % 1000) * 1000;
        tvp = &tv;
    }

    maxfd = -1;
    FD_ZERO(&rd);
    FD_ZERO(&wr);
    FD_ZERO(&ex);

    for (i = 0; i < nfds; i++) {
        int osfd = filedes[i].fd;
        int events = filedes[i].events;
        PRBool fdHasEvent = PR_FALSE;

        if (osfd < 0) {
            continue;  /* Skip this osfd. */
        }

        /*
         * Map the poll events to the select fd_sets.
         *     POLLIN, POLLRDNORM  ===> readable
         *     POLLOUT, POLLWRNORM ===> writable
         *     POLLPRI, POLLRDBAND ===> exception
         *     POLLNORM, POLLWRBAND (and POLLMSG on some platforms)
         *     are ignored.
         *
         * The output events POLLERR and POLLHUP are never turned on.
         * POLLNVAL may be turned on.
         */

        if (events & (POLLIN | POLLRDNORM)) {
            FD_SET(osfd, &rd);
            fdHasEvent = PR_TRUE;
        }
        if (events & (POLLOUT | POLLWRNORM)) {
            FD_SET(osfd, &wr);
            fdHasEvent = PR_TRUE;
        }
        if (events & (POLLPRI | POLLRDBAND)) {
            FD_SET(osfd, &ex);
            fdHasEvent = PR_TRUE;
        }
        if (fdHasEvent && osfd > maxfd) {
            maxfd = osfd;
        }
    }

    rv = select(maxfd + 1, &rd, &wr, &ex, tvp);

    /* Compute poll results */
    if (rv > 0) {
        /* Recount: select's rv counts set bits, poll's counts descriptors. */
        rv = 0;
        for (i = 0; i < nfds; i++) {
            PRBool fdHasEvent = PR_FALSE;

            filedes[i].revents = 0;
            if (filedes[i].fd < 0) {
                continue;
            }
            if (FD_ISSET(filedes[i].fd, &rd)) {
                if (filedes[i].events & POLLIN) {
                    filedes[i].revents |= POLLIN;
                }
                if (filedes[i].events & POLLRDNORM) {
                    filedes[i].revents |= POLLRDNORM;
                }
                fdHasEvent = PR_TRUE;
            }
            if (FD_ISSET(filedes[i].fd, &wr)) {
                if (filedes[i].events & POLLOUT) {
                    filedes[i].revents |= POLLOUT;
                }
                if (filedes[i].events & POLLWRNORM) {
                    filedes[i].revents |= POLLWRNORM;
                }
                fdHasEvent = PR_TRUE;
            }
            if (FD_ISSET(filedes[i].fd, &ex)) {
                if (filedes[i].events & POLLPRI) {
                    filedes[i].revents |= POLLPRI;
                }
                if (filedes[i].events & POLLRDBAND) {
                    filedes[i].revents |= POLLRDBAND;
                }
                fdHasEvent = PR_TRUE;
            }
            if (fdHasEvent) {
                rv++;
            }
        }
        PR_ASSERT(rv > 0);
    } else if (rv == -1 && errno == EBADF) {
        /* select rejects the whole set on a bad fd; poll instead reports
         * POLLNVAL per descriptor.  Probe each fd with fcntl(F_GETFL). */
        rv = 0;
        for (i = 0; i < nfds; i++) {
            filedes[i].revents = 0;
            if (filedes[i].fd < 0) {
                continue;
            }
            if (fcntl(filedes[i].fd, F_GETFL, 0) == -1) {
                filedes[i].revents = POLLNVAL;
                rv++;
            }
        }
        PR_ASSERT(rv > 0);
    }
    PR_ASSERT(-1 != timeout || rv != 0);

    return rv;
}
#endif  /* _PR_NEED_FAKE_POLL */