/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "primpl.h"

#include <string.h>

/*****************************************************************************/
/*****************************************************************************/
/************************** File descriptor caching **************************/
/*****************************************************************************/
/*****************************************************************************/

/*
** This code is built into debuggable versions of NSPR to assist in
** finding misused file descriptors. Since file descriptors (PRFileDesc)
** are identified by a pointer to their structure, they can be the
** target of dangling references. Furthermore, NSPR caches and tries
** to aggressively reuse file descriptors, leading to more ambiguity.
** The following code will allow a debugging client to set environment
** variables and control the number of file descriptors that will be
** preserved before they are recycled. The environment variables are
** NSPR_FD_CACHE_SIZE_LOW and NSPR_FD_CACHE_SIZE_HIGH. The former sets
** the number of descriptors NSPR will allocate before beginning to
** recycle. The latter is the maximum number permitted in the cache
** (exclusive of those in use) at a time.
*/
typedef struct _PR_Fd_Cache
{
    PRLock *ml;
    PRIntn count;
    PRStack *stack;
    PRFileDesc *head, *tail;
    PRIntn limit_low, limit_high;
} _PR_Fd_Cache;

static _PR_Fd_Cache _pr_fd_cache;
/* Byte offset of the 'higher' field within a PRFileDesc; used to map a
** PRStackElem (which is embedded at 'higher') back to its PRFileDesc. */
static PRFileDesc **stack2fd = &(((PRFileDesc*)NULL)->higher);
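
/*
** Illustrative sketch, not part of the library: two ways a debugging
** client could enable the extended cache described above.  The sizes
** used here are arbitrary examples, not recommended values, and the
** helper name below is hypothetical.
**
** From the environment (read once by _PR_InitFdCache at startup):
**
**     NSPR_FD_CACHE_SIZE_LOW=100 NSPR_FD_CACHE_SIZE_HIGH=1000 ./myapp
**
** Or at runtime, through the public API implemented in this file:
*/
#if 0
static void EnableFdCacheForDebugging(void)
{
    /* Keep at least 100 freed PRFileDesc structures before recycling
    ** begins, and hold at most 1000 idle descriptors in the cache. */
    PRStatus rv = PR_SetFDCacheSize(100, 1000);
    PR_ASSERT(PR_SUCCESS == rv);
}
#endif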

/*
** Get a FileDescriptor from the cache if one exists. If not allocate
** a new one from the heap.
*/
PRFileDesc *_PR_Getfd(void)
{
    PRFileDesc *fd;
    /*
    ** $$$
    ** This may look a little wasteful. We'll see. Right now I want to
    ** be able to toggle between caching and not at runtime to measure
    ** the differences. If it isn't too annoying, I'll leave it in.
    ** $$$$
    **
    ** The test is against _pr_fd_cache.limit_high. If that's zero,
    ** we're not doing the extended cache but going for performance.
    */
    if (0 == _pr_fd_cache.limit_high)
    {
        PRStackElem *pop;
        PR_ASSERT(NULL != _pr_fd_cache.stack);
        pop = PR_StackPop(_pr_fd_cache.stack);
        if (NULL == pop) goto allocate;
        fd = (PRFileDesc*)((PRPtrdiff)pop - (PRPtrdiff)stack2fd);
    }
    else
    {
        do
        {
            if (NULL == _pr_fd_cache.head) goto allocate;  /* nothing there */
            if (_pr_fd_cache.count < _pr_fd_cache.limit_low) goto allocate;

            /* we "should" be able to extract an fd from the cache */
            PR_Lock(_pr_fd_cache.ml);  /* need the lock to do this safely */
            fd = _pr_fd_cache.head;  /* protected extraction */
            if (NULL == fd)  /* unexpected, but not fatal */
            {
                PR_ASSERT(0 == _pr_fd_cache.count);
                PR_ASSERT(NULL == _pr_fd_cache.tail);
            }
            else
            {
                _pr_fd_cache.count -= 1;
                _pr_fd_cache.head = fd->higher;
                if (NULL == _pr_fd_cache.head)
                {
                    PR_ASSERT(0 == _pr_fd_cache.count);
                    _pr_fd_cache.tail = NULL;
                }
                PR_ASSERT(&_pr_faulty_methods == fd->methods);
                PR_ASSERT(PR_INVALID_IO_LAYER == fd->identity);
                PR_ASSERT(_PR_FILEDESC_FREED == fd->secret->state);
            }
            PR_Unlock(_pr_fd_cache.ml);

        } while (NULL == fd);  /* then go around and allocate a new one */
    }

finished:
    fd->dtor = NULL;
    fd->lower = fd->higher = NULL;
    fd->identity = PR_NSPR_IO_LAYER;
    memset(fd->secret, 0, sizeof(PRFilePrivate));
    return fd;

allocate:
    fd = PR_NEW(PRFileDesc);
    if (NULL != fd)
    {
        fd->secret = PR_NEW(PRFilePrivate);
        if (NULL == fd->secret) PR_DELETE(fd);
    }
    if (NULL != fd) goto finished;
    else return NULL;

}  /* _PR_Getfd */
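
/*
** Illustrative note, not part of the library: when the extended cache
** is off, _PR_Putfd pushes the address of a freed fd's 'higher' field
** onto _pr_fd_cache.stack.  stack2fd is the byte offset of 'higher'
** within PRFileDesc, so subtracting it from the popped PRStackElem
** pointer recovers the containing PRFileDesc, the same idea as an
** offsetof()-based "container of" macro.  A rough equivalent of the
** recovery done above (assuming <stddef.h> for offsetof):
*/
#if 0
static PRFileDesc *StackElemToFd(PRStackElem *pop)
{
    return (PRFileDesc*)((char*)pop - offsetof(PRFileDesc, higher));
}
#endif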

/*
** Return a file descriptor to the cache unless there are too many in
** there already. If put in cache, clear the fields first.
*/
void _PR_Putfd(PRFileDesc *fd)
{
    PR_ASSERT(PR_NSPR_IO_LAYER == fd->identity);
    fd->methods = &_pr_faulty_methods;
    fd->identity = PR_INVALID_IO_LAYER;
    fd->secret->state = _PR_FILEDESC_FREED;

    if (0 == _pr_fd_cache.limit_high)
    {
        PR_StackPush(_pr_fd_cache.stack, (PRStackElem*)(&fd->higher));
    }
    else
    {
        if (_pr_fd_cache.count > _pr_fd_cache.limit_high)
        {
            PR_Free(fd->secret);
            PR_Free(fd);
        }
        else
        {
            PR_Lock(_pr_fd_cache.ml);
            if (NULL == _pr_fd_cache.tail)
            {
                PR_ASSERT(0 == _pr_fd_cache.count);
                PR_ASSERT(NULL == _pr_fd_cache.head);
                _pr_fd_cache.head = _pr_fd_cache.tail = fd;
            }
            else
            {
                PR_ASSERT(NULL == _pr_fd_cache.tail->higher);
                _pr_fd_cache.tail->higher = fd;
                _pr_fd_cache.tail = fd;  /* new value */
            }
            fd->higher = NULL;  /* always so */
            _pr_fd_cache.count += 1;  /* count the new entry */
            PR_Unlock(_pr_fd_cache.ml);
        }
    }
}  /* _PR_Putfd */

PR_IMPLEMENT(PRStatus) PR_SetFDCacheSize(PRIntn low, PRIntn high)
{
    /*
    ** This can be called at any time, may adjust the cache sizes,
    ** turn the caches off, or turn them on. It is not dependent
    ** on the compilation setting of DEBUG.
    */
    if (!_pr_initialized) _PR_ImplicitInitialization();

    if (low > high) low = high;  /* sanity check the params */

    PR_Lock(_pr_fd_cache.ml);
    if (0 == high)  /* shutting down or staying down */
    {
        if (0 != _pr_fd_cache.limit_high)  /* shutting down */
        {
            _pr_fd_cache.limit_high = 0;  /* stop use */
            /*
            ** Hold the lock throughout - nobody's going to want it
            ** other than another caller to this routine. Just don't
            ** let that happen.
            **
            ** Put all the cached fds onto the new cache.
            */
            while (NULL != _pr_fd_cache.head)
            {
                PRFileDesc *fd = _pr_fd_cache.head;
                _pr_fd_cache.head = fd->higher;
                PR_StackPush(_pr_fd_cache.stack, (PRStackElem*)(&fd->higher));
            }
            _pr_fd_cache.limit_low = 0;
            _pr_fd_cache.tail = NULL;
            _pr_fd_cache.count = 0;
        }
    }
    else  /* starting up or just adjusting parameters */
    {
        PRBool was_using_stack = (0 == _pr_fd_cache.limit_high);
        _pr_fd_cache.limit_low = low;
        _pr_fd_cache.limit_high = high;
        if (was_using_stack)  /* was using stack - feed into cache */
        {
            PRStackElem *pop;
            while (NULL != (pop = PR_StackPop(_pr_fd_cache.stack)))
            {
                PRFileDesc *fd = (PRFileDesc*)
                    ((PRPtrdiff)pop - (PRPtrdiff)stack2fd);
                if (NULL == _pr_fd_cache.tail) _pr_fd_cache.tail = fd;
                fd->higher = _pr_fd_cache.head;
                _pr_fd_cache.head = fd;
                _pr_fd_cache.count += 1;
            }
        }
    }
    PR_Unlock(_pr_fd_cache.ml);
    return PR_SUCCESS;
}  /* PR_SetFDCacheSize */

void _PR_InitFdCache(void)
{
    /*
    ** The fd caching is enabled by default for DEBUG builds,
    ** disabled by default for OPT builds. That default can
    ** be overridden at runtime using environment variables
    ** or a super-wiz-bang API.
    */
    const char *low = PR_GetEnv("NSPR_FD_CACHE_SIZE_LOW");
    const char *high = PR_GetEnv("NSPR_FD_CACHE_SIZE_HIGH");

    /*
    ** _low is allowed to be zero, _high is not.
    ** If _high is zero, we're not doing the caching.
    */

    _pr_fd_cache.limit_low = 0;
#if defined(DEBUG)
    _pr_fd_cache.limit_high = FD_SETSIZE;
#else
    _pr_fd_cache.limit_high = 0;
#endif  /* defined(DEBUG) */

    if (NULL != low) _pr_fd_cache.limit_low = atoi(low);
    if (NULL != high) _pr_fd_cache.limit_high = atoi(high);

    if (_pr_fd_cache.limit_low < 0)
        _pr_fd_cache.limit_low = 0;
    if (_pr_fd_cache.limit_low > FD_SETSIZE)
        _pr_fd_cache.limit_low = FD_SETSIZE;

    if (_pr_fd_cache.limit_high > FD_SETSIZE)
        _pr_fd_cache.limit_high = FD_SETSIZE;

    if (_pr_fd_cache.limit_high < _pr_fd_cache.limit_low)
        _pr_fd_cache.limit_high = _pr_fd_cache.limit_low;

    _pr_fd_cache.ml = PR_NewLock();
    PR_ASSERT(NULL != _pr_fd_cache.ml);
    _pr_fd_cache.stack = PR_CreateStack("FD");
    PR_ASSERT(NULL != _pr_fd_cache.stack);

}  /* _PR_InitFdCache */

void _PR_CleanupFdCache(void)
{
    PRFileDesc *fd, *next;
    PRStackElem *pop;

    for (fd = _pr_fd_cache.head; fd != NULL; fd = next)
    {
        next = fd->higher;
        PR_DELETE(fd->secret);
        PR_DELETE(fd);
    }
    _pr_fd_cache.head = NULL;
    _pr_fd_cache.tail = NULL;
    _pr_fd_cache.count = 0;
    PR_DestroyLock(_pr_fd_cache.ml);
    _pr_fd_cache.ml = NULL;
    while ((pop = PR_StackPop(_pr_fd_cache.stack)) != NULL)
    {
        fd = (PRFileDesc*)((PRPtrdiff)pop - (PRPtrdiff)stack2fd);
        PR_DELETE(fd->secret);
        PR_DELETE(fd);
    }
    PR_DestroyStack(_pr_fd_cache.stack);
    _pr_fd_cache.stack = NULL;
}  /* _PR_CleanupFdCache */

/* prfdcach.c */