Wed, 31 Dec 2014 06:09:35 +0100
Cloned upstream origin tor-browser at tor-browser-31.3.0esr-4.5-1-build1,
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f, for hacking purposes.
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "primpl.h"

#include <string.h>

/*****************************************************************************/
/*****************************************************************************/
/************************** File descriptor caching **************************/
/*****************************************************************************/
/*****************************************************************************/

/*
** This code is built into debuggable versions of NSPR to assist in
** finding misused file descriptors. Since file descriptors (PRFileDesc)
** are identified by a pointer to their structure, they can be the
** target of dangling references. Furthermore, NSPR caches and tries
** to aggressively reuse file descriptors, leading to more ambiguity.
** The following code allows a debugging client to set environment
** variables and control the number of file descriptors that will be
** preserved before they are recycled. The environment variables are
** NSPR_FD_CACHE_SIZE_LOW and NSPR_FD_CACHE_SIZE_HIGH. The former sets
** the number of descriptors NSPR will allocate before beginning to
** recycle. The latter is the maximum number permitted in the cache
** (exclusive of those in use) at a time.
*/
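/*
** Added usage note (illustrative, not part of the original source):
** a debugging client would typically bound the cache from the shell
** before launching, for example:
**
**     NSPR_FD_CACHE_SIZE_LOW=64 NSPR_FD_CACHE_SIZE_HIGH=1024 ./myapp
**
** where ./myapp is a hypothetical NSPR application. The same limits
** can be set programmatically with PR_SetFDCacheSize(64, 1024);
** see that function below.
*/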
typedef struct _PR_Fd_Cache
{
    PRLock *ml;
    PRIntn count;
    PRStack *stack;
    PRFileDesc *head, *tail;
    PRIntn limit_low, limit_high;
} _PR_Fd_Cache;

static _PR_Fd_Cache _pr_fd_cache;
static PRFileDesc **stack2fd = &(((PRFileDesc*)NULL)->higher);
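/*
** Added note: stack2fd is the byte offset of the 'higher' field within
** PRFileDesc, computed with the classic null-pointer idiom (equivalent
** to offsetof(PRFileDesc, higher)). _PR_Putfd() pushes &fd->higher onto
** the stack, so subtracting this offset from a popped PRStackElem*
** recovers the enclosing PRFileDesc* - the same container_of-style
** arithmetic used in _PR_Getfd() and _PR_CleanupFdCache() below.
*/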

/*
** Get a FileDescriptor from the cache if one exists. If not, allocate
** a new one from the heap.
*/
PRFileDesc *_PR_Getfd(void)
{
    PRFileDesc *fd;
    /*
    ** $$$
    ** This may look a little wasteful. We'll see. Right now I want to
    ** be able to toggle between caching and not at runtime to measure
    ** the differences. If it isn't too annoying, I'll leave it in.
    ** $$$$
    **
    ** The test is against _pr_fd_cache.limit_high. If that's zero,
    ** we're not doing the extended cache but going for performance.
    */
    if (0 == _pr_fd_cache.limit_high)
    {
        PRStackElem *pop;
        PR_ASSERT(NULL != _pr_fd_cache.stack);
        pop = PR_StackPop(_pr_fd_cache.stack);
        if (NULL == pop) goto allocate;
        fd = (PRFileDesc*)((PRPtrdiff)pop - (PRPtrdiff)stack2fd);
    }
    else
    {
        do
        {
            if (NULL == _pr_fd_cache.head) goto allocate;  /* nothing there */
            if (_pr_fd_cache.count < _pr_fd_cache.limit_low) goto allocate;

            /* we "should" be able to extract an fd from the cache */
            PR_Lock(_pr_fd_cache.ml);  /* need the lock to do this safely */
            fd = _pr_fd_cache.head;    /* protected extraction */
            if (NULL == fd)  /* unexpected, but not fatal */
            {
                PR_ASSERT(0 == _pr_fd_cache.count);
                PR_ASSERT(NULL == _pr_fd_cache.tail);
            }
            else
            {
                _pr_fd_cache.count -= 1;
                _pr_fd_cache.head = fd->higher;
                if (NULL == _pr_fd_cache.head)
                {
                    PR_ASSERT(0 == _pr_fd_cache.count);
                    _pr_fd_cache.tail = NULL;
                }
                PR_ASSERT(&_pr_faulty_methods == fd->methods);
                PR_ASSERT(PR_INVALID_IO_LAYER == fd->identity);
                PR_ASSERT(_PR_FILEDESC_FREED == fd->secret->state);
            }
            PR_Unlock(_pr_fd_cache.ml);

        } while (NULL == fd);  /* then go around and allocate a new one */
    }

finished:
    fd->dtor = NULL;
    fd->lower = fd->higher = NULL;
    fd->identity = PR_NSPR_IO_LAYER;
    memset(fd->secret, 0, sizeof(PRFilePrivate));
    return fd;

allocate:
    fd = PR_NEW(PRFileDesc);
    if (NULL != fd)
    {
        fd->secret = PR_NEW(PRFilePrivate);
        if (NULL == fd->secret) PR_DELETE(fd);
    }
    if (NULL != fd) goto finished;
    else return NULL;

}  /* _PR_Getfd */
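/*
** Illustrative sketch (added; hypothetical caller, not from the
** original file): code elsewhere in NSPR that creates and destroys
** descriptors pairs these two routines roughly like this:
**
**     PRFileDesc *fd = _PR_Getfd();    // cached or freshly allocated
**     if (NULL == fd) return NULL;     // allocation failure
**     fd->methods = my_methods;        // install the layer's methods
**     ...                              // use the descriptor
**     _PR_Putfd(fd);                   // recycle it on close
**
** 'my_methods' stands in for whatever PRIOMethods table the caller
** installs; _PR_Getfd() deliberately leaves 'methods' unset.
*/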

/*
** Return a file descriptor to the cache unless there are too many in
** there already. If put in the cache, mark the fields first so that
** misuse of the freed descriptor can be detected.
*/
void _PR_Putfd(PRFileDesc *fd)
{
    PR_ASSERT(PR_NSPR_IO_LAYER == fd->identity);
    fd->methods = &_pr_faulty_methods;
    fd->identity = PR_INVALID_IO_LAYER;
    fd->secret->state = _PR_FILEDESC_FREED;

    if (0 == _pr_fd_cache.limit_high)
    {
        PR_StackPush(_pr_fd_cache.stack, (PRStackElem*)(&fd->higher));
    }
    else
    {
        if (_pr_fd_cache.count > _pr_fd_cache.limit_high)
        {
            PR_Free(fd->secret);
            PR_Free(fd);
        }
        else
        {
            PR_Lock(_pr_fd_cache.ml);
            if (NULL == _pr_fd_cache.tail)
            {
                PR_ASSERT(0 == _pr_fd_cache.count);
                PR_ASSERT(NULL == _pr_fd_cache.head);
                _pr_fd_cache.head = _pr_fd_cache.tail = fd;
            }
            else
            {
                PR_ASSERT(NULL == _pr_fd_cache.tail->higher);
                _pr_fd_cache.tail->higher = fd;
                _pr_fd_cache.tail = fd;  /* new value */
            }
            fd->higher = NULL;  /* always so */
            _pr_fd_cache.count += 1;  /* count the new entry */
            PR_Unlock(_pr_fd_cache.ml);
        }
    }
}  /* _PR_Putfd */

PR_IMPLEMENT(PRStatus) PR_SetFDCacheSize(PRIntn low, PRIntn high)
{
    /*
    ** This can be called at any time. It may adjust the cache sizes,
    ** turn the caches off, or turn them on. It is not dependent
    ** on the compilation setting of DEBUG.
    */
    if (!_pr_initialized) _PR_ImplicitInitialization();

    if (low > high) low = high;  /* sanity check the params */

    PR_Lock(_pr_fd_cache.ml);
    if (0 == high)  /* shutting down or staying down */
    {
        if (0 != _pr_fd_cache.limit_high)  /* shutting down */
        {
            _pr_fd_cache.limit_high = 0;  /* stop use */
            /*
            ** Hold the lock throughout - nobody's going to want it
            ** other than another caller to this routine. Just don't
            ** let that happen.
            **
            ** Push all the cached fds onto the stack.
            */
            while (NULL != _pr_fd_cache.head)
            {
                PRFileDesc *fd = _pr_fd_cache.head;
                _pr_fd_cache.head = fd->higher;
                PR_StackPush(_pr_fd_cache.stack, (PRStackElem*)(&fd->higher));
            }
            _pr_fd_cache.limit_low = 0;
            _pr_fd_cache.tail = NULL;
            _pr_fd_cache.count = 0;
        }
    }
    else  /* starting up or just adjusting parameters */
    {
        PRBool was_using_stack = (0 == _pr_fd_cache.limit_high);
        _pr_fd_cache.limit_low = low;
        _pr_fd_cache.limit_high = high;
        if (was_using_stack)  /* was using stack - feed into cache */
        {
            PRStackElem *pop;
            while (NULL != (pop = PR_StackPop(_pr_fd_cache.stack)))
            {
                PRFileDesc *fd = (PRFileDesc*)
                    ((PRPtrdiff)pop - (PRPtrdiff)stack2fd);
                if (NULL == _pr_fd_cache.tail) _pr_fd_cache.tail = fd;
                fd->higher = _pr_fd_cache.head;
                _pr_fd_cache.head = fd;
                _pr_fd_cache.count += 1;
            }
        }
    }
    PR_Unlock(_pr_fd_cache.ml);
    return PR_SUCCESS;
}  /* PR_SetFDCacheSize */
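/*
** Illustrative calls (added; the values are hypothetical): the cache
** can be reconfigured, enabled, or disabled at runtime, e.g.
**
**     PR_SetFDCacheSize(16, 256);  // counted cache, keep 16..256 free fds
**     PR_SetFDCacheSize(0, 0);     // fall back to the lock-free stack path
**
** Note that with high == 0 descriptors are still recycled through the
** stack; only the counted, bounded cache is turned off. The function
** always returns PR_SUCCESS.
*/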

void _PR_InitFdCache(void)
{
    /*
    ** The fd caching is enabled by default for DEBUG builds,
    ** disabled by default for OPT builds. That default can
    ** be overridden at runtime using environment variables
    ** or a super-wiz-bang API.
    */
    const char *low = PR_GetEnv("NSPR_FD_CACHE_SIZE_LOW");
    const char *high = PR_GetEnv("NSPR_FD_CACHE_SIZE_HIGH");

    /*
    ** limit_low is allowed to be zero, limit_high is not.
    ** If limit_high is zero, we're not doing the caching.
    */

    _pr_fd_cache.limit_low = 0;
#if defined(DEBUG)
    _pr_fd_cache.limit_high = FD_SETSIZE;
#else
    _pr_fd_cache.limit_high = 0;
#endif  /* defined(DEBUG) */

    if (NULL != low) _pr_fd_cache.limit_low = atoi(low);
    if (NULL != high) _pr_fd_cache.limit_high = atoi(high);

    if (_pr_fd_cache.limit_low < 0)
        _pr_fd_cache.limit_low = 0;
    if (_pr_fd_cache.limit_low > FD_SETSIZE)
        _pr_fd_cache.limit_low = FD_SETSIZE;

    if (_pr_fd_cache.limit_high > FD_SETSIZE)
        _pr_fd_cache.limit_high = FD_SETSIZE;

    if (_pr_fd_cache.limit_high < _pr_fd_cache.limit_low)
        _pr_fd_cache.limit_high = _pr_fd_cache.limit_low;

    _pr_fd_cache.ml = PR_NewLock();
    PR_ASSERT(NULL != _pr_fd_cache.ml);
    _pr_fd_cache.stack = PR_CreateStack("FD");
    PR_ASSERT(NULL != _pr_fd_cache.stack);

}  /* _PR_InitFdCache */

void _PR_CleanupFdCache(void)
{
    PRFileDesc *fd, *next;
    PRStackElem *pop;

    for (fd = _pr_fd_cache.head; fd != NULL; fd = next)
    {
        next = fd->higher;
        PR_DELETE(fd->secret);
        PR_DELETE(fd);
    }
    _pr_fd_cache.head = NULL;
    _pr_fd_cache.tail = NULL;
    _pr_fd_cache.count = 0;
    PR_DestroyLock(_pr_fd_cache.ml);
    _pr_fd_cache.ml = NULL;
    while ((pop = PR_StackPop(_pr_fd_cache.stack)) != NULL)
    {
        fd = (PRFileDesc*)((PRPtrdiff)pop - (PRPtrdiff)stack2fd);
        PR_DELETE(fd->secret);
        PR_DELETE(fd);
    }
    PR_DestroyStack(_pr_fd_cache.stack);
    _pr_fd_cache.stack = NULL;
}  /* _PR_CleanupFdCache */

/* prfdcach.c */
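
/*
** Appended usage sketch (hypothetical, not part of the original file):
** a minimal NSPR program that exercises the fd cache by repeatedly
** opening and closing a scratch file. "/tmp/fdcache-test" is an
** arbitrary example path, and the cache bounds are example values.
** Guarded with #if 0 so this file still compiles as a library source.
*/
#if 0
#include "nspr.h"

int main(void)
{
    PRIntn i;

    PR_SetFDCacheSize(4, 64);  /* enable the counted cache */
    for (i = 0; i < 128; ++i)
    {
        PRFileDesc *fd = PR_Open("/tmp/fdcache-test",
                                 PR_CREATE_FILE | PR_RDWR, 0600);
        if (NULL == fd) return 1;  /* open failed */
        PR_Close(fd);              /* PRFileDesc is recycled via the cache */
    }
    return (PR_SUCCESS == PR_Cleanup()) ? 0 : 1;
}
#endif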