Wed, 31 Dec 2014 06:09:35 +0100
Cloned upstream origin tor-browser at tor-browser-31.3.0esr-4.5-1-build1
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f for hacking purposes.
/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim:cindent:ts=8:et:sw=4:
 *
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifdef NS_TRACE_MALLOC
/*
 * TODO:
 * - FIXME https://bugzilla.mozilla.org/show_bug.cgi?id=392008
 * - extend logfile so 'F' record tells free stack
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#ifdef XP_UNIX
#include <unistd.h>
#include <sys/stat.h>
#include <sys/time.h>
#endif
#include "plhash.h"
#include "pratom.h"
#include "prlog.h"
#include "prlock.h"
#include "prmon.h"
#include "prprf.h"
#include "prenv.h"
#include "prnetdb.h"
#include "nsTraceMalloc.h"
#include "nscore.h"
#include "prinit.h"
#include "prthread.h"
#include "plstr.h"
#include "nsStackWalk.h"
#include "nsTraceMallocCallbacks.h"
#include "nsTypeInfo.h"
#include "mozilla/PoisonIOInterposer.h"

#if defined(XP_MACOSX)

#include <malloc/malloc.h>

#define WRITE_FLAGS "w"

#define __libc_malloc(x) malloc(x)
#define __libc_realloc(x, y) realloc(x, y)
#define __libc_free(x) free(x)

#elif defined(XP_UNIX)

#include <malloc.h>

#define WRITE_FLAGS "w"

#ifdef WRAP_SYSTEM_INCLUDES
#pragma GCC visibility push(default)
#endif
extern __ptr_t __libc_malloc(size_t);
extern __ptr_t __libc_calloc(size_t, size_t);
extern __ptr_t __libc_realloc(__ptr_t, size_t);
extern void __libc_free(__ptr_t);
extern __ptr_t __libc_memalign(size_t, size_t);
extern __ptr_t __libc_valloc(size_t);
#ifdef WRAP_SYSTEM_INCLUDES
#pragma GCC visibility pop
#endif

#elif defined(XP_WIN32)

#include <sys/timeb.h> /* for timeb */
#include <sys/stat.h>  /* for fstat */

#include <io.h> /* for write */

#define WRITE_FLAGS "w"

#define __libc_malloc(x) dhw_orig_malloc(x)
#define __libc_realloc(x, y) dhw_orig_realloc(x, y)
#define __libc_free(x) dhw_orig_free(x)

#else /* not XP_MACOSX, XP_UNIX, or XP_WIN32 */

# error "Unknown build configuration!"

#endif

typedef struct logfile logfile;

#define STARTUP_TMBUFSIZE (64 * 1024)
#define LOGFILE_TMBUFSIZE (16 * 1024)

struct logfile {
    int         fd;
    int         lfd;        /* logical fd, dense among all logfiles */
    char        *buf;
    int         bufsize;
    int         pos;
    uint32_t    size;
    uint32_t    simsize;
    logfile     *next;
    logfile     **prevp;
};

static char     default_buf[STARTUP_TMBUFSIZE];
static logfile  default_logfile =
    {-1, 0, default_buf, STARTUP_TMBUFSIZE, 0, 0, 0, NULL, NULL};
static logfile  *logfile_list = NULL;
static logfile  **logfile_tail = &logfile_list;
static logfile  *logfp = &default_logfile;
static PRLock   *tmlock = NULL;
#ifndef PATH_MAX
#define PATH_MAX 4096
#endif
static char     sdlogname[PATH_MAX] = ""; /* filename for shutdown leak log */

/*
 * This enables/disables trace-malloc logging.
 *
 * It is separate from suppress_tracing so that we do not have to pay
 * the performance cost of repeated TM_TLS_GET_DATA calls when
 * trace-malloc is disabled (which is not as bad as the locking we used
 * to have).
 *
 * It must default to zero, since it can be tested by the Linux malloc
 * hooks before NS_TraceMallocStartup sets it.
 */
static uint32_t tracing_enabled = 0;

/*
 * Control whether we should log stacks.
 */
static uint32_t stacks_enabled = 1;

/*
 * This lock must be held while manipulating the calltree, the
 * allocations table, the log, or the tmstats.
 *
 * Callers should not *enter* the lock without checking suppress_tracing
 * first; otherwise they risk trying to re-enter on the same thread.
 */
#define TM_ENTER_LOCK(t)                        \
    PR_BEGIN_MACRO                              \
        PR_ASSERT(t->suppress_tracing != 0);    \
        if (tmlock)                             \
            PR_Lock(tmlock);                    \
    PR_END_MACRO

#define TM_EXIT_LOCK(t)                         \
    PR_BEGIN_MACRO                              \
        PR_ASSERT(t->suppress_tracing != 0);    \
        if (tmlock)                             \
            PR_Unlock(tmlock);                  \
    PR_END_MACRO

#define TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t)   \
    PR_BEGIN_MACRO                              \
        t->suppress_tracing++;                  \
        TM_ENTER_LOCK(t);                       \
    PR_END_MACRO

#define TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t)  \
    PR_BEGIN_MACRO                              \
        TM_EXIT_LOCK(t);                        \
        t->suppress_tracing--;                  \
    PR_END_MACRO
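
/*
 * Illustrative sketch (added commentary, not part of the original file):
 * the intended pairing of the macros above.  Raising suppress_tracing
 * before taking tmlock keeps the locked region from recursing into our
 * own malloc wrappers if anything inside it happens to allocate.
 */
#if 0
static void
example_locked_update(tm_thread *t)
{
    TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t);
    /* ... touch the calltree, allocations table, log, or tmstats ... */
    TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);
}
#endif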

/*
 * Thread-local storage.
 *
 * We can't use NSPR thread-local storage for this because it mallocs
 * within PR_GetThreadPrivate (the first time) and PR_SetThreadPrivate
 * (which can be worked around by protecting all uses of those functions
 * with a monitor, ugh) and because it calls malloc/free when the
 * thread-local storage is in an inconsistent state within
 * PR_SetThreadPrivate (when expanding the thread-local storage array)
 * and _PRI_DetachThread (when and after deleting the thread-local
 * storage array).
 */

#ifdef XP_WIN32

#include <windows.h>

#define TM_TLS_INDEX_TYPE        DWORD
#define TM_CREATE_TLS_INDEX(i_)  PR_BEGIN_MACRO     \
                                     (i_) = TlsAlloc(); \
                                 PR_END_MACRO
#define TM_DESTROY_TLS_INDEX(i_) TlsFree((i_))
#define TM_GET_TLS_DATA(i_)      TlsGetValue((i_))
#define TM_SET_TLS_DATA(i_, v_)  TlsSetValue((i_), (v_))

#else

#include <pthread.h>

#define TM_TLS_INDEX_TYPE        pthread_key_t
#define TM_CREATE_TLS_INDEX(i_)  pthread_key_create(&(i_), NULL)
#define TM_DESTROY_TLS_INDEX(i_) pthread_key_delete((i_))
#define TM_GET_TLS_DATA(i_)      pthread_getspecific((i_))
#define TM_SET_TLS_DATA(i_, v_)  pthread_setspecific((i_), (v_))

#endif

static TM_TLS_INDEX_TYPE tls_index;
static PRBool tls_index_initialized = PR_FALSE;

/* FIXME (maybe): This is currently unused; we leak the thread-local data. */
#if 0
static void
free_tm_thread(void *priv)
{
    tm_thread *t = (tm_thread*) priv;

    PR_ASSERT(t->suppress_tracing == 0);

    if (t->in_heap) {
        t->suppress_tracing = 1;
        if (t->backtrace_buf.buffer)
            __libc_free(t->backtrace_buf.buffer);

        __libc_free(t);
    }
}
#endif

tm_thread *
tm_get_thread(void)
{
    tm_thread *t;
    tm_thread stack_tm_thread;

    if (!tls_index_initialized) {
        /**
         * Assume that the first call to |malloc| will occur before
         * there are multiple threads.  (If that's not the case, we
         * probably need to do the necessary synchronization without
         * using NSPR primitives.  See discussion in
         * https://bugzilla.mozilla.org/show_bug.cgi?id=442192
         */
        TM_CREATE_TLS_INDEX(tls_index);
        tls_index_initialized = PR_TRUE;
    }

    t = TM_GET_TLS_DATA(tls_index);

    if (!t) {
        /*
         * First, store a tm_thread on the stack to suppress tracing
         * for the malloc below.
         */
        stack_tm_thread.suppress_tracing = 1;
        stack_tm_thread.backtrace_buf.buffer = NULL;
        stack_tm_thread.backtrace_buf.size = 0;
        stack_tm_thread.backtrace_buf.entries = 0;
        TM_SET_TLS_DATA(tls_index, &stack_tm_thread);

        t = (tm_thread*) __libc_malloc(sizeof(tm_thread));
        t->suppress_tracing = 0;
        t->backtrace_buf = stack_tm_thread.backtrace_buf;
        TM_SET_TLS_DATA(tls_index, t);

        PR_ASSERT(stack_tm_thread.suppress_tracing == 1); /* balanced */
    }

    return t;
}

/* We don't want more than 32 logfiles open at once, ok? */
typedef uint32_t lfd_set;

#define LFD_SET_STATIC_INITIALIZER 0
#define LFD_SET_SIZE 32

#define LFD_ZERO(s)   (*(s) = 0)
#define LFD_BIT(i)    ((uint32_t)1 << (i))
#define LFD_TEST(i,s) (LFD_BIT(i) & *(s))
#define LFD_SET(i,s)  (*(s) |= LFD_BIT(i))
#define LFD_CLR(i,s)  (*(s) &= ~LFD_BIT(i))
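
/*
 * Illustrative sketch (added commentary, not part of the original file):
 * each shared object in the log (callsite, library, method, filename)
 * carries an lfd_set recording which logfiles have already seen its
 * definition record, so a definition is emitted at most once per logfile.
 */
#if 0
static int
example_log_once(logfile *fp, lfd_set *seen)
{
    if (LFD_TEST(fp->lfd, seen))
        return 0;               /* already emitted to this logfile */
    LFD_SET(fp->lfd, seen);
    return 1;                   /* caller should emit the record now */
}
#endif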

static logfile *get_logfile(int fd)
{
    logfile *fp;
    int lfd;

    for (fp = logfile_list; fp; fp = fp->next) {
        if (fp->fd == fd)
            return fp;
    }
    /* Find the lowest unused logical fd. */
    lfd = 0;
retry:
    for (fp = logfile_list; fp; fp = fp->next) {
        if (fp->lfd == lfd) {
            if (++lfd >= LFD_SET_SIZE)
                return NULL;
            goto retry;
        }
    }
    fp = __libc_malloc(sizeof(logfile) + LOGFILE_TMBUFSIZE);
    if (!fp)
        return NULL;
    fp->fd = fd;
    fp->lfd = lfd;
    fp->buf = (char*) (fp + 1);
    fp->bufsize = LOGFILE_TMBUFSIZE;
    fp->pos = 0;
    fp->size = fp->simsize = 0;
    fp->next = NULL;
    fp->prevp = logfile_tail;
    *logfile_tail = fp;
    logfile_tail = &fp->next;
    return fp;
}

static void flush_logfile(logfile *fp)
{
    int len, cnt, fd;
    char *bp;

    len = fp->pos;
    if (len == 0)
        return;
    fp->pos = 0;
    fd = fp->fd;
    if (fd >= 0) {
        fp->size += len;
        bp = fp->buf;
        do {
            cnt = write(fd, bp, len);
            if (cnt <= 0) {
                printf("### nsTraceMalloc: write failed or wrote 0 bytes!\n");
                return;
            }
            bp += cnt;
            len -= cnt;
        } while (len > 0);
    }
    fp->simsize += len;
}

static void log_byte(logfile *fp, char byte)
{
    if (fp->pos == fp->bufsize)
        flush_logfile(fp);
    fp->buf[fp->pos++] = byte;
}

static void log_string(logfile *fp, const char *str)
{
    int len, rem, cnt;

    len = strlen(str) + 1; /* include null terminator */
    while ((rem = fp->pos + len - fp->bufsize) > 0) {
        cnt = len - rem;   /* as much as fits in the buffer */
        memcpy(&fp->buf[fp->pos], str, cnt);
        str += cnt;
        fp->pos += cnt;
        flush_logfile(fp);
        len = rem;
    }
    memcpy(&fp->buf[fp->pos], str, len);
    fp->pos += len;
}

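/*
 * Added note (not in the original file): log_filename strips any path
 * prefix up to the first "mozilla" component and maps backslashes to
 * slashes, so e.g. "c:\build\mozilla\xpcom\base\nsID.cpp" is logged as
 * "mozilla/xpcom/base/nsID.cpp"; names of 512 or more characters are
 * logged unmodified.
 */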
static void log_filename(logfile* fp, const char* filename)
{
    if (strlen(filename) < 512) {
        char *bp, *cp, buf[512];

        bp = strstr(strcpy(buf, filename), "mozilla");
        if (!bp)
            bp = buf;

        for (cp = bp; *cp; cp++) {
            if (*cp == '\\')
                *cp = '/';
        }

        filename = bp;
    }
    log_string(fp, filename);
}

static void log_uint32(logfile *fp, uint32_t ival)
{
    if (ival < 0x80) {
        /* 0xxx xxxx */
        log_byte(fp, (char) ival);
    } else if (ival < 0x4000) {
        /* 10xx xxxx xxxx xxxx */
        log_byte(fp, (char) ((ival >> 8) | 0x80));
        log_byte(fp, (char) (ival & 0xff));
    } else if (ival < 0x200000) {
        /* 110x xxxx xxxx xxxx xxxx xxxx */
        log_byte(fp, (char) ((ival >> 16) | 0xc0));
        log_byte(fp, (char) ((ival >> 8) & 0xff));
        log_byte(fp, (char) (ival & 0xff));
    } else if (ival < 0x10000000) {
        /* 1110 xxxx xxxx xxxx xxxx xxxx xxxx xxxx */
        log_byte(fp, (char) ((ival >> 24) | 0xe0));
        log_byte(fp, (char) ((ival >> 16) & 0xff));
        log_byte(fp, (char) ((ival >> 8) & 0xff));
        log_byte(fp, (char) (ival & 0xff));
    } else {
        /* 1111 0000 xxxx xxxx xxxx xxxx xxxx xxxx xxxx xxxx */
        log_byte(fp, (char) 0xf0);
        log_byte(fp, (char) ((ival >> 24) & 0xff));
        log_byte(fp, (char) ((ival >> 16) & 0xff));
        log_byte(fp, (char) ((ival >> 8) & 0xff));
        log_byte(fp, (char) (ival & 0xff));
    }
}
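
/*
 * Illustrative sketch (added commentary, not part of the original file):
 * a decoder for the variable-length encoding written by log_uint32 above.
 * The run of leading 1-bits in the first byte says how many extra bytes
 * follow; 0xf0 introduces a full 4-byte big-endian value.
 */
#if 0
static uint32_t
example_decode_uint32(const unsigned char **bpp)
{
    const unsigned char *bp = *bpp;
    unsigned char first = *bp++;
    uint32_t ival;
    int extra;

    if (first < 0x80) {             /* 0xxx xxxx */
        *bpp = bp;
        return first;
    }
    if (first < 0xc0) {             /* 10xx xxxx, 1 extra byte */
        ival = first & 0x3f;
        extra = 1;
    } else if (first < 0xe0) {      /* 110x xxxx, 2 extra bytes */
        ival = first & 0x1f;
        extra = 2;
    } else if (first < 0xf0) {      /* 1110 xxxx, 3 extra bytes */
        ival = first & 0x0f;
        extra = 3;
    } else {                        /* 1111 0000, 4 extra bytes */
        ival = 0;
        extra = 4;
    }
    while (extra-- > 0)
        ival = (ival << 8) | *bp++;
    *bpp = bp;
    return ival;
}
#endif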

static void log_event1(logfile *fp, char event, uint32_t serial)
{
    log_byte(fp, event);
    log_uint32(fp, (uint32_t) serial);
}

static void log_event2(logfile *fp, char event, uint32_t serial, size_t size)
{
    log_event1(fp, event, serial);
    log_uint32(fp, (uint32_t) size);
}

static void log_event3(logfile *fp, char event, uint32_t serial, size_t oldsize,
                       size_t size)
{
    log_event2(fp, event, serial, oldsize);
    log_uint32(fp, (uint32_t) size);
}

static void log_event4(logfile *fp, char event, uint32_t serial, uint32_t ui2,
                       uint32_t ui3, uint32_t ui4)
{
    log_event3(fp, event, serial, ui2, ui3);
    log_uint32(fp, ui4);
}

static void log_event5(logfile *fp, char event, uint32_t serial, uint32_t ui2,
                       uint32_t ui3, uint32_t ui4, uint32_t ui5)
{
    log_event4(fp, event, serial, ui2, ui3, ui4);
    log_uint32(fp, ui5);
}

static void log_event6(logfile *fp, char event, uint32_t serial, uint32_t ui2,
                       uint32_t ui3, uint32_t ui4, uint32_t ui5, uint32_t ui6)
{
    log_event5(fp, event, serial, ui2, ui3, ui4, ui5);
    log_uint32(fp, ui6);
}

static void log_event7(logfile *fp, char event, uint32_t serial, uint32_t ui2,
                       uint32_t ui3, uint32_t ui4, uint32_t ui5, uint32_t ui6,
                       uint32_t ui7)
{
    log_event6(fp, event, serial, ui2, ui3, ui4, ui5, ui6);
    log_uint32(fp, ui7);
}

static void log_event8(logfile *fp, char event, uint32_t serial, uint32_t ui2,
                       uint32_t ui3, uint32_t ui4, uint32_t ui5, uint32_t ui6,
                       uint32_t ui7, uint32_t ui8)
{
    log_event7(fp, event, serial, ui2, ui3, ui4, ui5, ui6, ui7);
    log_uint32(fp, ui8);
}

typedef struct callsite callsite;

struct callsite {
    void*       pc;
    uint32_t    serial;
    lfd_set     lfdset;
    const char  *name;      /* pointer to string owned by methods table */
    const char  *library;   /* pointer to string owned by libraries table */
    int         offset;
    callsite    *parent;
    callsite    *siblings;
    callsite    *kids;
};

/* NB: these counters are incremented and decremented only within tmlock. */
static uint32_t library_serial_generator = 0;
static uint32_t method_serial_generator = 0;
static uint32_t callsite_serial_generator = 0;
static uint32_t tmstats_serial_generator = 0;
static uint32_t filename_serial_generator = 0;

/* Root of the tree of callsites, the sum of all (cycle-compressed) stacks. */
static callsite calltree_root =
    {0, 0, LFD_SET_STATIC_INITIALIZER, NULL, NULL, 0, NULL, NULL, NULL};

/* a fake pc for when stacks are disabled; must be different from the
   pc in calltree_root */
#define STACK_DISABLED_PC ((void*)1)

/* Basic instrumentation. */
static nsTMStats tmstats = NS_TMSTATS_STATIC_INITIALIZER;

/* Parent with the most kids (tmstats.calltree_maxkids). */
static callsite *calltree_maxkids_parent;

/* Calltree leaf for path with deepest stack backtrace. */
static callsite *calltree_maxstack_top;

/* Last site (i.e., calling pc) that recurred during a backtrace. */
static callsite *last_callsite_recurrence;

static void log_tmstats(logfile *fp)
{
    log_event1(fp, TM_EVENT_STATS, ++tmstats_serial_generator);
    log_uint32(fp, tmstats.calltree_maxstack);
    log_uint32(fp, tmstats.calltree_maxdepth);
    log_uint32(fp, tmstats.calltree_parents);
    log_uint32(fp, tmstats.calltree_maxkids);
    log_uint32(fp, tmstats.calltree_kidhits);
    log_uint32(fp, tmstats.calltree_kidmisses);
    log_uint32(fp, tmstats.calltree_kidsteps);
    log_uint32(fp, tmstats.callsite_recurrences);
    log_uint32(fp, tmstats.backtrace_calls);
    log_uint32(fp, tmstats.backtrace_failures);
    log_uint32(fp, tmstats.btmalloc_failures);
    log_uint32(fp, tmstats.dladdr_failures);
    log_uint32(fp, tmstats.malloc_calls);
    log_uint32(fp, tmstats.malloc_failures);
    log_uint32(fp, tmstats.calloc_calls);
    log_uint32(fp, tmstats.calloc_failures);
    log_uint32(fp, tmstats.realloc_calls);
    log_uint32(fp, tmstats.realloc_failures);
    log_uint32(fp, tmstats.free_calls);
    log_uint32(fp, tmstats.null_free_calls);
    log_uint32(fp, calltree_maxkids_parent ? calltree_maxkids_parent->serial
                                           : 0);
    log_uint32(fp, calltree_maxstack_top ? calltree_maxstack_top->serial : 0);
}

static void *generic_alloctable(void *pool, size_t size)
{
    return __libc_malloc(size);
}

static void generic_freetable(void *pool, void *item)
{
    __libc_free(item);
}

typedef struct lfdset_entry {
    PLHashEntry base;
    lfd_set     lfdset;
} lfdset_entry;

static PLHashEntry *lfdset_allocentry(void *pool, const void *key)
{
    lfdset_entry *le = __libc_malloc(sizeof *le);
    if (!le)
        return NULL;
    LFD_ZERO(&le->lfdset);
    return &le->base;
}

static void lfdset_freeentry(void *pool, PLHashEntry *he, unsigned flag)
{
    lfdset_entry *le;

    if (flag != HT_FREE_ENTRY)
        return;
    le = (lfdset_entry*) he;
    __libc_free((void*) le);
}

static PLHashAllocOps lfdset_hashallocops = {
    generic_alloctable, generic_freetable,
    lfdset_allocentry, lfdset_freeentry
};

/* Table of library pathnames mapped to logged 'L' record serial numbers. */
static PLHashTable *libraries = NULL;

/* Table of filename pathnames mapped to logged 'G' record serial numbers. */
static PLHashTable *filenames = NULL;

/* Table mapping method names to logged 'N' record serial numbers. */
static PLHashTable *methods = NULL;

/*
 * Presumes that its caller is holding tmlock, but may temporarily exit
 * the lock.
 */
static callsite *
calltree(void **stack, size_t num_stack_entries, tm_thread *t)
{
    logfile *fp = logfp;
    void *pc;
    uint32_t nkids;
    callsite *parent, *site, **csp, *tmp;
    int maxstack;
    uint32_t library_serial, method_serial, filename_serial;
    const char *library, *method, *filename;
    char *slash;
    PLHashNumber hash;
    PLHashEntry **hep, *he;
    lfdset_entry *le;
    size_t stack_index;
    nsCodeAddressDetails details;
    nsresult rv;

    maxstack = (num_stack_entries > tmstats.calltree_maxstack);
    if (maxstack) {
        /* these two are the same, although that used to be less clear */
        tmstats.calltree_maxstack = num_stack_entries;
        tmstats.calltree_maxdepth = num_stack_entries;
    }

    /* Reverse the stack again, finding and building a path in the tree. */
    parent = &calltree_root;
    stack_index = num_stack_entries;
    do {
        --stack_index;
        pc = stack[stack_index];

        csp = &parent->kids;
        while ((site = *csp) != NULL) {
            if (site->pc == pc) {
                tmstats.calltree_kidhits++;

                /* Put the most recently used site at the front of siblings. */
                *csp = site->siblings;
                site->siblings = parent->kids;
                parent->kids = site;

                /* Check whether we've logged for this site and logfile yet. */
                if (!LFD_TEST(fp->lfd, &site->lfdset)) {
                    /*
                     * Some other logfile put this site in the calltree.  We
                     * must log an event for site, and possibly first for its
                     * method and/or library.  Note the code after the while
                     * loop that tests if (!site).
                     */
                    break;
                }

                /* Site already built and logged to fp -- go up the stack. */
                goto upward;
            }
            tmstats.calltree_kidsteps++;
            csp = &site->siblings;
        }

        if (!site) {
            tmstats.calltree_kidmisses++;

            /* Check for recursion: see if pc is on our ancestor line. */
            for (site = parent; site; site = site->parent) {
                if (site->pc == pc) {
                    tmstats.callsite_recurrences++;
                    last_callsite_recurrence = site;
                    goto upward;
                }
            }
        }

        /*
         * Not in tree at all, or not logged to fp: let's find our symbolic
         * callsite info.
         */

        if (!stacks_enabled) {
            /*
             * Fake the necessary information for our single fake stack
             * frame.
             */
            PL_strncpyz(details.library, "stacks_disabled",
                        sizeof(details.library));
            details.loffset = 0;
            details.filename[0] = '\0';
            details.lineno = 0;
            details.function[0] = '\0';
            details.foffset = 0;
        } else {
            /*
             * NS_DescribeCodeAddress can (on Linux) acquire a lock inside
             * the shared library loader.  Another thread might call malloc
             * while holding that lock (when loading a shared library).  So
             * we have to exit tmlock around this call.  For details, see
             * https://bugzilla.mozilla.org/show_bug.cgi?id=363334#c3
             *
             * We could be more efficient by building the nodes in the
             * calltree, exiting the monitor once to describe all of them,
             * and then filling in the descriptions for any that hadn't been
             * described already.  But this is easier for now.
             */
            TM_EXIT_LOCK(t);
            rv = NS_DescribeCodeAddress(pc, &details);
            TM_ENTER_LOCK(t);
            if (NS_FAILED(rv)) {
                tmstats.dladdr_failures++;
                goto fail;
            }
        }

        /* Check whether we need to emit a library trace record. */
        library_serial = 0;
        library = NULL;
        if (details.library[0]) {
            if (!libraries) {
                libraries = PL_NewHashTable(100, PL_HashString,
                                            PL_CompareStrings, PL_CompareValues,
                                            &lfdset_hashallocops, NULL);
                if (!libraries) {
                    tmstats.btmalloc_failures++;
                    goto fail;
                }
            }
            hash = PL_HashString(details.library);
            hep = PL_HashTableRawLookup(libraries, hash, details.library);
            he = *hep;
            if (he) {
                library = (char*) he->key;
                library_serial = (uint32_t) NS_PTR_TO_INT32(he->value);
                le = (lfdset_entry *) he;
                if (LFD_TEST(fp->lfd, &le->lfdset)) {
                    /* We already logged an event on fp for this library. */
                    le = NULL;
                }
            } else {
                library = strdup(details.library);
                if (library) {
                    library_serial = ++library_serial_generator;
                    he = PL_HashTableRawAdd(libraries, hep, hash, library,
                                            NS_INT32_TO_PTR(library_serial));
                }
                if (!he) {
                    tmstats.btmalloc_failures++;
                    goto fail;
                }
                le = (lfdset_entry *) he;
            }
            if (le) {
                /* Need to log an event to fp for this lib. */
                slash = strrchr(library, '/');
                log_event1(fp, TM_EVENT_LIBRARY, library_serial);
                log_string(fp, slash ? slash + 1 : library);
                LFD_SET(fp->lfd, &le->lfdset);
            }
        }

        /* For compatibility with current log format, always emit a
         * filename trace record, using "noname" / 0 when no file name
         * is available. */
        filename_serial = 0;
        filename = details.filename[0] ? details.filename : "noname";
        if (!filenames) {
            filenames = PL_NewHashTable(100, PL_HashString,
                                        PL_CompareStrings, PL_CompareValues,
                                        &lfdset_hashallocops, NULL);
            if (!filenames) {
                tmstats.btmalloc_failures++;
                goto fail;
            }
        }
        hash = PL_HashString(filename);
        hep = PL_HashTableRawLookup(filenames, hash, filename);
        he = *hep;
        if (he) {
            filename = (char*) he->key;
            filename_serial = (uint32_t) NS_PTR_TO_INT32(he->value);
            le = (lfdset_entry *) he;
            if (LFD_TEST(fp->lfd, &le->lfdset)) {
                /* We already logged an event on fp for this filename. */
                le = NULL;
            }
        } else {
            filename = strdup(filename);
            if (filename) {
                filename_serial = ++filename_serial_generator;
                he = PL_HashTableRawAdd(filenames, hep, hash, filename,
                                        NS_INT32_TO_PTR(filename_serial));
            }
            if (!he) {
                tmstats.btmalloc_failures++;
                goto fail;
            }
            le = (lfdset_entry *) he;
        }
        if (le) {
            /* Need to log an event to fp for this filename. */
            log_event1(fp, TM_EVENT_FILENAME, filename_serial);
            log_filename(fp, filename);
            LFD_SET(fp->lfd, &le->lfdset);
        }

        if (!details.function[0]) {
            PR_snprintf(details.function, sizeof(details.function),
                        "%s+%X", library ? library : "main", details.loffset);
        }

        /* Emit an 'N' (for New method, 'M' is for malloc!) event if needed. */
        method_serial = 0;
        if (!methods) {
            methods = PL_NewHashTable(10000, PL_HashString,
                                      PL_CompareStrings, PL_CompareValues,
                                      &lfdset_hashallocops, NULL);
            if (!methods) {
                tmstats.btmalloc_failures++;
                goto fail;
            }
        }
        hash = PL_HashString(details.function);
        hep = PL_HashTableRawLookup(methods, hash, details.function);
        he = *hep;
        if (he) {
            method = (char*) he->key;
            method_serial = (uint32_t) NS_PTR_TO_INT32(he->value);
            le = (lfdset_entry *) he;
            if (LFD_TEST(fp->lfd, &le->lfdset)) {
                /* We already logged an event on fp for this method. */
                le = NULL;
            }
        } else {
            method = strdup(details.function);
            if (method) {
                method_serial = ++method_serial_generator;
                he = PL_HashTableRawAdd(methods, hep, hash, method,
                                        NS_INT32_TO_PTR(method_serial));
            }
            if (!he) {
                tmstats.btmalloc_failures++;
                goto fail;
            }
            le = (lfdset_entry *) he;
        }
        if (le) {
            log_event4(fp, TM_EVENT_METHOD, method_serial, library_serial,
                       filename_serial, details.lineno);
            log_string(fp, method);
            LFD_SET(fp->lfd, &le->lfdset);
        }

        /* Create a new callsite record. */
        if (!site) {
            site = __libc_malloc(sizeof(callsite));
            if (!site) {
                tmstats.btmalloc_failures++;
                goto fail;
            }

            /* Update parent and max-kids-per-parent stats. */
            if (!parent->kids)
                tmstats.calltree_parents++;
            nkids = 1;
            for (tmp = parent->kids; tmp; tmp = tmp->siblings)
                nkids++;
            if (nkids > tmstats.calltree_maxkids) {
                tmstats.calltree_maxkids = nkids;
                calltree_maxkids_parent = parent;
            }

            /* Insert the new site into the tree. */
            site->pc = pc;
            site->serial = ++callsite_serial_generator;
            LFD_ZERO(&site->lfdset);
            site->name = method;
            site->library = library;
            site->offset = details.loffset;
            site->parent = parent;
            site->siblings = parent->kids;
            parent->kids = site;
            site->kids = NULL;
        }

        /* Log the site with its parent, method, and offset. */
        log_event4(fp, TM_EVENT_CALLSITE, site->serial, parent->serial,
                   method_serial, details.foffset);
        LFD_SET(fp->lfd, &site->lfdset);

      upward:
        parent = site;
    } while (stack_index > 0);

    if (maxstack)
        calltree_maxstack_top = site;

    return site;

  fail:
    return NULL;
}

/*
 * Buffer the stack from top at low index to bottom at high, so that we can
 * reverse it in calltree.
 */
static void
stack_callback(void *pc, void *sp, void *closure)
{
    stack_buffer_info *info = (stack_buffer_info*) closure;

    /*
     * If we run out of buffer, keep incrementing entries so that
     * backtrace can call us again with a bigger buffer.
     */
    if (info->entries < info->size)
        info->buffer[info->entries] = pc;
    ++info->entries;
}

/*
 * The caller MUST NOT be holding tmlock when calling backtrace.
 * On return, if *immediate_abort is set, then the return value is NULL
 * and the thread is in a very dangerous situation (e.g. holding
 * sem_pool_lock in Mac OS X pthreads); the caller should bail out
 * without doing anything (such as acquiring locks).
 */
static callsite *
backtrace(tm_thread *t, int skipFrames, int *immediate_abort)
{
    callsite *site;
    stack_buffer_info *info = &t->backtrace_buf;
    void **new_stack_buffer;
    size_t new_stack_buffer_size;
    nsresult rv;

    t->suppress_tracing++;

    if (!stacks_enabled) {
#if defined(XP_MACOSX)
        /* Walk the stack, even if stacks_enabled is false.  We do this to
           check if we must set immediate_abort. */
        info->entries = 0;
        rv = NS_StackWalk(stack_callback, skipFrames, /* maxFrames */ 0, info,
                          0, NULL);
        *immediate_abort = rv == NS_ERROR_UNEXPECTED;
        if (rv == NS_ERROR_UNEXPECTED || info->entries == 0) {
            t->suppress_tracing--;
            return NULL;
        }
#endif

        /*
         * Create a single fake stack frame so that all the tools get
         * data in the correct format.
         */
        *immediate_abort = 0;
        if (info->size < 1) {
            PR_ASSERT(!info->buffer); /* !info->size == !info->buffer */
            info->buffer = __libc_malloc(1 * sizeof(void*));
            if (!info->buffer) {
                t->suppress_tracing--; /* balance the increment above */
                return NULL;
            }
            info->size = 1;
        }

        info->entries = 1;
        info->buffer[0] = STACK_DISABLED_PC;
    } else {
        /*
         * NS_StackWalk can (on Windows) acquire a lock inside the shared
         * library loader.  Another thread might call malloc while holding
         * that lock (when loading a shared library).  So we can't be in
         * tmlock during this call.  For details, see
         * https://bugzilla.mozilla.org/show_bug.cgi?id=374829#c8
         */

        /*
         * skipFrames == 0 means |backtrace| should show up, so don't use
         * skipFrames + 1.
         * NB: this call is repeated below if the buffer is too small.
         */
        info->entries = 0;
        rv = NS_StackWalk(stack_callback, skipFrames, /* maxFrames */ 0, info,
                          0, NULL);
        *immediate_abort = rv == NS_ERROR_UNEXPECTED;
        if (rv == NS_ERROR_UNEXPECTED || info->entries == 0) {
            t->suppress_tracing--;
            return NULL;
        }

        /*
         * To avoid allocating in stack_callback (which, on Windows, is
         * called on a different thread from the one we're running on here),
         * reallocate here if it didn't have a big enough buffer (which
         * includes the first call on any thread), and call it again.
         */
        if (info->entries > info->size) {
            new_stack_buffer_size = 2 * info->entries;
            new_stack_buffer =
                __libc_realloc(info->buffer,
                               new_stack_buffer_size * sizeof(void*));
            if (!new_stack_buffer) {
                t->suppress_tracing--; /* balance the increment above */
                return NULL;
            }
            info->buffer = new_stack_buffer;
            info->size = new_stack_buffer_size;

            /* and call NS_StackWalk again */
            info->entries = 0;
            NS_StackWalk(stack_callback, skipFrames, /* maxFrames */ 0, info,
                         0, NULL);

            /* same stack */
            PR_ASSERT(info->entries * 2 == new_stack_buffer_size);
        }
    }

    TM_ENTER_LOCK(t);

    site = calltree(info->buffer, info->entries, t);

    tmstats.backtrace_calls++;
    if (!site) {
        tmstats.backtrace_failures++;
        PR_ASSERT(tmstats.backtrace_failures < 100);
    }
    TM_EXIT_LOCK(t);

    t->suppress_tracing--;
    return site;
}

typedef struct allocation {
    PLHashEntry entry;
    size_t      size;
    FILE        *trackfp;   /* for allocation tracking */
} allocation;

#define ALLOC_HEAP_SIZE 150000

static allocation alloc_heap[ALLOC_HEAP_SIZE];
static allocation *alloc_freelist = NULL;
static int alloc_heap_initialized = 0;

static PLHashEntry *alloc_allocentry(void *pool, const void *key)
{
    allocation **listp, *alloc;
    int n;

    if (!alloc_heap_initialized) {
        n = ALLOC_HEAP_SIZE;
        listp = &alloc_freelist;
        for (alloc = alloc_heap; --n >= 0; alloc++) {
            *listp = alloc;
            listp = (allocation**) &alloc->entry.next;
        }
        *listp = NULL;
        alloc_heap_initialized = 1;
    }

    listp = &alloc_freelist;
    alloc = *listp;
    if (!alloc)
        return __libc_malloc(sizeof(allocation));
    *listp = (allocation*) alloc->entry.next;
    return &alloc->entry;
}

static void alloc_freeentry(void *pool, PLHashEntry *he, unsigned flag)
{
    allocation *alloc;

    if (flag != HT_FREE_ENTRY)
        return;
    alloc = (allocation*) he;
    if ((ptrdiff_t)(alloc - alloc_heap) < (ptrdiff_t)ALLOC_HEAP_SIZE) {
        alloc->entry.next = &alloc_freelist->entry;
        alloc_freelist = alloc;
    } else {
        __libc_free((void*) alloc);
    }
}

static PLHashAllocOps alloc_hashallocops = {
    generic_alloctable, generic_freetable,
    alloc_allocentry, alloc_freeentry
};

static PLHashNumber hash_pointer(const void *key)
{
    return (PLHashNumber) key;
}

static PLHashTable *allocations = NULL;

static PLHashTable *new_allocations(void)
{
    allocations = PL_NewHashTable(200000, hash_pointer,
                                  PL_CompareValues, PL_CompareValues,
                                  &alloc_hashallocops, NULL);
    return allocations;
}

#define get_allocations() (allocations ? allocations : new_allocations())
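
/*
 * Illustrative sketch (added commentary, not part of the original file):
 * how a malloc-side callback might record a live block in the allocations
 * table above; the real bookkeeping lives in the *Callback functions
 * declared in nsTraceMallocCallbacks.h.
 */
#if 0
static void
example_remember_block(void *ptr, size_t size)
{
    PLHashTable *ht = get_allocations();
    if (ht) {
        PLHashEntry *he = PL_HashTableAdd(ht, ptr, ptr);
        if (he) {
            allocation *alloc = (allocation*) he;
            alloc->size = size;
            alloc->trackfp = NULL;
        }
    }
}
#endif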

#if defined(XP_MACOSX)

/* from malloc.c in Libc */
typedef void
malloc_logger_t(uint32_t type, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3,
                uintptr_t result, uint32_t num_hot_frames_to_skip);

extern malloc_logger_t *malloc_logger;

#define MALLOC_LOG_TYPE_ALLOCATE 2
#define MALLOC_LOG_TYPE_DEALLOCATE 4
#define MALLOC_LOG_TYPE_HAS_ZONE 8
#define MALLOC_LOG_TYPE_CLEARED 64

static void
my_malloc_logger(uint32_t type, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3,
                 uintptr_t result, uint32_t num_hot_frames_to_skip)
{
    uintptr_t all_args[3] = { arg1, arg2, arg3 };
    uintptr_t *args = all_args + ((type & MALLOC_LOG_TYPE_HAS_ZONE) ? 1 : 0);

    uint32_t alloc_type =
        type & (MALLOC_LOG_TYPE_ALLOCATE | MALLOC_LOG_TYPE_DEALLOCATE);
    tm_thread *t = tm_get_thread();

    if (alloc_type == (MALLOC_LOG_TYPE_ALLOCATE | MALLOC_LOG_TYPE_DEALLOCATE)) {
        ReallocCallback((void*)args[0], (void*)result, args[1], 0, 0, t);
    } else if (alloc_type == MALLOC_LOG_TYPE_ALLOCATE) {
        /*
         * We don't get size/count information for calloc, so just use
         * MallocCallback.
         */
        MallocCallback((void*)result, args[0], 0, 0, t);
    } else if (alloc_type == MALLOC_LOG_TYPE_DEALLOCATE) {
        FreeCallback((void*)args[0], 0, 0, t);
    }
}

static void
StartupHooker(void)
{
    PR_ASSERT(!malloc_logger);
    malloc_logger = my_malloc_logger;
}

static void
ShutdownHooker(void)
{
    PR_ASSERT(malloc_logger == my_malloc_logger);
    malloc_logger = NULL;
}

#elif defined(XP_UNIX)

/*
 * We can't use glibc's malloc hooks because they can't be used in a
 * threadsafe manner.  They require unsetting the hooks to call into the
 * original malloc implementation, and then resetting them when the
 * original implementation returns.  If another thread calls the same
 * allocation function while the hooks are unset, we have no chance to
 * intercept the call.
 */
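
/*
 * Illustrative sketch (added commentary, not part of the original file) of
 * the unhook/rehook dance that glibc's old __malloc_hook forced on callers;
 * the window between the two assignments below is exactly the race
 * described above.
 */
#if 0
static void *(*old_malloc_hook)(size_t, const void *);

static void *
racy_malloc_hook(size_t size, const void *caller)
{
    void *result;
    __malloc_hook = old_malloc_hook;   /* unhook: other threads now bypass us */
    result = malloc(size);
    /* ... record (caller, size, result) somewhere ... */
    old_malloc_hook = __malloc_hook;
    __malloc_hook = racy_malloc_hook;  /* rehook */
    return result;
}
#endif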
michael@0 | 1154 | |
michael@0 | 1155 | NS_EXTERNAL_VIS_(__ptr_t) |
michael@0 | 1156 | malloc(size_t size) |
michael@0 | 1157 | { |
michael@0 | 1158 | uint32_t start, end; |
michael@0 | 1159 | __ptr_t ptr; |
michael@0 | 1160 | tm_thread *t; |
michael@0 | 1161 | |
michael@0 | 1162 | if (!tracing_enabled || !PR_Initialized() || |
michael@0 | 1163 | (t = tm_get_thread())->suppress_tracing != 0) { |
michael@0 | 1164 | return __libc_malloc(size); |
michael@0 | 1165 | } |
michael@0 | 1166 | |
michael@0 | 1167 | t->suppress_tracing++; |
michael@0 | 1168 | start = PR_IntervalNow(); |
michael@0 | 1169 | ptr = __libc_malloc(size); |
michael@0 | 1170 | end = PR_IntervalNow(); |
michael@0 | 1171 | t->suppress_tracing--; |
michael@0 | 1172 | |
michael@0 | 1173 | MallocCallback(ptr, size, start, end, t); |
michael@0 | 1174 | |
michael@0 | 1175 | return ptr; |
michael@0 | 1176 | } |
michael@0 | 1177 | |
michael@0 | 1178 | NS_EXTERNAL_VIS_(__ptr_t) |
michael@0 | 1179 | calloc(size_t count, size_t size) |
michael@0 | 1180 | { |
michael@0 | 1181 | uint32_t start, end; |
michael@0 | 1182 | __ptr_t ptr; |
michael@0 | 1183 | tm_thread *t; |
michael@0 | 1184 | |
michael@0 | 1185 | if (!tracing_enabled || !PR_Initialized() || |
michael@0 | 1186 | (t = tm_get_thread())->suppress_tracing != 0) { |
michael@0 | 1187 | return __libc_calloc(count, size); |
michael@0 | 1188 | } |
michael@0 | 1189 | |
michael@0 | 1190 | t->suppress_tracing++; |
michael@0 | 1191 | start = PR_IntervalNow(); |
michael@0 | 1192 | ptr = __libc_calloc(count, size); |
michael@0 | 1193 | end = PR_IntervalNow(); |
michael@0 | 1194 | t->suppress_tracing--; |
michael@0 | 1195 | |
michael@0 | 1196 | CallocCallback(ptr, count, size, start, end, t); |
michael@0 | 1197 | |
michael@0 | 1198 | return ptr; |
michael@0 | 1199 | } |
michael@0 | 1200 | |
michael@0 | 1201 | NS_EXTERNAL_VIS_(__ptr_t) |
michael@0 | 1202 | realloc(__ptr_t oldptr, size_t size) |
michael@0 | 1203 | { |
michael@0 | 1204 | uint32_t start, end; |
michael@0 | 1205 | __ptr_t ptr; |
michael@0 | 1206 | tm_thread *t; |
michael@0 | 1207 | |
michael@0 | 1208 | if (!tracing_enabled || !PR_Initialized() || |
michael@0 | 1209 | (t = tm_get_thread())->suppress_tracing != 0) { |
michael@0 | 1210 | return __libc_realloc(oldptr, size); |
michael@0 | 1211 | } |
michael@0 | 1212 | |
michael@0 | 1213 | t->suppress_tracing++; |
michael@0 | 1214 | start = PR_IntervalNow(); |
michael@0 | 1215 | ptr = __libc_realloc(oldptr, size); |
michael@0 | 1216 | end = PR_IntervalNow(); |
michael@0 | 1217 | t->suppress_tracing--; |
michael@0 | 1218 | |
michael@0 | 1219 | /* FIXME bug 392008: We could race with reallocation of oldptr. */ |
michael@0 | 1220 | ReallocCallback(oldptr, ptr, size, start, end, t); |
michael@0 | 1221 | |
michael@0 | 1222 | return ptr; |
michael@0 | 1223 | } |
michael@0 | 1224 | |
michael@0 | 1225 | NS_EXTERNAL_VIS_(void*) |
michael@0 | 1226 | valloc(size_t size) |
michael@0 | 1227 | { |
michael@0 | 1228 | uint32_t start, end; |
michael@0 | 1229 | __ptr_t ptr; |
michael@0 | 1230 | tm_thread *t; |
michael@0 | 1231 | |
michael@0 | 1232 | if (!tracing_enabled || !PR_Initialized() || |
michael@0 | 1233 | (t = tm_get_thread())->suppress_tracing != 0) { |
michael@0 | 1234 | return __libc_valloc(size); |
michael@0 | 1235 | } |
michael@0 | 1236 | |
michael@0 | 1237 | t->suppress_tracing++; |
michael@0 | 1238 | start = PR_IntervalNow(); |
michael@0 | 1239 | ptr = __libc_valloc(size); |
michael@0 | 1240 | end = PR_IntervalNow(); |
michael@0 | 1241 | t->suppress_tracing--; |
michael@0 | 1242 | |
michael@0 | 1243 | MallocCallback(ptr, size, start, end, t); |
michael@0 | 1244 | |
michael@0 | 1245 | return ptr; |
michael@0 | 1246 | } |
michael@0 | 1247 | |
michael@0 | 1248 | NS_EXTERNAL_VIS_(void*) |
michael@0 | 1249 | memalign(size_t boundary, size_t size) |
michael@0 | 1250 | { |
michael@0 | 1251 | uint32_t start, end; |
michael@0 | 1252 | __ptr_t ptr; |
michael@0 | 1253 | tm_thread *t; |
michael@0 | 1254 | |
michael@0 | 1255 | if (!tracing_enabled || !PR_Initialized() || |
michael@0 | 1256 | (t = tm_get_thread())->suppress_tracing != 0) { |
michael@0 | 1257 | return __libc_memalign(boundary, size); |
michael@0 | 1258 | } |
michael@0 | 1259 | |
michael@0 | 1260 | t->suppress_tracing++; |
michael@0 | 1261 | start = PR_IntervalNow(); |
michael@0 | 1262 | ptr = __libc_memalign(boundary, size); |
michael@0 | 1263 | end = PR_IntervalNow(); |
michael@0 | 1264 | t->suppress_tracing--; |
michael@0 | 1265 | |
michael@0 | 1266 | MallocCallback(ptr, size, start, end, t); |
michael@0 | 1267 | |
michael@0 | 1268 | return ptr; |
michael@0 | 1269 | } |
michael@0 | 1270 | |
michael@0 | 1271 | NS_EXTERNAL_VIS_(int) |
michael@0 | 1272 | posix_memalign(void **memptr, size_t alignment, size_t size) |
michael@0 | 1273 | { |
michael@0 |      |     __ptr_t ptr;
michael@0 |      | 
michael@0 |      |     /* POSIX: EINVAL if alignment is 0, not a power of two, or not a
michael@0 |      |        multiple of sizeof(void*). */
michael@0 |      |     if (alignment == 0 || (alignment & (alignment - 1)) != 0 ||
michael@0 |      |         alignment % sizeof(void*) != 0)
michael@0 |      |         return EINVAL;
michael@0 |      |     ptr = memalign(alignment, size);
michael@0 | 1275 |     if (!ptr)
michael@0 | 1276 |         return ENOMEM;
michael@0 | 1277 |     *memptr = ptr;
michael@0 | 1278 |     return 0;
michael@0 | 1279 | } |
michael@0 | 1280 | |
michael@0 | 1281 | NS_EXTERNAL_VIS_(void) |
michael@0 | 1282 | free(__ptr_t ptr) |
michael@0 | 1283 | { |
michael@0 | 1284 | uint32_t start, end; |
michael@0 | 1285 | tm_thread *t; |
michael@0 | 1286 | |
michael@0 | 1287 | if (!tracing_enabled || !PR_Initialized() || |
michael@0 | 1288 | (t = tm_get_thread())->suppress_tracing != 0) { |
michael@0 | 1289 | __libc_free(ptr); |
michael@0 | 1290 | return; |
michael@0 | 1291 | } |
michael@0 | 1292 | |
michael@0 | 1293 | t->suppress_tracing++; |
michael@0 | 1294 | start = PR_IntervalNow(); |
michael@0 | 1295 | __libc_free(ptr); |
michael@0 | 1296 | end = PR_IntervalNow(); |
michael@0 | 1297 | t->suppress_tracing--; |
michael@0 | 1298 | |
michael@0 | 1299 | /* FIXME bug 392008: We could race with reallocation of ptr. */ |
michael@0 | 1300 | |
michael@0 | 1301 | FreeCallback(ptr, start, end, t); |
michael@0 | 1302 | } |
michael@0 | 1303 | |
michael@0 | 1304 | NS_EXTERNAL_VIS_(void) |
michael@0 | 1305 | cfree(void *ptr) |
michael@0 | 1306 | { |
michael@0 | 1307 | free(ptr); |
michael@0 | 1308 | } |
michael@0 | 1309 | |
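michael@0 |      | /*
michael@0 |      |  * Nothing to hook at runtime on this branch: the wrapper functions
michael@0 |      |  * above replace the allocator entry points directly, so the hooker
michael@0 |      |  * macros are no-ops.  Windows installs real hooks instead (see
michael@0 |      |  * nsWinTraceMalloc.cpp, referenced below).
michael@0 |      |  */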
michael@0 | 1310 | #define StartupHooker() PR_BEGIN_MACRO PR_END_MACRO |
michael@0 | 1311 | #define ShutdownHooker() PR_BEGIN_MACRO PR_END_MACRO |
michael@0 | 1312 | |
michael@0 | 1313 | #elif defined(XP_WIN32) |
michael@0 | 1314 | |
michael@0 | 1315 | /* See nsWinTraceMalloc.cpp. */ |
michael@0 | 1316 | |
michael@0 | 1317 | #endif |
michael@0 | 1318 | |
michael@0 | 1319 | static const char magic[] = NS_TRACE_MALLOC_MAGIC; |
michael@0 | 1320 | |
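michael@0 |      | /*
michael@0 |      |  * Write the log header: the magic string, then PR_TicksPerSecond() as
michael@0 |      |  * a 32-bit value in network byte order, which log readers need in
michael@0 |      |  * order to convert the logged intervals to seconds.
michael@0 |      |  */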
michael@0 | 1321 | static void |
michael@0 | 1322 | log_header(int logfd) |
michael@0 | 1323 | { |
michael@0 | 1324 | uint32_t ticksPerSec = PR_htonl(PR_TicksPerSecond()); |
michael@0 | 1325 | (void) write(logfd, magic, NS_TRACE_MALLOC_MAGIC_SIZE); |
michael@0 | 1326 | (void) write(logfd, &ticksPerSec, sizeof ticksPerSec); |
michael@0 | 1327 | } |
michael@0 | 1328 | |
michael@0 | 1329 | PR_IMPLEMENT(void) |
michael@0 | 1330 | NS_TraceMallocStartup(int logfd) |
michael@0 | 1331 | { |
michael@0 | 1332 | const char* stack_disable_env; |
michael@0 | 1333 | |
michael@0 | 1334 | /* We must be running on the primordial thread. */ |
michael@0 | 1335 | PR_ASSERT(tracing_enabled == 0); |
michael@0 | 1336 | PR_ASSERT(logfp == &default_logfile); |
michael@0 | 1337 | tracing_enabled = (logfd >= 0); |
michael@0 | 1338 | |
michael@0 | 1339 | if (logfd >= 3) |
michael@0 | 1340 | MozillaRegisterDebugFD(logfd); |
michael@0 | 1341 | |
michael@0 | 1342 | /* stacks are disabled if this env var is set to a non-empty value */ |
michael@0 | 1343 | stack_disable_env = PR_GetEnv("NS_TRACE_MALLOC_DISABLE_STACKS"); |
michael@0 | 1344 | stacks_enabled = !stack_disable_env || !*stack_disable_env; |
michael@0 | 1345 | |
michael@0 | 1346 | if (tracing_enabled) { |
michael@0 | 1347 | PR_ASSERT(logfp->simsize == 0); /* didn't overflow startup buffer */ |
michael@0 | 1348 | |
michael@0 | 1349 | /* Log everything in logfp (aka default_logfile)'s buffer to logfd. */ |
michael@0 | 1350 | logfp->fd = logfd; |
michael@0 | 1351 | logfile_list = &default_logfile; |
michael@0 | 1352 | logfp->prevp = &logfile_list; |
michael@0 | 1353 | logfile_tail = &logfp->next; |
michael@0 | 1354 | log_header(logfd); |
michael@0 | 1355 | } |
michael@0 | 1356 | |
michael@0 | 1357 | RegisterTraceMallocShutdown(); |
michael@0 | 1358 | |
michael@0 | 1359 | tmlock = PR_NewLock(); |
michael@0 | 1360 | (void) tm_get_thread(); /* ensure index initialization while it's easy */ |
michael@0 | 1361 | |
michael@0 | 1362 | if (tracing_enabled) |
michael@0 | 1363 | StartupHooker(); |
michael@0 | 1364 | } |
michael@0 | 1365 | |
michael@0 | 1366 | /* |
michael@0 | 1367 | * Options for log files, with the log file name either as the next option |
michael@0 | 1368 |  * or separated by '=' (e.g. "./mozilla --trace-malloc malloc.log" or
michael@0 | 1369 | * "./mozilla --trace-malloc=malloc.log"). |
michael@0 | 1370 | */ |
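michael@0 |      | /*
michael@0 |      |  * A log file name beginning with '|' pipes the log to a child command
michael@0 |      |  * (XP_UNIX only), and a bare "-" defers logging until a log fd is
michael@0 |      |  * supplied later; both cases are handled in NS_TraceMallocStartupArgs.
michael@0 |      |  */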
michael@0 | 1371 | static const char TMLOG_OPTION[] = "--trace-malloc"; |
michael@0 | 1372 | static const char SDLOG_OPTION[] = "--shutdown-leaks"; |
michael@0 | 1373 | |
michael@0 | 1374 | #define SHOULD_PARSE_ARG(name_, log_, arg_) \ |
michael@0 | 1375 | (0 == strncmp(arg_, name_, sizeof(name_) - 1)) |
michael@0 | 1376 | |
michael@0 | 1377 | #define PARSE_ARG(name_, log_, argv_, i_, consumed_) \ |
michael@0 | 1378 | PR_BEGIN_MACRO \ |
michael@0 | 1379 | char _nextchar = argv_[i_][sizeof(name_) - 1]; \ |
michael@0 | 1380 | if (_nextchar == '=') { \ |
michael@0 | 1381 | log_ = argv_[i_] + sizeof(name_); \ |
michael@0 | 1382 | consumed_ = 1; \ |
michael@0 | 1383 | } else if (_nextchar == '\0') { \ |
michael@0 | 1384 | log_ = argv_[i_+1]; \ |
michael@0 | 1385 | consumed_ = 2; \ |
michael@0 | 1386 | } \ |
michael@0 | 1387 | PR_END_MACRO |
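michael@0 |      | 
michael@0 |      | /*
michael@0 |      |  * For example, "--trace-malloc=malloc.log" takes the log file name from
michael@0 |      |  * the same argument and sets consumed_ to 1, while "--trace-malloc
michael@0 |      |  * malloc.log" takes it from the next argv slot and sets consumed_ to 2.
michael@0 |      |  * Any other suffix leaves consumed_ at 0 and the argument in place.
michael@0 |      |  */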
michael@0 | 1388 | |
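michael@0 |      | /*
michael@0 |      |  * Parse and (except on Windows) strip the trace-malloc options from
michael@0 |      |  * argv, open the log file or pipe, and hand the resulting fd to
michael@0 |      |  * NS_TraceMallocStartup.  Returns the possibly reduced argc.
michael@0 |      |  */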
michael@0 | 1389 | PR_IMPLEMENT(int) |
michael@0 | 1390 | NS_TraceMallocStartupArgs(int argc, char **argv) |
michael@0 | 1391 | { |
michael@0 | 1392 | int i, logfd = -1, consumed, logflags; |
michael@0 | 1393 | char *tmlogname = NULL, *sdlogname_local = NULL; |
michael@0 | 1394 | |
michael@0 | 1395 | /* |
michael@0 | 1396 | * Look for the --trace-malloc <logfile> option early, to avoid missing |
michael@0 | 1397 |      * early mallocs (even so, we miss static constructors' allocations
michael@0 | 1398 |      * once their logging overflows the log file's static 16K buffer).
michael@0 | 1399 | */ |
michael@0 | 1400 | for (i = 1; i < argc; i += consumed) { |
michael@0 | 1401 | consumed = 0; |
michael@0 | 1402 | if (SHOULD_PARSE_ARG(TMLOG_OPTION, tmlogname, argv[i])) |
michael@0 | 1403 | PARSE_ARG(TMLOG_OPTION, tmlogname, argv, i, consumed); |
michael@0 | 1404 | else if (SHOULD_PARSE_ARG(SDLOG_OPTION, sdlogname_local, argv[i])) |
michael@0 | 1405 | PARSE_ARG(SDLOG_OPTION, sdlogname_local, argv, i, consumed); |
michael@0 | 1406 | |
michael@0 | 1407 | if (consumed) { |
michael@0 | 1408 | #ifndef XP_WIN32 /* Shifting argv below crashes on Windows, so skip it there. */
michael@0 | 1409 | int j; |
michael@0 | 1410 | /* Now remove --trace-malloc and its argument from argv. */ |
michael@0 | 1411 | argc -= consumed; |
michael@0 | 1412 | for (j = i; j < argc; ++j) |
michael@0 | 1413 | argv[j] = argv[j+consumed]; |
michael@0 | 1414 | argv[argc] = NULL; |
michael@0 | 1415 | consumed = 0; /* don't advance next iteration */ |
michael@0 | 1416 | #endif |
michael@0 | 1417 | } else { |
michael@0 | 1418 | consumed = 1; |
michael@0 | 1419 | } |
michael@0 | 1420 | } |
michael@0 | 1421 | |
michael@0 | 1422 | if (tmlogname) { |
michael@0 | 1423 | #ifdef XP_UNIX |
michael@0 | 1424 | int pipefds[2]; |
michael@0 | 1425 | #endif |
michael@0 | 1426 | |
michael@0 | 1427 | switch (*tmlogname) { |
michael@0 | 1428 | #ifdef XP_UNIX |
michael@0 | 1429 | case '|': |
michael@0 | 1430 | if (pipe(pipefds) == 0) { |
michael@0 | 1431 | pid_t pid = fork(); |
michael@0 | 1432 | if (pid == 0) { |
michael@0 | 1433 | /* In child: set up stdin, parse args, and exec. */ |
michael@0 | 1434 | int maxargc, nargc; |
michael@0 | 1435 | char **nargv, *token; |
michael@0 | 1436 | |
michael@0 | 1437 | if (pipefds[0] != 0) { |
michael@0 | 1438 | dup2(pipefds[0], 0); |
michael@0 | 1439 | close(pipefds[0]); |
michael@0 | 1440 | } |
michael@0 | 1441 | close(pipefds[1]); |
michael@0 | 1442 | |
michael@0 | 1443 | tmlogname = strtok(tmlogname + 1, " \t"); |
michael@0 | 1444 | maxargc = 3; |
michael@0 | 1445 | nargv = (char **) malloc((maxargc+1) * sizeof(char *)); |
michael@0 | 1446 | if (!nargv) exit(1); |
michael@0 | 1447 | nargc = 0; |
michael@0 | 1448 | nargv[nargc++] = tmlogname; |
michael@0 | 1449 | while ((token = strtok(NULL, " \t")) != NULL) { |
michael@0 | 1450 | if (nargc == maxargc) { |
michael@0 | 1451 | maxargc *= 2; |
michael@0 | 1452 | nargv = (char**) |
michael@0 | 1453 | realloc(nargv, (maxargc+1) * sizeof(char*)); |
michael@0 | 1454 | if (!nargv) exit(1); |
michael@0 | 1455 | } |
michael@0 | 1456 | nargv[nargc++] = token; |
michael@0 | 1457 | } |
michael@0 | 1458 | nargv[nargc] = NULL; |
michael@0 | 1459 | |
michael@0 | 1460 | (void) setsid(); |
michael@0 | 1461 | execvp(tmlogname, nargv); |
michael@0 | 1462 | exit(127); |
michael@0 | 1463 | } |
michael@0 | 1464 | |
michael@0 | 1465 | if (pid > 0) { |
michael@0 | 1466 | /* In parent: set logfd to the pipe's write side. */ |
michael@0 | 1467 | close(pipefds[0]); |
michael@0 | 1468 | logfd = pipefds[1]; |
michael@0 | 1469 | } |
michael@0 | 1470 | } |
michael@0 | 1471 | if (logfd < 0) { |
michael@0 | 1472 | fprintf(stderr, |
michael@0 | 1473 | "%s: can't pipe to trace-malloc child process %s: %s\n", |
michael@0 | 1474 | argv[0], tmlogname, strerror(errno)); |
michael@0 | 1475 | exit(1); |
michael@0 | 1476 | } |
michael@0 | 1477 | break; |
michael@0 | 1478 | #endif /*XP_UNIX*/ |
michael@0 | 1479 | case '-': |
michael@0 | 1480 | /* Don't log from startup, but do prepare to log later. */ |
michael@0 | 1481 | /* XXX traditional meaning of '-' as option argument is "stdin" or "stdout" */ |
michael@0 | 1482 | if (tmlogname[1] == '\0') |
michael@0 | 1483 | break; |
michael@0 | 1484 | /* FALL THROUGH */ |
michael@0 | 1485 | |
michael@0 | 1486 | default: |
michael@0 | 1487 | logflags = O_CREAT | O_WRONLY | O_TRUNC; |
michael@0 | 1488 | #if defined(XP_WIN32) |
michael@0 | 1489 | /* |
michael@0 | 1490 |              * Avoid CRLF text-mode translations on WIN32.
michael@0 | 1491 | */ |
michael@0 | 1492 | logflags |= O_BINARY; |
michael@0 | 1493 | #endif |
michael@0 | 1494 | logfd = open(tmlogname, logflags, 0644); |
michael@0 | 1495 | if (logfd < 0) { |
michael@0 | 1496 | fprintf(stderr, |
michael@0 | 1497 | "%s: can't create trace-malloc log named %s: %s\n", |
michael@0 | 1498 | argv[0], tmlogname, strerror(errno)); |
michael@0 | 1499 | exit(1); |
michael@0 | 1500 | } |
michael@0 | 1501 | break; |
michael@0 | 1502 | } |
michael@0 | 1503 | } |
michael@0 | 1504 | |
michael@0 | 1505 | if (sdlogname_local) { |
michael@0 | 1506 | strncpy(sdlogname, sdlogname_local, sizeof(sdlogname)); |
michael@0 | 1507 | sdlogname[sizeof(sdlogname) - 1] = '\0'; |
michael@0 | 1508 | } |
michael@0 | 1509 | |
michael@0 | 1510 | NS_TraceMallocStartup(logfd); |
michael@0 | 1511 | return argc; |
michael@0 | 1512 | } |
michael@0 | 1513 | |
michael@0 | 1514 | PR_IMPLEMENT(PRBool) |
michael@0 | 1515 | NS_TraceMallocHasStarted(void) |
michael@0 | 1516 | { |
michael@0 | 1517 | return tmlock ? PR_TRUE : PR_FALSE; |
michael@0 | 1518 | } |
michael@0 | 1519 | |
michael@0 | 1520 | PR_IMPLEMENT(void) |
michael@0 | 1521 | NS_TraceMallocShutdown(void) |
michael@0 | 1522 | { |
michael@0 | 1523 | logfile *fp; |
michael@0 | 1524 | |
michael@0 | 1525 | if (sdlogname[0]) |
michael@0 | 1526 | NS_TraceMallocDumpAllocations(sdlogname); |
michael@0 | 1527 | |
michael@0 | 1528 | if (tmstats.backtrace_failures) { |
michael@0 | 1529 | fprintf(stderr, |
michael@0 | 1530 | "TraceMalloc backtrace failures: %lu (malloc %lu dladdr %lu)\n", |
michael@0 | 1531 | (unsigned long) tmstats.backtrace_failures, |
michael@0 | 1532 | (unsigned long) tmstats.btmalloc_failures, |
michael@0 | 1533 | (unsigned long) tmstats.dladdr_failures); |
michael@0 | 1534 | } |
michael@0 | 1535 | while ((fp = logfile_list) != NULL) { |
michael@0 | 1536 | logfile_list = fp->next; |
michael@0 | 1537 | log_tmstats(fp); |
michael@0 | 1538 | flush_logfile(fp); |
michael@0 | 1539 | if (fp->fd >= 0) { |
michael@0 | 1540 | MozillaUnRegisterDebugFD(fp->fd); |
michael@0 | 1541 | close(fp->fd); |
michael@0 | 1542 | fp->fd = -1; |
michael@0 | 1543 | } |
michael@0 | 1544 | if (fp != &default_logfile) { |
michael@0 | 1545 | if (fp == logfp) |
michael@0 | 1546 | logfp = &default_logfile; |
michael@0 | 1547 | free((void*) fp); |
michael@0 | 1548 | } |
michael@0 | 1549 | } |
michael@0 | 1550 | if (tmlock) { |
michael@0 | 1551 | PRLock *lock = tmlock; |
michael@0 | 1552 | tmlock = NULL; |
michael@0 | 1553 | PR_DestroyLock(lock); |
michael@0 | 1554 | } |
michael@0 | 1555 | if (tracing_enabled) { |
michael@0 | 1556 | tracing_enabled = 0; |
michael@0 | 1557 | ShutdownHooker(); |
michael@0 | 1558 | } |
michael@0 | 1559 | } |
michael@0 | 1560 | |
michael@0 | 1561 | PR_IMPLEMENT(void) |
michael@0 | 1562 | NS_TraceMallocDisable(void) |
michael@0 | 1563 | { |
michael@0 | 1564 | tm_thread *t = tm_get_thread(); |
michael@0 | 1565 | logfile *fp; |
michael@0 | 1566 | uint32_t sample; |
michael@0 | 1567 | |
michael@0 | 1568 | /* Robustify in case of duplicate call. */ |
michael@0 | 1569 | PR_ASSERT(tracing_enabled); |
michael@0 | 1570 | if (tracing_enabled == 0) |
michael@0 | 1571 | return; |
michael@0 | 1572 | |
michael@0 | 1573 | TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t); |
michael@0 | 1574 | for (fp = logfile_list; fp; fp = fp->next) |
michael@0 | 1575 | flush_logfile(fp); |
michael@0 | 1576 | sample = --tracing_enabled; |
michael@0 | 1577 | TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t); |
michael@0 | 1578 | if (sample == 0) |
michael@0 | 1579 | ShutdownHooker(); |
michael@0 | 1580 | } |
michael@0 | 1581 | |
michael@0 | 1582 | PR_IMPLEMENT(void) |
michael@0 | 1583 | NS_TraceMallocEnable(void) |
michael@0 | 1584 | { |
michael@0 | 1585 | tm_thread *t = tm_get_thread(); |
michael@0 | 1586 | uint32_t sample; |
michael@0 | 1587 | |
michael@0 | 1588 | TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t); |
michael@0 | 1589 | sample = ++tracing_enabled; |
michael@0 | 1590 | TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t); |
michael@0 | 1591 | if (sample == 1) |
michael@0 | 1592 | StartupHooker(); |
michael@0 | 1593 | } |
michael@0 | 1594 | |
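michael@0 |      | /*
michael@0 |      |  * Redirect logging to fd, flushing the old log first and writing a
michael@0 |      |  * fresh header if fd refers to an empty file.  Returns the previous
michael@0 |      |  * log fd, or -2 if no logfile could be allocated for fd.
michael@0 |      |  */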
michael@0 | 1595 | PR_IMPLEMENT(int) |
michael@0 | 1596 | NS_TraceMallocChangeLogFD(int fd) |
michael@0 | 1597 | { |
michael@0 | 1598 | logfile *oldfp, *fp; |
michael@0 | 1599 | struct stat sb; |
michael@0 | 1600 | tm_thread *t = tm_get_thread(); |
michael@0 | 1601 | |
michael@0 | 1602 | TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t); |
michael@0 | 1603 | oldfp = logfp; |
michael@0 | 1604 | if (oldfp->fd != fd) { |
michael@0 | 1605 | flush_logfile(oldfp); |
michael@0 | 1606 | fp = get_logfile(fd); |
michael@0 | 1607 | if (!fp) { |
michael@0 | 1608 | TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t); |
michael@0 | 1609 | return -2; |
michael@0 | 1610 | } |
michael@0 | 1611 | if (fd >= 0 && fstat(fd, &sb) == 0 && sb.st_size == 0) |
michael@0 | 1612 | log_header(fd); |
michael@0 | 1613 | logfp = fp; |
michael@0 | 1614 | } |
michael@0 | 1615 | TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t); |
michael@0 | 1616 | return oldfp->fd; |
michael@0 | 1617 | } |
michael@0 | 1618 | |
michael@0 | 1619 | static int |
michael@0 | 1620 | lfd_clr_enumerator(PLHashEntry *he, int i, void *arg) |
michael@0 | 1621 | { |
michael@0 | 1622 | lfdset_entry *le = (lfdset_entry*) he; |
michael@0 | 1623 | logfile *fp = (logfile*) arg; |
michael@0 | 1624 | |
michael@0 | 1625 | LFD_CLR(fp->lfd, &le->lfdset); |
michael@0 | 1626 | return HT_ENUMERATE_NEXT; |
michael@0 | 1627 | } |
michael@0 | 1628 | |
michael@0 | 1629 | static void |
michael@0 | 1630 | lfd_clr_walk(callsite *site, logfile *fp) |
michael@0 | 1631 | { |
michael@0 | 1632 | callsite *kid; |
michael@0 | 1633 | |
michael@0 | 1634 | LFD_CLR(fp->lfd, &site->lfdset); |
michael@0 | 1635 | for (kid = site->kids; kid; kid = kid->siblings) |
michael@0 | 1636 | lfd_clr_walk(kid, fp); |
michael@0 | 1637 | } |
michael@0 | 1638 | |
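michael@0 |      | /*
michael@0 |      |  * Flush and close the log for fd.  The default logfile merely forgets
michael@0 |      |  * its fd; any other logfile is unlinked from logfile_list and freed,
michael@0 |      |  * after clearing its lfd bit from every callsite, library, and method
michael@0 |      |  * lfdset so that the lfd can be reused.
michael@0 |      |  */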
michael@0 | 1639 | PR_IMPLEMENT(void) |
michael@0 | 1640 | NS_TraceMallocCloseLogFD(int fd) |
michael@0 | 1641 | { |
michael@0 | 1642 | logfile *fp; |
michael@0 | 1643 | tm_thread *t = tm_get_thread(); |
michael@0 | 1644 | |
michael@0 | 1645 | TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t); |
michael@0 | 1646 | |
michael@0 | 1647 | fp = get_logfile(fd); |
michael@0 | 1648 | if (fp) { |
michael@0 | 1649 | flush_logfile(fp); |
michael@0 | 1650 | if (fp == &default_logfile) { |
michael@0 | 1651 | /* Leave default_logfile in logfile_list with an fd of -1. */ |
michael@0 | 1652 | fp->fd = -1; |
michael@0 | 1653 | |
michael@0 | 1654 | /* NB: we can never free lfd 0, it belongs to default_logfile. */ |
michael@0 | 1655 | PR_ASSERT(fp->lfd == 0); |
michael@0 | 1656 | } else { |
michael@0 | 1657 | /* Clear fp->lfd in all possible lfdsets. */ |
michael@0 | 1658 | PL_HashTableEnumerateEntries(libraries, lfd_clr_enumerator, fp); |
michael@0 | 1659 | PL_HashTableEnumerateEntries(methods, lfd_clr_enumerator, fp); |
michael@0 | 1660 | lfd_clr_walk(&calltree_root, fp); |
michael@0 | 1661 | |
michael@0 | 1662 | /* Unlink fp from logfile_list, freeing lfd for reallocation. */ |
michael@0 | 1663 | *fp->prevp = fp->next; |
michael@0 | 1664 | if (!fp->next) { |
michael@0 | 1665 | PR_ASSERT(logfile_tail == &fp->next); |
michael@0 | 1666 | logfile_tail = fp->prevp; |
michael@0 | 1667 | } |
michael@0 | 1668 | |
michael@0 | 1669 | /* Reset logfp if we must, then free fp. */ |
michael@0 | 1670 | if (fp == logfp) |
michael@0 | 1671 | logfp = &default_logfile; |
michael@0 | 1672 | free((void*) fp); |
michael@0 | 1673 | } |
michael@0 | 1674 | } |
michael@0 | 1675 | |
michael@0 | 1676 | TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t); |
michael@0 | 1677 | MozillaUnRegisterDebugFD(fd); |
michael@0 | 1678 | close(fd); |
michael@0 | 1679 | } |
michael@0 | 1680 | |
michael@0 | 1681 | PR_IMPLEMENT(void) |
michael@0 | 1682 | NS_TraceMallocLogTimestamp(const char *caption) |
michael@0 | 1683 | { |
michael@0 | 1684 | logfile *fp; |
michael@0 | 1685 | #ifdef XP_UNIX |
michael@0 | 1686 | struct timeval tv; |
michael@0 | 1687 | #endif |
michael@0 | 1688 | #ifdef XP_WIN32 |
michael@0 | 1689 | struct _timeb tb; |
michael@0 | 1690 | #endif |
michael@0 | 1691 | tm_thread *t = tm_get_thread(); |
michael@0 | 1692 | |
michael@0 | 1693 | TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t); |
michael@0 | 1694 | |
michael@0 | 1695 | fp = logfp; |
michael@0 | 1696 | log_byte(fp, TM_EVENT_TIMESTAMP); |
michael@0 | 1697 | |
michael@0 | 1698 | #ifdef XP_UNIX |
michael@0 | 1699 | gettimeofday(&tv, NULL); |
michael@0 | 1700 | log_uint32(fp, (uint32_t) tv.tv_sec); |
michael@0 | 1701 | log_uint32(fp, (uint32_t) tv.tv_usec); |
michael@0 | 1702 | #endif |
michael@0 | 1703 | #ifdef XP_WIN32 |
michael@0 | 1704 | _ftime(&tb); |
michael@0 | 1705 | log_uint32(fp, (uint32_t) tb.time); |
michael@0 | 1706 | log_uint32(fp, (uint32_t) tb.millitm); |
michael@0 | 1707 | #endif |
michael@0 | 1708 | log_string(fp, caption); |
michael@0 | 1709 | |
michael@0 | 1710 | TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t); |
michael@0 | 1711 | } |
michael@0 | 1712 | |
michael@0 | 1713 | static void |
michael@0 | 1714 | print_stack(FILE *ofp, callsite *site) |
michael@0 | 1715 | { |
michael@0 | 1716 | while (site) { |
michael@0 | 1717 | if (site->name || site->parent) { |
michael@0 | 1718 | fprintf(ofp, "%s[%s +0x%X]\n", |
michael@0 | 1719 | site->name, site->library, site->offset); |
michael@0 | 1720 | } |
michael@0 | 1721 | site = site->parent; |
michael@0 | 1722 | } |
michael@0 | 1723 | } |
michael@0 | 1724 | |
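michael@0 |      | /* Format string for dumping heap words at this platform's pointer width. */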
michael@0 | 1725 | static const char *allocation_format = |
michael@0 | 1726 | (sizeof(void*) == 4) ? "\t0x%08zX\n" : |
michael@0 | 1727 | (sizeof(void*) == 8) ? "\t0x%016zX\n" : |
michael@0 | 1728 | "UNEXPECTED sizeof(void*)"; |
michael@0 | 1729 | |
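michael@0 |      | /*
michael@0 |      |  * Dump one live allocation: its address, runtime type name, and size,
michael@0 |      |  * then the block's contents one word per line, then the stack of the
michael@0 |      |  * call site that allocated it.
michael@0 |      |  */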
michael@0 | 1730 | static int |
michael@0 | 1731 | allocation_enumerator(PLHashEntry *he, int i, void *arg) |
michael@0 | 1732 | { |
michael@0 | 1733 | allocation *alloc = (allocation*) he; |
michael@0 | 1734 | FILE *ofp = (FILE*) arg; |
michael@0 | 1735 | callsite *site = (callsite*) he->value; |
michael@0 | 1736 | |
michael@0 | 1737 | size_t *p, *end; |
michael@0 | 1738 | |
michael@0 | 1739 | fprintf(ofp, "%p <%s> (%lu)\n", |
michael@0 | 1740 | he->key, |
michael@0 | 1741 | nsGetTypeName(he->key), |
michael@0 | 1742 | (unsigned long) alloc->size); |
michael@0 | 1743 | |
michael@0 | 1744 | for (p = (size_t*) he->key, |
michael@0 | 1745 | end = (size_t*) ((char*)he->key + alloc->size); |
michael@0 | 1746 | p < end; ++p) { |
michael@0 | 1747 | fprintf(ofp, allocation_format, *p); |
michael@0 | 1748 | } |
michael@0 | 1749 | |
michael@0 | 1750 | print_stack(ofp, site); |
michael@0 | 1751 | fputc('\n', ofp); |
michael@0 | 1752 | return HT_ENUMERATE_NEXT; |
michael@0 | 1753 | } |
michael@0 | 1754 | |
michael@0 | 1755 | PR_IMPLEMENT(void) |
michael@0 | 1756 | NS_TraceStack(int skip, FILE *ofp) |
michael@0 | 1757 | { |
michael@0 | 1758 | callsite *site; |
michael@0 | 1759 | tm_thread *t = tm_get_thread(); |
michael@0 | 1760 | int immediate_abort; |
michael@0 | 1761 | |
michael@0 | 1762 | site = backtrace(t, skip + 1, &immediate_abort); |
michael@0 |      |     print_stack(ofp, site);
michael@0 | 1770 | } |
michael@0 | 1771 | |
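michael@0 |      | /*
michael@0 |      |  * Write a human-readable dump of every live allocation to pathname.
michael@0 |      |  * Returns 0 on success, -1 if the file cannot be written.
michael@0 |      |  */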
michael@0 | 1772 | PR_IMPLEMENT(int) |
michael@0 | 1773 | NS_TraceMallocDumpAllocations(const char *pathname) |
michael@0 | 1774 | { |
michael@0 | 1775 | FILE *ofp; |
michael@0 | 1776 | int rv; |
michael@0 | 1778 | |
michael@0 | 1779 | tm_thread *t = tm_get_thread(); |
michael@0 | 1780 | |
michael@0 | 1781 | TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t); |
michael@0 | 1782 | |
michael@0 | 1783 | ofp = fopen(pathname, WRITE_FLAGS); |
michael@0 | 1784 | if (ofp) { |
michael@0 | 1785 | MozillaRegisterDebugFD(fileno(ofp)); |
michael@0 | 1786 | if (allocations) { |
michael@0 | 1787 | PL_HashTableEnumerateEntries(allocations, allocation_enumerator, |
michael@0 | 1788 | ofp); |
michael@0 | 1789 | } |
michael@0 | 1790 | rv = ferror(ofp) ? -1 : 0; |
michael@0 | 1791 | MozillaUnRegisterDebugFILE(ofp); |
michael@0 | 1792 | fclose(ofp); |
michael@0 | 1793 | } else { |
michael@0 | 1794 | rv = -1; |
michael@0 | 1795 | } |
michael@0 | 1796 | |
michael@0 | 1797 | TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t); |
michael@0 | 1798 | |
michael@0 | 1799 | return rv; |
michael@0 | 1800 | } |
michael@0 | 1801 | |
michael@0 | 1802 | PR_IMPLEMENT(void) |
michael@0 | 1803 | NS_TraceMallocFlushLogfiles(void) |
michael@0 | 1804 | { |
michael@0 | 1805 | logfile *fp; |
michael@0 | 1806 | tm_thread *t = tm_get_thread(); |
michael@0 | 1807 | |
michael@0 | 1808 | TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t); |
michael@0 | 1809 | |
michael@0 | 1810 | for (fp = logfile_list; fp; fp = fp->next) |
michael@0 | 1811 | flush_logfile(fp); |
michael@0 | 1812 | |
michael@0 | 1813 | TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t); |
michael@0 | 1814 | } |
michael@0 | 1815 | |
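michael@0 |      | /*
michael@0 |      |  * If ptr is a live allocation, arrange for its subsequent realloc and
michael@0 |      |  * free traffic to be narrated to ofp (see the alloc->trackfp users in
michael@0 |      |  * the callbacks below).
michael@0 |      |  */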
michael@0 | 1816 | PR_IMPLEMENT(void) |
michael@0 | 1817 | NS_TrackAllocation(void* ptr, FILE *ofp) |
michael@0 | 1818 | { |
michael@0 | 1819 | allocation *alloc; |
michael@0 | 1820 | tm_thread *t = tm_get_thread(); |
michael@0 | 1821 | |
michael@0 | 1822 | fprintf(ofp, "Trying to track %p\n", (void*) ptr); |
michael@0 | 1823 | setlinebuf(ofp); |
michael@0 | 1824 | |
michael@0 | 1825 | TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t); |
michael@0 | 1826 | if (get_allocations()) { |
michael@0 | 1827 | alloc = (allocation*) |
michael@0 | 1828 | *PL_HashTableRawLookup(allocations, hash_pointer(ptr), ptr); |
michael@0 | 1829 | if (alloc) { |
michael@0 | 1830 | fprintf(ofp, "Tracking %p\n", (void*) ptr); |
michael@0 | 1831 | alloc->trackfp = ofp; |
michael@0 | 1832 | } else { |
michael@0 | 1833 | fprintf(ofp, "Not tracking %p\n", (void*) ptr); |
michael@0 | 1834 | } |
michael@0 | 1835 | } |
michael@0 | 1836 | TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t); |
michael@0 | 1837 | } |
michael@0 | 1838 | |
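michael@0 |      | /*
michael@0 |      |  * Log one malloc.  start and end are PR_IntervalNow() readings taken
michael@0 |      |  * around the underlying allocation, and t is the calling thread's
michael@0 |      |  * trace-malloc state.  Writes a TM_EVENT_MALLOC record and enters ptr
michael@0 |      |  * in the allocations table.
michael@0 |      |  */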
michael@0 | 1839 | PR_IMPLEMENT(void) |
michael@0 | 1840 | MallocCallback(void *ptr, size_t size, uint32_t start, uint32_t end, tm_thread *t) |
michael@0 | 1841 | { |
michael@0 | 1842 | callsite *site; |
michael@0 | 1843 | PLHashEntry *he; |
michael@0 | 1844 | allocation *alloc; |
michael@0 | 1845 | int immediate_abort; |
michael@0 | 1846 | |
michael@0 | 1847 | if (!tracing_enabled || t->suppress_tracing != 0) |
michael@0 | 1848 | return; |
michael@0 | 1849 | |
michael@0 | 1850 | site = backtrace(t, 2, &immediate_abort); |
michael@0 | 1851 | if (immediate_abort) |
michael@0 | 1852 | return; |
michael@0 | 1853 | |
michael@0 | 1854 | TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t); |
michael@0 | 1855 | tmstats.malloc_calls++; |
michael@0 | 1856 | if (!ptr) { |
michael@0 | 1857 | tmstats.malloc_failures++; |
michael@0 | 1858 | } else { |
michael@0 | 1859 | if (site) { |
michael@0 | 1860 | log_event5(logfp, TM_EVENT_MALLOC, |
michael@0 | 1861 | site->serial, start, end - start, |
michael@0 | 1862 | (uint32_t)NS_PTR_TO_INT32(ptr), size); |
michael@0 | 1863 | } |
michael@0 | 1864 | if (get_allocations()) { |
michael@0 | 1865 | he = PL_HashTableAdd(allocations, ptr, site); |
michael@0 | 1866 | if (he) { |
michael@0 | 1867 | alloc = (allocation*) he; |
michael@0 | 1868 | alloc->size = size; |
michael@0 | 1869 | alloc->trackfp = NULL; |
michael@0 | 1870 | } |
michael@0 | 1871 | } |
michael@0 | 1872 | } |
michael@0 | 1873 | TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t); |
michael@0 | 1874 | } |
michael@0 | 1875 | |
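michael@0 |      | /*
michael@0 |      |  * As MallocCallback, but for calloc(): the logged size is count * size.
michael@0 |      |  */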
michael@0 | 1876 | PR_IMPLEMENT(void) |
michael@0 | 1877 | CallocCallback(void *ptr, size_t count, size_t size, uint32_t start, uint32_t end, tm_thread *t) |
michael@0 | 1878 | { |
michael@0 | 1879 | callsite *site; |
michael@0 | 1880 | PLHashEntry *he; |
michael@0 | 1881 | allocation *alloc; |
michael@0 | 1882 | int immediate_abort; |
michael@0 | 1883 | |
michael@0 | 1884 | if (!tracing_enabled || t->suppress_tracing != 0) |
michael@0 | 1885 | return; |
michael@0 | 1886 | |
michael@0 | 1887 | site = backtrace(t, 2, &immediate_abort); |
michael@0 | 1888 | if (immediate_abort) |
michael@0 | 1889 | return; |
michael@0 | 1890 | |
michael@0 | 1891 | TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t); |
michael@0 | 1892 | tmstats.calloc_calls++; |
michael@0 | 1893 | if (!ptr) { |
michael@0 | 1894 | tmstats.calloc_failures++; |
michael@0 | 1895 | } else { |
michael@0 | 1896 | size *= count; |
michael@0 | 1897 | if (site) { |
michael@0 | 1898 | log_event5(logfp, TM_EVENT_CALLOC, |
michael@0 | 1899 | site->serial, start, end - start, |
michael@0 | 1900 | (uint32_t)NS_PTR_TO_INT32(ptr), size); |
michael@0 | 1901 | } |
michael@0 | 1902 | if (get_allocations()) { |
michael@0 | 1903 | he = PL_HashTableAdd(allocations, ptr, site); |
michael@0 | 1904 | if (he) { |
michael@0 | 1905 | alloc = (allocation*) he; |
michael@0 | 1906 | alloc->size = size; |
michael@0 | 1907 | alloc->trackfp = NULL; |
michael@0 | 1908 | } |
michael@0 | 1909 | } |
michael@0 | 1910 | } |
michael@0 | 1911 | TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t); |
michael@0 | 1912 | } |
michael@0 | 1913 | |
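michael@0 |      | /*
michael@0 |      |  * Log one realloc, recording both the old and the new block and call
michael@0 |      |  * site.  If the block moved, the old allocations entry is replaced by
michael@0 |      |  * a new one; a tracked block's trackfp follows it to the new entry.
michael@0 |      |  */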
michael@0 | 1914 | PR_IMPLEMENT(void) |
michael@0 | 1915 | ReallocCallback(void * oldptr, void *ptr, size_t size, |
michael@0 | 1916 | uint32_t start, uint32_t end, tm_thread *t) |
michael@0 | 1917 | { |
michael@0 | 1918 | callsite *oldsite, *site; |
michael@0 | 1919 | size_t oldsize; |
michael@0 | 1920 | PLHashNumber hash; |
michael@0 | 1921 | PLHashEntry **hep, *he; |
michael@0 | 1922 | allocation *alloc; |
michael@0 | 1923 | FILE *trackfp = NULL; |
michael@0 | 1924 | int immediate_abort; |
michael@0 | 1925 | |
michael@0 | 1926 | if (!tracing_enabled || t->suppress_tracing != 0) |
michael@0 | 1927 | return; |
michael@0 | 1928 | |
michael@0 | 1929 | site = backtrace(t, 2, &immediate_abort); |
michael@0 | 1930 | if (immediate_abort) |
michael@0 | 1931 | return; |
michael@0 | 1932 | |
michael@0 | 1933 | TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t); |
michael@0 | 1934 | tmstats.realloc_calls++; |
michael@0 | 1935 | oldsite = NULL; |
michael@0 | 1936 | oldsize = 0; |
michael@0 | 1937 | hep = NULL; |
michael@0 | 1938 | he = NULL; |
michael@0 | 1939 | if (oldptr && get_allocations()) { |
michael@0 | 1940 | hash = hash_pointer(oldptr); |
michael@0 | 1941 | hep = PL_HashTableRawLookup(allocations, hash, oldptr); |
michael@0 | 1942 | he = *hep; |
michael@0 | 1943 | if (he) { |
michael@0 | 1944 | oldsite = (callsite*) he->value; |
michael@0 | 1945 | alloc = (allocation*) he; |
michael@0 | 1946 | oldsize = alloc->size; |
michael@0 | 1947 | trackfp = alloc->trackfp; |
michael@0 | 1948 | if (trackfp) { |
michael@0 | 1949 | fprintf(alloc->trackfp, |
michael@0 | 1950 |                         "\nrealloc(%p, %lu) returned %p, oldsize %lu, alloc site %p\n",
michael@0 | 1951 |                         (void*) oldptr, (unsigned long) size, (void*) ptr,
michael@0 | 1952 |                         (unsigned long) oldsize, (void*) oldsite);
michael@0 | 1953 | NS_TraceStack(1, trackfp); |
michael@0 | 1954 | } |
michael@0 | 1955 | } |
michael@0 | 1956 | } |
michael@0 | 1957 | if (!ptr && size) { |
michael@0 | 1958 | /* |
michael@0 | 1959 | * When realloc() fails, the original block is not freed or moved, so |
michael@0 | 1960 | * we'll leave the allocation entry untouched. |
michael@0 | 1961 | */ |
michael@0 | 1962 | tmstats.realloc_failures++; |
michael@0 | 1963 | } else { |
michael@0 | 1964 | if (site) { |
michael@0 | 1965 | log_event8(logfp, TM_EVENT_REALLOC, |
michael@0 | 1966 | site->serial, start, end - start, |
michael@0 | 1967 | (uint32_t)NS_PTR_TO_INT32(ptr), size, |
michael@0 | 1968 | oldsite ? oldsite->serial : 0, |
michael@0 | 1969 | (uint32_t)NS_PTR_TO_INT32(oldptr), oldsize); |
michael@0 | 1970 | } |
michael@0 | 1971 | if (ptr && allocations) { |
michael@0 | 1972 | if (ptr != oldptr) { |
michael@0 | 1973 | /* |
michael@0 | 1974 | * If we're reallocating (not allocating new space by passing |
michael@0 | 1975 | * null to realloc) and realloc moved the block, free oldptr. |
michael@0 | 1976 | */ |
michael@0 | 1977 | if (he) |
michael@0 | 1978 | PL_HashTableRawRemove(allocations, hep, he); |
michael@0 | 1979 | |
michael@0 | 1980 | /* Record the new allocation now, setting he. */ |
michael@0 | 1981 | he = PL_HashTableAdd(allocations, ptr, site); |
michael@0 | 1982 | } else { |
michael@0 | 1983 | /* |
michael@0 | 1984 | * If we haven't yet recorded an allocation (possibly due to a |
michael@0 | 1985 | * temporary memory shortage), do it now. |
michael@0 | 1986 | */ |
michael@0 | 1987 | if (!he) |
michael@0 | 1988 | he = PL_HashTableAdd(allocations, ptr, site); |
michael@0 | 1989 | } |
michael@0 | 1990 | if (he) { |
michael@0 | 1991 | alloc = (allocation*) he; |
michael@0 | 1992 | alloc->size = size; |
michael@0 | 1993 | alloc->trackfp = trackfp; |
michael@0 | 1994 | } |
michael@0 | 1995 | } |
michael@0 | 1996 | } |
michael@0 | 1997 | TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t); |
michael@0 | 1998 | } |
michael@0 | 1999 | |
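michael@0 |      | /*
michael@0 |      |  * Log one free and drop ptr's entry from the allocations table; frees
michael@0 |      |  * of null pointers are only counted.
michael@0 |      |  */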
michael@0 | 2000 | PR_IMPLEMENT(void) |
michael@0 | 2001 | FreeCallback(void * ptr, uint32_t start, uint32_t end, tm_thread *t) |
michael@0 | 2002 | { |
michael@0 | 2003 | PLHashEntry **hep, *he; |
michael@0 | 2004 | callsite *site; |
michael@0 | 2005 | allocation *alloc; |
michael@0 | 2006 | |
michael@0 | 2007 | if (!tracing_enabled || t->suppress_tracing != 0) |
michael@0 | 2008 | return; |
michael@0 | 2009 | |
michael@0 | 2010 | /* |
michael@0 | 2011 | * FIXME: Perhaps we should call backtrace() so we can check for |
michael@0 | 2012 | * immediate_abort. However, the only current contexts where |
michael@0 | 2013 | * immediate_abort will be true do not call free(), so for now, |
michael@0 | 2014 | * let's avoid the cost of backtrace(). See bug 478195. |
michael@0 | 2015 | */ |
michael@0 | 2016 | TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t); |
michael@0 | 2017 | tmstats.free_calls++; |
michael@0 | 2018 | if (!ptr) { |
michael@0 | 2019 | tmstats.null_free_calls++; |
michael@0 | 2020 | } else { |
michael@0 | 2021 | if (get_allocations()) { |
michael@0 | 2022 | hep = PL_HashTableRawLookup(allocations, hash_pointer(ptr), ptr); |
michael@0 | 2023 | he = *hep; |
michael@0 | 2024 | if (he) { |
michael@0 | 2025 | site = (callsite*) he->value; |
michael@0 | 2026 | if (site) { |
michael@0 | 2027 | alloc = (allocation*) he; |
michael@0 | 2028 | if (alloc->trackfp) { |
michael@0 | 2029 | fprintf(alloc->trackfp, "\nfree(%p), alloc site %p\n", |
michael@0 | 2030 | (void*) ptr, (void*) site); |
michael@0 | 2031 | NS_TraceStack(1, alloc->trackfp); |
michael@0 | 2032 | } |
michael@0 | 2033 | log_event5(logfp, TM_EVENT_FREE, |
michael@0 | 2034 | site->serial, start, end - start, |
michael@0 | 2035 | (uint32_t)NS_PTR_TO_INT32(ptr), alloc->size); |
michael@0 | 2036 | } |
michael@0 | 2037 | PL_HashTableRawRemove(allocations, hep, he); |
michael@0 | 2038 | } |
michael@0 | 2039 | } |
michael@0 | 2040 | } |
michael@0 | 2041 | TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t); |
michael@0 | 2042 | } |
michael@0 | 2043 | |
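michael@0 |      | /*
michael@0 |      |  * Capture the current call stack as an opaque id for later printing
michael@0 |      |  * via NS_TraceMallocPrintStackTrace.
michael@0 |      |  */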
michael@0 | 2044 | PR_IMPLEMENT(nsTMStackTraceID) |
michael@0 | 2045 | NS_TraceMallocGetStackTrace(void) |
michael@0 | 2046 | { |
michael@0 | 2047 | callsite *site; |
michael@0 | 2048 | int dummy; |
michael@0 | 2049 | tm_thread *t = tm_get_thread(); |
michael@0 | 2050 | |
michael@0 | 2051 | PR_ASSERT(t->suppress_tracing == 0); |
michael@0 | 2052 | |
michael@0 | 2053 | site = backtrace(t, 2, &dummy); |
michael@0 | 2054 | return (nsTMStackTraceID) site; |
michael@0 | 2055 | } |
michael@0 | 2056 | |
michael@0 | 2057 | PR_IMPLEMENT(void) |
michael@0 | 2058 | NS_TraceMallocPrintStackTrace(FILE *ofp, nsTMStackTraceID id) |
michael@0 | 2059 | { |
michael@0 | 2060 | print_stack(ofp, (callsite *)id); |
michael@0 | 2061 | } |
michael@0 | 2062 | |
michael@0 | 2063 | #endif /* NS_TRACE_MALLOC */ |