tools/trace-malloc/lib/nsTraceMalloc.c

/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim:cindent:ts=8:et:sw=4:
 *
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifdef NS_TRACE_MALLOC
 /*
  * TODO:
  * - FIXME https://bugzilla.mozilla.org/show_bug.cgi?id=392008
  * - extend logfile so 'F' record tells free stack
  */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#ifdef XP_UNIX
#include <unistd.h>
#include <sys/stat.h>
#include <sys/time.h>
#endif
#include "plhash.h"
#include "pratom.h"
#include "prlog.h"
#include "prlock.h"
#include "prmon.h"
#include "prprf.h"
#include "prenv.h"
#include "prnetdb.h"
#include "nsTraceMalloc.h"
#include "nscore.h"
#include "prinit.h"
#include "prthread.h"
#include "plstr.h"
#include "nsStackWalk.h"
#include "nsTraceMallocCallbacks.h"
#include "nsTypeInfo.h"
#include "mozilla/PoisonIOInterposer.h"

#if defined(XP_MACOSX)

#include <malloc/malloc.h>

#define WRITE_FLAGS "w"

#define __libc_malloc(x)                malloc(x)
#define __libc_realloc(x, y)            realloc(x, y)
#define __libc_free(x)                  free(x)

#elif defined(XP_UNIX)

#include <malloc.h>

#define WRITE_FLAGS "w"

#ifdef WRAP_SYSTEM_INCLUDES
#pragma GCC visibility push(default)
#endif
extern __ptr_t __libc_malloc(size_t);
extern __ptr_t __libc_calloc(size_t, size_t);
extern __ptr_t __libc_realloc(__ptr_t, size_t);
extern void    __libc_free(__ptr_t);
extern __ptr_t __libc_memalign(size_t, size_t);
extern __ptr_t __libc_valloc(size_t);
#ifdef WRAP_SYSTEM_INCLUDES
#pragma GCC visibility pop
#endif

#elif defined(XP_WIN32)

#include <sys/timeb.h>                  /* for timeb */
#include <sys/stat.h>                   /* for fstat */

#include <io.h>                         /* for write */

#define WRITE_FLAGS "w"

#define __libc_malloc(x)                dhw_orig_malloc(x)
#define __libc_realloc(x, y)            dhw_orig_realloc(x, y)
#define __libc_free(x)                  dhw_orig_free(x)

#else  /* not XP_MACOSX, XP_UNIX, or XP_WIN32 */

# error "Unknown build configuration!"

#endif

typedef struct logfile logfile;

#define STARTUP_TMBUFSIZE (64 * 1024)
#define LOGFILE_TMBUFSIZE (16 * 1024)

struct logfile {
    int         fd;
    int         lfd;            /* logical fd, dense among all logfiles */
    char        *buf;
    int         bufsize;
    int         pos;
    uint32_t    size;
    uint32_t    simsize;
    logfile     *next;
    logfile     **prevp;
};

static char      default_buf[STARTUP_TMBUFSIZE];
static logfile   default_logfile =
                   {-1, 0, default_buf, STARTUP_TMBUFSIZE, 0, 0, 0, NULL, NULL};
static logfile   *logfile_list = NULL;
static logfile   **logfile_tail = &logfile_list;
static logfile   *logfp = &default_logfile;
static PRLock    *tmlock = NULL;
#ifndef PATH_MAX
#define PATH_MAX 4096
#endif
static char      sdlogname[PATH_MAX] = ""; /* filename for shutdown leak log */

/*
 * This enables/disables trace-malloc logging.
 *
 * It is separate from suppress_tracing so that we do not have to pay
 * the performance cost of repeated TM_GET_TLS_DATA calls when
 * trace-malloc is disabled (which is not as bad as the locking we used
 * to have).
 *
 * It must default to zero, since it can be tested by the Linux malloc
 * hooks before NS_TraceMallocStartup sets it.
 */
static uint32_t tracing_enabled = 0;

/*
 * Control whether we should log stacks.
 */
static uint32_t stacks_enabled = 1;

/*
 * This lock must be held while manipulating the calltree, the
 * allocations table, the log, or the tmstats.
 *
 * Callers should not *enter* the lock without checking suppress_tracing
 * first; otherwise they risk trying to re-enter on the same thread.
 */
#define TM_ENTER_LOCK(t)                                                      \
    PR_BEGIN_MACRO                                                            \
        PR_ASSERT(t->suppress_tracing != 0);                                  \
        if (tmlock)                                                           \
            PR_Lock(tmlock);                                                  \
    PR_END_MACRO

#define TM_EXIT_LOCK(t)                                                       \
    PR_BEGIN_MACRO                                                            \
        PR_ASSERT(t->suppress_tracing != 0);                                  \
        if (tmlock)                                                           \
            PR_Unlock(tmlock);                                                \
    PR_END_MACRO

#define TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t)                                 \
    PR_BEGIN_MACRO                                                            \
        t->suppress_tracing++;                                                \
        TM_ENTER_LOCK(t);                                                     \
    PR_END_MACRO

#define TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t)                                \
    PR_BEGIN_MACRO                                                            \
        TM_EXIT_LOCK(t);                                                      \
        t->suppress_tracing--;                                                \
    PR_END_MACRO

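/*
 * For illustration, the usual pattern for code that must touch the
 * calltree, the allocations table, or the log looks roughly like this
 * (a sketch, not a function from this file):
 *
 *     tm_thread *t = tm_get_thread();
 *     if (t->suppress_tracing == 0) {
 *         TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t);
 *         ... manipulate shared trace-malloc state ...
 *         TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);
 *     }
 */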

/*
 * Thread-local storage.
 *
 * We can't use NSPR thread-local storage for this because it mallocs
 * within PR_GetThreadPrivate (the first time) and PR_SetThreadPrivate
 * (which can be worked around by protecting all uses of those functions
 * with a monitor, ugh) and because it calls malloc/free when the
 * thread-local storage is in an inconsistent state within
 * PR_SetThreadPrivate (when expanding the thread-local storage array)
 * and _PRI_DetachThread (when and after deleting the thread-local
 * storage array).
 */

#ifdef XP_WIN32

#include <windows.h>

#define TM_TLS_INDEX_TYPE               DWORD
#define TM_CREATE_TLS_INDEX(i_)         PR_BEGIN_MACRO                        \
                                          (i_) = TlsAlloc();                  \
                                        PR_END_MACRO
#define TM_DESTROY_TLS_INDEX(i_)        TlsFree((i_))
#define TM_GET_TLS_DATA(i_)             TlsGetValue((i_))
#define TM_SET_TLS_DATA(i_, v_)         TlsSetValue((i_), (v_))

#else

#include <pthread.h>

#define TM_TLS_INDEX_TYPE               pthread_key_t
#define TM_CREATE_TLS_INDEX(i_)         pthread_key_create(&(i_), NULL)
#define TM_DESTROY_TLS_INDEX(i_)        pthread_key_delete((i_))
#define TM_GET_TLS_DATA(i_)             pthread_getspecific((i_))
#define TM_SET_TLS_DATA(i_, v_)         pthread_setspecific((i_), (v_))

#endif

static TM_TLS_INDEX_TYPE tls_index;
static PRBool tls_index_initialized = PR_FALSE;

/* FIXME (maybe): This is currently unused; we leak the thread-local data. */
#if 0
static void
free_tm_thread(void *priv)
{
    tm_thread *t = (tm_thread*) priv;

    PR_ASSERT(t->suppress_tracing == 0);

    if (t->in_heap) {
        t->suppress_tracing = 1;
        if (t->backtrace_buf.buffer)
            __libc_free(t->backtrace_buf.buffer);

        __libc_free(t);
    }
}
#endif

tm_thread *
tm_get_thread(void)
{
    tm_thread *t;
    tm_thread stack_tm_thread;

    if (!tls_index_initialized) {
        /*
         * Assume that the first call to |malloc| will occur before
         * there are multiple threads.  (If that's not the case, we
         * probably need to do the necessary synchronization without
         * using NSPR primitives; see the discussion in
         * https://bugzilla.mozilla.org/show_bug.cgi?id=442192.)
         */
        TM_CREATE_TLS_INDEX(tls_index);
        tls_index_initialized = PR_TRUE;
    }

    t = TM_GET_TLS_DATA(tls_index);

    if (!t) {
        /*
         * First, store a tm_thread on the stack to suppress tracing for
         * the malloc below.
         */
        stack_tm_thread.suppress_tracing = 1;
        stack_tm_thread.backtrace_buf.buffer = NULL;
        stack_tm_thread.backtrace_buf.size = 0;
        stack_tm_thread.backtrace_buf.entries = 0;
        TM_SET_TLS_DATA(tls_index, &stack_tm_thread);

        t = (tm_thread*) __libc_malloc(sizeof(tm_thread));
        t->suppress_tracing = 0;
        t->backtrace_buf = stack_tm_thread.backtrace_buf;
        TM_SET_TLS_DATA(tls_index, t);

        PR_ASSERT(stack_tm_thread.suppress_tracing == 1); /* balanced */
    }

    return t;
}

/* We don't want more than 32 logfiles open at once, ok? */
typedef uint32_t        lfd_set;

#define LFD_SET_STATIC_INITIALIZER 0
#define LFD_SET_SIZE    32

#define LFD_ZERO(s)     (*(s) = 0)
#define LFD_BIT(i)      ((uint32_t)1 << (i))
#define LFD_TEST(i,s)   (LFD_BIT(i) & *(s))
#define LFD_SET(i,s)    (*(s) |= LFD_BIT(i))
#define LFD_CLR(i,s)    (*(s) &= ~LFD_BIT(i))

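/*
 * Each logged entity (library, filename, method, callsite) carries an
 * lfd_set recording which logfiles it has already been written to.
 * The pattern, as used throughout calltree() below, is roughly:
 *
 *     if (!LFD_TEST(fp->lfd, &site->lfdset)) {
 *         ... log the record to fp ...
 *         LFD_SET(fp->lfd, &site->lfdset);
 *     }
 */
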
static logfile *get_logfile(int fd)
{
    logfile *fp;
    int lfd;

    for (fp = logfile_list; fp; fp = fp->next) {
        if (fp->fd == fd)
            return fp;
    }
    lfd = 0;
retry:
    for (fp = logfile_list; fp; fp = fp->next) {
        if (fp->lfd == lfd) {
            if (++lfd >= LFD_SET_SIZE)
                return NULL;
            goto retry;
        }
    }
    fp = __libc_malloc(sizeof(logfile) + LOGFILE_TMBUFSIZE);
    if (!fp)
        return NULL;
    fp->fd = fd;
    fp->lfd = lfd;
    fp->buf = (char*) (fp + 1);
    fp->bufsize = LOGFILE_TMBUFSIZE;
    fp->pos = 0;
    fp->size = fp->simsize = 0;
    fp->next = NULL;
    fp->prevp = logfile_tail;
    *logfile_tail = fp;
    logfile_tail = &fp->next;
    return fp;
}

static void flush_logfile(logfile *fp)
{
    int len, cnt, fd;
    char *bp;

    len = fp->pos;
    if (len == 0)
        return;
    fp->pos = 0;
    fd = fp->fd;
    if (fd >= 0) {
        fp->size += len;
        bp = fp->buf;
        do {
            cnt = write(fd, bp, len);
            if (cnt <= 0) {
                printf("### nsTraceMalloc: write failed or wrote 0 bytes!\n");
                return;
            }
            bp += cnt;
            len -= cnt;
        } while (len > 0);
    }
    fp->simsize += len;
}

static void log_byte(logfile *fp, char byte)
{
    if (fp->pos == fp->bufsize)
        flush_logfile(fp);
    fp->buf[fp->pos++] = byte;
}

static void log_string(logfile *fp, const char *str)
{
    int len, rem, cnt;

    len = strlen(str) + 1; /* include null terminator */
    while ((rem = fp->pos + len - fp->bufsize) > 0) {
        cnt = len - rem;
        memcpy(&fp->buf[fp->pos], str, cnt);
        str += cnt;
        fp->pos += cnt;
        flush_logfile(fp);
        len = rem;
    }
    memcpy(&fp->buf[fp->pos], str, len);
    fp->pos += len;
}

static void log_filename(logfile* fp, const char* filename)
{
    if (strlen(filename) < 512) {
        char *bp, *cp, buf[512];

        bp = strstr(strcpy(buf, filename), "mozilla");
        if (!bp)
            bp = buf;

        for (cp = bp; *cp; cp++) {
            if (*cp == '\\')
                *cp = '/';
        }

        filename = bp;
    }
    log_string(fp, filename);
}

static void log_uint32(logfile *fp, uint32_t ival)
{
    if (ival < 0x80) {
        /* 0xxx xxxx */
        log_byte(fp, (char) ival);
    } else if (ival < 0x4000) {
        /* 10xx xxxx xxxx xxxx */
        log_byte(fp, (char) ((ival >> 8) | 0x80));
        log_byte(fp, (char) (ival & 0xff));
    } else if (ival < 0x200000) {
        /* 110x xxxx xxxx xxxx xxxx xxxx */
        log_byte(fp, (char) ((ival >> 16) | 0xc0));
        log_byte(fp, (char) ((ival >> 8) & 0xff));
        log_byte(fp, (char) (ival & 0xff));
    } else if (ival < 0x10000000) {
        /* 1110 xxxx xxxx xxxx xxxx xxxx xxxx xxxx */
        log_byte(fp, (char) ((ival >> 24) | 0xe0));
        log_byte(fp, (char) ((ival >> 16) & 0xff));
        log_byte(fp, (char) ((ival >> 8) & 0xff));
        log_byte(fp, (char) (ival & 0xff));
    } else {
        /* 1111 0000 xxxx xxxx xxxx xxxx xxxx xxxx xxxx xxxx */
        log_byte(fp, (char) 0xf0);
        log_byte(fp, (char) ((ival >> 24) & 0xff));
        log_byte(fp, (char) ((ival >> 16) & 0xff));
        log_byte(fp, (char) ((ival >> 8) & 0xff));
        log_byte(fp, (char) (ival & 0xff));
    }
}

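/*
 * Worked example of the variable-length encoding above: 0x1234 is in
 * the two-byte range (< 0x4000), so it is written as
 * ((0x1234 >> 8) | 0x80) = 0x92 followed by (0x1234 & 0xff) = 0x34;
 * small values like 0x42 fit in a single byte, while anything
 * >= 0x10000000 costs five bytes (0xf0 plus the four raw bytes).
 */
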
static void log_event1(logfile *fp, char event, uint32_t serial)
{
    log_byte(fp, event);
    log_uint32(fp, (uint32_t) serial);
}

static void log_event2(logfile *fp, char event, uint32_t serial, size_t size)
{
    log_event1(fp, event, serial);
    log_uint32(fp, (uint32_t) size);
}

static void log_event3(logfile *fp, char event, uint32_t serial, size_t oldsize,
                       size_t size)
{
    log_event2(fp, event, serial, oldsize);
    log_uint32(fp, (uint32_t) size);
}

static void log_event4(logfile *fp, char event, uint32_t serial, uint32_t ui2,
                       uint32_t ui3, uint32_t ui4)
{
    log_event3(fp, event, serial, ui2, ui3);
    log_uint32(fp, ui4);
}

static void log_event5(logfile *fp, char event, uint32_t serial, uint32_t ui2,
                       uint32_t ui3, uint32_t ui4, uint32_t ui5)
{
    log_event4(fp, event, serial, ui2, ui3, ui4);
    log_uint32(fp, ui5);
}

static void log_event6(logfile *fp, char event, uint32_t serial, uint32_t ui2,
                       uint32_t ui3, uint32_t ui4, uint32_t ui5, uint32_t ui6)
{
    log_event5(fp, event, serial, ui2, ui3, ui4, ui5);
    log_uint32(fp, ui6);
}

static void log_event7(logfile *fp, char event, uint32_t serial, uint32_t ui2,
                       uint32_t ui3, uint32_t ui4, uint32_t ui5, uint32_t ui6,
                       uint32_t ui7)
{
    log_event6(fp, event, serial, ui2, ui3, ui4, ui5, ui6);
    log_uint32(fp, ui7);
}

static void log_event8(logfile *fp, char event, uint32_t serial, uint32_t ui2,
                       uint32_t ui3, uint32_t ui4, uint32_t ui5, uint32_t ui6,
                       uint32_t ui7, uint32_t ui8)
{
    log_event7(fp, event, serial, ui2, ui3, ui4, ui5, ui6, ui7);
    log_uint32(fp, ui8);
}

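/*
 * An event record is thus a single event byte followed by N varint
 * fields.  For example, assuming the TM_EVENT_MALLOC event tag from
 * nsTraceMalloc.h, a malloc record carrying a serial number and a size
 * would be emitted as:
 *
 *     log_event2(fp, TM_EVENT_MALLOC, serial, size);
 */
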
typedef struct callsite callsite;

struct callsite {
    void*       pc;
    uint32_t    serial;
    lfd_set     lfdset;
    const char  *name;    /* pointer to string owned by methods table */
    const char  *library; /* pointer to string owned by libraries table */
    int         offset;
    callsite    *parent;
    callsite    *siblings;
    callsite    *kids;
};

/* NB: these counters are incremented and decremented only within tmlock. */
static uint32_t library_serial_generator = 0;
static uint32_t method_serial_generator = 0;
static uint32_t callsite_serial_generator = 0;
static uint32_t tmstats_serial_generator = 0;
static uint32_t filename_serial_generator = 0;

/* Root of the tree of callsites, the sum of all (cycle-compressed) stacks. */
static callsite calltree_root =
  {0, 0, LFD_SET_STATIC_INITIALIZER, NULL, NULL, 0, NULL, NULL, NULL};

/* A fake pc for when stacks are disabled; must be different from the
   pc in calltree_root. */
#define STACK_DISABLED_PC ((void*)1)

/* Basic instrumentation. */
static nsTMStats tmstats = NS_TMSTATS_STATIC_INITIALIZER;

/* Parent with the most kids (tmstats.calltree_maxkids). */
static callsite *calltree_maxkids_parent;

/* Calltree leaf for path with deepest stack backtrace. */
static callsite *calltree_maxstack_top;

/* Last site (i.e., calling pc) that recurred during a backtrace. */
static callsite *last_callsite_recurrence;

static void log_tmstats(logfile *fp)
{
    log_event1(fp, TM_EVENT_STATS, ++tmstats_serial_generator);
    log_uint32(fp, tmstats.calltree_maxstack);
    log_uint32(fp, tmstats.calltree_maxdepth);
    log_uint32(fp, tmstats.calltree_parents);
    log_uint32(fp, tmstats.calltree_maxkids);
    log_uint32(fp, tmstats.calltree_kidhits);
    log_uint32(fp, tmstats.calltree_kidmisses);
    log_uint32(fp, tmstats.calltree_kidsteps);
    log_uint32(fp, tmstats.callsite_recurrences);
    log_uint32(fp, tmstats.backtrace_calls);
    log_uint32(fp, tmstats.backtrace_failures);
    log_uint32(fp, tmstats.btmalloc_failures);
    log_uint32(fp, tmstats.dladdr_failures);
    log_uint32(fp, tmstats.malloc_calls);
    log_uint32(fp, tmstats.malloc_failures);
    log_uint32(fp, tmstats.calloc_calls);
    log_uint32(fp, tmstats.calloc_failures);
    log_uint32(fp, tmstats.realloc_calls);
    log_uint32(fp, tmstats.realloc_failures);
    log_uint32(fp, tmstats.free_calls);
    log_uint32(fp, tmstats.null_free_calls);
    log_uint32(fp, calltree_maxkids_parent ? calltree_maxkids_parent->serial
                                           : 0);
    log_uint32(fp, calltree_maxstack_top ? calltree_maxstack_top->serial : 0);
}

static void *generic_alloctable(void *pool, size_t size)
{
    return __libc_malloc(size);
}

static void generic_freetable(void *pool, void *item)
{
    __libc_free(item);
}

typedef struct lfdset_entry {
    PLHashEntry base;
    lfd_set     lfdset;
} lfdset_entry;

static PLHashEntry *lfdset_allocentry(void *pool, const void *key)
{
    lfdset_entry *le = __libc_malloc(sizeof *le);
    if (le)
        LFD_ZERO(&le->lfdset);
    return &le->base;
}

static void lfdset_freeentry(void *pool, PLHashEntry *he, unsigned flag)
{
    lfdset_entry *le;

    if (flag != HT_FREE_ENTRY)
        return;
    le = (lfdset_entry*) he;
    __libc_free((void*) le);
}

static PLHashAllocOps lfdset_hashallocops = {
    generic_alloctable, generic_freetable,
    lfdset_allocentry,  lfdset_freeentry
};

/* Table of library pathnames mapped to logged 'L' record serial numbers. */
static PLHashTable *libraries = NULL;

/* Table of filename pathnames mapped to logged 'G' record serial numbers. */
static PLHashTable *filenames = NULL;

/* Table mapping method names to logged 'N' record serial numbers. */
static PLHashTable *methods = NULL;

/*
 * Presumes that its caller is holding tmlock, but may temporarily exit
 * the lock.
 */
static callsite *
calltree(void **stack, size_t num_stack_entries, tm_thread *t)
{
    logfile *fp = logfp;
    void *pc;
    uint32_t nkids;
    callsite *parent, *site, **csp, *tmp;
    int maxstack;
    uint32_t library_serial, method_serial, filename_serial;
    const char *library, *method, *filename;
    char *slash;
    PLHashNumber hash;
    PLHashEntry **hep, *he;
    lfdset_entry *le;
    size_t stack_index;
    nsCodeAddressDetails details;
    nsresult rv;

    maxstack = (num_stack_entries > tmstats.calltree_maxstack);
    if (maxstack) {
        /* these two are the same, although that used to be less clear */
        tmstats.calltree_maxstack = num_stack_entries;
        tmstats.calltree_maxdepth = num_stack_entries;
    }

    /* Reverse the stack again, finding and building a path in the tree. */
    parent = &calltree_root;
    stack_index = num_stack_entries;
    do {
        --stack_index;
        pc = stack[stack_index];

        csp = &parent->kids;
        while ((site = *csp) != NULL) {
            if (site->pc == pc) {
                tmstats.calltree_kidhits++;

                /* Put the most recently used site at the front of siblings. */
                *csp = site->siblings;
                site->siblings = parent->kids;
                parent->kids = site;

                /* Check whether we've logged for this site and logfile yet. */
                if (!LFD_TEST(fp->lfd, &site->lfdset)) {
                    /*
                     * Some other logfile put this site in the calltree.  We
                     * must log an event for site, and possibly first for its
                     * method and/or library.  Note the code after the while
                     * loop that tests if (!site).
                     */
                    break;
                }

                /* Site already built and logged to fp -- go up the stack. */
                goto upward;
            }
            tmstats.calltree_kidsteps++;
            csp = &site->siblings;
        }

        if (!site) {
            tmstats.calltree_kidmisses++;

            /* Check for recursion: see if pc is on our ancestor line. */
            for (site = parent; site; site = site->parent) {
                if (site->pc == pc) {
                    tmstats.callsite_recurrences++;
                    last_callsite_recurrence = site;
                    goto upward;
                }
            }
        }

        /*
         * Not in tree at all, or not logged to fp: let's find our symbolic
         * callsite info.
         */

        if (!stacks_enabled) {
            /*
             * Fake the necessary information for our single fake stack
             * frame.
             */
            PL_strncpyz(details.library, "stacks_disabled",
                        sizeof(details.library));
            details.loffset = 0;
            details.filename[0] = '\0';
            details.lineno = 0;
            details.function[0] = '\0';
            details.foffset = 0;
        } else {
            /*
             * NS_DescribeCodeAddress can (on Linux) acquire a lock inside
             * the shared library loader.  Another thread might call malloc
             * while holding that lock (when loading a shared library).  So
             * we have to exit tmlock around this call.  For details, see
             * https://bugzilla.mozilla.org/show_bug.cgi?id=363334#c3
             *
             * We could be more efficient by building the nodes in the
             * calltree, exiting the monitor once to describe all of them,
             * and then filling in the descriptions for any that hadn't been
             * described already.  But this is easier for now.
             */
            TM_EXIT_LOCK(t);
            rv = NS_DescribeCodeAddress(pc, &details);
            TM_ENTER_LOCK(t);
            if (NS_FAILED(rv)) {
                tmstats.dladdr_failures++;
                goto fail;
            }
        }

        /* Check whether we need to emit a library trace record. */
        library_serial = 0;
        library = NULL;
        if (details.library[0]) {
            if (!libraries) {
                libraries = PL_NewHashTable(100, PL_HashString,
                                            PL_CompareStrings, PL_CompareValues,
                                            &lfdset_hashallocops, NULL);
                if (!libraries) {
                    tmstats.btmalloc_failures++;
                    goto fail;
                }
            }
            hash = PL_HashString(details.library);
            hep = PL_HashTableRawLookup(libraries, hash, details.library);
            he = *hep;
            if (he) {
                library = (char*) he->key;
                library_serial = (uint32_t) NS_PTR_TO_INT32(he->value);
                le = (lfdset_entry *) he;
                if (LFD_TEST(fp->lfd, &le->lfdset)) {
                    /* We already logged an event on fp for this library. */
                    le = NULL;
                }
            } else {
                library = strdup(details.library);
                if (library) {
                    library_serial = ++library_serial_generator;
                    he = PL_HashTableRawAdd(libraries, hep, hash, library,
                                            NS_INT32_TO_PTR(library_serial));
                }
                if (!he) {
                    tmstats.btmalloc_failures++;
                    goto fail;
                }
                le = (lfdset_entry *) he;
            }
            if (le) {
                /* Need to log an event to fp for this lib. */
                slash = strrchr(library, '/');
                log_event1(fp, TM_EVENT_LIBRARY, library_serial);
                log_string(fp, slash ? slash + 1 : library);
                LFD_SET(fp->lfd, &le->lfdset);
            }
        }

        /* For compatibility with current log format, always emit a
         * filename trace record, using "noname" / 0 when no file name
         * is available. */
        filename_serial = 0;
        filename = details.filename[0] ? details.filename : "noname";
        if (!filenames) {
            filenames = PL_NewHashTable(100, PL_HashString,
                                        PL_CompareStrings, PL_CompareValues,
                                        &lfdset_hashallocops, NULL);
            if (!filenames) {
                tmstats.btmalloc_failures++;
                return NULL;
            }
        }
        hash = PL_HashString(filename);
        hep = PL_HashTableRawLookup(filenames, hash, filename);
        he = *hep;
        if (he) {
            filename = (char*) he->key;
            filename_serial = (uint32_t) NS_PTR_TO_INT32(he->value);
            le = (lfdset_entry *) he;
            if (LFD_TEST(fp->lfd, &le->lfdset)) {
                /* We already logged an event on fp for this filename. */
                le = NULL;
            }
        } else {
            filename = strdup(filename);
            if (filename) {
                filename_serial = ++filename_serial_generator;
                he = PL_HashTableRawAdd(filenames, hep, hash, filename,
                                        NS_INT32_TO_PTR(filename_serial));
            }
            if (!he) {
                tmstats.btmalloc_failures++;
                return NULL;
            }
            le = (lfdset_entry *) he;
        }
        if (le) {
            /* Need to log an event to fp for this filename. */
            log_event1(fp, TM_EVENT_FILENAME, filename_serial);
            log_filename(fp, filename);
            LFD_SET(fp->lfd, &le->lfdset);
        }

        if (!details.function[0]) {
            PR_snprintf(details.function, sizeof(details.function),
                        "%s+%X", library ? library : "main", details.loffset);
        }

        /* Emit an 'N' (for New method, 'M' is for malloc!) event if needed. */
        method_serial = 0;
        if (!methods) {
            methods = PL_NewHashTable(10000, PL_HashString,
                                      PL_CompareStrings, PL_CompareValues,
                                      &lfdset_hashallocops, NULL);
            if (!methods) {
                tmstats.btmalloc_failures++;
                goto fail;
            }
        }
        hash = PL_HashString(details.function);
        hep = PL_HashTableRawLookup(methods, hash, details.function);
        he = *hep;
        if (he) {
            method = (char*) he->key;
            method_serial = (uint32_t) NS_PTR_TO_INT32(he->value);
            le = (lfdset_entry *) he;
            if (LFD_TEST(fp->lfd, &le->lfdset)) {
                /* We already logged an event on fp for this method. */
                le = NULL;
            }
        } else {
            method = strdup(details.function);
            if (method) {
                method_serial = ++method_serial_generator;
                he = PL_HashTableRawAdd(methods, hep, hash, method,
                                        NS_INT32_TO_PTR(method_serial));
            }
            if (!he) {
                tmstats.btmalloc_failures++;
                return NULL;
            }
            le = (lfdset_entry *) he;
        }
        if (le) {
            log_event4(fp, TM_EVENT_METHOD, method_serial, library_serial,
                       filename_serial, details.lineno);
            log_string(fp, method);
            LFD_SET(fp->lfd, &le->lfdset);
        }

        /* Create a new callsite record. */
        if (!site) {
            site = __libc_malloc(sizeof(callsite));
            if (!site) {
                tmstats.btmalloc_failures++;
                goto fail;
            }

            /* Update parent and max-kids-per-parent stats. */
            if (!parent->kids)
                tmstats.calltree_parents++;
            nkids = 1;
            for (tmp = parent->kids; tmp; tmp = tmp->siblings)
                nkids++;
            if (nkids > tmstats.calltree_maxkids) {
                tmstats.calltree_maxkids = nkids;
                calltree_maxkids_parent = parent;
            }

            /* Insert the new site into the tree. */
            site->pc = pc;
            site->serial = ++callsite_serial_generator;
            LFD_ZERO(&site->lfdset);
            site->name = method;
            site->library = library;
            site->offset = details.loffset;
            site->parent = parent;
            site->siblings = parent->kids;
            parent->kids = site;
            site->kids = NULL;
        }

        /* Log the site with its parent, method, and offset. */
        log_event4(fp, TM_EVENT_CALLSITE, site->serial, parent->serial,
                   method_serial, details.foffset);
        LFD_SET(fp->lfd, &site->lfdset);

      upward:
        parent = site;
    } while (stack_index > 0);

    if (maxstack)
        calltree_maxstack_top = site;

    return site;

  fail:
    return NULL;
}

/*
 * Buffer the stack from top at low index to bottom at high, so that we can
 * reverse it in calltree.
 */
static void
stack_callback(void *pc, void *sp, void *closure)
{
    stack_buffer_info *info = (stack_buffer_info*) closure;

    /*
     * If we run out of buffer, keep incrementing entries so that
     * backtrace can call us again with a bigger buffer.
     */
    if (info->entries < info->size)
        info->buffer[info->entries] = pc;
    ++info->entries;
}

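/*
 * Example of the overflow contract above: with info->size == 4 and a
 * stack nine frames deep, the first walk stores frames 0-3 and leaves
 * info->entries == 9; backtrace() below sees entries > size, grows the
 * buffer to 2 * 9 entries, and walks the stack a second time.
 */
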
/*
 * The caller MUST NOT be holding tmlock when calling backtrace.
 * On return, if *immediate_abort is set, then the return value is NULL
 * and the thread is in a very dangerous situation (e.g. holding
 * sem_pool_lock in Mac OS X pthreads); the caller should bail out
 * without doing anything (such as acquiring locks).
 */
static callsite *
backtrace(tm_thread *t, int skipFrames, int *immediate_abort)
{
    callsite *site;
    stack_buffer_info *info = &t->backtrace_buf;
    void **new_stack_buffer;
    size_t new_stack_buffer_size;
    nsresult rv;

    t->suppress_tracing++;

    if (!stacks_enabled) {
#if defined(XP_MACOSX)
        /* Walk the stack, even if stacks_enabled is false. We do this to
           check if we must set immediate_abort. */
        info->entries = 0;
        rv = NS_StackWalk(stack_callback, skipFrames, /* maxFrames */ 0, info,
                          0, NULL);
        *immediate_abort = rv == NS_ERROR_UNEXPECTED;
        if (rv == NS_ERROR_UNEXPECTED || info->entries == 0) {
            t->suppress_tracing--;
            return NULL;
        }
#endif

        /*
         * Create a single fake stack frame so that all the tools get
         * data in the correct format.
         */
        *immediate_abort = 0;
        if (info->size < 1) {
            PR_ASSERT(!info->buffer); /* !info->size == !info->buffer */
            info->buffer = __libc_malloc(1 * sizeof(void*));
            if (!info->buffer)
                return NULL;
            info->size = 1;
        }

        info->entries = 1;
        info->buffer[0] = STACK_DISABLED_PC;
    } else {
        /*
         * NS_StackWalk can (on Windows) acquire a lock in the shared library
         * loader.  Another thread might call malloc while holding that lock
         * (when loading a shared library).  So we can't be in tmlock during
         * this call.  For details, see
         * https://bugzilla.mozilla.org/show_bug.cgi?id=374829#c8
         */

        /*
         * skipFrames == 0 means |backtrace| should show up, so don't use
         * skipFrames + 1.
         * NB: this call is repeated below if the buffer is too small.
         */
        info->entries = 0;
        rv = NS_StackWalk(stack_callback, skipFrames, /* maxFrames */ 0, info,
                          0, NULL);
        *immediate_abort = rv == NS_ERROR_UNEXPECTED;
        if (rv == NS_ERROR_UNEXPECTED || info->entries == 0) {
            t->suppress_tracing--;
            return NULL;
        }

        /*
         * To avoid allocating in stack_callback (which, on Windows, is
         * called on a different thread from the one we're running on here),
         * reallocate here if it didn't have a big enough buffer (which
         * includes the first call on any thread), and call it again.
         */
        if (info->entries > info->size) {
            new_stack_buffer_size = 2 * info->entries;
            new_stack_buffer = __libc_realloc(info->buffer,
                                   new_stack_buffer_size * sizeof(void*));
            if (!new_stack_buffer)
                return NULL;
            info->buffer = new_stack_buffer;
            info->size = new_stack_buffer_size;

            /* and call NS_StackWalk again */
            info->entries = 0;
            NS_StackWalk(stack_callback, skipFrames, /* maxFrames */ 0, info,
                         0, NULL);

            /* same stack */
            PR_ASSERT(info->entries * 2 == new_stack_buffer_size);
        }
    }

    TM_ENTER_LOCK(t);

    site = calltree(info->buffer, info->entries, t);

    tmstats.backtrace_calls++;
    if (!site) {
        tmstats.backtrace_failures++;
        PR_ASSERT(tmstats.backtrace_failures < 100);
    }
    TM_EXIT_LOCK(t);

    t->suppress_tracing--;
    return site;
}

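/*
 * A sketch of the calling pattern the comment above implies (the real
 * callers are the *Callback functions declared in
 * nsTraceMallocCallbacks.h):
 *
 *     int immediate_abort;
 *     callsite *site = backtrace(t, 1, &immediate_abort);
 *     if (immediate_abort)
 *         return;        (bail out: no locks, no heap access)
 */
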
typedef struct allocation {
    PLHashEntry entry;
    size_t      size;
    FILE        *trackfp;       /* for allocation tracking */
} allocation;

#define ALLOC_HEAP_SIZE 150000

static allocation alloc_heap[ALLOC_HEAP_SIZE];
static allocation *alloc_freelist = NULL;
static int alloc_heap_initialized = 0;

static PLHashEntry *alloc_allocentry(void *pool, const void *key)
{
    allocation **listp, *alloc;
    int n;

    if (!alloc_heap_initialized) {
        n = ALLOC_HEAP_SIZE;
        listp = &alloc_freelist;
        for (alloc = alloc_heap; --n >= 0; alloc++) {
            *listp = alloc;
            listp = (allocation**) &alloc->entry.next;
        }
        *listp = NULL;
        alloc_heap_initialized = 1;
    }

    listp = &alloc_freelist;
    alloc = *listp;
    if (!alloc)
        return __libc_malloc(sizeof(allocation));
    *listp = (allocation*) alloc->entry.next;
    return &alloc->entry;
}

static void alloc_freeentry(void *pool, PLHashEntry *he, unsigned flag)
{
    allocation *alloc;

    if (flag != HT_FREE_ENTRY)
        return;
    alloc = (allocation*) he;
    if ((ptrdiff_t)(alloc - alloc_heap) < (ptrdiff_t)ALLOC_HEAP_SIZE) {
        alloc->entry.next = &alloc_freelist->entry;
        alloc_freelist = alloc;
    } else {
        __libc_free((void*) alloc);
    }
}

static PLHashAllocOps alloc_hashallocops = {
    generic_alloctable, generic_freetable,
    alloc_allocentry,   alloc_freeentry
};

static PLHashNumber hash_pointer(const void *key)
{
    return (PLHashNumber) (uintptr_t) key;
}

static PLHashTable *allocations = NULL;

static PLHashTable *new_allocations(void)
{
    allocations = PL_NewHashTable(200000, hash_pointer,
                                  PL_CompareValues, PL_CompareValues,
                                  &alloc_hashallocops, NULL);
    return allocations;
}

#define get_allocations() (allocations ? allocations : new_allocations())

#if defined(XP_MACOSX)

/* from malloc.c in Libc */
typedef void
malloc_logger_t(uint32_t type, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3,
                uintptr_t result, uint32_t num_hot_frames_to_skip);

extern malloc_logger_t *malloc_logger;

#define MALLOC_LOG_TYPE_ALLOCATE        2
#define MALLOC_LOG_TYPE_DEALLOCATE      4
#define MALLOC_LOG_TYPE_HAS_ZONE        8
#define MALLOC_LOG_TYPE_CLEARED         64

static void
my_malloc_logger(uint32_t type, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3,
                 uintptr_t result, uint32_t num_hot_frames_to_skip)
{
    uintptr_t all_args[3] = { arg1, arg2, arg3 };
    uintptr_t *args = all_args + ((type & MALLOC_LOG_TYPE_HAS_ZONE) ? 1 : 0);

    uint32_t alloc_type =
        type & (MALLOC_LOG_TYPE_ALLOCATE | MALLOC_LOG_TYPE_DEALLOCATE);
    tm_thread *t = tm_get_thread();

    if (alloc_type == (MALLOC_LOG_TYPE_ALLOCATE | MALLOC_LOG_TYPE_DEALLOCATE)) {
        ReallocCallback((void*)args[0], (void*)result, args[1], 0, 0, t);
    } else if (alloc_type == MALLOC_LOG_TYPE_ALLOCATE) {
        /*
         * We don't get size/count information for calloc, so just use
         * MallocCallback.
         */
        MallocCallback((void*)result, args[0], 0, 0, t);
    } else if (alloc_type == MALLOC_LOG_TYPE_DEALLOCATE) {
        FreeCallback((void*)args[0], 0, 0, t);
    }
}

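/*
 * Worked example of the decoding above: a malloc(16) returning p
 * arrives as type = MALLOC_LOG_TYPE_ALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE,
 * arg1 = the malloc zone, arg2 = 16, result = p.  Because HAS_ZONE is
 * set, args is advanced past the zone, so args[0] is the size handed to
 * MallocCallback.  A realloc sets both ALLOCATE and DEALLOCATE, with
 * args[0] the old pointer and args[1] the new size.
 */
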
static void
StartupHooker(void)
{
    PR_ASSERT(!malloc_logger);
    malloc_logger = my_malloc_logger;
}

static void
ShutdownHooker(void)
{
    PR_ASSERT(malloc_logger == my_malloc_logger);
    malloc_logger = NULL;
}

#elif defined(XP_UNIX)

/*
 * We can't use glibc's malloc hooks because they can't be used in a
 * threadsafe manner.  They require unsetting the hooks to call into the
 * original malloc implementation, and then resetting them when the
 * original implementation returns.  If another thread calls the same
 * allocation function while the hooks are unset, we have no chance to
 * intercept the call.
 */

NS_EXTERNAL_VIS_(__ptr_t)
malloc(size_t size)
{
    uint32_t start, end;
    __ptr_t ptr;
    tm_thread *t;

    if (!tracing_enabled || !PR_Initialized() ||
        (t = tm_get_thread())->suppress_tracing != 0) {
        return __libc_malloc(size);
    }

    t->suppress_tracing++;
    start = PR_IntervalNow();
    ptr = __libc_malloc(size);
    end = PR_IntervalNow();
    t->suppress_tracing--;

    MallocCallback(ptr, size, start, end, t);

    return ptr;
}

NS_EXTERNAL_VIS_(__ptr_t)
calloc(size_t count, size_t size)
{
    uint32_t start, end;
    __ptr_t ptr;
    tm_thread *t;

    if (!tracing_enabled || !PR_Initialized() ||
        (t = tm_get_thread())->suppress_tracing != 0) {
        return __libc_calloc(count, size);
    }

    t->suppress_tracing++;
    start = PR_IntervalNow();
    ptr = __libc_calloc(count, size);
    end = PR_IntervalNow();
    t->suppress_tracing--;

    CallocCallback(ptr, count, size, start, end, t);

    return ptr;
}

NS_EXTERNAL_VIS_(__ptr_t)
realloc(__ptr_t oldptr, size_t size)
{
    uint32_t start, end;
    __ptr_t ptr;
    tm_thread *t;

    if (!tracing_enabled || !PR_Initialized() ||
        (t = tm_get_thread())->suppress_tracing != 0) {
        return __libc_realloc(oldptr, size);
    }

    t->suppress_tracing++;
    start = PR_IntervalNow();
    ptr = __libc_realloc(oldptr, size);
    end = PR_IntervalNow();
    t->suppress_tracing--;

    /* FIXME bug 392008: We could race with reallocation of oldptr. */
    ReallocCallback(oldptr, ptr, size, start, end, t);

    return ptr;
}

NS_EXTERNAL_VIS_(void*)
valloc(size_t size)
{
    uint32_t start, end;
    __ptr_t ptr;
    tm_thread *t;

    if (!tracing_enabled || !PR_Initialized() ||
        (t = tm_get_thread())->suppress_tracing != 0) {
        return __libc_valloc(size);
    }

    t->suppress_tracing++;
    start = PR_IntervalNow();
    ptr = __libc_valloc(size);
    end = PR_IntervalNow();
    t->suppress_tracing--;

    MallocCallback(ptr, size, start, end, t);

    return ptr;
}

NS_EXTERNAL_VIS_(void*)
memalign(size_t boundary, size_t size)
{
    uint32_t start, end;
    __ptr_t ptr;
    tm_thread *t;

    if (!tracing_enabled || !PR_Initialized() ||
        (t = tm_get_thread())->suppress_tracing != 0) {
        return __libc_memalign(boundary, size);
    }

    t->suppress_tracing++;
    start = PR_IntervalNow();
    ptr = __libc_memalign(boundary, size);
    end = PR_IntervalNow();
    t->suppress_tracing--;

    MallocCallback(ptr, size, start, end, t);

    return ptr;
}

NS_EXTERNAL_VIS_(int)
posix_memalign(void **memptr, size_t alignment, size_t size)
{
    __ptr_t ptr = memalign(alignment, size);
    if (!ptr)
        return ENOMEM;
    *memptr = ptr;
    return 0;
}

NS_EXTERNAL_VIS_(void)
free(__ptr_t ptr)
{
    uint32_t start, end;
    tm_thread *t;

    if (!tracing_enabled || !PR_Initialized() ||
        (t = tm_get_thread())->suppress_tracing != 0) {
        __libc_free(ptr);
        return;
    }

    t->suppress_tracing++;
    start = PR_IntervalNow();
    __libc_free(ptr);
    end = PR_IntervalNow();
    t->suppress_tracing--;

    /* FIXME bug 392008: We could race with reallocation of ptr. */

    FreeCallback(ptr, start, end, t);
}

NS_EXTERNAL_VIS_(void)
cfree(void *ptr)
{
    free(ptr);
}

#define StartupHooker()                 PR_BEGIN_MACRO PR_END_MACRO
#define ShutdownHooker()                PR_BEGIN_MACRO PR_END_MACRO

#elif defined(XP_WIN32)

/* See nsWinTraceMalloc.cpp. */

#endif

static const char magic[] = NS_TRACE_MALLOC_MAGIC;

static void
log_header(int logfd)
{
    uint32_t ticksPerSec = PR_htonl(PR_TicksPerSecond());
    (void) write(logfd, magic, NS_TRACE_MALLOC_MAGIC_SIZE);
    (void) write(logfd, &ticksPerSec, sizeof ticksPerSec);
}

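/*
 * A log therefore begins with NS_TRACE_MALLOC_MAGIC_SIZE magic bytes
 * followed by a 4-byte ticks-per-second value in network (big-endian)
 * byte order, courtesy of PR_htonl; everything after that is the
 * event-byte-plus-varints record stream built by the log_event*
 * helpers above.
 */
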
PR_IMPLEMENT(void)
NS_TraceMallocStartup(int logfd)
{
    const char* stack_disable_env;

    /* We must be running on the primordial thread. */
    PR_ASSERT(tracing_enabled == 0);
    PR_ASSERT(logfp == &default_logfile);
    tracing_enabled = (logfd >= 0);

    if (logfd >= 3)
        MozillaRegisterDebugFD(logfd);

    /* Stacks are disabled if this env var is set to a non-empty value. */
    stack_disable_env = PR_GetEnv("NS_TRACE_MALLOC_DISABLE_STACKS");
    stacks_enabled = !stack_disable_env || !*stack_disable_env;

    if (tracing_enabled) {
        PR_ASSERT(logfp->simsize == 0); /* didn't overflow startup buffer */

        /* Log everything in logfp (aka default_logfile)'s buffer to logfd. */
        logfp->fd = logfd;
        logfile_list = &default_logfile;
        logfp->prevp = &logfile_list;
        logfile_tail = &logfp->next;
        log_header(logfd);
    }

    RegisterTraceMallocShutdown();

    tmlock = PR_NewLock();
    (void) tm_get_thread(); /* ensure index initialization while it's easy */

    if (tracing_enabled)
        StartupHooker();
}

/*
 * Options for log files, with the log file name either as the next option
 * or separated by '=' (e.g. "./mozilla --trace-malloc malloc.log" or
 * "./mozilla --trace-malloc=malloc.log").
 */
static const char TMLOG_OPTION[] = "--trace-malloc";
static const char SDLOG_OPTION[] = "--shutdown-leaks";

#define SHOULD_PARSE_ARG(name_, log_, arg_) \
    (0 == strncmp(arg_, name_, sizeof(name_) - 1))

#define PARSE_ARG(name_, log_, argv_, i_, consumed_)                          \
    PR_BEGIN_MACRO                                                            \
        char _nextchar = argv_[i_][sizeof(name_) - 1];                        \
        if (_nextchar == '=') {                                               \
            log_ = argv_[i_] + sizeof(name_);                                 \
            consumed_ = 1;                                                    \
        } else if (_nextchar == '\0') {                                       \
            log_ = argv_[i_+1];                                               \
            consumed_ = 2;                                                    \
        }                                                                     \
    PR_END_MACRO

  1.1392 +PR_IMPLEMENT(int)
  1.1393 +NS_TraceMallocStartupArgs(int argc, char **argv)
  1.1394 +{
  1.1395 +    int i, logfd = -1, consumed, logflags;
  1.1396 +    char *tmlogname = NULL, *sdlogname_local = NULL;
  1.1397 +
  1.1398 +    /*
  1.1399 +     * Look for the --trace-malloc <logfile> option early, to avoid missing
  1.1400 +     * early mallocs (we miss static constructors whose output overflows the
  1.1401 +     * log file's static 16K output buffer).
  1.1402 +     */
  1.1403 +    for (i = 1; i < argc; i += consumed) {
  1.1404 +        consumed = 0;
  1.1405 +        if (SHOULD_PARSE_ARG(TMLOG_OPTION, tmlogname, argv[i]))
  1.1406 +            PARSE_ARG(TMLOG_OPTION, tmlogname, argv, i, consumed);
  1.1407 +        else if (SHOULD_PARSE_ARG(SDLOG_OPTION, sdlogname_local, argv[i]))
  1.1408 +            PARSE_ARG(SDLOG_OPTION, sdlogname_local, argv, i, consumed);
  1.1409 +
  1.1410 +        if (consumed) {
  1.1411 +#ifndef XP_WIN32 /* Removing the args from argv crashes on Windows; skip it there. */
  1.1412 +            int j;
  1.1413 +            /* Now remove --trace-malloc and its argument from argv. */
  1.1414 +            argc -= consumed;
  1.1415 +            for (j = i; j < argc; ++j)
  1.1416 +                argv[j] = argv[j+consumed];
  1.1417 +            argv[argc] = NULL;
  1.1418 +            consumed = 0; /* don't advance next iteration */
  1.1419 +#endif
  1.1420 +        } else {
  1.1421 +            consumed = 1;
  1.1422 +        }
  1.1423 +    }
  1.1424 +
  1.1425 +    if (tmlogname) {
  1.1426 +#ifdef XP_UNIX
  1.1427 +        int pipefds[2];
  1.1428 +#endif
  1.1429 +
  1.1430 +        switch (*tmlogname) {
  1.1431 +#ifdef XP_UNIX
  1.1432 +          case '|':
  1.1433 +            if (pipe(pipefds) == 0) {
  1.1434 +                pid_t pid = fork();
  1.1435 +                if (pid == 0) {
  1.1436 +                    /* In child: set up stdin, parse args, and exec. */
  1.1437 +                    int maxargc, nargc;
  1.1438 +                    char **nargv, *token;
  1.1439 +
  1.1440 +                    if (pipefds[0] != 0) {
  1.1441 +                        dup2(pipefds[0], 0);
  1.1442 +                        close(pipefds[0]);
  1.1443 +                    }
  1.1444 +                    close(pipefds[1]);
  1.1445 +
  1.1446 +                    tmlogname = strtok(tmlogname + 1, " \t");
  1.1447 +                    maxargc = 3;
  1.1448 +                    nargv = (char **) malloc((maxargc+1) * sizeof(char *));
  1.1449 +                    if (!nargv) exit(1);
  1.1450 +                    nargc = 0;
  1.1451 +                    nargv[nargc++] = tmlogname;
  1.1452 +                    while ((token = strtok(NULL, " \t")) != NULL) {
  1.1453 +                        if (nargc == maxargc) {
  1.1454 +                            maxargc *= 2;
  1.1455 +                            nargv = (char**)
  1.1456 +                                realloc(nargv, (maxargc+1) * sizeof(char*));
  1.1457 +                            if (!nargv) exit(1);
  1.1458 +                        }
  1.1459 +                        nargv[nargc++] = token;
  1.1460 +                    }
  1.1461 +                    nargv[nargc] = NULL;
  1.1462 +
  1.1463 +                    (void) setsid();
  1.1464 +                    execvp(tmlogname, nargv);
  1.1465 +                    exit(127);
  1.1466 +                }
  1.1467 +
  1.1468 +                if (pid > 0) {
  1.1469 +                    /* In parent: set logfd to the pipe's write side. */
  1.1470 +                    close(pipefds[0]);
  1.1471 +                    logfd = pipefds[1];
  1.1472 +                }
  1.1473 +            }
  1.1474 +            if (logfd < 0) {
  1.1475 +                fprintf(stderr,
  1.1476 +                    "%s: can't pipe to trace-malloc child process %s: %s\n",
  1.1477 +                    argv[0], tmlogname, strerror(errno));
  1.1478 +                exit(1);
  1.1479 +            }
  1.1480 +            break;
  1.1481 +#endif /*XP_UNIX*/
  1.1482 +          case '-':
  1.1483 +            /* Don't log from startup, but do prepare to log later. */
  1.1484 +            /* XXX traditional meaning of '-' as option argument is "stdin" or "stdout" */
  1.1485 +            if (tmlogname[1] == '\0')
  1.1486 +                break;
  1.1487 +            /* FALL THROUGH */
  1.1488 +
  1.1489 +          default:
  1.1490 +            logflags = O_CREAT | O_WRONLY | O_TRUNC;
  1.1491 +#if defined(XP_WIN32)
  1.1492 +            /*
  1.1493 +             * Avoid translations on WIN32.
  1.1494 +             */
  1.1495 +            logflags |= O_BINARY;
  1.1496 +#endif
  1.1497 +            logfd = open(tmlogname, logflags, 0644);
  1.1498 +            if (logfd < 0) {
  1.1499 +                fprintf(stderr,
  1.1500 +                    "%s: can't create trace-malloc log named %s: %s\n",
  1.1501 +                    argv[0], tmlogname, strerror(errno));
  1.1502 +                exit(1);
  1.1503 +            }
  1.1504 +            break;
  1.1505 +        }
  1.1506 +    }
  1.1507 +
  1.1508 +    if (sdlogname_local) {
  1.1509 +        strncpy(sdlogname, sdlogname_local, sizeof(sdlogname));
  1.1510 +        sdlogname[sizeof(sdlogname) - 1] = '\0';
  1.1511 +    }
  1.1512 +
  1.1513 +    NS_TraceMallocStartup(logfd);
  1.1514 +    return argc;
  1.1515 +}
  1.1516 +
  1.1517 +PR_IMPLEMENT(PRBool)
  1.1518 +NS_TraceMallocHasStarted(void)
  1.1519 +{
  1.1520 +    return tmlock ? PR_TRUE : PR_FALSE;
  1.1521 +}
  1.1522 +
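         +/*
         + * Flush and close every log file, dumping shutdown leaks first if
         + * --shutdown-leaks was given and reporting any backtrace failures,
         + * then destroy the lock and run ShutdownHooker if still tracing.
         + */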
  1.1523 +PR_IMPLEMENT(void)
  1.1524 +NS_TraceMallocShutdown(void)
  1.1525 +{
  1.1526 +    logfile *fp;
  1.1527 +
  1.1528 +    if (sdlogname[0])
  1.1529 +        NS_TraceMallocDumpAllocations(sdlogname);
  1.1530 +
  1.1531 +    if (tmstats.backtrace_failures) {
  1.1532 +        fprintf(stderr,
  1.1533 +                "TraceMalloc backtrace failures: %lu (malloc %lu dladdr %lu)\n",
  1.1534 +                (unsigned long) tmstats.backtrace_failures,
  1.1535 +                (unsigned long) tmstats.btmalloc_failures,
  1.1536 +                (unsigned long) tmstats.dladdr_failures);
  1.1537 +    }
  1.1538 +    while ((fp = logfile_list) != NULL) {
  1.1539 +        logfile_list = fp->next;
  1.1540 +        log_tmstats(fp);
  1.1541 +        flush_logfile(fp);
  1.1542 +        if (fp->fd >= 0) {
  1.1543 +            MozillaUnRegisterDebugFD(fp->fd);
  1.1544 +            close(fp->fd);
  1.1545 +            fp->fd = -1;
  1.1546 +        }
  1.1547 +        if (fp != &default_logfile) {
  1.1548 +            if (fp == logfp)
  1.1549 +                logfp = &default_logfile;
  1.1550 +            free((void*) fp);
  1.1551 +        }
  1.1552 +    }
  1.1553 +    if (tmlock) {
  1.1554 +        PRLock *lock = tmlock;
  1.1555 +        tmlock = NULL;
  1.1556 +        PR_DestroyLock(lock);
  1.1557 +    }
  1.1558 +    if (tracing_enabled) {
  1.1559 +        tracing_enabled = 0;
  1.1560 +        ShutdownHooker();
  1.1561 +    }
  1.1562 +}
  1.1563 +
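         +/*
         + * tracing_enabled is a nesting count: NS_TraceMallocDisable flushes
         + * all logs and decrements it, running ShutdownHooker when it reaches
         + * zero; NS_TraceMallocEnable increments it, running StartupHooker
         + * again on the first re-enable.
         + */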
  1.1564 +PR_IMPLEMENT(void)
  1.1565 +NS_TraceMallocDisable(void)
  1.1566 +{
  1.1567 +    tm_thread *t = tm_get_thread();
  1.1568 +    logfile *fp;
  1.1569 +    uint32_t sample;
  1.1570 +
  1.1571 +    /* Robustify in case of duplicate call. */
  1.1572 +    PR_ASSERT(tracing_enabled);
  1.1573 +    if (tracing_enabled == 0)
  1.1574 +        return;
  1.1575 +
  1.1576 +    TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t);
  1.1577 +    for (fp = logfile_list; fp; fp = fp->next)
  1.1578 +        flush_logfile(fp);
  1.1579 +    sample = --tracing_enabled;
  1.1580 +    TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);
  1.1581 +    if (sample == 0)
  1.1582 +        ShutdownHooker();
  1.1583 +}
  1.1584 +
  1.1585 +PR_IMPLEMENT(void)
  1.1586 +NS_TraceMallocEnable(void)
  1.1587 +{
  1.1588 +    tm_thread *t = tm_get_thread();
  1.1589 +    uint32_t sample;
  1.1590 +
  1.1591 +    TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t);
  1.1592 +    sample = ++tracing_enabled;
  1.1593 +    TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);
  1.1594 +    if (sample == 1)
  1.1595 +        StartupHooker();
  1.1596 +}
  1.1597 +
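         +/*
         + * Redirect subsequent events to fd, writing a fresh header if the file
         + * is empty.  Returns the previous log fd, or -2 if no logfile could be
         + * allocated for fd.
         + */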
  1.1598 +PR_IMPLEMENT(int)
  1.1599 +NS_TraceMallocChangeLogFD(int fd)
  1.1600 +{
  1.1601 +    logfile *oldfp, *fp;
  1.1602 +    struct stat sb;
  1.1603 +    tm_thread *t = tm_get_thread();
  1.1604 +
  1.1605 +    TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t);
  1.1606 +    oldfp = logfp;
  1.1607 +    if (oldfp->fd != fd) {
  1.1608 +        flush_logfile(oldfp);
  1.1609 +        fp = get_logfile(fd);
  1.1610 +        if (!fp) {
  1.1611 +            TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);
  1.1612 +            return -2;
  1.1613 +        }
  1.1614 +        if (fd >= 0 && fstat(fd, &sb) == 0 && sb.st_size == 0)
  1.1615 +            log_header(fd);
  1.1616 +        logfp = fp;
  1.1617 +    }
  1.1618 +    TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);
  1.1619 +    return oldfp->fd;
  1.1620 +}
  1.1621 +
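         +/*
         + * Each logfile owns a logical fd bit; the lfdsets on libraries,
         + * methods and callsites record which logs have already serialized
         + * them.  Clearing those bits lets a closed logfile's lfd be reused.
         + */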
  1.1622 +static int
  1.1623 +lfd_clr_enumerator(PLHashEntry *he, int i, void *arg)
  1.1624 +{
  1.1625 +    lfdset_entry *le = (lfdset_entry*) he;
  1.1626 +    logfile *fp = (logfile*) arg;
  1.1627 +
  1.1628 +    LFD_CLR(fp->lfd, &le->lfdset);
  1.1629 +    return HT_ENUMERATE_NEXT;
  1.1630 +}
  1.1631 +
  1.1632 +static void
  1.1633 +lfd_clr_walk(callsite *site, logfile *fp)
  1.1634 +{
  1.1635 +    callsite *kid;
  1.1636 +
  1.1637 +    LFD_CLR(fp->lfd, &site->lfdset);
  1.1638 +    for (kid = site->kids; kid; kid = kid->siblings)
  1.1639 +        lfd_clr_walk(kid, fp);
  1.1640 +}
  1.1641 +
  1.1642 +PR_IMPLEMENT(void)
  1.1643 +NS_TraceMallocCloseLogFD(int fd)
  1.1644 +{
  1.1645 +    logfile *fp;
  1.1646 +    tm_thread *t = tm_get_thread();
  1.1647 +
  1.1648 +    TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t);
  1.1649 +
  1.1650 +    fp = get_logfile(fd);
  1.1651 +    if (fp) {
  1.1652 +        flush_logfile(fp);
  1.1653 +        if (fp == &default_logfile) {
  1.1654 +            /* Leave default_logfile in logfile_list with an fd of -1. */
  1.1655 +            fp->fd = -1;
  1.1656 +
  1.1657 +            /* NB: we can never free lfd 0, it belongs to default_logfile. */
  1.1658 +            PR_ASSERT(fp->lfd == 0);
  1.1659 +        } else {
  1.1660 +            /* Clear fp->lfd in all possible lfdsets. */
  1.1661 +            PL_HashTableEnumerateEntries(libraries, lfd_clr_enumerator, fp);
  1.1662 +            PL_HashTableEnumerateEntries(methods, lfd_clr_enumerator, fp);
  1.1663 +            lfd_clr_walk(&calltree_root, fp);
  1.1664 +
  1.1665 +            /* Unlink fp from logfile_list, freeing lfd for reallocation. */
  1.1666 +            *fp->prevp = fp->next;
  1.1667 +            if (!fp->next) {
  1.1668 +                PR_ASSERT(logfile_tail == &fp->next);
  1.1669 +                logfile_tail = fp->prevp;
  1.1670 +            }
  1.1671 +
  1.1672 +            /* Reset logfp if we must, then free fp. */
  1.1673 +            if (fp == logfp)
  1.1674 +                logfp = &default_logfile;
  1.1675 +            free((void*) fp);
  1.1676 +        }
  1.1677 +    }
  1.1678 +
  1.1679 +    TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);
  1.1680 +    MozillaUnRegisterDebugFD(fd);
  1.1681 +    close(fd);
  1.1682 +}
  1.1683 +
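         +/*
         + * Append a TM_EVENT_TIMESTAMP record: seconds and microseconds from
         + * gettimeofday on Unix (seconds and milliseconds from _ftime on
         + * Win32), followed by the caption string.
         + */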
  1.1684 +PR_IMPLEMENT(void)
  1.1685 +NS_TraceMallocLogTimestamp(const char *caption)
  1.1686 +{
  1.1687 +    logfile *fp;
  1.1688 +#ifdef XP_UNIX
  1.1689 +    struct timeval tv;
  1.1690 +#endif
  1.1691 +#ifdef XP_WIN32
  1.1692 +    struct _timeb tb;
  1.1693 +#endif
  1.1694 +    tm_thread *t = tm_get_thread();
  1.1695 +
  1.1696 +    TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t);
  1.1697 +
  1.1698 +    fp = logfp;
  1.1699 +    log_byte(fp, TM_EVENT_TIMESTAMP);
  1.1700 +
  1.1701 +#ifdef XP_UNIX
  1.1702 +    gettimeofday(&tv, NULL);
  1.1703 +    log_uint32(fp, (uint32_t) tv.tv_sec);
  1.1704 +    log_uint32(fp, (uint32_t) tv.tv_usec);
  1.1705 +#endif
  1.1706 +#ifdef XP_WIN32
  1.1707 +    _ftime(&tb);
  1.1708 +    log_uint32(fp, (uint32_t) tb.time);
  1.1709 +    log_uint32(fp, (uint32_t) tb.millitm);
  1.1710 +#endif
  1.1711 +    log_string(fp, caption);
  1.1712 +
  1.1713 +    TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);
  1.1714 +}
  1.1715 +
  1.1716 +static void
  1.1717 +print_stack(FILE *ofp, callsite *site)
  1.1718 +{
  1.1719 +    while (site) {
  1.1720 +        if (site->name || site->parent) {
  1.1721 +            fprintf(ofp, "%s[%s +0x%X]\n",
  1.1722 +                    site->name, site->library, site->offset);
  1.1723 +        }
  1.1724 +        site = site->parent;
  1.1725 +    }
  1.1726 +}
  1.1727 +
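         +/* Pick a word-dump format sized to the pointer width at run time. */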
  1.1728 +static const char *allocation_format =
  1.1729 +  (sizeof(void*) == 4) ? "\t0x%08zX\n" :
  1.1730 +  (sizeof(void*) == 8) ? "\t0x%016zX\n" :
  1.1731 +  "UNEXPECTED sizeof(void*)";
  1.1732 +
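         +/*
         + * Dump one live allocation: its address, runtime type name and size,
         + * a word-by-word hex dump of its contents, then its allocation stack.
         + */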
  1.1733 +static int
  1.1734 +allocation_enumerator(PLHashEntry *he, int i, void *arg)
  1.1735 +{
  1.1736 +    allocation *alloc = (allocation*) he;
  1.1737 +    FILE *ofp = (FILE*) arg;
  1.1738 +    callsite *site = (callsite*) he->value;
  1.1739 +
  1.1740 +    size_t *p, *end;
  1.1741 +
  1.1742 +    fprintf(ofp, "%p <%s> (%lu)\n",
  1.1743 +            he->key,
  1.1744 +            nsGetTypeName(he->key),
  1.1745 +            (unsigned long) alloc->size);
  1.1746 +
  1.1747 +    for (p   = (size_t*) he->key,
  1.1748 +         end = (size_t*) ((char*)he->key + alloc->size);
  1.1749 +         p < end; ++p) {
  1.1750 +        fprintf(ofp, allocation_format, *p);
  1.1751 +    }
  1.1752 +
  1.1753 +    print_stack(ofp, site);
  1.1754 +    fputc('\n', ofp);
  1.1755 +    return HT_ENUMERATE_NEXT;
  1.1756 +}
  1.1757 +
  1.1758 +PR_IMPLEMENT(void)
  1.1759 +NS_TraceStack(int skip, FILE *ofp)
  1.1760 +{
  1.1761 +    callsite *site;
  1.1762 +    tm_thread *t = tm_get_thread();
  1.1763 +    int immediate_abort;
  1.1764 +
  1.1765 +    site = backtrace(t, skip + 1, &immediate_abort);
  1.1766 +    /* Same walk as print_stack; reuse it instead of duplicating the loop. */
  1.1767 +    print_stack(ofp, site);
  1.1773 +}
  1.1774 +
  1.1775 +PR_IMPLEMENT(int)
  1.1776 +NS_TraceMallocDumpAllocations(const char *pathname)
  1.1777 +{
  1.1778 +    FILE *ofp;
  1.1779 +    int rv;
  1.1780 +    int fd;
  1.1781 +
  1.1782 +    tm_thread *t = tm_get_thread();
  1.1783 +
  1.1784 +    TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t);
  1.1785 +
  1.1786 +    ofp = fopen(pathname, WRITE_FLAGS);
  1.1787 +    if (ofp) {
  1.1788 +        MozillaRegisterDebugFD(fileno(ofp));
  1.1789 +        if (allocations) {
  1.1790 +            PL_HashTableEnumerateEntries(allocations, allocation_enumerator,
  1.1791 +                                         ofp);
  1.1792 +        }
  1.1793 +        rv = ferror(ofp) ? -1 : 0;
  1.1794 +        MozillaUnRegisterDebugFILE(ofp);
  1.1795 +        fclose(ofp);
  1.1796 +    } else {
  1.1797 +        rv = -1;
  1.1798 +    }
  1.1799 +
  1.1800 +    TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);
  1.1801 +
  1.1802 +    return rv;
  1.1803 +}
  1.1804 +
  1.1805 +PR_IMPLEMENT(void)
  1.1806 +NS_TraceMallocFlushLogfiles(void)
  1.1807 +{
  1.1808 +    logfile *fp;
  1.1809 +    tm_thread *t = tm_get_thread();
  1.1810 +
  1.1811 +    TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t);
  1.1812 +
  1.1813 +    for (fp = logfile_list; fp; fp = fp->next)
  1.1814 +        flush_logfile(fp);
  1.1815 +
  1.1816 +    TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);
  1.1817 +}
  1.1818 +
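         +/*
         + * Mark the live allocation at ptr so that later realloc and free
         + * calls on it are narrated to ofp, each with a stack trace.
         + */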
  1.1819 +PR_IMPLEMENT(void)
  1.1820 +NS_TrackAllocation(void* ptr, FILE *ofp)
  1.1821 +{
  1.1822 +    allocation *alloc;
  1.1823 +    tm_thread *t = tm_get_thread();
  1.1824 +
  1.1825 +    fprintf(ofp, "Trying to track %p\n", (void*) ptr);
  1.1826 +    setlinebuf(ofp);
  1.1827 +
  1.1828 +    TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t);
  1.1829 +    if (get_allocations()) {
  1.1830 +        alloc = (allocation*)
  1.1831 +                *PL_HashTableRawLookup(allocations, hash_pointer(ptr), ptr);
  1.1832 +        if (alloc) {
  1.1833 +            fprintf(ofp, "Tracking %p\n", (void*) ptr);
  1.1834 +            alloc->trackfp = ofp;
  1.1835 +        } else {
  1.1836 +            fprintf(ofp, "Not tracking %p\n", (void*) ptr);
  1.1837 +        }
  1.1838 +    }
  1.1839 +    TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);
  1.1840 +}
  1.1841 +
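         +/*
         + * Called from the malloc wrapper with start/end interval timestamps
         + * taken around the underlying allocation; logs the callsite serial,
         + * start tick and duration, address and size, and enters ptr in the
         + * allocations table.
         + */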
  1.1842 +PR_IMPLEMENT(void)
  1.1843 +MallocCallback(void *ptr, size_t size, uint32_t start, uint32_t end, tm_thread *t)
  1.1844 +{
  1.1845 +    callsite *site;
  1.1846 +    PLHashEntry *he;
  1.1847 +    allocation *alloc;
  1.1848 +    int immediate_abort;
  1.1849 +
  1.1850 +    if (!tracing_enabled || t->suppress_tracing != 0)
  1.1851 +        return;
  1.1852 +
  1.1853 +    site = backtrace(t, 2, &immediate_abort);
  1.1854 +    if (immediate_abort)
  1.1855 +        return;
  1.1856 +
  1.1857 +    TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t);
  1.1858 +    tmstats.malloc_calls++;
  1.1859 +    if (!ptr) {
  1.1860 +        tmstats.malloc_failures++;
  1.1861 +    } else {
  1.1862 +        if (site) {
  1.1863 +            log_event5(logfp, TM_EVENT_MALLOC,
  1.1864 +                       site->serial, start, end - start,
  1.1865 +                       (uint32_t)NS_PTR_TO_INT32(ptr), size);
  1.1866 +        }
  1.1867 +        if (get_allocations()) {
  1.1868 +            he = PL_HashTableAdd(allocations, ptr, site);
  1.1869 +            if (he) {
  1.1870 +                alloc = (allocation*) he;
  1.1871 +                alloc->size = size;
  1.1872 +                alloc->trackfp = NULL;
  1.1873 +            }
  1.1874 +        }
  1.1875 +    }
  1.1876 +    TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);
  1.1877 +}
  1.1878 +
  1.1879 +PR_IMPLEMENT(void)
  1.1880 +CallocCallback(void *ptr, size_t count, size_t size, uint32_t start, uint32_t end, tm_thread *t)
  1.1881 +{
  1.1882 +    callsite *site;
  1.1883 +    PLHashEntry *he;
  1.1884 +    allocation *alloc;
  1.1885 +    int immediate_abort;
  1.1886 +
  1.1887 +    if (!tracing_enabled || t->suppress_tracing != 0)
  1.1888 +        return;
  1.1889 +
  1.1890 +    site = backtrace(t, 2, &immediate_abort);
  1.1891 +    if (immediate_abort)
  1.1892 +        return;
  1.1893 +
  1.1894 +    TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t);
  1.1895 +    tmstats.calloc_calls++;
  1.1896 +    if (!ptr) {
  1.1897 +        tmstats.calloc_failures++;
  1.1898 +    } else {
  1.1899 +        size *= count;
  1.1900 +        if (site) {
  1.1901 +            log_event5(logfp, TM_EVENT_CALLOC,
  1.1902 +                       site->serial, start, end - start,
  1.1903 +                       (uint32_t)NS_PTR_TO_INT32(ptr), size);
  1.1904 +        }
  1.1905 +        if (get_allocations()) {
  1.1906 +            he = PL_HashTableAdd(allocations, ptr, site);
  1.1907 +            if (he) {
  1.1908 +                alloc = (allocation*) he;
  1.1909 +                alloc->size = size;
  1.1910 +                alloc->trackfp = NULL;
  1.1911 +            }
  1.1912 +        }
  1.1913 +    }
  1.1914 +    TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);
  1.1915 +}
  1.1916 +
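         +/*
         + * Log a TM_EVENT_REALLOC record carrying both the new and the old
         + * (callsite serial, address, size) triples, and rekey the allocations
         + * entry when realloc moved the block.
         + */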
  1.1917 +PR_IMPLEMENT(void)
  1.1918 +ReallocCallback(void * oldptr, void *ptr, size_t size,
  1.1919 +                uint32_t start, uint32_t end, tm_thread *t)
  1.1920 +{
  1.1921 +    callsite *oldsite, *site;
  1.1922 +    size_t oldsize;
  1.1923 +    PLHashNumber hash;
  1.1924 +    PLHashEntry **hep, *he;
  1.1925 +    allocation *alloc;
  1.1926 +    FILE *trackfp = NULL;
  1.1927 +    int immediate_abort;
  1.1928 +
  1.1929 +    if (!tracing_enabled || t->suppress_tracing != 0)
  1.1930 +        return;
  1.1931 +
  1.1932 +    site = backtrace(t, 2, &immediate_abort);
  1.1933 +    if (immediate_abort)
  1.1934 +        return;
  1.1935 +
  1.1936 +    TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t);
  1.1937 +    tmstats.realloc_calls++;
  1.1938 +    oldsite = NULL;
  1.1939 +    oldsize = 0;
  1.1940 +    hep = NULL;
  1.1941 +    he = NULL;
  1.1942 +    if (oldptr && get_allocations()) {
  1.1943 +        hash = hash_pointer(oldptr);
  1.1944 +        hep = PL_HashTableRawLookup(allocations, hash, oldptr);
  1.1945 +        he = *hep;
  1.1946 +        if (he) {
  1.1947 +            oldsite = (callsite*) he->value;
  1.1948 +            alloc = (allocation*) he;
  1.1949 +            oldsize = alloc->size;
  1.1950 +            trackfp = alloc->trackfp;
  1.1951 +            if (trackfp) {
  1.1952 +                fprintf(alloc->trackfp,
  1.1953 +                        "\nrealloc(%p, %lu), oldsize %lu, alloc site %p\n",
  1.1954 +                        (void*) ptr, (unsigned long) size,
  1.1955 +                        (unsigned long) oldsize, (void*) oldsite);
  1.1956 +                NS_TraceStack(1, trackfp);
  1.1957 +            }
  1.1958 +        }
  1.1959 +    }
  1.1960 +    if (!ptr && size) {
  1.1961 +        /*
  1.1962 +         * When realloc() fails, the original block is not freed or moved, so
  1.1963 +         * we'll leave the allocation entry untouched.
  1.1964 +         */
  1.1965 +        tmstats.realloc_failures++;
  1.1966 +    } else {
  1.1967 +        if (site) {
  1.1968 +            log_event8(logfp, TM_EVENT_REALLOC,
  1.1969 +                       site->serial, start, end - start,
  1.1970 +                       (uint32_t)NS_PTR_TO_INT32(ptr), size,
  1.1971 +                       oldsite ? oldsite->serial : 0,
  1.1972 +                       (uint32_t)NS_PTR_TO_INT32(oldptr), oldsize);
  1.1973 +        }
  1.1974 +        if (ptr && allocations) {
  1.1975 +            if (ptr != oldptr) {
  1.1976 +                /*
  1.1977 +                 * If we're reallocating (not passing null) and realloc moved
  1.1978 +                 * the block, remove its old entry; realloc already freed oldptr.
  1.1979 +                 */
  1.1980 +                if (he)
  1.1981 +                    PL_HashTableRawRemove(allocations, hep, he);
  1.1982 +
  1.1983 +                /* Record the new allocation now, setting he. */
  1.1984 +                he = PL_HashTableAdd(allocations, ptr, site);
  1.1985 +            } else {
  1.1986 +                /*
  1.1987 +                 * If we haven't yet recorded an allocation (possibly due to a
  1.1988 +                 * temporary memory shortage), do it now.
  1.1989 +                 */
  1.1990 +                if (!he)
  1.1991 +                    he = PL_HashTableAdd(allocations, ptr, site);
  1.1992 +            }
  1.1993 +            if (he) {
  1.1994 +                alloc = (allocation*) he;
  1.1995 +                alloc->size = size;
  1.1996 +                alloc->trackfp = trackfp;
  1.1997 +            }
  1.1998 +        }
  1.1999 +    }
  1.2000 +    TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);
  1.2001 +}
  1.2002 +
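         +/*
         + * Log a TM_EVENT_FREE record for ptr (a free of null is only counted)
         + * and drop its entry from the allocations table.
         + */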
  1.2003 +PR_IMPLEMENT(void)
  1.2004 +FreeCallback(void * ptr, uint32_t start, uint32_t end, tm_thread *t)
  1.2005 +{
  1.2006 +    PLHashEntry **hep, *he;
  1.2007 +    callsite *site;
  1.2008 +    allocation *alloc;
  1.2009 +
  1.2010 +    if (!tracing_enabled || t->suppress_tracing != 0)
  1.2011 +        return;
  1.2012 +
  1.2013 +    /*
  1.2014 +     * FIXME: Perhaps we should call backtrace() so we can check for
  1.2015 +     * immediate_abort. However, the only current contexts where
  1.2016 +     * immediate_abort will be true do not call free(), so for now,
  1.2017 +     * let's avoid the cost of backtrace().  See bug 478195.
  1.2018 +     */
  1.2019 +    TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t);
  1.2020 +    tmstats.free_calls++;
  1.2021 +    if (!ptr) {
  1.2022 +        tmstats.null_free_calls++;
  1.2023 +    } else {
  1.2024 +        if (get_allocations()) {
  1.2025 +            hep = PL_HashTableRawLookup(allocations, hash_pointer(ptr), ptr);
  1.2026 +            he = *hep;
  1.2027 +            if (he) {
  1.2028 +                site = (callsite*) he->value;
  1.2029 +                if (site) {
  1.2030 +                    alloc = (allocation*) he;
  1.2031 +                    if (alloc->trackfp) {
  1.2032 +                        fprintf(alloc->trackfp, "\nfree(%p), alloc site %p\n",
  1.2033 +                                (void*) ptr, (void*) site);
  1.2034 +                        NS_TraceStack(1, alloc->trackfp);
  1.2035 +                    }
  1.2036 +                    log_event5(logfp, TM_EVENT_FREE,
  1.2037 +                               site->serial, start, end - start,
  1.2038 +                               (uint32_t)NS_PTR_TO_INT32(ptr), alloc->size);
  1.2039 +                }
  1.2040 +                PL_HashTableRawRemove(allocations, hep, he);
  1.2041 +            }
  1.2042 +        }
  1.2043 +    }
  1.2044 +    TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);
  1.2045 +}
  1.2046 +
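         +/*
         + * Capture the caller's stack as an opaque id that can be printed
         + * later with NS_TraceMallocPrintStackTrace.
         + */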
  1.2047 +PR_IMPLEMENT(nsTMStackTraceID)
  1.2048 +NS_TraceMallocGetStackTrace(void)
  1.2049 +{
  1.2050 +    callsite *site;
  1.2051 +    int dummy;
  1.2052 +    tm_thread *t = tm_get_thread();
  1.2053 +
  1.2054 +    PR_ASSERT(t->suppress_tracing == 0);
  1.2055 +
  1.2056 +    site = backtrace(t, 2, &dummy);
  1.2057 +    return (nsTMStackTraceID) site;
  1.2058 +}
  1.2059 +
  1.2060 +PR_IMPLEMENT(void)
  1.2061 +NS_TraceMallocPrintStackTrace(FILE *ofp, nsTMStackTraceID id)
  1.2062 +{
  1.2063 +    print_stack(ofp, (callsite *)id);
  1.2064 +}
  1.2065 +
  1.2066 +#endif /* NS_TRACE_MALLOC */
