/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim:cindent:ts=8:et:sw=4:
 *
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifdef NS_TRACE_MALLOC
/*
 * TODO:
 * - FIXME https://bugzilla.mozilla.org/show_bug.cgi?id=392008
 * - extend logfile so 'F' record tells free stack
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#ifdef XP_UNIX
#include <unistd.h>
#include <sys/stat.h>
#include <sys/time.h>
#endif
#include "plhash.h"
#include "pratom.h"
#include "prlog.h"
#include "prlock.h"
#include "prmon.h"
#include "prprf.h"
#include "prenv.h"
#include "prnetdb.h"
#include "nsTraceMalloc.h"
#include "nscore.h"
#include "prinit.h"
#include "prthread.h"
#include "plstr.h"
#include "nsStackWalk.h"
#include "nsTraceMallocCallbacks.h"
#include "nsTypeInfo.h"
#include "mozilla/PoisonIOInterposer.h"

#if defined(XP_MACOSX)

#include <malloc/malloc.h>

#define WRITE_FLAGS "w"

#define __libc_malloc(x) malloc(x)
#define __libc_realloc(x, y) realloc(x, y)
#define __libc_free(x) free(x)

#elif defined(XP_UNIX)

#include <malloc.h>

#define WRITE_FLAGS "w"

#ifdef WRAP_SYSTEM_INCLUDES
#pragma GCC visibility push(default)
#endif
extern __ptr_t __libc_malloc(size_t);
extern __ptr_t __libc_calloc(size_t, size_t);
extern __ptr_t __libc_realloc(__ptr_t, size_t);
extern void __libc_free(__ptr_t);
extern __ptr_t __libc_memalign(size_t, size_t);
extern __ptr_t __libc_valloc(size_t);
#ifdef WRAP_SYSTEM_INCLUDES
#pragma GCC visibility pop
#endif

#elif defined(XP_WIN32)

#include <sys/timeb.h> /* for timeb */
#include <sys/stat.h>  /* for fstat */

#include <io.h> /* for write */

#define WRITE_FLAGS "w"

#define __libc_malloc(x) dhw_orig_malloc(x)
#define __libc_realloc(x, y) dhw_orig_realloc(x, y)
#define __libc_free(x) dhw_orig_free(x)

#else /* not XP_MACOSX, XP_UNIX, or XP_WIN32 */

# error "Unknown build configuration!"

#endif
typedef struct logfile logfile;

#define STARTUP_TMBUFSIZE (64 * 1024)
#define LOGFILE_TMBUFSIZE (16 * 1024)

struct logfile {
    int         fd;
    int         lfd;        /* logical fd, dense among all logfiles */
    char        *buf;
    int         bufsize;
    int         pos;
    uint32_t    size;
    uint32_t    simsize;
    logfile     *next;
    logfile     **prevp;
};

static char      default_buf[STARTUP_TMBUFSIZE];
static logfile   default_logfile =
                 {-1, 0, default_buf, STARTUP_TMBUFSIZE, 0, 0, 0, NULL, NULL};
static logfile   *logfile_list = NULL;
static logfile   **logfile_tail = &logfile_list;
static logfile   *logfp = &default_logfile;
static PRLock    *tmlock = NULL;
#ifndef PATH_MAX
#define PATH_MAX 4096
#endif
static char      sdlogname[PATH_MAX] = ""; /* filename for shutdown leak log */

/*
 * This enables/disables trace-malloc logging.
 *
 * It is separate from suppress_tracing so that we do not have to pay
 * the performance cost of repeated TM_TLS_GET_DATA calls when
 * trace-malloc is disabled (which is not as bad as the locking we used
 * to have).
 *
 * It must default to zero, since it can be tested by the Linux malloc
 * hooks before NS_TraceMallocStartup sets it.
 */
static uint32_t tracing_enabled = 0;

/*
 * Control whether we should log stacks.
 */
static uint32_t stacks_enabled = 1;

/*
 * This lock must be held while manipulating the calltree, the
 * allocations table, the log, or the tmstats.
 *
 * Callers should not *enter* the lock without checking suppress_tracing
 * first; otherwise they risk trying to re-enter on the same thread.
 */
#define TM_ENTER_LOCK(t)                                                      \
    PR_BEGIN_MACRO                                                            \
        PR_ASSERT(t->suppress_tracing != 0);                                  \
        if (tmlock)                                                           \
            PR_Lock(tmlock);                                                  \
    PR_END_MACRO

#define TM_EXIT_LOCK(t)                                                       \
    PR_BEGIN_MACRO                                                            \
        PR_ASSERT(t->suppress_tracing != 0);                                  \
        if (tmlock)                                                           \
            PR_Unlock(tmlock);                                                \
    PR_END_MACRO

#define TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t)                                 \
    PR_BEGIN_MACRO                                                            \
        t->suppress_tracing++;                                                \
        TM_ENTER_LOCK(t);                                                     \
    PR_END_MACRO

#define TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t)                                \
    PR_BEGIN_MACRO                                                            \
        TM_EXIT_LOCK(t);                                                      \
        t->suppress_tracing--;                                                \
    PR_END_MACRO


/*
 * Thread-local storage.
 *
 * We can't use NSPR thread-local storage for this because it mallocs
 * within PR_GetThreadPrivate (the first time) and PR_SetThreadPrivate
 * (which can be worked around by protecting all uses of those functions
 * with a monitor, ugh) and because it calls malloc/free when the
 * thread-local storage is in an inconsistent state within
 * PR_SetThreadPrivate (when expanding the thread-local storage array)
 * and _PRI_DetachThread (when and after deleting the thread-local
 * storage array).
 */
#ifdef XP_WIN32

#include <windows.h>

#define TM_TLS_INDEX_TYPE           DWORD
#define TM_CREATE_TLS_INDEX(i_)     PR_BEGIN_MACRO                            \
                                        (i_) = TlsAlloc();                    \
                                    PR_END_MACRO
#define TM_DESTROY_TLS_INDEX(i_)    TlsFree((i_))
#define TM_GET_TLS_DATA(i_)         TlsGetValue((i_))
#define TM_SET_TLS_DATA(i_, v_)     TlsSetValue((i_), (v_))

#else

#include <pthread.h>

#define TM_TLS_INDEX_TYPE           pthread_key_t
#define TM_CREATE_TLS_INDEX(i_)     pthread_key_create(&(i_), NULL)
#define TM_DESTROY_TLS_INDEX(i_)    pthread_key_delete((i_))
#define TM_GET_TLS_DATA(i_)         pthread_getspecific((i_))
#define TM_SET_TLS_DATA(i_, v_)     pthread_setspecific((i_), (v_))

#endif

static TM_TLS_INDEX_TYPE tls_index;
static PRBool tls_index_initialized = PR_FALSE;

/* FIXME (maybe): This is currently unused; we leak the thread-local data. */
#if 0
static void
free_tm_thread(void *priv)
{
    tm_thread *t = (tm_thread*) priv;

    PR_ASSERT(t->suppress_tracing == 0);

    if (t->in_heap) {
        t->suppress_tracing = 1;
        if (t->backtrace_buf.buffer)
            __libc_free(t->backtrace_buf.buffer);

        __libc_free(t);
    }
}
#endif

tm_thread *
tm_get_thread(void)
{
    tm_thread *t;
    tm_thread stack_tm_thread;

    if (!tls_index_initialized) {
        /*
         * Assume that the first call to |malloc| will occur before
         * there are multiple threads.  (If that's not the case, we
         * probably need to do the necessary synchronization without
         * using NSPR primitives.  See discussion in
         * https://bugzilla.mozilla.org/show_bug.cgi?id=442192.)
         */
        TM_CREATE_TLS_INDEX(tls_index);
        tls_index_initialized = PR_TRUE;
    }

    t = TM_GET_TLS_DATA(tls_index);

    if (!t) {
        /*
         * First, store a tm_thread on the stack to suppress tracing for
         * the malloc below.
         */
        stack_tm_thread.suppress_tracing = 1;
        stack_tm_thread.backtrace_buf.buffer = NULL;
        stack_tm_thread.backtrace_buf.size = 0;
        stack_tm_thread.backtrace_buf.entries = 0;
        TM_SET_TLS_DATA(tls_index, &stack_tm_thread);

        t = (tm_thread*) __libc_malloc(sizeof(tm_thread));
        t->suppress_tracing = 0;
        t->backtrace_buf = stack_tm_thread.backtrace_buf;
        TM_SET_TLS_DATA(tls_index, t);

        PR_ASSERT(stack_tm_thread.suppress_tracing == 1); /* balanced */
    }

    return t;
}
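
/*
 * Editor's note -- an illustrative sketch, not part of the original file:
 * this is how callers are expected to combine tm_get_thread() with
 * suppress_tracing so the tracer never re-enters itself on one thread.
 */
#if 0
static void
example_traced_operation(void)
{
    tm_thread *t = tm_get_thread(); /* cheap after the first call: one TLS read */

    if (t->suppress_tracing != 0)
        return;                     /* already inside the tracer */

    t->suppress_tracing++;          /* mallocs below are not re-traced */
    /* ... allocate, log, etc. ... */
    t->suppress_tracing--;
}
#endif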
/* We don't want more than 32 logfiles open at once, ok? */
typedef uint32_t lfd_set;

#define LFD_SET_STATIC_INITIALIZER 0
#define LFD_SET_SIZE    32

#define LFD_ZERO(s)     (*(s) = 0)
#define LFD_BIT(i)      ((uint32_t)1 << (i))
#define LFD_TEST(i,s)   (LFD_BIT(i) & *(s))
#define LFD_SET(i,s)    (*(s) |= LFD_BIT(i))
#define LFD_CLR(i,s)    (*(s) &= ~LFD_BIT(i))

static logfile *get_logfile(int fd)
{
    logfile *fp;
    int lfd;

    for (fp = logfile_list; fp; fp = fp->next) {
        if (fp->fd == fd)
            return fp;
    }
    lfd = 0;
  retry:
    for (fp = logfile_list; fp; fp = fp->next) {
        if (fp->lfd == lfd) {
            if (++lfd >= LFD_SET_SIZE)
                return NULL;
            goto retry;
        }
    }
    fp = __libc_malloc(sizeof(logfile) + LOGFILE_TMBUFSIZE);
    if (!fp)
        return NULL;
    fp->fd = fd;
    fp->lfd = lfd;
    fp->buf = (char*) (fp + 1);
    fp->bufsize = LOGFILE_TMBUFSIZE;
    fp->pos = 0;
    fp->size = fp->simsize = 0;
    fp->next = NULL;
    fp->prevp = logfile_tail;
    *logfile_tail = fp;
    logfile_tail = &fp->next;
    return fp;
}

static void flush_logfile(logfile *fp)
{
    int len, cnt, fd;
    char *bp;

    len = fp->pos;
    if (len == 0)
        return;
    fp->pos = 0;
    fd = fp->fd;
    if (fd >= 0) {
        fp->size += len;
        bp = fp->buf;
        do {
            cnt = write(fd, bp, len);
            if (cnt <= 0) {
                printf("### nsTraceMalloc: write failed or wrote 0 bytes!\n");
                return;
            }
            bp += cnt;
            len -= cnt;
        } while (len > 0);
    }
    fp->simsize += len;
}

static void log_byte(logfile *fp, char byte)
{
    if (fp->pos == fp->bufsize)
        flush_logfile(fp);
    fp->buf[fp->pos++] = byte;
}

static void log_string(logfile *fp, const char *str)
{
    int len, rem, cnt;

    len = strlen(str) + 1; /* include null terminator */
    while ((rem = fp->pos + len - fp->bufsize) > 0) {
        cnt = len - rem;
        memcpy(&fp->buf[fp->pos], str, cnt);
        str += cnt;
        fp->pos += cnt;
        flush_logfile(fp);
        len = rem;
    }
    memcpy(&fp->buf[fp->pos], str, len);
    fp->pos += len;
}

static void log_filename(logfile* fp, const char* filename)
{
    if (strlen(filename) < 512) {
        char *bp, *cp, buf[512];

        bp = strstr(strcpy(buf, filename), "mozilla");
        if (!bp)
            bp = buf;

        for (cp = bp; *cp; cp++) {
            if (*cp == '\\')
                *cp = '/';
        }

        filename = bp;
    }
    log_string(fp, filename);
}
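
/*
 * Editor's note (illustrative, not in the original): log_uint32 below writes
 * integers in a UTF-8-like variable-length encoding; the count of leading
 * 1 bits in the first byte selects the record width.  For example,
 * 0x00001234 falls in the two-byte range (< 0x4000), so it is written as
 * 0x92 0x34: the first byte is 0x80 | (0x1234 >> 8) == 0x92, and the second
 * is 0x1234 & 0xff == 0x34.
 */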
static void log_uint32(logfile *fp, uint32_t ival)
{
    if (ival < 0x80) {
        /* 0xxx xxxx */
        log_byte(fp, (char) ival);
    } else if (ival < 0x4000) {
        /* 10xx xxxx xxxx xxxx */
        log_byte(fp, (char) ((ival >> 8) | 0x80));
        log_byte(fp, (char) (ival & 0xff));
    } else if (ival < 0x200000) {
        /* 110x xxxx xxxx xxxx xxxx xxxx */
        log_byte(fp, (char) ((ival >> 16) | 0xc0));
        log_byte(fp, (char) ((ival >> 8) & 0xff));
        log_byte(fp, (char) (ival & 0xff));
    } else if (ival < 0x10000000) {
        /* 1110 xxxx xxxx xxxx xxxx xxxx xxxx xxxx */
        log_byte(fp, (char) ((ival >> 24) | 0xe0));
        log_byte(fp, (char) ((ival >> 16) & 0xff));
        log_byte(fp, (char) ((ival >> 8) & 0xff));
        log_byte(fp, (char) (ival & 0xff));
    } else {
        /* 1111 0000 xxxx xxxx xxxx xxxx xxxx xxxx xxxx xxxx */
        log_byte(fp, (char) 0xf0);
        log_byte(fp, (char) ((ival >> 24) & 0xff));
        log_byte(fp, (char) ((ival >> 16) & 0xff));
        log_byte(fp, (char) ((ival >> 8) & 0xff));
        log_byte(fp, (char) (ival & 0xff));
    }
}

static void log_event1(logfile *fp, char event, uint32_t serial)
{
    log_byte(fp, event);
    log_uint32(fp, (uint32_t) serial);
}

static void log_event2(logfile *fp, char event, uint32_t serial, size_t size)
{
    log_event1(fp, event, serial);
    log_uint32(fp, (uint32_t) size);
}

static void log_event3(logfile *fp, char event, uint32_t serial, size_t oldsize,
                       size_t size)
{
    log_event2(fp, event, serial, oldsize);
    log_uint32(fp, (uint32_t) size);
}

static void log_event4(logfile *fp, char event, uint32_t serial, uint32_t ui2,
                       uint32_t ui3, uint32_t ui4)
{
    log_event3(fp, event, serial, ui2, ui3);
    log_uint32(fp, ui4);
}

static void log_event5(logfile *fp, char event, uint32_t serial, uint32_t ui2,
                       uint32_t ui3, uint32_t ui4, uint32_t ui5)
{
    log_event4(fp, event, serial, ui2, ui3, ui4);
    log_uint32(fp, ui5);
}

static void log_event6(logfile *fp, char event, uint32_t serial, uint32_t ui2,
                       uint32_t ui3, uint32_t ui4, uint32_t ui5, uint32_t ui6)
{
    log_event5(fp, event, serial, ui2, ui3, ui4, ui5);
    log_uint32(fp, ui6);
}

static void log_event7(logfile *fp, char event, uint32_t serial, uint32_t ui2,
                       uint32_t ui3, uint32_t ui4, uint32_t ui5, uint32_t ui6,
                       uint32_t ui7)
{
    log_event6(fp, event, serial, ui2, ui3, ui4, ui5, ui6);
    log_uint32(fp, ui7);
}

static void log_event8(logfile *fp, char event, uint32_t serial, uint32_t ui2,
                       uint32_t ui3, uint32_t ui4, uint32_t ui5, uint32_t ui6,
                       uint32_t ui7, uint32_t ui8)
{
    log_event7(fp, event, serial, ui2, ui3, ui4, ui5, ui6, ui7);
    log_uint32(fp, ui8);
}
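
/*
 * Editor's note (illustrative, not in the original): an event record is one
 * event byte followed by variable-length uint32 operands.  For instance, the
 * malloc event logged later as log_event5(logfp, TM_EVENT_MALLOC, serial,
 * start, end - start, ptr, size) occupies at most 1 + 5*5 bytes, and usually
 * far fewer, because small operands encode in a single byte.
 */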
typedef struct callsite callsite;

struct callsite {
    void*       pc;
    uint32_t    serial;
    lfd_set     lfdset;
    const char  *name;      /* pointer to string owned by methods table */
    const char  *library;   /* pointer to string owned by libraries table */
    int         offset;
    callsite    *parent;
    callsite    *siblings;
    callsite    *kids;
};

/* NB: these counters are incremented and decremented only within tmlock. */
static uint32_t library_serial_generator = 0;
static uint32_t method_serial_generator = 0;
static uint32_t callsite_serial_generator = 0;
static uint32_t tmstats_serial_generator = 0;
static uint32_t filename_serial_generator = 0;

/* Root of the tree of callsites, the sum of all (cycle-compressed) stacks. */
static callsite calltree_root =
  {0, 0, LFD_SET_STATIC_INITIALIZER, NULL, NULL, 0, NULL, NULL, NULL};

/* A fake pc for when stacks are disabled; must be different from the
   pc in calltree_root. */
#define STACK_DISABLED_PC ((void*)1)

/* Basic instrumentation. */
static nsTMStats tmstats = NS_TMSTATS_STATIC_INITIALIZER;

/* Parent with the most kids (tmstats.calltree_maxkids). */
static callsite *calltree_maxkids_parent;

/* Calltree leaf for path with deepest stack backtrace. */
static callsite *calltree_maxstack_top;

/* Last site (i.e., calling pc) that recurred during a backtrace. */
static callsite *last_callsite_recurrence;

static void log_tmstats(logfile *fp)
{
    log_event1(fp, TM_EVENT_STATS, ++tmstats_serial_generator);
    log_uint32(fp, tmstats.calltree_maxstack);
    log_uint32(fp, tmstats.calltree_maxdepth);
    log_uint32(fp, tmstats.calltree_parents);
    log_uint32(fp, tmstats.calltree_maxkids);
    log_uint32(fp, tmstats.calltree_kidhits);
    log_uint32(fp, tmstats.calltree_kidmisses);
    log_uint32(fp, tmstats.calltree_kidsteps);
    log_uint32(fp, tmstats.callsite_recurrences);
    log_uint32(fp, tmstats.backtrace_calls);
    log_uint32(fp, tmstats.backtrace_failures);
    log_uint32(fp, tmstats.btmalloc_failures);
    log_uint32(fp, tmstats.dladdr_failures);
    log_uint32(fp, tmstats.malloc_calls);
    log_uint32(fp, tmstats.malloc_failures);
    log_uint32(fp, tmstats.calloc_calls);
    log_uint32(fp, tmstats.calloc_failures);
    log_uint32(fp, tmstats.realloc_calls);
    log_uint32(fp, tmstats.realloc_failures);
    log_uint32(fp, tmstats.free_calls);
    log_uint32(fp, tmstats.null_free_calls);
    log_uint32(fp, calltree_maxkids_parent ? calltree_maxkids_parent->serial
                                           : 0);
    log_uint32(fp, calltree_maxstack_top ? calltree_maxstack_top->serial : 0);
}
static void *generic_alloctable(void *pool, size_t size)
{
    return __libc_malloc(size);
}

static void generic_freetable(void *pool, void *item)
{
    __libc_free(item);
}

typedef struct lfdset_entry {
    PLHashEntry base;
    lfd_set     lfdset;
} lfdset_entry;

static PLHashEntry *lfdset_allocentry(void *pool, const void *key)
{
    lfdset_entry *le = __libc_malloc(sizeof *le);
    if (le)
        LFD_ZERO(&le->lfdset);
    return &le->base;
}

static void lfdset_freeentry(void *pool, PLHashEntry *he, unsigned flag)
{
    lfdset_entry *le;

    if (flag != HT_FREE_ENTRY)
        return;
    le = (lfdset_entry*) he;
    __libc_free((void*) le);
}

static PLHashAllocOps lfdset_hashallocops = {
    generic_alloctable, generic_freetable,
    lfdset_allocentry,  lfdset_freeentry
};

/* Table of library pathnames mapped to logged 'L' record serial numbers. */
static PLHashTable *libraries = NULL;

/* Table of filename pathnames mapped to logged 'G' record serial numbers. */
static PLHashTable *filenames = NULL;

/* Table mapping method names to logged 'N' record serial numbers. */
static PLHashTable *methods = NULL;
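
/*
 * Editor's note (illustrative, not in the original): calltree() below folds
 * every backtrace into a trie rooted at calltree_root.  Two stacks such as
 *
 *   main -> parse -> malloc    and    main -> parse -> strdup
 *
 * share the main and parse nodes.  Each node gets a serial number when first
 * created, and its per-logfile lfdset bit records whether the node has been
 * written to a given log, so every callsite is logged at most once per log.
 */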
/*
 * Presumes that its caller is holding tmlock, but may temporarily exit
 * the lock.
 */
static callsite *
calltree(void **stack, size_t num_stack_entries, tm_thread *t)
{
    logfile *fp = logfp;
    void *pc;
    uint32_t nkids;
    callsite *parent, *site, **csp, *tmp;
    int maxstack;
    uint32_t library_serial, method_serial, filename_serial;
    const char *library, *method, *filename;
    char *slash;
    PLHashNumber hash;
    PLHashEntry **hep, *he;
    lfdset_entry *le;
    size_t stack_index;
    nsCodeAddressDetails details;
    nsresult rv;

    maxstack = (num_stack_entries > tmstats.calltree_maxstack);
    if (maxstack) {
        /* these two are the same, although that used to be less clear */
        tmstats.calltree_maxstack = num_stack_entries;
        tmstats.calltree_maxdepth = num_stack_entries;
    }

    /* Reverse the stack again, finding and building a path in the tree. */
    parent = &calltree_root;
    stack_index = num_stack_entries;
    do {
        --stack_index;
        pc = stack[stack_index];

        csp = &parent->kids;
        while ((site = *csp) != NULL) {
            if (site->pc == pc) {
                tmstats.calltree_kidhits++;

                /* Put the most recently used site at the front of siblings. */
                *csp = site->siblings;
                site->siblings = parent->kids;
                parent->kids = site;

                /* Check whether we've logged for this site and logfile yet. */
                if (!LFD_TEST(fp->lfd, &site->lfdset)) {
                    /*
                     * Some other logfile put this site in the calltree.  We
                     * must log an event for site, and possibly first for its
                     * method and/or library.  Note the code after the while
                     * loop that tests if (!site).
                     */
                    break;
                }

                /* Site already built and logged to fp -- go up the stack. */
                goto upward;
            }
            tmstats.calltree_kidsteps++;
            csp = &site->siblings;
        }

        if (!site) {
            tmstats.calltree_kidmisses++;

            /* Check for recursion: see if pc is on our ancestor line. */
            for (site = parent; site; site = site->parent) {
                if (site->pc == pc) {
                    tmstats.callsite_recurrences++;
                    last_callsite_recurrence = site;
                    goto upward;
                }
            }
        }

        /*
         * Not in tree at all, or not logged to fp: let's find our symbolic
         * callsite info.
         */

        if (!stacks_enabled) {
            /*
             * Fake the necessary information for our single fake stack
             * frame.
             */
            PL_strncpyz(details.library, "stacks_disabled",
                        sizeof(details.library));
            details.loffset = 0;
            details.filename[0] = '\0';
            details.lineno = 0;
            details.function[0] = '\0';
            details.foffset = 0;
        } else {
            /*
             * NS_DescribeCodeAddress can (on Linux) acquire a lock inside
             * the shared library loader.  Another thread might call malloc
             * while holding that lock (when loading a shared library).  So
             * we have to exit tmlock around this call.  For details, see
             * https://bugzilla.mozilla.org/show_bug.cgi?id=363334#c3
             *
             * We could be more efficient by building the nodes in the
             * calltree, exiting the monitor once to describe all of them,
             * and then filling in the descriptions for any that hadn't been
             * described already.  But this is easier for now.
             */
            TM_EXIT_LOCK(t);
            rv = NS_DescribeCodeAddress(pc, &details);
            TM_ENTER_LOCK(t);
            if (NS_FAILED(rv)) {
                tmstats.dladdr_failures++;
                goto fail;
            }
        }

        /* Check whether we need to emit a library trace record. */
        library_serial = 0;
        library = NULL;
        if (details.library[0]) {
            if (!libraries) {
                libraries = PL_NewHashTable(100, PL_HashString,
                                            PL_CompareStrings, PL_CompareValues,
                                            &lfdset_hashallocops, NULL);
                if (!libraries) {
                    tmstats.btmalloc_failures++;
                    goto fail;
                }
            }
            hash = PL_HashString(details.library);
            hep = PL_HashTableRawLookup(libraries, hash, details.library);
            he = *hep;
            if (he) {
                library = (char*) he->key;
                library_serial = (uint32_t) NS_PTR_TO_INT32(he->value);
                le = (lfdset_entry *) he;
                if (LFD_TEST(fp->lfd, &le->lfdset)) {
                    /* We already logged an event on fp for this library. */
                    le = NULL;
                }
            } else {
                library = strdup(details.library);
                if (library) {
                    library_serial = ++library_serial_generator;
                    he = PL_HashTableRawAdd(libraries, hep, hash, library,
                                            NS_INT32_TO_PTR(library_serial));
                }
                if (!he) {
                    tmstats.btmalloc_failures++;
                    goto fail;
                }
                le = (lfdset_entry *) he;
            }
            if (le) {
                /* Need to log an event to fp for this lib. */
                slash = strrchr(library, '/');
                log_event1(fp, TM_EVENT_LIBRARY, library_serial);
                log_string(fp, slash ? slash + 1 : library);
                LFD_SET(fp->lfd, &le->lfdset);
            }
        }
        /*
         * For compatibility with current log format, always emit a
         * filename trace record, using "noname" / 0 when no file name
         * is available.
         */
        filename_serial = 0;
        filename = details.filename[0] ? details.filename : "noname";
        if (!filenames) {
            filenames = PL_NewHashTable(100, PL_HashString,
                                        PL_CompareStrings, PL_CompareValues,
                                        &lfdset_hashallocops, NULL);
            if (!filenames) {
                tmstats.btmalloc_failures++;
                return NULL;
            }
        }
        hash = PL_HashString(filename);
        hep = PL_HashTableRawLookup(filenames, hash, filename);
        he = *hep;
        if (he) {
            filename = (char*) he->key;
            filename_serial = (uint32_t) NS_PTR_TO_INT32(he->value);
            le = (lfdset_entry *) he;
            if (LFD_TEST(fp->lfd, &le->lfdset)) {
                /* We already logged an event on fp for this filename. */
                le = NULL;
            }
        } else {
            filename = strdup(filename);
            if (filename) {
                filename_serial = ++filename_serial_generator;
                he = PL_HashTableRawAdd(filenames, hep, hash, filename,
                                        NS_INT32_TO_PTR(filename_serial));
            }
            if (!he) {
                tmstats.btmalloc_failures++;
                return NULL;
            }
            le = (lfdset_entry *) he;
        }
        if (le) {
            /* Need to log an event to fp for this filename. */
            log_event1(fp, TM_EVENT_FILENAME, filename_serial);
            log_filename(fp, filename);
            LFD_SET(fp->lfd, &le->lfdset);
        }

        if (!details.function[0]) {
            PR_snprintf(details.function, sizeof(details.function),
                        "%s+%X", library ? library : "main", details.loffset);
        }

        /* Emit an 'N' (for New method, 'M' is for malloc!) event if needed. */
        method_serial = 0;
        if (!methods) {
            methods = PL_NewHashTable(10000, PL_HashString,
                                      PL_CompareStrings, PL_CompareValues,
                                      &lfdset_hashallocops, NULL);
            if (!methods) {
                tmstats.btmalloc_failures++;
                goto fail;
            }
        }
        hash = PL_HashString(details.function);
        hep = PL_HashTableRawLookup(methods, hash, details.function);
        he = *hep;
        if (he) {
            method = (char*) he->key;
            method_serial = (uint32_t) NS_PTR_TO_INT32(he->value);
            le = (lfdset_entry *) he;
            if (LFD_TEST(fp->lfd, &le->lfdset)) {
                /* We already logged an event on fp for this method. */
                le = NULL;
            }
        } else {
            method = strdup(details.function);
            if (method) {
                method_serial = ++method_serial_generator;
                he = PL_HashTableRawAdd(methods, hep, hash, method,
                                        NS_INT32_TO_PTR(method_serial));
            }
            if (!he) {
                tmstats.btmalloc_failures++;
                return NULL;
            }
            le = (lfdset_entry *) he;
        }
        if (le) {
            log_event4(fp, TM_EVENT_METHOD, method_serial, library_serial,
                       filename_serial, details.lineno);
            log_string(fp, method);
            LFD_SET(fp->lfd, &le->lfdset);
        }

        /* Create a new callsite record. */
        if (!site) {
            site = __libc_malloc(sizeof(callsite));
            if (!site) {
                tmstats.btmalloc_failures++;
                goto fail;
            }

            /* Update parent and max-kids-per-parent stats. */
            if (!parent->kids)
                tmstats.calltree_parents++;
            nkids = 1;
            for (tmp = parent->kids; tmp; tmp = tmp->siblings)
                nkids++;
            if (nkids > tmstats.calltree_maxkids) {
                tmstats.calltree_maxkids = nkids;
                calltree_maxkids_parent = parent;
            }

            /* Insert the new site into the tree. */
            site->pc = pc;
            site->serial = ++callsite_serial_generator;
            LFD_ZERO(&site->lfdset);
            site->name = method;
            site->library = library;
            site->offset = details.loffset;
            site->parent = parent;
            site->siblings = parent->kids;
            parent->kids = site;
            site->kids = NULL;
        }

        /* Log the site with its parent, method, and offset. */
        log_event4(fp, TM_EVENT_CALLSITE, site->serial, parent->serial,
                   method_serial, details.foffset);
        LFD_SET(fp->lfd, &site->lfdset);

      upward:
        parent = site;
    } while (stack_index > 0);

    if (maxstack)
        calltree_maxstack_top = site;

    return site;

  fail:
    return NULL;
}

/*
 * Buffer the stack from top at low index to bottom at high, so that we can
 * reverse it in calltree.
 */
static void
stack_callback(void *pc, void *sp, void *closure)
{
    stack_buffer_info *info = (stack_buffer_info*) closure;

    /*
     * If we run out of buffer, keep incrementing entries so that
     * backtrace can call us again with a bigger buffer.
     */
    if (info->entries < info->size)
        info->buffer[info->entries] = pc;
    ++info->entries;
}

/*
 * The caller MUST NOT be holding tmlock when calling backtrace.
 * On return, if *immediate_abort is set, then the return value is NULL
 * and the thread is in a very dangerous situation (e.g. holding
 * sem_pool_lock in Mac OS X pthreads); the caller should bail out
 * without doing anything (such as acquiring locks).
 */
static callsite *
backtrace(tm_thread *t, int skipFrames, int *immediate_abort)
{
    callsite *site;
    stack_buffer_info *info = &t->backtrace_buf;
    void **new_stack_buffer;
    size_t new_stack_buffer_size;
    nsresult rv;

    t->suppress_tracing++;

    if (!stacks_enabled) {
#if defined(XP_MACOSX)
        /* Walk the stack, even if stacks_enabled is false.  We do this to
           check if we must set immediate_abort. */
        info->entries = 0;
        rv = NS_StackWalk(stack_callback, skipFrames, /* maxFrames */ 0, info,
                          0, NULL);
        *immediate_abort = rv == NS_ERROR_UNEXPECTED;
        if (rv == NS_ERROR_UNEXPECTED || info->entries == 0) {
            t->suppress_tracing--;
            return NULL;
        }
#endif

        /*
         * Create a single fake stack frame so that all the tools get
         * data in the correct format.
         */
        *immediate_abort = 0;
        if (info->size < 1) {
            PR_ASSERT(!info->buffer); /* !info->size == !info->buffer */
            info->buffer = __libc_malloc(1 * sizeof(void*));
            if (!info->buffer) {
                t->suppress_tracing--; /* balance the increment above */
                return NULL;
            }
            info->size = 1;
        }

        info->entries = 1;
        info->buffer[0] = STACK_DISABLED_PC;
    } else {
        /*
         * NS_StackWalk can (on Windows) acquire a lock inside the shared
         * library loader.  Another thread might call malloc while holding
         * that lock (when loading a shared library).  So we can't be in
         * tmlock during this call.  For details, see
         * https://bugzilla.mozilla.org/show_bug.cgi?id=374829#c8
         */

        /*
         * skipFrames == 0 means |backtrace| should show up, so don't use
         * skipFrames + 1.
         * NB: this call is repeated below if the buffer is too small.
         */
        info->entries = 0;
        rv = NS_StackWalk(stack_callback, skipFrames, /* maxFrames */ 0, info,
                          0, NULL);
        *immediate_abort = rv == NS_ERROR_UNEXPECTED;
        if (rv == NS_ERROR_UNEXPECTED || info->entries == 0) {
            t->suppress_tracing--;
            return NULL;
        }

        /*
         * To avoid allocating in stack_callback (which, on Windows, is
         * called on a different thread from the one we're running on here),
         * reallocate here if it didn't have a big enough buffer (which
         * includes the first call on any thread), and call it again.
         */
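        /*
         * Editor's note (illustrative, not in the original): on a thread's
         * first walk info->size is 0, so any nonempty stack takes the path
         * below; we grow the buffer to twice the reported depth and walk
         * again, trading one extra walk for never allocating inside
         * stack_callback.
         */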
        if (info->entries > info->size) {
            new_stack_buffer_size = 2 * info->entries;
            new_stack_buffer = __libc_realloc(info->buffer,
                                   new_stack_buffer_size * sizeof(void*));
            if (!new_stack_buffer) {
                t->suppress_tracing--; /* balance the increment above */
                return NULL;
            }
            info->buffer = new_stack_buffer;
            info->size = new_stack_buffer_size;

            /* and call NS_StackWalk again */
            info->entries = 0;
            NS_StackWalk(stack_callback, skipFrames, /* maxFrames */ 0, info,
                         0, NULL);

            /* same stack */
            PR_ASSERT(info->entries * 2 == new_stack_buffer_size);
        }
    }

    TM_ENTER_LOCK(t);

    site = calltree(info->buffer, info->entries, t);

    tmstats.backtrace_calls++;
    if (!site) {
        tmstats.backtrace_failures++;
        PR_ASSERT(tmstats.backtrace_failures < 100);
    }
    TM_EXIT_LOCK(t);

    t->suppress_tracing--;
    return site;
}

typedef struct allocation {
    PLHashEntry entry;
    size_t      size;
    FILE        *trackfp;   /* for allocation tracking */
} allocation;

#define ALLOC_HEAP_SIZE 150000

static allocation alloc_heap[ALLOC_HEAP_SIZE];
static allocation *alloc_freelist = NULL;
static int alloc_heap_initialized = 0;

static PLHashEntry *alloc_allocentry(void *pool, const void *key)
{
    allocation **listp, *alloc;
    int n;

    if (!alloc_heap_initialized) {
        n = ALLOC_HEAP_SIZE;
        listp = &alloc_freelist;
        for (alloc = alloc_heap; --n >= 0; alloc++) {
            *listp = alloc;
            listp = (allocation**) &alloc->entry.next;
        }
        *listp = NULL;
        alloc_heap_initialized = 1;
    }

    listp = &alloc_freelist;
    alloc = *listp;
    if (!alloc)
        return __libc_malloc(sizeof(allocation));
    *listp = (allocation*) alloc->entry.next;
    return &alloc->entry;
}

static void alloc_freeentry(void *pool, PLHashEntry *he, unsigned flag)
{
    allocation *alloc;

    if (flag != HT_FREE_ENTRY)
        return;
    alloc = (allocation*) he;
    if ((ptrdiff_t)(alloc - alloc_heap) < (ptrdiff_t)ALLOC_HEAP_SIZE) {
        alloc->entry.next = &alloc_freelist->entry;
        alloc_freelist = alloc;
    } else {
        __libc_free((void*) alloc);
    }
}

static PLHashAllocOps alloc_hashallocops = {
    generic_alloctable, generic_freetable,
    alloc_allocentry,   alloc_freeentry
};

static PLHashNumber hash_pointer(const void *key)
{
    return (PLHashNumber) key;
}

static PLHashTable *allocations = NULL;

static PLHashTable *new_allocations(void)
{
    allocations = PL_NewHashTable(200000, hash_pointer,
                                  PL_CompareValues, PL_CompareValues,
                                  &alloc_hashallocops, NULL);
    return allocations;
}
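
/*
 * Editor's note (illustrative, not in the original): allocation entries are
 * served from the static alloc_heap freelist first and fall back to
 * __libc_malloc only once all ALLOC_HEAP_SIZE slots are in use;
 * alloc_freeentry returns heap slots to the freelist and frees only the
 * overflow entries.
 */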
#define get_allocations() (allocations ? allocations : new_allocations())

#if defined(XP_MACOSX)

/* from malloc.c in Libc */
typedef void
malloc_logger_t(uint32_t type, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3,
                uintptr_t result, uint32_t num_hot_frames_to_skip);

extern malloc_logger_t *malloc_logger;

#define MALLOC_LOG_TYPE_ALLOCATE    2
#define MALLOC_LOG_TYPE_DEALLOCATE  4
#define MALLOC_LOG_TYPE_HAS_ZONE    8
#define MALLOC_LOG_TYPE_CLEARED     64

static void
my_malloc_logger(uint32_t type, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3,
                 uintptr_t result, uint32_t num_hot_frames_to_skip)
{
    uintptr_t all_args[3] = { arg1, arg2, arg3 };
    uintptr_t *args = all_args + ((type & MALLOC_LOG_TYPE_HAS_ZONE) ? 1 : 0);

    uint32_t alloc_type =
        type & (MALLOC_LOG_TYPE_ALLOCATE | MALLOC_LOG_TYPE_DEALLOCATE);
    tm_thread *t = tm_get_thread();

    if (alloc_type == (MALLOC_LOG_TYPE_ALLOCATE | MALLOC_LOG_TYPE_DEALLOCATE)) {
        ReallocCallback((void*)args[0], (void*)result, args[1], 0, 0, t);
    } else if (alloc_type == MALLOC_LOG_TYPE_ALLOCATE) {
        /*
         * We don't get size/count information for calloc, so just use
         * MallocCallback.
         */
        MallocCallback((void*)result, args[0], 0, 0, t);
    } else if (alloc_type == MALLOC_LOG_TYPE_DEALLOCATE) {
        FreeCallback((void*)args[0], 0, 0, t);
    }
}

static void
StartupHooker(void)
{
    PR_ASSERT(!malloc_logger);
    malloc_logger = my_malloc_logger;
}

static void
ShutdownHooker(void)
{
    PR_ASSERT(malloc_logger == my_malloc_logger);
    malloc_logger = NULL;
}

#elif defined(XP_UNIX)

/*
 * We can't use glibc's malloc hooks because they can't be used in a
 * threadsafe manner.  They require unsetting the hooks to call into the
 * original malloc implementation, and then resetting them when the
 * original implementation returns.  If another thread calls the same
 * allocation function while the hooks are unset, we have no chance to
 * intercept the call.
 */
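
/*
 * Editor's note (illustrative, not in the original): every wrapper below
 * follows one pattern -- divert to the underlying allocator when tracing is
 * off, NSPR is not yet initialized, or this thread is already inside the
 * tracer; otherwise bracket the real call with PR_IntervalNow() timestamps
 * and report it through the matching *Callback hook.
 */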
NS_EXTERNAL_VIS_(__ptr_t)
malloc(size_t size)
{
    uint32_t start, end;
    __ptr_t ptr;
    tm_thread *t;

    if (!tracing_enabled || !PR_Initialized() ||
        (t = tm_get_thread())->suppress_tracing != 0) {
        return __libc_malloc(size);
    }

    t->suppress_tracing++;
    start = PR_IntervalNow();
    ptr = __libc_malloc(size);
    end = PR_IntervalNow();
    t->suppress_tracing--;

    MallocCallback(ptr, size, start, end, t);

    return ptr;
}

NS_EXTERNAL_VIS_(__ptr_t)
calloc(size_t count, size_t size)
{
    uint32_t start, end;
    __ptr_t ptr;
    tm_thread *t;

    if (!tracing_enabled || !PR_Initialized() ||
        (t = tm_get_thread())->suppress_tracing != 0) {
        return __libc_calloc(count, size);
    }

    t->suppress_tracing++;
    start = PR_IntervalNow();
    ptr = __libc_calloc(count, size);
    end = PR_IntervalNow();
    t->suppress_tracing--;

    CallocCallback(ptr, count, size, start, end, t);

    return ptr;
}

NS_EXTERNAL_VIS_(__ptr_t)
realloc(__ptr_t oldptr, size_t size)
{
    uint32_t start, end;
    __ptr_t ptr;
    tm_thread *t;

    if (!tracing_enabled || !PR_Initialized() ||
        (t = tm_get_thread())->suppress_tracing != 0) {
        return __libc_realloc(oldptr, size);
    }

    t->suppress_tracing++;
    start = PR_IntervalNow();
    ptr = __libc_realloc(oldptr, size);
    end = PR_IntervalNow();
    t->suppress_tracing--;

    /* FIXME bug 392008: We could race with reallocation of oldptr. */
    ReallocCallback(oldptr, ptr, size, start, end, t);

    return ptr;
}

NS_EXTERNAL_VIS_(void*)
valloc(size_t size)
{
    uint32_t start, end;
    __ptr_t ptr;
    tm_thread *t;

    if (!tracing_enabled || !PR_Initialized() ||
        (t = tm_get_thread())->suppress_tracing != 0) {
        return __libc_valloc(size);
    }

    t->suppress_tracing++;
    start = PR_IntervalNow();
    ptr = __libc_valloc(size);
    end = PR_IntervalNow();
    t->suppress_tracing--;

    MallocCallback(ptr, size, start, end, t);

    return ptr;
}

NS_EXTERNAL_VIS_(void*)
memalign(size_t boundary, size_t size)
{
    uint32_t start, end;
    __ptr_t ptr;
    tm_thread *t;

    if (!tracing_enabled || !PR_Initialized() ||
        (t = tm_get_thread())->suppress_tracing != 0) {
        return __libc_memalign(boundary, size);
    }

    t->suppress_tracing++;
    start = PR_IntervalNow();
    ptr = __libc_memalign(boundary, size);
    end = PR_IntervalNow();
    t->suppress_tracing--;

    MallocCallback(ptr, size, start, end, t);

    return ptr;
}

NS_EXTERNAL_VIS_(int)
posix_memalign(void **memptr, size_t alignment, size_t size)
{
    __ptr_t ptr = memalign(alignment, size);
    if (!ptr)
        return ENOMEM;
    *memptr = ptr;
    return 0;
}

NS_EXTERNAL_VIS_(void)
free(__ptr_t ptr)
{
    uint32_t start, end;
    tm_thread *t;

    if (!tracing_enabled || !PR_Initialized() ||
        (t = tm_get_thread())->suppress_tracing != 0) {
        __libc_free(ptr);
        return;
    }

    t->suppress_tracing++;
    start = PR_IntervalNow();
    __libc_free(ptr);
    end = PR_IntervalNow();
    t->suppress_tracing--;

    /* FIXME bug 392008: We could race with reallocation of ptr. */

    FreeCallback(ptr, start, end, t);
}

NS_EXTERNAL_VIS_(void)
cfree(void *ptr)
{
    free(ptr);
}

#define StartupHooker()  PR_BEGIN_MACRO PR_END_MACRO
#define ShutdownHooker() PR_BEGIN_MACRO PR_END_MACRO

#elif defined(XP_WIN32)

/* See nsWinTraceMalloc.cpp. */

#endif

static const char magic[] = NS_TRACE_MALLOC_MAGIC;

static void
log_header(int logfd)
{
    uint32_t ticksPerSec = PR_htonl(PR_TicksPerSecond());
    (void) write(logfd, magic, NS_TRACE_MALLOC_MAGIC_SIZE);
    (void) write(logfd, &ticksPerSec, sizeof ticksPerSec);
}
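
/*
 * Editor's note (an inference, not in the original): a log therefore begins
 * with the NS_TRACE_MALLOC_MAGIC signature and the interval-timer
 * ticks-per-second value in network byte order, which log readers
 * presumably use to convert the start/duration operands of later event
 * records into seconds.
 */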
PR_IMPLEMENT(void)
NS_TraceMallocStartup(int logfd)
{
    const char* stack_disable_env;

    /* We must be running on the primordial thread. */
    PR_ASSERT(tracing_enabled == 0);
    PR_ASSERT(logfp == &default_logfile);
    tracing_enabled = (logfd >= 0);

    if (logfd >= 3)
        MozillaRegisterDebugFD(logfd);

    /* Stacks are disabled if this env var is set to a non-empty value. */
    stack_disable_env = PR_GetEnv("NS_TRACE_MALLOC_DISABLE_STACKS");
    stacks_enabled = !stack_disable_env || !*stack_disable_env;

    if (tracing_enabled) {
        PR_ASSERT(logfp->simsize == 0); /* didn't overflow startup buffer */

        /* Log everything in logfp (aka default_logfile)'s buffer to logfd. */
        logfp->fd = logfd;
        logfile_list = &default_logfile;
        logfp->prevp = &logfile_list;
        logfile_tail = &logfp->next;
        log_header(logfd);
    }

    RegisterTraceMallocShutdown();

    tmlock = PR_NewLock();
    (void) tm_get_thread(); /* ensure index initialization while it's easy */

    if (tracing_enabled)
        StartupHooker();
}

/*
 * Options for log files, with the log file name either as the next option
 * or separated by '=' (e.g. "./mozilla --trace-malloc malloc.log" or
 * "./mozilla --trace-malloc=malloc.log").
 */
static const char TMLOG_OPTION[] = "--trace-malloc";
static const char SDLOG_OPTION[] = "--shutdown-leaks";

#define SHOULD_PARSE_ARG(name_, log_, arg_) \
    (0 == strncmp(arg_, name_, sizeof(name_) - 1))

#define PARSE_ARG(name_, log_, argv_, i_, consumed_)                          \
    PR_BEGIN_MACRO                                                            \
        char _nextchar = argv_[i_][sizeof(name_) - 1];                        \
        if (_nextchar == '=') {                                               \
            log_ = argv_[i_] + sizeof(name_);                                 \
            consumed_ = 1;                                                    \
        } else if (_nextchar == '\0') {                                       \
            log_ = argv_[i_+1];                                               \
            consumed_ = 2;                                                    \
        }                                                                     \
    PR_END_MACRO
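
/*
 * Editor's note -- a worked example, not in the original: given
 * argv = { "app", "--trace-malloc=tm.log" }, PARSE_ARG sees '=' and consumes
 * one argument; given { "app", "--trace-malloc", "tm.log" } it sees '\0' and
 * consumes two.  Either way tmlogname ends up pointing at "tm.log".
 */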
PR_IMPLEMENT(int)
NS_TraceMallocStartupArgs(int argc, char **argv)
{
    int i, logfd = -1, consumed, logflags;
    char *tmlogname = NULL, *sdlogname_local = NULL;

    /*
     * Look for the --trace-malloc option early, to avoid missing early
     * mallocs (we miss static constructors whose output overflows the
     * log file's static 16K output buffer).
     */
    for (i = 1; i < argc; i += consumed) {
        consumed = 0;
        if (SHOULD_PARSE_ARG(TMLOG_OPTION, tmlogname, argv[i]))
            PARSE_ARG(TMLOG_OPTION, tmlogname, argv, i, consumed);
        else if (SHOULD_PARSE_ARG(SDLOG_OPTION, sdlogname_local, argv[i]))
            PARSE_ARG(SDLOG_OPTION, sdlogname_local, argv, i, consumed);

        if (consumed) {
#ifndef XP_WIN32 /* If we don't comment this out, it will crash Windows. */
            int j;
            /* Now remove --trace-malloc and its argument from argv. */
            argc -= consumed;
            for (j = i; j < argc; ++j)
                argv[j] = argv[j+consumed];
            argv[argc] = NULL;
            consumed = 0; /* don't advance next iteration */
#endif
        } else {
            consumed = 1;
        }
    }

    if (tmlogname) {
#ifdef XP_UNIX
        int pipefds[2];
#endif

        switch (*tmlogname) {
#ifdef XP_UNIX
          case '|':
            if (pipe(pipefds) == 0) {
                pid_t pid = fork();
                if (pid == 0) {
                    /* In child: set up stdin, parse args, and exec. */
                    int maxargc, nargc;
                    char **nargv, *token;

                    if (pipefds[0] != 0) {
                        dup2(pipefds[0], 0);
                        close(pipefds[0]);
                    }
                    close(pipefds[1]);

                    tmlogname = strtok(tmlogname + 1, " \t");
                    maxargc = 3;
                    nargv = (char **) malloc((maxargc+1) * sizeof(char *));
                    if (!nargv) exit(1);
                    nargc = 0;
                    nargv[nargc++] = tmlogname;
                    while ((token = strtok(NULL, " \t")) != NULL) {
                        if (nargc == maxargc) {
                            maxargc *= 2;
                            nargv = (char**)
                                realloc(nargv, (maxargc+1) * sizeof(char*));
                            if (!nargv) exit(1);
                        }
                        nargv[nargc++] = token;
                    }
                    nargv[nargc] = NULL;

                    (void) setsid();
                    execvp(tmlogname, nargv);
                    exit(127);
                }

                if (pid > 0) {
                    /* In parent: set logfd to the pipe's write side. */
                    close(pipefds[0]);
                    logfd = pipefds[1];
                }
            }
            if (logfd < 0) {
                fprintf(stderr,
                        "%s: can't pipe to trace-malloc child process %s: %s\n",
                        argv[0], tmlogname, strerror(errno));
                exit(1);
            }
            break;
#endif /*XP_UNIX*/
          case '-':
            /* Don't log from startup, but do prepare to log later. */
            /* XXX traditional meaning of '-' as option argument is "stdin" or "stdout" */
            if (tmlogname[1] == '\0')
                break;
            /* FALL THROUGH */

          default:
            logflags = O_CREAT | O_WRONLY | O_TRUNC;
#if defined(XP_WIN32)
            /*
             * Avoid translations on WIN32.
             */
            logflags |= O_BINARY;
#endif
            logfd = open(tmlogname, logflags, 0644);
            if (logfd < 0) {
                fprintf(stderr,
                        "%s: can't create trace-malloc log named %s: %s\n",
                        argv[0], tmlogname, strerror(errno));
                exit(1);
            }
            break;
        }
    }

    if (sdlogname_local) {
        strncpy(sdlogname, sdlogname_local, sizeof(sdlogname));
        sdlogname[sizeof(sdlogname) - 1] = '\0';
    }

    NS_TraceMallocStartup(logfd);
    return argc;
}
PR_IMPLEMENT(PRBool)
NS_TraceMallocHasStarted(void)
{
    return tmlock ? PR_TRUE : PR_FALSE;
}

PR_IMPLEMENT(void)
NS_TraceMallocShutdown(void)
{
    logfile *fp;

    if (sdlogname[0])
        NS_TraceMallocDumpAllocations(sdlogname);

    if (tmstats.backtrace_failures) {
        fprintf(stderr,
                "TraceMalloc backtrace failures: %lu (malloc %lu dladdr %lu)\n",
                (unsigned long) tmstats.backtrace_failures,
                (unsigned long) tmstats.btmalloc_failures,
                (unsigned long) tmstats.dladdr_failures);
    }
    while ((fp = logfile_list) != NULL) {
        logfile_list = fp->next;
        log_tmstats(fp);
        flush_logfile(fp);
        if (fp->fd >= 0) {
            MozillaUnRegisterDebugFD(fp->fd);
            close(fp->fd);
            fp->fd = -1;
        }
        if (fp != &default_logfile) {
            if (fp == logfp)
                logfp = &default_logfile;
            free((void*) fp);
        }
    }
    if (tmlock) {
        PRLock *lock = tmlock;
        tmlock = NULL;
        PR_DestroyLock(lock);
    }
    if (tracing_enabled) {
        tracing_enabled = 0;
        ShutdownHooker();
    }
}

PR_IMPLEMENT(void)
NS_TraceMallocDisable(void)
{
    tm_thread *t = tm_get_thread();
    logfile *fp;
    uint32_t sample;

    /* Robustify in case of duplicate call. */
    PR_ASSERT(tracing_enabled);
    if (tracing_enabled == 0)
        return;

    TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t);
    for (fp = logfile_list; fp; fp = fp->next)
        flush_logfile(fp);
    sample = --tracing_enabled;
    TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);
    if (sample == 0)
        ShutdownHooker();
}

PR_IMPLEMENT(void)
NS_TraceMallocEnable(void)
{
    tm_thread *t = tm_get_thread();
    uint32_t sample;

    TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t);
    sample = ++tracing_enabled;
    TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);
    if (sample == 1)
        StartupHooker();
}

PR_IMPLEMENT(int)
NS_TraceMallocChangeLogFD(int fd)
{
    logfile *oldfp, *fp;
    struct stat sb;
    tm_thread *t = tm_get_thread();

    TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t);
    oldfp = logfp;
    if (oldfp->fd != fd) {
        flush_logfile(oldfp);
        fp = get_logfile(fd);
        if (!fp) {
            TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);
            return -2;
        }
        if (fd >= 0 && fstat(fd, &sb) == 0 && sb.st_size == 0)
            log_header(fd);
        logfp = fp;
    }
    TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);
    return oldfp->fd;
}

static int
lfd_clr_enumerator(PLHashEntry *he, int i, void *arg)
{
    lfdset_entry *le = (lfdset_entry*) he;
    logfile *fp = (logfile*) arg;

    LFD_CLR(fp->lfd, &le->lfdset);
    return HT_ENUMERATE_NEXT;
}
static void
lfd_clr_walk(callsite *site, logfile *fp)
{
    callsite *kid;

    LFD_CLR(fp->lfd, &site->lfdset);
    for (kid = site->kids; kid; kid = kid->siblings)
        lfd_clr_walk(kid, fp);
}

PR_IMPLEMENT(void)
NS_TraceMallocCloseLogFD(int fd)
{
    logfile *fp;
    tm_thread *t = tm_get_thread();

    TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t);

    fp = get_logfile(fd);
    if (fp) {
        flush_logfile(fp);
        if (fp == &default_logfile) {
            /* Leave default_logfile in logfile_list with an fd of -1. */
            fp->fd = -1;

            /* NB: we can never free lfd 0, it belongs to default_logfile. */
            PR_ASSERT(fp->lfd == 0);
        } else {
            /* Clear fp->lfd in all possible lfdsets. */
            PL_HashTableEnumerateEntries(libraries, lfd_clr_enumerator, fp);
            PL_HashTableEnumerateEntries(methods, lfd_clr_enumerator, fp);
            lfd_clr_walk(&calltree_root, fp);

            /* Unlink fp from logfile_list, freeing lfd for reallocation. */
            *fp->prevp = fp->next;
            if (!fp->next) {
                PR_ASSERT(logfile_tail == &fp->next);
                logfile_tail = fp->prevp;
            }

            /* Reset logfp if we must, then free fp. */
            if (fp == logfp)
                logfp = &default_logfile;
            free((void*) fp);
        }
    }

    TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);
    MozillaUnRegisterDebugFD(fd);
    close(fd);
}

PR_IMPLEMENT(void)
NS_TraceMallocLogTimestamp(const char *caption)
{
    logfile *fp;
#ifdef XP_UNIX
    struct timeval tv;
#endif
#ifdef XP_WIN32
    struct _timeb tb;
#endif
    tm_thread *t = tm_get_thread();

    TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t);

    fp = logfp;
    log_byte(fp, TM_EVENT_TIMESTAMP);

#ifdef XP_UNIX
    gettimeofday(&tv, NULL);
    log_uint32(fp, (uint32_t) tv.tv_sec);
    log_uint32(fp, (uint32_t) tv.tv_usec);
#endif
#ifdef XP_WIN32
    _ftime(&tb);
    log_uint32(fp, (uint32_t) tb.time);
    log_uint32(fp, (uint32_t) tb.millitm);
#endif
    log_string(fp, caption);

    TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);
}

static void
print_stack(FILE *ofp, callsite *site)
{
    while (site) {
        if (site->name || site->parent) {
            fprintf(ofp, "%s[%s +0x%X]\n",
                    site->name, site->library, site->offset);
        }
        site = site->parent;
    }
}
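
/*
 * Editor's note (illustrative, not in the original): print_stack emits one
 * frame per line in the form "name[library +0xOFFSET]", innermost frame
 * first, by walking parent links back toward calltree_root.
 */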
"\t0x%016zX\n" : michael@0: "UNEXPECTED sizeof(void*)"; michael@0: michael@0: static int michael@0: allocation_enumerator(PLHashEntry *he, int i, void *arg) michael@0: { michael@0: allocation *alloc = (allocation*) he; michael@0: FILE *ofp = (FILE*) arg; michael@0: callsite *site = (callsite*) he->value; michael@0: michael@0: size_t *p, *end; michael@0: michael@0: fprintf(ofp, "%p <%s> (%lu)\n", michael@0: he->key, michael@0: nsGetTypeName(he->key), michael@0: (unsigned long) alloc->size); michael@0: michael@0: for (p = (size_t*) he->key, michael@0: end = (size_t*) ((char*)he->key + alloc->size); michael@0: p < end; ++p) { michael@0: fprintf(ofp, allocation_format, *p); michael@0: } michael@0: michael@0: print_stack(ofp, site); michael@0: fputc('\n', ofp); michael@0: return HT_ENUMERATE_NEXT; michael@0: } michael@0: michael@0: PR_IMPLEMENT(void) michael@0: NS_TraceStack(int skip, FILE *ofp) michael@0: { michael@0: callsite *site; michael@0: tm_thread *t = tm_get_thread(); michael@0: int immediate_abort; michael@0: michael@0: site = backtrace(t, skip + 1, &immediate_abort); michael@0: while (site) { michael@0: if (site->name || site->parent) { michael@0: fprintf(ofp, "%s[%s +0x%X]\n", michael@0: site->name, site->library, site->offset); michael@0: } michael@0: site = site->parent; michael@0: } michael@0: } michael@0: michael@0: PR_IMPLEMENT(int) michael@0: NS_TraceMallocDumpAllocations(const char *pathname) michael@0: { michael@0: FILE *ofp; michael@0: int rv; michael@0: int fd; michael@0: michael@0: tm_thread *t = tm_get_thread(); michael@0: michael@0: TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t); michael@0: michael@0: ofp = fopen(pathname, WRITE_FLAGS); michael@0: if (ofp) { michael@0: MozillaRegisterDebugFD(fileno(ofp)); michael@0: if (allocations) { michael@0: PL_HashTableEnumerateEntries(allocations, allocation_enumerator, michael@0: ofp); michael@0: } michael@0: rv = ferror(ofp) ? 
PR_IMPLEMENT(int)
NS_TraceMallocDumpAllocations(const char *pathname)
{
    FILE *ofp;
    int rv;

    tm_thread *t = tm_get_thread();

    TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t);

    ofp = fopen(pathname, WRITE_FLAGS);
    if (ofp) {
        MozillaRegisterDebugFD(fileno(ofp));
        if (allocations) {
            PL_HashTableEnumerateEntries(allocations, allocation_enumerator,
                                         ofp);
        }
        rv = ferror(ofp) ? -1 : 0;
        MozillaUnRegisterDebugFILE(ofp);
        fclose(ofp);
    } else {
        rv = -1;
    }

    TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);

    return rv;
}

PR_IMPLEMENT(void)
NS_TraceMallocFlushLogfiles(void)
{
    logfile *fp;
    tm_thread *t = tm_get_thread();

    TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t);

    for (fp = logfile_list; fp; fp = fp->next)
        flush_logfile(fp);

    TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);
}

PR_IMPLEMENT(void)
NS_TrackAllocation(void *ptr, FILE *ofp)
{
    allocation *alloc;
    tm_thread *t = tm_get_thread();

    fprintf(ofp, "Trying to track %p\n", (void*) ptr);
    setlinebuf(ofp);

    TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t);
    if (get_allocations()) {
        alloc = (allocation*)
                *PL_HashTableRawLookup(allocations, hash_pointer(ptr), ptr);
        if (alloc) {
            fprintf(ofp, "Tracking %p\n", (void*) ptr);
            alloc->trackfp = ofp;
        } else {
            fprintf(ofp, "Not tracking %p\n", (void*) ptr);
        }
    }
    TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);
}

PR_IMPLEMENT(void)
MallocCallback(void *ptr, size_t size, uint32_t start, uint32_t end,
               tm_thread *t)
{
    callsite *site;
    PLHashEntry *he;
    allocation *alloc;
    int immediate_abort;

    if (!tracing_enabled || t->suppress_tracing != 0)
        return;

    site = backtrace(t, 2, &immediate_abort);
    if (immediate_abort)
        return;

    TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t);
    tmstats.malloc_calls++;
    if (!ptr) {
        tmstats.malloc_failures++;
    } else {
        if (site) {
            log_event5(logfp, TM_EVENT_MALLOC,
                       site->serial, start, end - start,
                       (uint32_t)NS_PTR_TO_INT32(ptr), size);
        }
        if (get_allocations()) {
            he = PL_HashTableAdd(allocations, ptr, site);
            if (he) {
                alloc = (allocation*) he;
                alloc->size = size;
                alloc->trackfp = NULL;
            }
        }
    }
    TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);
}

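/*
 * MallocCallback above (and the sibling callbacks that follow) is driven
 * by the platform's malloc hooks, which time the underlying allocator
 * call and report the interval. A sketch of the expected calling
 * convention; wrapped_malloc is hypothetical, the real hooks are
 * platform-specific, and the start/end units are assumed here to be
 * NSPR intervals:
 *
 *     void *wrapped_malloc(size_t size)
 *     {
 *         tm_thread *t = tm_get_thread();
 *         uint32_t start = PR_IntervalNow();
 *         void *ptr = __libc_malloc(size);
 *         uint32_t end = PR_IntervalNow();
 *         MallocCallback(ptr, size, start, end, t);
 *         return ptr;
 *     }
 */
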
PR_IMPLEMENT(void)
CallocCallback(void *ptr, size_t count, size_t size, uint32_t start,
               uint32_t end, tm_thread *t)
{
    callsite *site;
    PLHashEntry *he;
    allocation *alloc;
    int immediate_abort;

    if (!tracing_enabled || t->suppress_tracing != 0)
        return;

    site = backtrace(t, 2, &immediate_abort);
    if (immediate_abort)
        return;

    TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t);
    tmstats.calloc_calls++;
    if (!ptr) {
        tmstats.calloc_failures++;
    } else {
        size *= count;
        if (site) {
            log_event5(logfp, TM_EVENT_CALLOC,
                       site->serial, start, end - start,
                       (uint32_t)NS_PTR_TO_INT32(ptr), size);
        }
        if (get_allocations()) {
            he = PL_HashTableAdd(allocations, ptr, site);
            if (he) {
                alloc = (allocation*) he;
                alloc->size = size;
                alloc->trackfp = NULL;
            }
        }
    }
    TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);
}

PR_IMPLEMENT(void)
ReallocCallback(void *oldptr, void *ptr, size_t size,
                uint32_t start, uint32_t end, tm_thread *t)
{
    callsite *oldsite, *site;
    size_t oldsize;
    PLHashNumber hash;
    PLHashEntry **hep, *he;
    allocation *alloc;
    FILE *trackfp = NULL;
    int immediate_abort;

    if (!tracing_enabled || t->suppress_tracing != 0)
        return;

    site = backtrace(t, 2, &immediate_abort);
    if (immediate_abort)
        return;

    TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t);
    tmstats.realloc_calls++;
    oldsite = NULL;
    oldsize = 0;
    hep = NULL;
    he = NULL;
    if (oldptr && get_allocations()) {
        hash = hash_pointer(oldptr);
        hep = PL_HashTableRawLookup(allocations, hash, oldptr);
        he = *hep;
        if (he) {
            oldsite = (callsite*) he->value;
            alloc = (allocation*) he;
            oldsize = alloc->size;
            trackfp = alloc->trackfp;
            if (trackfp) {
                fprintf(alloc->trackfp,
                        "\nrealloc(%p, %lu), oldsize %lu, alloc site %p\n",
                        (void*) ptr, (unsigned long) size,
                        (unsigned long) oldsize, (void*) oldsite);
                NS_TraceStack(1, trackfp);
            }
        }
    }
    if (!ptr && size) {
        /*
         * When realloc() fails, the original block is not freed or moved, so
         * we'll leave the allocation entry untouched.
         */
        tmstats.realloc_failures++;
    } else {
        if (site) {
            log_event8(logfp, TM_EVENT_REALLOC,
                       site->serial, start, end - start,
                       (uint32_t)NS_PTR_TO_INT32(ptr), size,
                       oldsite ? oldsite->serial : 0,
                       (uint32_t)NS_PTR_TO_INT32(oldptr), oldsize);
        }
        if (ptr && allocations) {
            if (ptr != oldptr) {
                /*
                 * If we're reallocating (not allocating new space by passing
                 * null to realloc) and realloc moved the block, free oldptr.
                 */
                if (he)
                    PL_HashTableRawRemove(allocations, hep, he);

                /* Record the new allocation now, setting he. */
                he = PL_HashTableAdd(allocations, ptr, site);
            } else {
                /*
                 * If we haven't yet recorded an allocation (possibly due to a
                 * temporary memory shortage), do it now.
                 */
                if (!he)
                    he = PL_HashTableAdd(allocations, ptr, site);
            }
            if (he) {
                alloc = (allocation*) he;
                alloc->size = size;
                alloc->trackfp = trackfp;
            }
        }
    }
    TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);
}

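/*
 * Taken together, the callbacks keep the allocations table in step with
 * the heap. For a hypothetical traced caller:
 *
 *     char *buf = (char*) malloc(16);    // TM_EVENT_MALLOC logged,
 *                                        // table entry added
 *     buf = (char*) realloc(buf, 4096);  // TM_EVENT_REALLOC logged; if
 *                                        // the block moved, the old
 *                                        // entry is removed and the new
 *                                        // address recorded
 *     free(buf);                         // TM_EVENT_FREE logged, entry
 *                                        // removed (see FreeCallback
 *                                        // below)
 */
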
PR_IMPLEMENT(void)
FreeCallback(void *ptr, uint32_t start, uint32_t end, tm_thread *t)
{
    PLHashEntry **hep, *he;
    callsite *site;
    allocation *alloc;

    if (!tracing_enabled || t->suppress_tracing != 0)
        return;

    /*
     * FIXME: Perhaps we should call backtrace() so we can check for
     * immediate_abort. However, the only current contexts where
     * immediate_abort will be true do not call free(), so for now,
     * let's avoid the cost of backtrace(). See bug 478195.
     */
    TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t);
    tmstats.free_calls++;
    if (!ptr) {
        tmstats.null_free_calls++;
    } else {
        if (get_allocations()) {
            hep = PL_HashTableRawLookup(allocations, hash_pointer(ptr), ptr);
            he = *hep;
            if (he) {
                site = (callsite*) he->value;
                if (site) {
                    alloc = (allocation*) he;
                    if (alloc->trackfp) {
                        fprintf(alloc->trackfp, "\nfree(%p), alloc site %p\n",
                                (void*) ptr, (void*) site);
                        NS_TraceStack(1, alloc->trackfp);
                    }
                    log_event5(logfp, TM_EVENT_FREE,
                               site->serial, start, end - start,
                               (uint32_t)NS_PTR_TO_INT32(ptr), alloc->size);
                }
                PL_HashTableRawRemove(allocations, hep, he);
            }
        }
    }
    TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);
}

PR_IMPLEMENT(nsTMStackTraceID)
NS_TraceMallocGetStackTrace(void)
{
    callsite *site;
    int dummy;
    tm_thread *t = tm_get_thread();

    PR_ASSERT(t->suppress_tracing == 0);

    site = backtrace(t, 2, &dummy);
    return (nsTMStackTraceID) site;
}

PR_IMPLEMENT(void)
NS_TraceMallocPrintStackTrace(FILE *ofp, nsTMStackTraceID id)
{
    print_stack(ofp, (callsite *)id);
}

#endif /* NS_TRACE_MALLOC */