/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim:cindent:ts=8:et:sw=4:
 *
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifdef NS_TRACE_MALLOC
/*
 * TODO:
 * - FIXME https://bugzilla.mozilla.org/show_bug.cgi?id=392008
 * - extend logfile so 'F' record tells free stack
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#ifdef XP_UNIX
#include <unistd.h>
#include <sys/stat.h>
#include <sys/time.h>
#endif
#include "plhash.h"
#include "pratom.h"
#include "prlog.h"
#include "prlock.h"
#include "prmon.h"
#include "prprf.h"
#include "prenv.h"
#include "prnetdb.h"
#include "nsTraceMalloc.h"
#include "nscore.h"
#include "prinit.h"
#include "prthread.h"
#include "plstr.h"
#include "nsStackWalk.h"
#include "nsTraceMallocCallbacks.h"
#include "nsTypeInfo.h"
#include "mozilla/PoisonIOInterposer.h"

#if defined(XP_MACOSX)

#include <malloc/malloc.h>

#define WRITE_FLAGS "w"

#define __libc_malloc(x) malloc(x)
#define __libc_realloc(x, y) realloc(x, y)
#define __libc_free(x) free(x)

#elif defined(XP_UNIX)

#include <malloc.h>

#define WRITE_FLAGS "w"

#ifdef WRAP_SYSTEM_INCLUDES
#pragma GCC visibility push(default)
#endif
extern __ptr_t __libc_malloc(size_t);
extern __ptr_t __libc_calloc(size_t, size_t);
extern __ptr_t __libc_realloc(__ptr_t, size_t);
extern void __libc_free(__ptr_t);
extern __ptr_t __libc_memalign(size_t, size_t);
extern __ptr_t __libc_valloc(size_t);
#ifdef WRAP_SYSTEM_INCLUDES
#pragma GCC visibility pop
#endif

#elif defined(XP_WIN32)

#include <sys/timeb.h> /* for timeb */
#include <sys/stat.h>  /* for fstat */

#include <io.h>        /* for write */

#define WRITE_FLAGS "w"

#define __libc_malloc(x) dhw_orig_malloc(x)
#define __libc_realloc(x, y) dhw_orig_realloc(x, y)
#define __libc_free(x) dhw_orig_free(x)

#else /* not XP_MACOSX, XP_UNIX, or XP_WIN32 */

# error "Unknown build configuration!"

#endif

typedef struct logfile logfile;

#define STARTUP_TMBUFSIZE (64 * 1024)
#define LOGFILE_TMBUFSIZE (16 * 1024)

struct logfile {
    int      fd;
    int      lfd;       /* logical fd, dense among all logfiles */
    char     *buf;
    int      bufsize;
    int      pos;
    uint32_t size;
    uint32_t simsize;
    logfile  *next;
    logfile  **prevp;
};

static char    default_buf[STARTUP_TMBUFSIZE];
static logfile default_logfile =
    {-1, 0, default_buf, STARTUP_TMBUFSIZE, 0, 0, 0, NULL, NULL};
static logfile *logfile_list = NULL;
static logfile **logfile_tail = &logfile_list;
static logfile *logfp = &default_logfile;
static PRLock  *tmlock = NULL;
#ifndef PATH_MAX
#define PATH_MAX 4096
#endif
static char    sdlogname[PATH_MAX] = ""; /* filename for shutdown leak log */

/*
 * This enables/disables trace-malloc logging.
 *
 * It is separate from suppress_tracing so that we do not have to pay
 * the performance cost of repeated TM_TLS_GET_DATA calls when
 * trace-malloc is disabled (which is not as bad as the locking we used
 * to have).
 *
 * It must default to zero, since it can be tested by the Linux malloc
 * hooks before NS_TraceMallocStartup sets it.
 */
static uint32_t tracing_enabled = 0;

/*
 * Control whether we should log stacks.
 */
static uint32_t stacks_enabled = 1;

/*
 * This lock must be held while manipulating the calltree, the
 * allocations table, the log, or the tmstats.
 *
 * Callers should not *enter* the lock without checking suppress_tracing
 * first; otherwise they risk trying to re-enter on the same thread.
 */
#define TM_ENTER_LOCK(t)                            \
    PR_BEGIN_MACRO                                  \
        PR_ASSERT(t->suppress_tracing != 0);        \
        if (tmlock)                                 \
            PR_Lock(tmlock);                        \
    PR_END_MACRO

#define TM_EXIT_LOCK(t)                             \
    PR_BEGIN_MACRO                                  \
        PR_ASSERT(t->suppress_tracing != 0);        \
        if (tmlock)                                 \
            PR_Unlock(tmlock);                      \
    PR_END_MACRO

#define TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t)       \
    PR_BEGIN_MACRO                                  \
        t->suppress_tracing++;                      \
        TM_ENTER_LOCK(t);                           \
    PR_END_MACRO

#define TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t)      \
    PR_BEGIN_MACRO                                  \
        TM_EXIT_LOCK(t);                            \
        t->suppress_tracing--;                      \
    PR_END_MACRO

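/*
 * Illustrative usage sketch for the macros above (the real call sites
 * appear throughout this file, e.g. NS_TraceMallocLogTimestamp):
 *
 *     tm_thread *t = tm_get_thread();
 *     TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t);
 *     ... touch calltree / allocations / log / tmstats ...
 *     TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);
 *
 * Suppression is bumped before taking the lock so that any allocation
 * made while holding tmlock takes the untraced fast path instead of
 * re-entering the tracer on the same thread.
 */
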
/*
 * Thread-local storage.
 *
 * We can't use NSPR thread-local storage for this because it mallocs
 * within PR_GetThreadPrivate (the first time) and PR_SetThreadPrivate
 * (which can be worked around by protecting all uses of those functions
 * with a monitor, ugh) and because it calls malloc/free when the
 * thread-local storage is in an inconsistent state within
 * PR_SetThreadPrivate (when expanding the thread-local storage array)
 * and _PRI_DetachThread (when and after deleting the thread-local
 * storage array).
 */

#ifdef XP_WIN32

#include <windows.h>

#define TM_TLS_INDEX_TYPE        DWORD
#define TM_CREATE_TLS_INDEX(i_)  PR_BEGIN_MACRO     \
                                     (i_) = TlsAlloc(); \
                                 PR_END_MACRO
#define TM_DESTROY_TLS_INDEX(i_) TlsFree((i_))
#define TM_GET_TLS_DATA(i_)      TlsGetValue((i_))
#define TM_SET_TLS_DATA(i_, v_)  TlsSetValue((i_), (v_))

#else

#include <pthread.h>

#define TM_TLS_INDEX_TYPE        pthread_key_t
#define TM_CREATE_TLS_INDEX(i_)  pthread_key_create(&(i_), NULL)
#define TM_DESTROY_TLS_INDEX(i_) pthread_key_delete((i_))
#define TM_GET_TLS_DATA(i_)      pthread_getspecific((i_))
#define TM_SET_TLS_DATA(i_, v_)  pthread_setspecific((i_), (v_))

#endif

static TM_TLS_INDEX_TYPE tls_index;
static PRBool tls_index_initialized = PR_FALSE;

/* FIXME (maybe): This is currently unused; we leak the thread-local data. */
#if 0
static void
free_tm_thread(void *priv)
{
    tm_thread *t = (tm_thread*) priv;

    PR_ASSERT(t->suppress_tracing == 0);

    if (t->in_heap) {
        t->suppress_tracing = 1;
        if (t->backtrace_buf.buffer)
            __libc_free(t->backtrace_buf.buffer);

        __libc_free(t);
    }
}
#endif

tm_thread *
tm_get_thread(void)
{
    tm_thread *t;
    tm_thread stack_tm_thread;

    if (!tls_index_initialized) {
        /*
         * Assume that the first call to |malloc| will occur before
         * there are multiple threads. (If that's not the case, we
         * probably need to do the necessary synchronization without
         * using NSPR primitives. See discussion in
         * https://bugzilla.mozilla.org/show_bug.cgi?id=442192 )
         */
        TM_CREATE_TLS_INDEX(tls_index);
        tls_index_initialized = PR_TRUE;
    }

    t = TM_GET_TLS_DATA(tls_index);

    if (!t) {
        /*
         * First, store a tm_thread on the stack to suppress tracing
         * for the malloc below.
         */
        stack_tm_thread.suppress_tracing = 1;
        stack_tm_thread.backtrace_buf.buffer = NULL;
        stack_tm_thread.backtrace_buf.size = 0;
        stack_tm_thread.backtrace_buf.entries = 0;
        TM_SET_TLS_DATA(tls_index, &stack_tm_thread);

        t = (tm_thread*) __libc_malloc(sizeof(tm_thread));
        t->suppress_tracing = 0;
        t->backtrace_buf = stack_tm_thread.backtrace_buf;
        TM_SET_TLS_DATA(tls_index, t);

        PR_ASSERT(stack_tm_thread.suppress_tracing == 1); /* balanced */
    }

    return t;
}

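/*
 * Design note: the on-stack stack_tm_thread above acts as a temporary
 * TLS sentinel. If the __libc_malloc call re-enters one of the malloc
 * wrappers below, tm_get_thread returns the sentinel, whose
 * suppress_tracing != 0 sends the wrapper down the untraced path
 * rather than recursing into this bootstrap.
 */
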
/* We don't want more than 32 logfiles open at once, ok? */
typedef uint32_t lfd_set;

#define LFD_SET_STATIC_INITIALIZER 0
#define LFD_SET_SIZE 32

#define LFD_ZERO(s)   (*(s) = 0)
#define LFD_BIT(i)    ((uint32_t)1 << (i))
#define LFD_TEST(i,s) (LFD_BIT(i) & *(s))
#define LFD_SET(i,s)  (*(s) |= LFD_BIT(i))
#define LFD_CLR(i,s)  (*(s) &= ~LFD_BIT(i))

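/*
 * Worked example: starting from lfd_set s = 0, LFD_SET(3, &s) leaves
 * s == 0x08, LFD_TEST(3, &s) is then nonzero, and LFD_CLR(3, &s)
 * restores s == 0. Each open logfile owns one bit position (its lfd),
 * so "has this record been emitted to this logfile yet?" is a single
 * bit test per callsite/library/method/filename entry.
 */
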
static logfile *get_logfile(int fd)
{
    logfile *fp;
    int lfd;

    for (fp = logfile_list; fp; fp = fp->next) {
        if (fp->fd == fd)
            return fp;
    }

    /* Find the lowest logical fd not already in use. */
    lfd = 0;
retry:
    for (fp = logfile_list; fp; fp = fp->next) {
        if (fp->lfd == lfd) {
            if (++lfd >= LFD_SET_SIZE)
                return NULL;
            goto retry;
        }
    }
    fp = __libc_malloc(sizeof(logfile) + LOGFILE_TMBUFSIZE);
    if (!fp)
        return NULL;
    fp->fd = fd;
    fp->lfd = lfd;
    fp->buf = (char*) (fp + 1);
    fp->bufsize = LOGFILE_TMBUFSIZE;
    fp->pos = 0;
    fp->size = fp->simsize = 0;
    fp->next = NULL;
    fp->prevp = logfile_tail;
    *logfile_tail = fp;
    logfile_tail = &fp->next;
    return fp;
}

static void flush_logfile(logfile *fp)
{
    int len, cnt, fd;
    char *bp;

    len = fp->pos;
    if (len == 0)
        return;
    fp->pos = 0;
    fd = fp->fd;
    if (fd >= 0) {
        fp->size += len;
        bp = fp->buf;
        do {
            cnt = write(fd, bp, len);
            if (cnt <= 0) {
                printf("### nsTraceMalloc: write failed or wrote 0 bytes!\n");
                return;
            }
            bp += cnt;
            len -= cnt;
        } while (len > 0);
    }
    fp->simsize += len;
}

static void log_byte(logfile *fp, char byte)
{
    if (fp->pos == fp->bufsize)
        flush_logfile(fp);
    fp->buf[fp->pos++] = byte;
}

static void log_string(logfile *fp, const char *str)
{
    int len, rem, cnt;

    len = strlen(str) + 1;  /* include null terminator */
    while ((rem = fp->pos + len - fp->bufsize) > 0) {
        cnt = len - rem;
        memcpy(&fp->buf[fp->pos], str, cnt);
        str += cnt;
        fp->pos += cnt;
        flush_logfile(fp);
        len = rem;
    }
    memcpy(&fp->buf[fp->pos], str, len);
    fp->pos += len;
}

static void log_filename(logfile* fp, const char* filename)
{
    if (strlen(filename) < 512) {
        char *bp, *cp, buf[512];

        bp = strstr(strcpy(buf, filename), "mozilla");
        if (!bp)
            bp = buf;

        for (cp = bp; *cp; cp++) {
            if (*cp == '\\')
                *cp = '/';
        }

        filename = bp;
    }
    log_string(fp, filename);
}

static void log_uint32(logfile *fp, uint32_t ival)
{
    if (ival < 0x80) {
        /* 0xxx xxxx */
        log_byte(fp, (char) ival);
    } else if (ival < 0x4000) {
        /* 10xx xxxx xxxx xxxx */
        log_byte(fp, (char) ((ival >> 8) | 0x80));
        log_byte(fp, (char) (ival & 0xff));
    } else if (ival < 0x200000) {
        /* 110x xxxx xxxx xxxx xxxx xxxx */
        log_byte(fp, (char) ((ival >> 16) | 0xc0));
        log_byte(fp, (char) ((ival >> 8) & 0xff));
        log_byte(fp, (char) (ival & 0xff));
    } else if (ival < 0x10000000) {
        /* 1110 xxxx xxxx xxxx xxxx xxxx xxxx xxxx */
        log_byte(fp, (char) ((ival >> 24) | 0xe0));
        log_byte(fp, (char) ((ival >> 16) & 0xff));
        log_byte(fp, (char) ((ival >> 8) & 0xff));
        log_byte(fp, (char) (ival & 0xff));
    } else {
        /* 1111 0000 xxxx xxxx xxxx xxxx xxxx xxxx xxxx xxxx */
        log_byte(fp, (char) 0xf0);
        log_byte(fp, (char) ((ival >> 24) & 0xff));
        log_byte(fp, (char) ((ival >> 16) & 0xff));
        log_byte(fp, (char) ((ival >> 8) & 0xff));
        log_byte(fp, (char) (ival & 0xff));
    }
}
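
/*
 * Worked example of the variable-length encoding above (big-endian,
 * UTF-8-like): log_uint32(fp, 0x1234) falls in the two-byte range
 * (0x80 <= 0x1234 < 0x4000), so it emits ((0x1234 >> 8) | 0x80) = 0x92
 * followed by (0x1234 & 0xff) = 0x34. Small serials thus cost a single
 * byte, while a full 32-bit value costs five.
 */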

static void log_event1(logfile *fp, char event, uint32_t serial)
{
    log_byte(fp, event);
    log_uint32(fp, (uint32_t) serial);
}

static void log_event2(logfile *fp, char event, uint32_t serial, size_t size)
{
    log_event1(fp, event, serial);
    log_uint32(fp, (uint32_t) size);
}

static void log_event3(logfile *fp, char event, uint32_t serial, size_t oldsize,
                       size_t size)
{
    log_event2(fp, event, serial, oldsize);
    log_uint32(fp, (uint32_t) size);
}

static void log_event4(logfile *fp, char event, uint32_t serial, uint32_t ui2,
                       uint32_t ui3, uint32_t ui4)
{
    log_event3(fp, event, serial, ui2, ui3);
    log_uint32(fp, ui4);
}

static void log_event5(logfile *fp, char event, uint32_t serial, uint32_t ui2,
                       uint32_t ui3, uint32_t ui4, uint32_t ui5)
{
    log_event4(fp, event, serial, ui2, ui3, ui4);
    log_uint32(fp, ui5);
}

static void log_event6(logfile *fp, char event, uint32_t serial, uint32_t ui2,
                       uint32_t ui3, uint32_t ui4, uint32_t ui5, uint32_t ui6)
{
    log_event5(fp, event, serial, ui2, ui3, ui4, ui5);
    log_uint32(fp, ui6);
}

static void log_event7(logfile *fp, char event, uint32_t serial, uint32_t ui2,
                       uint32_t ui3, uint32_t ui4, uint32_t ui5, uint32_t ui6,
                       uint32_t ui7)
{
    log_event6(fp, event, serial, ui2, ui3, ui4, ui5, ui6);
    log_uint32(fp, ui7);
}

static void log_event8(logfile *fp, char event, uint32_t serial, uint32_t ui2,
                       uint32_t ui3, uint32_t ui4, uint32_t ui5, uint32_t ui6,
                       uint32_t ui7, uint32_t ui8)
{
    log_event7(fp, event, serial, ui2, ui3, ui4, ui5, ui6, ui7);
    log_uint32(fp, ui8);
}

typedef struct callsite callsite;

struct callsite {
    void*       pc;
    uint32_t    serial;
    lfd_set     lfdset;
    const char  *name;     /* pointer to string owned by methods table */
    const char  *library;  /* pointer to string owned by libraries table */
    int         offset;
    callsite    *parent;
    callsite    *siblings;
    callsite    *kids;
};

/* NB: these counters are incremented and decremented only within tmlock. */
static uint32_t library_serial_generator = 0;
static uint32_t method_serial_generator = 0;
static uint32_t callsite_serial_generator = 0;
static uint32_t tmstats_serial_generator = 0;
static uint32_t filename_serial_generator = 0;

/* Root of the tree of callsites, the sum of all (cycle-compressed) stacks. */
static callsite calltree_root =
    {0, 0, LFD_SET_STATIC_INITIALIZER, NULL, NULL, 0, NULL, NULL, NULL};

/* A fake pc for when stacks are disabled; must be different from the
   pc in calltree_root. */
#define STACK_DISABLED_PC ((void*)1)

/* Basic instrumentation. */
static nsTMStats tmstats = NS_TMSTATS_STATIC_INITIALIZER;

/* Parent with the most kids (tmstats.calltree_maxkids). */
static callsite *calltree_maxkids_parent;

/* Calltree leaf for path with deepest stack backtrace. */
static callsite *calltree_maxstack_top;

/* Last site (i.e., calling pc) that recurred during a backtrace. */
static callsite *last_callsite_recurrence;

static void log_tmstats(logfile *fp)
{
    log_event1(fp, TM_EVENT_STATS, ++tmstats_serial_generator);
    log_uint32(fp, tmstats.calltree_maxstack);
    log_uint32(fp, tmstats.calltree_maxdepth);
    log_uint32(fp, tmstats.calltree_parents);
    log_uint32(fp, tmstats.calltree_maxkids);
    log_uint32(fp, tmstats.calltree_kidhits);
    log_uint32(fp, tmstats.calltree_kidmisses);
    log_uint32(fp, tmstats.calltree_kidsteps);
    log_uint32(fp, tmstats.callsite_recurrences);
    log_uint32(fp, tmstats.backtrace_calls);
    log_uint32(fp, tmstats.backtrace_failures);
    log_uint32(fp, tmstats.btmalloc_failures);
    log_uint32(fp, tmstats.dladdr_failures);
    log_uint32(fp, tmstats.malloc_calls);
    log_uint32(fp, tmstats.malloc_failures);
    log_uint32(fp, tmstats.calloc_calls);
    log_uint32(fp, tmstats.calloc_failures);
    log_uint32(fp, tmstats.realloc_calls);
    log_uint32(fp, tmstats.realloc_failures);
    log_uint32(fp, tmstats.free_calls);
    log_uint32(fp, tmstats.null_free_calls);
    log_uint32(fp, calltree_maxkids_parent ? calltree_maxkids_parent->serial
                                           : 0);
    log_uint32(fp, calltree_maxstack_top ? calltree_maxstack_top->serial : 0);
}

static void *generic_alloctable(void *pool, size_t size)
{
    return __libc_malloc(size);
}

static void generic_freetable(void *pool, void *item)
{
    __libc_free(item);
}

typedef struct lfdset_entry {
    PLHashEntry base;
    lfd_set     lfdset;
} lfdset_entry;

static PLHashEntry *lfdset_allocentry(void *pool, const void *key)
{
    lfdset_entry *le = __libc_malloc(sizeof *le);
    if (le)
        LFD_ZERO(&le->lfdset);
    return &le->base;
}

static void lfdset_freeentry(void *pool, PLHashEntry *he, unsigned flag)
{
    lfdset_entry *le;

    if (flag != HT_FREE_ENTRY)
        return;
    le = (lfdset_entry*) he;
    __libc_free((void*) le);
}

static PLHashAllocOps lfdset_hashallocops = {
    generic_alloctable, generic_freetable,
    lfdset_allocentry,  lfdset_freeentry
};
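
/*
 * Design note: routing the hash tables' internal allocations through
 * __libc_malloc/__libc_free (rather than the wrapped malloc/free below)
 * keeps table maintenance from recursing back into the tracer while
 * tmlock is held.
 */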

/* Table of library pathnames mapped to logged 'L' record serial numbers. */
static PLHashTable *libraries = NULL;

/* Table of filename pathnames mapped to logged 'G' record serial numbers. */
static PLHashTable *filenames = NULL;

/* Table mapping method names to logged 'N' record serial numbers. */
static PLHashTable *methods = NULL;

/*
 * Presumes that its caller is holding tmlock, but may temporarily exit
 * the lock.
 */
static callsite *
calltree(void **stack, size_t num_stack_entries, tm_thread *t)
{
    logfile *fp = logfp;
    void *pc;
    uint32_t nkids;
    callsite *parent, *site, **csp, *tmp;
    int maxstack;
    uint32_t library_serial, method_serial, filename_serial;
    const char *library, *method, *filename;
    char *slash;
    PLHashNumber hash;
    PLHashEntry **hep, *he;
    lfdset_entry *le;
    size_t stack_index;
    nsCodeAddressDetails details;
    nsresult rv;

    maxstack = (num_stack_entries > tmstats.calltree_maxstack);
    if (maxstack) {
        /* these two are the same, although that used to be less clear */
        tmstats.calltree_maxstack = num_stack_entries;
        tmstats.calltree_maxdepth = num_stack_entries;
    }

    /* Reverse the stack again, finding and building a path in the tree. */
    parent = &calltree_root;
    stack_index = num_stack_entries;
    do {
        --stack_index;
        pc = stack[stack_index];

        csp = &parent->kids;
        while ((site = *csp) != NULL) {
            if (site->pc == pc) {
                tmstats.calltree_kidhits++;

                /* Put the most recently used site at the front of siblings. */
                *csp = site->siblings;
                site->siblings = parent->kids;
                parent->kids = site;

                /* Check whether we've logged for this site and logfile yet. */
                if (!LFD_TEST(fp->lfd, &site->lfdset)) {
                    /*
                     * Some other logfile put this site in the calltree. We
                     * must log an event for site, and possibly first for its
                     * method and/or library. Note the code after the while
                     * loop that tests if (!site).
                     */
                    break;
                }

                /* Site already built and logged to fp -- go up the stack. */
                goto upward;
            }
            tmstats.calltree_kidsteps++;
            csp = &site->siblings;
        }

        if (!site) {
            tmstats.calltree_kidmisses++;

            /* Check for recursion: see if pc is on our ancestor line. */
            for (site = parent; site; site = site->parent) {
                if (site->pc == pc) {
                    tmstats.callsite_recurrences++;
                    last_callsite_recurrence = site;
                    goto upward;
                }
            }
        }

        /*
         * Not in tree at all, or not logged to fp: let's find our symbolic
         * callsite info.
         */

        if (!stacks_enabled) {
            /*
             * Fake the necessary information for our single fake stack
             * frame.
             */
            PL_strncpyz(details.library, "stacks_disabled",
                        sizeof(details.library));
            details.loffset = 0;
            details.filename[0] = '\0';
            details.lineno = 0;
            details.function[0] = '\0';
            details.foffset = 0;
        } else {
            /*
             * NS_DescribeCodeAddress can (on Linux) acquire a lock inside
             * the shared library loader. Another thread might call malloc
             * while holding that lock (when loading a shared library). So
             * we have to exit tmlock around this call. For details, see
             * https://bugzilla.mozilla.org/show_bug.cgi?id=363334#c3
             *
             * We could be more efficient by building the nodes in the
             * calltree, exiting the monitor once to describe all of them,
             * and then filling in the descriptions for any that hadn't been
             * described already. But this is easier for now.
             */
            TM_EXIT_LOCK(t);
            rv = NS_DescribeCodeAddress(pc, &details);
            TM_ENTER_LOCK(t);
            if (NS_FAILED(rv)) {
                tmstats.dladdr_failures++;
                goto fail;
            }
        }

        /* Check whether we need to emit a library trace record. */
        library_serial = 0;
        library = NULL;
        if (details.library[0]) {
            if (!libraries) {
                libraries = PL_NewHashTable(100, PL_HashString,
                                            PL_CompareStrings,
                                            PL_CompareValues,
                                            &lfdset_hashallocops, NULL);
                if (!libraries) {
                    tmstats.btmalloc_failures++;
                    goto fail;
                }
            }
            hash = PL_HashString(details.library);
            hep = PL_HashTableRawLookup(libraries, hash, details.library);
            he = *hep;
            if (he) {
                library = (char*) he->key;
                library_serial = (uint32_t) NS_PTR_TO_INT32(he->value);
                le = (lfdset_entry *) he;
                if (LFD_TEST(fp->lfd, &le->lfdset)) {
                    /* We already logged an event on fp for this library. */
                    le = NULL;
                }
            } else {
                library = strdup(details.library);
                if (library) {
                    library_serial = ++library_serial_generator;
                    he = PL_HashTableRawAdd(libraries, hep, hash, library,
                                            NS_INT32_TO_PTR(library_serial));
                }
                if (!he) {
                    tmstats.btmalloc_failures++;
                    goto fail;
                }
                le = (lfdset_entry *) he;
            }
            if (le) {
                /* Need to log an event to fp for this lib. */
                slash = strrchr(library, '/');
                log_event1(fp, TM_EVENT_LIBRARY, library_serial);
                log_string(fp, slash ? slash + 1 : library);
                LFD_SET(fp->lfd, &le->lfdset);
            }
        }

        /* For compatibility with current log format, always emit a
         * filename trace record, using "noname" / 0 when no file name
         * is available. */
        filename_serial = 0;
        filename = details.filename[0] ? details.filename : "noname";
        if (!filenames) {
            filenames = PL_NewHashTable(100, PL_HashString,
                                        PL_CompareStrings, PL_CompareValues,
                                        &lfdset_hashallocops, NULL);
            if (!filenames) {
                tmstats.btmalloc_failures++;
                return NULL;
            }
        }
        hash = PL_HashString(filename);
        hep = PL_HashTableRawLookup(filenames, hash, filename);
        he = *hep;
        if (he) {
            filename = (char*) he->key;
            filename_serial = (uint32_t) NS_PTR_TO_INT32(he->value);
            le = (lfdset_entry *) he;
            if (LFD_TEST(fp->lfd, &le->lfdset)) {
                /* We already logged an event on fp for this filename. */
                le = NULL;
            }
        } else {
            filename = strdup(filename);
            if (filename) {
                filename_serial = ++filename_serial_generator;
                he = PL_HashTableRawAdd(filenames, hep, hash, filename,
                                        NS_INT32_TO_PTR(filename_serial));
            }
            if (!he) {
                tmstats.btmalloc_failures++;
                return NULL;
            }
            le = (lfdset_entry *) he;
        }
        if (le) {
            /* Need to log an event to fp for this filename. */
            log_event1(fp, TM_EVENT_FILENAME, filename_serial);
            log_filename(fp, filename);
            LFD_SET(fp->lfd, &le->lfdset);
        }

        if (!details.function[0]) {
            PR_snprintf(details.function, sizeof(details.function),
                        "%s+%X", library ? library : "main", details.loffset);
        }

        /* Emit an 'N' (for New method, 'M' is for malloc!) event if needed. */
        method_serial = 0;
        if (!methods) {
            methods = PL_NewHashTable(10000, PL_HashString,
                                      PL_CompareStrings, PL_CompareValues,
                                      &lfdset_hashallocops, NULL);
            if (!methods) {
                tmstats.btmalloc_failures++;
                goto fail;
            }
        }
        hash = PL_HashString(details.function);
        hep = PL_HashTableRawLookup(methods, hash, details.function);
        he = *hep;
        if (he) {
            method = (char*) he->key;
            method_serial = (uint32_t) NS_PTR_TO_INT32(he->value);
            le = (lfdset_entry *) he;
            if (LFD_TEST(fp->lfd, &le->lfdset)) {
                /* We already logged an event on fp for this method. */
                le = NULL;
            }
        } else {
            method = strdup(details.function);
            if (method) {
                method_serial = ++method_serial_generator;
                he = PL_HashTableRawAdd(methods, hep, hash, method,
                                        NS_INT32_TO_PTR(method_serial));
            }
            if (!he) {
                tmstats.btmalloc_failures++;
                return NULL;
            }
            le = (lfdset_entry *) he;
        }
        if (le) {
            log_event4(fp, TM_EVENT_METHOD, method_serial, library_serial,
                       filename_serial, details.lineno);
            log_string(fp, method);
            LFD_SET(fp->lfd, &le->lfdset);
        }

        /* Create a new callsite record. */
        if (!site) {
            site = __libc_malloc(sizeof(callsite));
            if (!site) {
                tmstats.btmalloc_failures++;
                goto fail;
            }

            /* Update parent and max-kids-per-parent stats. */
            if (!parent->kids)
                tmstats.calltree_parents++;
            nkids = 1;
            for (tmp = parent->kids; tmp; tmp = tmp->siblings)
                nkids++;
            if (nkids > tmstats.calltree_maxkids) {
                tmstats.calltree_maxkids = nkids;
                calltree_maxkids_parent = parent;
            }

            /* Insert the new site into the tree. */
            site->pc = pc;
            site->serial = ++callsite_serial_generator;
            LFD_ZERO(&site->lfdset);
            site->name = method;
            site->library = library;
            site->offset = details.loffset;
            site->parent = parent;
            site->siblings = parent->kids;
            parent->kids = site;
            site->kids = NULL;
        }

        /* Log the site with its parent, method, and offset. */
        log_event4(fp, TM_EVENT_CALLSITE, site->serial, parent->serial,
                   method_serial, details.foffset);
        LFD_SET(fp->lfd, &site->lfdset);

      upward:
        parent = site;
    } while (stack_index > 0);

    if (maxstack)
        calltree_maxstack_top = site;

    return site;

  fail:
    return NULL;
}
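
/*
 * Net effect of calltree: each unique stack becomes one root-to-leaf
 * path of TM_EVENT_CALLSITE records, parents logged before kids, and
 * the returned leaf's serial is what the allocation events reference.
 * The lfdset tests ensure a given path is written at most once per
 * logfile.
 */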

/*
 * Buffer the stack from top at low index to bottom at high, so that we can
 * reverse it in calltree.
 */
static void
stack_callback(void *pc, void *sp, void *closure)
{
    stack_buffer_info *info = (stack_buffer_info*) closure;

    /*
     * If we run out of buffer, keep incrementing entries so that
     * backtrace can call us again with a bigger buffer.
     */
    if (info->entries < info->size)
        info->buffer[info->entries] = pc;
    ++info->entries;
}

/*
 * The caller MUST NOT be holding tmlock when calling backtrace.
 * On return, if *immediate_abort is set, then the return value is NULL
 * and the thread is in a very dangerous situation (e.g. holding
 * sem_pool_lock in Mac OS X pthreads); the caller should bail out
 * without doing anything (such as acquiring locks).
 */
static callsite *
backtrace(tm_thread *t, int skipFrames, int *immediate_abort)
{
    callsite *site;
    stack_buffer_info *info = &t->backtrace_buf;
    void **new_stack_buffer;
    size_t new_stack_buffer_size;
    nsresult rv;

    t->suppress_tracing++;

    if (!stacks_enabled) {
#if defined(XP_MACOSX)
        /* Walk the stack, even if stacks_enabled is false. We do this to
           check if we must set immediate_abort. */
        info->entries = 0;
        rv = NS_StackWalk(stack_callback, skipFrames, /* maxFrames */ 0, info,
                          0, NULL);
        *immediate_abort = rv == NS_ERROR_UNEXPECTED;
        if (rv == NS_ERROR_UNEXPECTED || info->entries == 0) {
            t->suppress_tracing--;
            return NULL;
        }
#endif

        /*
         * Create a single fake stack frame so that all the tools get
         * data in the correct format.
         */
        *immediate_abort = 0;
        if (info->size < 1) {
            PR_ASSERT(!info->buffer); /* !info->size == !info->buffer */
            info->buffer = __libc_malloc(1 * sizeof(void*));
            if (!info->buffer) {
                t->suppress_tracing--; /* balance the increment above */
                return NULL;
            }
            info->size = 1;
        }

        info->entries = 1;
        info->buffer[0] = STACK_DISABLED_PC;
    } else {
        /*
         * NS_StackWalk can (on Windows) acquire a lock inside the shared
         * library loader. Another thread might call malloc while holding
         * that lock (when loading a shared library). So we can't be in
         * tmlock during this call. For details, see
         * https://bugzilla.mozilla.org/show_bug.cgi?id=374829#c8
         */

        /*
         * skipFrames == 0 means |backtrace| should show up, so don't use
         * skipFrames + 1.
         * NB: this call is repeated below if the buffer is too small.
         */
        info->entries = 0;
        rv = NS_StackWalk(stack_callback, skipFrames, /* maxFrames */ 0, info,
                          0, NULL);
        *immediate_abort = rv == NS_ERROR_UNEXPECTED;
        if (rv == NS_ERROR_UNEXPECTED || info->entries == 0) {
            t->suppress_tracing--;
            return NULL;
        }

        /*
         * To avoid allocating in stack_callback (which, on Windows, is
         * called on a different thread from the one we're running on here),
         * reallocate here if it didn't have a big enough buffer (which
         * includes the first call on any thread), and call it again.
         */
        if (info->entries > info->size) {
            new_stack_buffer_size = 2 * info->entries;
            new_stack_buffer =
                __libc_realloc(info->buffer,
                               new_stack_buffer_size * sizeof(void*));
            if (!new_stack_buffer) {
                t->suppress_tracing--; /* balance the increment above */
                return NULL;
            }
            info->buffer = new_stack_buffer;
            info->size = new_stack_buffer_size;

            /* and call NS_StackWalk again */
            info->entries = 0;
            NS_StackWalk(stack_callback, skipFrames, /* maxFrames */ 0, info,
                         0, NULL);

            /* same stack */
            PR_ASSERT(info->entries * 2 == new_stack_buffer_size);
        }
    }

    TM_ENTER_LOCK(t);

    site = calltree(info->buffer, info->entries, t);

    tmstats.backtrace_calls++;
    if (!site) {
        tmstats.backtrace_failures++;
        PR_ASSERT(tmstats.backtrace_failures < 100);
    }
    TM_EXIT_LOCK(t);

    t->suppress_tracing--;
    return site;
}

typedef struct allocation {
    PLHashEntry entry;
    size_t      size;
    FILE        *trackfp;   /* for allocation tracking */
} allocation;

#define ALLOC_HEAP_SIZE 150000

static allocation alloc_heap[ALLOC_HEAP_SIZE];
static allocation *alloc_freelist = NULL;
static int alloc_heap_initialized = 0;

static PLHashEntry *alloc_allocentry(void *pool, const void *key)
{
    allocation **listp, *alloc;
    int n;

    if (!alloc_heap_initialized) {
        n = ALLOC_HEAP_SIZE;
        listp = &alloc_freelist;
        for (alloc = alloc_heap; --n >= 0; alloc++) {
            *listp = alloc;
            listp = (allocation**) &alloc->entry.next;
        }
        *listp = NULL;
        alloc_heap_initialized = 1;
    }

    listp = &alloc_freelist;
    alloc = *listp;
    if (!alloc)
        return __libc_malloc(sizeof(allocation));
    *listp = (allocation*) alloc->entry.next;
    return &alloc->entry;
}

static void alloc_freeentry(void *pool, PLHashEntry *he, unsigned flag)
{
    allocation *alloc;

    if (flag != HT_FREE_ENTRY)
        return;
    alloc = (allocation*) he;
    if ((ptrdiff_t)(alloc - alloc_heap) < (ptrdiff_t)ALLOC_HEAP_SIZE) {
        alloc->entry.next = &alloc_freelist->entry;
        alloc_freelist = alloc;
    } else {
        __libc_free((void*) alloc);
    }
}

static PLHashAllocOps alloc_hashallocops = {
    generic_alloctable, generic_freetable,
    alloc_allocentry,   alloc_freeentry
};
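
/*
 * Design note: alloc_heap is a static pool threaded onto alloc_freelist
 * on first use, so tracking an allocation does not normally call malloc
 * itself; only overflow past ALLOC_HEAP_SIZE live entries falls back to
 * __libc_malloc, and alloc_freeentry decides where to return an entry
 * by testing whether its address lies inside the static pool.
 */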

static PLHashNumber hash_pointer(const void *key)
{
    /* Cast via uintptr_t to avoid pointer-truncation warnings on 64-bit. */
    return (PLHashNumber)(uintptr_t) key;
}

static PLHashTable *allocations = NULL;

static PLHashTable *new_allocations(void)
{
    allocations = PL_NewHashTable(200000, hash_pointer,
                                  PL_CompareValues, PL_CompareValues,
                                  &alloc_hashallocops, NULL);
    return allocations;
}

#define get_allocations() (allocations ? allocations : new_allocations())

#if defined(XP_MACOSX)

/* from malloc.c in Libc */
typedef void
malloc_logger_t(uint32_t type, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3,
                uintptr_t result, uint32_t num_hot_frames_to_skip);

extern malloc_logger_t *malloc_logger;

#define MALLOC_LOG_TYPE_ALLOCATE 2
#define MALLOC_LOG_TYPE_DEALLOCATE 4
#define MALLOC_LOG_TYPE_HAS_ZONE 8
#define MALLOC_LOG_TYPE_CLEARED 64

static void
my_malloc_logger(uint32_t type, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3,
                 uintptr_t result, uint32_t num_hot_frames_to_skip)
{
    uintptr_t all_args[3] = { arg1, arg2, arg3 };
    uintptr_t *args = all_args + ((type & MALLOC_LOG_TYPE_HAS_ZONE) ? 1 : 0);

    uint32_t alloc_type =
        type & (MALLOC_LOG_TYPE_ALLOCATE | MALLOC_LOG_TYPE_DEALLOCATE);
    tm_thread *t = tm_get_thread();

    if (alloc_type == (MALLOC_LOG_TYPE_ALLOCATE | MALLOC_LOG_TYPE_DEALLOCATE)) {
        ReallocCallback((void*)args[0], (void*)result, args[1], 0, 0, t);
    } else if (alloc_type == MALLOC_LOG_TYPE_ALLOCATE) {
        /*
         * We don't get size/count information for calloc, so just use
         * MallocCallback.
         */
        MallocCallback((void*)result, args[0], 0, 0, t);
    } else if (alloc_type == MALLOC_LOG_TYPE_DEALLOCATE) {
        FreeCallback((void*)args[0], 0, 0, t);
    }
}

static void
StartupHooker(void)
{
    PR_ASSERT(!malloc_logger);
    malloc_logger = my_malloc_logger;
}

static void
ShutdownHooker(void)
{
    PR_ASSERT(malloc_logger == my_malloc_logger);
    malloc_logger = NULL;
}
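
/*
 * Design note: malloc_logger is the logging hook that Apple's Libc
 * allocator (see malloc.c there) invokes on zone operations, so
 * installing my_malloc_logger observes allocations without replacing
 * the allocator itself. Allocate+deallocate together means realloc,
 * and the MALLOC_LOG_TYPE_HAS_ZONE bit tells us to skip the leading
 * zone argument when decoding all_args.
 */
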
#elif defined(XP_UNIX)

/*
 * We can't use glibc's malloc hooks because they can't be used in a
 * threadsafe manner. They require unsetting the hooks to call into the
 * original malloc implementation, and then resetting them when the
 * original implementation returns. If another thread calls the same
 * allocation function while the hooks are unset, we have no chance to
 * intercept the call.
 */

NS_EXTERNAL_VIS_(__ptr_t)
malloc(size_t size)
{
    uint32_t start, end;
    __ptr_t ptr;
    tm_thread *t;

    if (!tracing_enabled || !PR_Initialized() ||
        (t = tm_get_thread())->suppress_tracing != 0) {
        return __libc_malloc(size);
    }

    t->suppress_tracing++;
    start = PR_IntervalNow();
    ptr = __libc_malloc(size);
    end = PR_IntervalNow();
    t->suppress_tracing--;

    MallocCallback(ptr, size, start, end, t);

    return ptr;
}
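
/*
 * The wrappers below all follow the shape of malloc above: fall through
 * to the __libc_* entry point when tracing is off or suppressed on this
 * thread, otherwise bracket the real call with PR_IntervalNow()
 * timestamps and report it via the matching *Callback. Defining strong
 * symbols named malloc/calloc/realloc/free relies on glibc exporting
 * the underlying allocator as __libc_malloc and friends.
 */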

NS_EXTERNAL_VIS_(__ptr_t)
calloc(size_t count, size_t size)
{
    uint32_t start, end;
    __ptr_t ptr;
    tm_thread *t;

    if (!tracing_enabled || !PR_Initialized() ||
        (t = tm_get_thread())->suppress_tracing != 0) {
        return __libc_calloc(count, size);
    }

    t->suppress_tracing++;
    start = PR_IntervalNow();
    ptr = __libc_calloc(count, size);
    end = PR_IntervalNow();
    t->suppress_tracing--;

    CallocCallback(ptr, count, size, start, end, t);

    return ptr;
}

NS_EXTERNAL_VIS_(__ptr_t)
realloc(__ptr_t oldptr, size_t size)
{
    uint32_t start, end;
    __ptr_t ptr;
    tm_thread *t;

    if (!tracing_enabled || !PR_Initialized() ||
        (t = tm_get_thread())->suppress_tracing != 0) {
        return __libc_realloc(oldptr, size);
    }

    t->suppress_tracing++;
    start = PR_IntervalNow();
    ptr = __libc_realloc(oldptr, size);
    end = PR_IntervalNow();
    t->suppress_tracing--;

    /* FIXME bug 392008: We could race with reallocation of oldptr. */
    ReallocCallback(oldptr, ptr, size, start, end, t);

    return ptr;
}

NS_EXTERNAL_VIS_(void*)
valloc(size_t size)
{
    uint32_t start, end;
    __ptr_t ptr;
    tm_thread *t;

    if (!tracing_enabled || !PR_Initialized() ||
        (t = tm_get_thread())->suppress_tracing != 0) {
        return __libc_valloc(size);
    }

    t->suppress_tracing++;
    start = PR_IntervalNow();
    ptr = __libc_valloc(size);
    end = PR_IntervalNow();
    t->suppress_tracing--;

    MallocCallback(ptr, size, start, end, t);

    return ptr;
}

NS_EXTERNAL_VIS_(void*)
memalign(size_t boundary, size_t size)
{
    uint32_t start, end;
    __ptr_t ptr;
    tm_thread *t;

    if (!tracing_enabled || !PR_Initialized() ||
        (t = tm_get_thread())->suppress_tracing != 0) {
        return __libc_memalign(boundary, size);
    }

    t->suppress_tracing++;
    start = PR_IntervalNow();
    ptr = __libc_memalign(boundary, size);
    end = PR_IntervalNow();
    t->suppress_tracing--;

    MallocCallback(ptr, size, start, end, t);

    return ptr;
}

NS_EXTERNAL_VIS_(int)
posix_memalign(void **memptr, size_t alignment, size_t size)
{
    __ptr_t ptr = memalign(alignment, size);
    if (!ptr)
        return ENOMEM;
    *memptr = ptr;
    return 0;
}

NS_EXTERNAL_VIS_(void)
free(__ptr_t ptr)
{
    uint32_t start, end;
    tm_thread *t;

    if (!tracing_enabled || !PR_Initialized() ||
        (t = tm_get_thread())->suppress_tracing != 0) {
        __libc_free(ptr);
        return;
    }

    t->suppress_tracing++;
    start = PR_IntervalNow();
    __libc_free(ptr);
    end = PR_IntervalNow();
    t->suppress_tracing--;

    /* FIXME bug 392008: We could race with reallocation of ptr. */

    FreeCallback(ptr, start, end, t);
}

NS_EXTERNAL_VIS_(void)
cfree(void *ptr)
{
    free(ptr);
}

#define StartupHooker()  PR_BEGIN_MACRO PR_END_MACRO
#define ShutdownHooker() PR_BEGIN_MACRO PR_END_MACRO

#elif defined(XP_WIN32)

/* See nsWinTraceMalloc.cpp. */

#endif

static const char magic[] = NS_TRACE_MALLOC_MAGIC;

static void
log_header(int logfd)
{
    uint32_t ticksPerSec = PR_htonl(PR_TicksPerSecond());
    (void) write(logfd, magic, NS_TRACE_MALLOC_MAGIC_SIZE);
    (void) write(logfd, &ticksPerSec, sizeof ticksPerSec);
}
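
/*
 * A log therefore begins with the NS_TRACE_MALLOC_MAGIC signature
 * followed by one network-order (PR_htonl) word giving PR_IntervalNow
 * ticks per second; readers need the latter to convert the interval
 * timestamps carried by the event records.
 */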

PR_IMPLEMENT(void)
NS_TraceMallocStartup(int logfd)
{
    const char* stack_disable_env;

    /* We must be running on the primordial thread. */
    PR_ASSERT(tracing_enabled == 0);
    PR_ASSERT(logfp == &default_logfile);
    tracing_enabled = (logfd >= 0);

    if (logfd >= 3)
        MozillaRegisterDebugFD(logfd);

    /* stacks are disabled if this env var is set to a non-empty value */
    stack_disable_env = PR_GetEnv("NS_TRACE_MALLOC_DISABLE_STACKS");
    stacks_enabled = !stack_disable_env || !*stack_disable_env;

    if (tracing_enabled) {
        PR_ASSERT(logfp->simsize == 0); /* didn't overflow startup buffer */

        /* Log everything in logfp (aka default_logfile)'s buffer to logfd. */
        logfp->fd = logfd;
        logfile_list = &default_logfile;
        logfp->prevp = &logfile_list;
        logfile_tail = &logfp->next;
        log_header(logfd);
    }

    RegisterTraceMallocShutdown();

    tmlock = PR_NewLock();
    (void) tm_get_thread(); /* ensure index initialization while it's easy */

    if (tracing_enabled)
        StartupHooker();
}

/*
 * Options for log files, with the log file name either as the next option
 * or separated by '=' (e.g. "./mozilla --trace-malloc malloc.log" or
 * "./mozilla --trace-malloc=malloc.log").
 */
static const char TMLOG_OPTION[] = "--trace-malloc";
static const char SDLOG_OPTION[] = "--shutdown-leaks";

#define SHOULD_PARSE_ARG(name_, log_, arg_) \
    (0 == strncmp(arg_, name_, sizeof(name_) - 1))

#define PARSE_ARG(name_, log_, argv_, i_, consumed_)        \
    PR_BEGIN_MACRO                                          \
        char _nextchar = argv_[i_][sizeof(name_) - 1];      \
        if (_nextchar == '=') {                             \
            log_ = argv_[i_] + sizeof(name_);               \
            consumed_ = 1;                                  \
        } else if (_nextchar == '\0') {                     \
            log_ = argv_[i_+1];                             \
            consumed_ = 2;                                  \
        }                                                   \
    PR_END_MACRO
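
/*
 * Example: with argv[i] == "--trace-malloc=malloc.log", PARSE_ARG finds
 * '=' just past the option name, points log_ at "malloc.log", and
 * consumes one argument; with argv[i] == "--trace-malloc" it points
 * log_ at argv[i+1] and consumes two.
 */
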
PR_IMPLEMENT(int)
NS_TraceMallocStartupArgs(int argc, char **argv)
{
    int i, logfd = -1, consumed, logflags;
    char *tmlogname = NULL, *sdlogname_local = NULL;

    /*
     * Look for the --trace-malloc <logfile> option early, to avoid missing
     * early mallocs (we miss static constructors whose output overflows the
     * log file's static 16K output buffer).
     */
    for (i = 1; i < argc; i += consumed) {
        consumed = 0;
        if (SHOULD_PARSE_ARG(TMLOG_OPTION, tmlogname, argv[i]))
            PARSE_ARG(TMLOG_OPTION, tmlogname, argv, i, consumed);
        else if (SHOULD_PARSE_ARG(SDLOG_OPTION, sdlogname_local, argv[i]))
            PARSE_ARG(SDLOG_OPTION, sdlogname_local, argv, i, consumed);

        if (consumed) {
#ifndef XP_WIN32 /* shifting argv like this crashes on Windows, so skip it */
            int j;
            /* Now remove --trace-malloc and its argument from argv. */
            argc -= consumed;
            for (j = i; j < argc; ++j)
                argv[j] = argv[j+consumed];
            argv[argc] = NULL;
            consumed = 0; /* don't advance next iteration */
#endif
        } else {
            consumed = 1;
        }
    }

    if (tmlogname) {
#ifdef XP_UNIX
        int pipefds[2];
#endif

        switch (*tmlogname) {
#ifdef XP_UNIX
          case '|':
            if (pipe(pipefds) == 0) {
                pid_t pid = fork();
                if (pid == 0) {
                    /* In child: set up stdin, parse args, and exec. */
                    int maxargc, nargc;
                    char **nargv, *token;

                    if (pipefds[0] != 0) {
                        dup2(pipefds[0], 0);
                        close(pipefds[0]);
                    }
                    close(pipefds[1]);

                    tmlogname = strtok(tmlogname + 1, " \t");
                    maxargc = 3;
                    nargv = (char **) malloc((maxargc+1) * sizeof(char *));
                    if (!nargv) exit(1);
                    nargc = 0;
                    nargv[nargc++] = tmlogname;
                    while ((token = strtok(NULL, " \t")) != NULL) {
                        if (nargc == maxargc) {
                            maxargc *= 2;
                            nargv = (char**)
                                realloc(nargv, (maxargc+1) * sizeof(char*));
                            if (!nargv) exit(1);
                        }
                        nargv[nargc++] = token;
                    }
                    nargv[nargc] = NULL;

                    (void) setsid();
                    execvp(tmlogname, nargv);
                    exit(127);
                }

                if (pid > 0) {
                    /* In parent: set logfd to the pipe's write side. */
                    close(pipefds[0]);
                    logfd = pipefds[1];
                }
            }
            if (logfd < 0) {
                fprintf(stderr,
                        "%s: can't pipe to trace-malloc child process %s: %s\n",
                        argv[0], tmlogname, strerror(errno));
                exit(1);
            }
            break;
#endif /* XP_UNIX */
          case '-':
            /* Don't log from startup, but do prepare to log later. */
            /* XXX traditional meaning of '-' as option argument is "stdin" or "stdout" */
            if (tmlogname[1] == '\0')
                break;
            /* FALL THROUGH */

          default:
            logflags = O_CREAT | O_WRONLY | O_TRUNC;
#if defined(XP_WIN32)
            /*
             * Avoid translations on WIN32.
             */
            logflags |= O_BINARY;
#endif
            logfd = open(tmlogname, logflags, 0644);
            if (logfd < 0) {
                fprintf(stderr,
                        "%s: can't create trace-malloc log named %s: %s\n",
                        argv[0], tmlogname, strerror(errno));
                exit(1);
            }
            break;
        }
    }

    if (sdlogname_local) {
        strncpy(sdlogname, sdlogname_local, sizeof(sdlogname));
        sdlogname[sizeof(sdlogname) - 1] = '\0';
    }

    NS_TraceMallocStartup(logfd);
    return argc;
}

PR_IMPLEMENT(PRBool)
NS_TraceMallocHasStarted(void)
{
    return tmlock ? PR_TRUE : PR_FALSE;
}

PR_IMPLEMENT(void)
NS_TraceMallocShutdown(void)
{
    logfile *fp;

    if (sdlogname[0])
        NS_TraceMallocDumpAllocations(sdlogname);

    if (tmstats.backtrace_failures) {
        fprintf(stderr,
                "TraceMalloc backtrace failures: %lu (malloc %lu dladdr %lu)\n",
                (unsigned long) tmstats.backtrace_failures,
                (unsigned long) tmstats.btmalloc_failures,
                (unsigned long) tmstats.dladdr_failures);
    }
    while ((fp = logfile_list) != NULL) {
        logfile_list = fp->next;
        log_tmstats(fp);
        flush_logfile(fp);
        if (fp->fd >= 0) {
            MozillaUnRegisterDebugFD(fp->fd);
            close(fp->fd);
            fp->fd = -1;
        }
        if (fp != &default_logfile) {
            if (fp == logfp)
                logfp = &default_logfile;
            free((void*) fp);
        }
    }
    if (tmlock) {
        PRLock *lock = tmlock;
        tmlock = NULL;
        PR_DestroyLock(lock);
    }
    if (tracing_enabled) {
        tracing_enabled = 0;
        ShutdownHooker();
    }
}

PR_IMPLEMENT(void)
NS_TraceMallocDisable(void)
{
    tm_thread *t = tm_get_thread();
    logfile *fp;
    uint32_t sample;

    /* Robustify in case of duplicate call. */
    PR_ASSERT(tracing_enabled);
    if (tracing_enabled == 0)
        return;

    TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t);
    for (fp = logfile_list; fp; fp = fp->next)
        flush_logfile(fp);
    sample = --tracing_enabled;
    TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);
    if (sample == 0)
        ShutdownHooker();
}

PR_IMPLEMENT(void)
NS_TraceMallocEnable(void)
{
    tm_thread *t = tm_get_thread();
    uint32_t sample;

    TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t);
    sample = ++tracing_enabled;
    TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);
    if (sample == 1)
        StartupHooker();
}
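
/*
 * Note that tracing_enabled acts as a nesting count for these two:
 * paired Disable/Enable calls only tear down the hooks when the count
 * drops to 0 and only reinstall them when it climbs back to 1.
 */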

PR_IMPLEMENT(int)
NS_TraceMallocChangeLogFD(int fd)
{
    logfile *oldfp, *fp;
    struct stat sb;
    tm_thread *t = tm_get_thread();

    TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t);
    oldfp = logfp;
    if (oldfp->fd != fd) {
        flush_logfile(oldfp);
        fp = get_logfile(fd);
        if (!fp) {
            TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);
            return -2;
        }
        if (fd >= 0 && fstat(fd, &sb) == 0 && sb.st_size == 0)
            log_header(fd);
        logfp = fp;
    }
    TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);
    return oldfp->fd;
}

static int
lfd_clr_enumerator(PLHashEntry *he, int i, void *arg)
{
    lfdset_entry *le = (lfdset_entry*) he;
    logfile *fp = (logfile*) arg;

    LFD_CLR(fp->lfd, &le->lfdset);
    return HT_ENUMERATE_NEXT;
}

static void
lfd_clr_walk(callsite *site, logfile *fp)
{
    callsite *kid;

    LFD_CLR(fp->lfd, &site->lfdset);
    for (kid = site->kids; kid; kid = kid->siblings)
        lfd_clr_walk(kid, fp);
}

PR_IMPLEMENT(void)
NS_TraceMallocCloseLogFD(int fd)
{
    logfile *fp;
    tm_thread *t = tm_get_thread();

    TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t);

    fp = get_logfile(fd);
    if (fp) {
        flush_logfile(fp);
        if (fp == &default_logfile) {
            /* Leave default_logfile in logfile_list with an fd of -1. */
            fp->fd = -1;

            /* NB: we can never free lfd 0, it belongs to default_logfile. */
            PR_ASSERT(fp->lfd == 0);
        } else {
            /* Clear fp->lfd in all possible lfdsets. */
            PL_HashTableEnumerateEntries(libraries, lfd_clr_enumerator, fp);
            PL_HashTableEnumerateEntries(methods, lfd_clr_enumerator, fp);
            lfd_clr_walk(&calltree_root, fp);

            /* Unlink fp from logfile_list, freeing lfd for reallocation. */
            *fp->prevp = fp->next;
            if (!fp->next) {
                PR_ASSERT(logfile_tail == &fp->next);
                logfile_tail = fp->prevp;
            }

            /* Reset logfp if we must, then free fp. */
            if (fp == logfp)
                logfp = &default_logfile;
            free((void*) fp);
        }
    }

    TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);
    MozillaUnRegisterDebugFD(fd);
    close(fd);
}

PR_IMPLEMENT(void)
NS_TraceMallocLogTimestamp(const char *caption)
{
    logfile *fp;
#ifdef XP_UNIX
    struct timeval tv;
#endif
#ifdef XP_WIN32
    struct _timeb tb;
#endif
    tm_thread *t = tm_get_thread();

    TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t);

    fp = logfp;
    log_byte(fp, TM_EVENT_TIMESTAMP);

#ifdef XP_UNIX
    gettimeofday(&tv, NULL);
    log_uint32(fp, (uint32_t) tv.tv_sec);
    log_uint32(fp, (uint32_t) tv.tv_usec);
#endif
#ifdef XP_WIN32
    _ftime(&tb);
    log_uint32(fp, (uint32_t) tb.time);
    log_uint32(fp, (uint32_t) tb.millitm);
#endif
    log_string(fp, caption);

    TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);
}

static void
print_stack(FILE *ofp, callsite *site)
{
    while (site) {
        if (site->name || site->parent) {
            fprintf(ofp, "%s[%s +0x%X]\n",
                    site->name, site->library, site->offset);
        }
        site = site->parent;
    }
}

static const char *allocation_format =
    (sizeof(void*) == 4) ? "\t0x%08zX\n" :
    (sizeof(void*) == 8) ? "\t0x%016zX\n" :
    "UNEXPECTED sizeof(void*)";

static int
allocation_enumerator(PLHashEntry *he, int i, void *arg)
{
    allocation *alloc = (allocation*) he;
    FILE *ofp = (FILE*) arg;
    callsite *site = (callsite*) he->value;

    size_t *p, *end;

    fprintf(ofp, "%p <%s> (%lu)\n",
            he->key,
            nsGetTypeName(he->key),
            (unsigned long) alloc->size);

    for (p = (size_t*) he->key,
         end = (size_t*) ((char*)he->key + alloc->size);
         p < end; ++p) {
        fprintf(ofp, allocation_format, *p);
    }

    print_stack(ofp, site);
    fputc('\n', ofp);
    return HT_ENUMERATE_NEXT;
}

PR_IMPLEMENT(void)
NS_TraceStack(int skip, FILE *ofp)
{
    callsite *site;
    tm_thread *t = tm_get_thread();
    int immediate_abort;

    site = backtrace(t, skip + 1, &immediate_abort);
    while (site) {
        if (site->name || site->parent) {
            fprintf(ofp, "%s[%s +0x%X]\n",
                    site->name, site->library, site->offset);
        }
        site = site->parent;
    }
}

PR_IMPLEMENT(int)
NS_TraceMallocDumpAllocations(const char *pathname)
{
    FILE *ofp;
    int rv;

    tm_thread *t = tm_get_thread();

    TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t);

    ofp = fopen(pathname, WRITE_FLAGS);
    if (ofp) {
        MozillaRegisterDebugFD(fileno(ofp));
        if (allocations) {
            PL_HashTableEnumerateEntries(allocations, allocation_enumerator,
                                         ofp);
        }
        rv = ferror(ofp) ? -1 : 0;
        MozillaUnRegisterDebugFILE(ofp);
        fclose(ofp);
    } else {
        rv = -1;
    }

    TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);

    return rv;
}
1801 |
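
/* Flush the buffered output of every logfile on logfile_list. */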
|
PR_IMPLEMENT(void)
NS_TraceMallocFlushLogfiles(void)
{
    logfile *fp;
    tm_thread *t = tm_get_thread();

    TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t);

    for (fp = logfile_list; fp; fp = fp->next)
        flush_logfile(fp);

    TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);
}
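
/*
 * Start narrating the life of |ptr| on |ofp|: if |ptr| is a live
 * allocation, its trackfp is set so subsequent realloc and free of the
 * block log a message and stack trace there.  For example, a debugging
 * session chasing one suspect block might call
 * NS_TrackAllocation(suspect, stderr).
 */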
|
PR_IMPLEMENT(void)
NS_TrackAllocation(void* ptr, FILE *ofp)
{
    allocation *alloc;
    tm_thread *t = tm_get_thread();

    fprintf(ofp, "Trying to track %p\n", (void*) ptr);
    setlinebuf(ofp);

    TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t);
    if (get_allocations()) {
        alloc = (allocation*)
                *PL_HashTableRawLookup(allocations, hash_pointer(ptr), ptr);
        if (alloc) {
            fprintf(ofp, "Tracking %p\n", (void*) ptr);
            alloc->trackfp = ofp;
        } else {
            fprintf(ofp, "Not tracking %p\n", (void*) ptr);
        }
    }
    TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);
}
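
/*
 * Wrapper callback for malloc.  |start| and |end| presumably bracket the
 * underlying allocation call (the log records |start| and the elapsed
 * |end - start|).  Logs a TM_EVENT_MALLOC record and, if the allocations
 * table exists, records the block's size and callsite.
 */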
|
PR_IMPLEMENT(void)
MallocCallback(void *ptr, size_t size, uint32_t start, uint32_t end, tm_thread *t)
{
    callsite *site;
    PLHashEntry *he;
    allocation *alloc;
    int immediate_abort;

    if (!tracing_enabled || t->suppress_tracing != 0)
        return;

    site = backtrace(t, 2, &immediate_abort);
    if (immediate_abort)
        return;

    TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t);
    tmstats.malloc_calls++;
    if (!ptr) {
        tmstats.malloc_failures++;
    } else {
        if (site) {
            log_event5(logfp, TM_EVENT_MALLOC,
                       site->serial, start, end - start,
                       (uint32_t)NS_PTR_TO_INT32(ptr), size);
        }
        if (get_allocations()) {
            he = PL_HashTableAdd(allocations, ptr, site);
            if (he) {
                alloc = (allocation*) he;
                alloc->size = size;
                alloc->trackfp = NULL;
            }
        }
    }
    TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);
}
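
/*
 * Wrapper callback for calloc.  The logged and recorded size is
 * count * size exactly as computed below -- note there is no overflow
 * check on the product.
 */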
|
PR_IMPLEMENT(void)
CallocCallback(void *ptr, size_t count, size_t size, uint32_t start, uint32_t end, tm_thread *t)
{
    callsite *site;
    PLHashEntry *he;
    allocation *alloc;
    int immediate_abort;

    if (!tracing_enabled || t->suppress_tracing != 0)
        return;

    site = backtrace(t, 2, &immediate_abort);
    if (immediate_abort)
        return;

    TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t);
    tmstats.calloc_calls++;
    if (!ptr) {
        tmstats.calloc_failures++;
    } else {
        size *= count;
        if (site) {
            log_event5(logfp, TM_EVENT_CALLOC,
                       site->serial, start, end - start,
                       (uint32_t)NS_PTR_TO_INT32(ptr), size);
        }
        if (get_allocations()) {
            he = PL_HashTableAdd(allocations, ptr, site);
            if (he) {
                alloc = (allocation*) he;
                alloc->size = size;
                alloc->trackfp = NULL;
            }
        }
    }
    TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);
}
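
/*
 * Wrapper callback for realloc.  Distinguishes three outcomes: failure
 * (the old block is untouched, so its entry is kept), a moved block (the
 * old entry is removed and a new one added), and an in-place resize (the
 * existing entry is updated).  Tracking via trackfp survives a move.
 */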
|
PR_IMPLEMENT(void)
ReallocCallback(void * oldptr, void *ptr, size_t size,
                uint32_t start, uint32_t end, tm_thread *t)
{
    callsite *oldsite, *site;
    size_t oldsize;
    PLHashNumber hash;
    PLHashEntry **hep, *he;
    allocation *alloc;
    FILE *trackfp = NULL;
    int immediate_abort;

    if (!tracing_enabled || t->suppress_tracing != 0)
        return;

    site = backtrace(t, 2, &immediate_abort);
    if (immediate_abort)
        return;

    TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t);
    tmstats.realloc_calls++;
    oldsite = NULL;
    oldsize = 0;
    hep = NULL;
    he = NULL;
    if (oldptr && get_allocations()) {
        hash = hash_pointer(oldptr);
        hep = PL_HashTableRawLookup(allocations, hash, oldptr);
        he = *hep;
        if (he) {
            oldsite = (callsite*) he->value;
            alloc = (allocation*) he;
            oldsize = alloc->size;
            trackfp = alloc->trackfp;
            if (trackfp) {
                fprintf(alloc->trackfp,
                        "\nrealloc(%p, %lu), oldsize %lu, alloc site %p\n",
                        (void*) ptr, (unsigned long) size,
                        (unsigned long) oldsize, (void*) oldsite);
                NS_TraceStack(1, trackfp);
            }
        }
    }
    if (!ptr && size) {
        /*
         * When realloc() fails, the original block is not freed or moved, so
         * we'll leave the allocation entry untouched.
         */
        tmstats.realloc_failures++;
    } else {
        if (site) {
            log_event8(logfp, TM_EVENT_REALLOC,
                       site->serial, start, end - start,
                       (uint32_t)NS_PTR_TO_INT32(ptr), size,
                       oldsite ? oldsite->serial : 0,
                       (uint32_t)NS_PTR_TO_INT32(oldptr), oldsize);
        }
        if (ptr && allocations) {
            if (ptr != oldptr) {
                /*
                 * If we're reallocating (not allocating new space by passing
                 * null to realloc) and realloc moved the block, free oldptr.
                 */
                if (he)
                    PL_HashTableRawRemove(allocations, hep, he);

                /* Record the new allocation now, setting he. */
                he = PL_HashTableAdd(allocations, ptr, site);
            } else {
                /*
                 * If we haven't yet recorded an allocation (possibly due to a
                 * temporary memory shortage), do it now.
                 */
                if (!he)
                    he = PL_HashTableAdd(allocations, ptr, site);
            }
            if (he) {
                alloc = (allocation*) he;
                alloc->size = size;
                alloc->trackfp = trackfp;
            }
        }
    }
    TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);
}
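
/*
 * Wrapper callback for free.  Logs a TM_EVENT_FREE record for tracked
 * pointers and removes the allocation entry.  Deliberately skips
 * backtrace(); see the FIXME below.
 */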
|
PR_IMPLEMENT(void)
FreeCallback(void * ptr, uint32_t start, uint32_t end, tm_thread *t)
{
    PLHashEntry **hep, *he;
    callsite *site;
    allocation *alloc;

    if (!tracing_enabled || t->suppress_tracing != 0)
        return;

    /*
     * FIXME: Perhaps we should call backtrace() so we can check for
     * immediate_abort. However, the only current contexts where
     * immediate_abort will be true do not call free(), so for now,
     * let's avoid the cost of backtrace(). See bug 478195.
     */
    TM_SUPPRESS_TRACING_AND_ENTER_LOCK(t);
    tmstats.free_calls++;
    if (!ptr) {
        tmstats.null_free_calls++;
    } else {
        if (get_allocations()) {
            hep = PL_HashTableRawLookup(allocations, hash_pointer(ptr), ptr);
            he = *hep;
            if (he) {
                site = (callsite*) he->value;
                if (site) {
                    alloc = (allocation*) he;
                    if (alloc->trackfp) {
                        fprintf(alloc->trackfp, "\nfree(%p), alloc site %p\n",
                                (void*) ptr, (void*) site);
                        NS_TraceStack(1, alloc->trackfp);
                    }
                    log_event5(logfp, TM_EVENT_FREE,
                               site->serial, start, end - start,
                               (uint32_t)NS_PTR_TO_INT32(ptr), alloc->size);
                }
                PL_HashTableRawRemove(allocations, hep, he);
            }
        }
    }
    TM_EXIT_LOCK_AND_UNSUPPRESS_TRACING(t);
}
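
/*
 * Capture the current thread's stack as an opaque id that can be handed
 * back to NS_TraceMallocPrintStackTrace later.
 */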
|
PR_IMPLEMENT(nsTMStackTraceID)
NS_TraceMallocGetStackTrace(void)
{
    callsite *site;
    int dummy;
    tm_thread *t = tm_get_thread();

    PR_ASSERT(t->suppress_tracing == 0);

    site = backtrace(t, 2, &dummy);
    return (nsTMStackTraceID) site;
}
|
PR_IMPLEMENT(void)
NS_TraceMallocPrintStackTrace(FILE *ofp, nsTMStackTraceID id)
{
    print_stack(ofp, (callsite *)id);
}

#endif /* NS_TRACE_MALLOC */