/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this file,
 * You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef MOZ_MEMORY
# error Should not compile this file when MOZ_MEMORY is not set
#endif

#ifndef MOZ_REPLACE_MALLOC
# error Should not compile this file when replace-malloc is disabled
#endif

#ifdef MOZ_NATIVE_JEMALLOC
# error Should not compile this file when we want to use native jemalloc
#endif

#include "mozmemory_wrap.h"

/* Declare all je_* functions */
#define MALLOC_DECL(name, return_type, ...) \
  return_type je_ ## name(__VA_ARGS__);
#include "malloc_decls.h"
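/* For the malloc entry in malloc_decls.h, for instance, the declaration
 * above expands to roughly:
 *   void *je_malloc(size_t);
 * (the exact set of prototypes is whatever malloc_decls.h lists). */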

#include "mozilla/Likely.h"
/*
 * Windows doesn't come with weak imports like those LD_PRELOAD or
 * DYLD_INSERT_LIBRARIES make possible on Linux/OSX. On Windows, the
 * replacement functions are defined as variable pointers to the
 * functions resolved with GetProcAddress() instead of weak definitions
 * of functions. The same needs to happen on Android, because the
 * Android linker doesn't handle weak linking with non LD_PRELOADed
 * libraries, and LD_PRELOADing is not very convenient on Android
 * because of the zygote.
 */
#ifdef XP_DARWIN
# define MOZ_REPLACE_WEAK __attribute__((weak_import))
#elif defined(XP_WIN) || defined(MOZ_WIDGET_ANDROID)
# define MOZ_NO_REPLACE_FUNC_DECL
#elif defined(__GNUC__)
# define MOZ_REPLACE_WEAK __attribute__((weak))
#endif

#include "replace_malloc.h"

#define MALLOC_DECL(name, return_type, ...) \
  je_ ## name,

static const malloc_table_t malloc_table = {
#include "malloc_decls.h"
};
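/* The include above populates the table with the je_* entry points in
 * whatever order malloc_decls.h lists them, e.g. something like
 * { je_malloc, je_posix_memalign, je_aligned_alloc, ... }. */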

#ifdef MOZ_NO_REPLACE_FUNC_DECL
# define MALLOC_DECL(name, return_type, ...) \
    typedef return_type (replace_ ## name ## _impl_t)(__VA_ARGS__); \
    replace_ ## name ## _impl_t *replace_ ## name = NULL;
# define MALLOC_FUNCS MALLOC_FUNCS_ALL
# include "malloc_decls.h"
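/* For the malloc entry, for instance, this expands to roughly:
 *   typedef void *(replace_malloc_impl_t)(size_t);
 *   replace_malloc_impl_t *replace_malloc = NULL;
 * Each replace_* pointer starts out NULL until resolved by
 * replace_malloc_init_funcs() below. */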

# ifdef XP_WIN
#  include <windows.h>
static void
replace_malloc_init_funcs()
{
  char replace_malloc_lib[1024];
  if (GetEnvironmentVariableA("MOZ_REPLACE_MALLOC_LIB", (LPSTR)&replace_malloc_lib,
                              sizeof(replace_malloc_lib)) > 0) {
    HMODULE handle = LoadLibraryA(replace_malloc_lib);
    if (handle) {
#define MALLOC_DECL(name, ...) \
  replace_ ## name = (replace_ ## name ## _impl_t *) GetProcAddress(handle, "replace_" # name);

# define MALLOC_FUNCS MALLOC_FUNCS_ALL
#include "malloc_decls.h"
    }
  }
}
# elif defined(MOZ_WIDGET_ANDROID)
#  include <dlfcn.h>
static void
replace_malloc_init_funcs()
{
  char *replace_malloc_lib = getenv("MOZ_REPLACE_MALLOC_LIB");
  if (replace_malloc_lib && *replace_malloc_lib) {
    void *handle = dlopen(replace_malloc_lib, RTLD_LAZY);
    if (handle) {
#define MALLOC_DECL(name, ...) \
  replace_ ## name = (replace_ ## name ## _impl_t *) dlsym(handle, "replace_" # name);

# define MALLOC_FUNCS MALLOC_FUNCS_ALL
#include "malloc_decls.h"
    }
  }
}
# else
#  error No implementation for replace_malloc_init_funcs()
# endif

#endif /* MOZ_NO_REPLACE_FUNC_DECL */

/*
 * Below is the malloc implementation overriding jemalloc and calling the
 * replacement functions if they exist.
 */

/*
 * On OSX, MOZ_MEMORY_API is defined to nothing, because malloc functions
 * are meant to have hidden visibility. But since the functions are only
 * used locally in the zone allocator further below, we can allow the
 * compiler to optimize more by switching to static.
 */
#ifdef XP_DARWIN
#undef MOZ_MEMORY_API
#define MOZ_MEMORY_API static
#endif

/*
 * Malloc implementation functions are MOZ_MEMORY_API, and jemalloc
 * specific functions MOZ_JEMALLOC_API; see mozmemory_wrap.h
 */
#define MALLOC_DECL(name, return_type, ...) \
  MOZ_MEMORY_API return_type name ## _impl(__VA_ARGS__);
#define MALLOC_FUNCS MALLOC_FUNCS_MALLOC
#include "malloc_decls.h"

#define MALLOC_DECL(name, return_type, ...) \
  MOZ_JEMALLOC_API return_type name ## _impl(__VA_ARGS__);
#define MALLOC_FUNCS MALLOC_FUNCS_JEMALLOC
#include "malloc_decls.h"

static int replace_malloc_initialized = 0;
static void
init()
{
#ifdef MOZ_NO_REPLACE_FUNC_DECL
  replace_malloc_init_funcs();
#endif
  // Set this *before* calling replace_init, otherwise if replace_init calls
  // malloc() we'll get an infinite loop.
  replace_malloc_initialized = 1;
  if (replace_init)
    replace_init(&malloc_table);
}
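/*
 * For reference (a sketch, not part of this build): a replace-malloc
 * library exports replace_init and whichever replace_* entry points it
 * wants to interpose. Assuming the malloc_table_t members are named after
 * the functions they wrap, it could look like:
 *
 *   static malloc_table_t funcs;
 *   void replace_init(const malloc_table_t *table) { funcs = *table; }
 *   void *replace_malloc(size_t size) { return funcs.malloc(size); }
 *
 * so the replacement can forward to the underlying jemalloc functions
 * through the table passed by init() above.
 */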

void*
malloc_impl(size_t size)
{
  if (MOZ_UNLIKELY(!replace_malloc_initialized))
    init();
  if (MOZ_LIKELY(!replace_malloc))
    return je_malloc(size);
  return replace_malloc(size);
}

int
posix_memalign_impl(void **memptr, size_t alignment, size_t size)
{
  if (MOZ_UNLIKELY(!replace_malloc_initialized))
    init();
  if (MOZ_LIKELY(!replace_posix_memalign))
    return je_posix_memalign(memptr, alignment, size);
  return replace_posix_memalign(memptr, alignment, size);
}

void*
aligned_alloc_impl(size_t alignment, size_t size)
{
  if (MOZ_UNLIKELY(!replace_malloc_initialized))
    init();
  if (MOZ_LIKELY(!replace_aligned_alloc))
    return je_aligned_alloc(alignment, size);
  return replace_aligned_alloc(alignment, size);
}

void*
calloc_impl(size_t num, size_t size)
{
  if (MOZ_UNLIKELY(!replace_malloc_initialized))
    init();
  if (MOZ_LIKELY(!replace_calloc))
    return je_calloc(num, size);
  return replace_calloc(num, size);
}

void*
realloc_impl(void *ptr, size_t size)
{
  if (MOZ_UNLIKELY(!replace_malloc_initialized))
    init();
  if (MOZ_LIKELY(!replace_realloc))
    return je_realloc(ptr, size);
  return replace_realloc(ptr, size);
}

void
free_impl(void *ptr)
{
  if (MOZ_UNLIKELY(!replace_malloc_initialized))
    init();
  if (MOZ_LIKELY(!replace_free))
    je_free(ptr);
  else
    replace_free(ptr);
}

void*
memalign_impl(size_t alignment, size_t size)
{
  if (MOZ_UNLIKELY(!replace_malloc_initialized))
    init();
  if (MOZ_LIKELY(!replace_memalign))
    return je_memalign(alignment, size);
  return replace_memalign(alignment, size);
}

void*
valloc_impl(size_t size)
{
  if (MOZ_UNLIKELY(!replace_malloc_initialized))
    init();
  if (MOZ_LIKELY(!replace_valloc))
    return je_valloc(size);
  return replace_valloc(size);
}

size_t
malloc_usable_size_impl(usable_ptr_t ptr)
{
  if (MOZ_UNLIKELY(!replace_malloc_initialized))
    init();
  if (MOZ_LIKELY(!replace_malloc_usable_size))
    return je_malloc_usable_size(ptr);
  return replace_malloc_usable_size(ptr);
}

size_t
malloc_good_size_impl(size_t size)
{
  if (MOZ_UNLIKELY(!replace_malloc_initialized))
    init();
  if (MOZ_LIKELY(!replace_malloc_good_size))
    return je_malloc_good_size(size);
  return replace_malloc_good_size(size);
}

void
jemalloc_stats_impl(jemalloc_stats_t *stats)
{
  if (MOZ_UNLIKELY(!replace_malloc_initialized))
    init();
  if (MOZ_LIKELY(!replace_jemalloc_stats))
    je_jemalloc_stats(stats);
  else
    replace_jemalloc_stats(stats);
}

void
jemalloc_purge_freed_pages_impl()
{
  if (MOZ_UNLIKELY(!replace_malloc_initialized))
    init();
  if (MOZ_LIKELY(!replace_jemalloc_purge_freed_pages))
    je_jemalloc_purge_freed_pages();
  else
    replace_jemalloc_purge_freed_pages();
}

void
jemalloc_free_dirty_pages_impl()
{
  if (MOZ_UNLIKELY(!replace_malloc_initialized))
    init();
  if (MOZ_LIKELY(!replace_jemalloc_free_dirty_pages))
    je_jemalloc_free_dirty_pages();
  else
    replace_jemalloc_free_dirty_pages();
}

/* The following comment and definitions are from jemalloc.c: */
#if defined(__GLIBC__) && !defined(__UCLIBC__)

/*
 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
 * to inconsistently reference libc's malloc(3)-compatible functions
 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
 *
 * These definitions interpose hooks in glibc. The functions are actually
 * passed an extra argument for the caller return address, which will be
 * ignored.
 */

typedef void (* __free_hook_type)(void *ptr);
typedef void *(* __malloc_hook_type)(size_t size);
typedef void *(* __realloc_hook_type)(void *ptr, size_t size);
typedef void *(* __memalign_hook_type)(size_t alignment, size_t size);

MOZ_MEMORY_API __free_hook_type __free_hook = free_impl;
MOZ_MEMORY_API __malloc_hook_type __malloc_hook = malloc_impl;
MOZ_MEMORY_API __realloc_hook_type __realloc_hook = realloc_impl;
MOZ_MEMORY_API __memalign_hook_type __memalign_hook = memalign_impl;

#endif

/*
 * The following is an OSX zone allocator implementation.
 * /!\ WARNING. It assumes the underlying malloc implementation's
 * malloc_usable_size returns 0 when the given pointer is not owned by
 * the allocator. Sadly, OSX does call zone_size with pointers not
 * owned by the allocator.
 */

#ifdef XP_DARWIN
#include <stdlib.h>
#include <malloc/malloc.h>
#include "mozilla/Assertions.h"

static size_t
zone_size(malloc_zone_t *zone, void *ptr)
{
  return malloc_usable_size_impl(ptr);
}

static void *
zone_malloc(malloc_zone_t *zone, size_t size)
{
  return malloc_impl(size);
}

static void *
zone_calloc(malloc_zone_t *zone, size_t num, size_t size)
{
  return calloc_impl(num, size);
}

static void *
zone_realloc(malloc_zone_t *zone, void *ptr, size_t size)
{
  if (malloc_usable_size_impl(ptr))
    return realloc_impl(ptr, size);
  return realloc(ptr, size);
}

static void
zone_free(malloc_zone_t *zone, void *ptr)
{
  if (malloc_usable_size_impl(ptr)) {
    free_impl(ptr);
    return;
  }
  free(ptr);
}

static void
zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size)
{
  size_t current_size = malloc_usable_size_impl(ptr);
  if (current_size) {
    MOZ_ASSERT(current_size == size);
    free_impl(ptr);
    return;
  }
  free(ptr);
}

static void *
zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size)
{
  void *ptr;
  if (posix_memalign_impl(&ptr, alignment, size) == 0)
    return ptr;
  return NULL;
}

static void *
zone_valloc(malloc_zone_t *zone, size_t size)
{
  return valloc_impl(size);
}

static void *
zone_destroy(malloc_zone_t *zone)
{
  /* This function should never be called. */
  MOZ_CRASH();
}

static size_t
zone_good_size(malloc_zone_t *zone, size_t size)
{
  return malloc_good_size_impl(size);
}

#ifdef MOZ_JEMALLOC

#include "jemalloc/internal/jemalloc_internal.h"

static void
zone_force_lock(malloc_zone_t *zone)
{
  /* /!\ This calls into jemalloc. It works because we're linked in the
   * same library. Stolen from jemalloc's zone.c. */
  if (isthreaded)
    jemalloc_prefork();
}

static void
zone_force_unlock(malloc_zone_t *zone)
{
  /* /!\ This calls into jemalloc. It works because we're linked in the
   * same library. Stolen from jemalloc's zone.c. */
  if (isthreaded)
    jemalloc_postfork_parent();
}

#else

#define JEMALLOC_ZONE_VERSION 6

/* Empty implementations are needed, because fork() calls zone->force_(un)lock
 * unconditionally. */
static void
zone_force_lock(malloc_zone_t *zone)
{
}

static void
zone_force_unlock(malloc_zone_t *zone)
{
}

#endif

static malloc_zone_t zone;
static struct malloc_introspection_t zone_introspect;

__attribute__((constructor)) void
register_zone(void)
{
  zone.size = (void *)zone_size;
  zone.malloc = (void *)zone_malloc;
  zone.calloc = (void *)zone_calloc;
  zone.valloc = (void *)zone_valloc;
  zone.free = (void *)zone_free;
  zone.realloc = (void *)zone_realloc;
  zone.destroy = (void *)zone_destroy;
  zone.zone_name = "replace_malloc_zone";
  zone.batch_malloc = NULL;
  zone.batch_free = NULL;
  zone.introspect = &zone_introspect;
  zone.version = JEMALLOC_ZONE_VERSION;
  zone.memalign = zone_memalign;
  zone.free_definite_size = zone_free_definite_size;
#if (JEMALLOC_ZONE_VERSION >= 8)
  zone.pressure_relief = NULL;
#endif
  zone_introspect.enumerator = NULL;
  zone_introspect.good_size = (void *)zone_good_size;
  zone_introspect.check = NULL;
  zone_introspect.print = NULL;
  zone_introspect.log = NULL;
  zone_introspect.force_lock = (void *)zone_force_lock;
  zone_introspect.force_unlock = (void *)zone_force_unlock;
  zone_introspect.statistics = NULL;
  zone_introspect.zone_locked = NULL;
#if (JEMALLOC_ZONE_VERSION >= 7)
  zone_introspect.enable_discharge_checking = NULL;
  zone_introspect.disable_discharge_checking = NULL;
  zone_introspect.discharge = NULL;
#ifdef __BLOCKS__
  zone_introspect.enumerate_discharged_pointers = NULL;
#else
  zone_introspect.enumerate_unavailable_without_blocks = NULL;
#endif
#endif

  /*
   * The default purgeable zone is created lazily by OSX's libc. It uses
   * the default zone when it is created for "small" allocations
   * (< 15 KiB), but assumes the default zone is a scalable_zone. This
   * obviously fails when the default zone is the jemalloc zone, so
   * malloc_default_purgeable_zone is called beforehand so that the
   * default purgeable zone is created when the default zone is still
   * a scalable_zone. As purgeable zones only exist on >= 10.6, we need
   * to check for the existence of malloc_default_purgeable_zone() at
   * run time.
   */
  malloc_default_purgeable_zone();

  /* Register the custom zone. At this point it won't be the default. */
  malloc_zone_register(&zone);

  /*
   * Unregister and reregister the default zone. On OSX >= 10.6,
   * unregistering takes the last registered zone and places it at the
   * location of the specified zone. Unregistering the default zone thus
   * makes the last registered one the default. On OSX < 10.6,
   * unregistering shifts all registered zones. The first registered zone
   * then becomes the default.
   */
  do {
    malloc_zone_t *default_zone = malloc_default_zone();
    malloc_zone_unregister(default_zone);
    malloc_zone_register(default_zone);
  } while (malloc_default_zone() != &zone);
}
#endif