memory/build/replace_malloc.c

author       Michael Schloh von Bennewitz <michael@schloh.com>
date         Wed, 31 Dec 2014 06:55:46 +0100
changeset    1:ca08bd8f51b2
permissions  -rw-r--r--

Added tag TORBROWSER_REPLICA for changeset 6474c204b198

/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this file,
 * You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef MOZ_MEMORY
# error Should not compile this file when MOZ_MEMORY is not set
#endif

#ifndef MOZ_REPLACE_MALLOC
# error Should not compile this file when replace-malloc is disabled
#endif

#ifdef MOZ_NATIVE_JEMALLOC
# error Should not compile this file when we want to use native jemalloc
#endif

#include "mozmemory_wrap.h"

/* Declare all je_* functions */
#define MALLOC_DECL(name, return_type, ...) \
  return_type je_ ## name(__VA_ARGS__);
#include "malloc_decls.h"

#include "mozilla/Likely.h"
/*
 * Windows doesn't come with weak imports as they are possible with
 * LD_PRELOAD or DYLD_INSERT_LIBRARIES on Linux/OSX. On this platform,
 * the replacement functions are defined as variable pointers to the
 * function resolved with GetProcAddress() instead of weak definitions
 * of functions. On Android, the same needs to happen as well, because
 * the Android linker doesn't handle weak linking with non LD_PRELOADed
 * libraries, but LD_PRELOADing is not very convenient on Android, with
 * the zygote.
 */
#ifdef XP_DARWIN
# define MOZ_REPLACE_WEAK __attribute__((weak_import))
#elif defined(XP_WIN) || defined(MOZ_WIDGET_ANDROID)
# define MOZ_NO_REPLACE_FUNC_DECL
#elif defined(__GNUC__)
# define MOZ_REPLACE_WEAK __attribute__((weak))
#endif
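/*
 * Illustrative sketch (an assumption about the usual contents of
 * replace_malloc.h, included below, not code from this file): on Linux/OSX
 * each replacement entry point ends up declared as a weak symbol, roughly
 *
 *   void replace_init(const malloc_table_t *table) MOZ_REPLACE_WEAK;
 *
 * so that when no replacement library is preloaded the symbol resolves to
 * NULL and checks like `if (replace_init)` cheaply skip the replacement
 * path. On Windows/Android, MOZ_NO_REPLACE_FUNC_DECL instead suppresses
 * those declarations so the function-pointer variables defined further down
 * can take their place.
 */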

#include "replace_malloc.h"

#define MALLOC_DECL(name, return_type, ...) \
  je_ ## name,

static const malloc_table_t malloc_table = {
#include "malloc_decls.h"
};
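/*
 * Illustrative expansion (assuming malloc_decls.h follows the usual X-macro
 * pattern of invoking MALLOC_DECL for each listed function and #undef-ing it
 * afterwards, which is why MALLOC_DECL is redefined before every inclusion):
 * for a function list starting with malloc and calloc, the initializer above
 * expands to roughly
 *
 *   static const malloc_table_t malloc_table = {
 *     je_malloc,
 *     je_calloc,
 *     ...
 *   };
 *
 * i.e. a table of the jemalloc entry points that is handed to the
 * replacement library through replace_init() below.
 */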

#ifdef MOZ_NO_REPLACE_FUNC_DECL
# define MALLOC_DECL(name, return_type, ...) \
    typedef return_type (replace_ ## name ## _impl_t)(__VA_ARGS__); \
    replace_ ## name ## _impl_t *replace_ ## name = NULL;
# define MALLOC_FUNCS MALLOC_FUNCS_ALL
# include "malloc_decls.h"

# ifdef XP_WIN
# include <windows.h>
static void
replace_malloc_init_funcs()
{
  char replace_malloc_lib[1024];
  if (GetEnvironmentVariableA("MOZ_REPLACE_MALLOC_LIB", (LPSTR)&replace_malloc_lib,
                              sizeof(replace_malloc_lib)) > 0) {
    HMODULE handle = LoadLibraryA(replace_malloc_lib);
    if (handle) {
#define MALLOC_DECL(name, ...) \
  replace_ ## name = (replace_ ## name ## _impl_t *) GetProcAddress(handle, "replace_" # name);

# define MALLOC_FUNCS MALLOC_FUNCS_ALL
#include "malloc_decls.h"
    }
  }
}
# elif defined(MOZ_WIDGET_ANDROID)
# include <dlfcn.h>
static void
replace_malloc_init_funcs()
{
  char *replace_malloc_lib = getenv("MOZ_REPLACE_MALLOC_LIB");
  if (replace_malloc_lib && *replace_malloc_lib) {
    void *handle = dlopen(replace_malloc_lib, RTLD_LAZY);
    if (handle) {
#define MALLOC_DECL(name, ...) \
  replace_ ## name = (replace_ ## name ## _impl_t *) dlsym(handle, "replace_" # name);

# define MALLOC_FUNCS MALLOC_FUNCS_ALL
#include "malloc_decls.h"
    }
  }
}
# else
# error No implementation for replace_malloc_init_funcs()
# endif

#endif /* MOZ_NO_REPLACE_FUNC_DECL */
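/*
 * Sketch of the other side of this interface (an illustration based on the
 * replace_malloc API as used above, not code from this file): a replacement
 * library exports "replace_"-prefixed symbols, picked up either as weak
 * symbols (Linux/OSX) or through GetProcAddress()/dlsym() (Windows/Android).
 * A minimal library might look roughly like:
 *
 *   #include "replace_malloc.h"
 *
 *   static const malloc_table_t *funcs = NULL;
 *
 *   void replace_init(const malloc_table_t *table)
 *   {
 *     funcs = table;            // remember the real jemalloc entry points
 *   }
 *
 *   void *replace_malloc(size_t size)
 *   {
 *     void *ptr = funcs->malloc(size);
 *     // instrumentation (logging, accounting, ...) would go here
 *     return ptr;
 *   }
 *
 * On Linux/OSX such a library is loaded with LD_PRELOAD or
 * DYLD_INSERT_LIBRARIES; on Windows and Android its path is read from the
 * MOZ_REPLACE_MALLOC_LIB environment variable by the functions above.
 */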

/*
 * Below is the malloc implementation overriding jemalloc and calling the
 * replacement functions if they exist.
 */

/*
 * On OSX, MOZ_MEMORY_API is defined to nothing, because malloc functions
 * are meant to have hidden visibility. But since the functions are only
 * used locally in the zone allocator further below, we can allow the
 * compiler to optimize more by switching to static.
 */
#ifdef XP_DARWIN
#undef MOZ_MEMORY_API
#define MOZ_MEMORY_API static
#endif

/*
 * Malloc implementation functions are MOZ_MEMORY_API, and jemalloc
 * specific functions MOZ_JEMALLOC_API; see mozmemory_wrap.h
 */
#define MALLOC_DECL(name, return_type, ...) \
  MOZ_MEMORY_API return_type name ## _impl(__VA_ARGS__);
#define MALLOC_FUNCS MALLOC_FUNCS_MALLOC
#include "malloc_decls.h"

#define MALLOC_DECL(name, return_type, ...) \
  MOZ_JEMALLOC_API return_type name ## _impl(__VA_ARGS__);
#define MALLOC_FUNCS MALLOC_FUNCS_JEMALLOC
#include "malloc_decls.h"

static int replace_malloc_initialized = 0;
static void
init()
{
#ifdef MOZ_NO_REPLACE_FUNC_DECL
  replace_malloc_init_funcs();
#endif
  // Set this *before* calling replace_init, otherwise if replace_init calls
  // malloc() we'll get an infinite loop.
  replace_malloc_initialized = 1;
  if (replace_init)
    replace_init(&malloc_table);
}

void*
malloc_impl(size_t size)
{
  if (MOZ_UNLIKELY(!replace_malloc_initialized))
    init();
  if (MOZ_LIKELY(!replace_malloc))
    return je_malloc(size);
  return replace_malloc(size);
}

int
posix_memalign_impl(void **memptr, size_t alignment, size_t size)
{
  if (MOZ_UNLIKELY(!replace_malloc_initialized))
    init();
  if (MOZ_LIKELY(!replace_posix_memalign))
    return je_posix_memalign(memptr, alignment, size);
  return replace_posix_memalign(memptr, alignment, size);
}

void*
aligned_alloc_impl(size_t alignment, size_t size)
{
  if (MOZ_UNLIKELY(!replace_malloc_initialized))
    init();
  if (MOZ_LIKELY(!replace_aligned_alloc))
    return je_aligned_alloc(alignment, size);
  return replace_aligned_alloc(alignment, size);
}

void*
calloc_impl(size_t num, size_t size)
{
  if (MOZ_UNLIKELY(!replace_malloc_initialized))
    init();
  if (MOZ_LIKELY(!replace_calloc))
    return je_calloc(num, size);
  return replace_calloc(num, size);
}

void*
realloc_impl(void *ptr, size_t size)
{
  if (MOZ_UNLIKELY(!replace_malloc_initialized))
    init();
  if (MOZ_LIKELY(!replace_realloc))
    return je_realloc(ptr, size);
  return replace_realloc(ptr, size);
}

void
free_impl(void *ptr)
{
  if (MOZ_UNLIKELY(!replace_malloc_initialized))
    init();
  if (MOZ_LIKELY(!replace_free))
    je_free(ptr);
  else
    replace_free(ptr);
}

void*
memalign_impl(size_t alignment, size_t size)
{
  if (MOZ_UNLIKELY(!replace_malloc_initialized))
    init();
  if (MOZ_LIKELY(!replace_memalign))
    return je_memalign(alignment, size);
  return replace_memalign(alignment, size);
}

void*
valloc_impl(size_t size)
{
  if (MOZ_UNLIKELY(!replace_malloc_initialized))
    init();
  if (MOZ_LIKELY(!replace_valloc))
    return je_valloc(size);
  return replace_valloc(size);
}

size_t
malloc_usable_size_impl(usable_ptr_t ptr)
{
  if (MOZ_UNLIKELY(!replace_malloc_initialized))
    init();
  if (MOZ_LIKELY(!replace_malloc_usable_size))
    return je_malloc_usable_size(ptr);
  return replace_malloc_usable_size(ptr);
}

size_t
malloc_good_size_impl(size_t size)
{
  if (MOZ_UNLIKELY(!replace_malloc_initialized))
    init();
  if (MOZ_LIKELY(!replace_malloc_good_size))
    return je_malloc_good_size(size);
  return replace_malloc_good_size(size);
}

void
jemalloc_stats_impl(jemalloc_stats_t *stats)
{
  if (MOZ_UNLIKELY(!replace_malloc_initialized))
    init();
  if (MOZ_LIKELY(!replace_jemalloc_stats))
    je_jemalloc_stats(stats);
  else
    replace_jemalloc_stats(stats);
}

void
jemalloc_purge_freed_pages_impl()
{
  if (MOZ_UNLIKELY(!replace_malloc_initialized))
    init();
  if (MOZ_LIKELY(!replace_jemalloc_purge_freed_pages))
    je_jemalloc_purge_freed_pages();
  else
    replace_jemalloc_purge_freed_pages();
}

void
jemalloc_free_dirty_pages_impl()
{
  if (MOZ_UNLIKELY(!replace_malloc_initialized))
    init();
  if (MOZ_LIKELY(!replace_jemalloc_free_dirty_pages))
    je_jemalloc_free_dirty_pages();
  else
    replace_jemalloc_free_dirty_pages();
}

/* The following comment and definitions are from jemalloc.c: */
#if defined(__GLIBC__) && !defined(__UCLIBC__)

/*
 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
 * to inconsistently reference libc's malloc(3)-compatible functions
 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
 *
 * These definitions interpose hooks in glibc. The functions are actually
 * passed an extra argument for the caller return address, which will be
 * ignored.
 */

typedef void (* __free_hook_type)(void *ptr);
typedef void *(* __malloc_hook_type)(size_t size);
typedef void *(* __realloc_hook_type)(void *ptr, size_t size);
typedef void *(* __memalign_hook_type)(size_t alignment, size_t size);

MOZ_MEMORY_API __free_hook_type __free_hook = free_impl;
MOZ_MEMORY_API __malloc_hook_type __malloc_hook = malloc_impl;
MOZ_MEMORY_API __realloc_hook_type __realloc_hook = realloc_impl;
MOZ_MEMORY_API __memalign_hook_type __memalign_hook = memalign_impl;

#endif
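/*
 * Illustrative note (an assumption about glibc's dispatch, not part of this
 * file): on a glibc that still provides these hooks, setting them routes
 * allocation calls made through libc into the _impl functions above, roughly:
 *
 *   void *malloc(size_t size)
 *   {
 *     if (__malloc_hook)
 *       return __malloc_hook(size);   // glibc actually passes an extra
 *                                     // caller-address argument, which is
 *                                     // ignored here (see comment above)
 *     ... regular glibc allocator path ...
 *   }
 *
 * Newer glibc versions have removed the __malloc_hook family entirely.
 */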

/*
 * The following is an OSX zone allocator implementation.
 * /!\ WARNING. It assumes the underlying malloc implementation's
 * malloc_usable_size returns 0 when the given pointer is not owned by
 * the allocator. Sadly, OSX does call zone_size with pointers not
 * owned by the allocator.
 */

#ifdef XP_DARWIN
#include <stdlib.h>
#include <malloc/malloc.h>
#include "mozilla/Assertions.h"

static size_t
zone_size(malloc_zone_t *zone, void *ptr)
{
  return malloc_usable_size_impl(ptr);
}

static void *
zone_malloc(malloc_zone_t *zone, size_t size)
{
  return malloc_impl(size);
}

static void *
zone_calloc(malloc_zone_t *zone, size_t num, size_t size)
{
  return calloc_impl(num, size);
}

static void *
zone_realloc(malloc_zone_t *zone, void *ptr, size_t size)
{
  if (malloc_usable_size_impl(ptr))
    return realloc_impl(ptr, size);
  return realloc(ptr, size);
}

static void
zone_free(malloc_zone_t *zone, void *ptr)
{
  if (malloc_usable_size_impl(ptr)) {
    free_impl(ptr);
    return;
  }
  free(ptr);
}

static void
zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size)
{
  size_t current_size = malloc_usable_size_impl(ptr);
  if (current_size) {
    MOZ_ASSERT(current_size == size);
    free_impl(ptr);
    return;
  }
  free(ptr);
}

static void *
zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size)
{
  void *ptr;
  if (posix_memalign_impl(&ptr, alignment, size) == 0)
    return ptr;
  return NULL;
}

static void *
zone_valloc(malloc_zone_t *zone, size_t size)
{
  return valloc_impl(size);
}

static void *
zone_destroy(malloc_zone_t *zone)
{
  /* This function should never be called. */
  MOZ_CRASH();
}

static size_t
zone_good_size(malloc_zone_t *zone, size_t size)
{
  return malloc_good_size_impl(size);
}

#ifdef MOZ_JEMALLOC

#include "jemalloc/internal/jemalloc_internal.h"

static void
zone_force_lock(malloc_zone_t *zone)
{
  /* /!\ This calls into jemalloc. It works because we're linked in the
   * same library. Stolen from jemalloc's zone.c. */
  if (isthreaded)
    jemalloc_prefork();
}

static void
zone_force_unlock(malloc_zone_t *zone)
{
  /* /!\ This calls into jemalloc. It works because we're linked in the
   * same library. Stolen from jemalloc's zone.c. */
  if (isthreaded)
    jemalloc_postfork_parent();
}

#else

#define JEMALLOC_ZONE_VERSION 6

/* Empty implementations are needed, because fork() calls zone->force_(un)lock
 * unconditionally. */
static void
zone_force_lock(malloc_zone_t *zone)
{
}

static void
zone_force_unlock(malloc_zone_t *zone)
{
}

#endif

static malloc_zone_t zone;
static struct malloc_introspection_t zone_introspect;

__attribute__((constructor)) void
register_zone(void)
{
  zone.size = (void *)zone_size;
  zone.malloc = (void *)zone_malloc;
  zone.calloc = (void *)zone_calloc;
  zone.valloc = (void *)zone_valloc;
  zone.free = (void *)zone_free;
  zone.realloc = (void *)zone_realloc;
  zone.destroy = (void *)zone_destroy;
  zone.zone_name = "replace_malloc_zone";
  zone.batch_malloc = NULL;
  zone.batch_free = NULL;
  zone.introspect = &zone_introspect;
  zone.version = JEMALLOC_ZONE_VERSION;
  zone.memalign = zone_memalign;
  zone.free_definite_size = zone_free_definite_size;
#if (JEMALLOC_ZONE_VERSION >= 8)
  zone.pressure_relief = NULL;
#endif
  zone_introspect.enumerator = NULL;
  zone_introspect.good_size = (void *)zone_good_size;
  zone_introspect.check = NULL;
  zone_introspect.print = NULL;
  zone_introspect.log = NULL;
  zone_introspect.force_lock = (void *)zone_force_lock;
  zone_introspect.force_unlock = (void *)zone_force_unlock;
  zone_introspect.statistics = NULL;
  zone_introspect.zone_locked = NULL;
#if (JEMALLOC_ZONE_VERSION >= 7)
  zone_introspect.enable_discharge_checking = NULL;
  zone_introspect.disable_discharge_checking = NULL;
  zone_introspect.discharge = NULL;
#ifdef __BLOCKS__
  zone_introspect.enumerate_discharged_pointers = NULL;
#else
  zone_introspect.enumerate_unavailable_without_blocks = NULL;
#endif
#endif

  /*
   * The default purgeable zone is created lazily by OSX's libc. It uses
   * the default zone when it is created for "small" allocations
   * (< 15 KiB), but assumes the default zone is a scalable_zone. This
   * obviously fails when the default zone is the jemalloc zone, so
   * malloc_default_purgeable_zone is called beforehand so that the
   * default purgeable zone is created when the default zone is still
   * a scalable_zone. As purgeable zones only exist on >= 10.6, we need
   * to check for the existence of malloc_default_purgeable_zone() at
   * run time.
   */
  malloc_default_purgeable_zone();

  /* Register the custom zone. At this point it won't be the default. */
  malloc_zone_register(&zone);

  /*
   * Unregister and reregister the default zone. On OSX >= 10.6,
   * unregistering takes the last registered zone and places it at the
   * location of the specified zone. Unregistering the default zone thus
   * makes the last registered one the default. On OSX < 10.6,
   * unregistering shifts all registered zones. The first registered zone
   * then becomes the default.
   */
  do {
    malloc_zone_t *default_zone = malloc_default_zone();
    malloc_zone_unregister(default_zone);
    malloc_zone_register(default_zone);
  } while (malloc_default_zone() != &zone);
}
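/*
 * Worked illustration of the loop above (a sketch of the intent, following
 * the registration behaviour described in the preceding comment): starting
 * from a zone list such as
 *
 *   [ default_zone, ..., replace_malloc_zone ]
 *
 * unregistering default_zone on OSX >= 10.6 moves the last registered zone
 * (ours) into its slot, making it the default, and re-registering
 * default_zone appends it at the end; the loop repeats until
 * malloc_default_zone() reports our zone, which also covers the shifting
 * behaviour of OSX < 10.6.
 */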
#endif
