memory/jemalloc/src/src/zone.c

author       Michael Schloh von Bennewitz <michael@schloh.com>
date         Thu, 15 Jan 2015 15:59:08 +0100
branch       TOR_BUG_9701
changeset    10:ac0c01689b40
permissions  -rw-r--r--

Implement a real Private Browsing Mode condition by changing the API/ABI;
this solves Tor bug #9701, complying with the disk avoidance requirements
documented in https://www.torproject.org/projects/torbrowser/design/#disk-avoidance.

#include "jemalloc/internal/jemalloc_internal.h"
#ifndef JEMALLOC_ZONE
# error "This source file is for zones on Darwin (OS X)."
#endif

/*
 * The malloc_default_purgeable_zone function is only available on >= 10.6.
 * We need to check whether it is present at runtime, thus the weak_import.
 */
extern malloc_zone_t *malloc_default_purgeable_zone(void)
    JEMALLOC_ATTR(weak_import);
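/*
 * On releases older than 10.6 the weak symbol above resolves to NULL; the
 * corresponding run-time check is the NULL test in register_zone() below.
 */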

/******************************************************************************/
/* Data. */

static malloc_zone_t zone;
static struct malloc_introspection_t zone_introspect;

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static size_t zone_size(malloc_zone_t *zone, void *ptr);
static void *zone_malloc(malloc_zone_t *zone, size_t size);
static void *zone_calloc(malloc_zone_t *zone, size_t num, size_t size);
static void *zone_valloc(malloc_zone_t *zone, size_t size);
static void zone_free(malloc_zone_t *zone, void *ptr);
static void *zone_realloc(malloc_zone_t *zone, void *ptr, size_t size);
#if (JEMALLOC_ZONE_VERSION >= 5)
static void *zone_memalign(malloc_zone_t *zone, size_t alignment,
    size_t size);
#endif
#if (JEMALLOC_ZONE_VERSION >= 6)
static void zone_free_definite_size(malloc_zone_t *zone, void *ptr,
    size_t size);
#endif
static void *zone_destroy(malloc_zone_t *zone);
static size_t zone_good_size(malloc_zone_t *zone, size_t size);
static void zone_force_lock(malloc_zone_t *zone);
static void zone_force_unlock(malloc_zone_t *zone);

/******************************************************************************/
/*
 * Functions.
 */

static size_t
zone_size(malloc_zone_t *zone, void *ptr)
{

    /*
     * There appear to be places within Darwin (such as setenv(3)) that
     * cause calls to this function with pointers that *no* zone owns. If
     * we knew that all pointers were owned by *some* zone, we could split
     * our zone into two parts, and use one as the default allocator and
     * the other as the default deallocator/reallocator. Since that will
     * not work in practice, we must check all pointers to assure that they
     * reside within a mapped chunk before determining size.
     */
    return (ivsalloc(ptr, config_prof));
}

static void *
zone_malloc(malloc_zone_t *zone, size_t size)
{

    return (je_malloc(size));
}

static void *
zone_calloc(malloc_zone_t *zone, size_t num, size_t size)
{

    return (je_calloc(num, size));
}

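/*
 * valloc(3) semantics: return a page-aligned allocation.  PAGE is jemalloc's
 * page size constant, so the request is simply forwarded to posix_memalign.
 */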
static void *
zone_valloc(malloc_zone_t *zone, size_t size)
{
    void *ret = NULL; /* Assignment avoids useless compiler warning. */

    je_posix_memalign(&ret, PAGE, size);

    return (ret);
}

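/*
 * For free/realloc, a nonzero ivsalloc() result means that jemalloc owns the
 * pointer; anything else is handed back to the system allocator, because
 * Darwin can route pointers that jemalloc never allocated through the default
 * zone (see the comment in zone_size() above).
 */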
static void
zone_free(malloc_zone_t *zone, void *ptr)
{

    if (ivsalloc(ptr, config_prof) != 0) {
        je_free(ptr);
        return;
    }

    free(ptr);
}

static void *
zone_realloc(malloc_zone_t *zone, void *ptr, size_t size)
{

    if (ivsalloc(ptr, config_prof) != 0)
        return (je_realloc(ptr, size));

    return (realloc(ptr, size));
}

#if (JEMALLOC_ZONE_VERSION >= 5)
static void *
zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size)
{
    void *ret = NULL; /* Assignment avoids useless compiler warning. */

    je_posix_memalign(&ret, alignment, size);

    return (ret);
}
#endif

#if (JEMALLOC_ZONE_VERSION >= 6)
static void
zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size)
{

    if (ivsalloc(ptr, config_prof) != 0) {
        assert(ivsalloc(ptr, config_prof) == size);
        je_free(ptr);
        return;
    }

    free(ptr);
}
#endif

static void *
zone_destroy(malloc_zone_t *zone)
{

    /* This function should never be called. */
    assert(false);
    return (NULL);
}

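/*
 * good_size: report the size jemalloc would actually allocate for a given
 * request; s2u() ("size to usable") rounds the request up to its size class.
 */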
static size_t
zone_good_size(malloc_zone_t *zone, size_t size)
{

    if (size == 0)
        size = 1;
    return (s2u(size));
}

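/*
 * force_lock/force_unlock are invoked by Darwin around fork(2); they map onto
 * jemalloc's prefork/postfork hooks so that allocator locks are not left in
 * an inconsistent state in the child.
 */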
static void
zone_force_lock(malloc_zone_t *zone)
{

    if (isthreaded)
        jemalloc_prefork();
}

static void
zone_force_unlock(malloc_zone_t *zone)
{

    if (isthreaded)
        jemalloc_postfork_parent();
}

JEMALLOC_ATTR(constructor)
void
register_zone(void)
{

    /*
     * If something else replaced the system default zone allocator, don't
     * register jemalloc's.
     */
    malloc_zone_t *default_zone = malloc_default_zone();
    if (!default_zone->zone_name ||
        strcmp(default_zone->zone_name, "DefaultMallocZone") != 0) {
        return;
    }

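    /*
     * Populate the zone's function table with the wrappers above.  The
     * (void *) casts paper over minor prototype differences (e.g. const
     * qualifiers) between these wrappers and the malloc_zone_t field types.
     */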
    zone.size = (void *)zone_size;
    zone.malloc = (void *)zone_malloc;
    zone.calloc = (void *)zone_calloc;
    zone.valloc = (void *)zone_valloc;
    zone.free = (void *)zone_free;
    zone.realloc = (void *)zone_realloc;
    zone.destroy = (void *)zone_destroy;
    zone.zone_name = "jemalloc_zone";
    zone.batch_malloc = NULL;
    zone.batch_free = NULL;
    zone.introspect = &zone_introspect;
    zone.version = JEMALLOC_ZONE_VERSION;
#if (JEMALLOC_ZONE_VERSION >= 5)
    zone.memalign = zone_memalign;
#endif
#if (JEMALLOC_ZONE_VERSION >= 6)
    zone.free_definite_size = zone_free_definite_size;
#endif
#if (JEMALLOC_ZONE_VERSION >= 8)
    zone.pressure_relief = NULL;
#endif

    zone_introspect.enumerator = NULL;
    zone_introspect.good_size = (void *)zone_good_size;
    zone_introspect.check = NULL;
    zone_introspect.print = NULL;
    zone_introspect.log = NULL;
    zone_introspect.force_lock = (void *)zone_force_lock;
    zone_introspect.force_unlock = (void *)zone_force_unlock;
    zone_introspect.statistics = NULL;
#if (JEMALLOC_ZONE_VERSION >= 6)
    zone_introspect.zone_locked = NULL;
#endif
#if (JEMALLOC_ZONE_VERSION >= 7)
    zone_introspect.enable_discharge_checking = NULL;
    zone_introspect.disable_discharge_checking = NULL;
    zone_introspect.discharge = NULL;
#ifdef __BLOCKS__
    zone_introspect.enumerate_discharged_pointers = NULL;
#else
    zone_introspect.enumerate_unavailable_without_blocks = NULL;
#endif
#endif

    /*
     * The default purgeable zone is created lazily by OSX's libc. It uses
     * the default zone when it is created for "small" allocations
     * (< 15 KiB), but assumes the default zone is a scalable_zone. This
     * obviously fails when the default zone is the jemalloc zone, so
     * malloc_default_purgeable_zone is called beforehand so that the
     * default purgeable zone is created when the default zone is still
     * a scalable_zone. As purgeable zones only exist on >= 10.6, we need
     * to check for the existence of malloc_default_purgeable_zone() at
     * run time.
     */
    if (malloc_default_purgeable_zone != NULL)
        malloc_default_purgeable_zone();

    /* Register the custom zone. At this point it won't be the default. */
    malloc_zone_register(&zone);

    /*
     * Unregister and reregister the default zone. On OSX >= 10.6,
     * unregistering takes the last registered zone and places it at the
     * location of the specified zone. Unregistering the default zone thus
     * makes the last registered one the default. On OSX < 10.6,
     * unregistering shifts all registered zones. The first registered zone
     * then becomes the default.
     */
    do {
        default_zone = malloc_default_zone();
        malloc_zone_unregister(default_zone);
        malloc_zone_register(default_zone);
    } while (malloc_default_zone() != &zone);
}
