memory/jemalloc/src/src/arena.c

author:      Michael Schloh von Bennewitz <michael@schloh.com>
date:        Thu, 22 Jan 2015 13:21:57 +0100
branch:      TOR_BUG_9701
changeset:   15:b8a032363ba2
permissions: -rw-r--r--

Incorporate requested changes from Mozilla in review:
https://bugzilla.mozilla.org/show_bug.cgi?id=1123480#c6

michael@0 1 #define JEMALLOC_ARENA_C_
michael@0 2 #include "jemalloc/internal/jemalloc_internal.h"
michael@0 3
michael@0 4 /******************************************************************************/
michael@0 5 /* Data. */
michael@0 6
michael@0 7 ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
michael@0 8 arena_bin_info_t arena_bin_info[NBINS];
michael@0 9
michael@0 10 JEMALLOC_ALIGNED(CACHELINE)
michael@0 11 const uint8_t small_size2bin[] = {
michael@0 12 #define S2B_8(i) i,
michael@0 13 #define S2B_16(i) S2B_8(i) S2B_8(i)
michael@0 14 #define S2B_32(i) S2B_16(i) S2B_16(i)
michael@0 15 #define S2B_64(i) S2B_32(i) S2B_32(i)
michael@0 16 #define S2B_128(i) S2B_64(i) S2B_64(i)
michael@0 17 #define S2B_256(i) S2B_128(i) S2B_128(i)
michael@0 18 #define S2B_512(i) S2B_256(i) S2B_256(i)
michael@0 19 #define S2B_1024(i) S2B_512(i) S2B_512(i)
michael@0 20 #define S2B_2048(i) S2B_1024(i) S2B_1024(i)
michael@0 21 #define S2B_4096(i) S2B_2048(i) S2B_2048(i)
michael@0 22 #define S2B_8192(i) S2B_4096(i) S2B_4096(i)
michael@0 23 #define SIZE_CLASS(bin, delta, size) \
michael@0 24 S2B_##delta(bin)
michael@0 25 SIZE_CLASSES
michael@0 26 #undef S2B_8
michael@0 27 #undef S2B_16
michael@0 28 #undef S2B_32
michael@0 29 #undef S2B_64
michael@0 30 #undef S2B_128
michael@0 31 #undef S2B_256
michael@0 32 #undef S2B_512
michael@0 33 #undef S2B_1024
michael@0 34 #undef S2B_2048
michael@0 35 #undef S2B_4096
michael@0 36 #undef S2B_8192
michael@0 37 #undef SIZE_CLASS
michael@0 38 };
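/*
 * Illustrative sketch, not part of the upstream source: each SIZE_CLASS(bin,
 * delta, size) entry above expands, via S2B_##delta(bin), into delta/8 copies
 * of its bin index, so small_size2bin holds one byte per 8-byte quantum of
 * request size.  A hypothetical class (bin 3, delta 16, size 48) would expand
 * to "3, 3,", covering requests of 33..48 bytes.  A lookup then reduces to a
 * single table read, roughly as below (the index expression is an assumption
 * for illustration, not copied from this file):
 */
#if 0	/* sketch only */
static inline uint8_t
small_size2bin_lookup_sketch(size_t size)
{

	/* One table entry per 8-byte quantum; size must be a small size. */
	return (small_size2bin[(size - 1) >> 3]);
}
#endif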
michael@0 39
michael@0 40 /******************************************************************************/
michael@0 41 /* Function prototypes for non-inline static functions. */
michael@0 42
michael@0 43 static void arena_avail_insert(arena_t *arena, arena_chunk_t *chunk,
michael@0 44 size_t pageind, size_t npages, bool maybe_adjac_pred,
michael@0 45 bool maybe_adjac_succ);
michael@0 46 static void arena_avail_remove(arena_t *arena, arena_chunk_t *chunk,
michael@0 47 size_t pageind, size_t npages, bool maybe_adjac_pred,
michael@0 48 bool maybe_adjac_succ);
michael@0 49 static void arena_run_split(arena_t *arena, arena_run_t *run, size_t size,
michael@0 50 bool large, size_t binind, bool zero);
michael@0 51 static arena_chunk_t *arena_chunk_alloc(arena_t *arena);
michael@0 52 static void arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk);
michael@0 53 static arena_run_t *arena_run_alloc_helper(arena_t *arena, size_t size,
michael@0 54 bool large, size_t binind, bool zero);
michael@0 55 static arena_run_t *arena_run_alloc(arena_t *arena, size_t size, bool large,
michael@0 56 size_t binind, bool zero);
michael@0 57 static arena_chunk_t *chunks_dirty_iter_cb(arena_chunk_tree_t *tree,
michael@0 58 arena_chunk_t *chunk, void *arg);
michael@0 59 static void arena_purge(arena_t *arena, bool all);
michael@0 60 static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty,
michael@0 61 bool cleaned);
michael@0 62 static void arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk,
michael@0 63 arena_run_t *run, size_t oldsize, size_t newsize);
michael@0 64 static void arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk,
michael@0 65 arena_run_t *run, size_t oldsize, size_t newsize, bool dirty);
michael@0 66 static arena_run_t *arena_bin_runs_first(arena_bin_t *bin);
michael@0 67 static void arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run);
michael@0 68 static void arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run);
michael@0 69 static arena_run_t *arena_bin_nonfull_run_tryget(arena_bin_t *bin);
michael@0 70 static arena_run_t *arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin);
michael@0 71 static void *arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin);
michael@0 72 static void arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
michael@0 73 arena_bin_t *bin);
michael@0 74 static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk,
michael@0 75 arena_run_t *run, arena_bin_t *bin);
michael@0 76 static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
michael@0 77 arena_run_t *run, arena_bin_t *bin);
michael@0 78 static void arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk,
michael@0 79 void *ptr, size_t oldsize, size_t size);
michael@0 80 static bool arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk,
michael@0 81 void *ptr, size_t oldsize, size_t size, size_t extra, bool zero);
michael@0 82 static bool arena_ralloc_large(void *ptr, size_t oldsize, size_t size,
michael@0 83 size_t extra, bool zero);
michael@0 84 static size_t bin_info_run_size_calc(arena_bin_info_t *bin_info,
michael@0 85 size_t min_run_size);
michael@0 86 static void bin_info_init(void);
michael@0 87
michael@0 88 /******************************************************************************/
michael@0 89
michael@0 90 static inline int
michael@0 91 arena_run_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
michael@0 92 {
michael@0 93 uintptr_t a_mapelm = (uintptr_t)a;
michael@0 94 uintptr_t b_mapelm = (uintptr_t)b;
michael@0 95
michael@0 96 assert(a != NULL);
michael@0 97 assert(b != NULL);
michael@0 98
michael@0 99 return ((a_mapelm > b_mapelm) - (a_mapelm < b_mapelm));
michael@0 100 }
michael@0 101
michael@0 102 /* Generate red-black tree functions. */
michael@0 103 rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_t,
michael@0 104 u.rb_link, arena_run_comp)
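/*
 * Illustrative note, not part of the upstream source: the comparators in this
 * file use the branchless three-way idiom (a > b) - (a < b), which yields -1,
 * 0, or 1 and, unlike returning a difference, cannot overflow or lose the
 * sign when truncated to int.  Minimal sketch:
 */
#if 0	/* sketch only */
static inline int
three_way_cmp_sketch(uintptr_t a, uintptr_t b)
{

	return ((a > b) - (a < b));	/* -1 if a < b, 0 if a == b, 1 if a > b */
}
#endif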
michael@0 105
michael@0 106 static inline int
michael@0 107 arena_avail_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
michael@0 108 {
michael@0 109 int ret;
michael@0 110 size_t a_size = a->bits & ~PAGE_MASK;
michael@0 111 size_t b_size = b->bits & ~PAGE_MASK;
michael@0 112
michael@0 113 ret = (a_size > b_size) - (a_size < b_size);
michael@0 114 if (ret == 0) {
michael@0 115 uintptr_t a_mapelm, b_mapelm;
michael@0 116
michael@0 117 if ((a->bits & CHUNK_MAP_KEY) != CHUNK_MAP_KEY)
michael@0 118 a_mapelm = (uintptr_t)a;
michael@0 119 else {
michael@0 120 /*
michael@0 121 * Treat keys as though they are lower than anything
michael@0 122 * else.
michael@0 123 */
michael@0 124 a_mapelm = 0;
michael@0 125 }
michael@0 126 b_mapelm = (uintptr_t)b;
michael@0 127
michael@0 128 ret = (a_mapelm > b_mapelm) - (a_mapelm < b_mapelm);
michael@0 129 }
michael@0 130
michael@0 131 return (ret);
michael@0 132 }
michael@0 133
michael@0 134 /* Generate red-black tree functions. */
michael@0 135 rb_gen(static UNUSED, arena_avail_tree_, arena_avail_tree_t, arena_chunk_map_t,
michael@0 136 u.rb_link, arena_avail_comp)
michael@0 137
michael@0 138 static inline int
michael@0 139 arena_chunk_dirty_comp(arena_chunk_t *a, arena_chunk_t *b)
michael@0 140 {
michael@0 141
michael@0 142 assert(a != NULL);
michael@0 143 assert(b != NULL);
michael@0 144
michael@0 145 /*
michael@0 146 * Short-circuit for self comparison. The following comparison code
michael@0 147 * would come to the same result, but at the cost of executing the slow
michael@0 148 * path.
michael@0 149 */
michael@0 150 if (a == b)
michael@0 151 return (0);
michael@0 152
michael@0 153 /*
michael@0 154 * Order such that chunks with higher fragmentation are "less than"
michael@0 155 * those with lower fragmentation -- purging order is from "least" to
michael@0 156 * "greatest". Fragmentation is measured as:
michael@0 157 *
michael@0 158 	 *     fragmentation = (mean current avail run size) /
michael@0 159 	 *                     (mean defragmented avail run size)
michael@0 160 	 *
michael@0 161 	 *                   = (navail / nruns_avail) /
michael@0 162 	 *                     (navail / (nruns_avail - nruns_adjac))
michael@0 163 	 *
michael@0 164 	 *                   = (nruns_avail - nruns_adjac) / nruns_avail
michael@0 165 	 *
michael@0 166 	 *     where navail is the number of available pages in the chunk,
michael@0 167 	 *     nruns_avail the number of available runs, and nruns_adjac the
michael@0 168 	 *     number of clean/dirty adjacencies among those runs.
michael@0 169 *
michael@0 170 * The following code multiplies away the denominator prior to
michael@0 171 * comparison, in order to avoid division.
michael@0 172 *
michael@0 173 */
michael@0 174 {
michael@0 175 size_t a_val = (a->nruns_avail - a->nruns_adjac) *
michael@0 176 b->nruns_avail;
michael@0 177 size_t b_val = (b->nruns_avail - b->nruns_adjac) *
michael@0 178 a->nruns_avail;
michael@0 179
michael@0 180 if (a_val < b_val)
michael@0 181 return (1);
michael@0 182 if (a_val > b_val)
michael@0 183 return (-1);
michael@0 184 }
michael@0 185 /*
michael@0 186 * Break ties by chunk address. For fragmented chunks, report lower
michael@0 187 * addresses as "lower", so that fragmentation reduction happens first
michael@0 188 * at lower addresses. However, use the opposite ordering for
michael@0 189 * unfragmented chunks, in order to increase the chances of
michael@0 190 * re-allocating dirty runs.
michael@0 191 */
michael@0 192 {
michael@0 193 uintptr_t a_chunk = (uintptr_t)a;
michael@0 194 uintptr_t b_chunk = (uintptr_t)b;
michael@0 195 int ret = ((a_chunk > b_chunk) - (a_chunk < b_chunk));
michael@0 196 if (a->nruns_adjac == 0) {
michael@0 197 assert(b->nruns_adjac == 0);
michael@0 198 ret = -ret;
michael@0 199 }
michael@0 200 return (ret);
michael@0 201 }
michael@0 202 }
michael@0 203
michael@0 204 /* Generate red-black tree functions. */
michael@0 205 rb_gen(static UNUSED, arena_chunk_dirty_, arena_chunk_tree_t, arena_chunk_t,
michael@0 206 dirty_link, arena_chunk_dirty_comp)
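/*
 * Illustrative worked example, not part of the upstream source (numbers are
 * hypothetical): a chunk a with nruns_avail == 4 and nruns_adjac == 2 has
 * ratio (4-2)/4 == 0.5, while a chunk b with nruns_avail == 5 and
 * nruns_adjac == 1 has ratio (5-1)/5 == 0.8.  The cross-multiplied values are
 * a_val == (4-2)*5 == 10 and b_val == (5-1)*4 == 16, so a_val < b_val and
 * arena_chunk_dirty_comp() returns 1, exactly as comparing 0.5 against 0.8
 * would, but without performing any division.
 */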
michael@0 207
michael@0 208 static inline bool
michael@0 209 arena_avail_adjac_pred(arena_chunk_t *chunk, size_t pageind)
michael@0 210 {
michael@0 211 bool ret;
michael@0 212
michael@0 213 if (pageind-1 < map_bias)
michael@0 214 ret = false;
michael@0 215 else {
michael@0 216 ret = (arena_mapbits_allocated_get(chunk, pageind-1) == 0);
michael@0 217 assert(ret == false || arena_mapbits_dirty_get(chunk,
michael@0 218 pageind-1) != arena_mapbits_dirty_get(chunk, pageind));
michael@0 219 }
michael@0 220 return (ret);
michael@0 221 }
michael@0 222
michael@0 223 static inline bool
michael@0 224 arena_avail_adjac_succ(arena_chunk_t *chunk, size_t pageind, size_t npages)
michael@0 225 {
michael@0 226 bool ret;
michael@0 227
michael@0 228 if (pageind+npages == chunk_npages)
michael@0 229 ret = false;
michael@0 230 else {
michael@0 231 assert(pageind+npages < chunk_npages);
michael@0 232 ret = (arena_mapbits_allocated_get(chunk, pageind+npages) == 0);
michael@0 233 assert(ret == false || arena_mapbits_dirty_get(chunk, pageind)
michael@0 234 != arena_mapbits_dirty_get(chunk, pageind+npages));
michael@0 235 }
michael@0 236 return (ret);
michael@0 237 }
michael@0 238
michael@0 239 static inline bool
michael@0 240 arena_avail_adjac(arena_chunk_t *chunk, size_t pageind, size_t npages)
michael@0 241 {
michael@0 242
michael@0 243 return (arena_avail_adjac_pred(chunk, pageind) ||
michael@0 244 arena_avail_adjac_succ(chunk, pageind, npages));
michael@0 245 }
michael@0 246
michael@0 247 static void
michael@0 248 arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
michael@0 249 size_t npages, bool maybe_adjac_pred, bool maybe_adjac_succ)
michael@0 250 {
michael@0 251
michael@0 252 assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
michael@0 253 LG_PAGE));
michael@0 254
michael@0 255 /*
michael@0 256 * chunks_dirty is keyed by nruns_{avail,adjac}, so the chunk must be
michael@0 257 * removed and reinserted even if the run to be inserted is clean.
michael@0 258 */
michael@0 259 if (chunk->ndirty != 0)
michael@0 260 arena_chunk_dirty_remove(&arena->chunks_dirty, chunk);
michael@0 261
michael@0 262 if (maybe_adjac_pred && arena_avail_adjac_pred(chunk, pageind))
michael@0 263 chunk->nruns_adjac++;
michael@0 264 if (maybe_adjac_succ && arena_avail_adjac_succ(chunk, pageind, npages))
michael@0 265 chunk->nruns_adjac++;
michael@0 266 chunk->nruns_avail++;
michael@0 267 assert(chunk->nruns_avail > chunk->nruns_adjac);
michael@0 268
michael@0 269 if (arena_mapbits_dirty_get(chunk, pageind) != 0) {
michael@0 270 arena->ndirty += npages;
michael@0 271 chunk->ndirty += npages;
michael@0 272 }
michael@0 273 if (chunk->ndirty != 0)
michael@0 274 arena_chunk_dirty_insert(&arena->chunks_dirty, chunk);
michael@0 275
michael@0 276 arena_avail_tree_insert(&arena->runs_avail, arena_mapp_get(chunk,
michael@0 277 pageind));
michael@0 278 }
michael@0 279
michael@0 280 static void
michael@0 281 arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
michael@0 282 size_t npages, bool maybe_adjac_pred, bool maybe_adjac_succ)
michael@0 283 {
michael@0 284
michael@0 285 assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
michael@0 286 LG_PAGE));
michael@0 287
michael@0 288 /*
michael@0 289 * chunks_dirty is keyed by nruns_{avail,adjac}, so the chunk must be
michael@0 290 * removed and reinserted even if the run to be removed is clean.
michael@0 291 */
michael@0 292 if (chunk->ndirty != 0)
michael@0 293 arena_chunk_dirty_remove(&arena->chunks_dirty, chunk);
michael@0 294
michael@0 295 if (maybe_adjac_pred && arena_avail_adjac_pred(chunk, pageind))
michael@0 296 chunk->nruns_adjac--;
michael@0 297 if (maybe_adjac_succ && arena_avail_adjac_succ(chunk, pageind, npages))
michael@0 298 chunk->nruns_adjac--;
michael@0 299 chunk->nruns_avail--;
michael@0 300 assert(chunk->nruns_avail > chunk->nruns_adjac || (chunk->nruns_avail
michael@0 301 == 0 && chunk->nruns_adjac == 0));
michael@0 302
michael@0 303 if (arena_mapbits_dirty_get(chunk, pageind) != 0) {
michael@0 304 arena->ndirty -= npages;
michael@0 305 chunk->ndirty -= npages;
michael@0 306 }
michael@0 307 if (chunk->ndirty != 0)
michael@0 308 arena_chunk_dirty_insert(&arena->chunks_dirty, chunk);
michael@0 309
michael@0 310 arena_avail_tree_remove(&arena->runs_avail, arena_mapp_get(chunk,
michael@0 311 pageind));
michael@0 312 }
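/*
 * Illustrative sketch, not part of the upstream source: chunks_dirty is keyed
 * by fields that arena_avail_insert() and arena_avail_remove() mutate
 * (nruns_avail, nruns_adjac, ndirty), so the chunk is unlinked before those
 * fields change and relinked afterwards.  The general pattern, with
 * hypothetical names, is:
 */
#if 0	/* sketch only; statements and names are hypothetical */
	if (node_is_linked(node))
		tree_remove(&tree, node);
	node->key_field = new_value;	/* safe: node is not in the tree */
	if (node_should_be_linked(node))
		tree_insert(&tree, node);
#endif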
michael@0 313
michael@0 314 static inline void *
michael@0 315 arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
michael@0 316 {
michael@0 317 void *ret;
michael@0 318 unsigned regind;
michael@0 319 bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
michael@0 320 (uintptr_t)bin_info->bitmap_offset);
michael@0 321
michael@0 322 assert(run->nfree > 0);
michael@0 323 assert(bitmap_full(bitmap, &bin_info->bitmap_info) == false);
michael@0 324
michael@0 325 regind = bitmap_sfu(bitmap, &bin_info->bitmap_info);
michael@0 326 ret = (void *)((uintptr_t)run + (uintptr_t)bin_info->reg0_offset +
michael@0 327 (uintptr_t)(bin_info->reg_interval * regind));
michael@0 328 run->nfree--;
michael@0 329 if (regind == run->nextind)
michael@0 330 run->nextind++;
michael@0 331 assert(regind < run->nextind);
michael@0 332 return (ret);
michael@0 333 }
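/*
 * Illustrative worked example, not part of the upstream source (bin geometry
 * is hypothetical): with reg0_offset == 32 and reg_interval == 64, region
 * index regind == 3 maps to (uintptr_t)run + 32 + 3*64, i.e. 224 bytes past
 * the start of the run.  bitmap_sfu() ("set first unset") returns the lowest
 * free index, so regions are handed out from low addresses upward.
 */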
michael@0 334
michael@0 335 static inline void
michael@0 336 arena_run_reg_dalloc(arena_run_t *run, void *ptr)
michael@0 337 {
michael@0 338 arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
michael@0 339 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
michael@0 340 size_t mapbits = arena_mapbits_get(chunk, pageind);
michael@0 341 size_t binind = arena_ptr_small_binind_get(ptr, mapbits);
michael@0 342 arena_bin_info_t *bin_info = &arena_bin_info[binind];
michael@0 343 unsigned regind = arena_run_regind(run, bin_info, ptr);
michael@0 344 bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
michael@0 345 (uintptr_t)bin_info->bitmap_offset);
michael@0 346
michael@0 347 assert(run->nfree < bin_info->nregs);
michael@0 348 /* Freeing an interior pointer can cause assertion failure. */
michael@0 349 assert(((uintptr_t)ptr - ((uintptr_t)run +
michael@0 350 (uintptr_t)bin_info->reg0_offset)) %
michael@0 351 (uintptr_t)bin_info->reg_interval == 0);
michael@0 352 assert((uintptr_t)ptr >= (uintptr_t)run +
michael@0 353 (uintptr_t)bin_info->reg0_offset);
michael@0 354 /* Freeing an unallocated pointer can cause assertion failure. */
michael@0 355 assert(bitmap_get(bitmap, &bin_info->bitmap_info, regind));
michael@0 356
michael@0 357 bitmap_unset(bitmap, &bin_info->bitmap_info, regind);
michael@0 358 run->nfree++;
michael@0 359 }
michael@0 360
michael@0 361 static inline void
michael@0 362 arena_chunk_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
michael@0 363 {
michael@0 364 size_t i;
michael@0 365 UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE));
michael@0 366
michael@0 367 for (i = 0; i < PAGE / sizeof(size_t); i++)
michael@0 368 assert(p[i] == 0);
michael@0 369 }
michael@0 370
michael@0 371 static void
michael@0 372 arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
michael@0 373 size_t binind, bool zero)
michael@0 374 {
michael@0 375 arena_chunk_t *chunk;
michael@0 376 size_t run_ind, total_pages, need_pages, rem_pages, i;
michael@0 377 size_t flag_dirty;
michael@0 378
michael@0 379 assert((large && binind == BININD_INVALID) || (large == false && binind
michael@0 380 != BININD_INVALID));
michael@0 381
michael@0 382 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
michael@0 383 run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
michael@0 384 flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
michael@0 385 total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >>
michael@0 386 LG_PAGE;
michael@0 387 assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) ==
michael@0 388 flag_dirty);
michael@0 389 need_pages = (size >> LG_PAGE);
michael@0 390 assert(need_pages > 0);
michael@0 391 assert(need_pages <= total_pages);
michael@0 392 rem_pages = total_pages - need_pages;
michael@0 393
michael@0 394 arena_avail_remove(arena, chunk, run_ind, total_pages, true, true);
michael@0 395 if (config_stats) {
michael@0 396 /*
michael@0 397 * Update stats_cactive if nactive is crossing a chunk
michael@0 398 * multiple.
michael@0 399 */
michael@0 400 size_t cactive_diff = CHUNK_CEILING((arena->nactive +
michael@0 401 need_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
michael@0 402 LG_PAGE);
michael@0 403 if (cactive_diff != 0)
michael@0 404 stats_cactive_add(cactive_diff);
michael@0 405 }
michael@0 406 arena->nactive += need_pages;
michael@0 407
michael@0 408 /* Keep track of trailing unused pages for later use. */
michael@0 409 if (rem_pages > 0) {
michael@0 410 if (flag_dirty != 0) {
michael@0 411 arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
michael@0 412 (rem_pages << LG_PAGE), CHUNK_MAP_DIRTY);
michael@0 413 arena_mapbits_unallocated_set(chunk,
michael@0 414 run_ind+total_pages-1, (rem_pages << LG_PAGE),
michael@0 415 CHUNK_MAP_DIRTY);
michael@0 416 } else {
michael@0 417 arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
michael@0 418 (rem_pages << LG_PAGE),
michael@0 419 arena_mapbits_unzeroed_get(chunk,
michael@0 420 run_ind+need_pages));
michael@0 421 arena_mapbits_unallocated_set(chunk,
michael@0 422 run_ind+total_pages-1, (rem_pages << LG_PAGE),
michael@0 423 arena_mapbits_unzeroed_get(chunk,
michael@0 424 run_ind+total_pages-1));
michael@0 425 }
michael@0 426 arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages,
michael@0 427 false, true);
michael@0 428 }
michael@0 429
michael@0 430 /*
michael@0 431 * Update the page map separately for large vs. small runs, since it is
michael@0 432 * possible to avoid iteration for large mallocs.
michael@0 433 */
michael@0 434 if (large) {
michael@0 435 if (zero) {
michael@0 436 if (flag_dirty == 0) {
michael@0 437 /*
michael@0 438 * The run is clean, so some pages may be
michael@0 439 * zeroed (i.e. never before touched).
michael@0 440 */
michael@0 441 for (i = 0; i < need_pages; i++) {
michael@0 442 if (arena_mapbits_unzeroed_get(chunk,
michael@0 443 run_ind+i) != 0) {
michael@0 444 VALGRIND_MAKE_MEM_UNDEFINED(
michael@0 445 (void *)((uintptr_t)
michael@0 446 chunk + ((run_ind+i) <<
michael@0 447 LG_PAGE)), PAGE);
michael@0 448 memset((void *)((uintptr_t)
michael@0 449 chunk + ((run_ind+i) <<
michael@0 450 LG_PAGE)), 0, PAGE);
michael@0 451 } else if (config_debug) {
michael@0 452 VALGRIND_MAKE_MEM_DEFINED(
michael@0 453 (void *)((uintptr_t)
michael@0 454 chunk + ((run_ind+i) <<
michael@0 455 LG_PAGE)), PAGE);
michael@0 456 arena_chunk_validate_zeroed(
michael@0 457 chunk, run_ind+i);
michael@0 458 }
michael@0 459 }
michael@0 460 } else {
michael@0 461 /*
michael@0 462 * The run is dirty, so all pages must be
michael@0 463 * zeroed.
michael@0 464 */
michael@0 465 VALGRIND_MAKE_MEM_UNDEFINED((void
michael@0 466 *)((uintptr_t)chunk + (run_ind <<
michael@0 467 LG_PAGE)), (need_pages << LG_PAGE));
michael@0 468 memset((void *)((uintptr_t)chunk + (run_ind <<
michael@0 469 LG_PAGE)), 0, (need_pages << LG_PAGE));
michael@0 470 }
michael@0 471 }
michael@0 472
michael@0 473 /*
michael@0 474 * Set the last element first, in case the run only contains one
michael@0 475 * page (i.e. both statements set the same element).
michael@0 476 */
michael@0 477 arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0,
michael@0 478 flag_dirty);
michael@0 479 arena_mapbits_large_set(chunk, run_ind, size, flag_dirty);
michael@0 480 } else {
michael@0 481 assert(zero == false);
michael@0 482 /*
michael@0 483 * Propagate the dirty and unzeroed flags to the allocated
michael@0 484 * small run, so that arena_dalloc_bin_run() has the ability to
michael@0 485 * conditionally trim clean pages.
michael@0 486 */
michael@0 487 arena_mapbits_small_set(chunk, run_ind, 0, binind, flag_dirty);
michael@0 488 /*
michael@0 489 * The first page will always be dirtied during small run
michael@0 490 * initialization, so a validation failure here would not
michael@0 491 * actually cause an observable failure.
michael@0 492 */
michael@0 493 if (config_debug && flag_dirty == 0 &&
michael@0 494 arena_mapbits_unzeroed_get(chunk, run_ind) == 0)
michael@0 495 arena_chunk_validate_zeroed(chunk, run_ind);
michael@0 496 for (i = 1; i < need_pages - 1; i++) {
michael@0 497 arena_mapbits_small_set(chunk, run_ind+i, i, binind, 0);
michael@0 498 if (config_debug && flag_dirty == 0 &&
michael@0 499 arena_mapbits_unzeroed_get(chunk, run_ind+i) == 0)
michael@0 500 arena_chunk_validate_zeroed(chunk, run_ind+i);
michael@0 501 }
michael@0 502 arena_mapbits_small_set(chunk, run_ind+need_pages-1,
michael@0 503 need_pages-1, binind, flag_dirty);
michael@0 504 if (config_debug && flag_dirty == 0 &&
michael@0 505 arena_mapbits_unzeroed_get(chunk, run_ind+need_pages-1) ==
michael@0 506 0) {
michael@0 507 arena_chunk_validate_zeroed(chunk,
michael@0 508 run_ind+need_pages-1);
michael@0 509 }
michael@0 510 }
michael@0 511 }
michael@0 512
michael@0 513 static arena_chunk_t *
michael@0 514 arena_chunk_alloc(arena_t *arena)
michael@0 515 {
michael@0 516 arena_chunk_t *chunk;
michael@0 517 size_t i;
michael@0 518
michael@0 519 if (arena->spare != NULL) {
michael@0 520 chunk = arena->spare;
michael@0 521 arena->spare = NULL;
michael@0 522
michael@0 523 assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
michael@0 524 assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
michael@0 525 assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
michael@0 526 arena_maxclass);
michael@0 527 assert(arena_mapbits_unallocated_size_get(chunk,
michael@0 528 chunk_npages-1) == arena_maxclass);
michael@0 529 assert(arena_mapbits_dirty_get(chunk, map_bias) ==
michael@0 530 arena_mapbits_dirty_get(chunk, chunk_npages-1));
michael@0 531 } else {
michael@0 532 bool zero;
michael@0 533 size_t unzeroed;
michael@0 534
michael@0 535 zero = false;
michael@0 536 malloc_mutex_unlock(&arena->lock);
michael@0 537 chunk = (arena_chunk_t *)chunk_alloc(chunksize, chunksize,
michael@0 538 false, &zero, arena->dss_prec);
michael@0 539 malloc_mutex_lock(&arena->lock);
michael@0 540 if (chunk == NULL)
michael@0 541 return (NULL);
michael@0 542 if (config_stats)
michael@0 543 arena->stats.mapped += chunksize;
michael@0 544
michael@0 545 chunk->arena = arena;
michael@0 546
michael@0 547 /*
michael@0 548 * Claim that no pages are in use, since the header is merely
michael@0 549 * overhead.
michael@0 550 */
michael@0 551 chunk->ndirty = 0;
michael@0 552
michael@0 553 chunk->nruns_avail = 0;
michael@0 554 chunk->nruns_adjac = 0;
michael@0 555
michael@0 556 /*
michael@0 557 * Initialize the map to contain one maximal free untouched run.
michael@0 558 * Mark the pages as zeroed iff chunk_alloc() returned a zeroed
michael@0 559 * chunk.
michael@0 560 */
michael@0 561 unzeroed = zero ? 0 : CHUNK_MAP_UNZEROED;
michael@0 562 arena_mapbits_unallocated_set(chunk, map_bias, arena_maxclass,
michael@0 563 unzeroed);
michael@0 564 /*
michael@0 565 * There is no need to initialize the internal page map entries
michael@0 566 * unless the chunk is not zeroed.
michael@0 567 */
michael@0 568 if (zero == false) {
michael@0 569 for (i = map_bias+1; i < chunk_npages-1; i++)
michael@0 570 arena_mapbits_unzeroed_set(chunk, i, unzeroed);
michael@0 571 } else if (config_debug) {
michael@0 572 for (i = map_bias+1; i < chunk_npages-1; i++) {
michael@0 573 assert(arena_mapbits_unzeroed_get(chunk, i) ==
michael@0 574 unzeroed);
michael@0 575 }
michael@0 576 }
michael@0 577 arena_mapbits_unallocated_set(chunk, chunk_npages-1,
michael@0 578 arena_maxclass, unzeroed);
michael@0 579 }
michael@0 580
michael@0 581 /* Insert the run into the runs_avail tree. */
michael@0 582 arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias,
michael@0 583 false, false);
michael@0 584
michael@0 585 return (chunk);
michael@0 586 }
michael@0 587
michael@0 588 static void
michael@0 589 arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
michael@0 590 {
michael@0 591 assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
michael@0 592 assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
michael@0 593 assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
michael@0 594 arena_maxclass);
michael@0 595 assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
michael@0 596 arena_maxclass);
michael@0 597 assert(arena_mapbits_dirty_get(chunk, map_bias) ==
michael@0 598 arena_mapbits_dirty_get(chunk, chunk_npages-1));
michael@0 599
michael@0 600 /*
michael@0 601 * Remove run from the runs_avail tree, so that the arena does not use
michael@0 602 * it.
michael@0 603 */
michael@0 604 arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias,
michael@0 605 false, false);
michael@0 606
michael@0 607 if (arena->spare != NULL) {
michael@0 608 arena_chunk_t *spare = arena->spare;
michael@0 609
michael@0 610 arena->spare = chunk;
michael@0 611 malloc_mutex_unlock(&arena->lock);
michael@0 612 chunk_dealloc((void *)spare, chunksize, true);
michael@0 613 malloc_mutex_lock(&arena->lock);
michael@0 614 if (config_stats)
michael@0 615 arena->stats.mapped -= chunksize;
michael@0 616 } else
michael@0 617 arena->spare = chunk;
michael@0 618 }
michael@0 619
michael@0 620 static arena_run_t *
michael@0 621 arena_run_alloc_helper(arena_t *arena, size_t size, bool large, size_t binind,
michael@0 622 bool zero)
michael@0 623 {
michael@0 624 arena_run_t *run;
michael@0 625 arena_chunk_map_t *mapelm, key;
michael@0 626
michael@0 627 key.bits = size | CHUNK_MAP_KEY;
michael@0 628 mapelm = arena_avail_tree_nsearch(&arena->runs_avail, &key);
michael@0 629 if (mapelm != NULL) {
michael@0 630 arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
michael@0 631 size_t pageind = (((uintptr_t)mapelm -
michael@0 632 (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
michael@0 633 + map_bias;
michael@0 634
michael@0 635 run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
michael@0 636 LG_PAGE));
michael@0 637 arena_run_split(arena, run, size, large, binind, zero);
michael@0 638 return (run);
michael@0 639 }
michael@0 640
michael@0 641 return (NULL);
michael@0 642 }
michael@0 643
michael@0 644 static arena_run_t *
michael@0 645 arena_run_alloc(arena_t *arena, size_t size, bool large, size_t binind,
michael@0 646 bool zero)
michael@0 647 {
michael@0 648 arena_chunk_t *chunk;
michael@0 649 arena_run_t *run;
michael@0 650
michael@0 651 assert(size <= arena_maxclass);
michael@0 652 assert((size & PAGE_MASK) == 0);
michael@0 653 assert((large && binind == BININD_INVALID) || (large == false && binind
michael@0 654 != BININD_INVALID));
michael@0 655
michael@0 656 /* Search the arena's chunks for the lowest best fit. */
michael@0 657 run = arena_run_alloc_helper(arena, size, large, binind, zero);
michael@0 658 if (run != NULL)
michael@0 659 return (run);
michael@0 660
michael@0 661 /*
michael@0 662 * No usable runs. Create a new chunk from which to allocate the run.
michael@0 663 */
michael@0 664 chunk = arena_chunk_alloc(arena);
michael@0 665 if (chunk != NULL) {
michael@0 666 run = (arena_run_t *)((uintptr_t)chunk + (map_bias << LG_PAGE));
michael@0 667 arena_run_split(arena, run, size, large, binind, zero);
michael@0 668 return (run);
michael@0 669 }
michael@0 670
michael@0 671 /*
michael@0 672 * arena_chunk_alloc() failed, but another thread may have made
michael@0 673 * sufficient memory available while this one dropped arena->lock in
michael@0 674 * arena_chunk_alloc(), so search one more time.
michael@0 675 */
michael@0 676 return (arena_run_alloc_helper(arena, size, large, binind, zero));
michael@0 677 }
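/*
 * Illustrative sketch, not part of the upstream source: arena_chunk_alloc()
 * drops arena->lock around chunk_alloc(), so another thread may have freed a
 * suitable run in the meantime; the final call above catches that case.  The
 * shape of arena_run_alloc(), with hypothetical names, is:
 */
#if 0	/* sketch only; names are hypothetical */
	run = search_runs_avail(size);		/* fast path */
	if (run != NULL)
		return (run);
	chunk = allocate_new_chunk();		/* drops and retakes arena->lock */
	if (chunk != NULL)
		return (split_run_from(chunk, size));
	return (search_runs_avail(size));	/* retry after the race window */
#endif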
michael@0 678
michael@0 679 static inline void
michael@0 680 arena_maybe_purge(arena_t *arena)
michael@0 681 {
michael@0 682 size_t npurgeable, threshold;
michael@0 683
michael@0 684 /* Don't purge if the option is disabled. */
michael@0 685 if (opt_lg_dirty_mult < 0)
michael@0 686 return;
michael@0 687 /* Don't purge if all dirty pages are already being purged. */
michael@0 688 if (arena->ndirty <= arena->npurgatory)
michael@0 689 return;
michael@0 690 npurgeable = arena->ndirty - arena->npurgatory;
michael@0 691 threshold = (arena->nactive >> opt_lg_dirty_mult);
michael@0 692 /*
michael@0 693 * Don't purge unless the number of purgeable pages exceeds the
michael@0 694 * threshold.
michael@0 695 */
michael@0 696 if (npurgeable <= threshold)
michael@0 697 return;
michael@0 698
michael@0 699 arena_purge(arena, false);
michael@0 700 }
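/*
 * Illustrative worked example, not part of the upstream source (values are
 * hypothetical): with opt_lg_dirty_mult == 3 and arena->nactive == 4096
 * pages, the threshold is 4096 >> 3 == 512, so purging starts only once more
 * than 512 dirty pages are purgeable (dirty and not already counted in
 * npurgatory).
 */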
michael@0 701
michael@0 702 static inline size_t
michael@0 703 arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk, bool all)
michael@0 704 {
michael@0 705 size_t npurged;
michael@0 706 ql_head(arena_chunk_map_t) mapelms;
michael@0 707 arena_chunk_map_t *mapelm;
michael@0 708 size_t pageind, npages;
michael@0 709 size_t nmadvise;
michael@0 710
michael@0 711 ql_new(&mapelms);
michael@0 712
michael@0 713 /*
michael@0 714 * If chunk is the spare, temporarily re-allocate it, 1) so that its
michael@0 715 * run is reinserted into runs_avail, and 2) so that it cannot be
michael@0 716 * completely discarded by another thread while arena->lock is dropped
michael@0 717 * by this thread. Note that the arena_run_dalloc() call will
michael@0 718 * implicitly deallocate the chunk, so no explicit action is required
michael@0 719 * in this function to deallocate the chunk.
michael@0 720 *
michael@0 721 * Note that once a chunk contains dirty pages, it cannot again contain
michael@0 722 * a single run unless 1) it is a dirty run, or 2) this function purges
michael@0 723 * dirty pages and causes the transition to a single clean run. Thus
michael@0 724 * (chunk == arena->spare) is possible, but it is not possible for
michael@0 725 * this function to be called on the spare unless it contains a dirty
michael@0 726 * run.
michael@0 727 */
michael@0 728 if (chunk == arena->spare) {
michael@0 729 assert(arena_mapbits_dirty_get(chunk, map_bias) != 0);
michael@0 730 assert(arena_mapbits_dirty_get(chunk, chunk_npages-1) != 0);
michael@0 731
michael@0 732 arena_chunk_alloc(arena);
michael@0 733 }
michael@0 734
michael@0 735 if (config_stats)
michael@0 736 arena->stats.purged += chunk->ndirty;
michael@0 737
michael@0 738 /*
michael@0 739 * Operate on all dirty runs if there is no clean/dirty run
michael@0 740 * fragmentation.
michael@0 741 */
michael@0 742 if (chunk->nruns_adjac == 0)
michael@0 743 all = true;
michael@0 744
michael@0 745 /*
michael@0 746 * Temporarily allocate free dirty runs within chunk. If all is false,
michael@0 747 * only operate on dirty runs that are fragments; otherwise operate on
michael@0 748 * all dirty runs.
michael@0 749 */
michael@0 750 for (pageind = map_bias; pageind < chunk_npages; pageind += npages) {
michael@0 751 mapelm = arena_mapp_get(chunk, pageind);
michael@0 752 if (arena_mapbits_allocated_get(chunk, pageind) == 0) {
michael@0 753 size_t run_size =
michael@0 754 arena_mapbits_unallocated_size_get(chunk, pageind);
michael@0 755
michael@0 756 npages = run_size >> LG_PAGE;
michael@0 757 assert(pageind + npages <= chunk_npages);
michael@0 758 assert(arena_mapbits_dirty_get(chunk, pageind) ==
michael@0 759 arena_mapbits_dirty_get(chunk, pageind+npages-1));
michael@0 760
michael@0 761 if (arena_mapbits_dirty_get(chunk, pageind) != 0 &&
michael@0 762 (all || arena_avail_adjac(chunk, pageind,
michael@0 763 npages))) {
michael@0 764 arena_run_t *run = (arena_run_t *)((uintptr_t)
michael@0 765 chunk + (uintptr_t)(pageind << LG_PAGE));
michael@0 766
michael@0 767 arena_run_split(arena, run, run_size, true,
michael@0 768 BININD_INVALID, false);
michael@0 769 /* Append to list for later processing. */
michael@0 770 ql_elm_new(mapelm, u.ql_link);
michael@0 771 ql_tail_insert(&mapelms, mapelm, u.ql_link);
michael@0 772 }
michael@0 773 } else {
michael@0 774 /* Skip run. */
michael@0 775 if (arena_mapbits_large_get(chunk, pageind) != 0) {
michael@0 776 npages = arena_mapbits_large_size_get(chunk,
michael@0 777 pageind) >> LG_PAGE;
michael@0 778 } else {
michael@0 779 size_t binind;
michael@0 780 arena_bin_info_t *bin_info;
michael@0 781 arena_run_t *run = (arena_run_t *)((uintptr_t)
michael@0 782 chunk + (uintptr_t)(pageind << LG_PAGE));
michael@0 783
michael@0 784 assert(arena_mapbits_small_runind_get(chunk,
michael@0 785 pageind) == 0);
michael@0 786 binind = arena_bin_index(arena, run->bin);
michael@0 787 bin_info = &arena_bin_info[binind];
michael@0 788 npages = bin_info->run_size >> LG_PAGE;
michael@0 789 }
michael@0 790 }
michael@0 791 }
michael@0 792 assert(pageind == chunk_npages);
michael@0 793 assert(chunk->ndirty == 0 || all == false);
michael@0 794 assert(chunk->nruns_adjac == 0);
michael@0 795
michael@0 796 malloc_mutex_unlock(&arena->lock);
michael@0 797 if (config_stats)
michael@0 798 nmadvise = 0;
michael@0 799 npurged = 0;
michael@0 800 ql_foreach(mapelm, &mapelms, u.ql_link) {
michael@0 801 bool unzeroed;
michael@0 802 size_t flag_unzeroed, i;
michael@0 803
michael@0 804 pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
michael@0 805 sizeof(arena_chunk_map_t)) + map_bias;
michael@0 806 npages = arena_mapbits_large_size_get(chunk, pageind) >>
michael@0 807 LG_PAGE;
michael@0 808 assert(pageind + npages <= chunk_npages);
michael@0 809 unzeroed = pages_purge((void *)((uintptr_t)chunk + (pageind <<
michael@0 810 LG_PAGE)), (npages << LG_PAGE));
michael@0 811 flag_unzeroed = unzeroed ? CHUNK_MAP_UNZEROED : 0;
michael@0 812 /*
michael@0 813 * Set the unzeroed flag for all pages, now that pages_purge()
michael@0 814 * has returned whether the pages were zeroed as a side effect
michael@0 815 * of purging. This chunk map modification is safe even though
michael@0 816 * the arena mutex isn't currently owned by this thread,
michael@0 817 * because the run is marked as allocated, thus protecting it
michael@0 818 * from being modified by any other thread. As long as these
michael@0 819 * writes don't perturb the first and last elements'
michael@0 820 * CHUNK_MAP_ALLOCATED bits, behavior is well defined.
michael@0 821 */
michael@0 822 for (i = 0; i < npages; i++) {
michael@0 823 arena_mapbits_unzeroed_set(chunk, pageind+i,
michael@0 824 flag_unzeroed);
michael@0 825 }
michael@0 826 npurged += npages;
michael@0 827 if (config_stats)
michael@0 828 nmadvise++;
michael@0 829 }
michael@0 830 malloc_mutex_lock(&arena->lock);
michael@0 831 if (config_stats)
michael@0 832 arena->stats.nmadvise += nmadvise;
michael@0 833
michael@0 834 /* Deallocate runs. */
michael@0 835 for (mapelm = ql_first(&mapelms); mapelm != NULL;
michael@0 836 mapelm = ql_first(&mapelms)) {
michael@0 837 arena_run_t *run;
michael@0 838
michael@0 839 pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
michael@0 840 sizeof(arena_chunk_map_t)) + map_bias;
michael@0 841 run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)(pageind <<
michael@0 842 LG_PAGE));
michael@0 843 ql_remove(&mapelms, mapelm, u.ql_link);
michael@0 844 arena_run_dalloc(arena, run, false, true);
michael@0 845 }
michael@0 846
michael@0 847 return (npurged);
michael@0 848 }
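/*
 * Illustrative sketch, not part of the upstream source: arena_chunk_purge()
 * gathers its work while arena->lock is held, drops the lock for the
 * expensive pages_purge() calls, and retakes it to finish the bookkeeping.
 * With hypothetical names:
 */
#if 0	/* sketch only; names are hypothetical */
	collect_dirty_runs(&work_list);		/* phase 1: locked */
	malloc_mutex_unlock(&arena->lock);
	purge_collected_runs(&work_list);	/* phase 2: unlocked purges */
	malloc_mutex_lock(&arena->lock);
	dealloc_collected_runs(&work_list);	/* phase 3: locked, back into runs_avail */
#endif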
michael@0 849
michael@0 850 static arena_chunk_t *
michael@0 851 chunks_dirty_iter_cb(arena_chunk_tree_t *tree, arena_chunk_t *chunk, void *arg)
michael@0 852 {
michael@0 853 size_t *ndirty = (size_t *)arg;
michael@0 854
michael@0 855 assert(chunk->ndirty != 0);
michael@0 856 *ndirty += chunk->ndirty;
michael@0 857 return (NULL);
michael@0 858 }
michael@0 859
michael@0 860 static void
michael@0 861 arena_purge(arena_t *arena, bool all)
michael@0 862 {
michael@0 863 arena_chunk_t *chunk;
michael@0 864 size_t npurgatory;
michael@0 865 if (config_debug) {
michael@0 866 size_t ndirty = 0;
michael@0 867
michael@0 868 arena_chunk_dirty_iter(&arena->chunks_dirty, NULL,
michael@0 869 chunks_dirty_iter_cb, (void *)&ndirty);
michael@0 870 assert(ndirty == arena->ndirty);
michael@0 871 }
michael@0 872 assert(arena->ndirty > arena->npurgatory || all);
michael@0 873 assert((arena->nactive >> opt_lg_dirty_mult) < (arena->ndirty -
michael@0 874 arena->npurgatory) || all);
michael@0 875
michael@0 876 if (config_stats)
michael@0 877 arena->stats.npurge++;
michael@0 878
michael@0 879 /*
michael@0 880 * Compute the minimum number of pages that this thread should try to
michael@0 881 * purge, and add the result to arena->npurgatory. This will keep
michael@0 882 * multiple threads from racing to reduce ndirty below the threshold.
michael@0 883 */
michael@0 884 {
michael@0 885 size_t npurgeable = arena->ndirty - arena->npurgatory;
michael@0 886
michael@0 887 if (all == false) {
michael@0 888 size_t threshold = (arena->nactive >>
michael@0 889 opt_lg_dirty_mult);
michael@0 890
michael@0 891 npurgatory = npurgeable - threshold;
michael@0 892 } else
michael@0 893 npurgatory = npurgeable;
michael@0 894 }
michael@0 895 arena->npurgatory += npurgatory;
michael@0 896
michael@0 897 while (npurgatory > 0) {
michael@0 898 size_t npurgeable, npurged, nunpurged;
michael@0 899
michael@0 900 /* Get next chunk with dirty pages. */
michael@0 901 chunk = arena_chunk_dirty_first(&arena->chunks_dirty);
michael@0 902 if (chunk == NULL) {
michael@0 903 /*
michael@0 904 * This thread was unable to purge as many pages as
michael@0 905 * originally intended, due to races with other threads
michael@0 906 * that either did some of the purging work, or re-used
michael@0 907 * dirty pages.
michael@0 908 */
michael@0 909 arena->npurgatory -= npurgatory;
michael@0 910 return;
michael@0 911 }
michael@0 912 npurgeable = chunk->ndirty;
michael@0 913 assert(npurgeable != 0);
michael@0 914
michael@0 915 if (npurgeable > npurgatory && chunk->nruns_adjac == 0) {
michael@0 916 /*
michael@0 917 * This thread will purge all the dirty pages in chunk,
michael@0 918 * so set npurgatory to reflect this thread's intent to
michael@0 919 * purge the pages. This tends to reduce the chances
michael@0 920 * of the following scenario:
michael@0 921 *
michael@0 922 * 1) This thread sets arena->npurgatory such that
michael@0 923 * (arena->ndirty - arena->npurgatory) is at the
michael@0 924 * threshold.
michael@0 925 * 2) This thread drops arena->lock.
michael@0 926 * 3) Another thread causes one or more pages to be
michael@0 927 * dirtied, and immediately determines that it must
michael@0 928 * purge dirty pages.
michael@0 929 *
michael@0 930 * If this scenario *does* play out, that's okay,
michael@0 931 * because all of the purging work being done really
michael@0 932 * needs to happen.
michael@0 933 */
michael@0 934 arena->npurgatory += npurgeable - npurgatory;
michael@0 935 npurgatory = npurgeable;
michael@0 936 }
michael@0 937
michael@0 938 /*
michael@0 939 * Keep track of how many pages are purgeable, versus how many
michael@0 940 * actually get purged, and adjust counters accordingly.
michael@0 941 */
michael@0 942 arena->npurgatory -= npurgeable;
michael@0 943 npurgatory -= npurgeable;
michael@0 944 npurged = arena_chunk_purge(arena, chunk, all);
michael@0 945 nunpurged = npurgeable - npurged;
michael@0 946 arena->npurgatory += nunpurged;
michael@0 947 npurgatory += nunpurged;
michael@0 948 }
michael@0 949 }
michael@0 950
michael@0 951 void
michael@0 952 arena_purge_all(arena_t *arena)
michael@0 953 {
michael@0 954
michael@0 955 malloc_mutex_lock(&arena->lock);
michael@0 956 arena_purge(arena, true);
michael@0 957 malloc_mutex_unlock(&arena->lock);
michael@0 958 }
michael@0 959
michael@0 960 static void
michael@0 961 arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned)
michael@0 962 {
michael@0 963 arena_chunk_t *chunk;
michael@0 964 size_t size, run_ind, run_pages, flag_dirty;
michael@0 965
michael@0 966 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
michael@0 967 run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
michael@0 968 assert(run_ind >= map_bias);
michael@0 969 assert(run_ind < chunk_npages);
michael@0 970 if (arena_mapbits_large_get(chunk, run_ind) != 0) {
michael@0 971 size = arena_mapbits_large_size_get(chunk, run_ind);
michael@0 972 assert(size == PAGE ||
michael@0 973 arena_mapbits_large_size_get(chunk,
michael@0 974 run_ind+(size>>LG_PAGE)-1) == 0);
michael@0 975 } else {
michael@0 976 size_t binind = arena_bin_index(arena, run->bin);
michael@0 977 arena_bin_info_t *bin_info = &arena_bin_info[binind];
michael@0 978 size = bin_info->run_size;
michael@0 979 }
michael@0 980 run_pages = (size >> LG_PAGE);
michael@0 981 if (config_stats) {
michael@0 982 /*
michael@0 983 * Update stats_cactive if nactive is crossing a chunk
michael@0 984 * multiple.
michael@0 985 */
michael@0 986 size_t cactive_diff = CHUNK_CEILING(arena->nactive << LG_PAGE) -
michael@0 987 CHUNK_CEILING((arena->nactive - run_pages) << LG_PAGE);
michael@0 988 if (cactive_diff != 0)
michael@0 989 stats_cactive_sub(cactive_diff);
michael@0 990 }
michael@0 991 arena->nactive -= run_pages;
michael@0 992
michael@0 993 /*
michael@0 994 * The run is dirty if the caller claims to have dirtied it, as well as
michael@0 995 * if it was already dirty before being allocated and the caller
michael@0 996 * doesn't claim to have cleaned it.
michael@0 997 */
michael@0 998 assert(arena_mapbits_dirty_get(chunk, run_ind) ==
michael@0 999 arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
michael@0 1000 if (cleaned == false && arena_mapbits_dirty_get(chunk, run_ind) != 0)
michael@0 1001 dirty = true;
michael@0 1002 flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;
michael@0 1003
michael@0 1004 /* Mark pages as unallocated in the chunk map. */
michael@0 1005 if (dirty) {
michael@0 1006 arena_mapbits_unallocated_set(chunk, run_ind, size,
michael@0 1007 CHUNK_MAP_DIRTY);
michael@0 1008 arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
michael@0 1009 CHUNK_MAP_DIRTY);
michael@0 1010 } else {
michael@0 1011 arena_mapbits_unallocated_set(chunk, run_ind, size,
michael@0 1012 arena_mapbits_unzeroed_get(chunk, run_ind));
michael@0 1013 arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
michael@0 1014 arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1));
michael@0 1015 }
michael@0 1016
michael@0 1017 /* Try to coalesce forward. */
michael@0 1018 if (run_ind + run_pages < chunk_npages &&
michael@0 1019 arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 &&
michael@0 1020 arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty) {
michael@0 1021 size_t nrun_size = arena_mapbits_unallocated_size_get(chunk,
michael@0 1022 run_ind+run_pages);
michael@0 1023 size_t nrun_pages = nrun_size >> LG_PAGE;
michael@0 1024
michael@0 1025 /*
michael@0 1026 * Remove successor from runs_avail; the coalesced run is
michael@0 1027 * inserted later.
michael@0 1028 */
michael@0 1029 assert(arena_mapbits_unallocated_size_get(chunk,
michael@0 1030 run_ind+run_pages+nrun_pages-1) == nrun_size);
michael@0 1031 assert(arena_mapbits_dirty_get(chunk,
michael@0 1032 run_ind+run_pages+nrun_pages-1) == flag_dirty);
michael@0 1033 arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages,
michael@0 1034 false, true);
michael@0 1035
michael@0 1036 size += nrun_size;
michael@0 1037 run_pages += nrun_pages;
michael@0 1038
michael@0 1039 arena_mapbits_unallocated_size_set(chunk, run_ind, size);
michael@0 1040 arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
michael@0 1041 size);
michael@0 1042 }
michael@0 1043
michael@0 1044 /* Try to coalesce backward. */
michael@0 1045 if (run_ind > map_bias && arena_mapbits_allocated_get(chunk, run_ind-1)
michael@0 1046 == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) == flag_dirty) {
michael@0 1047 size_t prun_size = arena_mapbits_unallocated_size_get(chunk,
michael@0 1048 run_ind-1);
michael@0 1049 size_t prun_pages = prun_size >> LG_PAGE;
michael@0 1050
michael@0 1051 run_ind -= prun_pages;
michael@0 1052
michael@0 1053 /*
michael@0 1054 * Remove predecessor from runs_avail; the coalesced run is
michael@0 1055 * inserted later.
michael@0 1056 */
michael@0 1057 assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
michael@0 1058 prun_size);
michael@0 1059 assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty);
michael@0 1060 arena_avail_remove(arena, chunk, run_ind, prun_pages, true,
michael@0 1061 false);
michael@0 1062
michael@0 1063 size += prun_size;
michael@0 1064 run_pages += prun_pages;
michael@0 1065
michael@0 1066 arena_mapbits_unallocated_size_set(chunk, run_ind, size);
michael@0 1067 arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
michael@0 1068 size);
michael@0 1069 }
michael@0 1070
michael@0 1071 /* Insert into runs_avail, now that coalescing is complete. */
michael@0 1072 assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
michael@0 1073 arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1));
michael@0 1074 assert(arena_mapbits_dirty_get(chunk, run_ind) ==
michael@0 1075 arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
michael@0 1076 arena_avail_insert(arena, chunk, run_ind, run_pages, true, true);
michael@0 1077
michael@0 1078 /* Deallocate chunk if it is now completely unused. */
michael@0 1079 if (size == arena_maxclass) {
michael@0 1080 assert(run_ind == map_bias);
michael@0 1081 assert(run_pages == (arena_maxclass >> LG_PAGE));
michael@0 1082 arena_chunk_dealloc(arena, chunk);
michael@0 1083 }
michael@0 1084
michael@0 1085 /*
michael@0 1086 * It is okay to do dirty page processing here even if the chunk was
michael@0 1087 * deallocated above, since in that case it is the spare. Waiting
michael@0 1088 * until after possible chunk deallocation to do dirty processing
michael@0 1089 * allows for an old spare to be fully deallocated, thus decreasing the
michael@0 1090 * chances of spuriously crossing the dirty page purging threshold.
michael@0 1091 */
michael@0 1092 if (dirty)
michael@0 1093 arena_maybe_purge(arena);
michael@0 1094 }
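/*
 * Illustrative worked example of the coalescing above, not part of the
 * upstream source (layout is hypothetical): freeing a 2-page run at page
 * index 10, with a free 1-page predecessor at index 9 and a free 3-page
 * successor at index 12, removes both neighbours from runs_avail and inserts
 * a single 6-page run keyed at index 9, provided the neighbours carry the
 * same dirty state (flag_dirty) as the freed run.
 */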
michael@0 1095
michael@0 1096 static void
michael@0 1097 arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
michael@0 1098 size_t oldsize, size_t newsize)
michael@0 1099 {
michael@0 1100 size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
michael@0 1101 size_t head_npages = (oldsize - newsize) >> LG_PAGE;
michael@0 1102 size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
michael@0 1103
michael@0 1104 assert(oldsize > newsize);
michael@0 1105
michael@0 1106 /*
michael@0 1107 * Update the chunk map so that arena_run_dalloc() can treat the
michael@0 1108 * leading run as separately allocated. Set the last element of each
michael@0 1109 * run first, in case of single-page runs.
michael@0 1110 */
michael@0 1111 assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
michael@0 1112 arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty);
michael@0 1113 arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty);
michael@0 1114
michael@0 1115 if (config_debug) {
michael@0 1116 UNUSED size_t tail_npages = newsize >> LG_PAGE;
michael@0 1117 assert(arena_mapbits_large_size_get(chunk,
michael@0 1118 pageind+head_npages+tail_npages-1) == 0);
michael@0 1119 assert(arena_mapbits_dirty_get(chunk,
michael@0 1120 pageind+head_npages+tail_npages-1) == flag_dirty);
michael@0 1121 }
michael@0 1122 arena_mapbits_large_set(chunk, pageind+head_npages, newsize,
michael@0 1123 flag_dirty);
michael@0 1124
michael@0 1125 arena_run_dalloc(arena, run, false, false);
michael@0 1126 }
michael@0 1127
michael@0 1128 static void
michael@0 1129 arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
michael@0 1130 size_t oldsize, size_t newsize, bool dirty)
michael@0 1131 {
michael@0 1132 size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
michael@0 1133 size_t head_npages = newsize >> LG_PAGE;
michael@0 1134 size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
michael@0 1135
michael@0 1136 assert(oldsize > newsize);
michael@0 1137
michael@0 1138 /*
michael@0 1139 * Update the chunk map so that arena_run_dalloc() can treat the
michael@0 1140 * trailing run as separately allocated. Set the last element of each
michael@0 1141 * run first, in case of single-page runs.
michael@0 1142 */
michael@0 1143 assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
michael@0 1144 arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty);
michael@0 1145 arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty);
michael@0 1146
michael@0 1147 if (config_debug) {
michael@0 1148 UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE;
michael@0 1149 assert(arena_mapbits_large_size_get(chunk,
michael@0 1150 pageind+head_npages+tail_npages-1) == 0);
michael@0 1151 assert(arena_mapbits_dirty_get(chunk,
michael@0 1152 pageind+head_npages+tail_npages-1) == flag_dirty);
michael@0 1153 }
michael@0 1154 arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize,
michael@0 1155 flag_dirty);
michael@0 1156
michael@0 1157 arena_run_dalloc(arena, (arena_run_t *)((uintptr_t)run + newsize),
michael@0 1158 dirty, false);
michael@0 1159 }
michael@0 1160
michael@0 1161 static arena_run_t *
michael@0 1162 arena_bin_runs_first(arena_bin_t *bin)
michael@0 1163 {
michael@0 1164 arena_chunk_map_t *mapelm = arena_run_tree_first(&bin->runs);
michael@0 1165 if (mapelm != NULL) {
michael@0 1166 arena_chunk_t *chunk;
michael@0 1167 size_t pageind;
michael@0 1168 arena_run_t *run;
michael@0 1169
michael@0 1170 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(mapelm);
michael@0 1171 pageind = ((((uintptr_t)mapelm - (uintptr_t)chunk->map) /
michael@0 1172 sizeof(arena_chunk_map_t))) + map_bias;
michael@0 1173 run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
michael@0 1174 arena_mapbits_small_runind_get(chunk, pageind)) <<
michael@0 1175 LG_PAGE));
michael@0 1176 return (run);
michael@0 1177 }
michael@0 1178
michael@0 1179 return (NULL);
michael@0 1180 }
michael@0 1181
michael@0 1182 static void
michael@0 1183 arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run)
michael@0 1184 {
michael@0 1185 arena_chunk_t *chunk = CHUNK_ADDR2BASE(run);
michael@0 1186 size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
michael@0 1187 arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);
michael@0 1188
michael@0 1189 assert(arena_run_tree_search(&bin->runs, mapelm) == NULL);
michael@0 1190
michael@0 1191 arena_run_tree_insert(&bin->runs, mapelm);
michael@0 1192 }
michael@0 1193
michael@0 1194 static void
michael@0 1195 arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run)
michael@0 1196 {
michael@0 1197 arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
michael@0 1198 size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
michael@0 1199 arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);
michael@0 1200
michael@0 1201 assert(arena_run_tree_search(&bin->runs, mapelm) != NULL);
michael@0 1202
michael@0 1203 arena_run_tree_remove(&bin->runs, mapelm);
michael@0 1204 }
michael@0 1205
michael@0 1206 static arena_run_t *
michael@0 1207 arena_bin_nonfull_run_tryget(arena_bin_t *bin)
michael@0 1208 {
michael@0 1209 arena_run_t *run = arena_bin_runs_first(bin);
michael@0 1210 if (run != NULL) {
michael@0 1211 arena_bin_runs_remove(bin, run);
michael@0 1212 if (config_stats)
michael@0 1213 bin->stats.reruns++;
michael@0 1214 }
michael@0 1215 return (run);
michael@0 1216 }
michael@0 1217
michael@0 1218 static arena_run_t *
michael@0 1219 arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
michael@0 1220 {
michael@0 1221 arena_run_t *run;
michael@0 1222 size_t binind;
michael@0 1223 arena_bin_info_t *bin_info;
michael@0 1224
michael@0 1225 /* Look for a usable run. */
michael@0 1226 run = arena_bin_nonfull_run_tryget(bin);
michael@0 1227 if (run != NULL)
michael@0 1228 return (run);
michael@0 1229 /* No existing runs have any space available. */
michael@0 1230
michael@0 1231 binind = arena_bin_index(arena, bin);
michael@0 1232 bin_info = &arena_bin_info[binind];
michael@0 1233
michael@0 1234 /* Allocate a new run. */
michael@0 1235 malloc_mutex_unlock(&bin->lock);
michael@0 1236 /******************************/
michael@0 1237 malloc_mutex_lock(&arena->lock);
michael@0 1238 run = arena_run_alloc(arena, bin_info->run_size, false, binind, false);
michael@0 1239 if (run != NULL) {
michael@0 1240 bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
michael@0 1241 (uintptr_t)bin_info->bitmap_offset);
michael@0 1242
michael@0 1243 /* Initialize run internals. */
michael@0 1244 VALGRIND_MAKE_MEM_UNDEFINED(run, bin_info->reg0_offset -
michael@0 1245 bin_info->redzone_size);
michael@0 1246 run->bin = bin;
michael@0 1247 run->nextind = 0;
michael@0 1248 run->nfree = bin_info->nregs;
michael@0 1249 bitmap_init(bitmap, &bin_info->bitmap_info);
michael@0 1250 }
michael@0 1251 malloc_mutex_unlock(&arena->lock);
michael@0 1252 /********************************/
michael@0 1253 malloc_mutex_lock(&bin->lock);
michael@0 1254 if (run != NULL) {
michael@0 1255 if (config_stats) {
michael@0 1256 bin->stats.nruns++;
michael@0 1257 bin->stats.curruns++;
michael@0 1258 }
michael@0 1259 return (run);
michael@0 1260 }
michael@0 1261
michael@0 1262 /*
michael@0 1263 * arena_run_alloc() failed, but another thread may have made
michael@0 1264 * sufficient memory available while this one dropped bin->lock above,
michael@0 1265 * so search one more time.
michael@0 1266 */
michael@0 1267 run = arena_bin_nonfull_run_tryget(bin);
michael@0 1268 if (run != NULL)
michael@0 1269 return (run);
michael@0 1270
michael@0 1271 return (NULL);
michael@0 1272 }
michael@0 1273
michael@0 1274 /* Re-fill bin->runcur, then call arena_run_reg_alloc(). */
michael@0 1275 static void *
michael@0 1276 arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
michael@0 1277 {
michael@0 1278 void *ret;
michael@0 1279 size_t binind;
michael@0 1280 arena_bin_info_t *bin_info;
michael@0 1281 arena_run_t *run;
michael@0 1282
michael@0 1283 binind = arena_bin_index(arena, bin);
michael@0 1284 bin_info = &arena_bin_info[binind];
michael@0 1285 bin->runcur = NULL;
michael@0 1286 run = arena_bin_nonfull_run_get(arena, bin);
michael@0 1287 if (bin->runcur != NULL && bin->runcur->nfree > 0) {
michael@0 1288 /*
michael@0 1289 * Another thread updated runcur while this one ran without the
michael@0 1290 * bin lock in arena_bin_nonfull_run_get().
michael@0 1291 */
michael@0 1292 assert(bin->runcur->nfree > 0);
michael@0 1293 ret = arena_run_reg_alloc(bin->runcur, bin_info);
michael@0 1294 if (run != NULL) {
michael@0 1295 arena_chunk_t *chunk;
michael@0 1296
michael@0 1297 /*
michael@0 1298 * arena_run_alloc() may have allocated run, or it may
michael@0 1299 * have pulled run from the bin's run tree. Therefore
michael@0 1300 * it is unsafe to make any assumptions about how run
michael@0 1301 * has previously been used, and arena_bin_lower_run()
michael@0 1302 * must be called, as if a region were just deallocated
michael@0 1303 * from the run.
michael@0 1304 */
michael@0 1305 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
michael@0 1306 if (run->nfree == bin_info->nregs)
michael@0 1307 arena_dalloc_bin_run(arena, chunk, run, bin);
michael@0 1308 else
michael@0 1309 arena_bin_lower_run(arena, chunk, run, bin);
michael@0 1310 }
michael@0 1311 return (ret);
michael@0 1312 }
michael@0 1313
michael@0 1314 if (run == NULL)
michael@0 1315 return (NULL);
michael@0 1316
michael@0 1317 bin->runcur = run;
michael@0 1318
michael@0 1319 assert(bin->runcur->nfree > 0);
michael@0 1320
michael@0 1321 return (arena_run_reg_alloc(bin->runcur, bin_info));
michael@0 1322 }
michael@0 1323
michael@0 1324 void
michael@0 1325 arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind,
michael@0 1326 uint64_t prof_accumbytes)
michael@0 1327 {
michael@0 1328 unsigned i, nfill;
michael@0 1329 arena_bin_t *bin;
michael@0 1330 arena_run_t *run;
michael@0 1331 void *ptr;
michael@0 1332
michael@0 1333 assert(tbin->ncached == 0);
michael@0 1334
michael@0 1335 if (config_prof)
michael@0 1336 arena_prof_accum(arena, prof_accumbytes);
michael@0 1337 bin = &arena->bins[binind];
michael@0 1338 malloc_mutex_lock(&bin->lock);
michael@0 1339 for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
michael@0 1340 tbin->lg_fill_div); i < nfill; i++) {
michael@0 1341 if ((run = bin->runcur) != NULL && run->nfree > 0)
michael@0 1342 ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
michael@0 1343 else
michael@0 1344 ptr = arena_bin_malloc_hard(arena, bin);
michael@0 1345 if (ptr == NULL)
michael@0 1346 break;
michael@0 1347 if (config_fill && opt_junk) {
michael@0 1348 arena_alloc_junk_small(ptr, &arena_bin_info[binind],
michael@0 1349 true);
michael@0 1350 }
michael@0 1351 /* Insert such that low regions get used first. */
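		/*
		 * Illustration (hypothetical numbers): with ncached_max == 200
		 * and lg_fill_div == 1, nfill is 100 and the pointer obtained
		 * on iteration i is stored at avail[99 - i].  Pointers are
		 * later popped from the top of avail[], so the regions
		 * allocated earliest (the lowest-addressed ones within a run)
		 * are handed back out first.
		 */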
michael@0 1352 tbin->avail[nfill - 1 - i] = ptr;
michael@0 1353 }
michael@0 1354 if (config_stats) {
michael@0 1355 bin->stats.allocated += i * arena_bin_info[binind].reg_size;
michael@0 1356 bin->stats.nmalloc += i;
michael@0 1357 bin->stats.nrequests += tbin->tstats.nrequests;
michael@0 1358 bin->stats.nfills++;
michael@0 1359 tbin->tstats.nrequests = 0;
michael@0 1360 }
michael@0 1361 malloc_mutex_unlock(&bin->lock);
michael@0 1362 tbin->ncached = i;
michael@0 1363 }
michael@0 1364
michael@0 1365 void
michael@0 1366 arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero)
michael@0 1367 {
michael@0 1368
michael@0 1369 if (zero) {
michael@0 1370 size_t redzone_size = bin_info->redzone_size;
michael@0 1371 memset((void *)((uintptr_t)ptr - redzone_size), 0xa5,
michael@0 1372 redzone_size);
michael@0 1373 memset((void *)((uintptr_t)ptr + bin_info->reg_size), 0xa5,
michael@0 1374 redzone_size);
michael@0 1375 } else {
michael@0 1376 memset((void *)((uintptr_t)ptr - bin_info->redzone_size), 0xa5,
michael@0 1377 bin_info->reg_interval);
michael@0 1378 }
michael@0 1379 }
michael@0 1380
michael@0 1381 void
michael@0 1382 arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
michael@0 1383 {
michael@0 1384 size_t size = bin_info->reg_size;
michael@0 1385 size_t redzone_size = bin_info->redzone_size;
michael@0 1386 size_t i;
michael@0 1387 bool error = false;
michael@0 1388
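	/*
	 * Verify the leading redzone: the redzone_size bytes immediately
	 * before the region must still hold the 0xa5 fill written at
	 * allocation time.
	 */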
michael@0 1389 for (i = 1; i <= redzone_size; i++) {
michael@0 1390 unsigned byte;
michael@0 1391 if ((byte = *(uint8_t *)((uintptr_t)ptr - i)) != 0xa5) {
michael@0 1392 error = true;
michael@0 1393 malloc_printf("<jemalloc>: Corrupt redzone "
michael@0 1394 "%zu byte%s before %p (size %zu), byte=%#x\n", i,
michael@0 1395 (i == 1) ? "" : "s", ptr, size, byte);
michael@0 1396 }
michael@0 1397 }
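	/*
	 * Verify the trailing redzone: the redzone_size bytes starting at
	 * ptr + reg_size must likewise still be 0xa5.
	 */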
michael@0 1398 for (i = 0; i < redzone_size; i++) {
michael@0 1399 unsigned byte;
michael@0 1400 if ((byte = *(uint8_t *)((uintptr_t)ptr + size + i)) != 0xa5) {
michael@0 1401 error = true;
michael@0 1402 malloc_printf("<jemalloc>: Corrupt redzone "
michael@0 1403 "%zu byte%s after end of %p (size %zu), byte=%#x\n",
michael@0 1404 i, (i == 1) ? "" : "s", ptr, size, byte);
michael@0 1405 }
michael@0 1406 }
michael@0 1407 if (opt_abort && error)
michael@0 1408 abort();
michael@0 1409
michael@0 1410 memset((void *)((uintptr_t)ptr - redzone_size), 0x5a,
michael@0 1411 bin_info->reg_interval);
michael@0 1412 }
michael@0 1413
michael@0 1414 void *
michael@0 1415 arena_malloc_small(arena_t *arena, size_t size, bool zero)
michael@0 1416 {
michael@0 1417 void *ret;
michael@0 1418 arena_bin_t *bin;
michael@0 1419 arena_run_t *run;
michael@0 1420 size_t binind;
michael@0 1421
michael@0 1422 binind = SMALL_SIZE2BIN(size);
michael@0 1423 assert(binind < NBINS);
michael@0 1424 bin = &arena->bins[binind];
michael@0 1425 size = arena_bin_info[binind].reg_size;
michael@0 1426
michael@0 1427 malloc_mutex_lock(&bin->lock);
michael@0 1428 if ((run = bin->runcur) != NULL && run->nfree > 0)
michael@0 1429 ret = arena_run_reg_alloc(run, &arena_bin_info[binind]);
michael@0 1430 else
michael@0 1431 ret = arena_bin_malloc_hard(arena, bin);
michael@0 1432
michael@0 1433 if (ret == NULL) {
michael@0 1434 malloc_mutex_unlock(&bin->lock);
michael@0 1435 return (NULL);
michael@0 1436 }
michael@0 1437
michael@0 1438 if (config_stats) {
michael@0 1439 bin->stats.allocated += size;
michael@0 1440 bin->stats.nmalloc++;
michael@0 1441 bin->stats.nrequests++;
michael@0 1442 }
michael@0 1443 malloc_mutex_unlock(&bin->lock);
michael@0 1444 if (config_prof && isthreaded == false)
michael@0 1445 arena_prof_accum(arena, size);
michael@0 1446
michael@0 1447 if (zero == false) {
michael@0 1448 if (config_fill) {
michael@0 1449 if (opt_junk) {
michael@0 1450 arena_alloc_junk_small(ret,
michael@0 1451 &arena_bin_info[binind], false);
michael@0 1452 } else if (opt_zero)
michael@0 1453 memset(ret, 0, size);
michael@0 1454 }
michael@0 1455 } else {
michael@0 1456 if (config_fill && opt_junk) {
michael@0 1457 arena_alloc_junk_small(ret, &arena_bin_info[binind],
michael@0 1458 true);
michael@0 1459 }
michael@0 1460 VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
michael@0 1461 memset(ret, 0, size);
michael@0 1462 }
michael@0 1463
michael@0 1464 return (ret);
michael@0 1465 }
michael@0 1466
michael@0 1467 void *
michael@0 1468 arena_malloc_large(arena_t *arena, size_t size, bool zero)
michael@0 1469 {
michael@0 1470 void *ret;
michael@0 1471
michael@0 1472 /* Large allocation. */
michael@0 1473 size = PAGE_CEILING(size);
michael@0 1474 malloc_mutex_lock(&arena->lock);
michael@0 1475 ret = (void *)arena_run_alloc(arena, size, true, BININD_INVALID, zero);
michael@0 1476 if (ret == NULL) {
michael@0 1477 malloc_mutex_unlock(&arena->lock);
michael@0 1478 return (NULL);
michael@0 1479 }
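	/*
	 * Large runs are tracked per size class; lstats[] is indexed by the
	 * run size in pages minus one, hence (size >> LG_PAGE) - 1 below.
	 */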
michael@0 1480 if (config_stats) {
michael@0 1481 arena->stats.nmalloc_large++;
michael@0 1482 arena->stats.nrequests_large++;
michael@0 1483 arena->stats.allocated_large += size;
michael@0 1484 arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
michael@0 1485 arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
michael@0 1486 arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
michael@0 1487 }
michael@0 1488 if (config_prof)
michael@0 1489 arena_prof_accum_locked(arena, size);
michael@0 1490 malloc_mutex_unlock(&arena->lock);
michael@0 1491
michael@0 1492 if (zero == false) {
michael@0 1493 if (config_fill) {
michael@0 1494 if (opt_junk)
michael@0 1495 memset(ret, 0xa5, size);
michael@0 1496 else if (opt_zero)
michael@0 1497 memset(ret, 0, size);
michael@0 1498 }
michael@0 1499 }
michael@0 1500
michael@0 1501 return (ret);
michael@0 1502 }
michael@0 1503
michael@0 1504 /* Only handles large allocations that require more than page alignment. */
michael@0 1505 void *
michael@0 1506 arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
michael@0 1507 {
michael@0 1508 void *ret;
michael@0 1509 size_t alloc_size, leadsize, trailsize;
michael@0 1510 arena_run_t *run;
michael@0 1511 arena_chunk_t *chunk;
michael@0 1512
michael@0 1513 assert((size & PAGE_MASK) == 0);
michael@0 1514
michael@0 1515 alignment = PAGE_CEILING(alignment);
michael@0 1516 alloc_size = size + alignment - PAGE;
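	/*
	 * Over-allocate by (alignment - PAGE) bytes so that an aligned block
	 * of exactly size bytes can always be carved out of the run below.
	 * Illustration (hypothetical sizes, assuming 4 KiB pages): for
	 * size == 8192 and alignment == 16384, alloc_size == 20480; leadsize
	 * then falls in [0, 12288] depending on where the run happens to
	 * start, and trailsize == 12288 - leadsize absorbs the remainder.
	 */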
michael@0 1517
michael@0 1518 malloc_mutex_lock(&arena->lock);
michael@0 1519 run = arena_run_alloc(arena, alloc_size, true, BININD_INVALID, zero);
michael@0 1520 if (run == NULL) {
michael@0 1521 malloc_mutex_unlock(&arena->lock);
michael@0 1522 return (NULL);
michael@0 1523 }
michael@0 1524 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
michael@0 1525
michael@0 1526 leadsize = ALIGNMENT_CEILING((uintptr_t)run, alignment) -
michael@0 1527 (uintptr_t)run;
michael@0 1528 assert(alloc_size >= leadsize + size);
michael@0 1529 trailsize = alloc_size - leadsize - size;
michael@0 1530 ret = (void *)((uintptr_t)run + leadsize);
michael@0 1531 if (leadsize != 0) {
michael@0 1532 arena_run_trim_head(arena, chunk, run, alloc_size, alloc_size -
michael@0 1533 leadsize);
michael@0 1534 }
michael@0 1535 if (trailsize != 0) {
michael@0 1536 arena_run_trim_tail(arena, chunk, ret, size + trailsize, size,
michael@0 1537 false);
michael@0 1538 }
michael@0 1539
michael@0 1540 if (config_stats) {
michael@0 1541 arena->stats.nmalloc_large++;
michael@0 1542 arena->stats.nrequests_large++;
michael@0 1543 arena->stats.allocated_large += size;
michael@0 1544 arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
michael@0 1545 arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
michael@0 1546 arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
michael@0 1547 }
michael@0 1548 malloc_mutex_unlock(&arena->lock);
michael@0 1549
michael@0 1550 if (config_fill && zero == false) {
michael@0 1551 if (opt_junk)
michael@0 1552 memset(ret, 0xa5, size);
michael@0 1553 else if (opt_zero)
michael@0 1554 memset(ret, 0, size);
michael@0 1555 }
michael@0 1556 return (ret);
michael@0 1557 }
michael@0 1558
michael@0 1559 void
michael@0 1560 arena_prof_promoted(const void *ptr, size_t size)
michael@0 1561 {
michael@0 1562 arena_chunk_t *chunk;
michael@0 1563 size_t pageind, binind;
michael@0 1564
michael@0 1565 cassert(config_prof);
michael@0 1566 assert(ptr != NULL);
michael@0 1567 assert(CHUNK_ADDR2BASE(ptr) != ptr);
michael@0 1568 assert(isalloc(ptr, false) == PAGE);
michael@0 1569 assert(isalloc(ptr, true) == PAGE);
michael@0 1570 assert(size <= SMALL_MAXCLASS);
michael@0 1571
michael@0 1572 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
michael@0 1573 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
michael@0 1574 binind = SMALL_SIZE2BIN(size);
michael@0 1575 assert(binind < NBINS);
michael@0 1576 arena_mapbits_large_binind_set(chunk, pageind, binind);
michael@0 1577
michael@0 1578 assert(isalloc(ptr, false) == PAGE);
michael@0 1579 assert(isalloc(ptr, true) == size);
michael@0 1580 }
michael@0 1581
michael@0 1582 static void
michael@0 1583 arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
michael@0 1584 arena_bin_t *bin)
michael@0 1585 {
michael@0 1586
michael@0 1587 /* Dissociate run from bin. */
michael@0 1588 if (run == bin->runcur)
michael@0 1589 bin->runcur = NULL;
michael@0 1590 else {
michael@0 1591 size_t binind = arena_bin_index(chunk->arena, bin);
michael@0 1592 arena_bin_info_t *bin_info = &arena_bin_info[binind];
michael@0 1593
michael@0 1594 if (bin_info->nregs != 1) {
michael@0 1595 /*
michael@0 1596 * This block's conditional is necessary because if the
michael@0 1597 * run only contains one region, then it never gets
michael@0 1598 * inserted into the non-full runs tree.
michael@0 1599 */
michael@0 1600 arena_bin_runs_remove(bin, run);
michael@0 1601 }
michael@0 1602 }
michael@0 1603 }
michael@0 1604
michael@0 1605 static void
michael@0 1606 arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
michael@0 1607 arena_bin_t *bin)
michael@0 1608 {
michael@0 1609 size_t binind;
michael@0 1610 arena_bin_info_t *bin_info;
michael@0 1611 size_t npages, run_ind, past;
michael@0 1612
michael@0 1613 assert(run != bin->runcur);
michael@0 1614 assert(arena_run_tree_search(&bin->runs,
michael@0 1615 arena_mapp_get(chunk, ((uintptr_t)run-(uintptr_t)chunk)>>LG_PAGE))
michael@0 1616 == NULL);
michael@0 1617
michael@0 1618 binind = arena_bin_index(chunk->arena, run->bin);
michael@0 1619 bin_info = &arena_bin_info[binind];
michael@0 1620
michael@0 1621 malloc_mutex_unlock(&bin->lock);
michael@0 1622 /******************************/
michael@0 1623 npages = bin_info->run_size >> LG_PAGE;
michael@0 1624 run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
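	/*
	 * past is the chunk-relative index of the first page beyond the
	 * highest region (including its trailing redzone) ever handed out
	 * from this run; run->nextind is one past the highest region index
	 * ever allocated.  Pages at or beyond past were never written to.
	 */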
michael@0 1625 past = (size_t)(PAGE_CEILING((uintptr_t)run +
michael@0 1626 (uintptr_t)bin_info->reg0_offset + (uintptr_t)(run->nextind *
michael@0 1627 bin_info->reg_interval - bin_info->redzone_size) -
michael@0 1628 (uintptr_t)chunk) >> LG_PAGE);
michael@0 1629 malloc_mutex_lock(&arena->lock);
michael@0 1630
michael@0 1631 /*
michael@0 1632 * If the run was originally clean, and some pages were never touched,
michael@0 1633 * trim the clean pages before deallocating the dirty portion of the
michael@0 1634 * run.
michael@0 1635 */
michael@0 1636 assert(arena_mapbits_dirty_get(chunk, run_ind) ==
michael@0 1637 arena_mapbits_dirty_get(chunk, run_ind+npages-1));
michael@0 1638 if (arena_mapbits_dirty_get(chunk, run_ind) == 0 && past - run_ind <
michael@0 1639 npages) {
michael@0 1640 /* Trim clean pages. Convert to large run beforehand. */
michael@0 1641 assert(npages > 0);
michael@0 1642 arena_mapbits_large_set(chunk, run_ind, bin_info->run_size, 0);
michael@0 1643 arena_mapbits_large_set(chunk, run_ind+npages-1, 0, 0);
michael@0 1644 arena_run_trim_tail(arena, chunk, run, (npages << LG_PAGE),
michael@0 1645 ((past - run_ind) << LG_PAGE), false);
michael@0 1646 /* npages = past - run_ind; */
michael@0 1647 }
michael@0 1648 arena_run_dalloc(arena, run, true, false);
michael@0 1649 malloc_mutex_unlock(&arena->lock);
michael@0 1650 /****************************/
michael@0 1651 malloc_mutex_lock(&bin->lock);
michael@0 1652 if (config_stats)
michael@0 1653 bin->stats.curruns--;
michael@0 1654 }
michael@0 1655
michael@0 1656 static void
michael@0 1657 arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
michael@0 1658 arena_bin_t *bin)
michael@0 1659 {
michael@0 1660
michael@0 1661 /*
michael@0 1662 * Make sure that if bin->runcur is non-NULL, it refers to the lowest
michael@0 1663 * non-full run. It is okay to NULL runcur out rather than proactively
michael@0 1664 * keeping it pointing at the lowest non-full run.
michael@0 1665 */
michael@0 1666 if ((uintptr_t)run < (uintptr_t)bin->runcur) {
michael@0 1667 /* Switch runcur. */
michael@0 1668 if (bin->runcur->nfree > 0)
michael@0 1669 arena_bin_runs_insert(bin, bin->runcur);
michael@0 1670 bin->runcur = run;
michael@0 1671 if (config_stats)
michael@0 1672 bin->stats.reruns++;
michael@0 1673 } else
michael@0 1674 arena_bin_runs_insert(bin, run);
michael@0 1675 }
michael@0 1676
michael@0 1677 void
michael@0 1678 arena_dalloc_bin_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
michael@0 1679 arena_chunk_map_t *mapelm)
michael@0 1680 {
michael@0 1681 size_t pageind;
michael@0 1682 arena_run_t *run;
michael@0 1683 arena_bin_t *bin;
michael@0 1684 arena_bin_info_t *bin_info;
michael@0 1685 size_t size, binind;
michael@0 1686
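	/*
	 * Recover the run base from the page map: the map entry for this page
	 * records the page's offset (in pages) from the start of its small
	 * run, so subtracting that from pageind yields the run's first page.
	 */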
michael@0 1687 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
michael@0 1688 run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
michael@0 1689 arena_mapbits_small_runind_get(chunk, pageind)) << LG_PAGE));
michael@0 1690 bin = run->bin;
michael@0 1691 binind = arena_ptr_small_binind_get(ptr, mapelm->bits);
michael@0 1692 bin_info = &arena_bin_info[binind];
michael@0 1693 if (config_fill || config_stats)
michael@0 1694 size = bin_info->reg_size;
michael@0 1695
michael@0 1696 if (config_fill && opt_junk)
michael@0 1697 arena_dalloc_junk_small(ptr, bin_info);
michael@0 1698
michael@0 1699 arena_run_reg_dalloc(run, ptr);
michael@0 1700 if (run->nfree == bin_info->nregs) {
michael@0 1701 arena_dissociate_bin_run(chunk, run, bin);
michael@0 1702 arena_dalloc_bin_run(arena, chunk, run, bin);
michael@0 1703 } else if (run->nfree == 1 && run != bin->runcur)
michael@0 1704 arena_bin_lower_run(arena, chunk, run, bin);
michael@0 1705
michael@0 1706 if (config_stats) {
michael@0 1707 bin->stats.allocated -= size;
michael@0 1708 bin->stats.ndalloc++;
michael@0 1709 }
michael@0 1710 }
michael@0 1711
michael@0 1712 void
michael@0 1713 arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
michael@0 1714 size_t pageind, arena_chunk_map_t *mapelm)
michael@0 1715 {
michael@0 1716 arena_run_t *run;
michael@0 1717 arena_bin_t *bin;
michael@0 1718
michael@0 1719 run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
michael@0 1720 arena_mapbits_small_runind_get(chunk, pageind)) << LG_PAGE));
michael@0 1721 bin = run->bin;
michael@0 1722 malloc_mutex_lock(&bin->lock);
michael@0 1723 arena_dalloc_bin_locked(arena, chunk, ptr, mapelm);
michael@0 1724 malloc_mutex_unlock(&bin->lock);
michael@0 1725 }
michael@0 1726
michael@0 1727 void
michael@0 1728 arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
michael@0 1729 size_t pageind)
michael@0 1730 {
michael@0 1731 arena_chunk_map_t *mapelm;
michael@0 1732
michael@0 1733 if (config_debug) {
michael@0 1734 /* arena_ptr_small_binind_get() does extra sanity checking. */
michael@0 1735 assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
michael@0 1736 pageind)) != BININD_INVALID);
michael@0 1737 }
michael@0 1738 mapelm = arena_mapp_get(chunk, pageind);
michael@0 1739 arena_dalloc_bin(arena, chunk, ptr, pageind, mapelm);
michael@0 1740 }
michael@0 1741
michael@0 1742 void
michael@0 1743 arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr)
michael@0 1744 {
michael@0 1745
michael@0 1746 if (config_fill || config_stats) {
michael@0 1747 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
michael@0 1748 size_t size = arena_mapbits_large_size_get(chunk, pageind);
michael@0 1749
michael@0 1750 if (config_fill && config_stats && opt_junk)
michael@0 1751 memset(ptr, 0x5a, size);
michael@0 1752 if (config_stats) {
michael@0 1753 arena->stats.ndalloc_large++;
michael@0 1754 arena->stats.allocated_large -= size;
michael@0 1755 arena->stats.lstats[(size >> LG_PAGE) - 1].ndalloc++;
michael@0 1756 arena->stats.lstats[(size >> LG_PAGE) - 1].curruns--;
michael@0 1757 }
michael@0 1758 }
michael@0 1759
michael@0 1760 arena_run_dalloc(arena, (arena_run_t *)ptr, true, false);
michael@0 1761 }
michael@0 1762
michael@0 1763 void
michael@0 1764 arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
michael@0 1765 {
michael@0 1766
michael@0 1767 malloc_mutex_lock(&arena->lock);
michael@0 1768 arena_dalloc_large_locked(arena, chunk, ptr);
michael@0 1769 malloc_mutex_unlock(&arena->lock);
michael@0 1770 }
michael@0 1771
michael@0 1772 static void
michael@0 1773 arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
michael@0 1774 size_t oldsize, size_t size)
michael@0 1775 {
michael@0 1776
michael@0 1777 assert(size < oldsize);
michael@0 1778
michael@0 1779 /*
michael@0 1780 * Shrink the run, and make trailing pages available for other
michael@0 1781 * allocations.
michael@0 1782 */
michael@0 1783 malloc_mutex_lock(&arena->lock);
michael@0 1784 arena_run_trim_tail(arena, chunk, (arena_run_t *)ptr, oldsize, size,
michael@0 1785 true);
michael@0 1786 if (config_stats) {
michael@0 1787 arena->stats.ndalloc_large++;
michael@0 1788 arena->stats.allocated_large -= oldsize;
michael@0 1789 arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++;
michael@0 1790 arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--;
michael@0 1791
michael@0 1792 arena->stats.nmalloc_large++;
michael@0 1793 arena->stats.nrequests_large++;
michael@0 1794 arena->stats.allocated_large += size;
michael@0 1795 arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
michael@0 1796 arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
michael@0 1797 arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
michael@0 1798 }
michael@0 1799 malloc_mutex_unlock(&arena->lock);
michael@0 1800 }
michael@0 1801
michael@0 1802 static bool
michael@0 1803 arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
michael@0 1804 size_t oldsize, size_t size, size_t extra, bool zero)
michael@0 1805 {
michael@0 1806 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
michael@0 1807 size_t npages = oldsize >> LG_PAGE;
michael@0 1808 size_t followsize;
michael@0 1809
michael@0 1810 assert(oldsize == arena_mapbits_large_size_get(chunk, pageind));
michael@0 1811
michael@0 1812 /* Try to extend the run. */
michael@0 1813 assert(size + extra > oldsize);
michael@0 1814 malloc_mutex_lock(&arena->lock);
michael@0 1815 if (pageind + npages < chunk_npages &&
michael@0 1816 arena_mapbits_allocated_get(chunk, pageind+npages) == 0 &&
michael@0 1817 (followsize = arena_mapbits_unallocated_size_get(chunk,
michael@0 1818 pageind+npages)) >= size - oldsize) {
michael@0 1819 /*
michael@0 1820 * The next run is available and sufficiently large. Split the
michael@0 1821 * following run, then merge the first part with the existing
michael@0 1822 * allocation.
michael@0 1823 */
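		/*
		 * splitsize below is min(followsize, size + extra - oldsize):
		 * take the entire following free run if it fits within the
		 * size + extra request, otherwise only as much as is needed.
		 */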
michael@0 1824 size_t flag_dirty;
michael@0 1825 size_t splitsize = (oldsize + followsize <= size + extra)
michael@0 1826 ? followsize : size + extra - oldsize;
michael@0 1827 arena_run_split(arena, (arena_run_t *)((uintptr_t)chunk +
michael@0 1828 ((pageind+npages) << LG_PAGE)), splitsize, true,
michael@0 1829 BININD_INVALID, zero);
michael@0 1830
michael@0 1831 size = oldsize + splitsize;
michael@0 1832 npages = size >> LG_PAGE;
michael@0 1833
michael@0 1834 /*
michael@0 1835 * Mark the extended run as dirty if either portion of the run
michael@0 1836 * was dirty before allocation. This is rather pedantic,
michael@0 1837 * because there's not actually any sequence of events that
michael@0 1838 * could cause the resulting run to be passed to
michael@0 1839 * arena_run_dalloc() with the dirty argument set to false
michael@0 1840 * (which is when dirty flag consistency would really matter).
michael@0 1841 */
michael@0 1842 flag_dirty = arena_mapbits_dirty_get(chunk, pageind) |
michael@0 1843 arena_mapbits_dirty_get(chunk, pageind+npages-1);
michael@0 1844 arena_mapbits_large_set(chunk, pageind, size, flag_dirty);
michael@0 1845 arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty);
michael@0 1846
michael@0 1847 if (config_stats) {
michael@0 1848 arena->stats.ndalloc_large++;
michael@0 1849 arena->stats.allocated_large -= oldsize;
michael@0 1850 arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++;
michael@0 1851 arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--;
michael@0 1852
michael@0 1853 arena->stats.nmalloc_large++;
michael@0 1854 arena->stats.nrequests_large++;
michael@0 1855 arena->stats.allocated_large += size;
michael@0 1856 arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
michael@0 1857 arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
michael@0 1858 arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
michael@0 1859 }
michael@0 1860 malloc_mutex_unlock(&arena->lock);
michael@0 1861 return (false);
michael@0 1862 }
michael@0 1863 malloc_mutex_unlock(&arena->lock);
michael@0 1864
michael@0 1865 return (true);
michael@0 1866 }
michael@0 1867
michael@0 1868 /*
michael@0 1869 * Try to resize a large allocation, in order to avoid copying. This will
michael@0 1870  * always fail when growing an object if the following run is already in use.
michael@0 1871 */
michael@0 1872 static bool
michael@0 1873 arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra,
michael@0 1874 bool zero)
michael@0 1875 {
michael@0 1876 size_t psize;
michael@0 1877
michael@0 1878 psize = PAGE_CEILING(size + extra);
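	/*
	 * Three cases: the page-rounded request equals oldsize (nothing to
	 * resize, optionally junk the now-unused tail), psize < oldsize
	 * (shrink in place), or psize > oldsize (try to grow into the
	 * following run).
	 */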
michael@0 1879 if (psize == oldsize) {
michael@0 1880 /* Same size class. */
michael@0 1881 if (config_fill && opt_junk && size < oldsize) {
michael@0 1882 memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize -
michael@0 1883 size);
michael@0 1884 }
michael@0 1885 return (false);
michael@0 1886 } else {
michael@0 1887 arena_chunk_t *chunk;
michael@0 1888 arena_t *arena;
michael@0 1889
michael@0 1890 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
michael@0 1891 arena = chunk->arena;
michael@0 1892
michael@0 1893 if (psize < oldsize) {
michael@0 1894 			/* Fill before shrinking in order to avoid a race. */
michael@0 1895 if (config_fill && opt_junk) {
michael@0 1896 memset((void *)((uintptr_t)ptr + size), 0x5a,
michael@0 1897 oldsize - size);
michael@0 1898 }
michael@0 1899 arena_ralloc_large_shrink(arena, chunk, ptr, oldsize,
michael@0 1900 psize);
michael@0 1901 return (false);
michael@0 1902 } else {
michael@0 1903 bool ret = arena_ralloc_large_grow(arena, chunk, ptr,
michael@0 1904 oldsize, PAGE_CEILING(size),
michael@0 1905 psize - PAGE_CEILING(size), zero);
michael@0 1906 if (config_fill && ret == false && zero == false &&
michael@0 1907 opt_zero) {
michael@0 1908 memset((void *)((uintptr_t)ptr + oldsize), 0,
michael@0 1909 size - oldsize);
michael@0 1910 }
michael@0 1911 return (ret);
michael@0 1912 }
michael@0 1913 }
michael@0 1914 }
michael@0 1915
michael@0 1916 void *
michael@0 1917 arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
michael@0 1918 bool zero)
michael@0 1919 {
michael@0 1920
michael@0 1921 /*
michael@0 1922 * Avoid moving the allocation if the size class can be left the same.
michael@0 1923 */
michael@0 1924 if (oldsize <= arena_maxclass) {
michael@0 1925 if (oldsize <= SMALL_MAXCLASS) {
michael@0 1926 assert(arena_bin_info[SMALL_SIZE2BIN(oldsize)].reg_size
michael@0 1927 == oldsize);
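			/*
			 * The allocation can stay in place if either the
			 * (size + extra) request maps to the same small size
			 * class as oldsize, or the request is a shrink whose
			 * extra still reaches oldsize (size <= oldsize <=
			 * size + extra).
			 */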
michael@0 1928 if ((size + extra <= SMALL_MAXCLASS &&
michael@0 1929 SMALL_SIZE2BIN(size + extra) ==
michael@0 1930 SMALL_SIZE2BIN(oldsize)) || (size <= oldsize &&
michael@0 1931 size + extra >= oldsize)) {
michael@0 1932 if (config_fill && opt_junk && size < oldsize) {
michael@0 1933 memset((void *)((uintptr_t)ptr + size),
michael@0 1934 0x5a, oldsize - size);
michael@0 1935 }
michael@0 1936 return (ptr);
michael@0 1937 }
michael@0 1938 } else {
michael@0 1939 assert(size <= arena_maxclass);
michael@0 1940 if (size + extra > SMALL_MAXCLASS) {
michael@0 1941 if (arena_ralloc_large(ptr, oldsize, size,
michael@0 1942 extra, zero) == false)
michael@0 1943 return (ptr);
michael@0 1944 }
michael@0 1945 }
michael@0 1946 }
michael@0 1947
michael@0 1948 /* Reallocation would require a move. */
michael@0 1949 return (NULL);
michael@0 1950 }
michael@0 1951
michael@0 1952 void *
michael@0 1953 arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
michael@0 1954 size_t extra, size_t alignment, bool zero, bool try_tcache_alloc,
michael@0 1955 bool try_tcache_dalloc)
michael@0 1956 {
michael@0 1957 void *ret;
michael@0 1958 size_t copysize;
michael@0 1959
michael@0 1960 /* Try to avoid moving the allocation. */
michael@0 1961 ret = arena_ralloc_no_move(ptr, oldsize, size, extra, zero);
michael@0 1962 if (ret != NULL)
michael@0 1963 return (ret);
michael@0 1964
michael@0 1965 /*
michael@0 1966 * size and oldsize are different enough that we need to move the
michael@0 1967 * object. In that case, fall back to allocating new space and
michael@0 1968 * copying.
michael@0 1969 */
michael@0 1970 if (alignment != 0) {
michael@0 1971 size_t usize = sa2u(size + extra, alignment);
michael@0 1972 if (usize == 0)
michael@0 1973 return (NULL);
michael@0 1974 ret = ipallocx(usize, alignment, zero, try_tcache_alloc, arena);
michael@0 1975 } else
michael@0 1976 ret = arena_malloc(arena, size + extra, zero, try_tcache_alloc);
michael@0 1977
michael@0 1978 if (ret == NULL) {
michael@0 1979 if (extra == 0)
michael@0 1980 return (NULL);
michael@0 1981 /* Try again, this time without extra. */
michael@0 1982 if (alignment != 0) {
michael@0 1983 size_t usize = sa2u(size, alignment);
michael@0 1984 if (usize == 0)
michael@0 1985 return (NULL);
michael@0 1986 ret = ipallocx(usize, alignment, zero, try_tcache_alloc,
michael@0 1987 arena);
michael@0 1988 } else
michael@0 1989 ret = arena_malloc(arena, size, zero, try_tcache_alloc);
michael@0 1990
michael@0 1991 if (ret == NULL)
michael@0 1992 return (NULL);
michael@0 1993 }
michael@0 1994
michael@0 1995 	/* Junk/zero-filling were already done by ipallocx()/arena_malloc(). */
michael@0 1996
michael@0 1997 /*
michael@0 1998 * Copy at most size bytes (not size+extra), since the caller has no
michael@0 1999 * expectation that the extra bytes will be reliably preserved.
michael@0 2000 */
michael@0 2001 copysize = (size < oldsize) ? size : oldsize;
michael@0 2002 VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
michael@0 2003 memcpy(ret, ptr, copysize);
michael@0 2004 iqallocx(ptr, try_tcache_dalloc);
michael@0 2005 return (ret);
michael@0 2006 }
michael@0 2007
michael@0 2008 dss_prec_t
michael@0 2009 arena_dss_prec_get(arena_t *arena)
michael@0 2010 {
michael@0 2011 dss_prec_t ret;
michael@0 2012
michael@0 2013 malloc_mutex_lock(&arena->lock);
michael@0 2014 ret = arena->dss_prec;
michael@0 2015 malloc_mutex_unlock(&arena->lock);
michael@0 2016 return (ret);
michael@0 2017 }
michael@0 2018
michael@0 2019 void
michael@0 2020 arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec)
michael@0 2021 {
michael@0 2022
michael@0 2023 malloc_mutex_lock(&arena->lock);
michael@0 2024 arena->dss_prec = dss_prec;
michael@0 2025 malloc_mutex_unlock(&arena->lock);
michael@0 2026 }
michael@0 2027
michael@0 2028 void
michael@0 2029 arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
michael@0 2030 size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
michael@0 2031 malloc_large_stats_t *lstats)
michael@0 2032 {
michael@0 2033 unsigned i;
michael@0 2034
michael@0 2035 malloc_mutex_lock(&arena->lock);
michael@0 2036 *dss = dss_prec_names[arena->dss_prec];
michael@0 2037 *nactive += arena->nactive;
michael@0 2038 *ndirty += arena->ndirty;
michael@0 2039
michael@0 2040 astats->mapped += arena->stats.mapped;
michael@0 2041 astats->npurge += arena->stats.npurge;
michael@0 2042 astats->nmadvise += arena->stats.nmadvise;
michael@0 2043 astats->purged += arena->stats.purged;
michael@0 2044 astats->allocated_large += arena->stats.allocated_large;
michael@0 2045 astats->nmalloc_large += arena->stats.nmalloc_large;
michael@0 2046 astats->ndalloc_large += arena->stats.ndalloc_large;
michael@0 2047 astats->nrequests_large += arena->stats.nrequests_large;
michael@0 2048
michael@0 2049 for (i = 0; i < nlclasses; i++) {
michael@0 2050 lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
michael@0 2051 lstats[i].ndalloc += arena->stats.lstats[i].ndalloc;
michael@0 2052 lstats[i].nrequests += arena->stats.lstats[i].nrequests;
michael@0 2053 lstats[i].curruns += arena->stats.lstats[i].curruns;
michael@0 2054 }
michael@0 2055 malloc_mutex_unlock(&arena->lock);
michael@0 2056
michael@0 2057 for (i = 0; i < NBINS; i++) {
michael@0 2058 arena_bin_t *bin = &arena->bins[i];
michael@0 2059
michael@0 2060 malloc_mutex_lock(&bin->lock);
michael@0 2061 bstats[i].allocated += bin->stats.allocated;
michael@0 2062 bstats[i].nmalloc += bin->stats.nmalloc;
michael@0 2063 bstats[i].ndalloc += bin->stats.ndalloc;
michael@0 2064 bstats[i].nrequests += bin->stats.nrequests;
michael@0 2065 if (config_tcache) {
michael@0 2066 bstats[i].nfills += bin->stats.nfills;
michael@0 2067 bstats[i].nflushes += bin->stats.nflushes;
michael@0 2068 }
michael@0 2069 bstats[i].nruns += bin->stats.nruns;
michael@0 2070 bstats[i].reruns += bin->stats.reruns;
michael@0 2071 bstats[i].curruns += bin->stats.curruns;
michael@0 2072 malloc_mutex_unlock(&bin->lock);
michael@0 2073 }
michael@0 2074 }
michael@0 2075
michael@0 2076 bool
michael@0 2077 arena_new(arena_t *arena, unsigned ind)
michael@0 2078 {
michael@0 2079 unsigned i;
michael@0 2080 arena_bin_t *bin;
michael@0 2081
michael@0 2082 arena->ind = ind;
michael@0 2083 arena->nthreads = 0;
michael@0 2084
michael@0 2085 if (malloc_mutex_init(&arena->lock))
michael@0 2086 return (true);
michael@0 2087
michael@0 2088 if (config_stats) {
michael@0 2089 memset(&arena->stats, 0, sizeof(arena_stats_t));
michael@0 2090 arena->stats.lstats =
michael@0 2091 (malloc_large_stats_t *)base_alloc(nlclasses *
michael@0 2092 sizeof(malloc_large_stats_t));
michael@0 2093 if (arena->stats.lstats == NULL)
michael@0 2094 return (true);
michael@0 2095 memset(arena->stats.lstats, 0, nlclasses *
michael@0 2096 sizeof(malloc_large_stats_t));
michael@0 2097 if (config_tcache)
michael@0 2098 ql_new(&arena->tcache_ql);
michael@0 2099 }
michael@0 2100
michael@0 2101 if (config_prof)
michael@0 2102 arena->prof_accumbytes = 0;
michael@0 2103
michael@0 2104 arena->dss_prec = chunk_dss_prec_get();
michael@0 2105
michael@0 2106 /* Initialize chunks. */
michael@0 2107 arena_chunk_dirty_new(&arena->chunks_dirty);
michael@0 2108 arena->spare = NULL;
michael@0 2109
michael@0 2110 arena->nactive = 0;
michael@0 2111 arena->ndirty = 0;
michael@0 2112 arena->npurgatory = 0;
michael@0 2113
michael@0 2114 arena_avail_tree_new(&arena->runs_avail);
michael@0 2115
michael@0 2116 /* Initialize bins. */
michael@0 2117 for (i = 0; i < NBINS; i++) {
michael@0 2118 bin = &arena->bins[i];
michael@0 2119 if (malloc_mutex_init(&bin->lock))
michael@0 2120 return (true);
michael@0 2121 bin->runcur = NULL;
michael@0 2122 arena_run_tree_new(&bin->runs);
michael@0 2123 if (config_stats)
michael@0 2124 memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
michael@0 2125 }
michael@0 2126
michael@0 2127 return (false);
michael@0 2128 }
michael@0 2129
michael@0 2130 /*
michael@0 2131 * Calculate bin_info->run_size such that it meets the following constraints:
michael@0 2132 *
michael@0 2133 * *) bin_info->run_size >= min_run_size
michael@0 2134 * *) bin_info->run_size <= arena_maxclass
michael@0 2135 * *) run header overhead <= RUN_MAX_OVRHD (or header overhead relaxed).
michael@0 2136 * *) bin_info->nregs <= RUN_MAXREGS
michael@0 2137 *
michael@0 2138 * bin_info->nregs, bin_info->bitmap_offset, and bin_info->reg0_offset are also
michael@0 2139 * calculated here, since these settings are all interdependent.
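 *
 * The run size is grown one page at a time in the expansion loop below until
 * the per-run header overhead falls below the RUN_MAX_OVRHD target fraction
 * of the run, the overhead constraint is relaxed for sufficiently large
 * regions, or growing further would exceed RUN_MAXREGS or arena_maxclass.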
michael@0 2140 */
michael@0 2141 static size_t
michael@0 2142 bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size)
michael@0 2143 {
michael@0 2144 size_t pad_size;
michael@0 2145 size_t try_run_size, good_run_size;
michael@0 2146 uint32_t try_nregs, good_nregs;
michael@0 2147 uint32_t try_hdr_size, good_hdr_size;
michael@0 2148 uint32_t try_bitmap_offset, good_bitmap_offset;
michael@0 2149 uint32_t try_ctx0_offset, good_ctx0_offset;
michael@0 2150 uint32_t try_redzone0_offset, good_redzone0_offset;
michael@0 2151
michael@0 2152 assert(min_run_size >= PAGE);
michael@0 2153 assert(min_run_size <= arena_maxclass);
michael@0 2154
michael@0 2155 /*
michael@0 2156 * Determine redzone size based on minimum alignment and minimum
michael@0 2157 * redzone size. Add padding to the end of the run if it is needed to
michael@0 2158 * align the regions. The padding allows each redzone to be half the
michael@0 2159 * minimum alignment; without the padding, each redzone would have to
michael@0 2160 * be twice as large in order to maintain alignment.
michael@0 2161 */
michael@0 2162 if (config_fill && opt_redzone) {
michael@0 2163 size_t align_min = ZU(1) << (ffs(bin_info->reg_size) - 1);
michael@0 2164 if (align_min <= REDZONE_MINSIZE) {
michael@0 2165 bin_info->redzone_size = REDZONE_MINSIZE;
michael@0 2166 pad_size = 0;
michael@0 2167 } else {
michael@0 2168 bin_info->redzone_size = align_min >> 1;
michael@0 2169 pad_size = bin_info->redzone_size;
michael@0 2170 }
michael@0 2171 } else {
michael@0 2172 bin_info->redzone_size = 0;
michael@0 2173 pad_size = 0;
michael@0 2174 }
michael@0 2175 bin_info->reg_interval = bin_info->reg_size +
michael@0 2176 (bin_info->redzone_size << 1);
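	/*
	 * Resulting region layout within a run (illustrative):
	 *
	 *   ... | redzone | reg_size bytes | redzone | redzone | reg_size ...
	 *
	 * Each region is book-ended by its own redzones, so consecutive
	 * regions are separated by two redzones and regions repeat every
	 * reg_interval == reg_size + 2 * redzone_size bytes.
	 */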
michael@0 2177
michael@0 2178 /*
michael@0 2179 * Calculate known-valid settings before entering the run_size
michael@0 2180 * expansion loop, so that the first part of the loop always copies
michael@0 2181 * valid settings.
michael@0 2182 *
michael@0 2183 * The do..while loop iteratively reduces the number of regions until
michael@0 2184 * the run header and the regions no longer overlap. A closed formula
michael@0 2185 * would be quite messy, since there is an interdependency between the
michael@0 2186 * header's mask length and the number of regions.
michael@0 2187 */
michael@0 2188 try_run_size = min_run_size;
michael@0 2189 try_nregs = ((try_run_size - sizeof(arena_run_t)) /
michael@0 2190 bin_info->reg_interval)
michael@0 2191 + 1; /* Counter-act try_nregs-- in loop. */
michael@0 2192 if (try_nregs > RUN_MAXREGS) {
michael@0 2193 try_nregs = RUN_MAXREGS
michael@0 2194 + 1; /* Counter-act try_nregs-- in loop. */
michael@0 2195 }
michael@0 2196 do {
michael@0 2197 try_nregs--;
michael@0 2198 try_hdr_size = sizeof(arena_run_t);
michael@0 2199 /* Pad to a long boundary. */
michael@0 2200 try_hdr_size = LONG_CEILING(try_hdr_size);
michael@0 2201 try_bitmap_offset = try_hdr_size;
michael@0 2202 /* Add space for bitmap. */
michael@0 2203 try_hdr_size += bitmap_size(try_nregs);
michael@0 2204 if (config_prof && opt_prof && prof_promote == false) {
michael@0 2205 /* Pad to a quantum boundary. */
michael@0 2206 try_hdr_size = QUANTUM_CEILING(try_hdr_size);
michael@0 2207 try_ctx0_offset = try_hdr_size;
michael@0 2208 /* Add space for one (prof_ctx_t *) per region. */
michael@0 2209 try_hdr_size += try_nregs * sizeof(prof_ctx_t *);
michael@0 2210 } else
michael@0 2211 try_ctx0_offset = 0;
michael@0 2212 try_redzone0_offset = try_run_size - (try_nregs *
michael@0 2213 bin_info->reg_interval) - pad_size;
michael@0 2214 } while (try_hdr_size > try_redzone0_offset);
michael@0 2215
michael@0 2216 /* run_size expansion loop. */
michael@0 2217 do {
michael@0 2218 /*
michael@0 2219 * Copy valid settings before trying more aggressive settings.
michael@0 2220 */
michael@0 2221 good_run_size = try_run_size;
michael@0 2222 good_nregs = try_nregs;
michael@0 2223 good_hdr_size = try_hdr_size;
michael@0 2224 good_bitmap_offset = try_bitmap_offset;
michael@0 2225 good_ctx0_offset = try_ctx0_offset;
michael@0 2226 good_redzone0_offset = try_redzone0_offset;
michael@0 2227
michael@0 2228 /* Try more aggressive settings. */
michael@0 2229 try_run_size += PAGE;
michael@0 2230 try_nregs = ((try_run_size - sizeof(arena_run_t) - pad_size) /
michael@0 2231 bin_info->reg_interval)
michael@0 2232 + 1; /* Counter-act try_nregs-- in loop. */
michael@0 2233 if (try_nregs > RUN_MAXREGS) {
michael@0 2234 try_nregs = RUN_MAXREGS
michael@0 2235 + 1; /* Counter-act try_nregs-- in loop. */
michael@0 2236 }
michael@0 2237 do {
michael@0 2238 try_nregs--;
michael@0 2239 try_hdr_size = sizeof(arena_run_t);
michael@0 2240 /* Pad to a long boundary. */
michael@0 2241 try_hdr_size = LONG_CEILING(try_hdr_size);
michael@0 2242 try_bitmap_offset = try_hdr_size;
michael@0 2243 /* Add space for bitmap. */
michael@0 2244 try_hdr_size += bitmap_size(try_nregs);
michael@0 2245 if (config_prof && opt_prof && prof_promote == false) {
michael@0 2246 /* Pad to a quantum boundary. */
michael@0 2247 try_hdr_size = QUANTUM_CEILING(try_hdr_size);
michael@0 2248 try_ctx0_offset = try_hdr_size;
michael@0 2249 /*
michael@0 2250 * Add space for one (prof_ctx_t *) per region.
michael@0 2251 */
michael@0 2252 try_hdr_size += try_nregs *
michael@0 2253 sizeof(prof_ctx_t *);
michael@0 2254 }
michael@0 2255 try_redzone0_offset = try_run_size - (try_nregs *
michael@0 2256 bin_info->reg_interval) - pad_size;
michael@0 2257 } while (try_hdr_size > try_redzone0_offset);
michael@0 2258 	} while (try_run_size <= arena_maxclass
michael@0 2260 && RUN_MAX_OVRHD * (bin_info->reg_interval << 3) >
michael@0 2261 RUN_MAX_OVRHD_RELAX
michael@0 2262 && (try_redzone0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size
michael@0 2263 && try_nregs < RUN_MAXREGS);
michael@0 2264
michael@0 2265 assert(good_hdr_size <= good_redzone0_offset);
michael@0 2266
michael@0 2267 /* Copy final settings. */
michael@0 2268 bin_info->run_size = good_run_size;
michael@0 2269 bin_info->nregs = good_nregs;
michael@0 2270 bin_info->bitmap_offset = good_bitmap_offset;
michael@0 2271 bin_info->ctx0_offset = good_ctx0_offset;
michael@0 2272 bin_info->reg0_offset = good_redzone0_offset + bin_info->redzone_size;
michael@0 2273
michael@0 2274 assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs
michael@0 2275 * bin_info->reg_interval) + pad_size == bin_info->run_size);
michael@0 2276
michael@0 2277 return (good_run_size);
michael@0 2278 }
michael@0 2279
michael@0 2280 static void
michael@0 2281 bin_info_init(void)
michael@0 2282 {
michael@0 2283 arena_bin_info_t *bin_info;
michael@0 2284 size_t prev_run_size = PAGE;
michael@0 2285
michael@0 2286 #define SIZE_CLASS(bin, delta, size) \
michael@0 2287 bin_info = &arena_bin_info[bin]; \
michael@0 2288 bin_info->reg_size = size; \
michael@0 2289 prev_run_size = bin_info_run_size_calc(bin_info, prev_run_size);\
michael@0 2290 bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
michael@0 2291 SIZE_CLASSES
michael@0 2292 #undef SIZE_CLASS
michael@0 2293 }
michael@0 2294
michael@0 2295 void
michael@0 2296 arena_boot(void)
michael@0 2297 {
michael@0 2298 size_t header_size;
michael@0 2299 unsigned i;
michael@0 2300
michael@0 2301 /*
michael@0 2302 * Compute the header size such that it is large enough to contain the
michael@0 2303 * page map. The page map is biased to omit entries for the header
michael@0 2304 * itself, so some iteration is necessary to compute the map bias.
michael@0 2305 *
michael@0 2306 * 1) Compute safe header_size and map_bias values that include enough
michael@0 2307 * space for an unbiased page map.
michael@0 2308 * 2) Refine map_bias based on (1) to omit the header pages in the page
michael@0 2309 * map. The resulting map_bias may be one too small.
michael@0 2310 * 3) Refine map_bias based on (2). The result will be >= the result
michael@0 2311 * from (2), and will always be correct.
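	 *
	 * Purely illustrative numbers (not the real structure sizes): with
	 * 4 KiB pages, chunk_npages == 512, a 128 byte fixed header, and
	 * 32 byte map entries, the iterations give header_size 16512 ->
	 * map_bias 5, then 16352 -> map_bias 4, then 16384 -> map_bias 4,
	 * at which point the value has converged.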
michael@0 2312 */
michael@0 2313 map_bias = 0;
michael@0 2314 for (i = 0; i < 3; i++) {
michael@0 2315 header_size = offsetof(arena_chunk_t, map) +
michael@0 2316 (sizeof(arena_chunk_map_t) * (chunk_npages-map_bias));
michael@0 2317 map_bias = (header_size >> LG_PAGE) + ((header_size & PAGE_MASK)
michael@0 2318 != 0);
michael@0 2319 }
michael@0 2320 assert(map_bias > 0);
michael@0 2321
michael@0 2322 arena_maxclass = chunksize - (map_bias << LG_PAGE);
michael@0 2323
michael@0 2324 bin_info_init();
michael@0 2325 }
michael@0 2326
michael@0 2327 void
michael@0 2328 arena_prefork(arena_t *arena)
michael@0 2329 {
michael@0 2330 unsigned i;
michael@0 2331
michael@0 2332 malloc_mutex_prefork(&arena->lock);
michael@0 2333 for (i = 0; i < NBINS; i++)
michael@0 2334 malloc_mutex_prefork(&arena->bins[i].lock);
michael@0 2335 }
michael@0 2336
michael@0 2337 void
michael@0 2338 arena_postfork_parent(arena_t *arena)
michael@0 2339 {
michael@0 2340 unsigned i;
michael@0 2341
michael@0 2342 for (i = 0; i < NBINS; i++)
michael@0 2343 malloc_mutex_postfork_parent(&arena->bins[i].lock);
michael@0 2344 malloc_mutex_postfork_parent(&arena->lock);
michael@0 2345 }
michael@0 2346
michael@0 2347 void
michael@0 2348 arena_postfork_child(arena_t *arena)
michael@0 2349 {
michael@0 2350 unsigned i;
michael@0 2351
michael@0 2352 for (i = 0; i < NBINS; i++)
michael@0 2353 malloc_mutex_postfork_child(&arena->bins[i].lock);
michael@0 2354 malloc_mutex_postfork_child(&arena->lock);
michael@0 2355 }
