memory/jemalloc/src/src/tcache.c

author       Michael Schloh von Bennewitz <michael@schloh.com>
date         Thu, 15 Jan 2015 15:59:08 +0100
branch       TOR_BUG_9701
changeset    10:ac0c01689b40
permissions  -rw-r--r--

Implement a real Private Browsing Mode condition by changing the API/ABI.
This solves Tor bug #9701, complying with the disk avoidance requirement documented at
https://www.torproject.org/projects/torbrowser/design/#disk-avoidance.

#define	JEMALLOC_TCACHE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

malloc_tsd_data(, tcache, tcache_t *, NULL)
malloc_tsd_data(, tcache_enabled, tcache_enabled_t, tcache_enabled_default)

bool	opt_tcache = true;
ssize_t	opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT;

tcache_bin_info_t	*tcache_bin_info;
static unsigned		stack_nelms; /* Total stack elms per tcache. */

size_t			nhbins;
size_t			tcache_maxclass;

/******************************************************************************/

size_t
tcache_salloc(const void *ptr)
{

	return (arena_salloc(ptr, false));
}
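
/*
 * Incremental GC: called when the event counter trips.  Flush part of the
 * current GC bin (based on how far it drained since the last pass), adjust
 * its fill count, and advance to the next bin in round-robin order.
 */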
void
tcache_event_hard(tcache_t *tcache)
{
	size_t binind = tcache->next_gc_bin;
	tcache_bin_t *tbin = &tcache->tbins[binind];
	tcache_bin_info_t *tbin_info = &tcache_bin_info[binind];

	if (tbin->low_water > 0) {
		/*
		 * Flush (ceiling) 3/4 of the objects below the low water mark.
		 */
		if (binind < NBINS) {
			tcache_bin_flush_small(tbin, binind, tbin->ncached -
			    tbin->low_water + (tbin->low_water >> 2), tcache);
		} else {
			tcache_bin_flush_large(tbin, binind, tbin->ncached -
			    tbin->low_water + (tbin->low_water >> 2), tcache);
		}
		/*
		 * Reduce fill count by 2X.  Limit lg_fill_div such that the
		 * fill count is always at least 1.
		 */
		if ((tbin_info->ncached_max >> (tbin->lg_fill_div+1)) >= 1)
			tbin->lg_fill_div++;
	} else if (tbin->low_water < 0) {
		/*
		 * Increase fill count by 2X.  Make sure lg_fill_div stays
		 * greater than 0.
		 */
		if (tbin->lg_fill_div > 1)
			tbin->lg_fill_div--;
	}
	tbin->low_water = tbin->ncached;

	tcache->next_gc_bin++;
	if (tcache->next_gc_bin == nhbins)
		tcache->next_gc_bin = 0;
	tcache->ev_cnt = 0;
}
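
/*
 * Slow path for small allocation: the bin's stack is empty, so refill it
 * from the arena and retry the stack-based allocation.
 */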
void *
tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin, size_t binind)
{
	void *ret;

	arena_tcache_fill_small(tcache->arena, tbin, binind,
	    config_prof ? tcache->prof_accumbytes : 0);
	if (config_prof)
		tcache->prof_accumbytes = 0;
	ret = tcache_alloc_easy(tbin);

	return (ret);
}
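
/*
 * Return all but "rem" cached small objects to their owning arena bins.
 * Each pass locks the bin that owns the first remaining object; objects
 * belonging to other arenas are stashed and handled in later passes.
 */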
void
tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
    tcache_t *tcache)
{
	void *ptr;
	unsigned i, nflush, ndeferred;
	bool merged_stats = false;

	assert(binind < NBINS);
	assert(rem <= tbin->ncached);

	for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
		/* Lock the arena bin associated with the first object. */
		arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
		    tbin->avail[0]);
		arena_t *arena = chunk->arena;
		arena_bin_t *bin = &arena->bins[binind];

		if (config_prof && arena == tcache->arena) {
			arena_prof_accum(arena, tcache->prof_accumbytes);
			tcache->prof_accumbytes = 0;
		}

		malloc_mutex_lock(&bin->lock);
		if (config_stats && arena == tcache->arena) {
			assert(merged_stats == false);
			merged_stats = true;
			bin->stats.nflushes++;
			bin->stats.nrequests += tbin->tstats.nrequests;
			tbin->tstats.nrequests = 0;
		}
		ndeferred = 0;
		for (i = 0; i < nflush; i++) {
			ptr = tbin->avail[i];
			assert(ptr != NULL);
			chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
			if (chunk->arena == arena) {
				size_t pageind = ((uintptr_t)ptr -
				    (uintptr_t)chunk) >> LG_PAGE;
				arena_chunk_map_t *mapelm =
				    arena_mapp_get(chunk, pageind);
				if (config_fill && opt_junk) {
					arena_alloc_junk_small(ptr,
					    &arena_bin_info[binind], true);
				}
				arena_dalloc_bin_locked(arena, chunk, ptr,
				    mapelm);
			} else {
				/*
				 * This object was allocated via a different
				 * arena bin than the one that is currently
				 * locked.  Stash the object, so that it can be
				 * handled in a future pass.
				 */
				tbin->avail[ndeferred] = ptr;
				ndeferred++;
			}
		}
		malloc_mutex_unlock(&bin->lock);
	}
	if (config_stats && merged_stats == false) {
		/*
		 * The flush loop didn't happen to flush to this thread's
		 * arena, so the stats didn't get merged.  Manually do so now.
		 */
		arena_bin_t *bin = &tcache->arena->bins[binind];
		malloc_mutex_lock(&bin->lock);
		bin->stats.nflushes++;
		bin->stats.nrequests += tbin->tstats.nrequests;
		tbin->tstats.nrequests = 0;
		malloc_mutex_unlock(&bin->lock);
	}

	memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
	    rem * sizeof(void *));
	tbin->ncached = rem;
	if ((int)tbin->ncached < tbin->low_water)
		tbin->low_water = tbin->ncached;
}
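
/*
 * Return all but "rem" cached large objects to their owning arenas.  Same
 * deferral scheme as the small-object flush, but locking whole arenas
 * rather than individual bins.
 */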
void
tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
    tcache_t *tcache)
{
	void *ptr;
	unsigned i, nflush, ndeferred;
	bool merged_stats = false;

	assert(binind < nhbins);
	assert(rem <= tbin->ncached);

	for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
		/* Lock the arena associated with the first object. */
		arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
		    tbin->avail[0]);
		arena_t *arena = chunk->arena;

		malloc_mutex_lock(&arena->lock);
		if ((config_prof || config_stats) && arena == tcache->arena) {
			if (config_prof) {
				arena_prof_accum_locked(arena,
				    tcache->prof_accumbytes);
				tcache->prof_accumbytes = 0;
			}
			if (config_stats) {
				merged_stats = true;
				arena->stats.nrequests_large +=
				    tbin->tstats.nrequests;
				arena->stats.lstats[binind - NBINS].nrequests +=
				    tbin->tstats.nrequests;
				tbin->tstats.nrequests = 0;
			}
		}
		ndeferred = 0;
		for (i = 0; i < nflush; i++) {
			ptr = tbin->avail[i];
			assert(ptr != NULL);
			chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
			if (chunk->arena == arena)
				arena_dalloc_large_locked(arena, chunk, ptr);
			else {
				/*
				 * This object was allocated via a different
				 * arena than the one that is currently locked.
				 * Stash the object, so that it can be handled
				 * in a future pass.
				 */
				tbin->avail[ndeferred] = ptr;
				ndeferred++;
			}
		}
		malloc_mutex_unlock(&arena->lock);
	}
	if (config_stats && merged_stats == false) {
		/*
		 * The flush loop didn't happen to flush to this thread's
		 * arena, so the stats didn't get merged.  Manually do so now.
		 */
		arena_t *arena = tcache->arena;
		malloc_mutex_lock(&arena->lock);
		arena->stats.nrequests_large += tbin->tstats.nrequests;
		arena->stats.lstats[binind - NBINS].nrequests +=
		    tbin->tstats.nrequests;
		tbin->tstats.nrequests = 0;
		malloc_mutex_unlock(&arena->lock);
	}

	memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
	    rem * sizeof(void *));
	tbin->ncached = rem;
	if ((int)tbin->ncached < tbin->low_water)
		tbin->low_water = tbin->ncached;
}
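
/*
 * Bind a tcache to an arena; with stats enabled, also link it into the
 * arena's list of extant tcaches.
 */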
void
tcache_arena_associate(tcache_t *tcache, arena_t *arena)
{

	if (config_stats) {
		/* Link into list of extant tcaches. */
		malloc_mutex_lock(&arena->lock);
		ql_elm_new(tcache, link);
		ql_tail_insert(&arena->tcache_ql, tcache, link);
		malloc_mutex_unlock(&arena->lock);
	}
	tcache->arena = arena;
}
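
/*
 * With stats enabled, unlink the tcache from its arena's list of extant
 * tcaches and merge its stats into the arena.
 */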
void
tcache_arena_dissociate(tcache_t *tcache)
{

	if (config_stats) {
		/* Unlink from list of extant tcaches. */
		malloc_mutex_lock(&tcache->arena->lock);
		ql_remove(&tcache->arena->tcache_ql, tcache, link);
		malloc_mutex_unlock(&tcache->arena->lock);
		tcache_stats_merge(tcache, tcache->arena);
	}
}
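
/*
 * Allocate and initialize a tcache for the calling thread: the tcache_t
 * header, one cache bin per size class, and the bins' pointer stacks are
 * carved out of a single allocation from the given arena.
 */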
tcache_t *
tcache_create(arena_t *arena)
{
	tcache_t *tcache;
	size_t size, stack_offset;
	unsigned i;

	size = offsetof(tcache_t, tbins) + (sizeof(tcache_bin_t) * nhbins);
	/* Naturally align the pointer stacks. */
	size = PTR_CEILING(size);
	stack_offset = size;
	size += stack_nelms * sizeof(void *);
	/*
	 * Round up to the nearest multiple of the cacheline size, in order to
	 * avoid the possibility of false cacheline sharing.
	 *
	 * That this works relies on the same logic as in ipalloc(), but we
	 * cannot directly call ipalloc() here due to tcache bootstrapping
	 * issues.
	 */
	size = (size + CACHELINE_MASK) & (-CACHELINE);

	if (size <= SMALL_MAXCLASS)
		tcache = (tcache_t *)arena_malloc_small(arena, size, true);
	else if (size <= tcache_maxclass)
		tcache = (tcache_t *)arena_malloc_large(arena, size, true);
	else
		tcache = (tcache_t *)icallocx(size, false, arena);

	if (tcache == NULL)
		return (NULL);

	tcache_arena_associate(tcache, arena);

	assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0);
	for (i = 0; i < nhbins; i++) {
		tcache->tbins[i].lg_fill_div = 1;
		tcache->tbins[i].avail = (void **)((uintptr_t)tcache +
		    (uintptr_t)stack_offset);
		stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
	}

	tcache_tsd_set(&tcache);

	return (tcache);
}
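
/*
 * Tear down a tcache: flush every bin back to the arena, merge any
 * remaining stats and profiling bytes, then free the tcache itself via the
 * path appropriate to its size class.
 */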
void
tcache_destroy(tcache_t *tcache)
{
	unsigned i;
	size_t tcache_size;

	tcache_arena_dissociate(tcache);

	for (i = 0; i < NBINS; i++) {
		tcache_bin_t *tbin = &tcache->tbins[i];
		tcache_bin_flush_small(tbin, i, 0, tcache);

		if (config_stats && tbin->tstats.nrequests != 0) {
			arena_t *arena = tcache->arena;
			arena_bin_t *bin = &arena->bins[i];
			malloc_mutex_lock(&bin->lock);
			bin->stats.nrequests += tbin->tstats.nrequests;
			malloc_mutex_unlock(&bin->lock);
		}
	}

	for (; i < nhbins; i++) {
		tcache_bin_t *tbin = &tcache->tbins[i];
		tcache_bin_flush_large(tbin, i, 0, tcache);

		if (config_stats && tbin->tstats.nrequests != 0) {
			arena_t *arena = tcache->arena;
			malloc_mutex_lock(&arena->lock);
			arena->stats.nrequests_large += tbin->tstats.nrequests;
			arena->stats.lstats[i - NBINS].nrequests +=
			    tbin->tstats.nrequests;
			malloc_mutex_unlock(&arena->lock);
		}
	}

	if (config_prof && tcache->prof_accumbytes > 0)
		arena_prof_accum(tcache->arena, tcache->prof_accumbytes);

	tcache_size = arena_salloc(tcache, false);
	if (tcache_size <= SMALL_MAXCLASS) {
		arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache);
		arena_t *arena = chunk->arena;
		size_t pageind = ((uintptr_t)tcache - (uintptr_t)chunk) >>
		    LG_PAGE;
		arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);

		arena_dalloc_bin(arena, chunk, tcache, pageind, mapelm);
	} else if (tcache_size <= tcache_maxclass) {
		arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache);
		arena_t *arena = chunk->arena;

		arena_dalloc_large(arena, chunk, tcache);
	} else
		idallocx(tcache, false);
}
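
/*
 * TSD destructor.  The sentinel values distinguish a disabled tcache, a
 * tcache resurrected by another destructor (reincarnated), and one already
 * torn down (purgatory); a live tcache is destroyed here.
 */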
void
tcache_thread_cleanup(void *arg)
{
	tcache_t *tcache = *(tcache_t **)arg;

	if (tcache == TCACHE_STATE_DISABLED) {
		/* Do nothing. */
	} else if (tcache == TCACHE_STATE_REINCARNATED) {
		/*
		 * Another destructor called an allocator function after this
		 * destructor was called.  Reset tcache to
		 * TCACHE_STATE_PURGATORY in order to receive another callback.
		 */
		tcache = TCACHE_STATE_PURGATORY;
		tcache_tsd_set(&tcache);
	} else if (tcache == TCACHE_STATE_PURGATORY) {
		/*
		 * The previous time this destructor was called, we set the key
		 * to TCACHE_STATE_PURGATORY so that other destructors wouldn't
		 * cause re-creation of the tcache.  This time, do nothing, so
		 * that the destructor will not be called again.
		 */
	} else if (tcache != NULL) {
		assert(tcache != TCACHE_STATE_PURGATORY);
		tcache_destroy(tcache);
		tcache = TCACHE_STATE_PURGATORY;
		tcache_tsd_set(&tcache);
	}
}
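
/*
 * Merge this tcache's per-bin request counts into the arena's stats and
 * reset the per-thread counters.
 */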
void
tcache_stats_merge(tcache_t *tcache, arena_t *arena)
{
	unsigned i;

	/* Merge and reset tcache stats. */
	for (i = 0; i < NBINS; i++) {
		arena_bin_t *bin = &arena->bins[i];
		tcache_bin_t *tbin = &tcache->tbins[i];
		malloc_mutex_lock(&bin->lock);
		bin->stats.nrequests += tbin->tstats.nrequests;
		malloc_mutex_unlock(&bin->lock);
		tbin->tstats.nrequests = 0;
	}

	for (; i < nhbins; i++) {
		malloc_large_stats_t *lstats = &arena->stats.lstats[i - NBINS];
		tcache_bin_t *tbin = &tcache->tbins[i];
		arena->stats.nrequests_large += tbin->tstats.nrequests;
		lstats->nrequests += tbin->tstats.nrequests;
		tbin->tstats.nrequests = 0;
	}
}
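
/*
 * First-stage boot: clamp opt_lg_tcache_max to derive tcache_maxclass,
 * compute the number of cache bins (nhbins), and size each bin's pointer
 * stack in tcache_bin_info.
 */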
bool
tcache_boot0(void)
{
	unsigned i;

	/*
	 * If necessary, clamp opt_lg_tcache_max, now that arena_maxclass is
	 * known.
	 */
	if (opt_lg_tcache_max < 0 || (1U << opt_lg_tcache_max) < SMALL_MAXCLASS)
		tcache_maxclass = SMALL_MAXCLASS;
	else if ((1U << opt_lg_tcache_max) > arena_maxclass)
		tcache_maxclass = arena_maxclass;
	else
		tcache_maxclass = (1U << opt_lg_tcache_max);

	nhbins = NBINS + (tcache_maxclass >> LG_PAGE);

	/* Initialize tcache_bin_info. */
	tcache_bin_info = (tcache_bin_info_t *)base_alloc(nhbins *
	    sizeof(tcache_bin_info_t));
	if (tcache_bin_info == NULL)
		return (true);
	stack_nelms = 0;
	for (i = 0; i < NBINS; i++) {
		if ((arena_bin_info[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MAX) {
			tcache_bin_info[i].ncached_max =
			    (arena_bin_info[i].nregs << 1);
		} else {
			tcache_bin_info[i].ncached_max =
			    TCACHE_NSLOTS_SMALL_MAX;
		}
		stack_nelms += tcache_bin_info[i].ncached_max;
	}
	for (; i < nhbins; i++) {
		tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_LARGE;
		stack_nelms += tcache_bin_info[i].ncached_max;
	}

	return (false);
}
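
/*
 * Second-stage boot: initialize the TSD keys used to find each thread's
 * tcache and its enabled state.
 */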
bool
tcache_boot1(void)
{

	if (tcache_tsd_boot() || tcache_enabled_tsd_boot())
		return (true);

	return (false);
}
