--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xpcom/glue/pldhash.cpp	Wed Dec 31 06:09:35 2014 +0100
@@ -0,0 +1,866 @@
/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

/*
 * Double hashing implementation.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "pldhash.h"
#include "mozilla/HashFunctions.h"
#include "mozilla/MathAlgorithms.h"
#include "nsDebug.h"     /* for PR_ASSERT */
#include "nsAlgorithm.h"
#include "mozilla/Likely.h"
#include "mozilla/MemoryReporting.h"
#include "mozilla/ChaosMode.h"

#ifdef PL_DHASHMETER
# define METER(x)       x
#else
# define METER(x)       /* nothing */
#endif

/*
 * The following DEBUG-only code is used to assert that calls to one of
 * table->ops or to an enumerator do not cause re-entry into a call that
 * can mutate the table.
 */
#ifdef DEBUG

/*
 * Most callers that assert about the recursion level don't care about
 * this magical value because they are asserting that mutation is
 * allowed (and therefore the level is 0 or 1, depending on whether they
 * incremented it).
 *
 * Only PL_DHashTableFinish needs to allow this special value.
 */
#define IMMUTABLE_RECURSION_LEVEL ((uint16_t)-1)

#define RECURSION_LEVEL_SAFE_TO_FINISH(table_)                      \
    (table_->recursionLevel == 0 ||                                 \
     table_->recursionLevel == IMMUTABLE_RECURSION_LEVEL)

#define INCREMENT_RECURSION_LEVEL(table_)                           \
    do {                                                            \
        if (table_->recursionLevel != IMMUTABLE_RECURSION_LEVEL)    \
            ++table_->recursionLevel;                               \
    } while(0)
#define DECREMENT_RECURSION_LEVEL(table_)                           \
    do {                                                            \
        if (table_->recursionLevel != IMMUTABLE_RECURSION_LEVEL) {  \
            MOZ_ASSERT(table_->recursionLevel > 0);                 \
            --table_->recursionLevel;                               \
        }                                                           \
    } while(0)

#else

#define INCREMENT_RECURSION_LEVEL(table_)   do { } while(0)
#define DECREMENT_RECURSION_LEVEL(table_)   do { } while(0)

#endif /* defined(DEBUG) */

using namespace mozilla;

void *
PL_DHashAllocTable(PLDHashTable *table, uint32_t nbytes)
{
    return malloc(nbytes);
}

void
PL_DHashFreeTable(PLDHashTable *table, void *ptr)
{
    free(ptr);
}

PLDHashNumber
PL_DHashStringKey(PLDHashTable *table, const void *key)
{
    return HashString(static_cast<const char*>(key));
}

PLDHashNumber
PL_DHashVoidPtrKeyStub(PLDHashTable *table, const void *key)
{
    return (PLDHashNumber)(ptrdiff_t)key >> 2;
}

bool
PL_DHashMatchEntryStub(PLDHashTable *table,
                       const PLDHashEntryHdr *entry,
                       const void *key)
{
    const PLDHashEntryStub *stub = (const PLDHashEntryStub *)entry;

    return stub->key == key;
}
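/*
 * Illustrative sketch (not part of the original patch): callers define an
 * entry type whose first member is a PLDHashEntryHdr, and the hashKey and
 * matchEntry hooks both receive the caller's original |key| pointer.  A
 * hypothetical pointer-keyed entry might look like:
 *
 *   struct ExampleEntry {              // hypothetical caller entry type
 *       PLDHashEntryHdr hdr;           // must be the first member
 *       const void     *key;           // same layout as PLDHashEntryStub
 *       int             value;         // caller payload
 *   };
 *
 * Such an entry can be hashed with PL_DHashVoidPtrKeyStub (which discards
 * the low alignment bits of the pointer) and compared with
 * PL_DHashMatchEntryStub above, since both operate on the same key field.
 */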
bool
PL_DHashMatchStringKey(PLDHashTable *table,
                       const PLDHashEntryHdr *entry,
                       const void *key)
{
    const PLDHashEntryStub *stub = (const PLDHashEntryStub *)entry;

    /* XXX tolerate null keys on account of sloppy Mozilla callers. */
    return stub->key == key ||
           (stub->key && key &&
            strcmp((const char *) stub->key, (const char *) key) == 0);
}

void
PL_DHashMoveEntryStub(PLDHashTable *table,
                      const PLDHashEntryHdr *from,
                      PLDHashEntryHdr *to)
{
    memcpy(to, from, table->entrySize);
}

void
PL_DHashClearEntryStub(PLDHashTable *table, PLDHashEntryHdr *entry)
{
    memset(entry, 0, table->entrySize);
}

void
PL_DHashFreeStringKey(PLDHashTable *table, PLDHashEntryHdr *entry)
{
    const PLDHashEntryStub *stub = (const PLDHashEntryStub *)entry;

    free((void *) stub->key);
    memset(entry, 0, table->entrySize);
}

void
PL_DHashFinalizeStub(PLDHashTable *table)
{
}

static const PLDHashTableOps stub_ops = {
    PL_DHashAllocTable,
    PL_DHashFreeTable,
    PL_DHashVoidPtrKeyStub,
    PL_DHashMatchEntryStub,
    PL_DHashMoveEntryStub,
    PL_DHashClearEntryStub,
    PL_DHashFinalizeStub,
    nullptr
};

const PLDHashTableOps *
PL_DHashGetStubOps(void)
{
    return &stub_ops;
}
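/*
 * Illustrative usage sketch (not part of the original patch; |someKeyPtr|
 * is a hypothetical caller pointer).  A minimal pointer-keyed table built
 * on the stub ops, using PL_NewDHashTable and PL_DHashTableOperate defined
 * later in this file:
 *
 *   PLDHashTable *t = PL_NewDHashTable(PL_DHashGetStubOps(), nullptr,
 *                                      sizeof(PLDHashEntryStub), 16);
 *   if (t) {
 *       PLDHashEntryStub *e = (PLDHashEntryStub *)
 *           PL_DHashTableOperate(t, someKeyPtr, PL_DHASH_ADD);
 *       if (e && !e->key)
 *           e->key = someKeyPtr;   // newly added entry: claim it
 *       PL_DHashTableDestroy(t);
 *   }
 */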
static bool
SizeOfEntryStore(uint32_t capacity, uint32_t entrySize, uint32_t *nbytes)
{
    uint64_t nbytes64 = uint64_t(capacity) * uint64_t(entrySize);
    *nbytes = capacity * entrySize;
    return uint64_t(*nbytes) == nbytes64;   // returns false on overflow
}

PLDHashTable *
PL_NewDHashTable(const PLDHashTableOps *ops, void *data, uint32_t entrySize,
                 uint32_t capacity)
{
    PLDHashTable *table = (PLDHashTable *) malloc(sizeof *table);
    if (!table)
        return nullptr;
    if (!PL_DHashTableInit(table, ops, data, entrySize, capacity, fallible_t())) {
        free(table);
        return nullptr;
    }
    return table;
}

void
PL_DHashTableDestroy(PLDHashTable *table)
{
    PL_DHashTableFinish(table);
    free(table);
}

bool
PL_DHashTableInit(PLDHashTable *table, const PLDHashTableOps *ops,
                  void *data, uint32_t entrySize, uint32_t capacity,
                  const fallible_t&)
{
#ifdef DEBUG
    if (entrySize > 16 * sizeof(void *)) {
        printf_stderr(
            "pldhash: for the table at address %p, the given entrySize"
            " of %lu definitely favors chaining over double hashing.\n",
            (void *) table,
            (unsigned long) entrySize);
    }
#endif

    table->ops = ops;
    table->data = data;
    if (capacity < PL_DHASH_MIN_SIZE)
        capacity = PL_DHASH_MIN_SIZE;

    int log2 = CeilingLog2(capacity);

    capacity = 1u << log2;
    if (capacity > PL_DHASH_MAX_SIZE)
        return false;
    table->hashShift = PL_DHASH_BITS - log2;
    table->entrySize = entrySize;
    table->entryCount = table->removedCount = 0;
    table->generation = 0;
    uint32_t nbytes;
    if (!SizeOfEntryStore(capacity, entrySize, &nbytes))
        return false;   // overflowed

    table->entryStore = (char *) ops->allocTable(table, nbytes);
    if (!table->entryStore)
        return false;
    memset(table->entryStore, 0, nbytes);
    METER(memset(&table->stats, 0, sizeof table->stats));

#ifdef DEBUG
    table->recursionLevel = 0;
#endif

    return true;
}

void
PL_DHashTableInit(PLDHashTable *table, const PLDHashTableOps *ops, void *data,
                  uint32_t entrySize, uint32_t capacity)
{
    if (!PL_DHashTableInit(table, ops, data, entrySize, capacity, fallible_t())) {
        if (capacity > PL_DHASH_MAX_SIZE) {
            MOZ_CRASH();
        }
        uint32_t nbytes;
        if (!SizeOfEntryStore(capacity, entrySize, &nbytes)) {
            MOZ_CRASH();
        }
        NS_ABORT_OOM(nbytes);
    }
}

/*
 * Compute max and min load numbers (entry counts).  We have a secondary max
 * that allows us to overload a table reasonably if it cannot be grown further
 * (i.e. if ChangeTable() fails).  The table slows down drastically if the
 * secondary max is too close to 1, but 0.96875 gives only a slight slowdown
 * while allowing 1.3x more elements.
 */
static inline uint32_t MaxLoad(uint32_t size) {
    return size - (size >> 2);  // == size * 0.75
}
static inline uint32_t MaxLoadOnGrowthFailure(uint32_t size) {
    return size - (size >> 5);  // == size * 0.96875
}
static inline uint32_t MinLoad(uint32_t size) {
    return size >> 2;           // == size * 0.25
}

/*
 * Double hashing needs the second hash code to be relatively prime to table
 * size, so we simply make hash2 odd.
 */
#define HASH1(hash0, shift)         ((hash0) >> (shift))
#define HASH2(hash0,log2,shift)     ((((hash0) << (log2)) >> (shift)) | 1)

/*
 * Reserve keyHash 0 for free entries and 1 for removed-entry sentinels.  Note
 * that a removed-entry sentinel need be stored only if the removed entry had
 * a colliding entry added after it.  Therefore we can use 1 as the collision
 * flag in addition to the removed-entry sentinel value.  Multiplicative hash
 * uses the high order bits of keyHash, so this least-significant reservation
 * should not hurt the hash function's effectiveness much.
 *
 * If you change any of these magic numbers, also update PL_DHASH_ENTRY_IS_LIVE
 * in pldhash.h.  It used to be private to pldhash.c, but then became public to
 * assist iterator writers who inspect table->entryStore directly.
 */
#define COLLISION_FLAG              ((PLDHashNumber) 1)
#define MARK_ENTRY_FREE(entry)      ((entry)->keyHash = 0)
#define MARK_ENTRY_REMOVED(entry)   ((entry)->keyHash = 1)
#define ENTRY_IS_REMOVED(entry)     ((entry)->keyHash == 1)
#define ENTRY_IS_LIVE(entry)        PL_DHASH_ENTRY_IS_LIVE(entry)
#define ENSURE_LIVE_KEYHASH(hash0)  if (hash0 < 2) hash0 -= 2; else (void)0

/* Match an entry's keyHash against an unstored one computed from a key. */
#define MATCH_ENTRY_KEYHASH(entry,hash0) \
    (((entry)->keyHash & ~COLLISION_FLAG) == (hash0))

/* Compute the address of the indexed entry in table. */
#define ADDRESS_ENTRY(table, index) \
    ((PLDHashEntryHdr *)((table)->entryStore + (index) * (table)->entrySize))
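/*
 * Worked example (illustrative only, not part of the original patch): with
 * PL_DHASH_BITS == 32 and a 16-entry table, log2 == 4 and hashShift == 28.
 * For an already-scrambled keyHash of 0xDEADBEEF:
 *
 *   HASH1 = 0xDEADBEEF >> 28               == 0xD  (primary slot 13)
 *   HASH2 = ((0xDEADBEEF << 4) >> 28) | 1  == 0xF  (odd probe step 15)
 *
 * Collisions then probe (13 - 15) & 15 == 14, (14 - 15) & 15 == 15, and so
 * on; because the step is odd, and hence relatively prime to the
 * power-of-two table size, the sequence visits every slot before repeating.
 */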
void
PL_DHashTableFinish(PLDHashTable *table)
{
    INCREMENT_RECURSION_LEVEL(table);

    /* Call finalize before clearing entries, so it can enumerate them. */
    table->ops->finalize(table);

    /* Clear any remaining live entries. */
    char *entryAddr = table->entryStore;
    uint32_t entrySize = table->entrySize;
    char *entryLimit = entryAddr + PL_DHASH_TABLE_SIZE(table) * entrySize;
    while (entryAddr < entryLimit) {
        PLDHashEntryHdr *entry = (PLDHashEntryHdr *)entryAddr;
        if (ENTRY_IS_LIVE(entry)) {
            METER(table->stats.removeEnums++);
            table->ops->clearEntry(table, entry);
        }
        entryAddr += entrySize;
    }

    DECREMENT_RECURSION_LEVEL(table);
    MOZ_ASSERT(RECURSION_LEVEL_SAFE_TO_FINISH(table));

    /* Free entry storage last. */
    table->ops->freeTable(table, table->entryStore);
}

static PLDHashEntryHdr * PL_DHASH_FASTCALL
SearchTable(PLDHashTable *table, const void *key, PLDHashNumber keyHash,
            PLDHashOperator op)
{
    METER(table->stats.searches++);
    NS_ASSERTION(!(keyHash & COLLISION_FLAG),
                 "!(keyHash & COLLISION_FLAG)");

    /* Compute the primary hash address. */
    int hashShift = table->hashShift;
    PLDHashNumber hash1 = HASH1(keyHash, hashShift);
    PLDHashEntryHdr *entry = ADDRESS_ENTRY(table, hash1);

    /* Miss: return space for a new entry. */
    if (PL_DHASH_ENTRY_IS_FREE(entry)) {
        METER(table->stats.misses++);
        return entry;
    }

    /* Hit: return entry. */
    PLDHashMatchEntry matchEntry = table->ops->matchEntry;
    if (MATCH_ENTRY_KEYHASH(entry, keyHash) && matchEntry(table, entry, key)) {
        METER(table->stats.hits++);
        return entry;
    }

    /* Collision: double hash. */
    int sizeLog2 = PL_DHASH_BITS - table->hashShift;
    PLDHashNumber hash2 = HASH2(keyHash, sizeLog2, hashShift);
    uint32_t sizeMask = (1u << sizeLog2) - 1;

    /* Save the first removed entry pointer so PL_DHASH_ADD can recycle it. */
    PLDHashEntryHdr *firstRemoved = nullptr;

    for (;;) {
        if (MOZ_UNLIKELY(ENTRY_IS_REMOVED(entry))) {
            if (!firstRemoved)
                firstRemoved = entry;
        } else {
            if (op == PL_DHASH_ADD)
                entry->keyHash |= COLLISION_FLAG;
        }

        METER(table->stats.steps++);
        hash1 -= hash2;
        hash1 &= sizeMask;

        entry = ADDRESS_ENTRY(table, hash1);
        if (PL_DHASH_ENTRY_IS_FREE(entry)) {
            METER(table->stats.misses++);
            return (firstRemoved && op == PL_DHASH_ADD) ? firstRemoved : entry;
        }

        if (MATCH_ENTRY_KEYHASH(entry, keyHash) &&
            matchEntry(table, entry, key)) {
            METER(table->stats.hits++);
            return entry;
        }
    }

    /* NOTREACHED */
    return nullptr;
}
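/*
 * Illustrative trace (assumed keys and slots, not from the original patch):
 * suppose keys A and B both hash to primary slot 5.  Adding A fills slot 5;
 * adding B sets A's COLLISION_FLAG and lands in the next probe slot.
 * Removing A then stores a removed-entry sentinel (keyHash == 1) rather
 * than freeing slot 5, because B was added after it on the same chain.  A
 * later PL_DHASH_ADD that probes through slot 5 remembers it as
 * |firstRemoved| and recycles it once the chain ends at a free slot, so
 * busy chains do not accumulate dead sentinels.
 */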
/*
 * This is a copy of SearchTable, used by ChangeTable, hardcoded to
 *   1. assume |op == PL_DHASH_ADD|,
 *   2. assume that |key| will never match an existing entry, and
 *   3. assume that no entries have been removed from the current table
 *      structure.
 * Avoiding the need for |key| means we can avoid needing a way to map
 * entries to keys, which means callers can use complex key types more
 * easily.
 */
static PLDHashEntryHdr * PL_DHASH_FASTCALL
FindFreeEntry(PLDHashTable *table, PLDHashNumber keyHash)
{
    METER(table->stats.searches++);
    NS_ASSERTION(!(keyHash & COLLISION_FLAG),
                 "!(keyHash & COLLISION_FLAG)");

    /* Compute the primary hash address. */
    int hashShift = table->hashShift;
    PLDHashNumber hash1 = HASH1(keyHash, hashShift);
    PLDHashEntryHdr *entry = ADDRESS_ENTRY(table, hash1);

    /* Miss: return space for a new entry. */
    if (PL_DHASH_ENTRY_IS_FREE(entry)) {
        METER(table->stats.misses++);
        return entry;
    }

    /* Collision: double hash. */
    int sizeLog2 = PL_DHASH_BITS - table->hashShift;
    PLDHashNumber hash2 = HASH2(keyHash, sizeLog2, hashShift);
    uint32_t sizeMask = (1u << sizeLog2) - 1;

    for (;;) {
        NS_ASSERTION(!ENTRY_IS_REMOVED(entry),
                     "!ENTRY_IS_REMOVED(entry)");
        entry->keyHash |= COLLISION_FLAG;

        METER(table->stats.steps++);
        hash1 -= hash2;
        hash1 &= sizeMask;

        entry = ADDRESS_ENTRY(table, hash1);
        if (PL_DHASH_ENTRY_IS_FREE(entry)) {
            METER(table->stats.misses++);
            return entry;
        }
    }

    /* NOTREACHED */
    return nullptr;
}

static bool
ChangeTable(PLDHashTable *table, int deltaLog2)
{
    /* Look, but don't touch, until we succeed in getting new entry store. */
    int oldLog2 = PL_DHASH_BITS - table->hashShift;
    int newLog2 = oldLog2 + deltaLog2;
    uint32_t newCapacity = 1u << newLog2;
    if (newCapacity > PL_DHASH_MAX_SIZE)
        return false;

    uint32_t entrySize = table->entrySize;
    uint32_t nbytes;
    if (!SizeOfEntryStore(newCapacity, entrySize, &nbytes))
        return false;   // overflowed

    char *newEntryStore = (char *) table->ops->allocTable(table, nbytes);
    if (!newEntryStore)
        return false;

    /* We can't fail from here on, so update table parameters. */
#ifdef DEBUG
    uint32_t recursionLevel = table->recursionLevel;
#endif
    table->hashShift = PL_DHASH_BITS - newLog2;
    table->removedCount = 0;
    table->generation++;

    /* Assign the new entry store to table. */
    memset(newEntryStore, 0, nbytes);
    char *oldEntryStore, *oldEntryAddr;
    oldEntryAddr = oldEntryStore = table->entryStore;
    table->entryStore = newEntryStore;
    PLDHashMoveEntry moveEntry = table->ops->moveEntry;
#ifdef DEBUG
    table->recursionLevel = recursionLevel;
#endif

    /* Copy only live entries, leaving removed ones behind. */
    uint32_t oldCapacity = 1u << oldLog2;
    for (uint32_t i = 0; i < oldCapacity; i++) {
        PLDHashEntryHdr *oldEntry = (PLDHashEntryHdr *)oldEntryAddr;
        if (ENTRY_IS_LIVE(oldEntry)) {
            oldEntry->keyHash &= ~COLLISION_FLAG;
            PLDHashEntryHdr *newEntry = FindFreeEntry(table, oldEntry->keyHash);
            NS_ASSERTION(PL_DHASH_ENTRY_IS_FREE(newEntry),
                         "PL_DHASH_ENTRY_IS_FREE(newEntry)");
            moveEntry(table, oldEntry, newEntry);
            newEntry->keyHash = oldEntry->keyHash;
        }
        oldEntryAddr += entrySize;
    }

    table->ops->freeTable(table, oldEntryStore);
    return true;
}
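/*
 * Worked numbers (illustrative, not from the original patch): for a table
 * of size 128, the PL_DHASH_ADD path below calls ChangeTable once
 * entryCount + removedCount reaches MaxLoad(128) == 96 (0.75 load).  If at
 * least a quarter of the slots hold removed sentinels, the table is
 * compressed in place (deltaLog2 == 0); otherwise it grows to 256
 * (deltaLog2 == 1).  Should allocation fail, adds are still accepted until
 * entryCount + removedCount reaches MaxLoadOnGrowthFailure(128) == 124, and
 * PL_DHASH_REMOVE shrinks the table once entryCount drops to
 * MinLoad(128) == 32.
 */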
PLDHashEntryHdr * PL_DHASH_FASTCALL
PL_DHashTableOperate(PLDHashTable *table, const void *key, PLDHashOperator op)
{
    PLDHashEntryHdr *entry;

    MOZ_ASSERT(op == PL_DHASH_LOOKUP || table->recursionLevel == 0);
    INCREMENT_RECURSION_LEVEL(table);

    PLDHashNumber keyHash = table->ops->hashKey(table, key);
    keyHash *= PL_DHASH_GOLDEN_RATIO;

    /* Avoid 0 and 1 hash codes, they indicate free and removed entries. */
    ENSURE_LIVE_KEYHASH(keyHash);
    keyHash &= ~COLLISION_FLAG;

    switch (op) {
      case PL_DHASH_LOOKUP:
        METER(table->stats.lookups++);
        entry = SearchTable(table, key, keyHash, op);
        break;

      case PL_DHASH_ADD: {
        /*
         * If alpha is >= .75, grow or compress the table.  If key is already
         * in the table, we may grow once more than necessary, but only if we
         * are on the edge of being overloaded.
         */
        uint32_t size = PL_DHASH_TABLE_SIZE(table);
        if (table->entryCount + table->removedCount >= MaxLoad(size)) {
            /* Compress if a quarter or more of all entries are removed. */
            int deltaLog2;
            if (table->removedCount >= size >> 2) {
                METER(table->stats.compresses++);
                deltaLog2 = 0;
            } else {
                METER(table->stats.grows++);
                deltaLog2 = 1;
            }

            /*
             * Grow or compress table.  If ChangeTable() fails, allow
             * overloading up to the secondary max.  Once we hit the secondary
             * max, return null.
             */
            if (!ChangeTable(table, deltaLog2) &&
                table->entryCount + table->removedCount >=
                    MaxLoadOnGrowthFailure(size))
            {
                METER(table->stats.addFailures++);
                entry = nullptr;
                break;
            }
        }

        /*
         * Look for entry after possibly growing, so we don't have to add it,
         * then skip it while growing the table and re-add it after.
         */
        entry = SearchTable(table, key, keyHash, op);
        if (!ENTRY_IS_LIVE(entry)) {
            /* Initialize the entry, indicating that it's no longer free. */
            METER(table->stats.addMisses++);
            if (ENTRY_IS_REMOVED(entry)) {
                METER(table->stats.addOverRemoved++);
                table->removedCount--;
                keyHash |= COLLISION_FLAG;
            }
            if (table->ops->initEntry &&
                !table->ops->initEntry(table, entry, key)) {
                /* We haven't claimed entry yet; fail with null return. */
                memset(entry + 1, 0, table->entrySize - sizeof *entry);
                entry = nullptr;
                break;
            }
            entry->keyHash = keyHash;
            table->entryCount++;
        }
        METER(else table->stats.addHits++);
        break;
      }

      case PL_DHASH_REMOVE:
        entry = SearchTable(table, key, keyHash, op);
        if (ENTRY_IS_LIVE(entry)) {
            /* Clear this entry and mark it as "removed". */
            METER(table->stats.removeHits++);
            PL_DHashTableRawRemove(table, entry);

            /* Shrink if alpha is <= .25 and table isn't too small already. */
            uint32_t size = PL_DHASH_TABLE_SIZE(table);
            if (size > PL_DHASH_MIN_SIZE &&
                table->entryCount <= MinLoad(size)) {
                METER(table->stats.shrinks++);
                (void) ChangeTable(table, -1);
            }
        }
        METER(else table->stats.removeMisses++);
        entry = nullptr;
        break;

      default:
        NS_NOTREACHED("0");
        entry = nullptr;
    }

    DECREMENT_RECURSION_LEVEL(table);

    return entry;
}

void
PL_DHashTableRawRemove(PLDHashTable *table, PLDHashEntryHdr *entry)
{
    MOZ_ASSERT(table->recursionLevel != IMMUTABLE_RECURSION_LEVEL);

    NS_ASSERTION(PL_DHASH_ENTRY_IS_LIVE(entry),
                 "PL_DHASH_ENTRY_IS_LIVE(entry)");

    /* Load keyHash first in case clearEntry() goofs it. */
    PLDHashNumber keyHash = entry->keyHash;
    table->ops->clearEntry(table, entry);
    if (keyHash & COLLISION_FLAG) {
        MARK_ENTRY_REMOVED(entry);
        table->removedCount++;
    } else {
        METER(table->stats.removeFrees++);
        MARK_ENTRY_FREE(entry);
    }
    table->entryCount--;
}
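/*
 * Illustrative enumerator sketch (not part of the original patch): a
 * hypothetical callback that frees pointer-keyed stub entries and removes
 * them as PL_DHashTableEnumerate (below) walks the table:
 *
 *   static PLDHashOperator
 *   ClearAllEntries(PLDHashTable *table, PLDHashEntryHdr *hdr,
 *                   uint32_t number, void *arg)
 *   {
 *       PLDHashEntryStub *stub = (PLDHashEntryStub *)hdr;
 *       free((void *) stub->key);   // assumes the table owns the key
 *       return PL_DHASH_REMOVE;     // or PL_DHASH_NEXT / PL_DHASH_STOP
 *   }
 *
 *   PL_DHashTableEnumerate(table, ClearAllEntries, nullptr);
 */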
uint32_t
PL_DHashTableEnumerate(PLDHashTable *table, PLDHashEnumerator etor, void *arg)
{
    INCREMENT_RECURSION_LEVEL(table);

    char *entryAddr = table->entryStore;
    uint32_t entrySize = table->entrySize;
    uint32_t capacity = PL_DHASH_TABLE_SIZE(table);
    uint32_t tableSize = capacity * entrySize;
    char *entryLimit = entryAddr + tableSize;
    uint32_t i = 0;
    bool didRemove = false;

    if (ChaosMode::isActive()) {
        // Start iterating at a random point in the hashtable. It would be
        // even more chaotic to iterate in fully random order, but that's a lot
        // more work.
        entryAddr += ChaosMode::randomUint32LessThan(capacity) * entrySize;
        if (entryAddr >= entryLimit) {
            entryAddr -= tableSize;
        }
    }

    for (uint32_t e = 0; e < capacity; ++e) {
        PLDHashEntryHdr *entry = (PLDHashEntryHdr *)entryAddr;
        if (ENTRY_IS_LIVE(entry)) {
            PLDHashOperator op = etor(table, entry, i++, arg);
            if (op & PL_DHASH_REMOVE) {
                METER(table->stats.removeEnums++);
                PL_DHashTableRawRemove(table, entry);
                didRemove = true;
            }
            if (op & PL_DHASH_STOP)
                break;
        }
        entryAddr += entrySize;
        if (entryAddr >= entryLimit) {
            entryAddr -= tableSize;
        }
    }

    MOZ_ASSERT(!didRemove || table->recursionLevel == 1);

    /*
     * Shrink or compress if a quarter or more of all entries are removed, or
     * if the table is underloaded according to the minimum alpha, and is not
     * minimal-size already.  Do this only if we removed above, so non-removing
     * enumerations can count on stable table->entryStore until the next
     * non-lookup-Operate or removing-Enumerate.
     */
    if (didRemove &&
        (table->removedCount >= capacity >> 2 ||
         (capacity > PL_DHASH_MIN_SIZE &&
          table->entryCount <= MinLoad(capacity)))) {
        METER(table->stats.enumShrinks++);
        capacity = table->entryCount;
        capacity += capacity >> 1;
        if (capacity < PL_DHASH_MIN_SIZE)
            capacity = PL_DHASH_MIN_SIZE;

        uint32_t ceiling = CeilingLog2(capacity);
        ceiling -= PL_DHASH_BITS - table->hashShift;

        (void) ChangeTable(table, ceiling);
    }

    DECREMENT_RECURSION_LEVEL(table);

    return i;
}

struct SizeOfEntryExcludingThisArg
{
    size_t total;
    PLDHashSizeOfEntryExcludingThisFun sizeOfEntryExcludingThis;
    MallocSizeOf mallocSizeOf;
    void *arg;      // the arg passed by the user
};

static PLDHashOperator
SizeOfEntryExcludingThisEnumerator(PLDHashTable *table, PLDHashEntryHdr *hdr,
                                   uint32_t number, void *arg)
{
    SizeOfEntryExcludingThisArg *e = (SizeOfEntryExcludingThisArg *)arg;
    e->total += e->sizeOfEntryExcludingThis(hdr, e->mallocSizeOf, e->arg);
    return PL_DHASH_NEXT;
}

size_t
PL_DHashTableSizeOfExcludingThis(const PLDHashTable *table,
                                 PLDHashSizeOfEntryExcludingThisFun sizeOfEntryExcludingThis,
                                 MallocSizeOf mallocSizeOf,
                                 void *arg /* = nullptr */)
{
    size_t n = 0;
    n += mallocSizeOf(table->entryStore);
    if (sizeOfEntryExcludingThis) {
        SizeOfEntryExcludingThisArg arg2 = { 0, sizeOfEntryExcludingThis, mallocSizeOf, arg };
        PL_DHashTableEnumerate(const_cast<PLDHashTable *>(table),
                               SizeOfEntryExcludingThisEnumerator, &arg2);
        n += arg2.total;
    }
    return n;
}

size_t
PL_DHashTableSizeOfIncludingThis(const PLDHashTable *table,
                                 PLDHashSizeOfEntryExcludingThisFun sizeOfEntryExcludingThis,
                                 MallocSizeOf mallocSizeOf,
                                 void *arg /* = nullptr */)
{
    return mallocSizeOf(table) +
           PL_DHashTableSizeOfExcludingThis(table, sizeOfEntryExcludingThis,
                                            mallocSizeOf, arg);
}

#ifdef DEBUG
void
PL_DHashMarkTableImmutable(PLDHashTable *table)
{
    table->recursionLevel = IMMUTABLE_RECURSION_LEVEL;
}
#endif
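/*
 * Illustrative sketch (not part of the original patch): a hypothetical
 * sizeOfEntryExcludingThis callback for stub entries whose key is a heap
 * block owned by the table, passed to the memory-reporting functions above:
 *
 *   static size_t
 *   SizeOfStringEntry(PLDHashEntryHdr *hdr, MallocSizeOf mallocSizeOf,
 *                     void *arg)
 *   {
 *       PLDHashEntryStub *stub = (PLDHashEntryStub *)hdr;
 *       return mallocSizeOf(stub->key);   // deep size of the owned key
 *   }
 *
 *   size_t bytes =
 *       PL_DHashTableSizeOfExcludingThis(table, SizeOfStringEntry,
 *                                        mallocSizeOf);
 */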
#ifdef PL_DHASHMETER
#include <math.h>

void
PL_DHashTableDumpMeter(PLDHashTable *table, PLDHashEnumerator dump, FILE *fp)
{
    PLDHashNumber hash1, hash2, maxChainHash1, maxChainHash2;
    double sqsum, mean, variance, sigma;
    PLDHashEntryHdr *entry;

    char *entryAddr = table->entryStore;
    uint32_t entrySize = table->entrySize;
    int hashShift = table->hashShift;
    int sizeLog2 = PL_DHASH_BITS - hashShift;
    uint32_t tableSize = PL_DHASH_TABLE_SIZE(table);
    uint32_t sizeMask = (1u << sizeLog2) - 1;
    uint32_t chainCount = 0, maxChainLen = 0;
    hash2 = 0;
    sqsum = 0;

    for (uint32_t i = 0; i < tableSize; i++) {
        entry = (PLDHashEntryHdr *)entryAddr;
        entryAddr += entrySize;
        if (!ENTRY_IS_LIVE(entry))
            continue;
        hash1 = HASH1(entry->keyHash & ~COLLISION_FLAG, hashShift);
        PLDHashNumber saveHash1 = hash1;
        PLDHashEntryHdr *probe = ADDRESS_ENTRY(table, hash1);
        uint32_t chainLen = 1;
        if (probe == entry) {
            /* Start of a (possibly unit-length) chain. */
            chainCount++;
        } else {
            hash2 = HASH2(entry->keyHash & ~COLLISION_FLAG, sizeLog2,
                          hashShift);
            do {
                chainLen++;
                hash1 -= hash2;
                hash1 &= sizeMask;
                probe = ADDRESS_ENTRY(table, hash1);
            } while (probe != entry);
        }
        sqsum += chainLen * chainLen;
        if (chainLen > maxChainLen) {
            maxChainLen = chainLen;
            maxChainHash1 = saveHash1;
            maxChainHash2 = hash2;
        }
    }

    uint32_t entryCount = table->entryCount;
    if (entryCount && chainCount) {
        mean = (double)entryCount / chainCount;
        variance = chainCount * sqsum - entryCount * entryCount;
        if (variance < 0 || chainCount == 1)
            variance = 0;
        else
            variance /= chainCount * (chainCount - 1);
        sigma = sqrt(variance);
    } else {
        mean = sigma = 0;
    }

    fprintf(fp, "Double hashing statistics:\n");
    fprintf(fp, "     table size (in entries): %u\n", tableSize);
    fprintf(fp, "           number of entries: %u\n", table->entryCount);
    fprintf(fp, "   number of removed entries: %u\n", table->removedCount);
    fprintf(fp, "          number of searches: %u\n", table->stats.searches);
    fprintf(fp, "              number of hits: %u\n", table->stats.hits);
    fprintf(fp, "            number of misses: %u\n", table->stats.misses);
    fprintf(fp, "       mean steps per search: %g\n", table->stats.searches ?
                                                      (double)table->stats.steps
                                                      / table->stats.searches :
                                                      0.);
    fprintf(fp, "      mean hash chain length: %g\n", mean);
    fprintf(fp, "          standard deviation: %g\n", sigma);
    fprintf(fp, "   maximum hash chain length: %u\n", maxChainLen);
    fprintf(fp, "           number of lookups: %u\n", table->stats.lookups);
    fprintf(fp, "  adds that made a new entry: %u\n", table->stats.addMisses);
    fprintf(fp, " adds that recycled removeds: %u\n", table->stats.addOverRemoved);
    fprintf(fp, "    adds that found an entry: %u\n", table->stats.addHits);
    fprintf(fp, "                add failures: %u\n", table->stats.addFailures);
    fprintf(fp, "              useful removes: %u\n", table->stats.removeHits);
    fprintf(fp, "             useless removes: %u\n", table->stats.removeMisses);
    fprintf(fp, " removes that freed an entry: %u\n", table->stats.removeFrees);
    fprintf(fp, "   removes while enumerating: %u\n", table->stats.removeEnums);
    fprintf(fp, "             number of grows: %u\n", table->stats.grows);
    fprintf(fp, "           number of shrinks: %u\n", table->stats.shrinks);
    fprintf(fp, "        number of compresses: %u\n", table->stats.compresses);
    fprintf(fp, " number of enumerate shrinks: %u\n", table->stats.enumShrinks);

    if (dump && maxChainLen && hash2) {
        fputs("Maximum hash chain:\n", fp);
        hash1 = maxChainHash1;
        hash2 = maxChainHash2;
        entry = ADDRESS_ENTRY(table, hash1);
        uint32_t i = 0;
        do {
            if (dump(table, entry, i++, fp) != PL_DHASH_NEXT)
                break;
            hash1 -= hash2;
            hash1 &= sizeMask;
            entry = ADDRESS_ENTRY(table, hash1);
        } while (PL_DHASH_ENTRY_IS_BUSY(entry));
    }
}
#endif /* PL_DHASHMETER */