Tue, 06 Jan 2015 21:39:09 +0100
Conditionally force memory storage according to privacy.thirdparty.isolate;
This solves Tor bug #9701, complying with disk avoidance documented in
https://www.torproject.org/projects/torbrowser/design/#disk-avoidance.
1 /* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
2 /* This Source Code Form is subject to the terms of the Mozilla Public
3 * License, v. 2.0. If a copy of the MPL was not distributed with this
4 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6 /*
7 * Double hashing implementation.
8 */
9 #include <stdio.h>
10 #include <stdlib.h>
11 #include <string.h>
12 #include "pldhash.h"
13 #include "mozilla/HashFunctions.h"
14 #include "mozilla/MathAlgorithms.h"
15 #include "nsDebug.h" /* for PR_ASSERT */
16 #include "nsAlgorithm.h"
17 #include "mozilla/Likely.h"
18 #include "mozilla/MemoryReporting.h"
19 #include "mozilla/ChaosMode.h"
/* METER(x) expands to |x| only when stats collection is compiled in. */
#ifdef PL_DHASHMETER
# define METER(x)       x
#else
# define METER(x)       /* nothing */
#endif
27 /*
28 * The following DEBUG-only code is used to assert that calls to one of
29 * table->ops or to an enumerator do not cause re-entry into a call that
30 * can mutate the table.
31 */
#ifdef DEBUG

/*
 * Most callers that assert about the recursion level don't care about
 * this magical value because they are asserting that mutation is
 * allowed (and therefore the level is 0 or 1, depending on whether they
 * incremented it).
 *
 * Only PL_DHashTableFinish needs to allow this special value.
 */
#define IMMUTABLE_RECURSION_LEVEL ((uint16_t)-1)

#define RECURSION_LEVEL_SAFE_TO_FINISH(table_)                                \
    ((table_)->recursionLevel == 0 ||                                         \
     (table_)->recursionLevel == IMMUTABLE_RECURSION_LEVEL)

#define INCREMENT_RECURSION_LEVEL(table_)                                     \
    do {                                                                      \
        if ((table_)->recursionLevel != IMMUTABLE_RECURSION_LEVEL)            \
            ++(table_)->recursionLevel;                                       \
    } while(0)

/*
 * Fixed: the body previously referenced |table| instead of the macro
 * parameter |table_|, which only worked because every call site happens
 * to pass a variable named |table|.  Use the parameter (parenthesized)
 * for macro hygiene.
 */
#define DECREMENT_RECURSION_LEVEL(table_)                                     \
    do {                                                                      \
        if ((table_)->recursionLevel != IMMUTABLE_RECURSION_LEVEL) {          \
            MOZ_ASSERT((table_)->recursionLevel > 0);                         \
            --(table_)->recursionLevel;                                       \
        }                                                                     \
    } while(0)

#else

#define INCREMENT_RECURSION_LEVEL(table_)   do { } while(0)
#define DECREMENT_RECURSION_LEVEL(table_)   do { } while(0)

#endif /* defined(DEBUG) */
68 using namespace mozilla;
70 void *
71 PL_DHashAllocTable(PLDHashTable *table, uint32_t nbytes)
72 {
73 return malloc(nbytes);
74 }
76 void
77 PL_DHashFreeTable(PLDHashTable *table, void *ptr)
78 {
79 free(ptr);
80 }
82 PLDHashNumber
83 PL_DHashStringKey(PLDHashTable *table, const void *key)
84 {
85 return HashString(static_cast<const char*>(key));
86 }
88 PLDHashNumber
89 PL_DHashVoidPtrKeyStub(PLDHashTable *table, const void *key)
90 {
91 return (PLDHashNumber)(ptrdiff_t)key >> 2;
92 }
94 bool
95 PL_DHashMatchEntryStub(PLDHashTable *table,
96 const PLDHashEntryHdr *entry,
97 const void *key)
98 {
99 const PLDHashEntryStub *stub = (const PLDHashEntryStub *)entry;
101 return stub->key == key;
102 }
104 bool
105 PL_DHashMatchStringKey(PLDHashTable *table,
106 const PLDHashEntryHdr *entry,
107 const void *key)
108 {
109 const PLDHashEntryStub *stub = (const PLDHashEntryStub *)entry;
111 /* XXX tolerate null keys on account of sloppy Mozilla callers. */
112 return stub->key == key ||
113 (stub->key && key &&
114 strcmp((const char *) stub->key, (const char *) key) == 0);
115 }
117 void
118 PL_DHashMoveEntryStub(PLDHashTable *table,
119 const PLDHashEntryHdr *from,
120 PLDHashEntryHdr *to)
121 {
122 memcpy(to, from, table->entrySize);
123 }
125 void
126 PL_DHashClearEntryStub(PLDHashTable *table, PLDHashEntryHdr *entry)
127 {
128 memset(entry, 0, table->entrySize);
129 }
131 void
132 PL_DHashFreeStringKey(PLDHashTable *table, PLDHashEntryHdr *entry)
133 {
134 const PLDHashEntryStub *stub = (const PLDHashEntryStub *)entry;
136 free((void *) stub->key);
137 memset(entry, 0, table->entrySize);
138 }
/* Default finalize hook: stub tables hold no table-wide resources. */
void
PL_DHashFinalizeStub(PLDHashTable *table)
{
}
/* Default ops table: pointer-identity keys stored directly in entries. */
static const PLDHashTableOps stub_ops = {
    PL_DHashAllocTable,
    PL_DHashFreeTable,
    PL_DHashVoidPtrKeyStub,
    PL_DHashMatchEntryStub,
    PL_DHashMoveEntryStub,
    PL_DHashClearEntryStub,
    PL_DHashFinalizeStub,
    nullptr                     /* initEntry: no extra initialization needed */
};
156 const PLDHashTableOps *
157 PL_DHashGetStubOps(void)
158 {
159 return &stub_ops;
160 }
/*
 * Compute capacity * entrySize into *nbytes.  Returns false when the
 * product does not fit in a uint32_t; *nbytes then holds the truncated
 * 32-bit product (callers must check the return value).
 */
static bool
SizeOfEntryStore(uint32_t capacity, uint32_t entrySize, uint32_t *nbytes)
{
    uint64_t wide = (uint64_t)capacity * entrySize;
    *nbytes = (uint32_t)wide;
    return wide <= UINT32_MAX;   // false on overflow
}
170 PLDHashTable *
171 PL_NewDHashTable(const PLDHashTableOps *ops, void *data, uint32_t entrySize,
172 uint32_t capacity)
173 {
174 PLDHashTable *table = (PLDHashTable *) malloc(sizeof *table);
175 if (!table)
176 return nullptr;
177 if (!PL_DHashTableInit(table, ops, data, entrySize, capacity, fallible_t())) {
178 free(table);
179 return nullptr;
180 }
181 return table;
182 }
184 void
185 PL_DHashTableDestroy(PLDHashTable *table)
186 {
187 PL_DHashTableFinish(table);
188 free(table);
189 }
/*
 * Initialize |table| in place.  |capacity| is rounded up to a power of
 * two clamped to [PL_DHASH_MIN_SIZE, PL_DHASH_MAX_SIZE].  Returns false
 * (leaving the table unusable) when the rounded capacity exceeds the
 * maximum, the entry-store size overflows uint32_t, or allocation fails.
 */
bool
PL_DHashTableInit(PLDHashTable *table, const PLDHashTableOps *ops,
                  void *data, uint32_t entrySize, uint32_t capacity,
                  const fallible_t& )
{
#ifdef DEBUG
    /* Big entries make double-hash probing cache-hostile; warn loudly. */
    if (entrySize > 16 * sizeof(void *)) {
        printf_stderr(
            "pldhash: for the table at address %p, the given entrySize"
            " of %lu definitely favors chaining over double hashing.\n",
            (void *) table,
            (unsigned long) entrySize);
    }
#endif

    table->ops = ops;
    table->data = data;
    if (capacity < PL_DHASH_MIN_SIZE)
        capacity = PL_DHASH_MIN_SIZE;

    /* Round capacity up to a power of two; hashShift encodes its log2. */
    int log2 = CeilingLog2(capacity);

    capacity = 1u << log2;
    if (capacity > PL_DHASH_MAX_SIZE)
        return false;
    table->hashShift = PL_DHASH_BITS - log2;
    table->entrySize = entrySize;
    table->entryCount = table->removedCount = 0;
    table->generation = 0;
    uint32_t nbytes;
    if (!SizeOfEntryStore(capacity, entrySize, &nbytes))
        return false; // overflowed

    table->entryStore = (char *) ops->allocTable(table, nbytes);
    if (!table->entryStore)
        return false;
    /* All-zero entry store == all entries free (keyHash 0). */
    memset(table->entryStore, 0, nbytes);
    METER(memset(&table->stats, 0, sizeof table->stats));

#ifdef DEBUG
    table->recursionLevel = 0;
#endif

    return true;
}
237 void
238 PL_DHashTableInit(PLDHashTable *table, const PLDHashTableOps *ops, void *data,
239 uint32_t entrySize, uint32_t capacity)
240 {
241 if (!PL_DHashTableInit(table, ops, data, entrySize, capacity, fallible_t())) {
242 if (capacity > PL_DHASH_MAX_SIZE) {
243 MOZ_CRASH();
244 }
245 uint32_t nbytes;
246 if (!SizeOfEntryStore(capacity, entrySize, &nbytes)) {
247 MOZ_CRASH();
248 }
249 NS_ABORT_OOM(nbytes);
250 }
251 }
253 /*
254 * Compute max and min load numbers (entry counts). We have a secondary max
255 * that allows us to overload a table reasonably if it cannot be grown further
256 * (i.e. if ChangeTable() fails). The table slows down drastically if the
257 * secondary max is too close to 1, but 0.96875 gives only a slight slowdown
258 * while allowing 1.3x more elements.
259 */
/* Primary load ceiling: 75% of capacity (entries + removed sentinels). */
static inline uint32_t MaxLoad(uint32_t size) {
    uint32_t quarter = size / 4;
    return size - quarter;      // == size * 0.75
}
/* Secondary ceiling used when growth fails: 96.875% of capacity. */
static inline uint32_t MaxLoadOnGrowthFailure(uint32_t size) {
    uint32_t thirtySecond = size / 32;
    return size - thirtySecond; // == size * 0.96875
}
/* Load floor below which the table shrinks: 25% of capacity. */
static inline uint32_t MinLoad(uint32_t size) {
    return size / 4;            // == size * 0.25
}
/*
 * Double hashing needs the second hash code to be relatively prime to table
 * size, so we simply make hash2 odd.
 */
/* HASH1 takes the high log2(size) bits of the scrambled keyHash. */
#define HASH1(hash0, shift)         ((hash0) >> (shift))
/* HASH2 uses the next-lower bits, forced odd to be coprime to 2^log2. */
#define HASH2(hash0,log2,shift)     ((((hash0) << (log2)) >> (shift)) | 1)
277 /*
278 * Reserve keyHash 0 for free entries and 1 for removed-entry sentinels. Note
279 * that a removed-entry sentinel need be stored only if the removed entry had
280 * a colliding entry added after it. Therefore we can use 1 as the collision
281 * flag in addition to the removed-entry sentinel value. Multiplicative hash
282 * uses the high order bits of keyHash, so this least-significant reservation
283 * should not hurt the hash function's effectiveness much.
284 *
285 * If you change any of these magic numbers, also update PL_DHASH_ENTRY_IS_LIVE
286 * in pldhash.h. It used to be private to pldhash.c, but then became public to
287 * assist iterator writers who inspect table->entryStore directly.
288 */
#define COLLISION_FLAG              ((PLDHashNumber) 1)
#define MARK_ENTRY_FREE(entry)      ((entry)->keyHash = 0)
#define MARK_ENTRY_REMOVED(entry)   ((entry)->keyHash = 1)
#define ENTRY_IS_REMOVED(entry)     ((entry)->keyHash == 1)
#define ENTRY_IS_LIVE(entry)        PL_DHASH_ENTRY_IS_LIVE(entry)
/*
 * Remap the reserved codes 0 and 1 to the top of the hash range using
 * deliberate unsigned wrap-around (0 -> 0xFFFFFFFE, 1 -> 0xFFFFFFFF),
 * so a live entry never carries a free/removed sentinel keyHash.
 */
#define ENSURE_LIVE_KEYHASH(hash0)  if (hash0 < 2) hash0 -= 2; else (void)0

/* Match an entry's keyHash against an unstored one computed from a key. */
#define MATCH_ENTRY_KEYHASH(entry,hash0)                                      \
    (((entry)->keyHash & ~COLLISION_FLAG) == (hash0))

/* Compute the address of the indexed entry in table. */
#define ADDRESS_ENTRY(table, index)                                           \
    ((PLDHashEntryHdr *)((table)->entryStore + (index) * (table)->entrySize))
304 void
305 PL_DHashTableFinish(PLDHashTable *table)
306 {
307 INCREMENT_RECURSION_LEVEL(table);
309 /* Call finalize before clearing entries, so it can enumerate them. */
310 table->ops->finalize(table);
312 /* Clear any remaining live entries. */
313 char *entryAddr = table->entryStore;
314 uint32_t entrySize = table->entrySize;
315 char *entryLimit = entryAddr + PL_DHASH_TABLE_SIZE(table) * entrySize;
316 while (entryAddr < entryLimit) {
317 PLDHashEntryHdr *entry = (PLDHashEntryHdr *)entryAddr;
318 if (ENTRY_IS_LIVE(entry)) {
319 METER(table->stats.removeEnums++);
320 table->ops->clearEntry(table, entry);
321 }
322 entryAddr += entrySize;
323 }
325 DECREMENT_RECURSION_LEVEL(table);
326 MOZ_ASSERT(RECURSION_LEVEL_SAFE_TO_FINISH(table));
328 /* Free entry storage last. */
329 table->ops->freeTable(table, table->entryStore);
330 }
/*
 * Probe the table for |key| / |keyHash|.  Returns the matching live
 * entry on a hit; on a miss returns a free entry (or, for PL_DHASH_ADD,
 * a recyclable removed sentinel) the caller may claim.  Never returns
 * null: the load-factor invariants guarantee a free slot exists.
 */
static PLDHashEntryHdr * PL_DHASH_FASTCALL
SearchTable(PLDHashTable *table, const void *key, PLDHashNumber keyHash,
            PLDHashOperator op)
{
    METER(table->stats.searches++);
    NS_ASSERTION(!(keyHash & COLLISION_FLAG),
                 "!(keyHash & COLLISION_FLAG)");

    /* Compute the primary hash address. */
    int hashShift = table->hashShift;
    PLDHashNumber hash1 = HASH1(keyHash, hashShift);
    PLDHashEntryHdr *entry = ADDRESS_ENTRY(table, hash1);

    /* Miss: return space for a new entry. */
    if (PL_DHASH_ENTRY_IS_FREE(entry)) {
        METER(table->stats.misses++);
        return entry;
    }

    /* Hit: return entry. */
    PLDHashMatchEntry matchEntry = table->ops->matchEntry;
    if (MATCH_ENTRY_KEYHASH(entry, keyHash) && matchEntry(table, entry, key)) {
        METER(table->stats.hits++);
        return entry;
    }

    /* Collision: double hash. */
    int sizeLog2 = PL_DHASH_BITS - table->hashShift;
    PLDHashNumber hash2 = HASH2(keyHash, sizeLog2, hashShift);
    uint32_t sizeMask = (1u << sizeLog2) - 1;

    /* Save the first removed entry pointer so PL_DHASH_ADD can recycle it. */
    PLDHashEntryHdr *firstRemoved = nullptr;

    for (;;) {
        if (MOZ_UNLIKELY(ENTRY_IS_REMOVED(entry))) {
            if (!firstRemoved)
                firstRemoved = entry;
        } else {
            /*
             * For adds, flag this slot as lying on a probe chain so a
             * later removal leaves a sentinel rather than a free slot.
             */
            if (op == PL_DHASH_ADD)
                entry->keyHash |= COLLISION_FLAG;
        }

        METER(table->stats.steps++);
        /* Step by hash2 (odd, hence coprime to the power-of-two size). */
        hash1 -= hash2;
        hash1 &= sizeMask;

        entry = ADDRESS_ENTRY(table, hash1);
        if (PL_DHASH_ENTRY_IS_FREE(entry)) {
            METER(table->stats.misses++);
            /* Prefer recycling a removed sentinel over a fresh slot. */
            return (firstRemoved && op == PL_DHASH_ADD) ? firstRemoved : entry;
        }

        if (MATCH_ENTRY_KEYHASH(entry, keyHash) &&
            matchEntry(table, entry, key)) {
            METER(table->stats.hits++);
            return entry;
        }
    }

    /* NOTREACHED */
    return nullptr;
}
/*
 * This is a copy of SearchTable, used by ChangeTable, hardcoded to
 * 1. assume |op == PL_DHASH_ADD|,
 * 2. assume that |key| will never match an existing entry, and
 * 3. assume that no entries have been removed from the current table
 *    structure.
 * Avoiding the need for |key| means we can avoid needing a way to map
 * entries to keys, which means callers can use complex key types more
 * easily.
 */
static PLDHashEntryHdr * PL_DHASH_FASTCALL
FindFreeEntry(PLDHashTable *table, PLDHashNumber keyHash)
{
    METER(table->stats.searches++);
    NS_ASSERTION(!(keyHash & COLLISION_FLAG),
                 "!(keyHash & COLLISION_FLAG)");

    /* Compute the primary hash address. */
    int hashShift = table->hashShift;
    PLDHashNumber hash1 = HASH1(keyHash, hashShift);
    PLDHashEntryHdr *entry = ADDRESS_ENTRY(table, hash1);

    /* Miss: return space for a new entry. */
    if (PL_DHASH_ENTRY_IS_FREE(entry)) {
        METER(table->stats.misses++);
        return entry;
    }

    /* Collision: double hash. */
    int sizeLog2 = PL_DHASH_BITS - table->hashShift;
    PLDHashNumber hash2 = HASH2(keyHash, sizeLog2, hashShift);
    uint32_t sizeMask = (1u << sizeLog2) - 1;

    for (;;) {
        /* A freshly-rehashed store has no removed sentinels (assumption 3). */
        NS_ASSERTION(!ENTRY_IS_REMOVED(entry),
                     "!ENTRY_IS_REMOVED(entry)");
        entry->keyHash |= COLLISION_FLAG;

        METER(table->stats.steps++);
        hash1 -= hash2;
        hash1 &= sizeMask;

        entry = ADDRESS_ENTRY(table, hash1);
        if (PL_DHASH_ENTRY_IS_FREE(entry)) {
            METER(table->stats.misses++);
            return entry;
        }
    }

    /* NOTREACHED */
    return nullptr;
}
/*
 * Resize the table by a factor of 2^deltaLog2 (grow when positive,
 * compress in place when zero, shrink when negative), rehashing every
 * live entry into fresh storage.  Returns false -- leaving the table
 * untouched -- if the new size is out of range or allocation fails.
 */
static bool
ChangeTable(PLDHashTable *table, int deltaLog2)
{
    /* Look, but don't touch, until we succeed in getting new entry store. */
    int oldLog2 = PL_DHASH_BITS - table->hashShift;
    int newLog2 = oldLog2 + deltaLog2;
    uint32_t newCapacity = 1u << newLog2;
    if (newCapacity > PL_DHASH_MAX_SIZE)
        return false;

    uint32_t entrySize = table->entrySize;
    uint32_t nbytes;
    if (!SizeOfEntryStore(newCapacity, entrySize, &nbytes))
        return false; // overflowed

    char *newEntryStore = (char *) table->ops->allocTable(table, nbytes);
    if (!newEntryStore)
        return false;

    /* We can't fail from here on, so update table parameters. */
#ifdef DEBUG
    /* allocTable may have touched recursionLevel; preserve it across setup. */
    uint32_t recursionLevel = table->recursionLevel;
#endif
    table->hashShift = PL_DHASH_BITS - newLog2;
    table->removedCount = 0;
    table->generation++;

    /* Assign the new entry store to table. */
    memset(newEntryStore, 0, nbytes);
    char *oldEntryStore, *oldEntryAddr;
    oldEntryAddr = oldEntryStore = table->entryStore;
    table->entryStore = newEntryStore;
    PLDHashMoveEntry moveEntry = table->ops->moveEntry;
#ifdef DEBUG
    table->recursionLevel = recursionLevel;
#endif

    /* Copy only live entries, leaving removed ones behind. */
    uint32_t oldCapacity = 1u << oldLog2;
    for (uint32_t i = 0; i < oldCapacity; i++) {
        PLDHashEntryHdr *oldEntry = (PLDHashEntryHdr *)oldEntryAddr;
        if (ENTRY_IS_LIVE(oldEntry)) {
            /* Strip the collision flag; probe chains are rebuilt fresh. */
            oldEntry->keyHash &= ~COLLISION_FLAG;
            PLDHashEntryHdr *newEntry = FindFreeEntry(table, oldEntry->keyHash);
            NS_ASSERTION(PL_DHASH_ENTRY_IS_FREE(newEntry),
                         "PL_DHASH_ENTRY_IS_FREE(newEntry)");
            moveEntry(table, oldEntry, newEntry);
            newEntry->keyHash = oldEntry->keyHash;
        }
        oldEntryAddr += entrySize;
    }

    table->ops->freeTable(table, oldEntryStore);
    return true;
}
/*
 * Single entry point for lookup/add/remove.  Returns the found (or
 * free) entry for PL_DHASH_LOOKUP, the new or existing entry for
 * PL_DHASH_ADD (null on allocation/init failure), and always null for
 * PL_DHASH_REMOVE.
 */
PLDHashEntryHdr * PL_DHASH_FASTCALL
PL_DHashTableOperate(PLDHashTable *table, const void *key, PLDHashOperator op)
{
    PLDHashEntryHdr *entry;

    /* Only lookups may re-enter; mutating ops require recursionLevel 0. */
    MOZ_ASSERT(op == PL_DHASH_LOOKUP || table->recursionLevel == 0);
    INCREMENT_RECURSION_LEVEL(table);

    PLDHashNumber keyHash = table->ops->hashKey(table, key);
    /* Multiplicative scramble so HASH1/HASH2 see well-mixed high bits. */
    keyHash *= PL_DHASH_GOLDEN_RATIO;

    /* Avoid 0 and 1 hash codes, they indicate free and removed entries. */
    ENSURE_LIVE_KEYHASH(keyHash);
    keyHash &= ~COLLISION_FLAG;

    switch (op) {
      case PL_DHASH_LOOKUP:
        METER(table->stats.lookups++);
        entry = SearchTable(table, key, keyHash, op);
        break;

      case PL_DHASH_ADD: {
        /*
         * If alpha is >= .75, grow or compress the table. If key is already
         * in the table, we may grow once more than necessary, but only if we
         * are on the edge of being overloaded.
         */
        uint32_t size = PL_DHASH_TABLE_SIZE(table);
        if (table->entryCount + table->removedCount >= MaxLoad(size)) {
            /* Compress if a quarter or more of all entries are removed. */
            int deltaLog2;
            if (table->removedCount >= size >> 2) {
                METER(table->stats.compresses++);
                deltaLog2 = 0;
            } else {
                METER(table->stats.grows++);
                deltaLog2 = 1;
            }

            /*
             * Grow or compress table. If ChangeTable() fails, allow
             * overloading up to the secondary max. Once we hit the secondary
             * max, return null.
             */
            if (!ChangeTable(table, deltaLog2) &&
                table->entryCount + table->removedCount >=
                MaxLoadOnGrowthFailure(size))
            {
                METER(table->stats.addFailures++);
                entry = nullptr;
                break;
            }
        }

        /*
         * Look for entry after possibly growing, so we don't have to add it,
         * then skip it while growing the table and re-add it after.
         */
        entry = SearchTable(table, key, keyHash, op);
        if (!ENTRY_IS_LIVE(entry)) {
            /* Initialize the entry, indicating that it's no longer free. */
            METER(table->stats.addMisses++);
            if (ENTRY_IS_REMOVED(entry)) {
                /* Recycled sentinel sat on a probe chain; keep the flag. */
                METER(table->stats.addOverRemoved++);
                table->removedCount--;
                keyHash |= COLLISION_FLAG;
            }
            if (table->ops->initEntry &&
                !table->ops->initEntry(table, entry, key)) {
                /* We haven't claimed entry yet; fail with null return. */
                memset(entry + 1, 0, table->entrySize - sizeof *entry);
                entry = nullptr;
                break;
            }
            entry->keyHash = keyHash;
            table->entryCount++;
        }
        METER(else table->stats.addHits++);
        break;
      }

      case PL_DHASH_REMOVE:
        entry = SearchTable(table, key, keyHash, op);
        if (ENTRY_IS_LIVE(entry)) {
            /* Clear this entry and mark it as "removed". */
            METER(table->stats.removeHits++);
            PL_DHashTableRawRemove(table, entry);

            /* Shrink if alpha is <= .25 and table isn't too small already. */
            uint32_t size = PL_DHASH_TABLE_SIZE(table);
            if (size > PL_DHASH_MIN_SIZE &&
                table->entryCount <= MinLoad(size)) {
                METER(table->stats.shrinks++);
                (void) ChangeTable(table, -1);
            }
        }
        METER(else table->stats.removeMisses++);
        entry = nullptr;
        break;

      default:
        NS_NOTREACHED("0");
        entry = nullptr;
    }

    DECREMENT_RECURSION_LEVEL(table);

    return entry;
}
615 void
616 PL_DHashTableRawRemove(PLDHashTable *table, PLDHashEntryHdr *entry)
617 {
618 MOZ_ASSERT(table->recursionLevel != IMMUTABLE_RECURSION_LEVEL);
620 NS_ASSERTION(PL_DHASH_ENTRY_IS_LIVE(entry),
621 "PL_DHASH_ENTRY_IS_LIVE(entry)");
623 /* Load keyHash first in case clearEntry() goofs it. */
624 PLDHashNumber keyHash = entry->keyHash;
625 table->ops->clearEntry(table, entry);
626 if (keyHash & COLLISION_FLAG) {
627 MARK_ENTRY_REMOVED(entry);
628 table->removedCount++;
629 } else {
630 METER(table->stats.removeFrees++);
631 MARK_ENTRY_FREE(entry);
632 }
633 table->entryCount--;
634 }
636 uint32_t
637 PL_DHashTableEnumerate(PLDHashTable *table, PLDHashEnumerator etor, void *arg)
638 {
639 INCREMENT_RECURSION_LEVEL(table);
641 char *entryAddr = table->entryStore;
642 uint32_t entrySize = table->entrySize;
643 uint32_t capacity = PL_DHASH_TABLE_SIZE(table);
644 uint32_t tableSize = capacity * entrySize;
645 char *entryLimit = entryAddr + tableSize;
646 uint32_t i = 0;
647 bool didRemove = false;
649 if (ChaosMode::isActive()) {
650 // Start iterating at a random point in the hashtable. It would be
651 // even more chaotic to iterate in fully random order, but that's a lot
652 // more work.
653 entryAddr += ChaosMode::randomUint32LessThan(capacity) * entrySize;
654 if (entryAddr >= entryLimit) {
655 entryAddr -= tableSize;
656 }
657 }
659 for (uint32_t e = 0; e < capacity; ++e) {
660 PLDHashEntryHdr *entry = (PLDHashEntryHdr *)entryAddr;
661 if (ENTRY_IS_LIVE(entry)) {
662 PLDHashOperator op = etor(table, entry, i++, arg);
663 if (op & PL_DHASH_REMOVE) {
664 METER(table->stats.removeEnums++);
665 PL_DHashTableRawRemove(table, entry);
666 didRemove = true;
667 }
668 if (op & PL_DHASH_STOP)
669 break;
670 }
671 entryAddr += entrySize;
672 if (entryAddr >= entryLimit) {
673 entryAddr -= tableSize;
674 }
675 }
677 MOZ_ASSERT(!didRemove || table->recursionLevel == 1);
679 /*
680 * Shrink or compress if a quarter or more of all entries are removed, or
681 * if the table is underloaded according to the minimum alpha, and is not
682 * minimal-size already. Do this only if we removed above, so non-removing
683 * enumerations can count on stable table->entryStore until the next
684 * non-lookup-Operate or removing-Enumerate.
685 */
686 if (didRemove &&
687 (table->removedCount >= capacity >> 2 ||
688 (capacity > PL_DHASH_MIN_SIZE &&
689 table->entryCount <= MinLoad(capacity)))) {
690 METER(table->stats.enumShrinks++);
691 capacity = table->entryCount;
692 capacity += capacity >> 1;
693 if (capacity < PL_DHASH_MIN_SIZE)
694 capacity = PL_DHASH_MIN_SIZE;
696 uint32_t ceiling = CeilingLog2(capacity);
697 ceiling -= PL_DHASH_BITS - table->hashShift;
699 (void) ChangeTable(table, ceiling);
700 }
702 DECREMENT_RECURSION_LEVEL(table);
704 return i;
705 }
/* Closure threaded through PL_DHashTableEnumerate by the SizeOf APIs. */
struct SizeOfEntryExcludingThisArg
{
    size_t total;               /* running sum of per-entry heap sizes */
    PLDHashSizeOfEntryExcludingThisFun sizeOfEntryExcludingThis;
    MallocSizeOf mallocSizeOf;
    void *arg; // the arg passed by the user
};
715 static PLDHashOperator
716 SizeOfEntryExcludingThisEnumerator(PLDHashTable *table, PLDHashEntryHdr *hdr,
717 uint32_t number, void *arg)
718 {
719 SizeOfEntryExcludingThisArg *e = (SizeOfEntryExcludingThisArg *)arg;
720 e->total += e->sizeOfEntryExcludingThis(hdr, e->mallocSizeOf, e->arg);
721 return PL_DHASH_NEXT;
722 }
724 size_t
725 PL_DHashTableSizeOfExcludingThis(const PLDHashTable *table,
726 PLDHashSizeOfEntryExcludingThisFun sizeOfEntryExcludingThis,
727 MallocSizeOf mallocSizeOf,
728 void *arg /* = nullptr */)
729 {
730 size_t n = 0;
731 n += mallocSizeOf(table->entryStore);
732 if (sizeOfEntryExcludingThis) {
733 SizeOfEntryExcludingThisArg arg2 = { 0, sizeOfEntryExcludingThis, mallocSizeOf, arg };
734 PL_DHashTableEnumerate(const_cast<PLDHashTable *>(table),
735 SizeOfEntryExcludingThisEnumerator, &arg2);
736 n += arg2.total;
737 }
738 return n;
739 }
741 size_t
742 PL_DHashTableSizeOfIncludingThis(const PLDHashTable *table,
743 PLDHashSizeOfEntryExcludingThisFun sizeOfEntryExcludingThis,
744 MallocSizeOf mallocSizeOf,
745 void *arg /* = nullptr */)
746 {
747 return mallocSizeOf(table) +
748 PL_DHashTableSizeOfExcludingThis(table, sizeOfEntryExcludingThis,
749 mallocSizeOf, arg);
750 }
#ifdef DEBUG
/*
 * Debug-only: mark the table so any further mutation (Add/Remove, raw
 * remove, removing enumeration) trips the recursion-level assertions.
 */
void
PL_DHashMarkTableImmutable(PLDHashTable *table)
{
    table->recursionLevel = IMMUTABLE_RECURSION_LEVEL;
}
#endif
#ifdef PL_DHASHMETER
#include <math.h>

/*
 * Dump accumulated metering statistics to |fp|, including hash-chain
 * length distribution, and (when |dump| is non-null) walk the longest
 * collision chain found, calling |dump| on each of its entries.
 */
void
PL_DHashTableDumpMeter(PLDHashTable *table, PLDHashEnumerator dump, FILE *fp)
{
    PLDHashNumber hash1, hash2, maxChainHash1, maxChainHash2;
    double sqsum, mean, variance, sigma;
    PLDHashEntryHdr *entry;

    char *entryAddr = table->entryStore;
    uint32_t entrySize = table->entrySize;
    int hashShift = table->hashShift;
    int sizeLog2 = PL_DHASH_BITS - hashShift;
    uint32_t tableSize = PL_DHASH_TABLE_SIZE(table);
    uint32_t sizeMask = (1u << sizeLog2) - 1;
    uint32_t chainCount = 0, maxChainLen = 0;
    hash2 = 0;
    sqsum = 0;

    /*
     * For each live entry, re-derive its probe sequence to measure the
     * chain length from its home slot.  maxChainHash1/2 are assigned
     * whenever maxChainLen grows, so they are set before any use below.
     */
    for (uint32_t i = 0; i < tableSize; i++) {
        entry = (PLDHashEntryHdr *)entryAddr;
        entryAddr += entrySize;
        if (!ENTRY_IS_LIVE(entry))
            continue;
        hash1 = HASH1(entry->keyHash & ~COLLISION_FLAG, hashShift);
        PLDHashNumber saveHash1 = hash1;
        PLDHashEntryHdr *probe = ADDRESS_ENTRY(table, hash1);
        uint32_t chainLen = 1;
        if (probe == entry) {
            /* Start of a (possibly unit-length) chain. */
            chainCount++;
        } else {
            hash2 = HASH2(entry->keyHash & ~COLLISION_FLAG, sizeLog2,
                          hashShift);
            do {
                chainLen++;
                hash1 -= hash2;
                hash1 &= sizeMask;
                probe = ADDRESS_ENTRY(table, hash1);
            } while (probe != entry);
        }
        sqsum += chainLen * chainLen;
        if (chainLen > maxChainLen) {
            maxChainLen = chainLen;
            maxChainHash1 = saveHash1;
            maxChainHash2 = hash2;
        }
    }

    /* Mean/sigma of chain lengths over the number of chain heads. */
    uint32_t entryCount = table->entryCount;
    if (entryCount && chainCount) {
        mean = (double)entryCount / chainCount;
        variance = chainCount * sqsum - entryCount * entryCount;
        if (variance < 0 || chainCount == 1)
            variance = 0;
        else
            variance /= chainCount * (chainCount - 1);
        sigma = sqrt(variance);
    } else {
        mean = sigma = 0;
    }

    fprintf(fp, "Double hashing statistics:\n");
    fprintf(fp, "    table size (in entries): %u\n", tableSize);
    fprintf(fp, "          number of entries: %u\n", table->entryCount);
    fprintf(fp, "  number of removed entries: %u\n", table->removedCount);
    fprintf(fp, "         number of searches: %u\n", table->stats.searches);
    fprintf(fp, "             number of hits: %u\n", table->stats.hits);
    fprintf(fp, "           number of misses: %u\n", table->stats.misses);
    fprintf(fp, "      mean steps per search: %g\n", table->stats.searches ?
                                                     (double)table->stats.steps
                                                     / table->stats.searches :
                                                     0.);
    fprintf(fp, "     mean hash chain length: %g\n", mean);
    fprintf(fp, "         standard deviation: %g\n", sigma);
    fprintf(fp, "  maximum hash chain length: %u\n", maxChainLen);
    fprintf(fp, "          number of lookups: %u\n", table->stats.lookups);
    fprintf(fp, " adds that made a new entry: %u\n", table->stats.addMisses);
    fprintf(fp, "adds that recycled removeds: %u\n", table->stats.addOverRemoved);
    fprintf(fp, "   adds that found an entry: %u\n", table->stats.addHits);
    fprintf(fp, "               add failures: %u\n", table->stats.addFailures);
    fprintf(fp, "             useful removes: %u\n", table->stats.removeHits);
    fprintf(fp, "            useless removes: %u\n", table->stats.removeMisses);
    fprintf(fp, "removes that freed an entry: %u\n", table->stats.removeFrees);
    fprintf(fp, "  removes while enumerating: %u\n", table->stats.removeEnums);
    fprintf(fp, "            number of grows: %u\n", table->stats.grows);
    fprintf(fp, "          number of shrinks: %u\n", table->stats.shrinks);
    fprintf(fp, "       number of compresses: %u\n", table->stats.compresses);
    fprintf(fp, "number of enumerate shrinks: %u\n", table->stats.enumShrinks);

    /* hash2 != 0 implies at least one multi-entry chain was walked. */
    if (dump && maxChainLen && hash2) {
        fputs("Maximum hash chain:\n", fp);
        hash1 = maxChainHash1;
        hash2 = maxChainHash2;
        entry = ADDRESS_ENTRY(table, hash1);
        uint32_t i = 0;
        do {
            if (dump(table, entry, i++, fp) != PL_DHASH_NEXT)
                break;
            hash1 -= hash2;
            hash1 &= sizeMask;
            entry = ADDRESS_ENTRY(table, hash1);
        } while (PL_DHASH_ENTRY_IS_BUSY(entry));
    }
}
#endif /* PL_DHASHMETER */