Fri, 16 Jan 2015 18:13:44 +0100
Integrate suggestion from review to improve consistency with existing code.
1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2 * vim: set ts=8 sts=4 et sw=4 tw=99:
3 * This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
7 #ifndef js_HashTable_h
8 #define js_HashTable_h
10 #include "mozilla/Alignment.h"
11 #include "mozilla/Assertions.h"
12 #include "mozilla/Attributes.h"
13 #include "mozilla/Casting.h"
14 #include "mozilla/DebugOnly.h"
15 #include "mozilla/MemoryReporting.h"
16 #include "mozilla/Move.h"
17 #include "mozilla/NullPtr.h"
18 #include "mozilla/PodOperations.h"
19 #include "mozilla/ReentrancyGuard.h"
20 #include "mozilla/TemplateLib.h"
21 #include "mozilla/TypeTraits.h"
23 #include "js/Utility.h"
25 namespace js {
27 class TempAllocPolicy;
28 template <class> struct DefaultHasher;
29 template <class, class> class HashMapEntry;
30 namespace detail {
31 template <class T> class HashTableEntry;
32 template <class T, class HashPolicy, class AllocPolicy> class HashTable;
33 }
35 /*****************************************************************************/
37 // A JS-friendly, STL-like container providing a hash-based map from keys to
38 // values. In particular, HashMap calls constructors and destructors of all
39 // objects added so non-PODs may be used safely.
40 //
41 // Key/Value requirements:
42 // - movable, destructible, assignable
43 // HashPolicy requirements:
44 // - see Hash Policy section below
45 // AllocPolicy:
46 // - see jsalloc.h
47 //
48 // Note:
49 // - HashMap is not reentrant: Key/Value/HashPolicy/AllocPolicy members
50 // called by HashMap must not call back into the same HashMap object.
51 // - Due to the lack of exception handling, the user must call |init()|.
template <class Key,
          class Value,
          class HashPolicy = DefaultHasher<Key>,
          class AllocPolicy = TempAllocPolicy>
class HashMap
{
    // The entry type stored in the underlying HashTable: a (key, value) pair.
    typedef HashMapEntry<Key, Value> TableEntry;

    // Adapts the user-supplied, Key-based HashPolicy to the TableEntry type
    // stored by the generic HashTable.
    struct MapHashPolicy : HashPolicy
    {
        typedef Key KeyType;
        static const Key &getKey(TableEntry &e) { return e.key(); }
        static void setKey(TableEntry &e, Key &k) { HashPolicy::rekey(e.mutableKey(), k); }
    };

    typedef detail::HashTable<TableEntry, MapHashPolicy, AllocPolicy> Impl;
    Impl impl;

  public:
    typedef typename HashPolicy::Lookup Lookup;
    typedef TableEntry Entry;

    // HashMap construction is fallible (due to OOM); thus the user must call
    // init after constructing a HashMap and check the return value.
    HashMap(AllocPolicy a = AllocPolicy()) : impl(a) {}
    bool init(uint32_t len = 16) { return impl.init(len); }
    bool initialized() const { return impl.initialized(); }

    // Return whether the given lookup value is present in the map. E.g.:
    //
    //   typedef HashMap<int,char> HM;
    //   HM h;
    //   if (HM::Ptr p = h.lookup(3)) {
    //     const HM::Entry &e = *p; // p acts like a pointer to Entry
    //     assert(p->key() == 3);   // Entry contains the key
    //     char val = p->value();   // and value
    //   }
    //
    // Also see the definition of Ptr in HashTable above (with T = Entry).
    typedef typename Impl::Ptr Ptr;
    Ptr lookup(const Lookup &l) const { return impl.lookup(l); }

    // Like lookup, but does not assert if two threads call lookup at the same
    // time. Only use this method when none of the threads will modify the map.
    Ptr readonlyThreadsafeLookup(const Lookup &l) const { return impl.readonlyThreadsafeLookup(l); }

    // Assuming |p.found()|, remove |*p|.
    void remove(Ptr p) { impl.remove(p); }

    // Like |lookup(l)|, but on miss, |p = lookupForAdd(l)| allows efficient
    // insertion of Key |k| (where |HashPolicy::match(k,l) == true|) using
    // |add(p,k,v)|. After |add(p,k,v)|, |p| points to the new Entry. E.g.:
    //
    //   typedef HashMap<int,char> HM;
    //   HM h;
    //   HM::AddPtr p = h.lookupForAdd(3);
    //   if (!p) {
    //     if (!h.add(p, 3, 'a'))
    //       return false;
    //   }
    //   const HM::Entry &e = *p; // p acts like a pointer to Entry
    //   assert(p->key() == 3);   // Entry contains the key
    //   char val = p->value();   // and value
    //
    // Also see the definition of AddPtr in HashTable above (with T = Entry).
    //
    // N.B. The caller must ensure that no mutating hash table operations
    // occur between a pair of |lookupForAdd| and |add| calls. To avoid
    // looking up the key a second time, the caller may use the more efficient
    // relookupOrAdd method. This method reuses part of the hashing computation
    // to more efficiently insert the key if it has not been added. For
    // example, a mutation-handling version of the previous example:
    //
    //    HM::AddPtr p = h.lookupForAdd(3);
    //    if (!p) {
    //      call_that_may_mutate_h();
    //      if (!h.relookupOrAdd(p, 3, 'a'))
    //        return false;
    //    }
    //    const HM::Entry &e = *p;
    //    assert(p->key() == 3);
    //    char val = p->value();
    typedef typename Impl::AddPtr AddPtr;
    AddPtr lookupForAdd(const Lookup &l) const {
        return impl.lookupForAdd(l);
    }

    template<typename KeyInput, typename ValueInput>
    bool add(AddPtr &p, KeyInput &&k, ValueInput &&v) {
        Entry e(mozilla::Forward<KeyInput>(k), mozilla::Forward<ValueInput>(v));
        return impl.add(p, mozilla::Move(e));
    }

    // Add |k| paired with a default-constructed Value.
    template<typename KeyInput>
    bool add(AddPtr &p, KeyInput &&k) {
        Entry e(mozilla::Forward<KeyInput>(k), Value());
        return impl.add(p, mozilla::Move(e));
    }

    template<typename KeyInput, typename ValueInput>
    bool relookupOrAdd(AddPtr &p, KeyInput &&k, ValueInput &&v) {
        // |k| may have been moved into |e|, so hash/compare with |e.key()|.
        Entry e(mozilla::Forward<KeyInput>(k), mozilla::Forward<ValueInput>(v));
        return impl.relookupOrAdd(p, e.key(), mozilla::Move(e));
    }

    // |all()| returns a Range containing |count()| elements. E.g.:
    //
    //   typedef HashMap<int,char> HM;
    //   HM h;
    //   for (HM::Range r = h.all(); !r.empty(); r.popFront())
    //     char c = r.front().value();
    //
    // Also see the definition of Range in HashTable above (with T = Entry).
    typedef typename Impl::Range Range;
    Range all() const { return impl.all(); }

    // Typedef for the enumeration class. An Enum may be used to examine and
    // remove table entries:
    //
    //   typedef HashMap<int,char> HM;
    //   HM s;
    //   for (HM::Enum e(s); !e.empty(); e.popFront())
    //     if (e.front().value() == 'l')
    //       e.removeFront();
    //
    // Table resize may occur in Enum's destructor. Also see the definition of
    // Enum in HashTable above (with T = Entry).
    typedef typename Impl::Enum Enum;

    // Remove all entries. This does not shrink the table. For that consider
    // using the finish() method.
    void clear() { impl.clear(); }

    // Remove all entries without triggering destructors. This method is unsafe.
    void clearWithoutCallingDestructors() { impl.clearWithoutCallingDestructors(); }

    // Remove all the entries and release all internal buffers. The map must
    // be initialized again before any use.
    void finish() { impl.finish(); }

    // Does the table contain any entries?
    bool empty() const { return impl.empty(); }

    // Number of live elements in the map.
    uint32_t count() const { return impl.count(); }

    // Total number of allocated entries in the dynamic table. Note: resize
    // will happen well before count() == capacity().
    size_t capacity() const { return impl.capacity(); }

    // Don't just call |impl.sizeOfExcludingThis()| because there's no
    // guarantee that |impl| is the first field in HashMap.
    size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
        return impl.sizeOfExcludingThis(mallocSizeOf);
    }
    size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
        return mallocSizeOf(this) + impl.sizeOfExcludingThis(mallocSizeOf);
    }

    // If |generation()| is the same before and after a HashMap operation,
    // pointers into the table remain valid.
    unsigned generation() const { return impl.generation(); }

    /************************************************** Shorthand operations */

    bool has(const Lookup &l) const {
        return impl.lookup(l) != nullptr;
    }

    // Overwrite existing value with v. Return false on oom.
    template<typename KeyInput, typename ValueInput>
    bool put(KeyInput &&k, ValueInput &&v) {
        AddPtr p = lookupForAdd(k);
        if (p) {
            p->value() = mozilla::Forward<ValueInput>(v);
            return true;
        }
        return add(p, mozilla::Forward<KeyInput>(k), mozilla::Forward<ValueInput>(v));
    }

    // Like put, but assert that the given key is not already present.
    template<typename KeyInput, typename ValueInput>
    bool putNew(KeyInput &&k, ValueInput &&v) {
        // |k| may have been moved into |e|, so hash/compare with |e.key()|.
        Entry e(mozilla::Forward<KeyInput>(k), mozilla::Forward<ValueInput>(v));
        return impl.putNew(e.key(), mozilla::Move(e));
    }

    // Add (k,defaultValue) if |k| is not found. Return a false-y Ptr on oom.
    Ptr lookupWithDefault(const Key &k, const Value &defaultValue) {
        AddPtr p = lookupForAdd(k);
        if (p)
            return p;
        (void)add(p, k, defaultValue);  // p is left false-y on oom.
        return p;
    }

    // Remove if present.
    void remove(const Lookup &l) {
        if (Ptr p = lookup(l))
            remove(p);
    }

    // Infallibly rekey one entry, if necessary.
    // Requires template parameters Key and HashPolicy::Lookup to be the same type.
    void rekeyIfMoved(const Key &old_key, const Key &new_key) {
        if (old_key != new_key)
            rekeyAs(old_key, new_key, new_key);
    }

    // Infallibly rekey one entry, if present.
    void rekeyAs(const Lookup &old_lookup, const Lookup &new_lookup, const Key &new_key) {
        if (Ptr p = lookup(old_lookup))
            impl.rekeyAndMaybeRehash(p, new_lookup, new_key);
    }

    // HashMap is movable
    HashMap(HashMap &&rhs) : impl(mozilla::Move(rhs.impl)) {}
    void operator=(HashMap &&rhs) {
        MOZ_ASSERT(this != &rhs, "self-move assignment is prohibited");
        impl = mozilla::Move(rhs.impl);
    }

  private:
    // HashMap is not copyable or assignable
    HashMap(const HashMap &hm) MOZ_DELETE;
    HashMap &operator=(const HashMap &hm) MOZ_DELETE;

    friend class Impl::Enum;
};
282 /*****************************************************************************/
284 // A JS-friendly, STL-like container providing a hash-based set of values. In
285 // particular, HashSet calls constructors and destructors of all objects added
286 // so non-PODs may be used safely.
287 //
288 // T requirements:
289 // - movable, destructible, assignable
290 // HashPolicy requirements:
291 // - see Hash Policy section below
292 // AllocPolicy:
293 // - see jsalloc.h
294 //
295 // Note:
296 // - HashSet is not reentrant: T/HashPolicy/AllocPolicy members called by
297 // HashSet must not call back into the same HashSet object.
298 // - Due to the lack of exception handling, the user must call |init()|.
template <class T,
          class HashPolicy = DefaultHasher<T>,
          class AllocPolicy = TempAllocPolicy>
class HashSet
{
    // Adapts the user-supplied HashPolicy to the keyed interface the generic
    // HashTable expects: a set element is its own key.
    struct SetOps : HashPolicy
    {
        typedef T KeyType;
        static const KeyType &getKey(const T &t) { return t; }
        static void setKey(T &t, KeyType &k) { HashPolicy::rekey(t, k); }
    };

    typedef detail::HashTable<const T, SetOps, AllocPolicy> Impl;
    Impl impl;

  public:
    typedef typename HashPolicy::Lookup Lookup;
    typedef T Entry;

    // HashSet construction is fallible (due to OOM); thus the user must call
    // init after constructing a HashSet and check the return value.
    HashSet(AllocPolicy a = AllocPolicy()) : impl(a) {}
    bool init(uint32_t len = 16) { return impl.init(len); }
    bool initialized() const { return impl.initialized(); }

    // Return whether the given lookup value is present in the set. E.g.:
    //
    //   typedef HashSet<int> HS;
    //   HS h;
    //   if (HS::Ptr p = h.lookup(3)) {
    //     assert(*p == 3); // p acts like a pointer to int
    //   }
    //
    // Also see the definition of Ptr in HashTable above.
    typedef typename Impl::Ptr Ptr;
    Ptr lookup(const Lookup &l) const { return impl.lookup(l); }

    // Like lookup, but does not assert if two threads call lookup at the same
    // time. Only use this method when none of the threads will modify the set.
    Ptr readonlyThreadsafeLookup(const Lookup &l) const { return impl.readonlyThreadsafeLookup(l); }

    // Assuming |p.found()|, remove |*p|.
    void remove(Ptr p) { impl.remove(p); }

    // Like |lookup(l)|, but on miss, |p = lookupForAdd(l)| allows efficient
    // insertion of T value |t| (where |HashPolicy::match(t,l) == true|) using
    // |add(p,t)|. After |add(p,t)|, |p| points to the new element. E.g.:
    //
    //   typedef HashSet<int> HS;
    //   HS h;
    //   HS::AddPtr p = h.lookupForAdd(3);
    //   if (!p) {
    //     if (!h.add(p, 3))
    //       return false;
    //   }
    //   assert(*p == 3); // p acts like a pointer to int
    //
    // Also see the definition of AddPtr in HashTable above.
    //
    // N.B. The caller must ensure that no mutating hash table operations
    // occur between a pair of |lookupForAdd| and |add| calls. To avoid
    // looking up the key a second time, the caller may use the more efficient
    // relookupOrAdd method. This method reuses part of the hashing computation
    // to more efficiently insert the key if it has not been added. For
    // example, a mutation-handling version of the previous example:
    //
    //    HS::AddPtr p = h.lookupForAdd(3);
    //    if (!p) {
    //      call_that_may_mutate_h();
    //      if (!h.relookupOrAdd(p, 3, 3))
    //        return false;
    //    }
    //    assert(*p == 3);
    //
    // Note that relookupOrAdd(p,l,t) performs Lookup using |l| and adds the
    // entry |t|, where the caller ensures match(l,t).
    typedef typename Impl::AddPtr AddPtr;
    AddPtr lookupForAdd(const Lookup &l) const { return impl.lookupForAdd(l); }

    template <typename U>
    bool add(AddPtr &p, U &&u) {
        return impl.add(p, mozilla::Forward<U>(u));
    }

    template <typename U>
    bool relookupOrAdd(AddPtr &p, const Lookup &l, U &&u) {
        return impl.relookupOrAdd(p, l, mozilla::Forward<U>(u));
    }

    // |all()| returns a Range containing |count()| elements:
    //
    //   typedef HashSet<int> HS;
    //   HS h;
    //   for (HS::Range r = h.all(); !r.empty(); r.popFront())
    //     int i = r.front();
    //
    // Also see the definition of Range in HashTable above.
    typedef typename Impl::Range Range;
    Range all() const { return impl.all(); }

    // Typedef for the enumeration class. An Enum may be used to examine and
    // remove table entries:
    //
    //   typedef HashSet<int> HS;
    //   HS s;
    //   for (HS::Enum e(s); !e.empty(); e.popFront())
    //     if (e.front() == 42)
    //       e.removeFront();
    //
    // Table resize may occur in Enum's destructor. Also see the definition of
    // Enum in HashTable above.
    typedef typename Impl::Enum Enum;

    // Remove all entries. This does not shrink the table. For that consider
    // using the finish() method.
    void clear() { impl.clear(); }

    // Remove all the entries and release all internal buffers. The set must
    // be initialized again before any use.
    void finish() { impl.finish(); }

    // Does the table contain any entries?
    bool empty() const { return impl.empty(); }

    // Number of live elements in the set.
    uint32_t count() const { return impl.count(); }

    // Total number of allocated entries in the dynamic table. Note: resize
    // will happen well before count() == capacity().
    size_t capacity() const { return impl.capacity(); }

    // Don't just call |impl.sizeOfExcludingThis()| because there's no
    // guarantee that |impl| is the first field in HashSet.
    size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
        return impl.sizeOfExcludingThis(mallocSizeOf);
    }
    size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
        return mallocSizeOf(this) + impl.sizeOfExcludingThis(mallocSizeOf);
    }

    // If |generation()| is the same before and after a HashSet operation,
    // pointers into the table remain valid.
    unsigned generation() const { return impl.generation(); }

    /************************************************** Shorthand operations */

    bool has(const Lookup &l) const {
        return impl.lookup(l) != nullptr;
    }

    // Add |u| if it is not present already. Return false on oom.
    template <typename U>
    bool put(U &&u) {
        AddPtr p = lookupForAdd(u);
        return p ? true : add(p, mozilla::Forward<U>(u));
    }

    // Like put, but assert that the given key is not already present.
    template <typename U>
    bool putNew(U &&u) {
        return impl.putNew(u, mozilla::Forward<U>(u));
    }

    // As above, but the caller supplies the Lookup |l| that matches |u|.
    template <typename U>
    bool putNew(const Lookup &l, U &&u) {
        return impl.putNew(l, mozilla::Forward<U>(u));
    }

    // Remove if present.
    void remove(const Lookup &l) {
        if (Ptr p = lookup(l))
            remove(p);
    }

    // Infallibly rekey one entry, if necessary.
    // Requires template parameters T and HashPolicy::Lookup to be the same type.
    void rekeyIfMoved(const Lookup &old_value, const T &new_value) {
        if (old_value != new_value)
            rekeyAs(old_value, new_value, new_value);
    }

    // Infallibly rekey one entry, if present.
    void rekeyAs(const Lookup &old_lookup, const Lookup &new_lookup, const T &new_value) {
        if (Ptr p = lookup(old_lookup))
            impl.rekeyAndMaybeRehash(p, new_lookup, new_value);
    }

    // HashSet is movable
    HashSet(HashSet &&rhs) : impl(mozilla::Move(rhs.impl)) {}
    void operator=(HashSet &&rhs) {
        MOZ_ASSERT(this != &rhs, "self-move assignment is prohibited");
        impl = mozilla::Move(rhs.impl);
    }

  private:
    // HashSet is not copyable or assignable
    HashSet(const HashSet &hs) MOZ_DELETE;
    HashSet &operator=(const HashSet &hs) MOZ_DELETE;

    friend class Impl::Enum;
};
500 /*****************************************************************************/
502 // Hash Policy
503 //
504 // A hash policy P for a hash table with key-type Key must provide:
505 // - a type |P::Lookup| to use to lookup table entries;
506 // - a static member function |P::hash| with signature
507 //
508 // static js::HashNumber hash(Lookup)
509 //
510 // to use to hash the lookup type; and
511 // - a static member function |P::match| with signature
512 //
513 // static bool match(Key, Lookup)
514 //
515 // to use to test equality of key and lookup values.
516 //
517 // Normally, Lookup = Key. In general, though, different values and types of
518 // values can be used to lookup and store. If a Lookup value |l| is != to the
519 // added Key value |k|, the user must ensure that |P::match(k,l)|. E.g.:
520 //
//   js::HashSet<Key, P>::AddPtr p = h.lookupForAdd(l);
522 // if (!p) {
523 // assert(P::match(k, l)); // must hold
524 // h.add(p, k);
525 // }
527 // Pointer hashing policy that strips the lowest zeroBits when calculating the
528 // hash to improve key distribution.
529 template <typename Key, size_t zeroBits>
530 struct PointerHasher
531 {
532 typedef Key Lookup;
533 static HashNumber hash(const Lookup &l) {
534 MOZ_ASSERT(!JS::IsPoisonedPtr(l));
535 size_t word = reinterpret_cast<size_t>(l) >> zeroBits;
536 JS_STATIC_ASSERT(sizeof(HashNumber) == 4);
537 #if JS_BITS_PER_WORD == 32
538 return HashNumber(word);
539 #else
540 JS_STATIC_ASSERT(sizeof word == 8);
541 return HashNumber((word >> 32) ^ word);
542 #endif
543 }
544 static bool match(const Key &k, const Lookup &l) {
545 MOZ_ASSERT(!JS::IsPoisonedPtr(k));
546 MOZ_ASSERT(!JS::IsPoisonedPtr(l));
547 return k == l;
548 }
549 static void rekey(Key &k, const Key& newKey) {
550 k = newKey;
551 }
552 };
554 // Default hash policy: just use the 'lookup' value. This of course only
555 // works if the lookup value is integral. HashTable applies ScrambleHashCode to
556 // the result of the 'hash' which means that it is 'ok' if the lookup value is
557 // not well distributed over the HashNumber domain.
template <class Key>
struct DefaultHasher
{
    typedef Key Lookup;
    static HashNumber hash(const Lookup &l) {
        // Hash if can implicitly cast to hash number type.
        return l;
    }
    static bool match(const Key &k, const Lookup &l) {
        // Use builtin or overloaded operator==.
        return k == l;
    }
    // Overwrite |k| in place; invoked through the table's setKey adapters
    // when an entry is rekeyed.
    static void rekey(Key &k, const Key& newKey) {
        k = newKey;
    }
};
575 // Specialize hashing policy for pointer types. It assumes that the type is
576 // at least word-aligned. For types with smaller size use PointerHasher.
// Strip the always-zero low bits implied by word alignment (log2 of the
// pointer size) before hashing.
template <class T>
struct DefaultHasher<T *> : PointerHasher<T *, mozilla::tl::FloorLog2<sizeof(void *)>::value>
{};
581 // For doubles, we can xor the two uint32s.
582 template <>
583 struct DefaultHasher<double>
584 {
585 typedef double Lookup;
586 static HashNumber hash(double d) {
587 JS_STATIC_ASSERT(sizeof(HashNumber) == 4);
588 uint64_t u = mozilla::BitwiseCast<uint64_t>(d);
589 return HashNumber(u ^ (u >> 32));
590 }
591 static bool match(double lhs, double rhs) {
592 return mozilla::BitwiseCast<uint64_t>(lhs) == mozilla::BitwiseCast<uint64_t>(rhs);
593 }
594 };
596 template <>
597 struct DefaultHasher<float>
598 {
599 typedef float Lookup;
600 static HashNumber hash(float f) {
601 JS_STATIC_ASSERT(sizeof(HashNumber) == 4);
602 return HashNumber(mozilla::BitwiseCast<uint32_t>(f));
603 }
604 static bool match(float lhs, float rhs) {
605 return mozilla::BitwiseCast<uint32_t>(lhs) == mozilla::BitwiseCast<uint32_t>(rhs);
606 }
607 };
609 /*****************************************************************************/
611 // Both HashMap and HashSet are implemented by a single HashTable that is even
612 // more heavily parameterized than the other two. This leaves HashTable gnarly
613 // and extremely coupled to HashMap and HashSet; thus code should not use
614 // HashTable directly.
template <class Key, class Value>
class HashMapEntry
{
    Key key_;
    Value value_;

    template <class, class, class> friend class detail::HashTable;
    template <class> friend class detail::HashTableEntry;
    template <class, class, class, class> friend class HashMap;

    // Non-const key access is restricted to the hash table internals
    // (the friends above), which use it for rekeying.
    Key & mutableKey() { return key_; }

  public:
    template<typename KeyInput, typename ValueInput>
    HashMapEntry(KeyInput &&k, ValueInput &&v)
      : key_(mozilla::Forward<KeyInput>(k)),
        value_(mozilla::Forward<ValueInput>(v))
    {}

    HashMapEntry(HashMapEntry &&rhs)
      : key_(mozilla::Move(rhs.key_)),
        value_(mozilla::Move(rhs.value_))
    {}

    typedef Key KeyType;
    typedef Value ValueType;

    // The key is read-only to users; only value() is mutable.
    const Key & key() const { return key_; }
    const Value & value() const { return value_; }
    Value & value() { return value_; }

  private:
    // Entries are move-only.
    HashMapEntry(const HashMapEntry &) MOZ_DELETE;
    void operator=(const HashMapEntry &) MOZ_DELETE;
};
652 } // namespace js
namespace mozilla {

// A hash table entry is a POD exactly when its payload type is.
template <typename T>
struct IsPod<js::detail::HashTableEntry<T> > : IsPod<T> {};

// A map entry is a POD exactly when both its key and value types are.
template <typename K, typename V>
struct IsPod<js::HashMapEntry<K, V> >
  : IntegralConstant<bool, IsPod<K>::value && IsPod<V>::value>
{};

} // namespace mozilla
666 namespace js {
668 namespace detail {
670 template <class T, class HashPolicy, class AllocPolicy>
671 class HashTable;
template <class T>
class HashTableEntry
{
    template <class, class, class> friend class HashTable;
    typedef typename mozilla::RemoveConst<T>::Type NonConstT;

    // The entry's hash doubles as its state: sFreeKey (0) = never used,
    // sRemovedKey (1) = tombstone, any greater value = live (see isLiveHash).
    HashNumber keyHash;
    // Raw, suitably aligned storage for T; an element is constructed in
    // place by setLive() and destroyed explicitly (see the POD note below).
    mozilla::AlignedStorage2<NonConstT> mem;

    static const HashNumber sFreeKey = 0;
    static const HashNumber sRemovedKey = 1;
    // The low bit of a live keyHash serves as a collision flag;
    // getKeyHash() and matchHash() mask it off.
    static const HashNumber sCollisionBit = 1;

    static bool isLiveHash(HashNumber hash)
    {
        return hash > sRemovedKey;
    }

    // Entries are manipulated as raw storage, never as C++ objects.
    HashTableEntry(const HashTableEntry &) MOZ_DELETE;
    void operator=(const HashTableEntry &) MOZ_DELETE;
    ~HashTableEntry() MOZ_DELETE;

  public:
    // NB: HashTableEntry is treated as a POD: no constructor or destructor calls.

    // Run T's destructor iff an element is currently stored here.
    void destroyIfLive() {
        if (isLive())
            mem.addr()->~T();
    }

    // Run T's destructor; the entry must be live.
    void destroy() {
        MOZ_ASSERT(isLive());
        mem.addr()->~T();
    }

    // Bitwise-exchange state and storage with |other|.
    void swap(HashTableEntry *other) {
        mozilla::Swap(keyHash, other->keyHash);
        mozilla::Swap(mem, other->mem);
    }

    T &get() { MOZ_ASSERT(isLive()); return *mem.addr(); }

    bool isFree() const { return keyHash == sFreeKey; }
    // Destroy the element and mark the slot never-used (no tombstone).
    void clearLive() { MOZ_ASSERT(isLive()); keyHash = sFreeKey; mem.addr()->~T(); }
    void clear() { if (isLive()) mem.addr()->~T(); keyHash = sFreeKey; }
    bool isRemoved() const { return keyHash == sRemovedKey; }
    // Destroy the element and leave a tombstone so probe chains stay intact.
    void removeLive() { MOZ_ASSERT(isLive()); keyHash = sRemovedKey; mem.addr()->~T(); }
    bool isLive() const { return isLiveHash(keyHash); }
    void setCollision() { MOZ_ASSERT(isLive()); keyHash |= sCollisionBit; }
    void setCollision(HashNumber bit) { MOZ_ASSERT(isLive()); keyHash |= bit; }
    void unsetCollision() { keyHash &= ~sCollisionBit; }
    bool hasCollision() const { return keyHash & sCollisionBit; }
    bool matchHash(HashNumber hn) { return (keyHash & ~sCollisionBit) == hn; }
    HashNumber getKeyHash() const { return keyHash & ~sCollisionBit; }

    // Construct an element in place with hash |hn|, which must be a live
    // hash value (> sRemovedKey); the slot must not already be live.
    template <class U>
    void setLive(HashNumber hn, U &&u)
    {
        MOZ_ASSERT(!isLive());
        keyHash = hn;
        new(mem.addr()) T(mozilla::Forward<U>(u));
        MOZ_ASSERT(isLive());
    }
};
738 template <class T, class HashPolicy, class AllocPolicy>
739 class HashTable : private AllocPolicy
740 {
741 typedef typename mozilla::RemoveConst<T>::Type NonConstT;
742 typedef typename HashPolicy::KeyType Key;
743 typedef typename HashPolicy::Lookup Lookup;
745 public:
746 typedef HashTableEntry<T> Entry;
748 // A nullable pointer to a hash table element. A Ptr |p| can be tested
749 // either explicitly |if (p.found()) p->...| or using boolean conversion
750 // |if (p) p->...|. Ptr objects must not be used after any mutating hash
751 // table operations unless |generation()| is tested.
752 class Ptr
753 {
754 friend class HashTable;
755 typedef void (Ptr::* ConvertibleToBool)();
756 void nonNull() {}
758 Entry *entry_;
759 #ifdef DEBUG
760 const HashTable *table_;
761 uint32_t generation;
762 #endif
764 protected:
765 Ptr(Entry &entry, const HashTable &tableArg)
766 : entry_(&entry)
767 #ifdef DEBUG
768 , table_(&tableArg)
769 , generation(tableArg.generation())
770 #endif
771 {}
773 public:
774 // Leaves Ptr uninitialized.
775 Ptr() {
776 #ifdef JS_DEBUG
777 entry_ = (Entry *)0xbad;
778 #endif
779 }
781 bool found() const {
782 MOZ_ASSERT(generation == table_->generation());
783 return entry_->isLive();
784 }
786 operator ConvertibleToBool() const {
787 return found() ? &Ptr::nonNull : 0;
788 }
790 bool operator==(const Ptr &rhs) const {
791 MOZ_ASSERT(found() && rhs.found());
792 return entry_ == rhs.entry_;
793 }
795 bool operator!=(const Ptr &rhs) const {
796 MOZ_ASSERT(generation == table_->generation());
797 return !(*this == rhs);
798 }
800 T &operator*() const {
801 MOZ_ASSERT(generation == table_->generation());
802 return entry_->get();
803 }
805 T *operator->() const {
806 MOZ_ASSERT(generation == table_->generation());
807 return &entry_->get();
808 }
809 };
811 // A Ptr that can be used to add a key after a failed lookup.
    class AddPtr : public Ptr
    {
        friend class HashTable;
        HashNumber keyHash;                           // hash computed during the failed lookup, reused by add()
        mozilla::DebugOnly<uint64_t> mutationCount;   // detects table mutations between lookupForAdd and add

        AddPtr(Entry &entry, const HashTable &tableArg, HashNumber hn)
          : Ptr(entry, tableArg), keyHash(hn), mutationCount(tableArg.mutationCount)
        {}

      public:
        // Leaves AddPtr uninitialized.
        AddPtr() {}
    };
827 // A collection of hash table entries. The collection is enumerated by
828 // calling |front()| followed by |popFront()| as long as |!empty()|. As
829 // with Ptr/AddPtr, Range objects must not be used after any mutating hash
830 // table operation unless the |generation()| is tested.
831 class Range
832 {
833 protected:
834 friend class HashTable;
836 Range(const HashTable &tableArg, Entry *c, Entry *e)
837 : cur(c)
838 , end(e)
839 #ifdef DEBUG
840 , table_(&tableArg)
841 , mutationCount(tableArg.mutationCount)
842 , generation(tableArg.generation())
843 , validEntry(true)
844 #endif
845 {
846 while (cur < end && !cur->isLive())
847 ++cur;
848 }
850 Entry *cur, *end;
851 #ifdef DEBUG
852 const HashTable *table_;
853 uint64_t mutationCount;
854 uint32_t generation;
855 bool validEntry;
856 #endif
858 public:
859 Range()
860 : cur(nullptr)
861 , end(nullptr)
862 #ifdef DEBUG
863 , table_(nullptr)
864 , mutationCount(0)
865 , generation(0)
866 , validEntry(false)
867 #endif
868 {}
870 bool empty() const {
871 MOZ_ASSERT(generation == table_->generation());
872 MOZ_ASSERT(mutationCount == table_->mutationCount);
873 return cur == end;
874 }
876 T &front() const {
877 MOZ_ASSERT(validEntry);
878 MOZ_ASSERT(!empty());
879 MOZ_ASSERT(generation == table_->generation());
880 MOZ_ASSERT(mutationCount == table_->mutationCount);
881 return cur->get();
882 }
884 void popFront() {
885 MOZ_ASSERT(!empty());
886 MOZ_ASSERT(generation == table_->generation());
887 MOZ_ASSERT(mutationCount == table_->mutationCount);
888 while (++cur < end && !cur->isLive())
889 continue;
890 #ifdef DEBUG
891 validEntry = true;
892 #endif
893 }
894 };
896 // A Range whose lifetime delimits a mutating enumeration of a hash table.
897 // Since rehashing when elements were removed during enumeration would be
898 // bad, it is postponed until the Enum is destructed. Since the Enum's
899 // destructor touches the hash table, the user must ensure that the hash
900 // table is still alive when the destructor runs.
901 class Enum : public Range
902 {
903 friend class HashTable;
905 HashTable &table_;
906 bool rekeyed;
907 bool removed;
909 /* Not copyable. */
910 Enum(const Enum &) MOZ_DELETE;
911 void operator=(const Enum &) MOZ_DELETE;
913 public:
914 template<class Map> explicit
915 Enum(Map &map) : Range(map.all()), table_(map.impl), rekeyed(false), removed(false) {}
917 // Removes the |front()| element from the table, leaving |front()|
918 // invalid until the next call to |popFront()|. For example:
919 //
920 // HashSet<int> s;
921 // for (HashSet<int>::Enum e(s); !e.empty(); e.popFront())
922 // if (e.front() == 42)
923 // e.removeFront();
924 void removeFront() {
925 table_.remove(*this->cur);
926 removed = true;
927 #ifdef DEBUG
928 this->validEntry = false;
929 this->mutationCount = table_.mutationCount;
930 #endif
931 }
933 // Removes the |front()| element and re-inserts it into the table with
934 // a new key at the new Lookup position. |front()| is invalid after
935 // this operation until the next call to |popFront()|.
936 void rekeyFront(const Lookup &l, const Key &k) {
937 Ptr p(*this->cur, table_);
938 table_.rekeyWithoutRehash(p, l, k);
939 rekeyed = true;
940 #ifdef DEBUG
941 this->validEntry = false;
942 this->mutationCount = table_.mutationCount;
943 #endif
944 }
946 void rekeyFront(const Key &k) {
947 rekeyFront(k, k);
948 }
950 // Potentially rehashes the table.
951 ~Enum() {
952 if (rekeyed) {
953 table_.gen++;
954 table_.checkOverRemoved();
955 }
957 if (removed)
958 table_.compactIfUnderloaded();
959 }
960 };
    // HashTable is movable
    HashTable(HashTable &&rhs)
      : AllocPolicy(rhs)
    {
        // PodAssign bitwise-copies every member from |rhs|; NOTE(review):
        // this relies on HashTable being safely bitwise-movable — confirm if
        // non-trivial members are ever added. |rhs.table| is nulled so rhs's
        // destructor does not free the storage this table now owns.
        mozilla::PodAssign(this, &rhs);
        rhs.table = nullptr;
    }
    void operator=(HashTable &&rhs) {
        MOZ_ASSERT(this != &rhs, "self-move assignment is prohibited");
        // Release our current storage before bitwise-adopting rhs's state.
        if (table)
            destroyTable(*this, table, capacity());
        mozilla::PodAssign(this, &rhs);
        rhs.table = nullptr;
    }
977 private:
978 // HashTable is not copyable or assignable
979 HashTable(const HashTable &) MOZ_DELETE;
980 void operator=(const HashTable &) MOZ_DELETE;
  private:
    // Core table state. |table| is null until init() succeeds; |gen| is
    // bumped whenever the entry storage changes (changeTableSize, finish,
    // Enum rekeying).
    uint32_t hashShift;      // multiplicative hash shift
    uint32_t entryCount;     // number of entries in table
    uint32_t gen;            // entry storage generation number
    uint32_t removedCount;   // removed entry sentinels in table
    Entry *table;            // entry storage
    // Record a table size of 2^sizeLog2 by storing the corresponding
    // multiplicative-hash shift; capacity() recomputes the size from it.
    void setTableSizeLog2(unsigned sizeLog2)
    {
        hashShift = sHashBits - sizeLog2;
    }
#ifdef JS_DEBUG
    // Per-table instrumentation counters, recorded via the METER() macro
    // below.
    // NOTE(review): this block is guarded by JS_DEBUG while the other
    // debug-only blocks in this class use plain DEBUG -- confirm the
    // mismatch is intentional.
    mutable struct Stats
    {
        uint32_t searches;       // total number of table searches
        uint32_t steps;          // hash chain links traversed
        uint32_t hits;           // searches that found key
        uint32_t misses;         // searches that didn't find key
        uint32_t addOverRemoved; // adds that recycled a removed entry
        uint32_t removes;        // calls to remove
        uint32_t removeFrees;    // calls to remove that freed the entry
        uint32_t grows;          // table expansions
        uint32_t shrinks;        // table contractions
        uint32_t compresses;     // table compressions
        uint32_t rehashes;       // tombstone decontaminations
    } stats;
# define METER(x) x
#else
# define METER(x)
#endif
    // Debug-only reentrancy detection (see mozilla/ReentrancyGuard.h) and
    // a mutation counter used to catch use of stale Ptr/AddPtr values.
    friend class mozilla::ReentrancyGuard;
    mutable mozilla::DebugOnly<bool> entered;
    mozilla::DebugOnly<uint64_t> mutationCount;
    // The default initial capacity is 32 (enough to hold 16 elements), but it
    // can be as low as 4.
    static const unsigned sMinCapacityLog2 = 2;
    static const unsigned sMinCapacity = 1 << sMinCapacityLog2;
    static const unsigned sMaxInit = JS_BIT(23);
    static const unsigned sMaxCapacity = JS_BIT(24);
    static const unsigned sHashBits = mozilla::tl::BitSize<HashNumber>::value;

    // Hash-table alpha is conceptually a fraction, but to avoid floating-point
    // math we implement it as a ratio of integers.
    static const uint8_t sAlphaDenominator = 4;
    static const uint8_t sMinAlphaNumerator = 1; // min alpha: 1/4
    static const uint8_t sMaxAlphaNumerator = 3; // max alpha: 3/4

    // Reserved hash codes and the collision-path tag bit, shared with Entry.
    static const HashNumber sFreeKey = Entry::sFreeKey;
    static const HashNumber sRemovedKey = Entry::sRemovedKey;
    static const HashNumber sCollisionBit = Entry::sCollisionBit;
1036 static bool isLiveHash(HashNumber hash)
1037 {
1038 return Entry::isLiveHash(hash);
1039 }
1041 static HashNumber prepareHash(const Lookup& l)
1042 {
1043 HashNumber keyHash = ScrambleHashCode(HashPolicy::hash(l));
1045 // Avoid reserved hash codes.
1046 if (!isLiveHash(keyHash))
1047 keyHash -= (sRemovedKey + 1);
1048 return keyHash & ~sCollisionBit;
1049 }
1051 static Entry *createTable(AllocPolicy &alloc, uint32_t capacity)
1052 {
1053 static_assert(sFreeKey == 0,
1054 "newly-calloc'd tables have to be considered empty");
1055 static_assert(sMaxCapacity <= SIZE_MAX / sizeof(Entry),
1056 "would overflow allocating max number of entries");
1057 return static_cast<Entry*>(alloc.calloc_(capacity * sizeof(Entry)));
1058 }
1060 static void destroyTable(AllocPolicy &alloc, Entry *oldTable, uint32_t capacity)
1061 {
1062 for (Entry *e = oldTable, *end = e + capacity; e < end; ++e)
1063 e->destroyIfLive();
1064 alloc.free_(oldTable);
1065 }
1067 public:
1068 HashTable(AllocPolicy ap)
1069 : AllocPolicy(ap),
1070 hashShift(sHashBits),
1071 entryCount(0),
1072 gen(0),
1073 removedCount(0),
1074 table(nullptr),
1075 entered(false),
1076 mutationCount(0)
1077 {}
    // Allocate entry storage sized so that |length| elements can be inserted
    // without rehashing. Must be called exactly once before use; returns
    // false on allocation failure or if |length| is too large.
    MOZ_WARN_UNUSED_RESULT bool init(uint32_t length)
    {
        MOZ_ASSERT(!initialized());

        // Reject all lengths whose initial computed capacity would exceed
        // sMaxCapacity. Round that maximum length down to the nearest power
        // of two for speedier code.
        if (length > sMaxInit) {
            this->reportAllocOverflow();
            return false;
        }

        static_assert((sMaxInit * sAlphaDenominator) / sAlphaDenominator == sMaxInit,
                      "multiplication in numerator below could overflow");
        static_assert(sMaxInit * sAlphaDenominator <= UINT32_MAX - sMaxAlphaNumerator,
                      "numerator calculation below could potentially overflow");

        // Compute the smallest capacity allowing |length| elements to be
        // inserted without rehashing: ceil(length / max-alpha). (Ceiling
        // integral division: <http://stackoverflow.com/a/2745086>.)
        uint32_t newCapacity =
            (length * sAlphaDenominator + sMaxAlphaNumerator - 1) / sMaxAlphaNumerator;
        if (newCapacity < sMinCapacity)
            newCapacity = sMinCapacity;

        // FIXME: use JS_CEILING_LOG2 when PGO stops crashing (bug 543034).
        uint32_t roundUp = sMinCapacity, roundUpLog2 = sMinCapacityLog2;
        while (roundUp < newCapacity) {
            roundUp <<= 1;
            ++roundUpLog2;
        }

        // Capacity is always a power of two.
        newCapacity = roundUp;
        MOZ_ASSERT(newCapacity >= length);
        MOZ_ASSERT(newCapacity <= sMaxCapacity);

        table = createTable(*this, newCapacity);
        if (!table)
            return false;

        setTableSizeLog2(roundUpLog2);
        METER(memset(&stats, 0, sizeof(stats)));
        return true;
    }
1124 bool initialized() const
1125 {
1126 return !!table;
1127 }
    ~HashTable()
    {
        // finish() (or a move) may already have released the storage, so
        // only destroy a still-live table.
        if (table)
            destroyTable(*this, table, capacity());
    }
  private:
    // Primary hash: the top sizeLog2 bits of the (already prepared) hash,
    // selected by the multiplicative-hash shift.
    HashNumber hash1(HashNumber hash0) const
    {
        return hash0 >> hashShift;
    }
    // State for double hashing: the secondary probe stride |h2| and the
    // mask used to keep probes inside the table.
    struct DoubleHash
    {
        HashNumber h2;
        HashNumber sizeMask;
    };

    DoubleHash hash2(HashNumber curKeyHash) const
    {
        unsigned sizeLog2 = sHashBits - hashShift;
        DoubleHash dh = {
            // Derive the stride from the hash bits below those used by
            // hash1; OR in 1 so the stride is odd and therefore coprime
            // with the power-of-two table size (every slot is reachable).
            ((curKeyHash << sizeLog2) >> hashShift) | 1,
            (HashNumber(1) << sizeLog2) - 1
        };
        return dh;
    }
    // One double-hash probe step: step |h1| backwards by the stride,
    // modulo the table size.
    static HashNumber applyDoubleHash(HashNumber h1, const DoubleHash &dh)
    {
        return (h1 - dh.h2) & dh.sizeMask;
    }
    // Is occupancy (live entries plus removed sentinels) at or above
    // max-alpha (3/4) of capacity?
    bool overloaded()
    {
        static_assert(sMaxCapacity <= UINT32_MAX / sMaxAlphaNumerator,
                      "multiplication below could overflow");
        return entryCount + removedCount >=
               capacity() * sMaxAlphaNumerator / sAlphaDenominator;
    }
    // Would the table be underloaded if it had the given capacity and entryCount?
    // Note that removed sentinels are deliberately not counted, and a
    // minimum-capacity table is never considered underloaded.
    static bool wouldBeUnderloaded(uint32_t capacity, uint32_t entryCount)
    {
        static_assert(sMaxCapacity <= UINT32_MAX / sMinAlphaNumerator,
                      "multiplication below could overflow");
        return capacity > sMinCapacity &&
               entryCount <= capacity * sMinAlphaNumerator / sAlphaDenominator;
    }
    // Is the current table below min-alpha (1/4) occupancy?
    bool underloaded()
    {
        return wouldBeUnderloaded(capacity(), entryCount);
    }
    // Does live entry |e| match lookup value |l| under HashPolicy?
    static bool match(Entry &e, const Lookup &l)
    {
        return HashPolicy::match(HashPolicy::getKey(e.get()), l);
    }
    // Probe for |l| (whose prepared hash is |keyHash|). Returns either the
    // live entry holding |l|, or the entry where it should be inserted: a
    // free entry, preferring the first removed sentinel met on the probe
    // path. When |collisionBit| is sCollisionBit, every live entry crossed
    // while probing is tagged as lying on a collision path (used by
    // lookupForAdd so remove() knows which entries need sentinels).
    Entry &lookup(const Lookup &l, HashNumber keyHash, unsigned collisionBit) const
    {
        MOZ_ASSERT(isLiveHash(keyHash));
        MOZ_ASSERT(!(keyHash & sCollisionBit));
        MOZ_ASSERT(collisionBit == 0 || collisionBit == sCollisionBit);
        MOZ_ASSERT(table);
        METER(stats.searches++);

        // Compute the primary hash address.
        HashNumber h1 = hash1(keyHash);
        Entry *entry = &table[h1];

        // Miss: return space for a new entry.
        if (entry->isFree()) {
            METER(stats.misses++);
            return *entry;
        }

        // Hit: return entry.
        if (entry->matchHash(keyHash) && match(*entry, l)) {
            METER(stats.hits++);
            return *entry;
        }

        // Collision: double hash.
        DoubleHash dh = hash2(keyHash);

        // Save the first removed entry pointer so we can recycle later.
        Entry *firstRemoved = nullptr;

        while(true) {
            if (MOZ_UNLIKELY(entry->isRemoved())) {
                if (!firstRemoved)
                    firstRemoved = entry;
            } else {
                entry->setCollision(collisionBit);
            }

            METER(stats.steps++);
            h1 = applyDoubleHash(h1, dh);

            entry = &table[h1];
            if (entry->isFree()) {
                METER(stats.misses++);
                return firstRemoved ? *firstRemoved : *entry;
            }

            if (entry->matchHash(keyHash) && match(*entry, l)) {
                METER(stats.hits++);
                return *entry;
            }
        }
    }
    // This is a copy of lookup hardcoded to the assumptions:
    //   1. the lookup is a lookupForAdd
    //   2. the key, whose |keyHash| has been passed is not in the table,
    //   3. no entries have been removed from the table.
    // This specialized search avoids the need for recovering lookup values
    // from entries, which allows more flexible Lookup/Key types.
    Entry &findFreeEntry(HashNumber keyHash)
    {
        MOZ_ASSERT(!(keyHash & sCollisionBit));
        MOZ_ASSERT(table);
        METER(stats.searches++);

        // We assume 'keyHash' has already been distributed.

        // Compute the primary hash address.
        HashNumber h1 = hash1(keyHash);
        Entry *entry = &table[h1];

        // Miss: return space for a new entry.
        if (!entry->isLive()) {
            METER(stats.misses++);
            return *entry;
        }

        // Collision: double hash.
        DoubleHash dh = hash2(keyHash);

        while(true) {
            // Assumption 3 above: no removed sentinels on the probe path.
            MOZ_ASSERT(!entry->isRemoved());
            entry->setCollision();

            METER(stats.steps++);
            h1 = applyDoubleHash(h1, dh);

            entry = &table[h1];
            if (!entry->isLive()) {
                METER(stats.misses++);
                return *entry;
            }
        }
    }
    enum RebuildStatus { NotOverloaded, Rehashed, RehashFailed };

    // Rebuild the table with capacity scaled by 2^deltaLog2 (grow when
    // positive, compress at same size when zero, shrink when negative).
    // Allocates fresh storage and re-inserts every live entry, dropping
    // removed sentinels in the process.
    RebuildStatus changeTableSize(int deltaLog2)
    {
        // Look, but don't touch, until we succeed in getting new entry store.
        Entry *oldTable = table;
        uint32_t oldCap = capacity();
        uint32_t newLog2 = sHashBits - hashShift + deltaLog2;
        uint32_t newCapacity = JS_BIT(newLog2);
        if (newCapacity > sMaxCapacity) {
            this->reportAllocOverflow();
            return RehashFailed;
        }

        Entry *newTable = createTable(*this, newCapacity);
        if (!newTable)
            return RehashFailed;

        // We can't fail from here on, so update table parameters.
        setTableSizeLog2(newLog2);
        removedCount = 0;
        gen++;
        table = newTable;

        // Copy only live entries, leaving removed ones behind.
        for (Entry *src = oldTable, *end = src + oldCap; src < end; ++src) {
            if (src->isLive()) {
                HashNumber hn = src->getKeyHash();
                findFreeEntry(hn).setLive(hn, mozilla::Move(src->get()));
                src->destroy();
            }
        }

        // All entries have been destroyed, no need to destroyTable.
        this->free_(oldTable);
        return Rehashed;
    }
1323 RebuildStatus checkOverloaded()
1324 {
1325 if (!overloaded())
1326 return NotOverloaded;
1328 // Compress if a quarter or more of all entries are removed.
1329 int deltaLog2;
1330 if (removedCount >= (capacity() >> 2)) {
1331 METER(stats.compresses++);
1332 deltaLog2 = 0;
1333 } else {
1334 METER(stats.grows++);
1335 deltaLog2 = 1;
1336 }
1338 return changeTableSize(deltaLog2);
1339 }
1341 // Infallibly rehash the table if we are overloaded with removals.
1342 void checkOverRemoved()
1343 {
1344 if (overloaded()) {
1345 if (checkOverloaded() == RehashFailed)
1346 rehashTableInPlace();
1347 }
1348 }
    // Remove live entry |e|. Entries on a collision path become removed
    // sentinels so that probe chains stay intact; entries not on any
    // collision path can be cleared outright.
    void remove(Entry &e)
    {
        MOZ_ASSERT(table);
        METER(stats.removes++);

        if (e.hasCollision()) {
            e.removeLive();
            removedCount++;
        } else {
            METER(stats.removeFrees++);
            e.clearLive();
        }
        entryCount--;
        mutationCount++;
    }
1366 void checkUnderloaded()
1367 {
1368 if (underloaded()) {
1369 METER(stats.shrinks++);
1370 (void) changeTableSize(-1);
1371 }
1372 }
1374 // Resize the table down to the largest capacity which doesn't underload the
1375 // table. Since we call checkUnderloaded() on every remove, you only need
1376 // to call this after a bulk removal of items done without calling remove().
1377 void compactIfUnderloaded()
1378 {
1379 int32_t resizeLog2 = 0;
1380 uint32_t newCapacity = capacity();
1381 while (wouldBeUnderloaded(newCapacity, entryCount)) {
1382 newCapacity = newCapacity >> 1;
1383 resizeLog2--;
1384 }
1386 if (resizeLog2 != 0) {
1387 changeTableSize(resizeLog2);
1388 }
1389 }
    // This is identical to changeTableSize(currentSize), but without requiring
    // a second table. We do this by recycling the collision bits to tell us if
    // the element is already inserted or still waiting to be inserted. Since
    // already-inserted elements win any conflicts, we get the same table as we
    // would have gotten through random insertion order.
    void rehashTableInPlace()
    {
        METER(stats.rehashes++);
        removedCount = 0;
        // Clear all collision bits; below, a set bit means "already placed".
        for (size_t i = 0; i < capacity(); ++i)
            table[i].unsetCollision();

        for (size_t i = 0; i < capacity();) {
            Entry *src = &table[i];

            // Skip empty/removed slots and entries already placed.
            if (!src->isLive() || src->hasCollision()) {
                ++i;
                continue;
            }

            // Probe for this entry's slot; evict whatever unplaced entry
            // occupies it (the evicted entry is then placed from slot i on
            // the next loop iteration, since i is not advanced).
            HashNumber keyHash = src->getKeyHash();
            HashNumber h1 = hash1(keyHash);
            DoubleHash dh = hash2(keyHash);
            Entry *tgt = &table[h1];
            while (true) {
                if (!tgt->hasCollision()) {
                    src->swap(tgt);
                    tgt->setCollision();
                    break;
                }

                h1 = applyDoubleHash(h1, dh);
                tgt = &table[h1];
            }
        }

        // TODO: this algorithm leaves collision bits on *all* elements, even if
        // they are on no collision path. We have the option of setting the
        // collision bits correctly on a subsequent pass or skipping the rehash
        // unless we are totally filled with tombstones: benchmark to find out
        // which approach is best.
    }
  public:
    // Drop every entry while keeping the current entry storage; capacity is
    // unchanged and |gen| is not bumped (unlike finish()).
    void clear()
    {
        // IsPod is a compile-time constant, so the dead branch folds away:
        // POD entries can be wiped with a single memset.
        if (mozilla::IsPod<Entry>::value) {
            memset(table, 0, sizeof(*table) * capacity());
        } else {
            uint32_t tableCapacity = capacity();
            for (Entry *e = table, *end = table + tableCapacity; e < end; ++e)
                e->clear();
        }
        removedCount = 0;
        entryCount = 0;
        mutationCount++;
    }
    // Destroy all entries and free the entry storage, returning the table
    // to its pre-init() state; init() may be called again afterwards.
    void finish()
    {
        MOZ_ASSERT(!entered);

        if (!table)
            return;

        destroyTable(*this, table, capacity());
        table = nullptr;
        gen++;
        entryCount = 0;
        removedCount = 0;
        mutationCount++;
    }
    // Range over every entry slot; the table must be initialized.
    Range all() const
    {
        MOZ_ASSERT(table);
        return Range(*this, table, table + capacity());
    }
1470 bool empty() const
1471 {
1472 MOZ_ASSERT(table);
1473 return !entryCount;
1474 }
    // Number of live entries.
    uint32_t count() const
    {
        MOZ_ASSERT(table);
        return entryCount;
    }

    // Total number of entry slots; always a power of two, recomputed from
    // the stored hash shift.
    uint32_t capacity() const
    {
        MOZ_ASSERT(table);
        return JS_BIT(sHashBits - hashShift);
    }

    // Entry-storage generation; bumped whenever the storage changes
    // (changeTableSize, finish, Enum rekeying) and recorded by AddPtr in
    // DEBUG builds to detect stale pointers.
    uint32_t generation() const
    {
        MOZ_ASSERT(table);
        return gen;
    }
    // Malloc'd bytes owned by the table (the entry storage only).
    size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const
    {
        return mallocSizeOf(table);
    }

    // As above, plus the HashTable object itself.
    size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const
    {
        return mallocSizeOf(this) + sizeOfExcludingThis(mallocSizeOf);
    }
    // Look up |l|; the returned Ptr is found() iff |l| is present.
    Ptr lookup(const Lookup &l) const
    {
        mozilla::ReentrancyGuard g(*this);
        HashNumber keyHash = prepareHash(l);
        return Ptr(lookup(l, keyHash, 0), *this);
    }
    // Identical to lookup() except that it skips the ReentrancyGuard; per
    // the name, presumably safe only while no thread mutates the table --
    // callers must guarantee that externally.
    Ptr readonlyThreadsafeLookup(const Lookup &l) const
    {
        HashNumber keyHash = prepareHash(l);
        return Ptr(lookup(l, keyHash, 0), *this);
    }
1517 AddPtr lookupForAdd(const Lookup &l) const
1518 {
1519 mozilla::ReentrancyGuard g(*this);
1520 HashNumber keyHash = prepareHash(l);
1521 Entry &entry = lookup(l, keyHash, sCollisionBit);
1522 AddPtr p(entry, *this, keyHash);
1523 return p;
1524 }
    // Fill the free/removed slot found by lookupForAdd() with |u|. |p| must
    // not be found(). Fallible: growing on overload can fail, in which case
    // false is returned and the table is unchanged.
    template <class U>
    bool add(AddPtr &p, U &&u)
    {
        mozilla::ReentrancyGuard g(*this);
        MOZ_ASSERT(table);
        MOZ_ASSERT(!p.found());
        MOZ_ASSERT(!(p.keyHash & sCollisionBit));

        // Changing an entry from removed to live does not affect whether we
        // are overloaded and can be handled separately.
        if (p.entry_->isRemoved()) {
            METER(stats.addOverRemoved++);
            removedCount--;
            // The recycled slot was on a collision path, so keep it tagged.
            p.keyHash |= sCollisionBit;
        } else {
            // Preserve the validity of |p.entry_|.
            RebuildStatus status = checkOverloaded();
            if (status == RehashFailed)
                return false;
            if (status == Rehashed)
                p.entry_ = &findFreeEntry(p.keyHash);
        }

        p.entry_->setLive(p.keyHash, mozilla::Forward<U>(u));
        entryCount++;
        mutationCount++;
#ifdef DEBUG
        p.generation = generation();
        p.mutationCount = mutationCount;
#endif
        return true;
    }
    // Note: |l| may be a reference to a piece of |u|, so this function
    // must take care not to use |l| after moving |u|.
    //
    // Infallibly insert |u| under |l|; per findFreeEntry's assumptions,
    // |l| must not already be present in the table.
    template <class U>
    void putNewInfallible(const Lookup &l, U &&u)
    {
        MOZ_ASSERT(table);

        HashNumber keyHash = prepareHash(l);
        Entry *entry = &findFreeEntry(keyHash);

        if (entry->isRemoved()) {
            METER(stats.addOverRemoved++);
            removedCount--;
            // The recycled slot was on a collision path, so keep it tagged.
            keyHash |= sCollisionBit;
        }

        entry->setLive(keyHash, mozilla::Forward<U>(u));
        entryCount++;
        mutationCount++;
    }
    // Note: |l| may be a reference to a piece of |u|, so this function
    // must take care not to use |l| after moving |u|.
    //
    // Fallible putNewInfallible(): make room first, failing only if the
    // table needs rebuilding and that allocation fails.
    template <class U>
    bool putNew(const Lookup &l, U &&u)
    {
        if (checkOverloaded() == RehashFailed)
            return false;

        putNewInfallible(l, mozilla::Forward<U>(u));
        return true;
    }
    // Note: |l| may be a reference to a piece of |u|, so this function
    // must take care not to use |l| after moving |u|.
    //
    // Re-run the lookup for a possibly-stale AddPtr |p|, then add |u| if
    // |l| is still absent. Returns true unless the add itself fails.
    template <class U>
    bool relookupOrAdd(AddPtr& p, const Lookup &l, U &&u)
    {
#ifdef DEBUG
        p.generation = generation();
        p.mutationCount = mutationCount;
#endif
        {
            // Scoped so the guard is released before add() takes its own.
            mozilla::ReentrancyGuard g(*this);
            MOZ_ASSERT(prepareHash(l) == p.keyHash); // l has not been destroyed
            p.entry_ = &lookup(l, p.keyHash, sCollisionBit);
        }
        return p.found() || add(p, mozilla::Forward<U>(u));
    }
    // Remove the entry |p| points at; |p| must be found(). May shrink the
    // table afterwards if it becomes underloaded.
    void remove(Ptr p)
    {
        MOZ_ASSERT(table);
        mozilla::ReentrancyGuard g(*this);
        MOZ_ASSERT(p.found());
        remove(*p.entry_);
        checkUnderloaded();
    }
    // Change the key of the entry at |p| to |k| (with lookup value |l|) by
    // removing it and re-inserting under the new key. Does not rehash, so
    // removed sentinels may accumulate; see rekeyAndMaybeRehash().
    void rekeyWithoutRehash(Ptr p, const Lookup &l, const Key &k)
    {
        MOZ_ASSERT(table);
        mozilla::ReentrancyGuard g(*this);
        MOZ_ASSERT(p.found());
        // Move the payload out before freeing the slot, then re-insert it
        // with the new key.
        typename HashTableEntry<T>::NonConstT t(mozilla::Move(*p));
        HashPolicy::setKey(t, const_cast<Key &>(k));
        remove(*p.entry_);
        putNewInfallible(l, mozilla::Move(t));
    }
    // rekeyWithoutRehash() followed by an infallible rehash if the table
    // has accumulated too many removed sentinels.
    void rekeyAndMaybeRehash(Ptr p, const Lookup &l, const Key &k)
    {
        rekeyWithoutRehash(p, l, k);
        checkOverRemoved();
    }
1635 #undef METER
1636 };
1638 } // namespace detail
1639 } // namespace js
1641 #endif /* js_HashTable_h */