js/public/HashTable.h

changeset 0
6474c204b198
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/js/public/HashTable.h	Wed Dec 31 06:09:35 2014 +0100
     1.3 @@ -0,0 +1,1641 @@
     1.4 +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
     1.5 + * vim: set ts=8 sts=4 et sw=4 tw=99:
     1.6 + * This Source Code Form is subject to the terms of the Mozilla Public
     1.7 + * License, v. 2.0. If a copy of the MPL was not distributed with this
     1.8 + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
     1.9 +
    1.10 +#ifndef js_HashTable_h
    1.11 +#define js_HashTable_h
    1.12 +
    1.13 +#include "mozilla/Alignment.h"
    1.14 +#include "mozilla/Assertions.h"
    1.15 +#include "mozilla/Attributes.h"
    1.16 +#include "mozilla/Casting.h"
    1.17 +#include "mozilla/DebugOnly.h"
    1.18 +#include "mozilla/MemoryReporting.h"
    1.19 +#include "mozilla/Move.h"
    1.20 +#include "mozilla/NullPtr.h"
    1.21 +#include "mozilla/PodOperations.h"
    1.22 +#include "mozilla/ReentrancyGuard.h"
    1.23 +#include "mozilla/TemplateLib.h"
    1.24 +#include "mozilla/TypeTraits.h"
    1.25 +
    1.26 +#include "js/Utility.h"
    1.27 +
    1.28 +namespace js {
    1.29 +
    1.30 +class TempAllocPolicy;
    1.31 +template <class> struct DefaultHasher;
    1.32 +template <class, class> class HashMapEntry;
    1.33 +namespace detail {
    1.34 +    template <class T> class HashTableEntry;
    1.35 +    template <class T, class HashPolicy, class AllocPolicy> class HashTable;
    1.36 +}
    1.37 +
    1.38 +/*****************************************************************************/
    1.39 +
    1.40 +// A JS-friendly, STL-like container providing a hash-based map from keys to
    1.41 +// values. In particular, HashMap calls constructors and destructors of all
    1.42 +// objects added so non-PODs may be used safely.
    1.43 +//
    1.44 +// Key/Value requirements:
    1.45 +//  - movable, destructible, assignable
    1.46 +// HashPolicy requirements:
    1.47 +//  - see Hash Policy section below
    1.48 +// AllocPolicy:
    1.49 +//  - see jsalloc.h
    1.50 +//
    1.51 +// Note:
    1.52 +// - HashMap is not reentrant: Key/Value/HashPolicy/AllocPolicy members
    1.53 +//   called by HashMap must not call back into the same HashMap object.
    1.54 +// - Due to the lack of exception handling, the user must call |init()|.
    1.55 +template <class Key,
    1.56 +          class Value,
    1.57 +          class HashPolicy = DefaultHasher<Key>,
    1.58 +          class AllocPolicy = TempAllocPolicy>
    1.59 +class HashMap
    1.60 +{
    1.61 +    typedef HashMapEntry<Key, Value> TableEntry;
    1.62 +
    1.63 +    struct MapHashPolicy : HashPolicy
    1.64 +    {
    1.65 +        typedef Key KeyType;
    1.66 +        static const Key &getKey(TableEntry &e) { return e.key(); }
    1.67 +        static void setKey(TableEntry &e, Key &k) { HashPolicy::rekey(e.mutableKey(), k); }
    1.68 +    };
    1.69 +
    1.70 +    typedef detail::HashTable<TableEntry, MapHashPolicy, AllocPolicy> Impl;
    1.71 +    Impl impl;
    1.72 +
    1.73 +  public:
    1.74 +    typedef typename HashPolicy::Lookup Lookup;
    1.75 +    typedef TableEntry Entry;
    1.76 +
    1.77 +    // HashMap construction is fallible (due to OOM); thus the user must call
    1.78 +    // init after constructing a HashMap and check the return value.
    1.79 +    HashMap(AllocPolicy a = AllocPolicy()) : impl(a)  {}
    1.80 +    bool init(uint32_t len = 16)                      { return impl.init(len); }
    1.81 +    bool initialized() const                          { return impl.initialized(); }
    1.82 +
    1.83 +    // Return whether the given lookup value is present in the map. E.g.:
    1.84 +    //
    1.85 +    //   typedef HashMap<int,char> HM;
    1.86 +    //   HM h;
    1.87 +    //   if (HM::Ptr p = h.lookup(3)) {
    1.88 +    //     const HM::Entry &e = *p; // p acts like a pointer to Entry
    1.89 +    //     assert(p->key == 3);     // Entry contains the key
    1.90 +    //     char val = p->value;     // and value
    1.91 +    //   }
    1.92 +    //
     1.93 +    // Also see the definition of Ptr in HashTable below (with T = Entry).
    1.94 +    typedef typename Impl::Ptr Ptr;
    1.95 +    Ptr lookup(const Lookup &l) const                 { return impl.lookup(l); }
    1.96 +
    1.97 +    // Like lookup, but does not assert if two threads call lookup at the same
    1.98 +    // time. Only use this method when none of the threads will modify the map.
    1.99 +    Ptr readonlyThreadsafeLookup(const Lookup &l) const { return impl.readonlyThreadsafeLookup(l); }
   1.100 +
   1.101 +    // Assuming |p.found()|, remove |*p|.
   1.102 +    void remove(Ptr p)                                { impl.remove(p); }
   1.103 +
   1.104 +    // Like |lookup(l)|, but on miss, |p = lookupForAdd(l)| allows efficient
   1.105 +    // insertion of Key |k| (where |HashPolicy::match(k,l) == true|) using
   1.106 +    // |add(p,k,v)|. After |add(p,k,v)|, |p| points to the new Entry. E.g.:
   1.107 +    //
   1.108 +    //   typedef HashMap<int,char> HM;
   1.109 +    //   HM h;
   1.110 +    //   HM::AddPtr p = h.lookupForAdd(3);
   1.111 +    //   if (!p) {
   1.112 +    //     if (!h.add(p, 3, 'a'))
   1.113 +    //       return false;
   1.114 +    //   }
   1.115 +    //   const HM::Entry &e = *p;   // p acts like a pointer to Entry
   1.116 +    //   assert(p->key == 3);       // Entry contains the key
   1.117 +    //   char val = p->value;       // and value
   1.118 +    //
    1.119 +    // Also see the definition of AddPtr in HashTable below (with T = Entry).
   1.120 +    //
   1.121 +    // N.B. The caller must ensure that no mutating hash table operations
   1.122 +    // occur between a pair of |lookupForAdd| and |add| calls. To avoid
   1.123 +    // looking up the key a second time, the caller may use the more efficient
   1.124 +    // relookupOrAdd method. This method reuses part of the hashing computation
   1.125 +    // to more efficiently insert the key if it has not been added. For
   1.126 +    // example, a mutation-handling version of the previous example:
   1.127 +    //
   1.128 +    //    HM::AddPtr p = h.lookupForAdd(3);
   1.129 +    //    if (!p) {
   1.130 +    //      call_that_may_mutate_h();
   1.131 +    //      if (!h.relookupOrAdd(p, 3, 'a'))
   1.132 +    //        return false;
   1.133 +    //    }
   1.134 +    //    const HM::Entry &e = *p;
   1.135 +    //    assert(p->key == 3);
   1.136 +    //    char val = p->value;
   1.137 +    typedef typename Impl::AddPtr AddPtr;
   1.138 +    AddPtr lookupForAdd(const Lookup &l) const {
   1.139 +        return impl.lookupForAdd(l);
   1.140 +    }
   1.141 +
   1.142 +    template<typename KeyInput, typename ValueInput>
   1.143 +    bool add(AddPtr &p, KeyInput &&k, ValueInput &&v) {
   1.144 +        Entry e(mozilla::Forward<KeyInput>(k), mozilla::Forward<ValueInput>(v));
   1.145 +        return impl.add(p, mozilla::Move(e));
   1.146 +    }
   1.147 +
   1.148 +    template<typename KeyInput>
   1.149 +    bool add(AddPtr &p, KeyInput &&k) {
   1.150 +        Entry e(mozilla::Forward<KeyInput>(k), Value());
   1.151 +        return impl.add(p, mozilla::Move(e));
   1.152 +    }
   1.153 +
   1.154 +    template<typename KeyInput, typename ValueInput>
   1.155 +    bool relookupOrAdd(AddPtr &p, KeyInput &&k, ValueInput &&v) {
   1.156 +        Entry e(mozilla::Forward<KeyInput>(k), mozilla::Forward<ValueInput>(v));
   1.157 +        return impl.relookupOrAdd(p, e.key(), mozilla::Move(e));
   1.158 +    }
   1.159 +
   1.160 +    // |all()| returns a Range containing |count()| elements. E.g.:
   1.161 +    //
   1.162 +    //   typedef HashMap<int,char> HM;
   1.163 +    //   HM h;
   1.164 +    //   for (HM::Range r = h.all(); !r.empty(); r.popFront())
   1.165 +    //     char c = r.front().value();
   1.166 +    //
    1.167 +    // Also see the definition of Range in HashTable below (with T = Entry).
   1.168 +    typedef typename Impl::Range Range;
   1.169 +    Range all() const                                 { return impl.all(); }
   1.170 +
   1.171 +    // Typedef for the enumeration class. An Enum may be used to examine and
   1.172 +    // remove table entries:
   1.173 +    //
   1.174 +    //   typedef HashMap<int,char> HM;
   1.175 +    //   HM s;
   1.176 +    //   for (HM::Enum e(s); !e.empty(); e.popFront())
   1.177 +    //     if (e.front().value() == 'l')
   1.178 +    //       e.removeFront();
   1.179 +    //
   1.180 +    // Table resize may occur in Enum's destructor. Also see the definition of
    1.181 +    // Enum in HashTable below (with T = Entry).
   1.182 +    typedef typename Impl::Enum Enum;
   1.183 +
   1.184 +    // Remove all entries. This does not shrink the table. For that consider
   1.185 +    // using the finish() method.
   1.186 +    void clear()                                      { impl.clear(); }
   1.187 +
   1.188 +    // Remove all entries without triggering destructors. This method is unsafe.
   1.189 +    void clearWithoutCallingDestructors()             { impl.clearWithoutCallingDestructors(); }
   1.190 +
   1.191 +    // Remove all the entries and release all internal buffers. The map must
   1.192 +    // be initialized again before any use.
   1.193 +    void finish()                                     { impl.finish(); }
   1.194 +
   1.195 +    // Does the table contain any entries?
   1.196 +    bool empty() const                                { return impl.empty(); }
   1.197 +
   1.198 +    // Number of live elements in the map.
   1.199 +    uint32_t count() const                            { return impl.count(); }
   1.200 +
    1.201 +    // Total number of allocations in the dynamic table. Note: resize will
   1.202 +    // happen well before count() == capacity().
   1.203 +    size_t capacity() const                           { return impl.capacity(); }
   1.204 +
   1.205 +    // Don't just call |impl.sizeOfExcludingThis()| because there's no
   1.206 +    // guarantee that |impl| is the first field in HashMap.
   1.207 +    size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
   1.208 +        return impl.sizeOfExcludingThis(mallocSizeOf);
   1.209 +    }
   1.210 +    size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
   1.211 +        return mallocSizeOf(this) + impl.sizeOfExcludingThis(mallocSizeOf);
   1.212 +    }
   1.213 +
   1.214 +    // If |generation()| is the same before and after a HashMap operation,
   1.215 +    // pointers into the table remain valid.
   1.216 +    unsigned generation() const                       { return impl.generation(); }
   1.217 +
   1.218 +    /************************************************** Shorthand operations */
   1.219 +
   1.220 +    bool has(const Lookup &l) const {
   1.221 +        return impl.lookup(l) != nullptr;
   1.222 +    }
   1.223 +
   1.224 +    // Overwrite existing value with v. Return false on oom.
   1.225 +    template<typename KeyInput, typename ValueInput>
   1.226 +    bool put(KeyInput &&k, ValueInput &&v) {
   1.227 +        AddPtr p = lookupForAdd(k);
   1.228 +        if (p) {
   1.229 +            p->value() = mozilla::Forward<ValueInput>(v);
   1.230 +            return true;
   1.231 +        }
   1.232 +        return add(p, mozilla::Forward<KeyInput>(k), mozilla::Forward<ValueInput>(v));
   1.233 +    }
   1.234 +
   1.235 +    // Like put, but assert that the given key is not already present.
   1.236 +    template<typename KeyInput, typename ValueInput>
   1.237 +    bool putNew(KeyInput &&k, ValueInput &&v) {
   1.238 +        Entry e(mozilla::Forward<KeyInput>(k), mozilla::Forward<ValueInput>(v));
   1.239 +        return impl.putNew(e.key(), mozilla::Move(e));
   1.240 +    }
   1.241 +
   1.242 +    // Add (k,defaultValue) if |k| is not found. Return a false-y Ptr on oom.
   1.243 +    Ptr lookupWithDefault(const Key &k, const Value &defaultValue) {
   1.244 +        AddPtr p = lookupForAdd(k);
   1.245 +        if (p)
   1.246 +            return p;
   1.247 +        (void)add(p, k, defaultValue);  // p is left false-y on oom.
   1.248 +        return p;
   1.249 +    }
   1.250 +
   1.251 +    // Remove if present.
   1.252 +    void remove(const Lookup &l) {
   1.253 +        if (Ptr p = lookup(l))
   1.254 +            remove(p);
   1.255 +    }
   1.256 +
   1.257 +    // Infallibly rekey one entry, if necessary.
   1.258 +    // Requires template parameters Key and HashPolicy::Lookup to be the same type.
   1.259 +    void rekeyIfMoved(const Key &old_key, const Key &new_key) {
   1.260 +        if (old_key != new_key)
   1.261 +            rekeyAs(old_key, new_key, new_key);
   1.262 +    }
   1.263 +
   1.264 +    // Infallibly rekey one entry, if present.
   1.265 +    void rekeyAs(const Lookup &old_lookup, const Lookup &new_lookup, const Key &new_key) {
   1.266 +        if (Ptr p = lookup(old_lookup))
   1.267 +            impl.rekeyAndMaybeRehash(p, new_lookup, new_key);
   1.268 +    }
   1.269 +
   1.270 +    // HashMap is movable
   1.271 +    HashMap(HashMap &&rhs) : impl(mozilla::Move(rhs.impl)) {}
   1.272 +    void operator=(HashMap &&rhs) {
   1.273 +        MOZ_ASSERT(this != &rhs, "self-move assignment is prohibited");
   1.274 +        impl = mozilla::Move(rhs.impl);
   1.275 +    }
   1.276 +
   1.277 +  private:
   1.278 +    // HashMap is not copyable or assignable
   1.279 +    HashMap(const HashMap &hm) MOZ_DELETE;
   1.280 +    HashMap &operator=(const HashMap &hm) MOZ_DELETE;
   1.281 +
   1.282 +    friend class Impl::Enum;
   1.283 +};
   1.284 +
   1.285 +/*****************************************************************************/
   1.286 +
   1.287 +// A JS-friendly, STL-like container providing a hash-based set of values. In
   1.288 +// particular, HashSet calls constructors and destructors of all objects added
   1.289 +// so non-PODs may be used safely.
   1.290 +//
   1.291 +// T requirements:
   1.292 +//  - movable, destructible, assignable
   1.293 +// HashPolicy requirements:
   1.294 +//  - see Hash Policy section below
   1.295 +// AllocPolicy:
   1.296 +//  - see jsalloc.h
   1.297 +//
   1.298 +// Note:
   1.299 +// - HashSet is not reentrant: T/HashPolicy/AllocPolicy members called by
   1.300 +//   HashSet must not call back into the same HashSet object.
   1.301 +// - Due to the lack of exception handling, the user must call |init()|.
   1.302 +template <class T,
   1.303 +          class HashPolicy = DefaultHasher<T>,
   1.304 +          class AllocPolicy = TempAllocPolicy>
   1.305 +class HashSet
   1.306 +{
   1.307 +    struct SetOps : HashPolicy
   1.308 +    {
   1.309 +        typedef T KeyType;
   1.310 +        static const KeyType &getKey(const T &t) { return t; }
   1.311 +        static void setKey(T &t, KeyType &k) { HashPolicy::rekey(t, k); }
   1.312 +    };
   1.313 +
   1.314 +    typedef detail::HashTable<const T, SetOps, AllocPolicy> Impl;
   1.315 +    Impl impl;
   1.316 +
   1.317 +  public:
   1.318 +    typedef typename HashPolicy::Lookup Lookup;
   1.319 +    typedef T Entry;
   1.320 +
   1.321 +    // HashSet construction is fallible (due to OOM); thus the user must call
   1.322 +    // init after constructing a HashSet and check the return value.
   1.323 +    HashSet(AllocPolicy a = AllocPolicy()) : impl(a)  {}
   1.324 +    bool init(uint32_t len = 16)                      { return impl.init(len); }
   1.325 +    bool initialized() const                          { return impl.initialized(); }
   1.326 +
    1.327 +    // Return whether the given lookup value is present in the set. E.g.:
   1.328 +    //
   1.329 +    //   typedef HashSet<int> HS;
   1.330 +    //   HS h;
   1.331 +    //   if (HS::Ptr p = h.lookup(3)) {
   1.332 +    //     assert(*p == 3);   // p acts like a pointer to int
   1.333 +    //   }
   1.334 +    //
    1.335 +    // Also see the definition of Ptr in HashTable below.
   1.336 +    typedef typename Impl::Ptr Ptr;
   1.337 +    Ptr lookup(const Lookup &l) const                 { return impl.lookup(l); }
   1.338 +
   1.339 +    // Like lookup, but does not assert if two threads call lookup at the same
    1.340 +    // time. Only use this method when none of the threads will modify the set.
   1.341 +    Ptr readonlyThreadsafeLookup(const Lookup &l) const { return impl.readonlyThreadsafeLookup(l); }
   1.342 +
   1.343 +    // Assuming |p.found()|, remove |*p|.
   1.344 +    void remove(Ptr p)                                { impl.remove(p); }
   1.345 +
   1.346 +    // Like |lookup(l)|, but on miss, |p = lookupForAdd(l)| allows efficient
   1.347 +    // insertion of T value |t| (where |HashPolicy::match(t,l) == true|) using
   1.348 +    // |add(p,t)|. After |add(p,t)|, |p| points to the new element. E.g.:
   1.349 +    //
   1.350 +    //   typedef HashSet<int> HS;
   1.351 +    //   HS h;
   1.352 +    //   HS::AddPtr p = h.lookupForAdd(3);
   1.353 +    //   if (!p) {
   1.354 +    //     if (!h.add(p, 3))
   1.355 +    //       return false;
   1.356 +    //   }
   1.357 +    //   assert(*p == 3);   // p acts like a pointer to int
   1.358 +    //
    1.359 +    // Also see the definition of AddPtr in HashTable below.
   1.360 +    //
   1.361 +    // N.B. The caller must ensure that no mutating hash table operations
   1.362 +    // occur between a pair of |lookupForAdd| and |add| calls. To avoid
   1.363 +    // looking up the key a second time, the caller may use the more efficient
   1.364 +    // relookupOrAdd method. This method reuses part of the hashing computation
   1.365 +    // to more efficiently insert the key if it has not been added. For
   1.366 +    // example, a mutation-handling version of the previous example:
   1.367 +    //
   1.368 +    //    HS::AddPtr p = h.lookupForAdd(3);
   1.369 +    //    if (!p) {
   1.370 +    //      call_that_may_mutate_h();
   1.371 +    //      if (!h.relookupOrAdd(p, 3, 3))
   1.372 +    //        return false;
   1.373 +    //    }
   1.374 +    //    assert(*p == 3);
   1.375 +    //
   1.376 +    // Note that relookupOrAdd(p,l,t) performs Lookup using |l| and adds the
   1.377 +    // entry |t|, where the caller ensures match(l,t).
   1.378 +    typedef typename Impl::AddPtr AddPtr;
   1.379 +    AddPtr lookupForAdd(const Lookup &l) const        { return impl.lookupForAdd(l); }
   1.380 +
   1.381 +    template <typename U>
   1.382 +    bool add(AddPtr &p, U &&u) {
   1.383 +        return impl.add(p, mozilla::Forward<U>(u));
   1.384 +    }
   1.385 +
   1.386 +    template <typename U>
   1.387 +    bool relookupOrAdd(AddPtr &p, const Lookup &l, U &&u) {
   1.388 +        return impl.relookupOrAdd(p, l, mozilla::Forward<U>(u));
   1.389 +    }
   1.390 +
   1.391 +    // |all()| returns a Range containing |count()| elements:
   1.392 +    //
   1.393 +    //   typedef HashSet<int> HS;
   1.394 +    //   HS h;
   1.395 +    //   for (HS::Range r = h.all(); !r.empty(); r.popFront())
   1.396 +    //     int i = r.front();
   1.397 +    //
    1.398 +    // Also see the definition of Range in HashTable below.
   1.399 +    typedef typename Impl::Range Range;
   1.400 +    Range all() const                                 { return impl.all(); }
   1.401 +
   1.402 +    // Typedef for the enumeration class. An Enum may be used to examine and
   1.403 +    // remove table entries:
   1.404 +    //
   1.405 +    //   typedef HashSet<int> HS;
   1.406 +    //   HS s;
   1.407 +    //   for (HS::Enum e(s); !e.empty(); e.popFront())
   1.408 +    //     if (e.front() == 42)
   1.409 +    //       e.removeFront();
   1.410 +    //
   1.411 +    // Table resize may occur in Enum's destructor. Also see the definition of
    1.412 +    // Enum in HashTable below.
   1.413 +    typedef typename Impl::Enum Enum;
   1.414 +
   1.415 +    // Remove all entries. This does not shrink the table. For that consider
   1.416 +    // using the finish() method.
   1.417 +    void clear()                                      { impl.clear(); }
   1.418 +
   1.419 +    // Remove all the entries and release all internal buffers. The set must
   1.420 +    // be initialized again before any use.
   1.421 +    void finish()                                     { impl.finish(); }
   1.422 +
   1.423 +    // Does the table contain any entries?
   1.424 +    bool empty() const                                { return impl.empty(); }
   1.425 +
    1.426 +    // Number of live elements in the set.
   1.427 +    uint32_t count() const                            { return impl.count(); }
   1.428 +
    1.429 +    // Total number of allocations in the dynamic table. Note: resize will
   1.430 +    // happen well before count() == capacity().
   1.431 +    size_t capacity() const                           { return impl.capacity(); }
   1.432 +
   1.433 +    // Don't just call |impl.sizeOfExcludingThis()| because there's no
   1.434 +    // guarantee that |impl| is the first field in HashSet.
   1.435 +    size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
   1.436 +        return impl.sizeOfExcludingThis(mallocSizeOf);
   1.437 +    }
   1.438 +    size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
   1.439 +        return mallocSizeOf(this) + impl.sizeOfExcludingThis(mallocSizeOf);
   1.440 +    }
   1.441 +
   1.442 +    // If |generation()| is the same before and after a HashSet operation,
   1.443 +    // pointers into the table remain valid.
   1.444 +    unsigned generation() const                       { return impl.generation(); }
   1.445 +
   1.446 +    /************************************************** Shorthand operations */
   1.447 +
   1.448 +    bool has(const Lookup &l) const {
   1.449 +        return impl.lookup(l) != nullptr;
   1.450 +    }
   1.451 +
   1.452 +    // Add |u| if it is not present already. Return false on oom.
   1.453 +    template <typename U>
   1.454 +    bool put(U &&u) {
   1.455 +        AddPtr p = lookupForAdd(u);
   1.456 +        return p ? true : add(p, mozilla::Forward<U>(u));
   1.457 +    }
   1.458 +
   1.459 +    // Like put, but assert that the given key is not already present.
   1.460 +    template <typename U>
   1.461 +    bool putNew(U &&u) {
   1.462 +        return impl.putNew(u, mozilla::Forward<U>(u));
   1.463 +    }
   1.464 +
   1.465 +    template <typename U>
   1.466 +    bool putNew(const Lookup &l, U &&u) {
   1.467 +        return impl.putNew(l, mozilla::Forward<U>(u));
   1.468 +    }
   1.469 +
   1.470 +    void remove(const Lookup &l) {
   1.471 +        if (Ptr p = lookup(l))
   1.472 +            remove(p);
   1.473 +    }
   1.474 +
    1.475 +    // Infallibly rekey one entry, if necessary.
   1.476 +    // Requires template parameters T and HashPolicy::Lookup to be the same type.
   1.477 +    void rekeyIfMoved(const Lookup &old_value, const T &new_value) {
   1.478 +        if (old_value != new_value)
   1.479 +            rekeyAs(old_value, new_value, new_value);
   1.480 +    }
   1.481 +
   1.482 +    // Infallibly rekey one entry, if present.
   1.483 +    void rekeyAs(const Lookup &old_lookup, const Lookup &new_lookup, const T &new_value) {
   1.484 +        if (Ptr p = lookup(old_lookup))
   1.485 +            impl.rekeyAndMaybeRehash(p, new_lookup, new_value);
   1.486 +    }
   1.487 +
   1.488 +    // HashSet is movable
   1.489 +    HashSet(HashSet &&rhs) : impl(mozilla::Move(rhs.impl)) {}
   1.490 +    void operator=(HashSet &&rhs) {
   1.491 +        MOZ_ASSERT(this != &rhs, "self-move assignment is prohibited");
   1.492 +        impl = mozilla::Move(rhs.impl);
   1.493 +    }
   1.494 +
   1.495 +  private:
   1.496 +    // HashSet is not copyable or assignable
   1.497 +    HashSet(const HashSet &hs) MOZ_DELETE;
   1.498 +    HashSet &operator=(const HashSet &hs) MOZ_DELETE;
   1.499 +
   1.500 +    friend class Impl::Enum;
   1.501 +};
   1.502 +
   1.503 +/*****************************************************************************/
   1.504 +
   1.505 +// Hash Policy
   1.506 +//
   1.507 +// A hash policy P for a hash table with key-type Key must provide:
   1.508 +//  - a type |P::Lookup| to use to lookup table entries;
   1.509 +//  - a static member function |P::hash| with signature
   1.510 +//
   1.511 +//      static js::HashNumber hash(Lookup)
   1.512 +//
   1.513 +//    to use to hash the lookup type; and
   1.514 +//  - a static member function |P::match| with signature
   1.515 +//
   1.516 +//      static bool match(Key, Lookup)
   1.517 +//
   1.518 +//    to use to test equality of key and lookup values.
   1.519 +//
   1.520 +// Normally, Lookup = Key. In general, though, different values and types of
   1.521 +// values can be used to lookup and store. If a Lookup value |l| is != to the
   1.522 +// added Key value |k|, the user must ensure that |P::match(k,l)|. E.g.:
   1.523 +//
   1.524 +//   js::HashSet<Key, P>::AddPtr p = h.lookup(l);
   1.525 +//   if (!p) {
   1.526 +//     assert(P::match(k, l));  // must hold
   1.527 +//     h.add(p, k);
   1.528 +//   }
   1.529 +
   1.530 +// Pointer hashing policy that strips the lowest zeroBits when calculating the
   1.531 +// hash to improve key distribution.
   1.532 +template <typename Key, size_t zeroBits>
   1.533 +struct PointerHasher
   1.534 +{
   1.535 +    typedef Key Lookup;
   1.536 +    static HashNumber hash(const Lookup &l) {
   1.537 +        MOZ_ASSERT(!JS::IsPoisonedPtr(l));
   1.538 +        size_t word = reinterpret_cast<size_t>(l) >> zeroBits;
   1.539 +        JS_STATIC_ASSERT(sizeof(HashNumber) == 4);
   1.540 +#if JS_BITS_PER_WORD == 32
   1.541 +        return HashNumber(word);
   1.542 +#else
   1.543 +        JS_STATIC_ASSERT(sizeof word == 8);
   1.544 +        return HashNumber((word >> 32) ^ word);
   1.545 +#endif
   1.546 +    }
   1.547 +    static bool match(const Key &k, const Lookup &l) {
   1.548 +        MOZ_ASSERT(!JS::IsPoisonedPtr(k));
   1.549 +        MOZ_ASSERT(!JS::IsPoisonedPtr(l));
   1.550 +        return k == l;
   1.551 +    }
   1.552 +    static void rekey(Key &k, const Key& newKey) {
   1.553 +        k = newKey;
   1.554 +    }
   1.555 +};
   1.556 +
   1.557 +// Default hash policy: just use the 'lookup' value. This of course only
   1.558 +// works if the lookup value is integral. HashTable applies ScrambleHashCode to
   1.559 +// the result of the 'hash' which means that it is 'ok' if the lookup value is
   1.560 +// not well distributed over the HashNumber domain.
   1.561 +template <class Key>
   1.562 +struct DefaultHasher
   1.563 +{
   1.564 +    typedef Key Lookup;
   1.565 +    static HashNumber hash(const Lookup &l) {
   1.566 +        // Hash if can implicitly cast to hash number type.
   1.567 +        return l;
   1.568 +    }
   1.569 +    static bool match(const Key &k, const Lookup &l) {
   1.570 +        // Use builtin or overloaded operator==.
   1.571 +        return k == l;
   1.572 +    }
   1.573 +    static void rekey(Key &k, const Key& newKey) {
   1.574 +        k = newKey;
   1.575 +    }
   1.576 +};
   1.577 +
   1.578 +// Specialize hashing policy for pointer types. It assumes that the type is
   1.579 +// at least word-aligned. For types with smaller size use PointerHasher.
   1.580 +template <class T>
   1.581 +struct DefaultHasher<T *> : PointerHasher<T *, mozilla::tl::FloorLog2<sizeof(void *)>::value>
   1.582 +{};
   1.583 +
   1.584 +// For doubles, we can xor the two uint32s.
   1.585 +template <>
   1.586 +struct DefaultHasher<double>
   1.587 +{
   1.588 +    typedef double Lookup;
   1.589 +    static HashNumber hash(double d) {
   1.590 +        JS_STATIC_ASSERT(sizeof(HashNumber) == 4);
   1.591 +        uint64_t u = mozilla::BitwiseCast<uint64_t>(d);
   1.592 +        return HashNumber(u ^ (u >> 32));
   1.593 +    }
   1.594 +    static bool match(double lhs, double rhs) {
   1.595 +        return mozilla::BitwiseCast<uint64_t>(lhs) == mozilla::BitwiseCast<uint64_t>(rhs);
   1.596 +    }
   1.597 +};
   1.598 +
   1.599 +template <>
   1.600 +struct DefaultHasher<float>
   1.601 +{
   1.602 +    typedef float Lookup;
   1.603 +    static HashNumber hash(float f) {
   1.604 +        JS_STATIC_ASSERT(sizeof(HashNumber) == 4);
   1.605 +        return HashNumber(mozilla::BitwiseCast<uint32_t>(f));
   1.606 +    }
   1.607 +    static bool match(float lhs, float rhs) {
   1.608 +        return mozilla::BitwiseCast<uint32_t>(lhs) == mozilla::BitwiseCast<uint32_t>(rhs);
   1.609 +    }
   1.610 +};
   1.611 +
   1.612 +/*****************************************************************************/
   1.613 +
   1.614 +// Both HashMap and HashSet are implemented by a single HashTable that is even
   1.615 +// more heavily parameterized than the other two. This leaves HashTable gnarly
   1.616 +// and extremely coupled to HashMap and HashSet; thus code should not use
   1.617 +// HashTable directly.
   1.618 +
   1.619 +template <class Key, class Value>
   1.620 +class HashMapEntry
   1.621 +{
   1.622 +    Key key_;
   1.623 +    Value value_;
   1.624 +
   1.625 +    template <class, class, class> friend class detail::HashTable;
   1.626 +    template <class> friend class detail::HashTableEntry;
   1.627 +    template <class, class, class, class> friend class HashMap;
   1.628 +
   1.629 +    Key & mutableKey() { return key_; }
   1.630 +
   1.631 +  public:
   1.632 +    template<typename KeyInput, typename ValueInput>
   1.633 +    HashMapEntry(KeyInput &&k, ValueInput &&v)
   1.634 +      : key_(mozilla::Forward<KeyInput>(k)),
   1.635 +        value_(mozilla::Forward<ValueInput>(v))
   1.636 +    {}
   1.637 +
   1.638 +    HashMapEntry(HashMapEntry &&rhs)
   1.639 +      : key_(mozilla::Move(rhs.key_)),
   1.640 +        value_(mozilla::Move(rhs.value_))
   1.641 +    {}
   1.642 +
   1.643 +    typedef Key KeyType;
   1.644 +    typedef Value ValueType;
   1.645 +
   1.646 +    const Key & key() const { return key_; }
   1.647 +    const Value & value() const { return value_; }
   1.648 +    Value & value() { return value_; }
   1.649 +
   1.650 +  private:
   1.651 +    HashMapEntry(const HashMapEntry &) MOZ_DELETE;
   1.652 +    void operator=(const HashMapEntry &) MOZ_DELETE;
   1.653 +};
   1.654 +
   1.655 +} // namespace js
   1.656 +
   1.657 +namespace mozilla {
   1.658 +
   1.659 +template <typename T>
   1.660 +struct IsPod<js::detail::HashTableEntry<T> > : IsPod<T> {};
   1.661 +
   1.662 +template <typename K, typename V>
   1.663 +struct IsPod<js::HashMapEntry<K, V> >
   1.664 +  : IntegralConstant<bool, IsPod<K>::value && IsPod<V>::value>
   1.665 +{};
   1.666 +
   1.667 +} // namespace mozilla
   1.668 +
   1.669 +namespace js {
   1.670 +
   1.671 +namespace detail {
   1.672 +
   1.673 +template <class T, class HashPolicy, class AllocPolicy>
   1.674 +class HashTable;
   1.675 +
   1.676 +template <class T>
   1.677 +class HashTableEntry
   1.678 +{
   1.679 +    template <class, class, class> friend class HashTable;
   1.680 +    typedef typename mozilla::RemoveConst<T>::Type NonConstT;
   1.681 +
   1.682 +    HashNumber keyHash;
   1.683 +    mozilla::AlignedStorage2<NonConstT> mem;
   1.684 +
   1.685 +    static const HashNumber sFreeKey = 0;
   1.686 +    static const HashNumber sRemovedKey = 1;
   1.687 +    static const HashNumber sCollisionBit = 1;
   1.688 +
   1.689 +    static bool isLiveHash(HashNumber hash)
   1.690 +    {
   1.691 +        return hash > sRemovedKey;
   1.692 +    }
   1.693 +
   1.694 +    HashTableEntry(const HashTableEntry &) MOZ_DELETE;
   1.695 +    void operator=(const HashTableEntry &) MOZ_DELETE;
   1.696 +    ~HashTableEntry() MOZ_DELETE;
   1.697 +
   1.698 +  public:
   1.699 +    // NB: HashTableEntry is treated as a POD: no constructor or destructor calls.
   1.700 +
   1.701 +    void destroyIfLive() {
   1.702 +        if (isLive())
   1.703 +            mem.addr()->~T();
   1.704 +    }
   1.705 +
   1.706 +    void destroy() {
   1.707 +        MOZ_ASSERT(isLive());
   1.708 +        mem.addr()->~T();
   1.709 +    }
   1.710 +
   1.711 +    void swap(HashTableEntry *other) {
   1.712 +        mozilla::Swap(keyHash, other->keyHash);
   1.713 +        mozilla::Swap(mem, other->mem);
   1.714 +    }
   1.715 +
   1.716 +    T &get() { MOZ_ASSERT(isLive()); return *mem.addr(); }
   1.717 +
   1.718 +    bool isFree() const    { return keyHash == sFreeKey; }
   1.719 +    void clearLive()       { MOZ_ASSERT(isLive()); keyHash = sFreeKey; mem.addr()->~T(); }
   1.720 +    void clear()           { if (isLive()) mem.addr()->~T(); keyHash = sFreeKey; }
   1.721 +    bool isRemoved() const { return keyHash == sRemovedKey; }
   1.722 +    void removeLive()      { MOZ_ASSERT(isLive()); keyHash = sRemovedKey; mem.addr()->~T(); }
   1.723 +    bool isLive() const    { return isLiveHash(keyHash); }
   1.724 +    void setCollision()               { MOZ_ASSERT(isLive()); keyHash |= sCollisionBit; }
   1.725 +    void setCollision(HashNumber bit) { MOZ_ASSERT(isLive()); keyHash |= bit; }
   1.726 +    void unsetCollision()             { keyHash &= ~sCollisionBit; }
   1.727 +    bool hasCollision() const         { return keyHash & sCollisionBit; }
   1.728 +    bool matchHash(HashNumber hn)     { return (keyHash & ~sCollisionBit) == hn; }
   1.729 +    HashNumber getKeyHash() const     { return keyHash & ~sCollisionBit; }
   1.730 +
   1.731 +    template <class U>
   1.732 +    void setLive(HashNumber hn, U &&u)
   1.733 +    {
   1.734 +        MOZ_ASSERT(!isLive());
   1.735 +        keyHash = hn;
   1.736 +        new(mem.addr()) T(mozilla::Forward<U>(u));
   1.737 +        MOZ_ASSERT(isLive());
   1.738 +    }
   1.739 +};
   1.740 +
   1.741 +template <class T, class HashPolicy, class AllocPolicy>
   1.742 +class HashTable : private AllocPolicy
   1.743 +{
   1.744 +    typedef typename mozilla::RemoveConst<T>::Type NonConstT;
   1.745 +    typedef typename HashPolicy::KeyType Key;
   1.746 +    typedef typename HashPolicy::Lookup Lookup;
   1.747 +
   1.748 +  public:
   1.749 +    typedef HashTableEntry<T> Entry;
   1.750 +
   1.751 +    // A nullable pointer to a hash table element. A Ptr |p| can be tested
   1.752 +    // either explicitly |if (p.found()) p->...| or using boolean conversion
   1.753 +    // |if (p) p->...|. Ptr objects must not be used after any mutating hash
   1.754 +    // table operations unless |generation()| is tested.
   1.755 +    class Ptr
   1.756 +    {
   1.757 +        friend class HashTable;
   1.758 +        typedef void (Ptr::* ConvertibleToBool)();
   1.759 +        void nonNull() {}
   1.760 +
   1.761 +        Entry *entry_;
   1.762 +#ifdef DEBUG
   1.763 +        const HashTable *table_;
   1.764 +        uint32_t generation;
   1.765 +#endif
   1.766 +
   1.767 +      protected:
   1.768 +        Ptr(Entry &entry, const HashTable &tableArg)
   1.769 +          : entry_(&entry)
   1.770 +#ifdef DEBUG
   1.771 +          , table_(&tableArg)
   1.772 +          , generation(tableArg.generation())
   1.773 +#endif
   1.774 +        {}
   1.775 +
   1.776 +      public:
   1.777 +        // Leaves Ptr uninitialized.
   1.778 +        Ptr() {
   1.779 +#ifdef JS_DEBUG
   1.780 +            entry_ = (Entry *)0xbad;
   1.781 +#endif
   1.782 +        }
   1.783 +
   1.784 +        bool found() const {
   1.785 +            MOZ_ASSERT(generation == table_->generation());
   1.786 +            return entry_->isLive();
   1.787 +        }
   1.788 +
   1.789 +        operator ConvertibleToBool() const {
   1.790 +            return found() ? &Ptr::nonNull : 0;
   1.791 +        }
   1.792 +
   1.793 +        bool operator==(const Ptr &rhs) const {
   1.794 +            MOZ_ASSERT(found() && rhs.found());
   1.795 +            return entry_ == rhs.entry_;
   1.796 +        }
   1.797 +
   1.798 +        bool operator!=(const Ptr &rhs) const {
   1.799 +            MOZ_ASSERT(generation == table_->generation());
   1.800 +            return !(*this == rhs);
   1.801 +        }
   1.802 +
   1.803 +        T &operator*() const {
   1.804 +            MOZ_ASSERT(generation == table_->generation());
   1.805 +            return entry_->get();
   1.806 +        }
   1.807 +
   1.808 +        T *operator->() const {
   1.809 +            MOZ_ASSERT(generation == table_->generation());
   1.810 +            return &entry_->get();
   1.811 +        }
   1.812 +    };
   1.813 +
   1.814 +    // A Ptr that can be used to add a key after a failed lookup.
    class AddPtr : public Ptr
    {
        friend class HashTable;
        HashNumber keyHash;                           // prepared hash of the sought key
        mozilla::DebugOnly<uint64_t> mutationCount;   // table mutation count at lookupForAdd time

        // Only HashTable::lookupForAdd constructs a meaningful AddPtr.
        AddPtr(Entry &entry, const HashTable &tableArg, HashNumber hn)
          : Ptr(entry, tableArg), keyHash(hn), mutationCount(tableArg.mutationCount)
        {}

      public:
        // Leaves AddPtr uninitialized.
        AddPtr() {}
    };
   1.829 +
   1.830 +    // A collection of hash table entries. The collection is enumerated by
   1.831 +    // calling |front()| followed by |popFront()| as long as |!empty()|. As
   1.832 +    // with Ptr/AddPtr, Range objects must not be used after any mutating hash
   1.833 +    // table operation unless the |generation()| is tested.
    class Range
    {
      protected:
        friend class HashTable;

        // Iterate over [c, e); skips ahead to the first live entry so that
        // |front()| is immediately valid (unless the range is empty).
        Range(const HashTable &tableArg, Entry *c, Entry *e)
          : cur(c)
          , end(e)
#ifdef DEBUG
          , table_(&tableArg)
          , mutationCount(tableArg.mutationCount)
          , generation(tableArg.generation())
          , validEntry(true)
#endif
        {
            while (cur < end && !cur->isLive())
                ++cur;
        }

        Entry *cur, *end;   // current position and one-past-the-last slot
#ifdef DEBUG
        // Snapshot of the table's identity/state, asserted on every access to
        // catch use of a Range across a mutating table operation.
        const HashTable *table_;
        uint64_t mutationCount;
        uint32_t generation;
        bool validEntry;    // false after Enum::removeFront/rekeyFront until popFront
#endif

      public:
        Range()
          : cur(nullptr)
          , end(nullptr)
#ifdef DEBUG
          , table_(nullptr)
          , mutationCount(0)
          , generation(0)
          , validEntry(false)
#endif
        {}

        bool empty() const {
            MOZ_ASSERT(generation == table_->generation());
            MOZ_ASSERT(mutationCount == table_->mutationCount);
            return cur == end;
        }

        // Returns the current live element; the Range must be non-empty and
        // the front must not have been removed/rekeyed since the last advance.
        T &front() const {
            MOZ_ASSERT(validEntry);
            MOZ_ASSERT(!empty());
            MOZ_ASSERT(generation == table_->generation());
            MOZ_ASSERT(mutationCount == table_->mutationCount);
            return cur->get();
        }

        // Advance to the next live entry (or to end).
        void popFront() {
            MOZ_ASSERT(!empty());
            MOZ_ASSERT(generation == table_->generation());
            MOZ_ASSERT(mutationCount == table_->mutationCount);
            while (++cur < end && !cur->isLive())
                continue;
#ifdef DEBUG
            validEntry = true;
#endif
        }
    };
   1.898 +
   1.899 +    // A Range whose lifetime delimits a mutating enumeration of a hash table.
   1.900 +    // Since rehashing when elements were removed during enumeration would be
   1.901 +    // bad, it is postponed until the Enum is destructed.  Since the Enum's
   1.902 +    // destructor touches the hash table, the user must ensure that the hash
   1.903 +    // table is still alive when the destructor runs.
    class Enum : public Range
    {
        friend class HashTable;

        HashTable &table_;
        bool rekeyed;   // any rekeyFront() happened; forces generation bump + rehash check
        bool removed;   // any removeFront() happened; may trigger compaction

        /* Not copyable. */
        Enum(const Enum &) MOZ_DELETE;
        void operator=(const Enum &) MOZ_DELETE;

      public:
        // |map| is a HashMap/HashSet wrapper: |map.all()| supplies the Range,
        // |map.impl| the underlying HashTable.
        template<class Map> explicit
        Enum(Map &map) : Range(map.all()), table_(map.impl), rekeyed(false), removed(false) {}

        // Removes the |front()| element from the table, leaving |front()|
        // invalid until the next call to |popFront()|. For example:
        //
        //   HashSet<int> s;
        //   for (HashSet<int>::Enum e(s); !e.empty(); e.popFront())
        //     if (e.front() == 42)
        //       e.removeFront();
        void removeFront() {
            table_.remove(*this->cur);
            removed = true;
#ifdef DEBUG
            // Keep this Range usable despite the mutation we just made.
            this->validEntry = false;
            this->mutationCount = table_.mutationCount;
#endif
        }

        // Removes the |front()| element and re-inserts it into the table with
        // a new key at the new Lookup position.  |front()| is invalid after
        // this operation until the next call to |popFront()|.
        void rekeyFront(const Lookup &l, const Key &k) {
            Ptr p(*this->cur, table_);
            table_.rekeyWithoutRehash(p, l, k);
            rekeyed = true;
#ifdef DEBUG
            this->validEntry = false;
            this->mutationCount = table_.mutationCount;
#endif
        }

        // Convenience overload when the key is its own lookup value.
        void rekeyFront(const Key &k) {
            rekeyFront(k, k);
        }

        // Potentially rehashes the table.
        ~Enum() {
            if (rekeyed) {
                table_.gen++;
                table_.checkOverRemoved();
            }

            if (removed)
                table_.compactIfUnderloaded();
        }
    };
   1.964 +
   1.965 +    // HashTable is movable
    // Move construction: byte-copies every member from |rhs| (the table's
    // scalar state is POD), then nulls |rhs.table| so rhs's destructor will
    // not free the entry storage we just took ownership of.
    HashTable(HashTable &&rhs)
      : AllocPolicy(rhs)
    {
        mozilla::PodAssign(this, &rhs);
        rhs.table = nullptr;
    }
    // Move assignment: destroy our current entries (if any), then steal rhs's
    // state exactly as the move constructor does.
    void operator=(HashTable &&rhs) {
        MOZ_ASSERT(this != &rhs, "self-move assignment is prohibited");
        if (table)
            destroyTable(*this, table, capacity());
        mozilla::PodAssign(this, &rhs);
        rhs.table = nullptr;
    }
   1.979 +
   1.980 +  private:
   1.981 +    // HashTable is not copyable or assignable
   1.982 +    HashTable(const HashTable &) MOZ_DELETE;
   1.983 +    void operator=(const HashTable &) MOZ_DELETE;
   1.984 +
   1.985 +  private:
   1.986 +    uint32_t    hashShift;      // multiplicative hash shift
   1.987 +    uint32_t    entryCount;     // number of entries in table
   1.988 +    uint32_t    gen;            // entry storage generation number
   1.989 +    uint32_t    removedCount;   // removed entry sentinels in table
   1.990 +    Entry       *table;         // entry storage
   1.991 +
    // Record a capacity of 2^sizeLog2 by storing the multiplicative-hash
    // shift: hash1() keeps the top |sizeLog2| bits of the scrambled hash.
    void setTableSizeLog2(unsigned sizeLog2)
    {
        hashShift = sHashBits - sizeLog2;
    }
   1.996 +
   1.997 +#ifdef JS_DEBUG
   1.998 +    mutable struct Stats
   1.999 +    {
  1.1000 +        uint32_t        searches;       // total number of table searches
  1.1001 +        uint32_t        steps;          // hash chain links traversed
  1.1002 +        uint32_t        hits;           // searches that found key
  1.1003 +        uint32_t        misses;         // searches that didn't find key
  1.1004 +        uint32_t        addOverRemoved; // adds that recycled a removed entry
  1.1005 +        uint32_t        removes;        // calls to remove
  1.1006 +        uint32_t        removeFrees;    // calls to remove that freed the entry
  1.1007 +        uint32_t        grows;          // table expansions
  1.1008 +        uint32_t        shrinks;        // table contractions
  1.1009 +        uint32_t        compresses;     // table compressions
  1.1010 +        uint32_t        rehashes;       // tombstone decontaminations
  1.1011 +    } stats;
  1.1012 +#   define METER(x) x
  1.1013 +#else
  1.1014 +#   define METER(x)
  1.1015 +#endif
  1.1016 +
  1.1017 +    friend class mozilla::ReentrancyGuard;
  1.1018 +    mutable mozilla::DebugOnly<bool> entered;
  1.1019 +    mozilla::DebugOnly<uint64_t>     mutationCount;
  1.1020 +
  1.1021 +    // The default initial capacity is 32 (enough to hold 16 elements), but it
  1.1022 +    // can be as low as 4.
  1.1023 +    static const unsigned sMinCapacityLog2 = 2;
  1.1024 +    static const unsigned sMinCapacity  = 1 << sMinCapacityLog2;
  1.1025 +    static const unsigned sMaxInit      = JS_BIT(23);
  1.1026 +    static const unsigned sMaxCapacity  = JS_BIT(24);
  1.1027 +    static const unsigned sHashBits     = mozilla::tl::BitSize<HashNumber>::value;
  1.1028 +
  1.1029 +    // Hash-table alpha is conceptually a fraction, but to avoid floating-point
  1.1030 +    // math we implement it as a ratio of integers.
  1.1031 +    static const uint8_t sAlphaDenominator = 4;
  1.1032 +    static const uint8_t sMinAlphaNumerator = 1; // min alpha: 1/4
  1.1033 +    static const uint8_t sMaxAlphaNumerator = 3; // max alpha: 3/4
  1.1034 +
  1.1035 +    static const HashNumber sFreeKey = Entry::sFreeKey;
  1.1036 +    static const HashNumber sRemovedKey = Entry::sRemovedKey;
  1.1037 +    static const HashNumber sCollisionBit = Entry::sCollisionBit;
  1.1038 +
    // True unless |hash| is one of the two reserved sentinel codes
    // (Entry::sFreeKey, Entry::sRemovedKey).
    static bool isLiveHash(HashNumber hash)
    {
        return Entry::isLiveHash(hash);
    }
  1.1043 +
  1.1044 +    static HashNumber prepareHash(const Lookup& l)
  1.1045 +    {
  1.1046 +        HashNumber keyHash = ScrambleHashCode(HashPolicy::hash(l));
  1.1047 +
  1.1048 +        // Avoid reserved hash codes.
  1.1049 +        if (!isLiveHash(keyHash))
  1.1050 +            keyHash -= (sRemovedKey + 1);
  1.1051 +        return keyHash & ~sCollisionBit;
  1.1052 +    }
  1.1053 +
  1.1054 +    static Entry *createTable(AllocPolicy &alloc, uint32_t capacity)
  1.1055 +    {
  1.1056 +        static_assert(sFreeKey == 0,
  1.1057 +                      "newly-calloc'd tables have to be considered empty");
  1.1058 +        static_assert(sMaxCapacity <= SIZE_MAX / sizeof(Entry),
  1.1059 +                      "would overflow allocating max number of entries");
  1.1060 +        return static_cast<Entry*>(alloc.calloc_(capacity * sizeof(Entry)));
  1.1061 +    }
  1.1062 +
  1.1063 +    static void destroyTable(AllocPolicy &alloc, Entry *oldTable, uint32_t capacity)
  1.1064 +    {
  1.1065 +        for (Entry *e = oldTable, *end = e + capacity; e < end; ++e)
  1.1066 +            e->destroyIfLive();
  1.1067 +        alloc.free_(oldTable);
  1.1068 +    }
  1.1069 +
  1.1070 +  public:
    // Construct an empty, uninitialized table.  init() must be called (and
    // succeed) before any other operation; hashShift == sHashBits is the
    // "capacity 2^0" placeholder until then.
    HashTable(AllocPolicy ap)
      : AllocPolicy(ap),
        hashShift(sHashBits),
        entryCount(0),
        gen(0),
        removedCount(0),
        table(nullptr),
        entered(false),
        mutationCount(0)
    {}
  1.1081 +
    // Allocate entry storage large enough to hold |length| elements without
    // rehashing.  Returns false (after reporting) when |length| is too large
    // or allocation fails; the table stays uninitialized in that case.
    MOZ_WARN_UNUSED_RESULT bool init(uint32_t length)
    {
        MOZ_ASSERT(!initialized());

        // Reject all lengths whose initial computed capacity would exceed
        // sMaxCapacity.  Round that maximum length down to the nearest power
        // of two for speedier code.
        if (length > sMaxInit) {
            this->reportAllocOverflow();
            return false;
        }

        static_assert((sMaxInit * sAlphaDenominator) / sAlphaDenominator == sMaxInit,
                      "multiplication in numerator below could overflow");
        static_assert(sMaxInit * sAlphaDenominator <= UINT32_MAX - sMaxAlphaNumerator,
                      "numerator calculation below could potentially overflow");

        // Compute the smallest capacity allowing |length| elements to be
        // inserted without rehashing: ceil(length / max-alpha).  (Ceiling
        // integral division: <http://stackoverflow.com/a/2745086>.)
        uint32_t newCapacity =
            (length * sAlphaDenominator + sMaxAlphaNumerator - 1) / sMaxAlphaNumerator;
        if (newCapacity < sMinCapacity)
            newCapacity = sMinCapacity;

        // FIXME: use JS_CEILING_LOG2 when PGO stops crashing (bug 543034).
        // Round the capacity up to the next power of two, tracking its log2.
        uint32_t roundUp = sMinCapacity, roundUpLog2 = sMinCapacityLog2;
        while (roundUp < newCapacity) {
            roundUp <<= 1;
            ++roundUpLog2;
        }

        newCapacity = roundUp;
        MOZ_ASSERT(newCapacity >= length);
        MOZ_ASSERT(newCapacity <= sMaxCapacity);

        table = createTable(*this, newCapacity);
        if (!table)
            return false;

        setTableSizeLog2(roundUpLog2);
        METER(memset(&stats, 0, sizeof(stats)));
        return true;
    }
  1.1126 +
  1.1127 +    bool initialized() const
  1.1128 +    {
  1.1129 +        return !!table;
  1.1130 +    }
  1.1131 +
    // Destroy all live entries and release the entry storage, if init() ever
    // succeeded.
    ~HashTable()
    {
        if (table)
            destroyTable(*this, table, capacity());
    }
  1.1137 +
  1.1138 +  private:
    // Primary probe address: the top log2(capacity) bits of the prepared hash.
    HashNumber hash1(HashNumber hash0) const
    {
        return hash0 >> hashShift;
    }
  1.1143 +
    // Secondary-hash state for double hashing: |h2| is the probe stride and
    // |sizeMask| wraps successive probe addresses into the table.
    struct DoubleHash
    {
        HashNumber h2;        // probe stride (forced odd by hash2)
        HashNumber sizeMask;  // capacity - 1 (capacity is a power of two)
    };
  1.1149 +
    // Build the double-hash stride from the hash bits just below those that
    // hash1 consumed; OR-ing in 1 makes the stride odd, hence coprime with
    // the power-of-two capacity, so every slot is eventually probed.
    DoubleHash hash2(HashNumber curKeyHash) const
    {
        unsigned sizeLog2 = sHashBits - hashShift;
        DoubleHash dh = {
            ((curKeyHash << sizeLog2) >> hashShift) | 1,
            (HashNumber(1) << sizeLog2) - 1
        };
        return dh;
    }
  1.1159 +
    // Step to the next probe address: subtract the stride, modulo capacity.
    static HashNumber applyDoubleHash(HashNumber h1, const DoubleHash &dh)
    {
        return (h1 - dh.h2) & dh.sizeMask;
    }
  1.1164 +
  1.1165 +    bool overloaded()
  1.1166 +    {
  1.1167 +        static_assert(sMaxCapacity <= UINT32_MAX / sMaxAlphaNumerator,
  1.1168 +                      "multiplication below could overflow");
  1.1169 +        return entryCount + removedCount >=
  1.1170 +               capacity() * sMaxAlphaNumerator / sAlphaDenominator;
  1.1171 +    }
  1.1172 +
  1.1173 +    // Would the table be underloaded if it had the given capacity and entryCount?
  1.1174 +    static bool wouldBeUnderloaded(uint32_t capacity, uint32_t entryCount)
  1.1175 +    {
  1.1176 +        static_assert(sMaxCapacity <= UINT32_MAX / sMinAlphaNumerator,
  1.1177 +                      "multiplication below could overflow");
  1.1178 +        return capacity > sMinCapacity &&
  1.1179 +               entryCount <= capacity * sMinAlphaNumerator / sAlphaDenominator;
  1.1180 +    }
  1.1181 +
  1.1182 +    bool underloaded()
  1.1183 +    {
  1.1184 +        return wouldBeUnderloaded(capacity(), entryCount);
  1.1185 +    }
  1.1186 +
    // Full key comparison (used only after the cheap hash comparison passes).
    static bool match(Entry &e, const Lookup &l)
    {
        return HashPolicy::match(HashPolicy::getKey(e.get()), l);
    }
  1.1191 +
    // Core probe loop.  Returns the entry holding |l| if present; otherwise a
    // slot where it could be inserted (recycling the first tombstone seen on
    // the probe path, if any).  When |collisionBit| is sCollisionBit (i.e. a
    // lookupForAdd), every live entry crossed is flagged so a later removal
    // of it leaves a tombstone instead of breaking the probe chain.
    Entry &lookup(const Lookup &l, HashNumber keyHash, unsigned collisionBit) const
    {
        MOZ_ASSERT(isLiveHash(keyHash));
        MOZ_ASSERT(!(keyHash & sCollisionBit));
        MOZ_ASSERT(collisionBit == 0 || collisionBit == sCollisionBit);
        MOZ_ASSERT(table);
        METER(stats.searches++);

        // Compute the primary hash address.
        HashNumber h1 = hash1(keyHash);
        Entry *entry = &table[h1];

        // Miss: return space for a new entry.
        if (entry->isFree()) {
            METER(stats.misses++);
            return *entry;
        }

        // Hit: return entry.
        if (entry->matchHash(keyHash) && match(*entry, l)) {
            METER(stats.hits++);
            return *entry;
        }

        // Collision: double hash.
        DoubleHash dh = hash2(keyHash);

        // Save the first removed entry pointer so we can recycle later.
        Entry *firstRemoved = nullptr;

        while(true) {
            if (MOZ_UNLIKELY(entry->isRemoved())) {
                if (!firstRemoved)
                    firstRemoved = entry;
            } else {
                // Live entry on our probe path: flag it (no-op when
                // collisionBit is 0, i.e. a plain lookup).
                entry->setCollision(collisionBit);
            }

            METER(stats.steps++);
            h1 = applyDoubleHash(h1, dh);

            entry = &table[h1];
            if (entry->isFree()) {
                METER(stats.misses++);
                return firstRemoved ? *firstRemoved : *entry;
            }

            if (entry->matchHash(keyHash) && match(*entry, l)) {
                METER(stats.hits++);
                return *entry;
            }
        }
    }
  1.1245 +
  1.1246 +    // This is a copy of lookup hardcoded to the assumptions:
  1.1247 +    //   1. the lookup is a lookupForAdd
  1.1248 +    //   2. the key, whose |keyHash| has been passed is not in the table,
  1.1249 +    //   3. no entries have been removed from the table.
  1.1250 +    // This specialized search avoids the need for recovering lookup values
  1.1251 +    // from entries, which allows more flexible Lookup/Key types.
    // Probe for a never-used slot for a key known to be absent; see the
    // assumptions listed in the comment above.  Sets the collision bit on
    // every live entry crossed, exactly like lookup() does for adds.
    Entry &findFreeEntry(HashNumber keyHash)
    {
        MOZ_ASSERT(!(keyHash & sCollisionBit));
        MOZ_ASSERT(table);
        METER(stats.searches++);

        // We assume 'keyHash' has already been distributed.

        // Compute the primary hash address.
        HashNumber h1 = hash1(keyHash);
        Entry *entry = &table[h1];

        // Miss: return space for a new entry.
        if (!entry->isLive()) {
            METER(stats.misses++);
            return *entry;
        }

        // Collision: double hash.
        DoubleHash dh = hash2(keyHash);

        while(true) {
            // No tombstones can exist (assumption 3 above).
            MOZ_ASSERT(!entry->isRemoved());
            entry->setCollision();

            METER(stats.steps++);
            h1 = applyDoubleHash(h1, dh);

            entry = &table[h1];
            if (!entry->isLive()) {
                METER(stats.misses++);
                return *entry;
            }
        }
    }
  1.1287 +
  1.1288 +    enum RebuildStatus { NotOverloaded, Rehashed, RehashFailed };
  1.1289 +
    // Rebuild the table at capacity * 2^deltaLog2, moving every live entry
    // into freshly allocated storage (tombstones are dropped).  On failure
    // the existing table is left untouched.
    RebuildStatus changeTableSize(int deltaLog2)
    {
        // Look, but don't touch, until we succeed in getting new entry store.
        Entry *oldTable = table;
        uint32_t oldCap = capacity();
        uint32_t newLog2 = sHashBits - hashShift + deltaLog2;
        uint32_t newCapacity = JS_BIT(newLog2);
        if (newCapacity > sMaxCapacity) {
            this->reportAllocOverflow();
            return RehashFailed;
        }

        Entry *newTable = createTable(*this, newCapacity);
        if (!newTable)
            return RehashFailed;

        // We can't fail from here on, so update table parameters.
        setTableSizeLog2(newLog2);
        removedCount = 0;
        gen++;
        table = newTable;

        // Copy only live entries, leaving removed ones behind.
        for (Entry *src = oldTable, *end = src + oldCap; src < end; ++src) {
            if (src->isLive()) {
                HashNumber hn = src->getKeyHash();
                // findFreeEntry is safe here: the new table has no tombstones
                // and the key is certainly absent from it.
                findFreeEntry(hn).setLive(hn, mozilla::Move(src->get()));
                src->destroy();
            }
        }

        // All entries have been destroyed, no need to destroyTable.
        this->free_(oldTable);
        return Rehashed;
    }
  1.1325 +
  1.1326 +    RebuildStatus checkOverloaded()
  1.1327 +    {
  1.1328 +        if (!overloaded())
  1.1329 +            return NotOverloaded;
  1.1330 +
  1.1331 +        // Compress if a quarter or more of all entries are removed.
  1.1332 +        int deltaLog2;
  1.1333 +        if (removedCount >= (capacity() >> 2)) {
  1.1334 +            METER(stats.compresses++);
  1.1335 +            deltaLog2 = 0;
  1.1336 +        } else {
  1.1337 +            METER(stats.grows++);
  1.1338 +            deltaLog2 = 1;
  1.1339 +        }
  1.1340 +
  1.1341 +        return changeTableSize(deltaLog2);
  1.1342 +    }
  1.1343 +
  1.1344 +    // Infallibly rehash the table if we are overloaded with removals.
  1.1345 +    void checkOverRemoved()
  1.1346 +    {
  1.1347 +        if (overloaded()) {
  1.1348 +            if (checkOverloaded() == RehashFailed)
  1.1349 +                rehashTableInPlace();
  1.1350 +        }
  1.1351 +    }
  1.1352 +
    // Remove entry |e|.  If some probe chain passes through it (collision bit
    // set) it must become a tombstone to keep that chain intact; otherwise the
    // slot can revert straight to "free".
    void remove(Entry &e)
    {
        MOZ_ASSERT(table);
        METER(stats.removes++);

        if (e.hasCollision()) {
            e.removeLive();
            removedCount++;
        } else {
            METER(stats.removeFrees++);
            e.clearLive();
        }
        entryCount--;
        mutationCount++;
    }
  1.1368 +
  1.1369 +    void checkUnderloaded()
  1.1370 +    {
  1.1371 +        if (underloaded()) {
  1.1372 +            METER(stats.shrinks++);
  1.1373 +            (void) changeTableSize(-1);
  1.1374 +        }
  1.1375 +    }
  1.1376 +
  1.1377 +    // Resize the table down to the largest capacity which doesn't underload the
  1.1378 +    // table.  Since we call checkUnderloaded() on every remove, you only need
  1.1379 +    // to call this after a bulk removal of items done without calling remove().
  1.1380 +    void compactIfUnderloaded()
  1.1381 +    {
  1.1382 +        int32_t resizeLog2 = 0;
  1.1383 +        uint32_t newCapacity = capacity();
  1.1384 +        while (wouldBeUnderloaded(newCapacity, entryCount)) {
  1.1385 +            newCapacity = newCapacity >> 1;
  1.1386 +            resizeLog2--;
  1.1387 +        }
  1.1388 +
  1.1389 +        if (resizeLog2 != 0) {
  1.1390 +            changeTableSize(resizeLog2);
  1.1391 +        }
  1.1392 +    }
  1.1393 +
  1.1394 +    // This is identical to changeTableSize(currentSize), but without requiring
  1.1395 +    // a second table.  We do this by recycling the collision bits to tell us if
  1.1396 +    // the element is already inserted or still waiting to be inserted.  Since
  1.1397 +    // already-inserted elements win any conflicts, we get the same table as we
  1.1398 +    // would have gotten through random insertion order.
    // Allocation-free equivalent of changeTableSize(0); see the comment
    // above.  The collision bit is repurposed as an "already placed" mark.
    void rehashTableInPlace()
    {
        METER(stats.rehashes++);
        removedCount = 0;
        // Phase 1: clear all marks; nothing is placed yet.
        for (size_t i = 0; i < capacity(); ++i)
            table[i].unsetCollision();

        // Phase 2: place each unmarked live entry on its probe path, bumping
        // any unplaced occupant onward (src->swap(tgt) may leave a different
        // pending element in |src|, which is why |i| only advances once the
        // slot holds nothing left to place).
        for (size_t i = 0; i < capacity();) {
            Entry *src = &table[i];

            if (!src->isLive() || src->hasCollision()) {
                ++i;
                continue;
            }

            HashNumber keyHash = src->getKeyHash();
            HashNumber h1 = hash1(keyHash);
            DoubleHash dh = hash2(keyHash);
            Entry *tgt = &table[h1];
            while (true) {
                if (!tgt->hasCollision()) {
                    src->swap(tgt);
                    tgt->setCollision();
                    break;
                }

                h1 = applyDoubleHash(h1, dh);
                tgt = &table[h1];
            }
        }

        // TODO: this algorithm leaves collision bits on *all* elements, even if
        // they are on no collision path. We have the option of setting the
        // collision bits correctly on a subsequent pass or skipping the rehash
        // unless we are totally filled with tombstones: benchmark to find out
        // which approach is best.
    }
  1.1436 +
  1.1437 +  public:
    // Destroy all entries but keep the current storage and capacity.
    void clear()
    {
        // IsPod<Entry> is a compile-time constant, so one branch folds away:
        // POD entries need no destructor calls and can simply be zeroed
        // (zero == the "free" state).
        if (mozilla::IsPod<Entry>::value) {
            memset(table, 0, sizeof(*table) * capacity());
        } else {
            uint32_t tableCapacity = capacity();
            for (Entry *e = table, *end = table + tableCapacity; e < end; ++e)
                e->clear();
        }
        removedCount = 0;
        entryCount = 0;
        mutationCount++;
    }
  1.1451 +
    // Destroy all entries and release the storage, returning the table to its
    // pre-init() state (init() may be called again afterwards).
    void finish()
    {
        MOZ_ASSERT(!entered);

        if (!table)
            return;

        destroyTable(*this, table, capacity());
        table = nullptr;
        gen++;
        entryCount = 0;
        removedCount = 0;
        mutationCount++;
    }
  1.1466 +
    // A Range over every live entry; the table must be initialized.
    Range all() const
    {
        MOZ_ASSERT(table);
        return Range(*this, table, table + capacity());
    }
  1.1472 +
  1.1473 +    bool empty() const
  1.1474 +    {
  1.1475 +        MOZ_ASSERT(table);
  1.1476 +        return !entryCount;
  1.1477 +    }
  1.1478 +
    // Number of live entries (tombstones excluded).
    uint32_t count() const
    {
        MOZ_ASSERT(table);
        return entryCount;
    }
  1.1484 +
    // Total number of slots (always a power of two), decoded from hashShift.
    uint32_t capacity() const
    {
        MOZ_ASSERT(table);
        return JS_BIT(sHashBits - hashShift);
    }
  1.1490 +
  1.1491 +    uint32_t generation() const
  1.1492 +    {
  1.1493 +        MOZ_ASSERT(table);
  1.1494 +        return gen;
  1.1495 +    }
  1.1496 +
  1.1497 +    size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const
  1.1498 +    {
  1.1499 +        return mallocSizeOf(table);
  1.1500 +    }
  1.1501 +
  1.1502 +    size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const
  1.1503 +    {
  1.1504 +        return mallocSizeOf(this) + sizeOfExcludingThis(mallocSizeOf);
  1.1505 +    }
  1.1506 +
  1.1507 +    Ptr lookup(const Lookup &l) const
  1.1508 +    {
  1.1509 +        mozilla::ReentrancyGuard g(*this);
  1.1510 +        HashNumber keyHash = prepareHash(l);
  1.1511 +        return Ptr(lookup(l, keyHash, 0), *this);
  1.1512 +    }
  1.1513 +
  1.1514 +    Ptr readonlyThreadsafeLookup(const Lookup &l) const
  1.1515 +    {
  1.1516 +        HashNumber keyHash = prepareHash(l);
  1.1517 +        return Ptr(lookup(l, keyHash, 0), *this);
  1.1518 +    }
  1.1519 +
  1.1520 +    AddPtr lookupForAdd(const Lookup &l) const
  1.1521 +    {
  1.1522 +        mozilla::ReentrancyGuard g(*this);
  1.1523 +        HashNumber keyHash = prepareHash(l);
  1.1524 +        Entry &entry = lookup(l, keyHash, sCollisionBit);
  1.1525 +        AddPtr p(entry, *this, keyHash);
  1.1526 +        return p;
  1.1527 +    }
  1.1528 +
    // Insert |u| at the slot previously located by lookupForAdd() and carried
    // in |p|. |p| must not have found a match. Returns false only when a
    // needed rehash fails (OOM); the table is unchanged in that case.
    template <class U>
    bool add(AddPtr &p, U &&u)
    {
        mozilla::ReentrancyGuard g(*this);
        MOZ_ASSERT(table);
        MOZ_ASSERT(!p.found());
        MOZ_ASSERT(!(p.keyHash & sCollisionBit));

        // Changing an entry from removed to live does not affect whether we
        // are overloaded and can be handled separately.
        if (p.entry_->isRemoved()) {
            METER(stats.addOverRemoved++);
            removedCount--;
            // The slot sat on a probe path; keep the collision bit in the
            // stored hash so later lookups keep probing past it.
            p.keyHash |= sCollisionBit;
        } else {
            // Preserve the validity of |p.entry_|.
            RebuildStatus status = checkOverloaded();
            if (status == RehashFailed)
                return false;
            // A rehash moved every slot, so re-find a free one for this hash.
            if (status == Rehashed)
                p.entry_ = &findFreeEntry(p.keyHash);
        }

        p.entry_->setLive(p.keyHash, mozilla::Forward<U>(u));
        entryCount++;
        mutationCount++;
#ifdef DEBUG
        // Refresh |p|'s bookkeeping so it remains usable after this mutation.
        p.generation = generation();
        p.mutationCount = mutationCount;
#endif
        return true;
    }
  1.1561 +
    // Insert |u| under key |l| without any capacity check; the caller must
    // guarantee a free slot exists (e.g. via a prior checkOverloaded()).
    // Note: |l| may be a reference to a piece of |u|, so this function
    // must take care not to use |l| after moving |u|.
    template <class U>
    void putNewInfallible(const Lookup &l, U &&u)
    {
        MOZ_ASSERT(table);

        HashNumber keyHash = prepareHash(l);
        Entry *entry = &findFreeEntry(keyHash);

        if (entry->isRemoved()) {
            METER(stats.addOverRemoved++);
            removedCount--;
            // Reusing a tombstone on a probe path: preserve the collision
            // marker so lookups continue probing past this slot.
            keyHash |= sCollisionBit;
        }

        // |l| must not be used past this point; |u| may own it.
        entry->setLive(keyHash, mozilla::Forward<U>(u));
        entryCount++;
        mutationCount++;
    }
  1.1582 +
  1.1583 +    // Note: |l| may be a reference to a piece of |u|, so this function
  1.1584 +    // must take care not to use |l| after moving |u|.
  1.1585 +    template <class U>
  1.1586 +    bool putNew(const Lookup &l, U &&u)
  1.1587 +    {
  1.1588 +        if (checkOverloaded() == RehashFailed)
  1.1589 +            return false;
  1.1590 +
  1.1591 +        putNewInfallible(l, mozilla::Forward<U>(u));
  1.1592 +        return true;
  1.1593 +    }
  1.1594 +
    // Re-resolve a possibly-stale AddPtr |p| (the table may have mutated
    // since lookupForAdd()), then add |u| if |l| is still absent. Returns
    // true if |l| is present afterwards (found or added), false on OOM.
    // Note: |l| may be a reference to a piece of |u|, so this function
    // must take care not to use |l| after moving |u|.
    template <class U>
    bool relookupOrAdd(AddPtr& p, const Lookup &l, U &&u)
    {
#ifdef DEBUG
        // Refresh |p|'s DEBUG bookkeeping first so the lookup below does not
        // assert on the staleness we are here to repair.
        p.generation = generation();
        p.mutationCount = mutationCount;
#endif
        {
            mozilla::ReentrancyGuard g(*this);
            MOZ_ASSERT(prepareHash(l) == p.keyHash); // l has not been destroyed
            p.entry_ = &lookup(l, p.keyHash, sCollisionBit);
        }
        // Guard must be released before add(), which takes its own.
        return p.found() || add(p, mozilla::Forward<U>(u));
    }
  1.1611 +
  1.1612 +    void remove(Ptr p)
  1.1613 +    {
  1.1614 +        MOZ_ASSERT(table);
  1.1615 +        mozilla::ReentrancyGuard g(*this);
  1.1616 +        MOZ_ASSERT(p.found());
  1.1617 +        remove(*p.entry_);
  1.1618 +        checkUnderloaded();
  1.1619 +    }
  1.1620 +
    // Change the key of the entry at |p| to |k| (looked up via |l|) without
    // triggering a shrink/rehash pass. The payload is moved out, re-keyed,
    // and reinserted at its new hash position.
    void rekeyWithoutRehash(Ptr p, const Lookup &l, const Key &k)
    {
        MOZ_ASSERT(table);
        mozilla::ReentrancyGuard g(*this);
        MOZ_ASSERT(p.found());
        // Move the payload out before removing the slot, then stamp the new
        // key into the moved copy.
        typename HashTableEntry<T>::NonConstT t(mozilla::Move(*p));
        HashPolicy::setKey(t, const_cast<Key &>(k));
        remove(*p.entry_);
        // Removing freed a slot, so this reinsertion cannot fail.
        putNewInfallible(l, mozilla::Move(t));
    }
  1.1631 +
  1.1632 +    void rekeyAndMaybeRehash(Ptr p, const Lookup &l, const Key &k)
  1.1633 +    {
  1.1634 +        rekeyWithoutRehash(p, l, k);
  1.1635 +        checkOverRemoved();
  1.1636 +    }
  1.1637 +
  1.1638 +#undef METER
  1.1639 +};
  1.1640 +
  1.1641 +}  // namespace detail
  1.1642 +}  // namespace js
  1.1643 +
  1.1644 +#endif  /* js_HashTable_h */

mercurial