Fri, 16 Jan 2015 18:13:44 +0100
Integrate a suggestion from review to improve consistency with the existing code.
michael@0 | 1 | /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- |
michael@0 | 2 | * vim: set ts=8 sts=4 et sw=4 tw=99: |
michael@0 | 3 | * This Source Code Form is subject to the terms of the Mozilla Public |
michael@0 | 4 | * License, v. 2.0. If a copy of the MPL was not distributed with this |
michael@0 | 5 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
michael@0 | 6 | |
michael@0 | 7 | #ifndef js_HashTable_h |
michael@0 | 8 | #define js_HashTable_h |
michael@0 | 9 | |
michael@0 | 10 | #include "mozilla/Alignment.h" |
michael@0 | 11 | #include "mozilla/Assertions.h" |
michael@0 | 12 | #include "mozilla/Attributes.h" |
michael@0 | 13 | #include "mozilla/Casting.h" |
michael@0 | 14 | #include "mozilla/DebugOnly.h" |
michael@0 | 15 | #include "mozilla/MemoryReporting.h" |
michael@0 | 16 | #include "mozilla/Move.h" |
michael@0 | 17 | #include "mozilla/NullPtr.h" |
michael@0 | 18 | #include "mozilla/PodOperations.h" |
michael@0 | 19 | #include "mozilla/ReentrancyGuard.h" |
michael@0 | 20 | #include "mozilla/TemplateLib.h" |
michael@0 | 21 | #include "mozilla/TypeTraits.h" |
michael@0 | 22 | |
michael@0 | 23 | #include "js/Utility.h" |
michael@0 | 24 | |
michael@0 | 25 | namespace js { |
michael@0 | 26 | |
// Forward declarations. TempAllocPolicy is the default allocation policy
// (see jsalloc.h); the other types are declared here for use before their
// definitions appear.
class TempAllocPolicy;
template <class> struct DefaultHasher;
template <class, class> class HashMapEntry;
namespace detail {
template <class T> class HashTableEntry;
template <class T, class HashPolicy, class AllocPolicy> class HashTable;
} // namespace detail
michael@0 | 34 | |
michael@0 | 35 | /*****************************************************************************/ |
michael@0 | 36 | |
michael@0 | 37 | // A JS-friendly, STL-like container providing a hash-based map from keys to |
michael@0 | 38 | // values. In particular, HashMap calls constructors and destructors of all |
michael@0 | 39 | // objects added so non-PODs may be used safely. |
michael@0 | 40 | // |
michael@0 | 41 | // Key/Value requirements: |
michael@0 | 42 | // - movable, destructible, assignable |
michael@0 | 43 | // HashPolicy requirements: |
michael@0 | 44 | // - see Hash Policy section below |
michael@0 | 45 | // AllocPolicy: |
michael@0 | 46 | // - see jsalloc.h |
michael@0 | 47 | // |
michael@0 | 48 | // Note: |
michael@0 | 49 | // - HashMap is not reentrant: Key/Value/HashPolicy/AllocPolicy members |
michael@0 | 50 | // called by HashMap must not call back into the same HashMap object. |
michael@0 | 51 | // - Due to the lack of exception handling, the user must call |init()|. |
template <class Key,
          class Value,
          class HashPolicy = DefaultHasher<Key>,
          class AllocPolicy = TempAllocPolicy>
class HashMap
{
    typedef HashMapEntry<Key, Value> TableEntry;

    // Adapts the user-supplied, Key-oriented HashPolicy to the entry-oriented
    // interface required by the underlying HashTable.
    struct MapHashPolicy : HashPolicy
    {
        typedef Key KeyType;
        static const Key &getKey(TableEntry &e) { return e.key(); }
        static void setKey(TableEntry &e, Key &k) { HashPolicy::rekey(e.mutableKey(), k); }
    };

    typedef detail::HashTable<TableEntry, MapHashPolicy, AllocPolicy> Impl;
    Impl impl;

  public:
    typedef typename HashPolicy::Lookup Lookup;
    typedef TableEntry Entry;

    // HashMap construction is fallible (due to OOM); thus the user must call
    // init after constructing a HashMap and check the return value.
    HashMap(AllocPolicy a = AllocPolicy()) : impl(a) {}
    bool init(uint32_t len = 16) { return impl.init(len); }
    bool initialized() const { return impl.initialized(); }

    // Return whether the given lookup value is present in the map. E.g.:
    //
    //   typedef HashMap<int,char> HM;
    //   HM h;
    //   if (HM::Ptr p = h.lookup(3)) {
    //     const HM::Entry &e = *p; // p acts like a pointer to Entry
    //     assert(p->key == 3);     // Entry contains the key
    //     char val = p->value;     // and value
    //   }
    //
    // Also see the definition of Ptr in HashTable (with T = Entry).
    typedef typename Impl::Ptr Ptr;
    Ptr lookup(const Lookup &l) const { return impl.lookup(l); }

    // Like lookup, but does not assert if two threads call lookup at the same
    // time. Only use this method when none of the threads will modify the map.
    Ptr readonlyThreadsafeLookup(const Lookup &l) const { return impl.readonlyThreadsafeLookup(l); }

    // Assuming |p.found()|, remove |*p|.
    void remove(Ptr p) { impl.remove(p); }

    // Like |lookup(l)|, but on miss, |p = lookupForAdd(l)| allows efficient
    // insertion of Key |k| (where |HashPolicy::match(k,l) == true|) using
    // |add(p,k,v)|. After |add(p,k,v)|, |p| points to the new Entry. E.g.:
    //
    //   typedef HashMap<int,char> HM;
    //   HM h;
    //   HM::AddPtr p = h.lookupForAdd(3);
    //   if (!p) {
    //     if (!h.add(p, 3, 'a'))
    //       return false;
    //   }
    //   const HM::Entry &e = *p;   // p acts like a pointer to Entry
    //   assert(p->key == 3);       // Entry contains the key
    //   char val = p->value;       // and value
    //
    // Also see the definition of AddPtr in HashTable (with T = Entry).
    //
    // N.B. The caller must ensure that no mutating hash table operations
    // occur between a pair of |lookupForAdd| and |add| calls. To avoid
    // looking up the key a second time, the caller may use the more efficient
    // relookupOrAdd method. This method reuses part of the hashing computation
    // to more efficiently insert the key if it has not been added. For
    // example, a mutation-handling version of the previous example:
    //
    //    HM::AddPtr p = h.lookupForAdd(3);
    //    if (!p) {
    //      call_that_may_mutate_h();
    //      if (!h.relookupOrAdd(p, 3, 'a'))
    //        return false;
    //    }
    //    const HM::Entry &e = *p;
    //    assert(p->key == 3);
    //    char val = p->value;
    typedef typename Impl::AddPtr AddPtr;
    AddPtr lookupForAdd(const Lookup &l) const {
        return impl.lookupForAdd(l);
    }

    // Insert |k| -> |v| at the position indicated by |p| (see lookupForAdd).
    template<typename KeyInput, typename ValueInput>
    bool add(AddPtr &p, KeyInput &&k, ValueInput &&v) {
        Entry e(mozilla::Forward<KeyInput>(k), mozilla::Forward<ValueInput>(v));
        return impl.add(p, mozilla::Move(e));
    }

    // Like add(p,k,v), but maps |k| to a default-constructed Value.
    template<typename KeyInput>
    bool add(AddPtr &p, KeyInput &&k) {
        Entry e(mozilla::Forward<KeyInput>(k), Value());
        return impl.add(p, mozilla::Move(e));
    }

    // Like add, but tolerant of mutations of the map between the
    // lookupForAdd call that produced |p| and this call (see the N.B. above).
    template<typename KeyInput, typename ValueInput>
    bool relookupOrAdd(AddPtr &p, KeyInput &&k, ValueInput &&v) {
        Entry e(mozilla::Forward<KeyInput>(k), mozilla::Forward<ValueInput>(v));
        return impl.relookupOrAdd(p, e.key(), mozilla::Move(e));
    }

    // |all()| returns a Range containing |count()| elements. E.g.:
    //
    //   typedef HashMap<int,char> HM;
    //   HM h;
    //   for (HM::Range r = h.all(); !r.empty(); r.popFront())
    //     char c = r.front().value();
    //
    // Also see the definition of Range in HashTable (with T = Entry).
    typedef typename Impl::Range Range;
    Range all() const { return impl.all(); }

    // Typedef for the enumeration class. An Enum may be used to examine and
    // remove table entries:
    //
    //   typedef HashMap<int,char> HM;
    //   HM s;
    //   for (HM::Enum e(s); !e.empty(); e.popFront())
    //     if (e.front().value() == 'l')
    //       e.removeFront();
    //
    // Table resize may occur in Enum's destructor. Also see the definition of
    // Enum in HashTable (with T = Entry).
    typedef typename Impl::Enum Enum;

    // Remove all entries. This does not shrink the table. For that consider
    // using the finish() method.
    void clear() { impl.clear(); }

    // Remove all entries without triggering destructors. This method is
    // unsafe: the caller must guarantee that the entries' destructors need
    // not run (e.g. the entries are PODs, or are deliberately being leaked).
    void clearWithoutCallingDestructors() { impl.clearWithoutCallingDestructors(); }

    // Remove all the entries and release all internal buffers. The map must
    // be initialized again before any use.
    void finish() { impl.finish(); }

    // Does the table contain any entries?
    bool empty() const { return impl.empty(); }

    // Number of live elements in the map.
    uint32_t count() const { return impl.count(); }

    // Number of entry slots allocated in the dynamic table. Note: resize will
    // happen well before count() == capacity().
    size_t capacity() const { return impl.capacity(); }

    // Don't just call |impl.sizeOfExcludingThis()| because there's no
    // guarantee that |impl| is the first field in HashMap.
    size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
        return impl.sizeOfExcludingThis(mallocSizeOf);
    }
    size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
        return mallocSizeOf(this) + impl.sizeOfExcludingThis(mallocSizeOf);
    }

    // If |generation()| is the same before and after a HashMap operation,
    // pointers into the table remain valid.
    unsigned generation() const { return impl.generation(); }

    /************************************************** Shorthand operations */

    bool has(const Lookup &l) const {
        return impl.lookup(l) != nullptr;
    }

    // Overwrite the existing value with v, or add a new entry if |k| is not
    // present. Return false on oom.
    template<typename KeyInput, typename ValueInput>
    bool put(KeyInput &&k, ValueInput &&v) {
        AddPtr p = lookupForAdd(k);
        if (p) {
            p->value() = mozilla::Forward<ValueInput>(v);
            return true;
        }
        return add(p, mozilla::Forward<KeyInput>(k), mozilla::Forward<ValueInput>(v));
    }

    // Like put, but assert that the given key is not already present.
    template<typename KeyInput, typename ValueInput>
    bool putNew(KeyInput &&k, ValueInput &&v) {
        Entry e(mozilla::Forward<KeyInput>(k), mozilla::Forward<ValueInput>(v));
        return impl.putNew(e.key(), mozilla::Move(e));
    }

    // Add (k,defaultValue) if |k| is not found. Return a false-y Ptr on oom.
    Ptr lookupWithDefault(const Key &k, const Value &defaultValue) {
        AddPtr p = lookupForAdd(k);
        if (p)
            return p;
        (void)add(p, k, defaultValue);  // p is left false-y on oom.
        return p;
    }

    // Remove if present.
    void remove(const Lookup &l) {
        if (Ptr p = lookup(l))
            remove(p);
    }

    // Infallibly rekey one entry, if necessary.
    // Requires template parameters Key and HashPolicy::Lookup to be the same type.
    void rekeyIfMoved(const Key &old_key, const Key &new_key) {
        if (old_key != new_key)
            rekeyAs(old_key, new_key, new_key);
    }

    // Infallibly rekey one entry, if present.
    void rekeyAs(const Lookup &old_lookup, const Lookup &new_lookup, const Key &new_key) {
        if (Ptr p = lookup(old_lookup))
            impl.rekeyAndMaybeRehash(p, new_lookup, new_key);
    }

    // HashMap is movable
    HashMap(HashMap &&rhs) : impl(mozilla::Move(rhs.impl)) {}
    void operator=(HashMap &&rhs) {
        MOZ_ASSERT(this != &rhs, "self-move assignment is prohibited");
        impl = mozilla::Move(rhs.impl);
    }

  private:
    // HashMap is not copyable or assignable
    HashMap(const HashMap &hm) MOZ_DELETE;
    HashMap &operator=(const HashMap &hm) MOZ_DELETE;

    // Allow Impl::Enum to access the private |impl| member.
    friend class Impl::Enum;
};
michael@0 | 281 | |
michael@0 | 282 | /*****************************************************************************/ |
michael@0 | 283 | |
michael@0 | 284 | // A JS-friendly, STL-like container providing a hash-based set of values. In |
michael@0 | 285 | // particular, HashSet calls constructors and destructors of all objects added |
michael@0 | 286 | // so non-PODs may be used safely. |
michael@0 | 287 | // |
michael@0 | 288 | // T requirements: |
michael@0 | 289 | // - movable, destructible, assignable |
michael@0 | 290 | // HashPolicy requirements: |
michael@0 | 291 | // - see Hash Policy section below |
michael@0 | 292 | // AllocPolicy: |
michael@0 | 293 | // - see jsalloc.h |
michael@0 | 294 | // |
michael@0 | 295 | // Note: |
michael@0 | 296 | // - HashSet is not reentrant: T/HashPolicy/AllocPolicy members called by |
michael@0 | 297 | // HashSet must not call back into the same HashSet object. |
michael@0 | 298 | // - Due to the lack of exception handling, the user must call |init()|. |
template <class T,
          class HashPolicy = DefaultHasher<T>,
          class AllocPolicy = TempAllocPolicy>
class HashSet
{
    // Adapts HashPolicy to the entry-oriented interface required by the
    // underlying HashTable: for a set, the stored element is its own key.
    struct SetOps : HashPolicy
    {
        typedef T KeyType;
        static const KeyType &getKey(const T &t) { return t; }
        static void setKey(T &t, KeyType &k) { HashPolicy::rekey(t, k); }
    };

    typedef detail::HashTable<const T, SetOps, AllocPolicy> Impl;
    Impl impl;

  public:
    typedef typename HashPolicy::Lookup Lookup;
    typedef T Entry;

    // HashSet construction is fallible (due to OOM); thus the user must call
    // init after constructing a HashSet and check the return value.
    HashSet(AllocPolicy a = AllocPolicy()) : impl(a) {}
    bool init(uint32_t len = 16) { return impl.init(len); }
    bool initialized() const { return impl.initialized(); }

    // Return whether the given lookup value is present in the set. E.g.:
    //
    //   typedef HashSet<int> HS;
    //   HS h;
    //   if (HS::Ptr p = h.lookup(3)) {
    //     assert(*p == 3);   // p acts like a pointer to int
    //   }
    //
    // Also see the definition of Ptr in HashTable.
    typedef typename Impl::Ptr Ptr;
    Ptr lookup(const Lookup &l) const { return impl.lookup(l); }

    // Like lookup, but does not assert if two threads call lookup at the same
    // time. Only use this method when none of the threads will modify the set.
    Ptr readonlyThreadsafeLookup(const Lookup &l) const { return impl.readonlyThreadsafeLookup(l); }

    // Assuming |p.found()|, remove |*p|.
    void remove(Ptr p) { impl.remove(p); }

    // Like |lookup(l)|, but on miss, |p = lookupForAdd(l)| allows efficient
    // insertion of T value |t| (where |HashPolicy::match(t,l) == true|) using
    // |add(p,t)|. After |add(p,t)|, |p| points to the new element. E.g.:
    //
    //   typedef HashSet<int> HS;
    //   HS h;
    //   HS::AddPtr p = h.lookupForAdd(3);
    //   if (!p) {
    //     if (!h.add(p, 3))
    //       return false;
    //   }
    //   assert(*p == 3);   // p acts like a pointer to int
    //
    // Also see the definition of AddPtr in HashTable.
    //
    // N.B. The caller must ensure that no mutating hash table operations
    // occur between a pair of |lookupForAdd| and |add| calls. To avoid
    // looking up the key a second time, the caller may use the more efficient
    // relookupOrAdd method. This method reuses part of the hashing computation
    // to more efficiently insert the key if it has not been added. For
    // example, a mutation-handling version of the previous example:
    //
    //    HS::AddPtr p = h.lookupForAdd(3);
    //    if (!p) {
    //      call_that_may_mutate_h();
    //      if (!h.relookupOrAdd(p, 3, 3))
    //        return false;
    //    }
    //    assert(*p == 3);
    //
    // Note that relookupOrAdd(p,l,t) performs Lookup using |l| and adds the
    // entry |t|, where the caller ensures match(l,t).
    typedef typename Impl::AddPtr AddPtr;
    AddPtr lookupForAdd(const Lookup &l) const { return impl.lookupForAdd(l); }

    // Insert |u| at the position indicated by |p| (see lookupForAdd).
    template <typename U>
    bool add(AddPtr &p, U &&u) {
        return impl.add(p, mozilla::Forward<U>(u));
    }

    // Like add, but tolerant of mutations of the set between the
    // lookupForAdd call that produced |p| and this call (see the N.B. above).
    template <typename U>
    bool relookupOrAdd(AddPtr &p, const Lookup &l, U &&u) {
        return impl.relookupOrAdd(p, l, mozilla::Forward<U>(u));
    }

    // |all()| returns a Range containing |count()| elements:
    //
    //   typedef HashSet<int> HS;
    //   HS h;
    //   for (HS::Range r = h.all(); !r.empty(); r.popFront())
    //     int i = r.front();
    //
    // Also see the definition of Range in HashTable.
    typedef typename Impl::Range Range;
    Range all() const { return impl.all(); }

    // Typedef for the enumeration class. An Enum may be used to examine and
    // remove table entries:
    //
    //   typedef HashSet<int> HS;
    //   HS s;
    //   for (HS::Enum e(s); !e.empty(); e.popFront())
    //     if (e.front() == 42)
    //       e.removeFront();
    //
    // Table resize may occur in Enum's destructor. Also see the definition of
    // Enum in HashTable.
    typedef typename Impl::Enum Enum;

    // Remove all entries. This does not shrink the table. For that consider
    // using the finish() method.
    void clear() { impl.clear(); }

    // Remove all the entries and release all internal buffers. The set must
    // be initialized again before any use.
    void finish() { impl.finish(); }

    // Does the table contain any entries?
    bool empty() const { return impl.empty(); }

    // Number of live elements in the set.
    uint32_t count() const { return impl.count(); }

    // Number of entry slots allocated in the dynamic table. Note: resize will
    // happen well before count() == capacity().
    size_t capacity() const { return impl.capacity(); }

    // Don't just call |impl.sizeOfExcludingThis()| because there's no
    // guarantee that |impl| is the first field in HashSet.
    size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
        return impl.sizeOfExcludingThis(mallocSizeOf);
    }
    size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
        return mallocSizeOf(this) + impl.sizeOfExcludingThis(mallocSizeOf);
    }

    // If |generation()| is the same before and after a HashSet operation,
    // pointers into the table remain valid.
    unsigned generation() const { return impl.generation(); }

    /************************************************** Shorthand operations */

    bool has(const Lookup &l) const {
        return impl.lookup(l) != nullptr;
    }

    // Add |u| if it is not present already. Return false on oom.
    template <typename U>
    bool put(U &&u) {
        AddPtr p = lookupForAdd(u);
        return p ? true : add(p, mozilla::Forward<U>(u));
    }

    // Like put, but assert that the given key is not already present.
    template <typename U>
    bool putNew(U &&u) {
        return impl.putNew(u, mozilla::Forward<U>(u));
    }

    // Like putNew(u), but performs the Lookup with |l| (caller ensures
    // match(l,u)).
    template <typename U>
    bool putNew(const Lookup &l, U &&u) {
        return impl.putNew(l, mozilla::Forward<U>(u));
    }

    // Remove if present.
    void remove(const Lookup &l) {
        if (Ptr p = lookup(l))
            remove(p);
    }

    // Infallibly rekey one entry, if necessary.
    // Requires template parameters T and HashPolicy::Lookup to be the same type.
    void rekeyIfMoved(const Lookup &old_value, const T &new_value) {
        if (old_value != new_value)
            rekeyAs(old_value, new_value, new_value);
    }

    // Infallibly rekey one entry, if present.
    void rekeyAs(const Lookup &old_lookup, const Lookup &new_lookup, const T &new_value) {
        if (Ptr p = lookup(old_lookup))
            impl.rekeyAndMaybeRehash(p, new_lookup, new_value);
    }

    // HashSet is movable
    HashSet(HashSet &&rhs) : impl(mozilla::Move(rhs.impl)) {}
    void operator=(HashSet &&rhs) {
        MOZ_ASSERT(this != &rhs, "self-move assignment is prohibited");
        impl = mozilla::Move(rhs.impl);
    }

  private:
    // HashSet is not copyable or assignable
    HashSet(const HashSet &hs) MOZ_DELETE;
    HashSet &operator=(const HashSet &hs) MOZ_DELETE;

    // Allow Impl::Enum to access the private |impl| member.
    friend class Impl::Enum;
};
michael@0 | 499 | |
michael@0 | 500 | /*****************************************************************************/ |
michael@0 | 501 | |
michael@0 | 502 | // Hash Policy |
michael@0 | 503 | // |
michael@0 | 504 | // A hash policy P for a hash table with key-type Key must provide: |
michael@0 | 505 | // - a type |P::Lookup| to use to lookup table entries; |
michael@0 | 506 | // - a static member function |P::hash| with signature |
michael@0 | 507 | // |
michael@0 | 508 | // static js::HashNumber hash(Lookup) |
michael@0 | 509 | // |
michael@0 | 510 | // to use to hash the lookup type; and |
michael@0 | 511 | // - a static member function |P::match| with signature |
michael@0 | 512 | // |
michael@0 | 513 | // static bool match(Key, Lookup) |
michael@0 | 514 | // |
michael@0 | 515 | // to use to test equality of key and lookup values. |
michael@0 | 516 | // |
michael@0 | 517 | // Normally, Lookup = Key. In general, though, different values and types of |
michael@0 | 518 | // values can be used to lookup and store. If a Lookup value |l| is != to the |
michael@0 | 519 | // added Key value |k|, the user must ensure that |P::match(k,l)|. E.g.: |
michael@0 | 520 | // |
//   js::HashSet<Key, P>::AddPtr p = h.lookupForAdd(l);
michael@0 | 522 | // if (!p) { |
michael@0 | 523 | // assert(P::match(k, l)); // must hold |
michael@0 | 524 | // h.add(p, k); |
michael@0 | 525 | // } |
michael@0 | 526 | |
michael@0 | 527 | // Pointer hashing policy that strips the lowest zeroBits when calculating the |
michael@0 | 528 | // hash to improve key distribution. |
michael@0 | 529 | template <typename Key, size_t zeroBits> |
michael@0 | 530 | struct PointerHasher |
michael@0 | 531 | { |
michael@0 | 532 | typedef Key Lookup; |
michael@0 | 533 | static HashNumber hash(const Lookup &l) { |
michael@0 | 534 | MOZ_ASSERT(!JS::IsPoisonedPtr(l)); |
michael@0 | 535 | size_t word = reinterpret_cast<size_t>(l) >> zeroBits; |
michael@0 | 536 | JS_STATIC_ASSERT(sizeof(HashNumber) == 4); |
michael@0 | 537 | #if JS_BITS_PER_WORD == 32 |
michael@0 | 538 | return HashNumber(word); |
michael@0 | 539 | #else |
michael@0 | 540 | JS_STATIC_ASSERT(sizeof word == 8); |
michael@0 | 541 | return HashNumber((word >> 32) ^ word); |
michael@0 | 542 | #endif |
michael@0 | 543 | } |
michael@0 | 544 | static bool match(const Key &k, const Lookup &l) { |
michael@0 | 545 | MOZ_ASSERT(!JS::IsPoisonedPtr(k)); |
michael@0 | 546 | MOZ_ASSERT(!JS::IsPoisonedPtr(l)); |
michael@0 | 547 | return k == l; |
michael@0 | 548 | } |
michael@0 | 549 | static void rekey(Key &k, const Key& newKey) { |
michael@0 | 550 | k = newKey; |
michael@0 | 551 | } |
michael@0 | 552 | }; |
michael@0 | 553 | |
michael@0 | 554 | // Default hash policy: just use the 'lookup' value. This of course only |
michael@0 | 555 | // works if the lookup value is integral. HashTable applies ScrambleHashCode to |
michael@0 | 556 | // the result of the 'hash' which means that it is 'ok' if the lookup value is |
michael@0 | 557 | // not well distributed over the HashNumber domain. |
michael@0 | 558 | template <class Key> |
michael@0 | 559 | struct DefaultHasher |
michael@0 | 560 | { |
michael@0 | 561 | typedef Key Lookup; |
michael@0 | 562 | static HashNumber hash(const Lookup &l) { |
michael@0 | 563 | // Hash if can implicitly cast to hash number type. |
michael@0 | 564 | return l; |
michael@0 | 565 | } |
michael@0 | 566 | static bool match(const Key &k, const Lookup &l) { |
michael@0 | 567 | // Use builtin or overloaded operator==. |
michael@0 | 568 | return k == l; |
michael@0 | 569 | } |
michael@0 | 570 | static void rekey(Key &k, const Key& newKey) { |
michael@0 | 571 | k = newKey; |
michael@0 | 572 | } |
michael@0 | 573 | }; |
michael@0 | 574 | |
// Specialize hashing policy for pointer types: delegate to PointerHasher,
// stripping the low bits that are zero for word-aligned pointers. This
// assumes that the pointed-to type is at least word-aligned; for types with
// smaller alignment use PointerHasher directly with a suitable zeroBits.
template <class T>
struct DefaultHasher<T *> : PointerHasher<T *, mozilla::tl::FloorLog2<sizeof(void *)>::value>
{};
michael@0 | 580 | |
michael@0 | 581 | // For doubles, we can xor the two uint32s. |
michael@0 | 582 | template <> |
michael@0 | 583 | struct DefaultHasher<double> |
michael@0 | 584 | { |
michael@0 | 585 | typedef double Lookup; |
michael@0 | 586 | static HashNumber hash(double d) { |
michael@0 | 587 | JS_STATIC_ASSERT(sizeof(HashNumber) == 4); |
michael@0 | 588 | uint64_t u = mozilla::BitwiseCast<uint64_t>(d); |
michael@0 | 589 | return HashNumber(u ^ (u >> 32)); |
michael@0 | 590 | } |
michael@0 | 591 | static bool match(double lhs, double rhs) { |
michael@0 | 592 | return mozilla::BitwiseCast<uint64_t>(lhs) == mozilla::BitwiseCast<uint64_t>(rhs); |
michael@0 | 593 | } |
michael@0 | 594 | }; |
michael@0 | 595 | |
michael@0 | 596 | template <> |
michael@0 | 597 | struct DefaultHasher<float> |
michael@0 | 598 | { |
michael@0 | 599 | typedef float Lookup; |
michael@0 | 600 | static HashNumber hash(float f) { |
michael@0 | 601 | JS_STATIC_ASSERT(sizeof(HashNumber) == 4); |
michael@0 | 602 | return HashNumber(mozilla::BitwiseCast<uint32_t>(f)); |
michael@0 | 603 | } |
michael@0 | 604 | static bool match(float lhs, float rhs) { |
michael@0 | 605 | return mozilla::BitwiseCast<uint32_t>(lhs) == mozilla::BitwiseCast<uint32_t>(rhs); |
michael@0 | 606 | } |
michael@0 | 607 | }; |
michael@0 | 608 | |
michael@0 | 609 | /*****************************************************************************/ |
michael@0 | 610 | |
michael@0 | 611 | // Both HashMap and HashSet are implemented by a single HashTable that is even |
michael@0 | 612 | // more heavily parameterized than the other two. This leaves HashTable gnarly |
michael@0 | 613 | // and extremely coupled to HashMap and HashSet; thus code should not use |
michael@0 | 614 | // HashTable directly. |
michael@0 | 615 | |
// The storage type for one key/value pair in a HashMap. The key is
// immutable through the public interface; only HashTable internals (the
// friends below) may mutate it in place via mutableKey(), e.g. when
// rekeying during enumeration. Entries are move-only: they live inside
// table storage and are relocated, never copied.
template <class Key, class Value>
class HashMapEntry
{
    Key key_;
    Value value_;

    template <class, class, class> friend class detail::HashTable;
    template <class> friend class detail::HashTableEntry;
    template <class, class, class, class> friend class HashMap;

    // Friend-only access for in-place rekeying.
    Key & mutableKey() { return key_; }

  public:
    // Construct from perfect-forwarded key and value inputs.
    template<typename KeyInput, typename ValueInput>
    HashMapEntry(KeyInput &&k, ValueInput &&v)
      : key_(mozilla::Forward<KeyInput>(k)),
        value_(mozilla::Forward<ValueInput>(v))
    {}

    HashMapEntry(HashMapEntry &&rhs)
      : key_(mozilla::Move(rhs.key_)),
        value_(mozilla::Move(rhs.value_))
    {}

    typedef Key KeyType;
    typedef Value ValueType;

    const Key & key() const { return key_; }
    const Value & value() const { return value_; }
    Value & value() { return value_; }

  private:
    // Not copyable: copying would duplicate keys inside the table.
    HashMapEntry(const HashMapEntry &) MOZ_DELETE;
    void operator=(const HashMapEntry &) MOZ_DELETE;
};
michael@0 | 651 | |
michael@0 | 652 | } // namespace js |
michael@0 | 653 | |
namespace mozilla {

// A HashTableEntry wraps a T plus a cached hash number, so it is POD
// exactly when T is; this lets HashTable skip per-entry construction.
template <typename T>
struct IsPod<js::detail::HashTableEntry<T> > : IsPod<T> {};

// A key/value entry is POD when both halves are POD.
template <typename K, typename V>
struct IsPod<js::HashMapEntry<K, V> >
  : IntegralConstant<bool, IsPod<K>::value && IsPod<V>::value>
{};

} // namespace mozilla
michael@0 | 665 | |
michael@0 | 666 | namespace js { |
michael@0 | 667 | |
michael@0 | 668 | namespace detail { |
michael@0 | 669 | |
michael@0 | 670 | template <class T, class HashPolicy, class AllocPolicy> |
michael@0 | 671 | class HashTable; |
michael@0 | 672 | |
// One slot of hash table storage: a cached hash number plus raw, manually
// managed storage for a T. The hash values 0 and 1 are reserved as the
// "free" and "removed" sentinels, and the low bit of a live keyHash doubles
// as the collision ("chained past this slot") flag — so a live entry's
// masked hash is always > 1 (see HashTable::prepareHash).
template <class T>
class HashTableEntry
{
    template <class, class, class> friend class HashTable;
    typedef typename mozilla::RemoveConst<T>::Type NonConstT;

    HashNumber keyHash;
    mozilla::AlignedStorage2<NonConstT> mem;

    // Reserved keyHash values; sCollisionBit is or'ed into live hashes.
    static const HashNumber sFreeKey = 0;
    static const HashNumber sRemovedKey = 1;
    static const HashNumber sCollisionBit = 1;

    static bool isLiveHash(HashNumber hash)
    {
        return hash > sRemovedKey;
    }

    // Entries are never constructed/destroyed as objects; the table
    // placement-news and manually destroys the payload instead.
    HashTableEntry(const HashTableEntry &) MOZ_DELETE;
    void operator=(const HashTableEntry &) MOZ_DELETE;
    ~HashTableEntry() MOZ_DELETE;

  public:
    // NB: HashTableEntry is treated as a POD: no constructor or destructor calls.

    // Destroy the payload only if this slot currently holds one.
    void destroyIfLive() {
        if (isLive())
            mem.addr()->~T();
    }

    void destroy() {
        MOZ_ASSERT(isLive());
        mem.addr()->~T();
    }

    void swap(HashTableEntry *other) {
        mozilla::Swap(keyHash, other->keyHash);
        mozilla::Swap(mem, other->mem);
    }

    T &get() { MOZ_ASSERT(isLive()); return *mem.addr(); }

    bool isFree() const { return keyHash == sFreeKey; }
    // clearLive() requires a live slot; clear() tolerates any state.
    void clearLive() { MOZ_ASSERT(isLive()); keyHash = sFreeKey; mem.addr()->~T(); }
    void clear() { if (isLive()) mem.addr()->~T(); keyHash = sFreeKey; }
    bool isRemoved() const { return keyHash == sRemovedKey; }
    // removeLive() leaves a tombstone so probe chains stay intact.
    void removeLive() { MOZ_ASSERT(isLive()); keyHash = sRemovedKey; mem.addr()->~T(); }
    bool isLive() const { return isLiveHash(keyHash); }
    void setCollision() { MOZ_ASSERT(isLive()); keyHash |= sCollisionBit; }
    void setCollision(HashNumber bit) { MOZ_ASSERT(isLive()); keyHash |= bit; }
    void unsetCollision() { keyHash &= ~sCollisionBit; }
    bool hasCollision() const { return keyHash & sCollisionBit; }
    // Hash comparison masks off the collision bit, which is not part of
    // the stored key's hash.
    bool matchHash(HashNumber hn) { return (keyHash & ~sCollisionBit) == hn; }
    HashNumber getKeyHash() const { return keyHash & ~sCollisionBit; }

    // Placement-construct a payload into a non-live slot. |hn| must be a
    // prepared hash (reserved values avoided, collision bit clear).
    template <class U>
    void setLive(HashNumber hn, U &&u)
    {
        MOZ_ASSERT(!isLive());
        keyHash = hn;
        new(mem.addr()) T(mozilla::Forward<U>(u));
        MOZ_ASSERT(isLive());
    }
};
michael@0 | 737 | |
michael@0 | 738 | template <class T, class HashPolicy, class AllocPolicy> |
michael@0 | 739 | class HashTable : private AllocPolicy |
michael@0 | 740 | { |
michael@0 | 741 | typedef typename mozilla::RemoveConst<T>::Type NonConstT; |
michael@0 | 742 | typedef typename HashPolicy::KeyType Key; |
michael@0 | 743 | typedef typename HashPolicy::Lookup Lookup; |
michael@0 | 744 | |
michael@0 | 745 | public: |
michael@0 | 746 | typedef HashTableEntry<T> Entry; |
michael@0 | 747 | |
michael@0 | 748 | // A nullable pointer to a hash table element. A Ptr |p| can be tested |
michael@0 | 749 | // either explicitly |if (p.found()) p->...| or using boolean conversion |
michael@0 | 750 | // |if (p) p->...|. Ptr objects must not be used after any mutating hash |
michael@0 | 751 | // table operations unless |generation()| is tested. |
michael@0 | 752 | class Ptr |
michael@0 | 753 | { |
michael@0 | 754 | friend class HashTable; |
michael@0 | 755 | typedef void (Ptr::* ConvertibleToBool)(); |
michael@0 | 756 | void nonNull() {} |
michael@0 | 757 | |
michael@0 | 758 | Entry *entry_; |
michael@0 | 759 | #ifdef DEBUG |
michael@0 | 760 | const HashTable *table_; |
michael@0 | 761 | uint32_t generation; |
michael@0 | 762 | #endif |
michael@0 | 763 | |
michael@0 | 764 | protected: |
michael@0 | 765 | Ptr(Entry &entry, const HashTable &tableArg) |
michael@0 | 766 | : entry_(&entry) |
michael@0 | 767 | #ifdef DEBUG |
michael@0 | 768 | , table_(&tableArg) |
michael@0 | 769 | , generation(tableArg.generation()) |
michael@0 | 770 | #endif |
michael@0 | 771 | {} |
michael@0 | 772 | |
michael@0 | 773 | public: |
michael@0 | 774 | // Leaves Ptr uninitialized. |
michael@0 | 775 | Ptr() { |
michael@0 | 776 | #ifdef JS_DEBUG |
michael@0 | 777 | entry_ = (Entry *)0xbad; |
michael@0 | 778 | #endif |
michael@0 | 779 | } |
michael@0 | 780 | |
michael@0 | 781 | bool found() const { |
michael@0 | 782 | MOZ_ASSERT(generation == table_->generation()); |
michael@0 | 783 | return entry_->isLive(); |
michael@0 | 784 | } |
michael@0 | 785 | |
michael@0 | 786 | operator ConvertibleToBool() const { |
michael@0 | 787 | return found() ? &Ptr::nonNull : 0; |
michael@0 | 788 | } |
michael@0 | 789 | |
michael@0 | 790 | bool operator==(const Ptr &rhs) const { |
michael@0 | 791 | MOZ_ASSERT(found() && rhs.found()); |
michael@0 | 792 | return entry_ == rhs.entry_; |
michael@0 | 793 | } |
michael@0 | 794 | |
michael@0 | 795 | bool operator!=(const Ptr &rhs) const { |
michael@0 | 796 | MOZ_ASSERT(generation == table_->generation()); |
michael@0 | 797 | return !(*this == rhs); |
michael@0 | 798 | } |
michael@0 | 799 | |
michael@0 | 800 | T &operator*() const { |
michael@0 | 801 | MOZ_ASSERT(generation == table_->generation()); |
michael@0 | 802 | return entry_->get(); |
michael@0 | 803 | } |
michael@0 | 804 | |
michael@0 | 805 | T *operator->() const { |
michael@0 | 806 | MOZ_ASSERT(generation == table_->generation()); |
michael@0 | 807 | return &entry_->get(); |
michael@0 | 808 | } |
michael@0 | 809 | }; |
michael@0 | 810 | |
    // A Ptr that can be used to add a key after a failed lookup.
    class AddPtr : public Ptr
    {
        friend class HashTable;
        // Hash of the key whose lookup failed; cached so add() need not
        // re-hash the lookup value.
        HashNumber keyHash;
        // Debug-only snapshot of the table's mutation counter, used to
        // detect invalidating operations between lookupForAdd and add.
        mozilla::DebugOnly<uint64_t> mutationCount;

        AddPtr(Entry &entry, const HashTable &tableArg, HashNumber hn)
          : Ptr(entry, tableArg), keyHash(hn), mutationCount(tableArg.mutationCount)
        {}

      public:
        // Leaves AddPtr uninitialized.
        AddPtr() {}
    };
michael@0 | 826 | |
    // A collection of hash table entries. The collection is enumerated by
    // calling |front()| followed by |popFront()| as long as |!empty()|. As
    // with Ptr/AddPtr, Range objects must not be used after any mutating hash
    // table operation unless the |generation()| is tested.
    class Range
    {
      protected:
        friend class HashTable;

        // Skip forward to the first live entry in [c, e).
        Range(const HashTable &tableArg, Entry *c, Entry *e)
          : cur(c)
          , end(e)
#ifdef DEBUG
          , table_(&tableArg)
          , mutationCount(tableArg.mutationCount)
          , generation(tableArg.generation())
          , validEntry(true)
#endif
        {
            while (cur < end && !cur->isLive())
                ++cur;
        }

        Entry *cur, *end;
#ifdef DEBUG
        const HashTable *table_;
        uint64_t mutationCount;
        uint32_t generation;
        bool validEntry;
#endif

      public:
        // NOTE(review): a default-constructed Range sets table_ to nullptr,
        // so calling empty()/front()/popFront() on it in a DEBUG build
        // dereferences null inside the asserts — confirm callers only use
        // default Ranges as assignment targets.
        Range()
          : cur(nullptr)
          , end(nullptr)
#ifdef DEBUG
          , table_(nullptr)
          , mutationCount(0)
          , generation(0)
          , validEntry(false)
#endif
        {}

        bool empty() const {
            MOZ_ASSERT(generation == table_->generation());
            MOZ_ASSERT(mutationCount == table_->mutationCount);
            return cur == end;
        }

        T &front() const {
            MOZ_ASSERT(validEntry);
            MOZ_ASSERT(!empty());
            MOZ_ASSERT(generation == table_->generation());
            MOZ_ASSERT(mutationCount == table_->mutationCount);
            return cur->get();
        }

        // Advance past the current entry to the next live one (or end).
        void popFront() {
            MOZ_ASSERT(!empty());
            MOZ_ASSERT(generation == table_->generation());
            MOZ_ASSERT(mutationCount == table_->mutationCount);
            while (++cur < end && !cur->isLive())
                continue;
#ifdef DEBUG
            validEntry = true;
#endif
        }
    };
michael@0 | 895 | |
    // A Range whose lifetime delimits a mutating enumeration of a hash table.
    // Since rehashing when elements were removed during enumeration would be
    // bad, it is postponed until the Enum is destructed. Since the Enum's
    // destructor touches the hash table, the user must ensure that the hash
    // table is still alive when the destructor runs.
    class Enum : public Range
    {
        friend class HashTable;

        HashTable &table_;
        bool rekeyed;   // any rekeyFront() happened; rehash check deferred to dtor
        bool removed;   // any removeFront() happened; compaction deferred to dtor

        /* Not copyable. */
        Enum(const Enum &) MOZ_DELETE;
        void operator=(const Enum &) MOZ_DELETE;

      public:
        template<class Map> explicit
        Enum(Map &map) : Range(map.all()), table_(map.impl), rekeyed(false), removed(false) {}

        // Removes the |front()| element from the table, leaving |front()|
        // invalid until the next call to |popFront()|. For example:
        //
        //   HashSet<int> s;
        //   for (HashSet<int>::Enum e(s); !e.empty(); e.popFront())
        //     if (e.front() == 42)
        //       e.removeFront();
        void removeFront() {
            table_.remove(*this->cur);
            removed = true;
#ifdef DEBUG
            this->validEntry = false;
            // Re-snapshot so this Enum's own asserts keep passing after
            // the mutation it performed itself.
            this->mutationCount = table_.mutationCount;
#endif
        }

        // Removes the |front()| element and re-inserts it into the table with
        // a new key at the new Lookup position. |front()| is invalid after
        // this operation until the next call to |popFront()|.
        void rekeyFront(const Lookup &l, const Key &k) {
            Ptr p(*this->cur, table_);
            table_.rekeyWithoutRehash(p, l, k);
            rekeyed = true;
#ifdef DEBUG
            this->validEntry = false;
            this->mutationCount = table_.mutationCount;
#endif
        }

        // Convenience overload for when the key is its own lookup value.
        void rekeyFront(const Key &k) {
            rekeyFront(k, k);
        }

        // Potentially rehashes the table.
        ~Enum() {
            if (rekeyed) {
                table_.gen++;
                table_.checkOverRemoved();
            }

            if (removed)
                table_.compactIfUnderloaded();
        }
    };
michael@0 | 961 | |
    // HashTable is movable. Moves are implemented with a raw PodAssign of
    // all members (safe because ownership transfers wholesale) followed by
    // nulling the source's table pointer so its destructor is a no-op.
    HashTable(HashTable &&rhs)
      : AllocPolicy(rhs)
    {
        mozilla::PodAssign(this, &rhs);
        rhs.table = nullptr;
    }
    void operator=(HashTable &&rhs) {
        MOZ_ASSERT(this != &rhs, "self-move assignment is prohibited");
        // Free our current storage before adopting rhs's.
        if (table)
            destroyTable(*this, table, capacity());
        mozilla::PodAssign(this, &rhs);
        rhs.table = nullptr;
    }
michael@0 | 976 | |
  private:
    // HashTable is not copyable or assignable
    HashTable(const HashTable &) MOZ_DELETE;
    void operator=(const HashTable &) MOZ_DELETE;

  private:
    // Core table state. |hashShift| encodes the capacity:
    // capacity == 1 << (sHashBits - hashShift) (see setTableSizeLog2).
    // |gen| is bumped whenever entry storage moves, so debug-build
    // Ptr/Range can detect stale pointers.
    uint32_t hashShift; // multiplicative hash shift
    uint32_t entryCount; // number of entries in table
    uint32_t gen; // entry storage generation number
    uint32_t removedCount; // removed entry sentinels in table
    Entry *table; // entry storage
michael@0 | 988 | |
    // Record a new capacity of 1 << sizeLog2 by storing its complementary
    // shift; hash1() uses this shift to map a hash to a bucket index.
    void setTableSizeLog2(unsigned sizeLog2)
    {
        hashShift = sHashBits - sizeLog2;
    }
michael@0 | 993 | |
#ifdef JS_DEBUG
    // Instrumentation counters, maintained via METER() below. Present (and
    // METER expands to its argument) only in JS_DEBUG builds; otherwise
    // METER compiles away entirely.
    mutable struct Stats
    {
        uint32_t searches; // total number of table searches
        uint32_t steps; // hash chain links traversed
        uint32_t hits; // searches that found key
        uint32_t misses; // searches that didn't find key
        uint32_t addOverRemoved; // adds that recycled a removed entry
        uint32_t removes; // calls to remove
        uint32_t removeFrees; // calls to remove that freed the entry
        uint32_t grows; // table expansions
        uint32_t shrinks; // table contractions
        uint32_t compresses; // table compressions
        uint32_t rehashes; // tombstone decontaminations
    } stats;
#   define METER(x) x
#else
#   define METER(x)
#endif
michael@0 | 1013 | |
    friend class mozilla::ReentrancyGuard;
    mutable mozilla::DebugOnly<bool> entered; // set by ReentrancyGuard (debug only)
    mozilla::DebugOnly<uint64_t> mutationCount; // bumped by mutating ops (debug only)

    // The default initial capacity is 32 (enough to hold 16 elements), but it
    // can be as low as 4.
    static const unsigned sMinCapacityLog2 = 2;
    static const unsigned sMinCapacity = 1 << sMinCapacityLog2;
    static const unsigned sMaxInit = JS_BIT(23); // largest length init() accepts
    static const unsigned sMaxCapacity = JS_BIT(24);
    static const unsigned sHashBits = mozilla::tl::BitSize<HashNumber>::value;

    // Hash-table alpha is conceptually a fraction, but to avoid floating-point
    // math we implement it as a ratio of integers.
    static const uint8_t sAlphaDenominator = 4;
    static const uint8_t sMinAlphaNumerator = 1; // min alpha: 1/4
    static const uint8_t sMaxAlphaNumerator = 3; // max alpha: 3/4

    // Aliases for the entry-level sentinel values (see HashTableEntry).
    static const HashNumber sFreeKey = Entry::sFreeKey;
    static const HashNumber sRemovedKey = Entry::sRemovedKey;
    static const HashNumber sCollisionBit = Entry::sCollisionBit;
michael@0 | 1035 | |
    // A hash is "live" when it is neither the free nor the removed sentinel.
    static bool isLiveHash(HashNumber hash)
    {
        return Entry::isLiveHash(hash);
    }
michael@0 | 1040 | |
    // Turn a policy-supplied hash into a stored keyHash: scramble it for
    // distribution, remap the reserved sentinel values, and clear the
    // collision bit (set separately during probing).
    static HashNumber prepareHash(const Lookup& l)
    {
        HashNumber keyHash = ScrambleHashCode(HashPolicy::hash(l));

        // Avoid reserved hash codes.
        if (!isLiveHash(keyHash))
            keyHash -= (sRemovedKey + 1);
        return keyHash & ~sCollisionBit;
    }
michael@0 | 1050 | |
    // Allocate zeroed entry storage; zero keyHash == sFreeKey means every
    // slot starts out free. Returns nullptr on allocation failure.
    static Entry *createTable(AllocPolicy &alloc, uint32_t capacity)
    {
        static_assert(sFreeKey == 0,
                      "newly-calloc'd tables have to be considered empty");
        static_assert(sMaxCapacity <= SIZE_MAX / sizeof(Entry),
                      "would overflow allocating max number of entries");
        return static_cast<Entry*>(alloc.calloc_(capacity * sizeof(Entry)));
    }
michael@0 | 1059 | |
    // Destroy every live payload in |oldTable|, then free the storage.
    static void destroyTable(AllocPolicy &alloc, Entry *oldTable, uint32_t capacity)
    {
        for (Entry *e = oldTable, *end = e + capacity; e < end; ++e)
            e->destroyIfLive();
        alloc.free_(oldTable);
    }
michael@0 | 1066 | |
michael@0 | 1067 | public: |
    // Construct an empty, uninitialized table; init() must be called (and
    // succeed) before any other operation. hashShift == sHashBits encodes
    // "no storage yet" (capacity 1 << 0).
    HashTable(AllocPolicy ap)
      : AllocPolicy(ap),
        hashShift(sHashBits),
        entryCount(0),
        gen(0),
        removedCount(0),
        table(nullptr),
        entered(false),
        mutationCount(0)
    {}
michael@0 | 1078 | |
    // Allocate entry storage sized so that |length| elements fit without
    // exceeding max alpha. Returns false (after reporting) on overflow or
    // allocation failure; must be called exactly once before use.
    MOZ_WARN_UNUSED_RESULT bool init(uint32_t length)
    {
        MOZ_ASSERT(!initialized());

        // Reject all lengths whose initial computed capacity would exceed
        // sMaxCapacity. Round that maximum length down to the nearest power
        // of two for speedier code.
        if (length > sMaxInit) {
            this->reportAllocOverflow();
            return false;
        }

        static_assert((sMaxInit * sAlphaDenominator) / sAlphaDenominator == sMaxInit,
                      "multiplication in numerator below could overflow");
        static_assert(sMaxInit * sAlphaDenominator <= UINT32_MAX - sMaxAlphaNumerator,
                      "numerator calculation below could potentially overflow");

        // Compute the smallest capacity allowing |length| elements to be
        // inserted without rehashing: ceil(length / max-alpha). (Ceiling
        // integral division: <http://stackoverflow.com/a/2745086>.)
        uint32_t newCapacity =
            (length * sAlphaDenominator + sMaxAlphaNumerator - 1) / sMaxAlphaNumerator;
        if (newCapacity < sMinCapacity)
            newCapacity = sMinCapacity;

        // Round up to the next power of two, tracking its log2 as we go.
        // FIXME: use JS_CEILING_LOG2 when PGO stops crashing (bug 543034).
        uint32_t roundUp = sMinCapacity, roundUpLog2 = sMinCapacityLog2;
        while (roundUp < newCapacity) {
            roundUp <<= 1;
            ++roundUpLog2;
        }

        newCapacity = roundUp;
        MOZ_ASSERT(newCapacity >= length);
        MOZ_ASSERT(newCapacity <= sMaxCapacity);

        table = createTable(*this, newCapacity);
        if (!table)
            return false;

        setTableSizeLog2(roundUpLog2);
        METER(memset(&stats, 0, sizeof(stats)));
        return true;
    }
michael@0 | 1123 | |
michael@0 | 1124 | bool initialized() const |
michael@0 | 1125 | { |
michael@0 | 1126 | return !!table; |
michael@0 | 1127 | } |
michael@0 | 1128 | |
    // Destroy all live payloads and release storage, if init() succeeded.
    ~HashTable()
    {
        if (table)
            destroyTable(*this, table, capacity());
    }
michael@0 | 1134 | |
michael@0 | 1135 | private: |
    // Primary bucket index: the high log2(capacity) bits of the hash.
    HashNumber hash1(HashNumber hash0) const
    {
        return hash0 >> hashShift;
    }
michael@0 | 1140 | |
    // Parameters of the double-hash probe sequence: the step |h2| (forced
    // odd so it is coprime with the power-of-two capacity, guaranteeing a
    // full-cycle probe) and the index mask.
    struct DoubleHash
    {
        HashNumber h2;
        HashNumber sizeMask;
    };

    DoubleHash hash2(HashNumber curKeyHash) const
    {
        unsigned sizeLog2 = sHashBits - hashShift;
        DoubleHash dh = {
            // Derive the step from hash bits not used by hash1; |1 makes it odd.
            ((curKeyHash << sizeLog2) >> hashShift) | 1,
            (HashNumber(1) << sizeLog2) - 1
        };
        return dh;
    }
michael@0 | 1156 | |
    // Step from bucket |h1| to the next bucket in the probe sequence,
    // wrapping within the power-of-two capacity via the mask.
    static HashNumber applyDoubleHash(HashNumber h1, const DoubleHash &dh)
    {
        return (h1 - dh.h2) & dh.sizeMask;
    }
michael@0 | 1161 | |
    // True when live entries plus tombstones reach max alpha (3/4) of
    // capacity; tombstones count because they lengthen probe chains.
    bool overloaded()
    {
        static_assert(sMaxCapacity <= UINT32_MAX / sMaxAlphaNumerator,
                      "multiplication below could overflow");
        return entryCount + removedCount >=
               capacity() * sMaxAlphaNumerator / sAlphaDenominator;
    }
michael@0 | 1169 | |
    // Would the table be underloaded if it had the given capacity and entryCount?
    // (Below min alpha, 1/4, and shrinkable — i.e. above minimum capacity.)
    static bool wouldBeUnderloaded(uint32_t capacity, uint32_t entryCount)
    {
        static_assert(sMaxCapacity <= UINT32_MAX / sMinAlphaNumerator,
                      "multiplication below could overflow");
        return capacity > sMinCapacity &&
               entryCount <= capacity * sMinAlphaNumerator / sAlphaDenominator;
    }
michael@0 | 1178 | |
    // True when this table's current load is below min alpha (see above).
    bool underloaded()
    {
        return wouldBeUnderloaded(capacity(), entryCount);
    }
michael@0 | 1183 | |
    // Full key comparison, delegated to the hash policy; called only after
    // the cheap hash-number comparison succeeds.
    static bool match(Entry &e, const Lookup &l)
    {
        return HashPolicy::match(HashPolicy::getKey(e.get()), l);
    }
michael@0 | 1188 | |
    // Core probe loop. Returns the entry matching |l|, or — on a miss — a
    // slot where it could be inserted (preferring the first tombstone seen,
    // so adds recycle removed slots). When |collisionBit| is sCollisionBit
    // (lookupForAdd), every live entry stepped over is marked as collided so
    // a later remove() knows to leave a tombstone.
    Entry &lookup(const Lookup &l, HashNumber keyHash, unsigned collisionBit) const
    {
        MOZ_ASSERT(isLiveHash(keyHash));
        MOZ_ASSERT(!(keyHash & sCollisionBit));
        MOZ_ASSERT(collisionBit == 0 || collisionBit == sCollisionBit);
        MOZ_ASSERT(table);
        METER(stats.searches++);

        // Compute the primary hash address.
        HashNumber h1 = hash1(keyHash);
        Entry *entry = &table[h1];

        // Miss: return space for a new entry.
        if (entry->isFree()) {
            METER(stats.misses++);
            return *entry;
        }

        // Hit: return entry.
        if (entry->matchHash(keyHash) && match(*entry, l)) {
            METER(stats.hits++);
            return *entry;
        }

        // Collision: double hash.
        DoubleHash dh = hash2(keyHash);

        // Save the first removed entry pointer so we can recycle later.
        Entry *firstRemoved = nullptr;

        while(true) {
            if (MOZ_UNLIKELY(entry->isRemoved())) {
                if (!firstRemoved)
                    firstRemoved = entry;
            } else {
                // Mark the stepped-over entry (no-op when collisionBit == 0).
                entry->setCollision(collisionBit);
            }

            METER(stats.steps++);
            h1 = applyDoubleHash(h1, dh);

            entry = &table[h1];
            if (entry->isFree()) {
                METER(stats.misses++);
                return firstRemoved ? *firstRemoved : *entry;
            }

            if (entry->matchHash(keyHash) && match(*entry, l)) {
                METER(stats.hits++);
                return *entry;
            }
        }
    }
michael@0 | 1242 | |
    // This is a copy of lookup hardcoded to the assumptions:
    //   1. the lookup is a lookupForAdd
    //   2. the key, whose |keyHash| has been passed is not in the table,
    //   3. no entries have been removed from the table.
    // This specialized search avoids the need for recovering lookup values
    // from entries, which allows more flexible Lookup/Key types.
    // (Used by changeTableSize, which rehashes into a fresh, tombstone-free
    // table.)
    Entry &findFreeEntry(HashNumber keyHash)
    {
        MOZ_ASSERT(!(keyHash & sCollisionBit));
        MOZ_ASSERT(table);
        METER(stats.searches++);

        // We assume 'keyHash' has already been distributed.

        // Compute the primary hash address.
        HashNumber h1 = hash1(keyHash);
        Entry *entry = &table[h1];

        // Miss: return space for a new entry.
        if (!entry->isLive()) {
            METER(stats.misses++);
            return *entry;
        }

        // Collision: double hash.
        DoubleHash dh = hash2(keyHash);

        while(true) {
            MOZ_ASSERT(!entry->isRemoved());
            // Every occupied slot we pass over becomes part of a chain.
            entry->setCollision();

            METER(stats.steps++);
            h1 = applyDoubleHash(h1, dh);

            entry = &table[h1];
            if (!entry->isLive()) {
                METER(stats.misses++);
                return *entry;
            }
        }
    }
michael@0 | 1284 | |
    enum RebuildStatus { NotOverloaded, Rehashed, RehashFailed };

    // Rehash into a table 2^deltaLog2 times the current size (deltaLog2 may
    // be 0 for a same-size compression or negative to shrink). On failure
    // the existing table is left untouched.
    RebuildStatus changeTableSize(int deltaLog2)
    {
        // Look, but don't touch, until we succeed in getting new entry store.
        Entry *oldTable = table;
        uint32_t oldCap = capacity();
        uint32_t newLog2 = sHashBits - hashShift + deltaLog2;
        uint32_t newCapacity = JS_BIT(newLog2);
        if (newCapacity > sMaxCapacity) {
            this->reportAllocOverflow();
            return RehashFailed;
        }

        Entry *newTable = createTable(*this, newCapacity);
        if (!newTable)
            return RehashFailed;

        // We can't fail from here on, so update table parameters.
        setTableSizeLog2(newLog2);
        removedCount = 0;
        gen++;
        table = newTable;

        // Copy only live entries, leaving removed ones behind.
        for (Entry *src = oldTable, *end = src + oldCap; src < end; ++src) {
            if (src->isLive()) {
                HashNumber hn = src->getKeyHash();
                findFreeEntry(hn).setLive(hn, mozilla::Move(src->get()));
                src->destroy();
            }
        }

        // All entries have been destroyed, no need to destroyTable.
        this->free_(oldTable);
        return Rehashed;
    }
michael@0 | 1322 | |
    // If the table has hit max alpha, rebuild it: same-size (shedding
    // tombstones) when removals dominate, otherwise doubled.
    RebuildStatus checkOverloaded()
    {
        if (!overloaded())
            return NotOverloaded;

        // Compress if a quarter or more of all entries are removed.
        int deltaLog2;
        if (removedCount >= (capacity() >> 2)) {
            METER(stats.compresses++);
            deltaLog2 = 0;
        } else {
            METER(stats.grows++);
            deltaLog2 = 1;
        }

        return changeTableSize(deltaLog2);
    }
michael@0 | 1340 | |
michael@0 | 1341 | // Infallibly rehash the table if we are overloaded with removals. |
michael@0 | 1342 | void checkOverRemoved() |
michael@0 | 1343 | { |
michael@0 | 1344 | if (overloaded()) { |
michael@0 | 1345 | if (checkOverloaded() == RehashFailed) |
michael@0 | 1346 | rehashTableInPlace(); |
michael@0 | 1347 | } |
michael@0 | 1348 | } |
michael@0 | 1349 | |
michael@0 | 1350 | void remove(Entry &e) |
michael@0 | 1351 | { |
michael@0 | 1352 | MOZ_ASSERT(table); |
michael@0 | 1353 | METER(stats.removes++); |
michael@0 | 1354 | |
michael@0 | 1355 | if (e.hasCollision()) { |
michael@0 | 1356 | e.removeLive(); |
michael@0 | 1357 | removedCount++; |
michael@0 | 1358 | } else { |
michael@0 | 1359 | METER(stats.removeFrees++); |
michael@0 | 1360 | e.clearLive(); |
michael@0 | 1361 | } |
michael@0 | 1362 | entryCount--; |
michael@0 | 1363 | mutationCount++; |
michael@0 | 1364 | } |
michael@0 | 1365 | |
michael@0 | 1366 | void checkUnderloaded() |
michael@0 | 1367 | { |
michael@0 | 1368 | if (underloaded()) { |
michael@0 | 1369 | METER(stats.shrinks++); |
michael@0 | 1370 | (void) changeTableSize(-1); |
michael@0 | 1371 | } |
michael@0 | 1372 | } |
michael@0 | 1373 | |
michael@0 | 1374 | // Resize the table down to the largest capacity which doesn't underload the |
michael@0 | 1375 | // table. Since we call checkUnderloaded() on every remove, you only need |
michael@0 | 1376 | // to call this after a bulk removal of items done without calling remove(). |
michael@0 | 1377 | void compactIfUnderloaded() |
michael@0 | 1378 | { |
michael@0 | 1379 | int32_t resizeLog2 = 0; |
michael@0 | 1380 | uint32_t newCapacity = capacity(); |
michael@0 | 1381 | while (wouldBeUnderloaded(newCapacity, entryCount)) { |
michael@0 | 1382 | newCapacity = newCapacity >> 1; |
michael@0 | 1383 | resizeLog2--; |
michael@0 | 1384 | } |
michael@0 | 1385 | |
michael@0 | 1386 | if (resizeLog2 != 0) { |
michael@0 | 1387 | changeTableSize(resizeLog2); |
michael@0 | 1388 | } |
michael@0 | 1389 | } |
michael@0 | 1390 | |
    // This is identical to changeTableSize(currentSize), but without requiring
    // a second table. We do this by recycling the collision bits to tell us if
    // the element is already inserted or still waiting to be inserted. Since
    // already-inserted elements win any conflicts, we get the same table as we
    // would have gotten through random insertion order.
    void rehashTableInPlace()
    {
        METER(stats.rehashes++);
        removedCount = 0;
        // Pass 1: clear every collision bit. From here on, a clear bit means
        // "not yet re-inserted" (tombstones also become plain free slots).
        for (size_t i = 0; i < capacity(); ++i)
            table[i].unsetCollision();

        // Pass 2: place each not-yet-inserted live entry on its probe chain.
        // Note the outer loop only advances |i| when the slot is skipped: a
        // swap below can drop a displaced entry back into table[i], and it
        // must be reprocessed.
        for (size_t i = 0; i < capacity();) {
            Entry *src = &table[i];

            // Skip free slots and entries already placed (collision bit set).
            if (!src->isLive() || src->hasCollision()) {
                ++i;
                continue;
            }

            HashNumber keyHash = src->getKeyHash();
            HashNumber h1 = hash1(keyHash);
            DoubleHash dh = hash2(keyHash);
            Entry *tgt = &table[h1];
            while (true) {
                // A slot without the collision bit is either free or holds an
                // entry still awaiting insertion; either way we may claim it.
                // Swapping (rather than overwriting) preserves any displaced
                // entry in |*src| for later reprocessing.
                if (!tgt->hasCollision()) {
                    src->swap(tgt);
                    tgt->setCollision();
                    break;
                }

                h1 = applyDoubleHash(h1, dh);
                tgt = &table[h1];
            }
        }

        // TODO: this algorithm leaves collision bits on *all* elements, even if
        // they are on no collision path. We have the option of setting the
        // collision bits correctly on a subsequent pass or skipping the rehash
        // unless we are totally filled with tombstones: benchmark to find out
        // which approach is best.
    }
michael@0 | 1433 | |
michael@0 | 1434 | public: |
michael@0 | 1435 | void clear() |
michael@0 | 1436 | { |
michael@0 | 1437 | if (mozilla::IsPod<Entry>::value) { |
michael@0 | 1438 | memset(table, 0, sizeof(*table) * capacity()); |
michael@0 | 1439 | } else { |
michael@0 | 1440 | uint32_t tableCapacity = capacity(); |
michael@0 | 1441 | for (Entry *e = table, *end = table + tableCapacity; e < end; ++e) |
michael@0 | 1442 | e->clear(); |
michael@0 | 1443 | } |
michael@0 | 1444 | removedCount = 0; |
michael@0 | 1445 | entryCount = 0; |
michael@0 | 1446 | mutationCount++; |
michael@0 | 1447 | } |
michael@0 | 1448 | |
michael@0 | 1449 | void finish() |
michael@0 | 1450 | { |
michael@0 | 1451 | MOZ_ASSERT(!entered); |
michael@0 | 1452 | |
michael@0 | 1453 | if (!table) |
michael@0 | 1454 | return; |
michael@0 | 1455 | |
michael@0 | 1456 | destroyTable(*this, table, capacity()); |
michael@0 | 1457 | table = nullptr; |
michael@0 | 1458 | gen++; |
michael@0 | 1459 | entryCount = 0; |
michael@0 | 1460 | removedCount = 0; |
michael@0 | 1461 | mutationCount++; |
michael@0 | 1462 | } |
michael@0 | 1463 | |
    // Return a Range covering the whole entry store. The table must be
    // initialized.
    Range all() const
    {
        MOZ_ASSERT(table);
        return Range(*this, table, table + capacity());
    }
michael@0 | 1469 | |
    // True if the table holds no live entries.
    bool empty() const
    {
        MOZ_ASSERT(table);
        return !entryCount;
    }
michael@0 | 1475 | |
    // Number of live entries in the table.
    uint32_t count() const
    {
        MOZ_ASSERT(table);
        return entryCount;
    }
michael@0 | 1481 | |
michael@0 | 1482 | uint32_t capacity() const |
michael@0 | 1483 | { |
michael@0 | 1484 | MOZ_ASSERT(table); |
michael@0 | 1485 | return JS_BIT(sHashBits - hashShift); |
michael@0 | 1486 | } |
michael@0 | 1487 | |
    // Counter bumped whenever pointers into the entry store may have been
    // invalidated (resize, finish); used for DEBUG staleness checks on
    // Ptr/AddPtr (see add()).
    uint32_t generation() const
    {
        MOZ_ASSERT(table);
        return gen;
    }
michael@0 | 1493 | |
    // Measure the malloc'd entry store, excluding the HashTable object itself.
    size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const
    {
        return mallocSizeOf(table);
    }
michael@0 | 1498 | |
    // Measure the HashTable object plus its entry store. Only meaningful if
    // |this| itself is heap-allocated.
    size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const
    {
        return mallocSizeOf(this) + sizeOfExcludingThis(mallocSizeOf);
    }
michael@0 | 1503 | |
    // Look up |l|; the returned Ptr is valid only until the next table
    // mutation (enforced in DEBUG builds).
    Ptr lookup(const Lookup &l) const
    {
        mozilla::ReentrancyGuard g(*this);
        HashNumber keyHash = prepareHash(l);
        return Ptr(lookup(l, keyHash, 0), *this);
    }
michael@0 | 1510 | |
    // Identical to lookup() except it omits the ReentrancyGuard, the only
    // per-lookup state that guard would mutate. Per the name, intended for
    // concurrent readers when no thread is mutating the table.
    Ptr readonlyThreadsafeLookup(const Lookup &l) const
    {
        HashNumber keyHash = prepareHash(l);
        return Ptr(lookup(l, keyHash, 0), *this);
    }
michael@0 | 1516 | |
michael@0 | 1517 | AddPtr lookupForAdd(const Lookup &l) const |
michael@0 | 1518 | { |
michael@0 | 1519 | mozilla::ReentrancyGuard g(*this); |
michael@0 | 1520 | HashNumber keyHash = prepareHash(l); |
michael@0 | 1521 | Entry &entry = lookup(l, keyHash, sCollisionBit); |
michael@0 | 1522 | AddPtr p(entry, *this, keyHash); |
michael@0 | 1523 | return p; |
michael@0 | 1524 | } |
michael@0 | 1525 | |
    // Insert |u| at the not-found position |p| obtained from lookupForAdd().
    // Returns false on OOM during a needed rebuild, in which case the table
    // is unchanged. |p| must not already be found().
    template <class U>
    bool add(AddPtr &p, U &&u)
    {
        mozilla::ReentrancyGuard g(*this);
        MOZ_ASSERT(table);
        MOZ_ASSERT(!p.found());
        MOZ_ASSERT(!(p.keyHash & sCollisionBit));

        // Changing an entry from removed to live does not affect whether we
        // are overloaded and can be handled separately.
        if (p.entry_->isRemoved()) {
            METER(stats.addOverRemoved++);
            removedCount--;
            // Keep the tombstone's collision bit set so probe chains running
            // through this slot remain intact.
            p.keyHash |= sCollisionBit;
        } else {
            // Preserve the validity of |p.entry_|.
            RebuildStatus status = checkOverloaded();
            if (status == RehashFailed)
                return false;
            // A rebuild moved all entries, so re-locate the free slot.
            if (status == Rehashed)
                p.entry_ = &findFreeEntry(p.keyHash);
        }

        p.entry_->setLive(p.keyHash, mozilla::Forward<U>(u));
        entryCount++;
        mutationCount++;
#ifdef DEBUG
        // Refresh |p|'s staleness markers so it stays usable after this add.
        p.generation = generation();
        p.mutationCount = mutationCount;
#endif
        return true;
    }
michael@0 | 1558 | |
    // Note: |l| may be a reference to a piece of |u|, so this function
    // must take care not to use |l| after moving |u|.
    //
    // Insert |u| assuming the key is not already present and a free slot
    // exists (callers must have ensured capacity, e.g. via putNew()).
    template <class U>
    void putNewInfallible(const Lookup &l, U &&u)
    {
        MOZ_ASSERT(table);

        HashNumber keyHash = prepareHash(l);
        Entry *entry = &findFreeEntry(keyHash);

        // Reusing a tombstone: keep its collision bit set so probe chains
        // running through this slot remain intact.
        if (entry->isRemoved()) {
            METER(stats.addOverRemoved++);
            removedCount--;
            keyHash |= sCollisionBit;
        }

        entry->setLive(keyHash, mozilla::Forward<U>(u));
        entryCount++;
        mutationCount++;
    }
michael@0 | 1579 | |
    // Note: |l| may be a reference to a piece of |u|, so this function
    // must take care not to use |l| after moving |u|.
    //
    // Fallible insert of a key assumed not to be present: make room first,
    // then delegate to putNewInfallible(). Returns false on OOM.
    template <class U>
    bool putNew(const Lookup &l, U &&u)
    {
        if (checkOverloaded() == RehashFailed)
            return false;

        putNewInfallible(l, mozilla::Forward<U>(u));
        return true;
    }
michael@0 | 1591 | |
    // Note: |l| may be a reference to a piece of |u|, so this function
    // must take care not to use |l| after moving |u|.
    //
    // Revalidate a possibly-stale AddPtr |p| by repeating the lookup of |l|,
    // then add |u| if the key is still absent. Returns true if the key is
    // present afterwards (found, or successfully added).
    template <class U>
    bool relookupOrAdd(AddPtr& p, const Lookup &l, U &&u)
    {
#ifdef DEBUG
        // Refresh staleness markers before the relookup so the asserts in
        // found()/add() accept |p|.
        p.generation = generation();
        p.mutationCount = mutationCount;
#endif
        {
            mozilla::ReentrancyGuard g(*this);
            MOZ_ASSERT(prepareHash(l) == p.keyHash); // l has not been destroyed
            p.entry_ = &lookup(l, p.keyHash, sCollisionBit);
        }
        return p.found() || add(p, mozilla::Forward<U>(u));
    }
michael@0 | 1608 | |
    // Remove the entry |p| points at; |p| must be found(). May shrink the
    // table afterwards, invalidating existing pointers into it.
    void remove(Ptr p)
    {
        MOZ_ASSERT(table);
        mozilla::ReentrancyGuard g(*this);
        MOZ_ASSERT(p.found());
        remove(*p.entry_);
        checkUnderloaded();
    }
michael@0 | 1617 | |
    // Change the key of the entry at |p| to |k| (with lookup |l|): move the
    // value out, set the new key, remove the old slot, and re-insert at the
    // new key's position. Removing then re-adding cannot overload the table,
    // so the insertion is infallible. Does not purge tombstones; use
    // rekeyAndMaybeRehash() for that.
    void rekeyWithoutRehash(Ptr p, const Lookup &l, const Key &k)
    {
        MOZ_ASSERT(table);
        mozilla::ReentrancyGuard g(*this);
        MOZ_ASSERT(p.found());
        typename HashTableEntry<T>::NonConstT t(mozilla::Move(*p));
        HashPolicy::setKey(t, const_cast<Key &>(k));
        remove(*p.entry_);
        putNewInfallible(l, mozilla::Move(t));
    }
michael@0 | 1628 | |
    // Like rekeyWithoutRehash(), but additionally rehashes in place if the
    // table has accumulated too many removed entries.
    void rekeyAndMaybeRehash(Ptr p, const Lookup &l, const Key &k)
    {
        rekeyWithoutRehash(p, l, k);
        checkOverRemoved();
    }
michael@0 | 1634 | |
michael@0 | 1635 | #undef METER |
michael@0 | 1636 | }; |
michael@0 | 1637 | |
michael@0 | 1638 | } // namespace detail |
michael@0 | 1639 | } // namespace js |
michael@0 | 1640 | |
michael@0 | 1641 | #endif /* js_HashTable_h */ |