Wed, 31 Dec 2014 06:09:35 +0100
Cloned upstream origin tor-browser at tor-browser-31.3.0esr-4.5-1-build1,
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f, for hacking purposes.
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "builtin/MapObject.h"

#include "mozilla/Move.h"

#include "jscntxt.h"
#include "jsiter.h"
#include "jsobj.h"

#include "gc/Marking.h"
#include "js/Utility.h"
#include "vm/GlobalObject.h"
#include "vm/Interpreter.h"

#include "jsobjinlines.h"

using namespace js;

using mozilla::NumberEqualsInt32;
using mozilla::Forward;
using mozilla::IsNaN;
using mozilla::Move;
using mozilla::ArrayLength;
using JS::DoubleNaNValue;
using JS::ForOfIterator;


/*** OrderedHashTable ****************************************************************************/

/*
 * Define two collection templates, js::OrderedHashMap and js::OrderedHashSet.
 * They are like js::HashMap and js::HashSet except that:
 *
 * - Iterating over an Ordered hash table visits the entries in the order in
 *   which they were inserted. This means that unlike a HashMap, the behavior
 *   of an OrderedHashMap is deterministic (as long as the HashPolicy methods
 *   are effect-free and consistent); the hashing is a pure performance
 *   optimization.
 *
 * - Range objects over Ordered tables remain valid even when entries are
 *   added or removed or the table is resized. (However in the case of
 *   removing entries, note the warning on class Range below.)
 *
 * - The API is a little different, so it's not a drop-in replacement.
 *   In particular, the hash policy is a little different.
 *   Also, the Ordered templates lack the Ptr and AddPtr types.
 *
 * Hash policies
 *
 * See the comment about "Hash policy" in HashTable.h for general features that
 * hash policy classes must provide. Hash policies for OrderedHashMaps and Sets
 * must additionally provide a distinguished "empty" key value and the
 * following static member functions:
 *     bool isEmpty(const Key &);
 *     void makeEmpty(Key *);
 */
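
/*
 * Illustrative sketch (not part of the original file): a minimal hash policy
 * satisfying the requirements above, assuming plain int keys and reserving
 * INT32_MIN as the distinguished "empty" value:
 *
 *     struct IntOrderedHashPolicy
 *     {
 *         typedef int Lookup;
 *         static HashNumber hash(const Lookup &l) { return HashNumber(l); }
 *         static bool match(const int &k, const Lookup &l) { return k == l; }
 *         static bool isEmpty(const int &k) { return k == INT32_MIN; }
 *         static void makeEmpty(int *kp) { *kp = INT32_MIN; }
 *     };
 */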

namespace js {

namespace detail {

/*
 * detail::OrderedHashTable is the underlying data structure used to implement both
 * OrderedHashMap and OrderedHashSet. Programs should use one of those two
 * templates rather than OrderedHashTable.
 */
template <class T, class Ops, class AllocPolicy>
class OrderedHashTable
{
  public:
    typedef typename Ops::KeyType Key;
    typedef typename Ops::Lookup Lookup;

    struct Data
    {
        T element;
        Data *chain;

        Data(const T &e, Data *c) : element(e), chain(c) {}
        Data(T &&e, Data *c) : element(Move(e)), chain(c) {}
    };

    class Range;
    friend class Range;

  private:
    Data **hashTable;       // hash table (has hashBuckets() elements)
    Data *data;             // data vector, an array of Data objects
                            // data[0:dataLength] are constructed
    uint32_t dataLength;    // number of constructed elements in data
    uint32_t dataCapacity;  // size of data, in elements
    uint32_t liveCount;     // dataLength less empty (removed) entries
    uint32_t hashShift;     // multiplicative hash shift
    Range *ranges;          // list of all live Ranges on this table
    AllocPolicy alloc;

  public:
    OrderedHashTable(AllocPolicy &ap)
        : hashTable(nullptr), data(nullptr), dataLength(0), ranges(nullptr), alloc(ap) {}

    bool init() {
        MOZ_ASSERT(!hashTable, "init must be called at most once");

        uint32_t buckets = initialBuckets();
        Data **tableAlloc = static_cast<Data **>(alloc.malloc_(buckets * sizeof(Data *)));
        if (!tableAlloc)
            return false;
        for (uint32_t i = 0; i < buckets; i++)
            tableAlloc[i] = nullptr;

        uint32_t capacity = uint32_t(buckets * fillFactor());
        Data *dataAlloc = static_cast<Data *>(alloc.malloc_(capacity * sizeof(Data)));
        if (!dataAlloc) {
            alloc.free_(tableAlloc);
            return false;
        }

        // clear() requires that members are assigned only after all allocation
        // has succeeded, and that this->ranges is left untouched.
        hashTable = tableAlloc;
        data = dataAlloc;
        dataLength = 0;
        dataCapacity = capacity;
        liveCount = 0;
        hashShift = HashNumberSizeBits - initialBucketsLog2();
        MOZ_ASSERT(hashBuckets() == buckets);
        return true;
    }

    ~OrderedHashTable() {
        for (Range *r = ranges, *next; r; r = next) {
            next = r->next;
            r->onTableDestroyed();
        }
        alloc.free_(hashTable);
        freeData(data, dataLength);
    }

    /* Return the number of elements in the table. */
    uint32_t count() const { return liveCount; }

    /* True if any element matches l. */
    bool has(const Lookup &l) const {
        return lookup(l) != nullptr;
    }

    /* Return a pointer to the element, if any, that matches l, or nullptr. */
    T *get(const Lookup &l) {
        Data *e = lookup(l, prepareHash(l));
        return e ? &e->element : nullptr;
    }

    /* Return a pointer to the element, if any, that matches l, or nullptr. */
    const T *get(const Lookup &l) const {
        return const_cast<OrderedHashTable *>(this)->get(l);
    }

    /*
     * If the table already contains an entry that matches |element|,
     * replace that entry with |element|. Otherwise add a new entry.
     *
     * On success, return true, whether there was already a matching element or
     * not. On allocation failure, return false. If this returns false, it
     * means the element was not added to the table.
     */
    template <typename ElementInput>
    bool put(ElementInput &&element) {
        HashNumber h = prepareHash(Ops::getKey(element));
        if (Data *e = lookup(Ops::getKey(element), h)) {
            e->element = Forward<ElementInput>(element);
            return true;
        }

        if (dataLength == dataCapacity) {
            // If the hashTable is more than 1/4 deleted data, simply rehash in
            // place to free up some space. Otherwise, grow the table.
            uint32_t newHashShift = liveCount >= dataCapacity * 0.75 ? hashShift - 1 : hashShift;
            if (!rehash(newHashShift))
                return false;
        }

        h >>= hashShift;
        liveCount++;
        Data *e = &data[dataLength++];
        new (e) Data(Forward<ElementInput>(element), hashTable[h]);
        hashTable[h] = e;
        return true;
    }

    /*
     * If the table contains an element matching l, remove it and set *foundp
     * to true. Otherwise set *foundp to false.
     *
     * Return true on success, false if we tried to shrink the table and hit an
     * allocation failure. Even if this returns false, *foundp is set correctly
     * and the matching element was removed. Shrinking is an optimization and
     * it's OK for it to fail.
     */
    bool remove(const Lookup &l, bool *foundp) {
        // Note: This could be optimized so that removing the last entry,
        // data[dataLength - 1], decrements dataLength. LIFO use cases would
        // benefit.

        // If a matching entry exists, empty it.
        Data *e = lookup(l, prepareHash(l));
        if (e == nullptr) {
            *foundp = false;
            return true;
        }

        *foundp = true;
        liveCount--;
        Ops::makeEmpty(&e->element);

        // Update active Ranges.
        uint32_t pos = e - data;
        for (Range *r = ranges; r; r = r->next)
            r->onRemove(pos);

        // If many entries have been removed, try to shrink the table.
        if (hashBuckets() > initialBuckets() && liveCount < dataLength * minDataFill()) {
            if (!rehash(hashShift + 1))
                return false;
        }
        return true;
    }

    /*
     * Remove all entries.
     *
     * Returns false on OOM, leaving the OrderedHashTable and any live Ranges
     * in the old state.
     *
     * The effect on live Ranges is the same as removing all entries; in
     * particular, those Ranges are still live and will see any entries added
     * after a successful clear().
     */
    bool clear() {
        if (dataLength != 0) {
            Data **oldHashTable = hashTable;
            Data *oldData = data;
            uint32_t oldDataLength = dataLength;

            hashTable = nullptr;
            if (!init()) {
                // init() only mutates members on success; see comment above.
                hashTable = oldHashTable;
                return false;
            }

            alloc.free_(oldHashTable);
            freeData(oldData, oldDataLength);
            for (Range *r = ranges; r; r = r->next)
                r->onClear();
        }

        MOZ_ASSERT(hashTable);
        MOZ_ASSERT(data);
        MOZ_ASSERT(dataLength == 0);
        MOZ_ASSERT(liveCount == 0);
        return true;
    }

    /*
     * Ranges are used to iterate over OrderedHashTables.
     *
     * Suppose 'Map' is some instance of OrderedHashMap, and 'map' is a Map.
     * Then you can walk all the key-value pairs like this:
     *
     *     for (Map::Range r = map.all(); !r.empty(); r.popFront()) {
     *         Map::Entry &pair = r.front();
     *         ... do something with pair ...
     *     }
     *
     * Ranges remain valid for the lifetime of the OrderedHashTable, even if
     * entries are added or removed or the table is resized. Don't do anything
     * to a Range, except destroy it, after the OrderedHashTable has been
     * destroyed. (We support destroying the two objects in either order to
     * humor the GC, bless its nondeterministic heart.)
     *
     * Warning: The behavior when the current front() entry is removed from the
     * table is subtly different from js::HashTable<>::Enum::removeFront()!
     * HashTable::Enum doesn't skip any entries when you removeFront() and then
     * popFront(). OrderedHashTable::Range does! (This is useful for using a
     * Range to implement JS Map.prototype.iterator.)
     *
     * The workaround is to call popFront() as soon as possible,
     * before there's any possibility of modifying the table:
     *
     *     for (Map::Range r = map.all(); !r.empty(); ) {
     *         Key key = r.front().key;         // this won't modify map
     *         Value val = r.front().value;     // this won't modify map
     *         r.popFront();
     *         // ...do things that might modify map...
     *     }
     */
    class Range
    {
        friend class OrderedHashTable;

        OrderedHashTable &ht;

        /* The index of front() within ht.data. */
        uint32_t i;

        /*
         * The number of nonempty entries in ht.data to the left of front().
         * This is used when the table is resized or compacted.
         */
        uint32_t count;

        /*
         * Links in the doubly-linked list of active Ranges on ht.
         *
         * prevp points to the previous Range's .next field;
         *     or to ht.ranges if this is the first Range in the list.
         * next points to the next Range;
         *     or nullptr if this is the last Range in the list.
         *
         * Invariant: *prevp == this.
         */
        Range **prevp;
        Range *next;

        /*
         * Create a Range over all the entries in ht.
         * (This is private on purpose. End users must use ht.all().)
         */
        Range(OrderedHashTable &ht) : ht(ht), i(0), count(0), prevp(&ht.ranges), next(ht.ranges) {
            *prevp = this;
            if (next)
                next->prevp = &next;
            seek();
        }

      public:
        Range(const Range &other)
            : ht(other.ht), i(other.i), count(other.count), prevp(&ht.ranges), next(ht.ranges)
        {
            *prevp = this;
            if (next)
                next->prevp = &next;
        }

        ~Range() {
            *prevp = next;
            if (next)
                next->prevp = prevp;
        }

      private:
        // Prohibit copy assignment.
        Range &operator=(const Range &other) MOZ_DELETE;

        void seek() {
            while (i < ht.dataLength && Ops::isEmpty(Ops::getKey(ht.data[i].element)))
                i++;
        }

        /*
         * The hash table calls this when an entry is removed.
         * j is the index of the removed entry.
         */
        void onRemove(uint32_t j) {
            MOZ_ASSERT(valid());
            if (j < i)
                count--;
            if (j == i)
                seek();
        }

        /*
         * The hash table calls this when the table is resized or compacted.
         * Since |count| is the number of nonempty entries to the left of
         * front(), discarding the empty entries will not affect count, and it
         * will make i and count equal.
         */
        void onCompact() {
            MOZ_ASSERT(valid());
            i = count;
        }

        /* The hash table calls this when cleared. */
        void onClear() {
            MOZ_ASSERT(valid());
            i = count = 0;
        }

        bool valid() const {
            return next != this;
        }

        void onTableDestroyed() {
            MOZ_ASSERT(valid());
            prevp = &next;
            next = this;
        }

      public:
        bool empty() const {
            MOZ_ASSERT(valid());
            return i >= ht.dataLength;
        }

        /*
         * Return the first element in the range. This must not be called if
         * this->empty().
         *
         * Warning: Removing an entry from the table also removes it from any
         * live Ranges, and a Range can become empty that way, rendering
         * front() invalid. If in doubt, check empty() before calling front().
         */
        T &front() {
            MOZ_ASSERT(valid());
            MOZ_ASSERT(!empty());
            return ht.data[i].element;
        }

        /*
         * Remove the first element from this range.
         * This must not be called if this->empty().
         *
         * Warning: Removing an entry from the table also removes it from any
         * live Ranges, and a Range can become empty that way, rendering
         * popFront() invalid. If in doubt, check empty() before calling
         * popFront().
         */
        void popFront() {
            MOZ_ASSERT(valid());
            MOZ_ASSERT(!empty());
            MOZ_ASSERT(!Ops::isEmpty(Ops::getKey(ht.data[i].element)));
            count++;
            i++;
            seek();
        }

        /*
         * Change the key of the front entry.
         *
         * This calls Ops::hash on both the current key and the new key.
         * Ops::hash on the current key must return the same hash code as
         * when the entry was added to the table.
         */
        void rekeyFront(const Key &k) {
            MOZ_ASSERT(valid());
            Data &entry = ht.data[i];
            HashNumber oldHash = prepareHash(Ops::getKey(entry.element)) >> ht.hashShift;
            HashNumber newHash = prepareHash(k) >> ht.hashShift;
            Ops::setKey(entry.element, k);
            if (newHash != oldHash) {
                // Remove this entry from its old hash chain. (If this crashes
                // reading nullptr, it would mean we did not find this entry on
                // the hash chain where we expected it. That probably means the
                // key's hash code changed since it was inserted, breaking the
                // hash code invariant.)
                Data **ep = &ht.hashTable[oldHash];
                while (*ep != &entry)
                    ep = &(*ep)->chain;
                *ep = entry.chain;

                // Add it to the new hash chain. We could just insert it at the
                // beginning of the chain. Instead, we do a bit of work to
                // preserve the invariant that hash chains always go in reverse
                // insertion order (descending memory order). No code currently
                // depends on this invariant, so it's fine to kill it if
                // needed.
                ep = &ht.hashTable[newHash];
                while (*ep && *ep > &entry)
                    ep = &(*ep)->chain;
                entry.chain = *ep;
                *ep = &entry;
            }
        }
    };

    Range all() { return Range(*this); }

    /*
     * Change the key of the given entry.
     *
     * This calls Ops::hash on both the current key and the new key.
     * Ops::hash on the current key must return the same hash code as
     * when the entry was added to the table.
     */
    void rekeyOneEntry(const Key &current, const Key &newKey, const T &element) {
        if (current == newKey)
            return;

        Data *entry = lookup(current, prepareHash(current));
        if (!entry)
            return;

        HashNumber oldHash = prepareHash(current) >> hashShift;
        HashNumber newHash = prepareHash(newKey) >> hashShift;

        entry->element = element;

        // Remove this entry from its old hash chain. (If this crashes
        // reading nullptr, it would mean we did not find this entry on
        // the hash chain where we expected it. That probably means the
        // key's hash code changed since it was inserted, breaking the
        // hash code invariant.)
        Data **ep = &hashTable[oldHash];
        while (*ep != entry)
            ep = &(*ep)->chain;
        *ep = entry->chain;

        // Add it to the new hash chain. We could just insert it at the
        // beginning of the chain. Instead, we do a bit of work to
        // preserve the invariant that hash chains always go in reverse
        // insertion order (descending memory order). No code currently
        // depends on this invariant, so it's fine to kill it if
        // needed.
        ep = &hashTable[newHash];
        while (*ep && *ep > entry)
            ep = &(*ep)->chain;
        entry->chain = *ep;
        *ep = entry;
    }

  private:
    /* Logarithm base 2 of the number of buckets in the hash table initially. */
    static uint32_t initialBucketsLog2() { return 1; }
    static uint32_t initialBuckets() { return 1 << initialBucketsLog2(); }

    /*
     * The maximum load factor (mean number of entries per bucket).
     * It is an invariant that
     *     dataCapacity == floor(hashBuckets() * fillFactor()).
     *
     * The fill factor should be between 2 and 4, and it should be chosen so that
     * the fill factor times sizeof(Data) is close to but <= a power of 2.
     * This fixed fill factor was chosen to make the size of the data
     * array, in bytes, close to a power of two when sizeof(T) is 16.
     */
    static double fillFactor() { return 8.0 / 3.0; }
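
    // Worked example (illustrative): with initialBuckets() == 2 and
    // fillFactor() == 8/3, the initial dataCapacity is
    // uint32_t(2 * 8.0 / 3.0) == 5. Each doubling of the buckets then yields
    // capacities of 10, 21, 42, ..., keeping the mean hash chain length at
    // most 8/3 entries when the data vector is full.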

    /*
     * The minimum permitted value of (liveCount / dataLength).
     * If that ratio drops below this value, we shrink the table.
     */
    static double minDataFill() { return 0.25; }

    static HashNumber prepareHash(const Lookup &l) {
        return ScrambleHashCode(Ops::hash(l));
    }

    /* The size of hashTable, in elements. Always a power of two. */
    uint32_t hashBuckets() const {
        return 1 << (HashNumberSizeBits - hashShift);
    }

    static void destroyData(Data *data, uint32_t length) {
        for (Data *p = data + length; p != data; )
            (--p)->~Data();
    }

    void freeData(Data *data, uint32_t length) {
        destroyData(data, length);
        alloc.free_(data);
    }

    Data *lookup(const Lookup &l, HashNumber h) {
        for (Data *e = hashTable[h >> hashShift]; e; e = e->chain) {
            if (Ops::match(Ops::getKey(e->element), l))
                return e;
        }
        return nullptr;
    }

    const Data *lookup(const Lookup &l) const {
        return const_cast<OrderedHashTable *>(this)->lookup(l, prepareHash(l));
    }

    /* This is called after rehashing the table. */
    void compacted() {
        // If we had any empty entries, compacting may have moved live entries
        // to the left within |data|. Notify all live Ranges of the change.
        for (Range *r = ranges; r; r = r->next)
            r->onCompact();
    }

    /* Compact the entries in |data| and rehash them. */
    void rehashInPlace() {
        for (uint32_t i = 0, N = hashBuckets(); i < N; i++)
            hashTable[i] = nullptr;
        Data *wp = data, *end = data + dataLength;
        for (Data *rp = data; rp != end; rp++) {
            if (!Ops::isEmpty(Ops::getKey(rp->element))) {
                HashNumber h = prepareHash(Ops::getKey(rp->element)) >> hashShift;
                if (rp != wp)
                    wp->element = Move(rp->element);
                wp->chain = hashTable[h];
                hashTable[h] = wp;
                wp++;
            }
        }
        MOZ_ASSERT(wp == data + liveCount);

        while (wp != end)
            (--end)->~Data();
        dataLength = liveCount;
        compacted();
    }

    /*
     * Grow, shrink, or compact both |hashTable| and |data|.
     *
     * On success, this returns true, dataLength == liveCount, and there are no
     * empty elements in data[0:dataLength]. On allocation failure, this
     * leaves everything as it was and returns false.
     */
    bool rehash(uint32_t newHashShift) {
        // If the size of the table is not changing, rehash in place to avoid
        // allocating memory.
        if (newHashShift == hashShift) {
            rehashInPlace();
            return true;
        }

        size_t newHashBuckets = 1 << (HashNumberSizeBits - newHashShift);
        Data **newHashTable = static_cast<Data **>(alloc.malloc_(newHashBuckets * sizeof(Data *)));
        if (!newHashTable)
            return false;
        for (uint32_t i = 0; i < newHashBuckets; i++)
            newHashTable[i] = nullptr;

        uint32_t newCapacity = uint32_t(newHashBuckets * fillFactor());
        Data *newData = static_cast<Data *>(alloc.malloc_(newCapacity * sizeof(Data)));
        if (!newData) {
            alloc.free_(newHashTable);
            return false;
        }

        Data *wp = newData;
        for (Data *p = data, *end = data + dataLength; p != end; p++) {
            if (!Ops::isEmpty(Ops::getKey(p->element))) {
                HashNumber h = prepareHash(Ops::getKey(p->element)) >> newHashShift;
                new (wp) Data(Move(p->element), newHashTable[h]);
                newHashTable[h] = wp;
                wp++;
            }
        }
        MOZ_ASSERT(wp == newData + liveCount);

        alloc.free_(hashTable);
        freeData(data, dataLength);

        hashTable = newHashTable;
        data = newData;
        dataLength = liveCount;
        dataCapacity = newCapacity;
        hashShift = newHashShift;
        MOZ_ASSERT(hashBuckets() == newHashBuckets);

        compacted();
        return true;
    }

    // Not copyable.
    OrderedHashTable &operator=(const OrderedHashTable &) MOZ_DELETE;
    OrderedHashTable(const OrderedHashTable &) MOZ_DELETE;
};

} // namespace detail

template <class Key, class Value, class OrderedHashPolicy, class AllocPolicy>
class OrderedHashMap
{
  public:
    class Entry
    {
        template <class, class, class> friend class detail::OrderedHashTable;
        void operator=(const Entry &rhs) {
            const_cast<Key &>(key) = rhs.key;
            value = rhs.value;
        }

        void operator=(Entry &&rhs) {
            MOZ_ASSERT(this != &rhs, "self-move assignment is prohibited");
            const_cast<Key &>(key) = Move(rhs.key);
            value = Move(rhs.value);
        }

      public:
        Entry() : key(), value() {}
        Entry(const Key &k, const Value &v) : key(k), value(v) {}
        Entry(Entry &&rhs) : key(Move(rhs.key)), value(Move(rhs.value)) {}

        const Key key;
        Value value;
    };

  private:
    struct MapOps : OrderedHashPolicy
    {
        typedef Key KeyType;
        static void makeEmpty(Entry *e) {
            OrderedHashPolicy::makeEmpty(const_cast<Key *>(&e->key));

            // Clear the value. Destroying it is another possibility, but that
            // would complicate class Entry considerably.
            e->value = Value();
        }
        static const Key &getKey(const Entry &e) { return e.key; }
        static void setKey(Entry &e, const Key &k) { const_cast<Key &>(e.key) = k; }
    };

    typedef detail::OrderedHashTable<Entry, MapOps, AllocPolicy> Impl;
    Impl impl;

  public:
    typedef typename Impl::Range Range;

    OrderedHashMap(AllocPolicy ap = AllocPolicy()) : impl(ap) {}
    bool init() { return impl.init(); }
    uint32_t count() const { return impl.count(); }
    bool has(const Key &key) const { return impl.has(key); }
    Range all() { return impl.all(); }
    const Entry *get(const Key &key) const { return impl.get(key); }
    Entry *get(const Key &key) { return impl.get(key); }
    bool put(const Key &key, const Value &value) { return impl.put(Entry(key, value)); }
    bool remove(const Key &key, bool *foundp) { return impl.remove(key, foundp); }
    bool clear() { return impl.clear(); }

    void rekeyOneEntry(const Key &current, const Key &newKey) {
        const Entry *e = get(current);
        if (!e)
            return;
        return impl.rekeyOneEntry(current, newKey, Entry(newKey, e->value));
    }
};

template <class T, class OrderedHashPolicy, class AllocPolicy>
class OrderedHashSet
{
  private:
    struct SetOps : OrderedHashPolicy
    {
        typedef const T KeyType;
        static const T &getKey(const T &v) { return v; }
        static void setKey(const T &e, const T &v) { const_cast<T &>(e) = v; }
    };

    typedef detail::OrderedHashTable<T, SetOps, AllocPolicy> Impl;
    Impl impl;

  public:
    typedef typename Impl::Range Range;

    OrderedHashSet(AllocPolicy ap = AllocPolicy()) : impl(ap) {}
    bool init() { return impl.init(); }
    uint32_t count() const { return impl.count(); }
    bool has(const T &value) const { return impl.has(value); }
    Range all() { return impl.all(); }
    bool put(const T &value) { return impl.put(value); }
    bool remove(const T &value, bool *foundp) { return impl.remove(value, foundp); }
    bool clear() { return impl.clear(); }

    void rekeyOneEntry(const T &current, const T &newKey) {
        return impl.rekeyOneEntry(current, newKey, newKey);
    }
};
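
/*
 * Illustrative usage sketch (not part of the original file), assuming the
 * hypothetical IntOrderedHashPolicy from the example above and SpiderMonkey's
 * TempAllocPolicy:
 *
 *     typedef OrderedHashSet<int, IntOrderedHashPolicy, TempAllocPolicy> IntSet;
 *     IntSet set(TempAllocPolicy(cx));
 *     if (!set.init() || !set.put(3) || !set.put(1) || !set.put(2))
 *         return false;   // OOM; report as appropriate
 *     for (IntSet::Range r = set.all(); !r.empty(); r.popFront())
 *         use(r.front()); // visits 3, 1, 2: insertion order, not hash order
 */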

} // namespace js


/*** HashableValue *******************************************************************************/

bool
HashableValue::setValue(JSContext *cx, HandleValue v)
{
    if (v.isString()) {
        // Atomize so that hash() and operator==() are fast and infallible.
        JSString *str = AtomizeString(cx, v.toString(), DoNotInternAtom);
        if (!str)
            return false;
        value = StringValue(str);
    } else if (v.isDouble()) {
        double d = v.toDouble();
        int32_t i;
        if (NumberEqualsInt32(d, &i)) {
            // Normalize int32_t-valued doubles to int32_t for faster hashing and testing.
            value = Int32Value(i);
        } else if (IsNaN(d)) {
            // NaNs with different bits must hash and test identically.
            value = DoubleNaNValue();
        } else {
            value = v;
        }
    } else {
        value = v;
    }

    JS_ASSERT(value.isUndefined() || value.isNull() || value.isBoolean() ||
              value.isNumber() || value.isString() || value.isObject());
    return true;
}
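
// Example of the normalization above (illustrative): after setValue(), the
// double 1.0 is stored as Int32Value(1), so it hashes and compares equal to
// an int32 1; likewise every NaN bit pattern is stored as the canonical
// DoubleNaNValue(), so all NaNs behave as a single Map/Set key.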

HashNumber
HashableValue::hash() const
{
    // HashableValue::setValue normalizes values so that the SameValue relation
    // on HashableValues is the same as the == relationship on
    // value.data.asBits.
    return value.asRawBits();
}

bool
HashableValue::operator==(const HashableValue &other) const
{
    // Two HashableValues are equal if they have equal bits.
    bool b = (value.asRawBits() == other.value.asRawBits());

#ifdef DEBUG
    bool same;
    JS_ASSERT(SameValue(nullptr, value, other.value, &same));
    JS_ASSERT(same == b);
#endif
    return b;
}

HashableValue
HashableValue::mark(JSTracer *trc) const
{
    HashableValue hv(*this);
    trc->setTracingLocation((void *)this);
    gc::MarkValue(trc, &hv.value, "key");
    return hv;
}


/*** MapIterator *********************************************************************************/

namespace {

class MapIteratorObject : public JSObject
{
  public:
    static const Class class_;

    enum { TargetSlot, KindSlot, RangeSlot, SlotCount };
    static const JSFunctionSpec methods[];
    static MapIteratorObject *create(JSContext *cx, HandleObject mapobj, ValueMap *data,
                                     MapObject::IteratorKind kind);
    static void finalize(FreeOp *fop, JSObject *obj);

  private:
    static inline bool is(HandleValue v);
    inline ValueMap::Range *range();
    inline MapObject::IteratorKind kind() const;
    static bool next_impl(JSContext *cx, CallArgs args);
    static bool next(JSContext *cx, unsigned argc, Value *vp);
};

} /* anonymous namespace */

const Class MapIteratorObject::class_ = {
    "Map Iterator",
    JSCLASS_IMPLEMENTS_BARRIERS |
    JSCLASS_HAS_RESERVED_SLOTS(MapIteratorObject::SlotCount),
    JS_PropertyStub,         /* addProperty */
    JS_DeletePropertyStub,   /* delProperty */
    JS_PropertyStub,         /* getProperty */
    JS_StrictPropertyStub,   /* setProperty */
    JS_EnumerateStub,
    JS_ResolveStub,
    JS_ConvertStub,
    MapIteratorObject::finalize
};

const JSFunctionSpec MapIteratorObject::methods[] = {
    JS_SELF_HOSTED_FN("@@iterator", "IteratorIdentity", 0, 0),
    JS_FN("next", next, 0, 0),
    JS_FS_END
};

inline ValueMap::Range *
MapIteratorObject::range()
{
    return static_cast<ValueMap::Range *>(getSlot(RangeSlot).toPrivate());
}

inline MapObject::IteratorKind
MapIteratorObject::kind() const
{
    int32_t i = getSlot(KindSlot).toInt32();
    JS_ASSERT(i == MapObject::Keys || i == MapObject::Values || i == MapObject::Entries);
    return MapObject::IteratorKind(i);
}

bool
GlobalObject::initMapIteratorProto(JSContext *cx, Handle<GlobalObject *> global)
{
    JSObject *base = GlobalObject::getOrCreateIteratorPrototype(cx, global);
    if (!base)
        return false;
    Rooted<JSObject*> proto(cx,
        NewObjectWithGivenProto(cx, &MapIteratorObject::class_, base, global));
    if (!proto)
        return false;
    proto->setSlot(MapIteratorObject::RangeSlot, PrivateValue(nullptr));
    if (!JS_DefineFunctions(cx, proto, MapIteratorObject::methods))
        return false;
    global->setReservedSlot(MAP_ITERATOR_PROTO, ObjectValue(*proto));
    return true;
}

MapIteratorObject *
MapIteratorObject::create(JSContext *cx, HandleObject mapobj, ValueMap *data,
                          MapObject::IteratorKind kind)
{
    Rooted<GlobalObject *> global(cx, &mapobj->global());
    Rooted<JSObject*> proto(cx, GlobalObject::getOrCreateMapIteratorPrototype(cx, global));
    if (!proto)
        return nullptr;

    ValueMap::Range *range = cx->new_<ValueMap::Range>(data->all());
    if (!range)
        return nullptr;

    JSObject *iterobj = NewObjectWithGivenProto(cx, &class_, proto, global);
    if (!iterobj) {
        js_delete(range);
        return nullptr;
    }
    iterobj->setSlot(TargetSlot, ObjectValue(*mapobj));
    iterobj->setSlot(KindSlot, Int32Value(int32_t(kind)));
    iterobj->setSlot(RangeSlot, PrivateValue(range));
    return static_cast<MapIteratorObject *>(iterobj);
}

void
MapIteratorObject::finalize(FreeOp *fop, JSObject *obj)
{
    fop->delete_(obj->as<MapIteratorObject>().range());
}

bool
MapIteratorObject::is(HandleValue v)
{
    return v.isObject() && v.toObject().hasClass(&class_);
}

bool
MapIteratorObject::next_impl(JSContext *cx, CallArgs args)
{
    MapIteratorObject &thisobj = args.thisv().toObject().as<MapIteratorObject>();
    ValueMap::Range *range = thisobj.range();
    RootedValue value(cx);
    bool done;

    if (!range || range->empty()) {
        js_delete(range);
        thisobj.setReservedSlot(RangeSlot, PrivateValue(nullptr));
        value.setUndefined();
        done = true;
    } else {
        switch (thisobj.kind()) {
          case MapObject::Keys:
            value = range->front().key.get();
            break;

          case MapObject::Values:
            value = range->front().value;
            break;

          case MapObject::Entries: {
            JS::AutoValueArray<2> pair(cx);
            pair[0].set(range->front().key.get());
            pair[1].set(range->front().value);

            JSObject *pairobj = NewDenseCopiedArray(cx, pair.length(), pair.begin());
            if (!pairobj)
                return false;
            value.setObject(*pairobj);
            break;
          }
        }
        range->popFront();
        done = false;
    }

    RootedObject result(cx, CreateItrResultObject(cx, value, done));
    if (!result)
        return false;
    args.rval().setObject(*result);

    return true;
}

bool
MapIteratorObject::next(JSContext *cx, unsigned argc, Value *vp)
{
    CallArgs args = CallArgsFromVp(argc, vp);
    return CallNonGenericMethod(cx, is, next_impl, args);
}


/*** Map *****************************************************************************************/

const Class MapObject::class_ = {
    "Map",
    JSCLASS_HAS_PRIVATE | JSCLASS_IMPLEMENTS_BARRIERS |
    JSCLASS_HAS_CACHED_PROTO(JSProto_Map),
    JS_PropertyStub,         // addProperty
    JS_DeletePropertyStub,   // delProperty
    JS_PropertyStub,         // getProperty
    JS_StrictPropertyStub,   // setProperty
    JS_EnumerateStub,
    JS_ResolveStub,
    JS_ConvertStub,
    finalize,
    nullptr,                 // call
    nullptr,                 // hasInstance
    nullptr,                 // construct
    mark
};

const JSPropertySpec MapObject::properties[] = {
    JS_PSG("size", size, 0),
    JS_PS_END
};

const JSFunctionSpec MapObject::methods[] = {
    JS_FN("get", get, 1, 0),
    JS_FN("has", has, 1, 0),
    JS_FN("set", set, 2, 0),
    JS_FN("delete", delete_, 1, 0),
    JS_FN("keys", keys, 0, 0),
    JS_FN("values", values, 0, 0),
    JS_FN("clear", clear, 0, 0),
    JS_SELF_HOSTED_FN("forEach", "MapForEach", 2, 0),
    JS_FS_END
};

static JSObject *
InitClass(JSContext *cx, Handle<GlobalObject*> global, const Class *clasp, JSProtoKey key, Native construct,
          const JSPropertySpec *properties, const JSFunctionSpec *methods)
{
    Rooted<JSObject*> proto(cx, global->createBlankPrototype(cx, clasp));
    if (!proto)
        return nullptr;
    proto->setPrivate(nullptr);

    Rooted<JSFunction*> ctor(cx, global->createConstructor(cx, construct, ClassName(key, cx), 0));
    if (!ctor ||
        !LinkConstructorAndPrototype(cx, ctor, proto) ||
        !DefinePropertiesAndBrand(cx, proto, properties, methods) ||
        !GlobalObject::initBuiltinConstructor(cx, global, key, ctor, proto))
    {
        return nullptr;
    }
    return proto;
}

JSObject *
MapObject::initClass(JSContext *cx, JSObject *obj)
{
    Rooted<GlobalObject*> global(cx, &obj->as<GlobalObject>());
    RootedObject proto(cx,
        InitClass(cx, global, &class_, JSProto_Map, construct, properties, methods));
    if (proto) {
        // Define the "entries" method.
        JSFunction *fun = JS_DefineFunction(cx, proto, "entries", entries, 0, 0);
        if (!fun)
            return nullptr;

        // Define its alias.
        RootedValue funval(cx, ObjectValue(*fun));
        if (!JS_DefineProperty(cx, proto, js_std_iterator_str, funval, 0))
            return nullptr;
    }
    return proto;
}

template <class Range>
static void
MarkKey(Range &r, const HashableValue &key, JSTracer *trc)
{
    HashableValue newKey = key.mark(trc);

    if (newKey.get() != key.get()) {
        // The hash function only uses the bits of the Value, so it is safe to
        // rekey even when the object or string has been modified by the GC.
        r.rekeyFront(newKey);
    }
}

void
MapObject::mark(JSTracer *trc, JSObject *obj)
{
    if (ValueMap *map = obj->as<MapObject>().getData()) {
        for (ValueMap::Range r = map->all(); !r.empty(); r.popFront()) {
            MarkKey(r, r.front().key, trc);
            gc::MarkValue(trc, &r.front().value, "value");
        }
    }
}

#ifdef JSGC_GENERATIONAL
struct UnbarrieredHashPolicy {
    typedef Value Lookup;
    static HashNumber hash(const Lookup &v) { return v.asRawBits(); }
    static bool match(const Value &k, const Lookup &l) { return k == l; }
    static bool isEmpty(const Value &v) { return v.isMagic(JS_HASH_KEY_EMPTY); }
    static void makeEmpty(Value *vp) { vp->setMagic(JS_HASH_KEY_EMPTY); }
};

template <typename TableType>
class OrderedHashTableRef : public gc::BufferableRef
{
    TableType *table;
    Value key;

  public:
    explicit OrderedHashTableRef(TableType *t, const Value &k) : table(t), key(k) {}

    void mark(JSTracer *trc) {
        JS_ASSERT(UnbarrieredHashPolicy::hash(key) ==
                  HashableValue::Hasher::hash(*reinterpret_cast<HashableValue*>(&key)));
        Value prior = key;
        gc::MarkValueUnbarriered(trc, &key, "ordered hash table key");
        table->rekeyOneEntry(prior, key);
    }
};
#endif

static void
WriteBarrierPost(JSRuntime *rt, ValueMap *map, const HashableValue &key)
{
#ifdef JSGC_GENERATIONAL
    typedef OrderedHashMap<Value, Value, UnbarrieredHashPolicy, RuntimeAllocPolicy> UnbarrieredMap;
    rt->gcStoreBuffer.putGeneric(OrderedHashTableRef<UnbarrieredMap>(
        reinterpret_cast<UnbarrieredMap *>(map), key.get()));
#endif
}

static void
WriteBarrierPost(JSRuntime *rt, ValueSet *set, const HashableValue &key)
{
#ifdef JSGC_GENERATIONAL
    typedef OrderedHashSet<Value, UnbarrieredHashPolicy, RuntimeAllocPolicy> UnbarrieredSet;
    rt->gcStoreBuffer.putGeneric(OrderedHashTableRef<UnbarrieredSet>(
        reinterpret_cast<UnbarrieredSet *>(set), key.get()));
#endif
}

void
MapObject::finalize(FreeOp *fop, JSObject *obj)
{
    if (ValueMap *map = obj->as<MapObject>().getData())
        fop->delete_(map);
}

bool
MapObject::construct(JSContext *cx, unsigned argc, Value *vp)
{
    Rooted<JSObject*> obj(cx, NewBuiltinClassInstance(cx, &class_));
    if (!obj)
        return false;

    ValueMap *map = cx->new_<ValueMap>(cx->runtime());
    if (!map)
        return false;
    if (!map->init()) {
        js_ReportOutOfMemory(cx);
        return false;
    }
    obj->setPrivate(map);

    CallArgs args = CallArgsFromVp(argc, vp);
    if (args.hasDefined(0)) {
        ForOfIterator iter(cx);
        if (!iter.init(args[0]))
            return false;
        RootedValue pairVal(cx);
        RootedObject pairObj(cx);
        while (true) {
            bool done;
            if (!iter.next(&pairVal, &done))
                return false;
            if (done)
                break;
            if (!pairVal.isObject()) {
                JS_ReportErrorNumber(cx, js_GetErrorMessage, nullptr, JSMSG_INVALID_MAP_ITERABLE);
                return false;
            }

            pairObj = &pairVal.toObject();
            if (!pairObj)
                return false;

            RootedValue key(cx);
            if (!JSObject::getElement(cx, pairObj, pairObj, 0, &key))
                return false;

            AutoHashableValueRooter hkey(cx);
            if (!hkey.setValue(cx, key))
                return false;

            RootedValue val(cx);
            if (!JSObject::getElement(cx, pairObj, pairObj, 1, &val))
                return false;

            RelocatableValue rval(val);
            if (!map->put(hkey, rval)) {
                js_ReportOutOfMemory(cx);
                return false;
            }
            WriteBarrierPost(cx->runtime(), map, hkey);
        }
    }

    args.rval().setObject(*obj);
    return true;
}

bool
MapObject::is(HandleValue v)
{
    return v.isObject() && v.toObject().hasClass(&class_) && v.toObject().getPrivate();
}

#define ARG0_KEY(cx, args, key)                                               \
    AutoHashableValueRooter key(cx);                                          \
    if (args.length() > 0 && !key.setValue(cx, args[0]))                      \
        return false
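
// Illustrative expansion (not part of the original file): within a method
// body, ARG0_KEY(cx, args, key) declares a rooted |key| and initializes it
// from args[0] when present, returning false from the enclosing method on
// failure:
//
//     AutoHashableValueRooter key(cx);
//     if (args.length() > 0 && !key.setValue(cx, args[0]))
//         return false;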

ValueMap &
MapObject::extract(CallReceiver call)
{
    JS_ASSERT(call.thisv().isObject());
    JS_ASSERT(call.thisv().toObject().hasClass(&MapObject::class_));
    return *call.thisv().toObject().as<MapObject>().getData();
}

bool
MapObject::size_impl(JSContext *cx, CallArgs args)
{
    JS_ASSERT(MapObject::is(args.thisv()));

    ValueMap &map = extract(args);
    JS_STATIC_ASSERT(sizeof map.count() <= sizeof(uint32_t));
    args.rval().setNumber(map.count());
    return true;
}

bool
MapObject::size(JSContext *cx, unsigned argc, Value *vp)
{
    CallArgs args = CallArgsFromVp(argc, vp);
    return CallNonGenericMethod<MapObject::is, MapObject::size_impl>(cx, args);
}

bool
MapObject::get_impl(JSContext *cx, CallArgs args)
{
    JS_ASSERT(MapObject::is(args.thisv()));

    ValueMap &map = extract(args);
    ARG0_KEY(cx, args, key);

    if (ValueMap::Entry *p = map.get(key))
        args.rval().set(p->value);
    else
        args.rval().setUndefined();
    return true;
}

bool
MapObject::get(JSContext *cx, unsigned argc, Value *vp)
{
    CallArgs args = CallArgsFromVp(argc, vp);
    return CallNonGenericMethod<MapObject::is, MapObject::get_impl>(cx, args);
}

bool
MapObject::has_impl(JSContext *cx, CallArgs args)
{
    JS_ASSERT(MapObject::is(args.thisv()));

    ValueMap &map = extract(args);
    ARG0_KEY(cx, args, key);
    args.rval().setBoolean(map.has(key));
    return true;
}

bool
MapObject::has(JSContext *cx, unsigned argc, Value *vp)
{
    CallArgs args = CallArgsFromVp(argc, vp);
    return CallNonGenericMethod<MapObject::is, MapObject::has_impl>(cx, args);
}

bool
MapObject::set_impl(JSContext *cx, CallArgs args)
{
    JS_ASSERT(MapObject::is(args.thisv()));

    ValueMap &map = extract(args);
    ARG0_KEY(cx, args, key);
    RelocatableValue rval(args.get(1));
    if (!map.put(key, rval)) {
        js_ReportOutOfMemory(cx);
        return false;
    }
    WriteBarrierPost(cx->runtime(), &map, key);
    args.rval().setUndefined();
    return true;
}

bool
MapObject::set(JSContext *cx, unsigned argc, Value *vp)
{
    CallArgs args = CallArgsFromVp(argc, vp);
    return CallNonGenericMethod<MapObject::is, MapObject::set_impl>(cx, args);
}

bool
MapObject::delete_impl(JSContext *cx, CallArgs args)
{
    // MapObject::mark does not mark deleted entries. Incremental GC therefore
    // requires that no RelocatableValue objects pointing to heap values be
    // left alive in the ValueMap.
    //
    // OrderedHashMap::remove() doesn't destroy the removed entry. It merely
    // calls OrderedHashMap::MapOps::makeEmpty. But that is sufficient, because
    // makeEmpty clears the value by doing e->value = Value(), and in the case
    // of a ValueMap, Value() means RelocatableValue(), which is the same as
    // RelocatableValue(UndefinedValue()).
    JS_ASSERT(MapObject::is(args.thisv()));

    ValueMap &map = extract(args);
    ARG0_KEY(cx, args, key);
    bool found;
    if (!map.remove(key, &found)) {
        js_ReportOutOfMemory(cx);
        return false;
    }
    args.rval().setBoolean(found);
    return true;
}

bool
MapObject::delete_(JSContext *cx, unsigned argc, Value *vp)
{
    CallArgs args = CallArgsFromVp(argc, vp);
    return CallNonGenericMethod<MapObject::is, MapObject::delete_impl>(cx, args);
}

bool
MapObject::iterator_impl(JSContext *cx, CallArgs args, IteratorKind kind)
{
    Rooted<MapObject*> mapobj(cx, &args.thisv().toObject().as<MapObject>());
    ValueMap &map = *mapobj->getData();
    Rooted<JSObject*> iterobj(cx, MapIteratorObject::create(cx, mapobj, &map, kind));
    if (!iterobj)
        return false;
    args.rval().setObject(*iterobj);
    return true;
}

bool
MapObject::keys_impl(JSContext *cx, CallArgs args)
{
    return iterator_impl(cx, args, Keys);
}

bool
MapObject::keys(JSContext *cx, unsigned argc, Value *vp)
{
    CallArgs args = CallArgsFromVp(argc, vp);
    return CallNonGenericMethod(cx, is, keys_impl, args);
}

bool
MapObject::values_impl(JSContext *cx, CallArgs args)
{
    return iterator_impl(cx, args, Values);
}

bool
MapObject::values(JSContext *cx, unsigned argc, Value *vp)
{
    CallArgs args = CallArgsFromVp(argc, vp);
    return CallNonGenericMethod(cx, is, values_impl, args);
}

bool
MapObject::entries_impl(JSContext *cx, CallArgs args)
{
    return iterator_impl(cx, args, Entries);
}

bool
MapObject::entries(JSContext *cx, unsigned argc, Value *vp)
{
    CallArgs args = CallArgsFromVp(argc, vp);
    return CallNonGenericMethod(cx, is, entries_impl, args);
}

bool
MapObject::clear_impl(JSContext *cx, CallArgs args)
{
    Rooted<MapObject*> mapobj(cx, &args.thisv().toObject().as<MapObject>());
    if (!mapobj->getData()->clear()) {
        js_ReportOutOfMemory(cx);
        return false;
    }
    args.rval().setUndefined();
    return true;
}

bool
MapObject::clear(JSContext *cx, unsigned argc, Value *vp)
{
    CallArgs args = CallArgsFromVp(argc, vp);
    return CallNonGenericMethod(cx, is, clear_impl, args);
}

JSObject *
js_InitMapClass(JSContext *cx, HandleObject obj)
{
    return MapObject::initClass(cx, obj);
}


/*** SetIterator *********************************************************************************/

namespace {

class SetIteratorObject : public JSObject
{
  public:
    static const Class class_;

    enum { TargetSlot, KindSlot, RangeSlot, SlotCount };
    static const JSFunctionSpec methods[];
    static SetIteratorObject *create(JSContext *cx, HandleObject setobj, ValueSet *data,
                                     SetObject::IteratorKind kind);
    static void finalize(FreeOp *fop, JSObject *obj);

  private:
    static inline bool is(HandleValue v);
    inline ValueSet::Range *range();
    inline SetObject::IteratorKind kind() const;
    static bool next_impl(JSContext *cx, CallArgs args);
    static bool next(JSContext *cx, unsigned argc, Value *vp);
};

} /* anonymous namespace */

const Class SetIteratorObject::class_ = {
    "Set Iterator",
    JSCLASS_IMPLEMENTS_BARRIERS |
    JSCLASS_HAS_RESERVED_SLOTS(SetIteratorObject::SlotCount),
    JS_PropertyStub,         /* addProperty */
    JS_DeletePropertyStub,   /* delProperty */
    JS_PropertyStub,         /* getProperty */
    JS_StrictPropertyStub,   /* setProperty */
    JS_EnumerateStub,
    JS_ResolveStub,
    JS_ConvertStub,
    SetIteratorObject::finalize
};

const JSFunctionSpec SetIteratorObject::methods[] = {
    JS_SELF_HOSTED_FN("@@iterator", "IteratorIdentity", 0, 0),
    JS_FN("next", next, 0, 0),
    JS_FS_END
};

inline ValueSet::Range *
SetIteratorObject::range()
{
    return static_cast<ValueSet::Range *>(getSlot(RangeSlot).toPrivate());
}

inline SetObject::IteratorKind
SetIteratorObject::kind() const
{
    int32_t i = getSlot(KindSlot).toInt32();
    JS_ASSERT(i == SetObject::Values || i == SetObject::Entries);
    return SetObject::IteratorKind(i);
}

bool
GlobalObject::initSetIteratorProto(JSContext *cx, Handle<GlobalObject*> global)
{
    JSObject *base = GlobalObject::getOrCreateIteratorPrototype(cx, global);
    if (!base)
        return false;
    RootedObject proto(cx, NewObjectWithGivenProto(cx, &SetIteratorObject::class_, base, global));
    if (!proto)
        return false;
    proto->setSlot(SetIteratorObject::RangeSlot, PrivateValue(nullptr));
    if (!JS_DefineFunctions(cx, proto, SetIteratorObject::methods))
        return false;
    global->setReservedSlot(SET_ITERATOR_PROTO, ObjectValue(*proto));
    return true;
}

SetIteratorObject *
SetIteratorObject::create(JSContext *cx, HandleObject setobj, ValueSet *data,
                          SetObject::IteratorKind kind)
{
    Rooted<GlobalObject *> global(cx, &setobj->global());
    Rooted<JSObject*> proto(cx, GlobalObject::getOrCreateSetIteratorPrototype(cx, global));
    if (!proto)
        return nullptr;

    ValueSet::Range *range = cx->new_<ValueSet::Range>(data->all());
    if (!range)
        return nullptr;

    JSObject *iterobj = NewObjectWithGivenProto(cx, &class_, proto, global);
    if (!iterobj) {
        js_delete(range);
        return nullptr;
    }
    iterobj->setSlot(TargetSlot, ObjectValue(*setobj));
    iterobj->setSlot(KindSlot, Int32Value(int32_t(kind)));
    iterobj->setSlot(RangeSlot, PrivateValue(range));
    return static_cast<SetIteratorObject *>(iterobj);
}

void
SetIteratorObject::finalize(FreeOp *fop, JSObject *obj)
{
    fop->delete_(obj->as<SetIteratorObject>().range());
}

bool
SetIteratorObject::is(HandleValue v)
{
    return v.isObject() && v.toObject().is<SetIteratorObject>();
}

bool
SetIteratorObject::next_impl(JSContext *cx, CallArgs args)
{
    SetIteratorObject &thisobj = args.thisv().toObject().as<SetIteratorObject>();
    ValueSet::Range *range = thisobj.range();
    RootedValue value(cx);
    bool done;

    if (!range || range->empty()) {
        js_delete(range);
        thisobj.setReservedSlot(RangeSlot, PrivateValue(nullptr));
        value.setUndefined();
        done = true;
    } else {
        switch (thisobj.kind()) {
          case SetObject::Values:
            value = range->front().get();
            break;

          case SetObject::Entries: {
            JS::AutoValueArray<2> pair(cx);
            pair[0].set(range->front().get());
            pair[1].set(range->front().get());

            JSObject *pairObj = NewDenseCopiedArray(cx, 2, pair.begin());
            if (!pairObj)
                return false;
            value.setObject(*pairObj);
            break;
          }
        }
        range->popFront();
        done = false;
    }

    RootedObject result(cx, CreateItrResultObject(cx, value, done));
    if (!result)
        return false;
    args.rval().setObject(*result);

    return true;
}

bool
SetIteratorObject::next(JSContext *cx, unsigned argc, Value *vp)
{
    CallArgs args = CallArgsFromVp(argc, vp);
    return CallNonGenericMethod(cx, is, next_impl, args);
}


/*** Set *****************************************************************************************/

const Class SetObject::class_ = {
    "Set",
    JSCLASS_HAS_PRIVATE | JSCLASS_IMPLEMENTS_BARRIERS |
    JSCLASS_HAS_CACHED_PROTO(JSProto_Set),
    JS_PropertyStub,         // addProperty
    JS_DeletePropertyStub,   // delProperty
    JS_PropertyStub,         // getProperty
    JS_StrictPropertyStub,   // setProperty
    JS_EnumerateStub,
    JS_ResolveStub,
    JS_ConvertStub,
    finalize,
    nullptr,                 // call
    nullptr,                 // hasInstance
    nullptr,                 // construct
    mark
};

const JSPropertySpec SetObject::properties[] = {
    JS_PSG("size", size, 0),
    JS_PS_END
};

const JSFunctionSpec SetObject::methods[] = {
    JS_FN("has", has, 1, 0),
    JS_FN("add", add, 1, 0),
    JS_FN("delete", delete_, 1, 0),
    JS_FN("entries", entries, 0, 0),
    JS_FN("clear", clear, 0, 0),
    JS_SELF_HOSTED_FN("forEach", "SetForEach", 2, 0),
    JS_FS_END
};

JSObject *
SetObject::initClass(JSContext *cx, JSObject *obj)
{
    Rooted<GlobalObject*> global(cx, &obj->as<GlobalObject>());
    RootedObject proto(cx,
        InitClass(cx, global, &class_, JSProto_Set, construct, properties, methods));
    if (proto) {
        // Define the "values" method.
        JSFunction *fun = JS_DefineFunction(cx, proto, "values", values, 0, 0);
        if (!fun)
            return nullptr;

        // Define its aliases.
        RootedValue funval(cx, ObjectValue(*fun));
        if (!JS_DefineProperty(cx, proto, "keys", funval, 0))
            return nullptr;
        if (!JS_DefineProperty(cx, proto, js_std_iterator_str, funval, 0))
            return nullptr;
    }
    return proto;
}

void
SetObject::mark(JSTracer *trc, JSObject *obj)
{
    SetObject *setobj = static_cast<SetObject *>(obj);
    if (ValueSet *set = setobj->getData()) {
        for (ValueSet::Range r = set->all(); !r.empty(); r.popFront())
            MarkKey(r, r.front(), trc);
    }
}

void
SetObject::finalize(FreeOp *fop, JSObject *obj)
{
    SetObject *setobj = static_cast<SetObject *>(obj);
    if (ValueSet *set = setobj->getData())
        fop->delete_(set);
}

bool
SetObject::construct(JSContext *cx, unsigned argc, Value *vp)
{
    Rooted<JSObject*> obj(cx, NewBuiltinClassInstance(cx, &class_));
    if (!obj)
        return false;

    ValueSet *set = cx->new_<ValueSet>(cx->runtime());
    if (!set)
        return false;
    if (!set->init()) {
        js_ReportOutOfMemory(cx);
        return false;
    }
    obj->setPrivate(set);

    CallArgs args = CallArgsFromVp(argc, vp);
    if (args.hasDefined(0)) {
        RootedValue keyVal(cx);
        ForOfIterator iter(cx);
        if (!iter.init(args[0]))
            return false;
        AutoHashableValueRooter key(cx);
        while (true) {
            bool done;
            if (!iter.next(&keyVal, &done))
                return false;
            if (done)
                break;
            if (!key.setValue(cx, keyVal))
                return false;
            if (!set->put(key)) {
                js_ReportOutOfMemory(cx);
                return false;
            }
            WriteBarrierPost(cx->runtime(), set, key);
        }
    }

    args.rval().setObject(*obj);
    return true;
}

bool
SetObject::is(HandleValue v)
{
    return v.isObject() && v.toObject().hasClass(&class_) && v.toObject().getPrivate();
}

ValueSet &
SetObject::extract(CallReceiver call)
{
    JS_ASSERT(call.thisv().isObject());
    JS_ASSERT(call.thisv().toObject().hasClass(&SetObject::class_));
    return *static_cast<SetObject&>(call.thisv().toObject()).getData();
}

bool
SetObject::size_impl(JSContext *cx, CallArgs args)
{
    JS_ASSERT(is(args.thisv()));

    ValueSet &set = extract(args);
    JS_STATIC_ASSERT(sizeof set.count() <= sizeof(uint32_t));
    args.rval().setNumber(set.count());
    return true;
}

bool
SetObject::size(JSContext *cx, unsigned argc, Value *vp)
{
    CallArgs args = CallArgsFromVp(argc, vp);
    return CallNonGenericMethod<SetObject::is, SetObject::size_impl>(cx, args);
}

bool
SetObject::has_impl(JSContext *cx, CallArgs args)
{
    JS_ASSERT(is(args.thisv()));

    ValueSet &set = extract(args);
    ARG0_KEY(cx, args, key);
    args.rval().setBoolean(set.has(key));
    return true;
}

bool
SetObject::has(JSContext *cx, unsigned argc, Value *vp)
{
    CallArgs args = CallArgsFromVp(argc, vp);
    return CallNonGenericMethod<SetObject::is, SetObject::has_impl>(cx, args);
}

bool
SetObject::add_impl(JSContext *cx, CallArgs args)
{
    JS_ASSERT(is(args.thisv()));

    ValueSet &set = extract(args);
    ARG0_KEY(cx, args, key);
    if (!set.put(key)) {
        js_ReportOutOfMemory(cx);
        return false;
    }
    WriteBarrierPost(cx->runtime(), &set, key);
    args.rval().setUndefined();
    return true;
}

bool
SetObject::add(JSContext *cx, unsigned argc, Value *vp)
{
    CallArgs args = CallArgsFromVp(argc, vp);
    return CallNonGenericMethod<SetObject::is, SetObject::add_impl>(cx, args);
}

bool
SetObject::delete_impl(JSContext *cx, CallArgs args)
{
    JS_ASSERT(is(args.thisv()));

    ValueSet &set = extract(args);
    ARG0_KEY(cx, args, key);
    bool found;
    if (!set.remove(key, &found)) {
        js_ReportOutOfMemory(cx);
        return false;
    }
    args.rval().setBoolean(found);
    return true;
}

bool
SetObject::delete_(JSContext *cx, unsigned argc, Value *vp)
{
    CallArgs args = CallArgsFromVp(argc, vp);
    return CallNonGenericMethod<SetObject::is, SetObject::delete_impl>(cx, args);
}

bool
SetObject::iterator_impl(JSContext *cx, CallArgs args, IteratorKind kind)
{
    Rooted<SetObject*> setobj(cx, &args.thisv().toObject().as<SetObject>());
    ValueSet &set = *setobj->getData();
    Rooted<JSObject*> iterobj(cx, SetIteratorObject::create(cx, setobj, &set, kind));
    if (!iterobj)
        return false;
    args.rval().setObject(*iterobj);
    return true;
}

bool
SetObject::values_impl(JSContext *cx, CallArgs args)
{
    return iterator_impl(cx, args, Values);
}

bool
SetObject::values(JSContext *cx, unsigned argc, Value *vp)
{
    CallArgs args = CallArgsFromVp(argc, vp);
    return CallNonGenericMethod(cx, is, values_impl, args);
}

bool
SetObject::entries_impl(JSContext *cx, CallArgs args)
{
    return iterator_impl(cx, args, Entries);
}

bool
SetObject::entries(JSContext *cx, unsigned argc, Value *vp)
{
    CallArgs args = CallArgsFromVp(argc, vp);
    return CallNonGenericMethod(cx, is, entries_impl, args);
}

bool
SetObject::clear_impl(JSContext *cx, CallArgs args)
{
    Rooted<SetObject*> setobj(cx, &args.thisv().toObject().as<SetObject>());
    if (!setobj->getData()->clear()) {
        js_ReportOutOfMemory(cx);
        return false;
    }
    args.rval().setUndefined();
    return true;
}

bool
SetObject::clear(JSContext *cx, unsigned argc, Value *vp)
{
    CallArgs args = CallArgsFromVp(argc, vp);
    return CallNonGenericMethod(cx, is, clear_impl, args);
}

JSObject *
js_InitSetClass(JSContext *cx, HandleObject obj)
{
    return SetObject::initClass(cx, obj);
}