js/src/gc/Barrier.h

Cloned from upstream origin tor-browser at tag tor-browser-31.3.0esr-4.5-1-build1,
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f, for hacking purposes.

/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef gc_Barrier_h
#define gc_Barrier_h

#include "NamespaceImports.h"

#include "gc/Heap.h"
#ifdef JSGC_GENERATIONAL
# include "gc/StoreBuffer.h"
#endif
#include "js/HashTable.h"
#include "js/Id.h"
#include "js/RootingAPI.h"

/*
 * A write barrier is a mechanism used by incremental or generational GCs to
 * ensure that every value that needs to be marked is marked. In general, the
 * write barrier should be invoked whenever a write can cause the set of things
 * traced through by the GC to change. This includes:
 *   - writes to object properties
 *   - writes to array slots
 *   - writes to fields like JSObject::shape_ that we trace through
 *   - writes to fields in private data, like JSGenerator::obj
 *   - writes to non-markable fields like JSObject::private that point to
 *     markable data
 * The last category is the trickiest. Even though the private pointer does not
 * point to a GC thing, changing the private pointer may change the set of
 * objects that are traced by the GC. Therefore it needs a write barrier.
 *
 * Every barriered write should have the following form:
 *   <pre-barrier>
 *   obj->field = value; // do the actual write
 *   <post-barrier>
 * The pre-barrier is used for incremental GC and the post-barrier is for
 * generational GC.
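 *
 * For a JSObject* field, for example, the helper classes below expand a
 * barriered write into roughly the following (illustrative only; the exact
 * calls are made by the classes declared in this file):
 *
 *   JSObject::writeBarrierPre(obj->field);          // pre-barrier: old value
 *   obj->field = value;
 *   JSObject::writeBarrierPost(value, &obj->field); // post-barrier: new value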
 *
 *                               PRE-BARRIER
 *
 * To understand the pre-barrier, let's consider how incremental GC works. The
 * GC itself is divided into "slices". Between each slice, JS code is allowed to
 * run. Each slice should be short so that the user doesn't notice the
 * interruptions. In our GC, the structure of the slices is as follows:
 *
 * 1. ... JS work, which leads to a request to do GC ...
 * 2. [first GC slice, which performs all root marking and possibly more marking]
 * 3. ... more JS work is allowed to run ...
 * 4. [GC mark slice, which runs entirely in drainMarkStack]
 * 5. ... more JS work ...
 * 6. [GC mark slice, which runs entirely in drainMarkStack]
 * 7. ... more JS work ...
 * 8. [GC marking finishes; sweeping done non-incrementally; GC is done]
 * 9. ... JS continues uninterrupted now that the GC is finished ...
 *
 * Of course, there may be a different number of slices depending on how much
 * marking is to be done.
 *
 * The danger inherent in this scheme is that the JS code in steps 3, 5, and 7
 * might change the heap in a way that causes the GC to collect an object that
 * is actually reachable. The write barrier prevents this from happening. We use
 * a variant of incremental GC called "snapshot at the beginning." This approach
 * guarantees the invariant that if an object is reachable in step 2, then we
 * will mark it eventually. The name comes from the idea that we take a
 * theoretical "snapshot" of all reachable objects in step 2; all objects in
 * that snapshot should eventually be marked. (Note that the write barrier
 * verifier code takes an actual snapshot.)
 *
 * The basic correctness invariant of a snapshot-at-the-beginning collector is
 * that any object reachable at the end of the GC (step 9) must either:
 *   (1) have been reachable at the beginning (step 2) and thus in the snapshot
 *   (2) or must have been newly allocated, in steps 3, 5, or 7.
 * To deal with case (2), any objects allocated during an incremental GC are
 * automatically marked black.
 *
 * This strategy is actually somewhat conservative: if an object becomes
 * unreachable between steps 2 and 8, it would be safe to collect it. We won't,
 * mainly for simplicity. (Also, note that the snapshot is entirely
 * theoretical. We don't actually do anything special in step 2 that we wouldn't
 * do in a non-incremental GC.)
 *
 * It's the pre-barrier's job to maintain the snapshot invariant. Consider the
 * write "obj->field = value". Let the prior value of obj->field be
 * value0. Since it's possible that value0 may have been what obj->field
 * contained in step 2, when the snapshot was taken, the barrier marks
 * value0. Note that it only does this if we're in the middle of an incremental
 * GC. Since this is rare, the cost of the write barrier is usually just an
 * extra branch.
 *
 * In practice, we implement the pre-barrier differently based on the type of
 * value0. E.g., see JSObject::writeBarrierPre, which is used if obj->field is
 * a JSObject*. It takes value0 as a parameter.
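 *
 * As a sketch, the pre-barrier check for the write above amounts to:
 *
 *   if (zone->needsBarrier())      // only true during an incremental GC
 *       mark(value0);              // keep value0 in the snapshot
 *
 * where mark() stands in for the MarkUnbarriered machinery declared below.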
 *
 *                                POST-BARRIER
 *
 * For generational GC, we want to be able to quickly collect the nursery in a
 * minor collection.  Part of the way this is achieved is to only mark the
 * nursery itself; tenured things, which may form the majority of the heap, are
 * not traced through or marked.  This leads to the problem of what to do about
 * tenured objects that have pointers into the nursery: if such things are not
 * marked, they may be discarded while there are still live objects which
 * reference them. The solution is to maintain information about these pointers,
 * and mark their targets when we start a minor collection.
 *
 * The pointers can be thought of as edges in the object graph, and the set of
 * edges from the tenured generation into the nursery is known as the remembered
 * set.  Post-barriers are used to track this remembered set.
 *
 * Whenever a slot which could contain such a pointer is written, we use a write
 * barrier to check if the edge created is in the remembered set, and if so we
 * insert it into the store buffer, which is the collector's representation of
 * the remembered set.  This means that when we come to do a minor collection we
 * can examine the contents of the store buffer and mark any edge targets that
 * are in the nursery.
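 *
 * In pseudocode (names illustrative), the post-barrier for "obj->field = value"
 * therefore does:
 *
 *   if (isInNursery(value) && !isInNursery(obj))  // new tenured -> nursery edge?
 *       storeBuffer.put(&obj->field);             // add it to the remembered set
 *
 * The real store buffer lives in gc/StoreBuffer.h.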
 *
 *                            IMPLEMENTATION DETAILS
 *
 * Since it would be awkward to change every write to memory into a function
 * call, this file contains a bunch of C++ classes and templates that use
 * operator overloading to take care of barriers automatically. In many cases,
 * all that's necessary to make some field be barriered is to replace
 *     Type *field;
 * with
 *     HeapPtr<Type> field;
 * There are also special classes HeapValue and HeapId, which barrier js::Value
 * and jsid, respectively.
 *
 * One additional note: not all object writes need to be barriered. Writes to
 * newly allocated objects do not need a pre-barrier.  In these cases, we use
 * the "obj->field.init(value)" method instead of "obj->field = value". We use
 * the init naming idiom in many places to signify that a field is being
 * assigned for the first time.
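 *
 * For instance (illustrative), for a barriered field of a freshly allocated
 * object:
 *
 *   obj->field.init(value);   // first assignment: no prior value, no pre-barrier
 *   obj->field = newValue;    // subsequent writes: fully barriered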
 *
 * For each of pointers, Values, and jsids, this file implements four classes,
 * illustrated here for the pointer (Ptr) classes:
 *
 * BarrieredPtr           abstract base class which provides common operations
 *  |  |  |
 *  |  | EncapsulatedPtr  provides pre-barriers only
 *  |  |
 *  | HeapPtr             provides pre- and post-barriers
 *  |
 * RelocatablePtr         provides pre- and post-barriers and is relocatable
 *
 * These classes are designed to be used by the internals of the JS engine.
 * Barriers designed to be used externally are provided in
 * js/public/RootingAPI.h.
 */
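
// A rough guide (illustrative, not exhaustive) to choosing among the classes
// declared below:
//
//   HeapPtr<Shape> shape_;          // member of a GC-thing; fastest post-barrier,
//                                   // but must live in GC-managed memory
//   RelocatablePtr<JSObject> obj_;  // member of a malloc'd/stack C++ object or a
//                                   // container element; slower but movable
//   EncapsulatedPtr<JSScript> s_;   // pre-barrier only; the post-barrier is the
//                                   // user's responsibility (e.g. hash table keys)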

namespace js {

class PropertyName;

#ifdef DEBUG
bool
RuntimeFromMainThreadIsHeapMajorCollecting(JS::shadow::Zone *shadowZone);
#endif

namespace gc {

template <typename T>
void
MarkUnbarriered(JSTracer *trc, T **thingp, const char *name);

// Direct value access used by the write barriers and the jits.
void
MarkValueUnbarriered(JSTracer *trc, Value *v, const char *name);

// These two declarations are also present in gc/Marking.h, via the DeclMarker
// macro.  Not great, but hard to avoid.
void
MarkObjectUnbarriered(JSTracer *trc, JSObject **obj, const char *name);
void
MarkStringUnbarriered(JSTracer *trc, JSString **str, const char *name);

// Note that some subclasses (e.g. ObjectImpl) specialize some of these
// methods.
template <typename T>
class BarrieredCell : public gc::Cell
{
  public:
    MOZ_ALWAYS_INLINE JS::Zone *zone() const { return tenuredZone(); }
    MOZ_ALWAYS_INLINE JS::shadow::Zone *shadowZone() const { return JS::shadow::Zone::asShadowZone(zone()); }
    MOZ_ALWAYS_INLINE JS::Zone *zoneFromAnyThread() const { return tenuredZoneFromAnyThread(); }
    MOZ_ALWAYS_INLINE JS::shadow::Zone *shadowZoneFromAnyThread() const {
        return JS::shadow::Zone::asShadowZone(zoneFromAnyThread());
    }

    static MOZ_ALWAYS_INLINE void readBarrier(T *thing) {
#ifdef JSGC_INCREMENTAL
        JS::shadow::Zone *shadowZone = thing->shadowZoneFromAnyThread();
        if (shadowZone->needsBarrier()) {
            MOZ_ASSERT(!RuntimeFromMainThreadIsHeapMajorCollecting(shadowZone));
            T *tmp = thing;
            js::gc::MarkUnbarriered<T>(shadowZone->barrierTracer(), &tmp, "read barrier");
            JS_ASSERT(tmp == thing);
        }
#endif
    }

    static MOZ_ALWAYS_INLINE bool needWriteBarrierPre(JS::Zone *zone) {
#ifdef JSGC_INCREMENTAL
        return JS::shadow::Zone::asShadowZone(zone)->needsBarrier();
#else
        return false;
#endif
    }

    static MOZ_ALWAYS_INLINE bool isNullLike(T *thing) { return !thing; }

    static MOZ_ALWAYS_INLINE void writeBarrierPre(T *thing) {
#ifdef JSGC_INCREMENTAL
        if (isNullLike(thing) || !thing->shadowRuntimeFromAnyThread()->needsBarrier())
            return;

        JS::shadow::Zone *shadowZone = thing->shadowZoneFromAnyThread();
        if (shadowZone->needsBarrier()) {
            MOZ_ASSERT(!RuntimeFromMainThreadIsHeapMajorCollecting(shadowZone));
            T *tmp = thing;
            js::gc::MarkUnbarriered<T>(shadowZone->barrierTracer(), &tmp, "write barrier");
            JS_ASSERT(tmp == thing);
        }
#endif
    }

    static void writeBarrierPost(T *thing, void *addr) {}
    static void writeBarrierPostRelocate(T *thing, void *addr) {}
    static void writeBarrierPostRemove(T *thing, void *addr) {}
};

} // namespace gc
   234 } // namespace gc
   236 // Note: the following Zone-getting functions must be equivalent to the zone()
   237 // and shadowZone() functions implemented by the subclasses of BarrieredCell.
   239 JS::Zone *
   240 ZoneOfObject(const JSObject &obj);
   242 static inline JS::shadow::Zone *
   243 ShadowZoneOfObject(JSObject *obj)
   244 {
   245     return JS::shadow::Zone::asShadowZone(ZoneOfObject(*obj));
   246 }
   248 static inline JS::shadow::Zone *
   249 ShadowZoneOfString(JSString *str)
   250 {
   251     return JS::shadow::Zone::asShadowZone(reinterpret_cast<const js::gc::Cell *>(str)->tenuredZone());
   252 }
   254 MOZ_ALWAYS_INLINE JS::Zone *
   255 ZoneOfValue(const JS::Value &value)
   256 {
   257     JS_ASSERT(value.isMarkable());
   258     if (value.isObject())
   259         return ZoneOfObject(value.toObject());
   260     return static_cast<js::gc::Cell *>(value.toGCThing())->tenuredZone();
   261 }
   263 JS::Zone *
   264 ZoneOfObjectFromAnyThread(const JSObject &obj);
   266 static inline JS::shadow::Zone *
   267 ShadowZoneOfObjectFromAnyThread(JSObject *obj)
   268 {
   269     return JS::shadow::Zone::asShadowZone(ZoneOfObjectFromAnyThread(*obj));
   270 }
   272 static inline JS::shadow::Zone *
   273 ShadowZoneOfStringFromAnyThread(JSString *str)
   274 {
   275     return JS::shadow::Zone::asShadowZone(
   276         reinterpret_cast<const js::gc::Cell *>(str)->tenuredZoneFromAnyThread());
   277 }
   279 MOZ_ALWAYS_INLINE JS::Zone *
   280 ZoneOfValueFromAnyThread(const JS::Value &value)
   281 {
   282     JS_ASSERT(value.isMarkable());
   283     if (value.isObject())
   284         return ZoneOfObjectFromAnyThread(value.toObject());
   285     return static_cast<js::gc::Cell *>(value.toGCThing())->tenuredZoneFromAnyThread();
   286 }

/*
 * Base class for barriered pointer types.
 */
template <class T, typename Unioned = uintptr_t>
class BarrieredPtr
{
  protected:
    union {
        T *value;
        Unioned other;
    };

    BarrieredPtr(T *v) : value(v) {}
    ~BarrieredPtr() { pre(); }

  public:
    void init(T *v) {
        JS_ASSERT(!IsPoisonedPtr<T>(v));
        this->value = v;
    }

    /* Use this if the automatic coercion to T* isn't working. */
    T *get() const { return value; }

    /*
     * Use these if you want to change the value without invoking the barrier.
     * Obviously this is dangerous unless you know the barrier is not needed.
     */
    T **unsafeGet() { return &value; }
    void unsafeSet(T *v) { value = v; }

    Unioned *unsafeGetUnioned() { return &other; }

    T &operator*() const { return *value; }
    T *operator->() const { return value; }

    operator T*() const { return value; }

  protected:
    void pre() { T::writeBarrierPre(value); }
};

/*
 * EncapsulatedPtr only automatically handles pre-barriers. Post-barriers must
 * be manually implemented when using this class. HeapPtr and RelocatablePtr
 * should be used in all cases that do not require explicit low-level control
 * of moving behavior, e.g. for HashMap keys.
 */
template <class T, typename Unioned = uintptr_t>
class EncapsulatedPtr : public BarrieredPtr<T, Unioned>
{
  public:
    EncapsulatedPtr() : BarrieredPtr<T, Unioned>(nullptr) {}
    EncapsulatedPtr(T *v) : BarrieredPtr<T, Unioned>(v) {}
    explicit EncapsulatedPtr(const EncapsulatedPtr<T, Unioned> &v)
      : BarrieredPtr<T, Unioned>(v.value) {}

    /* Use to set the pointer to nullptr. */
    void clear() {
        this->pre();
        this->value = nullptr;
    }

    EncapsulatedPtr<T, Unioned> &operator=(T *v) {
        this->pre();
        JS_ASSERT(!IsPoisonedPtr<T>(v));
        this->value = v;
        return *this;
    }

    EncapsulatedPtr<T, Unioned> &operator=(const EncapsulatedPtr<T> &v) {
        this->pre();
        JS_ASSERT(!IsPoisonedPtr<T>(v.value));
        this->value = v.value;
        return *this;
    }
};

/*
 * A pre- and post-barriered heap pointer, for use inside the JS engine.
 *
 * Not to be confused with JS::Heap<T>. This is a different class from the
 * external interface and implements substantially different semantics.
 *
 * The post-barriers implemented by this class are faster than those
 * implemented by RelocatablePtr<T> or JS::Heap<T> at the cost of not
 * automatically handling deletion or movement. It should generally only be
 * stored in memory that has GC lifetime. HeapPtr must not be used in contexts
 * where it may be implicitly moved or deleted, e.g. most containers.
 */
template <class T, class Unioned = uintptr_t>
class HeapPtr : public BarrieredPtr<T, Unioned>
{
  public:
    HeapPtr() : BarrieredPtr<T, Unioned>(nullptr) {}
    explicit HeapPtr(T *v) : BarrieredPtr<T, Unioned>(v) { post(); }
    explicit HeapPtr(const HeapPtr<T, Unioned> &v) : BarrieredPtr<T, Unioned>(v) { post(); }

    void init(T *v) {
        JS_ASSERT(!IsPoisonedPtr<T>(v));
        this->value = v;
        post();
    }

    HeapPtr<T, Unioned> &operator=(T *v) {
        this->pre();
        JS_ASSERT(!IsPoisonedPtr<T>(v));
        this->value = v;
        post();
        return *this;
    }

    HeapPtr<T, Unioned> &operator=(const HeapPtr<T, Unioned> &v) {
        this->pre();
        JS_ASSERT(!IsPoisonedPtr<T>(v.value));
        this->value = v.value;
        post();
        return *this;
    }

  protected:
    void post() { T::writeBarrierPost(this->value, (void *)&this->value); }

    /* Make this friend so it can access pre() and post(). */
    template <class T1, class T2>
    friend inline void
    BarrieredSetPair(Zone *zone,
                     HeapPtr<T1> &v1, T1 *val1,
                     HeapPtr<T2> &v2, T2 *val2);

  private:
    /*
     * Unlike RelocatablePtr<T>, HeapPtr<T> must be managed with GC lifetimes.
     * Specifically, the memory used by the pointer itself must be live until
     * at least the next minor GC. For that reason, move semantics are invalid
     * and are deleted here. Please note that not all containers support move
     * semantics, so this does not completely prevent invalid uses.
     */
    HeapPtr(HeapPtr<T> &&) MOZ_DELETE;
    HeapPtr<T, Unioned> &operator=(HeapPtr<T, Unioned> &&) MOZ_DELETE;
};

/*
 * FixedHeapPtr is designed for one very narrow case: replacing immutable raw
 * pointers to GC-managed things, implicitly converting to a handle type for
 * ease of use.  Pointers encapsulated by this type must:
 *
 *   be immutable (no incremental write barriers),
 *   never point into the nursery (no generational write barriers), and
 *   be traced via MarkRuntime (we use fromMarkedLocation).
 *
 * In short: you *really* need to know what you're doing before you use this
 * class!
 */
template <class T>
class FixedHeapPtr
{
    T *value;

  public:
    operator T*() const { return value; }
    T * operator->() const { return value; }

    operator Handle<T*>() const {
        return Handle<T*>::fromMarkedLocation(&value);
    }

    void init(T *ptr) {
        value = ptr;
    }
};

/*
 * A pre- and post-barriered heap pointer, for use inside the JS engine.
 *
 * Unlike HeapPtr<T>, it can be used in memory that is not managed by the GC,
 * i.e. in C++ containers.  It is, however, somewhat slower, so should only be
 * used in contexts where this ability is necessary.
 */
template <class T>
class RelocatablePtr : public BarrieredPtr<T>
{
  public:
    RelocatablePtr() : BarrieredPtr<T>(nullptr) {}
    explicit RelocatablePtr(T *v) : BarrieredPtr<T>(v) {
        if (v)
            post();
    }

    /*
     * For RelocatablePtr, move semantics are equivalent to copy semantics. In
     * C++, a copy constructor taking const-ref is the way to get a single
     * function that will be used for both lvalue and rvalue copies, so we can
     * simply omit the rvalue variant.
     */
    RelocatablePtr(const RelocatablePtr<T> &v) : BarrieredPtr<T>(v) {
        if (this->value)
            post();
    }

    ~RelocatablePtr() {
        if (this->value)
            relocate();
    }

    RelocatablePtr<T> &operator=(T *v) {
        this->pre();
        JS_ASSERT(!IsPoisonedPtr<T>(v));
        if (v) {
            this->value = v;
            post();
        } else if (this->value) {
            relocate();
            this->value = v;
        }
        return *this;
    }

    RelocatablePtr<T> &operator=(const RelocatablePtr<T> &v) {
        this->pre();
        JS_ASSERT(!IsPoisonedPtr<T>(v.value));
        if (v.value) {
            this->value = v.value;
            post();
        } else if (this->value) {
            relocate();
            this->value = v.value;
        }
        return *this;
    }

  protected:
    void post() {
#ifdef JSGC_GENERATIONAL
        JS_ASSERT(this->value);
        T::writeBarrierPostRelocate(this->value, &this->value);
#endif
    }

    void relocate() {
#ifdef JSGC_GENERATIONAL
        JS_ASSERT(this->value);
        T::writeBarrierPostRemove(this->value, &this->value);
#endif
    }
};

/*
 * This is a hack for RegExpStatics::updateFromMatch. It allows us to do two
 * barriers with only one branch to check if we're in an incremental GC.
 */
template <class T1, class T2>
static inline void
BarrieredSetPair(Zone *zone,
                 HeapPtr<T1> &v1, T1 *val1,
                 HeapPtr<T2> &v2, T2 *val2)
{
    if (T1::needWriteBarrierPre(zone)) {
        v1.pre();
        v2.pre();
    }
    v1.unsafeSet(val1);
    v2.unsafeSet(val2);
    v1.post();
    v2.post();
}
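
// Example (holder and field names hypothetical): both barriered pointers are
// updated after a single needsBarrier() check instead of two.
//
//   BarrieredSetPair(zone, holder.objField, newObj,
//                          holder.scriptField, newScript);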

class Shape;
class BaseShape;
namespace types { struct TypeObject; }

typedef BarrieredPtr<JSObject> BarrieredPtrObject;
typedef BarrieredPtr<JSScript> BarrieredPtrScript;

typedef EncapsulatedPtr<JSObject> EncapsulatedPtrObject;
typedef EncapsulatedPtr<JSScript> EncapsulatedPtrScript;

typedef RelocatablePtr<JSObject> RelocatablePtrObject;
typedef RelocatablePtr<JSScript> RelocatablePtrScript;

typedef HeapPtr<JSObject> HeapPtrObject;
typedef HeapPtr<JSFunction> HeapPtrFunction;
typedef HeapPtr<JSString> HeapPtrString;
typedef HeapPtr<PropertyName> HeapPtrPropertyName;
typedef HeapPtr<JSScript> HeapPtrScript;
typedef HeapPtr<Shape> HeapPtrShape;
typedef HeapPtr<BaseShape> HeapPtrBaseShape;
typedef HeapPtr<types::TypeObject> HeapPtrTypeObject;

/* Useful for hashtables with a HeapPtr as key. */

template <class T>
struct HeapPtrHasher
{
    typedef HeapPtr<T> Key;
    typedef T *Lookup;

    static HashNumber hash(Lookup obj) { return DefaultHasher<T *>::hash(obj); }
    static bool match(const Key &k, Lookup l) { return k.get() == l; }
    static void rekey(Key &k, const Key& newKey) { k.unsafeSet(newKey); }
};

/* Specialized hashing policy for HeapPtrs. */
template <class T>
struct DefaultHasher< HeapPtr<T> > : HeapPtrHasher<T> { };
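
// With the specialization above, a hash table can be keyed directly on a
// HeapPtr (illustrative; ObjectCountMap is hypothetical):
//
//   typedef HashMap<HeapPtrObject, uint32_t> ObjectCountMap;
//
// Lookups still take a raw JSObject*, per HeapPtrHasher::Lookup.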

template <class T>
struct EncapsulatedPtrHasher
{
    typedef EncapsulatedPtr<T> Key;
    typedef T *Lookup;

    static HashNumber hash(Lookup obj) { return DefaultHasher<T *>::hash(obj); }
    static bool match(const Key &k, Lookup l) { return k.get() == l; }
    static void rekey(Key &k, const Key& newKey) { k.unsafeSet(newKey); }
};

template <class T>
struct DefaultHasher< EncapsulatedPtr<T> > : EncapsulatedPtrHasher<T> { };

bool
StringIsPermanentAtom(JSString *str);

/*
 * Base class for barriered value types.
 */
class BarrieredValue : public ValueOperations<BarrieredValue>
{
  protected:
    Value value;

    /*
     * Ensure that BarrieredValue is not constructable, except by our
     * implementations.
     */
    BarrieredValue() MOZ_DELETE;

    BarrieredValue(const Value &v) : value(v) {
        JS_ASSERT(!IsPoisonedValue(v));
    }

    ~BarrieredValue() {
        pre();
    }

  public:
    void init(const Value &v) {
        JS_ASSERT(!IsPoisonedValue(v));
        value = v;
    }
    void init(JSRuntime *rt, const Value &v) {
        JS_ASSERT(!IsPoisonedValue(v));
        value = v;
    }

    bool operator==(const BarrieredValue &v) const { return value == v.value; }
    bool operator!=(const BarrieredValue &v) const { return value != v.value; }

    const Value &get() const { return value; }
    Value *unsafeGet() { return &value; }
    operator const Value &() const { return value; }

    JSGCTraceKind gcKind() const { return value.gcKind(); }

    uint64_t asRawBits() const { return value.asRawBits(); }

    static void writeBarrierPre(const Value &v) {
#ifdef JSGC_INCREMENTAL
        if (v.isMarkable() && shadowRuntimeFromAnyThread(v)->needsBarrier())
            writeBarrierPre(ZoneOfValueFromAnyThread(v), v);
#endif
    }

    static void writeBarrierPre(Zone *zone, const Value &v) {
#ifdef JSGC_INCREMENTAL
        if (v.isString() && StringIsPermanentAtom(v.toString()))
            return;
        JS::shadow::Zone *shadowZone = JS::shadow::Zone::asShadowZone(zone);
        if (shadowZone->needsBarrier()) {
            JS_ASSERT_IF(v.isMarkable(), shadowRuntimeFromMainThread(v)->needsBarrier());
            Value tmp(v);
            js::gc::MarkValueUnbarriered(shadowZone->barrierTracer(), &tmp, "write barrier");
            JS_ASSERT(tmp == v);
        }
#endif
    }

  protected:
    void pre() { writeBarrierPre(value); }
    void pre(Zone *zone) { writeBarrierPre(zone, value); }

    static JSRuntime *runtimeFromMainThread(const Value &v) {
        JS_ASSERT(v.isMarkable());
        return static_cast<js::gc::Cell *>(v.toGCThing())->runtimeFromMainThread();
    }
    static JSRuntime *runtimeFromAnyThread(const Value &v) {
        JS_ASSERT(v.isMarkable());
        return static_cast<js::gc::Cell *>(v.toGCThing())->runtimeFromAnyThread();
    }
    static JS::shadow::Runtime *shadowRuntimeFromMainThread(const Value &v) {
        return reinterpret_cast<JS::shadow::Runtime*>(runtimeFromMainThread(v));
    }
    static JS::shadow::Runtime *shadowRuntimeFromAnyThread(const Value &v) {
        return reinterpret_cast<JS::shadow::Runtime*>(runtimeFromAnyThread(v));
    }

  private:
    friend class ValueOperations<BarrieredValue>;
    const Value * extract() const { return &value; }
};

// Like EncapsulatedPtr, but specialized for Value.
// See the comments on that class for details.
class EncapsulatedValue : public BarrieredValue
{
  public:
    EncapsulatedValue(const Value &v) : BarrieredValue(v) {}
    EncapsulatedValue(const EncapsulatedValue &v) : BarrieredValue(v) {}

    EncapsulatedValue &operator=(const Value &v) {
        pre();
        JS_ASSERT(!IsPoisonedValue(v));
        value = v;
        return *this;
    }

    EncapsulatedValue &operator=(const EncapsulatedValue &v) {
        pre();
        JS_ASSERT(!IsPoisonedValue(v));
        value = v.get();
        return *this;
    }
};

// Like HeapPtr, but specialized for Value.
// See the comments on that class for details.
class HeapValue : public BarrieredValue
{
  public:
    explicit HeapValue()
      : BarrieredValue(UndefinedValue())
    {
        post();
    }

    explicit HeapValue(const Value &v)
      : BarrieredValue(v)
    {
        JS_ASSERT(!IsPoisonedValue(v));
        post();
    }

    explicit HeapValue(const HeapValue &v)
      : BarrieredValue(v.value)
    {
        JS_ASSERT(!IsPoisonedValue(v.value));
        post();
    }

    ~HeapValue() {
        pre();
    }

    void init(const Value &v) {
        JS_ASSERT(!IsPoisonedValue(v));
        value = v;
        post();
    }

    void init(JSRuntime *rt, const Value &v) {
        JS_ASSERT(!IsPoisonedValue(v));
        value = v;
        post(rt);
    }

    HeapValue &operator=(const Value &v) {
        pre();
        JS_ASSERT(!IsPoisonedValue(v));
        value = v;
        post();
        return *this;
    }

    HeapValue &operator=(const HeapValue &v) {
        pre();
        JS_ASSERT(!IsPoisonedValue(v.value));
        value = v.value;
        post();
        return *this;
    }

#ifdef DEBUG
    bool preconditionForSet(Zone *zone);
#endif

    /*
     * This is a faster version of operator=. Normally, operator= has to
     * determine the compartment of the value before it can decide whether to do
     * the barrier. If you already know the compartment, it's faster to pass it
     * in.
     */
    void set(Zone *zone, const Value &v) {
        JS::shadow::Zone *shadowZone = JS::shadow::Zone::asShadowZone(zone);
        JS_ASSERT(preconditionForSet(zone));
        pre(zone);
        JS_ASSERT(!IsPoisonedValue(v));
        value = v;
        post(shadowZone->runtimeFromAnyThread());
    }

    static void writeBarrierPost(const Value &value, Value *addr) {
#ifdef JSGC_GENERATIONAL
        if (value.isMarkable())
            shadowRuntimeFromAnyThread(value)->gcStoreBufferPtr()->putValue(addr);
#endif
    }

    static void writeBarrierPost(JSRuntime *rt, const Value &value, Value *addr) {
#ifdef JSGC_GENERATIONAL
        if (value.isMarkable()) {
            JS::shadow::Runtime *shadowRuntime = JS::shadow::Runtime::asShadowRuntime(rt);
            shadowRuntime->gcStoreBufferPtr()->putValue(addr);
        }
#endif
    }

  private:
    void post() {
        writeBarrierPost(value, &value);
    }

    void post(JSRuntime *rt) {
        writeBarrierPost(rt, value, &value);
    }

    HeapValue(HeapValue &&) MOZ_DELETE;
    HeapValue &operator=(HeapValue &&) MOZ_DELETE;
};
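
// Illustrative use of the faster, Zone-taking set() above (helper and field
// names hypothetical): a caller that already knows the value's zone avoids
// recomputing it on every write.
//
//   void updateSlot(Zone *zone, HeapValue &slot, const Value &v) {
//       slot.set(zone, v);
//   }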

// Like RelocatablePtr, but specialized for Value.
// See the comments on that class for details.
class RelocatableValue : public BarrieredValue
{
  public:
    explicit RelocatableValue() : BarrieredValue(UndefinedValue()) {}

    explicit RelocatableValue(const Value &v)
      : BarrieredValue(v)
    {
        if (v.isMarkable())
            post();
    }

    RelocatableValue(const RelocatableValue &v)
      : BarrieredValue(v.value)
    {
        JS_ASSERT(!IsPoisonedValue(v.value));
        if (v.value.isMarkable())
            post();
    }

    ~RelocatableValue()
    {
        if (value.isMarkable())
            relocate(runtimeFromAnyThread(value));
    }

    RelocatableValue &operator=(const Value &v) {
        pre();
        JS_ASSERT(!IsPoisonedValue(v));
        if (v.isMarkable()) {
            value = v;
            post();
        } else if (value.isMarkable()) {
            JSRuntime *rt = runtimeFromAnyThread(value);
            relocate(rt);
            value = v;
        } else {
            value = v;
        }
        return *this;
    }

    RelocatableValue &operator=(const RelocatableValue &v) {
        pre();
        JS_ASSERT(!IsPoisonedValue(v.value));
        if (v.value.isMarkable()) {
            value = v.value;
            post();
        } else if (value.isMarkable()) {
            JSRuntime *rt = runtimeFromAnyThread(value);
            relocate(rt);
            value = v.value;
        } else {
            value = v.value;
        }
        return *this;
    }

  private:
    void post() {
#ifdef JSGC_GENERATIONAL
        JS_ASSERT(value.isMarkable());
        shadowRuntimeFromAnyThread(value)->gcStoreBufferPtr()->putRelocatableValue(&value);
#endif
    }

    void relocate(JSRuntime *rt) {
#ifdef JSGC_GENERATIONAL
        JS::shadow::Runtime *shadowRuntime = JS::shadow::Runtime::asShadowRuntime(rt);
        shadowRuntime->gcStoreBufferPtr()->removeRelocatableValue(&value);
#endif
    }
};

// A pre- and post-barriered Value that is specialized to be aware that it
// resides in a slots or elements vector. This allows it to be relocated in
// memory, but with substantially less overhead than a RelocatablePtr.
class HeapSlot : public BarrieredValue
{
  public:
    enum Kind {
        Slot = 0,
        Element = 1
    };

    explicit HeapSlot() MOZ_DELETE;

    explicit HeapSlot(JSObject *obj, Kind kind, uint32_t slot, const Value &v)
      : BarrieredValue(v)
    {
        JS_ASSERT(!IsPoisonedValue(v));
        post(obj, kind, slot, v);
    }

    explicit HeapSlot(JSObject *obj, Kind kind, uint32_t slot, const HeapSlot &s)
      : BarrieredValue(s.value)
    {
        JS_ASSERT(!IsPoisonedValue(s.value));
        post(obj, kind, slot, s);
    }

    ~HeapSlot() {
        pre();
    }

    void init(JSObject *owner, Kind kind, uint32_t slot, const Value &v) {
        value = v;
        post(owner, kind, slot, v);
    }

    void init(JSRuntime *rt, JSObject *owner, Kind kind, uint32_t slot, const Value &v) {
        value = v;
        post(rt, owner, kind, slot, v);
    }

#ifdef DEBUG
    bool preconditionForSet(JSObject *owner, Kind kind, uint32_t slot);
    bool preconditionForSet(Zone *zone, JSObject *owner, Kind kind, uint32_t slot);
    static void preconditionForWriteBarrierPost(JSObject *obj, Kind kind, uint32_t slot,
                                                Value target);
#endif

    void set(JSObject *owner, Kind kind, uint32_t slot, const Value &v) {
        JS_ASSERT(preconditionForSet(owner, kind, slot));
        pre();
        JS_ASSERT(!IsPoisonedValue(v));
        value = v;
        post(owner, kind, slot, v);
    }

    void set(Zone *zone, JSObject *owner, Kind kind, uint32_t slot, const Value &v) {
        JS_ASSERT(preconditionForSet(zone, owner, kind, slot));
        JS::shadow::Zone *shadowZone = JS::shadow::Zone::asShadowZone(zone);
        pre(zone);
        JS_ASSERT(!IsPoisonedValue(v));
        value = v;
        post(shadowZone->runtimeFromAnyThread(), owner, kind, slot, v);
    }

    static void writeBarrierPost(JSObject *obj, Kind kind, uint32_t slot, Value target)
    {
#ifdef JSGC_GENERATIONAL
        js::gc::Cell *cell = reinterpret_cast<js::gc::Cell*>(obj);
        writeBarrierPost(cell->runtimeFromAnyThread(), obj, kind, slot, target);
#endif
    }

    static void writeBarrierPost(JSRuntime *rt, JSObject *obj, Kind kind, uint32_t slot,
                                 Value target)
    {
#ifdef DEBUG
        preconditionForWriteBarrierPost(obj, kind, slot, target);
#endif
#ifdef JSGC_GENERATIONAL
        if (target.isObject()) {
            JS::shadow::Runtime *shadowRuntime = JS::shadow::Runtime::asShadowRuntime(rt);
            shadowRuntime->gcStoreBufferPtr()->putSlot(obj, kind, slot, 1);
        }
#endif
    }

  private:
    void post(JSObject *owner, Kind kind, uint32_t slot, Value target) {
        HeapSlot::writeBarrierPost(owner, kind, slot, target);
    }

    void post(JSRuntime *rt, JSObject *owner, Kind kind, uint32_t slot, Value target) {
        HeapSlot::writeBarrierPost(rt, owner, kind, slot, target);
    }
};

static inline const Value *
Valueify(const BarrieredValue *array)
{
    JS_STATIC_ASSERT(sizeof(HeapValue) == sizeof(Value));
    JS_STATIC_ASSERT(sizeof(HeapSlot) == sizeof(Value));
    return (const Value *)array;
}

static inline HeapValue *
HeapValueify(Value *v)
{
    JS_STATIC_ASSERT(sizeof(HeapValue) == sizeof(Value));
    JS_STATIC_ASSERT(sizeof(HeapSlot) == sizeof(Value));
    return (HeapValue *)v;
}

class HeapSlotArray
{
    HeapSlot *array;

  public:
    HeapSlotArray(HeapSlot *array) : array(array) {}

    operator const Value *() const { return Valueify(array); }
    operator HeapSlot *() const { return array; }

    HeapSlotArray operator +(int offset) const { return HeapSlotArray(array + offset); }
    HeapSlotArray operator +(uint32_t offset) const { return HeapSlotArray(array + offset); }
};

/*
 * Base class for barriered jsid types.
 */
class BarrieredId
{
  protected:
    jsid value;

  private:
    BarrieredId(const BarrieredId &v) MOZ_DELETE;

  protected:
    explicit BarrieredId(jsid id) : value(id) {}
    ~BarrieredId() { pre(); }

  public:
    bool operator==(jsid id) const { return value == id; }
    bool operator!=(jsid id) const { return value != id; }

    jsid get() const { return value; }
    jsid *unsafeGet() { return &value; }
    void unsafeSet(jsid newId) { value = newId; }
    operator jsid() const { return value; }

  protected:
    void pre() {
#ifdef JSGC_INCREMENTAL
        if (JSID_IS_OBJECT(value)) {
            JSObject *obj = JSID_TO_OBJECT(value);
            JS::shadow::Zone *shadowZone = ShadowZoneOfObjectFromAnyThread(obj);
            if (shadowZone->needsBarrier()) {
                js::gc::MarkObjectUnbarriered(shadowZone->barrierTracer(), &obj, "write barrier");
                JS_ASSERT(obj == JSID_TO_OBJECT(value));
            }
        } else if (JSID_IS_STRING(value)) {
            JSString *str = JSID_TO_STRING(value);
            JS::shadow::Zone *shadowZone = ShadowZoneOfStringFromAnyThread(str);
            if (shadowZone->needsBarrier()) {
                js::gc::MarkStringUnbarriered(shadowZone->barrierTracer(), &str, "write barrier");
                JS_ASSERT(str == JSID_TO_STRING(value));
            }
        }
#endif
    }
};

// Like EncapsulatedPtr, but specialized for jsid.
// See the comments on that class for details.
class EncapsulatedId : public BarrieredId
{
  public:
    explicit EncapsulatedId(jsid id) : BarrieredId(id) {}
    explicit EncapsulatedId() : BarrieredId(JSID_VOID) {}

    EncapsulatedId &operator=(const EncapsulatedId &v) {
        if (v.value != value)
            pre();
        JS_ASSERT(!IsPoisonedId(v.value));
        value = v.value;
        return *this;
    }
};

// Like RelocatablePtr, but specialized for jsid.
// See the comments on that class for details.
class RelocatableId : public BarrieredId
{
  public:
    explicit RelocatableId() : BarrieredId(JSID_VOID) {}
    explicit inline RelocatableId(jsid id) : BarrieredId(id) {}
    ~RelocatableId() { pre(); }

    bool operator==(jsid id) const { return value == id; }
    bool operator!=(jsid id) const { return value != id; }

    jsid get() const { return value; }
    operator jsid() const { return value; }

    jsid *unsafeGet() { return &value; }

    RelocatableId &operator=(jsid id) {
        if (id != value)
            pre();
        JS_ASSERT(!IsPoisonedId(id));
        value = id;
        return *this;
    }

    RelocatableId &operator=(const RelocatableId &v) {
        if (v.value != value)
            pre();
        JS_ASSERT(!IsPoisonedId(v.value));
        value = v.value;
        return *this;
    }
};

// Like HeapPtr, but specialized for jsid.
// See the comments on that class for details.
class HeapId : public BarrieredId
{
  public:
    explicit HeapId() : BarrieredId(JSID_VOID) {}

    explicit HeapId(jsid id)
      : BarrieredId(id)
    {
        JS_ASSERT(!IsPoisonedId(id));
        post();
    }

    ~HeapId() { pre(); }

    void init(jsid id) {
        JS_ASSERT(!IsPoisonedId(id));
        value = id;
        post();
    }

    HeapId &operator=(jsid id) {
        if (id != value)
            pre();
        JS_ASSERT(!IsPoisonedId(id));
        value = id;
        post();
        return *this;
    }

    HeapId &operator=(const HeapId &v) {
        if (v.value != value)
            pre();
        JS_ASSERT(!IsPoisonedId(v.value));
        value = v.value;
        post();
        return *this;
    }

  private:
    void post() {};

    HeapId(const HeapId &v) MOZ_DELETE;

    HeapId(HeapId &&) MOZ_DELETE;
    HeapId &operator=(HeapId &&) MOZ_DELETE;
};

/*
 * Incremental GC requires that weak pointers have read barriers. This is mostly
 * an issue for empty shapes stored in JSCompartment. The problem happens when,
 * during an incremental GC, some JS code stores one of the compartment's empty
 * shapes into an object already marked black. Normally, this would not be a
 * problem, because the empty shape would have been part of the initial snapshot
 * when the GC started. However, since this is a weak pointer, it isn't. So we
 * may collect the empty shape even though a live object points to it. To fix
 * this, we mark these empty shapes black whenever they get read out.
 */
template <class T>
class ReadBarriered
{
    T *value;

  public:
    ReadBarriered() : value(nullptr) {}
    ReadBarriered(T *value) : value(value) {}
    ReadBarriered(const Rooted<T*> &rooted) : value(rooted) {}

    T *get() const {
        if (!value)
            return nullptr;
        T::readBarrier(value);
        return value;
    }

    operator T*() const { return get(); }

    T &operator*() const { return *get(); }
    T *operator->() const { return get(); }

    T **unsafeGet() { return &value; }
    T * const * unsafeGet() const { return &value; }

    void set(T *v) { value = v; }

    operator bool() { return !!value; }
};
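
// Illustrative (names hypothetical): reads through a ReadBarriered<T> apply
// T::readBarrier, so a weakly-held shape read during an incremental GC is
// marked rather than wrongly collected:
//
//   ReadBarriered<Shape> cachedEmptyShape;
//   if (Shape *shape = cachedEmptyShape)  // operator T*() calls get()
//       useShape(shape);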

class ReadBarrieredValue
{
    Value value;

  public:
    ReadBarrieredValue() : value(UndefinedValue()) {}
    ReadBarrieredValue(const Value &value) : value(value) {}

    inline const Value &get() const;
    Value *unsafeGet() { return &value; }
    inline operator const Value &() const;

    inline JSObject &toObject() const;
};

/*
 * Operations on a Heap thing inside the GC need to strip the barriers from
 * pointer operations. This template helps do that in contexts where the type
 * is templatized.
 */
template <typename T> struct Unbarriered {};
template <typename S> struct Unbarriered< EncapsulatedPtr<S> > { typedef S *type; };
template <typename S> struct Unbarriered< RelocatablePtr<S> > { typedef S *type; };
template <> struct Unbarriered<EncapsulatedValue> { typedef Value type; };
template <> struct Unbarriered<RelocatableValue> { typedef Value type; };
template <typename S> struct Unbarriered< DefaultHasher< EncapsulatedPtr<S> > > {
    typedef DefaultHasher<S *> type;
};

} /* namespace js */

#endif /* gc_Barrier_h */
