js/src/gc/Barrier.h

changeset 0:6474c204b198
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/js/src/gc/Barrier.h	Wed Dec 31 06:09:35 2014 +0100
     1.3 @@ -0,0 +1,1247 @@
     1.4 +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
     1.5 + * vim: set ts=8 sts=4 et sw=4 tw=99:
     1.6 + * This Source Code Form is subject to the terms of the Mozilla Public
     1.7 + * License, v. 2.0. If a copy of the MPL was not distributed with this
     1.8 + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
     1.9 +
    1.10 +#ifndef gc_Barrier_h
    1.11 +#define gc_Barrier_h
    1.12 +
    1.13 +#include "NamespaceImports.h"
    1.14 +
    1.15 +#include "gc/Heap.h"
    1.16 +#ifdef JSGC_GENERATIONAL
    1.17 +# include "gc/StoreBuffer.h"
    1.18 +#endif
    1.19 +#include "js/HashTable.h"
    1.20 +#include "js/Id.h"
    1.21 +#include "js/RootingAPI.h"
    1.22 +
    1.23 +/*
     1.24 + * A write barrier is a mechanism used by incremental or generational GCs to
    1.25 + * ensure that every value that needs to be marked is marked. In general, the
    1.26 + * write barrier should be invoked whenever a write can cause the set of things
    1.27 + * traced through by the GC to change. This includes:
    1.28 + *   - writes to object properties
    1.29 + *   - writes to array slots
    1.30 + *   - writes to fields like JSObject::shape_ that we trace through
    1.31 + *   - writes to fields in private data, like JSGenerator::obj
    1.32 + *   - writes to non-markable fields like JSObject::private that point to
    1.33 + *     markable data
     1.34 + * The last category is the trickiest. Even though the private pointer does not
    1.35 + * point to a GC thing, changing the private pointer may change the set of
    1.36 + * objects that are traced by the GC. Therefore it needs a write barrier.
    1.37 + *
    1.38 + * Every barriered write should have the following form:
    1.39 + *   <pre-barrier>
    1.40 + *   obj->field = value; // do the actual write
    1.41 + *   <post-barrier>
    1.42 + * The pre-barrier is used for incremental GC and the post-barrier is for
    1.43 + * generational GC.
    1.44 + *
    1.45 + *                               PRE-BARRIER
    1.46 + *
    1.47 + * To understand the pre-barrier, let's consider how incremental GC works. The
    1.48 + * GC itself is divided into "slices". Between each slice, JS code is allowed to
    1.49 + * run. Each slice should be short so that the user doesn't notice the
    1.50 + * interruptions. In our GC, the structure of the slices is as follows:
    1.51 + *
    1.52 + * 1. ... JS work, which leads to a request to do GC ...
    1.53 + * 2. [first GC slice, which performs all root marking and possibly more marking]
    1.54 + * 3. ... more JS work is allowed to run ...
    1.55 + * 4. [GC mark slice, which runs entirely in drainMarkStack]
    1.56 + * 5. ... more JS work ...
    1.57 + * 6. [GC mark slice, which runs entirely in drainMarkStack]
    1.58 + * 7. ... more JS work ...
    1.59 + * 8. [GC marking finishes; sweeping done non-incrementally; GC is done]
     1.60 + * 9. ... JS continues uninterrupted now that the GC is finished ...
    1.61 + *
    1.62 + * Of course, there may be a different number of slices depending on how much
    1.63 + * marking is to be done.
    1.64 + *
    1.65 + * The danger inherent in this scheme is that the JS code in steps 3, 5, and 7
    1.66 + * might change the heap in a way that causes the GC to collect an object that
    1.67 + * is actually reachable. The write barrier prevents this from happening. We use
    1.68 + * a variant of incremental GC called "snapshot at the beginning." This approach
    1.69 + * guarantees the invariant that if an object is reachable in step 2, then we
    1.70 + * will mark it eventually. The name comes from the idea that we take a
    1.71 + * theoretical "snapshot" of all reachable objects in step 2; all objects in
    1.72 + * that snapshot should eventually be marked. (Note that the write barrier
    1.73 + * verifier code takes an actual snapshot.)
    1.74 + *
    1.75 + * The basic correctness invariant of a snapshot-at-the-beginning collector is
    1.76 + * that any object reachable at the end of the GC (step 9) must either:
    1.77 + *   (1) have been reachable at the beginning (step 2) and thus in the snapshot
    1.78 + *   (2) or must have been newly allocated, in steps 3, 5, or 7.
    1.79 + * To deal with case (2), any objects allocated during an incremental GC are
    1.80 + * automatically marked black.
    1.81 + *
    1.82 + * This strategy is actually somewhat conservative: if an object becomes
    1.83 + * unreachable between steps 2 and 8, it would be safe to collect it. We won't,
    1.84 + * mainly for simplicity. (Also, note that the snapshot is entirely
    1.85 + * theoretical. We don't actually do anything special in step 2 that we wouldn't
     1.86 + * do in a non-incremental GC.)
    1.87 + *
    1.88 + * It's the pre-barrier's job to maintain the snapshot invariant. Consider the
    1.89 + * write "obj->field = value". Let the prior value of obj->field be
     1.90 + * value0. Since value0 may have been what obj->field
    1.91 + * contained in step 2, when the snapshot was taken, the barrier marks
    1.92 + * value0. Note that it only does this if we're in the middle of an incremental
    1.93 + * GC. Since this is rare, the cost of the write barrier is usually just an
    1.94 + * extra branch.
    1.95 + *
    1.96 + * In practice, we implement the pre-barrier differently based on the type of
    1.97 + * value0. E.g., see JSObject::writeBarrierPre, which is used if obj->field is
    1.98 + * a JSObject*. It takes value0 as a parameter.
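          + *
          + * For example, a barriered shape write expands to roughly this (a sketch;
          + * the real overloads are declared below in this file):
          + *
          + *   Shape *value0 = obj->shape_;
          + *   Shape::writeBarrierPre(value0);  // mark value0 if a GC is in progress
          + *   obj->shape_ = newShape;          // then do the actual write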
    1.99 + *
   1.100 + *                                POST-BARRIER
   1.101 + *
   1.102 + * For generational GC, we want to be able to quickly collect the nursery in a
    1.103 + * minor collection.  Part of the way this is achieved is by marking only the
   1.104 + * nursery itself; tenured things, which may form the majority of the heap, are
   1.105 + * not traced through or marked.  This leads to the problem of what to do about
   1.106 + * tenured objects that have pointers into the nursery: if such things are not
   1.107 + * marked, they may be discarded while there are still live objects which
   1.108 + * reference them. The solution is to maintain information about these pointers,
   1.109 + * and mark their targets when we start a minor collection.
   1.110 + *
    1.111 + * The pointers can be thought of as edges in the object graph, and the set
    1.112 + * of edges from the tenured generation into the nursery is known as the
    1.113 + * remembered set.  Post barriers are used to track this remembered set.
   1.114 + *
   1.115 + * Whenever a slot which could contain such a pointer is written, we use a write
   1.116 + * barrier to check if the edge created is in the remembered set, and if so we
   1.117 + * insert it into the store buffer, which is the collector's representation of
    1.118 + * the remembered set.  This means that when we come to do a minor collection we
   1.119 + * can examine the contents of the store buffer and mark any edge targets that
   1.120 + * are in the nursery.
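          + *
          + * For example (a sketch; HeapValue::writeBarrierPost is defined below):
          + *
          + *   *slotAddr = v;                             // do the actual write
          + *   HeapValue::writeBarrierPost(v, slotAddr);  // record the address in the
          + *                                              // store buffer if v is markable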
   1.121 + *
   1.122 + *                            IMPLEMENTATION DETAILS
   1.123 + *
   1.124 + * Since it would be awkward to change every write to memory into a function
   1.125 + * call, this file contains a bunch of C++ classes and templates that use
   1.126 + * operator overloading to take care of barriers automatically. In many cases,
   1.127 + * all that's necessary to make some field be barriered is to replace
   1.128 + *     Type *field;
   1.129 + * with
   1.130 + *     HeapPtr<Type> field;
   1.131 + * There are also special classes HeapValue and HeapId, which barrier js::Value
   1.132 + * and jsid, respectively.
   1.133 + *
   1.134 + * One additional note: not all object writes need to be barriered. Writes to
   1.135 + * newly allocated objects do not need a pre-barrier.  In these cases, we use
   1.136 + * the "obj->field.init(value)" method instead of "obj->field = value". We use
   1.137 + * the init naming idiom in many places to signify that a field is being
   1.138 + * assigned for the first time.
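          + *
          + * For example (a sketch, assuming obj was just allocated):
          + *
          + *   obj->field.init(value);   // first write: no pre-barrier needed
          + *   obj->field = newValue;    // later writes: fully barriered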
   1.139 + *
    1.140 + * For each of pointers, Values, and jsids, this file implements four classes,
   1.141 + * illustrated here for the pointer (Ptr) classes:
   1.142 + *
   1.143 + * BarrieredPtr           abstract base class which provides common operations
   1.144 + *  |  |  |
   1.145 + *  |  | EncapsulatedPtr  provides pre-barriers only
   1.146 + *  |  |
   1.147 + *  | HeapPtr             provides pre- and post-barriers
   1.148 + *  |
   1.149 + * RelocatablePtr         provides pre- and post-barriers and is relocatable
   1.150 + *
   1.151 + * These classes are designed to be used by the internals of the JS engine.
   1.152 + * Barriers designed to be used externally are provided in
   1.153 + * js/public/RootingAPI.h.
   1.154 + */
   1.155 +
   1.156 +namespace js {
   1.157 +
   1.158 +class PropertyName;
   1.159 +
   1.160 +#ifdef DEBUG
   1.161 +bool
   1.162 +RuntimeFromMainThreadIsHeapMajorCollecting(JS::shadow::Zone *shadowZone);
   1.163 +#endif
   1.164 +
   1.165 +namespace gc {
   1.166 +
   1.167 +template <typename T>
   1.168 +void
   1.169 +MarkUnbarriered(JSTracer *trc, T **thingp, const char *name);
   1.170 +
   1.171 +// Direct value access used by the write barriers and the jits.
   1.172 +void
   1.173 +MarkValueUnbarriered(JSTracer *trc, Value *v, const char *name);
   1.174 +
   1.175 +// These two declarations are also present in gc/Marking.h, via the DeclMarker
   1.176 +// macro.  Not great, but hard to avoid.
   1.177 +void
   1.178 +MarkObjectUnbarriered(JSTracer *trc, JSObject **obj, const char *name);
   1.179 +void
   1.180 +MarkStringUnbarriered(JSTracer *trc, JSString **str, const char *name);
   1.181 +
   1.182 +// Note that some subclasses (e.g. ObjectImpl) specialize some of these
   1.183 +// methods.
   1.184 +template <typename T>
   1.185 +class BarrieredCell : public gc::Cell
   1.186 +{
   1.187 +  public:
   1.188 +    MOZ_ALWAYS_INLINE JS::Zone *zone() const { return tenuredZone(); }
   1.189 +    MOZ_ALWAYS_INLINE JS::shadow::Zone *shadowZone() const { return JS::shadow::Zone::asShadowZone(zone()); }
   1.190 +    MOZ_ALWAYS_INLINE JS::Zone *zoneFromAnyThread() const { return tenuredZoneFromAnyThread(); }
   1.191 +    MOZ_ALWAYS_INLINE JS::shadow::Zone *shadowZoneFromAnyThread() const {
   1.192 +        return JS::shadow::Zone::asShadowZone(zoneFromAnyThread());
   1.193 +    }
   1.194 +
   1.195 +    static MOZ_ALWAYS_INLINE void readBarrier(T *thing) {
   1.196 +#ifdef JSGC_INCREMENTAL
   1.197 +        JS::shadow::Zone *shadowZone = thing->shadowZoneFromAnyThread();
   1.198 +        if (shadowZone->needsBarrier()) {
   1.199 +            MOZ_ASSERT(!RuntimeFromMainThreadIsHeapMajorCollecting(shadowZone));
   1.200 +            T *tmp = thing;
   1.201 +            js::gc::MarkUnbarriered<T>(shadowZone->barrierTracer(), &tmp, "read barrier");
   1.202 +            JS_ASSERT(tmp == thing);
   1.203 +        }
   1.204 +#endif
   1.205 +    }
   1.206 +
   1.207 +    static MOZ_ALWAYS_INLINE bool needWriteBarrierPre(JS::Zone *zone) {
   1.208 +#ifdef JSGC_INCREMENTAL
   1.209 +        return JS::shadow::Zone::asShadowZone(zone)->needsBarrier();
   1.210 +#else
   1.211 +        return false;
   1.212 +#endif
   1.213 +    }
   1.214 +
   1.215 +    static MOZ_ALWAYS_INLINE bool isNullLike(T *thing) { return !thing; }
   1.216 +
   1.217 +    static MOZ_ALWAYS_INLINE void writeBarrierPre(T *thing) {
   1.218 +#ifdef JSGC_INCREMENTAL
   1.219 +        if (isNullLike(thing) || !thing->shadowRuntimeFromAnyThread()->needsBarrier())
   1.220 +            return;
   1.221 +
   1.222 +        JS::shadow::Zone *shadowZone = thing->shadowZoneFromAnyThread();
   1.223 +        if (shadowZone->needsBarrier()) {
   1.224 +            MOZ_ASSERT(!RuntimeFromMainThreadIsHeapMajorCollecting(shadowZone));
   1.225 +            T *tmp = thing;
   1.226 +            js::gc::MarkUnbarriered<T>(shadowZone->barrierTracer(), &tmp, "write barrier");
   1.227 +            JS_ASSERT(tmp == thing);
   1.228 +        }
   1.229 +#endif
   1.230 +    }
   1.231 +
   1.232 +    static void writeBarrierPost(T *thing, void *addr) {}
   1.233 +    static void writeBarrierPostRelocate(T *thing, void *addr) {}
   1.234 +    static void writeBarrierPostRemove(T *thing, void *addr) {}
   1.235 +};
   1.236 +
   1.237 +} // namespace gc
   1.238 +
   1.239 +// Note: the following Zone-getting functions must be equivalent to the zone()
   1.240 +// and shadowZone() functions implemented by the subclasses of BarrieredCell.
   1.241 +
   1.242 +JS::Zone *
   1.243 +ZoneOfObject(const JSObject &obj);
   1.244 +
   1.245 +static inline JS::shadow::Zone *
   1.246 +ShadowZoneOfObject(JSObject *obj)
   1.247 +{
   1.248 +    return JS::shadow::Zone::asShadowZone(ZoneOfObject(*obj));
   1.249 +}
   1.250 +
   1.251 +static inline JS::shadow::Zone *
   1.252 +ShadowZoneOfString(JSString *str)
   1.253 +{
   1.254 +    return JS::shadow::Zone::asShadowZone(reinterpret_cast<const js::gc::Cell *>(str)->tenuredZone());
   1.255 +}
   1.256 +
   1.257 +MOZ_ALWAYS_INLINE JS::Zone *
   1.258 +ZoneOfValue(const JS::Value &value)
   1.259 +{
   1.260 +    JS_ASSERT(value.isMarkable());
   1.261 +    if (value.isObject())
   1.262 +        return ZoneOfObject(value.toObject());
   1.263 +    return static_cast<js::gc::Cell *>(value.toGCThing())->tenuredZone();
   1.264 +}
   1.265 +
   1.266 +JS::Zone *
   1.267 +ZoneOfObjectFromAnyThread(const JSObject &obj);
   1.268 +
   1.269 +static inline JS::shadow::Zone *
   1.270 +ShadowZoneOfObjectFromAnyThread(JSObject *obj)
   1.271 +{
   1.272 +    return JS::shadow::Zone::asShadowZone(ZoneOfObjectFromAnyThread(*obj));
   1.273 +}
   1.274 +
   1.275 +static inline JS::shadow::Zone *
   1.276 +ShadowZoneOfStringFromAnyThread(JSString *str)
   1.277 +{
   1.278 +    return JS::shadow::Zone::asShadowZone(
   1.279 +        reinterpret_cast<const js::gc::Cell *>(str)->tenuredZoneFromAnyThread());
   1.280 +}
   1.281 +
   1.282 +MOZ_ALWAYS_INLINE JS::Zone *
   1.283 +ZoneOfValueFromAnyThread(const JS::Value &value)
   1.284 +{
   1.285 +    JS_ASSERT(value.isMarkable());
   1.286 +    if (value.isObject())
   1.287 +        return ZoneOfObjectFromAnyThread(value.toObject());
   1.288 +    return static_cast<js::gc::Cell *>(value.toGCThing())->tenuredZoneFromAnyThread();
   1.289 +}
   1.290 +
   1.291 +/*
   1.292 + * Base class for barriered pointer types.
   1.293 + */
   1.294 +template <class T, typename Unioned = uintptr_t>
   1.295 +class BarrieredPtr
   1.296 +{
   1.297 +  protected:
   1.298 +    union {
   1.299 +        T *value;
   1.300 +        Unioned other;
   1.301 +    };
   1.302 +
   1.303 +    BarrieredPtr(T *v) : value(v) {}
   1.304 +    ~BarrieredPtr() { pre(); }
   1.305 +
   1.306 +  public:
   1.307 +    void init(T *v) {
   1.308 +        JS_ASSERT(!IsPoisonedPtr<T>(v));
   1.309 +        this->value = v;
   1.310 +    }
   1.311 +
   1.312 +    /* Use this if the automatic coercion to T* isn't working. */
   1.313 +    T *get() const { return value; }
   1.314 +
   1.315 +    /*
   1.316 +     * Use these if you want to change the value without invoking the barrier.
   1.317 +     * Obviously this is dangerous unless you know the barrier is not needed.
   1.318 +     */
   1.319 +    T **unsafeGet() { return &value; }
   1.320 +    void unsafeSet(T *v) { value = v; }
   1.321 +
   1.322 +    Unioned *unsafeGetUnioned() { return &other; }
   1.323 +
   1.324 +    T &operator*() const { return *value; }
   1.325 +    T *operator->() const { return value; }
   1.326 +
   1.327 +    operator T*() const { return value; }
   1.328 +
   1.329 +  protected:
   1.330 +    void pre() { T::writeBarrierPre(value); }
   1.331 +};
   1.332 +
   1.333 +/*
   1.334 + * EncapsulatedPtr only automatically handles pre-barriers. Post-barriers must
   1.335 + * be manually implemented when using this class. HeapPtr and RelocatablePtr
   1.336 + * should be used in all cases that do not require explicit low-level control
   1.337 + * of moving behavior, e.g. for HashMap keys.
   1.338 + */
   1.339 +template <class T, typename Unioned = uintptr_t>
   1.340 +class EncapsulatedPtr : public BarrieredPtr<T, Unioned>
   1.341 +{
   1.342 +  public:
   1.343 +    EncapsulatedPtr() : BarrieredPtr<T, Unioned>(nullptr) {}
   1.344 +    EncapsulatedPtr(T *v) : BarrieredPtr<T, Unioned>(v) {}
   1.345 +    explicit EncapsulatedPtr(const EncapsulatedPtr<T, Unioned> &v)
   1.346 +      : BarrieredPtr<T, Unioned>(v.value) {}
   1.347 +
   1.348 +    /* Use to set the pointer to nullptr. */
   1.349 +    void clear() {
   1.350 +        this->pre();
   1.351 +        this->value = nullptr;
   1.352 +    }
   1.353 +
   1.354 +    EncapsulatedPtr<T, Unioned> &operator=(T *v) {
   1.355 +        this->pre();
   1.356 +        JS_ASSERT(!IsPoisonedPtr<T>(v));
   1.357 +        this->value = v;
   1.358 +        return *this;
   1.359 +    }
   1.360 +
   1.361 +    EncapsulatedPtr<T, Unioned> &operator=(const EncapsulatedPtr<T> &v) {
   1.362 +        this->pre();
   1.363 +        JS_ASSERT(!IsPoisonedPtr<T>(v.value));
   1.364 +        this->value = v.value;
   1.365 +        return *this;
   1.366 +    }
   1.367 +};
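          +
          +/*
          + * A sketch of pairing EncapsulatedPtr with a manual post-barrier. Entry
          + * and EntryEdge are hypothetical, and the store-buffer call is only
          + * illustrative:
          + *
          + *   struct Entry { EncapsulatedPtr<JSObject> key; };
          + *
          + *   entry.key = obj;                                   // pre-barrier only
          + * #ifdef JSGC_GENERATIONAL
          + *   rt->gcStoreBuffer.putGeneric(EntryEdge(&entry));   // post-barrier by hand
          + * #endif
          + */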
   1.368 +
   1.369 +/*
   1.370 + * A pre- and post-barriered heap pointer, for use inside the JS engine.
   1.371 + *
   1.372 + * Not to be confused with JS::Heap<T>. This is a different class from the
   1.373 + * external interface and implements substantially different semantics.
   1.374 + *
   1.375 + * The post-barriers implemented by this class are faster than those
   1.376 + * implemented by RelocatablePtr<T> or JS::Heap<T> at the cost of not
   1.377 + * automatically handling deletion or movement. It should generally only be
   1.378 + * stored in memory that has GC lifetime. HeapPtr must not be used in contexts
   1.379 + * where it may be implicitly moved or deleted, e.g. most containers.
   1.380 + */
   1.381 +template <class T, class Unioned = uintptr_t>
   1.382 +class HeapPtr : public BarrieredPtr<T, Unioned>
   1.383 +{
   1.384 +  public:
   1.385 +    HeapPtr() : BarrieredPtr<T, Unioned>(nullptr) {}
   1.386 +    explicit HeapPtr(T *v) : BarrieredPtr<T, Unioned>(v) { post(); }
   1.387 +    explicit HeapPtr(const HeapPtr<T, Unioned> &v) : BarrieredPtr<T, Unioned>(v) { post(); }
   1.388 +
   1.389 +    void init(T *v) {
   1.390 +        JS_ASSERT(!IsPoisonedPtr<T>(v));
   1.391 +        this->value = v;
   1.392 +        post();
   1.393 +    }
   1.394 +
   1.395 +    HeapPtr<T, Unioned> &operator=(T *v) {
   1.396 +        this->pre();
   1.397 +        JS_ASSERT(!IsPoisonedPtr<T>(v));
   1.398 +        this->value = v;
   1.399 +        post();
   1.400 +        return *this;
   1.401 +    }
   1.402 +
   1.403 +    HeapPtr<T, Unioned> &operator=(const HeapPtr<T, Unioned> &v) {
   1.404 +        this->pre();
   1.405 +        JS_ASSERT(!IsPoisonedPtr<T>(v.value));
   1.406 +        this->value = v.value;
   1.407 +        post();
   1.408 +        return *this;
   1.409 +    }
   1.410 +
   1.411 +  protected:
   1.412 +    void post() { T::writeBarrierPost(this->value, (void *)&this->value); }
   1.413 +
   1.414 +    /* Make this friend so it can access pre() and post(). */
   1.415 +    template <class T1, class T2>
   1.416 +    friend inline void
   1.417 +    BarrieredSetPair(Zone *zone,
   1.418 +                     HeapPtr<T1> &v1, T1 *val1,
   1.419 +                     HeapPtr<T2> &v2, T2 *val2);
   1.420 +
   1.421 +  private:
   1.422 +    /*
   1.423 +     * Unlike RelocatablePtr<T>, HeapPtr<T> must be managed with GC lifetimes.
   1.424 +     * Specifically, the memory used by the pointer itself must be live until
   1.425 +     * at least the next minor GC. For that reason, move semantics are invalid
   1.426 +     * and are deleted here. Please note that not all containers support move
   1.427 +     * semantics, so this does not completely prevent invalid uses.
   1.428 +     */
   1.429 +    HeapPtr(HeapPtr<T> &&) MOZ_DELETE;
   1.430 +    HeapPtr<T, Unioned> &operator=(HeapPtr<T, Unioned> &&) MOZ_DELETE;
   1.431 +};
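          +
          +/*
          + * A minimal usage sketch (Wrapper is hypothetical): HeapPtr is appropriate
          + * because the field lives inside a GC-allocated cell, so the address of
          + * the pointer itself stays valid until at least the next minor GC.
          + *
          + *   class Wrapper : public gc::BarrieredCell<Wrapper>
          + *   {
          + *       HeapPtr<JSObject> target_;
          + *     public:
          + *       void setTarget(JSObject *obj) { target_ = obj; }  // pre + post
          + *   };
          + */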
   1.432 +
   1.433 +/*
   1.434 + * FixedHeapPtr is designed for one very narrow case: replacing immutable raw
   1.435 + * pointers to GC-managed things, implicitly converting to a handle type for
   1.436 + * ease of use.  Pointers encapsulated by this type must:
   1.437 + *
   1.438 + *   be immutable (no incremental write barriers),
   1.439 + *   never point into the nursery (no generational write barriers), and
   1.440 + *   be traced via MarkRuntime (we use fromMarkedLocation).
   1.441 + *
   1.442 + * In short: you *really* need to know what you're doing before you use this
   1.443 + * class!
   1.444 + */
   1.445 +template <class T>
   1.446 +class FixedHeapPtr
   1.447 +{
   1.448 +    T *value;
   1.449 +
   1.450 +  public:
   1.451 +    operator T*() const { return value; }
   1.452 +    T * operator->() const { return value; }
   1.453 +
   1.454 +    operator Handle<T*>() const {
   1.455 +        return Handle<T*>::fromMarkedLocation(&value);
   1.456 +    }
   1.457 +
   1.458 +    void init(T *ptr) {
   1.459 +        value = ptr;
   1.460 +    }
   1.461 +};
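          +
          +/*
          + * Usage sketch (lengthName is hypothetical): a pointer interned once at
          + * startup, never mutated and never nursery-allocated, can live in a
          + * FixedHeapPtr and convert implicitly to a handle:
          + *
          + *   FixedHeapPtr<PropertyName> lengthName;
          + *   lengthName.init(name);                  // no barriers involved
          + *   Handle<PropertyName*> h = lengthName;   // via fromMarkedLocation
          + */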
   1.462 +
   1.463 +/*
   1.464 + * A pre- and post-barriered heap pointer, for use inside the JS engine.
   1.465 + *
   1.466 + * Unlike HeapPtr<T>, it can be used in memory that is not managed by the GC,
   1.467 + * i.e. in C++ containers.  It is, however, somewhat slower, so should only be
   1.468 + * used in contexts where this ability is necessary.
   1.469 + */
   1.470 +template <class T>
   1.471 +class RelocatablePtr : public BarrieredPtr<T>
   1.472 +{
   1.473 +  public:
   1.474 +    RelocatablePtr() : BarrieredPtr<T>(nullptr) {}
   1.475 +    explicit RelocatablePtr(T *v) : BarrieredPtr<T>(v) {
   1.476 +        if (v)
   1.477 +            post();
   1.478 +    }
   1.479 +
   1.480 +    /*
   1.481 +     * For RelocatablePtr, move semantics are equivalent to copy semantics. In
   1.482 +     * C++, a copy constructor taking const-ref is the way to get a single
   1.483 +     * function that will be used for both lvalue and rvalue copies, so we can
   1.484 +     * simply omit the rvalue variant.
   1.485 +     */
   1.486 +    RelocatablePtr(const RelocatablePtr<T> &v) : BarrieredPtr<T>(v) {
   1.487 +        if (this->value)
   1.488 +            post();
   1.489 +    }
   1.490 +
   1.491 +    ~RelocatablePtr() {
   1.492 +        if (this->value)
   1.493 +            relocate();
   1.494 +    }
   1.495 +
   1.496 +    RelocatablePtr<T> &operator=(T *v) {
   1.497 +        this->pre();
   1.498 +        JS_ASSERT(!IsPoisonedPtr<T>(v));
   1.499 +        if (v) {
   1.500 +            this->value = v;
   1.501 +            post();
   1.502 +        } else if (this->value) {
   1.503 +            relocate();
   1.504 +            this->value = v;
   1.505 +        }
   1.506 +        return *this;
   1.507 +    }
   1.508 +
   1.509 +    RelocatablePtr<T> &operator=(const RelocatablePtr<T> &v) {
   1.510 +        this->pre();
   1.511 +        JS_ASSERT(!IsPoisonedPtr<T>(v.value));
   1.512 +        if (v.value) {
   1.513 +            this->value = v.value;
   1.514 +            post();
   1.515 +        } else if (this->value) {
   1.516 +            relocate();
    1.517 +            this->value = v.value;
   1.518 +        }
   1.519 +        return *this;
   1.520 +    }
   1.521 +
   1.522 +  protected:
   1.523 +    void post() {
   1.524 +#ifdef JSGC_GENERATIONAL
   1.525 +        JS_ASSERT(this->value);
   1.526 +        T::writeBarrierPostRelocate(this->value, &this->value);
   1.527 +#endif
   1.528 +    }
   1.529 +
   1.530 +    void relocate() {
   1.531 +#ifdef JSGC_GENERATIONAL
   1.532 +        JS_ASSERT(this->value);
   1.533 +        T::writeBarrierPostRemove(this->value, &this->value);
   1.534 +#endif
   1.535 +    }
   1.536 +};
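          +
          +/*
          + * Usage sketch (assuming a js::Vector of them): the copy constructor and
          + * destructor keep the store buffer in sync as the vector's storage moves,
          + * which is exactly what HeapPtr cannot do:
          + *
          + *   Vector<RelocatablePtr<JSObject>, 8> worklist(cx);
          + *   if (!worklist.append(obj))   // post-barriers the appended copy
          + *       return false;
          + */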
   1.537 +
   1.538 +/*
   1.539 + * This is a hack for RegExpStatics::updateFromMatch. It allows us to do two
   1.540 + * barriers with only one branch to check if we're in an incremental GC.
   1.541 + */
   1.542 +template <class T1, class T2>
   1.543 +static inline void
   1.544 +BarrieredSetPair(Zone *zone,
   1.545 +                 HeapPtr<T1> &v1, T1 *val1,
   1.546 +                 HeapPtr<T2> &v2, T2 *val2)
   1.547 +{
   1.548 +    if (T1::needWriteBarrierPre(zone)) {
   1.549 +        v1.pre();
   1.550 +        v2.pre();
   1.551 +    }
   1.552 +    v1.unsafeSet(val1);
   1.553 +    v2.unsafeSet(val2);
   1.554 +    v1.post();
   1.555 +    v2.post();
   1.556 +}
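          +
          +/*
          + * For example (a sketch with hypothetical fields), two members can be
          + * updated under a single needsBarrier() check:
          + *
          + *   BarrieredSetPair(zone, pendingInput_, input,
          + *                          lazySource_, source);
          + */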
   1.557 +
   1.558 +class Shape;
   1.559 +class BaseShape;
   1.560 +namespace types { struct TypeObject; }
   1.561 +
   1.562 +typedef BarrieredPtr<JSObject> BarrieredPtrObject;
   1.563 +typedef BarrieredPtr<JSScript> BarrieredPtrScript;
   1.564 +
   1.565 +typedef EncapsulatedPtr<JSObject> EncapsulatedPtrObject;
   1.566 +typedef EncapsulatedPtr<JSScript> EncapsulatedPtrScript;
   1.567 +
   1.568 +typedef RelocatablePtr<JSObject> RelocatablePtrObject;
   1.569 +typedef RelocatablePtr<JSScript> RelocatablePtrScript;
   1.570 +
   1.571 +typedef HeapPtr<JSObject> HeapPtrObject;
   1.572 +typedef HeapPtr<JSFunction> HeapPtrFunction;
   1.573 +typedef HeapPtr<JSString> HeapPtrString;
   1.574 +typedef HeapPtr<PropertyName> HeapPtrPropertyName;
   1.575 +typedef HeapPtr<JSScript> HeapPtrScript;
   1.576 +typedef HeapPtr<Shape> HeapPtrShape;
   1.577 +typedef HeapPtr<BaseShape> HeapPtrBaseShape;
   1.578 +typedef HeapPtr<types::TypeObject> HeapPtrTypeObject;
   1.579 +
   1.580 +/* Useful for hashtables with a HeapPtr as key. */
   1.581 +
   1.582 +template <class T>
   1.583 +struct HeapPtrHasher
   1.584 +{
   1.585 +    typedef HeapPtr<T> Key;
   1.586 +    typedef T *Lookup;
   1.587 +
   1.588 +    static HashNumber hash(Lookup obj) { return DefaultHasher<T *>::hash(obj); }
   1.589 +    static bool match(const Key &k, Lookup l) { return k.get() == l; }
   1.590 +    static void rekey(Key &k, const Key& newKey) { k.unsafeSet(newKey); }
   1.591 +};
   1.592 +
   1.593 +/* Specialized hashing policy for HeapPtrs. */
   1.594 +template <class T>
   1.595 +struct DefaultHasher< HeapPtr<T> > : HeapPtrHasher<T> { };
   1.596 +
   1.597 +template <class T>
   1.598 +struct EncapsulatedPtrHasher
   1.599 +{
   1.600 +    typedef EncapsulatedPtr<T> Key;
   1.601 +    typedef T *Lookup;
   1.602 +
   1.603 +    static HashNumber hash(Lookup obj) { return DefaultHasher<T *>::hash(obj); }
   1.604 +    static bool match(const Key &k, Lookup l) { return k.get() == l; }
   1.605 +    static void rekey(Key &k, const Key& newKey) { k.unsafeSet(newKey); }
   1.606 +};
   1.607 +
   1.608 +template <class T>
   1.609 +struct DefaultHasher< EncapsulatedPtr<T> > : EncapsulatedPtrHasher<T> { };
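          +
          +/*
          + * Both policies hash and match on the raw pointer, so (for example, a
          + * sketch) a table keyed on barriered pointers can be probed with a plain
          + * T* without constructing a barriered key:
          + *
          + *   HashMap<HeapPtr<JSObject>, uint32_t> table(cx);
          + *   bool found = !!table.lookup(rawObj);   // Lookup type is JSObject *
          + */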
   1.610 +
   1.611 +bool
   1.612 +StringIsPermanentAtom(JSString *str);
   1.613 +
   1.614 +/*
   1.615 + * Base class for barriered value types.
   1.616 + */
   1.617 +class BarrieredValue : public ValueOperations<BarrieredValue>
   1.618 +{
   1.619 +  protected:
   1.620 +    Value value;
   1.621 +
   1.622 +    /*
    1.623 +     * Ensure that BarrieredValue is not constructable, except by our
   1.624 +     * implementations.
   1.625 +     */
   1.626 +    BarrieredValue() MOZ_DELETE;
   1.627 +
   1.628 +    BarrieredValue(const Value &v) : value(v) {
   1.629 +        JS_ASSERT(!IsPoisonedValue(v));
   1.630 +    }
   1.631 +
   1.632 +    ~BarrieredValue() {
   1.633 +        pre();
   1.634 +    }
   1.635 +
   1.636 +  public:
   1.637 +    void init(const Value &v) {
   1.638 +        JS_ASSERT(!IsPoisonedValue(v));
   1.639 +        value = v;
   1.640 +    }
   1.641 +    void init(JSRuntime *rt, const Value &v) {
   1.642 +        JS_ASSERT(!IsPoisonedValue(v));
   1.643 +        value = v;
   1.644 +    }
   1.645 +
   1.646 +    bool operator==(const BarrieredValue &v) const { return value == v.value; }
   1.647 +    bool operator!=(const BarrieredValue &v) const { return value != v.value; }
   1.648 +
   1.649 +    const Value &get() const { return value; }
   1.650 +    Value *unsafeGet() { return &value; }
   1.651 +    operator const Value &() const { return value; }
   1.652 +
   1.653 +    JSGCTraceKind gcKind() const { return value.gcKind(); }
   1.654 +
   1.655 +    uint64_t asRawBits() const { return value.asRawBits(); }
   1.656 +
   1.657 +    static void writeBarrierPre(const Value &v) {
   1.658 +#ifdef JSGC_INCREMENTAL
   1.659 +        if (v.isMarkable() && shadowRuntimeFromAnyThread(v)->needsBarrier())
   1.660 +            writeBarrierPre(ZoneOfValueFromAnyThread(v), v);
   1.661 +#endif
   1.662 +    }
   1.663 +
   1.664 +    static void writeBarrierPre(Zone *zone, const Value &v) {
   1.665 +#ifdef JSGC_INCREMENTAL
   1.666 +        if (v.isString() && StringIsPermanentAtom(v.toString()))
   1.667 +            return;
   1.668 +        JS::shadow::Zone *shadowZone = JS::shadow::Zone::asShadowZone(zone);
   1.669 +        if (shadowZone->needsBarrier()) {
   1.670 +            JS_ASSERT_IF(v.isMarkable(), shadowRuntimeFromMainThread(v)->needsBarrier());
   1.671 +            Value tmp(v);
   1.672 +            js::gc::MarkValueUnbarriered(shadowZone->barrierTracer(), &tmp, "write barrier");
   1.673 +            JS_ASSERT(tmp == v);
   1.674 +        }
   1.675 +#endif
   1.676 +    }
   1.677 +
   1.678 +  protected:
   1.679 +    void pre() { writeBarrierPre(value); }
   1.680 +    void pre(Zone *zone) { writeBarrierPre(zone, value); }
   1.681 +
   1.682 +    static JSRuntime *runtimeFromMainThread(const Value &v) {
   1.683 +        JS_ASSERT(v.isMarkable());
   1.684 +        return static_cast<js::gc::Cell *>(v.toGCThing())->runtimeFromMainThread();
   1.685 +    }
   1.686 +    static JSRuntime *runtimeFromAnyThread(const Value &v) {
   1.687 +        JS_ASSERT(v.isMarkable());
   1.688 +        return static_cast<js::gc::Cell *>(v.toGCThing())->runtimeFromAnyThread();
   1.689 +    }
   1.690 +    static JS::shadow::Runtime *shadowRuntimeFromMainThread(const Value &v) {
   1.691 +        return reinterpret_cast<JS::shadow::Runtime*>(runtimeFromMainThread(v));
   1.692 +    }
   1.693 +    static JS::shadow::Runtime *shadowRuntimeFromAnyThread(const Value &v) {
   1.694 +        return reinterpret_cast<JS::shadow::Runtime*>(runtimeFromAnyThread(v));
   1.695 +    }
   1.696 +
   1.697 +  private:
   1.698 +    friend class ValueOperations<BarrieredValue>;
   1.699 +    const Value * extract() const { return &value; }
   1.700 +};
   1.701 +
   1.702 +// Like EncapsulatedPtr, but specialized for Value.
   1.703 +// See the comments on that class for details.
   1.704 +class EncapsulatedValue : public BarrieredValue
   1.705 +{
   1.706 +  public:
   1.707 +    EncapsulatedValue(const Value &v) : BarrieredValue(v) {}
   1.708 +    EncapsulatedValue(const EncapsulatedValue &v) : BarrieredValue(v) {}
   1.709 +
   1.710 +    EncapsulatedValue &operator=(const Value &v) {
   1.711 +        pre();
   1.712 +        JS_ASSERT(!IsPoisonedValue(v));
   1.713 +        value = v;
   1.714 +        return *this;
   1.715 +    }
   1.716 +
   1.717 +    EncapsulatedValue &operator=(const EncapsulatedValue &v) {
   1.718 +        pre();
   1.719 +        JS_ASSERT(!IsPoisonedValue(v));
   1.720 +        value = v.get();
   1.721 +        return *this;
   1.722 +    }
   1.723 +};
   1.724 +
   1.725 +// Like HeapPtr, but specialized for Value.
   1.726 +// See the comments on that class for details.
   1.727 +class HeapValue : public BarrieredValue
   1.728 +{
   1.729 +  public:
   1.730 +    explicit HeapValue()
   1.731 +      : BarrieredValue(UndefinedValue())
   1.732 +    {
   1.733 +        post();
   1.734 +    }
   1.735 +
   1.736 +    explicit HeapValue(const Value &v)
   1.737 +      : BarrieredValue(v)
   1.738 +    {
   1.739 +        JS_ASSERT(!IsPoisonedValue(v));
   1.740 +        post();
   1.741 +    }
   1.742 +
   1.743 +    explicit HeapValue(const HeapValue &v)
   1.744 +      : BarrieredValue(v.value)
   1.745 +    {
   1.746 +        JS_ASSERT(!IsPoisonedValue(v.value));
   1.747 +        post();
   1.748 +    }
   1.749 +
   1.750 +    ~HeapValue() {
   1.751 +        pre();
   1.752 +    }
   1.753 +
   1.754 +    void init(const Value &v) {
   1.755 +        JS_ASSERT(!IsPoisonedValue(v));
   1.756 +        value = v;
   1.757 +        post();
   1.758 +    }
   1.759 +
   1.760 +    void init(JSRuntime *rt, const Value &v) {
   1.761 +        JS_ASSERT(!IsPoisonedValue(v));
   1.762 +        value = v;
   1.763 +        post(rt);
   1.764 +    }
   1.765 +
   1.766 +    HeapValue &operator=(const Value &v) {
   1.767 +        pre();
   1.768 +        JS_ASSERT(!IsPoisonedValue(v));
   1.769 +        value = v;
   1.770 +        post();
   1.771 +        return *this;
   1.772 +    }
   1.773 +
   1.774 +    HeapValue &operator=(const HeapValue &v) {
   1.775 +        pre();
   1.776 +        JS_ASSERT(!IsPoisonedValue(v.value));
   1.777 +        value = v.value;
   1.778 +        post();
   1.779 +        return *this;
   1.780 +    }
   1.781 +
   1.782 +#ifdef DEBUG
   1.783 +    bool preconditionForSet(Zone *zone);
   1.784 +#endif
   1.785 +
   1.786 +    /*
   1.787 +     * This is a faster version of operator=. Normally, operator= has to
    1.788 +     * determine the zone of the value before it can decide whether to do
    1.789 +     * the barrier. If you already know the zone, it's faster to pass it
   1.790 +     * in.
   1.791 +     */
   1.792 +    void set(Zone *zone, const Value &v) {
   1.793 +        JS::shadow::Zone *shadowZone = JS::shadow::Zone::asShadowZone(zone);
   1.794 +        JS_ASSERT(preconditionForSet(zone));
   1.795 +        pre(zone);
   1.796 +        JS_ASSERT(!IsPoisonedValue(v));
   1.797 +        value = v;
   1.798 +        post(shadowZone->runtimeFromAnyThread());
   1.799 +    }
   1.800 +
   1.801 +    static void writeBarrierPost(const Value &value, Value *addr) {
   1.802 +#ifdef JSGC_GENERATIONAL
   1.803 +        if (value.isMarkable())
   1.804 +            shadowRuntimeFromAnyThread(value)->gcStoreBufferPtr()->putValue(addr);
   1.805 +#endif
   1.806 +    }
   1.807 +
   1.808 +    static void writeBarrierPost(JSRuntime *rt, const Value &value, Value *addr) {
   1.809 +#ifdef JSGC_GENERATIONAL
   1.810 +        if (value.isMarkable()) {
   1.811 +            JS::shadow::Runtime *shadowRuntime = JS::shadow::Runtime::asShadowRuntime(rt);
   1.812 +            shadowRuntime->gcStoreBufferPtr()->putValue(addr);
   1.813 +        }
   1.814 +#endif
   1.815 +    }
   1.816 +
   1.817 +  private:
   1.818 +    void post() {
   1.819 +        writeBarrierPost(value, &value);
   1.820 +    }
   1.821 +
   1.822 +    void post(JSRuntime *rt) {
   1.823 +        writeBarrierPost(rt, value, &value);
   1.824 +    }
   1.825 +
   1.826 +    HeapValue(HeapValue &&) MOZ_DELETE;
   1.827 +    HeapValue &operator=(HeapValue &&) MOZ_DELETE;
   1.828 +};
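          +
          +/*
          + * Usage sketch for the set() fast path: a caller that already has the
          + * zone in hand (a hot property-write path, say) avoids re-deriving it:
          + *
          + *   slot.set(zone, v);   // same barriers as |slot = v|, one lookup fewer
          + */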
   1.829 +
   1.830 +// Like RelocatablePtr, but specialized for Value.
   1.831 +// See the comments on that class for details.
   1.832 +class RelocatableValue : public BarrieredValue
   1.833 +{
   1.834 +  public:
   1.835 +    explicit RelocatableValue() : BarrieredValue(UndefinedValue()) {}
   1.836 +
   1.837 +    explicit RelocatableValue(const Value &v)
   1.838 +      : BarrieredValue(v)
   1.839 +    {
   1.840 +        if (v.isMarkable())
   1.841 +            post();
   1.842 +    }
   1.843 +
   1.844 +    RelocatableValue(const RelocatableValue &v)
   1.845 +      : BarrieredValue(v.value)
   1.846 +    {
   1.847 +        JS_ASSERT(!IsPoisonedValue(v.value));
   1.848 +        if (v.value.isMarkable())
   1.849 +            post();
   1.850 +    }
   1.851 +
   1.852 +    ~RelocatableValue()
   1.853 +    {
   1.854 +        if (value.isMarkable())
   1.855 +            relocate(runtimeFromAnyThread(value));
   1.856 +    }
   1.857 +
   1.858 +    RelocatableValue &operator=(const Value &v) {
   1.859 +        pre();
   1.860 +        JS_ASSERT(!IsPoisonedValue(v));
   1.861 +        if (v.isMarkable()) {
   1.862 +            value = v;
   1.863 +            post();
   1.864 +        } else if (value.isMarkable()) {
   1.865 +            JSRuntime *rt = runtimeFromAnyThread(value);
   1.866 +            relocate(rt);
   1.867 +            value = v;
   1.868 +        } else {
   1.869 +            value = v;
   1.870 +        }
   1.871 +        return *this;
   1.872 +    }
   1.873 +
   1.874 +    RelocatableValue &operator=(const RelocatableValue &v) {
   1.875 +        pre();
   1.876 +        JS_ASSERT(!IsPoisonedValue(v.value));
   1.877 +        if (v.value.isMarkable()) {
   1.878 +            value = v.value;
   1.879 +            post();
   1.880 +        } else if (value.isMarkable()) {
   1.881 +            JSRuntime *rt = runtimeFromAnyThread(value);
   1.882 +            relocate(rt);
   1.883 +            value = v.value;
   1.884 +        } else {
   1.885 +            value = v.value;
   1.886 +        }
   1.887 +        return *this;
   1.888 +    }
   1.889 +
   1.890 +  private:
   1.891 +    void post() {
   1.892 +#ifdef JSGC_GENERATIONAL
   1.893 +        JS_ASSERT(value.isMarkable());
   1.894 +        shadowRuntimeFromAnyThread(value)->gcStoreBufferPtr()->putRelocatableValue(&value);
   1.895 +#endif
   1.896 +    }
   1.897 +
   1.898 +    void relocate(JSRuntime *rt) {
   1.899 +#ifdef JSGC_GENERATIONAL
   1.900 +        JS::shadow::Runtime *shadowRuntime = JS::shadow::Runtime::asShadowRuntime(rt);
   1.901 +        shadowRuntime->gcStoreBufferPtr()->removeRelocatableValue(&value);
   1.902 +#endif
   1.903 +    }
   1.904 +};
   1.905 +
   1.906 +// A pre- and post-barriered Value that is specialized to be aware that it
   1.907 +// resides in a slots or elements vector. This allows it to be relocated in
   1.908 +// memory, but with substantially less overhead than a RelocatablePtr.
   1.909 +class HeapSlot : public BarrieredValue
   1.910 +{
   1.911 +  public:
   1.912 +    enum Kind {
   1.913 +        Slot = 0,
   1.914 +        Element = 1
   1.915 +    };
   1.916 +
   1.917 +    explicit HeapSlot() MOZ_DELETE;
   1.918 +
   1.919 +    explicit HeapSlot(JSObject *obj, Kind kind, uint32_t slot, const Value &v)
   1.920 +      : BarrieredValue(v)
   1.921 +    {
   1.922 +        JS_ASSERT(!IsPoisonedValue(v));
   1.923 +        post(obj, kind, slot, v);
   1.924 +    }
   1.925 +
   1.926 +    explicit HeapSlot(JSObject *obj, Kind kind, uint32_t slot, const HeapSlot &s)
   1.927 +      : BarrieredValue(s.value)
   1.928 +    {
   1.929 +        JS_ASSERT(!IsPoisonedValue(s.value));
   1.930 +        post(obj, kind, slot, s);
   1.931 +    }
   1.932 +
   1.933 +    ~HeapSlot() {
   1.934 +        pre();
   1.935 +    }
   1.936 +
   1.937 +    void init(JSObject *owner, Kind kind, uint32_t slot, const Value &v) {
   1.938 +        value = v;
   1.939 +        post(owner, kind, slot, v);
   1.940 +    }
   1.941 +
   1.942 +    void init(JSRuntime *rt, JSObject *owner, Kind kind, uint32_t slot, const Value &v) {
   1.943 +        value = v;
   1.944 +        post(rt, owner, kind, slot, v);
   1.945 +    }
   1.946 +
   1.947 +#ifdef DEBUG
   1.948 +    bool preconditionForSet(JSObject *owner, Kind kind, uint32_t slot);
   1.949 +    bool preconditionForSet(Zone *zone, JSObject *owner, Kind kind, uint32_t slot);
   1.950 +    static void preconditionForWriteBarrierPost(JSObject *obj, Kind kind, uint32_t slot,
   1.951 +                                                Value target);
   1.952 +#endif
   1.953 +
   1.954 +    void set(JSObject *owner, Kind kind, uint32_t slot, const Value &v) {
   1.955 +        JS_ASSERT(preconditionForSet(owner, kind, slot));
   1.956 +        pre();
   1.957 +        JS_ASSERT(!IsPoisonedValue(v));
   1.958 +        value = v;
   1.959 +        post(owner, kind, slot, v);
   1.960 +    }
   1.961 +
   1.962 +    void set(Zone *zone, JSObject *owner, Kind kind, uint32_t slot, const Value &v) {
   1.963 +        JS_ASSERT(preconditionForSet(zone, owner, kind, slot));
   1.964 +        JS::shadow::Zone *shadowZone = JS::shadow::Zone::asShadowZone(zone);
   1.965 +        pre(zone);
   1.966 +        JS_ASSERT(!IsPoisonedValue(v));
   1.967 +        value = v;
   1.968 +        post(shadowZone->runtimeFromAnyThread(), owner, kind, slot, v);
   1.969 +    }
   1.970 +
   1.971 +    static void writeBarrierPost(JSObject *obj, Kind kind, uint32_t slot, Value target)
   1.972 +    {
   1.973 +#ifdef JSGC_GENERATIONAL
   1.974 +        js::gc::Cell *cell = reinterpret_cast<js::gc::Cell*>(obj);
   1.975 +        writeBarrierPost(cell->runtimeFromAnyThread(), obj, kind, slot, target);
   1.976 +#endif
   1.977 +    }
   1.978 +
   1.979 +    static void writeBarrierPost(JSRuntime *rt, JSObject *obj, Kind kind, uint32_t slot,
   1.980 +                                 Value target)
   1.981 +    {
   1.982 +#ifdef DEBUG
   1.983 +        preconditionForWriteBarrierPost(obj, kind, slot, target);
   1.984 +#endif
   1.985 +#ifdef JSGC_GENERATIONAL
   1.986 +        if (target.isObject()) {
   1.987 +            JS::shadow::Runtime *shadowRuntime = JS::shadow::Runtime::asShadowRuntime(rt);
   1.988 +            shadowRuntime->gcStoreBufferPtr()->putSlot(obj, kind, slot, 1);
   1.989 +        }
   1.990 +#endif
   1.991 +    }
   1.992 +
   1.993 +  private:
   1.994 +    void post(JSObject *owner, Kind kind, uint32_t slot, Value target) {
   1.995 +        HeapSlot::writeBarrierPost(owner, kind, slot, target);
   1.996 +    }
   1.997 +
   1.998 +    void post(JSRuntime *rt, JSObject *owner, Kind kind, uint32_t slot, Value target) {
   1.999 +        HeapSlot::writeBarrierPost(rt, owner, kind, slot, target);
  1.1000 +    }
  1.1001 +};
  1.1002 +
  1.1003 +static inline const Value *
  1.1004 +Valueify(const BarrieredValue *array)
  1.1005 +{
  1.1006 +    JS_STATIC_ASSERT(sizeof(HeapValue) == sizeof(Value));
  1.1007 +    JS_STATIC_ASSERT(sizeof(HeapSlot) == sizeof(Value));
  1.1008 +    return (const Value *)array;
  1.1009 +}
  1.1010 +
  1.1011 +static inline HeapValue *
  1.1012 +HeapValueify(Value *v)
  1.1013 +{
  1.1014 +    JS_STATIC_ASSERT(sizeof(HeapValue) == sizeof(Value));
  1.1015 +    JS_STATIC_ASSERT(sizeof(HeapSlot) == sizeof(Value));
  1.1016 +    return (HeapValue *)v;
  1.1017 +}
  1.1018 +
  1.1019 +class HeapSlotArray
  1.1020 +{
  1.1021 +    HeapSlot *array;
  1.1022 +
  1.1023 +  public:
  1.1024 +    HeapSlotArray(HeapSlot *array) : array(array) {}
  1.1025 +
  1.1026 +    operator const Value *() const { return Valueify(array); }
  1.1027 +    operator HeapSlot *() const { return array; }
  1.1028 +
  1.1029 +    HeapSlotArray operator +(int offset) const { return HeapSlotArray(array + offset); }
  1.1030 +    HeapSlotArray operator +(uint32_t offset) const { return HeapSlotArray(array + offset); }
  1.1031 +};
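          +
          +/*
          + * Usage sketch (getDenseElements is assumed to return the object's
          + * element vector): HeapSlotArray hands out plain Values for reads while
          + * keeping writes on the barriered path:
          + *
          + *   HeapSlotArray elems = obj->getDenseElements();
          + *   const Value *vp = elems + start;   // read path, no barrier
          + */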
  1.1032 +
  1.1033 +/*
  1.1034 + * Base class for barriered jsid types.
  1.1035 + */
  1.1036 +class BarrieredId
  1.1037 +{
  1.1038 +  protected:
  1.1039 +    jsid value;
  1.1040 +
  1.1041 +  private:
  1.1042 +    BarrieredId(const BarrieredId &v) MOZ_DELETE;
  1.1043 +
  1.1044 +  protected:
  1.1045 +    explicit BarrieredId(jsid id) : value(id) {}
  1.1046 +    ~BarrieredId() { pre(); }
  1.1047 +
  1.1048 +  public:
  1.1049 +    bool operator==(jsid id) const { return value == id; }
  1.1050 +    bool operator!=(jsid id) const { return value != id; }
  1.1051 +
  1.1052 +    jsid get() const { return value; }
  1.1053 +    jsid *unsafeGet() { return &value; }
  1.1054 +    void unsafeSet(jsid newId) { value = newId; }
  1.1055 +    operator jsid() const { return value; }
  1.1056 +
  1.1057 +  protected:
  1.1058 +    void pre() {
  1.1059 +#ifdef JSGC_INCREMENTAL
  1.1060 +        if (JSID_IS_OBJECT(value)) {
  1.1061 +            JSObject *obj = JSID_TO_OBJECT(value);
  1.1062 +            JS::shadow::Zone *shadowZone = ShadowZoneOfObjectFromAnyThread(obj);
  1.1063 +            if (shadowZone->needsBarrier()) {
  1.1064 +                js::gc::MarkObjectUnbarriered(shadowZone->barrierTracer(), &obj, "write barrier");
  1.1065 +                JS_ASSERT(obj == JSID_TO_OBJECT(value));
  1.1066 +            }
  1.1067 +        } else if (JSID_IS_STRING(value)) {
  1.1068 +            JSString *str = JSID_TO_STRING(value);
  1.1069 +            JS::shadow::Zone *shadowZone = ShadowZoneOfStringFromAnyThread(str);
  1.1070 +            if (shadowZone->needsBarrier()) {
  1.1071 +                js::gc::MarkStringUnbarriered(shadowZone->barrierTracer(), &str, "write barrier");
  1.1072 +                JS_ASSERT(str == JSID_TO_STRING(value));
  1.1073 +            }
  1.1074 +        }
  1.1075 +#endif
  1.1076 +    }
  1.1077 +};
  1.1078 +
  1.1079 +// Like EncapsulatedPtr, but specialized for jsid.
  1.1080 +// See the comments on that class for details.
  1.1081 +class EncapsulatedId : public BarrieredId
  1.1082 +{
  1.1083 +  public:
  1.1084 +    explicit EncapsulatedId(jsid id) : BarrieredId(id) {}
  1.1085 +    explicit EncapsulatedId() : BarrieredId(JSID_VOID) {}
  1.1086 +
  1.1087 +    EncapsulatedId &operator=(const EncapsulatedId &v) {
  1.1088 +        if (v.value != value)
  1.1089 +            pre();
  1.1090 +        JS_ASSERT(!IsPoisonedId(v.value));
  1.1091 +        value = v.value;
  1.1092 +        return *this;
  1.1093 +    }
  1.1094 +};
  1.1095 +
  1.1096 +// Like RelocatablePtr, but specialized for jsid.
  1.1097 +// See the comments on that class for details.
  1.1098 +class RelocatableId : public BarrieredId
  1.1099 +{
  1.1100 +  public:
  1.1101 +    explicit RelocatableId() : BarrieredId(JSID_VOID) {}
  1.1102 +    explicit inline RelocatableId(jsid id) : BarrieredId(id) {}
  1.1103 +    ~RelocatableId() { pre(); }
  1.1104 +
  1.1105 +    bool operator==(jsid id) const { return value == id; }
  1.1106 +    bool operator!=(jsid id) const { return value != id; }
  1.1107 +
  1.1108 +    jsid get() const { return value; }
  1.1109 +    operator jsid() const { return value; }
  1.1110 +
  1.1111 +    jsid *unsafeGet() { return &value; }
  1.1112 +
  1.1113 +    RelocatableId &operator=(jsid id) {
  1.1114 +        if (id != value)
  1.1115 +            pre();
  1.1116 +        JS_ASSERT(!IsPoisonedId(id));
  1.1117 +        value = id;
  1.1118 +        return *this;
  1.1119 +    }
  1.1120 +
  1.1121 +    RelocatableId &operator=(const RelocatableId &v) {
  1.1122 +        if (v.value != value)
  1.1123 +            pre();
  1.1124 +        JS_ASSERT(!IsPoisonedId(v.value));
  1.1125 +        value = v.value;
  1.1126 +        return *this;
  1.1127 +    }
  1.1128 +};
  1.1129 +
  1.1130 +// Like HeapPtr, but specialized for jsid.
  1.1131 +// See the comments on that class for details.
  1.1132 +class HeapId : public BarrieredId
  1.1133 +{
  1.1134 +  public:
  1.1135 +    explicit HeapId() : BarrieredId(JSID_VOID) {}
  1.1136 +
  1.1137 +    explicit HeapId(jsid id)
  1.1138 +      : BarrieredId(id)
  1.1139 +    {
  1.1140 +        JS_ASSERT(!IsPoisonedId(id));
  1.1141 +        post();
  1.1142 +    }
  1.1143 +
  1.1144 +    ~HeapId() { pre(); }
  1.1145 +
  1.1146 +    void init(jsid id) {
  1.1147 +        JS_ASSERT(!IsPoisonedId(id));
  1.1148 +        value = id;
  1.1149 +        post();
  1.1150 +    }
  1.1151 +
  1.1152 +    HeapId &operator=(jsid id) {
  1.1153 +        if (id != value)
  1.1154 +            pre();
  1.1155 +        JS_ASSERT(!IsPoisonedId(id));
  1.1156 +        value = id;
  1.1157 +        post();
  1.1158 +        return *this;
  1.1159 +    }
  1.1160 +
  1.1161 +    HeapId &operator=(const HeapId &v) {
  1.1162 +        if (v.value != value)
  1.1163 +            pre();
  1.1164 +        JS_ASSERT(!IsPoisonedId(v.value));
  1.1165 +        value = v.value;
  1.1166 +        post();
  1.1167 +        return *this;
  1.1168 +    }
  1.1169 +
  1.1170 +  private:
   1.1171 +    void post() {}
  1.1172 +
  1.1173 +    HeapId(const HeapId &v) MOZ_DELETE;
  1.1174 +
  1.1175 +    HeapId(HeapId &&) MOZ_DELETE;
  1.1176 +    HeapId &operator=(HeapId &&) MOZ_DELETE;
  1.1177 +};
  1.1178 +
  1.1179 +/*
  1.1180 + * Incremental GC requires that weak pointers have read barriers. This is mostly
  1.1181 + * an issue for empty shapes stored in JSCompartment. The problem happens when,
  1.1182 + * during an incremental GC, some JS code stores one of the compartment's empty
  1.1183 + * shapes into an object already marked black. Normally, this would not be a
  1.1184 + * problem, because the empty shape would have been part of the initial snapshot
  1.1185 + * when the GC started. However, since this is a weak pointer, it isn't. So we
  1.1186 + * may collect the empty shape even though a live object points to it. To fix
  1.1187 + * this, we mark these empty shapes black whenever they get read out.
  1.1188 + */
  1.1189 +template <class T>
  1.1190 +class ReadBarriered
  1.1191 +{
  1.1192 +    T *value;
  1.1193 +
  1.1194 +  public:
  1.1195 +    ReadBarriered() : value(nullptr) {}
  1.1196 +    ReadBarriered(T *value) : value(value) {}
  1.1197 +    ReadBarriered(const Rooted<T*> &rooted) : value(rooted) {}
  1.1198 +
  1.1199 +    T *get() const {
  1.1200 +        if (!value)
  1.1201 +            return nullptr;
  1.1202 +        T::readBarrier(value);
  1.1203 +        return value;
  1.1204 +    }
  1.1205 +
  1.1206 +    operator T*() const { return get(); }
  1.1207 +
  1.1208 +    T &operator*() const { return *get(); }
  1.1209 +    T *operator->() const { return get(); }
  1.1210 +
  1.1211 +    T **unsafeGet() { return &value; }
  1.1212 +    T * const * unsafeGet() const { return &value; }
  1.1213 +
  1.1214 +    void set(T *v) { value = v; }
  1.1215 +
  1.1216 +    operator bool() { return !!value; }
  1.1217 +};
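          +
          +/*
          + * Usage sketch (initialShape_ is hypothetical): every read marks the weak
          + * target, so an unmarked shape can never be copied out of here into an
          + * already-black object during an incremental GC:
          + *
          + *   ReadBarriered<Shape> initialShape_;
          + *   Shape *shape = initialShape_;   // get() runs Shape::readBarrier
          + */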
  1.1218 +
  1.1219 +class ReadBarrieredValue
  1.1220 +{
  1.1221 +    Value value;
  1.1222 +
  1.1223 +  public:
  1.1224 +    ReadBarrieredValue() : value(UndefinedValue()) {}
  1.1225 +    ReadBarrieredValue(const Value &value) : value(value) {}
  1.1226 +
  1.1227 +    inline const Value &get() const;
  1.1228 +    Value *unsafeGet() { return &value; }
  1.1229 +    inline operator const Value &() const;
  1.1230 +
  1.1231 +    inline JSObject &toObject() const;
  1.1232 +};
  1.1233 +
  1.1234 +/*
  1.1235 + * Operations on a Heap thing inside the GC need to strip the barriers from
  1.1236 + * pointer operations. This template helps do that in contexts where the type
  1.1237 + * is templatized.
  1.1238 + */
  1.1239 +template <typename T> struct Unbarriered {};
  1.1240 +template <typename S> struct Unbarriered< EncapsulatedPtr<S> > { typedef S *type; };
  1.1241 +template <typename S> struct Unbarriered< RelocatablePtr<S> > { typedef S *type; };
  1.1242 +template <> struct Unbarriered<EncapsulatedValue> { typedef Value type; };
  1.1243 +template <> struct Unbarriered<RelocatableValue> { typedef Value type; };
  1.1244 +template <typename S> struct Unbarriered< DefaultHasher< EncapsulatedPtr<S> > > {
  1.1245 +    typedef DefaultHasher<S *> type;
  1.1246 +};
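          +
          +/*
          + * For example (a sketch), templated marking code can recover the raw
          + * type:
          + *
          + *   typename Unbarriered<EncapsulatedPtr<Shape> >::type raw;   // Shape *
          + */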
  1.1247 +
  1.1248 +} /* namespace js */
  1.1249 +
  1.1250 +#endif /* gc_Barrier_h */
