/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef gc_Barrier_h
#define gc_Barrier_h

#include "NamespaceImports.h"

#include "gc/Heap.h"
#ifdef JSGC_GENERATIONAL
# include "gc/StoreBuffer.h"
#endif
#include "js/HashTable.h"
#include "js/Id.h"
#include "js/RootingAPI.h"

/*
 * A write barrier is a mechanism used by incremental or generational GCs to
 * ensure that every value that needs to be marked is marked. In general, the
 * write barrier should be invoked whenever a write can cause the set of things
 * traced through by the GC to change. This includes:
 *   - writes to object properties
 *   - writes to array slots
 *   - writes to fields like JSObject::shape_ that we trace through
 *   - writes to fields in private data, like JSGenerator::obj
 *   - writes to non-markable fields like JSObject::private that point to
 *     markable data
 * The last category is the trickiest. Even though the private pointer does not
 * point to a GC thing, changing the private pointer may change the set of
 * objects that are traced by the GC. Therefore it needs a write barrier.
 *
 * Every barriered write should have the following form:
 *
 *   <pre-barrier>
 *   obj->field = value; // do the actual write
 *   <post-barrier>
 *
 * The pre-barrier is used for incremental GC and the post-barrier is for
 * generational GC.
 *
 * PRE-BARRIER
 *
 * To understand the pre-barrier, let's consider how incremental GC works. The
 * GC itself is divided into "slices". Between each slice, JS code is allowed
 * to run. Each slice should be short so that the user doesn't notice the
 * interruptions. In our GC, the structure of the slices is as follows:
 *
 * 1. ... JS work, which leads to a request to do GC ...
 * 2. [first GC slice, which performs all root marking and possibly more marking]
 * 3. ... more JS work is allowed to run ...
 * 4. [GC mark slice, which runs entirely in drainMarkStack]
 * 5. ... more JS work ...
 * 6. [GC mark slice, which runs entirely in drainMarkStack]
 * 7. ... more JS work ...
 * 8. [GC marking finishes; sweeping done non-incrementally; GC is done]
 * 9. ... JS continues uninterrupted now that GC is finished ...
 *
 * Of course, there may be a different number of slices depending on how much
 * marking is to be done.
 *
 * The danger inherent in this scheme is that the JS code in steps 3, 5, and 7
 * might change the heap in a way that causes the GC to collect an object that
 * is actually reachable. The write barrier prevents this from happening. We use
 * a variant of incremental GC called "snapshot at the beginning." This approach
 * guarantees the invariant that if an object is reachable in step 2, then we
 * will mark it eventually. The name comes from the idea that we take a
 * theoretical "snapshot" of all reachable objects in step 2; all objects in
 * that snapshot should eventually be marked. (Note that the write barrier
 * verifier code takes an actual snapshot.)
 *
 * The basic correctness invariant of a snapshot-at-the-beginning collector is
 * that any object reachable at the end of the GC (step 9) must either:
 *   (1) have been reachable at the beginning (step 2) and thus in the snapshot
 *   (2) or must have been newly allocated, in steps 3, 5, or 7.
 * To deal with case (2), any objects allocated during an incremental GC are
 * automatically marked black.
 *
 * This strategy is actually somewhat conservative: if an object becomes
 * unreachable between steps 2 and 8, it would be safe to collect it. We won't,
 * mainly for simplicity. (Also, note that the snapshot is entirely
 * theoretical. We don't actually do anything special in step 2 that we
 * wouldn't do in a non-incremental GC.)
 *
 * It's the pre-barrier's job to maintain the snapshot invariant. Consider the
 * write "obj->field = value". Let the prior value of obj->field be value0.
 * Since it's possible that value0 may have been what obj->field contained in
 * step 2, when the snapshot was taken, the barrier marks value0. Note that it
 * only does this if we're in the middle of an incremental GC. Since this is
 * rare, the cost of the write barrier is usually just an extra branch.
 *
 * In practice, we implement the pre-barrier differently based on the type of
 * value0. E.g., see JSObject::writeBarrierPre, which is used if obj->field is
 * a JSObject*. It takes value0 as a parameter.
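 *
 * As a rough illustrative sketch (not the exact engine code), the overloaded
 * assignment operators defined later in this file make a barriered write of a
 * JSObject * field behave approximately like:
 *
 *   JSObject *value0 = obj->field;        // the value that may be in the snapshot
 *   JSObject::writeBarrierPre(value0);    // no-op unless an incremental GC is
 *                                         // in progress; otherwise marks value0
 *   obj->field = value;                   // the actual write
 *
 * The names mirror the writeBarrierPre methods declared below; the expansion
 * itself is schematic only.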
 *
 * POST-BARRIER
 *
 * For generational GC, we want to be able to quickly collect the nursery in a
 * minor collection. Part of the way this is achieved is to only mark the
 * nursery itself; tenured things, which may form the majority of the heap, are
 * not traced through or marked. This leads to the problem of what to do about
 * tenured objects that have pointers into the nursery: if such things are not
 * marked, they may be discarded while there are still live objects which
 * reference them. The solution is to maintain information about these
 * pointers, and mark their targets when we start a minor collection.
 *
 * The pointers can be thought of as edges in the object graph, and the set of
 * edges from the tenured generation into the nursery is known as the
 * remembered set. Post-barriers are used to track this remembered set.
 *
 * Whenever a slot which could contain such a pointer is written, we use a
 * write barrier to check whether the edge just created needs to be in the
 * remembered set, and if so we insert it into the store buffer, which is the
 * collector's representation of the remembered set. This means that when we
 * come to do a minor collection we can examine the contents of the store
 * buffer and mark any edge targets that are in the nursery.
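 *
 * As a rough illustrative sketch (again, not the exact engine code), the
 * post-barrier on a write to a tenured object's field behaves approximately
 * like:
 *
 *   obj->field = value;                   // the actual write
 *   if (value is allocated in the nursery)
 *       storeBuffer.put(&obj->field);     // remember the tenured-to-nursery edge
 *
 * The concrete versions are the writeBarrierPost methods below together with
 * the StoreBuffer put methods (putValue, putSlot, ...) declared in
 * gc/StoreBuffer.h; the check shown here is schematic only.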
 *
 * IMPLEMENTATION DETAILS
 *
 * Since it would be awkward to change every write to memory into a function
 * call, this file contains a bunch of C++ classes and templates that use
 * operator overloading to take care of barriers automatically. In many cases,
 * all that's necessary to make some field be barriered is to replace
 *     Type *field;
 * with
 *     HeapPtr<Type> field;
 * There are also special classes HeapValue and HeapId, which barrier js::Value
 * and jsid, respectively.
 *
 * One additional note: not all object writes need to be barriered. Writes to
 * newly allocated objects do not need a pre-barrier. In these cases, we use
 * the "obj->field.init(value)" method instead of "obj->field = value". We use
 * the init naming idiom in many places to signify that a field is being
 * assigned for the first time.
 *
 * For each of pointers, Values and jsids, this file implements four classes,
 * illustrated here for the pointer (Ptr) classes:
 *
 * BarrieredPtr             abstract base class which provides common operations
 *    |  |  |
 *    |  | EncapsulatedPtr  provides pre-barriers only
 *    |  |
 *    | HeapPtr             provides pre- and post-barriers
 *    |
 * RelocatablePtr           provides pre- and post-barriers and is relocatable
 *
 * These classes are designed to be used by the internals of the JS engine.
 * Barriers designed to be used externally are provided in
 * js/public/RootingAPI.h.
 */
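
/*
 * Illustrative sketch only: a hypothetical GC thing with barriered fields,
 * showing the init-vs-assignment idiom described above. The class and field
 * names here are invented for illustration and do not exist in the engine.
 *
 *   class HypotheticalThing : public js::gc::BarrieredCell<HypotheticalThing>
 *   {
 *       HeapPtrObject target_;   // pre- and post-barriered JSObject *
 *       HeapValue     datum_;    // pre- and post-barriered js::Value
 *
 *     public:
 *       void initFields(JSObject *obj, const Value &v) {
 *           target_.init(obj);   // first write: no pre-barrier needed
 *           datum_.init(v);
 *       }
 *       void setTarget(JSObject *obj) {
 *           target_ = obj;       // later writes: pre- and post-barriers fire
 *       }
 *   };
 */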

namespace js {

class PropertyName;

#ifdef DEBUG
bool
RuntimeFromMainThreadIsHeapMajorCollecting(JS::shadow::Zone *shadowZone);
#endif

namespace gc {

template <typename T>
void
MarkUnbarriered(JSTracer *trc, T **thingp, const char *name);

// Direct value access used by the write barriers and the jits.
void
MarkValueUnbarriered(JSTracer *trc, Value *v, const char *name);

// These two declarations are also present in gc/Marking.h, via the DeclMarker
// macro. Not great, but hard to avoid.
void
MarkObjectUnbarriered(JSTracer *trc, JSObject **obj, const char *name);
void
MarkStringUnbarriered(JSTracer *trc, JSString **str, const char *name);

// Note that some subclasses (e.g. ObjectImpl) specialize some of these
// methods.
template <class T>
class BarrieredCell : public gc::Cell
{
  public:
    MOZ_ALWAYS_INLINE JS::Zone *zone() const { return tenuredZone(); }
    MOZ_ALWAYS_INLINE JS::shadow::Zone *shadowZone() const { return JS::shadow::Zone::asShadowZone(zone()); }
    MOZ_ALWAYS_INLINE JS::Zone *zoneFromAnyThread() const { return tenuredZoneFromAnyThread(); }
    MOZ_ALWAYS_INLINE JS::shadow::Zone *shadowZoneFromAnyThread() const {
        return JS::shadow::Zone::asShadowZone(zoneFromAnyThread());
    }

    static MOZ_ALWAYS_INLINE void readBarrier(T *thing) {
#ifdef JSGC_INCREMENTAL
        JS::shadow::Zone *shadowZone = thing->shadowZoneFromAnyThread();
        if (shadowZone->needsBarrier()) {
            MOZ_ASSERT(!RuntimeFromMainThreadIsHeapMajorCollecting(shadowZone));
            T *tmp = thing;
            js::gc::MarkUnbarriered<T>(shadowZone->barrierTracer(), &tmp, "read barrier");
            JS_ASSERT(tmp == thing);
        }
#endif
    }

    static MOZ_ALWAYS_INLINE bool needWriteBarrierPre(JS::Zone *zone) {
#ifdef JSGC_INCREMENTAL
        return JS::shadow::Zone::asShadowZone(zone)->needsBarrier();
#else
        return false;
#endif
    }

    static MOZ_ALWAYS_INLINE bool isNullLike(T *thing) { return !thing; }

    static MOZ_ALWAYS_INLINE void writeBarrierPre(T *thing) {
#ifdef JSGC_INCREMENTAL
        if (isNullLike(thing) || !thing->shadowRuntimeFromAnyThread()->needsBarrier())
            return;

        JS::shadow::Zone *shadowZone = thing->shadowZoneFromAnyThread();
        if (shadowZone->needsBarrier()) {
            MOZ_ASSERT(!RuntimeFromMainThreadIsHeapMajorCollecting(shadowZone));
            T *tmp = thing;
            js::gc::MarkUnbarriered<T>(shadowZone->barrierTracer(), &tmp, "write barrier");
            JS_ASSERT(tmp == thing);
        }
#endif
    }

    static void writeBarrierPost(T *thing, void *addr) {}
    static void writeBarrierPostRelocate(T *thing, void *addr) {}
    static void writeBarrierPostRemove(T *thing, void *addr) {}
};

} // namespace gc

// Note: the following Zone-getting functions must be equivalent to the zone()
// and shadowZone() functions implemented by the subclasses of BarrieredCell.

JS::Zone *
ZoneOfObject(const JSObject &obj);

static inline JS::shadow::Zone *
ShadowZoneOfObject(JSObject *obj)
{
    return JS::shadow::Zone::asShadowZone(ZoneOfObject(*obj));
}

static inline JS::shadow::Zone *
ShadowZoneOfString(JSString *str)
{
    return JS::shadow::Zone::asShadowZone(reinterpret_cast<const js::gc::Cell *>(str)->tenuredZone());
}

MOZ_ALWAYS_INLINE JS::Zone *
ZoneOfValue(const JS::Value &value)
{
    JS_ASSERT(value.isMarkable());
    if (value.isObject())
        return ZoneOfObject(value.toObject());
    return static_cast<js::gc::Cell *>(value.toGCThing())->tenuredZone();
}

JS::Zone *
ZoneOfObjectFromAnyThread(const JSObject &obj);

static inline JS::shadow::Zone *
ShadowZoneOfObjectFromAnyThread(JSObject *obj)
{
    return JS::shadow::Zone::asShadowZone(ZoneOfObjectFromAnyThread(*obj));
}

static inline JS::shadow::Zone *
ShadowZoneOfStringFromAnyThread(JSString *str)
{
    return JS::shadow::Zone::asShadowZone(
        reinterpret_cast<const js::gc::Cell *>(str)->tenuredZoneFromAnyThread());
}

MOZ_ALWAYS_INLINE JS::Zone *
ZoneOfValueFromAnyThread(const JS::Value &value)
{
    JS_ASSERT(value.isMarkable());
    if (value.isObject())
        return ZoneOfObjectFromAnyThread(value.toObject());
    return static_cast<js::gc::Cell *>(value.toGCThing())->tenuredZoneFromAnyThread();
}

/*
 * Base class for barriered pointer types.
 */
template <class T, typename Unioned = uintptr_t>
class BarrieredPtr
{
  protected:
    union {
        T *value;
        Unioned other;
    };

    BarrieredPtr(T *v) : value(v) {}
    ~BarrieredPtr() { pre(); }

  public:
    void init(T *v) {
        JS_ASSERT(!IsPoisonedPtr(v));
        this->value = v;
    }

    /* Use this if the automatic coercion to T* isn't working. */
    T *get() const { return value; }

    /*
     * Use these if you want to change the value without invoking the barrier.
     * Obviously this is dangerous unless you know the barrier is not needed.
     */
    T **unsafeGet() { return &value; }
    void unsafeSet(T *v) { value = v; }

    Unioned *unsafeGetUnioned() { return &other; }

    T &operator*() const { return *value; }
    T *operator->() const { return value; }

    operator T*() const { return value; }

  protected:
    void pre() { T::writeBarrierPre(value); }
};

/*
 * EncapsulatedPtr only automatically handles pre-barriers. Post-barriers must
 * be manually implemented when using this class. HeapPtr and RelocatablePtr
 * should be used in all cases that do not require explicit low-level control
 * of moving behavior, e.g. for HashMap keys.
 */
template <class T, typename Unioned = uintptr_t>
class EncapsulatedPtr : public BarrieredPtr<T, Unioned>
{
  public:
    EncapsulatedPtr() : BarrieredPtr<T, Unioned>(nullptr) {}
    EncapsulatedPtr(T *v) : BarrieredPtr<T, Unioned>(v) {}
    explicit EncapsulatedPtr(const EncapsulatedPtr &v)
      : BarrieredPtr<T, Unioned>(v.value) {}

    /* Use to set the pointer to nullptr. */
    void clear() {
        this->pre();
        this->value = nullptr;
    }

    EncapsulatedPtr &operator=(T *v) {
        this->pre();
        JS_ASSERT(!IsPoisonedPtr(v));
        this->value = v;
        return *this;
    }

    EncapsulatedPtr &operator=(const EncapsulatedPtr &v) {
        this->pre();
        JS_ASSERT(!IsPoisonedPtr(v.value));
        this->value = v.value;
        return *this;
    }
};
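
/*
 * Illustrative sketch only: EncapsulatedPtr leaves the post-barrier to its
 * owner. A hypothetical owner (the name and layout are invented for
 * illustration) might perform it by hand:
 *
 *   struct HypotheticalOwner
 *   {
 *       EncapsulatedPtr<JSObject> target;
 *
 *       void setTarget(JSObject *obj) {
 *           target = obj;                               // pre-barrier only
 *           JSObject::writeBarrierPost(obj, &target);   // manual post-barrier
 *       }
 *   };
 */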

/*
 * A pre- and post-barriered heap pointer, for use inside the JS engine.
 *
 * Not to be confused with JS::Heap<T>. This is a different class from the
 * external interface and implements substantially different semantics.
 *
 * The post-barriers implemented by this class are faster than those
 * implemented by RelocatablePtr<T> or JS::Heap<T> at the cost of not
 * automatically handling deletion or movement. It should generally only be
 * stored in memory that has GC lifetime. HeapPtr must not be used in contexts
 * where it may be implicitly moved or deleted, e.g. most containers.
 */
template <class T, class Unioned = uintptr_t>
class HeapPtr : public BarrieredPtr<T, Unioned>
{
  public:
    HeapPtr() : BarrieredPtr<T, Unioned>(nullptr) {}
    explicit HeapPtr(T *v) : BarrieredPtr<T, Unioned>(v) { post(); }
    explicit HeapPtr(const HeapPtr &v) : BarrieredPtr<T, Unioned>(v) { post(); }

    void init(T *v) {
        JS_ASSERT(!IsPoisonedPtr(v));
        this->value = v;
        post();
    }

    HeapPtr &operator=(T *v) {
        this->pre();
        JS_ASSERT(!IsPoisonedPtr(v));
        this->value = v;
        post();
        return *this;
    }

    HeapPtr &operator=(const HeapPtr &v) {
        this->pre();
        JS_ASSERT(!IsPoisonedPtr(v.value));
        this->value = v.value;
        post();
        return *this;
    }

  protected:
    void post() { T::writeBarrierPost(this->value, (void *)&this->value); }

    /* Make this friend so it can access pre() and post(). */
    template <class T1, class T2>
    friend inline void
    BarrieredSetPair(Zone *zone,
                     HeapPtr<T1> &v1, T1 *val1,
                     HeapPtr<T2> &v2, T2 *val2);

  private:
    /*
     * Unlike RelocatablePtr, HeapPtr must be managed with GC lifetimes.
     * Specifically, the memory used by the pointer itself must be live until
     * at least the next minor GC. For that reason, move semantics are invalid
     * and are deleted here. Please note that not all containers support move
     * semantics, so this does not completely prevent invalid uses.
     */
    HeapPtr(HeapPtr &&) MOZ_DELETE;
    HeapPtr &operator=(HeapPtr &&) MOZ_DELETE;
};

/*
 * FixedHeapPtr is designed for one very narrow case: replacing immutable raw
 * pointers to GC-managed things, implicitly converting to a handle type for
 * ease of use. Pointers encapsulated by this type must:
 *
 *   be immutable (no incremental write barriers),
 *   never point into the nursery (no generational write barriers), and
 *   be traced via MarkRuntime (we use fromMarkedLocation).
 *
 * In short: you *really* need to know what you're doing before you use this
 * class!
 */
template <class T>
class FixedHeapPtr
{
    T *value;

  public:
    operator T*() const { return value; }
    T * operator->() const { return value; }

    operator Handle<T*>() const {
        return Handle<T*>::fromMarkedLocation(&value);
    }

    void init(T *ptr) {
        value = ptr;
    }
};

/*
 * A pre- and post-barriered heap pointer, for use inside the JS engine.
 *
 * Unlike HeapPtr, it can be used in memory that is not managed by the GC,
 * i.e. in C++ containers. It is, however, somewhat slower, so it should only
 * be used in contexts where this ability is necessary.
 */
template <class T>
class RelocatablePtr : public BarrieredPtr<T>
{
  public:
    RelocatablePtr() : BarrieredPtr<T>(nullptr) {}
    explicit RelocatablePtr(T *v) : BarrieredPtr<T>(v) {
        if (v)
            post();
    }

    /*
     * For RelocatablePtr, move semantics are equivalent to copy semantics. In
     * C++, a copy constructor taking const-ref is the way to get a single
     * function that will be used for both lvalue and rvalue copies, so we can
     * simply omit the rvalue variant.
     */
    RelocatablePtr(const RelocatablePtr &v) : BarrieredPtr<T>(v) {
        if (this->value)
            post();
    }

    ~RelocatablePtr() {
        if (this->value)
            relocate();
    }

    RelocatablePtr &operator=(T *v) {
        this->pre();
        JS_ASSERT(!IsPoisonedPtr(v));
        if (v) {
            this->value = v;
            post();
        } else if (this->value) {
            relocate();
            this->value = v;
        }
        return *this;
    }

    RelocatablePtr &operator=(const RelocatablePtr &v) {
        this->pre();
        JS_ASSERT(!IsPoisonedPtr(v.value));
        if (v.value) {
            this->value = v.value;
            post();
        } else if (this->value) {
            relocate();
            this->value = v;
        }
        return *this;
    }

  protected:
    void post() {
#ifdef JSGC_GENERATIONAL
        JS_ASSERT(this->value);
        T::writeBarrierPostRelocate(this->value, &this->value);
#endif
    }

    void relocate() {
#ifdef JSGC_GENERATIONAL
        JS_ASSERT(this->value);
        T::writeBarrierPostRemove(this->value, &this->value);
#endif
    }
};
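
/*
 * Illustrative sketch only: because its copy constructor and destructor keep
 * the store buffer up to date, a RelocatablePtr can live in ordinary C++
 * containers whose storage the GC knows nothing about. The container below is
 * invented for illustration:
 *
 *   struct HypotheticalCache
 *   {
 *       js::Vector<RelocatablePtrObject, 0, js::SystemAllocPolicy> entries;
 *       // Growing the vector copies and destroys elements; each copy
 *       // re-registers its new address and each destruction removes the old
 *       // one, so the remembered set stays correct.
 *   };
 */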

/*
 * This is a hack for RegExpStatics::updateFromMatch. It allows us to do two
 * barriers with only one branch to check if we're in an incremental GC.
 */
template <class T1, class T2>
static inline void
BarrieredSetPair(Zone *zone,
                 HeapPtr<T1> &v1, T1 *val1,
                 HeapPtr<T2> &v2, T2 *val2)
{
    if (T1::needWriteBarrierPre(zone)) {
        v1.pre();
        v2.pre();
    }
    v1.unsafeSet(val1);
    v2.unsafeSet(val2);
    v1.post();
    v2.post();
}

class Shape;
class BaseShape;
namespace types { struct TypeObject; }

typedef BarrieredPtr<JSObject> BarrieredPtrObject;
typedef BarrieredPtr<JSScript> BarrieredPtrScript;

typedef EncapsulatedPtr<JSObject> EncapsulatedPtrObject;
typedef EncapsulatedPtr<JSScript> EncapsulatedPtrScript;

typedef RelocatablePtr<JSObject> RelocatablePtrObject;
typedef RelocatablePtr<JSScript> RelocatablePtrScript;

typedef HeapPtr<JSObject> HeapPtrObject;
typedef HeapPtr<JSFunction> HeapPtrFunction;
typedef HeapPtr<JSString> HeapPtrString;
typedef HeapPtr<PropertyName> HeapPtrPropertyName;
typedef HeapPtr<JSScript> HeapPtrScript;
typedef HeapPtr<Shape> HeapPtrShape;
typedef HeapPtr<BaseShape> HeapPtrBaseShape;
typedef HeapPtr<types::TypeObject> HeapPtrTypeObject;

/* Useful for hashtables with a HeapPtr as key. */
template <class T>
struct HeapPtrHasher
{
    typedef HeapPtr<T> Key;
    typedef T *Lookup;

    static HashNumber hash(Lookup obj) { return DefaultHasher<T *>::hash(obj); }
    static bool match(const Key &k, Lookup l) { return k.get() == l; }
    static void rekey(Key &k, const Key& newKey) { k.unsafeSet(newKey); }
};

/* Specialized hashing policy for HeapPtrs. */
template <class T>
struct DefaultHasher< HeapPtr<T> > : HeapPtrHasher<T> { };

template <class T>
struct EncapsulatedPtrHasher
{
    typedef EncapsulatedPtr<T> Key;
    typedef T *Lookup;

    static HashNumber hash(Lookup obj) { return DefaultHasher<T *>::hash(obj); }
    static bool match(const Key &k, Lookup l) { return k.get() == l; }
    static void rekey(Key &k, const Key& newKey) { k.unsafeSet(newKey); }
};

template <class T>
struct DefaultHasher< EncapsulatedPtr<T> > : EncapsulatedPtrHasher<T> { };
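
/*
 * Illustrative sketch only: the DefaultHasher specializations above let
 * barriered pointers serve directly as hash table keys, while lookups use a
 * plain T *. The map below is invented for illustration:
 *
 *   typedef js::HashMap<EncapsulatedPtrObject, uint32_t,
 *                       EncapsulatedPtrHasher<JSObject>,
 *                       js::SystemAllocPolicy> HypotheticalObjectCountMap;
 */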

bool
StringIsPermanentAtom(JSString *str);

/*
 * Base class for barriered value types.
 */
class BarrieredValue : public ValueOperations<BarrieredValue>
{
  protected:
    Value value;

    /*
     * Ensure that BarrieredValue is not constructable, except by our
     * implementations.
     */
    BarrieredValue() MOZ_DELETE;

    BarrieredValue(const Value &v) : value(v) {
        JS_ASSERT(!IsPoisonedValue(v));
    }

    ~BarrieredValue() {
        pre();
    }

  public:
    void init(const Value &v) {
        JS_ASSERT(!IsPoisonedValue(v));
        value = v;
    }
    void init(JSRuntime *rt, const Value &v) {
        JS_ASSERT(!IsPoisonedValue(v));
        value = v;
    }

    bool operator==(const BarrieredValue &v) const { return value == v.value; }
    bool operator!=(const BarrieredValue &v) const { return value != v.value; }

    const Value &get() const { return value; }
    Value *unsafeGet() { return &value; }
    operator const Value &() const { return value; }

    JSGCTraceKind gcKind() const { return value.gcKind(); }

    uint64_t asRawBits() const { return value.asRawBits(); }

    static void writeBarrierPre(const Value &v) {
#ifdef JSGC_INCREMENTAL
        if (v.isMarkable() && shadowRuntimeFromAnyThread(v)->needsBarrier())
            writeBarrierPre(ZoneOfValueFromAnyThread(v), v);
#endif
    }

    static void writeBarrierPre(Zone *zone, const Value &v) {
#ifdef JSGC_INCREMENTAL
        if (v.isString() && StringIsPermanentAtom(v.toString()))
            return;
        JS::shadow::Zone *shadowZone = JS::shadow::Zone::asShadowZone(zone);
        if (shadowZone->needsBarrier()) {
            JS_ASSERT_IF(v.isMarkable(), shadowRuntimeFromMainThread(v)->needsBarrier());
            Value tmp(v);
            js::gc::MarkValueUnbarriered(shadowZone->barrierTracer(), &tmp, "write barrier");
            JS_ASSERT(tmp == v);
        }
#endif
    }

  protected:
    void pre() { writeBarrierPre(value); }
    void pre(Zone *zone) { writeBarrierPre(zone, value); }

    static JSRuntime *runtimeFromMainThread(const Value &v) {
        JS_ASSERT(v.isMarkable());
        return static_cast<js::gc::Cell *>(v.toGCThing())->runtimeFromMainThread();
    }
    static JSRuntime *runtimeFromAnyThread(const Value &v) {
        JS_ASSERT(v.isMarkable());
        return static_cast<js::gc::Cell *>(v.toGCThing())->runtimeFromAnyThread();
    }
    static JS::shadow::Runtime *shadowRuntimeFromMainThread(const Value &v) {
        return reinterpret_cast<JS::shadow::Runtime *>(runtimeFromMainThread(v));
    }
    static JS::shadow::Runtime *shadowRuntimeFromAnyThread(const Value &v) {
        return reinterpret_cast<JS::shadow::Runtime *>(runtimeFromAnyThread(v));
    }

  private:
    friend class ValueOperations<BarrieredValue>;
    const Value * extract() const { return &value; }
};

// Like EncapsulatedPtr, but specialized for Value.
// See the comments on that class for details.
class EncapsulatedValue : public BarrieredValue
{
  public:
    EncapsulatedValue(const Value &v) : BarrieredValue(v) {}
    EncapsulatedValue(const EncapsulatedValue &v) : BarrieredValue(v) {}

    EncapsulatedValue &operator=(const Value &v) {
        pre();
        JS_ASSERT(!IsPoisonedValue(v));
        value = v;
        return *this;
    }

    EncapsulatedValue &operator=(const EncapsulatedValue &v) {
        pre();
        JS_ASSERT(!IsPoisonedValue(v));
        value = v.get();
        return *this;
    }
};

// Like HeapPtr, but specialized for Value.
// See the comments on that class for details.
class HeapValue : public BarrieredValue
{
  public:
    explicit HeapValue()
      : BarrieredValue(UndefinedValue())
    {
        post();
    }

    explicit HeapValue(const Value &v)
      : BarrieredValue(v)
    {
        JS_ASSERT(!IsPoisonedValue(v));
        post();
    }

    explicit HeapValue(const HeapValue &v)
      : BarrieredValue(v.value)
    {
        JS_ASSERT(!IsPoisonedValue(v.value));
        post();
    }

    ~HeapValue() {
        pre();
    }

    void init(const Value &v) {
        JS_ASSERT(!IsPoisonedValue(v));
        value = v;
        post();
    }

    void init(JSRuntime *rt, const Value &v) {
        JS_ASSERT(!IsPoisonedValue(v));
        value = v;
        post(rt);
    }

    HeapValue &operator=(const Value &v) {
        pre();
        JS_ASSERT(!IsPoisonedValue(v));
        value = v;
        post();
        return *this;
    }

    HeapValue &operator=(const HeapValue &v) {
        pre();
        JS_ASSERT(!IsPoisonedValue(v.value));
        value = v.value;
        post();
        return *this;
    }

#ifdef DEBUG
    bool preconditionForSet(Zone *zone);
#endif

    /*
     * This is a faster version of operator=. Normally, operator= has to
     * determine the compartment of the value before it can decide whether to
     * do the barrier. If you already know the compartment, it's faster to
     * pass it in.
     */
    void set(Zone *zone, const Value &v) {
        JS::shadow::Zone *shadowZone = JS::shadow::Zone::asShadowZone(zone);
        JS_ASSERT(preconditionForSet(zone));
        pre(zone);
        JS_ASSERT(!IsPoisonedValue(v));
        value = v;
        post(shadowZone->runtimeFromAnyThread());
    }

    static void writeBarrierPost(const Value &value, Value *addr) {
#ifdef JSGC_GENERATIONAL
        if (value.isMarkable())
            shadowRuntimeFromAnyThread(value)->gcStoreBufferPtr()->putValue(addr);
#endif
    }

    static void writeBarrierPost(JSRuntime *rt, const Value &value, Value *addr) {
#ifdef JSGC_GENERATIONAL
        if (value.isMarkable()) {
            JS::shadow::Runtime *shadowRuntime = JS::shadow::Runtime::asShadowRuntime(rt);
            shadowRuntime->gcStoreBufferPtr()->putValue(addr);
        }
#endif
    }

  private:
    void post() {
        writeBarrierPost(value, &value);
    }

    void post(JSRuntime *rt) {
        writeBarrierPost(rt, value, &value);
    }

    HeapValue(HeapValue &&) MOZ_DELETE;
    HeapValue &operator=(HeapValue &&) MOZ_DELETE;
};
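
/*
 * Illustrative sketch only: the two ways of writing a HeapValue. The names
 * obj, slotValue, zone and v are invented for illustration:
 *
 *   obj->slotValue = v;            // operator=: looks up the zone itself
 *   obj->slotValue.set(zone, v);   // faster when the zone is already known
 */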

// Like RelocatablePtr, but specialized for Value.
// See the comments on that class for details.
class RelocatableValue : public BarrieredValue
{
  public:
    explicit RelocatableValue() : BarrieredValue(UndefinedValue()) {}

    explicit RelocatableValue(const Value &v)
      : BarrieredValue(v)
    {
        if (v.isMarkable())
            post();
    }

    RelocatableValue(const RelocatableValue &v)
      : BarrieredValue(v.value)
    {
        JS_ASSERT(!IsPoisonedValue(v.value));
        if (v.value.isMarkable())
            post();
    }

    ~RelocatableValue()
    {
        if (value.isMarkable())
            relocate(runtimeFromAnyThread(value));
    }

    RelocatableValue &operator=(const Value &v) {
        pre();
        JS_ASSERT(!IsPoisonedValue(v));
        if (v.isMarkable()) {
            value = v;
            post();
        } else if (value.isMarkable()) {
            JSRuntime *rt = runtimeFromAnyThread(value);
            relocate(rt);
            value = v;
        } else {
            value = v;
        }
        return *this;
    }

    RelocatableValue &operator=(const RelocatableValue &v) {
        pre();
        JS_ASSERT(!IsPoisonedValue(v.value));
        if (v.value.isMarkable()) {
            value = v.value;
            post();
        } else if (value.isMarkable()) {
            JSRuntime *rt = runtimeFromAnyThread(value);
            relocate(rt);
            value = v.value;
        } else {
            value = v.value;
        }
        return *this;
    }

  private:
    void post() {
#ifdef JSGC_GENERATIONAL
        JS_ASSERT(value.isMarkable());
        shadowRuntimeFromAnyThread(value)->gcStoreBufferPtr()->putRelocatableValue(&value);
#endif
    }

    void relocate(JSRuntime *rt) {
#ifdef JSGC_GENERATIONAL
        JS::shadow::Runtime *shadowRuntime = JS::shadow::Runtime::asShadowRuntime(rt);
        shadowRuntime->gcStoreBufferPtr()->removeRelocatableValue(&value);
#endif
    }
};

// A pre- and post-barriered Value that is specialized to be aware that it
// resides in a slots or elements vector. This allows it to be relocated in
// memory, but with substantially less overhead than a RelocatablePtr.
class HeapSlot : public BarrieredValue
{
  public:
    enum Kind {
        Slot = 0,
        Element = 1
    };

    explicit HeapSlot() MOZ_DELETE;

    explicit HeapSlot(JSObject *obj, Kind kind, uint32_t slot, const Value &v)
      : BarrieredValue(v)
    {
        JS_ASSERT(!IsPoisonedValue(v));
        post(obj, kind, slot, v);
    }

    explicit HeapSlot(JSObject *obj, Kind kind, uint32_t slot, const HeapSlot &s)
      : BarrieredValue(s.value)
    {
        JS_ASSERT(!IsPoisonedValue(s.value));
        post(obj, kind, slot, s);
    }

    ~HeapSlot() {
        pre();
    }

    void init(JSObject *owner, Kind kind, uint32_t slot, const Value &v) {
        value = v;
        post(owner, kind, slot, v);
    }

    void init(JSRuntime *rt, JSObject *owner, Kind kind, uint32_t slot, const Value &v) {
        value = v;
        post(rt, owner, kind, slot, v);
    }

#ifdef DEBUG
    bool preconditionForSet(JSObject *owner, Kind kind, uint32_t slot);
    bool preconditionForSet(Zone *zone, JSObject *owner, Kind kind, uint32_t slot);
    static void preconditionForWriteBarrierPost(JSObject *obj, Kind kind, uint32_t slot,
                                                Value target);
#endif

    void set(JSObject *owner, Kind kind, uint32_t slot, const Value &v) {
        JS_ASSERT(preconditionForSet(owner, kind, slot));
        pre();
        JS_ASSERT(!IsPoisonedValue(v));
        value = v;
        post(owner, kind, slot, v);
    }

    void set(Zone *zone, JSObject *owner, Kind kind, uint32_t slot, const Value &v) {
        JS_ASSERT(preconditionForSet(zone, owner, kind, slot));
        JS::shadow::Zone *shadowZone = JS::shadow::Zone::asShadowZone(zone);
        pre(zone);
        JS_ASSERT(!IsPoisonedValue(v));
        value = v;
        post(shadowZone->runtimeFromAnyThread(), owner, kind, slot, v);
    }

    static void writeBarrierPost(JSObject *obj, Kind kind, uint32_t slot, Value target)
    {
#ifdef JSGC_GENERATIONAL
        js::gc::Cell *cell = reinterpret_cast<js::gc::Cell *>(obj);
        writeBarrierPost(cell->runtimeFromAnyThread(), obj, kind, slot, target);
#endif
    }

    static void writeBarrierPost(JSRuntime *rt, JSObject *obj, Kind kind, uint32_t slot,
                                 Value target)
    {
#ifdef DEBUG
        preconditionForWriteBarrierPost(obj, kind, slot, target);
#endif
#ifdef JSGC_GENERATIONAL
        if (target.isObject()) {
            JS::shadow::Runtime *shadowRuntime = JS::shadow::Runtime::asShadowRuntime(rt);
            shadowRuntime->gcStoreBufferPtr()->putSlot(obj, kind, slot, 1);
        }
#endif
    }

  private:
    void post(JSObject *owner, Kind kind, uint32_t slot, Value target) {
        HeapSlot::writeBarrierPost(owner, kind, slot, target);
    }

    void post(JSRuntime *rt, JSObject *owner, Kind kind, uint32_t slot, Value target) {
        HeapSlot::writeBarrierPost(rt, owner, kind, slot, target);
    }
};
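
/*
 * Illustrative sketch only: a HeapSlot write names its owning object and
 * location so that the post-barrier can record where the slot lives rather
 * than a raw address. The accessor and variable names are invented for
 * illustration:
 *
 *   HeapSlot &slot = obj->getSlotRef(slotIndex);   // hypothetical accessor
 *   slot.set(obj, HeapSlot::Slot, slotIndex, v);   // barriers see owner and index
 */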

static inline const Value *
Valueify(const BarrieredValue *array)
{
    JS_STATIC_ASSERT(sizeof(HeapValue) == sizeof(Value));
    JS_STATIC_ASSERT(sizeof(HeapSlot) == sizeof(Value));
    return (const Value *)array;
}

static inline HeapValue *
HeapValueify(Value *v)
{
    JS_STATIC_ASSERT(sizeof(HeapValue) == sizeof(Value));
    JS_STATIC_ASSERT(sizeof(HeapSlot) == sizeof(Value));
    return (HeapValue *)v;
}

class HeapSlotArray
{
    HeapSlot *array;

  public:
    HeapSlotArray(HeapSlot *array) : array(array) {}

    operator const Value *() const { return Valueify(array); }
    operator HeapSlot *() const { return array; }

    HeapSlotArray operator +(int offset) const { return HeapSlotArray(array + offset); }
    HeapSlotArray operator +(uint32_t offset) const { return HeapSlotArray(array + offset); }
};

/*
 * Base class for barriered jsid types.
 */
class BarrieredId
{
  protected:
    jsid value;

  private:
    BarrieredId(const BarrieredId &v) MOZ_DELETE;

  protected:
    explicit BarrieredId(jsid id) : value(id) {}
    ~BarrieredId() { pre(); }

  public:
    bool operator==(jsid id) const { return value == id; }
    bool operator!=(jsid id) const { return value != id; }

    jsid get() const { return value; }
    jsid *unsafeGet() { return &value; }
    void unsafeSet(jsid newId) { value = newId; }
    operator jsid() const { return value; }

  protected:
    void pre() {
#ifdef JSGC_INCREMENTAL
        if (JSID_IS_OBJECT(value)) {
            JSObject *obj = JSID_TO_OBJECT(value);
            JS::shadow::Zone *shadowZone = ShadowZoneOfObjectFromAnyThread(obj);
            if (shadowZone->needsBarrier()) {
                js::gc::MarkObjectUnbarriered(shadowZone->barrierTracer(), &obj, "write barrier");
                JS_ASSERT(obj == JSID_TO_OBJECT(value));
            }
        } else if (JSID_IS_STRING(value)) {
            JSString *str = JSID_TO_STRING(value);
            JS::shadow::Zone *shadowZone = ShadowZoneOfStringFromAnyThread(str);
            if (shadowZone->needsBarrier()) {
                js::gc::MarkStringUnbarriered(shadowZone->barrierTracer(), &str, "write barrier");
                JS_ASSERT(str == JSID_TO_STRING(value));
            }
        }
#endif
    }
};

// Like EncapsulatedPtr, but specialized for jsid.
// See the comments on that class for details.
class EncapsulatedId : public BarrieredId
{
  public:
    explicit EncapsulatedId(jsid id) : BarrieredId(id) {}
    explicit EncapsulatedId() : BarrieredId(JSID_VOID) {}

    EncapsulatedId &operator=(const EncapsulatedId &v) {
        if (v.value != value)
            pre();
        JS_ASSERT(!IsPoisonedId(v.value));
        value = v.value;
        return *this;
    }
};

// Like RelocatablePtr, but specialized for jsid.
// See the comments on that class for details.
class RelocatableId : public BarrieredId
{
  public:
    explicit RelocatableId() : BarrieredId(JSID_VOID) {}
    explicit inline RelocatableId(jsid id) : BarrieredId(id) {}
    ~RelocatableId() { pre(); }

    bool operator==(jsid id) const { return value == id; }
    bool operator!=(jsid id) const { return value != id; }

    jsid get() const { return value; }
    operator jsid() const { return value; }

    jsid *unsafeGet() { return &value; }

    RelocatableId &operator=(jsid id) {
        if (id != value)
            pre();
        JS_ASSERT(!IsPoisonedId(id));
        value = id;
        return *this;
    }

    RelocatableId &operator=(const RelocatableId &v) {
        if (v.value != value)
            pre();
        JS_ASSERT(!IsPoisonedId(v.value));
        value = v.value;
        return *this;
    }
};

// Like HeapPtr, but specialized for jsid.
// See the comments on that class for details.
class HeapId : public BarrieredId
{
  public:
    explicit HeapId() : BarrieredId(JSID_VOID) {}

    explicit HeapId(jsid id)
      : BarrieredId(id)
    {
        JS_ASSERT(!IsPoisonedId(id));
        post();
    }

    ~HeapId() { pre(); }

    void init(jsid id) {
        JS_ASSERT(!IsPoisonedId(id));
        value = id;
        post();
    }

    HeapId &operator=(jsid id) {
        if (id != value)
            pre();
        JS_ASSERT(!IsPoisonedId(id));
        value = id;
        post();
        return *this;
    }

    HeapId &operator=(const HeapId &v) {
        if (v.value != value)
            pre();
        JS_ASSERT(!IsPoisonedId(v.value));
        value = v.value;
        post();
        return *this;
    }

  private:
    void post() {}

    HeapId(const HeapId &v) MOZ_DELETE;

    HeapId(HeapId &&) MOZ_DELETE;
    HeapId &operator=(HeapId &&) MOZ_DELETE;
};

/*
 * Incremental GC requires that weak pointers have read barriers. This is
 * mostly an issue for empty shapes stored in JSCompartment. The problem
 * happens when, during an incremental GC, some JS code stores one of the
 * compartment's empty shapes into an object already marked black. Normally,
 * this would not be a problem, because the empty shape would have been part
 * of the initial snapshot when the GC started. However, since this is a weak
 * pointer, it isn't. So we may collect the empty shape even though a live
 * object points to it. To fix this, we mark these empty shapes black whenever
 * they get read out.
 */
template <class T>
class ReadBarriered
{
    T *value;

  public:
    ReadBarriered() : value(nullptr) {}
    ReadBarriered(T *value) : value(value) {}
    ReadBarriered(const Rooted<T*> &rooted) : value(rooted) {}

    T *get() const {
        if (!value)
            return nullptr;
        T::readBarrier(value);
        return value;
    }

    operator T*() const { return get(); }

    T &operator*() const { return *get(); }
    T *operator->() const { return get(); }

    T **unsafeGet() { return &value; }
    T * const * unsafeGet() const { return &value; }

    void set(T *v) { value = v; }

    operator bool() { return !!value; }
};

class ReadBarrieredValue
{
    Value value;

  public:
    ReadBarrieredValue() : value(UndefinedValue()) {}
    ReadBarrieredValue(const Value &value) : value(value) {}

    inline const Value &get() const;
    Value *unsafeGet() { return &value; }
    inline operator const Value &() const;

    inline JSObject &toObject() const;
};
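
/*
 * Illustrative sketch only: a weakly held Shape guarded by a read barrier.
 * The field name is invented for illustration:
 *
 *   ReadBarriered<Shape> cachedShape;
 *   ...
 *   Shape *shape = cachedShape;   // get() runs Shape::readBarrier(value)
 *                                 // during an incremental GC, keeping the
 *                                 // snapshot invariant for this weak edge
 */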

/*
 * Operations on a Heap thing inside the GC need to strip the barriers from
 * pointer operations. This template helps do that in contexts where the type
 * is templatized.
 */
template <typename T> struct Unbarriered {};
template <typename S> struct Unbarriered< EncapsulatedPtr<S> > { typedef S *type; };
template <typename S> struct Unbarriered< RelocatablePtr<S> > { typedef S *type; };
template <> struct Unbarriered<EncapsulatedValue> { typedef Value type; };
template <> struct Unbarriered<RelocatableValue> { typedef Value type; };
template <typename S> struct Unbarriered< DefaultHasher< EncapsulatedPtr<S> > > {
    typedef DefaultHasher<S *> type;
};

} /* namespace js */

#endif /* gc_Barrier_h */