/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef gc_Zone_h
#define gc_Zone_h

#include "mozilla/Atomics.h"
#include "mozilla/MemoryReporting.h"

#include "jscntxt.h"
#include "jsgc.h"
#include "jsinfer.h"

#include "gc/FindSCCs.h"

namespace js {

namespace jit {
class JitZone;
}

/*
 * Encapsulates the data needed to perform allocation. Typically there is
 * precisely one of these per zone (|cx->zone().allocator|). However, in
 * parallel execution mode, there will be one per worker thread.
 */
class Allocator
{
    /*
     * Since allocators can be accessed from worker threads, the parent zone_
     * should not be accessed in general. ArenaLists is allowed to actually do
     * the allocation, however.
     */
    friend class gc::ArenaLists;

    JS::Zone *zone_;

  public:
    explicit Allocator(JS::Zone *zone);

    js::gc::ArenaLists arenas;
};

typedef Vector<JSCompartment *, 1, SystemAllocPolicy> CompartmentVector;

} /* namespace js */

namespace JS {

/*
 * A zone is a collection of compartments. Every compartment belongs to exactly
 * one zone. In Firefox, there is roughly one zone per tab along with a system
 * zone for everything else. Zones mainly serve as boundaries for garbage
 * collection. Unlike compartments, they have no special security properties.
 *
 * Every GC thing belongs to exactly one zone. GC things from the same zone but
 * different compartments can share an arena (4k page). GC things from different
 * zones cannot be stored in the same arena. The garbage collector is capable of
 * collecting one zone at a time; it cannot collect at the granularity of
 * compartments.
 *
 * GC things are tied to zones and compartments as follows:
 *
 * - JSObjects belong to a compartment and cannot be shared between
 *   compartments. If an object needs to point to a JSObject in a different
 *   compartment, regardless of zone, it must go through a cross-compartment
 *   wrapper. Each compartment keeps track of its outgoing wrappers in a table.
 *
 * - JSStrings do not belong to any particular compartment, but they do belong
 *   to a zone. Thus, two different compartments in the same zone can point to a
 *   JSString. When a string needs to be wrapped, we copy it if it's in a
 *   different zone and do nothing if it's in the same zone. Thus, transferring
 *   strings within a zone is very efficient.
 *
 * - Shapes and base shapes belong to a compartment and cannot be shared between
 *   compartments. A base shape holds a pointer to its compartment. Shapes find
 *   their compartment via their base shape. JSObjects find their compartment
 *   via their shape.
 *
 * - Scripts are also compartment-local and cannot be shared. A script points to
 *   its compartment.
 *
 * - Type objects and JitCode objects belong to a compartment and cannot be
 *   shared. However, there is no mechanism to obtain their compartments.
 *
 * A zone remains alive as long as any GC things in the zone are alive. A
 * compartment remains alive as long as any JSObjects, scripts, shapes, or base
 * shapes within it are alive.
 *
 * We always guarantee that a zone has at least one live compartment by refusing
 * to delete the last compartment in a live zone. (This could happen, for
 * example, if the conservative scanner marks a string in an otherwise dead
 * zone.)
 */
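
/*
 * Illustrative sketch of the string-wrapping rule described above. This is
 * not code from the engine; |destZone| and |CopyStringToZone| are
 * hypothetical names used only for illustration:
 *
 *   if (str->zone() == destZone)
 *       return str;                          // same zone: share in place
 *   return CopyStringToZone(str, destZone);  // cross-zone: copy
 */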

struct Zone : public JS::shadow::Zone,
              public js::gc::GraphNodeBase<JS::Zone>,
              public js::MallocProvider<JS::Zone>
{
  private:
    friend bool js::CurrentThreadCanAccessZone(Zone *zone);

  public:
    js::Allocator allocator;

    js::CompartmentVector compartments;

  private:
    bool ionUsingBarriers_;

  public:
    bool active;  // GC flag, whether there are active frames

    bool compileBarriers(bool needsBarrier) const {
        return needsBarrier || runtimeFromMainThread()->gcZeal() == js::gc::ZealVerifierPreValue;
    }

    bool compileBarriers() const {
        return compileBarriers(needsBarrier());
    }

    enum ShouldUpdateIon {
        DontUpdateIon,
        UpdateIon
    };

    void setNeedsBarrier(bool needs, ShouldUpdateIon updateIon);

    const bool *addressOfNeedsBarrier() const {
        return &needsBarrier_;
    }

  public:
    enum GCState {
        NoGC,
        Mark,
        MarkGray,
        Sweep,
        Finished
    };
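
    /*
     * Sketch of the state progression over a full incremental collection, as
     * suggested by the predicates below (the exact sequence is an assumption,
     * not something this header guarantees):
     *
     *   NoGC -> Mark -> MarkGray -> Sweep -> Finished -> NoGC
     *
     * isGCMarking() holds in both marking states, and write barriers are
     * required while marking is in progress.
     */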

  private:
    bool gcScheduled;
    GCState gcState;
    bool gcPreserveCode;

  public:
    bool isCollecting() const {
        if (runtimeFromMainThread()->isHeapCollecting())
            return gcState != NoGC;
        else
            return needsBarrier();
    }

    bool isPreservingCode() const {
        return gcPreserveCode;
    }

    /*
     * If this returns true, all object tracing must be done with a GC marking
     * tracer.
     */
    bool requireGCTracer() const {
        return runtimeFromMainThread()->isHeapMajorCollecting() && gcState != NoGC;
    }

    void setGCState(GCState state) {
        JS_ASSERT(runtimeFromMainThread()->isHeapBusy());
        JS_ASSERT_IF(state != NoGC, canCollect());
        gcState = state;
    }

    void scheduleGC() {
        JS_ASSERT(!runtimeFromMainThread()->isHeapBusy());
        gcScheduled = true;
    }

    void unscheduleGC() {
        gcScheduled = false;
    }

    bool isGCScheduled() {
        return gcScheduled && canCollect();
    }

    void setPreservingCode(bool preserving) {
        gcPreserveCode = preserving;
    }

    bool canCollect() {
        // Zones cannot be collected while in use by other threads.
        if (usedByExclusiveThread)
            return false;
        JSRuntime *rt = runtimeFromAnyThread();
        if (rt->isAtomsZone(this) && rt->exclusiveThreadsPresent())
            return false;
        return true;
    }

    bool wasGCStarted() const {
        return gcState != NoGC;
    }

    bool isGCMarking() {
        if (runtimeFromMainThread()->isHeapCollecting())
            return gcState == Mark || gcState == MarkGray;
        else
            return needsBarrier();
    }

    bool isGCMarkingBlack() {
        return gcState == Mark;
    }

    bool isGCMarkingGray() {
        return gcState == MarkGray;
    }

    bool isGCSweeping() {
        return gcState == Sweep;
    }

    bool isGCFinished() {
        return gcState == Finished;
    }

    /* This is updated by both the main and GC helper threads. */
    mozilla::Atomic<size_t, mozilla::ReleaseAcquire> gcBytes;

    size_t gcTriggerBytes;
    size_t gcMaxMallocBytes;
    double gcHeapGrowthFactor;

    bool isSystem;

    /* Whether this zone is being used by a thread with an ExclusiveContext. */
    bool usedByExclusiveThread;

    /*
     * Get a number that is incremented whenever this zone is collected, and
     * possibly at other times too.
     */
    uint64_t gcNumber();

    /*
     * These flags help us to discover if a compartment that shouldn't be alive
     * manages to outlive a GC.
     */
    bool scheduledForDestruction;
    bool maybeAlive;

    /*
     * Malloc counter to measure memory pressure for GC scheduling. It runs from
     * gcMaxMallocBytes down to zero. This counter should be used only when it's
     * not possible to know the size of a free.
     */
    mozilla::Atomic<ptrdiff_t, mozilla::ReleaseAcquire> gcMallocBytes;

    /*
     * Whether a GC has been triggered as a result of gcMallocBytes falling
     * below zero.
     *
     * This should be a bool, but Atomic only supports 32-bit and pointer-sized
     * types.
     */
    mozilla::Atomic<uint32_t, mozilla::ReleaseAcquire> gcMallocGCTriggered;

    /* This zone's gray roots. */
    js::Vector<js::GrayRoot, 0, js::SystemAllocPolicy> gcGrayRoots;

    /* Per-zone data for use by an embedder. */
    void *data;

    Zone(JSRuntime *rt);
    ~Zone();

    void findOutgoingEdges(js::gc::ComponentFinder<JS::Zone> &finder);

    void discardJitCode(js::FreeOp *fop);

    void addSizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf,
                                size_t *typePool,
                                size_t *baselineStubsOptimized);

    void setGCLastBytes(size_t lastBytes, js::JSGCInvocationKind gckind);
    void reduceGCTriggerBytes(size_t amount);

    void resetGCMallocBytes();
    void setGCMaxMallocBytes(size_t value);
    void updateMallocCounter(size_t nbytes) {
        /*
         * Note: this code may be run from worker threads. We tolerate any
         * thread races when updating gcMallocBytes.
         */
        gcMallocBytes -= ptrdiff_t(nbytes);
        if (MOZ_UNLIKELY(isTooMuchMalloc()))
            onTooMuchMalloc();
    }

    bool isTooMuchMalloc() const {
        return gcMallocBytes <= 0;
    }

    void onTooMuchMalloc();
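
    /*
     * Worked example with illustrative numbers: after
     * setGCMaxMallocBytes(64 * 1024 * 1024) and resetGCMallocBytes(),
     * gcMallocBytes starts at 64 MB. Each updateMallocCounter(nbytes) call
     * subtracts nbytes, so once a cumulative 64 MB of otherwise-untracked
     * allocations has been reported, the counter reaches zero,
     * isTooMuchMalloc() returns true, and onTooMuchMalloc() triggers a GC.
     */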

    void *onOutOfMemory(void *p, size_t nbytes) {
        return runtimeFromMainThread()->onOutOfMemory(p, nbytes);
    }
    void reportAllocationOverflow() {
        js_ReportAllocationOverflow(nullptr);
    }

    js::types::TypeZone types;

    void sweep(js::FreeOp *fop, bool releaseTypes, bool *oom);

    bool hasMarkedCompartments();

  private:
    void sweepBreakpoints(js::FreeOp *fop);

#ifdef JS_ION
    js::jit::JitZone *jitZone_;
    js::jit::JitZone *createJitZone(JSContext *cx);

  public:
    js::jit::JitZone *getJitZone(JSContext *cx) {
        return jitZone_ ? jitZone_ : createJitZone(cx);
    }
    js::jit::JitZone *jitZone() {
        return jitZone_;
    }
#endif
};

} /* namespace JS */

namespace js {

/*
 * Using the atoms zone without holding the exclusive access lock is dangerous
 * because worker threads may be using it simultaneously. Therefore, it's
 * better to skip the atoms zone when iterating over zones. If you need to
 * iterate over the atoms zone, consider taking the exclusive access lock first.
 */
enum ZoneSelector {
    WithAtoms,
    SkipAtoms
};

class ZonesIter {
  private:
    JS::Zone **it, **end;

  public:
    ZonesIter(JSRuntime *rt, ZoneSelector selector) {
        it = rt->zones.begin();
        end = rt->zones.end();

        if (selector == SkipAtoms) {
            JS_ASSERT(rt->isAtomsZone(*it));
            it++;
        }
    }

    bool done() const { return it == end; }

    void next() {
        JS_ASSERT(!done());
        do {
            it++;
        } while (!done() && (*it)->usedByExclusiveThread);
    }

    JS::Zone *get() const {
        JS_ASSERT(!done());
        return *it;
    }

    operator JS::Zone *() const { return get(); }
    JS::Zone *operator->() const { return get(); }
};
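
/*
 * Typical use, as a sketch: visit every zone except the atoms zone. The loop
 * body (scheduleGC) is just an example of calling into each zone.
 *
 *   for (ZonesIter zone(rt, SkipAtoms); !zone.done(); zone.next())
 *       zone->scheduleGC();
 */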

struct CompartmentsInZoneIter
{
    // This is for the benefit of CompartmentsIterT::comp.
    friend class mozilla::Maybe<CompartmentsInZoneIter>;

  private:
    JSCompartment **it, **end;

    CompartmentsInZoneIter()
      : it(nullptr), end(nullptr)
    {}

  public:
    explicit CompartmentsInZoneIter(JS::Zone *zone) {
        it = zone->compartments.begin();
        end = zone->compartments.end();
    }

    bool done() const {
        JS_ASSERT(it);
        return it == end;
    }
    void next() {
        JS_ASSERT(!done());
        it++;
    }

    JSCompartment *get() const {
        JS_ASSERT(it);
        return *it;
    }

    operator JSCompartment *() const { return get(); }
    JSCompartment *operator->() const { return get(); }
};

/*
 * This iterator iterates over all the compartments in a given set of zones.
 * The set of zones is determined by iterating ZonesIterT.
 */
template <class ZonesIterT>
class CompartmentsIterT
{
  private:
    ZonesIterT zone;
    mozilla::Maybe<CompartmentsInZoneIter> comp;

  public:
    explicit CompartmentsIterT(JSRuntime *rt)
      : zone(rt)
    {
        if (zone.done())
            comp.construct();
        else
            comp.construct(zone);
    }

    CompartmentsIterT(JSRuntime *rt, ZoneSelector selector)
      : zone(rt, selector)
    {
        if (zone.done())
            comp.construct();
        else
            comp.construct(zone);
    }

    bool done() const { return zone.done(); }

    void next() {
        JS_ASSERT(!done());
        JS_ASSERT(!comp.ref().done());
        comp.ref().next();
        if (comp.ref().done()) {
            comp.destroy();
            zone.next();
            if (!zone.done())
                comp.construct(zone);
        }
    }

    JSCompartment *get() const {
        JS_ASSERT(!done());
        return comp.ref();
    }

    operator JSCompartment *() const { return get(); }
    JSCompartment *operator->() const { return get(); }
};

typedef CompartmentsIterT<ZonesIter> CompartmentsIter;
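
/*
 * Typical use, as a sketch: visit every compartment in the runtime, skipping
 * compartments in the atoms zone. |ProcessCompartment| is a hypothetical
 * callee used only for illustration.
 *
 *   for (CompartmentsIter comp(rt, SkipAtoms); !comp.done(); comp.next())
 *       ProcessCompartment(comp);
 */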

} /* namespace js */

#endif /* gc_Zone_h */