/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
/* vim: set ts=8 sts=4 et sw=4 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

//
// This file implements a garbage-cycle collector based on the paper
//
//   Concurrent Cycle Collection in Reference Counted Systems
//   Bacon & Rajan (2001), ECOOP 2001 / Springer LNCS vol 2072
//
// We are not using the concurrent or acyclic cases of that paper; so
// the green, red and orange colors are not used.
//
// The collector is based on tracking pointers of four colors:
//
// Black nodes are definitely live. If we ever determine a node is
// black, it's ok to forget about it and drop it from our records.
//
// White nodes are definitely garbage cycles. Once we finish with our
// scanning, we unlink all the white nodes and expect that by
// unlinking them they will self-destruct (since a garbage cycle is
// only keeping itself alive with internal links, by definition).
//
// Snow-white is an addition to the original algorithm. A snow-white
// object has a reference count of zero and is just waiting for deletion.
//
// Grey nodes are being scanned. Nodes that turn grey will turn
// either black if we determine that they're live, or white if we
// determine that they're a garbage cycle. After the main collection
// algorithm there should be no grey nodes.
//
// Purple nodes are *candidates* for being scanned. They are nodes we
// haven't begun scanning yet because they're not old enough, or we're
// still partway through the algorithm.
//
// XPCOM objects participating in garbage-cycle collection are obliged
// to inform us when they ought to turn purple; that is, when their
// refcount transitions from N+1 -> N, for nonzero N. Furthermore we
// require that *after* an XPCOM object has informed us of turning
// purple, they will tell us when they either transition back to being
// black (incremented refcount) or are ultimately deleted.

// Incremental cycle collection
//
// Beyond the simple state machine required to implement incremental
// collection, the CC needs to be able to compensate for things the browser
// is doing during the collection. There are two kinds of problems. For each
// of these, there are two cases to deal with: purple-buffered C++ objects
// and JS objects.

// The first problem is that an object in the CC's graph can become garbage.
// This is bad because the CC touches the objects in its graph at every
// stage of its operation.
//
// All cycle collected C++ objects that die during a cycle collection
// will end up actually getting deleted by the SnowWhiteKiller. Before
// the SWK deletes an object, it checks if an ICC is running, and if so,
// if the object is in the graph. If it is, the CC clears mPointer and
// mParticipant so it does not point to the raw object any more. Because
// objects could die any time the CC returns to the mutator, any time the CC
// accesses a PtrInfo it must perform a null check on mParticipant to
// ensure the object has not gone away.
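//
// As an illustration only (not part of the algorithm description
// above), the pattern looks roughly like this; |etor| and |visitor|
// are hypothetical stand-ins for the real traversal machinery defined
// later in this file:
//
//   PtrInfo* pi = etor.GetNext();
//   if (!pi->mParticipant) {
//       // The object died during the ICC and the entry was cleared; skip it.
//   } else {
//       // Safe to touch pi->mPointer through pi->mParticipant here.
//   }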
//
// JS objects don't always run finalizers, so the CC can't remove them from
// the graph when they die. Fortunately, JS objects can only die during a GC,
// so if a GC is begun during an ICC, the browser synchronously finishes off
// the ICC, which clears the entire CC graph. If the GC and CC are scheduled
// properly, this should be rare.
//
// The second problem is that objects in the graph can be changed, say by
// being addrefed or released, or by having a field updated, after the object
// has been added to the graph. The problem is that ICC can miss a newly
// created reference to an object, and end up unlinking an object that is
// actually alive.
//
// The basic idea of the solution, from "An on-the-fly Reference Counting
// Garbage Collector for Java" by Levanoni and Petrank, is to notice if an
// object has had an additional reference to it created during the collection,
// and if so, don't collect it during the current collection. This avoids having
// to rerun the scan as in Bacon & Rajan 2001.
//
// For cycle collected C++ objects, we modify AddRef to place the object in
// the purple buffer, in addition to Release. Then, in the CC, we treat any
// objects in the purple buffer as being alive, after graph building has
// completed. Because they are in the purple buffer, they will be suspected
// in the next CC, so there's no danger of leaks. This is imprecise, because
// we will treat as live an object that has been Released but not AddRefed
// during graph building, but that's probably rare enough that the additional
// bookkeeping overhead is not worthwhile.
//
// For JS objects, the cycle collector is only looking at gray objects. If a
// gray object is touched during ICC, it will be made black by UnmarkGray.
// Thus, if a JS object has become black during the ICC, we treat it as live.
// Merged JS zones have to be handled specially: we scan all zone globals.
// If any are black, we treat the zone as being black.


// Safety
//
// An XPCOM object is either scan-safe or scan-unsafe, purple-safe or
// purple-unsafe.
//
// An nsISupports object is scan-safe if:
//
//  - It can be QI'ed to |nsXPCOMCycleCollectionParticipant|, though
//    this operation loses ISupports identity (like nsIClassInfo).
//  - Additionally, the operation |traverse| on the resulting
//    nsXPCOMCycleCollectionParticipant does not cause *any* refcount
//    adjustment to occur (no AddRef / Release calls).
//
// A non-nsISupports ("native") object is scan-safe by explicitly
// providing its nsCycleCollectionParticipant.
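//
// A minimal sketch of the canonicalization this implies (the real
// helpers, CanonicalizeXPCOMParticipant and ToParticipant, are defined
// later in this file; |aObj| is a hypothetical scan-safe nsISupports*):
//
//   nsISupports* canonical;
//   aObj->QueryInterface(NS_GET_IID(nsCycleCollectionISupports),
//                        reinterpret_cast<void**>(&canonical));
//   nsXPCOMCycleCollectionParticipant* cp;
//   CallQueryInterface(canonical, &cp);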
//
// An object is purple-safe if it satisfies the following properties:
//
//  - The object is scan-safe.
//
// When we receive a pointer |ptr| via
// |nsCycleCollector::suspect(ptr)|, we assume it is purple-safe. We
// can check the scan-safety, but have no way to ensure the
// purple-safety; objects must obey, or else the entire system falls
// apart. Don't involve an object in this scheme if you can't
// guarantee its purple-safety. The easiest way to ensure that an
// object is purple-safe is to use nsCycleCollectingAutoRefCnt.
//
// When we have a scannable set of purple nodes ready, we begin
// our walks. During the walks, the nodes we |traverse| should only
// feed us more scan-safe nodes, and should not adjust the refcounts
// of those nodes.
//
// We do not |AddRef| or |Release| any objects during scanning. We
// rely on the purple-safety of the roots that call |suspect| to
// hold, such that we will clear the pointer from the purple buffer
// entry to the object before it is destroyed. The pointers that are
// merely scan-safe we hold only for the duration of scanning, and
// there should be no objects released from the scan-safe set during
// the scan.
//
// We *do* call |Root| and |Unroot| on every white object, on
// either side of the calls to |Unlink|. This keeps the set of white
// objects alive during the unlinking.
//

#if !defined(__MINGW32__)
#ifdef WIN32
#include <crtdbg.h>
#include <errno.h>
#endif
#endif

#include "base/process_util.h"

#include "mozilla/ArrayUtils.h"
#include "mozilla/AutoRestore.h"
#include "mozilla/CycleCollectedJSRuntime.h"
#include "mozilla/HoldDropJSObjects.h"
/* This must occur *after* base/process_util.h to avoid typedef conflicts. */
#include "mozilla/MemoryReporting.h"
#include "mozilla/LinkedList.h"

#include "nsCycleCollectionParticipant.h"
#include "nsCycleCollectionNoteRootCallback.h"
#include "nsDeque.h"
#include "nsCycleCollector.h"
#include "nsThreadUtils.h"
#include "nsXULAppAPI.h"
#include "prenv.h"
#include "nsPrintfCString.h"
#include "nsTArray.h"
#include "nsIConsoleService.h"
#include "mozilla/Attributes.h"
#include "nsICycleCollectorListener.h"
#include "nsIMemoryReporter.h"
#include "nsIFile.h"
#include "nsDumpUtils.h"
#include "xpcpublic.h"
#include "GeckoProfiler.h"
#include "js/SliceBudget.h"
#include <stdint.h>
#include <stdio.h>

#include "mozilla/Likely.h"
#include "mozilla/PoisonIOInterposer.h"
#include "mozilla/Telemetry.h"
#include "mozilla/ThreadLocal.h"

using namespace mozilla;

//#define COLLECT_TIME_DEBUG

// Enable assertions that are useful for diagnosing errors in graph
// construction.
//#define DEBUG_CC_GRAPH

#define DEFAULT_SHUTDOWN_COLLECTIONS 5

// One to do the freeing, then another to detect there is no more work to do.
#define NORMAL_SHUTDOWN_COLLECTIONS 2

// Cycle collector environment variables
//
// MOZ_CC_LOG_ALL: If defined, always log cycle collector heaps.
//
// MOZ_CC_LOG_SHUTDOWN: If defined, log cycle collector heaps at shutdown.
//
// MOZ_CC_LOG_THREAD: If set to "main", only automatically log main thread
// CCs. If set to "worker", only automatically log worker CCs. If set to "all",
// log either. The default value is "all". This must be used with either
// MOZ_CC_LOG_ALL or MOZ_CC_LOG_SHUTDOWN for it to do anything.
//
// MOZ_CC_LOG_PROCESS: If set to "main", only automatically log main process
// CCs. If set to "content", only automatically log tab CCs. If set to
// "plugins", only automatically log plugin CCs. If set to "all", log
// everything. The default value is "all". This must be used with either
// MOZ_CC_LOG_ALL or MOZ_CC_LOG_SHUTDOWN for it to do anything.
//
// MOZ_CC_ALL_TRACES: If set to "all", any cycle collector
// logging done will be WantAllTraces, which disables
// various cycle collector optimizations to give a fuller picture of
// the heap. If set to "shutdown", only shutdown logging will be
// WantAllTraces. By default, neither is enabled.
//
// MOZ_CC_RUN_DURING_SHUTDOWN: In non-DEBUG builds, if this is set, run
// cycle collections at shutdown.
//
// MOZ_CC_LOG_DIRECTORY: The directory in which logs are placed (such as
// logs from MOZ_CC_LOG_ALL and MOZ_CC_LOG_SHUTDOWN, or other uses
// of nsICycleCollectorListener).

// Various parameters of this collector can be tuned using environment
// variables.
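//
// For example (illustrative only): running with MOZ_CC_LOG_ALL=1,
// MOZ_CC_LOG_THREAD=main and MOZ_CC_LOG_PROCESS=content would log every
// CC, but only on the main thread of content processes; the same
// thread/process filters combined with MOZ_CC_LOG_SHUTDOWN instead
// would restrict logging further, to shutdown CCs only.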
struct nsCycleCollectorParams
{
    bool mLogAll;
    bool mLogShutdown;
    bool mAllTracesAll;
    bool mAllTracesShutdown;
    bool mLogThisThread;

    nsCycleCollectorParams() :
        mLogAll      (PR_GetEnv("MOZ_CC_LOG_ALL") != nullptr),
        mLogShutdown (PR_GetEnv("MOZ_CC_LOG_SHUTDOWN") != nullptr),
        mAllTracesAll(false),
        mAllTracesShutdown(false)
    {
        const char* logThreadEnv = PR_GetEnv("MOZ_CC_LOG_THREAD");
        bool threadLogging = true;
        if (logThreadEnv && !!strcmp(logThreadEnv, "all")) {
            if (NS_IsMainThread()) {
                threadLogging = !strcmp(logThreadEnv, "main");
            } else {
                threadLogging = !strcmp(logThreadEnv, "worker");
            }
        }

        const char* logProcessEnv = PR_GetEnv("MOZ_CC_LOG_PROCESS");
        bool processLogging = true;
        if (logProcessEnv && !!strcmp(logProcessEnv, "all")) {
            switch (XRE_GetProcessType()) {
                case GeckoProcessType_Default:
                    processLogging = !strcmp(logProcessEnv, "main");
                    break;
                case GeckoProcessType_Plugin:
                    processLogging = !strcmp(logProcessEnv, "plugins");
                    break;
                case GeckoProcessType_Content:
                    processLogging = !strcmp(logProcessEnv, "content");
                    break;
                default:
                    processLogging = false;
                    break;
            }
        }
        mLogThisThread = threadLogging && processLogging;

        const char* allTracesEnv = PR_GetEnv("MOZ_CC_ALL_TRACES");
        if (allTracesEnv) {
            if (!strcmp(allTracesEnv, "all")) {
                mAllTracesAll = true;
            } else if (!strcmp(allTracesEnv, "shutdown")) {
                mAllTracesShutdown = true;
            }
        }
    }

    bool LogThisCC(bool aIsShutdown)
    {
        return (mLogAll || (aIsShutdown && mLogShutdown)) && mLogThisThread;
    }

    bool AllTracesThisCC(bool aIsShutdown)
    {
        return mAllTracesAll || (aIsShutdown && mAllTracesShutdown);
    }
};

#ifdef COLLECT_TIME_DEBUG
class TimeLog
{
public:
    TimeLog() : mLastCheckpoint(TimeStamp::Now()) {}

    void
    Checkpoint(const char* aEvent)
    {
        TimeStamp now = TimeStamp::Now();
        uint32_t dur = (uint32_t) ((now - mLastCheckpoint).ToMilliseconds());
        if (dur > 0) {
            printf("cc: %s took %dms\n", aEvent, dur);
        }
        mLastCheckpoint = now;
    }

private:
    TimeStamp mLastCheckpoint;
};
#else
class TimeLog
{
public:
    TimeLog() {}
    void Checkpoint(const char* aEvent) {}
};
#endif


////////////////////////////////////////////////////////////////////////
// Base types
////////////////////////////////////////////////////////////////////////

struct PtrInfo;

class EdgePool
{
public:
    // EdgePool allocates arrays of void*, primarily to hold PtrInfo*.
    // However, at the end of a block, the last two pointers are a null
    // and then a void** pointing to the next block. This allows
    // EdgePool::Iterators to be a single word but still capable of crossing
    // block boundaries.
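    //
    // An illustrative sketch of one block's layout (BlockSize entries;
    // see the Block struct below):
    //
    //   index:   0      1     ...   BlockSize-2   BlockSize-1
    //          [ edge | edge | ... | nullptr     | next Block* ]
    //                                ^ sentinel    ^ link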
    EdgePool()
    {
        mSentinelAndBlocks[0].block = nullptr;
        mSentinelAndBlocks[1].block = nullptr;
    }

    ~EdgePool()
    {
        MOZ_ASSERT(!mSentinelAndBlocks[0].block &&
                   !mSentinelAndBlocks[1].block,
                   "Didn't call Clear()?");
    }

    void Clear()
    {
        Block *b = Blocks();
        while (b) {
            Block *next = b->Next();
            delete b;
            b = next;
        }

        mSentinelAndBlocks[0].block = nullptr;
        mSentinelAndBlocks[1].block = nullptr;
    }

#ifdef DEBUG
    bool IsEmpty()
    {
        return !mSentinelAndBlocks[0].block &&
               !mSentinelAndBlocks[1].block;
    }
#endif

private:
    struct Block;
    union PtrInfoOrBlock {
        // Use a union to avoid reinterpret_cast and the ensuing
        // potential aliasing bugs.
        PtrInfo *ptrInfo;
        Block *block;
    };
    struct Block {
        enum { BlockSize = 16 * 1024 };

        PtrInfoOrBlock mPointers[BlockSize];
        Block() {
            mPointers[BlockSize - 2].block = nullptr; // sentinel
            mPointers[BlockSize - 1].block = nullptr; // next block pointer
        }
        Block*& Next()          { return mPointers[BlockSize - 1].block; }
        PtrInfoOrBlock* Start() { return &mPointers[0]; }
        PtrInfoOrBlock* End()   { return &mPointers[BlockSize - 2]; }
    };

    // Store the null sentinel so that we can have valid iterators
    // before adding any edges and without adding any blocks.
    PtrInfoOrBlock mSentinelAndBlocks[2];

    Block*& Blocks()       { return mSentinelAndBlocks[1].block; }
    Block*  Blocks() const { return mSentinelAndBlocks[1].block; }

public:
    class Iterator
    {
    public:
        Iterator() : mPointer(nullptr) {}
        Iterator(PtrInfoOrBlock *aPointer) : mPointer(aPointer) {}
        Iterator(const Iterator& aOther) : mPointer(aOther.mPointer) {}

        Iterator& operator++()
        {
            if (mPointer->ptrInfo == nullptr) {
                // Null pointer is a sentinel for link to the next block.
                mPointer = (mPointer + 1)->block->mPointers;
            }
            ++mPointer;
            return *this;
        }

        PtrInfo* operator*() const
        {
            if (mPointer->ptrInfo == nullptr) {
                // Null pointer is a sentinel for link to the next block.
                return (mPointer + 1)->block->mPointers->ptrInfo;
            }
            return mPointer->ptrInfo;
        }
        bool operator==(const Iterator& aOther) const
            { return mPointer == aOther.mPointer; }
        bool operator!=(const Iterator& aOther) const
            { return mPointer != aOther.mPointer; }

#ifdef DEBUG_CC_GRAPH
        bool Initialized() const
        {
            return mPointer != nullptr;
        }
#endif

    private:
        PtrInfoOrBlock *mPointer;
    };

    class Builder;
    friend class Builder;
    class Builder {
    public:
        Builder(EdgePool &aPool)
            : mCurrent(&aPool.mSentinelAndBlocks[0]),
              mBlockEnd(&aPool.mSentinelAndBlocks[0]),
              mNextBlockPtr(&aPool.Blocks())
        {
        }

        Iterator Mark() { return Iterator(mCurrent); }

        void Add(PtrInfo* aEdge) {
            if (mCurrent == mBlockEnd) {
                Block *b = new Block();
                *mNextBlockPtr = b;
                mCurrent = b->Start();
                mBlockEnd = b->End();
                mNextBlockPtr = &b->Next();
            }
            (mCurrent++)->ptrInfo = aEdge;
        }
    private:
        // mBlockEnd points to space for null sentinel
        PtrInfoOrBlock *mCurrent, *mBlockEnd;
        Block **mNextBlockPtr;
    };

    size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const {
        size_t n = 0;
        Block *b = Blocks();
        while (b) {
            n += aMallocSizeOf(b);
            b = b->Next();
        }
        return n;
    }
};

#ifdef DEBUG_CC_GRAPH
#define CC_GRAPH_ASSERT(b) MOZ_ASSERT(b)
#else
#define CC_GRAPH_ASSERT(b)
#endif

#define CC_TELEMETRY(_name, _value)                                          \
    PR_BEGIN_MACRO                                                           \
    if (NS_IsMainThread()) {                                                 \
        Telemetry::Accumulate(Telemetry::CYCLE_COLLECTOR##_name, _value);    \
    } else {                                                                 \
        Telemetry::Accumulate(Telemetry::CYCLE_COLLECTOR_WORKER##_name, _value); \
    }                                                                        \
    PR_END_MACRO

enum NodeColor { black, white, grey };

// This structure should be kept as small as possible; we may expect
// hundreds of thousands of them to be allocated and touched
// repeatedly during each cycle collection.

struct PtrInfo
{
    void *mPointer;
    nsCycleCollectionParticipant *mParticipant;
    uint32_t mColor : 2;
    uint32_t mInternalRefs : 30;
    uint32_t mRefCount;
private:
    EdgePool::Iterator mFirstChild;

public:

    PtrInfo(void *aPointer, nsCycleCollectionParticipant *aParticipant)
        : mPointer(aPointer),
          mParticipant(aParticipant),
          mColor(grey),
          mInternalRefs(0),
          mRefCount(UINT32_MAX - 1),
          mFirstChild()
    {
        // We initialize mRefCount to a large non-zero value so
        // that it doesn't look like a JS object to the cycle collector
        // in the case where the object dies before being traversed.

        MOZ_ASSERT(aParticipant);
    }

    // Allow NodePool::Block's constructor to compile.
    PtrInfo() {
        NS_NOTREACHED("should never be called");
    }

    EdgePool::Iterator FirstChild()
    {
        CC_GRAPH_ASSERT(mFirstChild.Initialized());
        return mFirstChild;
    }

    // This PtrInfo must be part of a NodePool.
    EdgePool::Iterator LastChild()
    {
        CC_GRAPH_ASSERT((this + 1)->mFirstChild.Initialized());
        return (this + 1)->mFirstChild;
    }

    void SetFirstChild(EdgePool::Iterator aFirstChild)
    {
        CC_GRAPH_ASSERT(aFirstChild.Initialized());
        mFirstChild = aFirstChild;
    }

    // This PtrInfo must be part of a NodePool.
    void SetLastChild(EdgePool::Iterator aLastChild)
    {
        CC_GRAPH_ASSERT(aLastChild.Initialized());
        (this + 1)->mFirstChild = aLastChild;
    }
};

/**
 * A structure designed to be used like a linked list of PtrInfo, except
 * that it allocates the PtrInfo 32K-at-a-time.
 */
class NodePool
{
private:
    enum { BlockSize = 8 * 1024 }; // could be int template parameter

    struct Block {
        // We create and destroy Block using NS_Alloc/NS_Free rather
        // than new and delete to avoid calling its constructor and
        // destructor.
        Block()  { NS_NOTREACHED("should never be called"); }
        ~Block() { NS_NOTREACHED("should never be called"); }

        Block* mNext;
        PtrInfo mEntries[BlockSize + 1]; // +1 to store last child of last node
    };

public:
    NodePool()
        : mBlocks(nullptr),
          mLast(nullptr)
    {
    }

    ~NodePool()
    {
        MOZ_ASSERT(!mBlocks, "Didn't call Clear()?");
    }

    void Clear()
    {
        Block *b = mBlocks;
        while (b) {
            Block *n = b->mNext;
            NS_Free(b);
            b = n;
        }

        mBlocks = nullptr;
        mLast = nullptr;
    }

#ifdef DEBUG
    bool IsEmpty()
    {
        return !mBlocks && !mLast;
    }
#endif

    class Builder;
    friend class Builder;
    class Builder {
    public:
        Builder(NodePool& aPool)
            : mNextBlock(&aPool.mBlocks),
              mNext(aPool.mLast),
              mBlockEnd(nullptr)
        {
            MOZ_ASSERT(aPool.mBlocks == nullptr && aPool.mLast == nullptr,
                       "pool not empty");
        }
        PtrInfo *Add(void *aPointer, nsCycleCollectionParticipant *aParticipant)
        {
            if (mNext == mBlockEnd) {
                Block *block = static_cast<Block*>(NS_Alloc(sizeof(Block)));
                *mNextBlock = block;
                mNext = block->mEntries;
                mBlockEnd = block->mEntries + BlockSize;
                block->mNext = nullptr;
                mNextBlock = &block->mNext;
            }
            return new (mNext++) PtrInfo(aPointer, aParticipant);
        }
    private:
        Block **mNextBlock;
        PtrInfo *&mNext;
        PtrInfo *mBlockEnd;
    };

    class Enumerator;
    friend class Enumerator;
    class Enumerator {
    public:
        Enumerator(NodePool& aPool)
            : mFirstBlock(aPool.mBlocks),
              mCurBlock(nullptr),
              mNext(nullptr),
              mBlockEnd(nullptr),
              mLast(aPool.mLast)
        {
        }

        bool IsDone() const
        {
            return mNext == mLast;
        }

        bool AtBlockEnd() const
        {
            return mNext == mBlockEnd;
        }

        PtrInfo* GetNext()
        {
            MOZ_ASSERT(!IsDone(), "calling GetNext when done");
            if (mNext == mBlockEnd) {
                Block *nextBlock = mCurBlock ? mCurBlock->mNext : mFirstBlock;
                mNext = nextBlock->mEntries;
                mBlockEnd = mNext + BlockSize;
                mCurBlock = nextBlock;
            }
            return mNext++;
        }
    private:
        // mFirstBlock is a reference to allow an Enumerator to be constructed
        // for an empty graph.
        Block *&mFirstBlock;
        Block *mCurBlock;
        // mNext is the next value we want to return, unless mNext == mBlockEnd.
        // NB: mLast is a reference to allow enumerating while building!
        PtrInfo *mNext, *mBlockEnd, *&mLast;
    };

    size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const {
        // We don't measure the things pointed to by mEntries[] because those
        // pointers are non-owning.
        size_t n = 0;
        Block *b = mBlocks;
        while (b) {
            n += aMallocSizeOf(b);
            b = b->mNext;
        }
        return n;
    }

private:
    Block *mBlocks;
    PtrInfo *mLast;
};


// Declarations for mPtrToNodeMap.

struct PtrToNodeEntry : public PLDHashEntryHdr
{
    // The key is mNode->mPointer
    PtrInfo *mNode;
};

static bool
PtrToNodeMatchEntry(PLDHashTable *table,
                    const PLDHashEntryHdr *entry,
                    const void *key)
{
    const PtrToNodeEntry *n = static_cast<const PtrToNodeEntry*>(entry);
    return n->mNode->mPointer == key;
}

static PLDHashTableOps PtrNodeOps = {
    PL_DHashAllocTable,
    PL_DHashFreeTable,
    PL_DHashVoidPtrKeyStub,
    PtrToNodeMatchEntry,
    PL_DHashMoveEntryStub,
    PL_DHashClearEntryStub,
    PL_DHashFinalizeStub,
    nullptr
};


struct WeakMapping
{
    // mMap and mKey will be null if the corresponding objects are GC marked.
    PtrInfo *mMap;
    PtrInfo *mKey;
    PtrInfo *mKeyDelegate;
    PtrInfo *mVal;
};
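// A sketch of the scanning rule these fields imply (the actual logic
// lives in nsCycleCollector::ScanWeakMaps, outside this excerpt); a
// null mMap or mKey means "already known live", i.e. treated as black:
//
//   bool mapAlive = !wm.mMap || wm.mMap->mColor == black;
//   bool keyAlive = !wm.mKey || wm.mKey->mColor == black;
//   if (mapAlive && keyAlive) {
//       // wm.mVal must be marked black as well.
//   }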
class GCGraphBuilder;

struct GCGraph
{
    NodePool mNodes;
    EdgePool mEdges;
    nsTArray<WeakMapping> mWeakMaps;
    uint32_t mRootCount;

private:
    PLDHashTable mPtrToNodeMap;

public:
    GCGraph() : mRootCount(0)
    {
        mPtrToNodeMap.ops = nullptr;
    }

    ~GCGraph()
    {
        if (mPtrToNodeMap.ops) {
            PL_DHashTableFinish(&mPtrToNodeMap);
        }
    }

    void Init()
    {
        MOZ_ASSERT(IsEmpty(), "Failed to call GCGraph::Clear");
        PL_DHashTableInit(&mPtrToNodeMap, &PtrNodeOps, nullptr,
                          sizeof(PtrToNodeEntry), 32768);
    }

    void Clear()
    {
        mNodes.Clear();
        mEdges.Clear();
        mWeakMaps.Clear();
        mRootCount = 0;
        PL_DHashTableFinish(&mPtrToNodeMap);
        mPtrToNodeMap.ops = nullptr;
    }

#ifdef DEBUG
    bool IsEmpty()
    {
        return mNodes.IsEmpty() && mEdges.IsEmpty() &&
               mWeakMaps.IsEmpty() && mRootCount == 0 &&
               !mPtrToNodeMap.ops;
    }
#endif

    PtrInfo* FindNode(void *aPtr);
    PtrToNodeEntry* AddNodeToMap(void *aPtr);
    void RemoveNodeFromMap(void *aPtr);

    uint32_t MapCount() const
    {
        return mPtrToNodeMap.entryCount;
    }

    void SizeOfExcludingThis(MallocSizeOf aMallocSizeOf,
                             size_t *aNodesSize, size_t *aEdgesSize,
                             size_t *aWeakMapsSize) const {
        *aNodesSize = mNodes.SizeOfExcludingThis(aMallocSizeOf);
        *aEdgesSize = mEdges.SizeOfExcludingThis(aMallocSizeOf);

        // We don't measure what the WeakMappings point to, because the
        // pointers are non-owning.
        *aWeakMapsSize = mWeakMaps.SizeOfExcludingThis(aMallocSizeOf);
    }
};

PtrInfo*
GCGraph::FindNode(void *aPtr)
{
    PtrToNodeEntry *e =
        static_cast<PtrToNodeEntry*>(PL_DHashTableOperate(&mPtrToNodeMap, aPtr,
                                                          PL_DHASH_LOOKUP));
    if (!PL_DHASH_ENTRY_IS_BUSY(e)) {
        return nullptr;
    }
    return e->mNode;
}

PtrToNodeEntry*
GCGraph::AddNodeToMap(void *aPtr)
{
    PtrToNodeEntry *e =
        static_cast<PtrToNodeEntry*>(PL_DHashTableOperate(&mPtrToNodeMap, aPtr,
                                                          PL_DHASH_ADD));
    if (!e) {
        // Caller should track OOMs.
        return nullptr;
    }
    return e;
}

void
GCGraph::RemoveNodeFromMap(void *aPtr)
{
    PL_DHashTableOperate(&mPtrToNodeMap, aPtr, PL_DHASH_REMOVE);
}


static nsISupports *
CanonicalizeXPCOMParticipant(nsISupports *in)
{
    nsISupports* out;
    in->QueryInterface(NS_GET_IID(nsCycleCollectionISupports),
                       reinterpret_cast<void**>(&out));
    return out;
}

static inline void
ToParticipant(nsISupports *s, nsXPCOMCycleCollectionParticipant **cp);

static void
CanonicalizeParticipant(void **parti, nsCycleCollectionParticipant **cp)
{
    // If the participant is null, this is an nsISupports participant,
    // so we must QI to get the real participant.
    if (!*cp) {
        nsISupports *nsparti = static_cast<nsISupports*>(*parti);
        nsparti = CanonicalizeXPCOMParticipant(nsparti);
        NS_ASSERTION(nsparti,
                     "Don't add objects that don't participate in collection!");
        nsXPCOMCycleCollectionParticipant *xcp;
        ToParticipant(nsparti, &xcp);
        *parti = nsparti;
        *cp = xcp;
    }
}

struct nsPurpleBufferEntry {
    union {
        void *mObject;                        // when low bit unset
        nsPurpleBufferEntry *mNextInFreeList; // when low bit set
    };

    nsCycleCollectingAutoRefCnt *mRefCnt;

    nsCycleCollectionParticipant *mParticipant; // nullptr for nsISupports
};
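// A sketch of the low-bit tagging used to distinguish the two union arms
// (this mirrors StartBlock/NewEntry/Remove in nsPurpleBuffer below):
//
//   live entry:  e->mObject = ptr;                       // low bit clear
//   free entry:  e->mNextInFreeList =
//       (nsPurpleBufferEntry*)(uintptr_t(next) | 1);     // low bit set
//
// VisitEntries relies on this: it skips any entry whose low bit is set.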
class nsCycleCollector;

struct nsPurpleBuffer
{
private:
    struct Block {
        Block *mNext;
        // Try to match the size of a jemalloc bucket, to minimize slop bytes.
        // - On 32-bit platforms sizeof(nsPurpleBufferEntry) is 12, so mEntries
        //   is 16,380 bytes, which leaves 4 bytes for mNext.
        // - On 64-bit platforms sizeof(nsPurpleBufferEntry) is 24, so mEntries
        //   is 32,760 bytes, which leaves 8 bytes for mNext.
        nsPurpleBufferEntry mEntries[1365];

        Block() : mNext(nullptr) {
            // Ensure Block is the right size (see above).
            static_assert(
                sizeof(Block) == 16384 ||  // 32-bit
                sizeof(Block) == 32768,    // 64-bit
                "ill-sized nsPurpleBuffer::Block"
            );
        }

        template <class PurpleVisitor>
        void VisitEntries(nsPurpleBuffer &aBuffer, PurpleVisitor &aVisitor)
        {
            nsPurpleBufferEntry *eEnd = ArrayEnd(mEntries);
            for (nsPurpleBufferEntry *e = mEntries; e != eEnd; ++e) {
                if (!(uintptr_t(e->mObject) & uintptr_t(1))) {
                    aVisitor.Visit(aBuffer, e);
                }
            }
        }
    };
    // This class wraps a linked list of the elements in the purple
    // buffer.

    uint32_t mCount;
    Block mFirstBlock;
    nsPurpleBufferEntry *mFreeList;

public:
    nsPurpleBuffer()
    {
        InitBlocks();
    }

    ~nsPurpleBuffer()
    {
        FreeBlocks();
    }

    template <class PurpleVisitor>
    void VisitEntries(PurpleVisitor &aVisitor)
    {
        for (Block *b = &mFirstBlock; b; b = b->mNext) {
            b->VisitEntries(*this, aVisitor);
        }
    }

    void InitBlocks()
    {
        mCount = 0;
        mFreeList = nullptr;
        StartBlock(&mFirstBlock);
    }

    void StartBlock(Block *aBlock)
    {
        NS_ABORT_IF_FALSE(!mFreeList, "should not have free list");

        // Put all the entries in the block on the free list.
        nsPurpleBufferEntry *entries = aBlock->mEntries;
        mFreeList = entries;
        for (uint32_t i = 1; i < ArrayLength(aBlock->mEntries); ++i) {
            entries[i - 1].mNextInFreeList =
                (nsPurpleBufferEntry*)(uintptr_t(entries + i) | 1);
        }
        entries[ArrayLength(aBlock->mEntries) - 1].mNextInFreeList =
            (nsPurpleBufferEntry*)1;
    }

    void FreeBlocks()
    {
        if (mCount > 0)
            UnmarkRemainingPurple(&mFirstBlock);
        Block *b = mFirstBlock.mNext;
        while (b) {
            if (mCount > 0)
                UnmarkRemainingPurple(b);
            Block *next = b->mNext;
            delete b;
            b = next;
        }
        mFirstBlock.mNext = nullptr;
    }

    struct UnmarkRemainingPurpleVisitor
    {
        void
        Visit(nsPurpleBuffer &aBuffer, nsPurpleBufferEntry *aEntry)
        {
            if (aEntry->mRefCnt) {
                aEntry->mRefCnt->RemoveFromPurpleBuffer();
                aEntry->mRefCnt = nullptr;
            }
            aEntry->mObject = nullptr;
            --aBuffer.mCount;
        }
    };

    void UnmarkRemainingPurple(Block *b)
    {
        UnmarkRemainingPurpleVisitor visitor;
        b->VisitEntries(*this, visitor);
    }

    void SelectPointers(GCGraphBuilder &builder);

    // RemoveSkippable removes entries from the purple buffer synchronously
    //   (1) if aAsyncSnowWhiteFreeing is false and
    //       nsPurpleBufferEntry::mRefCnt is 0, or
    //   (2) if the object's nsXPCOMCycleCollectionParticipant::CanSkip()
    //       returns true, or
    //   (3) if nsPurpleBufferEntry::mRefCnt->IsPurple() is false.
    // (4) If removeChildlessNodes is true, then any nodes in the purple buffer
    //     that will have no children in the cycle collector graph will also be
    //     removed. CanSkip() may be run on these children.
    void RemoveSkippable(nsCycleCollector* aCollector,
                         bool removeChildlessNodes,
                         bool aAsyncSnowWhiteFreeing,
                         CC_ForgetSkippableCallback aCb);
    MOZ_ALWAYS_INLINE nsPurpleBufferEntry* NewEntry()
    {
        if (MOZ_UNLIKELY(!mFreeList)) {
            Block *b = new Block;
            StartBlock(b);

            // Add the new block as the second block in the list.
            b->mNext = mFirstBlock.mNext;
            mFirstBlock.mNext = b;
        }

        nsPurpleBufferEntry *e = mFreeList;
        mFreeList = (nsPurpleBufferEntry*)
            (uintptr_t(mFreeList->mNextInFreeList) & ~uintptr_t(1));
        return e;
    }

    MOZ_ALWAYS_INLINE void Put(void *p, nsCycleCollectionParticipant *cp,
                               nsCycleCollectingAutoRefCnt *aRefCnt)
    {
        nsPurpleBufferEntry *e = NewEntry();

        ++mCount;

        e->mObject = p;
        e->mRefCnt = aRefCnt;
        e->mParticipant = cp;
    }

    void Remove(nsPurpleBufferEntry *e)
    {
        MOZ_ASSERT(mCount != 0, "must have entries");

        if (e->mRefCnt) {
            e->mRefCnt->RemoveFromPurpleBuffer();
            e->mRefCnt = nullptr;
        }
        e->mNextInFreeList =
            (nsPurpleBufferEntry*)(uintptr_t(mFreeList) | uintptr_t(1));
        mFreeList = e;

        --mCount;
    }

    uint32_t Count() const
    {
        return mCount;
    }

    size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const
    {
        size_t n = 0;

        // Don't measure mFirstBlock because it's within |this|.
        const Block *block = mFirstBlock.mNext;
        while (block) {
            n += aMallocSizeOf(block);
            block = block->mNext;
        }

        // mFreeList is deliberately not measured because it points into
        // the purple buffer, which is within mFirstBlock and thus within |this|.
        //
        // We also don't measure the things pointed to by mEntries[] because
        // those pointers are non-owning.

        return n;
    }
};

static bool
AddPurpleRoot(GCGraphBuilder &aBuilder, void *aRoot,
              nsCycleCollectionParticipant *aParti);

struct SelectPointersVisitor
{
    SelectPointersVisitor(GCGraphBuilder &aBuilder)
        : mBuilder(aBuilder)
    {}

    void
    Visit(nsPurpleBuffer &aBuffer, nsPurpleBufferEntry *aEntry)
    {
        MOZ_ASSERT(aEntry->mObject, "Null object in purple buffer");
        MOZ_ASSERT(aEntry->mRefCnt->get() != 0,
                   "SelectPointersVisitor: snow-white object in the purple buffer");
        if (!aEntry->mRefCnt->IsPurple() ||
            AddPurpleRoot(mBuilder, aEntry->mObject, aEntry->mParticipant)) {
            aBuffer.Remove(aEntry);
        }
    }

private:
    GCGraphBuilder &mBuilder;
};

void
nsPurpleBuffer::SelectPointers(GCGraphBuilder &aBuilder)
{
    SelectPointersVisitor visitor(aBuilder);
    VisitEntries(visitor);

    NS_ASSERTION(mCount == 0, "AddPurpleRoot failed");
    if (mCount == 0) {
        FreeBlocks();
        InitBlocks();
    }
}

enum ccPhase {
    IdlePhase,
    GraphBuildingPhase,
    ScanAndCollectWhitePhase,
    CleanupPhase
};
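// A sketch of how an incremental collection moves through these phases
// (the transitions themselves happen in nsCycleCollector, below):
//
//   IdlePhase -> GraphBuildingPhase -> ScanAndCollectWhitePhase
//             -> CleanupPhase -> IdlePhase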
enum ccType {
    SliceCC,    /* If a CC is in progress, continue it. Otherwise, start a new one. */
    ManualCC,   /* Explicitly triggered. */
    ShutdownCC  /* Shutdown CC, used for finding leaks. */
};

#ifdef MOZ_NUWA_PROCESS
#include "ipc/Nuwa.h"
#endif

////////////////////////////////////////////////////////////////////////
// Top level structure for the cycle collector.
////////////////////////////////////////////////////////////////////////

typedef js::SliceBudget SliceBudget;

class JSPurpleBuffer;

class nsCycleCollector : public nsIMemoryReporter
{
    NS_DECL_ISUPPORTS
    NS_DECL_NSIMEMORYREPORTER

    bool mActivelyCollecting;
    bool mFreeingSnowWhite;
    // mScanInProgress should be false when we're collecting white objects.
    bool mScanInProgress;
    CycleCollectorResults mResults;
    TimeStamp mCollectionStart;

    CycleCollectedJSRuntime *mJSRuntime;

    ccPhase mIncrementalPhase;
    GCGraph mGraph;
    nsAutoPtr<GCGraphBuilder> mBuilder;
    nsAutoPtr<NodePool::Enumerator> mCurrNode;
    nsCOMPtr<nsICycleCollectorListener> mListener;

    nsIThread* mThread;

    nsCycleCollectorParams mParams;

    uint32_t mWhiteNodeCount;

    CC_BeforeUnlinkCallback mBeforeUnlinkCB;
    CC_ForgetSkippableCallback mForgetSkippableCB;

    nsPurpleBuffer mPurpleBuf;

    uint32_t mUnmergedNeeded;
    uint32_t mMergedInARow;

    JSPurpleBuffer* mJSPurpleBuffer;

public:
    nsCycleCollector();
    virtual ~nsCycleCollector();

    void RegisterJSRuntime(CycleCollectedJSRuntime *aJSRuntime);
    void ForgetJSRuntime();

    void SetBeforeUnlinkCallback(CC_BeforeUnlinkCallback aBeforeUnlinkCB)
    {
        CheckThreadSafety();
        mBeforeUnlinkCB = aBeforeUnlinkCB;
    }

    void SetForgetSkippableCallback(CC_ForgetSkippableCallback aForgetSkippableCB)
    {
        CheckThreadSafety();
        mForgetSkippableCB = aForgetSkippableCB;
    }

    void Suspect(void *n, nsCycleCollectionParticipant *cp,
                 nsCycleCollectingAutoRefCnt *aRefCnt);
    uint32_t SuspectedCount();
    void ForgetSkippable(bool aRemoveChildlessNodes, bool aAsyncSnowWhiteFreeing);
    bool FreeSnowWhite(bool aUntilNoSWInPurpleBuffer);

    // This method assumes its argument is already canonicalized.
    void RemoveObjectFromGraph(void *aPtr);

    void PrepareForGarbageCollection();

    bool Collect(ccType aCCType,
                 SliceBudget &aBudget,
                 nsICycleCollectorListener *aManualListener);
    void Shutdown();
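    // An illustrative call pattern for Collect (sketch only; the real
    // scheduling lives in the embedder, and an unlimited budget is just
    // one possibility -- see js/SliceBudget.h for the actual budget API):
    //
    //   SliceBudget budget;  // default-constructed budget
    //   collector->Collect(SliceCC, budget, nullptr);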
    void SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf,
                             size_t *aObjectSize,
                             size_t *aGraphNodesSize,
                             size_t *aGraphEdgesSize,
                             size_t *aWeakMapsSize,
                             size_t *aPurpleBufferSize) const;

    JSPurpleBuffer* GetJSPurpleBuffer();
private:
    void CheckThreadSafety();
    void ShutdownCollect();

    void FixGrayBits(bool aForceGC);
    bool ShouldMergeZones(ccType aCCType);

    void BeginCollection(ccType aCCType, nsICycleCollectorListener *aManualListener);
    void MarkRoots(SliceBudget &aBudget);
    void ScanRoots(bool aFullySynchGraphBuild);
    void ScanIncrementalRoots();
    void ScanWeakMaps();

    // Returns whether anything was collected.
    bool CollectWhite();

    void CleanupAfterCollection();
};

NS_IMPL_ISUPPORTS(nsCycleCollector, nsIMemoryReporter)

/**
 * GraphWalker is templatized over a Visitor class that must provide
 * the following two methods:
 *
 * bool ShouldVisitNode(PtrInfo const *pi);
 * void VisitNode(PtrInfo *pi);
 */
template <class Visitor>
class GraphWalker
{
private:
    Visitor mVisitor;

    void DoWalk(nsDeque &aQueue);

    void CheckedPush(nsDeque &aQueue, PtrInfo *pi)
    {
        if (!pi) {
            MOZ_CRASH();
        }
        if (!aQueue.Push(pi, fallible_t())) {
            mVisitor.Failed();
        }
    }

public:
    void Walk(PtrInfo *s0);
    void WalkFromRoots(GCGraph &aGraph);
    // Copy-constructing the visitor should be cheap, and less
    // indirection than using a reference.
    GraphWalker(const Visitor aVisitor) : mVisitor(aVisitor) {}
};
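// An example Visitor shape (hypothetical; the real scanning visitors
// elsewhere in this file follow the same pattern, and CheckedPush above
// also requires a Failed() method):
//
//   struct MarkGreyBlackVisitor {
//       bool ShouldVisitNode(PtrInfo const *pi) { return pi->mColor == grey; }
//       void VisitNode(PtrInfo *pi) { pi->mColor = black; }
//       void Failed() { /* e.g. Fault("OOM while walking the graph") */ }
//   };
//   GraphWalker<MarkGreyBlackVisitor>(MarkGreyBlackVisitor()).WalkFromRoots(graph);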
////////////////////////////////////////////////////////////////////////
// The static collector struct
////////////////////////////////////////////////////////////////////////

struct CollectorData {
    nsRefPtr<nsCycleCollector> mCollector;
    CycleCollectedJSRuntime* mRuntime;
};

static mozilla::ThreadLocal<CollectorData*> sCollectorData;

////////////////////////////////////////////////////////////////////////
// Utility functions
////////////////////////////////////////////////////////////////////////

MOZ_NEVER_INLINE static void
Fault(const char *msg, const void *ptr = nullptr)
{
    if (ptr)
        printf("Fault in cycle collector: %s (ptr: %p)\n", msg, ptr);
    else
        printf("Fault in cycle collector: %s\n", msg);

    NS_RUNTIMEABORT("cycle collector fault");
}

static void
Fault(const char *msg, PtrInfo *pi)
{
    Fault(msg, pi->mPointer);
}

static inline void
ToParticipant(nsISupports *s, nsXPCOMCycleCollectionParticipant **cp)
{
    // We use QI to move from an nsISupports to an
    // nsXPCOMCycleCollectionParticipant, which is a per-class singleton helper
    // object that implements traversal and unlinking logic for the nsISupports
    // in question.
    CallQueryInterface(s, cp);
}

template <class Visitor>
MOZ_NEVER_INLINE void
GraphWalker<Visitor>::Walk(PtrInfo *s0)
{
    nsDeque queue;
    CheckedPush(queue, s0);
    DoWalk(queue);
}

template <class Visitor>
MOZ_NEVER_INLINE void
GraphWalker<Visitor>::WalkFromRoots(GCGraph& aGraph)
{
    nsDeque queue;
    NodePool::Enumerator etor(aGraph.mNodes);
    for (uint32_t i = 0; i < aGraph.mRootCount; ++i) {
        CheckedPush(queue, etor.GetNext());
    }
    DoWalk(queue);
}

template <class Visitor>
MOZ_NEVER_INLINE void
GraphWalker<Visitor>::DoWalk(nsDeque &aQueue)
{
    // Use a queue to match the breadth-first traversal used when we
    // built the graph, for hopefully-better locality.
    while (aQueue.GetSize() > 0) {
        PtrInfo *pi = static_cast<PtrInfo*>(aQueue.PopFront());

        if (pi->mParticipant && mVisitor.ShouldVisitNode(pi)) {
            mVisitor.VisitNode(pi);
            for (EdgePool::Iterator child = pi->FirstChild(),
                                    child_end = pi->LastChild();
                 child != child_end; ++child) {
                CheckedPush(aQueue, *child);
            }
        }
    }
}

struct CCGraphDescriber : public LinkedListElement<CCGraphDescriber>
{
    CCGraphDescriber()
        : mAddress("0x"), mCnt(0), mType(eUnknown) {}

    enum Type
    {
        eRefCountedObject,
        eGCedObject,
        eGCMarkedObject,
        eEdge,
        eRoot,
        eGarbage,
        eUnknown
    };

    nsCString mAddress;
    nsCString mName;
    nsCString mCompartmentOrToAddress;
    uint32_t mCnt;
    Type mType;
};

class nsCycleCollectorLogger MOZ_FINAL : public nsICycleCollectorListener
{
public:
    nsCycleCollectorLogger() :
        mStream(nullptr), mWantAllTraces(false),
        mDisableLog(false), mWantAfterProcessing(false)
    {
    }
    ~nsCycleCollectorLogger()
    {
        ClearDescribers();
        if (mStream) {
            MozillaUnRegisterDebugFILE(mStream);
            fclose(mStream);
        }
    }
    NS_DECL_ISUPPORTS

    void SetAllTraces()
    {
        mWantAllTraces = true;
    }

    NS_IMETHOD AllTraces(nsICycleCollectorListener** aListener)
    {
        SetAllTraces();
        NS_ADDREF(*aListener = this);
        return NS_OK;
    }

    NS_IMETHOD GetWantAllTraces(bool* aAllTraces)
    {
        *aAllTraces = mWantAllTraces;
        return NS_OK;
    }

    NS_IMETHOD GetDisableLog(bool* aDisableLog)
    {
        *aDisableLog = mDisableLog;
        return NS_OK;
    }

    NS_IMETHOD SetDisableLog(bool aDisableLog)
    {
        mDisableLog = aDisableLog;
        return NS_OK;
    }
    NS_IMETHOD GetWantAfterProcessing(bool* aWantAfterProcessing)
    {
        *aWantAfterProcessing = mWantAfterProcessing;
        return NS_OK;
    }

    NS_IMETHOD SetWantAfterProcessing(bool aWantAfterProcessing)
    {
        mWantAfterProcessing = aWantAfterProcessing;
        return NS_OK;
    }

    NS_IMETHOD GetFilenameIdentifier(nsAString& aIdentifier)
    {
        aIdentifier = mFilenameIdentifier;
        return NS_OK;
    }

    NS_IMETHOD SetFilenameIdentifier(const nsAString& aIdentifier)
    {
        mFilenameIdentifier = aIdentifier;
        return NS_OK;
    }

    NS_IMETHOD GetGcLogPath(nsAString &aPath)
    {
        aPath = mGCLogPath;
        return NS_OK;
    }

    NS_IMETHOD GetCcLogPath(nsAString &aPath)
    {
        aPath = mCCLogPath;
        return NS_OK;
    }
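    // Note on the log file lifecycle implemented by Begin()/End() below:
    // logs are first written as "incomplete-gc-edges.*" /
    // "incomplete-cc-edges.*" and renamed to "gc-edges.*" / "cc-edges.*"
    // only once each dump is finished, so scripts polling the filesystem
    // never pick up a half-written file.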
    NS_IMETHOD Begin()
    {
        mCurrentAddress.AssignLiteral("0x");
        ClearDescribers();
        if (mDisableLog) {
            return NS_OK;
        }

        // Initially create the log in a file starting with
        // "incomplete-gc-edges". We'll move the file and strip off the
        // "incomplete-" once the dump completes. (We do this because we don't
        // want scripts which poll the filesystem looking for gc/cc dumps to
        // grab a file before we're finished writing to it.)
        nsCOMPtr<nsIFile> gcLogFile = CreateTempFile("incomplete-gc-edges");
        if (NS_WARN_IF(!gcLogFile))
            return NS_ERROR_UNEXPECTED;

        // Dump the JS heap.
        FILE* gcLogANSIFile = nullptr;
        gcLogFile->OpenANSIFileDesc("w", &gcLogANSIFile);
        if (NS_WARN_IF(!gcLogANSIFile))
            return NS_ERROR_UNEXPECTED;
        MozillaRegisterDebugFILE(gcLogANSIFile);
        CollectorData *data = sCollectorData.get();
        if (data && data->mRuntime)
            data->mRuntime->DumpJSHeap(gcLogANSIFile);
        MozillaUnRegisterDebugFILE(gcLogANSIFile);
        fclose(gcLogANSIFile);

        // Strip off "incomplete-".
        nsCOMPtr<nsIFile> gcLogFileFinalDestination =
            CreateTempFile("gc-edges");
        if (NS_WARN_IF(!gcLogFileFinalDestination))
            return NS_ERROR_UNEXPECTED;

        nsAutoString gcLogFileFinalDestinationName;
        gcLogFileFinalDestination->GetLeafName(gcLogFileFinalDestinationName);
        if (NS_WARN_IF(gcLogFileFinalDestinationName.IsEmpty()))
            return NS_ERROR_UNEXPECTED;

        gcLogFile->MoveTo(/* directory */ nullptr, gcLogFileFinalDestinationName);

        // Log to the error console.
        nsCOMPtr<nsIConsoleService> cs =
            do_GetService(NS_CONSOLESERVICE_CONTRACTID);
        if (cs) {
            nsAutoString gcLogPath;
            gcLogFileFinalDestination->GetPath(gcLogPath);

            nsString msg = NS_LITERAL_STRING("Garbage Collector log dumped to ") +
                gcLogPath;
            cs->LogStringMessage(msg.get());

            mGCLogPath = gcLogPath;
        }

        // Open a file for dumping the CC graph. We again prefix with
        // "incomplete-".
        mOutFile = CreateTempFile("incomplete-cc-edges");
        if (NS_WARN_IF(!mOutFile))
            return NS_ERROR_UNEXPECTED;
        MOZ_ASSERT(!mStream);
        mOutFile->OpenANSIFileDesc("w", &mStream);
        if (NS_WARN_IF(!mStream))
            return NS_ERROR_UNEXPECTED;
        MozillaRegisterDebugFILE(mStream);

        fprintf(mStream, "# WantAllTraces=%s\n", mWantAllTraces ? "true" : "false");

        return NS_OK;
    }
    NS_IMETHOD NoteRefCountedObject(uint64_t aAddress, uint32_t refCount,
                                    const char *aObjectDescription)
    {
        if (!mDisableLog) {
            fprintf(mStream, "%p [rc=%u] %s\n", (void*)aAddress, refCount,
                    aObjectDescription);
        }
        if (mWantAfterProcessing) {
            CCGraphDescriber* d = new CCGraphDescriber();
            mDescribers.insertBack(d);
            mCurrentAddress.AssignLiteral("0x");
            mCurrentAddress.AppendInt(aAddress, 16);
            d->mType = CCGraphDescriber::eRefCountedObject;
            d->mAddress = mCurrentAddress;
            d->mCnt = refCount;
            d->mName.Append(aObjectDescription);
        }
        return NS_OK;
    }
    NS_IMETHOD NoteGCedObject(uint64_t aAddress, bool aMarked,
                              const char *aObjectDescription,
                              uint64_t aCompartmentAddress)
    {
        if (!mDisableLog) {
            fprintf(mStream, "%p [gc%s] %s\n", (void*)aAddress,
                    aMarked ? ".marked" : "", aObjectDescription);
        }
        if (mWantAfterProcessing) {
            CCGraphDescriber* d = new CCGraphDescriber();
            mDescribers.insertBack(d);
            mCurrentAddress.AssignLiteral("0x");
            mCurrentAddress.AppendInt(aAddress, 16);
            d->mType = aMarked ? CCGraphDescriber::eGCMarkedObject :
                                 CCGraphDescriber::eGCedObject;
            d->mAddress = mCurrentAddress;
            d->mName.Append(aObjectDescription);
            if (aCompartmentAddress) {
                d->mCompartmentOrToAddress.AssignLiteral("0x");
                d->mCompartmentOrToAddress.AppendInt(aCompartmentAddress, 16);
            } else {
                d->mCompartmentOrToAddress.SetIsVoid(true);
            }
        }
        return NS_OK;
    }
    NS_IMETHOD NoteEdge(uint64_t aToAddress, const char *aEdgeName)
    {
        if (!mDisableLog) {
            fprintf(mStream, "> %p %s\n", (void*)aToAddress, aEdgeName);
        }
        if (mWantAfterProcessing) {
            CCGraphDescriber* d = new CCGraphDescriber();
            mDescribers.insertBack(d);
            d->mType = CCGraphDescriber::eEdge;
            d->mAddress = mCurrentAddress;
            d->mCompartmentOrToAddress.AssignLiteral("0x");
            d->mCompartmentOrToAddress.AppendInt(aToAddress, 16);
            d->mName.Append(aEdgeName);
        }
        return NS_OK;
    }
    NS_IMETHOD NoteWeakMapEntry(uint64_t aMap, uint64_t aKey,
                                uint64_t aKeyDelegate, uint64_t aValue)
    {
        if (!mDisableLog) {
            fprintf(mStream, "WeakMapEntry map=%p key=%p keyDelegate=%p value=%p\n",
                    (void*)aMap, (void*)aKey, (void*)aKeyDelegate, (void*)aValue);
        }
        // We don't support after-processing for weak map entries.
michael@0:         return NS_OK;
michael@0:     }
michael@0:     NS_IMETHOD NoteIncrementalRoot(uint64_t aAddress)
michael@0:     {
michael@0:         if (!mDisableLog) {
michael@0:             fprintf(mStream, "IncrementalRoot %p\n", (void*)aAddress);
michael@0:         }
michael@0:         // We don't support after-processing for incremental roots.
michael@0:         return NS_OK;
michael@0:     }
michael@0:     NS_IMETHOD BeginResults()
michael@0:     {
michael@0:         if (!mDisableLog) {
michael@0:             fputs("==========\n", mStream);
michael@0:         }
michael@0:         return NS_OK;
michael@0:     }
michael@0:     NS_IMETHOD DescribeRoot(uint64_t aAddress, uint32_t aKnownEdges)
michael@0:     {
michael@0:         if (!mDisableLog) {
michael@0:             fprintf(mStream, "%p [known=%u]\n", (void*)aAddress, aKnownEdges);
michael@0:         }
michael@0:         if (mWantAfterProcessing) {
michael@0:             CCGraphDescriber* d = new CCGraphDescriber();
michael@0:             mDescribers.insertBack(d);
michael@0:             d->mType = CCGraphDescriber::eRoot;
michael@0:             d->mAddress.AppendInt(aAddress, 16);
michael@0:             d->mCnt = aKnownEdges;
michael@0:         }
michael@0:         return NS_OK;
michael@0:     }
michael@0:     NS_IMETHOD DescribeGarbage(uint64_t aAddress)
michael@0:     {
michael@0:         if (!mDisableLog) {
michael@0:             fprintf(mStream, "%p [garbage]\n", (void*)aAddress);
michael@0:         }
michael@0:         if (mWantAfterProcessing) {
michael@0:             CCGraphDescriber* d = new CCGraphDescriber();
michael@0:             mDescribers.insertBack(d);
michael@0:             d->mType = CCGraphDescriber::eGarbage;
michael@0:             d->mAddress.AppendInt(aAddress, 16);
michael@0:         }
michael@0:         return NS_OK;
michael@0:     }
michael@0:     NS_IMETHOD End()
michael@0:     {
michael@0:         if (!mDisableLog) {
michael@0:             MOZ_ASSERT(mStream);
michael@0:             MOZ_ASSERT(mOutFile);
michael@0:
michael@0:             MozillaUnRegisterDebugFILE(mStream);
michael@0:             fclose(mStream);
michael@0:             mStream = nullptr;
michael@0:
michael@0:             // Strip off "incomplete-" from the log file's name.
michael@0:             nsCOMPtr<nsIFile> logFileFinalDestination =
michael@0:                 CreateTempFile("cc-edges");
michael@0:             if (NS_WARN_IF(!logFileFinalDestination))
michael@0:                 return NS_ERROR_UNEXPECTED;
michael@0:
michael@0:             nsAutoString logFileFinalDestinationName;
michael@0:             logFileFinalDestination->GetLeafName(logFileFinalDestinationName);
michael@0:             if (NS_WARN_IF(logFileFinalDestinationName.IsEmpty()))
michael@0:                 return NS_ERROR_UNEXPECTED;
michael@0:
michael@0:             mOutFile->MoveTo(/* directory = */ nullptr,
michael@0:                              logFileFinalDestinationName);
michael@0:             mOutFile = nullptr;
michael@0:
michael@0:             // Log to the error console.
michael@0:             nsCOMPtr<nsIConsoleService> cs =
michael@0:                 do_GetService(NS_CONSOLESERVICE_CONTRACTID);
michael@0:             if (cs) {
michael@0:                 nsAutoString ccLogPath;
michael@0:                 logFileFinalDestination->GetPath(ccLogPath);
michael@0:
michael@0:                 nsString msg = NS_LITERAL_STRING("Cycle Collector log dumped to ") +
michael@0:                                ccLogPath;
michael@0:                 cs->LogStringMessage(msg.get());
michael@0:
michael@0:                 mCCLogPath = ccLogPath;
michael@0:             }
michael@0:         }
michael@0:         return NS_OK;
michael@0:     }
michael@0:     NS_IMETHOD ProcessNext(nsICycleCollectorHandler* aHandler,
michael@0:                            bool* aCanContinue)
michael@0:     {
michael@0:         if (NS_WARN_IF(!aHandler) || NS_WARN_IF(!mWantAfterProcessing))
michael@0:             return NS_ERROR_UNEXPECTED;
michael@0:         CCGraphDescriber* d = mDescribers.popFirst();
michael@0:         if (d) {
michael@0:             switch (d->mType) {
michael@0:             case CCGraphDescriber::eRefCountedObject:
michael@0:                 aHandler->NoteRefCountedObject(d->mAddress,
michael@0:                                                d->mCnt,
michael@0:                                                d->mName);
michael@0:                 break;
michael@0:             case CCGraphDescriber::eGCedObject:
michael@0:             case CCGraphDescriber::eGCMarkedObject:
michael@0:                 aHandler->NoteGCedObject(d->mAddress,
michael@0:                                          d->mType ==
michael@0:                                              CCGraphDescriber::eGCMarkedObject,
michael@0:                                          d->mName,
michael@0:                                          d->mCompartmentOrToAddress);
michael@0:                 break;
michael@0:             case CCGraphDescriber::eEdge:
michael@0:                 aHandler->NoteEdge(d->mAddress,
michael@0:                                    d->mCompartmentOrToAddress,
michael@0:                                    d->mName);
michael@0:                 break;
michael@0:             case CCGraphDescriber::eRoot:
michael@0:                 aHandler->DescribeRoot(d->mAddress,
michael@0:                                        d->mCnt);
michael@0:                 break;
michael@0:             case CCGraphDescriber::eGarbage:
michael@0:                 aHandler->DescribeGarbage(d->mAddress);
michael@0:                 break;
michael@0:             case CCGraphDescriber::eUnknown:
michael@0:                 NS_NOTREACHED("CCGraphDescriber::eUnknown");
michael@0:                 break;
michael@0:             }
michael@0:             delete d;
michael@0:         }
michael@0:         if (!(*aCanContinue = !mDescribers.isEmpty())) {
michael@0:             mCurrentAddress.AssignLiteral("0x");
michael@0:         }
michael@0:         return NS_OK;
michael@0:     }
michael@0: private:
michael@0:     /**
michael@0:      * Create a new file named something like aPrefix.$PID.$IDENTIFIER.log in
michael@0:      * $MOZ_CC_LOG_DIRECTORY or in the system's temp directory. No existing
michael@0:      * file will be overwritten; if aPrefix.$PID.$IDENTIFIER.log exists, we'll
michael@0:      * try a file named something like aPrefix.$PID.$IDENTIFIER-1.log, and so
michael@0:      * on.
michael@0:      */
michael@0:     already_AddRefed<nsIFile>
michael@0:     CreateTempFile(const char* aPrefix)
michael@0:     {
michael@0:         nsPrintfCString filename("%s.%d%s%s.log",
michael@0:                                  aPrefix,
michael@0:                                  base::GetCurrentProcId(),
michael@0:                                  mFilenameIdentifier.IsEmpty() ? "" : ".",
michael@0:                                  NS_ConvertUTF16toUTF8(mFilenameIdentifier).get());
michael@0:
michael@0:         // Get the log directory either from $MOZ_CC_LOG_DIRECTORY or from
michael@0:         // the fallback directories in OpenTempFile. We don't use an nsCOMPtr
michael@0:         // here because OpenTempFile uses an in/out param and getter_AddRefs
michael@0:         // wouldn't work.
michael@0:         nsIFile* logFile = nullptr;
michael@0:         if (char* env = PR_GetEnv("MOZ_CC_LOG_DIRECTORY")) {
michael@0:             NS_NewNativeLocalFile(nsCString(env), /* followLinks = */ true,
michael@0:                                   &logFile);
michael@0:         }
michael@0:
michael@0:         // On Android, OpenTempFile will open a file with the given filename
michael@0:         // under a specific folder (/data/local/tmp/memory-reports). Otherwise,
michael@0:         // it will open the file under NS_OS_TEMP_DIR.
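michael@0:         // (If logFile is still null here, OpenTempFile falls back to those
michael@0:         // defaults; otherwise the $MOZ_CC_LOG_DIRECTORY location is kept.)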
michael@0:         nsresult rv = nsDumpUtils::OpenTempFile(
michael@0:             filename,
michael@0:             &logFile,
michael@0:             NS_LITERAL_CSTRING("memory-reports"));
michael@0:         if (NS_FAILED(rv)) {
michael@0:             NS_IF_RELEASE(logFile);
michael@0:             return nullptr;
michael@0:         }
michael@0:
michael@0:         return dont_AddRef(logFile);
michael@0:     }
michael@0:
michael@0:     void ClearDescribers()
michael@0:     {
michael@0:         CCGraphDescriber* d;
michael@0:         while ((d = mDescribers.popFirst())) {
michael@0:             delete d;
michael@0:         }
michael@0:     }
michael@0:
michael@0:     FILE *mStream;
michael@0:     nsCOMPtr<nsIFile> mOutFile;
michael@0:     bool mWantAllTraces;
michael@0:     bool mDisableLog;
michael@0:     bool mWantAfterProcessing;
michael@0:     nsString mFilenameIdentifier;
michael@0:     nsString mGCLogPath;
michael@0:     nsString mCCLogPath;
michael@0:     nsCString mCurrentAddress;
michael@0:     mozilla::LinkedList<CCGraphDescriber> mDescribers;
michael@0: };
michael@0:
michael@0: NS_IMPL_ISUPPORTS(nsCycleCollectorLogger, nsICycleCollectorListener)
michael@0:
michael@0: nsresult
michael@0: nsCycleCollectorLoggerConstructor(nsISupports* aOuter,
michael@0:                                   const nsIID& aIID,
michael@0:                                   void* *aInstancePtr)
michael@0: {
michael@0:     if (NS_WARN_IF(aOuter))
michael@0:         return NS_ERROR_NO_AGGREGATION;
michael@0:
michael@0:     nsISupports *logger = new nsCycleCollectorLogger();
michael@0:
michael@0:     return logger->QueryInterface(aIID, aInstancePtr);
michael@0: }
michael@0:
michael@0: ////////////////////////////////////////////////////////////////////////
michael@0: // Bacon & Rajan's |MarkRoots| routine.
michael@0: ////////////////////////////////////////////////////////////////////////
michael@0:
michael@0: class GCGraphBuilder : public nsCycleCollectionTraversalCallback,
michael@0:                        public nsCycleCollectionNoteRootCallback
michael@0: {
michael@0: private:
michael@0:     GCGraph &mGraph;
michael@0:     CycleCollectorResults &mResults;
michael@0:     NodePool::Builder mNodeBuilder;
michael@0:     EdgePool::Builder mEdgeBuilder;
michael@0:     PtrInfo *mCurrPi;
michael@0:     nsCycleCollectionParticipant *mJSParticipant;
michael@0:     nsCycleCollectionParticipant *mJSZoneParticipant;
michael@0:     nsCString mNextEdgeName;
michael@0:     nsICycleCollectorListener *mListener;
michael@0:     bool mMergeZones;
michael@0:     bool mRanOutOfMemory;
michael@0:
michael@0: public:
michael@0:     GCGraphBuilder(GCGraph &aGraph,
michael@0:                    CycleCollectorResults &aResults,
michael@0:                    CycleCollectedJSRuntime *aJSRuntime,
michael@0:                    nsICycleCollectorListener *aListener,
michael@0:                    bool aMergeZones);
michael@0:     virtual ~GCGraphBuilder();
michael@0:
michael@0:     bool WantAllTraces() const
michael@0:     {
michael@0:         return nsCycleCollectionNoteRootCallback::WantAllTraces();
michael@0:     }
michael@0:
michael@0:     PtrInfo* AddNode(void *aPtr, nsCycleCollectionParticipant *aParticipant);
michael@0:     PtrInfo* AddWeakMapNode(void* node);
michael@0:     void Traverse(PtrInfo* aPtrInfo);
michael@0:     void SetLastChild();
michael@0:
michael@0:     bool RanOutOfMemory() const { return mRanOutOfMemory; }
michael@0:
michael@0: private:
michael@0:     void DescribeNode(uint32_t refCount, const char *objName)
michael@0:     {
michael@0:         mCurrPi->mRefCount = refCount;
michael@0:     }
michael@0:
michael@0: public:
michael@0:     // nsCycleCollectionNoteRootCallback methods.
michael@0: NS_IMETHOD_(void) NoteXPCOMRoot(nsISupports *root); michael@0: NS_IMETHOD_(void) NoteJSRoot(void *root); michael@0: NS_IMETHOD_(void) NoteNativeRoot(void *root, nsCycleCollectionParticipant *participant); michael@0: NS_IMETHOD_(void) NoteWeakMapping(void *map, void *key, void *kdelegate, void *val); michael@0: michael@0: // nsCycleCollectionTraversalCallback methods. michael@0: NS_IMETHOD_(void) DescribeRefCountedNode(nsrefcnt refCount, michael@0: const char *objName); michael@0: NS_IMETHOD_(void) DescribeGCedNode(bool isMarked, const char *objName, michael@0: uint64_t aCompartmentAddress); michael@0: michael@0: NS_IMETHOD_(void) NoteXPCOMChild(nsISupports *child); michael@0: NS_IMETHOD_(void) NoteJSChild(void *child); michael@0: NS_IMETHOD_(void) NoteNativeChild(void *child, michael@0: nsCycleCollectionParticipant *participant); michael@0: NS_IMETHOD_(void) NoteNextEdgeName(const char* name); michael@0: michael@0: private: michael@0: NS_IMETHOD_(void) NoteRoot(void *root, michael@0: nsCycleCollectionParticipant *participant) michael@0: { michael@0: MOZ_ASSERT(root); michael@0: MOZ_ASSERT(participant); michael@0: michael@0: if (!participant->CanSkipInCC(root) || MOZ_UNLIKELY(WantAllTraces())) { michael@0: AddNode(root, participant); michael@0: } michael@0: } michael@0: michael@0: NS_IMETHOD_(void) NoteChild(void *child, nsCycleCollectionParticipant *cp, michael@0: nsCString edgeName) michael@0: { michael@0: PtrInfo *childPi = AddNode(child, cp); michael@0: if (!childPi) michael@0: return; michael@0: mEdgeBuilder.Add(childPi); michael@0: if (mListener) { michael@0: mListener->NoteEdge((uint64_t)child, edgeName.get()); michael@0: } michael@0: ++childPi->mInternalRefs; michael@0: } michael@0: michael@0: JS::Zone *MergeZone(void *gcthing) { michael@0: if (!mMergeZones) { michael@0: return nullptr; michael@0: } michael@0: JS::Zone *zone = JS::GetGCThingZone(gcthing); michael@0: if (js::IsSystemZone(zone)) { michael@0: return nullptr; michael@0: } michael@0: return zone; michael@0: } michael@0: }; michael@0: michael@0: GCGraphBuilder::GCGraphBuilder(GCGraph &aGraph, michael@0: CycleCollectorResults &aResults, michael@0: CycleCollectedJSRuntime *aJSRuntime, michael@0: nsICycleCollectorListener *aListener, michael@0: bool aMergeZones) michael@0: : mGraph(aGraph), michael@0: mResults(aResults), michael@0: mNodeBuilder(aGraph.mNodes), michael@0: mEdgeBuilder(aGraph.mEdges), michael@0: mJSParticipant(nullptr), michael@0: mJSZoneParticipant(nullptr), michael@0: mListener(aListener), michael@0: mMergeZones(aMergeZones), michael@0: mRanOutOfMemory(false) michael@0: { michael@0: if (aJSRuntime) { michael@0: mJSParticipant = aJSRuntime->GCThingParticipant(); michael@0: mJSZoneParticipant = aJSRuntime->ZoneParticipant(); michael@0: } michael@0: michael@0: uint32_t flags = 0; michael@0: if (!flags && mListener) { michael@0: flags = nsCycleCollectionTraversalCallback::WANT_DEBUG_INFO; michael@0: bool all = false; michael@0: mListener->GetWantAllTraces(&all); michael@0: if (all) { michael@0: flags |= nsCycleCollectionTraversalCallback::WANT_ALL_TRACES; michael@0: mWantAllTraces = true; // for nsCycleCollectionNoteRootCallback michael@0: } michael@0: } michael@0: michael@0: mFlags |= flags; michael@0: michael@0: mMergeZones = mMergeZones && MOZ_LIKELY(!WantAllTraces()); michael@0: michael@0: MOZ_ASSERT(nsCycleCollectionNoteRootCallback::WantAllTraces() == michael@0: nsCycleCollectionTraversalCallback::WantAllTraces()); michael@0: } michael@0: michael@0: GCGraphBuilder::~GCGraphBuilder() michael@0: { 
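michael@0:     // Nothing to tear down: the builder holds only non-owning pointers, and
michael@0:     // the node/edge pool builders write into storage owned by the graph.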
michael@0: } michael@0: michael@0: PtrInfo* michael@0: GCGraphBuilder::AddNode(void *aPtr, nsCycleCollectionParticipant *aParticipant) michael@0: { michael@0: PtrToNodeEntry *e = mGraph.AddNodeToMap(aPtr); michael@0: if (!e) { michael@0: mRanOutOfMemory = true; michael@0: return nullptr; michael@0: } michael@0: michael@0: PtrInfo *result; michael@0: if (!e->mNode) { michael@0: // New entry. michael@0: result = mNodeBuilder.Add(aPtr, aParticipant); michael@0: e->mNode = result; michael@0: NS_ASSERTION(result, "mNodeBuilder.Add returned null"); michael@0: } else { michael@0: result = e->mNode; michael@0: MOZ_ASSERT(result->mParticipant == aParticipant, michael@0: "nsCycleCollectionParticipant shouldn't change!"); michael@0: } michael@0: return result; michael@0: } michael@0: michael@0: MOZ_NEVER_INLINE void michael@0: GCGraphBuilder::Traverse(PtrInfo* aPtrInfo) michael@0: { michael@0: mCurrPi = aPtrInfo; michael@0: michael@0: mCurrPi->SetFirstChild(mEdgeBuilder.Mark()); michael@0: michael@0: if (!aPtrInfo->mParticipant) { michael@0: return; michael@0: } michael@0: michael@0: nsresult rv = aPtrInfo->mParticipant->Traverse(aPtrInfo->mPointer, *this); michael@0: if (NS_FAILED(rv)) { michael@0: Fault("script pointer traversal failed", aPtrInfo); michael@0: } michael@0: } michael@0: michael@0: void michael@0: GCGraphBuilder::SetLastChild() michael@0: { michael@0: mCurrPi->SetLastChild(mEdgeBuilder.Mark()); michael@0: } michael@0: michael@0: NS_IMETHODIMP_(void) michael@0: GCGraphBuilder::NoteXPCOMRoot(nsISupports *root) michael@0: { michael@0: root = CanonicalizeXPCOMParticipant(root); michael@0: NS_ASSERTION(root, michael@0: "Don't add objects that don't participate in collection!"); michael@0: michael@0: nsXPCOMCycleCollectionParticipant *cp; michael@0: ToParticipant(root, &cp); michael@0: michael@0: NoteRoot(root, cp); michael@0: } michael@0: michael@0: NS_IMETHODIMP_(void) michael@0: GCGraphBuilder::NoteJSRoot(void *root) michael@0: { michael@0: if (JS::Zone *zone = MergeZone(root)) { michael@0: NoteRoot(zone, mJSZoneParticipant); michael@0: } else { michael@0: NoteRoot(root, mJSParticipant); michael@0: } michael@0: } michael@0: michael@0: NS_IMETHODIMP_(void) michael@0: GCGraphBuilder::NoteNativeRoot(void *root, nsCycleCollectionParticipant *participant) michael@0: { michael@0: NoteRoot(root, participant); michael@0: } michael@0: michael@0: NS_IMETHODIMP_(void) michael@0: GCGraphBuilder::DescribeRefCountedNode(nsrefcnt refCount, const char *objName) michael@0: { michael@0: if (refCount == 0) michael@0: Fault("zero refcount", mCurrPi); michael@0: if (refCount == UINT32_MAX) michael@0: Fault("overflowing refcount", mCurrPi); michael@0: mResults.mVisitedRefCounted++; michael@0: michael@0: if (mListener) { michael@0: mListener->NoteRefCountedObject((uint64_t)mCurrPi->mPointer, refCount, michael@0: objName); michael@0: } michael@0: michael@0: DescribeNode(refCount, objName); michael@0: } michael@0: michael@0: NS_IMETHODIMP_(void) michael@0: GCGraphBuilder::DescribeGCedNode(bool isMarked, const char *objName, michael@0: uint64_t aCompartmentAddress) michael@0: { michael@0: uint32_t refCount = isMarked ? 
UINT32_MAX : 0; michael@0: mResults.mVisitedGCed++; michael@0: michael@0: if (mListener) { michael@0: mListener->NoteGCedObject((uint64_t)mCurrPi->mPointer, isMarked, michael@0: objName, aCompartmentAddress); michael@0: } michael@0: michael@0: DescribeNode(refCount, objName); michael@0: } michael@0: michael@0: NS_IMETHODIMP_(void) michael@0: GCGraphBuilder::NoteXPCOMChild(nsISupports *child) michael@0: { michael@0: nsCString edgeName; michael@0: if (WantDebugInfo()) { michael@0: edgeName.Assign(mNextEdgeName); michael@0: mNextEdgeName.Truncate(); michael@0: } michael@0: if (!child || !(child = CanonicalizeXPCOMParticipant(child))) michael@0: return; michael@0: michael@0: nsXPCOMCycleCollectionParticipant *cp; michael@0: ToParticipant(child, &cp); michael@0: if (cp && (!cp->CanSkipThis(child) || WantAllTraces())) { michael@0: NoteChild(child, cp, edgeName); michael@0: } michael@0: } michael@0: michael@0: NS_IMETHODIMP_(void) michael@0: GCGraphBuilder::NoteNativeChild(void *child, michael@0: nsCycleCollectionParticipant *participant) michael@0: { michael@0: nsCString edgeName; michael@0: if (WantDebugInfo()) { michael@0: edgeName.Assign(mNextEdgeName); michael@0: mNextEdgeName.Truncate(); michael@0: } michael@0: if (!child) michael@0: return; michael@0: michael@0: MOZ_ASSERT(participant, "Need a nsCycleCollectionParticipant!"); michael@0: NoteChild(child, participant, edgeName); michael@0: } michael@0: michael@0: NS_IMETHODIMP_(void) michael@0: GCGraphBuilder::NoteJSChild(void *child) michael@0: { michael@0: if (!child) { michael@0: return; michael@0: } michael@0: michael@0: nsCString edgeName; michael@0: if (MOZ_UNLIKELY(WantDebugInfo())) { michael@0: edgeName.Assign(mNextEdgeName); michael@0: mNextEdgeName.Truncate(); michael@0: } michael@0: michael@0: if (xpc_GCThingIsGrayCCThing(child) || MOZ_UNLIKELY(WantAllTraces())) { michael@0: if (JS::Zone *zone = MergeZone(child)) { michael@0: NoteChild(zone, mJSZoneParticipant, edgeName); michael@0: } else { michael@0: NoteChild(child, mJSParticipant, edgeName); michael@0: } michael@0: } michael@0: } michael@0: michael@0: NS_IMETHODIMP_(void) michael@0: GCGraphBuilder::NoteNextEdgeName(const char* name) michael@0: { michael@0: if (WantDebugInfo()) { michael@0: mNextEdgeName = name; michael@0: } michael@0: } michael@0: michael@0: PtrInfo* michael@0: GCGraphBuilder::AddWeakMapNode(void *node) michael@0: { michael@0: MOZ_ASSERT(node, "Weak map node should be non-null."); michael@0: michael@0: if (!xpc_GCThingIsGrayCCThing(node) && !WantAllTraces()) michael@0: return nullptr; michael@0: michael@0: if (JS::Zone *zone = MergeZone(node)) { michael@0: return AddNode(zone, mJSZoneParticipant); michael@0: } else { michael@0: return AddNode(node, mJSParticipant); michael@0: } michael@0: } michael@0: michael@0: NS_IMETHODIMP_(void) michael@0: GCGraphBuilder::NoteWeakMapping(void *map, void *key, void *kdelegate, void *val) michael@0: { michael@0: // Don't try to optimize away the entry here, as we've already attempted to michael@0: // do that in TraceWeakMapping in nsXPConnect. michael@0: WeakMapping *mapping = mGraph.mWeakMaps.AppendElement(); michael@0: mapping->mMap = map ? AddWeakMapNode(map) : nullptr; michael@0: mapping->mKey = key ? AddWeakMapNode(key) : nullptr; michael@0: mapping->mKeyDelegate = kdelegate ? AddWeakMapNode(kdelegate) : mapping->mKey; michael@0: mapping->mVal = val ? 
AddWeakMapNode(val) : nullptr;
michael@0:
michael@0:     if (mListener) {
michael@0:         mListener->NoteWeakMapEntry((uint64_t)map, (uint64_t)key,
michael@0:                                     (uint64_t)kdelegate, (uint64_t)val);
michael@0:     }
michael@0: }
michael@0:
michael@0: static bool
michael@0: AddPurpleRoot(GCGraphBuilder &aBuilder, void *aRoot, nsCycleCollectionParticipant *aParti)
michael@0: {
michael@0:     CanonicalizeParticipant(&aRoot, &aParti);
michael@0:
michael@0:     if (aBuilder.WantAllTraces() || !aParti->CanSkipInCC(aRoot)) {
michael@0:         PtrInfo *pinfo = aBuilder.AddNode(aRoot, aParti);
michael@0:         if (!pinfo) {
michael@0:             return false;
michael@0:         }
michael@0:     }
michael@0:
michael@0:     return true;
michael@0: }
michael@0:
michael@0: // MayHaveChild() will be false after a Traverse if the object does
michael@0: // not have any children the CC will visit.
michael@0: class ChildFinder : public nsCycleCollectionTraversalCallback
michael@0: {
michael@0: public:
michael@0:     ChildFinder() : mMayHaveChild(false) {}
michael@0:
michael@0:     // The logic of the Note*Child functions must mirror that of their
michael@0:     // respective functions in GCGraphBuilder.
michael@0:     NS_IMETHOD_(void) NoteXPCOMChild(nsISupports *child);
michael@0:     NS_IMETHOD_(void) NoteNativeChild(void *child,
michael@0:                                       nsCycleCollectionParticipant *helper);
michael@0:     NS_IMETHOD_(void) NoteJSChild(void *child);
michael@0:
michael@0:     NS_IMETHOD_(void) DescribeRefCountedNode(nsrefcnt refcount,
michael@0:                                              const char *objname) {}
michael@0:     NS_IMETHOD_(void) DescribeGCedNode(bool ismarked,
michael@0:                                        const char *objname,
michael@0:                                        uint64_t aCompartmentAddress) {}
michael@0:     NS_IMETHOD_(void) NoteNextEdgeName(const char* name) {}
michael@0:     bool MayHaveChild() {
michael@0:         return mMayHaveChild;
michael@0:     }
michael@0: private:
michael@0:     bool mMayHaveChild;
michael@0: };
michael@0:
michael@0: NS_IMETHODIMP_(void)
michael@0: ChildFinder::NoteXPCOMChild(nsISupports *child)
michael@0: {
michael@0:     if (!child || !(child = CanonicalizeXPCOMParticipant(child)))
michael@0:         return;
michael@0:     nsXPCOMCycleCollectionParticipant *cp;
michael@0:     ToParticipant(child, &cp);
michael@0:     if (cp && !cp->CanSkip(child, true))
michael@0:         mMayHaveChild = true;
michael@0: }
michael@0:
michael@0: NS_IMETHODIMP_(void)
michael@0: ChildFinder::NoteNativeChild(void *child,
michael@0:                              nsCycleCollectionParticipant *helper)
michael@0: {
michael@0:     if (child)
michael@0:         mMayHaveChild = true;
michael@0: }
michael@0:
michael@0: NS_IMETHODIMP_(void)
michael@0: ChildFinder::NoteJSChild(void *child)
michael@0: {
michael@0:     if (child && xpc_GCThingIsGrayCCThing(child)) {
michael@0:         mMayHaveChild = true;
michael@0:     }
michael@0: }
michael@0:
michael@0: static bool
michael@0: MayHaveChild(void *o, nsCycleCollectionParticipant* cp)
michael@0: {
michael@0:     ChildFinder cf;
michael@0:     cp->Traverse(o, cf);
michael@0:     return cf.MayHaveChild();
michael@0: }
michael@0:
michael@0: template<class T>
michael@0: class SegmentedArrayElement : public LinkedListElement<SegmentedArrayElement<T>>
michael@0:                             , public AutoFallibleTArray<T, 60>
michael@0: {
michael@0: };
michael@0:
michael@0: template<class T>
michael@0: class SegmentedArray
michael@0: {
michael@0: public:
michael@0:     ~SegmentedArray()
michael@0:     {
michael@0:         MOZ_ASSERT(IsEmpty());
michael@0:     }
michael@0:
michael@0:     void AppendElement(T& aElement)
michael@0:     {
michael@0:         SegmentedArrayElement<T>* last = mSegments.getLast();
michael@0:         if (!last || last->Length() == last->Capacity()) {
michael@0:             last = new SegmentedArrayElement<T>();
michael@0:             mSegments.insertBack(last);
michael@0:         }
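michael@0:         // |last| now has spare capacity, so this fallible append succeeds.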
michael@0:         last->AppendElement(aElement);
michael@0:     }
michael@0:
michael@0:     void Clear()
michael@0:     {
michael@0:         SegmentedArrayElement<T>* first;
michael@0:         while ((first = mSegments.popFirst())) {
michael@0:             delete first;
michael@0:         }
michael@0:     }
michael@0:
michael@0:     SegmentedArrayElement<T>* GetFirstSegment()
michael@0:     {
michael@0:         return mSegments.getFirst();
michael@0:     }
michael@0:
michael@0:     bool IsEmpty()
michael@0:     {
michael@0:         return !GetFirstSegment();
michael@0:     }
michael@0:
michael@0: private:
michael@0:     mozilla::LinkedList<SegmentedArrayElement<T>> mSegments;
michael@0: };
michael@0:
michael@0: // JSPurpleBuffer keeps references to GCThings which might affect the
michael@0: // next cycle collection. It is owned only by itself: during Unlink its
michael@0: // self-reference is broken and the object ends up deleting itself.
michael@0: // If a GC happens before the CC, the references to GCThings and the
michael@0: // self-reference are removed.
michael@0: class JSPurpleBuffer
michael@0: {
michael@0: public:
michael@0:     JSPurpleBuffer(JSPurpleBuffer*& aReferenceToThis)
michael@0:       : mReferenceToThis(aReferenceToThis)
michael@0:     {
michael@0:         mReferenceToThis = this;
michael@0:         NS_ADDREF_THIS();
michael@0:         mozilla::HoldJSObjects(this);
michael@0:     }
michael@0:
michael@0:     ~JSPurpleBuffer()
michael@0:     {
michael@0:         MOZ_ASSERT(mValues.IsEmpty());
michael@0:         MOZ_ASSERT(mObjects.IsEmpty());
michael@0:         MOZ_ASSERT(mTenuredObjects.IsEmpty());
michael@0:     }
michael@0:
michael@0:     void Destroy()
michael@0:     {
michael@0:         mReferenceToThis = nullptr;
michael@0:         mValues.Clear();
michael@0:         mObjects.Clear();
michael@0:         mTenuredObjects.Clear();
michael@0:         mozilla::DropJSObjects(this);
michael@0:         NS_RELEASE_THIS();
michael@0:     }
michael@0:
michael@0:     NS_INLINE_DECL_CYCLE_COLLECTING_NATIVE_REFCOUNTING(JSPurpleBuffer)
michael@0:     NS_DECL_CYCLE_COLLECTION_SCRIPT_HOLDER_NATIVE_CLASS(JSPurpleBuffer)
michael@0:
michael@0:     JSPurpleBuffer*& mReferenceToThis;
michael@0:     SegmentedArray<JS::Heap<JS::Value>> mValues;
michael@0:     SegmentedArray<JS::Heap<JSObject*>> mObjects;
michael@0:     SegmentedArray<JS::TenuredHeap<JSObject*>> mTenuredObjects;
michael@0: };
michael@0:
michael@0: NS_IMPL_CYCLE_COLLECTION_CLASS(JSPurpleBuffer)
michael@0:
michael@0: NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN(JSPurpleBuffer)
michael@0:     tmp->Destroy();
michael@0: NS_IMPL_CYCLE_COLLECTION_UNLINK_END
michael@0:
michael@0: NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN(JSPurpleBuffer)
michael@0:     CycleCollectionNoteChild(cb, tmp, "self");
michael@0:     NS_IMPL_CYCLE_COLLECTION_TRAVERSE_SCRIPT_OBJECTS
michael@0: NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END
michael@0:
michael@0: #define NS_TRACE_SEGMENTED_ARRAY(_field) \
michael@0:     { \
michael@0:         auto segment = tmp->_field.GetFirstSegment(); \
michael@0:         while (segment) { \
michael@0:             for (uint32_t i = segment->Length(); i > 0;) { \
michael@0:                 aCallbacks.Trace(&segment->ElementAt(--i), #_field, aClosure); \
michael@0:             } \
michael@0:             segment = segment->getNext(); \
michael@0:         } \
michael@0:     }
michael@0:
michael@0: NS_IMPL_CYCLE_COLLECTION_TRACE_BEGIN(JSPurpleBuffer)
michael@0:     NS_TRACE_SEGMENTED_ARRAY(mValues)
michael@0:     NS_TRACE_SEGMENTED_ARRAY(mObjects)
michael@0:     NS_TRACE_SEGMENTED_ARRAY(mTenuredObjects)
michael@0: NS_IMPL_CYCLE_COLLECTION_TRACE_END
michael@0:
michael@0: NS_IMPL_CYCLE_COLLECTION_ROOT_NATIVE(JSPurpleBuffer, AddRef)
michael@0: NS_IMPL_CYCLE_COLLECTION_UNROOT_NATIVE(JSPurpleBuffer, Release)
michael@0:
michael@0: struct SnowWhiteObject
michael@0: {
michael@0:     void* mPointer;
michael@0:     nsCycleCollectionParticipant* mParticipant;
michael@0:     nsCycleCollectingAutoRefCnt* mRefCnt;
michael@0: };
michael@0:
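michael@0: // SnowWhiteKiller visits the purple buffer and collects every entry whose
michael@0: // refcount has already dropped to zero (a "snow-white" object). Visit() only
michael@0: // records the object and removes its purple-buffer entry; the recorded
michael@0: // objects are traced and deleted later, in the killer's destructor, so no
michael@0: // destructor code runs in the middle of the buffer walk. A sketch of the
michael@0: // usage pattern (not a call site from this file; see
michael@0: // nsCycleCollector::FreeSnowWhite below for the real one):
michael@0: //
michael@0: //   {
michael@0: //       SnowWhiteKiller killer(collector, purpleBuffer.Count());
michael@0: //       purpleBuffer.VisitEntries(killer);
michael@0: //   }   // ~SnowWhiteKiller runs here and frees the snow-white objects.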
class SnowWhiteKiller : public TraceCallbacks michael@0: { michael@0: public: michael@0: SnowWhiteKiller(nsCycleCollector *aCollector, uint32_t aMaxCount) michael@0: : mCollector(aCollector) michael@0: { michael@0: MOZ_ASSERT(mCollector, "Calling SnowWhiteKiller after nsCC went away"); michael@0: while (true) { michael@0: if (mObjects.SetCapacity(aMaxCount)) { michael@0: break; michael@0: } michael@0: if (aMaxCount == 1) { michael@0: NS_RUNTIMEABORT("Not enough memory to even delete objects!"); michael@0: } michael@0: aMaxCount /= 2; michael@0: } michael@0: } michael@0: michael@0: ~SnowWhiteKiller() michael@0: { michael@0: for (uint32_t i = 0; i < mObjects.Length(); ++i) { michael@0: SnowWhiteObject& o = mObjects[i]; michael@0: if (!o.mRefCnt->get() && !o.mRefCnt->IsInPurpleBuffer()) { michael@0: mCollector->RemoveObjectFromGraph(o.mPointer); michael@0: o.mRefCnt->stabilizeForDeletion(); michael@0: o.mParticipant->Trace(o.mPointer, *this, nullptr); michael@0: o.mParticipant->DeleteCycleCollectable(o.mPointer); michael@0: } michael@0: } michael@0: } michael@0: michael@0: void michael@0: Visit(nsPurpleBuffer& aBuffer, nsPurpleBufferEntry* aEntry) michael@0: { michael@0: MOZ_ASSERT(aEntry->mObject, "Null object in purple buffer"); michael@0: if (!aEntry->mRefCnt->get()) { michael@0: void *o = aEntry->mObject; michael@0: nsCycleCollectionParticipant *cp = aEntry->mParticipant; michael@0: CanonicalizeParticipant(&o, &cp); michael@0: SnowWhiteObject swo = { o, cp, aEntry->mRefCnt }; michael@0: if (mObjects.AppendElement(swo)) { michael@0: aBuffer.Remove(aEntry); michael@0: } michael@0: } michael@0: } michael@0: michael@0: bool HasSnowWhiteObjects() const michael@0: { michael@0: return mObjects.Length() > 0; michael@0: } michael@0: michael@0: virtual void Trace(JS::Heap* aValue, const char* aName, michael@0: void* aClosure) const michael@0: { michael@0: if (aValue->isMarkable()) { michael@0: void* thing = aValue->toGCThing(); michael@0: if (thing && xpc_GCThingIsGrayCCThing(thing)) { michael@0: mCollector->GetJSPurpleBuffer()->mValues.AppendElement(*aValue); michael@0: } michael@0: } michael@0: } michael@0: michael@0: virtual void Trace(JS::Heap* aId, const char* aName, michael@0: void* aClosure) const michael@0: { michael@0: } michael@0: michael@0: virtual void Trace(JS::Heap* aObject, const char* aName, michael@0: void* aClosure) const michael@0: { michael@0: if (*aObject && xpc_GCThingIsGrayCCThing(*aObject)) { michael@0: mCollector->GetJSPurpleBuffer()->mObjects.AppendElement(*aObject); michael@0: } michael@0: } michael@0: michael@0: virtual void Trace(JS::TenuredHeap* aObject, const char* aName, michael@0: void* aClosure) const michael@0: { michael@0: if (*aObject && xpc_GCThingIsGrayCCThing(*aObject)) { michael@0: mCollector->GetJSPurpleBuffer()->mTenuredObjects.AppendElement(*aObject); michael@0: } michael@0: } michael@0: michael@0: virtual void Trace(JS::Heap* aString, const char* aName, michael@0: void* aClosure) const michael@0: { michael@0: } michael@0: michael@0: virtual void Trace(JS::Heap* aScript, const char* aName, michael@0: void* aClosure) const michael@0: { michael@0: } michael@0: michael@0: virtual void Trace(JS::Heap* aFunction, const char* aName, michael@0: void* aClosure) const michael@0: { michael@0: } michael@0: michael@0: private: michael@0: nsCycleCollector *mCollector; michael@0: FallibleTArray mObjects; michael@0: }; michael@0: michael@0: class RemoveSkippableVisitor : public SnowWhiteKiller michael@0: { michael@0: public: michael@0: 
RemoveSkippableVisitor(nsCycleCollector* aCollector, michael@0: uint32_t aMaxCount, bool aRemoveChildlessNodes, michael@0: bool aAsyncSnowWhiteFreeing, michael@0: CC_ForgetSkippableCallback aCb) michael@0: : SnowWhiteKiller(aCollector, aAsyncSnowWhiteFreeing ? 0 : aMaxCount), michael@0: mRemoveChildlessNodes(aRemoveChildlessNodes), michael@0: mAsyncSnowWhiteFreeing(aAsyncSnowWhiteFreeing), michael@0: mDispatchedDeferredDeletion(false), michael@0: mCallback(aCb) michael@0: {} michael@0: michael@0: ~RemoveSkippableVisitor() michael@0: { michael@0: // Note, we must call the callback before SnowWhiteKiller calls michael@0: // DeleteCycleCollectable! michael@0: if (mCallback) { michael@0: mCallback(); michael@0: } michael@0: if (HasSnowWhiteObjects()) { michael@0: // Effectively a continuation. michael@0: nsCycleCollector_dispatchDeferredDeletion(true); michael@0: } michael@0: } michael@0: michael@0: void michael@0: Visit(nsPurpleBuffer &aBuffer, nsPurpleBufferEntry *aEntry) michael@0: { michael@0: MOZ_ASSERT(aEntry->mObject, "null mObject in purple buffer"); michael@0: if (!aEntry->mRefCnt->get()) { michael@0: if (!mAsyncSnowWhiteFreeing) { michael@0: SnowWhiteKiller::Visit(aBuffer, aEntry); michael@0: } else if (!mDispatchedDeferredDeletion) { michael@0: mDispatchedDeferredDeletion = true; michael@0: nsCycleCollector_dispatchDeferredDeletion(false); michael@0: } michael@0: return; michael@0: } michael@0: void *o = aEntry->mObject; michael@0: nsCycleCollectionParticipant *cp = aEntry->mParticipant; michael@0: CanonicalizeParticipant(&o, &cp); michael@0: if (aEntry->mRefCnt->IsPurple() && !cp->CanSkip(o, false) && michael@0: (!mRemoveChildlessNodes || MayHaveChild(o, cp))) { michael@0: return; michael@0: } michael@0: aBuffer.Remove(aEntry); michael@0: } michael@0: michael@0: private: michael@0: bool mRemoveChildlessNodes; michael@0: bool mAsyncSnowWhiteFreeing; michael@0: bool mDispatchedDeferredDeletion; michael@0: CC_ForgetSkippableCallback mCallback; michael@0: }; michael@0: michael@0: void michael@0: nsPurpleBuffer::RemoveSkippable(nsCycleCollector* aCollector, michael@0: bool aRemoveChildlessNodes, michael@0: bool aAsyncSnowWhiteFreeing, michael@0: CC_ForgetSkippableCallback aCb) michael@0: { michael@0: RemoveSkippableVisitor visitor(aCollector, Count(), aRemoveChildlessNodes, michael@0: aAsyncSnowWhiteFreeing, aCb); michael@0: VisitEntries(visitor); michael@0: } michael@0: michael@0: bool michael@0: nsCycleCollector::FreeSnowWhite(bool aUntilNoSWInPurpleBuffer) michael@0: { michael@0: CheckThreadSafety(); michael@0: michael@0: if (mFreeingSnowWhite) { michael@0: return false; michael@0: } michael@0: michael@0: AutoRestore ar(mFreeingSnowWhite); michael@0: mFreeingSnowWhite = true; michael@0: michael@0: bool hadSnowWhiteObjects = false; michael@0: do { michael@0: SnowWhiteKiller visitor(this, mPurpleBuf.Count()); michael@0: mPurpleBuf.VisitEntries(visitor); michael@0: hadSnowWhiteObjects = hadSnowWhiteObjects || michael@0: visitor.HasSnowWhiteObjects(); michael@0: if (!visitor.HasSnowWhiteObjects()) { michael@0: break; michael@0: } michael@0: } while (aUntilNoSWInPurpleBuffer); michael@0: return hadSnowWhiteObjects; michael@0: } michael@0: michael@0: void michael@0: nsCycleCollector::ForgetSkippable(bool aRemoveChildlessNodes, michael@0: bool aAsyncSnowWhiteFreeing) michael@0: { michael@0: CheckThreadSafety(); michael@0: michael@0: // If we remove things from the purple buffer during graph building, we may michael@0: // lose track of an object that was mutated during graph building. 
michael@0: MOZ_ASSERT(mIncrementalPhase == IdlePhase); michael@0: michael@0: if (mJSRuntime) { michael@0: mJSRuntime->PrepareForForgetSkippable(); michael@0: } michael@0: MOZ_ASSERT(!mScanInProgress, "Don't forget skippable or free snow-white while scan is in progress."); michael@0: mPurpleBuf.RemoveSkippable(this, aRemoveChildlessNodes, michael@0: aAsyncSnowWhiteFreeing, mForgetSkippableCB); michael@0: } michael@0: michael@0: MOZ_NEVER_INLINE void michael@0: nsCycleCollector::MarkRoots(SliceBudget &aBudget) michael@0: { michael@0: const intptr_t kNumNodesBetweenTimeChecks = 1000; michael@0: const intptr_t kStep = SliceBudget::CounterReset / kNumNodesBetweenTimeChecks; michael@0: michael@0: TimeLog timeLog; michael@0: AutoRestore ar(mScanInProgress); michael@0: MOZ_ASSERT(!mScanInProgress); michael@0: mScanInProgress = true; michael@0: MOZ_ASSERT(mIncrementalPhase == GraphBuildingPhase); michael@0: MOZ_ASSERT(mCurrNode); michael@0: michael@0: while (!aBudget.isOverBudget() && !mCurrNode->IsDone()) { michael@0: PtrInfo *pi = mCurrNode->GetNext(); michael@0: if (!pi) { michael@0: MOZ_CRASH(); michael@0: } michael@0: michael@0: // We need to call the builder's Traverse() method on deleted nodes, to michael@0: // set their firstChild() that may be read by a prior non-deleted michael@0: // neighbor. michael@0: mBuilder->Traverse(pi); michael@0: if (mCurrNode->AtBlockEnd()) { michael@0: mBuilder->SetLastChild(); michael@0: } michael@0: aBudget.step(kStep); michael@0: } michael@0: michael@0: if (!mCurrNode->IsDone()) { michael@0: timeLog.Checkpoint("MarkRoots()"); michael@0: return; michael@0: } michael@0: michael@0: if (mGraph.mRootCount > 0) { michael@0: mBuilder->SetLastChild(); michael@0: } michael@0: michael@0: if (mBuilder->RanOutOfMemory()) { michael@0: MOZ_ASSERT(false, "Ran out of memory while building cycle collector graph"); michael@0: CC_TELEMETRY(_OOM, true); michael@0: } michael@0: michael@0: mBuilder = nullptr; michael@0: mCurrNode = nullptr; michael@0: mIncrementalPhase = ScanAndCollectWhitePhase; michael@0: timeLog.Checkpoint("MarkRoots()"); michael@0: } michael@0: michael@0: michael@0: //////////////////////////////////////////////////////////////////////// michael@0: // Bacon & Rajan's |ScanRoots| routine. 
michael@0: ////////////////////////////////////////////////////////////////////////
michael@0:
michael@0:
michael@0: struct ScanBlackVisitor
michael@0: {
michael@0:     ScanBlackVisitor(uint32_t &aWhiteNodeCount, bool &aFailed)
michael@0:       : mWhiteNodeCount(aWhiteNodeCount), mFailed(aFailed)
michael@0:     {
michael@0:     }
michael@0:
michael@0:     bool ShouldVisitNode(PtrInfo const *pi)
michael@0:     {
michael@0:         return pi->mColor != black;
michael@0:     }
michael@0:
michael@0:     MOZ_NEVER_INLINE void VisitNode(PtrInfo *pi)
michael@0:     {
michael@0:         if (pi->mColor == white)
michael@0:             --mWhiteNodeCount;
michael@0:         pi->mColor = black;
michael@0:     }
michael@0:
michael@0:     void Failed()
michael@0:     {
michael@0:         mFailed = true;
michael@0:     }
michael@0:
michael@0: private:
michael@0:     uint32_t &mWhiteNodeCount;
michael@0:     bool &mFailed;
michael@0: };
michael@0:
michael@0:
michael@0: struct scanVisitor
michael@0: {
michael@0:     scanVisitor(uint32_t &aWhiteNodeCount, bool &aFailed, bool aWasIncremental)
michael@0:       : mWhiteNodeCount(aWhiteNodeCount), mFailed(aFailed),
michael@0:         mWasIncremental(aWasIncremental)
michael@0:     {
michael@0:     }
michael@0:
michael@0:     bool ShouldVisitNode(PtrInfo const *pi)
michael@0:     {
michael@0:         return pi->mColor == grey;
michael@0:     }
michael@0:
michael@0:     MOZ_NEVER_INLINE void VisitNode(PtrInfo *pi)
michael@0:     {
michael@0:         if (pi->mInternalRefs > pi->mRefCount && pi->mRefCount > 0) {
michael@0:             // If we found more references to an object than its ref count, then
michael@0:             // the object should have already been marked as an incremental
michael@0:             // root. Note that this is imprecise, because pi could have been
michael@0:             // marked black for other reasons. Always fault if we weren't
michael@0:             // incremental, as there were no incremental roots in that case.
michael@0:             if (!mWasIncremental || pi->mColor != black) {
michael@0:                 Fault("traversed refs exceed refcount", pi);
michael@0:             }
michael@0:         }
michael@0:
michael@0:         if (pi->mInternalRefs == pi->mRefCount || pi->mRefCount == 0) {
michael@0:             pi->mColor = white;
michael@0:             ++mWhiteNodeCount;
michael@0:         } else {
michael@0:             GraphWalker<ScanBlackVisitor>(ScanBlackVisitor(mWhiteNodeCount, mFailed)).Walk(pi);
michael@0:             MOZ_ASSERT(pi->mColor == black,
michael@0:                        "Why didn't ScanBlackVisitor make pi black?");
michael@0:         }
michael@0:     }
michael@0:
michael@0:     void Failed() {
michael@0:         mFailed = true;
michael@0:     }
michael@0:
michael@0: private:
michael@0:     uint32_t &mWhiteNodeCount;
michael@0:     bool &mFailed;
michael@0:     bool mWasIncremental;
michael@0: };
michael@0:
michael@0: // Iterate over the WeakMaps. If we mark anything while iterating
michael@0: // over the WeakMaps, we must iterate over all of the WeakMaps again.
michael@0: void
michael@0: nsCycleCollector::ScanWeakMaps()
michael@0: {
michael@0:     bool anyChanged;
michael@0:     bool failed = false;
michael@0:     do {
michael@0:         anyChanged = false;
michael@0:         for (uint32_t i = 0; i < mGraph.mWeakMaps.Length(); i++) {
michael@0:             WeakMapping *wm = &mGraph.mWeakMaps[i];
michael@0:
michael@0:             // If any of these are null, the original object was marked black.
michael@0:             uint32_t mColor = wm->mMap ? wm->mMap->mColor : black;
michael@0:             uint32_t kColor = wm->mKey ? wm->mKey->mColor : black;
michael@0:             uint32_t kdColor = wm->mKeyDelegate ? wm->mKeyDelegate->mColor : black;
michael@0:             uint32_t vColor = wm->mVal ? wm->mVal->mColor : black;
michael@0:
michael@0:             // All non-null weak mapping maps, keys and values are
michael@0:             // roots (in the sense of WalkFromRoots) in the cycle
michael@0:             // collector graph, and thus should have been colored
michael@0:             // either black or white in ScanRoots().
michael@0:             MOZ_ASSERT(mColor != grey, "Uncolored weak map");
michael@0:             MOZ_ASSERT(kColor != grey, "Uncolored weak map key");
michael@0:             MOZ_ASSERT(kdColor != grey, "Uncolored weak map key delegate");
michael@0:             MOZ_ASSERT(vColor != grey, "Uncolored weak map value");
michael@0:
michael@0:             if (mColor == black && kColor != black && kdColor == black) {
michael@0:                 GraphWalker<ScanBlackVisitor>(ScanBlackVisitor(mWhiteNodeCount, failed)).Walk(wm->mKey);
michael@0:                 anyChanged = true;
michael@0:             }
michael@0:
michael@0:             if (mColor == black && kColor == black && vColor != black) {
michael@0:                 GraphWalker<ScanBlackVisitor>(ScanBlackVisitor(mWhiteNodeCount, failed)).Walk(wm->mVal);
michael@0:                 anyChanged = true;
michael@0:             }
michael@0:         }
michael@0:     } while (anyChanged);
michael@0:
michael@0:     if (failed) {
michael@0:         MOZ_ASSERT(false, "Ran out of memory in ScanWeakMaps");
michael@0:         CC_TELEMETRY(_OOM, true);
michael@0:     }
michael@0: }
michael@0:
michael@0: // Flood black from any objects in the purple buffer that are in the CC graph.
michael@0: class PurpleScanBlackVisitor
michael@0: {
michael@0: public:
michael@0:     PurpleScanBlackVisitor(GCGraph &aGraph, nsICycleCollectorListener *aListener,
michael@0:                            uint32_t &aCount, bool &aFailed)
michael@0:       : mGraph(aGraph), mListener(aListener), mCount(aCount), mFailed(aFailed)
michael@0:     {
michael@0:     }
michael@0:
michael@0:     void
michael@0:     Visit(nsPurpleBuffer &aBuffer, nsPurpleBufferEntry *aEntry)
michael@0:     {
michael@0:         MOZ_ASSERT(aEntry->mObject, "Entries with null mObject shouldn't be in the purple buffer.");
michael@0:         MOZ_ASSERT(aEntry->mRefCnt->get() != 0, "Snow-white objects shouldn't be in the purple buffer.");
michael@0:
michael@0:         void *obj = aEntry->mObject;
michael@0:         if (!aEntry->mParticipant) {
michael@0:             obj = CanonicalizeXPCOMParticipant(static_cast<nsISupports*>(obj));
michael@0:             MOZ_ASSERT(obj, "Don't add objects that don't participate in collection!");
michael@0:         }
michael@0:
michael@0:         PtrInfo *pi = mGraph.FindNode(obj);
michael@0:         if (!pi) {
michael@0:             return;
michael@0:         }
michael@0:         MOZ_ASSERT(pi->mParticipant, "No dead objects should be in the purple buffer.");
michael@0:         if (MOZ_UNLIKELY(mListener)) {
michael@0:             mListener->NoteIncrementalRoot((uint64_t)pi->mPointer);
michael@0:         }
michael@0:         if (pi->mColor == black) {
michael@0:             return;
michael@0:         }
michael@0:         GraphWalker<ScanBlackVisitor>(ScanBlackVisitor(mCount, mFailed)).Walk(pi);
michael@0:     }
michael@0:
michael@0: private:
michael@0:     GCGraph &mGraph;
michael@0:     nsICycleCollectorListener *mListener;
michael@0:     uint32_t &mCount;
michael@0:     bool &mFailed;
michael@0: };
michael@0:
michael@0: // Objects that have been stored somewhere since the start of incremental
michael@0: // graph building must be treated as live for this cycle collection, because
michael@0: // we may not have accurate information about who holds references to them.
michael@0: void
michael@0: nsCycleCollector::ScanIncrementalRoots()
michael@0: {
michael@0:     TimeLog timeLog;
michael@0:
michael@0:     // Reference counted objects:
michael@0:     // We cleared the purple buffer at the start of the current ICC, so if a
michael@0:     // refcounted object is purple, it may have been AddRef'd during the
michael@0:     // current ICC. (It may also have only been released.)
michael@0:     // If that is the case, we cannot be sure that the set of things pointing
michael@0:     // to the object in the CC graph is accurate. Therefore, for safety, we
michael@0:     // treat any purple objects as being live during the current CC. We don't
michael@0:     // remove anything from the purple buffer here, so these objects will be
michael@0:     // suspected and freed in the next CC if they are garbage.
michael@0:     bool failed = false;
michael@0:     PurpleScanBlackVisitor purpleScanBlackVisitor(mGraph, mListener, mWhiteNodeCount, failed);
michael@0:     mPurpleBuf.VisitEntries(purpleScanBlackVisitor);
michael@0:     timeLog.Checkpoint("ScanIncrementalRoots::fix purple");
michael@0:
michael@0:     // Garbage collected objects:
michael@0:     // If a GCed object was added to the graph with a refcount of zero, and is
michael@0:     // now marked black by the GC, it was probably gray before and was exposed
michael@0:     // to active JS, so it may have been stored somewhere, so it needs to be
michael@0:     // treated as live.
michael@0:     if (mJSRuntime) {
michael@0:         nsCycleCollectionParticipant *jsParticipant = mJSRuntime->GCThingParticipant();
michael@0:         nsCycleCollectionParticipant *zoneParticipant = mJSRuntime->ZoneParticipant();
michael@0:         NodePool::Enumerator etor(mGraph.mNodes);
michael@0:
michael@0:         while (!etor.IsDone()) {
michael@0:             PtrInfo *pi = etor.GetNext();
michael@0:
michael@0:             // If the refcount is non-zero, pi can't have been a gray JS object.
michael@0:             if (pi->mRefCount != 0) {
michael@0:                 continue;
michael@0:             }
michael@0:
michael@0:             // As an optimization, if an object has already been determined to
michael@0:             // be live, don't consider it further. We can't do this if there is
michael@0:             // a listener, because the listener wants to know the complete set
michael@0:             // of incremental roots.
michael@0:             if (pi->mColor == black && MOZ_LIKELY(!mListener)) {
michael@0:                 continue;
michael@0:             }
michael@0:
michael@0:             // If the object is still marked gray by the GC, nothing could have
michael@0:             // gotten hold of it, so it isn't an incremental root.
michael@0:             if (pi->mParticipant == jsParticipant) {
michael@0:                 if (xpc_GCThingIsGrayCCThing(pi->mPointer)) {
michael@0:                     continue;
michael@0:                 }
michael@0:             } else if (pi->mParticipant == zoneParticipant) {
michael@0:                 JS::Zone *zone = static_cast<JS::Zone*>(pi->mPointer);
michael@0:                 if (js::ZoneGlobalsAreAllGray(zone)) {
michael@0:                     continue;
michael@0:                 }
michael@0:             } else {
michael@0:                 MOZ_ASSERT(false, "Non-JS thing with 0 refcount? Treating as live.");
michael@0:             }
michael@0:
michael@0:             // At this point, pi must be an incremental root.
michael@0:
michael@0:             // If there's a listener, tell it about this root. We don't bother
michael@0:             // with the optimization of skipping the Walk() if pi is black: it
michael@0:             // will just return without doing anything and there's no need to
michael@0:             // make this case faster.
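michael@0:             // (NoteIncrementalRoot feeds only the textual log; as noted in
michael@0:             // the logger above, it has no after-processing form.)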
michael@0:             if (MOZ_UNLIKELY(mListener)) {
michael@0:                 mListener->NoteIncrementalRoot((uint64_t)pi->mPointer);
michael@0:             }
michael@0:
michael@0:             GraphWalker<ScanBlackVisitor>(ScanBlackVisitor(mWhiteNodeCount, failed)).Walk(pi);
michael@0:         }
michael@0:
michael@0:         timeLog.Checkpoint("ScanIncrementalRoots::fix JS");
michael@0:     }
michael@0:
michael@0:     if (failed) {
michael@0:         NS_ASSERTION(false, "Ran out of memory in ScanIncrementalRoots");
michael@0:         CC_TELEMETRY(_OOM, true);
michael@0:     }
michael@0: }
michael@0:
michael@0: void
michael@0: nsCycleCollector::ScanRoots(bool aFullySynchGraphBuild)
michael@0: {
michael@0:     AutoRestore<bool> ar(mScanInProgress);
michael@0:     MOZ_ASSERT(!mScanInProgress);
michael@0:     mScanInProgress = true;
michael@0:     mWhiteNodeCount = 0;
michael@0:     MOZ_ASSERT(mIncrementalPhase == ScanAndCollectWhitePhase);
michael@0:
michael@0:     if (!aFullySynchGraphBuild) {
michael@0:         ScanIncrementalRoots();
michael@0:     }
michael@0:
michael@0:     TimeLog timeLog;
michael@0:
michael@0:     // On the assumption that most nodes will be black, it's
michael@0:     // probably faster to use a GraphWalker than a
michael@0:     // NodePool::Enumerator.
michael@0:     bool failed = false;
michael@0:     scanVisitor sv(mWhiteNodeCount, failed, !aFullySynchGraphBuild);
michael@0:     GraphWalker<scanVisitor>(sv).WalkFromRoots(mGraph);
michael@0:     timeLog.Checkpoint("ScanRoots::WalkFromRoots");
michael@0:
michael@0:     if (failed) {
michael@0:         NS_ASSERTION(false, "Ran out of memory in ScanRoots");
michael@0:         CC_TELEMETRY(_OOM, true);
michael@0:     }
michael@0:
michael@0:     // Scanning weak maps must be done last.
michael@0:     ScanWeakMaps();
michael@0:     timeLog.Checkpoint("ScanRoots::ScanWeakMaps");
michael@0:
michael@0:     if (mListener) {
michael@0:         mListener->BeginResults();
michael@0:
michael@0:         NodePool::Enumerator etor(mGraph.mNodes);
michael@0:         while (!etor.IsDone()) {
michael@0:             PtrInfo *pi = etor.GetNext();
michael@0:             if (!pi->mParticipant) {
michael@0:                 continue;
michael@0:             }
michael@0:             switch (pi->mColor) {
michael@0:             case black:
michael@0:                 if (pi->mRefCount > 0 && pi->mRefCount < UINT32_MAX &&
michael@0:                     pi->mInternalRefs != pi->mRefCount) {
michael@0:                     mListener->DescribeRoot((uint64_t)pi->mPointer,
michael@0:                                             pi->mInternalRefs);
michael@0:                 }
michael@0:                 break;
michael@0:             case white:
michael@0:                 mListener->DescribeGarbage((uint64_t)pi->mPointer);
michael@0:                 break;
michael@0:             case grey:
michael@0:                 // With incremental CC, we can end up with a grey object after
michael@0:                 // scanning if it is only reachable from an object that gets freed.
michael@0:                 break;
michael@0:             }
michael@0:         }
michael@0:
michael@0:         mListener->End();
michael@0:         mListener = nullptr;
michael@0:         timeLog.Checkpoint("ScanRoots::listener");
michael@0:     }
michael@0: }
michael@0:
michael@0:
michael@0: ////////////////////////////////////////////////////////////////////////
michael@0: // Bacon & Rajan's |CollectWhite| routine, somewhat modified.
michael@0: ////////////////////////////////////////////////////////////////////////
michael@0:
michael@0: bool
michael@0: nsCycleCollector::CollectWhite()
michael@0: {
michael@0:     // Explanation of "somewhat modified": we have no way to collect the
michael@0:     // set of whites "all at once", we have to ask each of them to drop
michael@0:     // their outgoing links and assume this will cause the garbage cycle
michael@0:     // to *mostly* self-destruct (except for the reference we continue
michael@0:     // to hold).
michael@0:     //
michael@0:     // To do this "safely" we must make sure that the white nodes we're
michael@0:     // operating on are stable for the duration of our operation.
michael@0:     // So we make 3 sets of calls to language runtimes:
michael@0:     //
michael@0:     //   - Root(whites), which should pin the whites in memory.
michael@0:     //   - Unlink(whites), which drops outgoing links on each white.
michael@0:     //   - Unroot(whites), which returns the whites to normal GC.
michael@0:
michael@0:     TimeLog timeLog;
michael@0:     nsAutoTArray<PtrInfo*, 400> whiteNodes;
michael@0:
michael@0:     MOZ_ASSERT(mIncrementalPhase == ScanAndCollectWhitePhase);
michael@0:
michael@0:     whiteNodes.SetCapacity(mWhiteNodeCount);
michael@0:     uint32_t numWhiteGCed = 0;
michael@0:
michael@0:     NodePool::Enumerator etor(mGraph.mNodes);
michael@0:     while (!etor.IsDone())
michael@0:     {
michael@0:         PtrInfo *pinfo = etor.GetNext();
michael@0:         if (pinfo->mColor == white && pinfo->mParticipant) {
michael@0:             whiteNodes.AppendElement(pinfo);
michael@0:             pinfo->mParticipant->Root(pinfo->mPointer);
michael@0:             if (pinfo->mRefCount == 0) {
michael@0:                 // only JS objects have a refcount of 0
michael@0:                 ++numWhiteGCed;
michael@0:             }
michael@0:         }
michael@0:     }
michael@0:
michael@0:     uint32_t count = whiteNodes.Length();
michael@0:     MOZ_ASSERT(numWhiteGCed <= count,
michael@0:                "More freed GCed nodes than total freed nodes.");
michael@0:     mResults.mFreedRefCounted += count - numWhiteGCed;
michael@0:     mResults.mFreedGCed += numWhiteGCed;
michael@0:
michael@0:     timeLog.Checkpoint("CollectWhite::Root");
michael@0:
michael@0:     if (mBeforeUnlinkCB) {
michael@0:         mBeforeUnlinkCB();
michael@0:         timeLog.Checkpoint("CollectWhite::BeforeUnlinkCB");
michael@0:     }
michael@0:
michael@0:     for (uint32_t i = 0; i < count; ++i) {
michael@0:         PtrInfo *pinfo = whiteNodes.ElementAt(i);
michael@0:         MOZ_ASSERT(pinfo->mParticipant, "Unlink shouldn't see objects removed from graph.");
michael@0:         pinfo->mParticipant->Unlink(pinfo->mPointer);
michael@0: #ifdef DEBUG
michael@0:         if (mJSRuntime) {
michael@0:             mJSRuntime->AssertNoObjectsToTrace(pinfo->mPointer);
michael@0:         }
michael@0: #endif
michael@0:     }
michael@0:     timeLog.Checkpoint("CollectWhite::Unlink");
michael@0:
michael@0:     for (uint32_t i = 0; i < count; ++i) {
michael@0:         PtrInfo *pinfo = whiteNodes.ElementAt(i);
michael@0:         MOZ_ASSERT(pinfo->mParticipant, "Unroot shouldn't see objects removed from graph.");
michael@0:         pinfo->mParticipant->Unroot(pinfo->mPointer);
michael@0:     }
michael@0:     timeLog.Checkpoint("CollectWhite::Unroot");
michael@0:
michael@0:     nsCycleCollector_dispatchDeferredDeletion(false);
michael@0:     mIncrementalPhase = CleanupPhase;
michael@0:
michael@0:     return count > 0;
michael@0: }
michael@0:
michael@0:
michael@0: ////////////////////////
michael@0: // Memory reporting
michael@0: ////////////////////////
michael@0:
michael@0: MOZ_DEFINE_MALLOC_SIZE_OF(CycleCollectorMallocSizeOf)
michael@0:
michael@0: NS_IMETHODIMP
michael@0: nsCycleCollector::CollectReports(nsIHandleReportCallback* aHandleReport,
michael@0:                                  nsISupports* aData)
michael@0: {
michael@0:     size_t objectSize, graphNodesSize, graphEdgesSize, weakMapsSize,
michael@0:            purpleBufferSize;
michael@0:     SizeOfIncludingThis(CycleCollectorMallocSizeOf,
michael@0:                         &objectSize,
michael@0:                         &graphNodesSize, &graphEdgesSize,
michael@0:                         &weakMapsSize,
michael@0:                         &purpleBufferSize);
michael@0:
michael@0: #define REPORT(_path, _amount, _desc) \
michael@0:     do { \
michael@0:         size_t amount = _amount;  /* evaluate |_amount| only once */ \
michael@0:         if (amount > 0) { \
michael@0:             nsresult rv; \
michael@0:             rv = aHandleReport->Callback(EmptyCString(), \
michael@0:                                          NS_LITERAL_CSTRING(_path), \
michael@0:                                          KIND_HEAP, UNITS_BYTES, amount, \
michael@0:                                          NS_LITERAL_CSTRING(_desc), \
michael@0:                                          aData); \
michael@0:             if (NS_WARN_IF(NS_FAILED(rv))) \
michael@0:                 return rv; \
michael@0:         } \
michael@0:     } while (0)
michael@0:
michael@0:     REPORT("explicit/cycle-collector/collector-object", objectSize,
michael@0:            "Memory used for the cycle collector object itself.");
michael@0:
michael@0:     REPORT("explicit/cycle-collector/graph-nodes", graphNodesSize,
michael@0:            "Memory used for the nodes of the cycle collector's graph. "
michael@0:            "This should be zero when the collector is idle.");
michael@0:
michael@0:     REPORT("explicit/cycle-collector/graph-edges", graphEdgesSize,
michael@0:            "Memory used for the edges of the cycle collector's graph. "
michael@0:            "This should be zero when the collector is idle.");
michael@0:
michael@0:     REPORT("explicit/cycle-collector/weak-maps", weakMapsSize,
michael@0:            "Memory used for the representation of weak maps in the "
michael@0:            "cycle collector's graph. "
michael@0:            "This should be zero when the collector is idle.");
michael@0:
michael@0:     REPORT("explicit/cycle-collector/purple-buffer", purpleBufferSize,
michael@0:            "Memory used for the cycle collector's purple buffer.");
michael@0:
michael@0: #undef REPORT
michael@0:
michael@0:     return NS_OK;
michael@0: }
michael@0:
michael@0:
michael@0: ////////////////////////////////////////////////////////////////////////
michael@0: // Collector implementation
michael@0: ////////////////////////////////////////////////////////////////////////
michael@0:
michael@0: nsCycleCollector::nsCycleCollector() :
michael@0:     mActivelyCollecting(false),
michael@0:     mFreeingSnowWhite(false),
michael@0:     mScanInProgress(false),
michael@0:     mJSRuntime(nullptr),
michael@0:     mIncrementalPhase(IdlePhase),
michael@0:     mThread(NS_GetCurrentThread()),
michael@0:     mWhiteNodeCount(0),
michael@0:     mBeforeUnlinkCB(nullptr),
michael@0:     mForgetSkippableCB(nullptr),
michael@0:     mUnmergedNeeded(0),
michael@0:     mMergedInARow(0),
michael@0:     mJSPurpleBuffer(nullptr)
michael@0: {
michael@0: }
michael@0:
michael@0: nsCycleCollector::~nsCycleCollector()
michael@0: {
michael@0:     UnregisterWeakMemoryReporter(this);
michael@0: }
michael@0:
michael@0: void
michael@0: nsCycleCollector::RegisterJSRuntime(CycleCollectedJSRuntime *aJSRuntime)
michael@0: {
michael@0:     if (mJSRuntime)
michael@0:         Fault("multiple registrations of cycle collector JS runtime", aJSRuntime);
michael@0:
michael@0:     mJSRuntime = aJSRuntime;
michael@0:
michael@0:     // We can't register as a reporter in nsCycleCollector() because that runs
michael@0:     // before the memory reporter manager is initialized. So we do it here
michael@0:     // instead.
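michael@0:     // The static flag keeps us from registering the reporter twice if a
michael@0:     // runtime is registered, forgotten, and then registered again.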
michael@0:     static bool registered = false;
michael@0:     if (!registered) {
michael@0:         RegisterWeakMemoryReporter(this);
michael@0:         registered = true;
michael@0:     }
michael@0: }
michael@0:
michael@0: void
michael@0: nsCycleCollector::ForgetJSRuntime()
michael@0: {
michael@0:     if (!mJSRuntime)
michael@0:         Fault("forgetting non-registered cycle collector JS runtime");
michael@0:
michael@0:     mJSRuntime = nullptr;
michael@0: }
michael@0:
michael@0: #ifdef DEBUG
michael@0: static bool
michael@0: HasParticipant(void *aPtr, nsCycleCollectionParticipant *aParti)
michael@0: {
michael@0:     if (aParti) {
michael@0:         return true;
michael@0:     }
michael@0:
michael@0:     nsXPCOMCycleCollectionParticipant *xcp;
michael@0:     ToParticipant(static_cast<nsISupports*>(aPtr), &xcp);
michael@0:     return xcp != nullptr;
michael@0: }
michael@0: #endif
michael@0:
michael@0: MOZ_ALWAYS_INLINE void
michael@0: nsCycleCollector::Suspect(void *aPtr, nsCycleCollectionParticipant *aParti,
michael@0:                           nsCycleCollectingAutoRefCnt *aRefCnt)
michael@0: {
michael@0:     CheckThreadSafety();
michael@0:
michael@0:     // Re-entering ::Suspect during collection used to be a fault, but
michael@0:     // we are canonicalizing nsISupports pointers using QI, so we will
michael@0:     // see some spurious refcount traffic here.
michael@0:
michael@0:     if (MOZ_UNLIKELY(mScanInProgress)) {
michael@0:         return;
michael@0:     }
michael@0:
michael@0:     MOZ_ASSERT(aPtr, "Don't suspect null pointers");
michael@0:
michael@0:     MOZ_ASSERT(HasParticipant(aPtr, aParti),
michael@0:                "Suspected nsISupports pointer must QI to nsXPCOMCycleCollectionParticipant");
michael@0:
michael@0:     mPurpleBuf.Put(aPtr, aParti, aRefCnt);
michael@0: }
michael@0:
michael@0: void
michael@0: nsCycleCollector::CheckThreadSafety()
michael@0: {
michael@0: #ifdef DEBUG
michael@0:     nsIThread* currentThread = NS_GetCurrentThread();
michael@0:     // XXXkhuey we can be called so late in shutdown that NS_GetCurrentThread
michael@0:     // returns null (after the thread manager has shut down)
michael@0:     MOZ_ASSERT(mThread == currentThread || !currentThread);
michael@0: #endif
michael@0: }
michael@0:
michael@0: // The cycle collector uses the mark bitmap to discover what JS objects
michael@0: // were reachable only from XPConnect roots that might participate in
michael@0: // cycles. We ask the JS runtime whether we need to force a GC before
michael@0: // this CC. It returns true on startup (before the mark bits have been set),
michael@0: // and also when UnmarkGray has run out of stack. We also force GCs on shut
michael@0: // down to collect cycles involving both DOM and JS.
michael@0: void
michael@0: nsCycleCollector::FixGrayBits(bool aForceGC)
michael@0: {
michael@0:     CheckThreadSafety();
michael@0:
michael@0:     if (!mJSRuntime)
michael@0:         return;
michael@0:
michael@0:     if (!aForceGC) {
michael@0:         mJSRuntime->FixWeakMappingGrayBits();
michael@0:
michael@0:         bool needGC = mJSRuntime->NeedCollect();
michael@0:         // Only do a telemetry ping for non-shutdown CCs.
michael@0:         CC_TELEMETRY(_NEED_GC, needGC);
michael@0:         if (!needGC)
michael@0:             return;
michael@0:         mResults.mForcedGC = true;
michael@0:     }
michael@0:
michael@0:     TimeLog timeLog;
michael@0:     mJSRuntime->Collect(aForceGC ?
// The cycle collector uses the mark bitmap to discover what JS objects
// were reachable only from XPConnect roots that might participate in
// cycles. We ask the JS runtime whether we need to force a GC before
// this CC. It returns true on startup (before the mark bits have been set),
// and also when UnmarkGray has run out of stack. We also force GCs on
// shutdown to collect cycles involving both DOM and JS.
void
nsCycleCollector::FixGrayBits(bool aForceGC)
{
    CheckThreadSafety();

    if (!mJSRuntime)
        return;

    if (!aForceGC) {
        mJSRuntime->FixWeakMappingGrayBits();

        bool needGC = mJSRuntime->NeedCollect();
        // Only do a telemetry ping for non-shutdown CCs.
        CC_TELEMETRY(_NEED_GC, needGC);
        if (!needGC)
            return;
        mResults.mForcedGC = true;
    }

    TimeLog timeLog;
    mJSRuntime->Collect(aForceGC ? JS::gcreason::SHUTDOWN_CC :
                                   JS::gcreason::CC_FORCED);
    timeLog.Checkpoint("GC()");
}

void
nsCycleCollector::CleanupAfterCollection()
{
    MOZ_ASSERT(mIncrementalPhase == CleanupPhase);
    mGraph.Clear();

    uint32_t interval = (uint32_t) ((TimeStamp::Now() - mCollectionStart).ToMilliseconds());
#ifdef COLLECT_TIME_DEBUG
    printf("cc: total cycle collector time was %ums\n", interval);
    printf("cc: visited %u ref counted and %u GCed objects, freed %u ref counted and %u GCed objects",
           mResults.mVisitedRefCounted, mResults.mVisitedGCed,
           mResults.mFreedRefCounted, mResults.mFreedGCed);
    uint32_t numVisited = mResults.mVisitedRefCounted + mResults.mVisitedGCed;
    if (numVisited > 1000) {
        uint32_t numFreed = mResults.mFreedRefCounted + mResults.mFreedGCed;
        printf(" (%u%%)", 100 * numFreed / numVisited);
    }
    printf(".\ncc: \n");
#endif
    CC_TELEMETRY( , interval);
    CC_TELEMETRY(_VISITED_REF_COUNTED, mResults.mVisitedRefCounted);
    CC_TELEMETRY(_VISITED_GCED, mResults.mVisitedGCed);
    CC_TELEMETRY(_COLLECTED, mWhiteNodeCount);

    if (mJSRuntime) {
        mJSRuntime->EndCycleCollectionCallback(mResults);
    }
    mIncrementalPhase = IdlePhase;
}

void
nsCycleCollector::ShutdownCollect()
{
    SliceBudget unlimitedBudget;
    uint32_t i;
    for (i = 0; i < DEFAULT_SHUTDOWN_COLLECTIONS; ++i) {
        if (!Collect(ShutdownCC, unlimitedBudget, nullptr)) {
            break;
        }
    }
    NS_WARN_IF_FALSE(i < NORMAL_SHUTDOWN_COLLECTIONS, "Extra shutdown CC");
}

static void
PrintPhase(const char *aPhase)
{
#ifdef DEBUG_PHASES
    printf("cc: begin %s on %s\n", aPhase,
           NS_IsMainThread() ? "mainthread" : "worker");
#endif
}
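// Illustrative sketch (hypothetical caller, not part of this file): the
// scheduler drives Collect() below in budgeted slices, resuming the saved
// mIncrementalPhase on each call. The 10ms figure is an arbitrary example:
//
//   SliceBudget budget = SliceBudget::TimeBudget(10);  // ~10ms of work
//   collector->Collect(SliceCC, budget, nullptr);      // runs until over budget
//   // ...later slices call Collect(SliceCC, ...) again until the
//   // CleanupPhase completes and the phase returns to IdlePhase.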
bool
nsCycleCollector::Collect(ccType aCCType,
                          SliceBudget &aBudget,
                          nsICycleCollectorListener *aManualListener)
{
    CheckThreadSafety();

    // This can legitimately happen in a few cases. See bug 383651.
    if (mActivelyCollecting || mFreeingSnowWhite) {
        return false;
    }
    mActivelyCollecting = true;

    bool startedIdle = (mIncrementalPhase == IdlePhase);
    bool collectedAny = false;

    // If the CC started idle, it will call BeginCollection, which
    // will do FreeSnowWhite, so it doesn't need to be done here.
    if (!startedIdle) {
        FreeSnowWhite(true);
    }

    bool finished = false;
    do {
        switch (mIncrementalPhase) {
        case IdlePhase:
            PrintPhase("BeginCollection");
            BeginCollection(aCCType, aManualListener);
            break;
        case GraphBuildingPhase:
            PrintPhase("MarkRoots");
            MarkRoots(aBudget);
            break;
        case ScanAndCollectWhitePhase:
            // We do ScanRoots and CollectWhite in a single slice to ensure
            // that we won't unlink a live object if a weak reference is
            // promoted to a strong reference after ScanRoots has finished.
            // See bug 926533.
            PrintPhase("ScanRoots");
            ScanRoots(startedIdle);
            PrintPhase("CollectWhite");
            collectedAny = CollectWhite();
            break;
        case CleanupPhase:
            PrintPhase("CleanupAfterCollection");
            CleanupAfterCollection();
            finished = true;
            break;
        }
    } while (!aBudget.checkOverBudget() && !finished);

    // Clear mActivelyCollecting here to ensure that a recursive call to
    // Collect() does something.
    mActivelyCollecting = false;

    if (aCCType != SliceCC && !startedIdle) {
        // We were in the middle of an incremental CC (using its own listener).
        // Somebody has forced a CC, so after having finished out the current
        // CC, run the CC again using the new listener.
        MOZ_ASSERT(mIncrementalPhase == IdlePhase);
        if (Collect(aCCType, aBudget, aManualListener)) {
            collectedAny = true;
        }
    }

    MOZ_ASSERT_IF(aCCType != SliceCC, mIncrementalPhase == IdlePhase);

    return collectedAny;
}

// Any JS objects we have in the graph could die when we GC, but we
// don't want to abandon the current CC, because the graph contains
// information about purple roots. So we synchronously finish off
// the current CC.
void
nsCycleCollector::PrepareForGarbageCollection()
{
    if (mIncrementalPhase == IdlePhase) {
        MOZ_ASSERT(mGraph.IsEmpty(), "Non-empty graph when idle");
        MOZ_ASSERT(!mBuilder, "Non-null builder when idle");
        if (mJSPurpleBuffer) {
            mJSPurpleBuffer->Destroy();
        }
        return;
    }

    SliceBudget unlimitedBudget;
    PrintPhase("PrepareForGarbageCollection");
    // Use SliceCC because we only want to finish the CC in progress.
    Collect(SliceCC, unlimitedBudget, nullptr);
    MOZ_ASSERT(mIncrementalPhase == IdlePhase);
}
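// Phase machine at a glance, as driven by Collect()'s loop above (a
// summary of existing behavior, not new behavior):
//
//   IdlePhase
//     --BeginCollection-->         GraphBuildingPhase
//     --MarkRoots (budgeted)-->    ScanAndCollectWhitePhase
//     --ScanRoots+CollectWhite-->  CleanupPhase
//     --CleanupAfterCollection-->  IdlePhase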
// Don't merge too many times in a row, and do at least a minimum
// number of unmerged CCs in a row.
static const uint32_t kMinConsecutiveUnmerged = 3;
static const uint32_t kMaxConsecutiveMerged = 3;

bool
nsCycleCollector::ShouldMergeZones(ccType aCCType)
{
    if (!mJSRuntime) {
        return false;
    }

    MOZ_ASSERT(mUnmergedNeeded <= kMinConsecutiveUnmerged);
    MOZ_ASSERT(mMergedInARow <= kMaxConsecutiveMerged);

    if (mMergedInARow == kMaxConsecutiveMerged) {
        MOZ_ASSERT(mUnmergedNeeded == 0);
        mUnmergedNeeded = kMinConsecutiveUnmerged;
    }

    if (mUnmergedNeeded > 0) {
        mUnmergedNeeded--;
        mMergedInARow = 0;
        return false;
    }

    if (aCCType == SliceCC && mJSRuntime->UsefulToMergeZones()) {
        mMergedInARow++;
        return true;
    } else {
        mMergedInARow = 0;
        return false;
    }
}
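// Worked example of the cadence ShouldMergeZones() enforces above
// (kMaxConsecutiveMerged == 3, kMinConsecutiveUnmerged == 3): if merging
// keeps looking useful, the per-CC decisions run
//
//   merged, merged, merged, unmerged, unmerged, unmerged, merged, ...
//
// so a precise, unmerged scan happens at least once in every six CCs.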
void
nsCycleCollector::BeginCollection(ccType aCCType,
                                  nsICycleCollectorListener *aManualListener)
{
    TimeLog timeLog;
    MOZ_ASSERT(mIncrementalPhase == IdlePhase);

    mCollectionStart = TimeStamp::Now();

    if (mJSRuntime) {
        mJSRuntime->BeginCycleCollectionCallback();
        timeLog.Checkpoint("BeginCycleCollectionCallback()");
    }

    bool isShutdown = (aCCType == ShutdownCC);

    // Set up the listener for this CC.
    MOZ_ASSERT_IF(isShutdown, !aManualListener);
    MOZ_ASSERT(!mListener, "Forgot to clear a previous listener?");
    mListener = aManualListener;
    aManualListener = nullptr;
    if (!mListener && mParams.LogThisCC(isShutdown)) {
        nsRefPtr<nsCycleCollectorLogger> logger = new nsCycleCollectorLogger();
        if (mParams.AllTracesThisCC(isShutdown)) {
            logger->SetAllTraces();
        }
        mListener = logger.forget();
    }

    bool forceGC = isShutdown;
    if (!forceGC && mListener) {
        // On a WantAllTraces CC, force a synchronous global GC to prevent
        // hijinks from ForgetSkippable and compartmental GCs.
        mListener->GetWantAllTraces(&forceGC);
    }
    FixGrayBits(forceGC);

    FreeSnowWhite(true);

    if (mListener && NS_FAILED(mListener->Begin())) {
        mListener = nullptr;
    }

    // Set up the data structures for building the graph.
    mGraph.Init();
    mResults.Init();
    bool mergeZones = ShouldMergeZones(aCCType);
    mResults.mMergedZones = mergeZones;

    MOZ_ASSERT(!mBuilder, "Forgot to clear mBuilder");
    mBuilder = new GCGraphBuilder(mGraph, mResults, mJSRuntime, mListener, mergeZones);

    if (mJSRuntime) {
        mJSRuntime->TraverseRoots(*mBuilder);
        timeLog.Checkpoint("mJSRuntime->TraverseRoots()");
    }

    AutoRestore<bool> ar(mScanInProgress);
    MOZ_ASSERT(!mScanInProgress);
    mScanInProgress = true;
    mPurpleBuf.SelectPointers(*mBuilder);
    timeLog.Checkpoint("SelectPointers()");

    // We've finished adding roots, and everything in the graph is a root.
    mGraph.mRootCount = mGraph.MapCount();

    mCurrNode = new NodePool::Enumerator(mGraph.mNodes);
    mIncrementalPhase = GraphBuildingPhase;
}

uint32_t
nsCycleCollector::SuspectedCount()
{
    CheckThreadSafety();
    return mPurpleBuf.Count();
}

void
nsCycleCollector::Shutdown()
{
    CheckThreadSafety();

    // Always delete snow white objects.
    FreeSnowWhite(true);

#ifndef DEBUG
    if (PR_GetEnv("MOZ_CC_RUN_DURING_SHUTDOWN"))
#endif
    {
        ShutdownCollect();
    }
}

void
nsCycleCollector::RemoveObjectFromGraph(void *aObj)
{
    if (mIncrementalPhase == IdlePhase) {
        return;
    }

    if (PtrInfo *pinfo = mGraph.FindNode(aObj)) {
        mGraph.RemoveNodeFromMap(aObj);

        pinfo->mPointer = nullptr;
        pinfo->mParticipant = nullptr;
    }
}

void
nsCycleCollector::SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf,
                                      size_t *aObjectSize,
                                      size_t *aGraphNodesSize,
                                      size_t *aGraphEdgesSize,
                                      size_t *aWeakMapsSize,
                                      size_t *aPurpleBufferSize) const
{
    *aObjectSize = aMallocSizeOf(this);

    mGraph.SizeOfExcludingThis(aMallocSizeOf, aGraphNodesSize, aGraphEdgesSize,
                               aWeakMapsSize);

    *aPurpleBufferSize = mPurpleBuf.SizeOfExcludingThis(aMallocSizeOf);

    // These fields are deliberately not measured:
    // - mJSRuntime: because it's non-owning and measured by JS reporters.
    // - mParams: because it only contains scalars.
}

JSPurpleBuffer*
nsCycleCollector::GetJSPurpleBuffer()
{
    if (!mJSPurpleBuffer) {
        // JSPurpleBuffer keeps itself alive, but we need to create it in
        // such a way that it ends up in the normal purple buffer. That
        // happens when the nsRefPtr goes out of scope and calls Release.
        nsRefPtr<JSPurpleBuffer> pb = new JSPurpleBuffer(mJSPurpleBuffer);
    }
    return mJSPurpleBuffer;
}
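// Sketch of the trick above (the constructor details are an assumption:
// this relies on JSPurpleBuffer addrefing itself and publishing |this|
// through mJSPurpleBuffer): while |pb| is alive the refcount is 2; when
// |pb| goes out of scope, Release() makes a nonzero N+1 -> N transition
// (2 -> 1), which suspects the buffer and places it in the ordinary
// purple buffer, exactly as for any other cycle-collected object.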
////////////////////////////////////////////////////////////////////////
// Module public API (exported in nsCycleCollector.h)
// Just functions that redirect into the singleton, once it's built.
////////////////////////////////////////////////////////////////////////

void
nsCycleCollector_registerJSRuntime(CycleCollectedJSRuntime *rt)
{
    CollectorData *data = sCollectorData.get();

    // We should have started the cycle collector by now.
    MOZ_ASSERT(data);
    MOZ_ASSERT(data->mCollector);
    // But we shouldn't already have a runtime.
    MOZ_ASSERT(!data->mRuntime);

    data->mRuntime = rt;
    data->mCollector->RegisterJSRuntime(rt);
}

void
nsCycleCollector_forgetJSRuntime()
{
    CollectorData *data = sCollectorData.get();

    // We should have started the cycle collector by now.
    MOZ_ASSERT(data);
    // And we shouldn't have already forgotten our runtime.
    MOZ_ASSERT(data->mRuntime);

    // But it may have shut down already.
    if (data->mCollector) {
        data->mCollector->ForgetJSRuntime();
        data->mRuntime = nullptr;
    } else {
        data->mRuntime = nullptr;
        delete data;
        sCollectorData.set(nullptr);
    }
}

/* static */ CycleCollectedJSRuntime*
CycleCollectedJSRuntime::Get()
{
    CollectorData* data = sCollectorData.get();
    if (data) {
        return data->mRuntime;
    }
    return nullptr;
}


namespace mozilla {
namespace cyclecollector {

void
HoldJSObjectsImpl(void* aHolder, nsScriptObjectTracer* aTracer)
{
    CollectorData* data = sCollectorData.get();

    // We should have started the cycle collector by now.
    MOZ_ASSERT(data);
    MOZ_ASSERT(data->mCollector);
    // And we should have a runtime.
    MOZ_ASSERT(data->mRuntime);

    data->mRuntime->AddJSHolder(aHolder, aTracer);
}

void
HoldJSObjectsImpl(nsISupports* aHolder)
{
    nsXPCOMCycleCollectionParticipant* participant;
    CallQueryInterface(aHolder, &participant);
    MOZ_ASSERT(participant, "Failed to QI to nsXPCOMCycleCollectionParticipant!");
    MOZ_ASSERT(participant->CheckForRightISupports(aHolder),
               "The result of QIing a JS holder should be the same as ToSupports");

    HoldJSObjectsImpl(aHolder, participant);
}

void
DropJSObjectsImpl(void* aHolder)
{
    CollectorData* data = sCollectorData.get();

    // We should have started the cycle collector by now, and not completely
    // shut down.
    MOZ_ASSERT(data);
    // And we should have a runtime.
    MOZ_ASSERT(data->mRuntime);

    data->mRuntime->RemoveJSHolder(aHolder);
}

void
DropJSObjectsImpl(nsISupports* aHolder)
{
#ifdef DEBUG
    nsXPCOMCycleCollectionParticipant* participant;
    CallQueryInterface(aHolder, &participant);
    MOZ_ASSERT(participant, "Failed to QI to nsXPCOMCycleCollectionParticipant!");
    MOZ_ASSERT(participant->CheckForRightISupports(aHolder),
               "The result of QIing a JS holder should be the same as ToSupports");
#endif
    DropJSObjectsImpl(static_cast<void*>(aHolder));
}
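// Illustrative usage sketch (hypothetical class, simplified): a native
// object that stores JS values registers itself as a JS holder so its
// JS::Heap<> members get traced, and must unregister before it dies:
//
//   class ExampleHolder {  // assumes a cycle collection participant
//                          // with a Trace hook covering mCallback
//       JS::Heap<JSObject*> mCallback;
//   public:
//       ExampleHolder() { mozilla::HoldJSObjects(this); }
//       ~ExampleHolder() { mozilla::DropJSObjects(this); }
//   };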
#ifdef DEBUG
bool
IsJSHolder(void* aHolder)
{
    CollectorData *data = sCollectorData.get();

    // We should have started the cycle collector by now, and not completely
    // shut down.
    MOZ_ASSERT(data);
    // And we should have a runtime.
    MOZ_ASSERT(data->mRuntime);

    return data->mRuntime->IsJSHolder(aHolder);
}
#endif

void
DeferredFinalize(nsISupports* aSupports)
{
    CollectorData *data = sCollectorData.get();

    // We should have started the cycle collector by now, and not completely
    // shut down.
    MOZ_ASSERT(data);
    // And we should have a runtime.
    MOZ_ASSERT(data->mRuntime);

    data->mRuntime->DeferredFinalize(aSupports);
}

void
DeferredFinalize(DeferredFinalizeAppendFunction aAppendFunc,
                 DeferredFinalizeFunction aFunc,
                 void* aThing)
{
    CollectorData *data = sCollectorData.get();

    // We should have started the cycle collector by now, and not completely
    // shut down.
    MOZ_ASSERT(data);
    // And we should have a runtime.
    MOZ_ASSERT(data->mRuntime);

    data->mRuntime->DeferredFinalize(aAppendFunc, aFunc, aThing);
}

} // namespace cyclecollector
} // namespace mozilla


MOZ_NEVER_INLINE static void
SuspectAfterShutdown(void* n, nsCycleCollectionParticipant* cp,
                     nsCycleCollectingAutoRefCnt* aRefCnt,
                     bool* aShouldDelete)
{
    if (aRefCnt->get() == 0) {
        if (!aShouldDelete) {
            // The CC is shut down, so we can't be in the middle of an ICC.
            CanonicalizeParticipant(&n, &cp);
            aRefCnt->stabilizeForDeletion();
            cp->DeleteCycleCollectable(n);
        } else {
            *aShouldDelete = true;
        }
    } else {
        // Make sure we'll get called again.
        aRefCnt->RemoveFromPurpleBuffer();
    }
}

void
NS_CycleCollectorSuspect3(void *n, nsCycleCollectionParticipant *cp,
                          nsCycleCollectingAutoRefCnt *aRefCnt,
                          bool* aShouldDelete)
{
    CollectorData *data = sCollectorData.get();

    // We should have started the cycle collector by now.
    MOZ_ASSERT(data);

    if (MOZ_LIKELY(data->mCollector)) {
        data->mCollector->Suspect(n, cp, aRefCnt);
        return;
    }
    SuspectAfterShutdown(n, cp, aRefCnt, aShouldDelete);
}
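// Contract sketch for the post-shutdown path above (the caller side is an
// assumption inferred from how aShouldDelete is used here): a caller that
// cannot safely run the destructor right away passes a non-null bool and
// performs the deletion itself once it is safe to do so:
//
//   bool shouldDelete = false;
//   NS_CycleCollectorSuspect3(obj, cp, &refCnt, &shouldDelete);
//   if (shouldDelete) {
//       // hypothetical follow-up at a safe point:
//       cp->DeleteCycleCollectable(obj);
//   }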
uint32_t
nsCycleCollector_suspectedCount()
{
    CollectorData *data = sCollectorData.get();

    // We should have started the cycle collector by now.
    MOZ_ASSERT(data);

    if (!data->mCollector) {
        return 0;
    }

    return data->mCollector->SuspectedCount();
}

bool
nsCycleCollector_init()
{
    MOZ_ASSERT(NS_IsMainThread(), "Wrong thread!");
    MOZ_ASSERT(!sCollectorData.initialized(), "Called twice!?");

    return sCollectorData.init();
}

void
nsCycleCollector_startup()
{
    MOZ_ASSERT(sCollectorData.initialized(),
               "Forgot to call nsCycleCollector_init!");
    if (sCollectorData.get()) {
        MOZ_CRASH();
    }

    CollectorData* data = new CollectorData;
    data->mCollector = new nsCycleCollector();
    data->mRuntime = nullptr;

    sCollectorData.set(data);
}

void
nsCycleCollector_setBeforeUnlinkCallback(CC_BeforeUnlinkCallback aCB)
{
    CollectorData *data = sCollectorData.get();

    // We should have started the cycle collector by now.
    MOZ_ASSERT(data);
    MOZ_ASSERT(data->mCollector);

    data->mCollector->SetBeforeUnlinkCallback(aCB);
}

void
nsCycleCollector_setForgetSkippableCallback(CC_ForgetSkippableCallback aCB)
{
    CollectorData *data = sCollectorData.get();

    // We should have started the cycle collector by now.
    MOZ_ASSERT(data);
    MOZ_ASSERT(data->mCollector);

    data->mCollector->SetForgetSkippableCallback(aCB);
}

void
nsCycleCollector_forgetSkippable(bool aRemoveChildlessNodes,
                                 bool aAsyncSnowWhiteFreeing)
{
    CollectorData *data = sCollectorData.get();

    // We should have started the cycle collector by now.
    MOZ_ASSERT(data);
    MOZ_ASSERT(data->mCollector);

    PROFILER_LABEL("CC", "nsCycleCollector_forgetSkippable");
    TimeLog timeLog;
    data->mCollector->ForgetSkippable(aRemoveChildlessNodes,
                                      aAsyncSnowWhiteFreeing);
    timeLog.Checkpoint("ForgetSkippable()");
}

void
nsCycleCollector_dispatchDeferredDeletion(bool aContinuation)
{
    CollectorData *data = sCollectorData.get();

    if (!data || !data->mRuntime) {
        return;
    }

    data->mRuntime->DispatchDeferredDeletion(aContinuation);
}

bool
nsCycleCollector_doDeferredDeletion()
{
    CollectorData *data = sCollectorData.get();

    // We should have started the cycle collector by now.
    MOZ_ASSERT(data);
    MOZ_ASSERT(data->mCollector);
    MOZ_ASSERT(data->mRuntime);

    return data->mCollector->FreeSnowWhite(false);
}
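// Illustrative call (hypothetical trigger, e.g. a debugging UI): a manual,
// run-to-completion CC via nsCycleCollector_collect() below, with no
// logging listener attached:
//
//   nsCycleCollector_collect(nullptr);  // ManualCC, unlimited budget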
void
nsCycleCollector_collect(nsICycleCollectorListener *aManualListener)
{
    CollectorData *data = sCollectorData.get();

    // We should have started the cycle collector by now.
    MOZ_ASSERT(data);
    MOZ_ASSERT(data->mCollector);

    PROFILER_LABEL("CC", "nsCycleCollector_collect");
    SliceBudget unlimitedBudget;
    data->mCollector->Collect(ManualCC, unlimitedBudget, aManualListener);
}

void
nsCycleCollector_collectSlice(int64_t aSliceTime)
{
    CollectorData *data = sCollectorData.get();

    // We should have started the cycle collector by now.
    MOZ_ASSERT(data);
    MOZ_ASSERT(data->mCollector);

    PROFILER_LABEL("CC", "nsCycleCollector_collectSlice");
    SliceBudget budget;
    if (aSliceTime > 0) {
        budget = SliceBudget::TimeBudget(aSliceTime);
    } else if (aSliceTime == 0) {
        budget = SliceBudget::WorkBudget(1);
    }
    data->mCollector->Collect(SliceCC, budget, nullptr);
}

void
nsCycleCollector_prepareForGarbageCollection()
{
    CollectorData *data = sCollectorData.get();

    MOZ_ASSERT(data);

    if (!data->mCollector) {
        return;
    }

    data->mCollector->PrepareForGarbageCollection();
}

void
nsCycleCollector_shutdown()
{
    CollectorData *data = sCollectorData.get();

    if (data) {
        MOZ_ASSERT(data->mCollector);
        PROFILER_LABEL("CC", "nsCycleCollector_shutdown");
        data->mCollector->Shutdown();
        data->mCollector = nullptr;
        if (!data->mRuntime) {
            delete data;
            sCollectorData.set(nullptr);
        }
    }
}
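// Budget selection in nsCycleCollector_collectSlice() above: a positive
// aSliceTime is a time budget in milliseconds; zero asks for a minimal
// one-unit work budget (a single step); a negative value leaves the
// default-constructed SliceBudget unlimited, so the slice runs to
// completion.
//
// Illustrative embedder sequence (hypothetical and simplified; real
// scheduling lives in the browser's CC/GC scheduler):
//
//   nsCycleCollector_init();             // once, on the main thread
//   nsCycleCollector_startup();          // create this thread's collector
//   ...
//   nsCycleCollector_collectSlice(10);   // budgeted ~10ms slices
//   ...
//   nsCycleCollector_shutdown();         // final shutdown CCs, then teardown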