Wed, 31 Dec 2014 06:09:35 +0100
Cloned upstream origin tor-browser at tor-browser-31.3.0esr-4.5-1-build1,
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f, for hacking purposes.
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
/* vim: set ts=8 sts=4 et sw=4 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

//
// This file implements a garbage-cycle collector based on the paper
//
//   Concurrent Cycle Collection in Reference Counted Systems
//   Bacon & Rajan (2001), ECOOP 2001 / Springer LNCS vol 2072
//
// We are not using the concurrent or acyclic cases of that paper; so
// the green, red and orange colors are not used.
//
// The collector is based on tracking pointers of four colors:
//
// Black nodes are definitely live. If we ever determine that a node is
// black, it's OK to forget about it and drop it from our records.
//
// White nodes are definitely garbage cycles. Once we finish our
// scanning, we unlink all the white nodes and expect that by
// unlinking them they will self-destruct (since a garbage cycle is
// only keeping itself alive with internal links, by definition).
//
// Snow-white is an addition to the original algorithm. A snow-white
// object has a reference count of zero and is just waiting for deletion.
//
// Grey nodes are being scanned. Nodes that turn grey will turn
// either black if we determine that they're live, or white if we
// determine that they're a garbage cycle. After the main collection
// algorithm there should be no grey nodes.
//
// Purple nodes are *candidates* for being scanned. They are nodes we
// haven't begun scanning yet because they're not old enough, or we're
// still partway through the algorithm.
//
// XPCOM objects participating in garbage-cycle collection are obliged
// to inform us when they ought to turn purple; that is, when their
// refcount transitions from N+1 -> N, for nonzero N. Furthermore we
// require that *after* an XPCOM object has informed us of turning
// purple, it will tell us when it either transitions back to being
// black (incremented refcount) or is ultimately deleted.
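//
// Illustrative sketch (editorial addition, not part of the original file):
// a minimal refcounted object honoring the notification contract above.
// `SuspectForCC` is a hypothetical stand-in for nsCycleCollector::suspect.
#if 0
#include <cstdint>

void SuspectForCC(void* aObj);             // hypothetical collector hook

struct RefCounted
{
    uint32_t mRefCnt;
    RefCounted() : mRefCnt(0) {}
    virtual ~RefCounted() {}

    void AddRef() { ++mRefCnt; }           // may turn the node black again
    void Release()
    {
        if (--mRefCnt == 0) {
            delete this;                   // snow-white: refcount hit zero
        } else {
            SuspectForCC(this);            // N+1 -> N, N nonzero: turn purple
        }
    }
};
#endif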

// Incremental cycle collection
//
// Beyond the simple state machine required to implement incremental
// collection, the CC needs to be able to compensate for things the browser
// is doing during the collection. There are two kinds of problems. For each
// of these, there are two cases to deal with: purple-buffered C++ objects
// and JS objects.

// The first problem is that an object in the CC's graph can become garbage.
// This is bad because the CC touches the objects in its graph at every
// stage of its operation.
//
// All cycle collected C++ objects that die during a cycle collection
// will end up actually getting deleted by the SnowWhiteKiller. Before
// the SWK deletes an object, it checks if an ICC is running, and if so,
// if the object is in the graph. If it is, the CC clears mPointer and
// mParticipant so it does not point to the raw object any more. Because
// objects could die any time the CC returns to the mutator, any time the CC
// accesses a PtrInfo it must perform a null check on mParticipant to
// ensure the object has not gone away.
//
// JS objects don't always run finalizers, so the CC can't remove them from
// the graph when they die. Fortunately, JS objects can only die during a GC,
// so if a GC is begun during an ICC, the browser synchronously finishes off
// the ICC, which clears the entire CC graph. If the GC and CC are scheduled
// properly, this should be rare.
//
// The second problem is that objects in the graph can be changed, say by
// being addrefed or released, or by having a field updated, after the object
// has been added to the graph. The problem is that ICC can miss a newly
// created reference to an object, and end up unlinking an object that is
// actually alive.
//
// The basic idea of the solution, from "An on-the-fly Reference Counting
// Garbage Collector for Java" by Levanoni and Petrank, is to notice if an
// object has had an additional reference to it created during the collection,
// and if so, don't collect it during the current collection. This avoids having
// to rerun the scan as in Bacon & Rajan 2001.
//
// For cycle collected C++ objects, we modify AddRef to place the object in
// the purple buffer, in addition to Release. Then, in the CC, we treat any
// objects in the purple buffer as being alive, after graph building has
// completed. Because they are in the purple buffer, they will be suspected
// in the next CC, so there's no danger of leaks. This is imprecise, because
// we will treat as live an object that has been Released but not AddRefed
// during graph building, but that's probably rare enough that the additional
// bookkeeping overhead is not worthwhile.
//
// For JS objects, the cycle collector is only looking at gray objects. If a
// gray object is touched during ICC, it will be made black by UnmarkGray.
// Thus, if a JS object has become black during the ICC, we treat it as live.
// Merged JS zones have to be handled specially: we scan all zone globals.
// If any are black, we treat the zone as being black.

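// Illustrative sketch (editorial addition): the Levanoni & Petrank-style
// barrier described above, in hypothetical standalone form. Any node whose
// object was AddRefed (and hence purple-buffered) after graph building is
// conservatively treated as live for this collection.
#if 0
#include <cstddef>

struct SketchNode
{
    bool mInPurpleBuffer;   // object was suspected during the ICC
    bool mWhite;            // scan decided this node is garbage
};

// Runs after graph building, before any unlinking.
void MarkPurpleBufferedAsLive(SketchNode** aNodes, size_t aCount)
{
    for (size_t i = 0; i < aCount; ++i) {
        if (aNodes[i]->mInPurpleBuffer) {
            aNodes[i]->mWhite = false;   // don't unlink; re-examine next CC
        }
    }
}
#endif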


// Safety
//
// An XPCOM object is either scan-safe or scan-unsafe, purple-safe or
// purple-unsafe.
//
// An nsISupports object is scan-safe if:
//
//  - It can be QI'ed to |nsXPCOMCycleCollectionParticipant|, though
//    this operation loses ISupports identity (like nsIClassInfo).
//  - Additionally, the operation |traverse| on the resulting
//    nsXPCOMCycleCollectionParticipant does not cause *any* refcount
//    adjustment to occur (no AddRef / Release calls).
//
// A non-nsISupports ("native") object is scan-safe by explicitly
// providing its nsCycleCollectionParticipant.
//
// An object is purple-safe if it satisfies the following properties:
//
//  - The object is scan-safe.
//
// When we receive a pointer |ptr| via
// |nsCycleCollector::suspect(ptr)|, we assume it is purple-safe. We
// can check the scan-safety, but have no way to ensure the
// purple-safety; objects must obey, or else the entire system falls
// apart. Don't involve an object in this scheme if you can't
// guarantee its purple-safety. The easiest way to ensure that an
// object is purple-safe is to use nsCycleCollectingAutoRefCnt.
//
// When we have a scannable set of purple nodes ready, we begin
// our walks. During the walks, the nodes we |traverse| should only
// feed us more scan-safe nodes, and should not adjust the refcounts
// of those nodes.
//
// We do not |AddRef| or |Release| any objects during scanning. We
// rely on the purple-safety of the roots that call |suspect| to
// hold, such that we will clear the pointer from the purple buffer
// entry to the object before it is destroyed. The pointers that are
// merely scan-safe we hold only for the duration of scanning, and
// there should be no objects released from the scan-safe set during
// the scan.
//
// We *do* call |Root| and |Unroot| on every white object, on
// either side of the calls to |Unlink|. This keeps the set of white
// objects alive during the unlinking.
//

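// Illustrative sketch (editorial addition): the Root/Unlink/Unroot sequence
// described above, in hypothetical standalone form. Every white node is
// rooted before any Unlink runs and unrooted only afterwards, so unlinking
// one node cannot destroy another mid-pass.
#if 0
#include <cstddef>

struct SketchParticipant
{
    virtual void Root(void* aPtr) = 0;
    virtual void Unlink(void* aPtr) = 0;
    virtual void Unroot(void* aPtr) = 0;
};

struct SketchWhiteNode
{
    void* mPointer;
    SketchParticipant* mParticipant;
};

void CollectWhiteSketch(SketchWhiteNode* aNodes, size_t aCount)
{
    for (size_t i = 0; i < aCount; ++i)
        aNodes[i].mParticipant->Root(aNodes[i].mPointer);
    for (size_t i = 0; i < aCount; ++i)
        aNodes[i].mParticipant->Unlink(aNodes[i].mPointer);
    for (size_t i = 0; i < aCount; ++i)
        aNodes[i].mParticipant->Unroot(aNodes[i].mPointer);
}
#endif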
#if !defined(__MINGW32__)
#ifdef WIN32
#include <crtdbg.h>
#include <errno.h>
#endif
#endif

#include "base/process_util.h"

#include "mozilla/ArrayUtils.h"
#include "mozilla/AutoRestore.h"
#include "mozilla/CycleCollectedJSRuntime.h"
#include "mozilla/HoldDropJSObjects.h"
/* This must occur *after* base/process_util.h to avoid typedef conflicts. */
#include "mozilla/MemoryReporting.h"
#include "mozilla/LinkedList.h"

#include "nsCycleCollectionParticipant.h"
#include "nsCycleCollectionNoteRootCallback.h"
#include "nsDeque.h"
#include "nsCycleCollector.h"
#include "nsThreadUtils.h"
#include "nsXULAppAPI.h"
#include "prenv.h"
#include "nsPrintfCString.h"
#include "nsTArray.h"
#include "nsIConsoleService.h"
#include "mozilla/Attributes.h"
#include "nsICycleCollectorListener.h"
#include "nsIMemoryReporter.h"
#include "nsIFile.h"
#include "nsDumpUtils.h"
#include "xpcpublic.h"
#include "GeckoProfiler.h"
#include "js/SliceBudget.h"
#include <stdint.h>
#include <stdio.h>

#include "mozilla/Likely.h"
#include "mozilla/PoisonIOInterposer.h"
#include "mozilla/Telemetry.h"
#include "mozilla/ThreadLocal.h"

using namespace mozilla;

//#define COLLECT_TIME_DEBUG

// Enable assertions that are useful for diagnosing errors in graph construction.
//#define DEBUG_CC_GRAPH

#define DEFAULT_SHUTDOWN_COLLECTIONS 5

// One to do the freeing, then another to detect there is no more work to do.
#define NORMAL_SHUTDOWN_COLLECTIONS 2

// Cycle collector environment variables
//
// MOZ_CC_LOG_ALL: If defined, always log cycle collector heaps.
//
// MOZ_CC_LOG_SHUTDOWN: If defined, log cycle collector heaps at shutdown.
//
// MOZ_CC_LOG_THREAD: If set to "main", only automatically log main thread
// CCs. If set to "worker", only automatically log worker CCs. If set to "all",
// log either. The default value is "all". This must be used with either
// MOZ_CC_LOG_ALL or MOZ_CC_LOG_SHUTDOWN for it to do anything.
//
// MOZ_CC_LOG_PROCESS: If set to "main", only automatically log main process
// CCs. If set to "content", only automatically log tab CCs. If set to
// "plugins", only automatically log plugin CCs. If set to "all", log
// everything. The default value is "all". This must be used with either
// MOZ_CC_LOG_ALL or MOZ_CC_LOG_SHUTDOWN for it to do anything.
//
// MOZ_CC_ALL_TRACES: If set to "all", any cycle collector
// logging done will be WantAllTraces, which disables
// various cycle collector optimizations to give a fuller picture of
// the heap. If set to "shutdown", only shutdown logging will be WantAllTraces.
// The default is none.
//
// MOZ_CC_RUN_DURING_SHUTDOWN: In non-DEBUG builds, if this is set,
// run cycle collections at shutdown.
//
// MOZ_CC_LOG_DIRECTORY: The directory in which logs are placed (such as
// logs from MOZ_CC_LOG_ALL and MOZ_CC_LOG_SHUTDOWN, or other uses
// of nsICycleCollectorListener).

// Various parameters of this collector can be tuned using environment
// variables.

struct nsCycleCollectorParams
{
    bool mLogAll;
    bool mLogShutdown;
    bool mAllTracesAll;
    bool mAllTracesShutdown;
    bool mLogThisThread;

    nsCycleCollectorParams() :
        mLogAll      (PR_GetEnv("MOZ_CC_LOG_ALL") != nullptr),
        mLogShutdown (PR_GetEnv("MOZ_CC_LOG_SHUTDOWN") != nullptr),
        mAllTracesAll(false),
        mAllTracesShutdown(false)
    {
        const char* logThreadEnv = PR_GetEnv("MOZ_CC_LOG_THREAD");
        bool threadLogging = true;
        if (logThreadEnv && !!strcmp(logThreadEnv, "all")) {
            if (NS_IsMainThread()) {
                threadLogging = !strcmp(logThreadEnv, "main");
            } else {
                threadLogging = !strcmp(logThreadEnv, "worker");
            }
        }

        const char* logProcessEnv = PR_GetEnv("MOZ_CC_LOG_PROCESS");
        bool processLogging = true;
        if (logProcessEnv && !!strcmp(logProcessEnv, "all")) {
            switch (XRE_GetProcessType()) {
                case GeckoProcessType_Default:
                    processLogging = !strcmp(logProcessEnv, "main");
                    break;
                case GeckoProcessType_Plugin:
                    processLogging = !strcmp(logProcessEnv, "plugins");
                    break;
                case GeckoProcessType_Content:
                    processLogging = !strcmp(logProcessEnv, "content");
                    break;
                default:
                    processLogging = false;
                    break;
            }
        }
        mLogThisThread = threadLogging && processLogging;

        const char* allTracesEnv = PR_GetEnv("MOZ_CC_ALL_TRACES");
        if (allTracesEnv) {
            if (!strcmp(allTracesEnv, "all")) {
                mAllTracesAll = true;
            } else if (!strcmp(allTracesEnv, "shutdown")) {
                mAllTracesShutdown = true;
            }
        }
    }

    bool LogThisCC(bool aIsShutdown)
    {
        return (mLogAll || (aIsShutdown && mLogShutdown)) && mLogThisThread;
    }

    bool AllTracesThisCC(bool aIsShutdown)
    {
        return mAllTracesAll || (aIsShutdown && mAllTracesShutdown);
    }
};

#ifdef COLLECT_TIME_DEBUG
class TimeLog
{
public:
    TimeLog() : mLastCheckpoint(TimeStamp::Now()) {}

    void
    Checkpoint(const char* aEvent)
    {
        TimeStamp now = TimeStamp::Now();
        uint32_t dur = (uint32_t) ((now - mLastCheckpoint).ToMilliseconds());
        if (dur > 0) {
            printf("cc: %s took %dms\n", aEvent, dur);
        }
        mLastCheckpoint = now;
    }

private:
    TimeStamp mLastCheckpoint;
};
#else
class TimeLog
{
public:
    TimeLog() {}
    void Checkpoint(const char* aEvent) {}
};
#endif
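// Usage sketch (editorial addition): with COLLECT_TIME_DEBUG defined,
// TimeLog prints the time elapsed between checkpoints; otherwise the
// calls compile away to no-ops, so instrumentation can stay in place.
#if 0
void ExampleTimedPhases()
{
    TimeLog timeLog;
    // ... build the graph ...
    timeLog.Checkpoint("graph building");
    // ... scan the roots ...
    timeLog.Checkpoint("root scanning");
}
#endif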


////////////////////////////////////////////////////////////////////////
// Base types
////////////////////////////////////////////////////////////////////////

struct PtrInfo;

class EdgePool
{
public:
    // EdgePool allocates arrays of void*, primarily to hold PtrInfo*.
    // However, at the end of a block, the last two pointers are a null
    // and then a void** pointing to the next block. This allows
    // EdgePool::Iterators to be a single word but still capable of crossing
    // block boundaries.

    EdgePool()
    {
        mSentinelAndBlocks[0].block = nullptr;
        mSentinelAndBlocks[1].block = nullptr;
    }

    ~EdgePool()
    {
        MOZ_ASSERT(!mSentinelAndBlocks[0].block &&
                   !mSentinelAndBlocks[1].block,
                   "Didn't call Clear()?");
    }

    void Clear()
    {
        Block *b = Blocks();
        while (b) {
            Block *next = b->Next();
            delete b;
            b = next;
        }

        mSentinelAndBlocks[0].block = nullptr;
        mSentinelAndBlocks[1].block = nullptr;
    }

#ifdef DEBUG
    bool IsEmpty()
    {
        return !mSentinelAndBlocks[0].block &&
               !mSentinelAndBlocks[1].block;
    }
#endif

private:
    struct Block;
    union PtrInfoOrBlock {
        // Use a union to avoid reinterpret_cast and the ensuing
        // potential aliasing bugs.
        PtrInfo *ptrInfo;
        Block *block;
    };
    struct Block {
        enum { BlockSize = 16 * 1024 };

        PtrInfoOrBlock mPointers[BlockSize];
        Block() {
            mPointers[BlockSize - 2].block = nullptr; // sentinel
            mPointers[BlockSize - 1].block = nullptr; // next block pointer
        }
        Block*& Next()          { return mPointers[BlockSize - 1].block; }
        PtrInfoOrBlock* Start() { return &mPointers[0]; }
        PtrInfoOrBlock* End()   { return &mPointers[BlockSize - 2]; }
    };

    // Store the null sentinel so that we can have valid iterators
    // before adding any edges and without adding any blocks.
    PtrInfoOrBlock mSentinelAndBlocks[2];

    Block*& Blocks()      { return mSentinelAndBlocks[1].block; }
    Block* Blocks() const { return mSentinelAndBlocks[1].block; }

public:
    class Iterator
    {
    public:
        Iterator() : mPointer(nullptr) {}
        Iterator(PtrInfoOrBlock *aPointer) : mPointer(aPointer) {}
        Iterator(const Iterator& aOther) : mPointer(aOther.mPointer) {}

        Iterator& operator++()
        {
            if (mPointer->ptrInfo == nullptr) {
                // A null pointer is a sentinel for the link to the next block.
                mPointer = (mPointer + 1)->block->mPointers;
            }
            ++mPointer;
            return *this;
        }

        PtrInfo* operator*() const
        {
            if (mPointer->ptrInfo == nullptr) {
                // A null pointer is a sentinel for the link to the next block.
                return (mPointer + 1)->block->mPointers->ptrInfo;
            }
            return mPointer->ptrInfo;
        }
        bool operator==(const Iterator& aOther) const
        { return mPointer == aOther.mPointer; }
        bool operator!=(const Iterator& aOther) const
        { return mPointer != aOther.mPointer; }

#ifdef DEBUG_CC_GRAPH
        bool Initialized() const
        {
            return mPointer != nullptr;
        }
#endif

    private:
        PtrInfoOrBlock *mPointer;
    };

    class Builder;
    friend class Builder;
    class Builder {
    public:
        Builder(EdgePool &aPool)
            : mCurrent(&aPool.mSentinelAndBlocks[0]),
              mBlockEnd(&aPool.mSentinelAndBlocks[0]),
              mNextBlockPtr(&aPool.Blocks())
        {
        }

        Iterator Mark() { return Iterator(mCurrent); }

        void Add(PtrInfo* aEdge) {
            if (mCurrent == mBlockEnd) {
                Block *b = new Block();
                *mNextBlockPtr = b;
                mCurrent = b->Start();
                mBlockEnd = b->End();
                mNextBlockPtr = &b->Next();
            }
            (mCurrent++)->ptrInfo = aEdge;
        }
    private:
        // mBlockEnd points to space for null sentinel
        PtrInfoOrBlock *mCurrent, *mBlockEnd;
        Block **mNextBlockPtr;
    };

    size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const {
        size_t n = 0;
        Block *b = Blocks();
        while (b) {
            n += aMallocSizeOf(b);
            b = b->Next();
        }
        return n;
    }
};
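// Usage sketch (editorial addition): how the collector is expected to use
// EdgePool. Each node's outgoing edges are the Builder::Mark() iterator
// range taken before and after appending them; operator++ silently hops
// the null sentinel at a block boundary, which is why one word of state
// per Iterator suffices.
#if 0
void ExampleEdgeRange(EdgePool& aPool, PtrInfo* aChildA, PtrInfo* aChildB)
{
    EdgePool::Builder builder(aPool);
    EdgePool::Iterator first = builder.Mark();   // start of this node's edges
    builder.Add(aChildA);
    builder.Add(aChildB);
    EdgePool::Iterator last = builder.Mark();    // one past the last edge

    for (EdgePool::Iterator i = first; i != last; ++i) {
        PtrInfo* child = *i;                     // visit each outgoing edge
        (void) child;
    }
}
#endif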

#ifdef DEBUG_CC_GRAPH
#define CC_GRAPH_ASSERT(b) MOZ_ASSERT(b)
#else
#define CC_GRAPH_ASSERT(b)
#endif

#define CC_TELEMETRY(_name, _value)                                            \
    PR_BEGIN_MACRO                                                             \
    if (NS_IsMainThread()) {                                                   \
        Telemetry::Accumulate(Telemetry::CYCLE_COLLECTOR##_name, _value);      \
    } else {                                                                   \
        Telemetry::Accumulate(Telemetry::CYCLE_COLLECTOR_WORKER##_name, _value); \
    }                                                                          \
    PR_END_MACRO

enum NodeColor { black, white, grey };

// This structure should be kept as small as possible; we may expect
// hundreds of thousands of them to be allocated and touched
// repeatedly during each cycle collection.

struct PtrInfo
{
    void *mPointer;
    nsCycleCollectionParticipant *mParticipant;
    uint32_t mColor : 2;
    uint32_t mInternalRefs : 30;
    uint32_t mRefCount;
private:
    EdgePool::Iterator mFirstChild;

public:

    PtrInfo(void *aPointer, nsCycleCollectionParticipant *aParticipant)
        : mPointer(aPointer),
          mParticipant(aParticipant),
          mColor(grey),
          mInternalRefs(0),
          mRefCount(UINT32_MAX - 1),
          mFirstChild()
    {
        // We initialize mRefCount to a large non-zero value so
        // that it doesn't look like a JS object to the cycle collector
        // in the case where the object dies before being traversed.

        MOZ_ASSERT(aParticipant);
    }

    // Allow NodePool::Block's constructor to compile.
    PtrInfo() {
        NS_NOTREACHED("should never be called");
    }

    EdgePool::Iterator FirstChild()
    {
        CC_GRAPH_ASSERT(mFirstChild.Initialized());
        return mFirstChild;
    }

    // this PtrInfo must be part of a NodePool
    EdgePool::Iterator LastChild()
    {
        CC_GRAPH_ASSERT((this + 1)->mFirstChild.Initialized());
        return (this + 1)->mFirstChild;
    }

    void SetFirstChild(EdgePool::Iterator aFirstChild)
    {
        CC_GRAPH_ASSERT(aFirstChild.Initialized());
        mFirstChild = aFirstChild;
    }

    // this PtrInfo must be part of a NodePool
    void SetLastChild(EdgePool::Iterator aLastChild)
    {
        CC_GRAPH_ASSERT(aLastChild.Initialized());
        (this + 1)->mFirstChild = aLastChild;
    }
};

/**
 * A structure designed to be used like a linked list of PtrInfo, except
 * that it allocates the PtrInfo 32K at a time.
 */
class NodePool
{
private:
    enum { BlockSize = 8 * 1024 }; // could be int template parameter

    struct Block {
        // We create and destroy Block using NS_Alloc/NS_Free rather
        // than new and delete to avoid calling its constructor and
        // destructor.
        Block()  { NS_NOTREACHED("should never be called"); }
        ~Block() { NS_NOTREACHED("should never be called"); }

        Block* mNext;
        PtrInfo mEntries[BlockSize + 1]; // +1 to store last child of last node
    };

public:
    NodePool()
        : mBlocks(nullptr),
          mLast(nullptr)
    {
    }

    ~NodePool()
    {
        MOZ_ASSERT(!mBlocks, "Didn't call Clear()?");
    }

    void Clear()
    {
        Block *b = mBlocks;
        while (b) {
            Block *n = b->mNext;
            NS_Free(b);
            b = n;
        }

        mBlocks = nullptr;
        mLast = nullptr;
    }

#ifdef DEBUG
    bool IsEmpty()
    {
        return !mBlocks && !mLast;
    }
#endif

    class Builder;
    friend class Builder;
    class Builder {
    public:
        Builder(NodePool& aPool)
            : mNextBlock(&aPool.mBlocks),
              mNext(aPool.mLast),
              mBlockEnd(nullptr)
        {
            MOZ_ASSERT(aPool.mBlocks == nullptr && aPool.mLast == nullptr,
                       "pool not empty");
        }
        PtrInfo *Add(void *aPointer, nsCycleCollectionParticipant *aParticipant)
        {
            if (mNext == mBlockEnd) {
                Block *block = static_cast<Block*>(NS_Alloc(sizeof(Block)));
                *mNextBlock = block;
                mNext = block->mEntries;
                mBlockEnd = block->mEntries + BlockSize;
                block->mNext = nullptr;
                mNextBlock = &block->mNext;
            }
            return new (mNext++) PtrInfo(aPointer, aParticipant);
        }
    private:
        Block **mNextBlock;
        PtrInfo *&mNext;
        PtrInfo *mBlockEnd;
    };

    class Enumerator;
    friend class Enumerator;
    class Enumerator {
    public:
        Enumerator(NodePool& aPool)
            : mFirstBlock(aPool.mBlocks),
              mCurBlock(nullptr),
              mNext(nullptr),
              mBlockEnd(nullptr),
              mLast(aPool.mLast)
        {
        }

        bool IsDone() const
        {
            return mNext == mLast;
        }

        bool AtBlockEnd() const
        {
            return mNext == mBlockEnd;
        }

        PtrInfo* GetNext()
        {
            MOZ_ASSERT(!IsDone(), "calling GetNext when done");
            if (mNext == mBlockEnd) {
                Block *nextBlock = mCurBlock ? mCurBlock->mNext : mFirstBlock;
                mNext = nextBlock->mEntries;
                mBlockEnd = mNext + BlockSize;
                mCurBlock = nextBlock;
            }
            return mNext++;
        }
    private:
        // mFirstBlock is a reference to allow an Enumerator to be constructed
        // for an empty graph.
        Block *&mFirstBlock;
        Block *mCurBlock;
        // mNext is the next value we want to return, unless mNext == mBlockEnd.
        // NB: mLast is a reference to allow enumerating while building!
        PtrInfo *mNext, *mBlockEnd, *&mLast;
    };

    size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const {
        // We don't measure the things pointed to by mEntries[] because those
        // pointers are non-owning.
        size_t n = 0;
        Block *b = mBlocks;
        while (b) {
            n += aMallocSizeOf(b);
            b = b->mNext;
        }
        return n;
    }

private:
    Block *mBlocks;
    PtrInfo *mLast;
};
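// Usage sketch (editorial addition): nodes are appended through a Builder,
// and an Enumerator can walk the pool even while it is still being built,
// because both hold references into the live pool state (mBlocks, mLast).
// The participant argument is whatever nsCycleCollectionParticipant the
// caller already has for the object.
#if 0
void ExampleNodeWalk(NodePool& aPool, void* aPtr,
                     nsCycleCollectionParticipant* aParticipant)
{
    NodePool::Builder builder(aPool);            // pool must be empty here
    PtrInfo* pi = builder.Add(aPtr, aParticipant);
    (void) pi;

    NodePool::Enumerator etor(aPool);
    while (!etor.IsDone()) {
        PtrInfo* node = etor.GetNext();          // nodes in insertion order
        (void) node;
    }
}
#endif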


// Declarations for mPtrToNodeMap.

struct PtrToNodeEntry : public PLDHashEntryHdr
{
    // The key is mNode->mPointer
    PtrInfo *mNode;
};

static bool
PtrToNodeMatchEntry(PLDHashTable *table,
                    const PLDHashEntryHdr *entry,
                    const void *key)
{
    const PtrToNodeEntry *n = static_cast<const PtrToNodeEntry*>(entry);
    return n->mNode->mPointer == key;
}

static PLDHashTableOps PtrNodeOps = {
    PL_DHashAllocTable,
    PL_DHashFreeTable,
    PL_DHashVoidPtrKeyStub,
    PtrToNodeMatchEntry,
    PL_DHashMoveEntryStub,
    PL_DHashClearEntryStub,
    PL_DHashFinalizeStub,
    nullptr
};


struct WeakMapping
{
    // map and key will be null if the corresponding objects are GC marked
    PtrInfo *mMap;
    PtrInfo *mKey;
    PtrInfo *mKeyDelegate;
    PtrInfo *mVal;
};

class GCGraphBuilder;

struct GCGraph
{
    NodePool mNodes;
    EdgePool mEdges;
    nsTArray<WeakMapping> mWeakMaps;
    uint32_t mRootCount;

private:
    PLDHashTable mPtrToNodeMap;

public:
    GCGraph() : mRootCount(0)
    {
        mPtrToNodeMap.ops = nullptr;
    }

    ~GCGraph()
    {
        if (mPtrToNodeMap.ops) {
            PL_DHashTableFinish(&mPtrToNodeMap);
        }
    }

    void Init()
    {
        MOZ_ASSERT(IsEmpty(), "Failed to call GCGraph::Clear");
        PL_DHashTableInit(&mPtrToNodeMap, &PtrNodeOps, nullptr,
                          sizeof(PtrToNodeEntry), 32768);
    }

    void Clear()
    {
        mNodes.Clear();
        mEdges.Clear();
        mWeakMaps.Clear();
        mRootCount = 0;
        PL_DHashTableFinish(&mPtrToNodeMap);
        mPtrToNodeMap.ops = nullptr;
    }

#ifdef DEBUG
    bool IsEmpty()
    {
        return mNodes.IsEmpty() && mEdges.IsEmpty() &&
               mWeakMaps.IsEmpty() && mRootCount == 0 &&
               !mPtrToNodeMap.ops;
    }
#endif

    PtrInfo* FindNode(void *aPtr);
    PtrToNodeEntry* AddNodeToMap(void *aPtr);
    void RemoveNodeFromMap(void *aPtr);

    uint32_t MapCount() const
    {
        return mPtrToNodeMap.entryCount;
    }

    void SizeOfExcludingThis(MallocSizeOf aMallocSizeOf,
                             size_t *aNodesSize, size_t *aEdgesSize,
                             size_t *aWeakMapsSize) const {
        *aNodesSize = mNodes.SizeOfExcludingThis(aMallocSizeOf);
        *aEdgesSize = mEdges.SizeOfExcludingThis(aMallocSizeOf);

        // We don't measure what the WeakMappings point to, because the
        // pointers are non-owning.
        *aWeakMapsSize = mWeakMaps.SizeOfExcludingThis(aMallocSizeOf);
    }
};

PtrInfo*
GCGraph::FindNode(void *aPtr)
{
    PtrToNodeEntry *e =
        static_cast<PtrToNodeEntry*>(PL_DHashTableOperate(&mPtrToNodeMap, aPtr,
                                                          PL_DHASH_LOOKUP));
    if (!PL_DHASH_ENTRY_IS_BUSY(e)) {
        return nullptr;
    }
    return e->mNode;
}

PtrToNodeEntry*
GCGraph::AddNodeToMap(void *aPtr)
{
    PtrToNodeEntry *e =
        static_cast<PtrToNodeEntry*>(PL_DHashTableOperate(&mPtrToNodeMap, aPtr,
                                                          PL_DHASH_ADD));
    if (!e) {
        // Caller should track OOMs
        return nullptr;
    }
    return e;
}

void
GCGraph::RemoveNodeFromMap(void *aPtr)
{
    PL_DHashTableOperate(&mPtrToNodeMap, aPtr, PL_DHASH_REMOVE);
}


static nsISupports *
CanonicalizeXPCOMParticipant(nsISupports *in)
{
    nsISupports* out;
    in->QueryInterface(NS_GET_IID(nsCycleCollectionISupports),
                       reinterpret_cast<void**>(&out));
    return out;
}

static inline void
ToParticipant(nsISupports *s, nsXPCOMCycleCollectionParticipant **cp);

static void
CanonicalizeParticipant(void **parti, nsCycleCollectionParticipant **cp)
{
    // If the participant is null, this is an nsISupports participant,
    // so we must QI to get the real participant.

    if (!*cp) {
        nsISupports *nsparti = static_cast<nsISupports*>(*parti);
        nsparti = CanonicalizeXPCOMParticipant(nsparti);
        NS_ASSERTION(nsparti,
                     "Don't add objects that don't participate in collection!");
        nsXPCOMCycleCollectionParticipant *xcp;
        ToParticipant(nsparti, &xcp);
        *parti = nsparti;
        *cp = xcp;
    }
}

struct nsPurpleBufferEntry {
    union {
        void *mObject;                        // when low bit unset
        nsPurpleBufferEntry *mNextInFreeList; // when low bit set
    };

    nsCycleCollectingAutoRefCnt *mRefCnt;

    nsCycleCollectionParticipant *mParticipant; // nullptr for nsISupports
};
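// Illustrative sketch (editorial addition): the low-bit tagging that lets
// nsPurpleBufferEntry overlay its free list on the entry storage. A free
// entry stores the next free entry with the low bit set, which is safe
// because real object pointers are at least word-aligned. Helper names
// here are hypothetical.
#if 0
#include <cstdint>

inline bool IsFreeEntry(const nsPurpleBufferEntry* e)
{
    return (uintptr_t(e->mObject) & uintptr_t(1)) != 0;
}

inline nsPurpleBufferEntry* TagFree(nsPurpleBufferEntry* aNext)
{
    return (nsPurpleBufferEntry*)(uintptr_t(aNext) | uintptr_t(1));
}

inline nsPurpleBufferEntry* UntagFree(nsPurpleBufferEntry* aTagged)
{
    return (nsPurpleBufferEntry*)(uintptr_t(aTagged) & ~uintptr_t(1));
}
#endif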

class nsCycleCollector;

struct nsPurpleBuffer
{
private:
    struct Block {
        Block *mNext;
        // Try to match the size of a jemalloc bucket, to minimize slop bytes.
        // - On 32-bit platforms sizeof(nsPurpleBufferEntry) is 12, so mEntries
        //   is 16,380 bytes, which leaves 4 bytes for mNext.
        // - On 64-bit platforms sizeof(nsPurpleBufferEntry) is 24, so mEntries
        //   is 32,760 bytes, which leaves 8 bytes for mNext.
        nsPurpleBufferEntry mEntries[1365];

        Block() : mNext(nullptr) {
            // Ensure Block is the right size (see above).
            static_assert(
                sizeof(Block) == 16384 ||       // 32-bit
                sizeof(Block) == 32768,         // 64-bit
                "ill-sized nsPurpleBuffer::Block"
            );
        }

        template <class PurpleVisitor>
        void VisitEntries(nsPurpleBuffer &aBuffer, PurpleVisitor &aVisitor)
        {
            nsPurpleBufferEntry *eEnd = ArrayEnd(mEntries);
            for (nsPurpleBufferEntry *e = mEntries; e != eEnd; ++e) {
                if (!(uintptr_t(e->mObject) & uintptr_t(1))) {
                    aVisitor.Visit(aBuffer, e);
                }
            }
        }
    };
    // This class wraps a linked list of the elements in the purple
    // buffer.

    uint32_t mCount;
    Block mFirstBlock;
    nsPurpleBufferEntry *mFreeList;

public:
    nsPurpleBuffer()
    {
        InitBlocks();
    }

    ~nsPurpleBuffer()
    {
        FreeBlocks();
    }

    template <class PurpleVisitor>
    void VisitEntries(PurpleVisitor &aVisitor)
    {
        for (Block *b = &mFirstBlock; b; b = b->mNext) {
            b->VisitEntries(*this, aVisitor);
        }
    }

    void InitBlocks()
    {
        mCount = 0;
        mFreeList = nullptr;
        StartBlock(&mFirstBlock);
    }

    void StartBlock(Block *aBlock)
    {
        NS_ABORT_IF_FALSE(!mFreeList, "should not have free list");

        // Put all the entries in the block on the free list.
        nsPurpleBufferEntry *entries = aBlock->mEntries;
        mFreeList = entries;
        for (uint32_t i = 1; i < ArrayLength(aBlock->mEntries); ++i) {
            entries[i - 1].mNextInFreeList =
                (nsPurpleBufferEntry*)(uintptr_t(entries + i) | 1);
        }
        entries[ArrayLength(aBlock->mEntries) - 1].mNextInFreeList =
            (nsPurpleBufferEntry*)1;
    }

    void FreeBlocks()
    {
        if (mCount > 0)
            UnmarkRemainingPurple(&mFirstBlock);
        Block *b = mFirstBlock.mNext;
        while (b) {
            if (mCount > 0)
                UnmarkRemainingPurple(b);
            Block *next = b->mNext;
            delete b;
            b = next;
        }
        mFirstBlock.mNext = nullptr;
    }

    struct UnmarkRemainingPurpleVisitor
    {
        void
        Visit(nsPurpleBuffer &aBuffer, nsPurpleBufferEntry *aEntry)
        {
            if (aEntry->mRefCnt) {
                aEntry->mRefCnt->RemoveFromPurpleBuffer();
                aEntry->mRefCnt = nullptr;
            }
            aEntry->mObject = nullptr;
            --aBuffer.mCount;
        }
    };

    void UnmarkRemainingPurple(Block *b)
    {
        UnmarkRemainingPurpleVisitor visitor;
        b->VisitEntries(*this, visitor);
    }

    void SelectPointers(GCGraphBuilder &builder);

    // RemoveSkippable removes entries from the purple buffer synchronously
    // (1) if aAsyncSnowWhiteFreeing is false and nsPurpleBufferEntry::mRefCnt
    //     is 0 or
    // (2) if the object's nsXPCOMCycleCollectionParticipant::CanSkip()
    //     returns true or
    // (3) if nsPurpleBufferEntry::mRefCnt->IsPurple() is false.
    // (4) If removeChildlessNodes is true, then any nodes in the purple buffer
    //     that will have no children in the cycle collector graph will also be
    //     removed. CanSkip() may be run on these children.
    void RemoveSkippable(nsCycleCollector* aCollector,
                         bool removeChildlessNodes,
                         bool aAsyncSnowWhiteFreeing,
                         CC_ForgetSkippableCallback aCb);

    MOZ_ALWAYS_INLINE nsPurpleBufferEntry* NewEntry()
    {
        if (MOZ_UNLIKELY(!mFreeList)) {
            Block *b = new Block;
            StartBlock(b);

            // Add the new block as the second block in the list.
            b->mNext = mFirstBlock.mNext;
            mFirstBlock.mNext = b;
        }

        nsPurpleBufferEntry *e = mFreeList;
        mFreeList = (nsPurpleBufferEntry*)
            (uintptr_t(mFreeList->mNextInFreeList) & ~uintptr_t(1));
        return e;
    }

    MOZ_ALWAYS_INLINE void Put(void *p, nsCycleCollectionParticipant *cp,
                               nsCycleCollectingAutoRefCnt *aRefCnt)
    {
        nsPurpleBufferEntry *e = NewEntry();

        ++mCount;

        e->mObject = p;
        e->mRefCnt = aRefCnt;
        e->mParticipant = cp;
    }

    void Remove(nsPurpleBufferEntry *e)
    {
        MOZ_ASSERT(mCount != 0, "must have entries");

        if (e->mRefCnt) {
            e->mRefCnt->RemoveFromPurpleBuffer();
            e->mRefCnt = nullptr;
        }
        e->mNextInFreeList =
            (nsPurpleBufferEntry*)(uintptr_t(mFreeList) | uintptr_t(1));
        mFreeList = e;

        --mCount;
    }

    uint32_t Count() const
    {
        return mCount;
    }

    size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const
    {
        size_t n = 0;

        // Don't measure mFirstBlock because it's within |this|.
        const Block *block = mFirstBlock.mNext;
        while (block) {
            n += aMallocSizeOf(block);
            block = block->mNext;
        }

        // mFreeList is deliberately not measured because it points into
        // the purple buffer, which is within mFirstBlock and thus within |this|.
        //
        // We also don't measure the things pointed to by mEntries[] because
        // those pointers are non-owning.

        return n;
    }
};

static bool
AddPurpleRoot(GCGraphBuilder &aBuilder, void *aRoot,
              nsCycleCollectionParticipant *aParti);

struct SelectPointersVisitor
{
    SelectPointersVisitor(GCGraphBuilder &aBuilder)
        : mBuilder(aBuilder)
    {}

    void
    Visit(nsPurpleBuffer &aBuffer, nsPurpleBufferEntry *aEntry)
    {
        MOZ_ASSERT(aEntry->mObject, "Null object in purple buffer");
        MOZ_ASSERT(aEntry->mRefCnt->get() != 0,
                   "SelectPointersVisitor: snow-white object in the purple buffer");
        if (!aEntry->mRefCnt->IsPurple() ||
            AddPurpleRoot(mBuilder, aEntry->mObject, aEntry->mParticipant)) {
            aBuffer.Remove(aEntry);
        }
    }

private:
    GCGraphBuilder &mBuilder;
};

void
nsPurpleBuffer::SelectPointers(GCGraphBuilder &aBuilder)
{
    SelectPointersVisitor visitor(aBuilder);
    VisitEntries(visitor);

    NS_ASSERTION(mCount == 0, "AddPurpleRoot failed");
    if (mCount == 0) {
        FreeBlocks();
        InitBlocks();
    }
}

enum ccPhase {
    IdlePhase,
    GraphBuildingPhase,
    ScanAndCollectWhitePhase,
    CleanupPhase
};

enum ccType {
    SliceCC,   /* If a CC is in progress, continue it. Otherwise, start a new one. */
    ManualCC,  /* Explicitly triggered. */
    ShutdownCC /* Shutdown CC, used for finding leaks. */
};

#ifdef MOZ_NUWA_PROCESS
#include "ipc/Nuwa.h"
#endif
michael@0 | 1146 | |
michael@0 | 1147 | //////////////////////////////////////////////////////////////////////// |
michael@0 | 1148 | // Top level structure for the cycle collector. |
michael@0 | 1149 | //////////////////////////////////////////////////////////////////////// |
michael@0 | 1150 | |
michael@0 | 1151 | typedef js::SliceBudget SliceBudget; |
michael@0 | 1152 | |
michael@0 | 1153 | class JSPurpleBuffer; |
michael@0 | 1154 | |
michael@0 | 1155 | class nsCycleCollector : public nsIMemoryReporter |
michael@0 | 1156 | { |
michael@0 | 1157 | NS_DECL_ISUPPORTS |
michael@0 | 1158 | NS_DECL_NSIMEMORYREPORTER |
michael@0 | 1159 | |
michael@0 | 1160 | bool mActivelyCollecting; |
michael@0 | 1161 | bool mFreeingSnowWhite; |
michael@0 | 1162 | // mScanInProgress should be false when we're collecting white objects. |
michael@0 | 1163 | bool mScanInProgress; |
michael@0 | 1164 | CycleCollectorResults mResults; |
michael@0 | 1165 | TimeStamp mCollectionStart; |
michael@0 | 1166 | |
michael@0 | 1167 | CycleCollectedJSRuntime *mJSRuntime; |
michael@0 | 1168 | |
michael@0 | 1169 | ccPhase mIncrementalPhase; |
michael@0 | 1170 | GCGraph mGraph; |
michael@0 | 1171 | nsAutoPtr<GCGraphBuilder> mBuilder; |
michael@0 | 1172 | nsAutoPtr<NodePool::Enumerator> mCurrNode; |
michael@0 | 1173 | nsCOMPtr<nsICycleCollectorListener> mListener; |
michael@0 | 1174 | |
michael@0 | 1175 | nsIThread* mThread; |
michael@0 | 1176 | |
michael@0 | 1177 | nsCycleCollectorParams mParams; |
michael@0 | 1178 | |
michael@0 | 1179 | uint32_t mWhiteNodeCount; |
michael@0 | 1180 | |
michael@0 | 1181 | CC_BeforeUnlinkCallback mBeforeUnlinkCB; |
michael@0 | 1182 | CC_ForgetSkippableCallback mForgetSkippableCB; |
michael@0 | 1183 | |
michael@0 | 1184 | nsPurpleBuffer mPurpleBuf; |
michael@0 | 1185 | |
michael@0 | 1186 | uint32_t mUnmergedNeeded; |
michael@0 | 1187 | uint32_t mMergedInARow; |
michael@0 | 1188 | |
michael@0 | 1189 | JSPurpleBuffer* mJSPurpleBuffer; |
michael@0 | 1190 | |
michael@0 | 1191 | public: |
michael@0 | 1192 | nsCycleCollector(); |
michael@0 | 1193 | virtual ~nsCycleCollector(); |
michael@0 | 1194 | |
michael@0 | 1195 | void RegisterJSRuntime(CycleCollectedJSRuntime *aJSRuntime); |
michael@0 | 1196 | void ForgetJSRuntime(); |
michael@0 | 1197 | |
michael@0 | 1198 | void SetBeforeUnlinkCallback(CC_BeforeUnlinkCallback aBeforeUnlinkCB) |
michael@0 | 1199 | { |
michael@0 | 1200 | CheckThreadSafety(); |
michael@0 | 1201 | mBeforeUnlinkCB = aBeforeUnlinkCB; |
michael@0 | 1202 | } |
michael@0 | 1203 | |
michael@0 | 1204 | void SetForgetSkippableCallback(CC_ForgetSkippableCallback aForgetSkippableCB) |
michael@0 | 1205 | { |
michael@0 | 1206 | CheckThreadSafety(); |
michael@0 | 1207 | mForgetSkippableCB = aForgetSkippableCB; |
michael@0 | 1208 | } |
michael@0 | 1209 | |
michael@0 | 1210 | void Suspect(void *n, nsCycleCollectionParticipant *cp, |
michael@0 | 1211 | nsCycleCollectingAutoRefCnt *aRefCnt); |
michael@0 | 1212 | uint32_t SuspectedCount(); |
michael@0 | 1213 | void ForgetSkippable(bool aRemoveChildlessNodes, bool aAsyncSnowWhiteFreeing); |
michael@0 | 1214 | bool FreeSnowWhite(bool aUntilNoSWInPurpleBuffer); |
michael@0 | 1215 | |
michael@0 | 1216 | // This method assumes its argument is already canonicalized. |
michael@0 | 1217 | void RemoveObjectFromGraph(void *aPtr); |
michael@0 | 1218 | |
michael@0 | 1219 | void PrepareForGarbageCollection(); |
michael@0 | 1220 | |
michael@0 | 1221 | bool Collect(ccType aCCType, |
michael@0 | 1222 | SliceBudget &aBudget, |
michael@0 | 1223 | nsICycleCollectorListener *aManualListener); |
michael@0 | 1224 | void Shutdown(); |
michael@0 | 1225 | |
michael@0 | 1226 | void SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf, |
michael@0 | 1227 | size_t *aObjectSize, |
michael@0 | 1228 | size_t *aGraphNodesSize, |
michael@0 | 1229 | size_t *aGraphEdgesSize, |
michael@0 | 1230 | size_t *aWeakMapsSize, |
michael@0 | 1231 | size_t *aPurpleBufferSize) const; |
michael@0 | 1232 | |
michael@0 | 1233 | JSPurpleBuffer* GetJSPurpleBuffer(); |
michael@0 | 1234 | private: |
michael@0 | 1235 | void CheckThreadSafety(); |
michael@0 | 1236 | void ShutdownCollect(); |
michael@0 | 1237 | |
michael@0 | 1238 | void FixGrayBits(bool aForceGC); |
michael@0 | 1239 | bool ShouldMergeZones(ccType aCCType); |
michael@0 | 1240 | |
michael@0 | 1241 | void BeginCollection(ccType aCCType, nsICycleCollectorListener *aManualListener); |
michael@0 | 1242 | void MarkRoots(SliceBudget &aBudget); |
michael@0 | 1243 | void ScanRoots(bool aFullySynchGraphBuild); |
michael@0 | 1244 | void ScanIncrementalRoots(); |
michael@0 | 1245 | void ScanWeakMaps(); |
michael@0 | 1246 | |
michael@0 | 1247 | // Returns whether anything was collected. |
michael@0 | 1248 | bool CollectWhite(); |
michael@0 | 1249 | |
michael@0 | 1250 | void CleanupAfterCollection(); |
michael@0 | 1251 | }; |
michael@0 | 1252 | |
michael@0 | 1253 | NS_IMPL_ISUPPORTS(nsCycleCollector, nsIMemoryReporter) |
michael@0 | 1254 | |
michael@0 | 1255 | /** |
michael@0 | 1256 | * GraphWalker is templatized over a Visitor class that must provide |
michael@0 | 1257 |  * the following methods (Failed() is invoked by CheckedPush |
michael@0 | 1258 |  * below when a fallible queue push fails): |
michael@0 | 1259 |  * bool ShouldVisitNode(PtrInfo const *pi); |
michael@0 | 1260 |  * void VisitNode(PtrInfo *pi); void Failed(); |
michael@0 | 1261 | */ |
michael@0 | 1262 | template <class Visitor> |
michael@0 | 1263 | class GraphWalker |
michael@0 | 1264 | { |
michael@0 | 1265 | private: |
michael@0 | 1266 | Visitor mVisitor; |
michael@0 | 1267 | |
michael@0 | 1268 | void DoWalk(nsDeque &aQueue); |
michael@0 | 1269 | |
michael@0 | 1270 | void CheckedPush(nsDeque &aQueue, PtrInfo *pi) |
michael@0 | 1271 | { |
michael@0 | 1272 | if (!pi) { |
michael@0 | 1273 | MOZ_CRASH(); |
michael@0 | 1274 | } |
michael@0 | 1275 | if (!aQueue.Push(pi, fallible_t())) { |
michael@0 | 1276 | mVisitor.Failed(); |
michael@0 | 1277 | } |
michael@0 | 1278 | } |
michael@0 | 1279 | |
michael@0 | 1280 | public: |
michael@0 | 1281 | void Walk(PtrInfo *s0); |
michael@0 | 1282 | void WalkFromRoots(GCGraph &aGraph); |
michael@0 | 1283 | // Copy-constructing the visitor should be cheap, and it gives |
michael@0 | 1284 | // one less level of indirection than holding a reference. |
michael@0 | 1285 | GraphWalker(const Visitor aVisitor) : mVisitor(aVisitor) {} |
michael@0 | 1286 | }; |
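// [editorial sketch] A minimal Visitor satisfying the contract above.
// CountingVisitor is hypothetical and only illustrates the shape of a
// visitor; it counts every node reachable from the roots:
//
//     struct CountingVisitor
//     {
//         CountingVisitor() : mCount(0), mFailed(false) {}
//         bool ShouldVisitNode(PtrInfo const *pi) { return true; }
//         void VisitNode(PtrInfo *pi) { ++mCount; }
//         void Failed() { mFailed = true; }  // see CheckedPush above
//         uint32_t mCount;
//         bool mFailed;
//     };
//
//     // Usage: walk the whole graph from its roots.
//     //   GraphWalker<CountingVisitor>(CountingVisitor()).WalkFromRoots(aGraph);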
michael@0 | 1287 | |
michael@0 | 1288 | |
michael@0 | 1289 | //////////////////////////////////////////////////////////////////////// |
michael@0 | 1290 | // The static collector struct |
michael@0 | 1291 | //////////////////////////////////////////////////////////////////////// |
michael@0 | 1292 | |
michael@0 | 1293 | struct CollectorData { |
michael@0 | 1294 | nsRefPtr<nsCycleCollector> mCollector; |
michael@0 | 1295 | CycleCollectedJSRuntime* mRuntime; |
michael@0 | 1296 | }; |
michael@0 | 1297 | |
michael@0 | 1298 | static mozilla::ThreadLocal<CollectorData*> sCollectorData; |
michael@0 | 1299 | |
michael@0 | 1300 | //////////////////////////////////////////////////////////////////////// |
michael@0 | 1301 | // Utility functions |
michael@0 | 1302 | //////////////////////////////////////////////////////////////////////// |
michael@0 | 1303 | |
michael@0 | 1304 | MOZ_NEVER_INLINE static void |
michael@0 | 1305 | Fault(const char *msg, const void *ptr=nullptr) |
michael@0 | 1306 | { |
michael@0 | 1307 | if (ptr) |
michael@0 | 1308 | printf("Fault in cycle collector: %s (ptr: %p)\n", msg, ptr); |
michael@0 | 1309 | else |
michael@0 | 1310 | printf("Fault in cycle collector: %s\n", msg); |
michael@0 | 1311 | |
michael@0 | 1312 | NS_RUNTIMEABORT("cycle collector fault"); |
michael@0 | 1313 | } |
michael@0 | 1314 | |
michael@0 | 1315 | static void |
michael@0 | 1316 | Fault(const char *msg, PtrInfo *pi) |
michael@0 | 1317 | { |
michael@0 | 1318 | Fault(msg, pi->mPointer); |
michael@0 | 1319 | } |
michael@0 | 1320 | |
michael@0 | 1321 | static inline void |
michael@0 | 1322 | ToParticipant(nsISupports *s, nsXPCOMCycleCollectionParticipant **cp) |
michael@0 | 1323 | { |
michael@0 | 1324 | // We use QI to move from an nsISupports to an |
michael@0 | 1325 | // nsXPCOMCycleCollectionParticipant, which is a per-class singleton helper |
michael@0 | 1326 | // object that implements traversal and unlinking logic for the nsISupports |
michael@0 | 1327 | // in question. |
michael@0 | 1328 | CallQueryInterface(s, cp); |
michael@0 | 1329 | } |
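// [editorial sketch] Typical use of ToParticipant(), given an already
// canonicalized nsISupports* (aFoo and cb are hypothetical names):
//
//     nsXPCOMCycleCollectionParticipant *cp;
//     ToParticipant(aFoo, &cp);
//     if (cp) {
//         // cb is an nsCycleCollectionTraversalCallback, as in the
//         // Traverse() call sites later in this file.
//         cp->Traverse(aFoo, cb);
//     }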
michael@0 | 1330 | |
michael@0 | 1331 | template <class Visitor> |
michael@0 | 1332 | MOZ_NEVER_INLINE void |
michael@0 | 1333 | GraphWalker<Visitor>::Walk(PtrInfo *s0) |
michael@0 | 1334 | { |
michael@0 | 1335 | nsDeque queue; |
michael@0 | 1336 | CheckedPush(queue, s0); |
michael@0 | 1337 | DoWalk(queue); |
michael@0 | 1338 | } |
michael@0 | 1339 | |
michael@0 | 1340 | template <class Visitor> |
michael@0 | 1341 | MOZ_NEVER_INLINE void |
michael@0 | 1342 | GraphWalker<Visitor>::WalkFromRoots(GCGraph& aGraph) |
michael@0 | 1343 | { |
michael@0 | 1344 | nsDeque queue; |
michael@0 | 1345 | NodePool::Enumerator etor(aGraph.mNodes); |
michael@0 | 1346 | for (uint32_t i = 0; i < aGraph.mRootCount; ++i) { |
michael@0 | 1347 | CheckedPush(queue, etor.GetNext()); |
michael@0 | 1348 | } |
michael@0 | 1349 | DoWalk(queue); |
michael@0 | 1350 | } |
michael@0 | 1351 | |
michael@0 | 1352 | template <class Visitor> |
michael@0 | 1353 | MOZ_NEVER_INLINE void |
michael@0 | 1354 | GraphWalker<Visitor>::DoWalk(nsDeque &aQueue) |
michael@0 | 1355 | { |
michael@0 | 1356 | // Use aQueue to match the breadth-first traversal used when we |
michael@0 | 1357 | // built the graph, for hopefully-better locality. |
michael@0 | 1358 | while (aQueue.GetSize() > 0) { |
michael@0 | 1359 | PtrInfo *pi = static_cast<PtrInfo*>(aQueue.PopFront()); |
michael@0 | 1360 | |
michael@0 | 1361 | if (pi->mParticipant && mVisitor.ShouldVisitNode(pi)) { |
michael@0 | 1362 | mVisitor.VisitNode(pi); |
michael@0 | 1363 | for (EdgePool::Iterator child = pi->FirstChild(), |
michael@0 | 1364 | child_end = pi->LastChild(); |
michael@0 | 1365 | child != child_end; ++child) { |
michael@0 | 1366 | CheckedPush(aQueue, *child); |
michael@0 | 1367 | } |
michael@0 | 1368 | } |
michael@0 | 1369 | } |
michael@0 | 1370 | } |
michael@0 | 1371 | |
michael@0 | 1372 | struct CCGraphDescriber : public LinkedListElement<CCGraphDescriber> |
michael@0 | 1373 | { |
michael@0 | 1374 | CCGraphDescriber() |
michael@0 | 1375 | : mAddress("0x"), mCnt(0), mType(eUnknown) {} |
michael@0 | 1376 | |
michael@0 | 1377 | enum Type |
michael@0 | 1378 | { |
michael@0 | 1379 | eRefCountedObject, |
michael@0 | 1380 | eGCedObject, |
michael@0 | 1381 | eGCMarkedObject, |
michael@0 | 1382 | eEdge, |
michael@0 | 1383 | eRoot, |
michael@0 | 1384 | eGarbage, |
michael@0 | 1385 | eUnknown |
michael@0 | 1386 | }; |
michael@0 | 1387 | |
michael@0 | 1388 | nsCString mAddress; |
michael@0 | 1389 | nsCString mName; |
michael@0 | 1390 | nsCString mCompartmentOrToAddress; |
michael@0 | 1391 | uint32_t mCnt; |
michael@0 | 1392 | Type mType; |
michael@0 | 1393 | }; |
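// [editorial note] Example of how one describer record corresponds to a
// log line (hypothetical values). The line "0x7f00beef [rc=2] nsFoo"
// written by NoteRefCountedObject below is retained for after-processing
// as:
//
//     d->mType    = CCGraphDescriber::eRefCountedObject;
//     d->mAddress = "0x7f00beef";
//     d->mCnt     = 2;
//     d->mName    = "nsFoo";
//
// and later replayed to an nsICycleCollectorHandler by ProcessNext().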
michael@0 | 1394 | |
michael@0 | 1395 | class nsCycleCollectorLogger MOZ_FINAL : public nsICycleCollectorListener |
michael@0 | 1396 | { |
michael@0 | 1397 | public: |
michael@0 | 1398 | nsCycleCollectorLogger() : |
michael@0 | 1399 | mStream(nullptr), mWantAllTraces(false), |
michael@0 | 1400 | mDisableLog(false), mWantAfterProcessing(false) |
michael@0 | 1401 | { |
michael@0 | 1402 | } |
michael@0 | 1403 | ~nsCycleCollectorLogger() |
michael@0 | 1404 | { |
michael@0 | 1405 | ClearDescribers(); |
michael@0 | 1406 | if (mStream) { |
michael@0 | 1407 | MozillaUnRegisterDebugFILE(mStream); |
michael@0 | 1408 | fclose(mStream); |
michael@0 | 1409 | } |
michael@0 | 1410 | } |
michael@0 | 1411 | NS_DECL_ISUPPORTS |
michael@0 | 1412 | |
michael@0 | 1413 | void SetAllTraces() |
michael@0 | 1414 | { |
michael@0 | 1415 | mWantAllTraces = true; |
michael@0 | 1416 | } |
michael@0 | 1417 | |
michael@0 | 1418 | NS_IMETHOD AllTraces(nsICycleCollectorListener** aListener) |
michael@0 | 1419 | { |
michael@0 | 1420 | SetAllTraces(); |
michael@0 | 1421 | NS_ADDREF(*aListener = this); |
michael@0 | 1422 | return NS_OK; |
michael@0 | 1423 | } |
michael@0 | 1424 | |
michael@0 | 1425 | NS_IMETHOD GetWantAllTraces(bool* aAllTraces) |
michael@0 | 1426 | { |
michael@0 | 1427 | *aAllTraces = mWantAllTraces; |
michael@0 | 1428 | return NS_OK; |
michael@0 | 1429 | } |
michael@0 | 1430 | |
michael@0 | 1431 | NS_IMETHOD GetDisableLog(bool* aDisableLog) |
michael@0 | 1432 | { |
michael@0 | 1433 | *aDisableLog = mDisableLog; |
michael@0 | 1434 | return NS_OK; |
michael@0 | 1435 | } |
michael@0 | 1436 | |
michael@0 | 1437 | NS_IMETHOD SetDisableLog(bool aDisableLog) |
michael@0 | 1438 | { |
michael@0 | 1439 | mDisableLog = aDisableLog; |
michael@0 | 1440 | return NS_OK; |
michael@0 | 1441 | } |
michael@0 | 1442 | |
michael@0 | 1443 | NS_IMETHOD GetWantAfterProcessing(bool* aWantAfterProcessing) |
michael@0 | 1444 | { |
michael@0 | 1445 | *aWantAfterProcessing = mWantAfterProcessing; |
michael@0 | 1446 | return NS_OK; |
michael@0 | 1447 | } |
michael@0 | 1448 | |
michael@0 | 1449 | NS_IMETHOD SetWantAfterProcessing(bool aWantAfterProcessing) |
michael@0 | 1450 | { |
michael@0 | 1451 | mWantAfterProcessing = aWantAfterProcessing; |
michael@0 | 1452 | return NS_OK; |
michael@0 | 1453 | } |
michael@0 | 1454 | |
michael@0 | 1455 | NS_IMETHOD GetFilenameIdentifier(nsAString& aIdentifier) |
michael@0 | 1456 | { |
michael@0 | 1457 | aIdentifier = mFilenameIdentifier; |
michael@0 | 1458 | return NS_OK; |
michael@0 | 1459 | } |
michael@0 | 1460 | |
michael@0 | 1461 | NS_IMETHOD SetFilenameIdentifier(const nsAString& aIdentifier) |
michael@0 | 1462 | { |
michael@0 | 1463 | mFilenameIdentifier = aIdentifier; |
michael@0 | 1464 | return NS_OK; |
michael@0 | 1465 | } |
michael@0 | 1466 | |
michael@0 | 1467 | NS_IMETHOD GetGcLogPath(nsAString &aPath) |
michael@0 | 1468 | { |
michael@0 | 1469 | aPath = mGCLogPath; |
michael@0 | 1470 | return NS_OK; |
michael@0 | 1471 | } |
michael@0 | 1472 | |
michael@0 | 1473 | NS_IMETHOD GetCcLogPath(nsAString &aPath) |
michael@0 | 1474 | { |
michael@0 | 1475 | aPath = mCCLogPath; |
michael@0 | 1476 | return NS_OK; |
michael@0 | 1477 | } |
michael@0 | 1478 | |
michael@0 | 1479 | NS_IMETHOD Begin() |
michael@0 | 1480 | { |
michael@0 | 1481 | mCurrentAddress.AssignLiteral("0x"); |
michael@0 | 1482 | ClearDescribers(); |
michael@0 | 1483 | if (mDisableLog) { |
michael@0 | 1484 | return NS_OK; |
michael@0 | 1485 | } |
michael@0 | 1486 | |
michael@0 | 1487 | // Initially create the log in a file starting with |
michael@0 | 1488 | // "incomplete-gc-edges". We'll move the file and strip off the |
michael@0 | 1489 | // "incomplete-" once the dump completes. (We do this because we don't |
michael@0 | 1490 | // want scripts which poll the filesystem looking for gc/cc dumps to |
michael@0 | 1491 | // grab a file before we're finished writing to it.) |
michael@0 | 1492 | nsCOMPtr<nsIFile> gcLogFile = CreateTempFile("incomplete-gc-edges"); |
michael@0 | 1493 | if (NS_WARN_IF(!gcLogFile)) |
michael@0 | 1494 | return NS_ERROR_UNEXPECTED; |
michael@0 | 1495 | |
michael@0 | 1496 | // Dump the JS heap. |
michael@0 | 1497 | FILE* gcLogANSIFile = nullptr; |
michael@0 | 1498 | gcLogFile->OpenANSIFileDesc("w", &gcLogANSIFile); |
michael@0 | 1499 | if (NS_WARN_IF(!gcLogANSIFile)) |
michael@0 | 1500 | return NS_ERROR_UNEXPECTED; |
michael@0 | 1501 | MozillaRegisterDebugFILE(gcLogANSIFile); |
michael@0 | 1502 | CollectorData *data = sCollectorData.get(); |
michael@0 | 1503 | if (data && data->mRuntime) |
michael@0 | 1504 | data->mRuntime->DumpJSHeap(gcLogANSIFile); |
michael@0 | 1505 | MozillaUnRegisterDebugFILE(gcLogANSIFile); |
michael@0 | 1506 | fclose(gcLogANSIFile); |
michael@0 | 1507 | |
michael@0 | 1508 | // Strip off "incomplete-". |
michael@0 | 1509 | nsCOMPtr<nsIFile> gcLogFileFinalDestination = |
michael@0 | 1510 | CreateTempFile("gc-edges"); |
michael@0 | 1511 | if (NS_WARN_IF(!gcLogFileFinalDestination)) |
michael@0 | 1512 | return NS_ERROR_UNEXPECTED; |
michael@0 | 1513 | |
michael@0 | 1514 | nsAutoString gcLogFileFinalDestinationName; |
michael@0 | 1515 | gcLogFileFinalDestination->GetLeafName(gcLogFileFinalDestinationName); |
michael@0 | 1516 | if (NS_WARN_IF(gcLogFileFinalDestinationName.IsEmpty())) |
michael@0 | 1517 | return NS_ERROR_UNEXPECTED; |
michael@0 | 1518 | |
michael@0 | 1519 | gcLogFile->MoveTo(/* directory */ nullptr, gcLogFileFinalDestinationName); |
michael@0 | 1520 | |
michael@0 | 1521 | // Log to the error console. |
michael@0 | 1522 | nsCOMPtr<nsIConsoleService> cs = |
michael@0 | 1523 | do_GetService(NS_CONSOLESERVICE_CONTRACTID); |
michael@0 | 1524 | if (cs) { |
michael@0 | 1525 | nsAutoString gcLogPath; |
michael@0 | 1526 | gcLogFileFinalDestination->GetPath(gcLogPath); |
michael@0 | 1527 | |
michael@0 | 1528 | nsString msg = NS_LITERAL_STRING("Garbage Collector log dumped to ") + |
michael@0 | 1529 | gcLogPath; |
michael@0 | 1530 | cs->LogStringMessage(msg.get()); |
michael@0 | 1531 | |
michael@0 | 1532 | mGCLogPath = gcLogPath; |
michael@0 | 1533 | } |
michael@0 | 1534 | |
michael@0 | 1535 | // Open a file for dumping the CC graph. We again prefix with |
michael@0 | 1536 | // "incomplete-". |
michael@0 | 1537 | mOutFile = CreateTempFile("incomplete-cc-edges"); |
michael@0 | 1538 | if (NS_WARN_IF(!mOutFile)) |
michael@0 | 1539 | return NS_ERROR_UNEXPECTED; |
michael@0 | 1540 | MOZ_ASSERT(!mStream); |
michael@0 | 1541 | mOutFile->OpenANSIFileDesc("w", &mStream); |
michael@0 | 1542 | if (NS_WARN_IF(!mStream)) |
michael@0 | 1543 | return NS_ERROR_UNEXPECTED; |
michael@0 | 1544 | MozillaRegisterDebugFILE(mStream); |
michael@0 | 1545 | |
michael@0 | 1546 | fprintf(mStream, "# WantAllTraces=%s\n", mWantAllTraces ? "true" : "false"); |
michael@0 | 1547 | |
michael@0 | 1548 | return NS_OK; |
michael@0 | 1549 | } |
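// [editorial sketch] The "incomplete-" dance in Begin() is a
// write-then-rename publish pattern: pollers never observe a
// half-written log, because the final name appears only once the file
// is complete. A standalone illustration (hypothetical filenames,
// plain stdio):
//
//     #include <stdio.h>
//
//     int main(void)
//     {
//         FILE *f = fopen("incomplete-gc-edges.log", "w");
//         if (!f)
//             return 1;
//         fputs("# ...graph contents...\n", f);
//         fclose(f);
//         // Publish under the final name only once fully written;
//         // on POSIX, rename() replaces the target atomically.
//         return rename("incomplete-gc-edges.log", "gc-edges.log") ? 1 : 0;
//     }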
michael@0 | 1550 | NS_IMETHOD NoteRefCountedObject(uint64_t aAddress, uint32_t refCount, |
michael@0 | 1551 | const char *aObjectDescription) |
michael@0 | 1552 | { |
michael@0 | 1553 | if (!mDisableLog) { |
michael@0 | 1554 | fprintf(mStream, "%p [rc=%u] %s\n", (void*)aAddress, refCount, |
michael@0 | 1555 | aObjectDescription); |
michael@0 | 1556 | } |
michael@0 | 1557 | if (mWantAfterProcessing) { |
michael@0 | 1558 | CCGraphDescriber* d = new CCGraphDescriber(); |
michael@0 | 1559 | mDescribers.insertBack(d); |
michael@0 | 1560 | mCurrentAddress.AssignLiteral("0x"); |
michael@0 | 1561 | mCurrentAddress.AppendInt(aAddress, 16); |
michael@0 | 1562 | d->mType = CCGraphDescriber::eRefCountedObject; |
michael@0 | 1563 | d->mAddress = mCurrentAddress; |
michael@0 | 1564 | d->mCnt = refCount; |
michael@0 | 1565 | d->mName.Append(aObjectDescription); |
michael@0 | 1566 | } |
michael@0 | 1567 | return NS_OK; |
michael@0 | 1568 | } |
michael@0 | 1569 | NS_IMETHOD NoteGCedObject(uint64_t aAddress, bool aMarked, |
michael@0 | 1570 | const char *aObjectDescription, |
michael@0 | 1571 | uint64_t aCompartmentAddress) |
michael@0 | 1572 | { |
michael@0 | 1573 | if (!mDisableLog) { |
michael@0 | 1574 | fprintf(mStream, "%p [gc%s] %s\n", (void*)aAddress, |
michael@0 | 1575 | aMarked ? ".marked" : "", aObjectDescription); |
michael@0 | 1576 | } |
michael@0 | 1577 | if (mWantAfterProcessing) { |
michael@0 | 1578 | CCGraphDescriber* d = new CCGraphDescriber(); |
michael@0 | 1579 | mDescribers.insertBack(d); |
michael@0 | 1580 | mCurrentAddress.AssignLiteral("0x"); |
michael@0 | 1581 | mCurrentAddress.AppendInt(aAddress, 16); |
michael@0 | 1582 | d->mType = aMarked ? CCGraphDescriber::eGCMarkedObject : |
michael@0 | 1583 | CCGraphDescriber::eGCedObject; |
michael@0 | 1584 | d->mAddress = mCurrentAddress; |
michael@0 | 1585 | d->mName.Append(aObjectDescription); |
michael@0 | 1586 | if (aCompartmentAddress) { |
michael@0 | 1587 | d->mCompartmentOrToAddress.AssignLiteral("0x"); |
michael@0 | 1588 | d->mCompartmentOrToAddress.AppendInt(aCompartmentAddress, 16); |
michael@0 | 1589 | } else { |
michael@0 | 1590 | d->mCompartmentOrToAddress.SetIsVoid(true); |
michael@0 | 1591 | } |
michael@0 | 1592 | } |
michael@0 | 1593 | return NS_OK; |
michael@0 | 1594 | } |
michael@0 | 1595 | NS_IMETHOD NoteEdge(uint64_t aToAddress, const char *aEdgeName) |
michael@0 | 1596 | { |
michael@0 | 1597 | if (!mDisableLog) { |
michael@0 | 1598 | fprintf(mStream, "> %p %s\n", (void*)aToAddress, aEdgeName); |
michael@0 | 1599 | } |
michael@0 | 1600 | if (mWantAfterProcessing) { |
michael@0 | 1601 | CCGraphDescriber* d = new CCGraphDescriber(); |
michael@0 | 1602 | mDescribers.insertBack(d); |
michael@0 | 1603 | d->mType = CCGraphDescriber::eEdge; |
michael@0 | 1604 | d->mAddress = mCurrentAddress; |
michael@0 | 1605 | d->mCompartmentOrToAddress.AssignLiteral("0x"); |
michael@0 | 1606 | d->mCompartmentOrToAddress.AppendInt(aToAddress, 16); |
michael@0 | 1607 | d->mName.Append(aEdgeName); |
michael@0 | 1608 | } |
michael@0 | 1609 | return NS_OK; |
michael@0 | 1610 | } |
michael@0 | 1611 | NS_IMETHOD NoteWeakMapEntry(uint64_t aMap, uint64_t aKey, |
michael@0 | 1612 | uint64_t aKeyDelegate, uint64_t aValue) |
michael@0 | 1613 | { |
michael@0 | 1614 | if (!mDisableLog) { |
michael@0 | 1615 | fprintf(mStream, "WeakMapEntry map=%p key=%p keyDelegate=%p value=%p\n", |
michael@0 | 1616 | (void*)aMap, (void*)aKey, (void*)aKeyDelegate, (void*)aValue); |
michael@0 | 1617 | } |
michael@0 | 1618 | // We don't support after-processing for weak map entries. |
michael@0 | 1619 | return NS_OK; |
michael@0 | 1620 | } |
michael@0 | 1621 | NS_IMETHOD NoteIncrementalRoot(uint64_t aAddress) |
michael@0 | 1622 | { |
michael@0 | 1623 | if (!mDisableLog) { |
michael@0 | 1624 | fprintf(mStream, "IncrementalRoot %p\n", (void*)aAddress); |
michael@0 | 1625 | } |
michael@0 | 1626 | // We don't support after-processing for incremental roots. |
michael@0 | 1627 | return NS_OK; |
michael@0 | 1628 | } |
michael@0 | 1629 | NS_IMETHOD BeginResults() |
michael@0 | 1630 | { |
michael@0 | 1631 | if (!mDisableLog) { |
michael@0 | 1632 | fputs("==========\n", mStream); |
michael@0 | 1633 | } |
michael@0 | 1634 | return NS_OK; |
michael@0 | 1635 | } |
michael@0 | 1636 | NS_IMETHOD DescribeRoot(uint64_t aAddress, uint32_t aKnownEdges) |
michael@0 | 1637 | { |
michael@0 | 1638 | if (!mDisableLog) { |
michael@0 | 1639 | fprintf(mStream, "%p [known=%u]\n", (void*)aAddress, aKnownEdges); |
michael@0 | 1640 | } |
michael@0 | 1641 | if (mWantAfterProcessing) { |
michael@0 | 1642 | CCGraphDescriber* d = new CCGraphDescriber(); |
michael@0 | 1643 | mDescribers.insertBack(d); |
michael@0 | 1644 | d->mType = CCGraphDescriber::eRoot; |
michael@0 | 1645 | d->mAddress.AppendInt(aAddress, 16); |
michael@0 | 1646 | d->mCnt = aKnownEdges; |
michael@0 | 1647 | } |
michael@0 | 1648 | return NS_OK; |
michael@0 | 1649 | } |
michael@0 | 1650 | NS_IMETHOD DescribeGarbage(uint64_t aAddress) |
michael@0 | 1651 | { |
michael@0 | 1652 | if (!mDisableLog) { |
michael@0 | 1653 | fprintf(mStream, "%p [garbage]\n", (void*)aAddress); |
michael@0 | 1654 | } |
michael@0 | 1655 | if (mWantAfterProcessing) { |
michael@0 | 1656 | CCGraphDescriber* d = new CCGraphDescriber(); |
michael@0 | 1657 | mDescribers.insertBack(d); |
michael@0 | 1658 | d->mType = CCGraphDescriber::eGarbage; |
michael@0 | 1659 | d->mAddress.AppendInt(aAddress, 16); |
michael@0 | 1660 | } |
michael@0 | 1661 | return NS_OK; |
michael@0 | 1662 | } |
michael@0 | 1663 | NS_IMETHOD End() |
michael@0 | 1664 | { |
michael@0 | 1665 | if (!mDisableLog) { |
michael@0 | 1666 | MOZ_ASSERT(mStream); |
michael@0 | 1667 | MOZ_ASSERT(mOutFile); |
michael@0 | 1668 | |
michael@0 | 1669 | MozillaUnRegisterDebugFILE(mStream); |
michael@0 | 1670 | fclose(mStream); |
michael@0 | 1671 | mStream = nullptr; |
michael@0 | 1672 | |
michael@0 | 1673 | // Strip off "incomplete-" from the log file's name. |
michael@0 | 1674 | nsCOMPtr<nsIFile> logFileFinalDestination = |
michael@0 | 1675 | CreateTempFile("cc-edges"); |
michael@0 | 1676 | if (NS_WARN_IF(!logFileFinalDestination)) |
michael@0 | 1677 | return NS_ERROR_UNEXPECTED; |
michael@0 | 1678 | |
michael@0 | 1679 | nsAutoString logFileFinalDestinationName; |
michael@0 | 1680 | logFileFinalDestination->GetLeafName(logFileFinalDestinationName); |
michael@0 | 1681 | if (NS_WARN_IF(logFileFinalDestinationName.IsEmpty())) |
michael@0 | 1682 | return NS_ERROR_UNEXPECTED; |
michael@0 | 1683 | |
michael@0 | 1684 | mOutFile->MoveTo(/* directory = */ nullptr, |
michael@0 | 1685 | logFileFinalDestinationName); |
michael@0 | 1686 | mOutFile = nullptr; |
michael@0 | 1687 | |
michael@0 | 1688 | // Log to the error console. |
michael@0 | 1689 | nsCOMPtr<nsIConsoleService> cs = |
michael@0 | 1690 | do_GetService(NS_CONSOLESERVICE_CONTRACTID); |
michael@0 | 1691 | if (cs) { |
michael@0 | 1692 | nsAutoString ccLogPath; |
michael@0 | 1693 | logFileFinalDestination->GetPath(ccLogPath); |
michael@0 | 1694 | |
michael@0 | 1695 | nsString msg = NS_LITERAL_STRING("Cycle Collector log dumped to ") + |
michael@0 | 1696 | ccLogPath; |
michael@0 | 1697 | cs->LogStringMessage(msg.get()); |
michael@0 | 1698 | |
michael@0 | 1699 | mCCLogPath = ccLogPath; |
michael@0 | 1700 | } |
michael@0 | 1701 | } |
michael@0 | 1702 | return NS_OK; |
michael@0 | 1703 | } |
michael@0 | 1704 | NS_IMETHOD ProcessNext(nsICycleCollectorHandler* aHandler, |
michael@0 | 1705 | bool* aCanContinue) |
michael@0 | 1706 | { |
michael@0 | 1707 | if (NS_WARN_IF(!aHandler) || NS_WARN_IF(!mWantAfterProcessing)) |
michael@0 | 1708 | return NS_ERROR_UNEXPECTED; |
michael@0 | 1709 | CCGraphDescriber* d = mDescribers.popFirst(); |
michael@0 | 1710 | if (d) { |
michael@0 | 1711 | switch (d->mType) { |
michael@0 | 1712 | case CCGraphDescriber::eRefCountedObject: |
michael@0 | 1713 | aHandler->NoteRefCountedObject(d->mAddress, |
michael@0 | 1714 | d->mCnt, |
michael@0 | 1715 | d->mName); |
michael@0 | 1716 | break; |
michael@0 | 1717 | case CCGraphDescriber::eGCedObject: |
michael@0 | 1718 | case CCGraphDescriber::eGCMarkedObject: |
michael@0 | 1719 | aHandler->NoteGCedObject(d->mAddress, |
michael@0 | 1720 | d->mType == |
michael@0 | 1721 | CCGraphDescriber::eGCMarkedObject, |
michael@0 | 1722 | d->mName, |
michael@0 | 1723 | d->mCompartmentOrToAddress); |
michael@0 | 1724 | break; |
michael@0 | 1725 | case CCGraphDescriber::eEdge: |
michael@0 | 1726 | aHandler->NoteEdge(d->mAddress, |
michael@0 | 1727 | d->mCompartmentOrToAddress, |
michael@0 | 1728 | d->mName); |
michael@0 | 1729 | break; |
michael@0 | 1730 | case CCGraphDescriber::eRoot: |
michael@0 | 1731 | aHandler->DescribeRoot(d->mAddress, |
michael@0 | 1732 | d->mCnt); |
michael@0 | 1733 | break; |
michael@0 | 1734 | case CCGraphDescriber::eGarbage: |
michael@0 | 1735 | aHandler->DescribeGarbage(d->mAddress); |
michael@0 | 1736 | break; |
michael@0 | 1737 | case CCGraphDescriber::eUnknown: |
michael@0 | 1738 | NS_NOTREACHED("CCGraphDescriber::eUnknown"); |
michael@0 | 1739 | break; |
michael@0 | 1740 | } |
michael@0 | 1741 | delete d; |
michael@0 | 1742 | } |
michael@0 | 1743 | if (!(*aCanContinue = !mDescribers.isEmpty())) { |
michael@0 | 1744 | mCurrentAddress.AssignLiteral("0x"); |
michael@0 | 1745 | } |
michael@0 | 1746 | return NS_OK; |
michael@0 | 1747 | } |
michael@0 | 1748 | private: |
michael@0 | 1749 | /** |
michael@0 | 1750 | * Create a new file named something like aPrefix.$PID.$IDENTIFIER.log in |
michael@0 | 1751 | * $MOZ_CC_LOG_DIRECTORY or in the system's temp directory. No existing |
michael@0 | 1752 | * file will be overwritten; if aPrefix.$PID.$IDENTIFIER.log exists, we'll |
michael@0 | 1753 | * try a file named something like aPrefix.$PID.$IDENTIFIER-1.log, and so |
michael@0 | 1754 | * on. |
michael@0 | 1755 | */ |
michael@0 | 1756 | already_AddRefed<nsIFile> |
michael@0 | 1757 | CreateTempFile(const char* aPrefix) |
michael@0 | 1758 | { |
michael@0 | 1759 | nsPrintfCString filename("%s.%d%s%s.log", |
michael@0 | 1760 | aPrefix, |
michael@0 | 1761 | base::GetCurrentProcId(), |
michael@0 | 1762 | mFilenameIdentifier.IsEmpty() ? "" : ".", |
michael@0 | 1763 | NS_ConvertUTF16toUTF8(mFilenameIdentifier).get()); |
michael@0 | 1764 | |
michael@0 | 1765 | // Get the log directory either from $MOZ_CC_LOG_DIRECTORY or from |
michael@0 | 1766 | // the fallback directories in OpenTempFile. We don't use an nsCOMPtr |
michael@0 | 1767 | // here because OpenTempFile uses an in/out param and getter_AddRefs |
michael@0 | 1768 | // wouldn't work. |
michael@0 | 1769 | nsIFile* logFile = nullptr; |
michael@0 | 1770 | if (char* env = PR_GetEnv("MOZ_CC_LOG_DIRECTORY")) { |
michael@0 | 1771 | NS_NewNativeLocalFile(nsCString(env), /* followLinks = */ true, |
michael@0 | 1772 | &logFile); |
michael@0 | 1773 | } |
michael@0 | 1774 | |
michael@0 | 1775 | // On Android, this function opens a file named aFilename under a |
michael@0 | 1776 | // specific folder (/data/local/tmp/memory-reports). Otherwise, it |
michael@0 | 1777 | // opens a file named aFilename under NS_OS_TEMP_DIR. |
michael@0 | 1778 | nsresult rv = nsDumpUtils::OpenTempFile( |
michael@0 | 1779 | filename, |
michael@0 | 1780 | &logFile, |
michael@0 | 1781 | NS_LITERAL_CSTRING("memory-reports")); |
michael@0 | 1782 | if (NS_FAILED(rv)) { |
michael@0 | 1783 | NS_IF_RELEASE(logFile); |
michael@0 | 1784 | return nullptr; |
michael@0 | 1785 | } |
michael@0 | 1786 | |
michael@0 | 1787 | return dont_AddRef(logFile); |
michael@0 | 1788 | } |
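// [editorial note] Example of the names CreateTempFile produces
// (hypothetical pid/identifier): with aPrefix = "incomplete-cc-edges",
// pid 1234, and filename identifier "leaktest", the format string
// "%s.%d%s%s.log" yields
//
//     incomplete-cc-edges.1234.leaktest.log
//
// while an empty identifier yields incomplete-cc-edges.1234.log.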
michael@0 | 1789 | |
michael@0 | 1790 | void ClearDescribers() |
michael@0 | 1791 | { |
michael@0 | 1792 | CCGraphDescriber* d; |
michael@0 | 1793 | while ((d = mDescribers.popFirst())) { |
michael@0 | 1794 | delete d; |
michael@0 | 1795 | } |
michael@0 | 1796 | } |
michael@0 | 1797 | |
michael@0 | 1798 | FILE *mStream; |
michael@0 | 1799 | nsCOMPtr<nsIFile> mOutFile; |
michael@0 | 1800 | bool mWantAllTraces; |
michael@0 | 1801 | bool mDisableLog; |
michael@0 | 1802 | bool mWantAfterProcessing; |
michael@0 | 1803 | nsString mFilenameIdentifier; |
michael@0 | 1804 | nsString mGCLogPath; |
michael@0 | 1805 | nsString mCCLogPath; |
michael@0 | 1806 | nsCString mCurrentAddress; |
michael@0 | 1807 | mozilla::LinkedList<CCGraphDescriber> mDescribers; |
michael@0 | 1808 | }; |
michael@0 | 1809 | |
michael@0 | 1810 | NS_IMPL_ISUPPORTS(nsCycleCollectorLogger, nsICycleCollectorListener) |
michael@0 | 1811 | |
michael@0 | 1812 | nsresult |
michael@0 | 1813 | nsCycleCollectorLoggerConstructor(nsISupports* aOuter, |
michael@0 | 1814 | const nsIID& aIID, |
michael@0 | 1815 | void* *aInstancePtr) |
michael@0 | 1816 | { |
michael@0 | 1817 | if (NS_WARN_IF(aOuter)) |
michael@0 | 1818 | return NS_ERROR_NO_AGGREGATION; |
michael@0 | 1819 | |
michael@0 | 1820 | nsISupports *logger = new nsCycleCollectorLogger(); |
michael@0 | 1821 | |
michael@0 | 1822 | return logger->QueryInterface(aIID, aInstancePtr); |
michael@0 | 1823 | } |
michael@0 | 1824 | |
michael@0 | 1825 | //////////////////////////////////////////////////////////////////////// |
michael@0 | 1826 | // Bacon & Rajan's |MarkRoots| routine. |
michael@0 | 1827 | //////////////////////////////////////////////////////////////////////// |
michael@0 | 1828 | |
michael@0 | 1829 | class GCGraphBuilder : public nsCycleCollectionTraversalCallback, |
michael@0 | 1830 | public nsCycleCollectionNoteRootCallback |
michael@0 | 1831 | { |
michael@0 | 1832 | private: |
michael@0 | 1833 | GCGraph &mGraph; |
michael@0 | 1834 | CycleCollectorResults &mResults; |
michael@0 | 1835 | NodePool::Builder mNodeBuilder; |
michael@0 | 1836 | EdgePool::Builder mEdgeBuilder; |
michael@0 | 1837 | PtrInfo *mCurrPi; |
michael@0 | 1838 | nsCycleCollectionParticipant *mJSParticipant; |
michael@0 | 1839 | nsCycleCollectionParticipant *mJSZoneParticipant; |
michael@0 | 1840 | nsCString mNextEdgeName; |
michael@0 | 1841 | nsICycleCollectorListener *mListener; |
michael@0 | 1842 | bool mMergeZones; |
michael@0 | 1843 | bool mRanOutOfMemory; |
michael@0 | 1844 | |
michael@0 | 1845 | public: |
michael@0 | 1846 | GCGraphBuilder(GCGraph &aGraph, |
michael@0 | 1847 | CycleCollectorResults &aResults, |
michael@0 | 1848 | CycleCollectedJSRuntime *aJSRuntime, |
michael@0 | 1849 | nsICycleCollectorListener *aListener, |
michael@0 | 1850 | bool aMergeZones); |
michael@0 | 1851 | virtual ~GCGraphBuilder(); |
michael@0 | 1852 | |
michael@0 | 1853 | bool WantAllTraces() const |
michael@0 | 1854 | { |
michael@0 | 1855 | return nsCycleCollectionNoteRootCallback::WantAllTraces(); |
michael@0 | 1856 | } |
michael@0 | 1857 | |
michael@0 | 1858 | PtrInfo* AddNode(void *aPtr, nsCycleCollectionParticipant *aParticipant); |
michael@0 | 1859 | PtrInfo* AddWeakMapNode(void* node); |
michael@0 | 1860 | void Traverse(PtrInfo* aPtrInfo); |
michael@0 | 1861 | void SetLastChild(); |
michael@0 | 1862 | |
michael@0 | 1863 | bool RanOutOfMemory() const { return mRanOutOfMemory; } |
michael@0 | 1864 | |
michael@0 | 1865 | private: |
michael@0 | 1866 | void DescribeNode(uint32_t refCount, const char *objName) |
michael@0 | 1867 | { |
michael@0 | 1868 | mCurrPi->mRefCount = refCount; |
michael@0 | 1869 | } |
michael@0 | 1870 | |
michael@0 | 1871 | public: |
michael@0 | 1872 | // nsCycleCollectionNoteRootCallback methods. |
michael@0 | 1873 | NS_IMETHOD_(void) NoteXPCOMRoot(nsISupports *root); |
michael@0 | 1874 | NS_IMETHOD_(void) NoteJSRoot(void *root); |
michael@0 | 1875 | NS_IMETHOD_(void) NoteNativeRoot(void *root, nsCycleCollectionParticipant *participant); |
michael@0 | 1876 | NS_IMETHOD_(void) NoteWeakMapping(void *map, void *key, void *kdelegate, void *val); |
michael@0 | 1877 | |
michael@0 | 1878 | // nsCycleCollectionTraversalCallback methods. |
michael@0 | 1879 | NS_IMETHOD_(void) DescribeRefCountedNode(nsrefcnt refCount, |
michael@0 | 1880 | const char *objName); |
michael@0 | 1881 | NS_IMETHOD_(void) DescribeGCedNode(bool isMarked, const char *objName, |
michael@0 | 1882 | uint64_t aCompartmentAddress); |
michael@0 | 1883 | |
michael@0 | 1884 | NS_IMETHOD_(void) NoteXPCOMChild(nsISupports *child); |
michael@0 | 1885 | NS_IMETHOD_(void) NoteJSChild(void *child); |
michael@0 | 1886 | NS_IMETHOD_(void) NoteNativeChild(void *child, |
michael@0 | 1887 | nsCycleCollectionParticipant *participant); |
michael@0 | 1888 | NS_IMETHOD_(void) NoteNextEdgeName(const char* name); |
michael@0 | 1889 | |
michael@0 | 1890 | private: |
michael@0 | 1891 | NS_IMETHOD_(void) NoteRoot(void *root, |
michael@0 | 1892 | nsCycleCollectionParticipant *participant) |
michael@0 | 1893 | { |
michael@0 | 1894 | MOZ_ASSERT(root); |
michael@0 | 1895 | MOZ_ASSERT(participant); |
michael@0 | 1896 | |
michael@0 | 1897 | if (!participant->CanSkipInCC(root) || MOZ_UNLIKELY(WantAllTraces())) { |
michael@0 | 1898 | AddNode(root, participant); |
michael@0 | 1899 | } |
michael@0 | 1900 | } |
michael@0 | 1901 | |
michael@0 | 1902 | NS_IMETHOD_(void) NoteChild(void *child, nsCycleCollectionParticipant *cp, |
michael@0 | 1903 | nsCString edgeName) |
michael@0 | 1904 | { |
michael@0 | 1905 | PtrInfo *childPi = AddNode(child, cp); |
michael@0 | 1906 | if (!childPi) |
michael@0 | 1907 | return; |
michael@0 | 1908 | mEdgeBuilder.Add(childPi); |
michael@0 | 1909 | if (mListener) { |
michael@0 | 1910 | mListener->NoteEdge((uint64_t)child, edgeName.get()); |
michael@0 | 1911 | } |
michael@0 | 1912 | ++childPi->mInternalRefs; |
michael@0 | 1913 | } |
michael@0 | 1914 | |
michael@0 | 1915 | JS::Zone *MergeZone(void *gcthing) { |
michael@0 | 1916 | if (!mMergeZones) { |
michael@0 | 1917 | return nullptr; |
michael@0 | 1918 | } |
michael@0 | 1919 | JS::Zone *zone = JS::GetGCThingZone(gcthing); |
michael@0 | 1920 | if (js::IsSystemZone(zone)) { |
michael@0 | 1921 | return nullptr; |
michael@0 | 1922 | } |
michael@0 | 1923 | return zone; |
michael@0 | 1924 | } |
michael@0 | 1925 | }; |
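// [editorial note] MergeZone() above implements the zone-merging
// optimization: when enabled, every JS GCThing in an ordinary zone is
// represented in the CC graph by a single per-zone node
// (mJSZoneParticipant), which can shrink the graph dramatically;
// system-zone things are always kept as individual nodes. Merging is
// disabled when all traces are wanted (see the constructor below), and
// NoteJSRoot/NoteJSChild are the call sites.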
michael@0 | 1926 | |
michael@0 | 1927 | GCGraphBuilder::GCGraphBuilder(GCGraph &aGraph, |
michael@0 | 1928 | CycleCollectorResults &aResults, |
michael@0 | 1929 | CycleCollectedJSRuntime *aJSRuntime, |
michael@0 | 1930 | nsICycleCollectorListener *aListener, |
michael@0 | 1931 | bool aMergeZones) |
michael@0 | 1932 | : mGraph(aGraph), |
michael@0 | 1933 | mResults(aResults), |
michael@0 | 1934 | mNodeBuilder(aGraph.mNodes), |
michael@0 | 1935 | mEdgeBuilder(aGraph.mEdges), |
michael@0 | 1936 | mJSParticipant(nullptr), |
michael@0 | 1937 | mJSZoneParticipant(nullptr), |
michael@0 | 1938 | mListener(aListener), |
michael@0 | 1939 | mMergeZones(aMergeZones), |
michael@0 | 1940 | mRanOutOfMemory(false) |
michael@0 | 1941 | { |
michael@0 | 1942 | if (aJSRuntime) { |
michael@0 | 1943 | mJSParticipant = aJSRuntime->GCThingParticipant(); |
michael@0 | 1944 | mJSZoneParticipant = aJSRuntime->ZoneParticipant(); |
michael@0 | 1945 | } |
michael@0 | 1946 | |
michael@0 | 1947 | uint32_t flags = 0; |
michael@0 | 1948 | if (!flags && mListener) { |
michael@0 | 1949 | flags = nsCycleCollectionTraversalCallback::WANT_DEBUG_INFO; |
michael@0 | 1950 | bool all = false; |
michael@0 | 1951 | mListener->GetWantAllTraces(&all); |
michael@0 | 1952 | if (all) { |
michael@0 | 1953 | flags |= nsCycleCollectionTraversalCallback::WANT_ALL_TRACES; |
michael@0 | 1954 | mWantAllTraces = true; // for nsCycleCollectionNoteRootCallback |
michael@0 | 1955 | } |
michael@0 | 1956 | } |
michael@0 | 1957 | |
michael@0 | 1958 | mFlags |= flags; |
michael@0 | 1959 | |
michael@0 | 1960 | mMergeZones = mMergeZones && MOZ_LIKELY(!WantAllTraces()); |
michael@0 | 1961 | |
michael@0 | 1962 | MOZ_ASSERT(nsCycleCollectionNoteRootCallback::WantAllTraces() == |
michael@0 | 1963 | nsCycleCollectionTraversalCallback::WantAllTraces()); |
michael@0 | 1964 | } |
michael@0 | 1965 | |
michael@0 | 1966 | GCGraphBuilder::~GCGraphBuilder() |
michael@0 | 1967 | { |
michael@0 | 1968 | } |
michael@0 | 1969 | |
michael@0 | 1970 | PtrInfo* |
michael@0 | 1971 | GCGraphBuilder::AddNode(void *aPtr, nsCycleCollectionParticipant *aParticipant) |
michael@0 | 1972 | { |
michael@0 | 1973 | PtrToNodeEntry *e = mGraph.AddNodeToMap(aPtr); |
michael@0 | 1974 | if (!e) { |
michael@0 | 1975 | mRanOutOfMemory = true; |
michael@0 | 1976 | return nullptr; |
michael@0 | 1977 | } |
michael@0 | 1978 | |
michael@0 | 1979 | PtrInfo *result; |
michael@0 | 1980 | if (!e->mNode) { |
michael@0 | 1981 | // New entry. |
michael@0 | 1982 | result = mNodeBuilder.Add(aPtr, aParticipant); |
michael@0 | 1983 | e->mNode = result; |
michael@0 | 1984 | NS_ASSERTION(result, "mNodeBuilder.Add returned null"); |
michael@0 | 1985 | } else { |
michael@0 | 1986 | result = e->mNode; |
michael@0 | 1987 | MOZ_ASSERT(result->mParticipant == aParticipant, |
michael@0 | 1988 | "nsCycleCollectionParticipant shouldn't change!"); |
michael@0 | 1989 | } |
michael@0 | 1990 | return result; |
michael@0 | 1991 | } |
michael@0 | 1992 | |
michael@0 | 1993 | MOZ_NEVER_INLINE void |
michael@0 | 1994 | GCGraphBuilder::Traverse(PtrInfo* aPtrInfo) |
michael@0 | 1995 | { |
michael@0 | 1996 | mCurrPi = aPtrInfo; |
michael@0 | 1997 | |
michael@0 | 1998 | mCurrPi->SetFirstChild(mEdgeBuilder.Mark()); |
michael@0 | 1999 | |
michael@0 | 2000 | if (!aPtrInfo->mParticipant) { |
michael@0 | 2001 | return; |
michael@0 | 2002 | } |
michael@0 | 2003 | |
michael@0 | 2004 | nsresult rv = aPtrInfo->mParticipant->Traverse(aPtrInfo->mPointer, *this); |
michael@0 | 2005 | if (NS_FAILED(rv)) { |
michael@0 | 2006 | Fault("script pointer traversal failed", aPtrInfo); |
michael@0 | 2007 | } |
michael@0 | 2008 | } |
michael@0 | 2009 | |
michael@0 | 2010 | void |
michael@0 | 2011 | GCGraphBuilder::SetLastChild() |
michael@0 | 2012 | { |
michael@0 | 2013 | mCurrPi->SetLastChild(mEdgeBuilder.Mark()); |
michael@0 | 2014 | } |
michael@0 | 2015 | |
michael@0 | 2016 | NS_IMETHODIMP_(void) |
michael@0 | 2017 | GCGraphBuilder::NoteXPCOMRoot(nsISupports *root) |
michael@0 | 2018 | { |
michael@0 | 2019 | root = CanonicalizeXPCOMParticipant(root); |
michael@0 | 2020 | NS_ASSERTION(root, |
michael@0 | 2021 | "Don't add objects that don't participate in collection!"); |
michael@0 | 2022 | |
michael@0 | 2023 | nsXPCOMCycleCollectionParticipant *cp; |
michael@0 | 2024 | ToParticipant(root, &cp); |
michael@0 | 2025 | |
michael@0 | 2026 | NoteRoot(root, cp); |
michael@0 | 2027 | } |
michael@0 | 2028 | |
michael@0 | 2029 | NS_IMETHODIMP_(void) |
michael@0 | 2030 | GCGraphBuilder::NoteJSRoot(void *root) |
michael@0 | 2031 | { |
michael@0 | 2032 | if (JS::Zone *zone = MergeZone(root)) { |
michael@0 | 2033 | NoteRoot(zone, mJSZoneParticipant); |
michael@0 | 2034 | } else { |
michael@0 | 2035 | NoteRoot(root, mJSParticipant); |
michael@0 | 2036 | } |
michael@0 | 2037 | } |
michael@0 | 2038 | |
michael@0 | 2039 | NS_IMETHODIMP_(void) |
michael@0 | 2040 | GCGraphBuilder::NoteNativeRoot(void *root, nsCycleCollectionParticipant *participant) |
michael@0 | 2041 | { |
michael@0 | 2042 | NoteRoot(root, participant); |
michael@0 | 2043 | } |
michael@0 | 2044 | |
michael@0 | 2045 | NS_IMETHODIMP_(void) |
michael@0 | 2046 | GCGraphBuilder::DescribeRefCountedNode(nsrefcnt refCount, const char *objName) |
michael@0 | 2047 | { |
michael@0 | 2048 | if (refCount == 0) |
michael@0 | 2049 | Fault("zero refcount", mCurrPi); |
michael@0 | 2050 | if (refCount == UINT32_MAX) |
michael@0 | 2051 | Fault("overflowing refcount", mCurrPi); |
michael@0 | 2052 | mResults.mVisitedRefCounted++; |
michael@0 | 2053 | |
michael@0 | 2054 | if (mListener) { |
michael@0 | 2055 | mListener->NoteRefCountedObject((uint64_t)mCurrPi->mPointer, refCount, |
michael@0 | 2056 | objName); |
michael@0 | 2057 | } |
michael@0 | 2058 | |
michael@0 | 2059 | DescribeNode(refCount, objName); |
michael@0 | 2060 | } |
michael@0 | 2061 | |
michael@0 | 2062 | NS_IMETHODIMP_(void) |
michael@0 | 2063 | GCGraphBuilder::DescribeGCedNode(bool isMarked, const char *objName, |
michael@0 | 2064 | uint64_t aCompartmentAddress) |
michael@0 | 2065 | { |
michael@0 | 2066 | uint32_t refCount = isMarked ? UINT32_MAX : 0; |
michael@0 | 2067 | mResults.mVisitedGCed++; |
michael@0 | 2068 | |
michael@0 | 2069 | if (mListener) { |
michael@0 | 2070 | mListener->NoteGCedObject((uint64_t)mCurrPi->mPointer, isMarked, |
michael@0 | 2071 | objName, aCompartmentAddress); |
michael@0 | 2072 | } |
michael@0 | 2073 | |
michael@0 | 2074 | DescribeNode(refCount, objName); |
michael@0 | 2075 | } |
michael@0 | 2076 | |
michael@0 | 2077 | NS_IMETHODIMP_(void) |
michael@0 | 2078 | GCGraphBuilder::NoteXPCOMChild(nsISupports *child) |
michael@0 | 2079 | { |
michael@0 | 2080 | nsCString edgeName; |
michael@0 | 2081 | if (WantDebugInfo()) { |
michael@0 | 2082 | edgeName.Assign(mNextEdgeName); |
michael@0 | 2083 | mNextEdgeName.Truncate(); |
michael@0 | 2084 | } |
michael@0 | 2085 | if (!child || !(child = CanonicalizeXPCOMParticipant(child))) |
michael@0 | 2086 | return; |
michael@0 | 2087 | |
michael@0 | 2088 | nsXPCOMCycleCollectionParticipant *cp; |
michael@0 | 2089 | ToParticipant(child, &cp); |
michael@0 | 2090 | if (cp && (!cp->CanSkipThis(child) || WantAllTraces())) { |
michael@0 | 2091 | NoteChild(child, cp, edgeName); |
michael@0 | 2092 | } |
michael@0 | 2093 | } |
michael@0 | 2094 | |
michael@0 | 2095 | NS_IMETHODIMP_(void) |
michael@0 | 2096 | GCGraphBuilder::NoteNativeChild(void *child, |
michael@0 | 2097 | nsCycleCollectionParticipant *participant) |
michael@0 | 2098 | { |
michael@0 | 2099 | nsCString edgeName; |
michael@0 | 2100 | if (WantDebugInfo()) { |
michael@0 | 2101 | edgeName.Assign(mNextEdgeName); |
michael@0 | 2102 | mNextEdgeName.Truncate(); |
michael@0 | 2103 | } |
michael@0 | 2104 | if (!child) |
michael@0 | 2105 | return; |
michael@0 | 2106 | |
michael@0 | 2107 | MOZ_ASSERT(participant, "Need a nsCycleCollectionParticipant!"); |
michael@0 | 2108 | NoteChild(child, participant, edgeName); |
michael@0 | 2109 | } |
michael@0 | 2110 | |
michael@0 | 2111 | NS_IMETHODIMP_(void) |
michael@0 | 2112 | GCGraphBuilder::NoteJSChild(void *child) |
michael@0 | 2113 | { |
michael@0 | 2114 | if (!child) { |
michael@0 | 2115 | return; |
michael@0 | 2116 | } |
michael@0 | 2117 | |
michael@0 | 2118 | nsCString edgeName; |
michael@0 | 2119 | if (MOZ_UNLIKELY(WantDebugInfo())) { |
michael@0 | 2120 | edgeName.Assign(mNextEdgeName); |
michael@0 | 2121 | mNextEdgeName.Truncate(); |
michael@0 | 2122 | } |
michael@0 | 2123 | |
michael@0 | 2124 | if (xpc_GCThingIsGrayCCThing(child) || MOZ_UNLIKELY(WantAllTraces())) { |
michael@0 | 2125 | if (JS::Zone *zone = MergeZone(child)) { |
michael@0 | 2126 | NoteChild(zone, mJSZoneParticipant, edgeName); |
michael@0 | 2127 | } else { |
michael@0 | 2128 | NoteChild(child, mJSParticipant, edgeName); |
michael@0 | 2129 | } |
michael@0 | 2130 | } |
michael@0 | 2131 | } |
michael@0 | 2132 | |
michael@0 | 2133 | NS_IMETHODIMP_(void) |
michael@0 | 2134 | GCGraphBuilder::NoteNextEdgeName(const char* name) |
michael@0 | 2135 | { |
michael@0 | 2136 | if (WantDebugInfo()) { |
michael@0 | 2137 | mNextEdgeName = name; |
michael@0 | 2138 | } |
michael@0 | 2139 | } |
michael@0 | 2140 | |
michael@0 | 2141 | PtrInfo* |
michael@0 | 2142 | GCGraphBuilder::AddWeakMapNode(void *node) |
michael@0 | 2143 | { |
michael@0 | 2144 | MOZ_ASSERT(node, "Weak map node should be non-null."); |
michael@0 | 2145 | |
michael@0 | 2146 | if (!xpc_GCThingIsGrayCCThing(node) && !WantAllTraces()) |
michael@0 | 2147 | return nullptr; |
michael@0 | 2148 | |
michael@0 | 2149 | if (JS::Zone *zone = MergeZone(node)) { |
michael@0 | 2150 | return AddNode(zone, mJSZoneParticipant); |
michael@0 | 2151 | } else { |
michael@0 | 2152 | return AddNode(node, mJSParticipant); |
michael@0 | 2153 | } |
michael@0 | 2154 | } |
michael@0 | 2155 | |
michael@0 | 2156 | NS_IMETHODIMP_(void) |
michael@0 | 2157 | GCGraphBuilder::NoteWeakMapping(void *map, void *key, void *kdelegate, void *val) |
michael@0 | 2158 | { |
michael@0 | 2159 | // Don't try to optimize away the entry here, as we've already attempted to |
michael@0 | 2160 | // do that in TraceWeakMapping in nsXPConnect. |
michael@0 | 2161 | WeakMapping *mapping = mGraph.mWeakMaps.AppendElement(); |
michael@0 | 2162 | mapping->mMap = map ? AddWeakMapNode(map) : nullptr; |
michael@0 | 2163 | mapping->mKey = key ? AddWeakMapNode(key) : nullptr; |
michael@0 | 2164 | mapping->mKeyDelegate = kdelegate ? AddWeakMapNode(kdelegate) : mapping->mKey; |
michael@0 | 2165 | mapping->mVal = val ? AddWeakMapNode(val) : nullptr; |
michael@0 | 2166 | |
michael@0 | 2167 | if (mListener) { |
michael@0 | 2168 | mListener->NoteWeakMapEntry((uint64_t)map, (uint64_t)key, |
michael@0 | 2169 | (uint64_t)kdelegate, (uint64_t)val); |
michael@0 | 2170 | } |
michael@0 | 2171 | } |
michael@0 | 2172 | |
michael@0 | 2173 | static bool |
michael@0 | 2174 | AddPurpleRoot(GCGraphBuilder &aBuilder, void *aRoot, nsCycleCollectionParticipant *aParti) |
michael@0 | 2175 | { |
michael@0 | 2176 | CanonicalizeParticipant(&aRoot, &aParti); |
michael@0 | 2177 | |
michael@0 | 2178 | if (aBuilder.WantAllTraces() || !aParti->CanSkipInCC(aRoot)) { |
michael@0 | 2179 | PtrInfo *pinfo = aBuilder.AddNode(aRoot, aParti); |
michael@0 | 2180 | if (!pinfo) { |
michael@0 | 2181 | return false; |
michael@0 | 2182 | } |
michael@0 | 2183 | } |
michael@0 | 2184 | |
michael@0 | 2185 | return true; |
michael@0 | 2186 | } |
michael@0 | 2187 | |
michael@0 | 2188 | // MayHaveChild() will be false after a Traverse if the object does |
michael@0 | 2189 | // not have any children the CC will visit. |
michael@0 | 2190 | class ChildFinder : public nsCycleCollectionTraversalCallback |
michael@0 | 2191 | { |
michael@0 | 2192 | public: |
michael@0 | 2193 | ChildFinder() : mMayHaveChild(false) {} |
michael@0 | 2194 | |
michael@0 | 2195 | // The logic of the Note*Child functions must mirror that of their |
michael@0 | 2196 | // respective functions in GCGraphBuilder. |
michael@0 | 2197 | NS_IMETHOD_(void) NoteXPCOMChild(nsISupports *child); |
michael@0 | 2198 | NS_IMETHOD_(void) NoteNativeChild(void *child, |
michael@0 | 2199 | nsCycleCollectionParticipant *helper); |
michael@0 | 2200 | NS_IMETHOD_(void) NoteJSChild(void *child); |
michael@0 | 2201 | |
michael@0 | 2202 | NS_IMETHOD_(void) DescribeRefCountedNode(nsrefcnt refcount, |
michael@0 | 2203 | const char *objname) {} |
michael@0 | 2204 | NS_IMETHOD_(void) DescribeGCedNode(bool ismarked, |
michael@0 | 2205 | const char *objname, |
michael@0 | 2206 | uint64_t aCompartmentAddress) {} |
michael@0 | 2207 | NS_IMETHOD_(void) NoteNextEdgeName(const char* name) {} |
michael@0 | 2208 | bool MayHaveChild() { |
michael@0 | 2209 | return mMayHaveChild; |
michael@0 | 2210 | } |
michael@0 | 2211 | private: |
michael@0 | 2212 | bool mMayHaveChild; |
michael@0 | 2213 | }; |
michael@0 | 2214 | |
michael@0 | 2215 | NS_IMETHODIMP_(void) |
michael@0 | 2216 | ChildFinder::NoteXPCOMChild(nsISupports *child) |
michael@0 | 2217 | { |
michael@0 | 2218 | if (!child || !(child = CanonicalizeXPCOMParticipant(child))) |
michael@0 | 2219 | return; |
michael@0 | 2220 | nsXPCOMCycleCollectionParticipant *cp; |
michael@0 | 2221 | ToParticipant(child, &cp); |
michael@0 | 2222 | if (cp && !cp->CanSkip(child, true)) |
michael@0 | 2223 | mMayHaveChild = true; |
michael@0 | 2224 | } |
michael@0 | 2225 | |
michael@0 | 2226 | NS_IMETHODIMP_(void) |
michael@0 | 2227 | ChildFinder::NoteNativeChild(void *child, |
michael@0 | 2228 | nsCycleCollectionParticipant *helper) |
michael@0 | 2229 | { |
michael@0 | 2230 | if (child) |
michael@0 | 2231 | mMayHaveChild = true; |
michael@0 | 2232 | } |
michael@0 | 2233 | |
michael@0 | 2234 | NS_IMETHODIMP_(void) |
michael@0 | 2235 | ChildFinder::NoteJSChild(void *child) |
michael@0 | 2236 | { |
michael@0 | 2237 | if (child && xpc_GCThingIsGrayCCThing(child)) { |
michael@0 | 2238 | mMayHaveChild = true; |
michael@0 | 2239 | } |
michael@0 | 2240 | } |
michael@0 | 2241 | |
michael@0 | 2242 | static bool |
michael@0 | 2243 | MayHaveChild(void *o, nsCycleCollectionParticipant* cp) |
michael@0 | 2244 | { |
michael@0 | 2245 | ChildFinder cf; |
michael@0 | 2246 | cp->Traverse(o, cf); |
michael@0 | 2247 | return cf.MayHaveChild(); |
michael@0 | 2248 | } |
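// [editorial note] MayHaveChild() runs a full Traverse just to learn
// whether any child would survive the CanSkip filters. A sketch of the
// intended use (see RemoveSkippableVisitor::Visit below):
//
//     if (!MayHaveChild(o, cp)) {
//         // A node with no CC-visible children cannot be keeping a
//         // garbage cycle alive, so it is safe to drop it from the
//         // purple buffer.
//     }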
michael@0 | 2249 | |
michael@0 | 2250 | template<class T> |
michael@0 | 2251 | class SegmentedArrayElement : public LinkedListElement<SegmentedArrayElement<T>> |
michael@0 | 2252 | , public AutoFallibleTArray<T, 60> |
michael@0 | 2253 | { |
michael@0 | 2254 | }; |
michael@0 | 2255 | |
michael@0 | 2256 | template<class T> |
michael@0 | 2257 | class SegmentedArray |
michael@0 | 2258 | { |
michael@0 | 2259 | public: |
michael@0 | 2260 | ~SegmentedArray() |
michael@0 | 2261 | { |
michael@0 | 2262 | MOZ_ASSERT(IsEmpty()); |
michael@0 | 2263 | } |
michael@0 | 2264 | |
michael@0 | 2265 | void AppendElement(T& aElement) |
michael@0 | 2266 | { |
michael@0 | 2267 | SegmentedArrayElement<T>* last = mSegments.getLast(); |
michael@0 | 2268 | if (!last || last->Length() == last->Capacity()) { |
michael@0 | 2269 | last = new SegmentedArrayElement<T>(); |
michael@0 | 2270 | mSegments.insertBack(last); |
michael@0 | 2271 | } |
michael@0 | 2272 | last->AppendElement(aElement); |
michael@0 | 2273 | } |
michael@0 | 2274 | |
michael@0 | 2275 | void Clear() |
michael@0 | 2276 | { |
michael@0 | 2277 | SegmentedArrayElement<T>* first; |
michael@0 | 2278 | while ((first = mSegments.popFirst())) { |
michael@0 | 2279 | delete first; |
michael@0 | 2280 | } |
michael@0 | 2281 | } |
michael@0 | 2282 | |
michael@0 | 2283 | SegmentedArrayElement<T>* GetFirstSegment() |
michael@0 | 2284 | { |
michael@0 | 2285 | return mSegments.getFirst(); |
michael@0 | 2286 | } |
michael@0 | 2287 | |
michael@0 | 2288 | bool IsEmpty() |
michael@0 | 2289 | { |
michael@0 | 2290 | return !GetFirstSegment(); |
michael@0 | 2291 | } |
michael@0 | 2292 | |
michael@0 | 2293 | private: |
michael@0 | 2294 | mozilla::LinkedList<SegmentedArrayElement<T>> mSegments; |
michael@0 | 2295 | }; |
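// [editorial sketch] Minimal use of SegmentedArray (uint32_t is an
// arbitrary element type). Segments hold 60 elements and are allocated
// lazily; note the destructor asserts emptiness, so Clear() must run
// before the array goes away:
//
//     SegmentedArray<uint32_t> arr;
//     uint32_t v = 42;
//     arr.AppendElement(v);       // allocates the first segment
//     MOZ_ASSERT(!arr.IsEmpty());
//     arr.Clear();                // frees all segments at once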
michael@0 | 2296 | |
michael@0 | 2297 | // JSPurpleBuffer keeps references to GCThings which might affect the |
michael@0 | 2298 | // next cycle collection. It is owned only by itself: during unlink its |
michael@0 | 2299 | // self reference is broken and the object ends up destroying itself. |
michael@0 | 2300 | // If a GC happens before the CC, references to GCThings and the self |
michael@0 | 2301 | // reference are removed. |
michael@0 | 2302 | class JSPurpleBuffer |
michael@0 | 2303 | { |
michael@0 | 2304 | public: |
michael@0 | 2305 | JSPurpleBuffer(JSPurpleBuffer*& aReferenceToThis) |
michael@0 | 2306 | : mReferenceToThis(aReferenceToThis) |
michael@0 | 2307 | { |
michael@0 | 2308 | mReferenceToThis = this; |
michael@0 | 2309 | NS_ADDREF_THIS(); |
michael@0 | 2310 | mozilla::HoldJSObjects(this); |
michael@0 | 2311 | } |
michael@0 | 2312 | |
michael@0 | 2313 | ~JSPurpleBuffer() |
michael@0 | 2314 | { |
michael@0 | 2315 | MOZ_ASSERT(mValues.IsEmpty()); |
michael@0 | 2316 | MOZ_ASSERT(mObjects.IsEmpty()); |
michael@0 | 2317 | MOZ_ASSERT(mTenuredObjects.IsEmpty()); |
michael@0 | 2318 | } |
michael@0 | 2319 | |
michael@0 | 2320 | void Destroy() |
michael@0 | 2321 | { |
michael@0 | 2322 | mReferenceToThis = nullptr; |
michael@0 | 2323 | mValues.Clear(); |
michael@0 | 2324 | mObjects.Clear(); |
michael@0 | 2325 | mTenuredObjects.Clear(); |
michael@0 | 2326 | mozilla::DropJSObjects(this); |
michael@0 | 2327 | NS_RELEASE_THIS(); |
michael@0 | 2328 | } |
michael@0 | 2329 | |
michael@0 | 2330 | NS_INLINE_DECL_CYCLE_COLLECTING_NATIVE_REFCOUNTING(JSPurpleBuffer) |
michael@0 | 2331 | NS_DECL_CYCLE_COLLECTION_SCRIPT_HOLDER_NATIVE_CLASS(JSPurpleBuffer) |
michael@0 | 2332 | |
michael@0 | 2333 | JSPurpleBuffer*& mReferenceToThis; |
michael@0 | 2334 | SegmentedArray<JS::Heap<JS::Value>> mValues; |
michael@0 | 2335 | SegmentedArray<JS::Heap<JSObject*>> mObjects; |
michael@0 | 2336 | SegmentedArray<JS::TenuredHeap<JSObject*>> mTenuredObjects; |
michael@0 | 2337 | }; |
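// [editorial sketch] The self-ownership lifecycle described above, in
// miniature (buf stands in for the collector's mJSPurpleBuffer slot):
//
//     JSPurpleBuffer* buf = nullptr;
//     new JSPurpleBuffer(buf);   // refcnt -> 1, buf == the new object,
//                                // JS heap references are now held
//     // ... GCThings accumulate in mValues/mObjects/mTenuredObjects ...
//     buf->Destroy();            // clears the arrays, drops the JS
//                                // references, nulls out buf, and
//                                // releases the self reference, which
//                                // deletes the object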
michael@0 | 2338 | |
michael@0 | 2339 | NS_IMPL_CYCLE_COLLECTION_CLASS(JSPurpleBuffer) |
michael@0 | 2340 | |
michael@0 | 2341 | NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN(JSPurpleBuffer) |
michael@0 | 2342 | tmp->Destroy(); |
michael@0 | 2343 | NS_IMPL_CYCLE_COLLECTION_UNLINK_END |
michael@0 | 2344 | |
michael@0 | 2345 | NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN(JSPurpleBuffer) |
michael@0 | 2346 | CycleCollectionNoteChild(cb, tmp, "self"); |
michael@0 | 2347 | NS_IMPL_CYCLE_COLLECTION_TRAVERSE_SCRIPT_OBJECTS |
michael@0 | 2348 | NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END |
michael@0 | 2349 | |
michael@0 | 2350 | #define NS_TRACE_SEGMENTED_ARRAY(_field) \ |
michael@0 | 2351 | { \ |
michael@0 | 2352 | auto segment = tmp->_field.GetFirstSegment(); \ |
michael@0 | 2353 | while (segment) { \ |
michael@0 | 2354 | for (uint32_t i = segment->Length(); i > 0;) { \ |
michael@0 | 2355 | aCallbacks.Trace(&segment->ElementAt(--i), #_field, aClosure); \ |
michael@0 | 2356 | } \ |
michael@0 | 2357 | segment = segment->getNext(); \ |
michael@0 | 2358 | } \ |
michael@0 | 2359 | } |
michael@0 | 2360 | |
michael@0 | 2361 | NS_IMPL_CYCLE_COLLECTION_TRACE_BEGIN(JSPurpleBuffer) |
michael@0 | 2362 | NS_TRACE_SEGMENTED_ARRAY(mValues) |
michael@0 | 2363 | NS_TRACE_SEGMENTED_ARRAY(mObjects) |
michael@0 | 2364 | NS_TRACE_SEGMENTED_ARRAY(mTenuredObjects) |
michael@0 | 2365 | NS_IMPL_CYCLE_COLLECTION_TRACE_END |
michael@0 | 2366 | |
michael@0 | 2367 | NS_IMPL_CYCLE_COLLECTION_ROOT_NATIVE(JSPurpleBuffer, AddRef) |
michael@0 | 2368 | NS_IMPL_CYCLE_COLLECTION_UNROOT_NATIVE(JSPurpleBuffer, Release) |
michael@0 | 2369 | |
michael@0 | 2370 | struct SnowWhiteObject |
michael@0 | 2371 | { |
michael@0 | 2372 | void* mPointer; |
michael@0 | 2373 | nsCycleCollectionParticipant* mParticipant; |
michael@0 | 2374 | nsCycleCollectingAutoRefCnt* mRefCnt; |
michael@0 | 2375 | }; |
michael@0 | 2376 | |
michael@0 | 2377 | class SnowWhiteKiller : public TraceCallbacks |
michael@0 | 2378 | { |
michael@0 | 2379 | public: |
michael@0 | 2380 | SnowWhiteKiller(nsCycleCollector *aCollector, uint32_t aMaxCount) |
michael@0 | 2381 | : mCollector(aCollector) |
michael@0 | 2382 | { |
michael@0 | 2383 | MOZ_ASSERT(mCollector, "Calling SnowWhiteKiller after nsCC went away"); |
michael@0 | 2384 | while (true) { |
michael@0 | 2385 | if (mObjects.SetCapacity(aMaxCount)) { |
michael@0 | 2386 | break; |
michael@0 | 2387 | } |
michael@0 | 2388 | if (aMaxCount == 1) { |
michael@0 | 2389 | NS_RUNTIMEABORT("Not enough memory to even delete objects!"); |
michael@0 | 2390 | } |
michael@0 | 2391 | aMaxCount /= 2; |
michael@0 | 2392 | } |
michael@0 | 2393 | } |
michael@0 | 2394 | |
michael@0 | 2395 | ~SnowWhiteKiller() |
michael@0 | 2396 | { |
michael@0 | 2397 | for (uint32_t i = 0; i < mObjects.Length(); ++i) { |
michael@0 | 2398 | SnowWhiteObject& o = mObjects[i]; |
michael@0 | 2399 | if (!o.mRefCnt->get() && !o.mRefCnt->IsInPurpleBuffer()) { |
michael@0 | 2400 | mCollector->RemoveObjectFromGraph(o.mPointer); |
michael@0 | 2401 | o.mRefCnt->stabilizeForDeletion(); |
michael@0 | 2402 | o.mParticipant->Trace(o.mPointer, *this, nullptr); |
michael@0 | 2403 | o.mParticipant->DeleteCycleCollectable(o.mPointer); |
michael@0 | 2404 | } |
michael@0 | 2405 | } |
michael@0 | 2406 | } |
michael@0 | 2407 | |
michael@0 | 2408 | void |
michael@0 | 2409 | Visit(nsPurpleBuffer& aBuffer, nsPurpleBufferEntry* aEntry) |
michael@0 | 2410 | { |
michael@0 | 2411 | MOZ_ASSERT(aEntry->mObject, "Null object in purple buffer"); |
michael@0 | 2412 | if (!aEntry->mRefCnt->get()) { |
michael@0 | 2413 | void *o = aEntry->mObject; |
michael@0 | 2414 | nsCycleCollectionParticipant *cp = aEntry->mParticipant; |
michael@0 | 2415 | CanonicalizeParticipant(&o, &cp); |
michael@0 | 2416 | SnowWhiteObject swo = { o, cp, aEntry->mRefCnt }; |
michael@0 | 2417 | if (mObjects.AppendElement(swo)) { |
michael@0 | 2418 | aBuffer.Remove(aEntry); |
michael@0 | 2419 | } |
michael@0 | 2420 | } |
michael@0 | 2421 | } |
michael@0 | 2422 | |
michael@0 | 2423 | bool HasSnowWhiteObjects() const |
michael@0 | 2424 | { |
michael@0 | 2425 | return mObjects.Length() > 0; |
michael@0 | 2426 | } |
michael@0 | 2427 | |
michael@0 | 2428 | virtual void Trace(JS::Heap<JS::Value>* aValue, const char* aName, |
michael@0 | 2429 | void* aClosure) const |
michael@0 | 2430 | { |
michael@0 | 2431 | if (aValue->isMarkable()) { |
michael@0 | 2432 | void* thing = aValue->toGCThing(); |
michael@0 | 2433 | if (thing && xpc_GCThingIsGrayCCThing(thing)) { |
michael@0 | 2434 | mCollector->GetJSPurpleBuffer()->mValues.AppendElement(*aValue); |
michael@0 | 2435 | } |
michael@0 | 2436 | } |
michael@0 | 2437 | } |
michael@0 | 2438 | |
michael@0 | 2439 | virtual void Trace(JS::Heap<jsid>* aId, const char* aName, |
michael@0 | 2440 | void* aClosure) const |
michael@0 | 2441 | { |
michael@0 | 2442 | } |
michael@0 | 2443 | |
michael@0 | 2444 | virtual void Trace(JS::Heap<JSObject*>* aObject, const char* aName, |
michael@0 | 2445 | void* aClosure) const |
michael@0 | 2446 | { |
michael@0 | 2447 | if (*aObject && xpc_GCThingIsGrayCCThing(*aObject)) { |
michael@0 | 2448 | mCollector->GetJSPurpleBuffer()->mObjects.AppendElement(*aObject); |
michael@0 | 2449 | } |
michael@0 | 2450 | } |
michael@0 | 2451 | |
michael@0 | 2452 | virtual void Trace(JS::TenuredHeap<JSObject*>* aObject, const char* aName, |
michael@0 | 2453 | void* aClosure) const |
michael@0 | 2454 | { |
michael@0 | 2455 | if (*aObject && xpc_GCThingIsGrayCCThing(*aObject)) { |
michael@0 | 2456 | mCollector->GetJSPurpleBuffer()->mTenuredObjects.AppendElement(*aObject); |
michael@0 | 2457 | } |
michael@0 | 2458 | } |
michael@0 | 2459 | |
michael@0 | 2460 | virtual void Trace(JS::Heap<JSString*>* aString, const char* aName, |
michael@0 | 2461 | void* aClosure) const |
michael@0 | 2462 | { |
michael@0 | 2463 | } |
michael@0 | 2464 | |
michael@0 | 2465 | virtual void Trace(JS::Heap<JSScript*>* aScript, const char* aName, |
michael@0 | 2466 | void* aClosure) const |
michael@0 | 2467 | { |
michael@0 | 2468 | } |
michael@0 | 2469 | |
michael@0 | 2470 | virtual void Trace(JS::Heap<JSFunction*>* aFunction, const char* aName, |
michael@0 | 2471 | void* aClosure) const |
michael@0 | 2472 | { |
michael@0 | 2473 | } |
michael@0 | 2474 | |
michael@0 | 2475 | private: |
michael@0 | 2476 | nsCycleCollector *mCollector; |
michael@0 | 2477 | FallibleTArray<SnowWhiteObject> mObjects; |
michael@0 | 2478 | }; |
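// [editorial sketch] The SnowWhiteKiller constructor degrades
// gracefully under memory pressure: it halves the requested capacity
// until the fallible allocation succeeds, aborting only if even a
// single element cannot be reserved. The same back-off expressed
// standalone (std::vector stands in for the fallible array, so failure
// arrives as bad_alloc):
//
//     #include <new>
//     #include <vector>
//
//     template <typename T>
//     void ReserveWithBackoff(std::vector<T> &v, size_t want)
//     {
//         while (true) {
//             try {
//                 v.reserve(want);
//                 return;
//             } catch (const std::bad_alloc &) {
//                 if (want <= 1)
//                     throw;      // genuinely out of memory
//                 want /= 2;      // back off and retry
//             }
//         }
//     }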
michael@0 | 2479 | |
michael@0 | 2480 | class RemoveSkippableVisitor : public SnowWhiteKiller |
michael@0 | 2481 | { |
michael@0 | 2482 | public: |
michael@0 | 2483 | RemoveSkippableVisitor(nsCycleCollector* aCollector, |
michael@0 | 2484 | uint32_t aMaxCount, bool aRemoveChildlessNodes, |
michael@0 | 2485 | bool aAsyncSnowWhiteFreeing, |
michael@0 | 2486 | CC_ForgetSkippableCallback aCb) |
michael@0 | 2487 | : SnowWhiteKiller(aCollector, aAsyncSnowWhiteFreeing ? 0 : aMaxCount), |
michael@0 | 2488 | mRemoveChildlessNodes(aRemoveChildlessNodes), |
michael@0 | 2489 | mAsyncSnowWhiteFreeing(aAsyncSnowWhiteFreeing), |
michael@0 | 2490 | mDispatchedDeferredDeletion(false), |
michael@0 | 2491 | mCallback(aCb) |
michael@0 | 2492 | {} |
michael@0 | 2493 | |
michael@0 | 2494 | ~RemoveSkippableVisitor() |
michael@0 | 2495 | { |
michael@0 | 2496 | // Note, we must call the callback before SnowWhiteKiller calls |
michael@0 | 2497 | // DeleteCycleCollectable! |
michael@0 | 2498 | if (mCallback) { |
michael@0 | 2499 | mCallback(); |
michael@0 | 2500 | } |
michael@0 | 2501 | if (HasSnowWhiteObjects()) { |
michael@0 | 2502 | // Effectively a continuation. |
michael@0 | 2503 | nsCycleCollector_dispatchDeferredDeletion(true); |
michael@0 | 2504 | } |
michael@0 | 2505 | } |
michael@0 | 2506 | |
michael@0 | 2507 | void |
michael@0 | 2508 | Visit(nsPurpleBuffer &aBuffer, nsPurpleBufferEntry *aEntry) |
michael@0 | 2509 | { |
michael@0 | 2510 | MOZ_ASSERT(aEntry->mObject, "null mObject in purple buffer"); |
michael@0 | 2511 | if (!aEntry->mRefCnt->get()) { |
michael@0 | 2512 | if (!mAsyncSnowWhiteFreeing) { |
michael@0 | 2513 | SnowWhiteKiller::Visit(aBuffer, aEntry); |
michael@0 | 2514 | } else if (!mDispatchedDeferredDeletion) { |
michael@0 | 2515 | mDispatchedDeferredDeletion = true; |
michael@0 | 2516 | nsCycleCollector_dispatchDeferredDeletion(false); |
michael@0 | 2517 | } |
michael@0 | 2518 | return; |
michael@0 | 2519 | } |
michael@0 | 2520 | void *o = aEntry->mObject; |
michael@0 | 2521 | nsCycleCollectionParticipant *cp = aEntry->mParticipant; |
michael@0 | 2522 | CanonicalizeParticipant(&o, &cp); |
michael@0 | 2523 | if (aEntry->mRefCnt->IsPurple() && !cp->CanSkip(o, false) && |
michael@0 | 2524 | (!mRemoveChildlessNodes || MayHaveChild(o, cp))) { |
michael@0 | 2525 | return; |
michael@0 | 2526 | } |
michael@0 | 2527 | aBuffer.Remove(aEntry); |
michael@0 | 2528 | } |
michael@0 | 2529 | |
michael@0 | 2530 | private: |
michael@0 | 2531 | bool mRemoveChildlessNodes; |
michael@0 | 2532 | bool mAsyncSnowWhiteFreeing; |
michael@0 | 2533 | bool mDispatchedDeferredDeletion; |
michael@0 | 2534 | CC_ForgetSkippableCallback mCallback; |
michael@0 | 2535 | }; |
michael@0 | 2536 | |
michael@0 | 2537 | void |
michael@0 | 2538 | nsPurpleBuffer::RemoveSkippable(nsCycleCollector* aCollector, |
michael@0 | 2539 | bool aRemoveChildlessNodes, |
michael@0 | 2540 | bool aAsyncSnowWhiteFreeing, |
michael@0 | 2541 | CC_ForgetSkippableCallback aCb) |
michael@0 | 2542 | { |
michael@0 | 2543 | RemoveSkippableVisitor visitor(aCollector, Count(), aRemoveChildlessNodes, |
michael@0 | 2544 | aAsyncSnowWhiteFreeing, aCb); |
michael@0 | 2545 | VisitEntries(visitor); |
michael@0 | 2546 | } |
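//
// RemoveSkippableVisitor::Visit, restated as a decision table (illustrative
// pseudocode, not compiled):
//
//   refcnt == 0                          -> free now (sync), or dispatch a
//                                           single deferred deletion (async)
//   purple && !CanSkip(o, false) &&
//     (may have children ||
//      keeping childless nodes)          -> keep the entry in the buffer
//   otherwise                            -> remove the entry; it cannot
//                                           currently root a garbage cycle
//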
michael@0 | 2547 | |
michael@0 | 2548 | bool |
michael@0 | 2549 | nsCycleCollector::FreeSnowWhite(bool aUntilNoSWInPurpleBuffer) |
michael@0 | 2550 | { |
michael@0 | 2551 | CheckThreadSafety(); |
michael@0 | 2552 | |
michael@0 | 2553 | if (mFreeingSnowWhite) { |
michael@0 | 2554 | return false; |
michael@0 | 2555 | } |
michael@0 | 2556 | |
michael@0 | 2557 | AutoRestore<bool> ar(mFreeingSnowWhite); |
michael@0 | 2558 | mFreeingSnowWhite = true; |
michael@0 | 2559 | |
michael@0 | 2560 | bool hadSnowWhiteObjects = false; |
michael@0 | 2561 | do { |
michael@0 | 2562 | SnowWhiteKiller visitor(this, mPurpleBuf.Count()); |
michael@0 | 2563 | mPurpleBuf.VisitEntries(visitor); |
michael@0 | 2564 | hadSnowWhiteObjects = hadSnowWhiteObjects || |
michael@0 | 2565 | visitor.HasSnowWhiteObjects(); |
michael@0 | 2566 | if (!visitor.HasSnowWhiteObjects()) { |
michael@0 | 2567 | break; |
michael@0 | 2568 | } |
michael@0 | 2569 | } while (aUntilNoSWInPurpleBuffer); |
michael@0 | 2570 | return hadSnowWhiteObjects; |
michael@0 | 2571 | } |
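//
// Why the loop may need several passes (worked example): suppose A is
// snow-white and holds the only reference to B. Killing A releases B, whose
// refcount drops to zero, so B only becomes snow-white in the purple buffer
// after the first pass. With aUntilNoSWInPurpleBuffer set, the next pass
// frees B as well; otherwise B waits for a later call.
//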
michael@0 | 2572 | |
michael@0 | 2573 | void |
michael@0 | 2574 | nsCycleCollector::ForgetSkippable(bool aRemoveChildlessNodes, |
michael@0 | 2575 | bool aAsyncSnowWhiteFreeing) |
michael@0 | 2576 | { |
michael@0 | 2577 | CheckThreadSafety(); |
michael@0 | 2578 | |
michael@0 | 2579 | // If we remove things from the purple buffer during graph building, we may |
michael@0 | 2580 | // lose track of an object that was mutated during graph building. |
michael@0 | 2581 | MOZ_ASSERT(mIncrementalPhase == IdlePhase); |
michael@0 | 2582 | |
michael@0 | 2583 | if (mJSRuntime) { |
michael@0 | 2584 | mJSRuntime->PrepareForForgetSkippable(); |
michael@0 | 2585 | } |
michael@0 | 2586 | MOZ_ASSERT(!mScanInProgress, "Don't forget skippable or free snow-white while scan is in progress."); |
michael@0 | 2587 | mPurpleBuf.RemoveSkippable(this, aRemoveChildlessNodes, |
michael@0 | 2588 | aAsyncSnowWhiteFreeing, mForgetSkippableCB); |
michael@0 | 2589 | } |
michael@0 | 2590 | |
michael@0 | 2591 | MOZ_NEVER_INLINE void |
michael@0 | 2592 | nsCycleCollector::MarkRoots(SliceBudget &aBudget) |
michael@0 | 2593 | { |
michael@0 | 2594 | const intptr_t kNumNodesBetweenTimeChecks = 1000; |
michael@0 | 2595 | const intptr_t kStep = SliceBudget::CounterReset / kNumNodesBetweenTimeChecks; |
michael@0 | 2596 | |
michael@0 | 2597 | TimeLog timeLog; |
michael@0 | 2598 | AutoRestore<bool> ar(mScanInProgress); |
michael@0 | 2599 | MOZ_ASSERT(!mScanInProgress); |
michael@0 | 2600 | mScanInProgress = true; |
michael@0 | 2601 | MOZ_ASSERT(mIncrementalPhase == GraphBuildingPhase); |
michael@0 | 2602 | MOZ_ASSERT(mCurrNode); |
michael@0 | 2603 | |
michael@0 | 2604 | while (!aBudget.isOverBudget() && !mCurrNode->IsDone()) { |
michael@0 | 2605 | PtrInfo *pi = mCurrNode->GetNext(); |
michael@0 | 2606 | if (!pi) { |
michael@0 | 2607 | MOZ_CRASH(); |
michael@0 | 2608 | } |
michael@0 | 2609 | |
michael@0 | 2610 | // We need to call the builder's Traverse() method even on deleted |
michael@0 | 2611 | // nodes, so that their firstChild() gets set; a prior, non-deleted |
michael@0 | 2612 | // neighbor may still read it. |
michael@0 | 2613 | mBuilder->Traverse(pi); |
michael@0 | 2614 | if (mCurrNode->AtBlockEnd()) { |
michael@0 | 2615 | mBuilder->SetLastChild(); |
michael@0 | 2616 | } |
michael@0 | 2617 | aBudget.step(kStep); |
michael@0 | 2618 | } |
michael@0 | 2619 | |
michael@0 | 2620 | if (!mCurrNode->IsDone()) { |
michael@0 | 2621 | timeLog.Checkpoint("MarkRoots()"); |
michael@0 | 2622 | return; |
michael@0 | 2623 | } |
michael@0 | 2624 | |
michael@0 | 2625 | if (mGraph.mRootCount > 0) { |
michael@0 | 2626 | mBuilder->SetLastChild(); |
michael@0 | 2627 | } |
michael@0 | 2628 | |
michael@0 | 2629 | if (mBuilder->RanOutOfMemory()) { |
michael@0 | 2630 | MOZ_ASSERT(false, "Ran out of memory while building cycle collector graph"); |
michael@0 | 2631 | CC_TELEMETRY(_OOM, true); |
michael@0 | 2632 | } |
michael@0 | 2633 | |
michael@0 | 2634 | mBuilder = nullptr; |
michael@0 | 2635 | mCurrNode = nullptr; |
michael@0 | 2636 | mIncrementalPhase = ScanAndCollectWhitePhase; |
michael@0 | 2637 | timeLog.Checkpoint("MarkRoots()"); |
michael@0 | 2638 | } |
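//
// The kStep arithmetic, spelled out (descriptive; see SliceBudget for the
// authoritative behavior): step() only consults the clock once the internal
// counter, which starts at CounterReset, is used up. Stepping by
// CounterReset / kNumNodesBetweenTimeChecks per node therefore triggers a
// real time check roughly once every 1000 traversed nodes, keeping the
// per-node overhead low.
//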
michael@0 | 2639 | |
michael@0 | 2640 | |
michael@0 | 2641 | //////////////////////////////////////////////////////////////////////// |
michael@0 | 2642 | // Bacon & Rajan's |ScanRoots| routine. |
michael@0 | 2643 | //////////////////////////////////////////////////////////////////////// |
michael@0 | 2644 | |
michael@0 | 2645 | |
michael@0 | 2646 | struct ScanBlackVisitor |
michael@0 | 2647 | { |
michael@0 | 2648 | ScanBlackVisitor(uint32_t &aWhiteNodeCount, bool &aFailed) |
michael@0 | 2649 | : mWhiteNodeCount(aWhiteNodeCount), mFailed(aFailed) |
michael@0 | 2650 | { |
michael@0 | 2651 | } |
michael@0 | 2652 | |
michael@0 | 2653 | bool ShouldVisitNode(PtrInfo const *pi) |
michael@0 | 2654 | { |
michael@0 | 2655 | return pi->mColor != black; |
michael@0 | 2656 | } |
michael@0 | 2657 | |
michael@0 | 2658 | MOZ_NEVER_INLINE void VisitNode(PtrInfo *pi) |
michael@0 | 2659 | { |
michael@0 | 2660 | if (pi->mColor == white) |
michael@0 | 2661 | --mWhiteNodeCount; |
michael@0 | 2662 | pi->mColor = black; |
michael@0 | 2663 | } |
michael@0 | 2664 | |
michael@0 | 2665 | void Failed() |
michael@0 | 2666 | { |
michael@0 | 2667 | mFailed = true; |
michael@0 | 2668 | } |
michael@0 | 2669 | |
michael@0 | 2670 | private: |
michael@0 | 2671 | uint32_t &mWhiteNodeCount; |
michael@0 | 2672 | bool &mFailed; |
michael@0 | 2673 | }; |
michael@0 | 2674 | |
michael@0 | 2675 | |
michael@0 | 2676 | struct scanVisitor |
michael@0 | 2677 | { |
michael@0 | 2678 | scanVisitor(uint32_t &aWhiteNodeCount, bool &aFailed, bool aWasIncremental) |
michael@0 | 2679 | : mWhiteNodeCount(aWhiteNodeCount), mFailed(aFailed), |
michael@0 | 2680 | mWasIncremental(aWasIncremental) |
michael@0 | 2681 | { |
michael@0 | 2682 | } |
michael@0 | 2683 | |
michael@0 | 2684 | bool ShouldVisitNode(PtrInfo const *pi) |
michael@0 | 2685 | { |
michael@0 | 2686 | return pi->mColor == grey; |
michael@0 | 2687 | } |
michael@0 | 2688 | |
michael@0 | 2689 | MOZ_NEVER_INLINE void VisitNode(PtrInfo *pi) |
michael@0 | 2690 | { |
michael@0 | 2691 | if (pi->mInternalRefs > pi->mRefCount && pi->mRefCount > 0) { |
michael@0 | 2692 | // If we found more references to an object than its ref count, then |
michael@0 | 2693 | // the object should have already been marked as an incremental |
michael@0 | 2694 | // root. Note that this is imprecise, because pi could have been |
michael@0 | 2695 | // marked black for other reasons. Always fault if we weren't |
michael@0 | 2696 | // incremental, as there were no incremental roots in that case. |
michael@0 | 2697 | if (!mWasIncremental || pi->mColor != black) { |
michael@0 | 2698 | Fault("traversed refs exceed refcount", pi); |
michael@0 | 2699 | } |
michael@0 | 2700 | } |
michael@0 | 2701 | |
michael@0 | 2702 | if (pi->mInternalRefs == pi->mRefCount || pi->mRefCount == 0) { |
michael@0 | 2703 | pi->mColor = white; |
michael@0 | 2704 | ++mWhiteNodeCount; |
michael@0 | 2705 | } else { |
michael@0 | 2706 | GraphWalker<ScanBlackVisitor>(ScanBlackVisitor(mWhiteNodeCount, mFailed)).Walk(pi); |
michael@0 | 2707 | MOZ_ASSERT(pi->mColor == black, |
michael@0 | 2708 | "Why didn't ScanBlackVisitor make pi black?"); |
michael@0 | 2709 | } |
michael@0 | 2710 | } |
michael@0 | 2711 | |
michael@0 | 2712 | void Failed() { |
michael@0 | 2713 | mFailed = true; |
michael@0 | 2714 | } |
michael@0 | 2715 | |
michael@0 | 2716 | private: |
michael@0 | 2717 | uint32_t &mWhiteNodeCount; |
michael@0 | 2718 | bool &mFailed; |
michael@0 | 2719 | bool mWasIncremental; |
michael@0 | 2720 | }; |
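//
// A worked example of the scan (illustrative): let A and B point at each
// other, and let C, an object outside the graph, also hold A:
//
//   A: mRefCount = 2, mInternalRefs = 1   (B's ref is in the graph; C's
//                                          is not)
//   B: mRefCount = 1, mInternalRefs = 1   (A's ref)
//
// A has mInternalRefs < mRefCount, so ScanBlackVisitor floods black from A,
// turning B black too: nothing is collected. If C first drops its
// reference, A scans with mInternalRefs == mRefCount, both nodes turn
// white, and CollectWhite() unlinks the cycle.
//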
michael@0 | 2721 | |
michael@0 | 2722 | // Iterate over the WeakMaps. If we mark anything while iterating |
michael@0 | 2723 | // over the WeakMaps, we must iterate over all of the WeakMaps again. |
michael@0 | 2724 | void |
michael@0 | 2725 | nsCycleCollector::ScanWeakMaps() |
michael@0 | 2726 | { |
michael@0 | 2727 | bool anyChanged; |
michael@0 | 2728 | bool failed = false; |
michael@0 | 2729 | do { |
michael@0 | 2730 | anyChanged = false; |
michael@0 | 2731 | for (uint32_t i = 0; i < mGraph.mWeakMaps.Length(); i++) { |
michael@0 | 2732 | WeakMapping *wm = &mGraph.mWeakMaps[i]; |
michael@0 | 2733 | |
michael@0 | 2734 | // If any of these are null, the original object was marked black. |
michael@0 | 2735 | uint32_t mColor = wm->mMap ? wm->mMap->mColor : black; |
michael@0 | 2736 | uint32_t kColor = wm->mKey ? wm->mKey->mColor : black; |
michael@0 | 2737 | uint32_t kdColor = wm->mKeyDelegate ? wm->mKeyDelegate->mColor : black; |
michael@0 | 2738 | uint32_t vColor = wm->mVal ? wm->mVal->mColor : black; |
michael@0 | 2739 | |
michael@0 | 2740 | // All non-null weak map maps, keys, key delegates and |
michael@0 | 2741 | // values are roots (in the sense of WalkFromRoots) in the |
michael@0 | 2742 | // cycle collector graph, and thus should have been colored |
michael@0 | 2743 | // either black or white in ScanRoots(). |
michael@0 | 2744 | MOZ_ASSERT(mColor != grey, "Uncolored weak map"); |
michael@0 | 2745 | MOZ_ASSERT(kColor != grey, "Uncolored weak map key"); |
michael@0 | 2746 | MOZ_ASSERT(kdColor != grey, "Uncolored weak map key delegate"); |
michael@0 | 2747 | MOZ_ASSERT(vColor != grey, "Uncolored weak map value"); |
michael@0 | 2748 | |
michael@0 | 2749 | if (mColor == black && kColor != black && kdColor == black) { |
michael@0 | 2750 | GraphWalker<ScanBlackVisitor>(ScanBlackVisitor(mWhiteNodeCount, failed)).Walk(wm->mKey); |
michael@0 | 2751 | anyChanged = true; |
michael@0 | 2752 | } |
michael@0 | 2753 | |
michael@0 | 2754 | if (mColor == black && kColor == black && vColor != black) { |
michael@0 | 2755 | GraphWalker<ScanBlackVisitor>(ScanBlackVisitor(mWhiteNodeCount, failed)).Walk(wm->mVal); |
michael@0 | 2756 | anyChanged = true; |
michael@0 | 2757 | } |
michael@0 | 2758 | } |
michael@0 | 2759 | } while (anyChanged); |
michael@0 | 2760 | |
michael@0 | 2761 | if (failed) { |
michael@0 | 2762 | MOZ_ASSERT(false, "Ran out of memory in ScanWeakMaps"); |
michael@0 | 2763 | CC_TELEMETRY(_OOM, true); |
michael@0 | 2764 | } |
michael@0 | 2765 | } |
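//
// The two Walk() calls above encode the usual weak map liveness rule,
// roughly (illustrative pseudocode):
//
//   if (black(map) && !black(key) && black(keyDelegate))  markBlack(key);
//   if (black(map) && black(key)  && !black(value))       markBlack(value);
//
// iterated to a fixed point, since newly blackened keys and values can make
// further entries live.
//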
michael@0 | 2766 | |
michael@0 | 2767 | // Flood black from any objects in the purple buffer that are in the CC graph. |
michael@0 | 2768 | class PurpleScanBlackVisitor |
michael@0 | 2769 | { |
michael@0 | 2770 | public: |
michael@0 | 2771 | PurpleScanBlackVisitor(GCGraph &aGraph, nsICycleCollectorListener *aListener, |
michael@0 | 2772 | uint32_t &aCount, bool &aFailed) |
michael@0 | 2773 | : mGraph(aGraph), mListener(aListener), mCount(aCount), mFailed(aFailed) |
michael@0 | 2774 | { |
michael@0 | 2775 | } |
michael@0 | 2776 | |
michael@0 | 2777 | void |
michael@0 | 2778 | Visit(nsPurpleBuffer &aBuffer, nsPurpleBufferEntry *aEntry) |
michael@0 | 2779 | { |
michael@0 | 2780 | MOZ_ASSERT(aEntry->mObject, "Entries with null mObject shouldn't be in the purple buffer."); |
michael@0 | 2781 | MOZ_ASSERT(aEntry->mRefCnt->get() != 0, "Snow-white objects shouldn't be in the purple buffer."); |
michael@0 | 2782 | |
michael@0 | 2783 | void *obj = aEntry->mObject; |
michael@0 | 2784 | if (!aEntry->mParticipant) { |
michael@0 | 2785 | obj = CanonicalizeXPCOMParticipant(static_cast<nsISupports*>(obj)); |
michael@0 | 2786 | MOZ_ASSERT(obj, "Don't add objects that don't participate in collection!"); |
michael@0 | 2787 | } |
michael@0 | 2788 | |
michael@0 | 2789 | PtrInfo *pi = mGraph.FindNode(obj); |
michael@0 | 2790 | if (!pi) { |
michael@0 | 2791 | return; |
michael@0 | 2792 | } |
michael@0 | 2793 | MOZ_ASSERT(pi->mParticipant, "No dead objects should be in the purple buffer."); |
michael@0 | 2794 | if (MOZ_UNLIKELY(mListener)) { |
michael@0 | 2795 | mListener->NoteIncrementalRoot((uint64_t)pi->mPointer); |
michael@0 | 2796 | } |
michael@0 | 2797 | if (pi->mColor == black) { |
michael@0 | 2798 | return; |
michael@0 | 2799 | } |
michael@0 | 2800 | GraphWalker<ScanBlackVisitor>(ScanBlackVisitor(mCount, mFailed)).Walk(pi); |
michael@0 | 2801 | } |
michael@0 | 2802 | |
michael@0 | 2803 | private: |
michael@0 | 2804 | GCGraph &mGraph; |
michael@0 | 2805 | nsICycleCollectorListener *mListener; |
michael@0 | 2806 | uint32_t &mCount; |
michael@0 | 2807 | bool &mFailed; |
michael@0 | 2808 | }; |
michael@0 | 2809 | |
michael@0 | 2810 | // Objects that have been stored somewhere since the start of incremental |
michael@0 | 2811 | // graph building must be treated as live for this cycle collection, because |
michael@0 | 2812 | // we may not have accurate information about who holds references to them. |
michael@0 | 2813 | void |
michael@0 | 2814 | nsCycleCollector::ScanIncrementalRoots() |
michael@0 | 2815 | { |
michael@0 | 2816 | TimeLog timeLog; |
michael@0 | 2817 | |
michael@0 | 2818 | // Reference counted objects: |
michael@0 | 2819 | // We cleared the purple buffer at the start of the current ICC, so if a |
michael@0 | 2820 | // refcounted object is purple, it may have been AddRef'd during the current |
michael@0 | 2821 | // ICC. (It may also have only been released.) If that is the case, we cannot |
michael@0 | 2822 | // be sure that the set of things pointing to the object in the CC graph |
michael@0 | 2823 | // is accurate. Therefore, for safety, we treat any purple objects as being |
michael@0 | 2824 | // live during the current CC. We don't remove anything from the purple |
michael@0 | 2825 | // buffer here, so these objects will be suspected and freed in the next CC |
michael@0 | 2826 | // if they are garbage. |
michael@0 | 2827 | bool failed = false; |
michael@0 | 2828 | PurpleScanBlackVisitor purpleScanBlackVisitor(mGraph, mListener, mWhiteNodeCount, failed); |
michael@0 | 2829 | mPurpleBuf.VisitEntries(purpleScanBlackVisitor); |
michael@0 | 2830 | timeLog.Checkpoint("ScanIncrementalRoots::fix purple"); |
michael@0 | 2831 | |
michael@0 | 2832 | // Garbage collected objects: |
michael@0 | 2833 | // If a GCed object was added to the graph with a refcount of zero, and is |
michael@0 | 2834 | // now marked black by the GC, it was probably gray before and was exposed |
michael@0 | 2835 | // to active JS. It may therefore have been stored somewhere, and so must |
michael@0 | 2836 | // be treated as live. |
michael@0 | 2837 | if (mJSRuntime) { |
michael@0 | 2838 | nsCycleCollectionParticipant *jsParticipant = mJSRuntime->GCThingParticipant(); |
michael@0 | 2839 | nsCycleCollectionParticipant *zoneParticipant = mJSRuntime->ZoneParticipant(); |
michael@0 | 2840 | NodePool::Enumerator etor(mGraph.mNodes); |
michael@0 | 2841 | |
michael@0 | 2842 | while (!etor.IsDone()) { |
michael@0 | 2843 | PtrInfo *pi = etor.GetNext(); |
michael@0 | 2844 | |
michael@0 | 2845 | // If the refcount is non-zero, pi can't have been a gray JS object. |
michael@0 | 2846 | if (pi->mRefCount != 0) { |
michael@0 | 2847 | continue; |
michael@0 | 2848 | } |
michael@0 | 2849 | |
michael@0 | 2850 | // As an optimization, if an object has already been determined to be live, |
michael@0 | 2851 | // don't consider it further. We can't do this if there is a listener, |
michael@0 | 2852 | // because the listener wants to know the complete set of incremental roots. |
michael@0 | 2853 | if (pi->mColor == black && MOZ_LIKELY(!mListener)) { |
michael@0 | 2854 | continue; |
michael@0 | 2855 | } |
michael@0 | 2856 | |
michael@0 | 2857 | // If the object is still marked gray by the GC, nothing could have gotten |
michael@0 | 2858 | // hold of it, so it isn't an incremental root. |
michael@0 | 2859 | if (pi->mParticipant == jsParticipant) { |
michael@0 | 2860 | if (xpc_GCThingIsGrayCCThing(pi->mPointer)) { |
michael@0 | 2861 | continue; |
michael@0 | 2862 | } |
michael@0 | 2863 | } else if (pi->mParticipant == zoneParticipant) { |
michael@0 | 2864 | JS::Zone *zone = static_cast<JS::Zone*>(pi->mPointer); |
michael@0 | 2865 | if (js::ZoneGlobalsAreAllGray(zone)) { |
michael@0 | 2866 | continue; |
michael@0 | 2867 | } |
michael@0 | 2868 | } else { |
michael@0 | 2869 | MOZ_ASSERT(false, "Non-JS thing with 0 refcount? Treating as live."); |
michael@0 | 2870 | } |
michael@0 | 2871 | |
michael@0 | 2872 | // At this point, pi must be an incremental root. |
michael@0 | 2873 | |
michael@0 | 2874 | // If there's a listener, tell it about this root. We don't bother with the |
michael@0 | 2875 | // optimization of skipping the Walk() if pi is black: it will just return |
michael@0 | 2876 | // without doing anything and there's no need to make this case faster. |
michael@0 | 2877 | if (MOZ_UNLIKELY(mListener)) { |
michael@0 | 2878 | mListener->NoteIncrementalRoot((uint64_t)pi->mPointer); |
michael@0 | 2879 | } |
michael@0 | 2880 | |
michael@0 | 2881 | GraphWalker<ScanBlackVisitor>(ScanBlackVisitor(mWhiteNodeCount, failed)).Walk(pi); |
michael@0 | 2882 | } |
michael@0 | 2883 | |
michael@0 | 2884 | timeLog.Checkpoint("ScanIncrementalRoots::fix JS"); |
michael@0 | 2885 | } |
michael@0 | 2886 | |
michael@0 | 2887 | if (failed) { |
michael@0 | 2888 | NS_ASSERTION(false, "Ran out of memory in ScanIncrementalRoots"); |
michael@0 | 2889 | CC_TELEMETRY(_OOM, true); |
michael@0 | 2890 | } |
michael@0 | 2891 | } |
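//
// Recap of the incremental-root cases handled above (a restatement, not new
// policy):
//
//   refcounted object found in the purple buffer      -> flood black
//   JS thing, refcount 0, no longer gray to the GC    -> flood black
//   JS zone, refcount 0, some global no longer gray   -> flood black
//   JS thing or zone that is still entirely gray      -> cannot have been
//                                                        stored; skipped
//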
michael@0 | 2892 | |
michael@0 | 2893 | void |
michael@0 | 2894 | nsCycleCollector::ScanRoots(bool aFullySynchGraphBuild) |
michael@0 | 2895 | { |
michael@0 | 2896 | AutoRestore<bool> ar(mScanInProgress); |
michael@0 | 2897 | MOZ_ASSERT(!mScanInProgress); |
michael@0 | 2898 | mScanInProgress = true; |
michael@0 | 2899 | mWhiteNodeCount = 0; |
michael@0 | 2900 | MOZ_ASSERT(mIncrementalPhase == ScanAndCollectWhitePhase); |
michael@0 | 2901 | |
michael@0 | 2902 | if (!aFullySynchGraphBuild) { |
michael@0 | 2903 | ScanIncrementalRoots(); |
michael@0 | 2904 | } |
michael@0 | 2905 | |
michael@0 | 2906 | TimeLog timeLog; |
michael@0 | 2907 | |
michael@0 | 2908 | // On the assumption that most nodes will be black, it's |
michael@0 | 2909 | // probably faster to use a GraphWalker than a |
michael@0 | 2910 | // NodePool::Enumerator. |
michael@0 | 2911 | bool failed = false; |
michael@0 | 2912 | scanVisitor sv(mWhiteNodeCount, failed, !aFullySynchGraphBuild); |
michael@0 | 2913 | GraphWalker<scanVisitor>(sv).WalkFromRoots(mGraph); |
michael@0 | 2914 | timeLog.Checkpoint("ScanRoots::WalkFromRoots"); |
michael@0 | 2915 | |
michael@0 | 2916 | if (failed) { |
michael@0 | 2917 | NS_ASSERTION(false, "Ran out of memory in ScanRoots"); |
michael@0 | 2918 | CC_TELEMETRY(_OOM, true); |
michael@0 | 2919 | } |
michael@0 | 2920 | |
michael@0 | 2921 | // Scanning weak maps must be done last. |
michael@0 | 2922 | ScanWeakMaps(); |
michael@0 | 2923 | timeLog.Checkpoint("ScanRoots::ScanWeakMaps"); |
michael@0 | 2924 | |
michael@0 | 2925 | if (mListener) { |
michael@0 | 2926 | mListener->BeginResults(); |
michael@0 | 2927 | |
michael@0 | 2928 | NodePool::Enumerator etor(mGraph.mNodes); |
michael@0 | 2929 | while (!etor.IsDone()) { |
michael@0 | 2930 | PtrInfo *pi = etor.GetNext(); |
michael@0 | 2931 | if (!pi->mParticipant) { |
michael@0 | 2932 | continue; |
michael@0 | 2933 | } |
michael@0 | 2934 | switch (pi->mColor) { |
michael@0 | 2935 | case black: |
michael@0 | 2936 | if (pi->mRefCount > 0 && pi->mRefCount < UINT32_MAX && |
michael@0 | 2937 | pi->mInternalRefs != pi->mRefCount) { |
michael@0 | 2938 | mListener->DescribeRoot((uint64_t)pi->mPointer, |
michael@0 | 2939 | pi->mInternalRefs); |
michael@0 | 2940 | } |
michael@0 | 2941 | break; |
michael@0 | 2942 | case white: |
michael@0 | 2943 | mListener->DescribeGarbage((uint64_t)pi->mPointer); |
michael@0 | 2944 | break; |
michael@0 | 2945 | case grey: |
michael@0 | 2946 | // With incremental CC, we can end up with a grey object after |
michael@0 | 2947 | // scanning if it is only reachable from an object that gets freed. |
michael@0 | 2948 | break; |
michael@0 | 2949 | } |
michael@0 | 2950 | } |
michael@0 | 2951 | |
michael@0 | 2952 | mListener->End(); |
michael@0 | 2953 | mListener = nullptr; |
michael@0 | 2954 | timeLog.Checkpoint("ScanRoots::listener"); |
michael@0 | 2955 | } |
michael@0 | 2956 | } |
michael@0 | 2957 | |
michael@0 | 2958 | |
michael@0 | 2959 | //////////////////////////////////////////////////////////////////////// |
michael@0 | 2960 | // Bacon & Rajan's |CollectWhite| routine, somewhat modified. |
michael@0 | 2961 | //////////////////////////////////////////////////////////////////////// |
michael@0 | 2962 | |
michael@0 | 2963 | bool |
michael@0 | 2964 | nsCycleCollector::CollectWhite() |
michael@0 | 2965 | { |
michael@0 | 2966 | // Explanation of "somewhat modified": we have no way to collect the |
michael@0 | 2967 | // set of whites "all at once", we have to ask each of them to drop |
michael@0 | 2968 | // their outgoing links and assume this will cause the garbage cycle |
michael@0 | 2969 | // to *mostly* self-destruct (except for the reference we continue |
michael@0 | 2970 | // to hold). |
michael@0 | 2971 | // |
michael@0 | 2972 | // To do this "safely" we must make sure that the white nodes we're |
michael@0 | 2973 | // operating on are stable for the duration of our operation. So we |
michael@0 | 2974 | // make 3 sets of calls to language runtimes: |
michael@0 | 2975 | // |
michael@0 | 2976 | // - Root(whites), which should pin the whites in memory. |
michael@0 | 2977 | // - Unlink(whites), which drops outgoing links on each white. |
michael@0 | 2978 | // - Unroot(whites), which returns the whites to normal GC. |
michael@0 | 2979 | |
michael@0 | 2980 | TimeLog timeLog; |
michael@0 | 2981 | nsAutoTArray<PtrInfo*, 4000> whiteNodes; |
michael@0 | 2982 | |
michael@0 | 2983 | MOZ_ASSERT(mIncrementalPhase == ScanAndCollectWhitePhase); |
michael@0 | 2984 | |
michael@0 | 2985 | whiteNodes.SetCapacity(mWhiteNodeCount); |
michael@0 | 2986 | uint32_t numWhiteGCed = 0; |
michael@0 | 2987 | |
michael@0 | 2988 | NodePool::Enumerator etor(mGraph.mNodes); |
michael@0 | 2989 | while (!etor.IsDone()) |
michael@0 | 2990 | { |
michael@0 | 2991 | PtrInfo *pinfo = etor.GetNext(); |
michael@0 | 2992 | if (pinfo->mColor == white && pinfo->mParticipant) { |
michael@0 | 2993 | whiteNodes.AppendElement(pinfo); |
michael@0 | 2994 | pinfo->mParticipant->Root(pinfo->mPointer); |
michael@0 | 2995 | if (pinfo->mRefCount == 0) { |
michael@0 | 2996 | // Only GC-managed (JS) things have a refcount of 0. |
michael@0 | 2997 | ++numWhiteGCed; |
michael@0 | 2998 | } |
michael@0 | 2999 | } |
michael@0 | 3000 | } |
michael@0 | 3001 | |
michael@0 | 3002 | uint32_t count = whiteNodes.Length(); |
michael@0 | 3003 | MOZ_ASSERT(numWhiteGCed <= count, |
michael@0 | 3004 | "More freed GCed nodes than total freed nodes."); |
michael@0 | 3005 | mResults.mFreedRefCounted += count - numWhiteGCed; |
michael@0 | 3006 | mResults.mFreedGCed += numWhiteGCed; |
michael@0 | 3007 | |
michael@0 | 3008 | timeLog.Checkpoint("CollectWhite::Root"); |
michael@0 | 3009 | |
michael@0 | 3010 | if (mBeforeUnlinkCB) { |
michael@0 | 3011 | mBeforeUnlinkCB(); |
michael@0 | 3012 | timeLog.Checkpoint("CollectWhite::BeforeUnlinkCB"); |
michael@0 | 3013 | } |
michael@0 | 3014 | |
michael@0 | 3015 | for (uint32_t i = 0; i < count; ++i) { |
michael@0 | 3016 | PtrInfo *pinfo = whiteNodes.ElementAt(i); |
michael@0 | 3017 | MOZ_ASSERT(pinfo->mParticipant, "Unlink shouldn't see objects removed from graph."); |
michael@0 | 3018 | pinfo->mParticipant->Unlink(pinfo->mPointer); |
michael@0 | 3019 | #ifdef DEBUG |
michael@0 | 3020 | if (mJSRuntime) { |
michael@0 | 3021 | mJSRuntime->AssertNoObjectsToTrace(pinfo->mPointer); |
michael@0 | 3022 | } |
michael@0 | 3023 | #endif |
michael@0 | 3024 | } |
michael@0 | 3025 | timeLog.Checkpoint("CollectWhite::Unlink"); |
michael@0 | 3026 | |
michael@0 | 3027 | for (uint32_t i = 0; i < count; ++i) { |
michael@0 | 3028 | PtrInfo *pinfo = whiteNodes.ElementAt(i); |
michael@0 | 3029 | MOZ_ASSERT(pinfo->mParticipant, "Unroot shouldn't see objects removed from graph."); |
michael@0 | 3030 | pinfo->mParticipant->Unroot(pinfo->mPointer); |
michael@0 | 3031 | } |
michael@0 | 3032 | timeLog.Checkpoint("CollectWhite::Unroot"); |
michael@0 | 3033 | |
michael@0 | 3034 | nsCycleCollector_dispatchDeferredDeletion(false); |
michael@0 | 3035 | mIncrementalPhase = CleanupPhase; |
michael@0 | 3036 | |
michael@0 | 3037 | return count > 0; |
michael@0 | 3038 | } |
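//
// For orientation, the Unlink() called above is normally macro-generated.
// For a hypothetical class Foo with one cycle-collected member mChild, the
// expansion is along these lines (a sketch; the real macros add extra
// bookkeeping):
//
//   NS_IMETHODIMP_(void)
//   NS_CYCLE_COLLECTION_CLASSNAME(Foo)::Unlink(void* p)
//   {
//     Foo* tmp = DowncastCCParticipant<Foo>(p);
//     ImplCycleCollectionUnlink(tmp->mChild);  // drops the outgoing edge
//   }
//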
michael@0 | 3039 | |
michael@0 | 3040 | |
michael@0 | 3041 | //////////////////////// |
michael@0 | 3042 | // Memory reporting |
michael@0 | 3043 | //////////////////////// |
michael@0 | 3044 | |
michael@0 | 3045 | MOZ_DEFINE_MALLOC_SIZE_OF(CycleCollectorMallocSizeOf) |
michael@0 | 3046 | |
michael@0 | 3047 | NS_IMETHODIMP |
michael@0 | 3048 | nsCycleCollector::CollectReports(nsIHandleReportCallback* aHandleReport, |
michael@0 | 3049 | nsISupports* aData) |
michael@0 | 3050 | { |
michael@0 | 3051 | size_t objectSize, graphNodesSize, graphEdgesSize, weakMapsSize, |
michael@0 | 3052 | purpleBufferSize; |
michael@0 | 3053 | SizeOfIncludingThis(CycleCollectorMallocSizeOf, |
michael@0 | 3054 | &objectSize, |
michael@0 | 3055 | &graphNodesSize, &graphEdgesSize, |
michael@0 | 3056 | &weakMapsSize, |
michael@0 | 3057 | &purpleBufferSize); |
michael@0 | 3058 | |
michael@0 | 3059 | #define REPORT(_path, _amount, _desc) \ |
michael@0 | 3060 | do { \ |
michael@0 | 3061 | size_t amount = _amount; /* evaluate |_amount| only once */ \ |
michael@0 | 3062 | if (amount > 0) { \ |
michael@0 | 3063 | nsresult rv; \ |
michael@0 | 3064 | rv = aHandleReport->Callback(EmptyCString(), \ |
michael@0 | 3065 | NS_LITERAL_CSTRING(_path), \ |
michael@0 | 3066 | KIND_HEAP, UNITS_BYTES, amount, \ |
michael@0 | 3067 | NS_LITERAL_CSTRING(_desc), \ |
michael@0 | 3068 | aData); \ |
michael@0 | 3069 | if (NS_WARN_IF(NS_FAILED(rv))) \ |
michael@0 | 3070 | return rv; \ |
michael@0 | 3071 | } \ |
michael@0 | 3072 | } while (0) |
michael@0 | 3073 | |
michael@0 | 3074 | REPORT("explicit/cycle-collector/collector-object", objectSize, |
michael@0 | 3075 | "Memory used for the cycle collector object itself."); |
michael@0 | 3076 | |
michael@0 | 3077 | REPORT("explicit/cycle-collector/graph-nodes", graphNodesSize, |
michael@0 | 3078 | "Memory used for the nodes of the cycle collector's graph. " |
michael@0 | 3079 | "This should be zero when the collector is idle."); |
michael@0 | 3080 | |
michael@0 | 3081 | REPORT("explicit/cycle-collector/graph-edges", graphEdgesSize, |
michael@0 | 3082 | "Memory used for the edges of the cycle collector's graph. " |
michael@0 | 3083 | "This should be zero when the collector is idle."); |
michael@0 | 3084 | |
michael@0 | 3085 | REPORT("explicit/cycle-collector/weak-maps", weakMapsSize, |
michael@0 | 3086 | "Memory used for the representation of weak maps in the " |
michael@0 | 3087 | "cycle collector's graph. " |
michael@0 | 3088 | "This should be zero when the collector is idle."); |
michael@0 | 3089 | |
michael@0 | 3090 | REPORT("explicit/cycle-collector/purple-buffer", purpleBufferSize, |
michael@0 | 3091 | "Memory used for the cycle collector's purple buffer."); |
michael@0 | 3092 | |
michael@0 | 3093 | #undef REPORT |
michael@0 | 3094 | |
michael@0 | 3095 | return NS_OK; |
michael@0 | 3096 | } |
michael@0 | 3097 | |
michael@0 | 3098 | |
michael@0 | 3099 | //////////////////////////////////////////////////////////////////////// |
michael@0 | 3100 | // Collector implementation |
michael@0 | 3101 | //////////////////////////////////////////////////////////////////////// |
michael@0 | 3102 | |
michael@0 | 3103 | nsCycleCollector::nsCycleCollector() : |
michael@0 | 3104 | mActivelyCollecting(false), |
michael@0 | 3105 | mFreeingSnowWhite(false), |
michael@0 | 3106 | mScanInProgress(false), |
michael@0 | 3107 | mJSRuntime(nullptr), |
michael@0 | 3108 | mIncrementalPhase(IdlePhase), |
michael@0 | 3109 | mThread(NS_GetCurrentThread()), |
michael@0 | 3110 | mWhiteNodeCount(0), |
michael@0 | 3111 | mBeforeUnlinkCB(nullptr), |
michael@0 | 3112 | mForgetSkippableCB(nullptr), |
michael@0 | 3113 | mUnmergedNeeded(0), |
michael@0 | 3114 | mMergedInARow(0), |
michael@0 | 3115 | mJSPurpleBuffer(nullptr) |
michael@0 | 3116 | { |
michael@0 | 3117 | } |
michael@0 | 3118 | |
michael@0 | 3119 | nsCycleCollector::~nsCycleCollector() |
michael@0 | 3120 | { |
michael@0 | 3121 | UnregisterWeakMemoryReporter(this); |
michael@0 | 3122 | } |
michael@0 | 3123 | |
michael@0 | 3124 | void |
michael@0 | 3125 | nsCycleCollector::RegisterJSRuntime(CycleCollectedJSRuntime *aJSRuntime) |
michael@0 | 3126 | { |
michael@0 | 3127 | if (mJSRuntime) |
michael@0 | 3128 | Fault("multiple registrations of cycle collector JS runtime", aJSRuntime); |
michael@0 | 3129 | |
michael@0 | 3130 | mJSRuntime = aJSRuntime; |
michael@0 | 3131 | |
michael@0 | 3132 | // We can't register as a reporter in nsCycleCollector() because that runs |
michael@0 | 3133 | // before the memory reporter manager is initialized. So we do it here |
michael@0 | 3134 | // instead. |
michael@0 | 3135 | static bool registered = false; |
michael@0 | 3136 | if (!registered) { |
michael@0 | 3137 | RegisterWeakMemoryReporter(this); |
michael@0 | 3138 | registered = true; |
michael@0 | 3139 | } |
michael@0 | 3140 | } |
michael@0 | 3141 | |
michael@0 | 3142 | void |
michael@0 | 3143 | nsCycleCollector::ForgetJSRuntime() |
michael@0 | 3144 | { |
michael@0 | 3145 | if (!mJSRuntime) |
michael@0 | 3146 | Fault("forgetting non-registered cycle collector JS runtime"); |
michael@0 | 3147 | |
michael@0 | 3148 | mJSRuntime = nullptr; |
michael@0 | 3149 | } |
michael@0 | 3150 | |
michael@0 | 3151 | #ifdef DEBUG |
michael@0 | 3152 | static bool |
michael@0 | 3153 | HasParticipant(void *aPtr, nsCycleCollectionParticipant *aParti) |
michael@0 | 3154 | { |
michael@0 | 3155 | if (aParti) { |
michael@0 | 3156 | return true; |
michael@0 | 3157 | } |
michael@0 | 3158 | |
michael@0 | 3159 | nsXPCOMCycleCollectionParticipant *xcp; |
michael@0 | 3160 | ToParticipant(static_cast<nsISupports*>(aPtr), &xcp); |
michael@0 | 3161 | return xcp != nullptr; |
michael@0 | 3162 | } |
michael@0 | 3163 | #endif |
michael@0 | 3164 | |
michael@0 | 3165 | MOZ_ALWAYS_INLINE void |
michael@0 | 3166 | nsCycleCollector::Suspect(void *aPtr, nsCycleCollectionParticipant *aParti, |
michael@0 | 3167 | nsCycleCollectingAutoRefCnt *aRefCnt) |
michael@0 | 3168 | { |
michael@0 | 3169 | CheckThreadSafety(); |
michael@0 | 3170 | |
michael@0 | 3171 | // Re-entering ::Suspect during collection used to be a fault, but |
michael@0 | 3172 | // we are canonicalizing nsISupports pointers using QI, so we will |
michael@0 | 3173 | // see some spurious refcount traffic here. |
michael@0 | 3174 | |
michael@0 | 3175 | if (MOZ_UNLIKELY(mScanInProgress)) { |
michael@0 | 3176 | return; |
michael@0 | 3177 | } |
michael@0 | 3178 | |
michael@0 | 3179 | MOZ_ASSERT(aPtr, "Don't suspect null pointers"); |
michael@0 | 3180 | |
michael@0 | 3181 | MOZ_ASSERT(HasParticipant(aPtr, aParti), |
michael@0 | 3182 | "Suspected nsISupports pointer must QI to nsXPCOMCycleCollectionParticipant"); |
michael@0 | 3183 | |
michael@0 | 3184 | mPurpleBuf.Put(aPtr, aParti, aRefCnt); |
michael@0 | 3185 | } |
michael@0 | 3186 | |
michael@0 | 3187 | void |
michael@0 | 3188 | nsCycleCollector::CheckThreadSafety() |
michael@0 | 3189 | { |
michael@0 | 3190 | #ifdef DEBUG |
michael@0 | 3191 | nsIThread* currentThread = NS_GetCurrentThread(); |
michael@0 | 3192 | // XXXkhuey we can be called so late in shutdown that NS_GetCurrentThread |
michael@0 | 3193 | // returns null (after the thread manager has shut down) |
michael@0 | 3194 | MOZ_ASSERT(mThread == currentThread || !currentThread); |
michael@0 | 3195 | #endif |
michael@0 | 3196 | } |
michael@0 | 3197 | |
michael@0 | 3198 | // The cycle collector uses the mark bitmap to discover what JS objects |
michael@0 | 3199 | // were reachable only from XPConnect roots that might participate in |
michael@0 | 3200 | // cycles. We ask the JS runtime whether we need to force a GC before |
michael@0 | 3201 | // this CC. It returns true on startup (before the mark bits have been set), |
michael@0 | 3202 | // and also when UnmarkGray has run out of stack. We also force GCs at |
michael@0 | 3203 | // shutdown to collect cycles involving both DOM and JS. |
michael@0 | 3204 | void |
michael@0 | 3205 | nsCycleCollector::FixGrayBits(bool aForceGC) |
michael@0 | 3206 | { |
michael@0 | 3207 | CheckThreadSafety(); |
michael@0 | 3208 | |
michael@0 | 3209 | if (!mJSRuntime) |
michael@0 | 3210 | return; |
michael@0 | 3211 | |
michael@0 | 3212 | if (!aForceGC) { |
michael@0 | 3213 | mJSRuntime->FixWeakMappingGrayBits(); |
michael@0 | 3214 | |
michael@0 | 3215 | bool needGC = mJSRuntime->NeedCollect(); |
michael@0 | 3216 | // Only do a telemetry ping for non-shutdown CCs. |
michael@0 | 3217 | CC_TELEMETRY(_NEED_GC, needGC); |
michael@0 | 3218 | if (!needGC) |
michael@0 | 3219 | return; |
michael@0 | 3220 | mResults.mForcedGC = true; |
michael@0 | 3221 | } |
michael@0 | 3222 | |
michael@0 | 3223 | TimeLog timeLog; |
michael@0 | 3224 | mJSRuntime->Collect(aForceGC ? JS::gcreason::SHUTDOWN_CC : JS::gcreason::CC_FORCED); |
michael@0 | 3225 | timeLog.Checkpoint("GC()"); |
michael@0 | 3226 | } |
michael@0 | 3227 | |
michael@0 | 3228 | void |
michael@0 | 3229 | nsCycleCollector::CleanupAfterCollection() |
michael@0 | 3230 | { |
michael@0 | 3231 | MOZ_ASSERT(mIncrementalPhase == CleanupPhase); |
michael@0 | 3232 | mGraph.Clear(); |
michael@0 | 3233 | |
michael@0 | 3234 | uint32_t interval = (uint32_t) ((TimeStamp::Now() - mCollectionStart).ToMilliseconds()); |
michael@0 | 3235 | #ifdef COLLECT_TIME_DEBUG |
michael@0 | 3236 | printf("cc: total cycle collector time was %ums\n", interval); |
michael@0 | 3237 | printf("cc: visited %u ref counted and %u GCed objects, freed %d ref counted and %d GCed objects", |
michael@0 | 3238 | mResults.mVisitedRefCounted, mResults.mVisitedGCed, |
michael@0 | 3239 | mResults.mFreedRefCounted, mResults.mFreedGCed); |
michael@0 | 3240 | uint32_t numVisited = mResults.mVisitedRefCounted + mResults.mVisitedGCed; |
michael@0 | 3241 | if (numVisited > 1000) { |
michael@0 | 3242 | uint32_t numFreed = mResults.mFreedRefCounted + mResults.mFreedGCed; |
michael@0 | 3243 | printf(" (%d%%)", 100 * numFreed / numVisited); |
michael@0 | 3244 | } |
michael@0 | 3245 | printf(".\ncc: \n"); |
michael@0 | 3246 | #endif |
michael@0 | 3247 | CC_TELEMETRY( , interval); |
michael@0 | 3248 | CC_TELEMETRY(_VISITED_REF_COUNTED, mResults.mVisitedRefCounted); |
michael@0 | 3249 | CC_TELEMETRY(_VISITED_GCED, mResults.mVisitedGCed); |
michael@0 | 3250 | CC_TELEMETRY(_COLLECTED, mWhiteNodeCount); |
michael@0 | 3251 | |
michael@0 | 3252 | if (mJSRuntime) { |
michael@0 | 3253 | mJSRuntime->EndCycleCollectionCallback(mResults); |
michael@0 | 3254 | } |
michael@0 | 3255 | mIncrementalPhase = IdlePhase; |
michael@0 | 3256 | } |
michael@0 | 3257 | |
michael@0 | 3258 | void |
michael@0 | 3259 | nsCycleCollector::ShutdownCollect() |
michael@0 | 3260 | { |
michael@0 | 3261 | SliceBudget unlimitedBudget; |
michael@0 | 3262 | uint32_t i; |
michael@0 | 3263 | for (i = 0; i < DEFAULT_SHUTDOWN_COLLECTIONS; ++i) { |
michael@0 | 3264 | if (!Collect(ShutdownCC, unlimitedBudget, nullptr)) { |
michael@0 | 3265 | break; |
michael@0 | 3266 | } |
michael@0 | 3267 | } |
michael@0 | 3268 | NS_WARN_IF_FALSE(i < NORMAL_SHUTDOWN_COLLECTIONS, "Extra shutdown CC"); |
michael@0 | 3269 | } |
michael@0 | 3270 | |
michael@0 | 3271 | static void |
michael@0 | 3272 | PrintPhase(const char *aPhase) |
michael@0 | 3273 | { |
michael@0 | 3274 | #ifdef DEBUG_PHASES |
michael@0 | 3275 | printf("cc: begin %s on %s\n", aPhase, |
michael@0 | 3276 | NS_IsMainThread() ? "mainthread" : "worker"); |
michael@0 | 3277 | #endif |
michael@0 | 3278 | } |
michael@0 | 3279 | |
michael@0 | 3280 | bool |
michael@0 | 3281 | nsCycleCollector::Collect(ccType aCCType, |
michael@0 | 3282 | SliceBudget &aBudget, |
michael@0 | 3283 | nsICycleCollectorListener *aManualListener) |
michael@0 | 3284 | { |
michael@0 | 3285 | CheckThreadSafety(); |
michael@0 | 3286 | |
michael@0 | 3287 | // This can legitimately happen in a few cases. See bug 383651. |
michael@0 | 3288 | if (mActivelyCollecting || mFreeingSnowWhite) { |
michael@0 | 3289 | return false; |
michael@0 | 3290 | } |
michael@0 | 3291 | mActivelyCollecting = true; |
michael@0 | 3292 | |
michael@0 | 3293 | bool startedIdle = (mIncrementalPhase == IdlePhase); |
michael@0 | 3294 | bool collectedAny = false; |
michael@0 | 3295 | |
michael@0 | 3296 | // If the CC started idle, it will call BeginCollection, which |
michael@0 | 3297 | // will do FreeSnowWhite, so it doesn't need to be done here. |
michael@0 | 3298 | if (!startedIdle) { |
michael@0 | 3299 | FreeSnowWhite(true); |
michael@0 | 3300 | } |
michael@0 | 3301 | |
michael@0 | 3302 | bool finished = false; |
michael@0 | 3303 | do { |
michael@0 | 3304 | switch (mIncrementalPhase) { |
michael@0 | 3305 | case IdlePhase: |
michael@0 | 3306 | PrintPhase("BeginCollection"); |
michael@0 | 3307 | BeginCollection(aCCType, aManualListener); |
michael@0 | 3308 | break; |
michael@0 | 3309 | case GraphBuildingPhase: |
michael@0 | 3310 | PrintPhase("MarkRoots"); |
michael@0 | 3311 | MarkRoots(aBudget); |
michael@0 | 3312 | break; |
michael@0 | 3313 | case ScanAndCollectWhitePhase: |
michael@0 | 3314 | // We do ScanRoots and CollectWhite in a single slice to ensure |
michael@0 | 3315 | // that we won't unlink a live object if a weak reference is |
michael@0 | 3316 | // promoted to a strong reference after ScanRoots has finished. |
michael@0 | 3317 | // See bug 926533. |
michael@0 | 3318 | PrintPhase("ScanRoots"); |
michael@0 | 3319 | ScanRoots(startedIdle); |
michael@0 | 3320 | PrintPhase("CollectWhite"); |
michael@0 | 3321 | collectedAny = CollectWhite(); |
michael@0 | 3322 | break; |
michael@0 | 3323 | case CleanupPhase: |
michael@0 | 3324 | PrintPhase("CleanupAfterCollection"); |
michael@0 | 3325 | CleanupAfterCollection(); |
michael@0 | 3326 | finished = true; |
michael@0 | 3327 | break; |
michael@0 | 3328 | } |
michael@0 | 3329 | } while (!aBudget.checkOverBudget() && !finished); |
michael@0 | 3330 | |
michael@0 | 3331 | // Clear mActivelyCollecting here to ensure that a recursive call to |
michael@0 | 3332 | // Collect() does something. |
michael@0 | 3333 | mActivelyCollecting = false; |
michael@0 | 3334 | |
michael@0 | 3335 | if (aCCType != SliceCC && !startedIdle) { |
michael@0 | 3336 | // We were in the middle of an incremental CC (using its own listener). |
michael@0 | 3337 | // Somebody has forced a CC, so after having finished out the current CC, |
michael@0 | 3338 | // run the CC again using the new listener. |
michael@0 | 3339 | MOZ_ASSERT(mIncrementalPhase == IdlePhase); |
michael@0 | 3340 | if (Collect(aCCType, aBudget, aManualListener)) { |
michael@0 | 3341 | collectedAny = true; |
michael@0 | 3342 | } |
michael@0 | 3343 | } |
michael@0 | 3344 | |
michael@0 | 3345 | MOZ_ASSERT_IF(aCCType != SliceCC, mIncrementalPhase == IdlePhase); |
michael@0 | 3346 | |
michael@0 | 3347 | return collectedAny; |
michael@0 | 3348 | } |
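//
// How a caller drives this state machine (a sketch; kICCSliceBudgetMs and
// the exact budget idiom are assumptions about the caller, not this file's
// API):
//
//   SliceBudget budget(SliceBudget::TimeBudget(kICCSliceBudgetMs));
//   collector->Collect(SliceCC, budget, nullptr);  // runs one slice
//
// Each call advances IdlePhase -> GraphBuildingPhase ->
// ScanAndCollectWhitePhase -> CleanupPhase as far as the budget allows;
// callers repeat with fresh budgets (e.g. from a timer) until the collector
// returns to IdlePhase.
//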
michael@0 | 3349 | |
michael@0 | 3350 | // Any JS objects we have in the graph could die when we GC, but we |
michael@0 | 3351 | // don't want to abandon the current CC, because the graph contains |
michael@0 | 3352 | // information about purple roots. So we synchronously finish off |
michael@0 | 3353 | // the current CC. |
michael@0 | 3354 | void |
michael@0 | 3355 | nsCycleCollector::PrepareForGarbageCollection() |
michael@0 | 3356 | { |
michael@0 | 3357 | if (mIncrementalPhase == IdlePhase) { |
michael@0 | 3358 | MOZ_ASSERT(mGraph.IsEmpty(), "Non-empty graph when idle"); |
michael@0 | 3359 | MOZ_ASSERT(!mBuilder, "Non-null builder when idle"); |
michael@0 | 3360 | if (mJSPurpleBuffer) { |
michael@0 | 3361 | mJSPurpleBuffer->Destroy(); |
michael@0 | 3362 | } |
michael@0 | 3363 | return; |
michael@0 | 3364 | } |
michael@0 | 3365 | |
michael@0 | 3366 | SliceBudget unlimitedBudget; |
michael@0 | 3367 | PrintPhase("PrepareForGarbageCollection"); |
michael@0 | 3368 | // Use SliceCC because we only want to finish the CC in progress. |
michael@0 | 3369 | Collect(SliceCC, unlimitedBudget, nullptr); |
michael@0 | 3370 | MOZ_ASSERT(mIncrementalPhase == IdlePhase); |
michael@0 | 3371 | } |
michael@0 | 3372 | |
michael@0 | 3373 | // Don't merge too many times in a row, and do at least a minimum |
michael@0 | 3374 | // number of unmerged CCs in a row. |
michael@0 | 3375 | static const uint32_t kMinConsecutiveUnmerged = 3; |
michael@0 | 3376 | static const uint32_t kMaxConsecutiveMerged = 3; |
michael@0 | 3377 | |
michael@0 | 3378 | bool |
michael@0 | 3379 | nsCycleCollector::ShouldMergeZones(ccType aCCType) |
michael@0 | 3380 | { |
michael@0 | 3381 | if (!mJSRuntime) { |
michael@0 | 3382 | return false; |
michael@0 | 3383 | } |
michael@0 | 3384 | |
michael@0 | 3385 | MOZ_ASSERT(mUnmergedNeeded <= kMinConsecutiveUnmerged); |
michael@0 | 3386 | MOZ_ASSERT(mMergedInARow <= kMaxConsecutiveMerged); |
michael@0 | 3387 | |
michael@0 | 3388 | if (mMergedInARow == kMaxConsecutiveMerged) { |
michael@0 | 3389 | MOZ_ASSERT(mUnmergedNeeded == 0); |
michael@0 | 3390 | mUnmergedNeeded = kMinConsecutiveUnmerged; |
michael@0 | 3391 | } |
michael@0 | 3392 | |
michael@0 | 3393 | if (mUnmergedNeeded > 0) { |
michael@0 | 3394 | mUnmergedNeeded--; |
michael@0 | 3395 | mMergedInARow = 0; |
michael@0 | 3396 | return false; |
michael@0 | 3397 | } |
michael@0 | 3398 | |
michael@0 | 3399 | if (aCCType == SliceCC && mJSRuntime->UsefulToMergeZones()) { |
michael@0 | 3400 | mMergedInARow++; |
michael@0 | 3401 | return true; |
michael@0 | 3402 | } else { |
michael@0 | 3403 | mMergedInARow = 0; |
michael@0 | 3404 | return false; |
michael@0 | 3405 | } |
michael@0 | 3406 | } |
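//
// Example cadence, with kMaxConsecutiveMerged == 3 and
// kMinConsecutiveUnmerged == 3: if every SliceCC would find merging useful,
// the decisions run merge, merge, merge, unmerged, unmerged, unmerged,
// merge, ... so merged collections can never starve full-graph CCs.
//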
michael@0 | 3407 | |
michael@0 | 3408 | void |
michael@0 | 3409 | nsCycleCollector::BeginCollection(ccType aCCType, |
michael@0 | 3410 | nsICycleCollectorListener *aManualListener) |
michael@0 | 3411 | { |
michael@0 | 3412 | TimeLog timeLog; |
michael@0 | 3413 | MOZ_ASSERT(mIncrementalPhase == IdlePhase); |
michael@0 | 3414 | |
michael@0 | 3415 | mCollectionStart = TimeStamp::Now(); |
michael@0 | 3416 | |
michael@0 | 3417 | if (mJSRuntime) { |
michael@0 | 3418 | mJSRuntime->BeginCycleCollectionCallback(); |
michael@0 | 3419 | timeLog.Checkpoint("BeginCycleCollectionCallback()"); |
michael@0 | 3420 | } |
michael@0 | 3421 | |
michael@0 | 3422 | bool isShutdown = (aCCType == ShutdownCC); |
michael@0 | 3423 | |
michael@0 | 3424 | // Set up the listener for this CC. |
michael@0 | 3425 | MOZ_ASSERT_IF(isShutdown, !aManualListener); |
michael@0 | 3426 | MOZ_ASSERT(!mListener, "Forgot to clear a previous listener?"); |
michael@0 | 3427 | mListener = aManualListener; |
michael@0 | 3428 | aManualListener = nullptr; |
michael@0 | 3429 | if (!mListener && mParams.LogThisCC(isShutdown)) { |
michael@0 | 3430 | nsRefPtr<nsCycleCollectorLogger> logger = new nsCycleCollectorLogger(); |
michael@0 | 3431 | if (mParams.AllTracesThisCC(isShutdown)) { |
michael@0 | 3432 | logger->SetAllTraces(); |
michael@0 | 3433 | } |
michael@0 | 3434 | mListener = logger.forget(); |
michael@0 | 3435 | } |
michael@0 | 3436 | |
michael@0 | 3437 | bool forceGC = isShutdown; |
michael@0 | 3438 | if (!forceGC && mListener) { |
michael@0 | 3439 | // On a WantAllTraces CC, force a synchronous global GC to prevent |
michael@0 | 3440 | // hijinks from ForgetSkippable and compartmental GCs. |
michael@0 | 3441 | mListener->GetWantAllTraces(&forceGC); |
michael@0 | 3442 | } |
michael@0 | 3443 | FixGrayBits(forceGC); |
michael@0 | 3444 | |
michael@0 | 3445 | FreeSnowWhite(true); |
michael@0 | 3446 | |
michael@0 | 3447 | if (mListener && NS_FAILED(mListener->Begin())) { |
michael@0 | 3448 | mListener = nullptr; |
michael@0 | 3449 | } |
michael@0 | 3450 | |
michael@0 | 3451 | // Set up the data structures for building the graph. |
michael@0 | 3452 | mGraph.Init(); |
michael@0 | 3453 | mResults.Init(); |
michael@0 | 3454 | bool mergeZones = ShouldMergeZones(aCCType); |
michael@0 | 3455 | mResults.mMergedZones = mergeZones; |
michael@0 | 3456 | |
michael@0 | 3457 | MOZ_ASSERT(!mBuilder, "Forgot to clear mBuilder"); |
michael@0 | 3458 | mBuilder = new GCGraphBuilder(mGraph, mResults, mJSRuntime, mListener, mergeZones); |
michael@0 | 3459 | |
michael@0 | 3460 | if (mJSRuntime) { |
michael@0 | 3461 | mJSRuntime->TraverseRoots(*mBuilder); |
michael@0 | 3462 | timeLog.Checkpoint("mJSRuntime->TraverseRoots()"); |
michael@0 | 3463 | } |
michael@0 | 3464 | |
michael@0 | 3465 | AutoRestore<bool> ar(mScanInProgress); |
michael@0 | 3466 | MOZ_ASSERT(!mScanInProgress); |
michael@0 | 3467 | mScanInProgress = true; |
michael@0 | 3468 | mPurpleBuf.SelectPointers(*mBuilder); |
michael@0 | 3469 | timeLog.Checkpoint("SelectPointers()"); |
michael@0 | 3470 | |
michael@0 | 3471 | // We've finished adding roots, and everything in the graph is a root. |
michael@0 | 3472 | mGraph.mRootCount = mGraph.MapCount(); |
michael@0 | 3473 | |
michael@0 | 3474 | mCurrNode = new NodePool::Enumerator(mGraph.mNodes); |
michael@0 | 3475 | mIncrementalPhase = GraphBuildingPhase; |
michael@0 | 3476 | } |
michael@0 | 3477 | |
michael@0 | 3478 | uint32_t |
michael@0 | 3479 | nsCycleCollector::SuspectedCount() |
michael@0 | 3480 | { |
michael@0 | 3481 | CheckThreadSafety(); |
michael@0 | 3482 | return mPurpleBuf.Count(); |
michael@0 | 3483 | } |
michael@0 | 3484 | |
michael@0 | 3485 | void |
michael@0 | 3486 | nsCycleCollector::Shutdown() |
michael@0 | 3487 | { |
michael@0 | 3488 | CheckThreadSafety(); |
michael@0 | 3489 | |
michael@0 | 3490 | // Always delete snow white objects. |
michael@0 | 3491 | FreeSnowWhite(true); |
michael@0 | 3492 | |
michael@0 | 3493 | #ifndef DEBUG |
michael@0 | 3494 | if (PR_GetEnv("MOZ_CC_RUN_DURING_SHUTDOWN")) |
michael@0 | 3495 | #endif |
michael@0 | 3496 | { |
michael@0 | 3497 | ShutdownCollect(); |
michael@0 | 3498 | } |
michael@0 | 3499 | } |
michael@0 | 3500 | |
michael@0 | 3501 | void |
michael@0 | 3502 | nsCycleCollector::RemoveObjectFromGraph(void *aObj) |
michael@0 | 3503 | { |
michael@0 | 3504 | if (mIncrementalPhase == IdlePhase) { |
michael@0 | 3505 | return; |
michael@0 | 3506 | } |
michael@0 | 3507 | |
michael@0 | 3508 | if (PtrInfo *pinfo = mGraph.FindNode(aObj)) { |
michael@0 | 3509 | mGraph.RemoveNodeFromMap(aObj); |
michael@0 | 3510 | |
michael@0 | 3511 | pinfo->mPointer = nullptr; |
michael@0 | 3512 | pinfo->mParticipant = nullptr; |
michael@0 | 3513 | } |
michael@0 | 3514 | } |
michael@0 | 3515 | |
michael@0 | 3516 | void |
michael@0 | 3517 | nsCycleCollector::SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf, |
michael@0 | 3518 | size_t *aObjectSize, |
michael@0 | 3519 | size_t *aGraphNodesSize, |
michael@0 | 3520 | size_t *aGraphEdgesSize, |
michael@0 | 3521 | size_t *aWeakMapsSize, |
michael@0 | 3522 | size_t *aPurpleBufferSize) const |
michael@0 | 3523 | { |
michael@0 | 3524 | *aObjectSize = aMallocSizeOf(this); |
michael@0 | 3525 | |
michael@0 | 3526 | mGraph.SizeOfExcludingThis(aMallocSizeOf, aGraphNodesSize, aGraphEdgesSize, |
michael@0 | 3527 | aWeakMapsSize); |
michael@0 | 3528 | |
michael@0 | 3529 | *aPurpleBufferSize = mPurpleBuf.SizeOfExcludingThis(aMallocSizeOf); |
michael@0 | 3530 | |
michael@0 | 3531 | // These fields are deliberately not measured: |
michael@0 | 3532 | // - mJSRuntime: because it's non-owning and measured by JS reporters. |
michael@0 | 3533 | // - mParams: because it only contains scalars. |
michael@0 | 3534 | } |
michael@0 | 3535 | |
michael@0 | 3536 | JSPurpleBuffer* |
michael@0 | 3537 | nsCycleCollector::GetJSPurpleBuffer() |
michael@0 | 3538 | { |
michael@0 | 3539 | if (!mJSPurpleBuffer) { |
michael@0 | 3540 | // JSPurpleBuffer keeps itself alive, but we need to create it in such a |
michael@0 | 3541 | // way that it ends up in the normal purple buffer. That happens when the |
michael@0 | 3542 | // nsRefPtr goes out of scope and calls Release. |
michael@0 | 3543 | nsRefPtr<JSPurpleBuffer> pb = new JSPurpleBuffer(mJSPurpleBuffer); |
michael@0 | 3544 | } |
michael@0 | 3545 | return mJSPurpleBuffer; |
michael@0 | 3546 | } |
michael@0 | 3547 | |
michael@0 | 3548 | //////////////////////////////////////////////////////////////////////// |
michael@0 | 3549 | // Module public API (exported in nsCycleCollector.h) |
michael@0 | 3550 | // Just functions that redirect into the singleton, once it's built. |
michael@0 | 3551 | //////////////////////////////////////////////////////////////////////// |
michael@0 | 3552 | |
michael@0 | 3553 | void |
michael@0 | 3554 | nsCycleCollector_registerJSRuntime(CycleCollectedJSRuntime *rt) |
michael@0 | 3555 | { |
michael@0 | 3556 | CollectorData *data = sCollectorData.get(); |
michael@0 | 3557 | |
michael@0 | 3558 | // We should have started the cycle collector by now. |
michael@0 | 3559 | MOZ_ASSERT(data); |
michael@0 | 3560 | MOZ_ASSERT(data->mCollector); |
michael@0 | 3561 | // But we shouldn't already have a runtime. |
michael@0 | 3562 | MOZ_ASSERT(!data->mRuntime); |
michael@0 | 3563 | |
michael@0 | 3564 | data->mRuntime = rt; |
michael@0 | 3565 | data->mCollector->RegisterJSRuntime(rt); |
michael@0 | 3566 | } |
michael@0 | 3567 | |
michael@0 | 3568 | void |
michael@0 | 3569 | nsCycleCollector_forgetJSRuntime() |
michael@0 | 3570 | { |
michael@0 | 3571 | CollectorData *data = sCollectorData.get(); |
michael@0 | 3572 | |
michael@0 | 3573 | // We should have started the cycle collector by now. |
michael@0 | 3574 | MOZ_ASSERT(data); |
michael@0 | 3575 | // And we shouldn't have already forgotten our runtime. |
michael@0 | 3576 | MOZ_ASSERT(data->mRuntime); |
michael@0 | 3577 | |
michael@0 | 3578 | // But it may have shutdown already. |
michael@0 | 3579 | if (data->mCollector) { |
michael@0 | 3580 | data->mCollector->ForgetJSRuntime(); |
michael@0 | 3581 | data->mRuntime = nullptr; |
michael@0 | 3582 | } else { |
michael@0 | 3583 | data->mRuntime = nullptr; |
michael@0 | 3584 | delete data; |
michael@0 | 3585 | sCollectorData.set(nullptr); |
michael@0 | 3586 | } |
michael@0 | 3587 | } |
michael@0 | 3588 | |
michael@0 | 3589 | /* static */ CycleCollectedJSRuntime* |
michael@0 | 3590 | CycleCollectedJSRuntime::Get() |
michael@0 | 3591 | { |
michael@0 | 3592 | CollectorData* data = sCollectorData.get(); |
michael@0 | 3593 | if (data) { |
michael@0 | 3594 | return data->mRuntime; |
michael@0 | 3595 | } |
michael@0 | 3596 | return nullptr; |
michael@0 | 3597 | } |
michael@0 | 3598 | |
michael@0 | 3599 | |
michael@0 | 3600 | namespace mozilla { |
michael@0 | 3601 | namespace cyclecollector { |
michael@0 | 3602 | |
michael@0 | 3603 | void |
michael@0 | 3604 | HoldJSObjectsImpl(void* aHolder, nsScriptObjectTracer* aTracer) |
michael@0 | 3605 | { |
michael@0 | 3606 | CollectorData* data = sCollectorData.get(); |
michael@0 | 3607 | |
michael@0 | 3608 | // We should have started the cycle collector by now. |
michael@0 | 3609 | MOZ_ASSERT(data); |
michael@0 | 3610 | MOZ_ASSERT(data->mCollector); |
michael@0 | 3611 | // And we should have a runtime. |
michael@0 | 3612 | MOZ_ASSERT(data->mRuntime); |
michael@0 | 3613 | |
michael@0 | 3614 | data->mRuntime->AddJSHolder(aHolder, aTracer); |
michael@0 | 3615 | } |
michael@0 | 3616 | |
michael@0 | 3617 | void |
michael@0 | 3618 | HoldJSObjectsImpl(nsISupports* aHolder) |
michael@0 | 3619 | { |
michael@0 | 3620 | nsXPCOMCycleCollectionParticipant* participant; |
michael@0 | 3621 | CallQueryInterface(aHolder, &participant); |
michael@0 | 3622 | MOZ_ASSERT(participant, "Failed to QI to nsXPCOMCycleCollectionParticipant!"); |
michael@0 | 3623 | MOZ_ASSERT(participant->CheckForRightISupports(aHolder), |
michael@0 | 3624 | "The result of QIing a JS holder should be the same as ToSupports"); |
michael@0 | 3625 | |
michael@0 | 3626 | HoldJSObjectsImpl(aHolder, participant); |
michael@0 | 3627 | } |
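//
// Typical use from a C++ class holding JS things (a sketch; Foo and
// mCallback are hypothetical, and mozilla::HoldJSObjects / DropJSObjects
// are the public wrappers that funnel into these Impl functions):
//
//   void Foo::SetCallback(JS::Handle<JSObject*> aObj)
//   {
//     mCallback = aObj;              // a JS::Heap<JSObject*> member
//     mozilla::HoldJSObjects(this);  // start tracing this holder
//   }
//
//   Foo::~Foo() { mozilla::DropJSObjects(this); }
//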
michael@0 | 3628 | |
michael@0 | 3629 | void |
michael@0 | 3630 | DropJSObjectsImpl(void* aHolder) |
michael@0 | 3631 | { |
michael@0 | 3632 | CollectorData* data = sCollectorData.get(); |
michael@0 | 3633 | |
michael@0 | 3634 | // We should have started the cycle collector by now, and not completely |
michael@0 | 3635 | // shut down. |
michael@0 | 3636 | MOZ_ASSERT(data); |
michael@0 | 3637 | // And we should have a runtime. |
michael@0 | 3638 | MOZ_ASSERT(data->mRuntime); |
michael@0 | 3639 | |
michael@0 | 3640 | data->mRuntime->RemoveJSHolder(aHolder); |
michael@0 | 3641 | } |
michael@0 | 3642 | |
michael@0 | 3643 | void |
michael@0 | 3644 | DropJSObjectsImpl(nsISupports* aHolder) |
michael@0 | 3645 | { |
michael@0 | 3646 | #ifdef DEBUG |
michael@0 | 3647 | nsXPCOMCycleCollectionParticipant* participant; |
michael@0 | 3648 | CallQueryInterface(aHolder, &participant); |
michael@0 | 3649 | MOZ_ASSERT(participant, "Failed to QI to nsXPCOMCycleCollectionParticipant!"); |
michael@0 | 3650 | MOZ_ASSERT(participant->CheckForRightISupports(aHolder), |
michael@0 | 3651 | "The result of QIing a JS holder should be the same as ToSupports"); |
michael@0 | 3652 | #endif |
michael@0 | 3653 | DropJSObjectsImpl(static_cast<void*>(aHolder)); |
michael@0 | 3654 | } |
michael@0 | 3655 | |
michael@0 | 3656 | #ifdef DEBUG |
michael@0 | 3657 | bool |
michael@0 | 3658 | IsJSHolder(void* aHolder) |
michael@0 | 3659 | { |
michael@0 | 3660 | CollectorData *data = sCollectorData.get(); |
michael@0 | 3661 | |
michael@0 | 3662 | // We should have started the cycle collector by now, and not completely |
michael@0 | 3663 | // shut down. |
michael@0 | 3664 | MOZ_ASSERT(data); |
michael@0 | 3665 | // And we should have a runtime. |
michael@0 | 3666 | MOZ_ASSERT(data->mRuntime); |
michael@0 | 3667 | |
michael@0 | 3668 | return data->mRuntime->IsJSHolder(aHolder); |
michael@0 | 3669 | } |
michael@0 | 3670 | #endif |
michael@0 | 3671 | |
michael@0 | 3672 | void |
michael@0 | 3673 | DeferredFinalize(nsISupports* aSupports) |
michael@0 | 3674 | { |
michael@0 | 3675 | CollectorData *data = sCollectorData.get(); |
michael@0 | 3676 | |
michael@0 | 3677 | // We should have started the cycle collector by now, and not completely |
michael@0 | 3678 | // shut down. |
michael@0 | 3679 | MOZ_ASSERT(data); |
michael@0 | 3680 | // And we should have a runtime. |
michael@0 | 3681 | MOZ_ASSERT(data->mRuntime); |
michael@0 | 3682 | |
michael@0 | 3683 | data->mRuntime->DeferredFinalize(aSupports); |
michael@0 | 3684 | } |
michael@0 | 3685 | |
michael@0 | 3686 | void |
michael@0 | 3687 | DeferredFinalize(DeferredFinalizeAppendFunction aAppendFunc, |
michael@0 | 3688 | DeferredFinalizeFunction aFunc, |
michael@0 | 3689 | void* aThing) |
michael@0 | 3690 | { |
michael@0 | 3691 | CollectorData *data = sCollectorData.get(); |
michael@0 | 3692 | |
michael@0 | 3693 | // We should have started the cycle collector by now, and not completely |
michael@0 | 3694 | // shut down. |
michael@0 | 3695 | MOZ_ASSERT(data); |
michael@0 | 3696 | // And we should have a runtime. |
michael@0 | 3697 | MOZ_ASSERT(data->mRuntime); |
michael@0 | 3698 | |
michael@0 | 3699 | data->mRuntime->DeferredFinalize(aAppendFunc, aFunc, aThing); |
michael@0 | 3700 | } |
michael@0 | 3701 | |
michael@0 | 3702 | } // namespace cyclecollector |
michael@0 | 3703 | } // namespace mozilla |
michael@0 | 3704 | |
michael@0 | 3705 | |
MOZ_NEVER_INLINE static void
SuspectAfterShutdown(void* n, nsCycleCollectionParticipant* cp,
                     nsCycleCollectingAutoRefCnt* aRefCnt,
                     bool* aShouldDelete)
{
    if (aRefCnt->get() == 0) {
        if (!aShouldDelete) {
            // The CC is shut down, so we can't be in the middle of an ICC.
            CanonicalizeParticipant(&n, &cp);
            aRefCnt->stabilizeForDeletion();
            cp->DeleteCycleCollectable(n);
        } else {
            // The caller wants to handle the deletion itself; just tell it
            // that the object should go away.
            *aShouldDelete = true;
        }
    } else {
        // Make sure we'll get called again.
        aRefCnt->RemoveFromPurpleBuffer();
    }
}

void
NS_CycleCollectorSuspect3(void* n, nsCycleCollectionParticipant* cp,
                          nsCycleCollectingAutoRefCnt* aRefCnt,
                          bool* aShouldDelete)
{
    CollectorData* data = sCollectorData.get();

    // We should have started the cycle collector by now.
    MOZ_ASSERT(data);

    if (MOZ_LIKELY(data->mCollector)) {
        data->mCollector->Suspect(n, cp, aRefCnt);
        return;
    }
    SuspectAfterShutdown(n, cp, aRefCnt, aShouldDelete);
}

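// How code reaches NS_CycleCollectorSuspect3() in practice: cycle-collected
// classes use the standard refcounting macros, and the generated Release()
// suspects the object (turns it purple) on an N+1 -> N transition instead
// of only deleting at zero. A minimal sketch with a hypothetical class:
//
//     class MyCycleCollectedThing MOZ_FINAL : public nsISupports
//     {
//     public:
//         NS_DECL_CYCLE_COLLECTING_ISUPPORTS
//         NS_DECL_CYCLE_COLLECTION_CLASS(MyCycleCollectedThing)
//
//         nsCOMPtr<nsISupports> mOther;  // may form a cycle
//
//     private:
//         ~MyCycleCollectedThing() {}
//     };
//
//     NS_IMPL_CYCLE_COLLECTION(MyCycleCollectedThing, mOther)
//     NS_IMPL_CYCLE_COLLECTING_ADDREF(MyCycleCollectedThing)
//     NS_IMPL_CYCLE_COLLECTING_RELEASE(MyCycleCollectedThing)
//     NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(MyCycleCollectedThing)
//         NS_INTERFACE_MAP_ENTRY(nsISupports)
//     NS_INTERFACE_MAP_END
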
uint32_t
nsCycleCollector_suspectedCount()
{
    CollectorData* data = sCollectorData.get();

    // We should have started the cycle collector by now.
    MOZ_ASSERT(data);

    if (!data->mCollector) {
        return 0;
    }

    return data->mCollector->SuspectedCount();
}

// Set up the thread-local slot that holds each thread's collector state.
// Main thread only, and only once per process.
bool
nsCycleCollector_init()
{
    MOZ_ASSERT(NS_IsMainThread(), "Wrong thread!");
    MOZ_ASSERT(!sCollectorData.initialized(), "Called twice!?");

    return sCollectorData.init();
}

// Create the calling thread's cycle collector. A CycleCollectedJSRuntime,
// if the thread has one, registers itself with this data separately.
void
nsCycleCollector_startup()
{
    MOZ_ASSERT(sCollectorData.initialized(),
               "Forgot to call nsCycleCollector_init!");
    if (sCollectorData.get()) {
        MOZ_CRASH();
    }

    CollectorData* data = new CollectorData;
    data->mCollector = new nsCycleCollector();
    data->mRuntime = nullptr;

    sCollectorData.set(data);
}

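// Expected bootstrap ordering, sketched (the real call sites live in XPCOM
// startup and in the workers code): initialize the TLS slot once on the
// main thread, then start a collector on every cycle-collecting thread.
//
//     if (!nsCycleCollector_init()) {    // main thread, once per process
//         return NS_ERROR_FAILURE;
//     }
//     nsCycleCollector_startup();        // per cycle-collecting thread
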
void
nsCycleCollector_setBeforeUnlinkCallback(CC_BeforeUnlinkCallback aCB)
{
    CollectorData* data = sCollectorData.get();

    // We should have started the cycle collector by now.
    MOZ_ASSERT(data);
    MOZ_ASSERT(data->mCollector);

    data->mCollector->SetBeforeUnlinkCallback(aCB);
}

void
nsCycleCollector_setForgetSkippableCallback(CC_ForgetSkippableCallback aCB)
{
    CollectorData* data = sCollectorData.get();

    // We should have started the cycle collector by now.
    MOZ_ASSERT(data);
    MOZ_ASSERT(data->mCollector);

    data->mCollector->SetForgetSkippableCallback(aCB);
}

void
nsCycleCollector_forgetSkippable(bool aRemoveChildlessNodes,
                                 bool aAsyncSnowWhiteFreeing)
{
    CollectorData* data = sCollectorData.get();

    // We should have started the cycle collector by now.
    MOZ_ASSERT(data);
    MOZ_ASSERT(data->mCollector);

    PROFILER_LABEL("CC", "nsCycleCollector_forgetSkippable");
    TimeLog timeLog;
    data->mCollector->ForgetSkippable(aRemoveChildlessNodes,
                                      aAsyncSnowWhiteFreeing);
    timeLog.Checkpoint("ForgetSkippable()");
}

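// ForgetSkippable is the cheap pre-collection pass: it drops certainly-live
// objects from the purple buffer and frees snow-white (refcount zero)
// objects, so the next real CC has a smaller graph to walk. A sketch of the
// kind of periodic call the browser makes between collections (the real
// timers live in nsJSEnvironment.cpp):
//
//     // Clean the purple buffer; free snow-white objects asynchronously
//     // so this event doesn't block for too long.
//     nsCycleCollector_forgetSkippable(/* aRemoveChildlessNodes = */ false,
//                                      /* aAsyncSnowWhiteFreeing = */ true);
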
void
nsCycleCollector_dispatchDeferredDeletion(bool aContinuation)
{
    CollectorData* data = sCollectorData.get();

    // This can be called before startup and after shutdown, so unlike the
    // other entry points it must not assert; just do nothing in that case.
    if (!data || !data->mRuntime) {
        return;
    }

    data->mRuntime->DispatchDeferredDeletion(aContinuation);
}

bool
nsCycleCollector_doDeferredDeletion()
{
    CollectorData* data = sCollectorData.get();

    // We should have started the cycle collector by now.
    MOZ_ASSERT(data);
    MOZ_ASSERT(data->mCollector);
    MOZ_ASSERT(data->mRuntime);

    return data->mCollector->FreeSnowWhite(false);
}

void
nsCycleCollector_collect(nsICycleCollectorListener* aManualListener)
{
    CollectorData* data = sCollectorData.get();

    // We should have started the cycle collector by now.
    MOZ_ASSERT(data);
    MOZ_ASSERT(data->mCollector);

    PROFILER_LABEL("CC", "nsCycleCollector_collect");
    SliceBudget unlimitedBudget;
    data->mCollector->Collect(ManualCC, unlimitedBudget, aManualListener);
}

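// nsCycleCollector_collect() runs a manual, all-at-once collection; passing
// a listener makes the results observable, e.g. for CC log dumps. A sketch,
// assuming the usual logger contract ID:
//
//     nsCOMPtr<nsICycleCollectorListener> logger =
//         do_CreateInstance("@mozilla.org/cycle-collector-logger;1");
//     nsCycleCollector_collect(logger);    // log this collection
//     nsCycleCollector_collect(nullptr);   // or: plain manual collection
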
void
nsCycleCollector_collectSlice(int64_t aSliceTime)
{
    CollectorData* data = sCollectorData.get();

    // We should have started the cycle collector by now.
    MOZ_ASSERT(data);
    MOZ_ASSERT(data->mCollector);

    PROFILER_LABEL("CC", "nsCycleCollector_collectSlice");
    SliceBudget budget;
    if (aSliceTime > 0) {
        // Positive: run for at most aSliceTime milliseconds.
        budget = SliceBudget::TimeBudget(aSliceTime);
    } else if (aSliceTime == 0) {
        // Zero: do the smallest possible amount of work.
        budget = SliceBudget::WorkBudget(1);
    }
    // Negative: the default-constructed budget is unlimited.
    data->mCollector->Collect(SliceCC, budget, nullptr);
}

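// Slice-budget semantics, by example (a hypothetical driver; the real
// scheduling lives in the nsJSEnvironment.cpp CC timers). An incremental
// collection is advanced by repeated slices until it completes:
//
//     nsCycleCollector_collectSlice(10);   // run for at most 10 ms
//     nsCycleCollector_collectSlice(0);    // smallest possible slice
//     nsCycleCollector_collectSlice(-1);   // no limit: finish the ICC
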
// Called when a JS GC is about to start: finish any in-progress incremental
// collection first, so the CC and GC don't interleave badly.
void
nsCycleCollector_prepareForGarbageCollection()
{
    CollectorData* data = sCollectorData.get();

    MOZ_ASSERT(data);

    if (!data->mCollector) {
        return;
    }

    data->mCollector->PrepareForGarbageCollection();
}

void
nsCycleCollector_shutdown()
{
    CollectorData* data = sCollectorData.get();

    if (data) {
        MOZ_ASSERT(data->mCollector);
        PROFILER_LABEL("CC", "nsCycleCollector_shutdown");
        data->mCollector->Shutdown();
        data->mCollector = nullptr;
        // The thread-local CollectorData is shared with the JS runtime
        // hooks; only free it once the runtime has also unregistered.
        if (!data->mRuntime) {
            delete data;
            sCollectorData.set(nullptr);
        }
    }
}
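
// Teardown sketch: late in XPCOM shutdown, the collector runs its final
// shutdown collections and this thread's collector is dropped; the
// CollectorData itself stays alive until any registered
// CycleCollectedJSRuntime has also gone away.
//
//     nsCycleCollector_shutdown();   // final collections, then release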